Test portability fix.
* js/t6044-use-test-seq:
t6044: replace seq by test_seq
/gitweb/gitweb.cgi
/gitweb/static/gitweb.js
/gitweb/static/gitweb.min.*
-/test-chmtime
-/test-ctype
-/test-config
-/test-date
-/test-delta
-/test-dump-cache-tree
-/test-dump-split-index
-/test-dump-untracked-cache
-/test-fake-ssh
-/test-scrap-cache-tree
-/test-genrandom
-/test-hashmap
-/test-index-version
-/test-line-buffer
-/test-match-trees
-/test-mergesort
-/test-mktemp
-/test-parse-options
-/test-path-utils
-/test-prio-queue
-/test-read-cache
-/test-regex
-/test-revision-walking
-/test-run-command
-/test-sha1
-/test-sha1-array
-/test-sigchain
-/test-string-list
-/test-submodule-config
-/test-subprocess
-/test-svn-fe
-/test-urlmatch-normalization
-/test-wildmatch
/common-cmds.h
*.tar.gz
*.dsc
Eric Blake <eblake@redhat.com> <ebb9@byu.net>
Eric Hanchrow <eric.hanchrow@gmail.com> <offby1@blarg.net>
Eric S. Raymond <esr@thyrsus.com>
+Eric Wong <e@80x24.org> <normalperson@yhbt.net>
Erik Faye-Lund <kusmabite@gmail.com> <kusmabite@googlemail.com>
Eyvind Bernhardsen <eyvind.bernhardsen@gmail.com> <eyvind-git@orakel.ntnu.no>
Florian Achleitner <florian.achleitner.2.6.31@gmail.com> <florian.achleitner2.6.31@gmail.com>
apt:
packages:
- language-pack-is
+ - git-svn
env:
global:
- DEVELOPER=1
- - P4_VERSION="15.2"
- - GIT_LFS_VERSION="1.1.0"
+ # The Linux build installs the defined dependency versions below.
+ # The OS X build installs the latest available versions. Keep that
+ # in mind when you encounter a broken OS X build!
+ - LINUX_P4_VERSION="16.1"
+ - LINUX_GIT_LFS_VERSION="1.2.0"
- DEFAULT_TEST_TARGET=prove
- GIT_PROVE_OPTS="--timer --jobs 3 --state=failed,slow,save"
- GIT_TEST_OPTS="--verbose --tee"
# t9816 occasionally fails with "TAP out of sequence errors" on Travis CI OS X
- GIT_SKIP_TESTS="t9810 t9816"
+matrix:
+ include:
+ - env: Documentation
+ os: linux
+ compiler: clang
+ addons:
+ apt:
+ packages:
+ - asciidoc
+ - xmlto
+ before_install:
+ before_script:
+ script: ci/test-documentation.sh
+ after_failure:
+
before_install:
- >
case "${TRAVIS_OS_NAME:-linux}" in
linux)
mkdir --parents custom/p4
pushd custom/p4
- wget --quiet http://filehost.perforce.com/perforce/r$P4_VERSION/bin.linux26x86_64/p4d
- wget --quiet http://filehost.perforce.com/perforce/r$P4_VERSION/bin.linux26x86_64/p4
+ wget --quiet http://filehost.perforce.com/perforce/r$LINUX_P4_VERSION/bin.linux26x86_64/p4d
+ wget --quiet http://filehost.perforce.com/perforce/r$LINUX_P4_VERSION/bin.linux26x86_64/p4
chmod u+x p4d
chmod u+x p4
export PATH="$(pwd):$PATH"
popd
mkdir --parents custom/git-lfs
pushd custom/git-lfs
- wget --quiet https://github.com/github/git-lfs/releases/download/v$GIT_LFS_VERSION/git-lfs-linux-amd64-$GIT_LFS_VERSION.tar.gz
- tar --extract --gunzip --file "git-lfs-linux-amd64-$GIT_LFS_VERSION.tar.gz"
- cp git-lfs-$GIT_LFS_VERSION/git-lfs .
+ wget --quiet https://github.com/github/git-lfs/releases/download/v$LINUX_GIT_LFS_VERSION/git-lfs-linux-amd64-$LINUX_GIT_LFS_VERSION.tar.gz
+ tar --extract --gunzip --file "git-lfs-linux-amd64-$LINUX_GIT_LFS_VERSION.tar.gz"
+ cp git-lfs-$LINUX_GIT_LFS_VERSION/git-lfs .
export PATH="$(pwd):$PATH"
popd
;;
QUIET_DBLATEX = @echo ' ' DBLATEX $@;
QUIET_XSLTPROC = @echo ' ' XSLTPROC $@;
QUIET_GEN = @echo ' ' GEN $@;
+ QUIET_LINT = @echo ' ' LINT $@;
QUIET_STDERR = 2> /dev/null
QUIET_SUBDIR0 = +@subdir=
QUIET_SUBDIR1 = ;$(NO_SUBDIR) echo ' ' SUBDIR $$subdir; \
print-man1:
@for i in $(MAN1_TXT); do echo $$i; done
+lint-docs::
+ $(QUIET_LINT)$(PERL_PATH) lint-gitlink.perl
+
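+# For instance, assuming this rule lives in Documentation/Makefile (as
+# it does in git.git), the link lint can be run on its own with:
+#
+#	$ make -C Documentation lint-docs
+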
.PHONY: FORCE
--- /dev/null
+Git v2.8.2 Release Notes
+========================
+
+Fixes since v2.8.1
+------------------
+
+ * The embedded args argv-array in the child process is used to build
+ the command line to run pack-objects instead of using a separate
+ array of strings.
+
+ * A bunch of tests on "git clone" have been renumbered for better
+   organization.
+
+ * The tests that involve running httpd leaked the system-wide
+ configuration in /etc/gitconfig to the tested environment.
+
+ * "index-pack --keep=<msg>" was broken since v2.1.0 timeframe.
+
+ * "git config --get-urlmatch", unlike other variants of the "git
+ config --get" family, did not signal error with its exit status
+ when there was no matching configuration.
+
+ * The "--local-env-vars" and "--resolve-git-dir" options of "git
+ rev-parse" failed to work outside a repository when the command's
+ option parsing was rewritten in 1.8.5 era.
+
+ * Fetching of history by directly naming a commit object didn't
+ work across remote-curl transport.
+
+ * A small memory leak in an error codepath has been plugged in xdiff
+ code.
+
+ * strbuf_getwholeline() did not NUL-terminate the buffer on certain
+ corner cases in its error codepath.
+
+ * The startup_info data, which records if we are working inside a
+ repository (among other things), are now uniformly available to Git
+ subcommand implementations, and Git avoids attempting to touch
+ references when we are not in a repository.
+
+ * "git mergetool" did not work well with conflicts that both sides
+ deleted.
+
+ * "git send-email" had trouble parsing an alias file in mailrc format
+   when lines in it had trailing whitespace on them.
+
+ * When "git merge --squash" stopped due to conflict, the concluding
+ "git commit" failed to read in the SQUASH_MSG that shows the log
+ messages from all the squashed commits.
+
+ * "git merge FETCH_HEAD" dereferenced NULL pointer when merging
+ nothing into an unborn history (which is arguably unusual usage,
+ which perhaps was the reason why nobody noticed it).
+
+ * Build updates for MSVC.
+
+ * "git diff -M" used to work better when two originally identical
+ files A and B got renamed to X/A and X/B by pairing A to X/A and B
+ to X/B, but this was broken in the 2.0 timeframe.
+
+ * "git send-pack --all <there>" was broken when its command line
+ option parsing was written in the 2.6 timeframe.
+
+ * When running "git blame $path" with unnormalized data in the index
+ for the path, the data in the working tree was blamed, even though
+ "git add" would not have changed what is already in the index, due
+ to "safe crlf" that disables the line-end conversion. It has been
+ corrected.
+
+Also contains minor documentation updates and code clean-ups.
--- /dev/null
+Git v2.8.3 Release Notes
+========================
+
+Fixes since v2.8.2
+------------------
+
+ * "git send-email" now uses more readable timestamps when
+ formulating a message ID.
+
+ * The repository set-up sequence has been streamlined (the biggest
+ change is that there is no longer git_config_early()), so that we
+ do not attempt to look into refs/* when we know we do not have a
+ Git repository.
+
+ * When "git worktree" feature is in use, "git branch -d" allowed
+   deletion of a branch that is checked out in another worktree,
+   which was wrong.
+
+ * When "git worktree" feature is in use, "git branch -m" renamed a
+ branch that is checked out in another worktree without adjusting
+ the HEAD symbolic ref for the worktree.
+
+ * "git format-patch --help" showed `-s` and `--no-patch` as if these
+ are valid options to the command. We already hide `--patch` option
+ from the documentation, because format-patch is about showing the
+ diff, and the documentation now hides these options as well.
+
+ * A change back in version 2.7 to "git branch" broke display of a
+ symbolic ref in a non-standard place in the refs/ hierarchy (we
+ expect symbolic refs to appear in refs/remotes/*/HEAD to point at
+ the primary branch the remote has, and as .git/HEAD to point at the
+ branch we locally checked out).
+
+ * A partial rewrite of "git submodule" in the 2.7 timeframe changed
+   the way the gitdir: pointer in a submodule points at the real
+ repository location to use absolute paths by accident. This has
+ been corrected.
+
+ * "git commit" misbehaved in a few minor ways when an empty message
+   is given via -m '', all of which have been corrected.
+
+ * Support for CRAM-MD5 authentication method in "git imap-send" did
+ not work well.
+
+ * The socks5:// proxy support added back in 2.6.4 days was not aware
+ that socks5h:// proxies behave differently.
+
+ * "git config" had a codepath that tried to pass a NULL to
+ printf("%s"), which nobody seems to have noticed.
+
+ * On Cygwin, object creation uses the "create a temporary and then
+ rename it to the final name" pattern, not "create a temporary,
+ hardlink it to the final name and then unlink the temporary"
+ pattern.
+
+ This is necessary to use Git on Windows shared directories, and is
+ already enabled for the MinGW and plain Windows builds. It also
+ has been used in Cygwin packaged versions of Git for quite a while.
+ See http://thread.gmane.org/gmane.comp.version-control.git/291853
+ and http://thread.gmane.org/gmane.comp.version-control.git/275680.
+
+ * "git replace -e" did not honour "core.editor" configuration.
+
+ * Upcoming OpenSSL 1.1.0 will break compilation by updating a few APIs
+ we use in imap-send, which has been adjusted for the change.
+
+ * "git submodule" reports the paths of submodules the command
+ recurses into, but this was incorrect when the command was not run
+ from the root level of the superproject.
+
+ * The test scripts for "git p4" (but not "git p4" implementation
+   itself) have been updated so that they would work even on a system
+ where the installed version of Python is python 3.
+
+ * The "user.useConfigOnly" configuration variable makes it an error
+ if users do not explicitly set user.name and user.email. However,
+ its check was not done early enough and allowed another error to
+ trigger, reporting that the default value we guessed from the
+ system setting was unusable. This was a suboptimal end-user
+ experience as we want the users to set user.name/user.email without
+ relying on the auto-detection at all.
+
+ * "git mv old new" did not adjust the path for a submodule that lives
+ as a subdirectory inside old/ directory correctly.
+
+ * "git push" from a corrupt repository that attempts to push a large
+ number of refs deadlocked; the thread to relay rejection notices
+ for these ref updates blocked on writing them to the main thread,
+ after the main thread at the receiving end notices that the push
+ failed and decides not to read these notices and return a failure.
+
+ * A question by "git send-email" to ask the identity of the sender
+ has been updated.
+
+ * Recent update to Git LFS broke "git p4" by changing the output from
+ its "lfs pointer" subcommand.
+
+ * Some multi-byte encodings can have a backslash byte as a later part
+ of one letter, which would confuse "highlight" filter used in
+ gitweb.
+
+Also contains minor documentation updates and code clean-ups.
--- /dev/null
+Git v2.8.4 Release Notes
+========================
+
+Fixes since v2.8.3
+------------------
+
+ * Documentation for "git merge --verify-signatures" has been updated
+ to clarify that the signature of only the commit at the tip is
+ verified. Also the phrasing used for signature and key validity is
+ adjusted to align with that used by OpenPGP.
+
+ * On Windows, .git and optionally any files whose name starts with a
+ dot are now marked as hidden, with a core.hideDotFiles knob to
+ customize this behaviour.
+
+ * Portability enhancement for "rebase -i" to help platforms whose
+ shell does not like "for i in <empty>" (which is not POSIX-kosher).
+
+ * "git fsck" learned to catch NUL byte in a commit object as
+ potential error and warn.
+
+ * CI test was taught to build documentation pages.
+
+ * Many 'linkgit:<git documentation page>' references were broken,
+   all of which have been fixed.
+
+Also contains other minor documentation updates and code clean-ups.
by default forbidden now to prevent creating such an unusual merge by
mistake.
+The output formats of "git log" that indent the commit log message by
+4 spaces now expand HT in the log message by default. You can use
+the "--no-expand-tabs" option to disable this.
+
+"git commit-tree" plumbing command required the user to always sign
+its result when the user sets the commit.gpgsign configuration
+variable, which was an ancient mistake that this release corrects.
+A script that drives commit-tree, if it relies on this mistake, now
+needs to read commit.gpgsign and pass the -S option as necessary.
+
Updates since v2.8
------------------
UI, Workflows & Features
+ * Comes with git-multimail 1.3.1 (in contrib/).
+
* The end-user facing Porcelain level commands like "diff" and "log"
   now enable rename detection by default.
default, with an escape hatch "--allow-unrelated-histories" option
to be used in a rare event that merges histories of two projects
that started their lives independently.
- (merge e379fdf jc/merge-refuse-new-root later to maint).
+
+ * "git pull" has been taught to pass --allow-unrelated-histories
+ option to underlying "git merge".
* "git apply -v" learned to report paths in the patch that were
skipped via --include/--exclude mechanism or being outside the
current working directory.
- (merge 3f57944 nd/apply-report-skip later to maint).
+
+ * Shell completion (in contrib/) updates.
+
+ * The commit object name reported when "rebase -i" stops has been
+ shortened.
+
+ * "git worktree add" can be given "--no-checkout" option to only
+ create an empty worktree without checking out the files.
+
+ * "git mergetools" learned to drive ExamDiff.
+
+ * "git pull --rebase" learned "--[no-]autostash" option, so that
+ the rebase.autostash configuration variable set to true can be
+ overridden from the command line.
+
+ * When "git log" shows the log message indented by 4-spaces, the
+ remainder of a line after a HT does not align in the way the author
+ originally intended. The command now expands tabs by default in
+ such a case, and allows the users to override it with a new option,
+ "--no-expand-tabs".
+
+ * "git send-email" now uses more readable timestamps when
+ formulating a message ID.
+
+ * "git rerere" can encounter two or more files with the same conflict
+ signature that have to be resolved in different ways, but there was
+ no way to record these separate resolutions.
+ (merge d9d501b068 jc/rerere-multi later to maint).
+
+ * "git p4" learned to record P4 jobs in Git commit that imports from
+ the history in Perforce.
+
+ * "git describe --contains" often made a hard-to-justify choice of
+ tag to give name to a given commit, because it tried to come up
+   with a name with the smallest number of hops from a tag, causing an
+   old commit whose close descendant was recently tagged to be
+   described with respect to a newer tag instead of an old tag. It did
+ not help that its computation of "hop" count was further tweaked to
+ penalize being on a side branch of a merge. The logic has been
+ updated to favor using the tag with the oldest tagger date, which
+ is a lot easier to explain to the end users: "We describe a commit
+ in terms of the (chronologically) oldest tag that contains the
+ commit."
+ (merge 7550424 js/name-rev-use-oldest-ref later to maint).
+
+ * "git clone" learned "--shallow-submodules" option.
+
+ * HTTP transport clients learned to throw extra HTTP headers at the
+ server, specified via http.extraHeader configuration variable.
+
+ * Patch output from "git diff" and friends has been tweaked to be
+ more readable by using a blank line as a strong hint that the
+ contents before and after it belong to a logically separate unit.
+
+ * A new configuration variable core.hooksPath allows customizing
+ where the hook directory is.
+
+ * An earlier addition of "sanitize_submodule_env" with 14111fc4 (git:
+ submodule honor -c credential.* from command line, 2016-02-29)
+ turned out to be a convoluted no-op; implement what it wanted to do
+ correctly, and stop filtering settings given via "git -c var=val".
+
+ * "git commit --dry-run" reported "No, no, you cannot commit." in one
+   case where "git commit" would have allowed you to commit, and this
+   improves it a little bit. This is a stop-gap measure in that
+   "git commit --dry-run --short" still gives an incorrect result.
+
+ * The experimental "multiple worktree" feature gains more safety to
+ forbid operations on a branch that is checked out or being actively
+ worked on elsewhere, by noticing that e.g. it is being rebased.
+
+ * "git format-patch" learned a new "--base" option to record what
+ (public, well-known) commit the original series was built on in
+ its output.
+
+ * "git commit" learned to pay attention to "commit.verbose"
+ configuration variable and act as if "--verbose" option was
+ given from the command line.
Performance, Internal Implementation, Development Support etc.
* The embedded args argv-array in the child process is used to build
the command line to run pack-objects instead of using a separate
array of strings.
- (merge 65a3629 mp/upload-pack-use-embedded-args later to maint).
* A test for tags has been restructured so that more parts of it can
easily be run on a platform without a working GnuPG.
repository (among other things), are now uniformly available to Git
subcommand implementations, and Git avoids attempting to touch
references when we are not in a repository.
- (merge 11e6b3f jk/startup-info later to maint).
* The command line argument parser for "receive-pack" has been
rewritten to use parse-options.
parallel.
 * Rename a bunch of tests on "git clone" for better organization.
- (merge 8fbb03a sb/clone-t57-t56 later to maint).
* The tests that involve running httpd leaked the system-wide
configuration in /etc/gitconfig to the tested environment.
- (merge 1fad503 jk/test-httpd-config-nosystem later to maint).
* Build updates for MSVC.
- (merge 0ef60af ss/msvc later to maint).
+
+ * The repository set-up sequence has been streamlined (the biggest
+ change is that there is no longer git_config_early()), so that we
+ do not attempt to look into refs/* when we know we do not have a
+ Git repository.
+
+ * Code restructuring around the "refs" area to prepare for pluggable
+ refs backends.
+
+ * Sources to many test helper binaries (and the generated helpers)
+ have been moved to t/helper/ subdirectory to reduce clutter at the
+ top level of the tree.
+
+ * Unify internal logic between "git tag -v" and "git verify-tag"
+ commands by making one directly call into the other.
+ (merge bef234b st/verify-tag later to maint).
+
+ * "merge-recursive" strategy incorrectly checked if a path that is
+ involved in its internal merge exists in the working tree.
+
+ * The test scripts for "git p4" (but not "git p4" implementation
+   itself) have been updated so that they would work even on a system
+ where the installed version of Python is python 3.
+
+ * As nobody maintains our in-tree git.spec.in and distros use their
+ own spec file, we stopped pretending that we support "make rpm".
+
+ * Move from unsigned char[20] to struct object_id continues.
+
+ * Update of "git submodule" to move pieces of logic to C continues.
+
+ * The code for warning_errno/die_errno has been refactored and a new
+ error_errno() reporting helper is introduced.
+ (merge 1da045f nd/error-errno later to maint).
+
+ * Running tests with '-x' option to trace the individual command
+ executions is a useful way to debug test scripts, but some tests
+ that capture the standard error stream and check what the command
+ said can be broken with the trace output mixed in. When running
+ our tests under "bash", however, we can redirect the trace output
+ to another file descriptor to keep the standard error of programs
+ being tested intact.
+ (merge d88785e jk/test-send-sh-x-trace-elsewhere later to maint).
+
+ * t0040 had too many unnecessary repetitions in its test data. Teach
+   the test-parse-options program so that a caller can tell what it
+ expects in its output, so that these repetitions can be cleaned up.
+
+ * Add a perf test for "rebase -i".
+
+ * Common mistakes when writing gitlink: in our documentation are
+ found by "make check-docs".
+
+ * t9xxx series has been updated primarily for readability, while
+ fixing small bugs in it. A few scripted Porcelains have also been
+ updated to fix possible bugs around their use of "test -z" and
+ "test -n".
+
+ * CI test was taught to run git-svn tests.
+
Also contains various documentation updates and code clean-ups.
* "git config --get-urlmatch", unlike other variants of the "git
config --get" family, did not signal error with its exit status
when there was no matching configuration.
- (merge 24990b2 jk/config-get-urlmatch later to maint).
* The "--local-env-vars" and "--resolve-git-dir" options of "git
rev-parse" failed to work outside a repository when the command's
option parsing was rewritten in 1.8.5 era.
- (merge fc7d47f jk/rev-parse-local-env-vars later to maint).
* "git index-pack --keep[=<msg>] pack-$name.pack" simply did not work.
- (merge 0e94242 jc/maint-index-pack-keep later to maint).
 * Fetching of history by directly naming a commit object didn't
work across remote-curl transport.
- (merge 754ecb1 gf/fetch-pack-direct-object-fetch later to maint).
* A small memory leak in an error codepath has been plugged in xdiff
code.
- (merge 87f1625 rj/xdiff-prepare-plug-leak-on-error-codepath later to maint).
* strbuf_getwholeline() did not NUL-terminate the buffer on certain
corner cases in its error codepath.
- (merge b709043 jk/getwholeline-getdelim-empty later to maint).
* "git mergetool" did not work well with conflicts that both sides
deleted.
- (merge a298604 da/mergetool-delete-delete-conflict later to maint).
 * "git send-email" had trouble parsing an alias file in mailrc format
   when lines in it had trailing whitespace on them.
- (merge a277d1e jk/send-email-rtrim-mailrc-alias later to maint).
* When "git merge --squash" stopped due to conflict, the concluding
"git commit" failed to read in the SQUASH_MSG that shows the log
messages from all the squashed commits.
- (merge b64c1e0 ss/commit-squash-msg later to maint).
* "git merge FETCH_HEAD" dereferenced NULL pointer when merging
nothing into an unborn history (which is arguably unusual usage,
which perhaps was the reason why nobody noticed it).
- (merge b84e65d jv/merge-nothing-into-void later to maint).
+
+ * When "git worktree" feature is in use, "git branch -d" allowed
+ deletion of a branch that is checked out in another worktree,
+ which was wrong.
+
+ * When "git worktree" feature is in use, "git branch -m" renamed a
+ branch that is checked out in another worktree without adjusting
+ the HEAD symbolic ref for the worktree.
+
+ * "git diff -M" used to work better when two originally identical
+ files A and B got renamed to X/A and X/B by pairing A to X/A and B
+ to X/B, but this was broken in the 2.0 timeframe.
+
+ * "git send-pack --all <there>" was broken when its command line
+ option parsing was written in the 2.6 timeframe.
+
+ * "git format-patch --help" showed `-s` and `--no-patch` as if these
+ are valid options to the command. We already hide `--patch` option
+ from the documentation, because format-patch is about showing the
+ diff, and the documentation now hides these options as well.
+
+ * When running "git blame $path" with unnormalized data in the index
+ for the path, the data in the working tree was blamed, even though
+ "git add" would not have changed what is already in the index, due
+ to "safe crlf" that disables the line-end conversion. It has been
+ corrected.
+
+ * A change back in version 2.7 to "git branch" broke display of a
+ symbolic ref in a non-standard place in the refs/ hierarchy (we
+ expect symbolic refs to appear in refs/remotes/*/HEAD to point at
+ the primary branch the remote has, and as .git/HEAD to point at the
+ branch we locally checked out).
+
+ * A partial rewrite of "git submodule" in the 2.7 timeframe changed
+   the way the gitdir: pointer in a submodule points at the real
+ repository location to use absolute paths by accident. This has
+ been corrected.
+
+ * "git commit" misbehaved in a few minor ways when an empty message
+   is given via -m '', all of which have been corrected.
+
+ * Support for CRAM-MD5 authentication method in "git imap-send" did
+ not work well.
+
+ * Upcoming OpenSSL 1.1.0 will break compilation by updating a few APIs
+ we use in imap-send, which has been adjusted for the change.
+
+ * The socks5:// proxy support added back in 2.6.4 days was not aware
+ that socks5h:// proxies behave differently.
+
+ * "git config" had a codepath that tried to pass a NULL to
+ printf("%s"), which nobody seems to have noticed.
+
+ * On Cygwin, object creation uses the "create a temporary and then
+ rename it to the final name" pattern, not "create a temporary,
+ hardlink it to the final name and then unlink the temporary"
+ pattern.
+
+ This is necessary to use Git on Windows shared directories, and is
+ already enabled for the MinGW and plain Windows builds. It also
+ has been used in Cygwin packaged versions of Git for quite a while.
+ See http://thread.gmane.org/gmane.comp.version-control.git/291853
+
+ * "merge-octopus" strategy did not ensure that the index is clean
+   when the merge begins.
+
+ * When "git merge" notices that the merge can be resolved purely at
+ the tree level (without having to merge blobs) and the resulting
+ tree happens to already exist in the object store, it forgot to
+   update the index, which led to an inconsistent state for later
+ operations.
+
+ * "git submodule" reports the paths of submodules the command
+ recurses into, but this was incorrect when the command was not run
+ from the root level of the superproject.
+
+ * The "user.useConfigOnly" configuration variable makes it an error
+ if users do not explicitly set user.name and user.email. However,
+ its check was not done early enough and allowed another error to
+ trigger, reporting that the default value we guessed from the
+ system setting was unusable. This was a suboptimal end-user
+ experience as we want the users to set user.name/user.email without
+ relying on the auto-detection at all.
+
+ * "git mv old new" did not adjust the path for a submodule that lives
+ as a subdirectory inside old/ directory correctly.
+
+ * "git replace -e" did not honour "core.editor" configuration.
+
+ * "git push" from a corrupt repository that attempts to push a large
+ number of refs deadlocked; the thread to relay rejection notices
+ for these ref updates blocked on writing them to the main thread,
+ after the main thread at the receiving end notices that the push
+ failed and decides not to read these notices and return a failure.
+
+ * mmap emulation on Windows has been optimized and works better without
+ consuming paging store when not needed.
+
+ * A question by "git send-email" to ask the identity of the sender
+ has been updated.
+
+ * UI consistency improvements for "git mergetool".
+
+ * "git rebase -m" could be asked to rebase an entire branch starting
+ from the root, but failed by assuming that there always is a parent
+ commit to the first commit on the branch.
+ (merge 79f4344 bw/rebase-merge-entire-branch later to maint).
+
+ * Fix a broken "p4 lfs" test.
+
+ * Recent update to Git LFS broke "git p4" by changing the output from
+ its "lfs pointer" subcommand.
+
+ * "git fetch" test t5510 was flaky while running a (forced) automagic
+ garbage collection.
+
+ * Documentation updates to help contributors setting up Travis CI
+ test for their patches.
+
+ * Some multi-byte encodings can have a backslash byte as a later part
+ of one letter, which would confuse "highlight" filter used in
+ gitweb.
+
+ * "git commit-tree" plumbing command required the user to always sign
+ its result when the user sets the commit.gpgsign configuration
+ variable, which was an ancient mistake. Rework "git rebase" that
+   relied on this mistake so that it reads commit.gpgsign and passes
+   (or does not pass) the -S option to "git commit-tree" to keep the end-user
+ expectation the same, while teaching "git commit-tree" to ignore
+ the configuration variable. This will stop requiring the users to
+ sign commit objects used internally as an implementation detail of
+ "git stash".
+ (merge 6694856 jc/commit-tree-ignore-commit-gpgsign later to maint).
+
+ * "http.cookieFile" configuration variable clearly wants a pathname,
+ but we forgot to treat it as such by e.g. applying tilde expansion.
+ (merge e5a39ad bn/http-cookiefile-config later to maint).
+
+ * Consolidate description of tilde-expansion that is done to
+ configuration variables that take pathname to a single place.
+ (merge dca83ab jc/config-pathname-type later to maint).
+
+ * Correct faulty recommendation to use "git submodule deinit ." when
+ de-initialising all submodules, which would result in a strange
+ error message in a pathological corner case.
+ (merge f6a5279 sb/submodule-deinit-all later to maint).
+
+ * Many 'linkgit:<git documentation page>' references were broken,
+   all of which have been fixed.
+ (merge 1cca17d jc/linkgit-fix later to maint).
+
+ * "git rerere" can get confused by conflict markers deliberately left
+ by the inner merge step, because they are indistinguishable from
+ the real conflict markers left by the outermost merge which are
+ what the end user and "rerere" need to look at. This was fixed by
+ making the conflict markers left by the inner merges a bit longer.
+ (merge 0f9fd5c jc/ll-merge-internal later to maint).
+
+ * CI test was taught to build documentation pages.
+ (merge b98712b ls/travis-build-doc later to maint).
+
+ * "git fsck" learned to catch NUL byte in a commit object as
+ potential error and warn.
+ (merge 6d2d780 jc/fsck-nul-in-commit later to maint).
+
+ * Portability enhancement for "rebase -i" to help platforms whose
+ shell does not like "for i in <empty>" (which is not POSIX-kosher).
+ (merge 8e98b35 jk/rebase-interative-eval-fix later to maint).
+
+ * On Windows, .git and optionally any files whose name starts with a
+ dot are now marked as hidden, with a core.hideDotFiles knob to
+ customize this behaviour.
+ (merge ebf31e7 js/windows-dotgit later to maint).
+
+ * Documentation for "git merge --verify-signatures" has been updated
+ to clarify that the signature of only the commit at the tip is
+ verified. Also the phrasing used for signature and key validity is
+ adjusted to align with that used by OpenPGP.
+ (merge 05a5869 kf/gpg-sig-verification-doc later to maint).
+
+ * A couple of bugs around core.autocrlf have been fixed.
+ (merge caa47ad tb/core-eol-fix later to maint).
+
+ * Many commands normalize command line arguments from NFD to NFC
+ variant of UTF-8 on OSX, but commands in the "diff" family did
+ not, causing "git diff $path" to complain that no such path is
+ known to Git. They have been taught to do the normalization.
+ (merge 90a78b8 ar/diff-args-osx-precompose later to maint).
+
+ * "git difftool" learned to handle unmerged paths correctly in
+ dir-diff mode.
+ (merge 366f9ce da/difftool later to maint).
* Other minor clean-ups and documentation updates
- (merge aed7480 mm/lockfile-error-message later to maint).
- (merge bfee614 jc/index-pack later to maint).
- (merge f870899 ss/exc-flag-is-a-collection-of-bits later to maint).
- (merge dde7891 pb/t7502-drop-dup later to maint).
- (merge 3bd1b51 cc/doc-recommend-performance-trace-to-file later to maint).
- (merge 7d5e9c9 jk/credential-cache-comment-exit later to maint).
- (merge 16a86d4 nd/apply-doc later to maint).
- (merge c3f6b85 pb/opt-cmdmode-doc later to maint).
- (merge 30211fb oa/doc-diff-check later to maint).
+ (merge 832c0e5 lp/typofixes later to maint).
+ (merge f5ee54a sb/z-is-gnutar-ism later to maint).
+ (merge 2e3926b va/i18n-misc-updates later to maint).
+ (merge f212dcc bn/config-doc-tt-varnames later to maint).
+ (merge f54bea4 nd/remote-plural-ours-plus-theirs later to maint).
+ (merge 2bb0518 ak/t4151-ls-files-could-be-empty later to maint).
+ (merge 4df4313 jc/test-seq later to maint).
+ (merge a75a308 tb/t5601-sed-fix later to maint).
+ (merge 6c1fbe1 va/i18n-remote-comment-to-align later to maint).
+ (merge dee2303 va/mailinfo-doc-typofix later to maint).
t/README for guidance.
When adding a new feature, make sure that you have new tests to show
-the feature triggers the new behaviour when it should, and to show the
-feature does not trigger when it shouldn't. Also make sure that the
-test suite passes after your commit. Do not forget to update the
-documentation to describe the updated behaviour.
-
-Speaking of the documentation, it is currently a liberal mixture of US
-and UK English norms for spelling and grammar, which is somewhat
-unfortunate. A huge patch that touches the files all over the place
-only to correct the inconsistency is not welcome, though. Potential
-clashes with other changes that can result from such a patch are not
-worth it. We prefer to gradually reconcile the inconsistencies in
-favor of US English, with small and easily digestible patches, as a
-side effect of doing some other real work in the vicinity (e.g.
-rewriting a paragraph for clarity, while turning en_UK spelling to
-en_US). Obvious typographical fixes are much more welcomed ("teh ->
-"the"), preferably submitted as independent patches separate from
-other documentation changes.
+the feature triggers the new behavior when it should, and to show the
+feature does not trigger when it shouldn't. After any code change, make
+sure that the entire test suite passes.
+
+If you have an account at GitHub (and you can get one for free to work
+on open source projects), you can use their Travis CI integration to
+test your changes on Linux, Mac (and hopefully soon Windows). See the
+GitHub-Travis CI hints section for details.
+
+Do not forget to update the documentation to describe the updated
+behavior and make sure that the resulting documentation set formats
+well. It is currently a liberal mixture of US and UK English norms for
+spelling and grammar, which is somewhat unfortunate. A huge patch that
+touches the files all over the place only to correct the inconsistency
+is not welcome, though. Potential clashes with other changes that can
+result from such a patch are not worth it. We prefer to gradually
+reconcile the inconsistencies in favor of US English, with small and
+easily digestible patches, as a side effect of doing some other real
+work in the vicinity (e.g. rewriting a paragraph for clarity, while
+turning en_UK spelling to en_US). Obvious typographical fixes are much
+more welcomed ("teh" -> "the"), preferably submitted as independent
+patches separate from other documentation changes.
Oh, another thing. We are picky about whitespaces. Make sure your
changes do not trigger errors with the sample pre-commit hook shipped
entitled "What's cooking in git.git" and "What's in git.git" giving
the status of various proposed changes.
+--------------------------------------------------
+GitHub-Travis CI hints
+
+With an account at GitHub (you can get one for free to work on open
+source projects), you can use Travis CI to test your changes on Linux,
+Mac (and hopefully soon Windows). You can find a successful example
+test build here: https://travis-ci.org/git/git/builds/120473209
+
+Follow these steps for the initial setup:
+
+ (1) Fork https://github.com/git/git to your GitHub account.
+ You can find detailed instructions how to fork here:
+ https://help.github.com/articles/fork-a-repo/
+
+ (2) Open the Travis CI website: https://travis-ci.org
+
+ (3) Press the "Sign in with GitHub" button.
+
+ (4) Grant Travis CI permissions to access your GitHub account.
+ You can find more information about the required permissions here:
+ https://docs.travis-ci.com/user/github-oauth-scopes
+
+ (5) Open your Travis CI profile page: https://travis-ci.org/profile
+
+ (6) Enable Travis CI builds for your Git fork.
+
+After the initial setup, Travis CI will run whenever you push new changes
+to your fork of Git on GitHub. You can monitor the test state of all your
+branches here: https://travis-ci.org/<Your GitHub handle>/git/branches
+
+If a branch did not pass all test cases then it is marked with a red
+cross. In that case you can click on the failing Travis CI job and
+scroll all the way down in the log. Find the line "<-- Click here to see
+detailed test output!" and click on the triangle next to the log line
+number to expand the detailed test output. Here is such a failing
+example: https://travis-ci.org/git/git/jobs/122676187
+
+Fix the problem and push your fix to your Git fork. This will trigger
+a new Travis CI build to ensure all tests pass.
+
+
------------------------------------------------
MUA specific hints
You can include one config file from another by setting the special
`include.path` variable to the name of the file to be included. The
+variable takes a pathname as its value, and is subject to tilde
+expansion.
+
+The
included file is expanded immediately, as if its contents had been
found at the location of the include directive. If the value of the
`include.path` variable is a relative path, the path is considered to be
relative to the configuration file in which the include directive was
-found. The value of `include.path` is subject to tilde expansion: `~/`
-is expanded to the value of `$HOME`, and `~user/` to the specified
-user's home directory. See below for examples.
+found. See below for examples.
+
Example
~~~~~~~
[include]
path = /path/to/foo.inc ; include by absolute path
path = foo ; expand "foo" relative to the current file
- path = ~/foo ; expand "foo" in your $HOME directory
+ path = ~/foo ; expand "foo" in your `$HOME` directory
Values
list of branch names in `log --decorate` output) is set to be
painted with `bold` or some other attribute.
+pathname::
+ A variable that takes a pathname value can be given a
+ string that begins with "`~/`" or "`~user/`", and the usual
+ tilde expansion happens to such a string: `~/`
+ is expanded to the value of `$HOME`, and `~user/` to the
+ specified user's home directory.
+
Variables
~~~~~~~~~
+
The default is true (when core.filemode is not specified in the config file).
+core.hideDotFiles::
+ (Windows-only) If true, mark newly-created directories and files whose
+ name starts with a dot as hidden. If 'dotGitOnly', only the `.git/`
+ directory is hidden, but no other files starting with a dot. The
+ default mode is 'dotGitOnly'.
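+
+A configuration sketch: hiding only the `.git/` directory (the
+documented default) can be requested explicitly with:
+
+	git config core.hideDotFiles dotGitOnly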
+
core.ignoreCase::
If true, this option enables various workarounds to enable
Git to work better on filesystems that are not case sensitive,
core.eol::
Sets the line ending type to use in the working directory for
- files that have the `text` property set. Alternatives are
- 'lf', 'crlf' and 'native', which uses the platform's native
- line ending. The default value is `native`. See
+ files that have the `text` property set when core.autocrlf is false.
+ Alternatives are 'lf', 'crlf' and 'native', which uses the platform's
+ native line ending. The default value is `native`. See
linkgit:gitattributes[5] for more information on end-of-line
conversion.
core.logAllRefUpdates::
Enable the reflog. Updates to a ref <ref> is logged to the file
- "$GIT_DIR/logs/<ref>", by appending the new and old
+ "`$GIT_DIR/logs/<ref>`", by appending the new and old
SHA-1, the date/time and the reason of the update, but
only when the file exists. If this configuration
- variable is set to true, missing "$GIT_DIR/logs/<ref>"
+ variable is set to true, missing "`$GIT_DIR/logs/<ref>`"
file is automatically created for branch heads (i.e. under
refs/heads/), remote refs (i.e. under refs/remotes/),
note refs (i.e. under refs/notes/), and the symbolic ref HEAD.
Common unit suffixes of 'k', 'm', or 'g' are supported.
core.excludesFile::
- In addition to '.gitignore' (per-directory) and
- '.git/info/exclude', Git looks into this file for patterns
- of files which are not meant to be tracked. "`~/`" is expanded
- to the value of `$HOME` and "`~user/`" to the specified user's
- home directory. Its default value is $XDG_CONFIG_HOME/git/ignore.
- If $XDG_CONFIG_HOME is either not set or empty, $HOME/.config/git/ignore
+ Specifies the pathname to the file that contains patterns to
+ describe paths that are not meant to be tracked, in addition
+ to '.gitignore' (per-directory) and '.git/info/exclude'.
+ Defaults to `$XDG_CONFIG_HOME/git/ignore`.
+ If `$XDG_CONFIG_HOME` is either not set or empty, `$HOME/.config/git/ignore`
is used instead. See linkgit:gitignore[5].
core.askPass::
'.git/info/attributes', Git looks into this file for attributes
(see linkgit:gitattributes[5]). Path expansions are made the same
way as for `core.excludesFile`. Its default value is
- $XDG_CONFIG_HOME/git/attributes. If $XDG_CONFIG_HOME is either not
- set or empty, $HOME/.config/git/attributes is used instead.
+ `$XDG_CONFIG_HOME/git/attributes`. If `$XDG_CONFIG_HOME` is either not
+ set or empty, `$HOME/.config/git/attributes` is used instead.
+
+core.hooksPath::
+ By default Git will look for your hooks in the
+	'$GIT_DIR/hooks' directory. Set this to a different path,
+ e.g. '/etc/git/hooks', and Git will try to find your hooks in
+ that directory, e.g. '/etc/git/hooks/pre-receive' instead of
+ in '$GIT_DIR/hooks/pre-receive'.
++
+The path can be either absolute or relative. A relative path is
+taken as relative to the directory where the hooks are run (see
+the "DESCRIPTION" section of linkgit:githooks[5]).
++
+This configuration variable is useful in cases where you'd like to
+centrally configure your Git hooks instead of configuring them on a
+per-repository basis, or as a more flexible and centralized
+alternative to having an `init.templateDir` where you've changed
+default hooks.
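+
+For example, pointing every repository at a centrally managed hooks
+directory (the path below is only an illustration) could look like:
+
+	git config --global core.hooksPath /etc/git/hooks
+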
core.editor::
Commands such as `commit` and `tag` that lets you edit
message. Defaults to true.
commit.template::
- Specify a file to use as the template for new commit messages.
- "`~/`" is expanded to the value of `$HOME` and "`~user/`" to the
- specified user's home directory.
+ Specify the pathname of a file to use as the template for
+ new commit messages.
+
+commit.verbose::
+	A boolean or int to specify the level of verbosity with `git commit`.
+ See linkgit:git-commit[1].
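+
+For example, to always include the diff in the commit message template,
+as if `--verbose` were passed on the command line:
+
+	git config --global commit.verbose true
+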
credential.helper::
Specify an external helper to be called when a username or
Set a custom directory to store the resulting files instead of the
current working directory.
+format.useAutoBase::
+ A boolean value which lets you enable the `--base=auto` option of
+ format-patch by default.
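+
+For example, to make "git format-patch" behave as if `--base=auto` were
+always given:
+
+	git config format.useAutoBase true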
+
filter.<driver>.clean::
The command which is used to convert the content of a worktree
file to a blob upon checkin. See linkgit:gitattributes[5] for
'git worktree prune --expire 3.months.ago'.
This config variable can be used to set a different grace
period. The value "now" may be used to disable the grace
- period and prune $GIT_DIR/worktrees immediately, or "never"
+ period and prune `$GIT_DIR/worktrees` immediately, or "never"
may be used to suppress pruning.
gc.reflogExpire::
is executed outside of a git repository. Defaults to false.
gpg.program::
- Use this custom program instead of "gpg" found on $PATH when
+ Use this custom program instead of "`gpg`" found on `$PATH` when
making or verifying a PGP signature. The program must support the
same command-line interface as GPG, namely, to verify a detached
- signature, "gpg --verify $file - <$signature" is run, and the
+ signature, "`gpg --verify $file - <$signature`" is run, and the
program is expected to signal a good signature by exiting with
code 0, and to generate an ASCII-armored detached signature, the
- standard input of "gpg -bsau $key" is fed with the contents to be
+ standard input of "`gpg -bsau $key`" is fed with the contents to be
signed, and the program is expected to send the result to its
standard output.
made by the linkgit:git-gui[1]. The default is "5".
gui.displayUntracked::
- Determines if linkgit::git-gui[1] shows untracked files
+ Determines if linkgit:git-gui[1] shows untracked files
in the file list. The default is "true".
gui.encoding::
a username in the URL, as libcurl normally requires a username for
authentication.
+http.extraHeader::
+ Pass an additional HTTP header when communicating with a server. If
+ more than one such entry exists, all of them are added as extra
+ headers. To allow overriding the settings inherited from the system
+ config, an empty value will reset the extra headers to the empty list.
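+
+For example, an extra header (the header name and value here are only
+placeholders) can be sent for a single command without editing any
+configuration file:
+
+	git -c http.extraHeader="X-Example-Token: 12345" fetch origin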
+
http.cookieFile::
- File containing previously stored cookie lines which should be used
+ The pathname of a file containing previously stored cookie lines,
+ which should be used
in the Git http session, if they match the server. The file format
of the file to read cookies from should be plain HTTP headers or
- the Netscape/Mozilla cookie file format (see linkgit:curl[1]).
- NOTE that the file specified with http.cookieFile is only used as
+ the Netscape/Mozilla cookie file format (see `curl(1)`).
+ NOTE that the file specified with http.cookieFile is used only as
input unless http.saveCookies is set.
http.saveCookies::
command. If 'short' is specified, the ref name prefixes 'refs/heads/',
'refs/tags/' and 'refs/remotes/' will not be printed. If 'full' is
specified, the full ref name (including prefix) will be printed.
- This is the same as the log commands '--decorate' option.
+ If 'auto' is specified, then if the output is going to a terminal,
+ the ref names are shown as if 'short' were given, otherwise no ref
+ names are shown. This is the same as the '--decorate' option
+	of `git log`.
log.follow::
If `true`, `git log` will act as if the `--follow` option was used when
The maximum size of a pack. This setting only affects
packing to a file when repacking, i.e. the git:// protocol
is unaffected. It can be overridden by the `--max-pack-size`
- option of linkgit:git-repack[1]. The minimum size allowed is
- limited to 1 MiB. The default is unlimited.
+ option of linkgit:git-repack[1]. Reaching this limit results
+	in the creation of multiple packfiles, which in turn prevents
+ bitmaps from being created.
+ The minimum size allowed is limited to 1 MiB.
+ The default is unlimited.
Common unit suffixes of 'k', 'm', or 'g' are
supported.
objects to disk (e.g., when `git repack -a` is run). This
index can speed up the "counting objects" phase of subsequent
packs created for clones and fetches, at the cost of some disk
- space and extra time spent on the initial repack. Defaults to
- false.
+ space and extra time spent on the initial repack. This has
+ no effect if multiple packfiles are created.
+ Defaults to false.
rerere.autoUpdate::
When set to true, `git-rerere` updates the index with the
ifdef::git-diff[]
This is the default.
endif::git-diff[]
-endif::git-format-patch[]
-s::
--no-patch::
Suppress diff output. Useful for commands like `git show` that
show the patch by default, or to cancel the effect of `--patch`.
+endif::git-format-patch[]
-U<n>::
--unified=<n>::
and, correspondingly, show differences character by character.
+
The regex can also be set via a diff driver or configuration option, see
-linkgit:gitattributes[1] or linkgit:git-config[1]. Giving it explicitly
+linkgit:gitattributes[5] or linkgit:git-config[1]. Giving it explicitly
overrides any diff driver or configuration setting. Diff drivers
override configuration settings.
Everyday Git With 20 Commands Or So
===================================
-This document has been moved to linkgit:giteveryday[1].
+This document has been moved to linkgit:giteveryday[7].
Please let the owners of the referring site know so that they can update the
link you clicked to get here.
SEE ALSO
--------
linkgit:gitignore[5]
-linkgit:gitconfig[5]
+linkgit:git-config[1]
linkgit:git-ls-files[1]
GIT
[-o <name>] [-b <name>] [-u <upload-pack>] [--reference <repository>]
[--dissociate] [--separate-git-dir <git dir>]
[--depth <depth>] [--[no-]single-branch]
- [--recursive | --recurse-submodules] [--jobs <n>] [--] <repository>
- [<directory>]
+ [--recursive | --recurse-submodules] [--[no-]shallow-submodules]
+ [--jobs <n>] [--] <repository> [<directory>]
DESCRIPTION
-----------
Create a 'shallow' clone with a history truncated to the
specified number of commits. Implies `--single-branch` unless
`--no-single-branch` is given to fetch the histories near the
- tips of all branches.
+ tips of all branches. This implies `--shallow-submodules`. If
+ you want to have a shallow superproject clone, but full submodules,
+ also pass `--no-shallow-submodules`.
--[no-]single-branch::
Clone only the history leading to the tip of a single branch,
repository does not have a worktree/checkout (i.e. if any of
`--no-checkout`/`-n`, `--bare`, or `--mirror` is given)
+--[no-]shallow-submodules::
+ All submodules which are cloned will be shallow with a depth of 1.
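+
+For example, cloning a superproject in full while keeping its
+submodules shallow (the URL is only a placeholder):
+
+	git clone --recursive --shallow-submodules https://example.com/project.git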
+
--separate-git-dir=<git dir>::
Instead of placing the cloned repository where it is supposed
to be, place the cloned repository at the specified directory,
stuck to the option without a space.
--no-gpg-sign::
- Countermand `commit.gpgSign` configuration variable that is
- set to force each and every commit to be signed.
+	Do not GPG-sign the commit, to countermand a `--gpg-sign` option
+ given earlier on the command line.
Commit Information
what changes the commit has.
Note that this diff output doesn't have its
lines prefixed with '#'. This diff will not be a part
- of the commit message.
+ of the commit message. See the `commit.verbose` configuration
+ variable in linkgit:git-config[1].
+
If specified twice, show in addition the unified diff between
what would be committed and the worktree files, i.e. the unstaged
This command will fail with non-zero status upon error. Some exit
codes are:
-- The config file is invalid (ret=3),
-- can not write to the config file (ret=4),
+- The section or key is invalid (ret=1),
- no section or name was provided (ret=2),
-- the section or key is invalid (ret=1),
+- the config file is invalid (ret=3),
+- the config file cannot be written (ret=4),
- you try to unset an option which does not exist (ret=5),
- you try to unset/set an option for which multiple lines match (ret=5), or
- you try to use an invalid regexp (ret=6).
Remap to ancestor
~~~~~~~~~~~~~~~~~
-By using linkgit:rev-list[1] arguments, e.g., path limiters, you can limit the
+By using linkgit:git-rev-list[1] arguments, e.g., path limiters, you can limit the
set of revisions which get rewritten. However, positive refs on the command
line are distinguished: we don't let them be excluded by such limiters. For
this purpose, they are instead rewritten to point at the nearest ancestor that
specified commit (HEAD if not specified).
--contains [<object>]::
- Only list tags which contain the specified commit (HEAD if not
+ Only list refs which contain the specified commit (HEAD if not
specified).
FIELD NAMES
As a special case for the date-type fields, you may specify a format for
the date by adding `:` followed by date format name (see the
-values the `--date` option to linkgit::git-rev-list[1] takes).
+values the `--date` option to linkgit:git-rev-list[1] takes).
EXAMPLES
Output an all-zero hash in each patch's From header instead
of the hash of the commit.
+--base=<commit>::
+ Record the base tree information to identify the state the
+ patch series applies to. See the BASE TREE INFORMATION section
+ below for details.
+
--root::
Treat the revision argument as a <revision range>, even if it
is just a single commit (that would normally be treated as a
5. Back in the compose window: add whatever other text you wish to the
message, complete the addressing and subject fields, and press send.
+BASE TREE INFORMATION
+---------------------
+
+The base tree information block is used for maintainers or third party
+testers to know the exact state the patch series applies to. It consists
+of the 'base commit', which is a well-known commit that is part of the
+stable part of the project history everybody else works off of, and zero
+or more 'prerequisite patches', which are well-known patches in flight
+that are not yet part of the 'base commit' and need to be applied on top
+of the 'base commit' in topological order before the patches can be applied.
+
+The 'base commit' is shown as "base-commit: " followed by the 40-hex of
+the commit object name. A 'prerequisite patch' is shown as
+"prerequisite-patch-id: " followed by the 40-hex 'patch id', which can
+be obtained by passing the patch through the `git patch-id --stable`
+command.
+
+Imagine that on top of the public commit P, you applied well-known
+patches X, Y and Z from somebody else, and then built your three-patch
+series A, B, C; the history would look like this:
+
+................................................
+---P---X---Y---Z---A---B---C
+................................................
+
+With `git format-patch --base=P -3 C` (or variants thereof, e.g. with
+`--cover-letter` or using `Z..C` instead of `-3 C` to specify the
+range), the base tree information block is shown at the end of the
+first message the command outputs (either the first patch, or the
+cover letter), like this:
+
+------------
+base-commit: P
+prerequisite-patch-id: X
+prerequisite-patch-id: Y
+prerequisite-patch-id: Z
+------------
+
+For non-linear topology, such as
+
+................................................
+---P---X---A---M---C
+ \ /
+ Y---Z---B
+................................................
+
+you can also use `git format-patch --base=P -3 C` to generate patches
+for A, B and C, and the identifiers for P, X, Y, Z are appended at the
+end of the first message.
+
+If you set `--base=auto` on the command line, the base commit is chosen
+automatically: it is the merge base of the tip commit of the remote-tracking
+branch and the revision range specified on the command line.
+For a local branch, you need to make it track a remote branch with `git branch
+--set-upstream-to` before using this option.
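+
+A minimal sketch, assuming the current branch should track
+`origin/master` (the remote and branch names are only placeholders):
+
+	git branch --set-upstream-to=origin/master
+	git format-patch --base=auto -3 -o outgoing/
+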
EXAMPLES
--------
- the default template directory: `/usr/share/git-core/templates`.
The default template directory includes some directory structure, suggested
-"exclude patterns" (see linkgit:gitignore[5]), and sample hook files (see linkgit:githooks[5]).
+"exclude patterns" (see linkgit:gitignore[5]), and sample hook files.
+
+The sample hooks are all disabled by default. To enable one of the
+sample hooks, rename it by removing its `.sample` suffix.
+
+See linkgit:githooks[5] for more general info on hook execution.
EXAMPLES
--------
(works only for a single file).
--no-decorate::
---decorate[=short|full|no]::
+--decorate[=short|full|auto|no]::
Print out the ref names of any commits that are shown. If 'short' is
specified, the ref name prefixes 'refs/heads/', 'refs/tags/' and
'refs/remotes/' will not be printed. If 'full' is specified, the
- full ref name (including prefix) will be printed. The default option
- is 'short'.
+ full ref name (including prefix) will be printed. If 'auto' is
+ specified, then if the output is going to a terminal, the ref names
+ are shown as if 'short' were given, otherwise no ref names are
+ shown. The default option is 'short'.
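+
+For example, the following shows ref decorations when the output goes
+to a terminal and omits them when the output is piped elsewhere:
+
+	git log --oneline --decorate=auto
+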
--source::
Print out the ref name given on the command line by which each
conclude it with a patch submission, separating the discussion and the
beginning of the proposed commit log message with a scissors line.
+
-This can enabled by default with the configuration option mailinfo.scissors.
+This can be enabled by default with the configuration option mailinfo.scissors.
--no-scissors::
Ignore scissors lines. Useful for overriding mailinfo.scissors settings.
[verse]
'git merge' [-n] [--stat] [--no-commit] [--squash] [--[no-]edit]
[-s <strategy>] [-X <strategy-option>] [-S[<keyid>]]
+ [--[no-]allow-unrelated-histories]
[--[no-]rerere-autoupdate] [-m <msg>] [<commit>...]
'git merge' <msg> HEAD <commit>...
'git merge' --abort
'git merge --abort' is equivalent to 'git reset --merge' when
`MERGE_HEAD` is present.
---allow-unrelated-histories::
- By default, `git merge` command refuses to merge histories
- that do not share a common ancestor. This option can be
- used to override this safety when merging histories of two
- projects that started their lives independently. As that is
- a very rare occasion, no configuration variable to enable
- this by default exists and will not be added, and the list
- of options at the top of this documentation does not mention
- this option. Also `git pull` does not pass this option down
- to `git merge` (instead, you `git fetch` first, examine what
- you will be merging and then `git merge` locally with this
- option).
-
<commit>...::
Commits, usually other branch heads, to merge into our branch.
Specifying more than one commit will create a merge with
GIT
---
-Part of the linkgit:git[7] suite
+Part of the linkgit:git[1] suite
--max-pack-size=<n>::
Maximum size of each output pack file. The size can be suffixed with
"k", "m", or "g". The minimum size allowed is limited to 1 MiB.
- If specified, multiple packfiles may be created.
+ If specified, multiple packfiles may be created, which also
+ prevents the creation of a bitmap index.
The default is unlimited, unless the config variable
`pack.packSizeLimit` is set.
--no-rebase::
Override earlier --rebase.
+--autostash::
+--no-autostash::
+ Before starting rebase, stash local modifications away (see
+ linkgit:git-stash[1]) if needed, and apply the stash when
+ done. `--no-autostash` is useful to override the `rebase.autoStash`
+ configuration variable (see linkgit:git-config[1]).
++
+This option is only valid when "--rebase" is used.
+
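+For example, a minimal invocation that rebases on top of the configured
+upstream while temporarily stashing any local modifications looks like:
+
+	$ git pull --rebase --autostash
+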
Options related to fetching
~~~~~~~~~~~~~~~~~~~~~~~~~~~
--max-pack-size=<n>::
Maximum size of each output pack file. The size can be suffixed with
"k", "m", or "g". The minimum size allowed is limited to 1 MiB.
- If specified, multiple packfiles may be created.
+ If specified, multiple packfiles may be created, which also
+ prevents the creation of a bitmap index.
The default is unlimited, unless the config variable
`pack.packSizeLimit` is set.
Write a reachability bitmap index as part of the repack. This
only makes sense when used with `-a` or `-A`, as the bitmaps
must be able to refer to all reachable objects. This option
- overrides the setting of `pack.writeBitmaps`.
+ overrides the setting of `repack.writeBitmaps`. This option
+ has no effect if multiple packfiles are created.
--pack-kept-objects::
Include objects in `.keep` files when repacking. Note that we
This means that we may duplicate objects, but this makes the
option safe to use when there are concurrent pushes or fetches.
This option is generally only useful if you are writing bitmaps
- with `-b` or `pack.writeBitmaps`, as it ensures that the
+ with `-b` or `repack.writeBitmaps`, as it ensures that the
bitmapped packfile has the necessary objects.
Configuration
smtpUser = yourname@gmail.com
smtpServerPort = 587
+If you have multifactor authentication set up on your Gmail account, you will
+need to generate an app-specific password for use with 'git send-email'. Visit
+https://security.google.com/settings/security/apppasswords to set up an
+app-specific password. Once generated, you can store it with the credential
+helper:
+
+ $ git credential fill
+ protocol=smtp
+ host=smtp.gmail.com
+	username=yourname@gmail.com
+ password=app-password
+
+
Once your commits are ready to be sent to the mailing list, run the
following commands:
[--reference <repository>] [--depth <depth>] [--] <repository> [<path>]
'git submodule' [--quiet] status [--cached] [--recursive] [--] [<path>...]
'git submodule' [--quiet] init [--] [<path>...]
-'git submodule' [--quiet] deinit [-f|--force] [--] <path>...
+'git submodule' [--quiet] deinit [-f|--force] (--all|[--] <path>...)
'git submodule' [--quiet] update [--init] [--remote] [-N|--no-fetch]
[-f|--force] [--rebase|--merge] [--reference <repository>]
[--depth <depth>] [--recursive] [--jobs <n>] [--] [<path>...]
tree. Further calls to `git submodule update`, `git submodule foreach`
and `git submodule sync` will skip any unregistered submodules until
they are initialized again, so use this command if you don't want to
- have a local checkout of the submodule in your work tree anymore. If
+ have a local checkout of the submodule in your working tree anymore. If
you really want to remove a submodule from the repository and commit
that use linkgit:git-rm[1] instead.
+
-If `--force` is specified, the submodule's work tree will be removed even if
-it contains local modifications.
+When the command is run without a pathspec, it errors out
+instead of deinit-ing everything, to prevent mistakes.
++
+If `--force` is specified, the submodule's working tree will
+be removed even if it contains local modifications.
update::
+
--quiet::
Only print error messages.
+--all::
+ This option is only valid for the deinit command. Unregister all
+ submodules in the working tree.
+
-b::
--branch::
Branch of repository to add as submodule.
--force::
This option is only valid for add, deinit and update commands.
When running add, allow adding an otherwise ignored submodule path.
- When running deinit the submodule work trees will be removed even if
- they contain local changes.
+ When running deinit the submodule working trees will be removed even
+ if they contain local changes.
When running update (only effective with the checkout procedure),
throw away local changes in submodules when switching to a
different commit; and always run a checkout operation in the
SYNOPSIS
--------
[verse]
-'git worktree add' [-f] [--detach] [-b <new-branch>] <path> [<branch>]
+'git worktree add' [-f] [--detach] [--checkout] [-b <new-branch>] <path> [<branch>]
'git worktree prune' [-n] [-v] [--expire <expire>]
'git worktree list' [--porcelain]
With `add`, detach HEAD in the new working tree. See "DETACHED HEAD"
in linkgit:git-checkout[1].
+--[no-]checkout::
+	By default, `add` checks out `<branch>`; however, `--no-checkout` can
+ be used to suppress checkout in order to make customizations,
+ such as configuring sparse-checkout. See "Sparse checkout"
+ in linkgit:git-read-tree[1].
+
-n::
--dry-run::
With `prune`, do not remove anything; just report what it would
branch of the `git.git` repository.
Documentation for older releases are available here:
-* link:v2.8.1/git.html[documentation for release 2.8.1]
+* link:v2.8.3/git.html[documentation for release 2.8.3]
* release notes for
- link:RelNotes/2.8.1.txt[2.8.1].
+ link:RelNotes/2.8.3.txt[2.8.3],
+ link:RelNotes/2.8.2.txt[2.8.2],
+ link:RelNotes/2.8.1.txt[2.8.1],
link:RelNotes/2.8.0.txt[2.8].
* link:v2.7.3/git.html[documentation for release 2.7.3]
SYNOPSIS
--------
-$GIT_DIR/hooks/*
+$GIT_DIR/hooks/* (or \`git config core.hooksPath`/*)
DESCRIPTION
-----------
-Hooks are little scripts you can place in `$GIT_DIR/hooks`
-directory to trigger action at certain points. When
-'git init' is run, a handful of example hooks are copied into the
-`hooks` directory of the new repository, but by default they are
-all disabled. To enable a hook, rename it by removing its `.sample`
-suffix.
+Hooks are programs you can place in a hooks directory to trigger
+actions at certain points in git's execution. Hooks that don't have
+the executable bit set are ignored.
-NOTE: It is also a requirement for a given hook to be executable.
-However - in a freshly initialized repository - the `.sample` files are
-executable by default.
+By default the hooks directory is `$GIT_DIR/hooks`, but that can be
+changed via the `core.hooksPath` configuration variable (see
+linkgit:git-config[1]).
-This document describes the currently defined hooks.
+Before Git invokes a hook, it changes its working directory to either
+the root of the working tree in a non-bare repository, or to the
+$GIT_DIR in a bare repository.
+
+Hooks can get their arguments via the environment, command-line
+arguments, and stdin. See the documentation for each hook below for
+details.
+
+'git init' may copy hooks to the new repository, depending on its
+configuration. See the "TEMPLATE DIRECTORY" section in
+linkgit:git-init[1] for details. When the rest of this document refers
+to "default hooks" it's talking about the default template shipped
+with Git.
+
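+As a minimal sketch, the following sets a repository-local hooks
+directory (the name `.githooks` is only an example) and installs an
+executable `pre-commit` hook in it:
+
+	$ git config core.hooksPath .githooks
+	$ mkdir -p .githooks
+	$ printf '#!/bin/sh\nexec git diff --cached --check\n' >.githooks/pre-commit
+	$ chmod +x .githooks/pre-commit
+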
+The currently supported hooks are described below.
HOOKS
-----
applypatch-msg
~~~~~~~~~~~~~~
-This hook is invoked by 'git am' script. It takes a single
+This hook is invoked by 'git am'. It takes a single
parameter, the name of the file that holds the proposed commit
-log message. Exiting with non-zero status causes
-'git am' to abort before applying the patch.
+log message. Exiting with a non-zero status causes 'git am' to abort
+before applying the patch.
The hook is allowed to edit the message file in place, and can
be used to normalize the message into some project standard
-format (if the project has one). It can also be used to refuse
-the commit after inspecting the message file.
+format. It can also be used to refuse the commit after inspecting
+the message file.
The default 'applypatch-msg' hook, when enabled, runs the
'commit-msg' hook, if the latter is enabled.
~~~~~~~~~~
This hook is invoked by 'git commit', and can be bypassed
-with `--no-verify` option. It takes no parameter, and is
+with the `--no-verify` option. It takes no parameters, and is
invoked before obtaining the proposed commit log message and
-making a commit. Exiting with non-zero status from this script
-causes the 'git commit' to abort.
+making a commit. Exiting with a non-zero status from this script
+causes the 'git commit' command to abort before creating a commit.
The default 'pre-commit' hook, when enabled, catches introduction
of lines with trailing whitespaces and aborts the commit when
~~~~~~~~~~
This hook is invoked by 'git commit', and can be bypassed
-with `--no-verify` option. It takes a single parameter, the
+with the `--no-verify` option. It takes a single parameter, the
name of the file that holds the proposed commit log message.
-Exiting with non-zero status causes the 'git commit' to
+Exiting with a non-zero status causes the 'git commit' to
abort.
-The hook is allowed to edit the message file in place, and can
-be used to normalize the message into some project standard
-format (if the project has one). It can also be used to refuse
-the commit after inspecting the message file.
+The hook is allowed to edit the message file in place, and can be used
+to normalize the message into some project standard format. It
+can also be used to refuse the commit after inspecting the message
+file.
The default 'commit-msg' hook, when enabled, detects duplicate
"Signed-off-by" lines, and aborts the commit if one is found.
post-commit
~~~~~~~~~~~
-This hook is invoked by 'git commit'. It takes no
-parameter, and is invoked after a commit is made.
+This hook is invoked by 'git commit'. It takes no parameters, and is
+invoked after a commit is made.
This hook is meant primarily for notification, and cannot affect
the outcome of 'git commit'.
firing one e-mail per ref when used naively, though. The
<<post-receive,'post-receive'>> hook is more suited to that.
-Another use suggested on the mailing list is to use this hook to
-implement access control which is finer grained than the one
-based on filesystem group.
+In an environment that restricts the users' access only to git
+commands over the wire, this hook can be used to implement access
+control without relying on filesystem ownership and group
+membership. See linkgit:git-shell[1] for how you might use the login
+shell to restrict the user's access to only git commands.
Both standard output and standard error output are forwarded to
'git send-pack' on the other end, so you can simply `echo` messages
A fast-forward is a special type of <<def_merge,merge>> where you have a
<<def_revision,revision>> and you are "merging" another
<<def_branch,branch>>'s changes that happen to be a descendant of what
- you have. In such these cases, you do not make a new <<def_merge,merge>>
+ you have. In such a case, you do not make a new <<def_merge,merge>>
<<def_commit,commit>> but instead just update to his
revision. This will happen frequently on a
<<def_remote_tracking_branch,remote-tracking branch>> of a remote
--- /dev/null
+#!/usr/bin/perl
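+#
+# Check that linkgit:<page>[<section>] targets in the documentation point
+# at existing git manual pages and use the man section recorded in that
+# page's title line.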
+
+use File::Find;
+use Getopt::Long;
+
+my $basedir = ".";
+GetOptions("basedir=s" => \$basedir)
+ or die("Cannot parse command line arguments\n");
+
+my $found_errors = 0;
+
+sub report {
+ my ($where, $what, $error) = @_;
+ print "$where: $error: $what\n";
+ $found_errors = 1;
+}
+
+sub grab_section {
+ my ($page) = @_;
+ open my $fh, "<", "$basedir/$page.txt";
+ my $firstline = <$fh>;
+ chomp $firstline;
+ close $fh;
+ my ($section) = ($firstline =~ /.*\((\d)\)$/);
+ return $section;
+}
+
+sub lint {
+ my ($file) = @_;
+ open my $fh, "<", $file
+ or return;
+ while (<$fh>) {
+ my $where = "$file:$.";
+ while (s/linkgit:((.*?)\[(\d)\])//) {
+ my ($target, $page, $section) = ($1, $2, $3);
+
+ # De-AsciiDoc
+ $page =~ s/{litdd}/--/g;
+
+ if ($page !~ /^git/) {
+ report($where, $target, "nongit link");
+ next;
+ }
+ if (! -f "$basedir/$page.txt") {
+ report($where, $target, "no such source");
+ next;
+ }
+			my $real_section = grab_section($page);
+ if ($real_section != $section) {
+ report($where, $target,
+ "wrong section (should be $real_section)");
+ next;
+ }
+ }
+ }
+ close $fh;
+}
+
+sub lint_it {
+ lint($File::Find::name) if -f && /\.txt$/;
+}
+
+if (!@ARGV) {
+ find({ wanted => \&lint_it, no_chdir => 1 }, $basedir);
+} else {
+ for (@ARGV) {
+ lint($_);
+ }
+}
+
+exit $found_errors;
--verify-signatures::
--no-verify-signatures::
- Verify that the commits being merged have good and trusted GPG signatures
- and abort the merge in case they do not.
+ Verify that the tip commit of the side branch being merged is
+ signed with a valid key, i.e. a key that has a valid uid: in the
+ default trust model, this means the signing key has been signed by
+ a trusted key. If the tip commit of the side branch is not signed
+ with a valid key, the merge is aborted.
--summary::
--no-summary::
reporting.
endif::git-pull[]
+
+--allow-unrelated-histories::
+	By default, `git merge` refuses to merge histories that do
+	not share a common ancestor. This option can be used to
+	override this safety when merging histories of two projects
+	that started their lives independently. As that is a very
+	rare occasion, no configuration variable to enable this by
+	default exists, and none will be added.
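+
+For example, to merge the independently started history fetched under a
+remote named `other` (the remote and branch names are placeholders):
+
+	$ git merge --allow-unrelated-histories other/master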
- '%N': commit notes
endif::git-rev-list[]
- '%GG': raw verification message from GPG for a signed commit
-- '%G?': show "G" for a Good signature, "B" for a Bad signature, "U" for a good,
- untrusted signature and "N" for no signature
+- '%G?': show "G" for a good (valid) signature, "B" for a bad signature,
+ "U" for a good signature with unknown validity and "N" for no signature
- '%GS': show the name of the signer for a signed commit
- '%GK': show the key used to sign a signed commit
- '%gD': reflog selector, e.g., `refs/stash@{1}`
verbatim; this means that invalid sequences in the original
commit may be copied to the output.
+--expand-tabs=<n>::
+--expand-tabs::
+--no-expand-tabs::
+ Perform a tab expansion (replace each tab with enough spaces
+	to fill to the next display column that is a multiple of '<n>')
+ in the log message before showing it in the output.
+ `--expand-tabs` is a short-hand for `--expand-tabs=8`, and
+ `--no-expand-tabs` is a short-hand for `--expand-tabs=0`,
+ which disables tab expansion.
++
+By default, tabs are expanded in pretty formats that indent the log
+message by 4 spaces (i.e. 'medium', which is the default, 'full',
+and 'fuller').
+
ifndef::git-rev-list[]
--notes[=<treeish>]::
Show the notes (see linkgit:git-notes[1]) that annotate the
Specify whether include directives should be followed in parsed files.
Regular `git_config` defaults to `1`.
-There is a special version of `git_config` called `git_config_early`.
-This version takes an additional parameter to specify the repository
-config, instead of having it looked up via `git_path`. This is useful
-early in a Git program before the repository has been found. Unless
-you're working with early setup code, you probably don't want to use
-this.
-
Reading Specific Files
----------------------
The details of the credential will be provided on the helper's stdin
stream. The exact format is the same as the input/output format of the
`git credential` plumbing command (see the section `INPUT/OUTPUT
-FORMAT` in linkgit:git-credential[7] for a detailed specification).
+FORMAT` in linkgit:git-credential[1] for a detailed specification).
For a `get` operation, the helper should produce a list of attributes
on stdout in the same format. A helper is free to produce a subset, or
linkgit:gitcredentials[7]
-linkgit:git-config[5] (See configuration variables `credential.*`)
+linkgit:git-config[1] (See configuration variables `credential.*`)
`OPT_COUNTUP(short, long, &int_var, description)`::
Introduce a count-up option.
- `int_var` is incremented on each use of `--option`, and
- reset to zero with `--no-option`.
+ Each use of `--option` increments `int_var`, starting from zero
+ (even if initially negative), and `--no-option` resets it to
+ zero. To determine if `--option` or `--no-option` was encountered at
+ all, initialize `int_var` to a negative value, and if it is still
+ negative after parse_options(), then neither `--option` nor
+ `--no-option` was seen.
`OPT_BIT(short, long, &int_var, description, mask)`::
Introduce a boolean option.
static void trace_print_foo(const char *message)
{
- trace_print_key(&trace_foo, message);
+ trace_printf_key(&trace_foo, "%s", message);
}
------------
+
A push certificate begins with a set of header lines. After the
header and an empty line, the protocol commands follow, one per
-line. Note that the the trailing LF in push-cert PKT-LINEs is _not_
+line. Note that the trailing LF in push-cert PKT-LINEs is _not_
optional; it must be present.
Currently, the following header fields are defined:
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v2.8.0.GIT
+DEF_VER=v2.9.0-rc0
LF='
'
#
# Define HAVE_CLOCK_MONOTONIC if your platform has CLOCK_MONOTONIC in librt.
#
-# Define NO_HMAC_CTX_CLEANUP if your OpenSSL is version 0.9.6b or earlier to
-# cleanup the HMAC context with the older HMAC_cleanup function.
-#
# Define USE_PARENS_AROUND_GETTEXT_N to "yes" if your compiler happily
# compiles the following initialization:
#
TAR = tar
FIND = find
INSTALL = install
-RPMBUILD = rpmbuild
TCL_PATH = tclsh
TCLTK_PATH = wish
XGETTEXT = xgettext
TEST_PROGRAMS_NEED_X += test-urlmatch-normalization
TEST_PROGRAMS_NEED_X += test-wildmatch
-TEST_PROGRAMS = $(patsubst %,%$X,$(TEST_PROGRAMS_NEED_X))
+TEST_PROGRAMS = $(patsubst %,t/helper/%$X,$(TEST_PROGRAMS_NEED_X))
# List built-in command $C whose implementation cmd_$C() is not in
# builtin/$C.o but is linked in as part of some other command.
ifdef NEEDS_CRYPTO_WITH_SSL
OPENSSL_LIBSSL += -lcrypto
endif
- ifdef NO_HMAC_CTX_CLEANUP
- BASIC_CFLAGS += -DNO_HMAC_CTX_CLEANUP
- endif
else
BASIC_CFLAGS += -DNO_OPENSSL
BLK_SHA1 = 1
VCSSVN_OBJS += vcs-svn/svndiff.o
VCSSVN_OBJS += vcs-svn/svndump.o
-TEST_OBJS := $(patsubst test-%$X,test-%.o,$(TEST_PROGRAMS))
+TEST_OBJS := $(patsubst %$X,%.o,$(TEST_PROGRAMS))
OBJECTS := $(LIB_OBJS) $(BUILTIN_OBJS) $(PROGRAM_OBJS) $(TEST_OBJS) \
$(XDIFF_OBJS) \
$(VCSSVN_OBJS) \
--keyword=gettextln --keyword=eval_gettextln
XGETTEXT_FLAGS_PERL = $(XGETTEXT_FLAGS) --keyword=__ --language=Perl
LOCALIZED_C = $(C_OBJ:o=c) $(LIB_H) $(GENERATED_H)
-LOCALIZED_SH = $(SCRIPT_SH)
+LOCALIZED_SH = $(SCRIPT_SH) git-parse-remote.sh
LOCALIZED_PERL = $(SCRIPT_PERL)
ifdef XGETTEXT_INCLUDE_TESTS
@mkdir -p bin-wrappers
$(QUIET_GEN)sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
-e 's|@@BUILD_DIR@@|$(shell pwd)|' \
- -e 's|@@PROG@@|$(@F)|' < $< > $@ && \
+ -e 's|@@PROG@@|$(patsubst test-%,t/helper/test-%,$(@F))|' < $< > $@ && \
chmod +x $@
# GNU make supports exporting all variables by "export" without parameters.
.PHONY: test perf
-test-ctype$X: ctype.o
+t/helper/test-ctype$X: ctype.o
-test-date$X: date.o ctype.o
+t/helper/test-date$X: date.o ctype.o
-test-delta$X: diff-delta.o patch-delta.o
+t/helper/test-delta$X: diff-delta.o patch-delta.o
-test-line-buffer$X: vcs-svn/lib.a
+t/helper/test-line-buffer$X: vcs-svn/lib.a
-test-parse-options$X: parse-options.o parse-options-cb.o
+t/helper/test-parse-options$X: parse-options.o parse-options-cb.o
-test-svn-fe$X: vcs-svn/lib.a
+t/helper/test-svn-fe$X: vcs-svn/lib.a
.PRECIOUS: $(TEST_OBJS)
-test-%$X: test-%.o GIT-LDFLAGS $(GITLIBS)
+t/helper/test-%$X: t/helper/test-%.o GIT-LDFLAGS $(GITLIBS)
$(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(filter %.a,$^) $(LIBS)
-check-sha1:: test-sha1$X
- ./test-sha1.sh
+check-sha1:: t/helper/test-sha1$X
+ t/helper/test-sha1.sh
SP_OBJ = $(patsubst %.o,%.sp,$(C_OBJ))
check: common-cmds.h
@if sparse; \
then \
- echo 2>&1 "Use 'make sparse' instead"; \
+ echo >&2 "Use 'make sparse' instead"; \
$(MAKE) --no-print-directory sparse; \
else \
- echo 2>&1 "Did you mean 'make test'?"; \
+ echo >&2 "Did you mean 'make test'?"; \
exit 1; \
fi
### Maintainer's dist rules
-git.spec: git.spec.in GIT-VERSION-FILE
- sed -e 's/@@VERSION@@/$(GIT_VERSION)/g' < $< > $@+
- mv $@+ $@
-
GIT_TARNAME = git-$(GIT_VERSION)
-dist: git.spec git-archive$(X) configure
+dist: git-archive$(X) configure
./git-archive --format=tar \
--prefix=$(GIT_TARNAME)/ HEAD^{tree} > $(GIT_TARNAME).tar
@mkdir -p $(GIT_TARNAME)
- @cp git.spec configure $(GIT_TARNAME)
+ @cp configure $(GIT_TARNAME)
@echo $(GIT_VERSION) > $(GIT_TARNAME)/version
@$(MAKE) -C git-gui TARDIR=../$(GIT_TARNAME)/git-gui dist-version
$(TAR) rf $(GIT_TARNAME).tar \
- $(GIT_TARNAME)/git.spec \
$(GIT_TARNAME)/configure \
$(GIT_TARNAME)/version \
$(GIT_TARNAME)/git-gui/version
@$(RM) -r $(GIT_TARNAME)
gzip -f -9 $(GIT_TARNAME).tar
-rpm: dist
- $(RPMBUILD) \
- --define "_source_filedigest_algorithm md5" \
- --define "_binary_filedigest_algorithm md5" \
- -ta $(GIT_TARNAME).tar.gz
+rpm::
+ @echo >&2 "Use distro packaged sources to run rpmbuild"
+ @false
+.PHONY: rpm
htmldocs = git-htmldocs-$(GIT_VERSION)
manpages = git-manpages-$(GIT_VERSION)
$(RM) $(addsuffix *.gcno,$(addprefix $(PROFILE_DIR)/, $(object_dirs)))
clean: profile-clean coverage-clean
- $(RM) *.o *.res refs/*.o block-sha1/*.o ppc/*.o compat/*.o compat/*/*.o
- $(RM) xdiff/*.o vcs-svn/*.o ewah/*.o builtin/*.o
+ $(RM) *.res
+ $(RM) $(OBJECTS)
$(RM) $(LIB_FILE) $(XDIFF_LIB) $(VCSSVN_LIB)
$(RM) $(ALL_PROGRAMS) $(SCRIPT_LIB) $(BUILT_INS) git$X
$(RM) $(TEST_PROGRAMS) $(NO_INSTALL)
.PHONY: check-docs
check-docs::
+ $(MAKE) -C Documentation lint-docs
@(for v in $(ALL_COMMANDS); \
do \
case "$$v" in \
strbuf_add(&path, pfx, pfx_len);
strbuf_addstr(&path, arg);
#else
- char *p;
/* don't add prefix to absolute paths, but still replace '\' by '/' */
strbuf_reset(&path);
if (is_absolute_path(arg))
else if (pfx_len)
strbuf_add(&path, pfx, pfx_len);
strbuf_addstr(&path, arg);
- for (p = path.buf + pfx_len; *p; p++)
- if (*p == '\\')
- *p = '/';
+ convert_slashes(path.buf + pfx_len);
#endif
return path.buf;
}
memcpy(header->magic, "ustar", 6);
memcpy(header->version, "00", 2);
- snprintf(header->chksum, sizeof(header->chksum), "%07o", ustar_header_chksum(header));
+ xsnprintf(header->chksum, sizeof(header->chksum), "%07o", ustar_header_chksum(header));
}
static int write_extended_header(struct archiver_args *args,
/* Create file BISECT_ANCESTORS_OK. */
fd = open(filename, O_CREAT | O_TRUNC | O_WRONLY, 0600);
if (fd < 0)
- warning("could not create file '%s': %s",
- filename, strerror(errno));
+ warning_errno("could not create file '%s'",
+ filename);
else
close(fd);
done:
*read_good = "good";
return;
} else {
- die("could not read file '%s': %s", filename,
- strerror(errno));
+ die_errno("could not read file '%s'", filename);
}
} else {
strbuf_getline_lf(&str, fp);
unlink(git_path_squash_msg());
}
-void die_if_checked_out(const char *branch)
+void die_if_checked_out(const char *branch, int ignore_current_worktree)
{
- char *existing;
+ const struct worktree *wt;
- existing = find_shared_symref("HEAD", branch);
- if (existing) {
- skip_prefix(branch, "refs/heads/", &branch);
- die(_("'%s' is already checked out at '%s'"), branch, existing);
+ wt = find_shared_symref("HEAD", branch);
+ if (!wt || (ignore_current_worktree && wt->is_current))
+ return;
+ skip_prefix(branch, "refs/heads/", &branch);
+ die(_("'%s' is already checked out at '%s'"),
+ branch, wt->path);
+}
+
+int replace_each_worktree_head_symref(const char *oldref, const char *newref)
+{
+ int ret = 0;
+ struct worktree **worktrees = get_worktrees();
+ int i;
+
+ for (i = 0; worktrees[i]; i++) {
+ if (worktrees[i]->is_detached)
+ continue;
+ if (strcmp(oldref, worktrees[i]->head_ref))
+ continue;
+
+ if (set_worktree_head_symref(get_worktree_git_dir(worktrees[i]),
+ newref)) {
+ ret = -1;
+ error(_("HEAD of working tree %s is not updated"),
+ worktrees[i]->path);
+ }
}
+
+ free_worktrees(worktrees);
+ return ret;
}
* worktree and die (with a message describing its checkout location) if
* it is.
*/
-extern void die_if_checked_out(const char *branch);
+extern void die_if_checked_out(const char *branch, int ignore_current_worktree);
+
+/*
+ * Update all per-worktree HEADs pointing at the old ref to point the new ref.
+ * This will be used when renaming a branch. Returns 0 if successful, non-zero
+ * otherwise.
+ */
+extern int replace_each_worktree_head_symref(const char *oldref, const char *newref);
#endif
in = fopen(*paths, "r");
if (!in)
- return error(_("could not open '%s' for reading: %s"),
- *paths, strerror(errno));
+ return error_errno(_("could not open '%s' for reading"),
+ *paths);
mail = mkpath("%s/%0*d", state->dir, state->prec, i + 1);
out = fopen(mail, "w");
if (!out)
- return error(_("could not open '%s' for writing: %s"),
- mail, strerror(errno));
+ return error_errno(_("could not open '%s' for writing"),
+ mail);
ret = fn(out, in, keep_cr);
fp = fopen(*paths, "r");
if (!fp)
- return error(_("could not open '%s' for reading: %s"), *paths,
- strerror(errno));
+ return error_errno(_("could not open '%s' for reading"), *paths);
while (!strbuf_getline_lf(&sb, fp)) {
if (*sb.buf == '#')
return find_name(line, NULL, p_value, TERM_TAB);
if (orig_name) {
- int len;
- const char *name;
+ int len = strlen(orig_name);
char *another;
- name = orig_name;
- len = strlen(name);
if (isnull)
- die(_("git apply: bad git-diff - expected /dev/null, got %s on line %d"), name, linenr);
+ die(_("git apply: bad git-diff - expected /dev/null, got %s on line %d"),
+ orig_name, linenr);
another = find_name(line, NULL, p_value, TERM_TAB);
- if (!another || memcmp(another, name, len + 1))
+ if (!another || memcmp(another, orig_name, len + 1))
die((side == DIFF_NEW_NAME) ?
_("git apply: bad git-diff - inconsistent new filename on line %d") :
_("git apply: bad git-diff - inconsistent old filename on line %d"), linenr);
free(another);
return orig_name;
- }
- else {
+ } else {
/* expect "/dev/null" */
if (memcmp("/dev/null", line, 9) || line[9] != '\n')
die(_("git apply: bad git-diff - expected /dev/null on line %d"), linenr);
static int gitdiff_oldname(const char *line, struct patch *patch)
{
- char *orig = patch->old_name;
patch->old_name = gitdiff_verify_name(line, patch->is_new, patch->old_name,
DIFF_OLD_NAME);
- if (orig != patch->old_name)
- free(orig);
return 0;
}
static int gitdiff_newname(const char *line, struct patch *patch)
{
- char *orig = patch->new_name;
patch->new_name = gitdiff_verify_name(line, patch->is_delete, patch->new_name,
DIFF_NEW_NAME);
- if (orig != patch->new_name)
- free(orig);
return 0;
}
return NULL;
}
+/*
+ * Returns:
+ * -1 in case of error,
+ * the length of the parsed binary patch otherwise
+ */
static int parse_binary(char *buffer, unsigned long size, struct patch *patch)
{
/*
linenr++;
used = parse_binary(buffer + hd + llen,
size - hd - llen, patch);
+ if (used < 0)
+ return -1;
if (used)
patchsize = used + llen;
else
patch->inaccurate_eof = !!(options & INACCURATE_EOF);
patch->recount = !!(options & RECOUNT);
nr = parse_chunk(buf.buf + offset, buf.len - offset, patch);
- if (nr < 0)
+ if (nr < 0) {
+ free_patch(patch);
break;
+ }
if (apply_in_reverse)
reverse_patches(patch);
if (use_patch(patch)) {
unsigned mode;
struct strbuf msg = STRBUF_INIT;
+ read_cache();
time(&now);
commit = alloc_commit_node();
commit->object.parsed = 1;
#include "utf8.h"
#include "wt-status.h"
#include "ref-filter.h"
+#include "worktree.h"
static const char * const builtin_branch_usage[] = {
N_("git branch [<options>] [-r | -a] [--merged | --no-merged]"),
int flags = 0;
strbuf_branchname(&bname, argv[i]);
- if (kinds == FILTER_REFS_BRANCHES && !strcmp(head, bname.buf)) {
- error(_("Cannot delete the branch '%s' "
- "which you are currently on."), bname.buf);
- ret = 1;
- continue;
- }
-
free(name);
-
name = mkpathdup(fmt, bname.buf);
+
+ if (kinds == FILTER_REFS_BRANCHES) {
+ const struct worktree *wt =
+ find_shared_symref("HEAD", name);
+ if (wt) {
+ error(_("Cannot delete branch '%s' "
+ "checked out at '%s'"),
+ bname.buf, wt->path);
+ ret = 1;
+ continue;
+ }
+ }
+
target = resolve_ref_unsafe(name,
RESOLVE_REF_READING
| RESOLVE_REF_NO_RECURSE
strbuf_addf(&desc, _("(no branch, bisect started on %s)"),
state.branch);
else if (state.detached_from) {
- /* TRANSLATORS: make sure these match _("HEAD detached at ")
- and _("HEAD detached from ") in wt-status.c */
if (state.detached_at)
+ /* TRANSLATORS: make sure this matches
+ "HEAD detached at " in wt-status.c */
strbuf_addf(&desc, _("(HEAD detached at %s)"),
state.detached_from);
else
+ /* TRANSLATORS: make sure this matches
+ "HEAD detached from " in wt-status.c */
strbuf_addf(&desc, _("(HEAD detached from %s)"),
state.detached_from);
}
int current = 0;
int color;
struct strbuf out = STRBUF_INIT, name = STRBUF_INIT;
- const char *prefix = "";
+ const char *prefix_to_show = "";
+ const char *prefix_to_skip = NULL;
const char *desc = item->refname;
char *to_free = NULL;
switch (item->kind) {
case FILTER_REFS_BRANCHES:
- skip_prefix(desc, "refs/heads/", &desc);
+ prefix_to_skip = "refs/heads/";
+ skip_prefix(desc, prefix_to_skip, &desc);
if (!filter->detached && !strcmp(desc, head))
current = 1;
else
color = BRANCH_COLOR_LOCAL;
break;
case FILTER_REFS_REMOTES:
- skip_prefix(desc, "refs/remotes/", &desc);
+ prefix_to_skip = "refs/remotes/";
+ skip_prefix(desc, prefix_to_skip, &desc);
color = BRANCH_COLOR_REMOTE;
- prefix = remote_prefix;
+ prefix_to_show = remote_prefix;
break;
case FILTER_REFS_DETACHED_HEAD:
desc = to_free = get_head_description();
color = BRANCH_COLOR_CURRENT;
}
- strbuf_addf(&name, "%s%s", prefix, desc);
+ strbuf_addf(&name, "%s%s", prefix_to_show, desc);
if (filter->verbose) {
int utf8_compensation = strlen(name.buf) - utf8_strwidth(name.buf);
strbuf_addf(&out, "%c %s%-*s%s", c, branch_get_color(color),
name.buf, branch_get_color(BRANCH_COLOR_RESET));
if (item->symref) {
- skip_prefix(item->symref, "refs/remotes/", &desc);
- strbuf_addf(&out, " -> %s", desc);
+ const char *symref = item->symref;
+ if (prefix_to_skip)
+ skip_prefix(symref, prefix_to_skip, &symref);
+ strbuf_addf(&out, " -> %s", symref);
}
else if (filter->verbose)
/* " f7c0c00 [ahead 58, behind 197] vcs-svn: drop obj_pool.h" */
ref_array_clear(&array);
}
+static void reject_rebase_or_bisect_branch(const char *target)
+{
+ struct worktree **worktrees = get_worktrees();
+ int i;
+
+ for (i = 0; worktrees[i]; i++) {
+ struct worktree *wt = worktrees[i];
+
+ if (!wt->is_detached)
+ continue;
+
+ if (is_worktree_being_rebased(wt, target))
+ die(_("Branch %s is being rebased at %s"),
+ target, wt->path);
+
+ if (is_worktree_being_bisected(wt, target))
+ die(_("Branch %s is being bisected at %s"),
+ target, wt->path);
+ }
+
+ free_worktrees(worktrees);
+}
+
static void rename_branch(const char *oldname, const char *newname, int force)
{
struct strbuf oldref = STRBUF_INIT, newref = STRBUF_INIT, logmsg = STRBUF_INIT;
validate_new_branchname(newname, &newref, force, clobber_head_ok);
+ reject_rebase_or_bisect_branch(oldref.buf);
+
strbuf_addf(&logmsg, "Branch: renamed %s to %s",
oldref.buf, newref.buf);
if (recovery)
warning(_("Renamed a misnamed branch '%s' away"), oldref.buf + 11);
- /* no need to pass logmsg here as HEAD didn't really move */
- if (!strcmp(oldname, head) && create_symref("HEAD", newref.buf, NULL))
+ if (replace_each_worktree_head_symref(oldref.buf, newref.buf))
die(_("Branch renamed to %s, but HEAD is not updated!"), newname);
strbuf_addf(&oldsection, "branch.%s", oldref.buf + 11);
branch_name, comment_line_char);
if (write_file_gently(git_path(edit_description), "%s", buf.buf)) {
strbuf_release(&buf);
- return error(_("could not write branch description template: %s"),
- strerror(errno));
+ return error_errno(_("could not write branch description template"));
}
strbuf_reset(&buf);
if (launch_editor(git_path(edit_description), &buf, NULL)) {
BRANCH_TRACK_EXPLICIT),
OPT_SET_INT( 0, "set-upstream", &track, N_("change upstream info"),
BRANCH_TRACK_OVERRIDE),
- OPT_STRING('u', "set-upstream-to", &new_upstream, "upstream", "change the upstream info"),
+ OPT_STRING('u', "set-upstream-to", &new_upstream, N_("upstream"), N_("change the upstream info")),
OPT_BOOL(0, "unset-upstream", &unset_upstream, "Unset the upstream info"),
OPT__COLOR(&branch_use_color, N_("use colored output")),
OPT_SET_INT('r', "remotes", &filter.kind, N_("act on remote-tracking branches"),
if (argc == 1 && track == BRANCH_TRACK_OVERRIDE &&
!branch_existed && remote_tracking) {
fprintf(stderr, _("\nIf you wanted to make '%s' track '%s', do this:\n\n"), head, branch->name);
- fprintf(stderr, _(" git branch -d %s\n"), branch->name);
- fprintf(stderr, _(" git branch --set-upstream-to %s\n"), branch->name);
+ fprintf(stderr, " git branch -d %s\n", branch->name);
+ fprintf(stderr, " git branch --set-upstream-to %s\n", branch->name);
}
} else
struct checkout state;
static char *ps_matched;
unsigned char rev[20];
- int flag;
struct commit *head;
int errs = 0;
struct lock_file *lock_file;
if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
- read_ref_full("HEAD", 0, rev, &flag);
+ read_ref_full("HEAD", 0, rev, NULL);
head = lookup_commit_reference_gently(rev, 1);
errs |= post_checkout_hook(head, head, 0);
char *head_ref = resolve_refdup("HEAD", 0, sha1, &flag);
if (head_ref &&
(!(flag & REF_ISSYMREF) || strcmp(head_ref, new->path)))
- die_if_checked_out(new->path);
+ die_if_checked_out(new->path, 1);
free(head_ref);
}
static int option_no_checkout, option_bare, option_mirror, option_single_branch = -1;
static int option_local = -1, option_no_hardlinks, option_shared, option_recursive;
+static int option_shallow_submodules = -1;
static char *option_template, *option_depth;
static char *option_origin = NULL;
static char *option_branch = NULL;
N_("create a shallow clone of that depth")),
OPT_BOOL(0, "single-branch", &option_single_branch,
N_("clone only one branch, HEAD or --branch")),
+ OPT_BOOL(0, "shallow-submodules", &option_shallow_submodules,
+ N_("any cloned submodules will be shallow")),
OPT_STRING(0, "separate-git-dir", &real_git_dir, N_("gitdir"),
N_("separate git dir from working tree")),
OPT_STRING_LIST('c', "config", &option_config, N_("key=value"),
struct argv_array args = ARGV_ARRAY_INIT;
argv_array_pushl(&args, "submodule", "update", "--init", "--recursive", NULL);
+ if (option_shallow_submodules == 1
+ || (option_shallow_submodules == -1 && option_depth))
+ argv_array_push(&args, "--depth=1");
+
if (max_jobs != -1)
argv_array_pushf(&args, "--jobs=%d", max_jobs);
int status = git_gpg_config(var, value, NULL);
if (status)
return status;
- if (!strcmp(var, "commit.gpgsign")) {
- sign_commit = git_config_bool(var, value) ? "" : NULL;
- return 0;
- }
return git_default_config(var, value, cb);
}
static int all, also, interactive, patch_interactive, only, amend, signoff;
static int edit_flag = -1; /* unspecified */
static int quiet, verbose, no_verify, allow_empty, dry_run, renew_authorship;
+static int config_commit_verbose = -1; /* unspecified */
static int no_post_rewrite, allow_empty_message;
static char *untracked_files_arg, *force_date, *ignore_submodule_arg;
static char *sign_commit;
}
}
- if (message.len) {
+ if (have_option_m) {
strbuf_addbuf(&sb, &message);
hook_arg1 = "message";
} else if (logfile && !strcmp(logfile, "-")) {
f++;
if (f > 1)
die(_("Only one of -c/-C/-F/--fixup can be used."));
- if (message.len && f > 0)
+ if (have_option_m && f > 0)
die((_("Option -m cannot be combined with -c/-C/-F/--fixup.")));
- if (f || message.len)
+ if (f || have_option_m)
template_file = NULL;
if (edit_message)
use_message = edit_message;
sign_commit = git_config_bool(k, v) ? "" : NULL;
return 0;
}
+ if (!strcmp(k, "commit.verbose")) {
+ int is_bool;
+ config_commit_verbose = git_config_bool_or_int(k, v, &is_bool);
+ return 0;
+ }
status = git_gpg_config(k, v, NULL);
if (status)
if (parse_commit(current_head))
die(_("could not parse HEAD commit"));
}
+ verbose = -1; /* unspecified */
argc = parse_and_validate_options(argc, argv, builtin_commit_options,
builtin_commit_usage,
prefix, current_head, &s);
+ if (verbose == -1)
+ verbose = (config_commit_verbose < 0) ? 0 : config_commit_verbose;
+
if (dry_run)
return dry_run_commit(argc, argv, prefix, current_head, &s);
index_file = prepare_index(argc, argv, prefix, current_head, 0);
gitmodules_config();
git_config(git_diff_basic_config, NULL); /* no "diff" UI options */
rev.abbrev = 0;
+ precompose_argv(argc, argv);
argc = setup_revisions(argc, argv, &rev, NULL);
while (1 < argc && argv[1][0] == '-') {
gitmodules_config();
git_config(git_diff_basic_config, NULL); /* no "diff" UI options */
rev.abbrev = 0;
+ precompose_argv(argc, argv);
argc = setup_revisions(argc, argv, &rev, NULL);
for (i = 1; i < argc; i++) {
opt->disable_stdin = 1;
memset(&s_r_opt, 0, sizeof(s_r_opt));
s_r_opt.tweak = diff_tree_tweak_rev;
+
+ precompose_argv(argc, argv);
argc = setup_revisions(argc, argv, opt, &s_r_opt);
while (--argc > 0) {
gitmodules_config();
init_diff_ui_defaults();
git_config(git_diff_ui_config, NULL);
+ precompose_argv(argc, argv);
init_revisions(&rev, prefix);
fp = fopen(filename, "a");
if (!fp)
- return error(_("cannot open %s: %s\n"), filename, strerror(errno));
+ return error_errno(_("cannot open %s"), filename);
if (raw_url)
url = transport_anonymize_url(raw_url);
FILE *fp = fopen_for_writing(filename);
if (!fp)
- return error(_("cannot open %s: %s\n"), filename, strerror(errno));
+ return error_errno(_("cannot open %s"), filename);
fclose(fp);
return 0;
}
static int fsck_head_link(void)
{
- int flag;
int null_is_error = 0;
if (verbose)
fprintf(stderr, "Checking HEAD link\n");
- head_points_at = resolve_ref_unsafe("HEAD", 0, head_oid.hash, &flag);
+ head_points_at = resolve_ref_unsafe("HEAD", 0, head_oid.hash, NULL);
if (!head_points_at) {
errors_found |= ERROR_REFS;
return error("Invalid HEAD");
strbuf_add(base, entry.path, te_len);
if (S_ISREG(entry.mode)) {
- hit |= grep_sha1(opt, entry.sha1, base->buf, tn_len,
+ hit |= grep_sha1(opt, entry.oid->hash, base->buf, tn_len,
check_attr ? base->buf + tn_len : NULL);
}
else if (S_ISDIR(entry.mode)) {
void *data;
unsigned long size;
- data = lock_and_read_sha1_file(entry.sha1, &type, &size);
+ data = lock_and_read_sha1_file(entry.oid->hash, &type, &size);
if (!data)
die(_("unable to read tree (%s)"),
- sha1_to_hex(entry.sha1));
+ oid_to_hex(entry.oid));
strbuf_addch(base, '/');
init_tree_desc(&sub, data, size);
path = "emacsclient";
strbuf_addf(&man_page, "(woman \"%s\")", page);
execlp(path, "emacsclient", "-e", man_page.buf, (char *)NULL);
- warning(_("failed to exec '%s': %s"), path, strerror(errno));
+ warning_errno(_("failed to exec '%s'"), path);
}
}
path = "kfmclient";
strbuf_addf(&man_page, "man:%s(1)", page);
execlp(path, filename, "newTab", man_page.buf, (char *)NULL);
- warning(_("failed to exec '%s': %s"), path, strerror(errno));
+ warning_errno(_("failed to exec '%s'"), path);
}
}
if (!path)
path = "man";
execlp(path, "man", page, (char *)NULL);
- warning(_("failed to exec '%s': %s"), path, strerror(errno));
+ warning_errno(_("failed to exec '%s'"), path);
}
static void exec_man_cmd(const char *cmd, const char *page)
struct strbuf shell_cmd = STRBUF_INIT;
strbuf_addf(&shell_cmd, "%s %s", cmd, page);
execl(SHELL_PATH, SHELL_PATH, "-c", shell_cmd.buf, (char *)NULL);
- warning(_("failed to exec '%s': %s"), cmd, strerror(errno));
+ warning(_("failed to exec '%s'"), cmd);
}
static void add_man_viewer(const char *name)
nr_unresolved * sizeof(*objects));
f = sha1fd(output_fd, curr_pack);
fix_unresolved_deltas(f);
- strbuf_addf(&msg, _("completed with %d local objects"),
+ strbuf_addf(&msg, Q_("completed with %d local object",
+ "completed with %d local objects",
+ nr_objects - nr_objects_initial),
nr_objects - nr_objects_initial);
stop_progress_msg(&progress, msg.buf);
strbuf_release(&msg);
struct strbuf path = STRBUF_INIT;
struct strbuf template_path = STRBUF_INIT;
size_t template_len;
+ struct repository_format template_format;
+ struct strbuf err = STRBUF_INIT;
DIR *dir;
char *to_free = NULL;
/* Make sure that template is from the correct vintage */
strbuf_addstr(&template_path, "config");
- repository_format_version = 0;
- git_config_from_file(check_repository_format_version,
- template_path.buf, NULL);
+ read_repository_format(&template_format, template_path.buf);
strbuf_setlen(&template_path, template_len);
- if (repository_format_version &&
- repository_format_version != GIT_REPO_VERSION) {
- warning(_("not copying templates of "
- "a wrong format version %d from '%s'"),
- repository_format_version,
- template_dir);
+ /*
+ * No mention of version at all is OK, but anything else should be
+ * verified.
+ */
+ if (template_format.version >= 0 &&
+ verify_repository_format(&template_format, &err) < 0) {
+ warning(_("not copying templates from '%s': %s"),
+ template_dir, err.buf);
+ strbuf_release(&err);
goto close_free_return;
}
/* reading existing config may have overwrote it */
if (init_shared_repository != -1)
- shared_repository = init_shared_repository;
+ set_shared_repository(init_shared_repository);
/*
* We would have created the above under user's umask -- under
* shared-repository settings, we would need to fix them up.
*/
- if (shared_repository) {
+ if (get_shared_repository()) {
adjust_shared_perm(get_git_dir());
adjust_shared_perm(git_path_buf(&buf, "refs"));
adjust_shared_perm(git_path_buf(&buf, "refs/heads"));
create_object_directory();
- if (shared_repository) {
+ if (get_shared_repository()) {
char buf[10];
/* We do not spell "group" and such, so that
* the configuration can be read by older version
* and compatibility values for PERM_GROUP and
* PERM_EVERYBODY.
*/
- if (shared_repository < 0)
+ if (get_shared_repository() < 0)
/* force to the mode value */
- xsnprintf(buf, sizeof(buf), "0%o", -shared_repository);
- else if (shared_repository == PERM_GROUP)
+ xsnprintf(buf, sizeof(buf), "0%o", -get_shared_repository());
+ else if (get_shared_repository() == PERM_GROUP)
xsnprintf(buf, sizeof(buf), "%d", OLD_PERM_GROUP);
- else if (shared_repository == PERM_EVERYBODY)
+ else if (get_shared_repository() == PERM_EVERYBODY)
xsnprintf(buf, sizeof(buf), "%d", OLD_PERM_EVERYBODY);
else
die("BUG: invalid value for shared_repository");
"", and the last '%s%s' is the verbatim directory name. */
printf(_("%s%s Git repository in %s%s\n"),
reinit ? _("Reinitialized existing") : _("Initialized empty"),
- shared_repository ? _(" shared") : "",
+ get_shared_repository() ? _(" shared") : "",
git_dir, len && git_dir[len-1] != '/' ? "/" : "");
}
* and we know shared_repository should always be 0;
* but just in case we play safe.
*/
- saved = shared_repository;
- shared_repository = 0;
+ saved = get_shared_repository();
+ set_shared_repository(0);
switch (safe_create_leading_directories_const(argv[0])) {
case SCLD_OK:
case SCLD_PERMS:
die_errno(_("cannot mkdir %s"), argv[0]);
break;
}
- shared_repository = saved;
+ set_shared_repository(saved);
if (mkdir(argv[0], 0777) < 0)
die_errno(_("cannot mkdir %s"), argv[0]);
mkdir_tried = 1;
}
if (init_shared_repository != -1)
- shared_repository = init_shared_repository;
+ set_shared_repository(init_shared_repository);
/*
* GIT_WORK_TREE makes sense only in conjunction with GIT_DIR
#define THREAD_DEEP 2
static int thread;
static int do_signoff;
+static int base_auto;
static const char *signature = git_version_string;
static const char *signature_file;
static int config_cover_letter;
}
if (!strcmp(var, "format.outputdirectory"))
return git_config_string(&config_output_directory, var, value);
+ if (!strcmp(var, "format.useautobase")) {
+ base_auto = git_config_bool(var, value);
+ return 0;
+ }
return git_log_config(var, value, cb);
}
return 0;
}
+struct base_tree_info {
+ struct object_id base_commit;
+ int nr_patch_id, alloc_patch_id;
+ struct object_id *patch_id;
+};
+
+static struct commit *get_base_commit(const char *base_commit,
+ struct commit **list,
+ int total)
+{
+ struct commit *base = NULL;
+ struct commit **rev;
+ int i = 0, rev_nr = 0;
+
+ if (base_commit && strcmp(base_commit, "auto")) {
+ base = lookup_commit_reference_by_name(base_commit);
+ if (!base)
+ die(_("Unknown commit %s"), base_commit);
+ } else if ((base_commit && !strcmp(base_commit, "auto")) || base_auto) {
+ struct branch *curr_branch = branch_get(NULL);
+ const char *upstream = branch_get_upstream(curr_branch, NULL);
+ if (upstream) {
+ struct commit_list *base_list;
+ struct commit *commit;
+ unsigned char sha1[20];
+
+ if (get_sha1(upstream, sha1))
+ die(_("Failed to resolve '%s' as a valid ref."), upstream);
+ commit = lookup_commit_or_die(sha1, "upstream base");
+ base_list = get_merge_bases_many(commit, total, list);
+ /* There should be one and only one merge base. */
+ if (!base_list || base_list->next)
+ die(_("Could not find exact merge base."));
+ base = base_list->item;
+ free_commit_list(base_list);
+ } else {
+ die(_("Failed to get upstream, if you want to record base commit automatically,\n"
+ "please use git branch --set-upstream-to to track a remote branch.\n"
+ "Or you could specify base commit by --base=<base-commit-id> manually."));
+ }
+ }
+
+ ALLOC_ARRAY(rev, total);
+ for (i = 0; i < total; i++)
+ rev[i] = list[i];
+
+ rev_nr = total;
+ /*
+ * Get merge base through pair-wise computations
+ * and store it in rev[0].
+ */
+ while (rev_nr > 1) {
+ for (i = 0; i < rev_nr / 2; i++) {
+ struct commit_list *merge_base;
+ merge_base = get_merge_bases(rev[2 * i], rev[2 * i + 1]);
+ if (!merge_base || merge_base->next)
+ die(_("Failed to find exact merge base"));
+
+ rev[i] = merge_base->item;
+ }
+
+ if (rev_nr % 2)
+ rev[i] = rev[2 * i];
+ rev_nr = (rev_nr + 1) / 2;
+ }
+
+ if (!in_merge_bases(base, rev[0]))
+ die(_("base commit should be the ancestor of revision list"));
+
+ for (i = 0; i < total; i++) {
+ if (base == list[i])
+ die(_("base commit shouldn't be in revision list"));
+ }
+
+ free(rev);
+ return base;
+}
+
+static void prepare_bases(struct base_tree_info *bases,
+ struct commit *base,
+ struct commit **list,
+ int total)
+{
+ struct commit *commit;
+ struct rev_info revs;
+ struct diff_options diffopt;
+ int i;
+
+ if (!base)
+ return;
+
+ diff_setup(&diffopt);
+ DIFF_OPT_SET(&diffopt, RECURSIVE);
+ diff_setup_done(&diffopt);
+
+ oidcpy(&bases->base_commit, &base->object.oid);
+
+ init_revisions(&revs, NULL);
+ revs.max_parents = 1;
+ revs.topo_order = 1;
+ for (i = 0; i < total; i++) {
+ list[i]->object.flags &= ~UNINTERESTING;
+ add_pending_object(&revs, &list[i]->object, "rev_list");
+ list[i]->util = (void *)1;
+ }
+ base->object.flags |= UNINTERESTING;
+ add_pending_object(&revs, &base->object, "base");
+
+ if (prepare_revision_walk(&revs))
+ die(_("revision walk setup failed"));
+ /*
+ * Traverse the commits list, get prerequisite patch ids
+ * and stuff them in bases structure.
+ */
+ while ((commit = get_revision(&revs)) != NULL) {
+ unsigned char sha1[20];
+ struct object_id *patch_id;
+ if (commit->util)
+ continue;
+ if (commit_patch_id(commit, &diffopt, sha1))
+ die(_("cannot get patch id"));
+ ALLOC_GROW(bases->patch_id, bases->nr_patch_id + 1, bases->alloc_patch_id);
+ patch_id = bases->patch_id + bases->nr_patch_id;
+ hashcpy(patch_id->hash, sha1);
+ bases->nr_patch_id++;
+ }
+}
+
+static void print_bases(struct base_tree_info *bases)
+{
+ int i;
+
+ /* Only do this once, either for the cover or for the first one */
+ if (is_null_oid(&bases->base_commit))
+ return;
+
+ /* Show the base commit */
+ printf("base-commit: %s\n", oid_to_hex(&bases->base_commit));
+
+ /* Show the prerequisite patches */
+ for (i = bases->nr_patch_id - 1; i >= 0; i--)
+ printf("prerequisite-patch-id: %s\n", oid_to_hex(&bases->patch_id[i]));
+
+ free(bases->patch_id);
+ bases->nr_patch_id = 0;
+ bases->alloc_patch_id = 0;
+ oidclr(&bases->base_commit);
+}
+
int cmd_format_patch(int argc, const char **argv, const char *prefix)
{
struct commit *commit;
int reroll_count = -1;
char *branch_name = NULL;
char *from = NULL;
+ char *base_commit = NULL;
+ struct base_tree_info bases;
+
const struct option builtin_format_patch_options[] = {
{ OPTION_CALLBACK, 'n', "numbered", &numbered, NULL,
N_("use [PATCH n/m] even with a single patch"),
PARSE_OPT_OPTARG, thread_callback },
OPT_STRING(0, "signature", &signature, N_("signature"),
N_("add a signature")),
+ OPT_STRING(0, "base", &base_commit, N_("base-commit"),
+ N_("add prerequisite tree info to the patch series")),
OPT_FILENAME(0, "signature-file", &signature_file,
N_("add a signature from a file")),
OPT__QUIET(&quiet, N_("don't print the patch filenames")),
git_config(git_format_config, NULL);
init_revisions(&rev, prefix);
rev.commit_format = CMIT_FMT_EMAIL;
+ rev.expand_tabs_in_log_default = 0;
rev.verbose_header = 1;
rev.diff = 1;
rev.max_parents = 1;
signature = strbuf_detach(&buf, NULL);
}
+ memset(&bases, 0, sizeof(bases));
+ if (base_commit || base_auto) {
+ struct commit *base = get_base_commit(base_commit, list, nr);
+ reset_revision_walk();
+ prepare_bases(&bases, base, list, nr);
+ }
+
if (in_reply_to || thread || cover_letter)
rev.ref_message_ids = xcalloc(1, sizeof(struct string_list));
if (in_reply_to) {
gen_message_id(&rev, "cover");
make_cover_letter(&rev, use_stdout,
origin, nr, list, branch_name, quiet);
+ print_bases(&bases);
total++;
start_number--;
}
rev.mime_boundary);
else
print_signature();
+ print_bases(&bases);
}
if (!use_stdout)
fclose(stdout);
if ((dir = opendir(name)) == NULL) {
if (errno == ENOENT)
continue;
- error("cannot opendir %s (%s)", name, strerror(errno));
+ error_errno("cannot opendir %s", name);
goto out;
}
f = fopen(file, "r");
if (!f) {
- error("cannot open mail %s (%s)", file, strerror(errno));
+ error_errno("cannot open mail %s", file);
goto out;
}
if (strbuf_getwholeline(&buf, f, '\n')) {
- error("cannot read mail %s (%s)", file, strerror(errno));
+ error_errno("cannot read mail %s", file);
goto out;
}
int file_done = 0;
if (!f) {
- error("cannot open mbox %s", file);
+ error_errno("cannot open mbox %s", file);
goto out;
}
}
if (stat(arg, &argstat) == -1) {
- error("cannot stat %s (%s)", arg, strerror(errno));
+ error_errno("cannot stat %s", arg);
return 1;
}
usage_with_options(merge_file_usage, options);
if (quiet) {
if (!freopen("/dev/null", "w", stderr))
- return error("failed to redirect stderr to /dev/null: "
- "%s", strerror(errno));
+ return error_errno("failed to redirect stderr to /dev/null");
}
if (prefix)
FILE *f = to_stdout ? stdout : fopen(fpath, "wb");
if (!f)
- ret = error("Could not open %s for writing", filename);
+ ret = error_errno("Could not open %s for writing",
+ filename);
else if (result.size &&
fwrite(result.ptr, result.size, 1, f) != 1)
- ret = error("Could not write to %s", filename);
+ ret = error_errno("Could not write to %s", filename);
else if (fclose(f))
- ret = error("Could not close %s", filename);
+ ret = error_errno("Could not close %s", filename);
free(result.ptr);
}
/* An empty entry never compares same, not even to another empty entry */
static int same_entry(struct name_entry *a, struct name_entry *b)
{
- return a->sha1 &&
- b->sha1 &&
- !hashcmp(a->sha1, b->sha1) &&
+ return a->oid &&
+ b->oid &&
+ !oidcmp(a->oid, b->oid) &&
a->mode == b->mode;
}
static int both_empty(struct name_entry *a, struct name_entry *b)
{
- return !(a->sha1 || b->sha1);
+ return !(a->oid || b->oid);
}
static struct merge_list *create_entry(unsigned stage, unsigned mode, const unsigned char *sha1, const char *path)
return;
path = traverse_path(info, result);
- orig = create_entry(2, ours->mode, ours->sha1, path);
- final = create_entry(0, result->mode, result->sha1, path);
+ orig = create_entry(2, ours->mode, ours->oid->hash, path);
+ final = create_entry(0, result->mode, result->oid->hash, path);
final->link = orig;
newbase = traverse_path(info, p);
-#define ENTRY_SHA1(e) (((e)->mode && S_ISDIR((e)->mode)) ? (e)->sha1 : NULL)
+#define ENTRY_SHA1(e) (((e)->mode && S_ISDIR((e)->mode)) ? (e)->oid->hash : NULL)
buf0 = fill_tree_descriptor(t+0, ENTRY_SHA1(n + 0));
buf1 = fill_tree_descriptor(t+1, ENTRY_SHA1(n + 1));
buf2 = fill_tree_descriptor(t+2, ENTRY_SHA1(n + 2));
path = entry->path;
else
path = traverse_path(info, n);
- link = create_entry(stage, n->mode, n->sha1, path);
+ link = create_entry(stage, n->mode, n->oid->hash, path);
link->link = entry;
return link;
}
}
if (same_entry(entry+0, entry+1)) {
- if (entry[2].sha1 && !S_ISDIR(entry[2].mode)) {
+ if (entry[2].oid && !S_ISDIR(entry[2].mode)) {
/* We did not touch, they modified -- take theirs */
resolve(info, entry+1, entry+2);
return mask;
{
unsigned char result_tree[20], result_commit[20];
struct commit_list *parents, **pptr = &parents;
+ static struct lock_file lock;
+
+ hold_locked_index(&lock, 1);
+ refresh_cache(REFRESH_QUIET);
+ if (active_cache_changed &&
+ write_locked_index(&the_index, &lock, COMMIT_LOCK))
+ return error(_("Unable to write index."));
+ rollback_lock_file(&lock);
write_tree_trivial(result_tree);
printf(_("Wonderful.\n"));
struct commit *head_commit;
struct strbuf buf = STRBUF_INIT;
const char *head_arg;
- int flag, i, ret = 0, head_subsumed;
+ int i, ret = 0, head_subsumed;
int best_cnt = -1, merge_was_ok = 0, automerge_was_ok = 0;
struct commit_list *common = NULL;
const char *best_strategy = NULL, *wt_strategy = NULL;
* Check if we are _not_ on a detached HEAD, i.e. if there is a
* current branch.
*/
- branch = branch_to_free = resolve_refdup("HEAD", 0, head_sha1, &flag);
+ branch = branch_to_free = resolve_refdup("HEAD", 0, head_sha1, NULL);
if (branch && starts_with(branch, "refs/heads/"))
branch += 11;
if (!branch || is_null_sha1(head_sha1))
int pos;
if (show_only || verbose)
printf(_("Renaming %s to %s\n"), src, dst);
- if (!show_only && mode != INDEX) {
- if (rename(src, dst) < 0 && !ignore_errors)
- die_errno(_("renaming '%s' failed"), src);
- if (submodule_gitfile[i]) {
- if (submodule_gitfile[i] != SUBMODULE_WITH_GITDIR)
- connect_work_tree_and_git_dir(dst, submodule_gitfile[i]);
- if (!update_path_in_gitmodules(src, dst))
- gitmodules_modified = 1;
- }
+ if (show_only)
+ continue;
+ if (mode != INDEX && rename(src, dst) < 0) {
+ if (ignore_errors)
+ continue;
+ die_errno(_("renaming '%s' failed"), src);
+ }
+ if (submodule_gitfile[i]) {
+ if (submodule_gitfile[i] != SUBMODULE_WITH_GITDIR)
+ connect_work_tree_and_git_dir(dst, submodule_gitfile[i]);
+ if (!update_path_in_gitmodules(src, dst))
+ gitmodules_modified = 1;
}
if (mode == WORKING_DIRECTORY)
typedef struct rev_name {
const char *tip_name;
+ unsigned long taggerdate;
int generation;
int distance;
} rev_name;
#define MERGE_TRAVERSAL_WEIGHT 65535
static void name_rev(struct commit *commit,
- const char *tip_name, int generation, int distance,
+ const char *tip_name, unsigned long taggerdate,
+ int generation, int distance,
int deref)
{
struct rev_name *name = (struct rev_name *)commit->util;
name = xmalloc(sizeof(rev_name));
commit->util = name;
goto copy_data;
- } else if (name->distance > distance) {
+ } else if (name->taggerdate > taggerdate ||
+ (name->taggerdate == taggerdate &&
+ name->distance > distance)) {
copy_data:
name->tip_name = tip_name;
+ name->taggerdate = taggerdate;
name->generation = generation;
name->distance = distance;
} else
new_name = xstrfmt("%.*s^%d", (int)len, tip_name,
parent_number);
- name_rev(parents->item, new_name, 0,
+ name_rev(parents->item, new_name, taggerdate, 0,
distance + MERGE_TRAVERSAL_WEIGHT, 0);
} else {
- name_rev(parents->item, tip_name, generation + 1,
- distance + 1, 0);
+ name_rev(parents->item, tip_name, taggerdate,
+ generation + 1, distance + 1, 0);
}
}
}
struct name_ref_data *data = cb_data;
int can_abbreviate_output = data->tags_only && data->name_only;
int deref = 0;
+ unsigned long taggerdate = ULONG_MAX;
if (data->tags_only && !starts_with(path, "refs/tags/"))
return 0;
break; /* broken repository */
o = parse_object(t->tagged->oid.hash);
deref = 1;
+ taggerdate = t->date;
}
if (o && o->type == OBJ_COMMIT) {
struct commit *commit = (struct commit *)o;
path = name_ref_abbrev(path, can_abbreviate_output);
- name_rev(commit, xstrdup(path), 0, 0, deref);
+ name_rev(commit, xstrdup(path), taggerdate, 0, 0, deref);
}
return 0;
}
static int git_config_get_notes_strategy(const char *key,
enum notes_merge_strategy *strategy)
{
- const char *value;
+ char *value;
- if (git_config_get_string_const(key, &value))
+ if (git_config_get_string(key, &value))
return 1;
if (parse_notes_merge_strategy(value, strategy))
git_die_config(key, "unknown notes merge strategy %s", value);
+ free(value);
return 0;
}
update_ref(msg.buf, default_notes_ref(), result_sha1, NULL,
0, UPDATE_REFS_DIE_ON_ERR);
else { /* Merge has unresolved conflicts */
- char *existing;
+ const struct worktree *wt;
/* Update .git/NOTES_MERGE_PARTIAL with partial merge result */
update_ref(msg.buf, "NOTES_MERGE_PARTIAL", result_sha1, NULL,
0, UPDATE_REFS_DIE_ON_ERR);
/* Store ref-to-be-updated into .git/NOTES_MERGE_REF */
- existing = find_shared_symref("NOTES_MERGE_REF", default_notes_ref());
- if (existing)
+ wt = find_shared_symref("NOTES_MERGE_REF", default_notes_ref());
+ if (wt)
die(_("A notes merge into %s is already in-progress at %s"),
- default_notes_ref(), existing);
+ default_notes_ref(), wt->path);
if (create_symref("NOTES_MERGE_REF", default_notes_ref(), NULL))
die("Failed to store link to current notes ref (%s)",
default_notes_ref());
return reuse_packfile_offset - sizeof(struct pack_header);
}
+static const char no_split_warning[] = N_(
+"disabling bitmap writing, packs are split due to pack.packSizeLimit"
+);
+
static void write_pack_file(void)
{
uint32_t i = 0, j;
fixup_pack_header_footer(fd, sha1, pack_tmp_name,
nr_written, sha1, offset);
close(fd);
- write_bitmap_index = 0;
+ if (write_bitmap_index) {
+ warning(_(no_split_warning));
+ write_bitmap_index = 0;
+ }
}
if (!pack_to_stdout) {
* to preserve this property.
*/
if (stat(pack_tmp_name, &st) < 0) {
- warning("failed to stat %s: %s",
- pack_tmp_name, strerror(errno));
+ warning_errno("failed to stat %s", pack_tmp_name);
} else if (!last_mtime) {
last_mtime = st.st_mtime;
} else {
utb.actime = st.st_atime;
utb.modtime = --last_mtime;
if (utime(pack_tmp_name, &utb) < 0)
- warning("failed utime() on %s: %s",
- pack_tmp_name, strerror(errno));
+ warning_errno("failed utime() on %s", pack_tmp_name);
}
strbuf_addf(&tmpname, "%s-", base_name);
if (cmp < 0)
return;
if (name[cmplen] != '/') {
- add_object_entry(entry.sha1,
+ add_object_entry(entry.oid->hash,
object_type(entry.mode),
fullname, 1);
return;
const char *down = name+cmplen+1;
int downlen = name_cmp_len(down);
- tree = pbase_tree_get(entry.sha1);
+ tree = pbase_tree_get(entry.oid->hash);
if (!tree)
return;
init_tree_desc(&sub, tree->tree_data, tree->tree_size);
static char *opt_edit;
static char *opt_ff;
static char *opt_verify_signatures;
+static int opt_autostash = -1;
+static int config_autostash;
static struct argv_array opt_strategies = ARGV_ARRAY_INIT;
static struct argv_array opt_strategy_opts = ARGV_ARRAY_INIT;
static char *opt_gpg_sign;
+static int opt_allow_unrelated_histories;
/* Options passed to git-fetch */
static char *opt_all;
OPT_PASSTHRU(0, "verify-signatures", &opt_verify_signatures, NULL,
N_("verify that the named commit has a valid GPG signature"),
PARSE_OPT_NOARG),
+ OPT_BOOL(0, "autostash", &opt_autostash,
+ N_("automatically stash/stash pop before and after rebase")),
OPT_PASSTHRU_ARGV('s', "strategy", &opt_strategies, N_("strategy"),
N_("merge strategy to use"),
0),
OPT_PASSTHRU('S', "gpg-sign", &opt_gpg_sign, N_("key-id"),
N_("GPG sign commit"),
PARSE_OPT_OPTARG),
+ OPT_SET_INT(0, "allow-unrelated-histories",
+ &opt_allow_unrelated_histories,
+ N_("allow merging unrelated histories"), 1),
/* Options passed to git-fetch */
OPT_GROUP(N_("Options related to fetching")),
return REBASE_FALSE;
}
+/**
+ * Read config variables.
+ */
+static int git_pull_config(const char *var, const char *value, void *cb)
+{
+ if (!strcmp(var, "rebase.autostash")) {
+ config_autostash = git_config_bool(var, value);
+ return 0;
+ }
+ return git_default_config(var, value, cb);
+}
+
/**
* Returns 1 if there are unstaged changes, 0 otherwise.
*/
fprintf_ln(stderr, _("Please specify which branch you want to merge with."));
fprintf_ln(stderr, _("See git-pull(1) for details."));
fprintf(stderr, "\n");
- fprintf_ln(stderr, " git pull <remote> <branch>");
+ fprintf_ln(stderr, " git pull %s %s", _("<remote>"), _("<branch>"));
fprintf(stderr, "\n");
} else if (!curr_branch->merge_nr) {
const char *remote_name = NULL;
if (for_each_remote(get_only_remote, &remote_name) || !remote_name)
- remote_name = "<remote>";
+ remote_name = _("<remote>");
fprintf_ln(stderr, _("There is no tracking information for the current branch."));
if (opt_rebase)
fprintf_ln(stderr, _("Please specify which branch you want to merge with."));
fprintf_ln(stderr, _("See git-pull(1) for details."));
fprintf(stderr, "\n");
- fprintf_ln(stderr, " git pull <remote> <branch>");
+ fprintf_ln(stderr, " git pull %s %s", _("<remote>"), _("<branch>"));
fprintf(stderr, "\n");
- fprintf_ln(stderr, _("If you wish to set tracking information for this branch you can do so with:\n"
- "\n"
- " git branch --set-upstream-to=%s/<branch> %s\n"),
- remote_name, curr_branch->name);
+ fprintf_ln(stderr, _("If you wish to set tracking information for this branch you can do so with:"));
+ fprintf(stderr, "\n");
+ fprintf_ln(stderr, " git branch --set-upstream-to=%s/%s %s\n",
+ remote_name, _("<branch>"), curr_branch->name);
} else
fprintf_ln(stderr, _("Your configuration specifies to merge with the ref '%s'\n"
"from the remote, but no such ref was fetched."),
argv_array_pushv(&args, opt_strategy_opts.argv);
if (opt_gpg_sign)
argv_array_push(&args, opt_gpg_sign);
+ if (opt_allow_unrelated_histories > 0)
+ argv_array_push(&args, "--allow-unrelated-histories");
argv_array_push(&args, "FETCH_HEAD");
ret = run_command_v_opt(args.argv, RUN_GIT_CMD);
argv_array_pushv(&args, opt_strategy_opts.argv);
if (opt_gpg_sign)
argv_array_push(&args, opt_gpg_sign);
+ if (opt_autostash == 0)
+ argv_array_push(&args, "--no-autostash");
+ else if (opt_autostash == 1)
+ argv_array_push(&args, "--autostash");
argv_array_push(&args, "--onto");
argv_array_push(&args, sha1_to_hex(merge_head));
if (opt_rebase < 0)
opt_rebase = config_get_rebase();
- git_config(git_default_config, NULL);
+ git_config(git_pull_config, NULL);
if (read_cache_unmerged())
die_resolve_conflict("Pull");
if (get_sha1("HEAD", orig_head))
hashclr(orig_head);
+ if (!opt_rebase && opt_autostash != -1)
+ die(_("--[no-]autostash option is only valid with --rebase."));
+
if (opt_rebase) {
- int autostash = 0;
+ int autostash = config_autostash;
+ if (opt_autostash != -1)
+ autostash = opt_autostash;
if (is_null_sha1(orig_head) && !is_cache_unborn())
die(_("Updating an unborn branch with changes added to the index."));
- git_config_get_bool("rebase.autostash", &autostash);
if (!autostash)
die_on_unclean_work_tree(prefix);
if (!(flag & REF_ISSYMREF))
return;
- dst_name = strip_namespace(dst_name);
if (!dst_name) {
rp_error("refusing update to broken symref '%s'", cmd->ref_name);
cmd->skip_update = 1;
cmd->error_string = "broken symref";
return;
}
+ dst_name = strip_namespace(dst_name);
if ((item = string_list_lookup(list, dst_name)) == NULL)
return;
init_tree_desc(&desc, tree->buffer, tree->size);
complete = 1;
while (tree_entry(&desc, &entry)) {
- if (!has_sha1_file(entry.sha1) ||
- (S_ISDIR(entry.mode) && !tree_is_complete(entry.sha1))) {
+ if (!has_sha1_file(entry.oid->hash) ||
+ (S_ISDIR(entry.mode) && !tree_is_complete(entry.oid->hash))) {
tree->object.flags |= INCOMPLETE;
complete = 0;
}
size_t i;
if (!fgets(buffer, MAXCOMMAND - 1, stdin)) {
if (ferror(stdin))
- die("Comammand input error");
+ die("Command input error");
exit(0);
}
/* Strip end of line characters. */
url_nr = states.remote->url_nr;
}
for (i = 0; i < url_nr; i++)
+ /* TRANSLATORS: the colon ':' should align with
+ the one in " Fetch URL: %s" translation */
printf_ln(_(" Push URL: %s"), url[i]);
if (!i)
printf_ln(_(" Push URL: %s"), "(no URL)");
};
check_replace_refs = 0;
+ git_config(git_default_config, NULL);
argc = parse_options(argc, argv, prefix, options, git_replace_usage, 0);
if (lstat(ce->name, &st) < 0) {
if (errno != ENOENT && errno != ENOTDIR)
- warning("'%s': %s", ce->name, strerror(errno));
+ warning_errno(_("failed to stat '%s'"), ce->name);
/* It already vanished from the working tree */
continue;
}
list.entry[list.nr].is_submodule = S_ISGITLINK(ce->ce_mode);
if (list.entry[list.nr++].is_submodule &&
!is_staging_gitmodules_ok())
- die (_("Please, stage your changes to .gitmodules or stash them to proceed"));
+ die (_("Please stage your changes to .gitmodules or stash them to proceed"));
}
if (pathspec.nr) {
* --all and --mirror are incompatible; neither makes sense
* with any refspecs.
*/
- if ((refspecs && (send_all || args.send_mirror)) ||
+ if ((nr_refspecs > 0 && (send_all || args.send_mirror)) ||
(send_all && args.send_mirror))
usage_with_options(send_pack_usage, options);
#include "submodule-config.h"
#include "string-list.h"
#include "run-command.h"
+#include "remote.h"
+#include "refs.h"
+#include "connect.h"
+
+static char *get_default_remote(void)
+{
+ char *dest = NULL, *ret;
+ unsigned char sha1[20];
+ struct strbuf sb = STRBUF_INIT;
+ const char *refname = resolve_ref_unsafe("HEAD", 0, sha1, NULL);
+
+ if (!refname)
+ die(_("No such ref: %s"), "HEAD");
+
+ /* detached HEAD */
+ if (!strcmp(refname, "HEAD"))
+ return xstrdup("origin");
+
+ if (!skip_prefix(refname, "refs/heads/", &refname))
+ die(_("Expecting a full ref name, got %s"), refname);
+
+ strbuf_addf(&sb, "branch.%s.remote", refname);
+ if (git_config_get_string(sb.buf, &dest))
+ ret = xstrdup("origin");
+ else
+ ret = dest;
+
+ strbuf_release(&sb);
+ return ret;
+}
+
+static int starts_with_dot_slash(const char *str)
+{
+ return str[0] == '.' && is_dir_sep(str[1]);
+}
+
+static int starts_with_dot_dot_slash(const char *str)
+{
+ return str[0] == '.' && str[1] == '.' && is_dir_sep(str[2]);
+}
+
+/*
+ * Returns 1 if it was the last chop before ':'.
+ */
+static int chop_last_dir(char **remoteurl, int is_relative)
+{
+ char *rfind = find_last_dir_sep(*remoteurl);
+ if (rfind) {
+ *rfind = '\0';
+ return 0;
+ }
+
+ rfind = strrchr(*remoteurl, ':');
+ if (rfind) {
+ *rfind = '\0';
+ return 1;
+ }
+
+ if (is_relative || !strcmp(".", *remoteurl))
+ die(_("cannot strip one component off url '%s'"),
+ *remoteurl);
+
+ free(*remoteurl);
+ *remoteurl = xstrdup(".");
+ return 0;
+}
+
+/*
+ * The `url` argument is the URL that navigates to the submodule origin
+ * repo. When relative, this URL is relative to the superproject origin
+ * URL repo. The `up_path` argument, if specified, is the relative
+ * path that navigates from the submodule working tree to the superproject
+ * working tree. Returns the origin URL of the submodule.
+ *
+ * Return either an absolute URL or filesystem path (if the superproject
+ * origin URL is an absolute URL or filesystem path, respectively) or a
+ * relative file system path (if the superproject origin URL is a relative
+ * file system path).
+ *
+ * When the output is a relative file system path, the path is either
+ * relative to the submodule working tree, if up_path is specified, or to
+ * the superproject working tree otherwise.
+ *
+ * NEEDSWORK: This works incorrectly on the domain and protocol part.
+ * remote_url url outcome expectation
+ * http://a.com/b ../c http://a.com/c as is
+ * http://a.com/b ../../c http://c error out
+ * http://a.com/b ../../../c http:/c error out
+ * http://a.com/b ../../../../c http:c error out
+ * http://a.com/b ../../../../../c .:c error out
+ * NEEDSWORK: Given how chop_last_dir() works, this function is broken
+ * when a local part has a colon in its path component, too.
+ */
+static char *relative_url(const char *remote_url,
+ const char *url,
+ const char *up_path)
+{
+ int is_relative = 0;
+ int colonsep = 0;
+ char *out;
+ char *remoteurl = xstrdup(remote_url);
+ struct strbuf sb = STRBUF_INIT;
+ size_t len = strlen(remoteurl);
+
+	if (is_dir_sep(remoteurl[len - 1]))
+		remoteurl[len - 1] = '\0';
+
+ if (!url_is_local_not_ssh(remoteurl) || is_absolute_path(remoteurl))
+ is_relative = 0;
+ else {
+ is_relative = 1;
+ /*
+ * Prepend a './' to ensure all relative
+ * remoteurls start with './' or '../'
+ */
+ if (!starts_with_dot_slash(remoteurl) &&
+ !starts_with_dot_dot_slash(remoteurl)) {
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "./%s", remoteurl);
+ free(remoteurl);
+ remoteurl = strbuf_detach(&sb, NULL);
+ }
+ }
+ /*
+ * When the url starts with '../', remove that and the
+ * last directory in remoteurl.
+ */
+ while (url) {
+ if (starts_with_dot_dot_slash(url)) {
+ url += 3;
+ colonsep |= chop_last_dir(&remoteurl, is_relative);
+ } else if (starts_with_dot_slash(url))
+ url += 2;
+ else
+ break;
+ }
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "%s%s%s", remoteurl, colonsep ? ":" : "/", url);
+ free(remoteurl);
+
+ if (starts_with_dot_slash(sb.buf))
+ out = xstrdup(sb.buf + 2);
+ else
+ out = xstrdup(sb.buf);
+ strbuf_reset(&sb);
+
+ if (!up_path || !is_relative)
+ return out;
+
+ strbuf_addf(&sb, "%s%s", up_path, out);
+ free(out);
+ return strbuf_detach(&sb, NULL);
+}
+
+static int resolve_relative_url(int argc, const char **argv, const char *prefix)
+{
+ char *remoteurl = NULL;
+ char *remote = get_default_remote();
+ const char *up_path = NULL;
+ char *res;
+ const char *url;
+ struct strbuf sb = STRBUF_INIT;
+
+ if (argc != 2 && argc != 3)
+ die("resolve-relative-url only accepts one or two arguments");
+
+ url = argv[1];
+ strbuf_addf(&sb, "remote.%s.url", remote);
+ free(remote);
+
+ if (git_config_get_string(sb.buf, &remoteurl))
+ /* the repository is its own authoritative upstream */
+ remoteurl = xgetcwd();
+
+ if (argc == 3)
+ up_path = argv[2];
+
+ res = relative_url(remoteurl, url, up_path);
+ puts(res);
+ free(res);
+ free(remoteurl);
+ return 0;
+}
+
+static int resolve_relative_url_test(int argc, const char **argv, const char *prefix)
+{
+ char *remoteurl, *res;
+ const char *up_path, *url;
+
+ if (argc != 4)
+ die("resolve-relative-url-test only accepts three arguments: <up_path> <remoteurl> <url>");
+
+ up_path = argv[1];
+ remoteurl = xstrdup(argv[2]);
+ url = argv[3];
+
+ if (!strcmp(up_path, "(null)"))
+ up_path = NULL;
+
+ res = relative_url(remoteurl, url, up_path);
+ puts(res);
+ free(res);
+ free(remoteurl);
+ return 0;
+}
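+
+/*
+ * For illustration only: the first row of the NEEDSWORK table above can
+ * be reproduced from the command line with the "-test" variant, which
+ * takes the remote URL explicitly ("(null)" stands for an unset up_path):
+ *
+ *	$ git submodule--helper resolve-relative-url-test '(null)' http://a.com/b ../c
+ *	http://a.com/c
+ */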
struct module_list {
const struct cache_entry **entries;
return 0;
}
-static int module_name(int argc, const char **argv, const char *prefix)
+static void init_submodule(const char *path, const char *prefix, int quiet)
{
const struct submodule *sub;
+ struct strbuf sb = STRBUF_INIT;
+ char *upd = NULL, *url = NULL, *displaypath;
- if (argc != 2)
- usage(_("git submodule--helper name <path>"));
-
+ /* Only loads from .gitmodules, no overlay with .git/config */
gitmodules_config();
- sub = submodule_from_path(null_sha1, argv[1]);
+
+ if (prefix) {
+ strbuf_addf(&sb, "%s%s", prefix, path);
+ displaypath = strbuf_detach(&sb, NULL);
+ } else
+ displaypath = xstrdup(path);
+
+ sub = submodule_from_path(null_sha1, path);
if (!sub)
- die(_("no submodule mapping found in .gitmodules for path '%s'"),
- argv[1]);
+ die(_("No url found for submodule path '%s' in .gitmodules"),
+ displaypath);
- printf("%s\n", sub->name);
+ /*
+ * Copy url setting when it is not set yet.
+ * To look up the url in .git/config, we must not fall back to
+ * .gitmodules, so look it up directly.
+ */
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "submodule.%s.url", sub->name);
+ if (git_config_get_string(sb.buf, &url)) {
+ url = xstrdup(sub->url);
- return 0;
+ if (!url)
+ die(_("No url found for submodule path '%s' in .gitmodules"),
+ displaypath);
+
+ /* Possibly a url relative to parent */
+ if (starts_with_dot_dot_slash(url) ||
+ starts_with_dot_slash(url)) {
+ char *remoteurl, *relurl;
+ char *remote = get_default_remote();
+ struct strbuf remotesb = STRBUF_INIT;
+ strbuf_addf(&remotesb, "remote.%s.url", remote);
+ free(remote);
+
+ if (git_config_get_string(remotesb.buf, &remoteurl))
+ /*
+ * The repository is its own
+ * authoritative upstream
+ */
+ remoteurl = xgetcwd();
+ relurl = relative_url(remoteurl, url, NULL);
+ strbuf_release(&remotesb);
+ free(remoteurl);
+ free(url);
+ url = relurl;
+ }
+
+ if (git_config_set_gently(sb.buf, url))
+ die(_("Failed to register url for submodule path '%s'"),
+ displaypath);
+ if (!quiet)
+ fprintf(stderr,
+ _("Submodule '%s' (%s) registered for path '%s'\n"),
+ sub->name, url, displaypath);
+ }
+
+ /* Copy "update" setting when it is not set yet */
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "submodule.%s.update", sub->name);
+ if (git_config_get_string(sb.buf, &upd) &&
+ sub->update_strategy.type != SM_UPDATE_UNSPECIFIED) {
+ if (sub->update_strategy.type == SM_UPDATE_COMMAND) {
+ fprintf(stderr, _("warning: command update mode suggested for submodule '%s'\n"),
+ sub->name);
+ upd = xstrdup("none");
+ } else
+ upd = xstrdup(submodule_strategy_to_string(&sub->update_strategy));
+
+ if (git_config_set_gently(sb.buf, upd))
+ die(_("Failed to register update mode for submodule path '%s'"), displaypath);
+ }
+ strbuf_release(&sb);
+ free(displaypath);
+ free(url);
+ free(upd);
}
-/*
- * Rules to sanitize configuration variables that are Ok to be passed into
- * submodule operations from the parent project using "-c". Should only
- * include keys which are both (a) safe and (b) necessary for proper
- * operation.
- */
-static int submodule_config_ok(const char *var)
+static int module_init(int argc, const char **argv, const char *prefix)
{
- if (starts_with(var, "credential."))
+ struct pathspec pathspec;
+ struct module_list list = MODULE_LIST_INIT;
+ int quiet = 0;
+ int i;
+
+ struct option module_init_options[] = {
+ OPT_STRING(0, "prefix", &prefix,
+ N_("path"),
+ N_("alternative anchor for relative paths")),
+ OPT__QUIET(&quiet, N_("Suppress output for initializing a submodule")),
+ OPT_END()
+ };
+
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule--helper init [<path>]"),
+ NULL
+ };
+
+ argc = parse_options(argc, argv, prefix, module_init_options,
+ git_submodule_helper_usage, 0);
+
+ if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
return 1;
+
+ for (i = 0; i < list.nr; i++)
+ init_submodule(list.entries[i]->name, prefix, quiet);
+
return 0;
}
-static int sanitize_submodule_config(const char *var, const char *value, void *data)
+static int module_name(int argc, const char **argv, const char *prefix)
{
- struct strbuf *out = data;
+ const struct submodule *sub;
- if (submodule_config_ok(var)) {
- if (out->len)
- strbuf_addch(out, ' ');
+ if (argc != 2)
+ usage(_("git submodule--helper name <path>"));
- if (value)
- sq_quotef(out, "%s=%s", var, value);
- else
- sq_quote_buf(out, var);
- }
+ gitmodules_config();
+ sub = submodule_from_path(null_sha1, argv[1]);
- return 0;
-}
+ if (!sub)
+ die(_("no submodule mapping found in .gitmodules for path '%s'"),
+ argv[1]);
-static void prepare_submodule_repo_env(struct argv_array *out)
-{
- const char * const *var;
-
- for (var = local_repo_env; *var; var++) {
- if (!strcmp(*var, CONFIG_DATA_ENVIRONMENT)) {
- struct strbuf sanitized_config = STRBUF_INIT;
- git_config_from_parameters(sanitize_submodule_config,
- &sanitized_config);
- argv_array_pushf(out, "%s=%s", *var, sanitized_config.buf);
- strbuf_release(&sanitized_config);
- } else {
- argv_array_push(out, *var);
- }
- }
+ printf("%s\n", sub->name);
+ return 0;
}
static int clone_submodule(const char *path, const char *gitdir, const char *url,
static int module_clone(int argc, const char **argv, const char *prefix)
{
- const char *path = NULL, *name = NULL, *url = NULL;
+ const char *name = NULL, *url = NULL;
const char *reference = NULL, *depth = NULL;
int quiet = 0;
FILE *submodule_dot_git;
- char *sm_gitdir, *cwd, *p;
+ char *p, *path = NULL, *sm_gitdir;
struct strbuf rel_path = STRBUF_INIT;
struct strbuf sb = STRBUF_INIT;
argc = parse_options(argc, argv, prefix, module_clone_options,
git_submodule_helper_usage, 0);
- if (argc || !url || !path)
+ if (argc || !url || !path || !*path)
usage_with_options(git_submodule_helper_usage,
module_clone_options);
strbuf_addf(&sb, "%s/modules/%s", get_git_dir(), name);
- sm_gitdir = strbuf_detach(&sb, NULL);
+ sm_gitdir = xstrdup(absolute_path(sb.buf));
+ strbuf_reset(&sb);
+
+ if (!is_absolute_path(path)) {
+ strbuf_addf(&sb, "%s/%s", get_git_work_tree(), path);
+ path = strbuf_detach(&sb, NULL);
+ } else
+ path = xstrdup(path);
if (!file_exists(sm_gitdir)) {
if (safe_create_leading_directories_const(sm_gitdir) < 0)
}
/* Write a .git file in the submodule to redirect to the superproject. */
- if (safe_create_leading_directories_const(path) < 0)
- die(_("could not create directory '%s'"), path);
-
- if (path && *path)
- strbuf_addf(&sb, "%s/.git", path);
- else
- strbuf_addstr(&sb, ".git");
-
+ strbuf_addf(&sb, "%s/.git", path);
if (safe_create_leading_directories_const(sb.buf) < 0)
die(_("could not create leading directories of '%s'"), sb.buf);
submodule_dot_git = fopen(sb.buf, "w");
if (!submodule_dot_git)
die_errno(_("cannot open file '%s'"), sb.buf);
- fprintf(submodule_dot_git, "gitdir: %s\n",
- relative_path(sm_gitdir, path, &rel_path));
+ fprintf_or_die(submodule_dot_git, "gitdir: %s\n",
+ relative_path(sm_gitdir, path, &rel_path));
if (fclose(submodule_dot_git))
die(_("could not close file %s"), sb.buf);
strbuf_reset(&sb);
strbuf_reset(&rel_path);
- cwd = xgetcwd();
/* Redirect the worktree of the submodule in the superproject's config */
- if (!is_absolute_path(sm_gitdir)) {
- strbuf_addf(&sb, "%s/%s", cwd, sm_gitdir);
- free(sm_gitdir);
- sm_gitdir = strbuf_detach(&sb, NULL);
- }
-
- strbuf_addf(&sb, "%s/%s", cwd, path);
p = git_pathdup_submodule(path, "config");
if (!p)
die(_("could not get submodule directory for '%s'"), path);
git_config_set_in_file(p, "core.worktree",
- relative_path(sb.buf, sm_gitdir, &rel_path));
+ relative_path(path, sm_gitdir, &rel_path));
strbuf_release(&sb);
strbuf_release(&rel_path);
free(sm_gitdir);
- free(cwd);
+ free(path);
free(p);
return 0;
}
-static int module_sanitize_config(int argc, const char **argv, const char *prefix)
-{
- struct strbuf sanitized_config = STRBUF_INIT;
-
- if (argc > 1)
- usage(_("git submodule--helper sanitize-config"));
-
- git_config_from_parameters(sanitize_submodule_config, &sanitized_config);
- if (sanitized_config.len)
- printf("%s\n", sanitized_config.buf);
-
- strbuf_release(&sanitized_config);
-
- return 0;
-}
-
struct submodule_update_clone {
/* index into 'list', the list of submodules to look into for cloning */
int current;
SUBMODULE_UPDATE_STRATEGY_INIT, 0, NULL, NULL, NULL, NULL, \
STRING_LIST_INIT_DUP, 0}
+
+static void next_submodule_warn_missing(struct submodule_update_clone *suc,
+ struct strbuf *out, const char *displaypath)
+{
+ /*
+ * Only mention uninitialized submodules when their
+ * paths have been specified.
+ */
+ if (suc->warn_if_uninitialized) {
+ strbuf_addf(out,
+ _("Submodule path '%s' not initialized"),
+ displaypath);
+ strbuf_addch(out, '\n');
+ strbuf_addstr(out,
+ _("Maybe you want to use 'update --init'?"));
+ strbuf_addch(out, '\n');
+ }
+}
+
/**
* Determine whether 'ce' needs to be cloned. If so, prepare the 'child' to
* run the clone. Returns 1 if 'ce' needs to be cloned, 0 otherwise.
else
displaypath = ce->name;
+ if (!sub) {
+ next_submodule_warn_missing(suc, out, displaypath);
+ goto cleanup;
+ }
+
if (suc->update.type == SM_UPDATE_NONE
|| (suc->update.type == SM_UPDATE_UNSPECIFIED
&& sub->update_strategy.type == SM_UPDATE_NONE)) {
strbuf_addf(&sb, "submodule.%s.url", sub->name);
git_config_get_string(sb.buf, &url);
if (!url) {
- /*
- * Only mention uninitialized submodules when their
- * path have been specified
- */
- if (suc->warn_if_uninitialized) {
- strbuf_addf(out,
- _("Submodule path '%s' not initialized"),
- displaypath);
- strbuf_addch(out, '\n');
- strbuf_addstr(out,
- _("Maybe you want to use 'update --init'?"));
- strbuf_addch(out, '\n');
- }
+ next_submodule_warn_missing(suc, out, displaypath);
goto cleanup;
}
{"list", module_list},
{"name", module_name},
{"clone", module_clone},
- {"sanitize-config", module_sanitize_config},
- {"update-clone", update_clone}
+ {"update-clone", update_clone},
+ {"resolve-relative-url", resolve_relative_url},
+ {"resolve-relative-url-test", resolve_relative_url_test},
+ {"init", module_init}
};
int cmd_submodule__helper(int argc, const char **argv, const char *prefix)
static int verify_tag(const char *name, const char *ref,
const unsigned char *sha1)
{
- const char *argv_verify_tag[] = {"verify-tag",
- "-v", "SHA1_HEX", NULL};
- argv_verify_tag[2] = sha1_to_hex(sha1);
-
- if (run_command_v_opt(argv_verify_tag, RUN_GIT_CMD))
- return error(_("could not verify the tag '%s'"), name);
- return 0;
+ return gpg_verify_tag(sha1, name, GPG_VERIFY_VERBOSE);
}
static int do_sign(struct strbuf *buffer)
{
if (err == ENOENT || err == ENOTDIR)
return remove_one_path(path);
- return error("lstat(\"%s\"): %s", path, strerror(errno));
+ return error("lstat(\"%s\"): %s", path, strerror(err));
}
static int add_one_path(const struct cache_entry *old, const char *path, int len, struct stat *st)
pfd[1].events = POLLIN;
if (poll(pfd, 2, -1) < 0) {
if (errno != EINTR) {
- error("poll failed resuming: %s",
- strerror(errno));
+ error_errno("poll failed resuming");
sleep(1);
}
continue;
NULL
};
-static int run_gpg_verify(const char *buf, unsigned long size, unsigned flags)
-{
- struct signature_check sigc;
- int len;
- int ret;
-
- memset(&sigc, 0, sizeof(sigc));
-
- len = parse_signature(buf, size);
-
- if (size == len) {
- if (flags & GPG_VERIFY_VERBOSE)
- write_in_full(1, buf, len);
- return error("no signature found");
- }
-
- ret = check_signature(buf, len, buf + len, size - len, &sigc);
- print_signature_buffer(&sigc, flags);
-
- signature_check_clear(&sigc);
- return ret;
-}
-
-static int verify_tag(const char *name, unsigned flags)
-{
- enum object_type type;
- unsigned char sha1[20];
- char *buf;
- unsigned long size;
- int ret;
-
- if (get_sha1(name, sha1))
- return error("tag '%s' not found.", name);
-
- type = sha1_object_info(sha1, NULL);
- if (type != OBJ_TAG)
- return error("%s: cannot verify a non-tag object of type %s.",
- name, typename(type));
-
- buf = read_sha1_file(sha1, &type, &size);
- if (!buf)
- return error("%s: unable to read file.", name);
-
- ret = run_gpg_verify(buf, size, flags);
-
- free(buf);
- return ret;
-}
-
static int git_verify_tag_config(const char *var, const char *value, void *cb)
{
int status = git_gpg_config(var, value, cb);
if (verbose)
flags |= GPG_VERIFY_VERBOSE;
- /* sometimes the program was terminated because this signal
- * was received in the process of writing the gpg input: */
- signal(SIGPIPE, SIG_IGN);
- while (i < argc)
- if (verify_tag(argv[i++], flags))
+ while (i < argc) {
+ unsigned char sha1[20];
+ const char *name = argv[i++];
+ if (get_sha1(name, sha1))
+ had_error = !!error("tag '%s' not found.", name);
+ else if (gpg_verify_tag(sha1, name, flags))
had_error = 1;
+ }
return had_error;
}
struct add_opts {
int force;
int detach;
+ int checkout;
const char *new_branch;
int force_new_branch;
};
if (ret < 0 && errno == ENOTDIR)
ret = unlink(path.buf);
if (ret)
- error(_("failed to remove: %s"), strerror(errno));
+ error_errno(_("failed to remove '%s'"), path.buf);
}
closedir(dir);
if (!show_only)
if (!opts->detach && !strbuf_check_branch_ref(&symref, refname) &&
ref_exists(symref.buf)) { /* it's a branch */
if (!opts->force)
- die_if_checked_out(symref.buf);
+ die_if_checked_out(symref.buf, 0);
} else { /* must be a commit */
commit = lookup_commit_reference_by_name(refname);
if (!commit)
if (ret)
goto done;
- cp.argv = NULL;
- argv_array_clear(&cp.args);
- argv_array_pushl(&cp.args, "reset", "--hard", NULL);
- cp.env = child_env.argv;
- ret = run_command(&cp);
- if (!ret) {
- is_junk = 0;
- free(junk_work_tree);
- free(junk_git_dir);
- junk_work_tree = NULL;
- junk_git_dir = NULL;
+ if (opts->checkout) {
+ cp.argv = NULL;
+ argv_array_clear(&cp.args);
+ argv_array_pushl(&cp.args, "reset", "--hard", NULL);
+ cp.env = child_env.argv;
+ ret = run_command(&cp);
+ if (ret)
+ goto done;
}
+
+ is_junk = 0;
+ free(junk_work_tree);
+ free(junk_git_dir);
+ junk_work_tree = NULL;
+ junk_git_dir = NULL;
+
done:
strbuf_reset(&sb);
strbuf_addf(&sb, "%s/locked", sb_repo.buf);
OPT_STRING('B', NULL, &new_branch_force, N_("branch"),
N_("create or reset a branch")),
OPT_BOOL(0, "detach", &opts.detach, N_("detach HEAD at named commit")),
+ OPT_BOOL(0, "checkout", &opts.checkout, N_("populate the new working tree")),
OPT_END()
};
memset(&opts, 0, sizeof(opts));
+ opts.checkout = 1;
ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
if (!!opts.detach + !!opts.new_branch + !!new_branch_force > 1)
die(_("-b, -B, and --detach are mutually exclusive"));
if (!opts.force &&
!strbuf_check_branch_ref(&symref, opts.new_branch) &&
ref_exists(symref.buf))
- die_if_checked_out(symref.buf);
+ die_if_checked_out(symref.buf, 0);
strbuf_release(&symref);
}
/* write prerequisites */
if (compute_and_write_prerequisites(bundle_fd, &revs, argc, argv))
- return -1;
+ goto err;
argc = setup_revisions(argc, argv, &revs, NULL);
- if (argc > 1)
- return error(_("unrecognized argument: %s"), argv[1]);
+ if (argc > 1) {
+ error(_("unrecognized argument: %s"), argv[1]);
+ goto err;
+ }
object_array_remove_duplicates(&revs.pending);
if (!ref_count)
die(_("Refusing to create empty bundle."));
else if (ref_count < 0)
- return -1;
+ goto err;
/* write pack */
- if (write_pack_data(bundle_fd, &revs))
- return -1;
+ if (write_pack_data(bundle_fd, &revs)) {
+ bundle_fd = -1; /* already closed by the above call */
+ goto err;
+ }
if (!bundle_to_stdout) {
if (commit_lock_file(&lock))
die_errno(_("cannot create '%s'"), path);
}
return 0;
+err:
+ if (!bundle_to_stdout) {
+ if (0 <= bundle_fd)
+ close(bundle_fd);
+ rollback_lock_file(&lock);
+ }
+ return -1;
}
int unbundle(struct bundle_header *header, int bundle_fd, int flags)
cnt++;
else {
struct cache_tree_sub *sub;
- struct tree *subtree = lookup_tree(entry.sha1);
+ struct tree *subtree = lookup_tree(entry.oid->hash);
if (!subtree->object.parsed)
parse_tree(subtree);
sub = cache_tree_sub(it, entry.path);
it = find_cache_tree_from_traversal(root, info);
it = cache_tree_find(it, ent->path);
- if (it && it->entry_count > 0 && !hashcmp(ent->sha1, it->sha1))
+ if (it && it->entry_count > 0 && !hashcmp(ent->oid->hash, it->sha1))
return it->entry_count;
return 0;
}
extern int log_all_ref_updates;
extern int warn_ambiguous_refs;
extern int warn_on_object_refname_ambiguity;
-extern int shared_repository;
extern const char *apply_default_whitespace;
extern const char *apply_default_ignorewhitespace;
extern const char *git_attributes_file;
+extern const char *git_hooks_path;
extern int zlib_compression_level;
extern int core_compression_level;
extern int core_compression_seen;
extern unsigned long big_file_threshold;
extern unsigned long pack_size_limit_cfg;
+void set_shared_repository(int value);
+int get_shared_repository(void);
+
/*
* Do replace refs need to be checked this run? This variable is
* initialized to true unless --no-replace-object is used or
extern char comment_line_char;
extern int auto_comment_line_char;
+/* Windows only */
+enum hide_dotfiles_type {
+ HIDE_DOTFILES_FALSE = 0,
+ HIDE_DOTFILES_TRUE,
+ HIDE_DOTFILES_DOTGITONLY
+};
+extern enum hide_dotfiles_type hide_dotfiles;
+
enum branch_track {
BRANCH_TRACK_UNSPECIFIED = -1,
BRANCH_TRACK_NEVER = 0,
*/
#define GIT_REPO_VERSION 0
#define GIT_REPO_VERSION_READ 1
-extern int repository_format_version;
extern int repository_format_precious_objects;
-extern int check_repository_format(void);
+
+struct repository_format {
+ int version;
+ int precious_objects;
+ int is_bare;
+ char *work_tree;
+ struct string_list unknown_extensions;
+};
+
+/*
+ * Read the repository format characteristics from the config file "path" into
+ * "format" struct. Returns the numeric version. On error, -1 is returned,
+ * format->version is set to -1, and all other fields in the struct are
+ * undefined.
+ */
+int read_repository_format(struct repository_format *format, const char *path);
+
+/*
+ * Verify that the repository described by repository_format is something we
+ * can read. If it is, return 0. Otherwise, return -1, and "err" will describe
+ * any errors encountered.
+ */
+int verify_repository_format(const struct repository_format *format,
+ struct strbuf *err);
+
+/*
+ * Check the repository format version in the path found in get_git_dir(),
+ * and die if it is a version we don't understand. Generally one would
+ * set_git_dir() before calling this, and use it only for "are we in a valid
+ * repo?".
+ */
+extern void check_repository_format(void);
#define MTIME_CHANGED 0x0001
#define CTIME_CHANGED 0x0002
*/
extern const char *mkpath(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
extern const char *git_path(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
+extern const char *git_common_path(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
extern char *mksnpath(char *buf, size_t n, const char *fmt, ...)
__attribute__((format (printf, 3, 4)));
extern void strbuf_git_path(struct strbuf *sb, const char *fmt, ...)
__attribute__((format (printf, 2, 3)));
+extern void strbuf_git_common_path(struct strbuf *sb, const char *fmt, ...)
+ __attribute__((format (printf, 2, 3)));
extern char *git_path_buf(struct strbuf *buf, const char *fmt, ...)
__attribute__((format (printf, 2, 3)));
extern void strbuf_git_path_submodule(struct strbuf *sb, const char *path,
int git_mkstemp(char *path, size_t n, const char *template);
-int git_mkstemps(char *path, size_t n, const char *template, int suffix_len);
-
/* set default permissions by passing mode arguments to open(2) */
int git_mkstemps_mode(char *pattern, int suffix_len, int mode);
int git_mkstemp_mode(char *pattern, int mode);
extern void maybe_die_on_misspelt_object_name(const char *name, const char *prefix);
extern int get_sha1_with_context(const char *str, unsigned flags, unsigned char *sha1, struct object_context *orc);
+extern int get_oid(const char *str, struct object_id *oid);
+
typedef int each_abbrev_fn(const unsigned char *sha1, void *);
extern int for_each_abbrev(const char *prefix, each_abbrev_fn, void *);
extern int git_config_with_options(config_fn_t fn, void *,
struct git_config_source *config_source,
int respect_includes);
-extern int git_config_early(config_fn_t fn, void *, const char *repo_config);
extern int git_parse_ulong(const char *, unsigned long *);
extern int git_parse_maybe_bool(const char *);
extern int git_config_int(const char *, const char *);
extern int git_config_rename_section(const char *, const char *);
extern int git_config_rename_section_in_file(const char *, const char *, const char *);
extern const char *git_etc_gitconfig(void);
-extern int check_repository_format_version(const char *var, const char *value, void *cb);
extern int git_env_bool(const char *, int);
extern unsigned long git_env_ulong(const char *, unsigned long);
extern int git_config_system(void);
extern int diff_auto_refresh_index;
/* match-trees.c */
-void shift_tree(const unsigned char *, const unsigned char *, unsigned char *, int);
-void shift_tree_by(const unsigned char *, const unsigned char *, unsigned char *, const char *);
+void shift_tree(const struct object_id *, const struct object_id *, struct object_id *, int);
+void shift_tree_by(const struct object_id *, const struct object_id *, struct object_id *, const char *);
/*
* whitespace rules.
struct stat st;
if (lstat(ce->name, &st)) {
- error("lstat(%s): %s", ce->name, strerror(errno));
+ error_errno("lstat(%s)", ce->name);
continue;
}
--- /dev/null
+#!/bin/sh
+#
+# Perform sanity checks on documentation and build it.
+#
+
+set -e
+
+make check-builtins
+make check-docs
+make doc
+
+test -s Documentation/git.html
+test -s Documentation/git.xml
+test -s Documentation/git.1
struct strbuf buf = STRBUF_INIT;
if (strbuf_readlink(&buf, elem->path, st.st_size) < 0) {
- error("readlink(%s): %s", elem->path,
- strerror(errno));
+ error_errno("readlink(%s)", elem->path);
return;
}
result_size = buf.len;
int preserve_subject;
struct date_mode date_mode;
unsigned date_mode_explicit:1;
+ int expand_tabs_in_log;
int need_8bit_cte;
char *notes_message;
struct reflog_walk_info *reflog_info;
#define HEADER_HMAC_H
#define HEADER_SHA_H
#include <CommonCrypto/CommonHMAC.h>
-#define HMAC_CTX CCHmacContext
-#define HMAC_Init(hmac, key, len, algo) CCHmacInit(hmac, algo, key, len)
-#define HMAC_Update CCHmacUpdate
-#define HMAC_Final(hmac, hash, ptr) CCHmacFinal(hmac, hash)
-#define HMAC_CTX_cleanup(ignore)
#define EVP_md5(...) kCCHmacAlgMD5
+/* CCHmac doesn't take md_len and the return type is void */
+#define HMAC git_CC_HMAC
+static inline unsigned char *git_CC_HMAC(CCHmacAlgorithm alg,
+ const void *key, int key_len,
+ const unsigned char *data, size_t data_len,
+ unsigned char *md, unsigned int *md_len)
+{
+ CCHmac(alg, key, key_len, data, data_len, md);
+ return md;
+}
+
#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
#define APPLE_LION_OR_NEWER
#include <Security/Security.h>
return ret;
}
+static inline int needs_hiding(const char *path)
+{
+ const char *basename;
+
+ if (hide_dotfiles == HIDE_DOTFILES_FALSE)
+ return 0;
+
+ /* We cannot use basename(), as it would remove trailing slashes */
+ mingw_skip_dos_drive_prefix((char **)&path);
+ if (!*path)
+ return 0;
+
+ for (basename = path; *path; path++)
+ if (is_dir_sep(*path)) {
+ do {
+ path++;
+ } while (is_dir_sep(*path));
+ /* ignore trailing slashes */
+ if (*path)
+ basename = path;
+ }
+
+ if (hide_dotfiles == HIDE_DOTFILES_TRUE)
+ return *basename == '.';
+
+ assert(hide_dotfiles == HIDE_DOTFILES_DOTGITONLY);
+ return !strncasecmp(".git", basename, 4) &&
+ (!basename[4] || is_dir_sep(basename[4]));
+}
+
+static int set_hidden_flag(const wchar_t *path, int set)
+{
+ DWORD original = GetFileAttributesW(path), modified;
+ if (set)
+ modified = original | FILE_ATTRIBUTE_HIDDEN;
+ else
+ modified = original & ~FILE_ATTRIBUTE_HIDDEN;
+ if (original == modified || SetFileAttributesW(path, modified))
+ return 0;
+ errno = err_win_to_posix(GetLastError());
+ return -1;
+}
+
int mingw_mkdir(const char *path, int mode)
{
int ret;
if (xutftowcs_path(wpath, path) < 0)
return -1;
ret = _wmkdir(wpath);
+ if (!ret && needs_hiding(path))
+ return set_hidden_flag(wpath, 1);
return ret;
}
if (attrs != INVALID_FILE_ATTRIBUTES && (attrs & FILE_ATTRIBUTE_DIRECTORY))
errno = EISDIR;
}
+ if ((oflags & O_CREAT) && needs_hiding(filename)) {
+ /*
+ * Internally, _wopen() uses the CreateFile() API which errors
+ * out with an ERROR_ACCESS_DENIED if CREATE_ALWAYS was
+ * specified and an already existing file's attributes do not
+ * match *exactly*. As there is no mode or flag we can set that
+ * would correspond to FILE_ATTRIBUTE_HIDDEN, let's just try
+ * again *without* the O_CREAT flag (that corresponds to the
+ * CREATE_ALWAYS flag of CreateFile()).
+ */
+ if (fd < 0 && errno == EACCES)
+ fd = _wopen(wfilename, oflags & ~O_CREAT, mode);
+ if (fd >= 0 && set_hidden_flag(wfilename, 1))
+ warning("could not mark '%s' as hidden.", filename);
+ }
return fd;
}
#undef fopen
FILE *mingw_fopen (const char *filename, const char *otype)
{
+ int hide = needs_hiding(filename);
FILE *file;
wchar_t wfilename[MAX_PATH], wotype[4];
if (filename && !strcmp(filename, "/dev/null"))
if (xutftowcs_path(wfilename, filename) < 0 ||
xutftowcs(wotype, otype, ARRAY_SIZE(wotype)) < 0)
return NULL;
+ if (hide && !access(filename, F_OK) && set_hidden_flag(wfilename, 0)) {
+ error("could not unhide %s", filename);
+ return NULL;
+ }
file = _wfopen(wfilename, wotype);
+ if (file && hide && set_hidden_flag(wfilename, 1))
+ warning("could not mark '%s' as hidden.", filename);
return file;
}
FILE *mingw_freopen (const char *filename, const char *otype, FILE *stream)
{
+ int hide = needs_hiding(filename);
FILE *file;
wchar_t wfilename[MAX_PATH], wotype[4];
if (filename && !strcmp(filename, "/dev/null"))
if (xutftowcs_path(wfilename, filename) < 0 ||
xutftowcs(wotype, otype, ARRAY_SIZE(wotype)) < 0)
return NULL;
+ if (hide && !access(filename, F_OK) && set_hidden_flag(wfilename, 0)) {
+ error("could not unhide %s", filename);
+ return NULL;
+ }
file = _wfreopen(wfilename, wotype, stream);
+ if (file && hide && set_hidden_flag(wfilename, 1))
+ warning("could not mark '%s' as hidden.", filename);
return file;
}
char *mingw_getcwd(char *pointer, int len)
{
- int i;
wchar_t wpointer[MAX_PATH];
if (!_wgetcwd(wpointer, ARRAY_SIZE(wpointer)))
return NULL;
if (xwcstoutf(pointer, wpointer, len) < 0)
return NULL;
- for (i = 0; pointer[i]; i++)
- if (pointer[i] == '\\')
- pointer[i] = '/';
+ convert_slashes(pointer);
return pointer;
}
* executable (by not mistaking the dir separators
* for escape characters).
*/
- for (; *tmp; tmp++)
- if (*tmp == '\\')
- *tmp = '/';
+ convert_slashes(tmp);
}
/* simulate TERM to enable auto-color (see color.c) */
#define sigemptyset(x) (void)0
static inline int sigaddset(sigset_t *set, int signum)
{ return 0; }
+#define SIG_BLOCK 0
#define SIG_UNBLOCK 0
static inline int sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{ return 0; }
void mingw_open_html(const char *path);
#define open_html mingw_open_html
-void mingw_mark_as_git_dir(const char *dir);
-#define mark_as_git_dir mingw_mark_as_git_dir
-
/**
* Converts UTF-8 encoded string to UTF-16LE.
*
if (errno || inleft) {
/*
* iconv() failed and errno could be E2BIG, EILSEQ, EINVAL, EBADF
- * MacOS X avoids illegal byte sequemces.
+ * MacOS X avoids illegal byte sequences.
* If they occur on a mounted drive (e.g. NFS) it is not worth to
* die() for that, but rather let the user see the original name
*/
return TlsGetValue(key);
}
+#ifndef __MINGW64_VERSION_MAJOR
+static inline int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
+{
+ return 0;
+}
+#endif
+
#endif /* PTHREAD_H */
va_end(ap);
if (str_len < 0) {
- warning("vsnprintf failed: '%s'", strerror(errno));
+ warning_errno("vsnprintf failed");
return;
}
str = malloc(st_add(str_len, 1));
if (!str) {
- warning("malloc failed: '%s'", strerror(errno));
+ warning_errno("malloc failed");
return;
}
while ((pos = strstr(str, "%1")) != NULL) {
str = realloc(str, st_add(++str_len, 1));
if (!str) {
- warning("realloc failed: '%s'", strerror(errno));
+ warning_errno("realloc failed");
return;
}
memmove(pos + 2, pos + 1, strlen(pos));
void *git_mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset)
{
- HANDLE hmap;
+ HANDLE osfhandle, hmap;
void *temp;
- off_t len;
- struct stat st;
+ LARGE_INTEGER len;
uint64_t o = offset;
uint32_t l = o & 0xFFFFFFFF;
uint32_t h = (o >> 32) & 0xFFFFFFFF;
- if (!fstat(fd, &st))
- len = st.st_size;
- else
+ osfhandle = (HANDLE)_get_osfhandle(fd);
+ if (!GetFileSizeEx(osfhandle, &len))
die("mmap: could not determine filesize");
- if ((length + offset) > len)
- length = xsize_t(len - offset);
+ if ((length + offset) > len.QuadPart)
+ length = xsize_t(len.QuadPart - offset);
if (!(flags & MAP_PRIVATE))
die("Invalid usage of mmap when built with USE_WIN32_MMAP");
- hmap = CreateFileMapping((HANDLE)_get_osfhandle(fd), NULL,
- PAGE_WRITECOPY, 0, 0, NULL);
+ hmap = CreateFileMapping(osfhandle, NULL,
+ prot == PROT_READ ? PAGE_READONLY : PAGE_WRITECOPY, 0, 0, NULL);
- if (!hmap)
+ if (!hmap) {
+ errno = EINVAL;
return MAP_FAILED;
+ }
- temp = MapViewOfFileEx(hmap, FILE_MAP_COPY, h, l, length, start);
+ temp = MapViewOfFileEx(hmap, prot == PROT_READ ?
+ FILE_MAP_READ : FILE_MAP_COPY, h, l, length, start);
if (!CloseHandle(hmap))
warning("unable to close file mapping handle");
- return temp ? temp : MAP_FAILED;
+ if (temp)
+ return temp;
+
+ errno = GetLastError() == ERROR_COMMITMENT_LIMIT ? EFBIG : EINVAL;
+ return MAP_FAILED;
}
int git_munmap(void *start, size_t length)
#define IOINFO_L2E 5
#define IOINFO_ARRAY_ELTS (1 << IOINFO_L2E)
+#define FPIPE 0x08
#define FDEV 0x40
static inline ioinfo* _pioinfo(int fd)
return old_handle;
}
+#ifdef DETECT_MSYS_TTY
+
+#include <winternl.h>
+#include <ntstatus.h>
+
+static void detect_msys_tty(int fd)
+{
+ ULONG result;
+ BYTE buffer[1024];
+ POBJECT_NAME_INFORMATION nameinfo = (POBJECT_NAME_INFORMATION) buffer;
+ PWSTR name;
+
+ /* check if fd is a pipe */
+ HANDLE h = (HANDLE) _get_osfhandle(fd);
+ if (GetFileType(h) != FILE_TYPE_PIPE)
+ return;
+
+ /* get pipe name */
+ if (!NT_SUCCESS(NtQueryObject(h, ObjectNameInformation,
+ buffer, sizeof(buffer) - 2, &result)))
+ return;
+ name = nameinfo->Name.Buffer;
+ name[nameinfo->Name.Length] = 0;
+
+ /* check if this could be a MSYS2 pty pipe ('msys-XXXX-ptyN-XX') */
+ if (!wcsstr(name, L"msys-") || !wcsstr(name, L"-pty"))
+ return;
+
+ /* init ioinfo size if we haven't done so */
+ if (init_sizeof_ioinfo())
+ return;
+
+ /* set FDEV flag, reset FPIPE flag */
+ _pioinfo(fd)->osflags &= ~FPIPE;
+ _pioinfo(fd)->osflags |= FDEV;
+}
+
+#endif
+
void winansi_init(void)
{
int con1, con2;
/* check if either stdout or stderr is a console output screen buffer */
con1 = is_console(1);
con2 = is_console(2);
- if (!con1 && !con2)
+ if (!con1 && !con2) {
+#ifdef DETECT_MSYS_TTY
+ /* check if stdin / stdout / stderr are MSYS2 pty pipes */
+ detect_msys_tty(0);
+ detect_msys_tty(1);
+ detect_msys_tty(2);
+#endif
return;
+ }
/* create a named pipe to communicate with the console thread */
xsnprintf(name, sizeof(name), "\\\\.\\pipe\\winansi%lu", GetCurrentProcessId());
HANDLE winansi_get_osfhandle(int fd)
{
HANDLE hnd = (HANDLE) _get_osfhandle(fd);
- if ((fd == 1 || fd == 2) && isatty(fd)
- && GetFileType(hnd) == FILE_TYPE_PIPE)
- return (fd == 1) ? hconsole1 : hconsole2;
+ if (isatty(fd) && GetFileType(hnd) == FILE_TYPE_PIPE) {
+ if (fd == 1 && hconsole1)
+ return hconsole1;
+ else if (fd == 2 && hconsole2)
+ return hconsole2;
+ }
return hnd;
}
expanded = expand_user_path(path);
if (!expanded)
- return error("Could not expand include path '%s'", path);
+ return error("could not expand include path '%s'", path);
path = expanded;
/*
if (!strcmp(var, "core.attributesfile"))
return git_config_pathname(&git_attributes_file, var, value);
+ if (!strcmp(var, "core.hookspath"))
+ return git_config_pathname(&git_hooks_path, var, value);
+
if (!strcmp(var, "core.bare")) {
is_bare_repository_cfg = git_config_bool(var, value);
return 0;
if (!strcmp(var, "core.autocrlf")) {
if (value && !strcasecmp(value, "input")) {
- if (core_eol == EOL_CRLF)
- return error("core.autocrlf=input conflicts with core.eol=crlf");
auto_crlf = AUTO_CRLF_INPUT;
return 0;
}
core_eol = EOL_NATIVE;
else
core_eol = EOL_UNSET;
- if (core_eol == EOL_CRLF && auto_crlf == AUTO_CRLF_INPUT)
- return error("core.autocrlf=input conflicts with core.eol=crlf");
return 0;
}
return 0;
}
+ if (!strcmp(var, "core.hidedotfiles")) {
+ if (value && !strcasecmp(value, "dotgitonly"))
+ hide_dotfiles = HIDE_DOTFILES_DOTGITONLY;
+ else
+ hide_dotfiles = git_config_bool(var, value);
+ return 0;
+ }
+
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
else if (!strcmp(value, "always"))
autorebase = AUTOREBASE_ALWAYS;
else
- return error("Malformed value for %s", var);
+ return error("malformed value for %s", var);
return 0;
}
else if (!strcmp(value, "current"))
push_default = PUSH_DEFAULT_CURRENT;
else {
- error("Malformed value for %s: %s", var, value);
+ error("malformed value for %s: %s", var, value);
return error("Must be one of nothing, matching, simple, "
"upstream or current.");
}
return !git_env_bool("GIT_CONFIG_NOSYSTEM", 0);
}
-int git_config_early(config_fn_t fn, void *data, const char *repo_config)
+static int do_git_config_sequence(config_fn_t fn, void *data)
{
int ret = 0, found = 0;
char *xdg_config = xdg_config_home("config");
char *user_config = expand_user_path("~/.gitconfig");
+ char *repo_config = git_pathdup("config");
if (git_config_system() && !access_or_die(git_etc_gitconfig(), R_OK, 0)) {
ret += git_config_from_file(fn, git_etc_gitconfig(),
free(xdg_config);
free(user_config);
+ free(repo_config);
return ret == 0 ? found : ret;
}
struct git_config_source *config_source,
int respect_includes)
{
- char *repo_config = NULL;
- int ret;
struct config_include_data inc = CONFIG_INCLUDE_INIT;
if (respect_includes) {
else if (config_source && config_source->blob)
return git_config_from_blob_ref(fn, config_source->blob, data);
- repo_config = git_pathdup("config");
- ret = git_config_early(fn, data, repo_config);
- if (repo_config)
- free(repo_config);
- return ret;
+ return do_git_config_sequence(fn, data);
}
static void git_config_raw(config_fn_t fn, void *data)
struct config_set_element k;
struct config_set_element *found_entry;
char *normalized_key;
- int ret;
/*
* `key` may come from the user, so normalize it before using it
* for querying entries from the hashmap.
*/
- ret = git_config_parse_key(key, &normalized_key, NULL);
-
- if (ret)
+ if (git_config_parse_key(key, &normalized_key, NULL))
return NULL;
hashmap_entry_init(&k, strhash(normalized_key));
lock = xcalloc(1, sizeof(struct lock_file));
fd = hold_lock_file_for_update(lock, config_filename, 0);
if (fd < 0) {
- error("could not lock config file %s: %s", config_filename, strerror(errno));
+ error_errno("could not lock config file %s", config_filename);
free(store.key);
ret = CONFIG_NO_LOCK;
goto out_free;
free(store.key);
if ( ENOENT != errno ) {
- error("opening %s: %s", config_filename,
- strerror(errno));
+ error_errno("opening %s", config_filename);
ret = CONFIG_INVALID_FILE; /* same as "invalid config file" */
goto out_free;
}
if (contents == MAP_FAILED) {
if (errno == ENODEV && S_ISDIR(st.st_mode))
errno = EISDIR;
- error("unable to mmap '%s': %s",
- config_filename, strerror(errno));
+ error_errno("unable to mmap '%s'", config_filename);
ret = CONFIG_INVALID_FILE;
contents = NULL;
goto out_free;
in_fd = -1;
if (chmod(get_lock_file_path(lock), st.st_mode & 07777) < 0) {
- error("chmod on %s failed: %s",
- get_lock_file_path(lock), strerror(errno));
+ error_errno("chmod on %s failed", get_lock_file_path(lock));
ret = CONFIG_NO_WRITE;
goto out_free;
}
}
if (commit_lock_file(lock) < 0) {
- error("could not write config file %s: %s", config_filename,
- strerror(errno));
+ error_errno("could not write config file %s", config_filename);
ret = CONFIG_NO_WRITE;
lock = NULL;
goto out_free;
const char *key, const char *value,
const char *value_regex, int multi_replace)
{
- if (git_config_set_multivar_in_file_gently(config_filename, key, value,
- value_regex, multi_replace) < 0)
- die(_("Could not set '%s' to '%s'"), key, value);
+ if (!git_config_set_multivar_in_file_gently(config_filename, key, value,
+ value_regex, multi_replace))
+ return;
+ if (value)
+ die(_("could not set '%s' to '%s'"), key, value);
+ else
+ die(_("could not unset '%s'"), key);
}
int git_config_set_multivar_gently(const char *key, const char *value,
fstat(fileno(config_file), &st);
if (chmod(get_lock_file_path(lock), st.st_mode & 07777) < 0) {
- ret = error("chmod on %s failed: %s",
- get_lock_file_path(lock), strerror(errno));
+ ret = error_errno("chmod on %s failed",
+ get_lock_file_path(lock));
goto out;
}
fclose(config_file);
unlock_and_out:
if (commit_lock_file(lock) < 0)
- ret = error("could not write config file %s: %s",
- config_filename, strerror(errno));
+ ret = error_errno("could not write config file %s",
+ config_filename);
out:
free(filename_buf);
return ret;
#undef config_error_nonbool
int config_error_nonbool(const char *var)
{
- return error("Missing value for '%s'", var);
+ return error("missing value for '%s'", var);
}
int parse_config_key(const char *var,
X = .exe
UNRELIABLE_FSTAT = UnfortunatelyYes
SPARSE_FLAGS = -isystem /usr/include/w32api -Wno-one-bit-signed-bitfield
+ OBJECT_CREATION_USES_RENAMES = UnfortunatelyNeedsTo
endif
ifeq ($(uname_S),FreeBSD)
NEEDS_LIBICONV = YesPlease
BASIC_LDFLAGS += -Wl,--large-address-aware
endif
CC = gcc
- COMPAT_CFLAGS += -D__USE_MINGW_ANSI_STDIO=0
+ COMPAT_CFLAGS += -D__USE_MINGW_ANSI_STDIO=0 -DDETECT_MSYS_TTY
+ EXTLIBS += -lntdll
INSTALL = /bin/install
NO_R_TO_GCC_LINKER = YesPlease
INTERNAL_QSORT = YesPlease
[CHARSET_LIB=-lcharset])])
GIT_CONF_SUBST([CHARSET_LIB])
#
-# Define NO_HMAC_CTX_CLEANUP=YesPlease if HMAC_CTX_cleanup is missing.
-AC_CHECK_LIB([crypto], [HMAC_CTX_cleanup],
- [], [GIT_CONF_SUBST([NO_HMAC_CTX_CLEANUP], [YesPlease])])
-#
# Define HAVE_CLOCK_GETTIME=YesPlease if clock_gettime is available.
GIT_CHECK_FUNC(clock_gettime,
[HAVE_CLOCK_GETTIME=YesPlease],
memcpy(commit, sha1_to_hex(sha1), 40);
if (write_in_full(rev_list.in, commit, 41) < 0) {
if (errno != EPIPE && errno != EINVAL)
- error(_("failed write to rev-list: %s"),
- strerror(errno));
+ error_errno(_("failed write to rev-list"));
err = -1;
break;
}
} while (!fn(cb_data, sha1));
- if (close(rev_list.in)) {
- error(_("failed to close rev-list's stdin: %s"), strerror(errno));
- err = -1;
- }
+ if (close(rev_list.in))
+ err = error_errno(_("failed to close rev-list's stdin"));
sigchain_pop(SIGPIPE);
return finish_command(&rev_list) || err;
{
case "$cur" in
--*)
- __gitcomp "--all --info --man --web"
+ __gitcomp "--all --guides --info --man --web"
return
;;
esac
__git_compute_all_commands
__gitcomp "$__git_all_commands $(__git_aliases)
attributes cli core-tutorial cvs-migration
- diffcore gitk glossary hooks ignore modules
- namespaces repository-layout tutorial tutorial-2
+ diffcore everyday gitk glossary hooks ignore modules
+ namespaces repository-layout revisions tutorial tutorial-2
workflows
"
}
--relative-date --date=
--pretty= --format= --oneline
--show-signature
+ --cherry-mark
--cherry-pick
--graph
--decorate --decorate=
+Release 1.3.1 (bugfix-only release)
+===================================
+
+* Generate links to commits in combined emails (it was done only for
+ commit emails in 1.3.0).
+
+* Fix broken links on PyPI.
+
+Release 1.3.0
+=============
+
+* New options multimailhook.htmlInIntro and multimailhook.htmlInFooter
+ now allow using HTML in the introduction and footer of emails (e.g.
+ for a more pleasant formatting or to insert a link to the commit on
+ a web interface).
+
+* A new option multimailhook.commitBrowseURL gives a simpler (and less
+ flexible) way to add a link to a web interface for commit emails
+ than multimailhook.htmlInIntro and multimailhook.htmlInFooter.
+
+* A new public function config.add_config_parameters was added to
+ allow custom hooks to set specific Git configuration variables
+ without modifying the configuration files. See an example in
+ post-receive.example.
+
+* Error handling for SMTP has been improved (we used to print Python
+ backtraces for legitimate errors).
+
+* The SMTP mailer can now check TLS certificates when the newly added
+ configuration variable multimailhook.smtpCACerts is set.
+
+* Python 3 portability has been improved.
+
+* The documentation's formatting has been improved.
+
+* The testsuite has been improved (we now use pyflakes to check for
+ errors in the code).
+
+This version has been tested with Python 2.4 and 2.6 to 3.5, and Git
+v1.7.10-406-gdc801e7, 2.1.4 and 2.8.1.339.g3ad15fd.
+
+No change since 1.3 RC1.
+
Release 1.2.0
=============
+Contributing
+============
+
git-multimail is an open-source project, built by volunteers. We would
welcome your help!
Please note that although a copy of git-multimail is distributed in
the "contrib" section of the main Git project, development takes place
-in a separate git-multimail repository on GitHub:
-
- https://github.com/git-multimail/git-multimail
+in a separate `git-multimail repository on GitHub`_.
Whenever enough changes to git-multimail have accumulated, a new
code-drop of git-multimail will be submitted for inclusion in the Git
project practice
<https://github.com/git/git/blob/master/Documentation/SubmittingPatches#L234>`__.
-General discussion of git-multimail can take place on the main Git
-mailing list,
-
- git@vger.kernel.org
+General discussion of git-multimail can take place on the main `Git
+mailing list`_.
Please CC emails regarding git-multimail to the maintainers so that we
don't overlook them.
+
+
+.. _`git-multimail repository on GitHub`: https://github.com/git-multimail/git-multimail
+.. _`Git mailing list`: git@vger.kernel.org
-git-multimail (version 1.2.0)
-=============================
+git-multimail 1.3.1
+===================
.. image:: https://travis-ci.org/git-multimail/git-multimail.svg?branch=master
:target: https://travis-ci.org/git-multimail/git-multimail
community.)
+Troubleshooting/FAQ
+-------------------
+
+Please read `<doc/troubleshooting.rst>`__ for frequently asked
+questions and common issues with git-multimail.
+
+
Configuration
-------------
following ``git config`` settings:
multimailhook.environment
-
This describes the general environment of the repository. In most
cases, you do not need to specify a value for this variable:
`git-multimail` will autodetect which environment to use.
Currently supported values:
- * generic
-
+ generic
the username of the pusher is read from $USER or $USERNAME and
the repository name is derived from the repository's path.
- * gitolite
-
+ gitolite
the username of the pusher is read from $GL_USER, the repository
name is read from $GL_REPO, and the From: header value is
optionally read from gitolite.conf (see multimailhook.from).
For more information about gitolite and git-multimail, read
`<doc/gitolite.rst>`__
- * stash
-
+ stash
    Environment to use when ``git-multimail`` is run as an Atlassian
BitBucket Server (formerly known as Atlassian Stash) hook.
and repo come from these two command line flags, which must be
specified.
- * gerrit
-
+ gerrit
    Environment to use when ``git-multimail`` is run as a
``ref-updated`` Gerrit hook.
* If none of the above apply, then ``generic`` is used.
multimailhook.repoName
-
A short name of this Git repository, to be used in various places
in the notification email text. The default is to use $GL_REPO
for gitolite repositories, or otherwise to derive this value from
the repository path name.
multimailhook.mailingList
-
The list of email addresses to which notification emails should be
sent, as RFC 2822 email addresses separated by commas. This
configuration option can be multivalued. Leave it unset or set it
specific types of notification email.
multimailhook.refchangeList
-
The list of email addresses to which summary emails about
reference changes should be sent, as RFC 2822 email addresses
separated by commas. This configuration option can be
multimailhook.mailingList is set.
multimailhook.announceList
-
The list of email addresses to which emails about new annotated
tags should be sent, as RFC 2822 email addresses separated by
commas. This configuration option can be multivalued. The
even if one of the other values is set.
multimailhook.commitList
-
The list of email addresses to which emails about individual new
commits should be sent, as RFC 2822 email addresses separated by
commas. This configuration option can be multivalued. The
multimailhook.mailingList is set.
multimailhook.announceShortlog
-
If this option is set to true, then emails about changes to
annotated tags include a shortlog of changes since the previous
tag. This can be useful if the annotated tags represent releases;
rather than useful. Default is false.
multimailhook.commitEmailFormat
-
The format of email messages for the individual commits, can be "text" or
"html". In the latter case, the emails will include diffs using colorized
HTML instead of plain text used by default. Note that this currently the
the message starting with ``+++`` or ``---`` colored in red or
green).
-multimailhook.refchangeShowGraph
+ By default, the whole message is HTML-escaped. See
+ ``multimailhook.htmlInIntro`` to change this behavior.
+
+multimailhook.commitBrowseURL
+ Used to generate a link to an online repository browser in commit
+ emails. This variable must be a string. Format directives like
+ ``%(<variable>)s`` will be expanded the same way as template
+ strings. In particular, ``%(id)s`` will be replaced by the full
+ Git commit identifier (40-chars hexadecimal).
+
+ If the string does not contain any format directive, then
+ ``%(id)s`` will be automatically added to the string. If you don't
+ want ``%(id)s`` to be automatically added, use the empty format
+ directive ``%()s`` anywhere in the string.
+
+ For example, a suitable value for the git-multimail project itself
+ would be
+ ``https://github.com/git-multimail/git-multimail/commit/%(id)s``.
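+
+ For instance, one might set it with a command like the following
+ (the host and path below are placeholders, not a real repository
+ browser)::
+
+  git config multimailhook.commitBrowseURL \
+      'https://git.example.com/cgit/repo/commit/?id=%(id)s'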
+
+multimailhook.htmlInIntro, multimailhook.htmlInFooter
+ When generating an HTML message, git-multimail escapes any HTML
+ sequence by default. This means that if a template contains HTML
+ like ``<a href="foo">link</a>``, the reader will see the HTML
+ source code and not a proper link.
+
+ Set ``multimailhook.htmlInIntro`` to true to allow writing HTML
+ formatting in introduction templates. Similarly, set
+ ``multimailhook.htmlInFooter`` for HTML in the footer.
+ Variables expanded in the template are still escaped. For example,
+ if a repository's path contains a ``<``, it will be rendered as
+ such in the message.
+
+ Read `<doc/customizing-emails.rst>`__ for more details and
+ examples.
+
+multimailhook.refchangeShowGraph
If this option is set to true, then summary emails about reference
changes will additionally include:
specified in graphOpts. The default is false.
multimailhook.refchangeShowLog
-
If this option is set to true, then summary emails about reference
changes will include a detailed log of the added commits in
addition to the one line summary. The log is generated by running
Default is false.
multimailhook.mailer
-
This option changes the way emails are sent. Accepted values are:
- - sendmail (the default): use the command ``/usr/sbin/sendmail`` or
+ * **sendmail (the default)**: use the command ``/usr/sbin/sendmail`` or
``/usr/lib/sendmail`` (or sendmailCommand, if configured). This
mode can be further customized via the following options:
- * multimailhook.sendmailCommand
-
- The command used by mailer ``sendmail`` to send emails. Shell
- quoting is allowed in the value of this setting, but remember that
- Git requires double-quotes to be escaped; e.g.::
+ multimailhook.sendmailCommand
+ The command used by mailer ``sendmail`` to send emails. Shell
+ quoting is allowed in the value of this setting, but remember that
+ Git requires double-quotes to be escaped; e.g.::
- git config multimailhook.sendmailcommand '/usr/sbin/sendmail -oi -t -F \"Git Repo\"'
+ git config multimailhook.sendmailcommand '/usr/sbin/sendmail -oi -t -F \"Git Repo\"'
- Default is '/usr/sbin/sendmail -oi -t' or
- '/usr/lib/sendmail -oi -t' (depending on which file is
- present and executable).
+ Default is '/usr/sbin/sendmail -oi -t' or
+ '/usr/lib/sendmail -oi -t' (depending on which file is
+ present and executable).
- * multimailhook.envelopeSender
+ multimailhook.envelopeSender
+ If set then pass this value to sendmail via the -f option to set
+ the envelope sender address.
- If set then pass this value to sendmail via the -f option to set
- the envelope sender address.
-
- - smtp: use Python's smtplib. This is useful when the sendmail
+ * **smtp**: use Python's smtplib. This is useful when the sendmail
command is not available on the system. This mode can be
further customized via the following options:
- * multimailhook.smtpServer
-
- The name of the SMTP server to connect to. The value can
- also include a colon and a port number; e.g.,
- ``mail.example.com:25``. Default is 'localhost' using port 25.
-
- * multimailhook.smtpUser
- * multimailhook.smtpPass
-
- Server username and password. Required if smtpEncryption is 'ssl'.
- Note that the username and password currently need to be
- set cleartext in the configuration file, which is not
- recommended. If you need to use this option, be sure your
- configuration file is read-only.
+ multimailhook.smtpServer
+ The name of the SMTP server to connect to. The value can
+ also include a colon and a port number; e.g.,
+ ``mail.example.com:25``. Default is 'localhost' using port 25.
- * multimailhook.envelopeSender
+ multimailhook.smtpUser, multimailhook.smtpPass
+ Server username and password. Required if smtpEncryption is 'ssl'.
+ Note that the username and password currently need to be
+ set in cleartext in the configuration file, which is not
+ recommended. If you need to use this option, be sure your
+ configuration file is read-only.
+ multimailhook.envelopeSender
The sender address to be passed to the SMTP server. If
unset, then the value of multimailhook.from is used.
- * multimailhook.smtpServerTimeout
-
+ multimailhook.smtpServerTimeout
Timeout in seconds.
- * multimailhook.smtpEncryption
-
- Set the security type. Allowed values: none, ssl, tls.
- Default=none.
-
- * multimailhook.smtpServerDebugLevel
-
+ multimailhook.smtpEncryption
+ Set the security type. Allowed values: ``none``, ``ssl``, ``tls`` (starttls).
+ Default is ``none``.
+
+ multimailhook.smtpCACerts
+ Set the path to a list of trusted CA certificates used to verify
+ the server certificate; this is only supported when
+ ``smtpEncryption`` is ``tls``. If unset or empty, the server
+ certificate is not verified. If it points to a file containing a
+ list of trusted CA certificates (PEM format), these CAs will be
+ used to verify the server certificate. On Debian, you can set it to
+ ``/etc/ssl/certs/ca-certificates.crt`` to use the system
+ trusted CAs. For a self-signed server, you can add your server
+ certificate to the system store::
+
+ cd /usr/local/share/ca-certificates/
+ openssl s_client -starttls smtp \
+ -connect mail.example.net:587 -showcerts \
+ </dev/null 2>/dev/null \
+ | openssl x509 -outform PEM >mail.example.net.crt
+ update-ca-certificates
+
+ and use the updated ``/etc/ssl/certs/ca-certificates.crt``, or
+ directly use your ``/path/to/mail.example.net.crt``. Default is
+ unset.
+
+ multimailhook.smtpServerDebugLevel
Integer number. Set to greater than 0 to activate debugging.
-multimailhook.from
-multimailhook.fromCommit
-multimailhook.fromRefchange
-
+multimailhook.from, multimailhook.fromCommit, multimailhook.fromRefchange
If set, use this value in the From: field of generated emails.
``fromCommit`` is used for commit emails, ``fromRefchange`` is
used for refchange emails, and ``from`` is used as fall-back in
- The value ``pusher``, in which case the pusher's address (if
available) will be used.
- - The value ``author`` (meaningful only for replyToCommit), in which
+ - The value ``author`` (meaningful only for ``fromCommit``), in which
case the commit author's address will be used.
If config values are unset, the value of the From: header is
3. Use the value of multimailhook.envelopeSender.
multimailhook.administrator
-
The name and/or email address of the administrator of the Git
repository; used in FOOTER_TEMPLATE. Default is
multimailhook.envelopesender if it is set; otherwise a generic
string is used.
multimailhook.emailPrefix
-
All emails have this string prepended to their subjects, to aid
email filtering (though filtering based on the X-Git-* email
headers is probably more robust). Default is the short name of
value to the empty string to suppress the email prefix.
multimailhook.emailMaxLines
-
The maximum number of lines that should be included in the body of
a generated email. If not specified, there is no limit. Lines
beyond the limit are suppressed and counted, and a final line is
added indicating the number of suppressed lines.
multimailhook.emailMaxLineLength
-
The maximum length of a line in the email body. Lines longer than
- this limit are truncated to this length with a trailing `` [...]``
+ this limit are truncated to this length with a trailing ``[...]``
added to indicate the missing text. The default is 500, because
(a) diffs with longer lines are probably from binary files, for
which a diff is useless, and (b) even if a text file has such long
truncation, set this option to 0.
multimailhook.maxCommitEmails
-
The maximum number of commit emails to send for a given change.
 When the number of patches is larger than this value, only the
summary refchange email is sent. This can avoid accidental
emails limit, set this option to 0. The default is 500.
multimailhook.emailStrictUTF8
-
If this boolean option is set to `true`, then the main part of the
email body is forced to be valid UTF-8. Any characters that are
not valid UTF-8 are converted to the Unicode replacement
character, U+FFFD. The default is `true`.
multimailhook.diffOpts
-
Options passed to ``git diff-tree`` when generating the summary
information for ReferenceChange emails. Default is ``--stat
--summary --find-copies-harder``. Add -p to those options to
details.
multimailhook.graphOpts
-
Options passed to ``git log --graph`` when generating graphs for the
reference change summary emails (used only if refchangeShowGraph
is true). The default is '--oneline --decorate'.
Shell quoting is allowed; see logOpts for details.
multimailhook.logOpts
-
Options passed to ``git log`` to generate additional info for
reference change emails (used only if refchangeShowLog is set).
For example, adding -p will show each commit's complete diff. The
logopts = --pretty=format:\"%h %aN <%aE>%n%s%n%n%b%n\"
multimailhook.commitLogOpts
-
Options passed to ``git log`` to generate additional info for
 revision change emails. For example, adding --ignore-all-space
will suppress whitespace changes. The default options are ``-C
multimailhook.logOpts for details.
multimailhook.dateSubstitute
-
String to use as a substitute for ``Date:`` in the output of ``git
 log`` while formatting commit messages. This is useful to avoid
emitting a line that can be interpreted by mailers as the start of
a cited message (Zimbra webmail in particular). Defaults to
- ``CommitDate: ``. Set to an empty string or ``none`` to deactivate
+ ``CommitDate:``. Set to an empty string or ``none`` to deactivate
the behavior.
multimailhook.emailDomain
-
Domain name appended to the username of the person doing the push
to convert it into an email address
(via ``"%s@%s" % (username, emaildomain)``). More complicated
 schemes can be implemented by subclassing Environment and
 overriding its get_pusher_email() method.
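+
+ As a rough sketch (the class name, the address mapping, and the use
+ of get_pusher() below are illustrative assumptions, not part of the
+ documented interface), a wrapper script could do something like::
+
+  import git_multimail
+
+  class CorporateEnvironment(git_multimail.GenericEnvironment):
+      def get_pusher_email(self):
+          # Map the pushing username to a site-specific address
+          # (get_pusher() is assumed to return that username).
+          return '%s@code.example.com' % self.get_pusher()
+
+ The rest of such a wrapper would follow the shipped
+ post-receive.example script.
+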
-multimailhook.replyTo
-multimailhook.replyToCommit
-multimailhook.replyToRefchange
-
+multimailhook.replyTo, multimailhook.replyToCommit, multimailhook.replyToRefchange
Addresses to use in the Reply-To: field for commit emails
(replyToCommit) and refchange emails (replyToRefchange).
multimailhook.replyTo is used as default when replyToCommit or
commit emails.
multimailhook.quiet
-
Do not output the list of email recipients from the hook
multimailhook.stdout
-
For debugging, send emails to stdout rather than to the
mailer. Equivalent to the --stdout command line option
multimailhook.scanCommitForCc
-
 If this option is set to true, then recipients from lines in the commit
 body that start with ``CC:`` will be added to the CC list.
Default: false
multimailhook.combineWhenSingleCommit
-
If this option is set to true and a single new commit is pushed to
a branch, combine the summary and commit email messages into a
single email.
Default: true
-multimailhook.refFilterInclusionRegex
-multimailhook.refFilterExclusionRegex
-multimailhook.refFilterDoSendRegex
-multimailhook.refFilterDontSendRegex
-
+multimailhook.refFilterInclusionRegex, multimailhook.refFilterExclusionRegex, multimailhook.refFilterDoSendRegex, multimailhook.refFilterDontSendRegex
**Warning:** these options are experimental. They should work, but
the user-interface is not stable yet (in particular, the option
names may change). If you want to participate in stabilizing the
the local environment in which Git is running. Two types of
environment are built in:
-* GenericEnvironment: a stand-alone Git repository.
+GenericEnvironment
+ a stand-alone Git repository.
-* GitoliteEnvironment: a Git repository that is managed by gitolite
- [3]_. For such repositories, the identity of the pusher is read from
- environment variable $GL_USER, the name of the repository is read
- from $GL_REPO (if it is not overridden by multimailhook.reponame),
- and the From: header value is optionally read from gitolite.conf
- (see multimailhook.from).
+GitoliteEnvironment
+ a Git repository that is managed by gitolite
+ [3]_. For such repositories, the identity of the pusher is read from
+ environment variable $GL_USER, the name of the repository is read
+ from $GL_REPO (if it is not overridden by multimailhook.reponame),
+ and the From: header value is optionally read from gitolite.conf
+ (see multimailhook.from).
By default, git-multimail assumes GitoliteEnvironment if $GL_USER and
$GL_REPO are set, and otherwise assumes GenericEnvironment.
https://github.com/git-multimail/git-multimail
The version in this directory was obtained from the upstream project
-on October 11 2015 and consists of the "git-multimail" subdirectory from
+on May 13 2016 and consists of the "git-multimail" subdirectory from
revision
- c0791b9ef5821a746fc3475c25765e640452eaae refs/tags/1.2.0
+ 3ce5470d4abf7251604cbf64e73a962e1b617f5e refs/tags/1.3.1
Please see the README file in this directory for information about how
to report bugs or contribute to git-multimail.
--- /dev/null
+Customizing the content and formatting of emails
+================================================
+
+Overloading template strings
+----------------------------
+
+The content of emails is generated based on template strings defined
+in ``git_multimail.py``. You can customize these template strings
+without changing the script itself, by defining a Python wrapper
+around it. The python wrapper should ``import git_multimail`` and then
+override the ``git_multimail.*`` strings like this::
+
+ import sys # needed for sys.argv
+
+ # Import and customize git_multimail:
+ import git_multimail
+ git_multimail.REVISION_INTRO_TEMPLATE = """..."""
+ git_multimail.COMBINED_INTRO_TEMPLATE = git_multimail.REVISION_INTRO_TEMPLATE
+
+ # start git_multimail itself:
+ git_multimail.main(sys.argv[1:])
+
+The template strings can use any value already used in the existing
+templates (read the source code).
+
+Using HTML in template strings
+------------------------------
+
+If ``multimailhook.commitEmailFormat`` is set to HTML, then
+git-multimail will generate HTML emails for commit notifications. The
+log and diff will be formatted automatically by git-multimail. By
+default, any HTML special character in the templates will be escaped.
+
+To use HTML formatting in the introduction of the email, set
+``multimailhook.htmlInIntro`` to ``true``. Then, the template can
+contain any HTML tags, which will be sent as-is in the email. For
+example, to add some formatting and a link to the online commit, use
+a format like::
+
+ git_multimail.REVISION_INTRO_TEMPLATE = """\
+ <span style="color:#808080">This is an automated email from the git hooks/post-receive script.</span><br /><br />
+
+ <strong>%(pusher)s</strong> pushed a commit to %(refname_type)s %(short_refname)s
+ in repository %(repo_shortname)s.<br />
+
+ <a href="https://github.com/git-multimail/git-multimail/commit/%(newrev)s">View on GitHub</a>.
+ """
+
+Note that the values expanded from ``%(variable)s`` in the format
+strings will still be escaped.
+
+For a less flexible but easier to set up way to add a link to commit
+emails, see ``multimailhook.commitBrowseURL``.
+
+Similarly, one can set ``multimailhook.htmlInFooter`` and override any
+of the ``*_FOOTER*`` template strings.
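+
+For instance, with ``multimailhook.htmlInFooter`` set to ``true``, a
+minimal footer override might look like the following (the wording and
+markup are only an illustration)::
+
+    git_multimail.REVISION_FOOTER_TEMPLATE = """\
+    <br />
+    <em>Sent by git-multimail for repository %(repo_shortname)s.</em>
+    """
+
+As above, values expanded from ``%(variable)s`` are still escaped.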
--- /dev/null
+Troubleshooting issues with git-multimail: a FAQ
+================================================
+
+Git is not using the right address in the From/To/Reply-To field
+----------------------------------------------------------------
+
+First, make sure that git-multimail actually uses what you think it is
+using. A lot happens to your email (especially when posting to a
+mailing-list) between the time `git_multimail.py` sends it and the
+time it reaches your inbox.
+
+A simple test (do this on a test repository, not in production, since
+it disables email sending): change your post-receive hook to call
+`git_multimail.py` with the `--stdout` option, and try to push to the
+repository. You should see something like::
+
+ Counting objects: 3, done.
+ Writing objects: 100% (3/3), 263 bytes | 0 bytes/s, done.
+ Total 3 (delta 0), reused 0 (delta 0)
+ remote: Sending notification emails to: foo.bar@example.com
+ remote: ===========================================================================
+ remote: Date: Mon, 25 Apr 2016 18:39:59 +0200
+ remote: To: foo.bar@example.com
+ remote: Subject: [git] branch master updated: foo
+ remote: MIME-Version: 1.0
+ remote: Content-Type: text/plain; charset=utf-8
+ remote: Content-Transfer-Encoding: 8bit
+ remote: Message-ID: <20160425163959.2311.20498@anie>
+ remote: From: Auth Or <Foo.Bar@example.com>
+ remote: Reply-To: Auth Or <Foo.Bar@example.com>
+ remote: X-Git-Host: example
+ ...
+ remote: --
+ remote: To stop receiving notification emails like this one, please contact
+ remote: the administrator of this repository.
+ remote: ===========================================================================
+ To /path/to/repo
+ 6278f04..e173f20 master -> master
+
+Note: this does not include the sender (Return-Path: header), as it is
+not part of the message content but passed to the mailer. Some mailers
+show the ``Sender:`` field instead of the ``From:`` field (for
+example, Zimbra Webmail shows ``From: <sender-field> on behalf of
+<from-field>``).
#! /usr/bin/env python
-__version__ = '1.2.0'
+__version__ = '1.3.1'
# Copyright (c) 2015 Matthieu Moy and others
# Copyright (c) 2012-2014 Michael Haggerty and others
import shlex
import optparse
import smtplib
+try:
+ import ssl
+except ImportError:
+ # Python < 2.6 does not have ssl, but that's OK if we don't use it.
+ pass
import time
import cgi
if PYTHON3:
+ def is_string(s):
+ return isinstance(s, str)
+
def str_to_bytes(s):
return s.encode(ENCODING)
except UnicodeEncodeError:
f.buffer.write(msg.encode(ENCODING))
else:
+ def is_string(s):
+ try:
+ return isinstance(s, basestring)
+ except NameError: # Silence Pyflakes warning
+ raise
+
def str_to_bytes(s):
return s
"""
+LINK_TEXT_TEMPLATE = """\
+View the commit online:
+%(browse_url)s
+
+"""
+
+LINK_HTML_TEMPLATE = """\
+<p><a href="%(browse_url)s">View the commit online</a>.</p>
+"""
+
REVISION_FOOTER_TEMPLATE = FOOTER_TEMPLATE
assert words[-1] == ''
return words[:-1]
+ @staticmethod
+ def add_config_parameters(c):
+ """Add configuration parameters to Git.
+
+ c is either an str or a list of str, each element being of the
+ form 'var=val' or 'var', with the same syntax and meaning as
+ the argument of 'git -c var=val'.
+ """
+ if isinstance(c, str):
+ c = (c,)
+ parameters = os.environ.get('GIT_CONFIG_PARAMETERS', '')
+ if parameters:
+ parameters += ' '
+ # git expects GIT_CONFIG_PARAMETERS to be of the form
+ # "'name1=value1' 'name2=value2' 'name3=value3'"
+ # including everything inside the double quotes (but not the double
+ # quotes themselves). Spacing is critical. Also, if a value contains
+ # a literal single quote that quote must be represented using the
+ # four character sequence: '\''
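+ # For example (illustration only): c = ["user.name=O'Brien"] becomes
+ # the single quoted parameter 'user.name=O'\''Brien'.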
+ parameters += ' '.join("'" + x.replace("'", "'\\''") + "'" for x in c)
+ os.environ['GIT_CONFIG_PARAMETERS'] = parameters
+
def get(self, name, default=None):
try:
values = self._split(read_git_output(
values['multimail_version'] = get_version()
return values
+ # Aliases usable in template strings. Tuple of pairs (destination,
+ # source).
+ VALUES_ALIAS = (
+ ("id", "newrev"),
+ )
+
def get_values(self, **extra_values):
"""Return a dictionary {keyword: expansion} for this Change.
values = self._values.copy()
if extra_values:
values.update(extra_values)
+
+ for alias, val in self.VALUES_ALIAS:
+ values[alias] = values[val]
return values
def expand(self, template, **extra_values):
return template % self.get_values(**extra_values)
- def expand_lines(self, template, **extra_values):
+ def expand_lines(self, template, html_escape_val=False, **extra_values):
"""Break template into lines and expand each line."""
values = self.get_values(**extra_values)
+ if html_escape_val:
+ for k in values:
+ if is_string(values[k]):
+ values[k] = cgi.escape(values[k], True)
for line in template.splitlines(True):
yield line % values
values = self.get_values(**extra_values)
if self._contains_html_diff:
- values['contenttype'] = 'html'
+ self._content_type = 'html'
else:
- values['contenttype'] = 'plain'
+ self._content_type = 'plain'
+ values['contenttype'] = self._content_type
for line in template.splitlines():
(name, value) = line.split(': ', 1)
raise NotImplementedError()
- def generate_email_intro(self):
+ def generate_browse_link(self, base_url):
+ """Generate a link to an online repository browser."""
+ return iter(())
+
+ def generate_email_intro(self, html_escape_val=False):
"""Generate the email intro for this Change, a line at a time.
The output will be used as the standard boilerplate at the top
raise NotImplementedError()
- def generate_email_footer(self):
+ def generate_email_footer(self, html_escape_val):
"""Generate the footer of the email, a line at a time.
The footer is always included, irrespective of
for line in self.generate_email_header(**extra_header_values):
yield line
yield '\n'
- for line in self._wrap_for_html(self.generate_email_intro()):
+ html_escape_val = (self.environment.html_in_intro and
+ self._contains_html_diff)
+ intro = self.generate_email_intro(html_escape_val)
+ if not self.environment.html_in_intro:
+ intro = self._wrap_for_html(intro)
+ for line in intro:
yield line
+ if self.environment.commitBrowseURL:
+ for line in self.generate_browse_link(self.environment.commitBrowseURL):
+ yield line
+
body = self.generate_email_body(push)
if body_filter is not None:
body = body_filter(body)
yield line
if self._contains_html_diff:
yield '</pre>'
-
- for line in self._wrap_for_html(self.generate_email_footer()):
+ html_escape_val = (self.environment.html_in_footer and
+ self._contains_html_diff)
+ footer = self.generate_email_footer(html_escape_val)
+ if not self.environment.html_in_footer:
+ footer = self._wrap_for_html(footer)
+ for line in footer:
yield line
def get_alt_fromaddr(self):
values['rev_short'] = self.rev.short
values['change_type'] = self.change_type
values['refname'] = self.refname
+ values['newrev'] = self.rev.sha1
values['short_refname'] = self.reference_change.short_refname
values['refname_type'] = self.reference_change.refname_type
values['reply_to_msgid'] = self.reference_change.msgid
):
yield line
- def generate_email_intro(self):
- for line in self.expand_lines(REVISION_INTRO_TEMPLATE):
+ def generate_browse_link(self, base_url):
+ if '%(' not in base_url:
+ base_url += '%(id)s'
+ url = "".join(self.expand_lines(base_url))
+ if self._content_type == 'html':
+ for line in self.expand_lines(LINK_HTML_TEMPLATE,
+ html_escape_val=True,
+ browse_url=url):
+ yield line
+ elif self._content_type == 'plain':
+ for line in self.expand_lines(LINK_TEXT_TEMPLATE,
+ html_escape_val=False,
+ browse_url=url):
+ yield line
+ else:
+ raise NotImplementedError("Content-type %s unsupported. Please report it as a bug." % self._content_type)
+
+ def generate_email_intro(self, html_escape_val=False):
+ for line in self.expand_lines(REVISION_INTRO_TEMPLATE,
+ html_escape_val=html_escape_val):
yield line
def generate_email_body(self, push):
else:
yield line
- def generate_email_footer(self):
- return self.expand_lines(REVISION_FOOTER_TEMPLATE)
+ def generate_email_footer(self, html_escape_val):
+ return self.expand_lines(REVISION_FOOTER_TEMPLATE,
+ html_escape_val=html_escape_val)
def generate_email(self, push, body_filter=None, extra_header_values={}):
self._contains_diff()
):
yield line
- def generate_email_intro(self):
- for line in self.expand_lines(self.intro_template):
+ def generate_email_intro(self, html_escape_val=False):
+ for line in self.expand_lines(self.intro_template,
+ html_escape_val=html_escape_val):
yield line
def generate_email_body(self, push):
for line in self.generate_revision_change_summary(push):
yield line
- def generate_email_footer(self):
- return self.expand_lines(self.footer_template)
+ def generate_email_footer(self, html_escape_val):
+ return self.expand_lines(self.footer_template,
+ html_escape_val=html_escape_val)
def generate_revision_change_graph(self, push):
if self.showgraph:
self.header_template = COMBINED_HEADER_TEMPLATE
self.intro_template = COMBINED_INTRO_TEMPLATE
self.footer_template = COMBINED_FOOTER_TEMPLATE
+
+ def revision_gen_link(base_url):
+ # revision is used only to generate the body, and
+ # _content_type is set while generating headers. Get it
+ # from the BranchChange object.
+ revision._content_type = self._content_type
+ return revision.generate_browse_link(base_url)
+ self.generate_browse_link = revision_gen_link
for line in self.generate_email(push, body_filter, values):
yield line
smtpservertimeout=10.0, smtpserverdebuglevel=0,
smtpencryption='none',
smtpuser='', smtppass='',
+ smtpcacerts=''
):
if not envelopesender:
sys.stderr.write(
self.security = smtpencryption
self.username = smtpuser
self.password = smtppass
+ self.smtpcacerts = smtpcacerts
try:
def call(klass, server, timeout):
try:
if self.security == 'none':
self.smtp = call(smtplib.SMTP, self.smtpserver, timeout=self.smtpservertimeout)
elif self.security == 'ssl':
+ if self.smtpcacerts:
+ raise smtplib.SMTPException(
+ "Checking certificate is not supported for ssl, prefer starttls"
+ )
self.smtp = call(smtplib.SMTP_SSL, self.smtpserver, timeout=self.smtpservertimeout)
elif self.security == 'tls':
+ if 'ssl' not in sys.modules:
+ sys.stderr.write(
+ '*** Your Python version does not have the ssl library installed\n'
+ '*** smtpEncryption=tls is not available.\n'
+ '*** Either upgrade Python to 2.6 or later\n'
+ ' or use git_multimail.py version 1.2.\n')
if ':' not in self.smtpserver:
self.smtpserver += ':587' # default port for TLS
self.smtp = call(smtplib.SMTP, self.smtpserver, timeout=self.smtpservertimeout)
+ # start: ehlo + starttls
+ # equivalent to
+ # self.smtp.ehlo()
+ # self.smtp.starttls()
+ # with access to the ssl layer
self.smtp.ehlo()
- self.smtp.starttls()
+ if not self.smtp.has_extn("starttls"):
+ raise smtplib.SMTPException("STARTTLS extension not supported by server")
+ resp, reply = self.smtp.docmd("STARTTLS")
+ if resp != 220:
+ raise smtplib.SMTPException("Wrong answer to the STARTTLS command")
+ if self.smtpcacerts:
+ self.smtp.sock = ssl.wrap_socket(
+ self.smtp.sock,
+ ca_certs=self.smtpcacerts,
+ cert_reqs=ssl.CERT_REQUIRED
+ )
+ else:
+ self.smtp.sock = ssl.wrap_socket(
+ self.smtp.sock,
+ cert_reqs=ssl.CERT_NONE
+ )
+ sys.stderr.write(
+ '*** Warning, the server certificate is not verified (smtp) ***\n'
+ '*** set the option smtpCACerts ***\n'
+ )
+ if not hasattr(self.smtp.sock, "read"):
+ # using httplib.FakeSocket with Python 2.5.x or earlier
+ self.smtp.sock.read = self.smtp.sock.recv
+ self.smtp.file = smtplib.SSLFakeFile(self.smtp.sock)
+ self.smtp.helo_resp = None
+ self.smtp.ehlo_resp = None
+ self.smtp.esmtp_features = {}
+ self.smtp.does_esmtp = 0
+ # end: ehlo + starttls
self.smtp.ehlo()
else:
sys.stdout.write('*** Error: Control reached an invalid option. ***')
def __del__(self):
if hasattr(self, 'smtp'):
self.smtp.quit()
+ del self.smtp
def send(self, lines, to_addrs):
try:
self.smtp.login(self.username, self.password)
msg = ''.join(lines)
# turn comma-separated list into Python list if needed.
- if isinstance(to_addrs, basestring):
+ if is_string(to_addrs):
to_addrs = [email for (name, email) in getaddresses([to_addrs])]
self.smtp.sendmail(self.envelopesender, to_addrs, msg)
- except Exception:
+ except smtplib.SMTPResponseException:
sys.stderr.write('*** Error sending email ***\n')
- sys.stderr.write('*** %s\n' % sys.exc_info()[1])
- self.smtp.quit()
+ err = sys.exc_info()[1]
+ sys.stderr.write('*** Error %d: %s\n' % (err.smtp_code,
+ bytes_to_str(err.smtp_error)))
+ try:
+ smtp = self.smtp
+ # delete the field before quit() so that in case of
+ # error, self.smtp is deleted anyway.
+ del self.smtp
+ smtp.quit()
+ except:
+ sys.stderr.write('*** Error closing the SMTP connection ***\n')
+ sys.stderr.write('*** Exiting anyway ... ***\n')
+ sys.stderr.write('*** %s\n' % sys.exc_info()[1])
sys.exit(1)
If "html", generate commit emails in HTML instead of plain text
used by default.
+ html_in_intro (bool)
+ html_in_footer (bool)
+
+ When generating HTML emails, the introduction (respectively,
+ the footer) is HTML-escaped unless html_in_intro (respectively,
+ html_in_footer) is true. When the flag is true, only the values
+ used to expand the template are escaped.
+
refchange_showgraph (bool)
True iff refchanges emails should include a detailed graph.
self.osenv = osenv or os.environ
self.announce_show_shortlog = False
self.commit_email_format = "text"
+ self.html_in_intro = False
+ self.html_in_footer = False
+ self.commitBrowseURL = None
self.maxcommitemails = 500
self.diffopts = ['--stat', '--summary', '--find-copies-harder']
self.graphopts = ['--oneline', '--decorate']
The return value is always a new dictionary."""
if self._values is None:
- values = {}
+ values = {'': ''} # %()s expands to the empty string.
for key in self.COMPUTED_KEYS:
value = getattr(self, 'get_%s' % (key,))()
else:
self.commit_email_format = commit_email_format
+ html_in_intro = config.get_bool('htmlInIntro')
+ if html_in_intro is not None:
+ self.html_in_intro = html_in_intro
+
+ html_in_footer = config.get_bool('htmlInFooter')
+ if html_in_footer is not None:
+ self.html_in_footer = html_in_footer
+
+ self.commitBrowseURL = config.get('commitBrowseURL')
+
maxcommitemails = config.get('maxcommitemails')
if maxcommitemails is not None:
try:
['author'])
self.__reply_to_commit = config.get('replyToCommit', default=reply_to)
- from_addr = self.config.get('from')
self.from_refchange = config.get('fromRefchange')
self.forbid_field_values('fromRefchange',
self.from_refchange,
if changes:
push = Push(environment, changes)
push.send_emails(mailer, body_filter=environment.filter_body)
+ if hasattr(mailer, '__del__'):
+ mailer.__del__()
def run_as_update_hook(environment, mailer, refname, oldrev, newrev, force_send=False):
]
push = Push(environment, changes, force_send)
push.send_emails(mailer, body_filter=environment.filter_body)
+ if hasattr(mailer, '__del__'):
+ mailer.__del__()
def choose_mailer(config, environment):
smtpencryption = config.get('smtpencryption', default='none')
smtpuser = config.get('smtpuser', default='')
smtppass = config.get('smtppass', default='')
+ smtpcacerts = config.get('smtpcacerts', default='')
mailer = SMTPMailer(
envelopesender=(environment.get_sender() or environment.get_fromaddr()),
smtpserver=smtpserver, smtpservertimeout=smtpservertimeout,
smtpencryption=smtpencryption,
smtpuser=smtpuser,
smtppass=smtppass,
+ smtpcacerts=smtpcacerts
)
elif mailer == 'sendmail':
command = config.get('sendmailcommand')
return
if options.c:
- parameters = os.environ.get('GIT_CONFIG_PARAMETERS', '')
- if parameters:
- parameters += ' '
- # git expects GIT_CONFIG_PARAMETERS to be of the form
- # "'name1=value1' 'name2=value2' 'name3=value3'"
- # including everything inside the double quotes (but not the double
- # quotes themselves). Spacing is critical. Also, if a value contains
- # a literal single quote that quote must be represented using the
- # four character sequence: '\''
- parameters += ' '.join("'" + x.replace("'", "'\\''") + "'" for x in options.c)
- os.environ['GIT_CONFIG_PARAMETERS'] = parameters
+ Config.add_config_parameters(options.c)
config = Config('multimailhook')
# git-multimail:
config = git_multimail.Config('multimailhook')
+# Set some Git configuration variables. Equivalent to passing var=val
+# to "git -c var=val" each time git is called, or to adding the
+# configuration in .git/config (must come before instantiating the
+# environment):
+#git_multimail.Config.add_config_parameters('multimailhook.commitEmailFormat=html')
+#git_multimail.Config.add_config_parameters(('user.name=foo', 'user.email=foo@example.com'))
# Select the type of environment:
try:
struct stream_filter *get_stream_filter(const char *path, const unsigned char *sha1)
{
struct conv_attrs ca;
- enum crlf_action crlf_action;
struct stream_filter *filter = NULL;
convert_attrs(&ca, path);
-
if (ca.drv && (ca.drv->smudge || ca.drv->clean))
- return filter;
+ return NULL;
+
+ if (ca.crlf_action == CRLF_AUTO || ca.crlf_action == CRLF_AUTO_CRLF)
+ return NULL;
if (ca.ident)
filter = ident_filter(sha1);
- crlf_action = ca.crlf_action;
-
- if ((crlf_action == CRLF_BINARY) ||
- crlf_action == CRLF_AUTO_INPUT ||
- (crlf_action == CRLF_TEXT_INPUT))
- filter = cascade_filter(filter, &null_filter_singleton);
-
- else if (output_eol(crlf_action) == EOL_CRLF &&
- !(crlf_action == CRLF_AUTO || crlf_action == CRLF_AUTO_CRLF))
+ if (output_eol(ca.crlf_action) == EOL_CRLF)
filter = cascade_filter(filter, lf_to_crlf_filter());
+ else
+ filter = cascade_filter(filter, &null_filter_singleton);
return filter;
}
status = copy_fd(fdi, fdo);
switch (status) {
case COPY_READ_ERROR:
- error("copy-fd: read returned %s", strerror(errno));
+ error_errno("copy-fd: read returned");
break;
case COPY_WRITE_ERROR:
- error("copy-fd: write returned %s", strerror(errno));
+ error_errno("copy-fd: write returned");
break;
}
close(fdi);
if (close(fdo) != 0)
- return error("%s: close error: %s", dst, strerror(errno));
+ return error_errno("%s: close error", dst);
if (!status && adjust_shared_perm(dst))
return -1;
client = accept(fd, NULL, NULL);
if (client < 0) {
- warning("accept failed: %s", strerror(errno));
+ warning_errno("accept failed");
return 1;
}
client2 = dup(client);
if (client2 < 0) {
- warning("dup failed: %s", strerror(errno));
+ warning_errno("dup failed");
close(client);
return 1;
}
write_or_die(1, in, r);
got_data = 1;
}
+ close(fd);
return got_data;
}
size_t size = 0;
if (strbuf_read(&buf, 0, 0) < 0)
- return error("error while reading from stdin %s",
- strerror(errno));
+ return error_errno("error while reading from stdin");
s->should_munmap = 0;
s->data = strbuf_detach(&buf, &size);
#endif
static int diff_detect_rename_default;
+static int diff_compaction_heuristic = 1;
static int diff_rename_limit_default = 400;
static int diff_suppress_blank_empty;
static int diff_use_color_default = -1;
diff_detect_rename_default = git_config_rename(var, value);
return 0;
}
+ if (!strcmp(var, "diff.compactionheuristic")) {
+ diff_compaction_heuristic = git_config_bool(var, value);
+ return 0;
+ }
if (!strcmp(var, "diff.autorefreshindex")) {
diff_auto_refresh_index = git_config_bool(var, value);
return 0;
options->use_color = diff_use_color_default;
options->detect_rename = diff_detect_rename_default;
options->xdl_opts |= diff_algorithm;
+ if (diff_compaction_heuristic)
+ DIFF_XDL_SET(options, COMPACTION_HEURISTIC);
options->orderfile = diff_order_file_cfg;
DIFF_XDL_SET(options, IGNORE_WHITESPACE_AT_EOL);
else if (!strcmp(arg, "--ignore-blank-lines"))
DIFF_XDL_SET(options, IGNORE_BLANK_LINES);
+ else if (!strcmp(arg, "--compaction-heuristic"))
+ DIFF_XDL_SET(options, COMPACTION_HEURISTIC);
+ else if (!strcmp(arg, "--no-compaction-heuristic"))
+ DIFF_XDL_CLR(options, COMPACTION_HEURISTIC);
else if (!strcmp(arg, "--patience"))
options->xdl_opts = DIFF_WITH_ALG(options, PATIENCE_DIFF);
else if (!strcmp(arg, "--histogram"))
int i, renames = 0;
struct hashmap file_table;
- /* Add all sources to the hash table */
+ /* Add all sources to the hash table in reverse order, because
+ * later on they will be retrieved in LIFO order.
+ */
hashmap_init(&file_table, NULL, rename_src_nr);
- for (i = 0; i < rename_src_nr; i++)
+ for (i = rename_src_nr-1; i >= 0; i--)
insert_file_table(&file_table, i, rename_src[i].p->one);
/* Walk the destinations and find best source match */
int check_only, const struct path_simplify *simplify);
static int get_dtype(struct dirent *de, const char *path, int len);
-/* helper string functions with support for the ignore_case flag */
-int strcmp_icase(const char *a, const char *b)
+int fspathcmp(const char *a, const char *b)
{
return ignore_case ? strcasecmp(a, b) : strcmp(a, b);
}
-int strncmp_icase(const char *a, const char *b, size_t count)
+int fspathncmp(const char *a, const char *b, size_t count)
{
return ignore_case ? strncasecmp(a, b, count) : strncmp(a, b, count);
}
-int fnmatch_icase(const char *pattern, const char *string, int flags)
-{
- return wildmatch(pattern, string,
- flags | (ignore_case ? WM_CASEFOLD : 0),
- NULL);
-}
-
int git_fnmatch(const struct pathspec_item *item,
const char *pattern, const char *string,
int prefix)
{
if (prefix == patternlen) {
if (patternlen == basenamelen &&
- !strncmp_icase(pattern, basename, basenamelen))
+ !fspathncmp(pattern, basename, basenamelen))
return 1;
} else if (flags & EXC_FLAG_ENDSWITH) {
/* "*literal" matching against "fooliteral" */
if (patternlen - 1 <= basenamelen &&
- !strncmp_icase(pattern + 1,
+ !fspathncmp(pattern + 1,
basename + basenamelen - (patternlen - 1),
patternlen - 1))
return 1;
*/
if (pathlen < baselen + 1 ||
(baselen && pathname[baselen] != '/') ||
- strncmp_icase(pathname, base, baselen))
+ fspathncmp(pathname, base, baselen))
return 0;
namelen = baselen ? pathlen - baselen - 1 : pathlen;
if (prefix > namelen)
return 0;
- if (strncmp_icase(pattern, name, prefix))
+ if (fspathncmp(pattern, name, prefix))
return 0;
pattern += prefix;
patternlen -= prefix;
/* tries to remove the path with empty directories along it, ignores ENOENT */
extern int remove_path(const char *path);
-extern int strcmp_icase(const char *a, const char *b);
-extern int strncmp_icase(const char *a, const char *b, size_t count);
-extern int fnmatch_icase(const char *pattern, const char *string, int flags);
+extern int fspathcmp(const char *a, const char *b);
+extern int fspathncmp(const char *a, const char *b, size_t count);
/*
* The prefix part of pattern must not contains wildcards.
if (!buffer)
return 0;
if (strbuf_read_file(buffer, path, 0) < 0)
- return error("could not read file '%s': %s",
- path, strerror(errno));
+ return error_errno("could not read file '%s'", path);
return 0;
}
ret = symlink(new, path);
free(new);
if (ret)
- return error("unable to create symlink %s (%s)",
- path, strerror(errno));
+ return error_errno("unable to create symlink %s",
+ path);
break;
}
fd = open_output_fd(path, ce, to_tempfile);
if (fd < 0) {
free(new);
- return error("unable to create file %s (%s)",
- path, strerror(errno));
+ return error_errno("unable to create file %s", path);
}
wrote = write_in_full(fd, new, size);
return error("%s is a directory", path.buf);
remove_subtree(&path);
} else if (unlink(path.buf))
- return error("unable to unlink old '%s' (%s)",
- path.buf, strerror(errno));
+ return error_errno("unable to unlink old '%s'", path.buf);
} else if (state->not_new)
return 0;
int warn_ambiguous_refs = 1;
int warn_on_object_refname_ambiguity = 1;
int ref_paranoia = -1;
-int repository_format_version;
int repository_format_precious_objects;
const char *git_commit_encoding;
const char *git_log_output_encoding;
-int shared_repository = PERM_UMASK;
const char *apply_default_whitespace;
const char *apply_default_ignorewhitespace;
const char *git_attributes_file;
+const char *git_hooks_path;
int zlib_compression_level = Z_BEST_SPEED;
int core_compression_level;
int core_compression_seen;
int merge_log_config = -1;
int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */
unsigned long pack_size_limit_cfg;
+enum hide_dotfiles_type hide_dotfiles = HIDE_DOTFILES_DOTGITONLY;
#ifndef PROTECT_HFS_DEFAULT
#define PROTECT_HFS_DEFAULT 0
{
return git_commit_encoding ? git_commit_encoding : "UTF-8";
}
+
+static int the_shared_repository = PERM_UMASK;
+static int need_shared_repository_from_config = 1;
+
+void set_shared_repository(int value)
+{
+ the_shared_repository = value;
+ need_shared_repository_from_config = 0;
+}
+
+int get_shared_repository(void)
+{
+ if (need_shared_repository_from_config) {
+ const char *var = "core.sharedrepository";
+ const char *value;
+ if (!git_config_get_value(var, &value))
+ the_shared_repository = git_config_perm(var, value);
+ need_shared_repository_from_config = 0;
+ }
+ return the_shared_repository;
+}
struct recent_command *rc;
if (!rpt) {
- error("can't write crash report %s: %s", loc, strerror(errno));
+ error_errno("can't write crash report %s", loc);
free(loc);
return;
}
t = root->tree;
for (i = 0; i < t->entry_count; i++) {
e = t->entries[i];
- if (e->name->str_len == n && !strncmp_icase(p, e->name->str_dat, n)) {
+ if (e->name->str_len == n && !fspathncmp(p, e->name->str_dat, n)) {
if (!*slash1) {
if (!S_ISDIR(mode)
&& e->versions[1].mode == mode
t = root->tree;
for (i = 0; i < t->entry_count; i++) {
e = t->entries[i];
- if (e->name->str_len == n && !strncmp_icase(p, e->name->str_dat, n)) {
+ if (e->name->str_len == n && !fspathncmp(p, e->name->str_dat, n)) {
if (*slash1 && !S_ISDIR(e->versions[1].mode))
/*
* If p names a file in some subdirectory, and a
t = root->tree;
for (i = 0; i < t->entry_count; i++) {
e = t->entries[i];
- if (e->name->str_len == n && !strncmp_icase(p, e->name->str_dat, n)) {
+ if (e->name->str_len == n && !fspathncmp(p, e->name->str_dat, n)) {
if (!*slash1)
goto found_entry;
if (!S_ISDIR(e->versions[1].mode))
return;
if (hold_lock_file_for_update(&mark_lock, export_marks_file, 0) < 0) {
- failure |= error("Unable to write marks file %s: %s",
- export_marks_file, strerror(errno));
+ failure |= error_errno("Unable to write marks file %s",
+ export_marks_file);
return;
}
dump_marks_helper(f, 0, marks);
if (commit_lock_file(&mark_lock)) {
- failure |= error("Unable to write file %s: %s",
- export_marks_file, strerror(errno));
+ failure |= error_errno("Unable to write file %s",
+ export_marks_file);
return;
}
}
#include "version.h"
#include "prio-queue.h"
#include "sha1-array.h"
-#include "sigchain.h"
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
int *xd = data;
int ret;
- sigchain_push(SIGPIPE, SIG_IGN);
ret = recv_sideband("fetch-pack", xd[0], out);
close(out);
- sigchain_pop(SIGPIPE);
return ret;
}
demux.proc = sideband_demux;
demux.data = xd;
demux.out = -1;
+ demux.isolate_sigpipe = 1;
if (start_async(&demux))
die("fetch-pack: unable to fork off sideband"
" demultiplexer");
FUNC(HAS_DOTGIT, WARN) \
FUNC(NULL_SHA1, WARN) \
FUNC(ZERO_PADDED_FILEMODE, WARN) \
+ FUNC(NUL_IN_COMMIT, WARN) \
/* infos (reported as warnings, but ignored by default) */ \
FUNC(BAD_TAG_NAME, INFO) \
FUNC(MISSING_TAGGER_ENTRY, INFO)
if (S_ISGITLINK(entry.mode))
continue;
if (S_ISDIR(entry.mode))
- result = options->walk(&lookup_tree(entry.sha1)->object, OBJ_TREE, data, options);
+ result = options->walk(&lookup_tree(entry.oid->hash)->object, OBJ_TREE, data, options);
else if (S_ISREG(entry.mode) || S_ISLNK(entry.mode))
- result = options->walk(&lookup_blob(entry.sha1)->object, OBJ_BLOB, data, options);
+ result = options->walk(&lookup_blob(entry.oid->hash)->object, OBJ_BLOB, data, options);
else {
result = error("in tree %s: entry %s has bad mode %.6o",
oid_to_hex(&tree->object.oid), entry.path, entry.mode);
while (desc.size) {
unsigned mode;
const char *name;
- const unsigned char *sha1;
+ const struct object_id *oid;
- sha1 = tree_entry_extract(&desc, &name, &mode);
+ oid = tree_entry_extract(&desc, &name, &mode);
- has_null_sha1 |= is_null_sha1(sha1);
+ has_null_sha1 |= is_null_oid(oid);
has_full_path |= !!strchr(name, '/');
has_empty_name |= !*name;
has_dot |= !strcmp(name, ".");
struct commit_graft *graft;
unsigned parent_count, parent_line_count = 0, author_count;
int err;
+ const char *buffer_begin = buffer;
if (verify_headers(buffer, size, &commit->object, options))
return -1;
err = fsck_ident(&buffer, &commit->object, options);
if (err)
return err;
- if (!commit->tree)
- return report(options, &commit->object, FSCK_MSG_BAD_TREE, "could not load commit's tree %s", sha1_to_hex(tree_sha1));
-
+ if (!commit->tree) {
+ err = report(options, &commit->object, FSCK_MSG_BAD_TREE, "could not load commit's tree %s", sha1_to_hex(tree_sha1));
+ if (err)
+ return err;
+ }
+ if (memchr(buffer_begin, '\0', size)) {
+ err = report(options, &commit->object, FSCK_MSG_NUL_IN_COMMIT,
+ "NUL byte in the commit object body");
+ if (err)
+ return err;
+ }
return 0;
}
#endif
#include <openssl/ssl.h>
#include <openssl/err.h>
-#ifdef NO_HMAC_CTX_CLEANUP
-#define HMAC_CTX_cleanup HMAC_cleanup
-#endif
#endif
/* On most systems <netdb.h> would have given us this, but
extern NORETURN void die(const char *err, ...) __attribute__((format (printf, 1, 2)));
extern NORETURN void die_errno(const char *err, ...) __attribute__((format (printf, 1, 2)));
extern int error(const char *err, ...) __attribute__((format (printf, 1, 2)));
+extern int error_errno(const char *err, ...) __attribute__((format (printf, 1, 2)));
extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2)));
+extern void warning_errno(const char *err, ...) __attribute__((format (printf, 1, 2)));
#ifndef NO_OPENSSL
#ifdef APPLE_COMMON_CRYPTO
# FUTURE: This would more accurately emulate CVS by sending
# another copy of sticky after processing the files in that
# directory. Or intermediate: perhaps send all sticky's for
- # $seendirs after after processing all files.
+ # $seendirs after processing all files.
}
# update \n
}
# Return working directory CVS revision "1.X" out
-# of the the working directory "entries" state, for the given filename.
+# of the working directory "entries" state, for the given filename.
# This is prefixed with a dash if the file is scheduled for removal
# when it is committed.
sub revparse
return $filename;
}
-# Remove prependdir from the path, so that is is relative to the directory
+# Remove prependdir from the path, so that it is relative to the directory
# the CVS client was started from, rather than the top of the project.
# Essentially the inverse of filecleanup().
sub remove_prependdir
"$GIT_DIFF_PATH_TOTAL" "$MERGED"
if use_ext_cmd
then
- printf "Launch '%s' [Y/n]: " \
+ printf "Launch '%s' [Y/n]? " \
"$GIT_DIFFTOOL_EXTCMD"
else
- printf "Launch '%s' [Y/n]: " "$merge_tool"
+ printf "Launch '%s' [Y/n]? " "$merge_tool"
fi
read ans || return
if test "$ans" = n
my %submodule;
my %symlink;
my @working_tree = ();
+ my %working_tree_dups = ();
my @rawdiff = split('\0', $diffrtn);
my $i = 0;
}
if ($rmode ne $null_mode) {
+ # Avoid duplicate working_tree entries
+ if ($working_tree_dups{$dst_path}++) {
+ next;
+ }
my ($use, $wt_sha1) = use_wt_file($repo, $workdir,
$dst_path, $rsha1);
if ($use) {
# temporary file to both the left and right directories to show the
# change in the recorded SHA1 for the submodule.
for my $path (keys %submodule) {
- my $ok;
+ my $ok = 0;
if (defined($submodule{$path}{left})) {
$ok = write_to_file("$ldir/$path",
"Subproject commit $submodule{$path}{left}");
# shows only the link itself, not the contents of the link target.
# This loop replicates that behavior.
for my $path (keys %symlink) {
- my $ok;
+ my $ok = 0;
if (defined($symlink{$path}{left})) {
$ok = write_to_file("$ldir/$path",
$symlink{$path}{left});
# MRC is the current "merge reference commit"
# MRT is the current "merge result tree"
+if ! git diff-index --quiet --cached HEAD --
+then
+ echo "Error: Your local changes to the following files would be overwritten by merge"
+ git diff-index --cached --name-only HEAD -- | sed -e 's/^/ /'
+ exit 2
+fi
MRC=$(git rev-parse --verify -q $head)
MRT=$(git write-tree)
NON_FF_MERGE=0
while true
do
echo "$MERGED seems unchanged."
- printf "Was the merge successful? [y/n] "
+ printf "Was the merge successful [y/n]? "
read answer || return 1
case "$answer" in
y*|Y*) return 0 ;;
fi
echo "$merge_tool"
}
+
+mergetool_find_win32_cmd () {
+ executable=$1
+ sub_directory=$2
+
+ # Use $executable if it exists in $PATH
+ if type -p "$executable" >/dev/null 2>&1
+ then
+ printf '%s' "$executable"
+ return
+ fi
+
+ # Look for executable in the typical locations
+ for directory in $(env | grep -Ei '^PROGRAM(FILES(\(X86\))?|W6432)=' |
+ cut -d '=' -f 2- | sort -u)
+ do
+ if test -n "$directory" && test -x "$directory/$sub_directory/$executable"
+ then
+ printf '%s' "$directory/$sub_directory/$executable"
+ return
+ fi
+ done
+
+ printf '%s' "$executable"
+}
prompt_after_failed_merge () {
while true
do
- printf "Continue merging other unresolved paths (y/n) ? "
+ printf "Continue merging other unresolved paths [y/n]? "
read ans || return 1
case "$ans" in
[yY]*)
if pointerProcess.wait():
os.remove(contentFile)
die('git-lfs pointer command failed. Did you install the extension?')
- pointerContents = [i+'\n' for i in pointerFile.split('\n')[2:][:-1]]
- oid = pointerContents[1].split(' ')[1].split(':')[1][:-1]
+
+ # Git LFS removed the preamble in the output of the 'pointer' command
+ # starting from version 1.2.0. Check for the preamble here to support
+ # earlier versions.
+ # c.f. https://github.com/github/git-lfs/commit/da2935d9a739592bc775c98d8ef4df9c72ea3b43
+ if pointerFile.startswith('Git LFS pointer for'):
+ pointerFile = re.sub(r'Git LFS pointer for.*\n\n', '', pointerFile)
+
+ oid = re.search(r'^oid \w+:(\w+)', pointerFile, re.MULTILINE).group(1)
localLargeFile = os.path.join(
os.getcwd(),
'.git', 'lfs', 'objects', oid[:2], oid[2:4],
)
# LFS Spec states that pointer files should not have the executable bit set.
gitMode = '100644'
- return (gitMode, pointerContents, localLargeFile)
+ return (gitMode, pointerFile, localLargeFile)
def pushFile(self, localLargeFile):
uploadProcess = subprocess.Popen(
fnum = fnum + 1
return files
+ def extractJobsFromCommit(self, commit):
+ jobs = []
+ jnum = 0
+ while commit.has_key("job%s" % jnum):
+ job = commit["job%s" % jnum]
+ jobs.append(job)
+ jnum = jnum + 1
+ return jobs
+
def stripRepoPath(self, path, prefixes):
"""When streaming files, this is called to map a p4 depot path
to where it should go in git. The prefixes are either
def commit(self, details, files, branch, parent = ""):
epoch = details["time"]
author = details["user"]
+ jobs = self.extractJobsFromCommit(details)
if self.verbose:
print('commit into {0}'.format(branch))
self.gitStream.write("data <<EOT\n")
self.gitStream.write(details["desc"])
+ if len(jobs) > 0:
+ self.gitStream.write("\nJobs: %s" % (' '.join(jobs)))
self.gitStream.write("\n[git-p4: depot-paths = \"%s\": change = %s" %
(','.join(self.branchPrefixes), details["change"]))
if len(details['options']) > 0:
error_on_missing_default_upstream () {
cmd="$1"
op_type="$2"
- op_prep="$3"
+ op_prep="$3" # FIXME: op_prep is no longer used
example="$4"
branch_name=$(git symbolic-ref -q HEAD)
+ display_branch_name="${branch_name#refs/heads/}"
# If there's only one remote, use that in the suggestion
- remote="<remote>"
+ remote="$(gettext "<remote>")"
+ branch="$(gettext "<branch>")"
if test $(git remote | wc -l) = 1
then
remote=$(git remote)
if test -z "$branch_name"
then
- echo "You are not currently on a branch. Please specify which
-branch you want to $op_type $op_prep. See git-${cmd}(1) for details.
-
- $example
-"
+ gettextln "You are not currently on a branch."
else
- echo "There is no tracking information for the current branch.
-Please specify which branch you want to $op_type $op_prep.
-See git-${cmd}(1) for details
-
- $example
-
-If you wish to set tracking information for this branch you can do so with:
-
- git branch --set-upstream-to=$remote/<branch> ${branch_name#refs/heads/}
-"
+ gettextln "There is no tracking information for the current branch."
+ fi
+ case "$op_type" in
+ rebase)
+ gettextln "Please specify which branch you want to rebase against."
+ ;;
+ merge)
+ gettextln "Please specify which branch you want to merge with."
+ ;;
+ *)
+ echo >&2 "BUG: unknown operation type: $op_type"
+ exit 1
+ ;;
+ esac
+ eval_gettextln "See git-\${cmd}(1) for details."
+ echo
+ echo " $example"
+ echo
+ if test -n "$branch_name"
+ then
+ gettextln "If you wish to set tracking information for this branch you can do so with:"
+ echo
+ echo " git branch --set-upstream-to=$remote/$branch $display_branch_name"
+ echo
fi
exit 1
}
cr=$(printf "\015")
strategy_args=${strategy:+--strategy=$strategy}
+test -n "$strategy_opts" &&
eval '
for strategy_opt in '"$strategy_opts"'
do
mark_action_done
do_pick $sha1 "$rest"
- warn "Stopped at $sha1... $rest"
+ sha1_abbrev=$(git rev-parse --short $sha1)
+ warn "Stopped at $sha1_abbrev... $rest"
exit_with_patch $sha1 0
;;
squash|s|fixup|f)
# $3: the input filename
check_commit_sha () {
badsha=0
- if test -z $1
+ if test -z "$1"
then
badsha=1
else
sha1_verif="$(git rev-parse --verify --quiet $1^{commit})"
- if test -z $sha1_verif
+ if test -z "$sha1_verif"
then
badsha=1
fi
GIT_MERGE_VERBOSITY=1 && export GIT_MERGE_VERBOSITY
fi
test -z "$strategy" && strategy=recursive
- eval 'git-merge-$strategy' $strategy_opts '"$cmt^" -- "$hd" "$cmt"'
+ # If cmt doesn't have a parent, don't include it as a base
+ base=$(git rev-parse --verify --quiet $cmt^)
+ eval 'git-merge-$strategy' $strategy_opts $base ' -- "$hd" "$cmt"'
rv=$?
case "$rv" in
0)
autosquash=
keep_empty=
test "$(git config --bool rebase.autosquash)" = "true" && autosquash=t
-gpg_sign_opt=
+case "$(git config --bool commit.gpgsign)" in
+true) gpg_sign_opt=-S ;;
+*) gpg_sign_opt= ;;
+esac
read_basic_state () {
test -f "$state_dir/head-name" &&
use 5.008;
use strict;
use warnings;
+use POSIX qw/strftime/;
use Term::ReadLine;
use Getopt::Long;
use Text::ParseWords;
-use Data::Dumper;
use Term::ANSIColor;
use File::Temp qw/ tempdir tempfile /;
use File::Spec::Functions qw(catfile);
# But it's a no-op to run sanitize_address on an already sanitized address.
$sender = sanitize_address($sender);
+my $to_whom = "To whom should the emails be sent (if anyone)?";
my $prompting = 0;
if (!@initial_to && !defined $to_cmd) {
- my $to = ask("Who should the emails be sent to (if any)? ",
+ my $to = ask("$to_whom ",
default => "",
valid_re => qr/\@.*\./, confirm_only => 1);
push @initial_to, parse_address_line($to) if defined $to; # sanitized/validated later
cleanup_compose_files();
exit(0);
}
- $address = ask("Who should the email be sent to (if any)? ",
+ $address = ask("$to_whom ",
default => "",
valid_re => qr/\@.*\./, confirm_only => 1);
}
sub make_message_id {
my $uniq;
if (!defined $message_id_stamp) {
- $message_id_stamp = sprintf("%s-%s", time, $$);
+ $message_id_stamp = strftime("%Y%m%d%H%M%S.$$", gmtime(time));
$message_id_serial = 0;
}
$message_id_serial++;
require Sys::Hostname;
$du_part = 'user@' . Sys::Hostname::hostname();
}
- my $message_id_template = "<%s-git-send-email-%s>";
+ my $message_id_template = "<%s-%s>";
$message_id = sprintf($message_id_template, $uniq, $du_part);
#print "new message id = $message_id\n"; # Was useful for debugging
}
git update-ref --create-reflog -m "$stash_msg" $ref_stash $w_commit
ret=$?
- test $ret != 0 && test -z $quiet &&
+ test $ret != 0 && test -z "$quiet" &&
die "$(eval_gettext "Cannot update \$ref_stash with \$w_commit")"
return $ret
}
git clean --force --quiet -d $CLEAN_X_OPTION
fi
- if test "$keep_index" = "t" && test -n $i_tree
+ if test "$keep_index" = "t" && test -n "$i_tree"
then
git read-tree --reset -u $i_tree
fi
USAGE="[--quiet] add [-b <branch>] [-f|--force] [--name <name>] [--reference <repository>] [--] <repository> [<path>]
or: $dashless [--quiet] status [--cached] [--recursive] [--] [<path>...]
or: $dashless [--quiet] init [--] [<path>...]
- or: $dashless [--quiet] deinit [-f|--force] [--] <path>...
+ or: $dashless [--quiet] deinit [-f|--force] (--all| [--] <path>...)
or: $dashless [--quiet] update [--init] [--remote] [-N|--no-fetch] [-f|--force] [--checkout|--merge|--rebase] [--reference <repository>] [--recursive] [--] [<path>...]
or: $dashless [--quiet] summary [--cached|--files] [--summary-limit <n>] [commit] [--] [<path>...]
or: $dashless [--quiet] foreach [--recursive] <command>
custom_name=
depth=
-# The function takes at most 2 arguments. The first argument is the
-# URL that navigates to the submodule origin repo. When relative, this URL
-# is relative to the superproject origin URL repo. The second up_path
-# argument, if specified, is the relative path that navigates
-# from the submodule working tree to the superproject working tree.
-#
-# The output of the function is the origin URL of the submodule.
-#
-# The output will either be an absolute URL or filesystem path (if the
-# superproject origin URL is an absolute URL or filesystem path,
-# respectively) or a relative file system path (if the superproject
-# origin URL is a relative file system path).
-#
-# When the output is a relative file system path, the path is either
-# relative to the submodule working tree, if up_path is specified, or to
-# the superproject working tree otherwise.
-resolve_relative_url ()
-{
- remote=$(get_default_remote)
- remoteurl=$(git config "remote.$remote.url") ||
- remoteurl=$(pwd) # the repository is its own authoritative upstream
- url="$1"
- remoteurl=${remoteurl%/}
- sep=/
- up_path="$2"
-
- case "$remoteurl" in
- *:*|/*)
- is_relative=
- ;;
- ./*|../*)
- is_relative=t
- ;;
- *)
- is_relative=t
- remoteurl="./$remoteurl"
- ;;
- esac
-
- while test -n "$url"
- do
- case "$url" in
- ../*)
- url="${url#../}"
- case "$remoteurl" in
- */*)
- remoteurl="${remoteurl%/*}"
- ;;
- *:*)
- remoteurl="${remoteurl%:*}"
- sep=:
- ;;
- *)
- if test -z "$is_relative" || test "." = "$remoteurl"
- then
- die "$(eval_gettext "cannot strip one component off url '\$remoteurl'")"
- else
- remoteurl=.
- fi
- ;;
- esac
- ;;
- ./*)
- url="${url#./}"
- ;;
- *)
- break;;
- esac
- done
- remoteurl="$remoteurl$sep${url%/}"
- echo "${is_relative:+${up_path}}${remoteurl#./}"
-}
-
# Resolve a path to be relative to another path. This is intended for
# converting submodule paths when git-submodule is run in a subdirectory
# and only handles paths where the directory separator is '/'.
# of the settings from GIT_CONFIG_PARAMETERS.
sanitize_submodule_env()
{
- sanitized_config=$(git submodule--helper sanitize-config)
+ save_config=$GIT_CONFIG_PARAMETERS
clear_local_git_env
- GIT_CONFIG_PARAMETERS=$sanitized_config
+ GIT_CONFIG_PARAMETERS=$save_config
+ export GIT_CONFIG_PARAMETERS
}
#
die "$(gettext "Relative path can only be used from the toplevel of the working tree")"
# dereference source url relative to parent's url
- realrepo=$(resolve_relative_url "$repo") || exit
+ realrepo=$(git submodule--helper resolve-relative-url "$repo") || exit
;;
*:*|/*)
# absolute url
die_if_unmatched "$mode"
if test -e "$sm_path"/.git
then
- displaypath=$(relative_path "$sm_path")
- say "$(eval_gettext "Entering '\$prefix\$displaypath'")"
+ displaypath=$(relative_path "$prefix$sm_path")
+ say "$(eval_gettext "Entering '\$displaypath'")"
name=$(git submodule--helper name "$sm_path")
(
prefix="$prefix$sm_path/"
cmd_foreach "--recursive" "$@"
fi
) <&3 3<&- ||
- die "$(eval_gettext "Stopping at '\$prefix\$displaypath'; script returned non-zero status.")"
+ die "$(eval_gettext "Stopping at '\$displaypath'; script returned non-zero status.")"
fi
done
}
shift
done
- git submodule--helper list --prefix "$wt_prefix" "$@" |
- while read mode sha1 stage sm_path
- do
- die_if_unmatched "$mode"
- name=$(git submodule--helper name "$sm_path") || exit
-
- displaypath=$(relative_path "$sm_path")
-
- # Copy url setting when it is not set yet
- if test -z "$(git config "submodule.$name.url")"
- then
- url=$(git config -f .gitmodules submodule."$name".url)
- test -z "$url" &&
- die "$(eval_gettext "No url found for submodule path '\$displaypath' in .gitmodules")"
-
- # Possibly a url relative to parent
- case "$url" in
- ./*|../*)
- url=$(resolve_relative_url "$url") || exit
- ;;
- esac
- git config submodule."$name".url "$url" ||
- die "$(eval_gettext "Failed to register url for submodule path '\$displaypath'")"
-
- say "$(eval_gettext "Submodule '\$name' (\$url) registered for path '\$displaypath'")"
- fi
-
- # Copy "update" setting when it is not set yet
- if upd="$(git config -f .gitmodules submodule."$name".update)" &&
- test -n "$upd" &&
- test -z "$(git config submodule."$name".update)"
- then
- case "$upd" in
- checkout | rebase | merge | none)
- ;; # known modes of updating
- *)
- echo >&2 "warning: unknown update mode '$upd' suggested for submodule '$name'"
- upd=none
- ;;
- esac
- git config submodule."$name".update "$upd" ||
- die "$(eval_gettext "Failed to register update mode for submodule path '\$displaypath'")"
- fi
- done
+ git ${wt_prefix:+-C "$wt_prefix"} submodule--helper init ${GIT_QUIET:+--quiet} ${prefix:+--prefix "$prefix"} "$@"
}
#
cmd_deinit()
{
# parse $args after "submodule ... deinit".
+ deinit_all=
while test $# -ne 0
do
case "$1" in
-q|--quiet)
GIT_QUIET=1
;;
+ --all)
+ deinit_all=t
+ ;;
--)
shift
break
shift
done
- if test $# = 0
+ if test -n "$deinit_all" && test "$#" -ne 0
+ then
+ echo >&2 "$(eval_gettext "pathspec and --all are incompatible")"
+ usage
+ fi
+ if test $# = 0 && test -z "$deinit_all"
then
- die "$(eval_gettext "Use '.' if you really want to deinitialize all submodules")"
+ die "$(eval_gettext "Use '--all' if you really want to deinitialize all submodules")"
fi
git submodule--helper list --prefix "$wt_prefix" "$@" |
;;
!*)
command="${update_module#!}"
- die_msg="$(eval_gettext "Execution of '\$command \$sha1' failed in submodule path '\$prefix\$sm_path'")"
- say_msg="$(eval_gettext "Submodule path '\$prefix\$sm_path': '\$command \$sha1'")"
+ die_msg="$(eval_gettext "Execution of '\$command \$sha1' failed in submodule path '\$displaypath'")"
+ say_msg="$(eval_gettext "Submodule path '\$displaypath': '\$command \$sha1'")"
must_die_on_failure=yes
;;
*)
if test -n "$recursive"
then
(
- prefix="$prefix$sm_path/"
+ prefix=$(relative_path "$prefix$sm_path/")
+ wt_prefix=
sanitize_submodule_env
cd "$sm_path" &&
eval cmd_update
(
prefix="$displaypath/"
sanitize_submodule_env
+ wt_prefix=
cd "$sm_path" &&
eval cmd_status
) ||
# guarantee a trailing /
up_path=${up_path%/}/ &&
# path from submodule work tree to submodule origin repo
- sub_origin_url=$(resolve_relative_url "$url" "$up_path") &&
+ sub_origin_url=$(git submodule--helper resolve-relative-url "$url" "$up_path") &&
# path from superproject work tree to submodule origin repo
- super_config_url=$(resolve_relative_url "$url") || exit
+ super_config_url=$(git submodule--helper resolve-relative-url "$url") || exit
;;
*)
sub_origin_url="$url"
+++ /dev/null
-# Pass --without docs to rpmbuild if you don't want the documentation
-
-Name: git
-Version: @@VERSION@@
-Release: 1%{?dist}
-Summary: Core git tools
-License: GPL
-Group: Development/Tools
-URL: http://kernel.org/pub/software/scm/git/
-Source: http://kernel.org/pub/software/scm/git/%{name}-%{version}.tar.gz
-BuildRequires: zlib-devel >= 1.2, openssl-devel, curl-devel, expat-devel, gettext %{!?_without_docs:, xmlto, asciidoc > 6.0.3}
-BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
-
-Requires: perl-Git = %{version}-%{release}
-Requires: zlib >= 1.2, rsync, less, openssh-clients, expat
-Provides: git-core = %{version}-%{release}
-Obsoletes: git-core <= 1.5.4.2
-Obsoletes: git-p4
-
-%description
-Git is a fast, scalable, distributed revision control system with an
-unusually rich command set that provides both high-level operations
-and full access to internals.
-
-The git rpm installs the core tools with minimal dependencies. To
-install all git packages, including tools for integrating with other
-SCMs, install the git-all meta-package.
-
-%package all
-Summary: Meta-package to pull in all git tools
-Group: Development/Tools
-Requires: git = %{version}-%{release}
-Requires: git-svn = %{version}-%{release}
-Requires: git-cvs = %{version}-%{release}
-Requires: git-arch = %{version}-%{release}
-Requires: git-email = %{version}-%{release}
-Requires: gitk = %{version}-%{release}
-Requires: gitweb = %{version}-%{release}
-Requires: git-gui = %{version}-%{release}
-Obsoletes: git <= 1.5.4.2
-
-%description all
-Git is a fast, scalable, distributed revision control system with an
-unusually rich command set that provides both high-level operations
-and full access to internals.
-
-This is a dummy package which brings in all subpackages.
-
-%package svn
-Summary: Git tools for importing Subversion repositories
-Group: Development/Tools
-Requires: git = %{version}-%{release}, subversion
-%description svn
-Git tools for importing Subversion repositories.
-
-%package cvs
-Summary: Git tools for importing CVS repositories
-Group: Development/Tools
-Requires: git = %{version}-%{release}, cvs, cvsps
-%description cvs
-Git tools for importing CVS repositories.
-
-%package arch
-Summary: Git tools for importing Arch repositories
-Group: Development/Tools
-Requires: git = %{version}-%{release}, tla
-%description arch
-Git tools for importing Arch repositories.
-
-%package email
-Summary: Git tools for sending email
-Group: Development/Tools
-Requires: git = %{version}-%{release}
-%description email
-Git tools for sending email.
-
-%package gui
-Summary: Git GUI tool
-Group: Development/Tools
-Requires: git = %{version}-%{release}, tk >= 8.4
-%description gui
-Git GUI tool
-
-%package -n gitk
-Summary: Git revision tree visualiser ('gitk')
-Group: Development/Tools
-Requires: git = %{version}-%{release}, tk >= 8.4
-%description -n gitk
-Git revision tree visualiser ('gitk')
-
-%package -n gitweb
-Summary: Git web interface
-Group: Development/Tools
-Requires: git = %{version}-%{release}
-%description -n gitweb
-Browsing git repository on the web
-
-%package -n perl-Git
-Summary: Perl interface to Git
-Group: Development/Libraries
-Requires: git = %{version}-%{release}
-Requires: perl(:MODULE_COMPAT_%(eval "`%{__perl} -V:version`"; echo $version))
-BuildRequires: perl(Error)
-BuildRequires: perl(ExtUtils::MakeMaker)
-
-%description -n perl-Git
-Perl interface to Git
-
-%define path_settings ETC_GITCONFIG=/etc/gitconfig prefix=%{_prefix} mandir=%{_mandir} htmldir=%{_docdir}/%{name}-%{version}
-%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
-
-%prep
-%setup -q
-
-%build
-make %{_smp_mflags} CFLAGS="$RPM_OPT_FLAGS" \
- %{path_settings} \
- all %{!?_without_docs: doc}
-
-%install
-rm -rf $RPM_BUILD_ROOT
-make %{_smp_mflags} CFLAGS="$RPM_OPT_FLAGS" DESTDIR=$RPM_BUILD_ROOT \
- %{path_settings} \
- INSTALLDIRS=vendor install %{!?_without_docs: install-doc}
-test ! -d $RPM_BUILD_ROOT%{python_sitelib} || rm -fr $RPM_BUILD_ROOT%{python_sitelib}
-find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';'
-find $RPM_BUILD_ROOT -type f -name '*.bs' -empty -exec rm -f {} ';'
-find $RPM_BUILD_ROOT -type f -name perllocal.pod -exec rm -f {} ';'
-
-(find $RPM_BUILD_ROOT%{_bindir} -type f | grep -vE "archimport|svn|cvs|email|gitk|git-gui|git-citool" | sed -e s@^$RPM_BUILD_ROOT@@) > bin-man-doc-files
-(find $RPM_BUILD_ROOT%{_libexecdir}/git-core -type f | grep -vE "archimport|svn|cvs|email|gitk|git-gui|git-citool" | sed -e s@^$RPM_BUILD_ROOT@@) >> bin-man-doc-files
-(find $RPM_BUILD_ROOT%{perl_vendorlib} -type f | sed -e s@^$RPM_BUILD_ROOT@@) >> perl-files
-%if %{!?_without_docs:1}0
-(find $RPM_BUILD_ROOT%{_mandir} $RPM_BUILD_ROOT/Documentation -type f | grep -vE "archimport|svn|git-cvs|email|gitk|git-gui|git-citool" | sed -e s@^$RPM_BUILD_ROOT@@ -e 's/$/*/' ) >> bin-man-doc-files
-%else
-rm -rf $RPM_BUILD_ROOT%{_mandir}
-%endif
-rm -rf $RPM_BUILD_ROOT%{_datadir}/locale
-
-mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/bash_completion.d
-install -m 644 -T contrib/completion/git-completion.bash $RPM_BUILD_ROOT%{_sysconfdir}/bash_completion.d/git
-
-%clean
-rm -rf $RPM_BUILD_ROOT
-
-%files -f bin-man-doc-files
-%defattr(-,root,root)
-%{_datadir}/git-core/
-%doc README.md COPYING Documentation/*.txt
-%{!?_without_docs: %doc Documentation/*.html Documentation/howto}
-%{!?_without_docs: %doc Documentation/technical}
-%{_sysconfdir}/bash_completion.d
-
-%files svn
-%defattr(-,root,root)
-%{_libexecdir}/git-core/*svn*
-%doc Documentation/*svn*.txt
-%{!?_without_docs: %{_mandir}/man1/*svn*.1*}
-%{!?_without_docs: %doc Documentation/*svn*.html }
-
-%files cvs
-%defattr(-,root,root)
-%doc Documentation/*git-cvs*.txt
-%{_bindir}/git-cvsserver
-%{_libexecdir}/git-core/*cvs*
-%{!?_without_docs: %{_mandir}/man1/*cvs*.1*}
-%{!?_without_docs: %doc Documentation/*git-cvs*.html }
-
-%files arch
-%defattr(-,root,root)
-%doc Documentation/git-archimport.txt
-%{_libexecdir}/git-core/git-archimport
-%{!?_without_docs: %{_mandir}/man1/git-archimport.1*}
-%{!?_without_docs: %doc Documentation/git-archimport.html }
-
-%files email
-%defattr(-,root,root)
-%doc Documentation/*email*.txt
-%{_libexecdir}/git-core/*email*
-%{!?_without_docs: %{_mandir}/man1/*email*.1*}
-%{!?_without_docs: %doc Documentation/*email*.html }
-
-%files gui
-%defattr(-,root,root)
-%{_libexecdir}/git-core/git-gui
-%{_libexecdir}/git-core/git-citool
-%{_libexecdir}/git-core/git-gui--askpass
-%{_datadir}/git-gui/
-%{!?_without_docs: %{_mandir}/man1/git-gui.1*}
-%{!?_without_docs: %doc Documentation/git-gui.html}
-%{!?_without_docs: %{_mandir}/man1/git-citool.1*}
-%{!?_without_docs: %doc Documentation/git-citool.html}
-
-%files -n gitk
-%defattr(-,root,root)
-%doc Documentation/*gitk*.txt
-%{_bindir}/*gitk*
-%{_datadir}/gitk/
-%{!?_without_docs: %{_mandir}/man1/*gitk*.1*}
-%{!?_without_docs: %doc Documentation/*gitk*.html }
-
-%files -n gitweb
-%defattr(-,root,root)
-%doc gitweb/README gitweb/INSTALL Documentation/*gitweb*.txt
-%{_datadir}/gitweb
-%{!?_without_docs: %{_mandir}/man1/*gitweb*.1*}
-%{!?_without_docs: %{_mandir}/man5/*gitweb*.5*}
-%{!?_without_docs: %doc Documentation/*gitweb*.html }
-
-%files -n perl-Git -f perl-files
-%defattr(-,root,root)
-
-%files all
-# No files for you!
-
-%changelog
-* Sun Sep 18 2011 Jakub Narebski <jnareb@gmail.com>
-- Add gitweb manpages to 'gitweb' subpackage
-
-* Wed Jun 30 2010 Junio C Hamano <gitster@pobox.com>
-- Add 'gitweb' subpackage.
-
-* Fri Mar 26 2010 Ian Ward Comfort <icomfort@stanford.edu>
-- Ship bash completion support from contrib/ in the core package.
-
-* Sun Jan 31 2010 Junio C Hamano <gitster@pobox.com>
-- Do not use %define inside %{!?...} construct.
-
-* Sat Jan 30 2010 Junio C Hamano <gitster@pobox.com>
-- We don't ship Python bits until a real foreign scm interface comes.
-
-* Mon Feb 04 2009 David J. Mellor <dmellor@whistlingcat.com>
-- fixed broken git help -w after renaming the git-core package to git.
-
-* Fri Sep 12 2008 Quy Tonthat <qtonthat@gmail.com>
-- move git-cvsserver to bindir.
-
-* Sun Jun 15 2008 Junio C Hamano <gitster@pobox.com>
-- Remove curl from Requires list.
-
-* Fri Feb 15 2008 Kristian Høgsberg <krh@redhat.com>
-- Rename git-core to just git and rename meta package from git to git-all.
-
-* Sun Feb 03 2008 James Bowes <jbowes@dangerouslyinc.com>
-- Add a BuildRequires for gettext
-
-* Fri Jan 11 2008 Junio C Hamano <gitster@pobox.com>
-- Include gitk message files
-
-* Sun Jan 06 2008 James Bowes <jbowes@dangerouslyinc.com>
-- Make the metapackage require the same version of the subpackages.
-
-* Wed Dec 12 2007 Junio C Hamano <gitster@pobox.com>
-- Adjust htmldir to point at /usr/share/doc/git-core-$version/
-
-* Sun Jul 15 2007 Sean Estabrooks <seanlkml@sympatico.ca>
-- Removed p4import.
-
-* Tue Jun 26 2007 Quy Tonthat <qtonthat@gmail.com>
-- Fixed problems looking for wrong manpages.
-
-* Thu Jun 21 2007 Shawn O. Pearce <spearce@spearce.org>
-- Added documentation files for git-gui
-
-* Tue May 13 2007 Quy Tonthat <qtonthat@gmail.com>
-- Added lib files for git-gui
-- Added Documentation/technical (As needed by Git Users Manual)
-
-* Tue May 8 2007 Quy Tonthat <qtonthat@gmail.com>
-- Added howto files
-
-* Tue Mar 27 2007 Eygene Ryabinkin <rea-git@codelabs.ru>
-- Added the git-p4 package: Perforce import stuff.
-
-* Mon Feb 13 2007 Nicolas Pitre <nico@fluxnic.net>
-- Update core package description (Git isn't as stupid as it used to be)
-
-* Mon Feb 12 2007 Junio C Hamano <junkio@cox.net>
-- Add git-gui and git-citool.
-
-* Mon Nov 14 2005 H. Peter Anvin <hpa@zytor.com> 0.99.9j-1
-- Change subpackage names to git-<name> instead of git-core-<name>
-- Create empty root package which brings in all subpackages
-- Rename git-tk -> gitk
-
-* Thu Nov 10 2005 Chris Wright <chrisw@osdl.org> 0.99.9g-1
-- zlib dependency fix
-- Minor cleanups from split
-- Move arch import to separate package as well
-
-* Tue Sep 27 2005 Jim Radford <radford@blackbean.org>
-- Move programs with non-standard dependencies (svn, cvs, email)
- into separate packages
-
-* Tue Sep 27 2005 H. Peter Anvin <hpa@zytor.com>
-- parallelize build
-- COPTS -> CFLAGS
-
-* Fri Sep 16 2005 Chris Wright <chrisw@osdl.org> 0.99.6-1
-- update to 0.99.6
-
-* Fri Sep 16 2005 Horst H. von Brand <vonbrand@inf.utfsm.cl>
-- Linus noticed that less is required, added to the dependencies
-
-* Sun Sep 11 2005 Horst H. von Brand <vonbrand@inf.utfsm.cl>
-- Updated dependencies
-- Don't assume manpages are gzipped
-
-* Thu Aug 18 2005 Chris Wright <chrisw@osdl.org> 0.99.4-4
-- drop sh_utils, sh-utils, diffutils, mktemp, and openssl Requires
-- use RPM_OPT_FLAGS in spec file, drop patch0
-
-* Wed Aug 17 2005 Tom "spot" Callaway <tcallawa@redhat.com> 0.99.4-3
-- use dist tag to differentiate between branches
-- use rpm optflags by default (patch0)
-- own %{_datadir}/git-core/
-
-* Mon Aug 15 2005 Chris Wright <chrisw@osdl.org>
-- update spec file to fix Buildroot, Requires, and drop Vendor
-
-* Sun Aug 07 2005 Horst H. von Brand <vonbrand@inf.utfsm.cl>
-- Redid the description
-- Cut overlong make line, loosened changelog a bit
-- I think Junio (or perhaps OSDL?) should be vendor...
-
-* Thu Jul 14 2005 Eric Biederman <ebiederm@xmission.com>
-- Add the man pages, and the --without docs build option
-
-* Wed Jul 7 2005 Chris Wright <chris@osdl.org>
-- initial git spec file
close $fd;
open $fd, quote_command(git_cmd(), "cat-file", "blob", $hash)." | ".
+ quote_command($^X, '-CO', '-MEncode=decode,FB_DEFAULT', '-pse',
+ '$_ = decode($fe, $_, FB_DEFAULT) if !utf8::decode($_);',
+ '--', "-fe=$fallback_encoding")." | ".
quote_command($highlight_bin).
" --replace-tabs=8 --fragment --syntax $syntax |"
or die_error(500, "Couldn't open file or run syntax highlighter");
args_gpg[0] = gpg_program;
fd = git_mkstemp(path, PATH_MAX, ".git_vtag_tmpXXXXXX");
if (fd < 0)
- return error(_("could not create temporary file '%s': %s"),
- path, strerror(errno));
+ return error_errno(_("could not create temporary file '%s'"), path);
if (write_in_full(fd, signature, signature_size) < 0)
- return error(_("failed writing detached signature to '%s': %s"),
- path, strerror(errno));
+ return error_errno(_("failed writing detached signature to '%s'"), path);
close(fd);
gpg.argv = args_gpg;
return error(_("could not run gpg."));
}
+ sigchain_push(SIGPIPE, SIG_IGN);
write_in_full(gpg.in, payload, payload_size);
close(gpg.in);
close(gpg.out);
ret = finish_command(&gpg);
+ sigchain_pop(SIGPIPE);
unlink_or_warn(path);
if (lstat(filename, &st) < 0) {
err_ret:
if (errno != ENOENT)
- error(_("'%s': %s"), filename, strerror(errno));
+ error_errno(_("failed to stat '%s'"), filename);
return -1;
}
if (!S_ISREG(st.st_mode))
goto err_ret;
data = xmallocz(size);
if (st.st_size != read_in_full(i, data, size)) {
- error(_("'%s': short read %s"), filename, strerror(errno));
+ error_errno(_("'%s': short read"), filename);
close(i);
free(data);
return -1;
const char *target = resolve_ref_unsafe(refname,
RESOLVE_REF_READING,
unused.hash, NULL);
- const char *target_nons = strip_namespace(target);
- strbuf_addf(buf, "ref: %s\n", target_nons);
+ if (target)
+ strbuf_addf(buf, "ref: %s\n", strip_namespace(target));
} else {
strbuf_addf(buf, "%s\n", oid_to_hex(oid));
}
static struct curl_slist *get_dav_token_headers(struct remote_lock *lock, enum dav_header_flag options)
{
struct strbuf buf = STRBUF_INIT;
- struct curl_slist *dav_headers = NULL;
+ struct curl_slist *dav_headers = http_copy_default_headers();
if (options & DAV_HEADER_IF) {
strbuf_addf(&buf, "If: (<%s>)", lock->token);
static void start_move(struct transfer_request *request)
{
struct active_request_slot *slot;
- struct curl_slist *dav_headers = NULL;
+ struct curl_slist *dav_headers = http_copy_default_headers();
slot = get_active_slot();
slot->callback_func = process_response;
char *ep;
char timeout_header[25];
struct remote_lock *lock = NULL;
- struct curl_slist *dav_headers = NULL;
+ struct curl_slist *dav_headers = http_copy_default_headers();
struct xml_ctx ctx;
char *escaped;
struct slot_results results;
struct strbuf in_buffer = STRBUF_INIT;
struct buffer out_buffer = { STRBUF_INIT, 0 };
- struct curl_slist *dav_headers = NULL;
+ struct curl_slist *dav_headers = http_copy_default_headers();
struct xml_ctx ctx;
struct remote_ls_ctx ls;
struct slot_results results;
struct strbuf in_buffer = STRBUF_INIT;
struct buffer out_buffer = { STRBUF_INIT, 0 };
- struct curl_slist *dav_headers = NULL;
+ struct curl_slist *dav_headers = http_copy_default_headers();
struct xml_ctx ctx;
int lock_flags = 0;
char *escaped;
while (tree_entry(&desc, &entry))
switch (object_type(entry.mode)) {
case OBJ_TREE:
- p = process_tree(lookup_tree(entry.sha1), p);
+ p = process_tree(lookup_tree(entry.oid->hash), p);
break;
case OBJ_BLOB:
- p = process_blob(lookup_blob(entry.sha1), p);
+ p = process_blob(lookup_blob(entry.oid->hash), p);
break;
default:
/* Subproject commit - not in this repository */
static struct curl_slist *pragma_header;
static struct curl_slist *no_pragma_header;
+static struct curl_slist *extra_http_headers;
static struct active_request_slot *active_queue_head;
return git_config_string(&http_proxy_authmethod, var, value);
if (!strcmp("http.cookiefile", var))
- return git_config_string(&curl_cookie_file, var, value);
+ return git_config_pathname(&curl_cookie_file, var, value);
if (!strcmp("http.savecookies", var)) {
curl_save_cookies = git_config_bool(var, value);
return 0;
#endif
}
+ if (!strcmp("http.extraheader", var)) {
+ if (!value) {
+ return config_error_nonbool(var);
+ } else if (!*value) {
+ curl_slist_free_all(extra_http_headers);
+ extra_http_headers = NULL;
+ } else {
+ extra_http_headers =
+ curl_slist_append(extra_http_headers, value);
+ }
+ return 0;
+ }
+
/* Fall back on the default ones */
return git_default_config(var, value, cb);
}
rc = setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void *)&ka, len);
if (rc < 0)
- warning("unable to set SO_KEEPALIVE on socket %s",
- strerror(errno));
+ warning_errno("unable to set SO_KEEPALIVE on socket");
return 0; /* CURL_SOCKOPT_OK only exists since curl 7.21.5 */
}
if (curl_http_proxy) {
curl_easy_setopt(result, CURLOPT_PROXY, curl_http_proxy);
#if LIBCURL_VERSION_NUM >= 0x071800
- if (starts_with(curl_http_proxy, "socks5"))
+ if (starts_with(curl_http_proxy, "socks5h"))
+ curl_easy_setopt(result,
+ CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5_HOSTNAME);
+ else if (starts_with(curl_http_proxy, "socks5"))
curl_easy_setopt(result,
CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5);
else if (starts_with(curl_http_proxy, "socks4a"))
if (remote)
var_override(&http_proxy_authmethod, remote->http_proxy_authmethod);
- pragma_header = curl_slist_append(pragma_header, "Pragma: no-cache");
- no_pragma_header = curl_slist_append(no_pragma_header, "Pragma:");
+ pragma_header = curl_slist_append(http_copy_default_headers(),
+ "Pragma: no-cache");
+ no_pragma_header = curl_slist_append(http_copy_default_headers(),
+ "Pragma:");
#ifdef USE_CURL_MULTI
{
#endif
curl_global_cleanup();
+ curl_slist_free_all(extra_http_headers);
+ extra_http_headers = NULL;
+
curl_slist_free_all(pragma_header);
pragma_header = NULL;
return handle_curl_result(results);
}
+struct curl_slist *http_copy_default_headers(void)
+{
+ struct curl_slist *headers = NULL, *h;
+
+ for (h = extra_http_headers; h; h = h->next)
+ headers = curl_slist_append(headers, h->data);
+
+ return headers;
+}
+
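Editor's note: the new http.extraheader configuration accumulates additional headers in the static extra_http_headers list, and http_copy_default_headers() clones that list so every request can append its own headers without mutating the shared defaults. Below is a minimal standalone sketch of the same clone-then-extend pattern using libcurl's curl_slist; the helper name copy_headers and the surrounding usage are illustrative, not git's actual code.

    #include <curl/curl.h>

    /* Clone a header list so per-request additions stay local (sketch). */
    static struct curl_slist *copy_headers(const struct curl_slist *defaults)
    {
            struct curl_slist *copy = NULL;
            const struct curl_slist *h;

            for (h = defaults; h; h = h->next)
                    copy = curl_slist_append(copy, h->data);
            return copy;
    }

    /*
     * Usage for one request (sketch):
     *
     *   struct curl_slist *hdrs = copy_headers(extra_http_headers);
     *   hdrs = curl_slist_append(hdrs, "Pragma: no-cache");
     *   curl_easy_setopt(curl, CURLOPT_HTTPHEADER, hdrs);
     *   ... perform the request, then curl_slist_free_all(hdrs) ...
     */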
static CURLcode curlinfo_strbuf(CURL *curl, CURLINFO info, struct strbuf *buf)
{
char *ptr;
{
struct active_request_slot *slot;
struct slot_results results;
- struct curl_slist *headers = NULL;
+ struct curl_slist *headers = http_copy_default_headers();
struct strbuf buf = STRBUF_INIT;
const char *accept_language;
int ret;
}
if (freq->localfile < 0) {
- error("Couldn't create temporary file %s: %s",
- freq->tmpfile, strerror(errno));
+ error_errno("Couldn't create temporary file %s", freq->tmpfile);
goto abort;
}
prev_posn = 0;
lseek(freq->localfile, 0, SEEK_SET);
if (ftruncate(freq->localfile, 0) < 0) {
- error("Couldn't truncate temporary file %s: %s",
- freq->tmpfile, strerror(errno));
+ error_errno("Couldn't truncate temporary file %s",
+ freq->tmpfile);
goto abort;
}
}
extern void http_init(struct remote *remote, const char *url,
int proactive_auth);
extern void http_cleanup(void);
+extern struct curl_slist *http_copy_default_headers(void);
extern long int git_curl_ipresolve;
extern int active_requests;
mailname = fopen("/etc/mailname", "r");
if (!mailname) {
if (errno != ENOENT)
- warning("cannot open /etc/mailname: %s",
- strerror(errno));
+ warning_errno("cannot open /etc/mailname");
return -1;
}
if (strbuf_getline(&mailnamebuf, mailname) == EOF) {
if (ferror(mailname))
- warning("cannot read /etc/mailname: %s",
- strerror(errno));
+ warning_errno("cannot read /etc/mailname");
strbuf_release(&mailnamebuf);
fclose(mailname);
return -1;
char buf[1024];
if (gethostname(buf, sizeof(buf))) {
- warning("cannot get host name: %s", strerror(errno));
+ warning_errno("cannot get host name");
strbuf_addstr(out, "(none)");
*is_bogus = 1;
return;
if (want_name) {
int using_default = 0;
if (!name) {
+ if (strict && ident_use_config_only
+ && !(ident_config_given & IDENT_NAME_GIVEN)) {
+ fputs(env_hint, stderr);
+ die("no name was given and auto-detection is disabled");
+ }
name = ident_default_name();
using_default = 1;
if (strict && default_name_is_bogus) {
fputs(env_hint, stderr);
die("unable to auto-detect name (got '%s')", name);
}
- if (strict && ident_use_config_only
- && !(ident_config_given & IDENT_NAME_GIVEN))
- die("user.useConfigOnly set but no name given");
}
if (!*name) {
struct passwd *pw;
}
if (!email) {
+ if (strict && ident_use_config_only
+ && !(ident_config_given & IDENT_MAIL_GIVEN)) {
+ fputs(env_hint, stderr);
+ die("no email was given and auto-detection is disabled");
+ }
email = ident_default_email();
if (strict && default_email_is_bogus) {
fputs(env_hint, stderr);
die("unable to auto-detect email address (got '%s')", email);
}
- if (strict && ident_use_config_only
- && !(ident_config_given & IDENT_MAIL_GIVEN))
- die("user.useConfigOnly set but no mail given");
}
strbuf_reset(&ident);
SSL_library_init();
SSL_load_error_strings();
- if (use_tls_only)
- meth = TLSv1_method();
- else
- meth = SSLv23_method();
-
+ meth = SSLv23_method();
if (!meth) {
ssl_socket_perror("SSLv23_method");
return -1;
}
ctx = SSL_CTX_new(meth);
+ if (!ctx) {
+ ssl_socket_perror("SSL_CTX_new");
+ return -1;
+ }
+
+ if (use_tls_only)
+ SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3);
if (verify)
SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER, NULL);
static char *cram(const char *challenge_64, const char *user, const char *pass)
{
int i, resp_len, encoded_len, decoded_len;
- HMAC_CTX hmac;
unsigned char hash[16];
char hex[33];
char *response, *response_64, *challenge;
(unsigned char *)challenge_64, encoded_len);
if (decoded_len < 0)
die("invalid challenge %s", challenge_64);
- HMAC_Init(&hmac, (unsigned char *)pass, strlen(pass), EVP_md5());
- HMAC_Update(&hmac, (unsigned char *)challenge, decoded_len);
- HMAC_Final(&hmac, hash, NULL);
- HMAC_CTX_cleanup(&hmac);
+ if (!HMAC(EVP_md5(), pass, strlen(pass), (unsigned char *)challenge, decoded_len, hash, NULL))
+ die("HMAC error");
hex[32] = 0;
for (i = 0; i < 16; i++) {
/* response: "<user> <digest in hex>" */
response = xstrfmt("%s %s", user, hex);
- resp_len = strlen(response) + 1;
+ resp_len = strlen(response);
response_64 = xmallocz(ENCODED_SIZE(resp_len));
encoded_len = EVP_EncodeBlock((unsigned char *)response_64,
srvc->pass = xstrdup(cred.password);
}
- if (CAP(NOLOGIN)) {
- fprintf(stderr, "Skipping account %s@%s, server forbids LOGIN\n", srvc->user, srvc->host);
- goto bail;
- }
-
if (srvc->auth_method) {
struct imap_cmd_cb cb;
goto bail;
}
} else {
+ if (CAP(NOLOGIN)) {
+ fprintf(stderr, "Skipping account %s@%s, server forbids LOGIN\n",
+ srvc->user, srvc->host);
+ goto bail;
+ }
if (!imap->buf.sock.ssl)
imap_warn("*** IMAP Warning *** Password is being "
"sent in the clear\n");
if (S_ISDIR(entry.mode))
process_tree(revs,
- lookup_tree(entry.sha1),
+ lookup_tree(entry.oid->hash),
show, base, entry.path,
cb_data);
else if (S_ISGITLINK(entry.mode))
- process_gitlink(revs, entry.sha1,
+ process_gitlink(revs, entry.oid->hash,
show, base, entry.path,
cb_data);
else
process_blob(revs,
- lookup_blob(entry.sha1),
+ lookup_blob(entry.oid->hash),
show, base, entry.path,
cb_data);
}
assert(opts);
/*
- * The tentative merge result is the or common ancestor for an internal merge.
+ * The tentative merge result is the common ancestor for an
+ * internal merge. For the final merge, it is "ours" by
+ * default but -Xours/-Xtheirs can tweak the choice.
*/
if (opts->virtual_ancestor) {
stolen = orig;
}
}
driver = find_ll_merge_driver(ll_driver_name);
- if (opts->virtual_ancestor && driver->recursive)
- driver = find_ll_merge_driver(driver->recursive);
+
+ if (opts->virtual_ancestor) {
+ if (driver->recursive)
+ driver = find_ll_merge_driver(driver->recursive);
+ marker_size += 2;
+ }
return driver->fn(driver, result_buf, path, ancestor, ancestor_label,
ours, our_label, theirs, their_label,
opts, marker_size);
ctx.fmt = opt->commit_format;
ctx.mailmap = opt->mailmap;
ctx.color = opt->diffopt.use_color;
+ ctx.expand_tabs_in_log = opt->expand_tabs_in_log;
ctx.output_encoding = get_log_output_encoding();
if (opt->from_ident.mail_begin && opt->from_ident.name_begin)
ctx.from_ident = &opt->from_ident;
if (!f) {
if (errno == ENOENT)
return 0;
- return error("unable to open mailmap at %s: %s",
- filename, strerror(errno));
+ return error_errno("unable to open mailmap at %s", filename);
}
while (fgets(buffer, sizeof(buffer), f) != NULL)
}
static void *fill_tree_desc_strict(struct tree_desc *desc,
- const unsigned char *hash)
+ const struct object_id *hash)
{
void *buffer;
enum object_type type;
unsigned long size;
- buffer = read_sha1_file(hash, &type, &size);
+ buffer = read_sha1_file(hash->hash, &type, &size);
if (!buffer)
- die("unable to read tree (%s)", sha1_to_hex(hash));
+ die("unable to read tree (%s)", oid_to_hex(hash));
if (type != OBJ_TREE)
- die("%s is not a tree", sha1_to_hex(hash));
+ die("%s is not a tree", oid_to_hex(hash));
init_tree_desc(desc, buffer, size);
return buffer;
}
/*
* Inspect two trees, and give a score that tells how similar they are.
*/
-static int score_trees(const unsigned char *hash1, const unsigned char *hash2)
+static int score_trees(const struct object_id *hash1, const struct object_id *hash2)
{
struct tree_desc one;
struct tree_desc two;
else if (cmp > 0)
/* path2 does not appear in one */
score += score_missing(e2.mode, e2.path);
- else if (hashcmp(e1.sha1, e2.sha1))
+ else if (oidcmp(e1.oid, e2.oid))
/* they are different */
score += score_differs(e1.mode, e2.mode, e1.path);
else
/*
* Match one itself and its subtrees with two and pick the best match.
*/
-static void match_trees(const unsigned char *hash1,
- const unsigned char *hash2,
+static void match_trees(const struct object_id *hash1,
+ const struct object_id *hash2,
int *best_score,
char **best_match,
const char *base,
while (one.size) {
const char *path;
- const unsigned char *elem;
+ const struct object_id *elem;
unsigned mode;
int score;
while (desc.size) {
const char *name;
unsigned mode;
- const unsigned char *sha1;
+ const struct object_id *oid;
- sha1 = tree_entry_extract(&desc, &name, &mode);
+ oid = tree_entry_extract(&desc, &name, &mode);
if (strlen(name) == toplen &&
!memcmp(name, prefix, toplen)) {
if (!S_ISDIR(mode))
die("entry %s in tree %s is not a tree",
name, sha1_to_hex(hash1));
- rewrite_here = (unsigned char *) sha1;
+ rewrite_here = (unsigned char *) oid->hash;
break;
}
update_tree_entry(&desc);
* other hand, it could cover tree one and we might need to pick a
* subtree of it.
*/
-void shift_tree(const unsigned char *hash1,
- const unsigned char *hash2,
- unsigned char *shifted,
+void shift_tree(const struct object_id *hash1,
+ const struct object_id *hash2,
+ struct object_id *shifted,
int depth_limit)
{
char *add_prefix;
match_trees(hash2, hash1, &del_score, &del_prefix, "", depth_limit);
/* Assume we do not have to do any shifting */
- hashcpy(shifted, hash2);
+ oidcpy(shifted, hash2);
if (add_score < del_score) {
/* We need to pick a subtree of two */
if (!*del_prefix)
return;
- if (get_tree_entry(hash2, del_prefix, shifted, &mode))
+ if (get_tree_entry(hash2->hash, del_prefix, shifted->hash, &mode))
die("cannot find path %s in tree %s",
- del_prefix, sha1_to_hex(hash2));
+ del_prefix, oid_to_hex(hash2));
return;
}
if (!*add_prefix)
return;
- splice_tree(hash1, add_prefix, hash2, shifted);
+ splice_tree(hash1->hash, add_prefix, hash2->hash, shifted->hash);
}
/*
* Unfortunately we cannot fundamentally tell which one to
* be prefixed, as recursive merge can work in either direction.
*/
-void shift_tree_by(const unsigned char *hash1,
- const unsigned char *hash2,
- unsigned char *shifted,
+void shift_tree_by(const struct object_id *hash1,
+ const struct object_id *hash2,
+ struct object_id *shifted,
const char *shift_prefix)
{
- unsigned char sub1[20], sub2[20];
+ struct object_id sub1, sub2;
unsigned mode1, mode2;
unsigned candidate = 0;
/* Can hash2 be a tree at shift_prefix in tree hash1? */
- if (!get_tree_entry(hash1, shift_prefix, sub1, &mode1) &&
+ if (!get_tree_entry(hash1->hash, shift_prefix, sub1.hash, &mode1) &&
S_ISDIR(mode1))
candidate |= 1;
/* Can hash1 be a tree at shift_prefix in tree hash2? */
- if (!get_tree_entry(hash2, shift_prefix, sub2, &mode2) &&
+ if (!get_tree_entry(hash2->hash, shift_prefix, sub2.hash, &mode2) &&
S_ISDIR(mode2))
candidate |= 2;
int score;
candidate = 0;
- score = score_trees(sub1, hash2);
+ score = score_trees(&sub1, hash2);
if (score > best_score) {
candidate = 1;
best_score = score;
}
- score = score_trees(sub2, hash1);
+ score = score_trees(&sub2, hash1);
if (score > best_score)
candidate = 2;
}
if (!candidate) {
/* Neither is plausible -- do not shift */
- hashcpy(shifted, hash2);
+ oidcpy(shifted, hash2);
return;
}
* shift tree2 down by adding shift_prefix above it
* to match tree1.
*/
- splice_tree(hash1, shift_prefix, hash2, shifted);
+ splice_tree(hash1->hash, shift_prefix, hash2->hash, shifted->hash);
else
/*
* shift tree2 up by removing shift_prefix from it
* to match tree1.
*/
- hashcpy(shifted, sub2);
+ oidcpy(shifted, &sub2);
}
struct object_id shifted;
if (!*subtree_shift) {
- shift_tree(one->object.oid.hash, two->object.oid.hash, shifted.hash, 0);
+ shift_tree(&one->object.oid, &two->object.oid, &shifted, 0);
} else {
- shift_tree_by(one->object.oid.hash, two->object.oid.hash, shifted.hash,
+ shift_tree_by(&one->object.oid, &two->object.oid, &shifted,
subtree_shift);
}
if (!oidcmp(&two->object.oid, &shifted))
base_len = newpath.len;
while (string_list_has_string(&o->current_file_set, newpath.buf) ||
string_list_has_string(&o->current_directory_set, newpath.buf) ||
- file_exists(newpath.buf)) {
+ (!o->call_depth && file_exists(newpath.buf))) {
strbuf_setlen(&newpath, base_len);
strbuf_addf(&newpath, "_%d", suffix++);
}
a->path, c1->path, ci->branch1,
b->path, c2->path, ci->branch2);
- remove_file(o, 1, a->path, would_lose_untracked(a->path));
- remove_file(o, 1, b->path, would_lose_untracked(b->path));
+ remove_file(o, 1, a->path, o->call_depth || would_lose_untracked(a->path));
+ remove_file(o, 1, b->path, o->call_depth || would_lose_untracked(b->path));
mfi_c1 = merge_file_special_markers(o, a, c1, &ci->ren1_other,
o->branch1, c1->path,
output(o, 1, _("CONFLICT (%s): There is a directory with name %s in %s. "
"Adding %s as %s"),
conf, path, other_branch, path, new_path);
- if (o->call_depth)
- remove_file_from_cache(path);
update_file(o, 0, sha, mode, new_path);
if (o->call_depth)
remove_file_from_cache(path);
--- /dev/null
+diff_cmd () {
+ "$merge_tool_path" "$LOCAL" "$REMOTE" -nh
+}
+
+merge_cmd () {
+ touch "$BACKUP"
+ if $base_present
+ then
+ "$merge_tool_path" -merge "$LOCAL" "$BASE" "$REMOTE" -o:"$MERGED" -nh
+ else
+ "$merge_tool_path" -merge "$LOCAL" "$REMOTE" -o:"$MERGED" -nh
+ fi
+ check_unchanged
+}
+
+translate_merge_tool_path() {
+ mergetool_find_win32_cmd "ExamDiff.com" "ExamDiff Pro"
+}
}
translate_merge_tool_path() {
- # Use WinMergeU.exe if it exists in $PATH
- if type -p WinMergeU.exe >/dev/null 2>&1
- then
- printf WinMergeU.exe
- return
- fi
-
- # Look for WinMergeU.exe in the typical locations
- winmerge_exe="WinMerge/WinMergeU.exe"
- for directory in $(env | grep -Ei '^PROGRAM(FILES(\(X86\))?|W6432)=' |
- cut -d '=' -f 2- | sort -u)
- do
- if test -n "$directory" && test -x "$directory/$winmerge_exe"
- then
- printf '%s' "$directory/$winmerge_exe"
- return
- fi
- done
-
- printf WinMergeU.exe
+ mergetool_find_win32_cmd "WinMergeU.exe" "WinMerge"
}
l = (struct leaf_node *)
xcalloc(1, sizeof(struct leaf_node));
hashcpy(l->key_sha1, object_sha1);
- hashcpy(l->val_sha1, entry.sha1);
+ hashcpy(l->val_sha1, entry.oid->hash);
if (len < 20) {
if (!S_ISDIR(entry.mode) || path_len != 2)
goto handle_non_note; /* not subtree */
}
strbuf_addstr(&non_note_path, entry.path);
add_non_note(t, strbuf_detach(&non_note_path, NULL),
- entry.mode, entry.sha1);
+ entry.mode, entry.oid->hash);
}
}
free(buf);
return 0;
case OPTION_COUNTUP:
+ if (*(int *)opt->value < 0)
+ *(int *)opt->value = 0;
*(int *)opt->value = unset ? 0 : *(int *)opt->value + 1;
return 0;
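Editor's note: with this OPTION_COUNTUP change, a count-up option may be initialized to a negative value meaning "not specified"; the first occurrence on the command line snaps it to 0 before incrementing, so callers can tell "never given" apart from "given zero times". A minimal sketch follows; the option name and variable are illustrative only.

    static int verbosity = -1;      /* -1 = not specified on the command line */

    static struct option options[] = {
            OPT_COUNTUP('v', "verbose", &verbosity, "be more verbose"),
            OPT_END()
    };

    /*
     * After parse_options(argc, argv, prefix, options, usage_str, 0),
     * verbosity is still -1 if -v was never given, letting the caller
     * fall back to a configured default instead of assuming zero.
     */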
#include "sha1-lookup.h"
#include "patch-ids.h"
-static int commit_patch_id(struct commit *commit, struct diff_options *options,
+int commit_patch_id(struct commit *commit, struct diff_options *options,
unsigned char *sha1)
{
if (commit->parents)
struct patch_id_bucket *patches;
};
+int commit_patch_id(struct commit *commit, struct diff_options *options,
+ unsigned char *sha1);
int init_patch_ids(struct patch_ids *);
int free_patch_ids(struct patch_ids *);
struct patch_id *add_commit_patch_id(struct commit *, struct patch_ids *);
#include "strbuf.h"
#include "string-list.h"
#include "dir.h"
+#include "worktree.h"
static int get_st_mode_bits(const char *path, int *mode)
{
* definite
* definition
*
- * The trie would look look like:
+ * The trie would look like:
* root: len = 0, children a and d non-NULL, value = NULL.
* a: len = 2, contents = bc, value = (data for "abc")
* d: len = 2, contents = ef, children i non-NULL, value = (data for "def")
update_common_dir(buf, git_dir_len, NULL);
}
-static void do_git_path(struct strbuf *buf, const char *fmt, va_list args)
+static void do_git_path(const struct worktree *wt, struct strbuf *buf,
+ const char *fmt, va_list args)
{
int gitdir_len;
- strbuf_addstr(buf, get_git_dir());
+ strbuf_addstr(buf, get_worktree_git_dir(wt));
if (buf->len && !is_dir_sep(buf->buf[buf->len - 1]))
strbuf_addch(buf, '/');
gitdir_len = buf->len;
va_list args;
strbuf_reset(buf);
va_start(args, fmt);
- do_git_path(buf, fmt, args);
+ do_git_path(NULL, buf, fmt, args);
va_end(args);
return buf->buf;
}
{
va_list args;
va_start(args, fmt);
- do_git_path(sb, fmt, args);
+ do_git_path(NULL, sb, fmt, args);
va_end(args);
}
struct strbuf *pathname = get_pathname();
va_list args;
va_start(args, fmt);
- do_git_path(pathname, fmt, args);
+ do_git_path(NULL, pathname, fmt, args);
va_end(args);
return pathname->buf;
}
struct strbuf path = STRBUF_INIT;
va_list args;
va_start(args, fmt);
- do_git_path(&path, fmt, args);
+ do_git_path(NULL, &path, fmt, args);
va_end(args);
return strbuf_detach(&path, NULL);
}
return cleanup_path(pathname->buf);
}
+const char *worktree_git_path(const struct worktree *wt, const char *fmt, ...)
+{
+ struct strbuf *pathname = get_pathname();
+ va_list args;
+ va_start(args, fmt);
+ do_git_path(wt, pathname, fmt, args);
+ va_end(args);
+ return pathname->buf;
+}
+
static void do_submodule_path(struct strbuf *buf, const char *path,
const char *fmt, va_list args)
{
va_end(args);
}
+static void do_git_common_path(struct strbuf *buf,
+ const char *fmt,
+ va_list args)
+{
+ strbuf_addstr(buf, get_git_common_dir());
+ if (buf->len && !is_dir_sep(buf->buf[buf->len - 1]))
+ strbuf_addch(buf, '/');
+ strbuf_vaddf(buf, fmt, args);
+ strbuf_cleanup_path(buf);
+}
+
+const char *git_common_path(const char *fmt, ...)
+{
+ struct strbuf *pathname = get_pathname();
+ va_list args;
+ va_start(args, fmt);
+ do_git_common_path(pathname, fmt, args);
+ va_end(args);
+ return pathname->buf;
+}
+
+void strbuf_git_common_path(struct strbuf *sb, const char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ do_git_common_path(sb, fmt, args);
+ va_end(args);
+}
+
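Editor's note: worktree_git_path() resolves a path inside a specific worktree's private git directory, while git_common_path() resolves against the shared common directory; both return pointers into the rotating get_pathname() buffers, so the result should be used or copied promptly. A hedged usage sketch, assuming a struct worktree pointer wt obtained from the worktrees API:

    /* Per-worktree file: each linked worktree has its own HEAD. */
    const char *head = worktree_git_path(wt, "HEAD");

    /* Shared file: packed-refs lives in the common git dir. */
    const char *packed = git_common_path("packed-refs");

    /*
     * Copy if both values are needed at the same time -- the static
     * pathname buffers are recycled after a few calls.
     */
    char *head_copy = xstrdup(head);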
int validate_headref(const char *path)
{
struct stat st;
{
int tweak;
- if (shared_repository < 0)
- tweak = -shared_repository;
+ if (get_shared_repository() < 0)
+ tweak = -get_shared_repository();
else
- tweak = shared_repository;
+ tweak = get_shared_repository();
if (!(mode & S_IWUSR))
tweak &= ~0222;
if (mode & S_IXUSR)
/* Copy read bits to execute bits */
tweak |= (tweak & 0444) >> 2;
- if (shared_repository < 0)
+ if (get_shared_repository() < 0)
mode = (mode & ~0777) | tweak;
else
mode |= tweak;
{
int old_mode, new_mode;
- if (!shared_repository)
+ if (!get_shared_repository())
return 0;
if (get_st_mode_bits(path, &old_mode) < 0)
return -1;
Execute the given C<COMMAND> in the same way as command_output_pipe()
does but return both an input pipe filehandle and an output pipe filehandle.
-The function will return return C<($pid, $pipe_in, $pipe_out, $ctx)>.
+The function will return C<($pid, $pipe_in, $pipe_out, $ctx)>.
See C<command_close_bidi_pipe()> for details.
=cut
"existing: $existing\n",
" globbed: $refname\n";
}
- my $u = (::cmt_metadata("$refname"))[0];
+ my $u = (::cmt_metadata("$refname"))[0] or die
+ "$refname: no associated commit metadata\n";
$u =~ s!^\Q$url\E(/|$)!! or die
"$refname: '$url' not found in '$u'\n";
if ($pathname ne $u) {
#: builtin/am.c:2321 builtin/commit.c:1593 builtin/merge.c:225
#: builtin/pull.c:159 builtin/revert.c:92 builtin/tag.c:355
msgid "key-id"
-msgstr "id de clé"
+msgstr "id-clé"
#: builtin/am.c:2322
msgid "GPG-sign commits"
#: builtin/checkout.c:1154
msgid "conflict style (merge or diff3)"
-msgstr "style de conflit (fusion ou diff3)"
+msgstr "style de conflit (merge (fusion) ou diff3)"
#: builtin/checkout.c:1157
msgid "do not limit pathspecs to sparse entries only"
#: builtin/fetch.c:122 builtin/log.c:1236
msgid "dir"
-msgstr "dir"
+msgstr "répertoire"
#: builtin/fetch.c:123
msgid "prepend this to submodule path output"
#: builtin/show-ref.c:165
msgid "only show tags (can be combined with heads)"
-msgstr "afficher seulement les étiquettes (peut être combiné avec des têtes)"
+msgstr "afficher seulement les étiquettes (peut être combiné avec heads)"
#: builtin/show-ref.c:166
msgid "only show heads (can be combined with tags)"
-msgstr "afficher seulement les têtes (peut être combiné avec des étiquettes)"
+msgstr "afficher seulement les têtes (peut être combiné avec tags)"
#: builtin/show-ref.c:167
msgid "stricter reference checking, requires exact ref path"
const char *name;
enum cmit_fmt format;
int is_tformat;
+ int expand_tabs_in_log;
int is_alias;
const char *user_format;
} *commit_formats;
static void setup_commit_formats(void)
{
struct cmt_fmt_map builtin_formats[] = {
- { "raw", CMIT_FMT_RAW, 0 },
- { "medium", CMIT_FMT_MEDIUM, 0 },
- { "short", CMIT_FMT_SHORT, 0 },
- { "email", CMIT_FMT_EMAIL, 0 },
- { "fuller", CMIT_FMT_FULLER, 0 },
- { "full", CMIT_FMT_FULL, 0 },
- { "oneline", CMIT_FMT_ONELINE, 1 }
+ { "raw", CMIT_FMT_RAW, 0, 0 },
+ { "medium", CMIT_FMT_MEDIUM, 0, 8 },
+ { "short", CMIT_FMT_SHORT, 0, 0 },
+ { "email", CMIT_FMT_EMAIL, 0, 0 },
+ { "fuller", CMIT_FMT_FULLER, 0, 8 },
+ { "full", CMIT_FMT_FULL, 0, 8 },
+ { "oneline", CMIT_FMT_ONELINE, 1, 0 }
};
commit_formats_len = ARRAY_SIZE(builtin_formats);
builtin_formats_len = commit_formats_len;
rev->commit_format = commit_format->format;
rev->use_terminator = commit_format->is_tformat;
+ rev->expand_tabs_in_log_default = commit_format->expand_tabs_in_log;
if (commit_format->format == CMIT_FMT_USERFORMAT) {
save_user_format(rev, commit_format->user_format,
commit_format->is_tformat);
strbuf_release(&title);
}
+static int pp_utf8_width(const char *start, const char *end)
+{
+ int width = 0;
+ size_t remain = end - start;
+
+ while (remain) {
+ int n = utf8_width(&start, &remain);
+ if (n < 0 || !start)
+ return -1;
+ width += n;
+ }
+ return width;
+}
+
+static void strbuf_add_tabexpand(struct strbuf *sb, int tabwidth,
+ const char *line, int linelen)
+{
+ const char *tab;
+
+ while ((tab = memchr(line, '\t', linelen)) != NULL) {
+ int width = pp_utf8_width(line, tab);
+
+ /*
+ * If it wasn't well-formed utf8, or it
+ * had characters with badly defined
+ * width (control characters etc), just
+ * give up on trying to align things.
+ */
+ if (width < 0)
+ break;
+
+ /* Output the data .. */
+ strbuf_add(sb, line, tab - line);
+
+ /* .. and the de-tabified tab */
+ strbuf_addchars(sb, ' ', tabwidth - (width % tabwidth));
+
+ /* Skip over the printed part .. */
+ linelen -= tab + 1 - line;
+ line = tab + 1;
+ }
+
+ /*
+ * Print out everything after the last tab without
+ * worrying about width - there's nothing more to
+ * align.
+ */
+ strbuf_add(sb, line, linelen);
+}
+
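Editor's note: the padding inserted for each tab is tabwidth - (width % tabwidth), i.e. just enough spaces to reach the next tab stop given the display width printed so far; a tab that already sits on a stop expands to a full tabwidth. A small worked sketch, standalone rather than the pretty.c code itself, assuming plain ASCII so byte count equals display width:

    #include <stdio.h>

    int main(void)
    {
            const int tabwidth = 8;
            /* display columns already printed before the tab */
            int widths[] = { 0, 3, 7, 8, 12 };
            int i;

            for (i = 0; i < (int)(sizeof(widths) / sizeof(widths[0])); i++) {
                    int pad = tabwidth - (widths[i] % tabwidth);
                    /* e.g. width 3 -> 5 spaces, width 8 -> 8 spaces */
                    printf("width %2d -> %d space(s)\n", widths[i], pad);
            }
            return 0;
    }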
+/*
+ * pp_handle_indent() prints out the indentation, and
+ * the whole line (without the final newline), after
+ * de-tabifying.
+ */
+static void pp_handle_indent(struct pretty_print_context *pp,
+ struct strbuf *sb, int indent,
+ const char *line, int linelen)
+{
+ strbuf_addchars(sb, ' ', indent);
+ if (pp->expand_tabs_in_log)
+ strbuf_add_tabexpand(sb, pp->expand_tabs_in_log, line, linelen);
+ else
+ strbuf_add(sb, line, linelen);
+}
+
void pp_remainder(struct pretty_print_context *pp,
const char **msg_p,
struct strbuf *sb,
strbuf_grow(sb, linelen + indent + 20);
if (indent)
- strbuf_addchars(sb, ' ', indent);
- strbuf_add(sb, line, linelen);
+ pp_handle_indent(pp, sb, indent, line, linelen);
+ else if (pp->expand_tabs_in_log)
+ strbuf_add_tabexpand(sb, pp->expand_tabs_in_log,
+ line, linelen);
+ else
+ strbuf_add(sb, line, linelen);
strbuf_addch(sb, '\n');
}
}
*/
if (errno == ENOENT)
return 0;
- return error("unable to stat %s: %s",
- sha1_to_hex(sha1), strerror(errno));
+ return error_errno("unable to stat %s", sha1_to_hex(sha1));
}
add_recent_object(sha1, st.st_mtime, data);
strbuf_release(&err);
return ret;
}
+
+int head_ref_submodule(const char *submodule, each_ref_fn fn, void *cb_data)
+{
+ struct object_id oid;
+ int flag;
+
+ if (submodule) {
+ if (resolve_gitlink_ref(submodule, "HEAD", oid.hash) == 0)
+ return fn("HEAD", &oid, 0, cb_data);
+
+ return 0;
+ }
+
+ if (!read_ref_full("HEAD", RESOLVE_REF_READING, oid.hash, &flag))
+ return fn("HEAD", &oid, flag, cb_data);
+
+ return 0;
+}
+
+int head_ref(each_ref_fn fn, void *cb_data)
+{
+ return head_ref_submodule(NULL, fn, cb_data);
+}
+
+int for_each_ref(each_ref_fn fn, void *cb_data)
+{
+ return do_for_each_ref(NULL, "", fn, 0, 0, cb_data);
+}
+
+int for_each_ref_submodule(const char *submodule, each_ref_fn fn, void *cb_data)
+{
+ return do_for_each_ref(submodule, "", fn, 0, 0, cb_data);
+}
+
+int for_each_ref_in(const char *prefix, each_ref_fn fn, void *cb_data)
+{
+ return do_for_each_ref(NULL, prefix, fn, strlen(prefix), 0, cb_data);
+}
+
+int for_each_fullref_in(const char *prefix, each_ref_fn fn, void *cb_data, unsigned int broken)
+{
+ unsigned int flag = 0;
+
+ if (broken)
+ flag = DO_FOR_EACH_INCLUDE_BROKEN;
+ return do_for_each_ref(NULL, prefix, fn, 0, flag, cb_data);
+}
+
+int for_each_ref_in_submodule(const char *submodule, const char *prefix,
+ each_ref_fn fn, void *cb_data)
+{
+ return do_for_each_ref(submodule, prefix, fn, strlen(prefix), 0, cb_data);
+}
+
+int for_each_replace_ref(each_ref_fn fn, void *cb_data)
+{
+ return do_for_each_ref(NULL, git_replace_ref_base, fn,
+ strlen(git_replace_ref_base), 0, cb_data);
+}
+
+int for_each_namespaced_ref(each_ref_fn fn, void *cb_data)
+{
+ struct strbuf buf = STRBUF_INIT;
+ int ret;
+ strbuf_addf(&buf, "%srefs/", get_git_namespace());
+ ret = do_for_each_ref(NULL, buf.buf, fn, 0, 0, cb_data);
+ strbuf_release(&buf);
+ return ret;
+}
+
+int for_each_rawref(each_ref_fn fn, void *cb_data)
+{
+ return do_for_each_ref(NULL, "", fn, 0,
+ DO_FOR_EACH_INCLUDE_BROKEN, cb_data);
+}
+
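Editor's note: all of these iterators funnel into do_for_each_ref() with different prefixes and flags; callers only supply an each_ref_fn callback. A hedged sketch of a caller, assuming the usual git-internal headers (cache.h, refs.h); the callback name and counter are illustrative:

    /* Count refs under refs/tags/ (illustrative callback). */
    static int count_ref(const char *refname, const struct object_id *oid,
                         int flags, void *cb_data)
    {
            int *count = cb_data;
            (*count)++;
            return 0;       /* a non-zero return would stop the iteration */
    }

    /*
     * int n = 0;
     * for_each_ref_in("refs/tags/", count_ref, &n);
     */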
+/* This function needs to return a meaningful errno on failure */
+const char *resolve_ref_unsafe(const char *refname, int resolve_flags,
+ unsigned char *sha1, int *flags)
+{
+ static struct strbuf sb_refname = STRBUF_INIT;
+ int unused_flags;
+ int symref_count;
+
+ if (!flags)
+ flags = &unused_flags;
+
+ *flags = 0;
+
+ if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
+ if (!(resolve_flags & RESOLVE_REF_ALLOW_BAD_NAME) ||
+ !refname_is_safe(refname)) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ /*
+ * dwim_ref() uses REF_ISBROKEN to distinguish between
+ * missing refs and refs that were present but invalid,
+ * to complain about the latter to stderr.
+ *
+ * We don't know whether the ref exists, so don't set
+ * REF_ISBROKEN yet.
+ */
+ *flags |= REF_BAD_NAME;
+ }
+
+ for (symref_count = 0; symref_count < SYMREF_MAXDEPTH; symref_count++) {
+ unsigned int read_flags = 0;
+
+ if (read_raw_ref(refname, sha1, &sb_refname, &read_flags)) {
+ *flags |= read_flags;
+ if (errno != ENOENT || (resolve_flags & RESOLVE_REF_READING))
+ return NULL;
+ hashclr(sha1);
+ if (*flags & REF_BAD_NAME)
+ *flags |= REF_ISBROKEN;
+ return refname;
+ }
+
+ *flags |= read_flags;
+
+ if (!(read_flags & REF_ISSYMREF)) {
+ if (*flags & REF_BAD_NAME) {
+ hashclr(sha1);
+ *flags |= REF_ISBROKEN;
+ }
+ return refname;
+ }
+
+ refname = sb_refname.buf;
+ if (resolve_flags & RESOLVE_REF_NO_RECURSE) {
+ hashclr(sha1);
+ return refname;
+ }
+ if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
+ if (!(resolve_flags & RESOLVE_REF_ALLOW_BAD_NAME) ||
+ !refname_is_safe(refname)) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ *flags |= REF_ISBROKEN | REF_BAD_NAME;
+ }
+ }
+
+ errno = ELOOP;
+ return NULL;
+}
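Editor's note: the rewritten resolve_ref_unsafe() now loops at most SYMREF_MAXDEPTH times, delegating each hop to read_raw_ref(). A short usage sketch, assuming the usual git-internal headers and trimming error handling; the wrapper function is illustrative only:

    static void show_head_target(void)
    {
            unsigned char sha1[20];
            int flags;
            const char *ref;

            /* Follow HEAD through any chain of symrefs to the final ref. */
            ref = resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, sha1, &flags);
            if (!ref)
                    die_errno("cannot resolve HEAD");
            if (flags & REF_ISSYMREF)
                    /* ref is the branch HEAD ultimately points at */
                    printf("HEAD -> %s (%s)\n", ref, sha1_to_hex(sha1));
    }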
extern int create_symref(const char *refname, const char *target, const char *logmsg);
+/*
+ * Update HEAD of the specified gitdir.
+ * Similar to create_symref("relative-git-dir/HEAD", target, NULL), but
+ * this can update the main working tree's HEAD regardless of where
+ * $GIT_DIR points to.
+ * Return 0 if successful, non-zero otherwise.
+ */
+extern int set_worktree_head_symref(const char *gitdir, const char *target);
+
enum action_on_err {
UPDATE_REFS_MSG_ON_ERR,
UPDATE_REFS_DIE_ON_ERR,
dir->sorted = dir->nr = i;
}
-/* Include broken references in a do_for_each_ref*() iteration: */
-#define DO_FOR_EACH_INCLUDE_BROKEN 0x01
-
/*
* Return true iff the reference described by entry can be resolved to
* an object in the database. Emit a warning if the referred-to
return get_ref_dir(refs->loose);
}
-/* We allow "recursive" symbolic refs. Only within reason, though */
-#define MAXDEPTH 5
#define MAXREFLEN (1024)
/*
char buffer[128], *p;
char *path;
- if (recursion > MAXDEPTH || strlen(refname) > MAXREFLEN)
+ if (recursion > SYMREF_MAXDEPTH || strlen(refname) > MAXREFLEN)
return -1;
path = *refs->name
? git_pathdup_submodule(refs->name, "%s", refname)
}
/*
- * A loose ref file doesn't exist; check for a packed ref. The
- * options are forwarded from resolve_safe_unsafe().
+ * A loose ref file doesn't exist; check for a packed ref.
*/
static int resolve_missing_loose_ref(const char *refname,
- int resolve_flags,
unsigned char *sha1,
- int *flags)
+ unsigned int *flags)
{
struct ref_entry *entry;
entry = get_packed_ref(refname);
if (entry) {
hashcpy(sha1, entry->u.value.oid.hash);
- if (flags)
- *flags |= REF_ISPACKED;
- return 0;
- }
- /* The reference is not a packed reference, either. */
- if (resolve_flags & RESOLVE_REF_READING) {
- errno = ENOENT;
- return -1;
- } else {
- hashclr(sha1);
+ *flags |= REF_ISPACKED;
return 0;
}
+ /* refname is not a packed reference. */
+ return -1;
}
-/* This function needs to return a meaningful errno on failure */
-static const char *resolve_ref_1(const char *refname,
- int resolve_flags,
- unsigned char *sha1,
- int *flags,
- struct strbuf *sb_refname,
- struct strbuf *sb_path,
- struct strbuf *sb_contents)
+/*
+ * Read a raw ref from the filesystem or packed refs file.
+ *
+ * If the ref is a sha1, fill in sha1 and return 0.
+ *
+ * If the ref is symbolic, fill in *symref with the referent
+ * (e.g. "refs/heads/master") and return 0. The caller is responsible
+ * for validating the referent. Set REF_ISSYMREF in flags.
+ *
+ * If the ref doesn't exist, set errno to ENOENT and return -1.
+ *
+ * If the ref exists but is neither a symbolic ref nor a sha1, it is
+ * broken. Set REF_ISBROKEN in flags, set errno to EINVAL, and return
+ * -1.
+ *
+ * If there is another error reading the ref, set errno appropriately and
+ * return -1.
+ *
+ * Backend-specific flags might be set in flags as well, regardless of
+ * outcome.
+ *
+ * symref is a caller-supplied scratch buffer; its previous contents are discarded.
+ *
+ * It is OK for refname to point into symref. In this case:
+ * - if the function succeeds with REF_ISSYMREF, symref will be
+ * overwritten and the memory pointed to by refname might be changed
+ * or even freed.
+ * - in all other cases, symref will be untouched, and therefore
+ * refname will still be valid and unchanged.
+ */
+int read_raw_ref(const char *refname, unsigned char *sha1,
+ struct strbuf *symref, unsigned int *flags)
{
- int depth = MAXDEPTH;
- int bad_name = 0;
+ struct strbuf sb_contents = STRBUF_INIT;
+ struct strbuf sb_path = STRBUF_INIT;
+ const char *path;
+ const char *buf;
+ struct stat st;
+ int fd;
+ int ret = -1;
+ int save_errno;
- if (flags)
- *flags = 0;
+ strbuf_reset(&sb_path);
+ strbuf_git_path(&sb_path, "%s", refname);
+ path = sb_path.buf;
- if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
- if (flags)
- *flags |= REF_BAD_NAME;
+stat_ref:
+ /*
+ * We might have to loop back here to avoid a race
+ * condition: first we lstat() the file, then we try
+ * to read it as a link or as a file. But if somebody
+ * changes the type of the file (file <-> directory
+ * <-> symlink) between the lstat() and reading, then
+ * we don't want to report that as an error but rather
+ * try again starting with the lstat().
+ */
- if (!(resolve_flags & RESOLVE_REF_ALLOW_BAD_NAME) ||
- !refname_is_safe(refname)) {
- errno = EINVAL;
- return NULL;
+ if (lstat(path, &st) < 0) {
+ if (errno != ENOENT)
+ goto out;
+ if (resolve_missing_loose_ref(refname, sha1, flags)) {
+ errno = ENOENT;
+ goto out;
}
- /*
- * dwim_ref() uses REF_ISBROKEN to distinguish between
- * missing refs and refs that were present but invalid,
- * to complain about the latter to stderr.
- *
- * We don't know whether the ref exists, so don't set
- * REF_ISBROKEN yet.
- */
- bad_name = 1;
+ ret = 0;
+ goto out;
}
- for (;;) {
- const char *path;
- struct stat st;
- char *buf;
- int fd;
-
- if (--depth < 0) {
- errno = ELOOP;
- return NULL;
- }
-
- strbuf_reset(sb_path);
- strbuf_git_path(sb_path, "%s", refname);
- path = sb_path->buf;
- /*
- * We might have to loop back here to avoid a race
- * condition: first we lstat() the file, then we try
- * to read it as a link or as a file. But if somebody
- * changes the type of the file (file <-> directory
- * <-> symlink) between the lstat() and reading, then
- * we don't want to report that as an error but rather
- * try again starting with the lstat().
- */
- stat_ref:
- if (lstat(path, &st) < 0) {
- if (errno != ENOENT)
- return NULL;
- if (resolve_missing_loose_ref(refname, resolve_flags,
- sha1, flags))
- return NULL;
- if (bad_name) {
- hashclr(sha1);
- if (flags)
- *flags |= REF_ISBROKEN;
- }
- return refname;
- }
-
- /* Follow "normalized" - ie "refs/.." symlinks by hand */
- if (S_ISLNK(st.st_mode)) {
- strbuf_reset(sb_contents);
- if (strbuf_readlink(sb_contents, path, 0) < 0) {
- if (errno == ENOENT || errno == EINVAL)
- /* inconsistent with lstat; retry */
- goto stat_ref;
- else
- return NULL;
- }
- if (starts_with(sb_contents->buf, "refs/") &&
- !check_refname_format(sb_contents->buf, 0)) {
- strbuf_swap(sb_refname, sb_contents);
- refname = sb_refname->buf;
- if (flags)
- *flags |= REF_ISSYMREF;
- if (resolve_flags & RESOLVE_REF_NO_RECURSE) {
- hashclr(sha1);
- return refname;
- }
- continue;
- }
- }
-
- /* Is it a directory? */
- if (S_ISDIR(st.st_mode)) {
- errno = EISDIR;
- return NULL;
- }
-
- /*
- * Anything else, just open it and try to use it as
- * a ref
- */
- fd = open(path, O_RDONLY);
- if (fd < 0) {
- if (errno == ENOENT)
+ /* Follow "normalized" - ie "refs/.." symlinks by hand */
+ if (S_ISLNK(st.st_mode)) {
+ strbuf_reset(&sb_contents);
+ if (strbuf_readlink(&sb_contents, path, 0) < 0) {
+ if (errno == ENOENT || errno == EINVAL)
/* inconsistent with lstat; retry */
goto stat_ref;
else
- return NULL;
+ goto out;
}
- strbuf_reset(sb_contents);
- if (strbuf_read(sb_contents, fd, 256) < 0) {
- int save_errno = errno;
- close(fd);
- errno = save_errno;
- return NULL;
+ if (starts_with(sb_contents.buf, "refs/") &&
+ !check_refname_format(sb_contents.buf, 0)) {
+ strbuf_swap(&sb_contents, symref);
+ *flags |= REF_ISSYMREF;
+ ret = 0;
+ goto out;
}
- close(fd);
- strbuf_rtrim(sb_contents);
+ }
- /*
- * Is it a symbolic ref?
- */
- if (!starts_with(sb_contents->buf, "ref:")) {
- /*
- * Please note that FETCH_HEAD has a second
- * line containing other data.
- */
- if (get_sha1_hex(sb_contents->buf, sha1) ||
- (sb_contents->buf[40] != '\0' && !isspace(sb_contents->buf[40]))) {
- if (flags)
- *flags |= REF_ISBROKEN;
- errno = EINVAL;
- return NULL;
- }
- if (bad_name) {
- hashclr(sha1);
- if (flags)
- *flags |= REF_ISBROKEN;
- }
- return refname;
- }
- if (flags)
- *flags |= REF_ISSYMREF;
- buf = sb_contents->buf + 4;
+ /* Is it a directory? */
+ if (S_ISDIR(st.st_mode)) {
+ errno = EISDIR;
+ goto out;
+ }
+
+ /*
+ * Anything else, just open it and try to use it as
+ * a ref
+ */
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ /* inconsistent with lstat; retry */
+ goto stat_ref;
+ else
+ goto out;
+ }
+ strbuf_reset(&sb_contents);
+ if (strbuf_read(&sb_contents, fd, 256) < 0) {
+ int save_errno = errno;
+ close(fd);
+ errno = save_errno;
+ goto out;
+ }
+ close(fd);
+ strbuf_rtrim(&sb_contents);
+ buf = sb_contents.buf;
+ if (starts_with(buf, "ref:")) {
+ buf += 4;
while (isspace(*buf))
buf++;
- strbuf_reset(sb_refname);
- strbuf_addstr(sb_refname, buf);
- refname = sb_refname->buf;
- if (resolve_flags & RESOLVE_REF_NO_RECURSE) {
- hashclr(sha1);
- return refname;
- }
- if (check_refname_format(buf, REFNAME_ALLOW_ONELEVEL)) {
- if (flags)
- *flags |= REF_ISBROKEN;
-
- if (!(resolve_flags & RESOLVE_REF_ALLOW_BAD_NAME) ||
- !refname_is_safe(buf)) {
- errno = EINVAL;
- return NULL;
- }
- bad_name = 1;
- }
+
+ strbuf_reset(symref);
+ strbuf_addstr(symref, buf);
+ *flags |= REF_ISSYMREF;
+ ret = 0;
+ goto out;
}
-}
-const char *resolve_ref_unsafe(const char *refname, int resolve_flags,
- unsigned char *sha1, int *flags)
-{
- static struct strbuf sb_refname = STRBUF_INIT;
- struct strbuf sb_contents = STRBUF_INIT;
- struct strbuf sb_path = STRBUF_INIT;
- const char *ret;
+ /*
+ * Please note that FETCH_HEAD has additional
+ * data after the sha.
+ */
+ if (get_sha1_hex(buf, sha1) ||
+ (buf[40] != '\0' && !isspace(buf[40]))) {
+ *flags |= REF_ISBROKEN;
+ errno = EINVAL;
+ goto out;
+ }
+
+ ret = 0;
- ret = resolve_ref_1(refname, resolve_flags, sha1, flags,
- &sb_refname, &sb_path, &sb_contents);
+out:
+ save_errno = errno;
strbuf_release(&sb_path);
strbuf_release(&sb_contents);
+ errno = save_errno;
return ret;
}
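
The comment above fully specifies the new low-level contract, and resolve_ref_unsafe() can then be rebuilt as a bounded loop on top of it. The following is a rough, hypothetical sketch (not part of this patch, written as if it sat in refs.c next to read_raw_ref(); helper name invented) of following a symref chain within SYMREF_MAXDEPTH:

    /*
     * Sketch: resolve refname to a sha1 by following at most
     * SYMREF_MAXDEPTH levels of symbolic refs via read_raw_ref().
     */
    static int resolve_sketch(const char *refname, unsigned char *sha1)
    {
        struct strbuf symref = STRBUF_INIT;
        int depth, ret = -1;

        for (depth = 0; depth < SYMREF_MAXDEPTH; depth++) {
            unsigned int flags = 0;

            if (read_raw_ref(refname, sha1, &symref, &flags))
                goto out;           /* errno already set for us */
            if (!(flags & REF_ISSYMREF)) {
                ret = 0;            /* sha1 has been filled in */
                goto out;
            }
            /* documented as OK: refname may point into symref */
            refname = symref.buf;
        }
        errno = ELOOP;              /* symref chain too deep */
    out:
        strbuf_release(&symref);
        return ret;
    }
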
* value, stop the iteration and return that value; otherwise, return
* 0.
*/
-static int do_for_each_ref(struct ref_cache *refs, const char *base,
- each_ref_fn fn, int trim, int flags, void *cb_data)
+int do_for_each_ref(const char *submodule, const char *base,
+ each_ref_fn fn, int trim, int flags, void *cb_data)
{
struct ref_entry_cb data;
+ struct ref_cache *refs;
+
+ refs = get_ref_cache(submodule);
data.base = base;
data.trim = trim;
data.flags = flags;
return do_for_each_entry(refs, base, do_one_ref, &data);
}
-static int do_head_ref(const char *submodule, each_ref_fn fn, void *cb_data)
-{
- struct object_id oid;
- int flag;
-
- if (submodule) {
- if (resolve_gitlink_ref(submodule, "HEAD", oid.hash) == 0)
- return fn("HEAD", &oid, 0, cb_data);
-
- return 0;
- }
-
- if (!read_ref_full("HEAD", RESOLVE_REF_READING, oid.hash, &flag))
- return fn("HEAD", &oid, flag, cb_data);
-
- return 0;
-}
-
-int head_ref(each_ref_fn fn, void *cb_data)
-{
- return do_head_ref(NULL, fn, cb_data);
-}
-
-int head_ref_submodule(const char *submodule, each_ref_fn fn, void *cb_data)
-{
- return do_head_ref(submodule, fn, cb_data);
-}
-
-int for_each_ref(each_ref_fn fn, void *cb_data)
-{
- return do_for_each_ref(&ref_cache, "", fn, 0, 0, cb_data);
-}
-
-int for_each_ref_submodule(const char *submodule, each_ref_fn fn, void *cb_data)
-{
- return do_for_each_ref(get_ref_cache(submodule), "", fn, 0, 0, cb_data);
-}
-
-int for_each_ref_in(const char *prefix, each_ref_fn fn, void *cb_data)
-{
- return do_for_each_ref(&ref_cache, prefix, fn, strlen(prefix), 0, cb_data);
-}
-
-int for_each_fullref_in(const char *prefix, each_ref_fn fn, void *cb_data, unsigned int broken)
-{
- unsigned int flag = 0;
-
- if (broken)
- flag = DO_FOR_EACH_INCLUDE_BROKEN;
- return do_for_each_ref(&ref_cache, prefix, fn, 0, flag, cb_data);
-}
-
-int for_each_ref_in_submodule(const char *submodule, const char *prefix,
- each_ref_fn fn, void *cb_data)
-{
- return do_for_each_ref(get_ref_cache(submodule), prefix, fn, strlen(prefix), 0, cb_data);
-}
-
-int for_each_replace_ref(each_ref_fn fn, void *cb_data)
-{
- return do_for_each_ref(&ref_cache, git_replace_ref_base, fn,
- strlen(git_replace_ref_base), 0, cb_data);
-}
-
-int for_each_namespaced_ref(each_ref_fn fn, void *cb_data)
-{
- struct strbuf buf = STRBUF_INIT;
- int ret;
- strbuf_addf(&buf, "%srefs/", get_git_namespace());
- ret = do_for_each_ref(&ref_cache, buf.buf, fn, 0, 0, cb_data);
- strbuf_release(&buf);
- return ret;
-}
-
-int for_each_rawref(each_ref_fn fn, void *cb_data)
-{
- return do_for_each_ref(&ref_cache, "", fn, 0,
- DO_FOR_EACH_INCLUDE_BROKEN, cb_data);
-}
-
static void unlock_ref(struct ref_lock *lock)
{
/* Do not free lock->lk -- atexit() still looks at them */
return ret;
}
+int set_worktree_head_symref(const char *gitdir, const char *target)
+{
+ static struct lock_file head_lock;
+ struct ref_lock *lock;
+ struct strbuf head_path = STRBUF_INIT;
+ const char *head_rel;
+ int ret;
+
+ strbuf_addf(&head_path, "%s/HEAD", absolute_path(gitdir));
+ if (hold_lock_file_for_update(&head_lock, head_path.buf,
+ LOCK_NO_DEREF) < 0) {
+ struct strbuf err = STRBUF_INIT;
+ unable_to_lock_message(head_path.buf, errno, &err);
+ error("%s", err.buf);
+ strbuf_release(&err);
+ strbuf_release(&head_path);
+ return -1;
+ }
+
+ /* head_rel will be "HEAD" for the main tree, "worktrees/wt/HEAD" for
+ linked trees */
+ head_rel = remove_leading_path(head_path.buf,
+ absolute_path(get_git_common_dir()));
+ /* to make use of create_symref_locked(), initialize ref_lock */
+ lock = xcalloc(1, sizeof(struct ref_lock));
+ lock->lk = &head_lock;
+ lock->ref_name = xstrdup(head_rel);
+ lock->orig_ref_name = xstrdup(head_rel);
+
+ ret = create_symref_locked(lock, head_rel, target, NULL);
+
+ unlock_ref(lock); /* will free lock */
+ strbuf_release(&head_path);
+ return ret;
+}
+
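For context, a hypothetical caller of the new set_worktree_head_symref() (the real user is the "git worktree" machinery; the helper name and error handling below are invented, written as if in builtin/worktree.c) might look like this:

    /*
     * Sketch: make the HEAD of the worktree whose git dir is
     * "worktree_gitdir" a symref to refs/heads/<branch>.
     */
    static int point_worktree_at_branch(const char *worktree_gitdir,
                                        const char *branch)
    {
        struct strbuf target = STRBUF_INIT;
        int ret;

        strbuf_addf(&target, "refs/heads/%s", branch);
        ret = set_worktree_head_symref(worktree_gitdir, target.buf);
        strbuf_release(&target);
        return ret;
    }
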
int reflog_exists(const char *refname)
{
struct stat st;
* reference itself, plus we might need to update the
* reference if --updateref was specified:
*/
- lock = lock_ref_sha1_basic(refname, sha1, NULL, NULL, 0, &type, &err);
+ lock = lock_ref_sha1_basic(refname, sha1, NULL, NULL, REF_NODEREF,
+ &type, &err);
if (!lock) {
error("cannot lock ref '%s': %s", refname, err.buf);
strbuf_release(&err);
int rename_ref_available(const char *oldname, const char *newname);
+/* We allow "recursive" symbolic refs. Only within reason, though */
+#define SYMREF_MAXDEPTH 5
+
+/* Include broken references in a do_for_each_ref*() iteration: */
+#define DO_FOR_EACH_INCLUDE_BROKEN 0x01
+
+/*
+ * The common backend for the for_each_*ref* functions
+ */
+int do_for_each_ref(const char *submodule, const char *base,
+ each_ref_fn fn, int trim, int flags, void *cb_data);
+
+int read_raw_ref(const char *refname, unsigned char *sha1,
+ struct strbuf *symref, unsigned int *flags);
+
#endif /* REFS_REFS_INTERNAL_H */
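
The for_each_*ref* wrappers removed from refs.c above become thin shims over this common backend entry point. A sketch of one such shim, mirroring the deleted for_each_ref_in() but using the new string-based submodule argument (a NULL submodule is assumed to select the current repository's ref cache), written as if it lived in the generic refs code:

    /* Sketch: a prefix-limited iteration built on the common backend. */
    static int for_each_ref_in_sketch(const char *prefix,
                                      each_ref_fn fn, void *cb_data)
    {
        /* NULL submodule: iterate the current repository's refs */
        return do_for_each_ref(NULL, prefix, fn, strlen(prefix), 0, cb_data);
    }
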
static int probe_rpc(struct rpc_state *rpc, struct slot_results *results)
{
struct active_request_slot *slot;
- struct curl_slist *headers = NULL;
+ struct curl_slist *headers = http_copy_default_headers();
struct strbuf buf = STRBUF_INIT;
int err;
static int post_rpc(struct rpc_state *rpc)
{
struct active_request_slot *slot;
- struct curl_slist *headers = NULL;
+ struct curl_slist *headers = http_copy_default_headers();
int use_gzip = rpc->gzip_request;
char *gzip_body = NULL;
size_t gzip_size = 0;
return refname_match(branch->merge[i]->src, refname);
}
-__attribute((format (printf,2,3)))
+__attribute__((format (printf,2,3)))
static const char *error_buf(struct strbuf *err, const char *fmt, ...)
{
if (err) {
"Your branch and '%s' have diverged,\n"
"and have %d and %d different commits each, "
"respectively.\n",
- theirs),
+ ours + theirs),
base, ours, theirs);
if (advice_status_hints)
strbuf_addf(sb,
#include "ll-merge.h"
#include "attr.h"
#include "pathspec.h"
+#include "sha1-lookup.h"
#define RESOLVED 0
#define PUNTED 1
/* automatically update cleanly resolved paths to the index */
static int rerere_autoupdate;
+static int rerere_dir_nr;
+static int rerere_dir_alloc;
+
+#define RR_HAS_POSTIMAGE 1
+#define RR_HAS_PREIMAGE 2
+static struct rerere_dir {
+ unsigned char sha1[20];
+ int status_alloc, status_nr;
+ unsigned char *status;
+} **rerere_dir;
+
+static void free_rerere_dirs(void)
+{
+ int i;
+ for (i = 0; i < rerere_dir_nr; i++) {
+ free(rerere_dir[i]->status);
+ free(rerere_dir[i]);
+ }
+ free(rerere_dir);
+ rerere_dir_nr = rerere_dir_alloc = 0;
+ rerere_dir = NULL;
+}
+
static void free_rerere_id(struct string_list_item *item)
{
free(item->util);
static const char *rerere_id_hex(const struct rerere_id *id)
{
- return id->hex;
+ return sha1_to_hex(id->collection->sha1);
+}
+
+static void fit_variant(struct rerere_dir *rr_dir, int variant)
+{
+ variant++;
+ ALLOC_GROW(rr_dir->status, variant, rr_dir->status_alloc);
+ if (rr_dir->status_nr < variant) {
+ memset(rr_dir->status + rr_dir->status_nr,
+ '\0', variant - rr_dir->status_nr);
+ rr_dir->status_nr = variant;
+ }
+}
+
+static void assign_variant(struct rerere_id *id)
+{
+ int variant;
+ struct rerere_dir *rr_dir = id->collection;
+
+ variant = id->variant;
+ if (variant < 0) {
+ for (variant = 0; variant < rr_dir->status_nr; variant++)
+ if (!rr_dir->status[variant])
+ break;
+ }
+ fit_variant(rr_dir, variant);
+ id->variant = variant;
}
const char *rerere_path(const struct rerere_id *id, const char *file)
if (!file)
return git_path("rr-cache/%s", rerere_id_hex(id));
- return git_path("rr-cache/%s/%s", rerere_id_hex(id), file);
+ if (id->variant <= 0)
+ return git_path("rr-cache/%s/%s", rerere_id_hex(id), file);
+
+ return git_path("rr-cache/%s/%s.%d",
+ rerere_id_hex(id), file, id->variant);
+}
+
+static int is_rr_file(const char *name, const char *filename, int *variant)
+{
+ const char *suffix;
+ char *ep;
+
+ if (!strcmp(name, filename)) {
+ *variant = 0;
+ return 1;
+ }
+ if (!skip_prefix(name, filename, &suffix) || *suffix != '.')
+ return 0;
+
+ errno = 0;
+ *variant = strtol(suffix + 1, &ep, 10);
+ if (errno || *ep)
+ return 0;
+ return 1;
+}
+
+static void scan_rerere_dir(struct rerere_dir *rr_dir)
+{
+ struct dirent *de;
+ DIR *dir = opendir(git_path("rr-cache/%s", sha1_to_hex(rr_dir->sha1)));
+
+ if (!dir)
+ return;
+ while ((de = readdir(dir)) != NULL) {
+ int variant;
+
+ if (is_rr_file(de->d_name, "postimage", &variant)) {
+ fit_variant(rr_dir, variant);
+ rr_dir->status[variant] |= RR_HAS_POSTIMAGE;
+ } else if (is_rr_file(de->d_name, "preimage", &variant)) {
+ fit_variant(rr_dir, variant);
+ rr_dir->status[variant] |= RR_HAS_PREIMAGE;
+ }
+ }
+ closedir(dir);
+}
+
+static const unsigned char *rerere_dir_sha1(size_t i, void *table)
+{
+ struct rerere_dir **rr_dir = table;
+ return rr_dir[i]->sha1;
+}
+
+static struct rerere_dir *find_rerere_dir(const char *hex)
+{
+ unsigned char sha1[20];
+ struct rerere_dir *rr_dir;
+ int pos;
+
+ if (get_sha1_hex(hex, sha1))
+ return NULL; /* BUG */
+ pos = sha1_pos(sha1, rerere_dir, rerere_dir_nr, rerere_dir_sha1);
+ if (pos < 0) {
+ rr_dir = xmalloc(sizeof(*rr_dir));
+ hashcpy(rr_dir->sha1, sha1);
+ rr_dir->status = NULL;
+ rr_dir->status_nr = 0;
+ rr_dir->status_alloc = 0;
+ pos = -1 - pos;
+
+ /* Make sure the array is big enough ... */
+ ALLOC_GROW(rerere_dir, rerere_dir_nr + 1, rerere_dir_alloc);
+ /* ... and add it in. */
+ rerere_dir_nr++;
+ memmove(rerere_dir + pos + 1, rerere_dir + pos,
+ (rerere_dir_nr - pos - 1) * sizeof(*rerere_dir));
+ rerere_dir[pos] = rr_dir;
+ scan_rerere_dir(rr_dir);
+ }
+ return rerere_dir[pos];
}
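
To make the new rr-cache layout concrete: each conflict ID owns one directory, variant 0 keeps the historical bare file names, and variant N is stored as preimage.N / postimage.N. A purely illustrative snippet, as if dropped into rerere.c next to is_rr_file() (not part of the patch):

    /* Illustration only: which on-disk names map to which variant. */
    static void show_variant_naming(void)
    {
        int variant;

        /* variant 0 keeps the unsuffixed, pre-existing names */
        if (is_rr_file("preimage", "preimage", &variant))
            fprintf(stderr, "preimage    -> variant %d\n", variant); /* 0 */

        /* later variants carry a ".<N>" suffix */
        if (is_rr_file("postimage.2", "postimage", &variant))
            fprintf(stderr, "postimage.2 -> variant %d\n", variant); /* 2 */

        /* unrelated names are rejected */
        if (!is_rr_file("thisimage", "preimage", &variant))
            fprintf(stderr, "thisimage   -> not a preimage variant\n");
    }
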
static int has_rerere_resolution(const struct rerere_id *id)
{
- struct stat st;
+ const int both = RR_HAS_POSTIMAGE|RR_HAS_PREIMAGE;
+ int variant = id->variant;
- return !stat(rerere_path(id, "postimage"), &st);
+ if (variant < 0)
+ return 0;
+ return ((id->collection->status[variant] & both) == both);
}
static struct rerere_id *new_rerere_id_hex(char *hex)
{
struct rerere_id *id = xmalloc(sizeof(*id));
- xsnprintf(id->hex, sizeof(id->hex), "%s", hex);
+ id->collection = find_rerere_dir(hex);
+ id->variant = -1; /* not known yet */
return id;
}
char *path;
unsigned char sha1[20];
struct rerere_id *id;
+ int variant;
/* There has to be the hash, tab, path and then NUL */
if (buf.len < 42 || get_sha1_hex(buf.buf, sha1))
die("corrupt MERGE_RR");
- if (buf.buf[40] != '\t')
+ if (buf.buf[40] != '.') {
+ variant = 0;
+ path = buf.buf + 40;
+ } else {
+ errno = 0;
+ variant = strtol(buf.buf + 41, &path, 10);
+ if (errno)
+ die("corrupt MERGE_RR");
+ }
+ if (*(path++) != '\t')
die("corrupt MERGE_RR");
buf.buf[40] = '\0';
- path = buf.buf + 41;
id = new_rerere_id_hex(buf.buf);
+ id->variant = variant;
string_list_insert(rr, path)->util = id;
}
strbuf_release(&buf);
id = rr->items[i].util;
if (!id)
continue;
- strbuf_addf(&buf, "%s\t%s%c",
- rerere_id_hex(id),
- rr->items[i].string, 0);
+ assert(id->variant >= 0);
+ if (0 < id->variant)
+ strbuf_addf(&buf, "%s.%d\t%s%c",
+ rerere_id_hex(id), id->variant,
+ rr->items[i].string, 0);
+ else
+ strbuf_addf(&buf, "%s\t%s%c",
+ rerere_id_hex(id),
+ rr->items[i].string, 0);
+
if (write_in_full(out_fd, buf.buf, buf.len) != buf.len)
die("unable to write rerere record");
error("There were errors while writing %s (%s)",
path, strerror(io.io.wrerror));
if (io.io.output && fclose(io.io.output))
- io.io.wrerror = error("Failed to flush %s: %s",
- path, strerror(errno));
+ io.io.wrerror = error_errno("Failed to flush %s", path);
if (hunk_no < 0) {
if (output)
return hunk_no;
}
-/*
- * Subclass of rerere_io that reads from an in-core buffer that is a
- * strbuf
- */
-struct rerere_io_mem {
- struct rerere_io io;
- struct strbuf input;
-};
-
-/*
- * ... and its getline() method implementation
- */
-static int rerere_mem_getline(struct strbuf *sb, struct rerere_io *io_)
-{
- struct rerere_io_mem *io = (struct rerere_io_mem *)io_;
- char *ep;
- size_t len;
-
- strbuf_release(sb);
- if (!io->input.len)
- return -1;
- ep = memchr(io->input.buf, '\n', io->input.len);
- if (!ep)
- ep = io->input.buf + io->input.len;
- else if (*ep == '\n')
- ep++;
- len = ep - io->input.buf;
- strbuf_add(sb, io->input.buf, len);
- strbuf_remove(&io->input, 0, len);
- return 0;
-}
-
-static int handle_cache(const char *path, unsigned char *sha1, const char *output)
-{
- mmfile_t mmfile[3] = {{NULL}};
- mmbuffer_t result = {NULL, 0};
- const struct cache_entry *ce;
- int pos, len, i, hunk_no;
- struct rerere_io_mem io;
- int marker_size = ll_merge_marker_size(path);
-
- /*
- * Reproduce the conflicted merge in-core
- */
- len = strlen(path);
- pos = cache_name_pos(path, len);
- if (0 <= pos)
- return -1;
- pos = -pos - 1;
-
- while (pos < active_nr) {
- enum object_type type;
- unsigned long size;
-
- ce = active_cache[pos++];
- if (ce_namelen(ce) != len || memcmp(ce->name, path, len))
- break;
- i = ce_stage(ce) - 1;
- if (!mmfile[i].ptr) {
- mmfile[i].ptr = read_sha1_file(ce->sha1, &type, &size);
- mmfile[i].size = size;
- }
- }
- for (i = 0; i < 3; i++)
- if (!mmfile[i].ptr && !mmfile[i].size)
- mmfile[i].ptr = xstrdup("");
-
- /*
- * NEEDSWORK: handle conflicts from merges with
- * merge.renormalize set, too
- */
- ll_merge(&result, path, &mmfile[0], NULL,
- &mmfile[1], "ours",
- &mmfile[2], "theirs", NULL);
- for (i = 0; i < 3; i++)
- free(mmfile[i].ptr);
-
- memset(&io, 0, sizeof(io));
- io.io.getline = rerere_mem_getline;
- if (output)
- io.io.output = fopen(output, "w");
- else
- io.io.output = NULL;
- strbuf_init(&io.input, 0);
- strbuf_attach(&io.input, result.ptr, result.size, result.size);
-
- /*
- * Grab the conflict ID and optionally write the original
- * contents with conflict markers out.
- */
- hunk_no = handle_path(sha1, (struct rerere_io *)&io, marker_size);
- strbuf_release(&io.input);
- if (io.io.output)
- fclose(io.io.output);
- return hunk_no;
-}
-
/*
* Look at a cache entry at "i" and see if it is not conflicting,
* conflicting and we are willing to handle, or conflicting and
return 0;
}
+/*
+ * Try using the given conflict resolution "ID" to see
+ * if that recorded conflict resolves cleanly what we
+ * got in the "cur".
+ */
+static int try_merge(const struct rerere_id *id, const char *path,
+ mmfile_t *cur, mmbuffer_t *result)
+{
+ int ret;
+ mmfile_t base = {NULL, 0}, other = {NULL, 0};
+
+ if (read_mmfile(&base, rerere_path(id, "preimage")) ||
+ read_mmfile(&other, rerere_path(id, "postimage")))
+ ret = 1;
+ else
+ /*
+ * A three-way merge. Note that this honors user-customizable
+ * low-level merge driver settings.
+ */
+ ret = ll_merge(result, path, &base, NULL, cur, "", &other, "", NULL);
+
+ free(base.ptr);
+ free(other.ptr);
+
+ return ret;
+}
+
/*
* Find the conflict identified by "id"; the change between its
* "preimage" (i.e. a previous contents with conflict markers) and its
{
FILE *f;
int ret;
- mmfile_t cur = {NULL, 0}, base = {NULL, 0}, other = {NULL, 0};
+ mmfile_t cur = {NULL, 0};
mmbuffer_t result = {NULL, 0};
/*
* Normalize the conflicts in path and write it out to
* "thisimage" temporary file.
*/
- if (handle_file(path, NULL, rerere_path(id, "thisimage")) < 0) {
- ret = 1;
- goto out;
- }
-
- if (read_mmfile(&cur, rerere_path(id, "thisimage")) ||
- read_mmfile(&base, rerere_path(id, "preimage")) ||
- read_mmfile(&other, rerere_path(id, "postimage"))) {
+ if ((handle_file(path, NULL, rerere_path(id, "thisimage")) < 0) ||
+ read_mmfile(&cur, rerere_path(id, "thisimage"))) {
ret = 1;
goto out;
}
- /*
- * A three-way merge. Note that this honors user-customizable
- * low-level merge driver settings.
- */
- ret = ll_merge(&result, path, &base, NULL, &cur, "", &other, "", NULL);
+ ret = try_merge(id, path, &cur, &result);
if (ret)
goto out;
* Mark that "postimage" was used to help gc.
*/
if (utime(rerere_path(id, "postimage"), NULL) < 0)
- warning("failed utime() on %s: %s",
- rerere_path(id, "postimage"),
- strerror(errno));
+ warning_errno("failed utime() on %s",
+ rerere_path(id, "postimage"));
/* Update "path" with the resolution */
f = fopen(path, "w");
if (!f)
- return error("Could not open %s: %s", path,
- strerror(errno));
+ return error_errno("Could not open %s", path);
if (fwrite(result.ptr, result.size, 1, f) != 1)
- error("Could not write %s: %s", path, strerror(errno));
+ error_errno("Could not write %s", path);
if (fclose(f))
- return error("Writing %s failed: %s", path,
- strerror(errno));
+ return error_errno("Writing %s failed", path);
out:
free(cur.ptr);
- free(base.ptr);
- free(other.ptr);
free(result.ptr);
return ret;
rollback_lock_file(&index_lock);
}
+static void remove_variant(struct rerere_id *id)
+{
+ unlink_or_warn(rerere_path(id, "postimage"));
+ unlink_or_warn(rerere_path(id, "preimage"));
+ id->collection->status[id->variant] = 0;
+}
+
/*
* The path indicated by rr_item may still have conflict for which we
* have a recorded resolution, in which case replay it and optionally
struct string_list *update)
{
const char *path = rr_item->string;
- const struct rerere_id *id = rr_item->util;
+ struct rerere_id *id = rr_item->util;
+ struct rerere_dir *rr_dir = id->collection;
+ int variant;
+
+ variant = id->variant;
+
+ /* Has the user resolved it already? */
+ if (variant >= 0) {
+ if (!handle_file(path, NULL, NULL)) {
+ copy_file(rerere_path(id, "postimage"), path, 0666);
+ id->collection->status[variant] |= RR_HAS_POSTIMAGE;
+ fprintf(stderr, "Recorded resolution for '%s'.\n", path);
+ free_rerere_id(rr_item);
+ rr_item->util = NULL;
+ return;
+ }
+ /*
+ * There may be other variants that can cleanly
+ * replay. Try them and update the variant number for
+ * this one.
+ */
+ }
+
+ /* Does any existing resolution apply cleanly? */
+ for (variant = 0; variant < rr_dir->status_nr; variant++) {
+ const int both = RR_HAS_PREIMAGE | RR_HAS_POSTIMAGE;
+ struct rerere_id vid = *id;
+
+ if ((rr_dir->status[variant] & both) != both)
+ continue;
+
+ vid.variant = variant;
+ if (merge(&vid, path))
+ continue; /* failed to replay */
- /* Is there a recorded resolution we could attempt to apply? */
- if (has_rerere_resolution(id)) {
- if (merge(id, path))
- return; /* failed to replay */
+ /*
+ * If there already is a different variant that applies
+ * cleanly, there is no point maintaining our own variant.
+ */
+ if (0 <= id->variant && id->variant != variant)
+ remove_variant(id);
if (rerere_autoupdate)
string_list_insert(update, path);
fprintf(stderr,
"Resolved '%s' using previous resolution.\n",
path);
- } else if (!handle_file(path, NULL, NULL)) {
- /* The user has resolved it. */
- copy_file(rerere_path(id, "postimage"), path, 0666);
- fprintf(stderr, "Recorded resolution for '%s'.\n", path);
- } else {
+ free_rerere_id(rr_item);
+ rr_item->util = NULL;
return;
}
- free_rerere_id(rr_item);
- rr_item->util = NULL;
+
+ /* None of the existing one applies; we need a new variant */
+ assign_variant(id);
+
+ variant = id->variant;
+ handle_file(path, NULL, rerere_path(id, "preimage"));
+ if (id->collection->status[variant] & RR_HAS_POSTIMAGE) {
+ const char *path = rerere_path(id, "postimage");
+ if (unlink(path))
+ die_errno("cannot unlink stray '%s'", path);
+ id->collection->status[variant] &= ~RR_HAS_POSTIMAGE;
+ }
+ id->collection->status[variant] |= RR_HAS_PREIMAGE;
+ fprintf(stderr, "Recorded preimage for '%s'\n", path);
}
static int do_plain_rerere(struct string_list *rr, int fd)
id = new_rerere_id(sha1);
string_list_insert(rr, path)->util = id;
- /*
- * If the directory does not exist, create
- * it. mkdir_in_gitdir() will fail with
- * EEXIST if there already is one.
- *
- * NEEDSWORK: make sure "gc" does not remove
- * preimage without removing the directory.
- */
- if (mkdir_in_gitdir(rerere_path(id, NULL)))
- continue;
-
- /*
- * We are the first to encounter this
- * conflict. Ask handle_file() to write the
- * normalized contents to the "preimage" file.
- */
- handle_file(path, NULL, rerere_path(id, "preimage"));
- fprintf(stderr, "Recorded preimage for '%s'\n", path);
+ /* Ensure that the directory exists. */
+ mkdir_in_gitdir(rerere_path(id, NULL));
}
for (i = 0; i < rr->nr; i++)
int rerere(int flags)
{
struct string_list merge_rr = STRING_LIST_INIT_DUP;
- int fd;
+ int fd, status;
fd = setup_rerere(&merge_rr, flags);
if (fd < 0)
return 0;
- return do_plain_rerere(&merge_rr, fd);
+ status = do_plain_rerere(&merge_rr, fd);
+ free_rerere_dirs();
+ return status;
+}
+
+/*
+ * Subclass of rerere_io that reads from an in-core buffer that is a
+ * strbuf
+ */
+struct rerere_io_mem {
+ struct rerere_io io;
+ struct strbuf input;
+};
+
+/*
+ * ... and its getline() method implementation
+ */
+static int rerere_mem_getline(struct strbuf *sb, struct rerere_io *io_)
+{
+ struct rerere_io_mem *io = (struct rerere_io_mem *)io_;
+ char *ep;
+ size_t len;
+
+ strbuf_release(sb);
+ if (!io->input.len)
+ return -1;
+ ep = memchr(io->input.buf, '\n', io->input.len);
+ if (!ep)
+ ep = io->input.buf + io->input.len;
+ else if (*ep == '\n')
+ ep++;
+ len = ep - io->input.buf;
+ strbuf_add(sb, io->input.buf, len);
+ strbuf_remove(&io->input, 0, len);
+ return 0;
+}
+
+static int handle_cache(const char *path, unsigned char *sha1, const char *output)
+{
+ mmfile_t mmfile[3] = {{NULL}};
+ mmbuffer_t result = {NULL, 0};
+ const struct cache_entry *ce;
+ int pos, len, i, hunk_no;
+ struct rerere_io_mem io;
+ int marker_size = ll_merge_marker_size(path);
+
+ /*
+ * Reproduce the conflicted merge in-core
+ */
+ len = strlen(path);
+ pos = cache_name_pos(path, len);
+ if (0 <= pos)
+ return -1;
+ pos = -pos - 1;
+
+ while (pos < active_nr) {
+ enum object_type type;
+ unsigned long size;
+
+ ce = active_cache[pos++];
+ if (ce_namelen(ce) != len || memcmp(ce->name, path, len))
+ break;
+ i = ce_stage(ce) - 1;
+ if (!mmfile[i].ptr) {
+ mmfile[i].ptr = read_sha1_file(ce->sha1, &type, &size);
+ mmfile[i].size = size;
+ }
+ }
+ for (i = 0; i < 3; i++)
+ if (!mmfile[i].ptr && !mmfile[i].size)
+ mmfile[i].ptr = xstrdup("");
+
+ /*
+ * NEEDSWORK: handle conflicts from merges with
+ * merge.renormalize set, too?
+ */
+ ll_merge(&result, path, &mmfile[0], NULL,
+ &mmfile[1], "ours",
+ &mmfile[2], "theirs", NULL);
+ for (i = 0; i < 3; i++)
+ free(mmfile[i].ptr);
+
+ memset(&io, 0, sizeof(io));
+ io.io.getline = rerere_mem_getline;
+ if (output)
+ io.io.output = fopen(output, "w");
+ else
+ io.io.output = NULL;
+ strbuf_init(&io.input, 0);
+ strbuf_attach(&io.input, result.ptr, result.size, result.size);
+
+ /*
+ * Grab the conflict ID and optionally write the original
+ * contents with conflict markers out.
+ */
+ hunk_no = handle_path(sha1, (struct rerere_io *)&io, marker_size);
+ strbuf_release(&io.input);
+ if (io.io.output)
+ fclose(io.io.output);
+ return hunk_no;
}
static int rerere_forget_one_path(const char *path, struct string_list *rr)
/* Nuke the recorded resolution for the conflict */
id = new_rerere_id(sha1);
+
+ for (id->variant = 0;
+ id->variant < id->collection->status_nr;
+ id->variant++) {
+ mmfile_t cur = { NULL, 0 };
+ mmbuffer_t result = {NULL, 0};
+ int cleanly_resolved;
+
+ if (!has_rerere_resolution(id))
+ continue;
+
+ handle_cache(path, sha1, rerere_path(id, "thisimage"));
+ if (read_mmfile(&cur, rerere_path(id, "thisimage"))) {
+ free(cur.ptr);
+ error("Failed to update conflicted state in '%s'", path);
+ goto fail_exit;
+ }
+ cleanly_resolved = !try_merge(id, path, &cur, &result);
+ free(result.ptr);
+ free(cur.ptr);
+ if (cleanly_resolved)
+ break;
+ }
+
+ if (id->collection->status_nr <= id->variant) {
+ error("no remembered resolution for '%s'", path);
+ goto fail_exit;
+ }
+
filename = rerere_path(id, "postimage");
- if (unlink(filename))
- return (errno == ENOENT
- ? error("no remembered resolution for %s", path)
- : error("cannot unlink %s: %s", filename, strerror(errno)));
+ if (unlink(filename)) {
+ if (errno == ENOENT)
+ error("no remembered resolution for %s", path);
+ else
+ error_errno("cannot unlink %s", filename);
+ goto fail_exit;
+ }
/*
* Update the preimage so that the user can resolve the
item->util = id;
fprintf(stderr, "Forgot resolution for %s\n", path);
return 0;
+
+fail_exit:
+ free(id);
+ return -1;
}
int rerere_forget(struct pathspec *pathspec)
* Garbage collection support
*/
-/*
- * Note that this is not reentrant but is used only one-at-a-time
- * so it does not matter right now.
- */
-static struct rerere_id *dirname_to_id(const char *name)
-{
- static struct rerere_id id;
- xsnprintf(id.hex, sizeof(id.hex), "%s", name);
- return &id;
-}
-
-static time_t rerere_created_at(const char *dir_name)
+static time_t rerere_created_at(struct rerere_id *id)
{
struct stat st;
- struct rerere_id *id = dirname_to_id(dir_name);
return stat(rerere_path(id, "preimage"), &st) ? (time_t) 0 : st.st_mtime;
}
-static time_t rerere_last_used_at(const char *dir_name)
+static time_t rerere_last_used_at(struct rerere_id *id)
{
struct stat st;
- struct rerere_id *id = dirname_to_id(dir_name);
return stat(rerere_path(id, "postimage"), &st) ? (time_t) 0 : st.st_mtime;
}
*/
static void unlink_rr_item(struct rerere_id *id)
{
- unlink(rerere_path(id, "thisimage"));
- unlink(rerere_path(id, "preimage"));
- unlink(rerere_path(id, "postimage"));
- /*
- * NEEDSWORK: what if this rmdir() fails? Wouldn't we then
- * assume that we already have preimage recorded in
- * do_plain_rerere()?
- */
- rmdir(rerere_path(id, NULL));
+ unlink_or_warn(rerere_path(id, "thisimage"));
+ remove_variant(id);
+ id->collection->status[id->variant] = 0;
+}
+
+static void prune_one(struct rerere_id *id, time_t now,
+ int cutoff_resolve, int cutoff_noresolve)
+{
+ time_t then;
+ int cutoff;
+
+ then = rerere_last_used_at(id);
+ if (then)
+ cutoff = cutoff_resolve;
+ else {
+ then = rerere_created_at(id);
+ if (!then)
+ return;
+ cutoff = cutoff_noresolve;
+ }
+ if (then < now - cutoff * 86400)
+ unlink_rr_item(id);
}
void rerere_gc(struct string_list *rr)
struct string_list to_remove = STRING_LIST_INIT_DUP;
DIR *dir;
struct dirent *e;
- int i, cutoff;
- time_t now = time(NULL), then;
+ int i;
+ time_t now = time(NULL);
int cutoff_noresolve = 15;
int cutoff_resolve = 60;
die_errno("unable to open rr-cache directory");
/* Collect stale conflict IDs ... */
while ((e = readdir(dir))) {
+ struct rerere_dir *rr_dir;
+ struct rerere_id id;
+ int now_empty;
+
if (is_dot_or_dotdot(e->d_name))
continue;
-
- then = rerere_last_used_at(e->d_name);
- if (then) {
- cutoff = cutoff_resolve;
- } else {
- then = rerere_created_at(e->d_name);
- if (!then)
- continue;
- cutoff = cutoff_noresolve;
+ rr_dir = find_rerere_dir(e->d_name);
+ if (!rr_dir)
+ continue; /* or should we remove e->d_name? */
+
+ now_empty = 1;
+ for (id.variant = 0, id.collection = rr_dir;
+ id.variant < id.collection->status_nr;
+ id.variant++) {
+ prune_one(&id, now, cutoff_resolve, cutoff_noresolve);
+ if (id.collection->status[id.variant])
+ now_empty = 0;
}
- if (then < now - cutoff * 86400)
+ if (now_empty)
string_list_append(&to_remove, e->d_name);
}
closedir(dir);
- /* ... and then remove them one-by-one */
+
+ /* ... and then remove the empty directories */
for (i = 0; i < to_remove.nr; i++)
- unlink_rr_item(dirname_to_id(to_remove.items[i].string));
+ rmdir(git_path("rr-cache/%s", to_remove.items[i].string));
string_list_clear(&to_remove, 0);
rollback_lock_file(&write_lock);
}
for (i = 0; i < merge_rr->nr; i++) {
struct rerere_id *id = merge_rr->items[i].util;
- if (!has_rerere_resolution(id))
+ if (!has_rerere_resolution(id)) {
unlink_rr_item(id);
+ rmdir(rerere_path(id, NULL));
+ }
}
unlink_or_warn(git_path_merge_rr());
rollback_lock_file(&write_lock);
*/
extern void *RERERE_RESOLVED;
+struct rerere_dir;
struct rerere_id {
- char hex[41];
+ struct rerere_dir *collection;
+ int variant;
};
extern int setup_rerere(struct string_list *, int);
while (tree_entry(&desc, &entry)) {
switch (object_type(entry.mode)) {
case OBJ_TREE:
- mark_tree_uninteresting(lookup_tree(entry.sha1));
+ mark_tree_uninteresting(lookup_tree(entry.oid->hash));
break;
case OBJ_BLOB:
- mark_blob_uninteresting(lookup_blob(entry.sha1));
+ mark_blob_uninteresting(lookup_blob(entry.oid->hash));
break;
default:
/* Subproject commit - not in this repository */
revs->skip_count = -1;
revs->max_count = -1;
revs->max_parents = -1;
+ revs->expand_tabs_in_log = -1;
revs->commit_format = CMIT_FMT_DEFAULT;
+ revs->expand_tabs_in_log_default = 8;
init_grep_defaults();
grep_init(&revs->grep_filter, prefix);
revs->verbose_header = 1;
revs->pretty_given = 1;
get_commit_format(arg+9, revs);
+ } else if (!strcmp(arg, "--expand-tabs")) {
+ revs->expand_tabs_in_log = 8;
+ } else if (!strcmp(arg, "--no-expand-tabs")) {
+ revs->expand_tabs_in_log = 0;
+ } else if (skip_prefix(arg, "--expand-tabs=", &arg)) {
+ int val;
+ if (strtol_i(arg, 10, &val) < 0 || val < 0)
+ die("'%s': not a non-negative integer", arg);
+ revs->expand_tabs_in_log = val;
} else if (!strcmp(arg, "--show-notes") || !strcmp(arg, "--notes")) {
revs->show_notes = 1;
revs->show_notes_given = 1;
if (revs->first_parent_only && revs->bisect)
die(_("--first-parent is incompatible with --bisect"));
+ if (revs->expand_tabs_in_log < 0)
+ revs->expand_tabs_in_log = revs->expand_tabs_in_log_default;
+
return left;
}
linear:1;
struct date_mode date_mode;
+ int expand_tabs_in_log; /* unset if negative */
+ int expand_tabs_in_log_default;
unsigned int abbrev;
enum cmit_fmt commit_format;
if (waiting < 0) {
failed_errno = errno;
- error("waitpid for %s failed: %s", argv0, strerror(errno));
+ error_errno("waitpid for %s failed", argv0);
} else if (waiting != pid) {
error("waitpid is confused (%s)", argv0);
} else if (WIFSIGNALED(status)) {
}
}
if (cmd->pid < 0)
- error("cannot fork() for %s: %s", cmd->argv[0],
- strerror(errno));
+ error_errno("cannot fork() for %s", cmd->argv[0]);
else if (cmd->clean_on_exit)
mark_child_for_cleanup(cmd->pid);
cmd->dir, fhin, fhout, fherr);
failed_errno = errno;
if (cmd->pid < 0 && (!cmd->silent_exec_failure || errno != ENOENT))
- error("cannot spawn %s: %s", cmd->argv[0], strerror(errno));
+ error_errno("cannot spawn %s", cmd->argv[0]);
if (cmd->clean_on_exit && cmd->pid >= 0)
mark_child_for_cleanup(cmd->pid);
struct async *async = data;
intptr_t ret;
+ if (async->isolate_sigpipe) {
+ sigset_t mask;
+ sigemptyset(&mask);
+ sigaddset(&mask, SIGPIPE);
+ if (pthread_sigmask(SIG_BLOCK, &mask, NULL) < 0) {
+ ret = error("unable to block SIGPIPE in async thread");
+ return (void *)ret;
+ }
+ }
+
pthread_setspecific(async_key, async);
ret = async->proc(async->proc_in, async->proc_out, async->data);
return (void *)ret;
if (pipe(fdin) < 0) {
if (async->out > 0)
close(async->out);
- return error("cannot create pipe: %s", strerror(errno));
+ return error_errno("cannot create pipe");
}
async->in = fdin[1];
}
close_pair(fdin);
else if (async->in)
close(async->in);
- return error("cannot create pipe: %s", strerror(errno));
+ return error_errno("cannot create pipe");
}
async->out = fdout[0];
}
async->pid = fork();
if (async->pid < 0) {
- error("fork (async) failed: %s", strerror(errno));
+ error_errno("fork (async) failed");
goto error;
}
if (!async->pid) {
{
int err = pthread_create(&async->tid, NULL, run_thread, async);
if (err) {
- error("cannot create thread: %s", strerror(err));
+ error_errno("cannot create thread");
goto error;
}
}
static struct strbuf path = STRBUF_INIT;
strbuf_reset(&path);
- strbuf_git_path(&path, "hooks/%s", name);
+ if (git_hooks_path)
+ strbuf_addf(&path, "%s/%s", git_hooks_path, name);
+ else
+ strbuf_git_path(&path, "hooks/%s", name);
if (access(path.buf, X_OK) < 0)
return NULL;
return path.buf;
int proc_in;
int proc_out;
#endif
+ int isolate_sigpipe;
};
int start_async(struct async *async);
demux.proc = sideband_demux;
demux.data = fd;
demux.out = -1;
+ demux.isolate_sigpipe = 1;
if (start_async(&demux))
die("send-pack: unable to fork off sideband demultiplexer");
in = demux.out;
close(out);
if (git_connection_is_socket(conn))
shutdown(fd[0], SHUT_WR);
- if (use_sideband)
+ if (use_sideband) {
+ close(demux.out);
finish_async(&demux);
+ }
fd[1] = -1;
return -1;
}
packet_flush(out);
if (use_sideband && cmds_sent) {
+ close(demux.out);
if (finish_async(&demux)) {
error("error in sideband demultiplexer");
ret = -1;
}
- close(demux.out);
}
if (ret < 0)
return rollback_single_pick();
}
if (!f)
- return error(_("cannot open %s: %s"), git_path_head_file(),
- strerror(errno));
+ return error_errno(_("cannot open %s"), git_path_head_file());
if (strbuf_getline_lf(&buf, f)) {
error(_("cannot read %s: %s"), git_path_head_file(),
ferror(f) ? strerror(errno) : _("unexpected end of file"));
out:
if (ret) {
- error("unable to update %s: %s", path, strerror(errno));
+ error_errno("unable to update %s", path);
if (fp)
fclose(fp);
else if (fd >= 0)
static int inside_git_dir = -1;
static int inside_work_tree = -1;
static int work_tree_config_is_bogus;
-static struct string_list unknown_extensions = STRING_LIST_INIT_DUP;
static struct startup_info the_startup_info;
struct startup_info *startup_info = &the_startup_info;
return NULL;
}
} else {
- sanitized = xstrfmt("%.*s%s", len, prefix, path);
+ sanitized = xstrfmt("%.*s%s", len, len ? prefix : "", path);
if (remaining_prefix)
*remaining_prefix = len;
if (normalize_path_copy_len(sanitized, sanitized, remaining_prefix)) {
initialized = 1;
}
-static int check_repo_format(const char *var, const char *value, void *cb)
+static int check_repo_format(const char *var, const char *value, void *vdata)
{
+ struct repository_format *data = vdata;
const char *ext;
if (strcmp(var, "core.repositoryformatversion") == 0)
- repository_format_version = git_config_int(var, value);
- else if (strcmp(var, "core.sharedrepository") == 0)
- shared_repository = git_config_perm(var, value);
+ data->version = git_config_int(var, value);
else if (skip_prefix(var, "extensions.", &ext)) {
/*
* record any known extensions here; otherwise,
if (!strcmp(ext, "noop"))
;
else if (!strcmp(ext, "preciousobjects"))
- repository_format_precious_objects = git_config_bool(var, value);
+ data->precious_objects = git_config_bool(var, value);
else
- string_list_append(&unknown_extensions, ext);
+ string_list_append(&data->unknown_extensions, ext);
+ } else if (strcmp(var, "core.bare") == 0) {
+ data->is_bare = git_config_bool(var, value);
+ } else if (strcmp(var, "core.worktree") == 0) {
+ if (!value)
+ return config_error_nonbool(var);
+ data->work_tree = xstrdup(value);
}
return 0;
}
static int check_repository_format_gently(const char *gitdir, int *nongit_ok)
{
struct strbuf sb = STRBUF_INIT;
- const char *repo_config;
- config_fn_t fn;
- int ret = 0;
-
- string_list_clear(&unknown_extensions, 0);
+ struct strbuf err = STRBUF_INIT;
+ struct repository_format candidate;
+ int has_common;
- if (get_common_dir(&sb, gitdir))
- fn = check_repo_format;
- else
- fn = check_repository_format_version;
+ has_common = get_common_dir(&sb, gitdir);
strbuf_addstr(&sb, "/config");
- repo_config = sb.buf;
+ read_repository_format(&candidate, sb.buf);
+ strbuf_release(&sb);
/*
- * git_config() can't be used here because it calls git_pathdup()
- * to get $GIT_CONFIG/config. That call will make setup_git_env()
- * set git_dir to ".git".
- *
- * We are in gitdir setup, no git dir has been found useable yet.
- * Use a gentler version of git_config() to check if this repo
- * is a good one.
+ * For historical use of check_repository_format() in git-init,
+ * we treat a missing config as a silent "ok", even when nongit_ok
+ * is unset.
*/
- git_config_early(fn, NULL, repo_config);
- if (GIT_REPO_VERSION_READ < repository_format_version) {
- if (!nongit_ok)
- die ("Expected git repo version <= %d, found %d",
- GIT_REPO_VERSION_READ, repository_format_version);
- warning("Expected git repo version <= %d, found %d",
- GIT_REPO_VERSION_READ, repository_format_version);
- warning("Please upgrade Git");
- *nongit_ok = -1;
- ret = -1;
+ if (candidate.version < 0)
+ return 0;
+
+ if (verify_repository_format(&candidate, &err) < 0) {
+ if (nongit_ok) {
+ warning("%s", err.buf);
+ strbuf_release(&err);
+ *nongit_ok = -1;
+ return -1;
+ }
+ die("%s", err.buf);
}
- if (repository_format_version >= 1 && unknown_extensions.nr) {
+ repository_format_precious_objects = candidate.precious_objects;
+ string_list_clear(&candidate.unknown_extensions, 0);
+ if (!has_common) {
+ if (candidate.is_bare != -1) {
+ is_bare_repository_cfg = candidate.is_bare;
+ if (is_bare_repository_cfg == 1)
+ inside_work_tree = -1;
+ }
+ if (candidate.work_tree) {
+ free(git_work_tree_cfg);
+ git_work_tree_cfg = candidate.work_tree;
+ inside_work_tree = -1;
+ }
+ } else {
+ free(candidate.work_tree);
+ }
+
+ return 0;
+}
+
+int read_repository_format(struct repository_format *format, const char *path)
+{
+ memset(format, 0, sizeof(*format));
+ format->version = -1;
+ format->is_bare = -1;
+ string_list_init(&format->unknown_extensions, 1);
+ git_config_from_file(check_repo_format, path, format);
+ return format->version;
+}
+
+int verify_repository_format(const struct repository_format *format,
+ struct strbuf *err)
+{
+ if (GIT_REPO_VERSION_READ < format->version) {
+ strbuf_addf(err, _("Expected git repo version <= %d, found %d"),
+ GIT_REPO_VERSION_READ, format->version);
+ return -1;
+ }
+
+ if (format->version >= 1 && format->unknown_extensions.nr) {
int i;
- if (!nongit_ok)
- die("unknown repository extension: %s",
- unknown_extensions.items[0].string);
+ strbuf_addstr(err, _("unknown repository extensions found:"));
- for (i = 0; i < unknown_extensions.nr; i++)
- warning("unknown repository extension: %s",
- unknown_extensions.items[i].string);
- *nongit_ok = -1;
- ret = -1;
+ for (i = 0; i < format->unknown_extensions.nr; i++)
+ strbuf_addf(err, "\n\t%s",
+ format->unknown_extensions.items[i].string);
+ return -1;
}
- strbuf_release(&sb);
- return ret;
+ return 0;
}
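
The pair of helpers above splits "read the candidate config" from "decide whether we support it". A hypothetical caller outside the normal setup sequence (function name invented; written as if in setup.c) could combine them like this:

    /* Sketch: check a candidate git dir the way the setup code now does. */
    static int looks_like_supported_repo(const char *gitdir)
    {
        struct strbuf path = STRBUF_INIT;
        struct strbuf err = STRBUF_INIT;
        struct repository_format fmt;
        int ok;

        strbuf_addf(&path, "%s/config", gitdir);
        read_repository_format(&fmt, path.buf);

        /* a negative version means "no readable config"; accept it */
        ok = fmt.version < 0 || !verify_repository_format(&fmt, &err);
        if (!ok)
            warning("%s", err.buf);

        string_list_clear(&fmt.unknown_extensions, 0);
        free(fmt.work_tree);
        strbuf_release(&err);
        strbuf_release(&path);
        return ok;
    }
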
/*
return -(i & 0666);
}
-int check_repository_format_version(const char *var, const char *value, void *cb)
-{
- int ret = check_repo_format(var, value, cb);
- if (ret)
- return ret;
- if (strcmp(var, "core.bare") == 0) {
- is_bare_repository_cfg = git_config_bool(var, value);
- if (is_bare_repository_cfg == 1)
- inside_work_tree = -1;
- } else if (strcmp(var, "core.worktree") == 0) {
- if (!value)
- return config_error_nonbool(var);
- free(git_work_tree_cfg);
- git_work_tree_cfg = xstrdup(value);
- inside_work_tree = -1;
- }
- return 0;
-}
-
-int check_repository_format(void)
+void check_repository_format(void)
{
check_repository_format_gently(get_git_dir(), NULL);
startup_info->have_repository = 1;
- return 0;
}
/*
return -1;
}
}
- if (!strcmp_icase(ent->base, normalized_objdir)) {
+ if (!fspathcmp(ent->base, normalized_objdir)) {
free(ent);
return -1;
}
PROT_READ, MAP_PRIVATE,
p->pack_fd, win->offset);
if (win->base == MAP_FAILED)
- die("packfile %s cannot be mapped: %s",
- p->pack_name,
- strerror(errno));
+ die_errno("packfile %s cannot be mapped",
+ p->pack_name);
if (!win->offset && win->len == p->pack_size
&& !p->do_not_close)
close_pack_fd(p);
dir = opendir(path.buf);
if (!dir) {
if (errno != ENOENT)
- error("unable to open object pack directory: %s: %s",
- path.buf, strerror(errno));
+ error_errno("unable to open object pack directory: %s",
+ path.buf);
strbuf_release(&path);
return;
}
unlink_or_warn(tmpfile);
if (ret) {
if (ret != EEXIST) {
- return error("unable to write sha1 filename %s: %s", filename, strerror(ret));
+ return error_errno("unable to write sha1 filename %s", filename);
}
/* FIXME!!! Collision check here ? */
}
static int write_buffer(int fd, const void *buf, size_t len)
{
if (write_in_full(fd, buf, len) < 0)
- return error("file write error (%s)", strerror(errno));
+ return error_errno("file write error");
return 0;
}
if (errno == EACCES)
return error("insufficient permission for adding an object to repository database %s", get_object_directory());
else
- return error("unable to create temporary file: %s", strerror(errno));
+ return error_errno("unable to create temporary file");
}
/* Set it up */
utb.actime = mtime;
utb.modtime = mtime;
if (utime(tmp_file.buf, &utb) < 0)
- warning("failed utime() on %s: %s",
- tmp_file.buf, strerror(errno));
+ warning_errno("failed utime() on %s", tmp_file.buf);
}
return finalize_object_file(tmp_file.buf, filename);
if (size == read_in_full(fd, buf, size))
ret = index_mem(sha1, buf, size, type, path, flags);
else
- ret = error("short read %s", strerror(errno));
+ ret = error_errno("short read");
free(buf);
} else {
void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
case S_IFREG:
fd = open(path, O_RDONLY);
if (fd < 0)
- return error("open(\"%s\"): %s", path,
- strerror(errno));
+ return error_errno("open(\"%s\")", path);
if (index_fd(sha1, fd, st, OBJ_BLOB, path, flags) < 0)
return error("%s: failed to insert into database",
path);
break;
case S_IFLNK:
- if (strbuf_readlink(&sb, path, st->st_size)) {
- char *errstr = strerror(errno);
- return error("readlink(\"%s\"): %s", path,
- errstr);
- }
+ if (strbuf_readlink(&sb, path, st->st_size))
+ return error_errno("readlink(\"%s\")", path);
if (!(flags & HASH_WRITE_OBJECT))
hash_sha1_file(sb.buf, sb.len, blob_type, sha1);
else if (write_sha1_file(sb.buf, sb.len, blob_type, sha1))
if (!dir) {
if (errno == ENOENT)
return 0;
- return error("unable to open %s: %s", path->buf, strerror(errno));
+ return error_errno("unable to open %s", path->buf);
}
while ((de = readdir(dir))) {
return get_sha1_with_context(name, 0, sha1, &unused);
}
+/*
+ * This is like "get_sha1()", but for struct object_id.
+ */
+int get_oid(const char *name, struct object_id *oid)
+{
+ return get_sha1(name, oid->hash);
+}
+
+
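A hedged usage sketch for the new get_oid() convenience wrapper, written as if inside a builtin command (the object name and output format are illustrative only):

    /* Sketch: resolve an extended object name into an object_id. */
    static void print_resolved(const char *name)
    {
        struct object_id oid;

        if (get_oid(name, &oid))    /* non-zero means "could not resolve" */
            die("not a valid object name: %s", name);
        printf("%s\n", oid_to_hex(&oid));
    }
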
/*
* Many callers know that the user meant to name a commit-ish by
* syntactical positions where the object name appears. Calling this
* To keep track of the shared entries between
* istate->base->cache[] and istate->cache[], base entry
* position is stored in each base entry. All positions start
- * from 1 instead of 0, which is resrved to say "this is a new
+ * from 1 instead of 0, which is reserved to say "this is a new
* entry".
*/
for (i = 0; i < base->cache_nr; i++)
struct string_list_item *unsorted_string_list_lookup(struct string_list *list,
const char *string)
{
- int i;
+ struct string_list_item *item;
compare_strings_fn cmp = list->cmp ? list->cmp : strcmp;
- for (i = 0; i < list->nr; i++)
- if (!cmp(string, list->items[i].string))
- return list->items + i;
+ for_each_string_list_item(item, list)
+ if (!cmp(string, item->string))
+ return item;
return NULL;
}
lookup_path
};
-static struct submodule_cache cache;
+static struct submodule_cache the_submodule_cache;
static int is_cache_init;
static int config_path_cmp(const struct submodule_entry *a,
struct hashmap_iter iter;
struct submodule_entry *entry;
- hashmap_iter_init(&cache->for_name, &iter);
- entry = hashmap_iter_next(&iter);
+ entry = hashmap_iter_first(&cache->for_name, &iter);
if (!entry)
return NULL;
return entry->config;
if (is_cache_init)
return;
- cache_init(&cache);
+ cache_init(&the_submodule_cache);
is_cache_init = 1;
}
int parse_submodule_config_option(const char *var, const char *value)
{
struct parse_config_parameter parameter;
- parameter.cache = &cache;
+ parameter.cache = &the_submodule_cache;
parameter.commit_sha1 = NULL;
parameter.gitmodules_sha1 = null_sha1;
parameter.overwrite = 1;
const char *name)
{
ensure_cache_init();
- return config_from_name(&cache, commit_sha1, name);
+ return config_from_name(&the_submodule_cache, commit_sha1, name);
}
const struct submodule *submodule_from_path(const unsigned char *commit_sha1,
const char *path)
{
ensure_cache_init();
- return config_from_path(&cache, commit_sha1, path);
+ return config_from_path(&the_submodule_cache, commit_sha1, path);
}
void submodule_free(void)
{
- cache_free(&cache);
+ cache_free(&the_submodule_cache);
is_cache_init = 0;
}
#include "argv-array.h"
#include "blob.h"
#include "thread-utils.h"
+#include "quote.h"
static int config_fetch_recurse_submodules = RECURSE_SUBMODULES_ON_DEMAND;
static int parallel_jobs = 1;
return 0;
}
+const char *submodule_strategy_to_string(const struct submodule_update_strategy *s)
+{
+ struct strbuf sb = STRBUF_INIT;
+ switch (s->type) {
+ case SM_UPDATE_CHECKOUT:
+ return "checkout";
+ case SM_UPDATE_MERGE:
+ return "merge";
+ case SM_UPDATE_REBASE:
+ return "rebase";
+ case SM_UPDATE_NONE:
+ return "none";
+ case SM_UPDATE_UNSPECIFIED:
+ return NULL;
+ case SM_UPDATE_COMMAND:
+ strbuf_addf(&sb, "!%s", s->command);
+ return strbuf_detach(&sb, NULL);
+ }
+ return NULL;
+}
+
void handle_ignore_submodules_arg(struct diff_options *diffopt,
const char *arg)
{
argv[1] = sha1_to_hex(sha1);
cp.argv = argv;
- cp.env = local_repo_env;
+ prepare_submodule_repo_env(&cp.env_array);
cp.git_cmd = 1;
cp.no_stdin = 1;
cp.out = -1;
const char *argv[] = {"push", NULL};
cp.argv = argv;
- cp.env = local_repo_env;
+ prepare_submodule_repo_env(&cp.env_array);
cp.git_cmd = 1;
cp.no_stdin = 1;
cp.dir = path;
argv[3] = sha1_to_hex(sha1);
cp.argv = argv;
- cp.env = local_repo_env;
+ prepare_submodule_repo_env(&cp.env_array);
cp.git_cmd = 1;
cp.no_stdin = 1;
cp.dir = path;
if (is_directory(git_dir)) {
child_process_init(cp);
cp->dir = strbuf_detach(&submodule_path, NULL);
- cp->env = local_repo_env;
+ prepare_submodule_repo_env(&cp->env_array);
cp->git_cmd = 1;
if (!spf->quiet)
strbuf_addf(err, "Fetching submodule %s%s\n",
argv[2] = "-uno";
cp.argv = argv;
- cp.env = local_repo_env;
+ prepare_submodule_repo_env(&cp.env_array);
cp.git_cmd = 1;
cp.no_stdin = 1;
cp.out = -1;
/* Now test that all nested submodules use a gitfile too */
cp.argv = argv;
- cp.env = local_repo_env;
+ prepare_submodule_repo_env(&cp.env_array);
cp.git_cmd = 1;
cp.no_stdin = 1;
cp.no_stderr = 1;
return 0;
cp.argv = argv;
- cp.env = local_repo_env;
+ prepare_submodule_repo_env(&cp.env_array);
cp.git_cmd = 1;
cp.no_stdin = 1;
cp.out = -1;
{
return parallel_jobs;
}
+
+void prepare_submodule_repo_env(struct argv_array *out)
+{
+ const char * const *var;
+
+ for (var = local_repo_env; *var; var++) {
+ if (strcmp(*var, CONFIG_DATA_ENVIRONMENT))
+ argv_array_push(out, *var);
+ }
+}
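
The conversions above replace "cp.env = local_repo_env" with this helper so that configuration passed in via the environment still reaches the submodule process. A minimal sketch of the resulting pattern, as if inside submodule.c (the helper name and arguments are invented for illustration):

    /* Sketch: spawn a git subcommand inside a submodule's work tree. */
    static int run_in_submodule(const char *path, const char *subcommand)
    {
        struct child_process cp = CHILD_PROCESS_INIT;

        argv_array_push(&cp.args, subcommand);
        /* drop repo-specific variables, keep config from the environment */
        prepare_submodule_repo_env(&cp.env_array);
        cp.git_cmd = 1;
        cp.no_stdin = 1;
        cp.dir = path;              /* run inside the submodule */
        return run_command(&cp);
    }
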
void gitmodules_config(void);
int parse_submodule_update_strategy(const char *value,
struct submodule_update_strategy *dst);
+const char *submodule_strategy_to_string(const struct submodule_update_strategy *s);
void handle_ignore_submodules_arg(struct diff_options *diffopt, const char *);
void show_submodule_summary(FILE *f, const char *path,
const char *line_prefix,
void connect_work_tree_and_git_dir(const char *work_tree, const char *git_dir);
int parallel_submodules(void);
+/*
+ * Prepare the "env_array" parameter of a "struct child_process" for executing
+ * a submodule by clearing any repo-specific environment variables, but
+ * retaining any config in the environment.
+ */
+void prepare_submodule_repo_env(struct argv_array *out);
+
#endif
-x::
Turn on shell tracing (i.e., `set -x`) during the tests
- themselves. Implies `--verbose`. Note that this can cause
- failures in some tests which redirect and test the
- output of shell functions. Use with caution.
+ themselves. Implies `--verbose`. Note that in non-bash shells,
+ this can cause failures in some tests which redirect and test
+ the output of shell functions. Use with caution.
-d::
--debug::
--- /dev/null
+/test-chmtime
+/test-ctype
+/test-config
+/test-date
+/test-delta
+/test-dump-cache-tree
+/test-dump-split-index
+/test-dump-untracked-cache
+/test-fake-ssh
+/test-scrap-cache-tree
+/test-genrandom
+/test-hashmap
+/test-index-version
+/test-line-buffer
+/test-match-trees
+/test-mergesort
+/test-mktemp
+/test-parse-options
+/test-path-utils
+/test-prio-queue
+/test-read-cache
+/test-regex
+/test-revision-walking
+/test-run-command
+/test-sha1
+/test-sha1-array
+/test-sigchain
+/test-string-list
+/test-submodule-config
+/test-subprocess
+/test-svn-fe
+/test-urlmatch-normalization
+/test-wildmatch
--- /dev/null
+/*
+ * This program can either change modification time of the given
+ * file(s) or just print it. The program does not change atime or
+ * ctime (their values are explicitly preserved).
+ *
+ * The mtime can be changed to an absolute value:
+ *
+ * test-chmtime =<seconds> file...
+ *
+ * Relative to the current time as returned by time(3):
+ *
+ * test-chmtime =+<seconds> (or =-<seconds>) file...
+ *
+ * Or relative to the current mtime of the file:
+ *
+ * test-chmtime <seconds> file...
+ * test-chmtime +<seconds> (or -<seconds>) file...
+ *
+ * Examples:
+ *
+ * To just print the mtime use --verbose and set the file mtime offset to 0:
+ *
+ * test-chmtime -v +0 file
+ *
+ * To set the mtime to current time:
+ *
+ * test-chmtime =+0 file
+ *
+ */
+#include "git-compat-util.h"
+#include <utime.h>
+
+static const char usage_str[] = "-v|--verbose (+|=|=+|=-|-)<seconds> <file>...";
+
+static int timespec_arg(const char *arg, long int *set_time, int *set_eq)
+{
+ char *test;
+ const char *timespec = arg;
+ *set_eq = (*timespec == '=') ? 1 : 0;
+ if (*set_eq) {
+ timespec++;
+ if (*timespec == '+') {
+ *set_eq = 2; /* relative "in the future" */
+ timespec++;
+ }
+ }
+ *set_time = strtol(timespec, &test, 10);
+ if (*test) {
+ fprintf(stderr, "Not a base-10 integer: %s\n", arg + 1);
+ return 0;
+ }
+ if ((*set_eq && *set_time < 0) || *set_eq == 2) {
+ time_t now = time(NULL);
+ *set_time += now;
+ }
+ return 1;
+}
+
+int main(int argc, char *argv[])
+{
+ static int verbose;
+
+ int i = 1;
+ /* no mtime change by default */
+ int set_eq = 0;
+ long int set_time = 0;
+
+ if (argc < 3)
+ goto usage;
+
+ if (strcmp(argv[i], "--verbose") == 0 || strcmp(argv[i], "-v") == 0) {
+ verbose = 1;
+ ++i;
+ }
+ if (timespec_arg(argv[i], &set_time, &set_eq))
+ ++i;
+ else
+ goto usage;
+
+ for (; i < argc; i++) {
+ struct stat sb;
+ struct utimbuf utb;
+
+ if (stat(argv[i], &sb) < 0) {
+ fprintf(stderr, "Failed to stat %s: %s\n",
+ argv[i], strerror(errno));
+ return 1;
+ }
+
+#ifdef GIT_WINDOWS_NATIVE
+ if (!(sb.st_mode & S_IWUSR) &&
+ chmod(argv[i], sb.st_mode | S_IWUSR)) {
+ fprintf(stderr, "Could not make user-writable %s: %s",
+ argv[i], strerror(errno));
+ return 1;
+ }
+#endif
+
+ utb.actime = sb.st_atime;
+ utb.modtime = set_eq ? set_time : sb.st_mtime + set_time;
+
+ if (verbose) {
+ uintmax_t mtime = utb.modtime < 0 ? 0: utb.modtime;
+ printf("%"PRIuMAX"\t%s\n", mtime, argv[i]);
+ }
+
+ if (utb.modtime != sb.st_mtime && utime(argv[i], &utb) < 0) {
+ fprintf(stderr, "Failed to modify time on %s: %s\n",
+ argv[i], strerror(errno));
+ return 1;
+ }
+ }
+
+ return 0;
+
+usage:
+ fprintf(stderr, "usage: %s %s\n", argv[0], usage_str);
+ return 1;
+}
--- /dev/null
+#include "cache.h"
+#include "string-list.h"
+
+/*
+ * This program exposes the C API of the configuration mechanism
+ * as a set of simple commands in order to facilitate testing.
+ *
+ * Reads stdin and prints result of command to stdout:
+ *
+ * get_value -> prints the value with highest priority for the entered key
+ *
+ * get_value_multi -> prints all values for the entered key in increasing order
+ * of priority
+ *
+ * get_int -> print integer value for the entered key or die
+ *
+ * get_bool -> print bool value for the entered key or die
+ *
+ * get_string -> print string value for the entered key or die
+ *
+ * configset_get_value -> returns value with the highest priority for the entered key
+ * from a config_set constructed from files entered as arguments.
+ *
+ * configset_get_value_multi -> returns value_list for the entered key sorted in
+ * ascending order of priority from a config_set
+ * constructed from files entered as arguments.
+ *
+ * Examples:
+ *
+ * To print the value with highest priority for key "foo.bAr Baz.rock":
+ * test-config get_value "foo.bAr Baz.rock"
+ *
+ */
+
+
+int main(int argc, char **argv)
+{
+ int i, val;
+ const char *v;
+ const struct string_list *strptr;
+ struct config_set cs;
+ git_configset_init(&cs);
+
+ if (argc < 2) {
+ fprintf(stderr, "Please, provide a command name on the command-line\n");
+ goto exit1;
+ } else if (argc == 3 && !strcmp(argv[1], "get_value")) {
+ if (!git_config_get_value(argv[2], &v)) {
+ if (!v)
+ printf("(NULL)\n");
+ else
+ printf("%s\n", v);
+ goto exit0;
+ } else {
+ printf("Value not found for \"%s\"\n", argv[2]);
+ goto exit1;
+ }
+ } else if (argc == 3 && !strcmp(argv[1], "get_value_multi")) {
+ strptr = git_config_get_value_multi(argv[2]);
+ if (strptr) {
+ for (i = 0; i < strptr->nr; i++) {
+ v = strptr->items[i].string;
+ if (!v)
+ printf("(NULL)\n");
+ else
+ printf("%s\n", v);
+ }
+ goto exit0;
+ } else {
+ printf("Value not found for \"%s\"\n", argv[2]);
+ goto exit1;
+ }
+ } else if (argc == 3 && !strcmp(argv[1], "get_int")) {
+ if (!git_config_get_int(argv[2], &val)) {
+ printf("%d\n", val);
+ goto exit0;
+ } else {
+ printf("Value not found for \"%s\"\n", argv[2]);
+ goto exit1;
+ }
+ } else if (argc == 3 && !strcmp(argv[1], "get_bool")) {
+ if (!git_config_get_bool(argv[2], &val)) {
+ printf("%d\n", val);
+ goto exit0;
+ } else {
+ printf("Value not found for \"%s\"\n", argv[2]);
+ goto exit1;
+ }
+ } else if (argc == 3 && !strcmp(argv[1], "get_string")) {
+ if (!git_config_get_string_const(argv[2], &v)) {
+ printf("%s\n", v);
+ goto exit0;
+ } else {
+ printf("Value not found for \"%s\"\n", argv[2]);
+ goto exit1;
+ }
+ } else if (!strcmp(argv[1], "configset_get_value")) {
+ for (i = 3; i < argc; i++) {
+ int err;
+ if ((err = git_configset_add_file(&cs, argv[i]))) {
+ fprintf(stderr, "Error (%d) reading configuration file %s.\n", err, argv[i]);
+ goto exit2;
+ }
+ }
+ if (!git_configset_get_value(&cs, argv[2], &v)) {
+ if (!v)
+ printf("(NULL)\n");
+ else
+ printf("%s\n", v);
+ goto exit0;
+ } else {
+ printf("Value not found for \"%s\"\n", argv[2]);
+ goto exit1;
+ }
+ } else if (!strcmp(argv[1], "configset_get_value_multi")) {
+ for (i = 3; i < argc; i++) {
+ int err;
+ if ((err = git_configset_add_file(&cs, argv[i]))) {
+ fprintf(stderr, "Error (%d) reading configuration file %s.\n", err, argv[i]);
+ goto exit2;
+ }
+ }
+ strptr = git_configset_get_value_multi(&cs, argv[2]);
+ if (strptr) {
+ for (i = 0; i < strptr->nr; i++) {
+ v = strptr->items[i].string;
+ if (!v)
+ printf("(NULL)\n");
+ else
+ printf("%s\n", v);
+ }
+ goto exit0;
+ } else {
+ printf("Value not found for \"%s\"\n", argv[2]);
+ goto exit1;
+ }
+ }
+
+ die("%s: Please check the syntax and the function name", argv[0]);
+
+exit0:
+ git_configset_clear(&cs);
+ return 0;
+
+exit1:
+ git_configset_clear(&cs);
+ return 1;
+
+exit2:
+ git_configset_clear(&cs);
+ return 2;
+}
--- /dev/null
+#include "cache.h"
+
+static int rc;
+
+static void report_error(const char *class, int ch)
+{
+ printf("%s classifies char %d (0x%02x) wrongly\n", class, ch, ch);
+ rc = 1;
+}
+
+static int is_in(const char *s, int ch)
+{
+ /* We can't find NUL using strchr. It's classless anyway. */
+ if (ch == '\0')
+ return 0;
+ return !!strchr(s, ch);
+}
+
+#define TEST_CLASS(t,s) { \
+ int i; \
+ for (i = 0; i < 256; i++) { \
+ if (is_in(s, i) != t(i)) \
+ report_error(#t, i); \
+ } \
+}
+
+#define DIGIT "0123456789"
+#define LOWER "abcdefghijklmnopqrstuvwxyz"
+#define UPPER "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+int main(int argc, char **argv)
+{
+ TEST_CLASS(isdigit, DIGIT);
+ TEST_CLASS(isspace, " \n\r\t");
+ TEST_CLASS(isalpha, LOWER UPPER);
+ TEST_CLASS(isalnum, LOWER UPPER DIGIT);
+ TEST_CLASS(is_glob_special, "*?[\\");
+ TEST_CLASS(is_regex_special, "$()*+.?[\\^{|");
+ TEST_CLASS(is_pathspec_magic, "!\"#%&',-/:;<=>@_`~");
+
+ return rc;
+}
--- /dev/null
+#include "cache.h"
+
+static const char *usage_msg = "\n"
+" test-date show [time_t]...\n"
+" test-date parse [date]...\n"
+" test-date approxidate [date]...\n";
+
+static void show_dates(char **argv, struct timeval *now)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ for (; *argv; argv++) {
+ time_t t = atoi(*argv);
+ show_date_relative(t, 0, now, &buf);
+ printf("%s -> %s\n", *argv, buf.buf);
+ }
+ strbuf_release(&buf);
+}
+
+static void parse_dates(char **argv, struct timeval *now)
+{
+ struct strbuf result = STRBUF_INIT;
+
+ for (; *argv; argv++) {
+ unsigned long t;
+ int tz;
+
+ strbuf_reset(&result);
+ parse_date(*argv, &result);
+ if (sscanf(result.buf, "%lu %d", &t, &tz) == 2)
+ printf("%s -> %s\n",
+ *argv, show_date(t, tz, DATE_MODE(ISO8601)));
+ else
+ printf("%s -> bad\n", *argv);
+ }
+ strbuf_release(&result);
+}
+
+static void parse_approxidate(char **argv, struct timeval *now)
+{
+ for (; *argv; argv++) {
+ time_t t;
+ t = approxidate_relative(*argv, now);
+ printf("%s -> %s\n", *argv, show_date(t, 0, DATE_MODE(ISO8601)));
+ }
+}
+
+int main(int argc, char **argv)
+{
+ struct timeval now;
+ const char *x;
+
+ x = getenv("TEST_DATE_NOW");
+ if (x) {
+ now.tv_sec = atoi(x);
+ now.tv_usec = 0;
+ }
+ else
+ gettimeofday(&now, NULL);
+
+ argv++;
+ if (!*argv)
+ usage(usage_msg);
+ if (!strcmp(*argv, "show"))
+ show_dates(argv+1, &now);
+ else if (!strcmp(*argv, "parse"))
+ parse_dates(argv+1, &now);
+ else if (!strcmp(*argv, "approxidate"))
+ parse_approxidate(argv+1, &now);
+ else
+ usage(usage_msg);
+ return 0;
+}
--- /dev/null
+/*
+ * test-delta.c: test code to exercise diff-delta.c and patch-delta.c
+ *
+ * (C) 2005 Nicolas Pitre <nico@fluxnic.net>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "git-compat-util.h"
+#include "delta.h"
+#include "cache.h"
+
+static const char usage_str[] =
+ "test-delta (-d|-p) <from_file> <data_file> <out_file>";
+
+int main(int argc, char *argv[])
+{
+ int fd;
+ struct stat st;
+ void *from_buf, *data_buf, *out_buf;
+ unsigned long from_size, data_size, out_size;
+
+ if (argc != 5 || (strcmp(argv[1], "-d") && strcmp(argv[1], "-p"))) {
+ fprintf(stderr, "usage: %s\n", usage_str);
+ return 1;
+ }
+
+ fd = open(argv[2], O_RDONLY);
+ if (fd < 0 || fstat(fd, &st)) {
+ perror(argv[2]);
+ return 1;
+ }
+ from_size = st.st_size;
+ from_buf = mmap(NULL, from_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (from_buf == MAP_FAILED) {
+ perror(argv[2]);
+ close(fd);
+ return 1;
+ }
+ close(fd);
+
+ fd = open(argv[3], O_RDONLY);
+ if (fd < 0 || fstat(fd, &st)) {
+ perror(argv[3]);
+ return 1;
+ }
+ data_size = st.st_size;
+ data_buf = mmap(NULL, data_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (data_buf == MAP_FAILED) {
+ perror(argv[3]);
+ close(fd);
+ return 1;
+ }
+ close(fd);
+
+ if (argv[1][1] == 'd')
+ out_buf = diff_delta(from_buf, from_size,
+ data_buf, data_size,
+ &out_size, 0);
+ else
+ out_buf = patch_delta(from_buf, from_size,
+ data_buf, data_size,
+ &out_size);
+ if (!out_buf) {
+ fprintf(stderr, "delta operation failed (returned NULL)\n");
+ return 1;
+ }
+
+ fd = open (argv[4], O_WRONLY|O_CREAT|O_TRUNC, 0666);
+ if (fd < 0 || write_in_full(fd, out_buf, out_size) != out_size) {
+ perror(argv[4]);
+ return 1;
+ }
+
+ return 0;
+}
--- /dev/null
+#include "cache.h"
+#include "tree.h"
+#include "cache-tree.h"
+
+
+static void dump_one(struct cache_tree *it, const char *pfx, const char *x)
+{
+ if (it->entry_count < 0)
+ printf("%-40s %s%s (%d subtrees)\n",
+ "invalid", x, pfx, it->subtree_nr);
+ else
+ printf("%s %s%s (%d entries, %d subtrees)\n",
+ sha1_to_hex(it->sha1), x, pfx,
+ it->entry_count, it->subtree_nr);
+}
+
+static int dump_cache_tree(struct cache_tree *it,
+ struct cache_tree *ref,
+ const char *pfx)
+{
+ int i;
+ int errs = 0;
+
+ if (!it || !ref)
+ /* missing in either */
+ return 0;
+
+ if (it->entry_count < 0) {
+ /* invalid */
+ dump_one(it, pfx, "");
+ dump_one(ref, pfx, "#(ref) ");
+ }
+ else {
+ dump_one(it, pfx, "");
+ if (hashcmp(it->sha1, ref->sha1) ||
+ ref->entry_count != it->entry_count ||
+ ref->subtree_nr != it->subtree_nr) {
+ /* claims to be valid but is lying */
+ dump_one(ref, pfx, "#(ref) ");
+ errs = 1;
+ }
+ }
+
+ for (i = 0; i < it->subtree_nr; i++) {
+ char path[PATH_MAX];
+ struct cache_tree_sub *down = it->down[i];
+ struct cache_tree_sub *rdwn;
+
+ rdwn = cache_tree_sub(ref, down->name);
+ xsnprintf(path, sizeof(path), "%s%.*s/", pfx, down->namelen, down->name);
+ if (dump_cache_tree(down->cache_tree, rdwn->cache_tree, path))
+ errs = 1;
+ }
+ return errs;
+}
+
+int main(int ac, char **av)
+{
+ struct index_state istate;
+ struct cache_tree *another = cache_tree();
+ if (read_cache() < 0)
+ die("unable to read index file");
+ istate = the_index;
+ istate.cache_tree = another;
+ cache_tree_update(&istate, WRITE_TREE_DRY_RUN);
+ return dump_cache_tree(active_cache_tree, another, "");
+}
--- /dev/null
+#include "cache.h"
+#include "split-index.h"
+#include "ewah/ewok.h"
+
+static void show_bit(size_t pos, void *data)
+{
+ printf(" %d", (int)pos);
+}
+
+int main(int ac, char **av)
+{
+ struct split_index *si;
+ int i;
+
+ do_read_index(&the_index, av[1], 1);
+ printf("own %s\n", sha1_to_hex(the_index.sha1));
+ si = the_index.split_index;
+ if (!si) {
+ printf("not a split index\n");
+ return 0;
+ }
+ printf("base %s\n", sha1_to_hex(si->base_sha1));
+ for (i = 0; i < the_index.cache_nr; i++) {
+ struct cache_entry *ce = the_index.cache[i];
+ printf("%06o %s %d\t%s\n", ce->ce_mode,
+ sha1_to_hex(ce->sha1), ce_stage(ce), ce->name);
+ }
+ printf("replacements:");
+ if (si->replace_bitmap)
+ ewah_each_bit(si->replace_bitmap, show_bit, NULL);
+ printf("\ndeletions:");
+ if (si->delete_bitmap)
+ ewah_each_bit(si->delete_bitmap, show_bit, NULL);
+ printf("\n");
+ return 0;
+}
--- /dev/null
+#include "cache.h"
+#include "dir.h"
+
+static int compare_untracked(const void *a_, const void *b_)
+{
+ const char *const *a = a_;
+ const char *const *b = b_;
+ return strcmp(*a, *b);
+}
+
+static int compare_dir(const void *a_, const void *b_)
+{
+ const struct untracked_cache_dir *const *a = a_;
+ const struct untracked_cache_dir *const *b = b_;
+ return strcmp((*a)->name, (*b)->name);
+}
+
+static void dump(struct untracked_cache_dir *ucd, struct strbuf *base)
+{
+ int i, len;
+ qsort(ucd->untracked, ucd->untracked_nr, sizeof(*ucd->untracked),
+ compare_untracked);
+ qsort(ucd->dirs, ucd->dirs_nr, sizeof(*ucd->dirs),
+ compare_dir);
+ len = base->len;
+ strbuf_addf(base, "%s/", ucd->name);
+ printf("%s %s", base->buf,
+ sha1_to_hex(ucd->exclude_sha1));
+ if (ucd->recurse)
+ fputs(" recurse", stdout);
+ if (ucd->check_only)
+ fputs(" check_only", stdout);
+ if (ucd->valid)
+ fputs(" valid", stdout);
+ printf("\n");
+ for (i = 0; i < ucd->untracked_nr; i++)
+ printf("%s\n", ucd->untracked[i]);
+ for (i = 0; i < ucd->dirs_nr; i++)
+ dump(ucd->dirs[i], base);
+ strbuf_setlen(base, len);
+}
+
+int main(int ac, char **av)
+{
+ struct untracked_cache *uc;
+ struct strbuf base = STRBUF_INIT;
+
+ /* Hack to avoid modifying the untracked cache when we read it */
+ ignore_untracked_cache_config = 1;
+
+ setup_git_directory();
+ if (read_cache() < 0)
+ die("unable to read index file");
+ uc = the_index.untracked;
+ if (!uc) {
+ printf("no untracked cache\n");
+ return 0;
+ }
+ printf("info/exclude %s\n", sha1_to_hex(uc->ss_info_exclude.sha1));
+ printf("core.excludesfile %s\n", sha1_to_hex(uc->ss_excludes_file.sha1));
+ printf("exclude_per_dir %s\n", uc->exclude_per_dir);
+ printf("flags %08x\n", uc->dir_flags);
+ if (uc->root)
+ dump(uc->root, &base);
+ return 0;
+}
--- /dev/null
+#include "git-compat-util.h"
+#include "run-command.h"
+#include "strbuf.h"
+
+int main(int argc, char **argv)
+{
+ const char *trash_directory = getenv("TRASH_DIRECTORY");
+ struct strbuf buf = STRBUF_INIT;
+ FILE *f;
+ int i;
+ const char *child_argv[] = { NULL, NULL };
+
+ /* First, print all parameters into $TRASH_DIRECTORY/ssh-output */
+ if (!trash_directory)
+ die("Need a TRASH_DIRECTORY!");
+ strbuf_addf(&buf, "%s/ssh-output", trash_directory);
+ f = fopen(buf.buf, "w");
+ if (!f)
+ die("Could not write to %s", buf.buf);
+ for (i = 0; i < argc; i++)
+ fprintf(f, "%s%s", i > 0 ? " " : "", i > 0 ? argv[i] : "ssh:");
+ fprintf(f, "\n");
+ fclose(f);
+
+ /* Now, evaluate the *last* parameter */
+ if (argc < 2)
+ return 0;
+ child_argv[0] = argv[argc - 1];
+ return run_command_v_opt(child_argv, RUN_USING_SHELL);
+}
--- /dev/null
+/*
+ * Simple random data generator used to create reproducible test files.
+ * This is inspired from POSIX.1-2001 implementation example for rand().
+ * Copyright (C) 2007 by Nicolas Pitre, licensed under the GPL version 2.
+ */
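+/*
+ * Illustrative invocation (the output file name is hypothetical): the
+ * same seed string always produces the same byte stream, so
+ *
+ *	test-genrandom seed 1024 >random-1k
+ *
+ * writes a reproducible 1024-byte file.
+ */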
+
+#include "git-compat-util.h"
+
+int main(int argc, char *argv[])
+{
+ unsigned long count, next = 0;
+ unsigned char *c;
+
+ if (argc < 2 || argc > 3) {
+ fprintf(stderr, "usage: %s <seed_string> [<size>]\n", argv[0]);
+ return 1;
+ }
+
+ c = (unsigned char *) argv[1];
+ do {
+ next = next * 11 + *c;
+ } while (*c++);
+
+ count = (argc == 3) ? strtoul(argv[2], NULL, 0) : -1L;
+
+ while (count--) {
+ next = next * 1103515245 + 12345;
+ if (putchar((next >> 16) & 0xff) == EOF)
+ return -1;
+ }
+
+ return 0;
+}
--- /dev/null
+#include "git-compat-util.h"
+#include "hashmap.h"
+
+struct test_entry
+{
+ struct hashmap_entry ent;
+ /* key and value as two \0-terminated strings */
+ char key[FLEX_ARRAY];
+};
+
+static const char *get_value(const struct test_entry *e)
+{
+ return e->key + strlen(e->key) + 1;
+}
+
+static int test_entry_cmp(const struct test_entry *e1,
+ const struct test_entry *e2, const char* key)
+{
+ return strcmp(e1->key, key ? key : e2->key);
+}
+
+static int test_entry_cmp_icase(const struct test_entry *e1,
+ const struct test_entry *e2, const char* key)
+{
+ return strcasecmp(e1->key, key ? key : e2->key);
+}
+
+static struct test_entry *alloc_test_entry(int hash, char *key, int klen,
+ char *value, int vlen)
+{
+ struct test_entry *entry = malloc(sizeof(struct test_entry) + klen
+ + vlen + 2);
+ hashmap_entry_init(entry, hash);
+ memcpy(entry->key, key, klen + 1);
+ memcpy(entry->key + klen + 1, value, vlen + 1);
+ return entry;
+}
+
+#define HASH_METHOD_FNV 0
+#define HASH_METHOD_I 1
+#define HASH_METHOD_IDIV10 2
+#define HASH_METHOD_0 3
+#define HASH_METHOD_X2 4
+#define TEST_SPARSE 8
+#define TEST_ADD 16
+#define TEST_SIZE 100000
+
+static unsigned int hash(unsigned int method, unsigned int i, const char *key)
+{
+ unsigned int hash = 0;
+ switch (method & 3)
+ {
+ case HASH_METHOD_FNV:
+ hash = strhash(key);
+ break;
+ case HASH_METHOD_I:
+ hash = i;
+ break;
+ case HASH_METHOD_IDIV10:
+ hash = i / 10;
+ break;
+ case HASH_METHOD_0:
+ hash = 0;
+ break;
+ }
+
+ if (method & HASH_METHOD_X2)
+ hash = 2 * hash;
+ return hash;
+}
+
+/*
+ * Test performance of hashmap.[ch]
+ * Usage: time echo "perfhashmap method rounds" | test-hashmap
+ */
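+/*
+ * Illustrative parameter choices for the flags defined above: method 0
+ * times lookups using the FNV string hash, adding TEST_SPARSE (8) fills
+ * only a tenth of the entries before the lookups, and adding TEST_ADD
+ * (16) times repeated map construction instead, e.g.
+ *
+ *	echo "perfhashmap 16 100" | test-hashmap
+ */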
+static void perf_hashmap(unsigned int method, unsigned int rounds)
+{
+ struct hashmap map;
+ char buf[16];
+ struct test_entry **entries;
+ unsigned int *hashes;
+ unsigned int i, j;
+
+ entries = malloc(TEST_SIZE * sizeof(struct test_entry *));
+ hashes = malloc(TEST_SIZE * sizeof(int));
+ for (i = 0; i < TEST_SIZE; i++) {
+ snprintf(buf, sizeof(buf), "%i", i);
+ entries[i] = alloc_test_entry(0, buf, strlen(buf), "", 0);
+ hashes[i] = hash(method, i, entries[i]->key);
+ }
+
+ if (method & TEST_ADD) {
+ /* test adding to the map */
+ for (j = 0; j < rounds; j++) {
+ hashmap_init(&map, (hashmap_cmp_fn) test_entry_cmp, 0);
+
+ /* add entries */
+ for (i = 0; i < TEST_SIZE; i++) {
+ hashmap_entry_init(entries[i], hashes[i]);
+ hashmap_add(&map, entries[i]);
+ }
+
+ hashmap_free(&map, 0);
+ }
+ } else {
+ /* test map lookups */
+ hashmap_init(&map, (hashmap_cmp_fn) test_entry_cmp, 0);
+
+ /* fill the map (sparsely if specified) */
+ j = (method & TEST_SPARSE) ? TEST_SIZE / 10 : TEST_SIZE;
+ for (i = 0; i < j; i++) {
+ hashmap_entry_init(entries[i], hashes[i]);
+ hashmap_add(&map, entries[i]);
+ }
+
+ for (j = 0; j < rounds; j++) {
+ for (i = 0; i < TEST_SIZE; i++) {
+ hashmap_get_from_hash(&map, hashes[i],
+ entries[i]->key);
+ }
+ }
+
+ hashmap_free(&map, 0);
+ }
+}
+
+#define DELIM " \t\r\n"
+
+/*
+ * Read stdin line by line and print result of commands to stdout:
+ *
+ * hash key -> strhash(key) memhash(key) strihash(key) memihash(key)
+ * put key value -> NULL / old value
+ * get key -> NULL / value
+ * remove key -> NULL / old value
+ * iterate -> key1 value1\nkey2 value2\n...
+ * size -> tablesize numentries
+ *
+ * perfhashmap method rounds -> test hashmap.[ch] performance
+ */
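+/*
+ * A small illustrative session (the input is hypothetical; the table
+ * size printed by "size" depends on the map's initial allocation):
+ *
+ *	$ printf 'put foo bar\nget foo\nsize\n' | test-hashmap
+ *	NULL
+ *	bar
+ *	<tablesize> 1
+ */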
+int main(int argc, char *argv[])
+{
+ char line[1024];
+ struct hashmap map;
+ int icase;
+
+ /* init hash map */
+ icase = argc > 1 && !strcmp("ignorecase", argv[1]);
+ hashmap_init(&map, (hashmap_cmp_fn) (icase ? test_entry_cmp_icase
+ : test_entry_cmp), 0);
+
+ /* process commands from stdin */
+ while (fgets(line, sizeof(line), stdin)) {
+ char *cmd, *p1 = NULL, *p2 = NULL;
+ int l1 = 0, l2 = 0, hash = 0;
+ struct test_entry *entry;
+
+ /* break line into command and up to two parameters */
+ cmd = strtok(line, DELIM);
+ /* ignore empty lines */
+ if (!cmd || *cmd == '#')
+ continue;
+
+ p1 = strtok(NULL, DELIM);
+ if (p1) {
+ l1 = strlen(p1);
+ hash = icase ? strihash(p1) : strhash(p1);
+ p2 = strtok(NULL, DELIM);
+ if (p2)
+ l2 = strlen(p2);
+ }
+
+ if (!strcmp("hash", cmd) && l1) {
+
+ /* print results of different hash functions */
+ printf("%u %u %u %u\n", strhash(p1), memhash(p1, l1),
+ strihash(p1), memihash(p1, l1));
+
+ } else if (!strcmp("add", cmd) && l1 && l2) {
+
+ /* create entry with key = p1, value = p2 */
+ entry = alloc_test_entry(hash, p1, l1, p2, l2);
+
+ /* add to hashmap */
+ hashmap_add(&map, entry);
+
+ } else if (!strcmp("put", cmd) && l1 && l2) {
+
+ /* create entry with key = p1, value = p2 */
+ entry = alloc_test_entry(hash, p1, l1, p2, l2);
+
+ /* add / replace entry */
+ entry = hashmap_put(&map, entry);
+
+ /* print and free replaced entry, if any */
+ puts(entry ? get_value(entry) : "NULL");
+ free(entry);
+
+ } else if (!strcmp("get", cmd) && l1) {
+
+ /* lookup entry in hashmap */
+ entry = hashmap_get_from_hash(&map, hash, p1);
+
+ /* print result */
+ if (!entry)
+ puts("NULL");
+ while (entry) {
+ puts(get_value(entry));
+ entry = hashmap_get_next(&map, entry);
+ }
+
+ } else if (!strcmp("remove", cmd) && l1) {
+
+ /* setup static key */
+ struct hashmap_entry key;
+ hashmap_entry_init(&key, hash);
+
+ /* remove entry from hashmap */
+ entry = hashmap_remove(&map, &key, p1);
+
+			/* print result and free entry */
+ puts(entry ? get_value(entry) : "NULL");
+ free(entry);
+
+ } else if (!strcmp("iterate", cmd)) {
+
+ struct hashmap_iter iter;
+ hashmap_iter_init(&map, &iter);
+ while ((entry = hashmap_iter_next(&iter)))
+ printf("%s %s\n", entry->key, get_value(entry));
+
+ } else if (!strcmp("size", cmd)) {
+
+ /* print table sizes */
+ printf("%u %u\n", map.tablesize, map.size);
+
+ } else if (!strcmp("intern", cmd) && l1) {
+
+ /* test that strintern works */
+ const char *i1 = strintern(p1);
+ const char *i2 = strintern(p1);
+ if (strcmp(i1, p1))
+ printf("strintern(%s) returns %s\n", p1, i1);
+ else if (i1 == p1)
+ printf("strintern(%s) returns input pointer\n", p1);
+ else if (i1 != i2)
+				printf("strintern(%s) != strintern(%s)\n", i1, i2);
+ else
+ printf("%s\n", i1);
+
+ } else if (!strcmp("perfhashmap", cmd) && l1 && l2) {
+
+ perf_hashmap(atoi(p1), atoi(p2));
+
+ } else {
+
+ printf("Unknown command %s\n", cmd);
+
+ }
+ }
+
+ hashmap_free(&map, 1);
+ return 0;
+}
--- /dev/null
+#include "cache.h"
+
+int main(int argc, char **argv)
+{
+ struct cache_header hdr;
+ int version;
+
+	memset(&hdr, 0, sizeof(hdr));
+ if (read(0, &hdr, sizeof(hdr)) != sizeof(hdr))
+ return 0;
+ version = ntohl(hdr.hdr_version);
+ printf("%d\n", version);
+ return 0;
+}
--- /dev/null
+/*
+ * test-line-buffer.c: code to exercise the svn importer's input helper
+ */
+
+#include "git-compat-util.h"
+#include "strbuf.h"
+#include "vcs-svn/line_buffer.h"
+
+static uint32_t strtouint32(const char *s)
+{
+ char *end;
+ uintmax_t n = strtoumax(s, &end, 10);
+ if (*s == '\0' || *end != '\0')
+ die("invalid count: %s", s);
+ return (uint32_t) n;
+}
+
+static void handle_command(const char *command, const char *arg, struct line_buffer *buf)
+{
+ switch (*command) {
+ case 'b':
+ if (starts_with(command, "binary ")) {
+ struct strbuf sb = STRBUF_INIT;
+ strbuf_addch(&sb, '>');
+ buffer_read_binary(buf, &sb, strtouint32(arg));
+ fwrite(sb.buf, 1, sb.len, stdout);
+ strbuf_release(&sb);
+ return;
+ }
+ case 'c':
+ if (starts_with(command, "copy ")) {
+ buffer_copy_bytes(buf, strtouint32(arg));
+ return;
+ }
+ case 's':
+ if (starts_with(command, "skip ")) {
+ buffer_skip_bytes(buf, strtouint32(arg));
+ return;
+ }
+ default:
+ die("unrecognized command: %s", command);
+ }
+}
+
+static void handle_line(const char *line, struct line_buffer *stdin_buf)
+{
+ const char *arg = strchr(line, ' ');
+ if (!arg)
+ die("no argument in line: %s", line);
+ handle_command(line, arg + 1, stdin_buf);
+}
+
+int main(int argc, char *argv[])
+{
+ struct line_buffer stdin_buf = LINE_BUFFER_INIT;
+ struct line_buffer file_buf = LINE_BUFFER_INIT;
+ struct line_buffer *input = &stdin_buf;
+ const char *filename;
+ char *s;
+
+ if (argc == 1)
+ filename = NULL;
+ else if (argc == 2)
+ filename = argv[1];
+ else
+ usage("test-line-buffer [file | &fd] < script");
+
+ if (buffer_init(&stdin_buf, NULL))
+ die_errno("open error");
+ if (filename) {
+ if (*filename == '&') {
+ if (buffer_fdinit(&file_buf, strtouint32(filename + 1)))
+ die_errno("error opening fd %s", filename + 1);
+ } else {
+ if (buffer_init(&file_buf, filename))
+ die_errno("error opening %s", filename);
+ }
+ input = &file_buf;
+ }
+
+ while ((s = buffer_read_line(&stdin_buf)))
+ handle_line(s, input);
+
+ if (filename && buffer_deinit(&file_buf))
+ die("error reading from %s", filename);
+ if (buffer_deinit(&stdin_buf))
+ die("input error");
+ if (ferror(stdout))
+ die("output error");
+ return 0;
+}
--- /dev/null
+#include "cache.h"
+#include "tree.h"
+
+int main(int ac, char **av)
+{
+ struct object_id hash1, hash2, shifted;
+ struct tree *one, *two;
+
+ setup_git_directory();
+
+ if (get_oid(av[1], &hash1))
+ die("cannot parse %s as an object name", av[1]);
+ if (get_oid(av[2], &hash2))
+ die("cannot parse %s as an object name", av[2]);
+ one = parse_tree_indirect(hash1.hash);
+ if (!one)
+ die("not a tree-ish %s", av[1]);
+ two = parse_tree_indirect(hash2.hash);
+ if (!two)
+ die("not a tree-ish %s", av[2]);
+
+ shift_tree(&one->object.oid, &two->object.oid, &shifted, -1);
+ printf("shifted: %s\n", oid_to_hex(&shifted));
+
+ exit(0);
+}
--- /dev/null
+#include "cache.h"
+#include "mergesort.h"
+
+struct line {
+ char *text;
+ struct line *next;
+};
+
+static void *get_next(const void *a)
+{
+ return ((const struct line *)a)->next;
+}
+
+static void set_next(void *a, void *b)
+{
+ ((struct line *)a)->next = b;
+}
+
+static int compare_strings(const void *a, const void *b)
+{
+ const struct line *x = a, *y = b;
+ return strcmp(x->text, y->text);
+}
+
+int main(int argc, char **argv)
+{
+ struct line *line, *p = NULL, *lines = NULL;
+ struct strbuf sb = STRBUF_INIT;
+
+ for (;;) {
+ if (strbuf_getwholeline(&sb, stdin, '\n'))
+ break;
+ line = xmalloc(sizeof(struct line));
+ line->text = strbuf_detach(&sb, NULL);
+ if (p) {
+ line->next = p->next;
+ p->next = line;
+ } else {
+ line->next = NULL;
+ lines = line;
+ }
+ p = line;
+ }
+
+ lines = llist_mergesort(lines, get_next, set_next, compare_strings);
+
+ while (lines) {
+ printf("%s", lines->text);
+ lines = lines->next;
+ }
+ return 0;
+}
--- /dev/null
+/*
+ * test-mktemp.c: code to exercise the creation of temporary files
+ */
+#include "git-compat-util.h"
+
+int main(int argc, char *argv[])
+{
+ if (argc != 2)
+ usage("Expected 1 parameter defining the temporary file template");
+
+ xmkstemp(xstrdup(argv[1]));
+
+ return 0;
+}
--- /dev/null
+#include "cache.h"
+#include "parse-options.h"
+#include "string-list.h"
+
+static int boolean = 0;
+static int integer = 0;
+static unsigned long magnitude = 0;
+static unsigned long timestamp;
+static int abbrev = 7;
+static int verbose = -1; /* unspecified */
+static int dry_run = 0, quiet = 0;
+static char *string = NULL;
+static char *file = NULL;
+static int ambiguous;
+static struct string_list list;
+
+static struct {
+ int called;
+ const char *arg;
+ int unset;
+} length_cb;
+
+static int length_callback(const struct option *opt, const char *arg, int unset)
+{
+ length_cb.called = 1;
+ length_cb.arg = arg;
+ length_cb.unset = unset;
+
+ if (unset)
+ return 1; /* do not support unset */
+
+ *(int *)opt->value = strlen(arg);
+ return 0;
+}
+
+static int number_callback(const struct option *opt, const char *arg, int unset)
+{
+ *(int *)opt->value = strtol(arg, NULL, 10);
+ return 0;
+}
+
+static int collect_expect(const struct option *opt, const char *arg, int unset)
+{
+ struct string_list *expect;
+ struct string_list_item *item;
+ struct strbuf label = STRBUF_INIT;
+ const char *colon;
+
+ if (!arg || unset)
+ die("malformed --expect option");
+
+ expect = (struct string_list *)opt->value;
+ colon = strchr(arg, ':');
+ if (!colon)
+ die("malformed --expect option, lacking a colon");
+ strbuf_add(&label, arg, colon - arg);
+ item = string_list_insert(expect, strbuf_detach(&label, NULL));
+ if (item->util)
+		die("malformed --expect option, duplicate %s", item->string);
+ item->util = (void *)arg;
+ return 0;
+}
+
+__attribute__((format (printf,3,4)))
+static void show(struct string_list *expect, int *status, const char *fmt, ...)
+{
+ struct string_list_item *item;
+ struct strbuf buf = STRBUF_INIT;
+ va_list args;
+
+ va_start(args, fmt);
+ strbuf_vaddf(&buf, fmt, args);
+ va_end(args);
+
+ if (!expect->nr)
+ printf("%s\n", buf.buf);
+ else {
+ char *colon = strchr(buf.buf, ':');
+ if (!colon)
+ die("malformed output format, output lacking colon: %s", fmt);
+ *colon = '\0';
+ item = string_list_lookup(expect, buf.buf);
+ *colon = ':';
+ if (!item)
+ ; /* not among entries being checked */
+ else {
+ if (strcmp((const char *)item->util, buf.buf)) {
+ printf("-%s\n", (char *)item->util);
+ printf("+%s\n", buf.buf);
+ *status = 1;
+ }
+ }
+ }
+ strbuf_release(&buf);
+}
+
+int main(int argc, char **argv)
+{
+ const char *prefix = "prefix/";
+ const char *usage[] = {
+ "test-parse-options <options>",
+ NULL
+ };
+ struct string_list expect = STRING_LIST_INIT_NODUP;
+ struct option options[] = {
+ OPT_BOOL(0, "yes", &boolean, "get a boolean"),
+ OPT_BOOL('D', "no-doubt", &boolean, "begins with 'no-'"),
+ { OPTION_SET_INT, 'B', "no-fear", &boolean, NULL,
+ "be brave", PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
+ OPT_COUNTUP('b', "boolean", &boolean, "increment by one"),
+ OPT_BIT('4', "or4", &boolean,
+ "bitwise-or boolean with ...0100", 4),
+ OPT_NEGBIT(0, "neg-or4", &boolean, "same as --no-or4", 4),
+ OPT_GROUP(""),
+ OPT_INTEGER('i', "integer", &integer, "get a integer"),
+ OPT_INTEGER('j', NULL, &integer, "get a integer, too"),
+ OPT_MAGNITUDE('m', "magnitude", &magnitude, "get a magnitude"),
+ OPT_SET_INT(0, "set23", &integer, "set integer to 23", 23),
+ OPT_DATE('t', NULL, ×tamp, "get timestamp of <time>"),
+ OPT_CALLBACK('L', "length", &integer, "str",
+ "get length of <str>", length_callback),
+ OPT_FILENAME('F', "file", &file, "set file to <file>"),
+ OPT_GROUP("String options"),
+ OPT_STRING('s', "string", &string, "string", "get a string"),
+ OPT_STRING(0, "string2", &string, "str", "get another string"),
+ OPT_STRING(0, "st", &string, "st", "get another string (pervert ordering)"),
+ OPT_STRING('o', NULL, &string, "str", "get another string"),
+ OPT_NOOP_NOARG(0, "obsolete"),
+ OPT_STRING_LIST(0, "list", &list, "str", "add str to list"),
+ OPT_GROUP("Magic arguments"),
+ OPT_ARGUMENT("quux", "means --quux"),
+ OPT_NUMBER_CALLBACK(&integer, "set integer to NUM",
+ number_callback),
+ { OPTION_COUNTUP, '+', NULL, &boolean, NULL, "same as -b",
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG | PARSE_OPT_NODASH },
+ { OPTION_COUNTUP, 0, "ambiguous", &ambiguous, NULL,
+ "positive ambiguity", PARSE_OPT_NOARG | PARSE_OPT_NONEG },
+ { OPTION_COUNTUP, 0, "no-ambiguous", &ambiguous, NULL,
+ "negative ambiguity", PARSE_OPT_NOARG | PARSE_OPT_NONEG },
+ OPT_GROUP("Standard options"),
+ OPT__ABBREV(&abbrev),
+ OPT__VERBOSE(&verbose, "be verbose"),
+ OPT__DRY_RUN(&dry_run, "dry run"),
+ OPT__QUIET(&quiet, "be quiet"),
+ OPT_CALLBACK(0, "expect", &expect, "string",
+ "expected output in the variable dump",
+ collect_expect),
+ OPT_END(),
+ };
+ int i;
+ int ret = 0;
+
+ argc = parse_options(argc, (const char **)argv, prefix, options, usage, 0);
+
+ if (length_cb.called) {
+ const char *arg = length_cb.arg;
+ int unset = length_cb.unset;
+ show(&expect, &ret, "Callback: \"%s\", %d",
+ (arg ? arg : "not set"), unset);
+ }
+ show(&expect, &ret, "boolean: %d", boolean);
+ show(&expect, &ret, "integer: %d", integer);
+ show(&expect, &ret, "magnitude: %lu", magnitude);
+ show(&expect, &ret, "timestamp: %lu", timestamp);
+ show(&expect, &ret, "string: %s", string ? string : "(not set)");
+ show(&expect, &ret, "abbrev: %d", abbrev);
+ show(&expect, &ret, "verbose: %d", verbose);
+ show(&expect, &ret, "quiet: %d", quiet);
+ show(&expect, &ret, "dry run: %s", dry_run ? "yes" : "no");
+ show(&expect, &ret, "file: %s", file ? file : "(not set)");
+
+ for (i = 0; i < list.nr; i++)
+ show(&expect, &ret, "list: %s", list.items[i].string);
+
+ for (i = 0; i < argc; i++)
+ show(&expect, &ret, "arg %02d: %s", i, argv[i]);
+
+ return ret;
+}
--- /dev/null
+#include "cache.h"
+#include "string-list.h"
+
+/*
+ * A "string_list_each_func_t" function that normalizes an entry from
+ * GIT_CEILING_DIRECTORIES. If the path is unusable for some reason,
+ * die with an explanation.
+ */
+static int normalize_ceiling_entry(struct string_list_item *item, void *unused)
+{
+ char *ceil = item->string;
+
+ if (!*ceil)
+ die("Empty path is not supported");
+ if (!is_absolute_path(ceil))
+ die("Path \"%s\" is not absolute", ceil);
+ if (normalize_path_copy(ceil, ceil) < 0)
+ die("Path \"%s\" could not be normalized", ceil);
+ return 1;
+}
+
+static void normalize_argv_string(const char **var, const char *input)
+{
+ if (!strcmp(input, "<null>"))
+ *var = NULL;
+ else if (!strcmp(input, "<empty>"))
+ *var = "";
+ else
+ *var = input;
+
+ if (*var && (**var == '<' || **var == '('))
+ die("Bad value: %s\n", input);
+}
+
+struct test_data {
+ const char *from; /* input: transform from this ... */
+ const char *to; /* output: ... to this. */
+ const char *alternative; /* output: ... or this. */
+};
+
+static int test_function(struct test_data *data, char *(*func)(char *input),
+ const char *funcname)
+{
+ int failed = 0, i;
+ char buffer[1024];
+ char *to;
+
+ for (i = 0; data[i].to; i++) {
+ if (!data[i].from)
+ to = func(NULL);
+ else {
+ xsnprintf(buffer, sizeof(buffer), "%s", data[i].from);
+ to = func(buffer);
+ }
+ if (!strcmp(to, data[i].to))
+ continue;
+ if (!data[i].alternative)
+ error("FAIL: %s(%s) => '%s' != '%s'\n",
+ funcname, data[i].from, to, data[i].to);
+ else if (!strcmp(to, data[i].alternative))
+ continue;
+ else
+ error("FAIL: %s(%s) => '%s' != '%s', '%s'\n",
+ funcname, data[i].from, to, data[i].to,
+ data[i].alternative);
+ failed = 1;
+ }
+ return failed;
+}
+
+static struct test_data basename_data[] = {
+ /* --- POSIX type paths --- */
+ { NULL, "." },
+ { "", "." },
+ { ".", "." },
+ { "..", ".." },
+ { "/", "/" },
+ { "//", "/", "//" },
+ { "///", "/", "//" },
+ { "////", "/", "//" },
+ { "usr", "usr" },
+ { "/usr", "usr" },
+ { "/usr/", "usr" },
+ { "/usr//", "usr" },
+ { "/usr/lib", "lib" },
+ { "usr/lib", "lib" },
+ { "usr/lib///", "lib" },
+
+#if defined(__MINGW32__) || defined(_MSC_VER)
+ /* --- win32 type paths --- */
+ { "\\usr", "usr" },
+ { "\\usr\\", "usr" },
+ { "\\usr\\\\", "usr" },
+ { "\\usr\\lib", "lib" },
+ { "usr\\lib", "lib" },
+ { "usr\\lib\\\\\\", "lib" },
+ { "C:/usr", "usr" },
+ { "C:/usr", "usr" },
+ { "C:/usr/", "usr" },
+ { "C:/usr//", "usr" },
+ { "C:/usr/lib", "lib" },
+ { "C:usr/lib", "lib" },
+ { "C:usr/lib///", "lib" },
+ { "C:", "." },
+ { "C:a", "a" },
+ { "C:/", "/" },
+ { "C:///", "/" },
+ { "\\", "\\", "/" },
+ { "\\\\", "\\", "/" },
+ { "\\\\\\", "\\", "/" },
+#endif
+ { NULL, NULL }
+};
+
+static struct test_data dirname_data[] = {
+ /* --- POSIX type paths --- */
+ { NULL, "." },
+ { "", "." },
+ { ".", "." },
+ { "..", "." },
+ { "/", "/" },
+ { "//", "/", "//" },
+ { "///", "/", "//" },
+ { "////", "/", "//" },
+ { "usr", "." },
+ { "/usr", "/" },
+ { "/usr/", "/" },
+ { "/usr//", "/" },
+ { "/usr/lib", "/usr" },
+ { "usr/lib", "usr" },
+ { "usr/lib///", "usr" },
+
+#if defined(__MINGW32__) || defined(_MSC_VER)
+ /* --- win32 type paths --- */
+ { "\\", "\\" },
+ { "\\\\", "\\\\" },
+ { "\\usr", "\\" },
+ { "\\usr\\", "\\" },
+ { "\\usr\\\\", "\\" },
+ { "\\usr\\lib", "\\usr" },
+ { "usr\\lib", "usr" },
+ { "usr\\lib\\\\\\", "usr" },
+ { "C:a", "C:." },
+ { "C:/", "C:/" },
+ { "C:///", "C:/" },
+ { "C:/usr", "C:/" },
+ { "C:/usr/", "C:/" },
+ { "C:/usr//", "C:/" },
+ { "C:/usr/lib", "C:/usr" },
+ { "C:usr/lib", "C:usr" },
+ { "C:usr/lib///", "C:usr" },
+ { "\\\\\\", "\\" },
+ { "\\\\\\\\", "\\" },
+ { "C:", "C:.", "." },
+#endif
+ { NULL, NULL }
+};
+
+int main(int argc, char **argv)
+{
+ if (argc == 3 && !strcmp(argv[1], "normalize_path_copy")) {
+ char *buf = xmallocz(strlen(argv[2]));
+ int rv = normalize_path_copy(buf, argv[2]);
+ if (rv)
+ buf = "++failed++";
+ puts(buf);
+ return 0;
+ }
+
+ if (argc >= 2 && !strcmp(argv[1], "real_path")) {
+ while (argc > 2) {
+ puts(real_path(argv[2]));
+ argc--;
+ argv++;
+ }
+ return 0;
+ }
+
+ if (argc >= 2 && !strcmp(argv[1], "absolute_path")) {
+ while (argc > 2) {
+ puts(absolute_path(argv[2]));
+ argc--;
+ argv++;
+ }
+ return 0;
+ }
+
+ if (argc == 4 && !strcmp(argv[1], "longest_ancestor_length")) {
+ int len;
+ struct string_list ceiling_dirs = STRING_LIST_INIT_DUP;
+ char *path = xstrdup(argv[2]);
+
+ /*
+ * We have to normalize the arguments because under
+ * Windows, bash mangles arguments that look like
+ * absolute POSIX paths or colon-separate lists of
+ * absolute POSIX paths into DOS paths (e.g.,
+ * "/foo:/foo/bar" might be converted to
+ * "D:\Src\msysgit\foo;D:\Src\msysgit\foo\bar"),
+ * whereas longest_ancestor_length() requires paths
+ * that use forward slashes.
+ */
+ if (normalize_path_copy(path, path))
+ die("Path \"%s\" could not be normalized", argv[2]);
+ string_list_split(&ceiling_dirs, argv[3], PATH_SEP, -1);
+ filter_string_list(&ceiling_dirs, 0,
+ normalize_ceiling_entry, NULL);
+ len = longest_ancestor_length(path, &ceiling_dirs);
+ string_list_clear(&ceiling_dirs, 0);
+ free(path);
+ printf("%d\n", len);
+ return 0;
+ }
+
+ if (argc >= 4 && !strcmp(argv[1], "prefix_path")) {
+ char *prefix = argv[2];
+ int prefix_len = strlen(prefix);
+ int nongit_ok;
+ setup_git_directory_gently(&nongit_ok);
+ while (argc > 3) {
+ puts(prefix_path(prefix, prefix_len, argv[3]));
+ argc--;
+ argv++;
+ }
+ return 0;
+ }
+
+ if (argc == 4 && !strcmp(argv[1], "strip_path_suffix")) {
+ char *prefix = strip_path_suffix(argv[2], argv[3]);
+ printf("%s\n", prefix ? prefix : "(null)");
+ return 0;
+ }
+
+ if (argc == 3 && !strcmp(argv[1], "print_path")) {
+ puts(argv[2]);
+ return 0;
+ }
+
+ if (argc == 4 && !strcmp(argv[1], "relative_path")) {
+ struct strbuf sb = STRBUF_INIT;
+ const char *in, *prefix, *rel;
+ normalize_argv_string(&in, argv[2]);
+ normalize_argv_string(&prefix, argv[3]);
+ rel = relative_path(in, prefix, &sb);
+ if (!rel)
+ puts("(null)");
+ else
+ puts(strlen(rel) > 0 ? rel : "(empty)");
+ strbuf_release(&sb);
+ return 0;
+ }
+
+ if (argc == 2 && !strcmp(argv[1], "basename"))
+ return test_function(basename_data, basename, argv[1]);
+
+ if (argc == 2 && !strcmp(argv[1], "dirname"))
+ return test_function(dirname_data, dirname, argv[1]);
+
+ fprintf(stderr, "%s: unknown function name: %s\n", argv[0],
+ argv[1] ? argv[1] : "(there was none)");
+ return 1;
+}
--- /dev/null
+#include "cache.h"
+#include "prio-queue.h"
+
+static int intcmp(const void *va, const void *vb, void *data)
+{
+ const int *a = va, *b = vb;
+ return *a - *b;
+}
+
+static void show(int *v)
+{
+ if (!v)
+ printf("NULL\n");
+ else
+ printf("%d\n", *v);
+ free(v);
+}
+
+int main(int argc, char **argv)
+{
+ struct prio_queue pq = { intcmp };
+
+ while (*++argv) {
+ if (!strcmp(*argv, "get"))
+ show(prio_queue_get(&pq));
+ else if (!strcmp(*argv, "dump")) {
+ int *v;
+ while ((v = prio_queue_get(&pq)))
+ show(v);
+ }
+ else {
+ int *v = malloc(sizeof(*v));
+ *v = atoi(*argv);
+ prio_queue_put(&pq, v);
+ }
+ }
+
+ return 0;
+}
--- /dev/null
+#include "cache.h"
+
+int main (int argc, char **argv)
+{
+ int i, cnt = 1;
+ if (argc == 2)
+ cnt = strtol(argv[1], NULL, 0);
+ for (i = 0; i < cnt; i++) {
+ read_cache();
+ discard_cache();
+ }
+ return 0;
+}
--- /dev/null
+#include "git-compat-util.h"
+
+int main(int argc, char **argv)
+{
+ char *pat = "[^={} \t]+";
+ char *str = "={}\nfred";
+ regex_t r;
+ regmatch_t m[1];
+
+ if (regcomp(&r, pat, REG_EXTENDED | REG_NEWLINE))
+ die("failed regcomp() for pattern '%s'", pat);
+ if (regexec(&r, str, 1, m, 0))
+ die("no match of pattern '%s' to string '%s'", pat, str);
+
+ /* http://sourceware.org/bugzilla/show_bug.cgi?id=3957 */
+ if (m[0].rm_so == 3) /* matches '\n' when it should not */
+ die("regex bug confirmed: re-build git with NO_REGEX=1");
+
+ exit(0);
+}
--- /dev/null
+/*
+ * test-revision-walking.c: test revision walking API.
+ *
+ * (C) 2012 Heiko Voigt <hvoigt@hvoigt.net>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "cache.h"
+#include "commit.h"
+#include "diff.h"
+#include "revision.h"
+
+static void print_commit(struct commit *commit)
+{
+ struct strbuf sb = STRBUF_INIT;
+ struct pretty_print_context ctx = {0};
+ ctx.date_mode.type = DATE_NORMAL;
+ format_commit_message(commit, " %m %s", &sb, &ctx);
+ printf("%s\n", sb.buf);
+ strbuf_release(&sb);
+}
+
+static int run_revision_walk(void)
+{
+ struct rev_info rev;
+ struct commit *commit;
+ const char *argv[] = {NULL, "--all", NULL};
+ int argc = ARRAY_SIZE(argv) - 1;
+ int got_revision = 0;
+
+ init_revisions(&rev, NULL);
+ setup_revisions(argc, argv, &rev, NULL);
+ if (prepare_revision_walk(&rev))
+ die("revision walk setup failed");
+
+ while ((commit = get_revision(&rev)) != NULL) {
+ print_commit(commit);
+ got_revision = 1;
+ }
+
+ reset_revision_walk();
+ return got_revision;
+}
+
+int main(int argc, char **argv)
+{
+ if (argc < 2)
+ return 1;
+
+ setup_git_directory();
+
+ if (!strcmp(argv[1], "run-twice")) {
+ printf("1st\n");
+ if (!run_revision_walk())
+ return 1;
+ printf("2nd\n");
+ if (!run_revision_walk())
+ return 1;
+
+ return 0;
+ }
+
+ fprintf(stderr, "check usage\n");
+ return 1;
+}
--- /dev/null
+/*
+ * test-run-command.c: test run command API.
+ *
+ * (C) 2009 Ilari Liusvaara <ilari.liusvaara@elisanet.fi>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "git-compat-util.h"
+#include "run-command.h"
+#include "argv-array.h"
+#include "strbuf.h"
+#include <string.h>
+#include <errno.h>
+
+static int number_callbacks;
+static int parallel_next(struct child_process *cp,
+ struct strbuf *err,
+ void *cb,
+ void **task_cb)
+{
+ struct child_process *d = cb;
+ if (number_callbacks >= 4)
+ return 0;
+
+ argv_array_pushv(&cp->args, d->argv);
+ strbuf_addf(err, "preloaded output of a child\n");
+ number_callbacks++;
+ return 1;
+}
+
+static int no_job(struct child_process *cp,
+ struct strbuf *err,
+ void *cb,
+ void **task_cb)
+{
+ strbuf_addf(err, "no further jobs available\n");
+ return 0;
+}
+
+static int task_finished(int result,
+ struct strbuf *err,
+ void *pp_cb,
+ void *pp_task_cb)
+{
+ strbuf_addf(err, "asking for a quick stop\n");
+ return 1;
+}
+
+int main(int argc, char **argv)
+{
+ struct child_process proc = CHILD_PROCESS_INIT;
+ int jobs;
+
+ if (argc < 3)
+ return 1;
+ proc.argv = (const char **)argv + 2;
+
+ if (!strcmp(argv[1], "start-command-ENOENT")) {
+ if (start_command(&proc) < 0 && errno == ENOENT)
+ return 0;
+ fprintf(stderr, "FAIL %s\n", argv[1]);
+ return 1;
+ }
+ if (!strcmp(argv[1], "run-command"))
+ exit(run_command(&proc));
+
+ jobs = atoi(argv[2]);
+ proc.argv = (const char **)argv + 3;
+
+ if (!strcmp(argv[1], "run-command-parallel"))
+ exit(run_processes_parallel(jobs, parallel_next,
+ NULL, NULL, &proc));
+
+ if (!strcmp(argv[1], "run-command-abort"))
+ exit(run_processes_parallel(jobs, parallel_next,
+ NULL, task_finished, &proc));
+
+ if (!strcmp(argv[1], "run-command-no-jobs"))
+ exit(run_processes_parallel(jobs, no_job,
+ NULL, task_finished, &proc));
+
+ fprintf(stderr, "check usage\n");
+ return 1;
+}
--- /dev/null
+#include "cache.h"
+#include "lockfile.h"
+#include "tree.h"
+#include "cache-tree.h"
+
+static struct lock_file index_lock;
+
+int main(int ac, char **av)
+{
+ hold_locked_index(&index_lock, 1);
+ if (read_cache() < 0)
+ die("unable to read index file");
+ active_cache_tree = NULL;
+ if (write_locked_index(&the_index, &index_lock, COMMIT_LOCK))
+ die("unable to write index file");
+ return 0;
+}
--- /dev/null
+#include "cache.h"
+#include "sha1-array.h"
+
+static void print_sha1(const unsigned char sha1[20], void *data)
+{
+ puts(sha1_to_hex(sha1));
+}
+
+int main(int argc, char **argv)
+{
+ struct sha1_array array = SHA1_ARRAY_INIT;
+ struct strbuf line = STRBUF_INIT;
+
+ while (strbuf_getline(&line, stdin) != EOF) {
+ const char *arg;
+ unsigned char sha1[20];
+
+ if (skip_prefix(line.buf, "append ", &arg)) {
+ if (get_sha1_hex(arg, sha1))
+ die("not a hexadecimal SHA1: %s", arg);
+ sha1_array_append(&array, sha1);
+ } else if (skip_prefix(line.buf, "lookup ", &arg)) {
+ if (get_sha1_hex(arg, sha1))
+ die("not a hexadecimal SHA1: %s", arg);
+ printf("%d\n", sha1_array_lookup(&array, sha1));
+ } else if (!strcmp(line.buf, "clear"))
+ sha1_array_clear(&array);
+ else if (!strcmp(line.buf, "for_each_unique"))
+ sha1_array_for_each_unique(&array, print_sha1, NULL);
+ else
+ die("unknown command: %s", line.buf);
+ }
+ return 0;
+}
--- /dev/null
+#include "cache.h"
+
+int main(int ac, char **av)
+{
+ git_SHA_CTX ctx;
+ unsigned char sha1[20];
+ unsigned bufsz = 8192;
+ int binary = 0;
+ char *buffer;
+
+ if (ac == 2) {
+ if (!strcmp(av[1], "-b"))
+ binary = 1;
+ else
+ bufsz = strtoul(av[1], NULL, 10) * 1024 * 1024;
+ }
+
+ if (!bufsz)
+ bufsz = 8192;
+
+ while ((buffer = malloc(bufsz)) == NULL) {
+ fprintf(stderr, "bufsz %u is too big, halving...\n", bufsz);
+ bufsz /= 2;
+ if (bufsz < 1024)
+ die("OOPS");
+ }
+
+ git_SHA1_Init(&ctx);
+
+ while (1) {
+ ssize_t sz, this_sz;
+ char *cp = buffer;
+ unsigned room = bufsz;
+ this_sz = 0;
+ while (room) {
+ sz = xread(0, cp, room);
+ if (sz == 0)
+ break;
+ if (sz < 0)
+ die_errno("test-sha1");
+ this_sz += sz;
+ cp += sz;
+ room -= sz;
+ }
+ if (this_sz == 0)
+ break;
+ git_SHA1_Update(&ctx, buffer, this_sz);
+ }
+ git_SHA1_Final(sha1, &ctx);
+
+ if (binary)
+ fwrite(sha1, 1, 20, stdout);
+ else
+ puts(sha1_to_hex(sha1));
+ exit(0);
+}
--- /dev/null
+#!/bin/sh
+
+dd if=/dev/zero bs=1048576 count=100 2>/dev/null |
+/usr/bin/time t/helper/test-sha1 >/dev/null
+
+while read expect cnt pfx
+do
+ case "$expect" in '#'*) continue ;; esac
+ actual=$(
+ {
+ test -z "$pfx" || echo "$pfx"
+ dd if=/dev/zero bs=1048576 count=$cnt 2>/dev/null |
+ perl -pe 'y/\000/g/'
+ } | ./t/helper/test-sha1 $cnt
+ )
+ if test "$expect" = "$actual"
+ then
+ echo "OK: $expect $cnt $pfx"
+ else
+ echo >&2 "OOPS: $cnt"
+ echo >&2 "expect: $expect"
+ echo >&2 "actual: $actual"
+ exit 1
+ fi
+done <<EOF
+da39a3ee5e6b4b0d3255bfef95601890afd80709 0
+3f786850e387550fdab836ed7e6dc881de23001b 0 a
+5277cbb45a15902137d332d97e89cf8136545485 0 ab
+03cfd743661f07975fa2f1220c5194cbaff48451 0 abc
+3330b4373640f9e4604991e73c7e86bfd8da2dc3 0 abcd
+ec11312386ad561674f724b8cca7cf1796e26d1d 0 abcde
+bdc37c074ec4ee6050d68bc133c6b912f36474df 0 abcdef
+69bca99b923859f2dc486b55b87f49689b7358c7 0 abcdefg
+e414af7161c9554089f4106d6f1797ef14a73666 0 abcdefgh
+0707f2970043f9f7c22029482db27733deaec029 0 abcdefghi
+a4dd8aa74a5636728fe52451636e2e17726033aa 1
+9986b45e2f4d7086372533bb6953a8652fa3644a 1 frotz
+23d8d4f788e8526b4877548a32577543cbaaf51f 10
+8cd23f822ab44c7f481b8c92d591f6d1fcad431c 10 frotz
+f3b5604a4e604899c1233edb3bf1cc0ede4d8c32 512
+b095bd837a371593048136e429e9ac4b476e1bb3 512 frotz
+08fa81d6190948de5ccca3966340cc48c10cceac 1200 xyzzy
+e33a291f42c30a159733dd98b8b3e4ff34158ca0 4090 4G
+#a3bf783bc20caa958f6cb24dd140a7b21984838d 9999 nitfol
+EOF
+
+exit
+
+# generating test vectors
+# inputs are number of megabytes followed by some random string to prefix.
+
+while read cnt pfx
+do
+ actual=$(
+ {
+ test -z "$pfx" || echo "$pfx"
+ dd if=/dev/zero bs=1048576 count=$cnt 2>/dev/null |
+ perl -pe 'y/\000/g/'
+ } | sha1sum |
+ sed -e 's/ .*//'
+ )
+ echo "$actual $cnt $pfx"
+done <<EOF
+0
+0 a
+0 ab
+0 abc
+0 abcd
+0 abcde
+0 abcdef
+0 abcdefg
+0 abcdefgh
+0 abcdefghi
+1
+1 frotz
+10
+10 frotz
+512
+512 frotz
+1200 xyzzy
+4090 4G
+9999 nitfol
+EOF
--- /dev/null
+#include "cache.h"
+#include "sigchain.h"
+
+#define X(f) \
+static void f(int sig) { \
+ puts(#f); \
+ fflush(stdout); \
+ sigchain_pop(sig); \
+ raise(sig); \
+}
+X(one)
+X(two)
+X(three)
+#undef X
+
+int main(int argc, char **argv) {
+ sigchain_push(SIGTERM, one);
+ sigchain_push(SIGTERM, two);
+ sigchain_push(SIGTERM, three);
+ raise(SIGTERM);
+ return 0;
+}
--- /dev/null
+#include "cache.h"
+#include "string-list.h"
+
+/*
+ * Parse an argument into a string list. arg should either be a
+ * ':'-separated list of strings, or "-" to indicate an empty string
+ * list (as opposed to "", which indicates a string list containing a
+ * single empty string). list->strdup_strings must be set.
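+ *
+ * For illustration: "-" yields an empty list, "" yields a list holding
+ * a single empty string, and "a:b:c" yields the items "a", "b" and "c".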
+ */
+static void parse_string_list(struct string_list *list, const char *arg)
+{
+ if (!strcmp(arg, "-"))
+ return;
+
+ (void)string_list_split(list, arg, ':', -1);
+}
+
+static void write_list(const struct string_list *list)
+{
+ int i;
+ for (i = 0; i < list->nr; i++)
+ printf("[%d]: \"%s\"\n", i, list->items[i].string);
+}
+
+static void write_list_compact(const struct string_list *list)
+{
+ int i;
+ if (!list->nr)
+ printf("-\n");
+ else {
+ printf("%s", list->items[0].string);
+ for (i = 1; i < list->nr; i++)
+ printf(":%s", list->items[i].string);
+ printf("\n");
+ }
+}
+
+static int prefix_cb(struct string_list_item *item, void *cb_data)
+{
+ const char *prefix = (const char *)cb_data;
+ return starts_with(item->string, prefix);
+}
+
+int main(int argc, char **argv)
+{
+ if (argc == 5 && !strcmp(argv[1], "split")) {
+ struct string_list list = STRING_LIST_INIT_DUP;
+ int i;
+ const char *s = argv[2];
+ int delim = *argv[3];
+ int maxsplit = atoi(argv[4]);
+
+ i = string_list_split(&list, s, delim, maxsplit);
+ printf("%d\n", i);
+ write_list(&list);
+ string_list_clear(&list, 0);
+ return 0;
+ }
+
+ if (argc == 5 && !strcmp(argv[1], "split_in_place")) {
+ struct string_list list = STRING_LIST_INIT_NODUP;
+ int i;
+ char *s = xstrdup(argv[2]);
+ int delim = *argv[3];
+ int maxsplit = atoi(argv[4]);
+
+ i = string_list_split_in_place(&list, s, delim, maxsplit);
+ printf("%d\n", i);
+ write_list(&list);
+ string_list_clear(&list, 0);
+ free(s);
+ return 0;
+ }
+
+ if (argc == 4 && !strcmp(argv[1], "filter")) {
+ /*
+ * Retain only the items that have the specified prefix.
+ * Arguments: list|- prefix
+ */
+ struct string_list list = STRING_LIST_INIT_DUP;
+ const char *prefix = argv[3];
+
+ parse_string_list(&list, argv[2]);
+ filter_string_list(&list, 0, prefix_cb, (void *)prefix);
+ write_list_compact(&list);
+ string_list_clear(&list, 0);
+ return 0;
+ }
+
+ if (argc == 3 && !strcmp(argv[1], "remove_duplicates")) {
+ struct string_list list = STRING_LIST_INIT_DUP;
+
+ parse_string_list(&list, argv[2]);
+ string_list_remove_duplicates(&list, 0);
+ write_list_compact(&list);
+ string_list_clear(&list, 0);
+ return 0;
+ }
+
+ fprintf(stderr, "%s: unknown function name: %s\n", argv[0],
+ argv[1] ? argv[1] : "(there was none)");
+ return 1;
+}
--- /dev/null
+#include "cache.h"
+#include "submodule-config.h"
+#include "submodule.h"
+
+static void die_usage(int argc, char **argv, const char *msg)
+{
+ fprintf(stderr, "%s\n", msg);
+ fprintf(stderr, "Usage: %s [<commit> <submodulepath>] ...\n", argv[0]);
+ exit(1);
+}
+
+static int git_test_config(const char *var, const char *value, void *cb)
+{
+ return parse_submodule_config_option(var, value);
+}
+
+int main(int argc, char **argv)
+{
+ char **arg = argv;
+ int my_argc = argc;
+ int output_url = 0;
+ int lookup_name = 0;
+
+ arg++;
+ my_argc--;
+ while (starts_with(arg[0], "--")) {
+ if (!strcmp(arg[0], "--url"))
+ output_url = 1;
+ if (!strcmp(arg[0], "--name"))
+ lookup_name = 1;
+ arg++;
+ my_argc--;
+ }
+
+ if (my_argc % 2 != 0)
+ die_usage(argc, argv, "Wrong number of arguments.");
+
+ setup_git_directory();
+ gitmodules_config();
+ git_config(git_test_config, NULL);
+
+ while (*arg) {
+ unsigned char commit_sha1[20];
+ const struct submodule *submodule;
+ const char *commit;
+ const char *path_or_name;
+
+ commit = arg[0];
+ path_or_name = arg[1];
+
+ if (commit[0] == '\0')
+ hashcpy(commit_sha1, null_sha1);
+ else if (get_sha1(commit, commit_sha1) < 0)
+ die_usage(argc, argv, "Commit not found.");
+
+ if (lookup_name) {
+ submodule = submodule_from_name(commit_sha1, path_or_name);
+ } else
+ submodule = submodule_from_path(commit_sha1, path_or_name);
+ if (!submodule)
+ die_usage(argc, argv, "Submodule not found.");
+
+ if (output_url)
+ printf("Submodule url: '%s' for path '%s'\n",
+ submodule->url, submodule->path);
+ else
+ printf("Submodule name: '%s' for path '%s'\n",
+ submodule->name, submodule->path);
+
+ arg += 2;
+ }
+
+ submodule_free();
+
+ return 0;
+}
--- /dev/null
+#include "cache.h"
+#include "run-command.h"
+
+int main(int argc, char **argv)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ int nogit = 0;
+
+ setup_git_directory_gently(&nogit);
+ if (nogit)
+ die("No git repo found");
+ if (argc > 1 && !strcmp(argv[1], "--setup-work-tree")) {
+ setup_work_tree();
+ argv++;
+ }
+ cp.git_cmd = 1;
+ cp.argv = (const char **)argv + 1;
+ return run_command(&cp);
+}
--- /dev/null
+/*
+ * test-svn-fe: Code to exercise the svn import lib
+ */
+
+#include "git-compat-util.h"
+#include "vcs-svn/svndump.h"
+#include "vcs-svn/svndiff.h"
+#include "vcs-svn/sliding_window.h"
+#include "vcs-svn/line_buffer.h"
+
+static const char test_svnfe_usage[] =
+ "test-svn-fe (<dumpfile> | [-d] <preimage> <delta> <len>)";
+
+static int apply_delta(int argc, char *argv[])
+{
+ struct line_buffer preimage = LINE_BUFFER_INIT;
+ struct line_buffer delta = LINE_BUFFER_INIT;
+ struct sliding_view preimage_view = SLIDING_VIEW_INIT(&preimage, -1);
+
+ if (argc != 5)
+ usage(test_svnfe_usage);
+
+ if (buffer_init(&preimage, argv[2]))
+ die_errno("cannot open preimage");
+ if (buffer_init(&delta, argv[3]))
+ die_errno("cannot open delta");
+ if (svndiff0_apply(&delta, (off_t) strtoumax(argv[4], NULL, 0),
+ &preimage_view, stdout))
+ return 1;
+ if (buffer_deinit(&preimage))
+ die_errno("cannot close preimage");
+ if (buffer_deinit(&delta))
+ die_errno("cannot close delta");
+ strbuf_release(&preimage_view.buf);
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ if (argc == 2) {
+ if (svndump_init(argv[1]))
+ return 1;
+ svndump_read(NULL, "refs/heads/master", "refs/notes/svn/revs");
+ svndump_deinit();
+ svndump_reset();
+ return 0;
+ }
+
+ if (argc >= 2 && !strcmp(argv[1], "-d"))
+ return apply_delta(argc, argv);
+ usage(test_svnfe_usage);
+}
--- /dev/null
+#include "git-compat-util.h"
+#include "urlmatch.h"
+
+int main(int argc, char **argv)
+{
+ const char usage[] = "test-urlmatch-normalization [-p | -l] <url1> | <url1> <url2>";
+ char *url1, *url2;
+ int opt_p = 0, opt_l = 0;
+
+ /*
+ * For one url, succeed if url_normalize succeeds on it, fail otherwise.
+ * For two urls, succeed only if url_normalize succeeds on both and
+ * the results compare equal with strcmp. If -p is given (one url only)
+ * and url_normalize succeeds, print the result followed by "\n". If
+ * -l is given (one url only) and url_normalize succeeds, print the
+ * returned length in decimal followed by "\n".
+ */
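+	/*
+	 * Hypothetical invocations of the cases above (the URLs are
+	 * illustrative only):
+	 *
+	 *	test-urlmatch-normalization -p HTTP://example.COM/path
+	 *		prints the normalized URL (scheme and host
+	 *		lowercased) and exits 0
+	 *
+	 *	test-urlmatch-normalization http://example.com/a http://example.com/b
+	 *		exits 1 because the normalized URLs differ
+	 */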
+
+ if (argc > 1 && !strcmp(argv[1], "-p")) {
+ opt_p = 1;
+ argc--;
+ argv++;
+ } else if (argc > 1 && !strcmp(argv[1], "-l")) {
+ opt_l = 1;
+ argc--;
+ argv++;
+ }
+
+ if (argc < 2 || argc > 3)
+ die("%s", usage);
+
+ if (argc == 2) {
+ struct url_info info;
+ url1 = url_normalize(argv[1], &info);
+ if (!url1)
+ return 1;
+ if (opt_p)
+ printf("%s\n", url1);
+ if (opt_l)
+ printf("%u\n", (unsigned)info.url_len);
+ return 0;
+ }
+
+ if (opt_p || opt_l)
+ die("%s", usage);
+
+ url1 = url_normalize(argv[1], NULL);
+ url2 = url_normalize(argv[2], NULL);
+ return (url1 && url2 && !strcmp(url1, url2)) ? 0 : 1;
+}
--- /dev/null
+#include "cache.h"
+
+int main(int argc, char **argv)
+{
+ int i;
+ for (i = 2; i < argc; i++) {
+ if (argv[i][0] == '/')
+ die("Forward slash is not allowed at the beginning of the\n"
+ "pattern because Windows does not like it. Use `XXX/' instead.");
+ else if (!strncmp(argv[i], "XXX/", 4))
+ argv[i] += 3;
+ }
+ if (!strcmp(argv[1], "wildmatch"))
+ return !!wildmatch(argv[3], argv[2], WM_PATHNAME, NULL);
+ else if (!strcmp(argv[1], "iwildmatch"))
+ return !!wildmatch(argv[3], argv[2], WM_PATHNAME | WM_CASEFOLD, NULL);
+ else if (!strcmp(argv[1], "pathmatch"))
+ return !!wildmatch(argv[3], argv[2], 0, NULL);
+ else
+ return 1;
+}
# Older versions of perforce were available compiled natively for
# cygwin. Those do not accept native windows paths, so make sure
# not to convert for them.
-native_path() {
+native_path () {
path="$1" &&
if test_have_prereq CYGWIN && ! p4 -V | grep -q CYGWIN
then
# Attention: This function is not safe against time offset updates
# at runtime (e.g. via NTP). The 'clock_gettime(CLOCK_MONOTONIC)'
# function could fix that but it is not in Python until 3.3.
-time_in_seconds() {
- python -c 'import time; print int(time.time())'
+time_in_seconds () {
+ (cd / && "$PYTHON_PATH" -c 'import time; print(int(time.time()))')
}
# Try to pick a unique port: guess a large number, then hope
pidfile="$TRASH_DIRECTORY/p4d.pid"
# Sometimes "prove" seems to hang on exit because p4d is still running
-cleanup() {
+cleanup () {
if test -f "$pidfile"
then
kill -9 $(cat "$pidfile") 2>/dev/null && exit 255
TMPDIR="$TRASH_DIRECTORY"
export TMPDIR
-start_p4d() {
+start_p4d () {
mkdir -p "$db" "$cli" "$git" &&
rm -f "$pidfile" &&
(
return 0
}
-p4_add_user() {
+p4_add_user () {
name=$1 &&
p4 user -f -i <<-EOF
User: $name
EOF
}
-retry_until_success() {
+p4_add_job () {
+ p4 job -f -i <<-EOF
+ Job: $1
+ Status: open
+ User: dummy
+ Description:
+ EOF
+}
+
+retry_until_success () {
timeout=$(($(time_in_seconds) + $RETRY_TIMEOUT))
until "$@" 2>/dev/null || test $(time_in_seconds) -gt $timeout
do
done
}
-retry_until_fail() {
+retry_until_fail () {
timeout=$(($(time_in_seconds) + $RETRY_TIMEOUT))
until ! "$@" 2>/dev/null || test $(time_in_seconds) -gt $timeout
do
done
}
-kill_p4d() {
+kill_p4d () {
pid=$(cat "$pidfile")
retry_until_fail kill $pid
retry_until_fail kill -9 $pid
retry_until_fail kill -9 $watchdog_pid
}
-cleanup_git() {
+cleanup_git () {
retry_until_success rm -r "$git"
test_must_fail test -d "$git" &&
retry_until_success mkdir "$git"
}
-marshal_dump() {
+marshal_dump () {
what=$1 &&
line=${2:-1} &&
cat >"$TRASH_DIRECTORY/marshal-dump.py" <<-EOF &&
import marshal
import sys
+ instream = getattr(sys.stdin, 'buffer', sys.stdin)
for i in range($line):
- d = marshal.load(sys.stdin)
- print d['$what']
+ d = marshal.load(instream)
+ print(d[b'$what'].decode('utf-8'))
EOF
"$PYTHON_PATH" "$TRASH_DIRECTORY/marshal-dump.py"
}
#
# Construct a client with this list of View lines
#
-client_view() {
+client_view () {
(
cat <<-EOF &&
Client: $P4CLIENT
) | p4 client -i
}
-is_cli_file_writeable() {
+is_cli_file_writeable () {
# cygwin version of p4 does not set read-only attr,
# will be marked 444 but -w is true
file="$1" &&
. ./test-lib.sh
-remotes_git_svn=remotes/git""-svn
-git_svn_id=git""-svn-id
-
if test -n "$NO_SVN_TESTS"
then
skip_all='skipping git svn tests, NO_SVN_TESTS defined'
SetEnv GIT_HTTP_EXPORT_ALL
Header set Set-Cookie name=value
</LocationMatch>
+<LocationMatch /smart_headers/>
+ SetEnv GIT_EXEC_PATH ${GIT_EXEC_PATH}
+ SetEnv GIT_HTTP_EXPORT_ALL
+</LocationMatch>
ScriptAliasMatch /smart_*[^/]*/(.*) ${GIT_EXEC_PATH}/git-http-backend/$1
ScriptAlias /broken_smart/ broken-smart-http.sh/
ScriptAlias /error/ error.sh/
RewriteRule ^/loop-redir/x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-(.*) /$1 [R=302]
RewriteRule ^/loop-redir/(.*)$ /loop-redir/x-$1 [R=302]
+# Apache 2.2 does not understand <RequireAll>, so we use RewriteCond.
+# And as RewriteCond does not allow testing for non-matches, we match
+# the desired case first (one has abra, two has cadabra), and let it
+# pass by marking the RewriteRule as [L] ("last rule, do not process
+# any other matching RewriteRules after this"), and then have another
+# RewriteRule that matches all other cases and lets them fail via '[F]',
+# "fail the request".
+RewriteCond %{HTTP:x-magic-one} =abra
+RewriteCond %{HTTP:x-magic-two} =cadabra
+RewriteRule ^/smart_headers/.* - [L]
+RewriteRule ^/smart_headers/.* - [F]
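+# For illustration (the client command below is hypothetical and not part
+# of this configuration): a request that sends both headers, e.g.
+#   curl -H "x-magic-one: abra" -H "x-magic-two: cadabra" "$HTTPD_URL/smart_headers/repo.git/info/refs"
+# matches the [L] rule and is served normally, while a request missing
+# either header falls through to the [F] rule and receives 403 Forbidden.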
+
<IfDefine SSL>
LoadModule ssl_module modules/mod_ssl.so
--- /dev/null
+#!/bin/sh
+
+test_description='Tests rebase -i performance'
+. ./perf-lib.sh
+
+test_perf_default_repo
+
+# This commit merges a sufficiently long topic branch for reasonable
+# performance testing
+branch_merge=ba5312da19c6fdb6c6747d479f58932aae6e900c^{commit}
+export branch_merge
+
+git rev-parse --verify $branch_merge >/dev/null 2>&1 || {
+ skip_all='skipping because $branch_merge was not found'
+ test_done
+}
+
+write_script swap-first-two.sh <<\EOF
+case "$1" in
+*/COMMIT_EDITMSG)
+ mv "$1" "$1".bak &&
+ sed -e '1{h;d}' -e 2G <"$1".bak >"$1"
+ ;;
+esac
+EOF
+
+test_expect_success 'setup' '
+ git config core.editor "\"$PWD"/swap-first-two.sh\" &&
+ git checkout -f $branch_merge^2
+'
+
+test_perf 'rebase -i' '
+ git rebase -i $branch_merge^
+'
+
+test_done
error "bug in the test script: not 2 parameters to test-create-repo"
repo="$1"
source="$2"
- source_git=$source/$(cd "$source" && git rev-parse --git-dir)
+ source_git="$(git -C "$source" rev-parse --git-dir)"
+ objects_dir="$(git -C "$source" rev-parse --git-path objects)"
mkdir -p "$repo/.git"
(
- cd "$repo/.git" &&
- { cp -Rl "$source_git/objects" . 2>/dev/null ||
- cp -R "$source_git/objects" .; } &&
+ { cp -Rl "$objects_dir" "$repo/.git/" 2>/dev/null ||
+ cp -R "$objects_dir" "$repo/.git/"; } &&
for stuff in "$source_git"/*; do
case "$stuff" in
- */objects|*/hooks|*/config)
+ */objects|*/hooks|*/config|*/commondir)
;;
*)
- cp -R "$stuff" . || exit 1
+ cp -R "$stuff" "$repo/.git/" || exit 1
;;
esac
done &&
- cd .. &&
- git init -q &&
+ cd "$repo" &&
+ git init -q && {
+ test_have_prereq SYMLINKS ||
+ git config core.symlinks false
+ } &&
mv .git/hooks .git/hooks-disabled 2>/dev/null
) || error "failed to copy repository '$source' to '$repo'"
}
}
check_sub_test_lib_test_err () {
- name="$1" # stdin is the expected output output from the test
+ name="$1" # stdin is the expected output from the test
	# expected error output is in descriptor 3
(
cd "$name" &&
test_path_is_dir realgitdir/refs
'
+# Tests for the hidden file attribute on Windows
+is_hidden () {
+ # Use the output of `attrib`, ignore the absolute path
+ case "$(attrib "$1")" in *H*?:*) return 0;; esac
+ return 1
+}
+
+test_expect_success MINGW '.git hidden' '
+ rm -rf newdir &&
+ (
+ unset GIT_DIR GIT_WORK_TREE
+ mkdir newdir &&
+ cd newdir &&
+ git init &&
+ is_hidden .git
+ ) &&
+ check_config newdir/.git false unset
+'
+
+test_expect_success MINGW 'bare git dir not hidden' '
+ rm -rf newdir &&
+ (
+ unset GIT_DIR GIT_WORK_TREE GIT_CONFIG
+ mkdir newdir &&
+ cd newdir &&
+ git --bare init
+ ) &&
+ ! is_hidden newdir
+'
+
test_done
compare_files () {
tr '\015\000' QN <"$1" >"$1".expect &&
- tr '\015\000' QN <"$2" >"$2".actual &&
+ tr '\015\000' QN <"$2" | tr -d 'Z' >"$2".actual &&
test_cmp "$1".expect "$2".actual &&
rm "$1".expect "$2".actual
}
create_NNO_files () {
for crlf in false true input
do
- for attr in "" auto text -text lf crlf
+ for attr in "" auto text -text
do
- pfx=NNO_${crlf}_attr_${attr} &&
- cp CRLF_mix_LF ${pfx}_LF.txt &&
- cp CRLF_mix_LF ${pfx}_CRLF.txt &&
- cp CRLF_mix_LF ${pfx}_CRLF_mix_LF.txt &&
- cp CRLF_mix_LF ${pfx}_LF_mix_CR.txt &&
- cp CRLF_mix_LF ${pfx}_CRLF_nul.txt
+ for aeol in "" lf crlf
+ do
+ pfx=NNO_attr_${attr}_aeol_${aeol}_${crlf}
+ cp CRLF_mix_LF ${pfx}_LF.txt &&
+ cp CRLF_mix_LF ${pfx}_CRLF.txt &&
+ cp CRLF_mix_LF ${pfx}_CRLF_mix_LF.txt &&
+ cp CRLF_mix_LF ${pfx}_LF_mix_CR.txt &&
+ cp CRLF_mix_LF ${pfx}_CRLF_nul.txt
+ done
done
done
}
}
commit_chk_wrnNNO () {
- crlf=$1
- attr=$2
- lfwarn=$3
- crlfwarn=$4
- lfmixcrlf=$5
- lfmixcr=$6
- crlfnul=$7
- pfx=NNO_${crlf}_attr_${attr}
+ attr=$1 ; shift
+ aeol=$1 ; shift
+ crlf=$1 ; shift
+ lfwarn=$1 ; shift
+ crlfwarn=$1 ; shift
+ lfmixcrlf=$1 ; shift
+ lfmixcr=$1 ; shift
+ crlfnul=$1 ; shift
+ pfx=NNO_attr_${attr}_aeol_${aeol}_${crlf}
#Commit files on top of existing file
- create_gitattributes "$attr" &&
+ create_gitattributes "$attr" $aeol &&
for f in LF CRLF CRLF_mix_LF LF_mix_CR CRLF_nul
do
fname=${pfx}_$f.txt &&
cp $f $fname &&
+ printf Z >>"$fname" &&
git -c core.autocrlf=$crlf add $fname 2>/dev/null &&
git -c core.autocrlf=$crlf commit -m "commit_$fname" $fname >"${pfx}_$f.err" 2>&1
done
test_expect_success "commit NNO files crlf=$crlf attr=$attr LF" '
check_warning "$lfwarn" ${pfx}_LF.err
'
- test_expect_success "commit NNO files crlf=$crlf attr=$attr CRLF" '
+ test_expect_success "commit NNO files attr=$attr aeol=$aeol crlf=$crlf CRLF" '
check_warning "$crlfwarn" ${pfx}_CRLF.err
'
- test_expect_success "commit NNO files crlf=$crlf attr=$attr CRLF_mix_LF" '
+ test_expect_success "commit NNO files attr=$attr aeol=$aeol crlf=$crlf CRLF_mix_LF" '
check_warning "$lfmixcrlf" ${pfx}_CRLF_mix_LF.err
'
- test_expect_success "commit NNO files crlf=$crlf attr=$attr LF_mix_cr" '
+ test_expect_success "commit NNO files attr=$attr aeol=$aeol crlf=$crlf LF_mix_cr" '
check_warning "$lfmixcr" ${pfx}_LF_mix_CR.err
'
- test_expect_success "commit NNO files crlf=$crlf attr=$attr CRLF_nul" '
+ test_expect_success "commit NNO files attr=$attr aeol=$aeol crlf=$crlf CRLF_nul" '
check_warning "$crlfnul" ${pfx}_CRLF_nul.err
'
}
# construct the attr/ returned by git ls-files --eol
# Take none (=empty), one or two args
+# convert.c: eol=XX overrides text=auto
attr_ascii () {
case $1,$2 in
-text,*) echo "-text" ;;
text,lf) echo "text eol=lf" ;;
text,crlf) echo "text eol=crlf" ;;
auto,) echo "text=auto" ;;
- auto,lf) echo "text=auto eol=lf" ;;
- auto,crlf) echo "text=auto eol=crlf" ;;
+ auto,lf) echo "text eol=lf" ;;
+ auto,crlf) echo "text eol=crlf" ;;
lf,) echo "text eol=lf" ;;
crlf,) echo "text eol=crlf" ;;
,) echo "" ;;
}
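# A quick illustration of the mapping above (arguments are an attr/aeol pair):
#	attr_ascii auto crlf	# prints "text eol=crlf" (eol overrides text=auto)
#	attr_ascii -text ""	# prints "-text"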
check_in_repo_NNO () {
- crlf=$1
- attr=$2
- lfname=$3
- crlfname=$4
- lfmixcrlf=$5
- lfmixcr=$6
- crlfnul=$7
- pfx=NNO_${crlf}_attr_${attr}_
- test_expect_success "compare_files $lfname ${pfx}LF.txt" '
- compare_files $lfname ${pfx}LF.txt
+ attr=$1 ; shift
+ aeol=$1 ; shift
+ crlf=$1 ; shift
+ lfname=$1 ; shift
+ crlfname=$1 ; shift
+ lfmixcrlf=$1 ; shift
+ lfmixcr=$1 ; shift
+ crlfnul=$1 ; shift
+ pfx=NNO_attr_${attr}_aeol_${aeol}_${crlf}
+ test_expect_success "compare_files $lfname ${pfx}_LF.txt" '
+ compare_files $lfname ${pfx}_LF.txt
'
- test_expect_success "compare_files $crlfname ${pfx}CRLF.txt" '
- compare_files $crlfname ${pfx}CRLF.txt
+ test_expect_success "compare_files $crlfname ${pfx}_CRLF.txt" '
+ compare_files $crlfname ${pfx}_CRLF.txt
'
- test_expect_success "compare_files $lfmixcrlf ${pfx}CRLF_mix_LF.txt" '
- compare_files $lfmixcrlf ${pfx}CRLF_mix_LF.txt
+ test_expect_success "compare_files $lfmixcrlf ${pfx}_CRLF_mix_LF.txt" '
+ compare_files $lfmixcrlf ${pfx}_CRLF_mix_LF.txt
'
- test_expect_success "compare_files $lfmixcr ${pfx}LF_mix_CR.txt" '
- compare_files $lfmixcr ${pfx}LF_mix_CR.txt
+ test_expect_success "compare_files $lfmixcr ${pfx}_LF_mix_CR.txt" '
+ compare_files $lfmixcr ${pfx}_LF_mix_CR.txt
'
- test_expect_success "compare_files $crlfnul ${pfx}CRLF_nul.txt" '
- compare_files $crlfnul ${pfx}CRLF_nul.txt
+ test_expect_success "compare_files $crlfnul ${pfx}_CRLF_nul.txt" '
+ compare_files $crlfnul ${pfx}_CRLF_nul.txt
'
}
lfmixcrlf=$1 ; shift
lfmixcr=$1 ; shift
crlfnul=$1 ; shift
- create_gitattributes "$attr" "$ident" &&
+ create_gitattributes "$attr" $ident $aeol &&
git config core.autocrlf $crlf &&
pfx=eol_${ceol}_crlf_${crlf}_attr_${attr}_ &&
for f in LF CRLF LF_mix_CR CRLF_mix_LF LF_nul
fi
done
- test_expect_success "ls-files --eol attr=$attr $ident $aeol core.autocrlf=$crlf core.eol=$ceol" '
+ test_expect_success "ls-files --eol attr=$attr $ident aeol=$aeol core.autocrlf=$crlf core.eol=$ceol" '
test_when_finished "rm expect actual" &&
sort <<-EOF >expect &&
i/crlf w/$(stats_ascii $crlfname) attr/$(attr_ascii $attr $aeol) crlf_false_attr__CRLF.txt
sort >actual &&
test_cmp expect actual
'
- test_expect_success "checkout $ident $attr $aeol core.autocrlf=$crlf core.eol=$ceol file=LF" "
+ test_expect_success "checkout attr=$attr $ident aeol=$aeol core.autocrlf=$crlf core.eol=$ceol file=LF" "
compare_ws_file $pfx $lfname crlf_false_attr__LF.txt
"
- test_expect_success "checkout $ident $attr $aeol core.autocrlf=$crlf core.eol=$ceol file=CRLF" "
+ test_expect_success "checkout attr=$attr $ident aeol=$aeol core.autocrlf=$crlf core.eol=$ceol file=CRLF" "
compare_ws_file $pfx $crlfname crlf_false_attr__CRLF.txt
"
- test_expect_success "checkout $ident $attr $aeol core.autocrlf=$crlf core.eol=$ceol file=CRLF_mix_LF" "
+ test_expect_success "checkout attr=$attr $ident aeol=$aeol core.autocrlf=$crlf core.eol=$ceol file=CRLF_mix_LF" "
compare_ws_file $pfx $lfmixcrlf crlf_false_attr__CRLF_mix_LF.txt
"
- test_expect_success "checkout $ident $attr $aeol core.autocrlf=$crlf core.eol=$ceol file=LF_mix_CR" "
+ test_expect_success "checkout attr=$attr $ident aeol=$aeol core.autocrlf=$crlf core.eol=$ceol file=LF_mix_CR" "
compare_ws_file $pfx $lfmixcr crlf_false_attr__LF_mix_CR.txt
"
- test_expect_success "checkout $ident $attr $aeol core.autocrlf=$crlf core.eol=$ceol file=LF_nul" "
+ test_expect_success "checkout attr=$attr $ident aeol=$aeol core.autocrlf=$crlf core.eol=$ceol file=LF_nul" "
compare_ws_file $pfx $crlfnul crlf_false_attr__LF_nul.txt
"
}
commit_check_warn input "crlf" "LF_CRLF" "" "LF_CRLF" "LF_CRLF" ""
'
-# attr LF CRLF CRLFmixLF LF_mix_CR CRLFNUL
-commit_chk_wrnNNO false "" "" "" "" "" ""
-commit_chk_wrnNNO true "" "LF_CRLF" "" "" "" ""
-commit_chk_wrnNNO input "" "" "" "" "" ""
-
+# attr LF CRLF CRLFmixLF LF_mix_CR CRLFNUL
+commit_chk_wrnNNO "" "" false "" "" "" "" ""
+commit_chk_wrnNNO "" "" true LF_CRLF "" "" "" ""
+commit_chk_wrnNNO "" "" input "" "" "" "" ""
-commit_chk_wrnNNO false "auto" "$WILC" "$WICL" "$WAMIX" "" ""
-commit_chk_wrnNNO true "auto" "LF_CRLF" "" "LF_CRLF" "" ""
-commit_chk_wrnNNO input "auto" "" "CRLF_LF" "CRLF_LF" "" ""
+commit_chk_wrnNNO "auto" "" false "$WILC" "$WICL" "$WAMIX" "" ""
+commit_chk_wrnNNO "auto" "" true LF_CRLF "" LF_CRLF "" ""
+commit_chk_wrnNNO "auto" "" input "" CRLF_LF CRLF_LF "" ""
-commit_chk_wrnNNO false "text" "$WILC" "$WICL" "$WAMIX" "$WILC" "$WICL"
-commit_chk_wrnNNO true "text" "LF_CRLF" "" "LF_CRLF" "LF_CRLF" ""
-commit_chk_wrnNNO input "text" "" "CRLF_LF" "CRLF_LF" "" "CRLF_LF"
-
-commit_chk_wrnNNO false "-text" "" "" "" "" ""
-commit_chk_wrnNNO true "-text" "" "" "" "" ""
-commit_chk_wrnNNO input "-text" "" "" "" "" ""
-
-commit_chk_wrnNNO false "lf" "" "CRLF_LF" "CRLF_LF" "" "CRLF_LF"
-commit_chk_wrnNNO true "lf" "" "CRLF_LF" "CRLF_LF" "" "CRLF_LF"
-commit_chk_wrnNNO input "lf" "" "CRLF_LF" "CRLF_LF" "" "CRLF_LF"
+for crlf in true false input
+do
+ commit_chk_wrnNNO -text "" $crlf "" "" "" "" ""
+ commit_chk_wrnNNO -text lf $crlf "" "" "" "" ""
+ commit_chk_wrnNNO -text crlf $crlf "" "" "" "" ""
+ commit_chk_wrnNNO "" lf $crlf "" CRLF_LF CRLF_LF "" CRLF_LF
+ commit_chk_wrnNNO "" crlf $crlf LF_CRLF "" LF_CRLF LF_CRLF ""
+ commit_chk_wrnNNO auto lf $crlf "" CRLF_LF CRLF_LF "" CRLF_LF
+ commit_chk_wrnNNO auto crlf $crlf LF_CRLF "" LF_CRLF LF_CRLF ""
+ commit_chk_wrnNNO text lf $crlf "" CRLF_LF CRLF_LF "" CRLF_LF
+ commit_chk_wrnNNO text crlf $crlf LF_CRLF "" LF_CRLF LF_CRLF ""
+done
-commit_chk_wrnNNO false "crlf" "LF_CRLF" "" "LF_CRLF" "LF_CRLF" ""
-commit_chk_wrnNNO true "crlf" "LF_CRLF" "" "LF_CRLF" "LF_CRLF" ""
-commit_chk_wrnNNO input "crlf" "LF_CRLF" "" "LF_CRLF" "LF_CRLF" ""
+commit_chk_wrnNNO "text" "" false "$WILC" "$WICL" "$WAMIX" "$WILC" "$WICL"
+commit_chk_wrnNNO "text" "" true LF_CRLF "" LF_CRLF LF_CRLF ""
+commit_chk_wrnNNO "text" "" input "" CRLF_LF CRLF_LF "" CRLF_LF
test_expect_success 'create files cleanup' '
rm -f *.txt &&
check_files_in_repo input "-text" LF CRLF CRLF_mix_LF LF_mix_CR CRLF_nul
'
-# attr LF CRLF CRLF_mix_LF LF_mix_CR CRLFNUL
-check_in_repo_NNO false "" LF CRLF CRLF_mix_LF LF_mix_CR CRLF_nul
-check_in_repo_NNO true "" LF CRLF CRLF_mix_LF LF_mix_CR CRLF_nul
-check_in_repo_NNO input "" LF CRLF CRLF_mix_LF LF_mix_CR CRLF_nul
-
-check_in_repo_NNO false "auto" LF LF LF LF_mix_CR CRLF_nul
-check_in_repo_NNO true "auto" LF LF LF LF_mix_CR CRLF_nul
-check_in_repo_NNO input "auto" LF LF LF LF_mix_CR CRLF_nul
-
-check_in_repo_NNO false "text" LF LF LF LF_mix_CR LF_nul
-check_in_repo_NNO true "text" LF LF LF LF_mix_CR LF_nul
-check_in_repo_NNO input "text" LF LF LF LF_mix_CR LF_nul
-
-check_in_repo_NNO false "-text" LF CRLF CRLF_mix_LF LF_mix_CR CRLF_nul
-check_in_repo_NNO true "-text" LF CRLF CRLF_mix_LF LF_mix_CR CRLF_nul
-check_in_repo_NNO input "-text" LF CRLF CRLF_mix_LF LF_mix_CR CRLF_nul
-
-
+for crlf in true false input
+do
+ # attr aeol LF CRLF CRLF_mix_LF LF_mix_CR CRLFNUL
+ check_in_repo_NNO "" "" $crlf LF CRLF CRLF_mix_LF LF_mix_CR CRLF_nul
+ check_in_repo_NNO -text "" $crlf LF CRLF CRLF_mix_LF LF_mix_CR CRLF_nul
+ check_in_repo_NNO -text lf $crlf LF CRLF CRLF_mix_LF LF_mix_CR CRLF_nul
+ check_in_repo_NNO -text crlf $crlf LF CRLF CRLF_mix_LF LF_mix_CR CRLF_nul
+ check_in_repo_NNO auto "" $crlf LF LF LF LF_mix_CR CRLF_nul
+ check_in_repo_NNO auto lf $crlf LF LF LF LF_mix_CR LF_nul
+ check_in_repo_NNO auto crlf $crlf LF LF LF LF_mix_CR LF_nul
+ check_in_repo_NNO text "" $crlf LF LF LF LF_mix_CR LF_nul
+ check_in_repo_NNO text lf $crlf LF LF LF LF_mix_CR LF_nul
+ check_in_repo_NNO text crlf $crlf LF LF LF LF_mix_CR LF_nul
+done
################################################################################
# Check how files in the repo are changed when they are checked out
# How to read the table below:
fi
export CRLF_MIX_LF_CR MIX NL
-checkout_files "" "" "" false "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" "" "" false crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" "" "" false lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" "" "" false native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" "" "" input "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" "" "" input lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" "" "" true "" CRLF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" "" "" true crlf CRLF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" "" "" true lf CRLF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" "" "" true native CRLF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" ident "" false "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" ident "" false crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" ident "" false lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" ident "" false native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" ident "" input "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" ident "" input lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" ident "" true "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" ident "" true crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" ident "" true lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" ident "" true native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "auto" "" "" false "" $NL CRLF $MIX_CRLF_LF LF_mix_CR LF_nul
-checkout_files "auto" "" "" false crlf CRLF CRLF CRLF LF_mix_CR LF_nul
-checkout_files "auto" "" "" false lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "auto" "" "" false native $NL CRLF $MIX_CRLF_LF LF_mix_CR LF_nul
-checkout_files "auto" "" "" input "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "auto" "" "" input lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "auto" "" "" true "" CRLF CRLF CRLF LF_mix_CR LF_nul
-checkout_files "auto" "" "" true crlf CRLF CRLF CRLF LF_mix_CR LF_nul
-checkout_files "auto" "" "" true lf CRLF CRLF CRLF LF_mix_CR LF_nul
-checkout_files "auto" "" "" true native CRLF CRLF CRLF LF_mix_CR LF_nul
-checkout_files "auto" ident "" false "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "auto" ident "" false crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "auto" ident "" false lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "auto" ident "" false native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "auto" ident "" input "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "auto" ident "" input lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "auto" ident "" true "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "auto" ident "" true crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "auto" ident "" true lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "auto" ident "" true native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-
-for id in "" ident;
+# Same handling with and without ident
+for id in "" ident
do
- checkout_files "crlf" "$id" "" false "" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
- checkout_files "crlf" "$id" "" false crlf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
- checkout_files "crlf" "$id" "" false lf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
- checkout_files "crlf" "$id" "" false native CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
- checkout_files "crlf" "$id" "" input "" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
- checkout_files "crlf" "$id" "" input lf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
- checkout_files "crlf" "$id" "" true "" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
- checkout_files "crlf" "$id" "" true crlf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
- checkout_files "crlf" "$id" "" true lf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
- checkout_files "crlf" "$id" "" true native CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
- checkout_files "lf" "$id" "" false "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "lf" "$id" "" false crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "lf" "$id" "" false lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "lf" "$id" "" false native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "lf" "$id" "" input "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "lf" "$id" "" input lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "lf" "$id" "" true "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "lf" "$id" "" true crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "lf" "$id" "" true lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "lf" "$id" "" true native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "text" "$id" "" false "" $NL CRLF $MIX_CRLF_LF $MIX_LF_CR $LFNUL
- checkout_files "text" "$id" "" false crlf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
- checkout_files "text" "$id" "" false lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "text" "$id" "" false native $NL CRLF $MIX_CRLF_LF $MIX_LF_CR $LFNUL
- checkout_files "text" "$id" "" input "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "text" "$id" "" input lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "text" "$id" "" true "" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
- checkout_files "text" "$id" "" true crlf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
- checkout_files "text" "$id" "" true lf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
- checkout_files "text" "$id" "" true native CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
- checkout_files "-text" "$id" "" false "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "-text" "$id" "" false crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "-text" "$id" "" false lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "-text" "$id" "" false native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "-text" "$id" "" input "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "-text" "$id" "" input lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "-text" "$id" "" true "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "-text" "$id" "" true crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "-text" "$id" "" true lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
- checkout_files "-text" "$id" "" true native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ for ceol in lf crlf native
+ do
+ for crlf in true false input
+ do
+ # -text overrides core.autocrlf and core.eol
+ # text and eol=crlf or eol=lf override core.autocrlf and core.eol
+ checkout_files -text "$id" "" "$crlf" "$ceol" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files -text "$id" "lf" "$crlf" "$ceol" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files -text "$id" "crlf" "$crlf" "$ceol" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ # text
+ checkout_files text "$id" "lf" "$crlf" "$ceol" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files text "$id" "crlf" "$crlf" "$ceol" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ # currently the same as text, eol=XXX
+ checkout_files auto "$id" "lf" "$crlf" "$ceol" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files auto "$id" "crlf" "$crlf" "$ceol" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ done
+
+ # core.autocrlf false, different core.eol
+ checkout_files "" "$id" "" false "$ceol" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ # core.autocrlf true
+ checkout_files "" "$id" "" true "$ceol" CRLF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ # text: core.autocrlf = true overrides core.eol
+ checkout_files auto "$id" "" true "$ceol" CRLF CRLF CRLF LF_mix_CR LF_nul
+ checkout_files text "$id" "" true "$ceol" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ # text: core.autocrlf = input overrides core.eol
+ checkout_files text "$id" "" input "$ceol" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files auto "$id" "" input "$ceol" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ # text=auto + eol=XXX
+ done
+ # text: core.autocrlf=false uses core.eol
+ checkout_files text "$id" "" false crlf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ checkout_files text "$id" "" false lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+	# text: core.autocrlf=false and core.eol unset (or native) uses native eol
+ checkout_files text "$id" "" false "" $NL CRLF $MIX_CRLF_LF $MIX_LF_CR $LFNUL
+ checkout_files text "$id" "" false native $NL CRLF $MIX_CRLF_LF $MIX_LF_CR $LFNUL
+	# auto: core.autocrlf=false and core.eol unset (or native) uses native eol
+ checkout_files auto "$id" "" false "" $NL CRLF $MIX_CRLF_LF LF_mix_CR LF_nul
+ checkout_files auto "$id" "" false native $NL CRLF $MIX_CRLF_LF LF_mix_CR LF_nul
done
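# A minimal sketch of the precedence exercised above (file name is
# illustrative): with a .gitattributes entry "*.txt text eol=lf", the eol
# attribute wins over both core.autocrlf and core.eol, so
#	git -c core.autocrlf=true -c core.eol=crlf checkout -- file.txt
# still writes LF line endings to the working tree.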
# Should be the last test case: remove some files from the worktree
. ./test-lib.sh
-cat > expect << EOF
+cat >expect <<\EOF
usage: test-parse-options <options>
--yes get a boolean
-v, --verbose be verbose
-n, --dry-run dry run
-q, --quiet be quiet
+ --expect <string> expected output in the variable dump
EOF
test_expect_success 'test help' '
- test_must_fail test-parse-options -h > output 2> output.err &&
+ test_must_fail test-parse-options -h >output 2>output.err &&
test_must_be_empty output.err &&
test_i18ncmp expect output
'
mv expect expect.err
-cat >expect.template <<EOF
-boolean: 0
-integer: 0
-magnitude: 0
-timestamp: 0
-string: (not set)
-abbrev: 7
-verbose: 0
-quiet: no
-dry run: no
-file: (not set)
-EOF
-
-check() {
+check () {
what="$1" &&
shift &&
expect="$1" &&
shift &&
- sed "s/^$what .*/$what $expect/" <expect.template >expect &&
- test-parse-options $* >output 2>output.err &&
- test_must_be_empty output.err &&
- test_cmp expect output
-}
-
-check_i18n() {
- what="$1" &&
- shift &&
- expect="$1" &&
- shift &&
- sed "s/^$what .*/$what $expect/" <expect.template >expect &&
- test-parse-options $* >output 2>output.err &&
- test_must_be_empty output.err &&
- test_i18ncmp expect output
-}
-
-check_unknown() {
- case "$1" in
- --*)
- echo error: unknown option \`${1#--}\' >expect ;;
- -*)
- echo error: unknown switch \`${1#-}\' >expect ;;
- esac &&
- cat expect.err >>expect &&
- test_must_fail test-parse-options $* >output 2>output.err &&
- test_must_be_empty output &&
- test_cmp expect output.err
+ test-parse-options --expect="$what $expect" "$@"
}
check_unknown_i18n() {
check magnitude: 3221225472 -m 3g
'
-cat > expect << EOF
+cat >expect <<\EOF
boolean: 2
integer: 1729
magnitude: 16384
string: 123
abbrev: 7
verbose: 2
-quiet: no
+quiet: 0
dry run: yes
file: prefix/my.file
EOF
test_must_be_empty output.err
'
-cat > expect << EOF
+cat >expect <<\EOF
boolean: 2
integer: 1729
magnitude: 16384
string: 321
abbrev: 10
verbose: 2
-quiet: no
+quiet: 0
dry run: no
file: prefix/fi.le
EOF
test_expect_code 129 test-parse-options --file
'
-cat > expect << EOF
+cat >expect <<\EOF
boolean: 1
integer: 13
magnitude: 0
timestamp: 0
string: 123
abbrev: 7
-verbose: 0
-quiet: no
+verbose: -1
+quiet: 0
dry run: no
file: (not set)
arg 00: a1
test_expect_success 'intermingled arguments' '
test-parse-options a1 --string 123 b1 --boolean -j 13 -- --boolean \
- > output 2> output.err &&
+ >output 2>output.err &&
test_must_be_empty output.err &&
test_cmp expect output
'
-cat > expect << EOF
+cat >expect <<\EOF
boolean: 0
integer: 2
magnitude: 0
timestamp: 0
string: (not set)
abbrev: 7
-verbose: 0
-quiet: no
+verbose: -1
+quiet: 0
dry run: no
file: (not set)
EOF
test_expect_success 'unambiguously abbreviated option' '
- test-parse-options --int 2 --boolean --no-bo > output 2> output.err &&
+ test-parse-options --int 2 --boolean --no-bo >output 2>output.err &&
test_must_be_empty output.err &&
test_cmp expect output
'
test_expect_success 'unambiguously abbreviated option with "="' '
- test-parse-options --int=2 > output 2> output.err &&
+ test-parse-options --int=2 >output 2>output.err &&
test_must_be_empty output.err &&
test_cmp expect output
'
test_expect_code 129 test-parse-options --strin 123
'
-cat > expect << EOF
+cat >expect <<\EOF
boolean: 0
integer: 0
magnitude: 0
timestamp: 0
string: 123
abbrev: 7
-verbose: 0
-quiet: no
+verbose: -1
+quiet: 0
dry run: no
file: (not set)
EOF
test_expect_success 'non ambiguous option (after two options it abbreviates)' '
- test-parse-options --st 123 > output 2> output.err &&
+ test-parse-options --st 123 >output 2>output.err &&
test_must_be_empty output.err &&
test_cmp expect output
'
-cat > typo.err << EOF
-error: did you mean \`--boolean\` (with two dashes ?)
+cat >typo.err <<\EOF
+error: did you mean `--boolean` (with two dashes ?)
EOF
test_expect_success 'detect possible typos' '
- test_must_fail test-parse-options -boolean > output 2> output.err &&
+ test_must_fail test-parse-options -boolean >output 2>output.err &&
test_must_be_empty output &&
test_cmp typo.err output.err
'
-cat > typo.err << EOF
-error: did you mean \`--ambiguous\` (with two dashes ?)
+cat >typo.err <<\EOF
+error: did you mean `--ambiguous` (with two dashes ?)
EOF
test_expect_success 'detect possible typos' '
- test_must_fail test-parse-options -ambiguous > output 2> output.err &&
+ test_must_fail test-parse-options -ambiguous >output 2>output.err &&
test_must_be_empty output &&
test_cmp typo.err output.err
'
-cat > expect <<EOF
+cat >expect <<\EOF
boolean: 0
integer: 0
magnitude: 0
timestamp: 0
string: (not set)
abbrev: 7
-verbose: 0
-quiet: no
+verbose: -1
+quiet: 0
dry run: no
file: (not set)
arg 00: --quux
EOF
test_expect_success 'keep some options as arguments' '
- test-parse-options --quux > output 2> output.err &&
+ test-parse-options --quux >output 2>output.err &&
test_must_be_empty output.err &&
- test_cmp expect output
+ test_cmp expect output
'
-cat > expect <<EOF
+cat >expect <<\EOF
boolean: 0
integer: 0
magnitude: 0
timestamp: 1
string: (not set)
abbrev: 7
-verbose: 0
-quiet: yes
+verbose: -1
+quiet: 1
dry run: no
file: (not set)
arg 00: foo
test_expect_success 'OPT_DATE() works' '
test-parse-options -t "1970-01-01 00:00:01 +0000" \
- foo -q > output 2> output.err &&
+ foo -q >output 2>output.err &&
test_must_be_empty output.err &&
test_cmp expect output
'
-cat > expect <<EOF
+cat >expect <<\EOF
Callback: "four", 0
boolean: 5
integer: 4
timestamp: 0
string: (not set)
abbrev: 7
-verbose: 0
-quiet: no
+verbose: -1
+quiet: 0
dry run: no
file: (not set)
EOF
test_expect_success 'OPT_CALLBACK() and OPT_BIT() work' '
- test-parse-options --length=four -b -4 > output 2> output.err &&
+ test-parse-options --length=four -b -4 >output 2>output.err &&
test_must_be_empty output.err &&
test_cmp expect output
'
-cat > expect <<EOF
-Callback: "not set", 1
-EOF
+>expect
test_expect_success 'OPT_CALLBACK() and callback errors work' '
- test_must_fail test-parse-options --no-length > output 2> output.err &&
+ test_must_fail test-parse-options --no-length >output 2>output.err &&
test_i18ncmp expect output &&
test_i18ncmp expect.err output.err
'
-cat > expect <<EOF
+cat >expect <<\EOF
boolean: 1
integer: 23
magnitude: 0
timestamp: 0
string: (not set)
abbrev: 7
-verbose: 0
-quiet: no
+verbose: -1
+quiet: 0
dry run: no
file: (not set)
EOF
test_expect_success 'OPT_BIT() and OPT_SET_INT() work' '
- test-parse-options --set23 -bbbbb --no-or4 > output 2> output.err &&
+ test-parse-options --set23 -bbbbb --no-or4 >output 2>output.err &&
test_must_be_empty output.err &&
test_cmp expect output
'
test_expect_success 'OPT_NEGBIT() and OPT_SET_INT() work' '
- test-parse-options --set23 -bbbbb --neg-or4 > output 2> output.err &&
+ test-parse-options --set23 -bbbbb --neg-or4 >output 2>output.err &&
test_must_be_empty output.err &&
test_cmp expect output
'
-cat > expect <<EOF
+cat >expect <<\EOF
boolean: 6
integer: 0
magnitude: 0
timestamp: 0
string: (not set)
abbrev: 7
-verbose: 0
-quiet: no
+verbose: -1
+quiet: 0
dry run: no
file: (not set)
EOF
test_expect_success 'OPT_BIT() works' '
- test-parse-options -bb --or4 > output 2> output.err &&
+ test-parse-options -bb --or4 >output 2>output.err &&
test_must_be_empty output.err &&
test_cmp expect output
'
test_expect_success 'OPT_NEGBIT() works' '
- test-parse-options -bb --no-neg-or4 > output 2> output.err &&
+ test-parse-options -bb --no-neg-or4 >output 2>output.err &&
test_must_be_empty output.err &&
test_cmp expect output
'
test_expect_success 'OPT_COUNTUP() with PARSE_OPT_NODASH works' '
- test-parse-options + + + + + + > output 2> output.err &&
+ test-parse-options + + + + + + >output 2>output.err &&
test_must_be_empty output.err &&
test_cmp expect output
'
-cat > expect <<EOF
+cat >expect <<\EOF
boolean: 0
integer: 12345
magnitude: 0
timestamp: 0
string: (not set)
abbrev: 7
-verbose: 0
-quiet: no
+verbose: -1
+quiet: 0
dry run: no
file: (not set)
EOF
test_expect_success 'OPT_NUMBER_CALLBACK() works' '
- test-parse-options -12345 > output 2> output.err &&
+ test-parse-options -12345 >output 2>output.err &&
test_must_be_empty output.err &&
test_cmp expect output
'
-cat >expect <<EOF
+cat >expect <<\EOF
boolean: 0
integer: 0
magnitude: 0
timestamp: 0
string: (not set)
abbrev: 7
-verbose: 0
-quiet: no
+verbose: -1
+quiet: 0
dry run: no
file: (not set)
EOF
test_cmp expect output
'
-cat >>expect <<'EOF'
+cat >>expect <<\EOF
list: foo
list: bar
list: baz
test_cmp expect output
'
+cat >expect <<\EOF
+boolean: 0
+integer: 0
+magnitude: 0
+timestamp: 0
+string: (not set)
+abbrev: 7
+verbose: -1
+quiet: 3
+dry run: no
+file: (not set)
+EOF
+
+test_expect_success 'multiple quiet levels' '
+ test-parse-options -q -q -q >output 2>output.err &&
+ test_must_be_empty output.err &&
+ test_cmp expect output
+'
+
+cat >expect <<\EOF
+boolean: 0
+integer: 0
+magnitude: 0
+timestamp: 0
+string: (not set)
+abbrev: 7
+verbose: 3
+quiet: 0
+dry run: no
+file: (not set)
+EOF
+
+test_expect_success 'multiple verbose levels' '
+ test-parse-options -v -v -v >output 2>output.err &&
+ test_must_be_empty output.err &&
+ test_cmp expect output
+'
+
+cat >expect <<\EOF
+boolean: 0
+integer: 0
+magnitude: 0
+timestamp: 0
+string: (not set)
+abbrev: 7
+verbose: -1
+quiet: 0
+dry run: no
+file: (not set)
+EOF
+
+test_expect_success '--no-quiet sets --quiet to 0' '
+ test-parse-options --no-quiet >output 2>output.err &&
+ test_must_be_empty output.err &&
+ test_cmp expect output
+'
+
+cat >expect <<\EOF
+boolean: 0
+integer: 0
+magnitude: 0
+timestamp: 0
+string: (not set)
+abbrev: 7
+verbose: -1
+quiet: 0
+dry run: no
+file: (not set)
+EOF
+
+test_expect_success '--no-quiet resets multiple -q to 0' '
+ test-parse-options -q -q -q --no-quiet >output 2>output.err &&
+ test_must_be_empty output.err &&
+ test_cmp expect output
+'
+
+cat >expect <<\EOF
+boolean: 0
+integer: 0
+magnitude: 0
+timestamp: 0
+string: (not set)
+abbrev: 7
+verbose: 0
+quiet: 0
+dry run: no
+file: (not set)
+EOF
+
+test_expect_success '--no-verbose sets verbose to 0' '
+ test-parse-options --no-verbose >output 2>output.err &&
+ test_must_be_empty output.err &&
+ test_cmp expect output
+'
+
+cat >expect <<\EOF
+boolean: 0
+integer: 0
+magnitude: 0
+timestamp: 0
+string: (not set)
+abbrev: 7
+verbose: 0
+quiet: 0
+dry run: no
+file: (not set)
+EOF
+
+test_expect_success '--no-verbose resets multiple verbose to 0' '
+ test-parse-options -v -v -v --no-verbose >output 2>output.err &&
+ test_must_be_empty output.err &&
+ test_cmp expect output
+'
+
test_done
"test \"\$(test-path-utils relative_path '$1' '$2')\" = '$expected'"
}
+test_submodule_relative_url() {
+ test_expect_success "test_submodule_relative_url: $1 $2 $3 => $4" "
+ actual=\$(git submodule--helper resolve-relative-url-test '$1' '$2' '$3') &&
+ test \"\$actual\" = '$4'
+ "
+}
+
test_git_path() {
test_expect_success "git-path $1 $2 => $3" "
$1 git rev-parse --git-path $2 >actual &&
test_git_path GIT_COMMON_DIR=bar packed-refs bar/packed-refs
test_git_path GIT_COMMON_DIR=bar shallow bar/shallow
+# In the tests below, the distinction between $PWD and $(pwd) is important:
+# on Windows, $PWD is POSIX style (/c/foo), $(pwd) has drive letter (c:/foo).
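+# For example, in a test running from C:\work (values are illustrative; the
+# test framework arranges for pwd to print the drive-letter form on Windows):
+#	echo "$PWD"	# /c/work
+#	echo "$(pwd)"	# c:/work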
+
+test_submodule_relative_url "../" "../foo" "../submodule" "../../submodule"
+test_submodule_relative_url "../" "../foo/bar" "../submodule" "../../foo/submodule"
+test_submodule_relative_url "../" "../foo/submodule" "../submodule" "../../foo/submodule"
+test_submodule_relative_url "../" "./foo" "../submodule" "../submodule"
+test_submodule_relative_url "../" "./foo/bar" "../submodule" "../foo/submodule"
+test_submodule_relative_url "../../../" "../foo/bar" "../sub/a/b/c" "../../../../foo/sub/a/b/c"
+test_submodule_relative_url "../" "$PWD/addtest" "../repo" "$(pwd)/repo"
+test_submodule_relative_url "../" "foo/bar" "../submodule" "../foo/submodule"
+test_submodule_relative_url "../" "foo" "../submodule" "../submodule"
+
+test_submodule_relative_url "(null)" "../foo/bar" "../sub/a/b/c" "../foo/sub/a/b/c"
+test_submodule_relative_url "(null)" "../foo/bar" "../submodule" "../foo/submodule"
+test_submodule_relative_url "(null)" "../foo/submodule" "../submodule" "../foo/submodule"
+test_submodule_relative_url "(null)" "../foo" "../submodule" "../submodule"
+test_submodule_relative_url "(null)" "./foo/bar" "../submodule" "foo/submodule"
+test_submodule_relative_url "(null)" "./foo" "../submodule" "submodule"
+test_submodule_relative_url "(null)" "//somewhere else/repo" "../subrepo" "//somewhere else/subrepo"
+test_submodule_relative_url "(null)" "$PWD/subsuper_update_r" "../subsubsuper_update_r" "$(pwd)/subsubsuper_update_r"
+test_submodule_relative_url "(null)" "$PWD/super_update_r2" "../subsuper_update_r" "$(pwd)/subsuper_update_r"
+test_submodule_relative_url "(null)" "$PWD/." "../." "$(pwd)/."
+test_submodule_relative_url "(null)" "$PWD" "./." "$(pwd)/."
+test_submodule_relative_url "(null)" "$PWD/addtest" "../repo" "$(pwd)/repo"
+test_submodule_relative_url "(null)" "$PWD" "./å äö" "$(pwd)/å äö"
+test_submodule_relative_url "(null)" "$PWD/." "../submodule" "$(pwd)/submodule"
+test_submodule_relative_url "(null)" "$PWD/submodule" "../submodule" "$(pwd)/submodule"
+test_submodule_relative_url "(null)" "$PWD/home2/../remote" "../bundle1" "$(pwd)/home2/../bundle1"
+test_submodule_relative_url "(null)" "$PWD/submodule_update_repo" "./." "$(pwd)/submodule_update_repo/."
+test_submodule_relative_url "(null)" "file:///tmp/repo" "../subrepo" "file:///tmp/subrepo"
+test_submodule_relative_url "(null)" "foo/bar" "../submodule" "foo/submodule"
+test_submodule_relative_url "(null)" "foo" "../submodule" "submodule"
+test_submodule_relative_url "(null)" "helper:://hostname/repo" "../subrepo" "helper:://hostname/subrepo"
+test_submodule_relative_url "(null)" "ssh://hostname/repo" "../subrepo" "ssh://hostname/subrepo"
+test_submodule_relative_url "(null)" "ssh://hostname:22/repo" "../subrepo" "ssh://hostname:22/subrepo"
+test_submodule_relative_url "(null)" "user@host:path/to/repo" "../subrepo" "user@host:path/to/subrepo"
+test_submodule_relative_url "(null)" "user@host:repo" "../subrepo" "user@host:subrepo"
+
test_done
test_expect_success 'GIT_PREFIX for built-ins' '
# Use GIT_EXTERNAL_DIFF to test that the "diff" built-in
# receives the GIT_PREFIX variable.
- printf "dir/" >expect &&
- printf "#!/bin/sh\n" >diff &&
- printf "printf \"\$GIT_PREFIX\"" >>diff &&
- chmod +x diff &&
+ echo "dir/" >expect &&
+ write_script diff <<-\EOF &&
+ printf "%s\n" "$GIT_PREFIX"
+ EOF
(
cd dir &&
- printf "change" >two &&
+ echo "change" >two &&
GIT_EXTERNAL_DIFF=./diff git diff >../actual
git checkout -- two
) &&
--- /dev/null
+#!/bin/sh
+
+test_description='Test the core.hooksPath configuration variable'
+
+. ./test-lib.sh
+
+test_expect_success 'set up a pre-commit hook in core.hooksPath' '
+ mkdir -p .git/custom-hooks .git/hooks &&
+ write_script .git/custom-hooks/pre-commit <<-\EOF &&
+ echo CUSTOM >>actual
+ EOF
+ write_script .git/hooks/pre-commit <<-\EOF
+ echo NORMAL >>actual
+ EOF
+'
+
+test_expect_success 'Check that various forms of specifying core.hooksPath work' '
+ test_commit no_custom_hook &&
+ git config core.hooksPath .git/custom-hooks &&
+ test_commit have_custom_hook &&
+ git config core.hooksPath .git/custom-hooks/ &&
+ test_commit have_custom_hook_trailing_slash &&
+ git config core.hooksPath "$PWD/.git/custom-hooks" &&
+ test_commit have_custom_hook_abs_path &&
+ git config core.hooksPath "$PWD/.git/custom-hooks/" &&
+ test_commit have_custom_hook_abs_path_trailing_slash &&
+ cat >expect <<-\EOF &&
+ NORMAL
+ CUSTOM
+ CUSTOM
+ CUSTOM
+ CUSTOM
+ EOF
+ test_cmp expect actual
+'
+
+test_done
test_line_count = 3 actual
'
+test_expect_success 'reflog expire operates on symref not referrent' '
+ git branch -l the_symref &&
+ git branch -l referrent &&
+ git update-ref referrent HEAD &&
+ git symbolic-ref refs/heads/the_symref refs/heads/referrent &&
+ test_when_finished "rm -f .git/refs/heads/referrent.lock" &&
+ touch .git/refs/heads/referrent.lock &&
+ git reflog expire --expire=all the_symref
+'
+
test_done
cp .git/refs/heads/master .git/refs/heads/broken...ref &&
test_when_finished "rm -f .git/refs/heads/broken...ref" &&
git branch >output 2>error &&
- grep -e "broken\.\.\.ref" error &&
+ test_i18ngrep -e "ignoring ref with broken name refs/heads/broken\.\.\.ref" error &&
! grep -e "broken\.\.\.ref" output
'
test_when_finished "rm -f .git/refs/heads/broken...ref" &&
git branch shadow one &&
cp .git/refs/heads/master .git/refs/heads/broken...ref &&
- git symbolic-ref refs/tags/shadow refs/heads/broken...ref &&
-
+ printf "ref: refs/heads/broken...ref\n" >.git/refs/tags/shadow &&
+ test_when_finished "rm -f .git/refs/tags/shadow" &&
git rev-parse --verify one >expect &&
git rev-parse --verify shadow >actual 2>err &&
test_cmp expect actual &&
- test_i18ngrep "ignoring.*refs/tags/shadow" err
+ test_i18ngrep "ignoring dangling symref refs/tags/shadow" err
'
-test_expect_success 'update-ref --no-deref -d can delete reference to broken name' '
- git symbolic-ref refs/heads/badname refs/heads/broken...ref &&
+test_expect_success 'for-each-ref emits warnings for broken names' '
+ cp .git/refs/heads/master .git/refs/heads/broken...ref &&
+ test_when_finished "rm -f .git/refs/heads/broken...ref" &&
+ printf "ref: refs/heads/broken...ref\n" >.git/refs/heads/badname &&
test_when_finished "rm -f .git/refs/heads/badname" &&
- test_path_is_file .git/refs/heads/badname &&
- git update-ref --no-deref -d refs/heads/badname &&
- test_path_is_missing .git/refs/heads/badname
+ printf "ref: refs/heads/master\n" >.git/refs/heads/broken...symref &&
+ test_when_finished "rm -f .git/refs/heads/broken...symref" &&
+ git for-each-ref >output 2>error &&
+ ! grep -e "broken\.\.\.ref" output &&
+ ! grep -e "badname" output &&
+ ! grep -e "broken\.\.\.symref" output &&
+ test_i18ngrep "ignoring ref with broken name refs/heads/broken\.\.\.ref" error &&
+ test_i18ngrep "ignoring broken ref refs/heads/badname" error &&
+ test_i18ngrep "ignoring ref with broken name refs/heads/broken\.\.\.symref" error
'
test_expect_success 'update-ref -d can delete broken name' '
cp .git/refs/heads/master .git/refs/heads/broken...ref &&
test_when_finished "rm -f .git/refs/heads/broken...ref" &&
- git update-ref -d refs/heads/broken...ref &&
+ git update-ref -d refs/heads/broken...ref >output 2>error &&
+ test_must_be_empty output &&
+ test_must_be_empty error &&
+ git branch >output 2>error &&
+ ! grep -e "broken\.\.\.ref" error &&
+ ! grep -e "broken\.\.\.ref" output
+'
+
+test_expect_success 'branch -d can delete broken name' '
+ cp .git/refs/heads/master .git/refs/heads/broken...ref &&
+ test_when_finished "rm -f .git/refs/heads/broken...ref" &&
+ git branch -d broken...ref >output 2>error &&
+ test_i18ngrep "Deleted branch broken...ref (was broken)" output &&
+ test_must_be_empty error &&
git branch >output 2>error &&
! grep -e "broken\.\.\.ref" error &&
! grep -e "broken\.\.\.ref" output
'
+test_expect_success 'update-ref --no-deref -d can delete symref to broken name' '
+ cp .git/refs/heads/master .git/refs/heads/broken...ref &&
+ test_when_finished "rm -f .git/refs/heads/broken...ref" &&
+ printf "ref: refs/heads/broken...ref\n" >.git/refs/heads/badname &&
+ test_when_finished "rm -f .git/refs/heads/badname" &&
+ git update-ref --no-deref -d refs/heads/badname >output 2>error &&
+ test_path_is_missing .git/refs/heads/badname &&
+ test_must_be_empty output &&
+ test_must_be_empty error
+'
+
+test_expect_success 'branch -d can delete symref to broken name' '
+ cp .git/refs/heads/master .git/refs/heads/broken...ref &&
+ test_when_finished "rm -f .git/refs/heads/broken...ref" &&
+ printf "ref: refs/heads/broken...ref\n" >.git/refs/heads/badname &&
+ test_when_finished "rm -f .git/refs/heads/badname" &&
+ git branch -d badname >output 2>error &&
+ test_path_is_missing .git/refs/heads/badname &&
+ test_i18ngrep "Deleted branch badname (was refs/heads/broken\.\.\.ref)" output &&
+ test_must_be_empty error
+'
+
+test_expect_success 'update-ref --no-deref -d can delete dangling symref to broken name' '
+ printf "ref: refs/heads/broken...ref\n" >.git/refs/heads/badname &&
+ test_when_finished "rm -f .git/refs/heads/badname" &&
+ git update-ref --no-deref -d refs/heads/badname >output 2>error &&
+ test_path_is_missing .git/refs/heads/badname &&
+ test_must_be_empty output &&
+ test_must_be_empty error
+'
+
+test_expect_success 'branch -d can delete dangling symref to broken name' '
+ printf "ref: refs/heads/broken...ref\n" >.git/refs/heads/badname &&
+ test_when_finished "rm -f .git/refs/heads/badname" &&
+ git branch -d badname >output 2>error &&
+ test_path_is_missing .git/refs/heads/badname &&
+ test_i18ngrep "Deleted branch badname (was refs/heads/broken\.\.\.ref)" output &&
+ test_must_be_empty error
+'
+
+test_expect_success 'update-ref -d can delete broken name through symref' '
+ cp .git/refs/heads/master .git/refs/heads/broken...ref &&
+ test_when_finished "rm -f .git/refs/heads/broken...ref" &&
+ printf "ref: refs/heads/broken...ref\n" >.git/refs/heads/badname &&
+ test_when_finished "rm -f .git/refs/heads/badname" &&
+ git update-ref -d refs/heads/badname >output 2>error &&
+ test_path_is_missing .git/refs/heads/broken...ref &&
+ test_must_be_empty output &&
+ test_must_be_empty error
+'
+
+test_expect_success 'update-ref --no-deref -d can delete symref with broken name' '
+ printf "ref: refs/heads/master\n" >.git/refs/heads/broken...symref &&
+ test_when_finished "rm -f .git/refs/heads/broken...symref" &&
+ git update-ref --no-deref -d refs/heads/broken...symref >output 2>error &&
+ test_path_is_missing .git/refs/heads/broken...symref &&
+ test_must_be_empty output &&
+ test_must_be_empty error
+'
+
+test_expect_success 'branch -d can delete symref with broken name' '
+ printf "ref: refs/heads/master\n" >.git/refs/heads/broken...symref &&
+ test_when_finished "rm -f .git/refs/heads/broken...symref" &&
+ git branch -d broken...symref >output 2>error &&
+ test_path_is_missing .git/refs/heads/broken...symref &&
+ test_i18ngrep "Deleted branch broken...symref (was refs/heads/master)" output &&
+ test_must_be_empty error
+'
+
+test_expect_success 'update-ref --no-deref -d can delete dangling symref with broken name' '
+ printf "ref: refs/heads/idonotexist\n" >.git/refs/heads/broken...symref &&
+ test_when_finished "rm -f .git/refs/heads/broken...symref" &&
+ git update-ref --no-deref -d refs/heads/broken...symref >output 2>error &&
+ test_path_is_missing .git/refs/heads/broken...symref &&
+ test_must_be_empty output &&
+ test_must_be_empty error
+'
+
+test_expect_success 'branch -d can delete dangling symref with broken name' '
+ printf "ref: refs/heads/idonotexist\n" >.git/refs/heads/broken...symref &&
+ test_when_finished "rm -f .git/refs/heads/broken...symref" &&
+ git branch -d broken...symref >output 2>error &&
+ test_path_is_missing .git/refs/heads/broken...symref &&
+ test_i18ngrep "Deleted branch broken...symref (was refs/heads/idonotexist)" output &&
+ test_must_be_empty error
+'
+
test_expect_success 'update-ref -d cannot delete non-ref in .git dir' '
echo precious >.git/my-private-file &&
echo precious >expect &&
- test_must_fail git update-ref -d my-private-file &&
+ test_must_fail git update-ref -d my-private-file >output 2>error &&
+ test_must_be_empty output &&
+ test_i18ngrep -e "cannot lock .*: unable to resolve reference" error &&
test_cmp expect .git/my-private-file
'
)
'
+test_expect_success 'NUL in commit' '
+ rm -fr nul-in-commit &&
+ git init nul-in-commit &&
+ (
+ cd nul-in-commit &&
+ git commit --allow-empty -m "initial commitQNUL after message" &&
+ git cat-file commit HEAD >original &&
+ q_to_nul <original >munged &&
+ git hash-object -w -t commit --stdin <munged >name &&
+ git branch bad $(cat name) &&
+
+ test_must_fail git -c fsck.nulInCommit=error fsck 2>warn.1 &&
+ grep nulInCommit warn.1 &&
+ git fsck 2>warn.2 &&
+ grep nulInCommit warn.2
+ )
+'
+
# create a static test repo which is broken by omitting
# one particular object ($1, which is looked up via rev-parse
# in the new repository).
. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-rebase.sh
+
test_expect_success 'setup' '
test_commit init
'
( cd here-clone && git fsck )
'
+test_expect_success '"add" worktree with --no-checkout' '
+ git worktree add --no-checkout -b swamp swamp &&
+ ! test -e swamp/init.t &&
+ git -C swamp reset --hard &&
+ test_cmp init.t swamp/init.t
+'
+
+test_expect_success '"add" worktree with --checkout' '
+ git worktree add --checkout -b swmap2 swamp2 &&
+ test_cmp init.t swamp2/init.t
+'
+
+test_expect_success 'put a worktree under rebase' '
+ git worktree add under-rebase &&
+ (
+ cd under-rebase &&
+ set_fake_editor &&
+ FAKE_LINES="edit 1" git rebase -i HEAD^ &&
+ git worktree list | grep "under-rebase.*detached HEAD"
+ )
+'
+
+test_expect_success 'add a worktree, checking out a rebased branch' '
+ test_must_fail git worktree add new-rebase under-rebase &&
+ ! test -d new-rebase
+'
+
+test_expect_success 'checking out a rebased branch from another worktree' '
+ git worktree add new-place &&
+ test_must_fail git -C new-place checkout under-rebase
+'
+
+test_expect_success 'not allowed to delete a branch under rebase' '
+ (
+ cd under-rebase &&
+ test_must_fail git branch -D under-rebase
+ )
+'
+
+test_expect_success 'rename a branch under rebase not allowed' '
+ test_must_fail git branch -M under-rebase rebase-with-new-name
+'
+
+test_expect_success 'check out from current worktree branch ok' '
+ (
+ cd under-rebase &&
+ git checkout under-rebase &&
+ git checkout - &&
+ git rebase --abort
+ )
+'
+
+test_expect_success 'checkout a branch under bisect' '
+ git worktree add under-bisect &&
+ (
+ cd under-bisect &&
+ git bisect start &&
+ git bisect bad &&
+ git bisect good HEAD~2 &&
+ git worktree list | grep "under-bisect.*detached HEAD" &&
+ test_must_fail git worktree add new-bisect under-bisect &&
+ ! test -d new-bisect
+ )
+'
+
+test_expect_success 'rename a branch under bisect not allowed' '
+ test_must_fail git branch -M under-bisect bisect-with-new-name
+'
+
test_done
test_commit three &&
git checkout right &&
test_commit four &&
- git checkout --orphan five &&
+ git checkout --orphan newroot &&
test_commit five &&
git checkout master
'
test_expect_success 'git branch -M baz bam should succeed when baz is checked out' '
git checkout -b baz &&
git branch bam &&
- git branch -M baz bam
+ git branch -M baz bam &&
+ test $(git rev-parse --abbrev-ref HEAD) = bam
+'
+
+test_expect_success 'git branch -M baz bam should succeed when baz is checked out as linked working tree' '
+ git checkout master &&
+ git worktree add -b baz bazdir &&
+ git worktree add -f bazdir2 baz &&
+ git branch -M baz bam &&
+ test $(git -C bazdir rev-parse --abbrev-ref HEAD) = bam &&
+ test $(git -C bazdir2 rev-parse --abbrev-ref HEAD) = bam
+'
+
+test_expect_success 'git branch -M baz bam should succeed within a worktree in which baz is checked out' '
+ git checkout -b baz &&
+ git worktree add -f bazdir3 baz &&
+ (
+ cd bazdir3 &&
+ git branch -M baz bam &&
+ test $(git rev-parse --abbrev-ref HEAD) = bam
+ ) &&
+ test $(git rev-parse --abbrev-ref HEAD) = bam
'
test_expect_success 'git branch -M master should work when master is checked out' '
test_i18ncmp expect actual
'
+test_expect_success 'deleting currently checked out branch fails' '
+ git worktree add -b my7 my7 &&
+ test_must_fail git -C my7 branch -d my7 &&
+ test_must_fail git branch -d my7
+'
+
test_expect_success 'test --track without .fetch entries' '
git branch --track my8 &&
test "$(git config branch.my8.remote)" &&
test_cmp expect actual
'
+test_expect_success 'local-branch symrefs shortened properly' '
+ git symbolic-ref refs/heads/ref-to-branch refs/heads/branch-one &&
+ git symbolic-ref refs/heads/ref-to-remote refs/remotes/origin/branch-one &&
+ cat >expect <<-\EOF &&
+ ref-to-branch -> branch-one
+ ref-to-remote -> refs/remotes/origin/branch-one
+ EOF
+ git branch >actual.raw &&
+ grep ref-to <actual.raw >actual &&
+ test_cmp expect actual
+'
+
test_done
! grep 11 original
'
+test_expect_success 'rebase -Xtheirs from orphan' '
+ git checkout --orphan orphan-conflicting master~2 &&
+ echo "AB $T" >> original &&
+ git commit -morphan-conflicting original &&
+ git rebase -Xtheirs master &&
+ grep AB original &&
+ ! grep 11 original
+'
+
test_expect_success 'merge and rebase should match' '
git diff-tree -r test-rebase test-merge >difference &&
if test -s difference
# "exec" commands are ran with the user shell by default, but this may
# be non-POSIX. For example, if SHELL=zsh then ">file" doesn't work
-# to create a file. Unseting SHELL avoids such non-portable behavior
+# to create a file. Unsetting SHELL avoids such non-portable behavior
# in tests. It must be exported for it to take effect where needed.
SHELL=
export SHELL
test_expect_success 'rebase a commit violating pre-commit' '
mkdir -p .git/hooks &&
- PRE_COMMIT=.git/hooks/pre-commit &&
- echo "#!/bin/sh" > $PRE_COMMIT &&
- echo "test -z \"\$(git diff --cached --check)\"" >> $PRE_COMMIT &&
- chmod a+x $PRE_COMMIT &&
+ write_script .git/hooks/pre-commit <<-\EOF &&
+ test -z "$(git diff --cached --check)"
+ EOF
echo "monde! " >> file1 &&
test_tick &&
test_must_fail git commit -m doesnt-verify file1 &&
run git format-patch --stdout --ignore-if-in-upstream master
"
- test_expect_success $pr 'detect upstream patch' "
+ test_expect_success $pr 'detect upstream patch' '
git checkout -q master &&
scramble file &&
git add file &&
- git commit -q -m 'change big file again' &&
+ git commit -q -m "change big file again" &&
git checkout -q other^{} &&
git rebase master &&
- test_must_fail test -n \"\$(git rev-list master...HEAD~)\"
- "
+ test_must_fail test -n "$(git rev-list master...HEAD~)"
+ '
- test_expect_success $pr 'do not drop patch' "
+ test_expect_success $pr 'do not drop patch' '
git branch -f squashed master &&
git checkout -q -f squashed &&
git reset -q --soft HEAD~2 &&
git checkout -q other^{} &&
test_must_fail git rebase squashed &&
rm -rf .git/rebase-apply
- "
+ '
}
do_tests 500
"
}
test_run_rebase success ''
-test_run_rebase failure -m
+test_run_rebase success -m
test_run_rebase success -i
test_run_rebase success -p
"
}
test_run_rebase success ''
-test_run_rebase failure -m
+test_run_rebase success -m
test_run_rebase success -i
test_run_rebase failure -p
git_revert () {
git status -su >expect &&
ls -1pR * >>expect &&
- tar czf "$TRASH_DIRECTORY/tmp.tgz" * &&
+ tar cf "$TRASH_DIRECTORY/tmp.tar" * &&
git checkout "$1" &&
git revert HEAD &&
rm -rf * &&
- tar xzf "$TRASH_DIRECTORY/tmp.tgz" &&
+ tar xf "$TRASH_DIRECTORY/tmp.tar" &&
git status -su >actual &&
ls -1pR * >>actual &&
test_cmp expect actual &&
test_expect_success "setup case mac" '
git checkout -b mac_os
'
+# This will test nfd2nfc in git diff
+test_expect_success "git diff f.Adiar" '
+ touch f.$Adiarnfc &&
+ git add f.$Adiarnfc &&
+ echo f.Adiarnfc >f.$Adiarnfc &&
+ git diff f.$Adiarnfd >expect &&
+ git diff f.$Adiarnfc >actual &&
+ test_cmp expect actual &&
+ git reset HEAD f.Adiarnfc &&
+ rm f.$Adiarnfc expect actual
+'
+# This will test nfd2nfc in git diff-files
+test_expect_success "git diff-files f.Adiar" '
+ touch f.$Adiarnfc &&
+ git add f.$Adiarnfc &&
+ echo f.Adiarnfc >f.$Adiarnfc &&
+ git diff-files f.$Adiarnfd >expect &&
+ git diff-files f.$Adiarnfc >actual &&
+ test_cmp expect actual &&
+ git reset HEAD f.Adiarnfc &&
+ rm f.$Adiarnfc expect actual
+'
+# This will test nfd2nfc in git diff-index
+test_expect_success "git diff-index f.Adiar" '
+ touch f.$Adiarnfc &&
+ git add f.$Adiarnfc &&
+ echo f.Adiarnfc >f.$Adiarnfc &&
+ git diff-index HEAD f.$Adiarnfd >expect &&
+ git diff-index HEAD f.$Adiarnfc >actual &&
+ test_cmp expect actual &&
+ git reset HEAD f.Adiarnfc &&
+ rm f.$Adiarnfc expect actual
+'
# This will test nfd2nfc in readdir()
test_expect_success "add file Adiarnfc" '
echo f.Adiarnfc >f.$Adiarnfc &&
git add f.$Adiarnfc &&
git commit -m "add f.$Adiarnfc"
'
+# This will test nfd2nfc in git diff-tree
+test_expect_success "git diff-tree f.Adiar" '
+ echo f.Adiarnfc >>f.$Adiarnfc &&
+ git diff-tree HEAD f.$Adiarnfd >expect &&
+ git diff-tree HEAD f.$Adiarnfc >actual &&
+ test_cmp expect actual &&
+ git checkout f.$Adiarnfc &&
+ rm expect actual
+'
# This will test nfd2nfc in git stage()
test_expect_success "stage file d.Adiarnfd/f.Adiarnfd" '
mkdir d.$Adiarnfd &&
git show HEAD:path1 | sed "s/15/16/" > subdir/path1 &&
git status | test_i18ngrep "renamed: .*path1 -> subdir/path1"'
+test_expect_success 'two files with same basename and same content' '
+ git reset --hard &&
+ mkdir -p dir/A dir/B &&
+ cp path1 dir/A/file &&
+ cp path1 dir/B/file &&
+ git add dir &&
+ git commit -m 2 &&
+ git mv dir other-dir &&
+ git status | test_i18ngrep "renamed: .*dir/A/file -> other-dir/A/file"
+'
+
test_expect_success 'setup for many rename source candidates' '
git reset --hard &&
for i in 0 1 2 3 4 5 6 7 8 9;
test_path_is_dir patchset
'
+test_expect_success 'format-patch --base' '
+ git checkout side &&
+ git format-patch --stdout --base=HEAD~3 -1 >patch &&
+ grep "^base-commit:" patch >actual &&
+ grep "^prerequisite-patch-id:" patch >>actual &&
+ echo "base-commit: $(git rev-parse HEAD~3)" >expected &&
+ echo "prerequisite-patch-id: $(git show --patch HEAD~2 | git patch-id --stable | awk "{print \$1}")" >>expected &&
+ echo "prerequisite-patch-id: $(git show --patch HEAD~1 | git patch-id --stable | awk "{print \$1}")" >>expected &&
+ test_cmp expected actual
+'
+
+test_expect_success 'format-patch --base errors out when base commit is in revision list' '
+ test_must_fail git format-patch --base=HEAD -2 &&
+ test_must_fail git format-patch --base=HEAD~1 -2 &&
+ git format-patch --stdout --base=HEAD~2 -2 >patch &&
+ grep "^base-commit:" patch >actual &&
+ echo "base-commit: $(git rev-parse HEAD~2)" >expected &&
+ test_cmp expected actual
+'
+
+test_expect_success 'format-patch --base errors out when base commit is not ancestor of revision list' '
+ # For history as below:
+ #
+ # ---Q---P---Z---Y---*---X
+ # \ /
+ # ------------W
+ #
+	# If "format-patch Z..X" is given, P and Z cannot be specified as the base commit
+ git checkout -b topic1 master &&
+ git rev-parse HEAD >commit-id-base &&
+ test_commit P &&
+ git rev-parse HEAD >commit-id-P &&
+ test_commit Z &&
+ git rev-parse HEAD >commit-id-Z &&
+ test_commit Y &&
+ git checkout -b topic2 master &&
+ test_commit W &&
+ git merge topic1 &&
+ test_commit X &&
+ test_must_fail git format-patch --base=$(cat commit-id-P) -3 &&
+ test_must_fail git format-patch --base=$(cat commit-id-Z) -3 &&
+ git format-patch --stdout --base=$(cat commit-id-base) -3 >patch &&
+ grep "^base-commit:" patch >actual &&
+ echo "base-commit: $(cat commit-id-base)" >expected &&
+ test_cmp expected actual
+'
+
+test_expect_success 'format-patch --base=auto' '
+ git checkout -b upstream master &&
+ git checkout -b local upstream &&
+ git branch --set-upstream-to=upstream &&
+ test_commit N1 &&
+ test_commit N2 &&
+ git format-patch --stdout --base=auto -2 >patch &&
+ grep "^base-commit:" patch >actual &&
+ echo "base-commit: $(git rev-parse upstream)" >expected &&
+ test_cmp expected actual
+'
+
+test_expect_success 'format-patch errors out when history involves criss-cross' '
+ # setup criss-cross history
+ #
+ # B---M1---D
+ # / \ /
+ # A X
+ # \ / \
+ # C---M2---E
+ #
+ git checkout master &&
+ test_commit A &&
+ git checkout -b xb master &&
+ test_commit B &&
+ git checkout -b xc master &&
+ test_commit C &&
+ git checkout -b xbc xb -- &&
+ git merge xc &&
+ git checkout -b xcb xc -- &&
+ git branch --set-upstream-to=xbc &&
+ git merge xb &&
+ git checkout xbc &&
+ test_commit D &&
+ git checkout xcb &&
+ test_commit E &&
+ test_must_fail git format-patch --base=auto -1
+'
+
+test_expect_success 'format-patch format.useAutoBase option' '
+ test_when_finished "git config --unset format.useAutoBase" &&
+ git checkout local &&
+ git config format.useAutoBase true &&
+ git format-patch --stdout -1 >patch &&
+ grep "^base-commit:" patch >actual &&
+ echo "base-commit: $(git rev-parse upstream)" >expected &&
+ test_cmp expected actual
+'
+
+test_expect_success 'format-patch --base overrides format.useAutoBase' '
+ test_when_finished "git config --unset format.useAutoBase" &&
+ git config format.useAutoBase true &&
+ git format-patch --stdout --base=HEAD~1 -1 >patch &&
+ grep "^base-commit:" patch >actual &&
+ echo "base-commit: $(git rev-parse HEAD~1)" >expected &&
+ test_cmp expected actual
+'
+
test_done
test 4 = "$(cat otherfile-4)" &&
git am --abort &&
test_cmp_rev initial HEAD &&
- test -z $(git ls-files -u) &&
+ test -z "$(git ls-files -u)" &&
test_path_is_missing otherfile-4
'
'
test_expect_success 'rerere clear' '
- rm $rr/postimage &&
+ mv $rr/postimage .git/post-saved &&
echo "$sha1 a1" | perl -pe "y/\012/\000/" >.git/MERGE_RR &&
git rerere clear &&
! test -d $rr
'
+test_expect_success 'leftover directory' '
+ git reset --hard &&
+ mkdir -p $rr &&
+ test_must_fail git merge first &&
+ test -f $rr/preimage
+'
+
+test_expect_success 'missing preimage' '
+ git reset --hard &&
+ mkdir -p $rr &&
+ cp .git/post-saved $rr/postimage &&
+ test_must_fail git merge first &&
+ test -f $rr/preimage
+'
+
test_expect_success 'set up for garbage collection tests' '
mkdir -p $rr &&
echo Hello >$rr/preimage &&
test_i18ngrep [Uu]sage help
'
+concat_insert () {
+ last=$1
+ shift
+ cat early && printf "%s\n" "$@" && cat late "$last"
+}
+
+count_pre_post () {
+ find .git/rr-cache/ -type f -name "preimage*" >actual &&
+ test_line_count = "$1" actual &&
+ find .git/rr-cache/ -type f -name "postimage*" >actual &&
+ test_line_count = "$2" actual
+}
+
+test_expect_success 'rerere gc' '
+ find .git/rr-cache -type f >original &&
+ xargs test-chmtime -172800 <original &&
+
+ git -c gc.rerereresolved=5 -c gc.rerereunresolved=5 rerere gc &&
+ find .git/rr-cache -type f >actual &&
+ test_cmp original actual &&
+
+ git -c gc.rerereresolved=5 -c gc.rerereunresolved=0 rerere gc &&
+ find .git/rr-cache -type f >actual &&
+ test_cmp original actual &&
+
+ git -c gc.rerereresolved=0 -c gc.rerereunresolved=0 rerere gc &&
+ find .git/rr-cache -type f >actual &&
+ >expect &&
+ test_cmp expect actual
+'
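
For reference, a sketch of the same knobs used outside the test suite (the
cutoffs are measured in days; 60 and 15 match the documented defaults):

	git config gc.rerereResolved 60
	git config gc.rerereUnresolved 15
	git rerere gc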
+
+merge_conflict_resolve () {
+ git reset --hard &&
+ test_must_fail git merge six.1 &&
+ # Resolution is to replace 7 with 6.1 and 6.2 (i.e. take both)
+ concat_insert short 6.1 6.2 >file1 &&
+ concat_insert long 6.1 6.2 >file2
+}
+
+test_expect_success 'multiple identical conflicts' '
+ git reset --hard &&
+
+ test_seq 1 6 >early &&
+ >late &&
+ test_seq 11 15 >short &&
+ test_seq 111 120 >long &&
+ concat_insert short >file1 &&
+ concat_insert long >file2 &&
+ git add file1 file2 &&
+ git commit -m base &&
+ git tag base &&
+ git checkout -b six.1 &&
+ concat_insert short 6.1 >file1 &&
+ concat_insert long 6.1 >file2 &&
+ git add file1 file2 &&
+ git commit -m 6.1 &&
+ git checkout -b six.2 HEAD^ &&
+ concat_insert short 6.2 >file1 &&
+ concat_insert long 6.2 >file2 &&
+ git add file1 file2 &&
+ git commit -m 6.2 &&
+
+ # At this point, six.1 and six.2
+ # - derive from common ancestor that has two files
+ # 1...6 7 11..15 (file1) and 1...6 7 111..120 (file2)
+ # - six.1 replaces these 7s with 6.1
+ # - six.2 replaces these 7s with 6.2
+
+ merge_conflict_resolve &&
+
+ # Check that rerere knows that file1 and file2 have conflicts
+
+ printf "%s\n" file1 file2 >expect &&
+ git ls-files -u | sed -e "s/^.* //" | sort -u >actual &&
+ test_cmp expect actual &&
+
+ git rerere status | sort >actual &&
+ test_cmp expect actual &&
+
+ git rerere remaining >actual &&
+ test_cmp expect actual &&
+
+ count_pre_post 2 0 &&
+
+ # Pretend that the conflicts were made quite some time ago
+ find .git/rr-cache/ -type f | xargs test-chmtime -172800 &&
+
+ # Unresolved entries have not expired yet
+ git -c gc.rerereresolved=5 -c gc.rerereunresolved=5 rerere gc &&
+ count_pre_post 2 0 &&
+
+ # Unresolved entries have expired
+ git -c gc.rerereresolved=5 -c gc.rerereunresolved=1 rerere gc &&
+ count_pre_post 0 0 &&
+
+ # Recreate the conflicted state
+ merge_conflict_resolve &&
+ count_pre_post 2 0 &&
+
+ # Clear it
+ git rerere clear &&
+ count_pre_post 0 0 &&
+
+ # Recreate the conflicted state
+ merge_conflict_resolve &&
+ count_pre_post 2 0 &&
+
+ # We resolved file1 and file2
+ git rerere &&
+ >expect &&
+ git rerere remaining >actual &&
+ test_cmp expect actual &&
+
+ # We must have recorded both of them
+ count_pre_post 2 2 &&
+
+ # Now we should be able to resolve them both
+ git reset --hard &&
+ test_must_fail git merge six.1 &&
+ git rerere &&
+
+ >expect &&
+ git rerere remaining >actual &&
+ test_cmp expect actual &&
+
+ concat_insert short 6.1 6.2 >file1.expect &&
+ concat_insert long 6.1 6.2 >file2.expect &&
+ test_cmp file1.expect file1 &&
+ test_cmp file2.expect file2 &&
+
+ # Forget resolution for file2
+ git rerere forget file2 &&
+ echo file2 >expect &&
+ git rerere status >actual &&
+ test_cmp expect actual &&
+ count_pre_post 2 1 &&
+
+ # file2 already has correct resolution, so record it again
+ git rerere &&
+
+ # Pretend that the resolutions are old again
+ find .git/rr-cache/ -type f | xargs test-chmtime -172800 &&
+
+ # Resolved entries have not expired yet
+ git -c gc.rerereresolved=5 -c gc.rerereunresolved=5 rerere gc &&
+
+ count_pre_post 2 2 &&
+
+ # Resolved entries have expired
+ git -c gc.rerereresolved=1 -c gc.rerereunresolved=5 rerere gc &&
+ count_pre_post 0 0
+'
+
test_done
'
test_expect_success !MINGW 'shortlog from non-git directory' '
- git log HEAD >log &&
+ git log --no-expand-tabs HEAD >log &&
GIT_DIR=non-existing git shortlog -w <log >out &&
test_cmp expect out
'
#calculate patch id. Make sure output is not empty.
calc_patch_id () {
- name="$1"
+ patch_name="$1"
shift
git patch-id "$@" |
- sed "s/ .*//" >patch-id_"$name" &&
- test_line_count -gt 0 patch-id_"$name"
+ sed "s/ .*//" >patch-id_"$patch_name" &&
+ test_line_count -gt 0 patch-id_"$patch_name"
}
get_top_diff () {
--- /dev/null
+#!/bin/sh
+
+test_description='log/show --expand-tabs'
+
+. ./test-lib.sh
+
+HT=" "
+title='tab indent at the beginning of the title line'
+body='tab indent on a line in the body'
+
+# usage: count_expand $indent $numSP $numHT @format_args
+count_expand ()
+{
+ expect=
+ count=$(( $1 + $2 )) ;# expected spaces
+ while test $count -gt 0
+ do
+ expect="$expect "
+ count=$(( $count - 1 ))
+ done
+ shift 2
+ count=$1 ;# expected tabs
+ while test $count -gt 0
+ do
+ expect="$expect$HT"
+ count=$(( $count - 1 ))
+ done
+ shift
+
+ # The remainder of the command line is "git show -s" options
+ case " $* " in
+ *' --pretty=short '*)
+ line=$title ;;
+ *)
+ line=$body ;;
+ esac
+
+ # Prefix the output with the command line arguments, and
+	# replace SP with a dot in both the expected and actual output
+	# so that test_cmp would show the difference together with the
+	# breakage in a way that is easier for the debugging user to consume.
+ {
+ echo "git show -s $*"
+ echo "$expect$line"
+ } | sed -e 's/ /./g' >expect
+
+ {
+ echo "git show -s $*"
+ git show -s "$@" |
+ sed -n -e "/$line\$/p"
+ } | sed -e 's/ /./g' >actual
+
+ test_cmp expect actual
+}
+
+test_expand ()
+{
+ fmt=$1
+ case "$fmt" in
+ *=raw | *=short | *=email)
+ default="0 1" ;;
+ *)
+ default="8 0" ;;
+ esac
+ case "$fmt" in
+ *=email)
+ in=0 ;;
+ *)
+ in=4 ;;
+ esac
+ test_expect_success "expand/no-expand${fmt:+ for $fmt}" '
+ count_expand $in $default $fmt &&
+ count_expand $in 8 0 $fmt --expand-tabs &&
+ count_expand $in 8 0 --expand-tabs $fmt &&
+ count_expand $in 8 0 $fmt --expand-tabs=8 &&
+ count_expand $in 8 0 --expand-tabs=8 $fmt &&
+ count_expand $in 0 1 $fmt --no-expand-tabs &&
+ count_expand $in 0 1 --no-expand-tabs $fmt &&
+ count_expand $in 0 1 $fmt --expand-tabs=0 &&
+ count_expand $in 0 1 --expand-tabs=0 $fmt &&
+ count_expand $in 4 0 $fmt --expand-tabs=4 &&
+ count_expand $in 4 0 --expand-tabs=4 $fmt
+ '
+}
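
An illustrative sketch of the behavior count_expand verifies (HEAD stands for
whatever commit carries the tab-indented message): medium-style formats expand
tabs to 8 columns by default, the raw, short, and email formats keep the tab,
and --expand-tabs=<n> overrides either default.

	git show -s --pretty=medium HEAD                  # tab expanded to 8 columns
	git show -s --pretty=email HEAD                   # tab preserved
	git show -s --pretty=medium --expand-tabs=4 HEAD  # tab expanded to 4 columns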
+
+test_expect_success 'setup' '
+ test_tick &&
+ sed -e "s/Q/$HT/g" <<-EOF >msg &&
+ Q$title
+
+ Q$body
+ EOF
+ git commit --allow-empty -F msg
+'
+
+test_expand ""
+test_expand --pretty
+test_expand --pretty=short
+test_expand --pretty=medium
+test_expand --pretty=full
+test_expand --pretty=fuller
+test_expand --pretty=raw
+test_expand --pretty=email
+
+test_done
test "$victim_orig" = "$victim_head"
'
+test_expect_success 'send-pack --all sends all branches' '
+ # make sure we have at least 2 branches with different
+ # values, just to be thorough
+ git branch other-branch HEAD^ &&
+
+ git init --bare all.git &&
+ git send-pack --all all.git &&
+ git for-each-ref refs/heads >expect &&
+ git -C all.git for-each-ref refs/heads >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'push --all excludes remote-tracking hierarchy' '
mkdir parent &&
(
git config receive.fsckobjects true &&
git config transfer.fsckobjects false
) &&
- test_must_fail ok=sigpipe git push --porcelain dst master:refs/heads/test >act &&
- {
- test_cmp exp act ||
- ! test -s act
- }
+ test_must_fail git push --porcelain dst master:refs/heads/test >act &&
+ test_cmp exp act
'
test_expect_success 'push with transfer.fsckobjects' '
cd dst &&
git config transfer.fsckobjects true
) &&
- test_must_fail ok=sigpipe git push --porcelain dst master:refs/heads/test >act
+ test_must_fail git push --porcelain dst master:refs/heads/test >act &&
+ test_cmp exp act
'
cat >bogus-commit <<\EOF
(
cd auto-gc &&
git config gc.autoPackLimit 1 &&
+ git config gc.autoDetach false &&
GIT_ASK_YESNO="$D/askyesno" git fetch >fetch.out 2>&1 &&
! grep "Should I try again" fetch.out
)
mv "$2.x" "$2"
}
+test_pull_autostash () {
+ git reset --hard before-rebase &&
+ echo dirty >new_file &&
+ git add new_file &&
+ git pull "$@" . copy &&
+ test_cmp_rev HEAD^ copy &&
+ test "$(cat new_file)" = dirty &&
+ test "$(cat file)" = "modified again"
+}
+
+test_pull_autostash_fail () {
+ git reset --hard before-rebase &&
+ echo dirty >new_file &&
+ git add new_file &&
+ test_must_fail git pull "$@" . copy 2>err &&
+ test_i18ngrep "uncommitted changes." err
+}
+
test_expect_success setup '
echo file >file &&
git add file &&
test_expect_success 'pull --rebase succeeds with dirty working directory and rebase.autostash set' '
test_config rebase.autostash true &&
- git reset --hard before-rebase &&
- echo dirty >new_file &&
- git add new_file &&
- git pull --rebase . copy &&
- test_cmp_rev HEAD^ copy &&
- test "$(cat new_file)" = dirty &&
- test "$(cat file)" = "modified again"
+ test_pull_autostash --rebase
'
+test_expect_success 'pull --rebase --autostash & rebase.autostash=true' '
+ test_config rebase.autostash true &&
+ test_pull_autostash --rebase --autostash
+'
+
+test_expect_success 'pull --rebase --autostash & rebase.autostash=false' '
+ test_config rebase.autostash false &&
+ test_pull_autostash --rebase --autostash
+'
+
+test_expect_success 'pull --rebase --autostash & rebase.autostash unset' '
+ test_unconfig rebase.autostash &&
+ test_pull_autostash --rebase --autostash
+'
+
+test_expect_success 'pull --rebase --no-autostash & rebase.autostash=true' '
+ test_config rebase.autostash true &&
+ test_pull_autostash_fail --rebase --no-autostash
+'
+
+test_expect_success 'pull --rebase --no-autostash & rebase.autostash=false' '
+ test_config rebase.autostash false &&
+ test_pull_autostash_fail --rebase --no-autostash
+'
+
+test_expect_success 'pull --rebase --no-autostash & rebase.autostash unset' '
+ test_unconfig rebase.autostash &&
+ test_pull_autostash_fail --rebase --no-autostash
+'
+
+for i in --autostash --no-autostash
+do
+ test_expect_success "pull $i (without --rebase) is illegal" '
+ test_must_fail git pull $i . copy 2>err &&
+ test_i18ngrep "only valid with --rebase" err
+ '
+done
+
test_expect_success 'pull.rebase' '
git reset --hard before-rebase &&
test_config pull.rebase true &&
test new = "$(git show HEAD:file2)"
'
+test_expect_success 'pull --autostash & pull.rebase=true' '
+ test_config pull.rebase true &&
+ test_pull_autostash --autostash
+'
+
+test_expect_success 'pull --no-autostash & pull.rebase=true' '
+ test_config pull.rebase true &&
+ test_pull_autostash_fail --no-autostash
+'
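
As a usage sketch of the combinations covered above (remote and branch names
are placeholders): --autostash only takes effect when the pull performs a
rebase, whether requested by --rebase or by pull.rebase=true, and it overrides
rebase.autostash in either direction.

	git -c rebase.autostash=false pull --rebase --autostash origin master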
+
test_expect_success 'branch.to-rebase.rebase' '
git reset --hard before-rebase &&
test_config branch.to-rebase.rebase true &&
)
'
+test_expect_success 'git pull --allow-unrelated-histories' '
+ test_when_finished "rm -fr src dst" &&
+ git init src &&
+ (
+ cd src &&
+ test_commit one &&
+ test_commit two
+ ) &&
+ git clone src dst &&
+ (
+ cd src &&
+ git checkout --orphan side HEAD^ &&
+ test_commit three
+ ) &&
+ (
+ cd dst &&
+ test_must_fail git pull ../src side &&
+ git pull --allow-unrelated-histories ../src side
+ )
+'
+
test_done
)
'
-cat >proxy <<'EOF'
-#!/bin/sh
-echo >&2 "proxying for $*"
-cmd=$("$PERL_PATH" -e '
+test_expect_success 'setup proxy script' '
+ write_script proxy-get-cmd "$PERL_PATH" <<-\EOF &&
read(STDIN, $buf, 4);
my $n = hex($buf) - 4;
read(STDIN, $buf, $n);
# drop absolute-path on repo name
$cmd =~ s{ /}{ };
print $cmd;
-')
-echo >&2 "Running '$cmd'"
-exec $cmd
-EOF
-chmod +x proxy
+ EOF
+
+ write_script proxy <<-\EOF
+ echo >&2 "proxying for $*"
+ cmd=$(./proxy-get-cmd)
+ echo >&2 "Running $cmd"
+ exec $cmd
+ EOF
+'
+
test_expect_success 'setup local repo' '
git remote add fake git://example.com/remote &&
git config core.gitproxy ./proxy
expect_askpass pass user@host
'
-test_expect_success 'cmdline credential config passes into submodules' '
+test_expect_success 'set up repo with http submodules' '
git init super &&
set_askpass user@host pass@host &&
(
cd super &&
git submodule add "$HTTPD_URL/auth/dumb/repo.git" sub &&
git commit -m "add submodule"
- ) &&
+ )
+'
+
+test_expect_success 'cmdline credential config passes to submodule via clone' '
set_askpass wrong pass@host &&
test_must_fail git clone --recursive super super-clone &&
rm -rf super-clone &&
+
set_askpass wrong pass@host &&
- git -c "credential.$HTTP_URL.username=user@host" \
+ git -c "credential.$HTTPD_URL.username=user@host" \
clone --recursive super super-clone &&
expect_askpass pass user@host
'
+test_expect_success 'cmdline credential config passes to submodule via fetch' '
+ set_askpass wrong pass@host &&
+ test_must_fail git -C super-clone fetch --recurse-submodules &&
+
+ set_askpass wrong pass@host &&
+ git -C super-clone \
+ -c "credential.$HTTPD_URL.username=user@host" \
+ fetch --recurse-submodules &&
+ expect_askpass pass user@host
+'
+
+test_expect_success 'cmdline credential config passes to submodule update' '
+ # advance the submodule HEAD so that a fetch is required
+ git commit --allow-empty -m foo &&
+ git push "$HTTPD_DOCUMENT_ROOT_PATH/auth/dumb/repo.git" HEAD &&
+ sha1=$(git rev-parse HEAD) &&
+ git -C super-clone update-index --cacheinfo 160000,$sha1,sub &&
+
+ set_askpass wrong pass@host &&
+ test_must_fail git -C super-clone submodule update &&
+
+ set_askpass wrong pass@host &&
+ git -C super-clone \
+ -c "credential.$HTTPD_URL.username=user@host" \
+ submodule update &&
+ expect_askpass pass user@host
+'
+
test_expect_success 'fetch changes via http' '
echo content >>file &&
git commit -a -m two &&
test_line_count = 100000 tags
'
+test_expect_success 'custom http headers' '
+ test_must_fail git -c http.extraheader="x-magic-two: cadabra" \
+ fetch "$HTTPD_URL/smart_headers/repo.git" &&
+ git -c http.extraheader="x-magic-one: abra" \
+ -c http.extraheader="x-magic-two: cadabra" \
+ fetch "$HTTPD_URL/smart_headers/repo.git" &&
+ git update-index --add --cacheinfo 160000,$(git rev-parse HEAD),sub &&
+ git config -f .gitmodules submodule.sub.path sub &&
+ git config -f .gitmodules submodule.sub.url \
+ "$HTTPD_URL/smart_headers/repo.git" &&
+ git submodule init sub &&
+ test_must_fail git submodule update sub &&
+ git -c http.extraheader="x-magic-one: abra" \
+ -c http.extraheader="x-magic-two: cadabra" \
+ submodule update sub
+'
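
A sketch of the configuration this test relies on (the header names and values
are taken from the test and stand in for real credentials); http.extraheader is
multi-valued, so each -c option or --add call supplies one more header:

	git -c http.extraheader="x-magic-one: abra" fetch origin
	git config --add http.extraheader "x-magic-two: cadabra"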
+
stop_httpd
test_done
setup_ssh_wrapper () {
test_expect_success 'setup ssh wrapper' '
- cp "$GIT_BUILD_DIR/test-fake-ssh$X" \
+ cp "$GIT_BUILD_DIR/t/helper/test-fake-ssh$X" \
"$TRASH_DIRECTORY/ssh-wrapper$X" &&
GIT_SSH="$TRASH_DIRECTORY/ssh-wrapper$X" &&
export GIT_SSH &&
#IPv6
for tuah in ::1 [::1] [::1]: user@::1 user@[::1] user@[::1]: [user@::1] [user@::1]:
do
- ehost=$(echo $tuah | sed -e "s/1]:/1]/ "| tr -d "[]")
+ ehost=$(echo $tuah | sed -e "s/1]:/1]/" | tr -d "[]")
test_expect_success "clone ssh://$tuah/home/user/repo" "
test_clone_url ssh://$tuah/home/user/repo $ehost /home/user/repo
"
test_cmp expect child/file
'
+# Tests for the hidden file attribute on windows
+is_hidden () {
+ # Use the output of `attrib`, ignore the absolute path
+ case "$(attrib "$1")" in *H*?:*) return 0;; esac
+ return 1
+}
+
+test_expect_success MINGW 'clone -c core.hideDotFiles' '
+ test_commit attributes .gitattributes "" &&
+ rm -rf child &&
+ git clone -c core.hideDotFiles=false . child &&
+ ! is_hidden child/.gitattributes &&
+ rm -rf child &&
+ git clone -c core.hideDotFiles=dotGitOnly . child &&
+ ! is_hidden child/.gitattributes &&
+ rm -rf child &&
+ git clone -c core.hideDotFiles=true . child &&
+ is_hidden child/.gitattributes
+'
+
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='Test shallow cloning of repos with submodules'
+
+. ./test-lib.sh
+
+pwd=$(pwd)
+
+test_expect_success 'setup' '
+ git checkout -b master &&
+ test_commit commit1 &&
+ test_commit commit2 &&
+ mkdir sub &&
+ (
+ cd sub &&
+ git init &&
+ test_commit subcommit1 &&
+ test_commit subcommit2 &&
+ test_commit subcommit3
+ ) &&
+ git submodule add "file://$pwd/sub" sub &&
+ git commit -m "add submodule"
+'
+
+test_expect_success 'nonshallow clone implies nonshallow submodule' '
+ test_when_finished "rm -rf super_clone" &&
+ git clone --recurse-submodules "file://$pwd/." super_clone &&
+ (
+ cd super_clone &&
+ git log --oneline >lines &&
+ test_line_count = 3 lines
+ ) &&
+ (
+ cd super_clone/sub &&
+ git log --oneline >lines &&
+ test_line_count = 3 lines
+ )
+'
+
+test_expect_success 'shallow clone implies shallow submodule' '
+ test_when_finished "rm -rf super_clone" &&
+ git clone --recurse-submodules --depth 2 "file://$pwd/." super_clone &&
+ (
+ cd super_clone &&
+ git log --oneline >lines &&
+ test_line_count = 2 lines
+ ) &&
+ (
+ cd super_clone/sub &&
+ git log --oneline >lines &&
+ test_line_count = 1 lines
+ )
+'
+
+test_expect_success 'shallow clone with non shallow submodule' '
+ test_when_finished "rm -rf super_clone" &&
+ git clone --recurse-submodules --depth 2 --no-shallow-submodules "file://$pwd/." super_clone &&
+ (
+ cd super_clone &&
+ git log --oneline >lines &&
+ test_line_count = 2 lines
+ ) &&
+ (
+ cd super_clone/sub &&
+ git log --oneline >lines &&
+ test_line_count = 3 lines
+ )
+'
+
+test_expect_success 'non shallow clone with shallow submodule' '
+ test_when_finished "rm -rf super_clone" &&
+ git clone --recurse-submodules --no-local --shallow-submodules "file://$pwd/." super_clone &&
+ (
+ cd super_clone &&
+ git log --oneline >lines &&
+ test_line_count = 3 lines
+ ) &&
+ (
+ cd super_clone/sub &&
+ git log --oneline >lines &&
+ test_line_count = 1 lines
+ )
+'
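
An illustrative invocation of the options exercised above (the URL is a
placeholder): a shallow superproject clone makes its submodules shallow as
well unless --no-shallow-submodules is given.

	git clone --depth 2 --recurse-submodules --no-shallow-submodules \
		https://example.com/super.git super_clone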
+
+test_done
git ls-files --stage > out
cat > expect << EOF
-100644 439cc46de773d8a83c77799b7cc9191c128bfcff 1 a1
+100644 ec3fe2a791706733f2d8fa7ad45d9a9672031f5e 1 a1
100644 cf84443e49e1b366fac938711ddf4be2d4d1d9e9 2 a1
100644 fd7923529855d0b274795ae3349c5e0438333979 3 a1
EOF
-L "" \
-L "Temporary merge branch 1" \
merged empty merge-me &&
- test $(git rev-parse :1:new_a) = $(git hash-object merged)
+ sed -e "s/^\([<=>]\)/\1\1\1/" merged >merged-internal &&
+ test $(git rev-parse :1:new_a) = $(git hash-object merged-internal)
'
#
test $(git rev-parse :3:file) = $(git rev-parse B:file)
'
-#
-# criss-cross + modify/modify with very contrived file contents:
-#
-# B D
-# o---o
-# / \ / \
-# A o X ? F
-# \ / \ /
-# o---o
-# C E
-#
-# Commit A: file with contents 'A\n'
-# Commit B: file with contents 'B\n'
-# Commit C: file with contents 'C\n'
-# Commit D: file with contents 'D\n'
-# Commit E: file with contents:
-# <<<<<<< Temporary merge branch 1
-# C
-# =======
-# B
-# >>>>>>> Temporary merge branch 2
-#
-# Now, when we merge commits D & E, does git detect the conflict?
-
-test_expect_success 'setup differently handled merges of content conflict' '
- git clean -fdqx &&
- rm -rf .git &&
- git init &&
-
- echo A >file &&
- git add file &&
- test_tick &&
- git commit -m A &&
-
- git branch B &&
- git checkout -b C &&
- echo C >file &&
- git add file &&
- test_tick &&
- git commit -m C &&
-
- git checkout B &&
- echo B >file &&
- git add file &&
- test_tick &&
- git commit -m B &&
-
- git checkout B^0 &&
- test_must_fail git merge C &&
- echo D >file &&
- git add file &&
- test_tick &&
- git commit -m D &&
- git tag D &&
-
- git checkout C^0 &&
- test_must_fail git merge B &&
- cat <<EOF >file &&
-<<<<<<< Temporary merge branch 1
-C
-=======
-B
->>>>>>> Temporary merge branch 2
-EOF
- git add file &&
- test_tick &&
- git commit -m E &&
- git tag E
-'
-
-test_expect_failure 'git detects conflict w/ criss-cross+contrived resolution' '
- git checkout D^0 &&
-
- test_must_fail git merge -s recursive E^0 &&
-
- test 3 -eq $(git ls-files -s | wc -l) &&
- test 3 -eq $(git ls-files -u | wc -l) &&
- test 0 -eq $(git ls-files -o | wc -l) &&
-
- test $(git rev-parse :2:file) = $(git rev-parse D:file) &&
- test $(git rev-parse :3:file) = $(git rev-parse E:file)
-'
-
#
# criss-cross + d/f conflict via add/add:
# Commit A: Neither file 'a' nor directory 'a/' exists.
git_bisect () {
git status -su >expect &&
ls -1pR * >>expect &&
- tar czf "$TRASH_DIRECTORY/tmp.tgz" * &&
+ tar cf "$TRASH_DIRECTORY/tmp.tar" * &&
GOOD=$(git rev-parse --verify HEAD) &&
git checkout "$1" &&
echo "foo" >bar &&
git bisect start &&
git bisect good $GOOD &&
rm -rf * &&
- tar xzf "$TRASH_DIRECTORY/tmp.tgz" &&
+ tar xf "$TRASH_DIRECTORY/tmp.tar" &&
git status -su >actual &&
ls -1pR * >>actual &&
test_cmp expect actual &&
test_must_fail git merge -s recursive C^0
'
-test_expect_failure 'octopus, unrelated file touched' '
+test_expect_success 'octopus, unrelated file touched' '
git reset --hard &&
git checkout B^0 &&
test_must_fail git merge C^0 D^0
'
-test_expect_failure 'octopus, related file removed' '
+test_expect_success 'octopus, related file removed' '
git reset --hard &&
git checkout B^0 &&
test_must_fail git merge C^0 D^0
'
-test_expect_failure 'octopus, related file modified' '
+test_expect_success 'octopus, related file modified' '
git reset --hard &&
git checkout B^0 &&
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-gpg.sh
-test_prepare_expect () {
- if test_have_prereq GPG
- then
- cat
- else
- sed '/signed/d'
- fi
-}
-
test_expect_success 'setup some history and refs' '
test_commit one &&
test_commit two &&
test_commit four &&
git tag -m "An annotated tag" annotated-tag &&
git tag -m "Annonated doubly" doubly-annotated-tag annotated-tag &&
+
+ # Note that these "signed" tags might not actually be signed.
+ # Tests which care about the distinction should be marked
+ # with the GPG prereq.
if test_have_prereq GPG
then
- git tag -s -m "A signed tag" signed-tag &&
- git tag -s -m "Signed doubly" doubly-signed-tag signed-tag
+ sign=-s
+ else
+ sign=
fi &&
+ git tag $sign -m "A signed tag" signed-tag &&
+ git tag $sign -m "Signed doubly" doubly-signed-tag signed-tag &&
+
git checkout master &&
git update-ref refs/odd/spot master
'
'
test_expect_success 'check signed tags with --points-at' '
- test_prepare_expect <<-\EOF | sed -e "s/Z$//" >expect &&
+ sed -e "s/Z$//" >expect <<-\EOF &&
refs/heads/side Z
refs/tags/annotated-tag four
refs/tags/four Z
'
test_expect_success 'filtering with --no-merged' '
- test_prepare_expect >expect <<-\EOF &&
+ cat >expect <<-\EOF &&
refs/heads/side
refs/tags/annotated-tag
refs/tags/doubly-annotated-tag
'
test_expect_success 'filtering with --contains' '
- test_prepare_expect >expect <<-\EOF &&
+ cat >expect <<-\EOF &&
refs/heads/master
refs/heads/side
refs/odd/spot
'
test_expect_success 'left alignment is default' '
- test_prepare_expect >expect <<-\EOF &&
+ cat >expect <<-\EOF &&
refname is refs/heads/master |refs/heads/master
refname is refs/heads/side |refs/heads/side
refname is refs/odd/spot |refs/odd/spot
'
test_expect_success 'middle alignment' '
- test_prepare_expect >expect <<-\EOF &&
+ cat >expect <<-\EOF &&
| refname is refs/heads/master |refs/heads/master
| refname is refs/heads/side |refs/heads/side
| refname is refs/odd/spot |refs/odd/spot
'
test_expect_success 'right alignment' '
- test_prepare_expect >expect <<-\EOF &&
+ cat >expect <<-\EOF &&
| refname is refs/heads/master|refs/heads/master
| refname is refs/heads/side|refs/heads/side
| refname is refs/odd/spot|refs/odd/spot
test_cmp expect actual
'
-test_prepare_expect >expect <<-\EOF
+cat >expect <<-\EOF
| refname is refs/heads/master |refs/heads/master
| refname is refs/heads/side |refs/heads/side
| refname is refs/odd/spot |refs/odd/spot
# Individual atoms inside %(align:...) and %(end) must not be quoted.
test_expect_success 'alignment with format quote' "
- test_prepare_expect >expect <<-\EOF &&
+ cat >expect <<-\EOF &&
|' '\''master| A U Thor'\'' '|
|' '\''side| A U Thor'\'' '|
|' '\''odd/spot| A U Thor'\'' '|
"
test_expect_success 'nested alignment with quote formatting' "
- test_prepare_expect >expect <<-\EOF &&
+ cat >expect <<-\EOF &&
|' master '|
|' side '|
|' odd/spot '|
"
test_expect_success 'check `%(contents:lines=1)`' '
- test_prepare_expect >expect <<-\EOF &&
+ cat >expect <<-\EOF &&
master |three
side |four
odd/spot |three
'
test_expect_success 'check `%(contents:lines=0)`' '
- test_prepare_expect >expect <<-\EOF &&
+ cat >expect <<-\EOF &&
master |
side |
odd/spot |
'
test_expect_success 'check `%(contents:lines=99999)`' '
- test_prepare_expect >expect <<-\EOF &&
+ cat >expect <<-\EOF &&
master |three
side |four
odd/spot |three
echo content >file &&
git add file &&
git commit -m "added sub and file" &&
+	mkdir -p deep/directory/hierarchy &&
+	git submodule add ./. deep/directory/hierarchy/sub &&
+ git commit -m "added another submodule" &&
git branch submodule
'
git checkout .
'
+test_expect_success 'moving a submodule in nested directories' '
+ (
+ cd deep &&
+ git mv directory ../ &&
+ # git status would fail if the update of linking git dir to
+ # work dir of the submodule failed.
+ git status &&
+		git config -f ../.gitmodules submodule.deep/directory/hierarchy/sub.path >../actual &&
+		echo "directory/hierarchy/sub" >../expect
+ ) &&
+ test_cmp actual expect
+'
+
test_done
)
'
+test_expect_success GPG 'verify multiple tags' '
+ tags="fourth-signed sixth-signed seventh-signed" &&
+ for i in $tags
+ do
+ git verify-tag -v --raw $i || return 1
+ done >expect.stdout 2>expect.stderr.1 &&
+ grep "^.GNUPG:." <expect.stderr.1 >expect.stderr &&
+ git verify-tag -v --raw $tags >actual.stdout 2>actual.stderr.1 &&
+ grep "^.GNUPG:." <actual.stderr.1 >actual.stderr &&
+ test_cmp expect.stdout actual.stdout &&
+ test_cmp expect.stderr actual.stderr
+'
+
test_done
test_path_is_missing to_clean
'
-test_expect_success POSIXPERM 'should avoid cleaning possible submodules' '
+test_expect_success POSIXPERM,SANITY 'should avoid cleaning possible submodules' '
rm -fr to_clean possible_sub1 &&
mkdir to_clean possible_sub1 &&
test_when_finished "rm -rf possible_sub*" &&
. ./test-lib.sh
+test_expect_success 'submodule deinit works on empty repository' '
+ git submodule deinit --all
+'
+
test_expect_success 'setup - initial commit' '
>t &&
git add t &&
git branch initial
'
+test_expect_success 'submodule init aborts on missing .gitmodules file' '
+ test_when_finished "git update-index --remove sub" &&
+ git update-index --add --cacheinfo 160000,$(git rev-parse HEAD),sub &&
+ # missing the .gitmodules file here
+ test_must_fail git submodule init 2>actual &&
+ test_i18ngrep "No url found for submodule path" actual
+'
+
+test_expect_success 'submodule update aborts on missing .gitmodules file' '
+ test_when_finished "git update-index --remove sub" &&
+ git update-index --add --cacheinfo 160000,$(git rev-parse HEAD),sub &&
+ # missing the .gitmodules file here
+ git submodule update sub 2>actual &&
+ test_i18ngrep "Submodule path .sub. not initialized" actual
+'
+
test_expect_success 'configuration parsing' '
test_when_finished "rm -f .gitmodules" &&
cat >.gitmodules <<-\EOF &&
)
'
+test_expect_success 'recursive relative submodules stay relative' '
+ test_when_finished "rm -rf super clone2 subsub sub3" &&
+ mkdir subsub &&
+ (
+ cd subsub &&
+ git init &&
+ >t &&
+ git add t &&
+ git commit -m "initial commit"
+ ) &&
+ mkdir sub3 &&
+ (
+ cd sub3 &&
+ git init &&
+ >t &&
+ git add t &&
+ git commit -m "initial commit" &&
+ git submodule add ../subsub dirdir/subsub &&
+ git commit -m "add submodule subsub"
+ ) &&
+ mkdir super &&
+ (
+ cd super &&
+ git init &&
+ >t &&
+ git add t &&
+ git commit -m "initial commit" &&
+ git submodule add ../sub3 &&
+ git commit -m "add submodule sub"
+ ) &&
+ git clone super clone2 &&
+ (
+ cd clone2 &&
+ git submodule update --init --recursive &&
+ echo "gitdir: ../.git/modules/sub3" >./sub3/.git_expect &&
+ echo "gitdir: ../../../.git/modules/sub3/modules/dirdir/subsub" >./sub3/dirdir/subsub/.git_expect
+ ) &&
+ test_cmp clone2/sub3/.git_expect clone2/sub3/.git &&
+ test_cmp clone2/sub3/dirdir/subsub/.git_expect clone2/sub3/dirdir/subsub/.git
+'
+
test_expect_success 'submodule add with an existing name fails unless forced' '
(
cd addtest2 &&
git init &&
>file &&
git add file &&
- git commit -m "repo should not be empty"
- git submodule deinit .
+ git commit -m "repo should not be empty" &&
+ git submodule deinit . &&
+ git submodule deinit --all
)
'
rmdir init example2
'
+test_expect_success 'submodule deinit --all deinits all initialized submodules' '
+ git submodule update --init &&
+ git config submodule.example.foo bar &&
+ git config submodule.example2.frotz nitfol &&
+ test_must_fail git submodule deinit &&
+ git submodule deinit --all >actual &&
+ test -z "$(git config --get-regexp "submodule\.example\.")" &&
+ test -z "$(git config --get-regexp "submodule\.example2\.")" &&
+ test_i18ngrep "Cleared directory .init" actual &&
+ test_i18ngrep "Cleared directory .example2" actual &&
+ rmdir init example2
+'
+
test_expect_success 'submodule deinit deinits a submodule when its work tree is missing or empty' '
git submodule update --init &&
rm -rf init example2/* example2/.git &&
test_i18ngrep ! "Submodule .example. (.*) unregistered for path .init" actual &&
test_i18ngrep ! "Submodule .example2. (.*) unregistered for path .example2" actual &&
test_i18ngrep "Cleared directory .init" actual &&
+ git submodule deinit --all >actual &&
+ test_i18ngrep ! "Submodule .example. (.*) unregistered for path .init" actual &&
+ test_i18ngrep ! "Submodule .example2. (.*) unregistered for path .example2" actual &&
+ test_i18ngrep "Cleared directory .init" actual &&
rmdir init example2
'
git submodule add ../none none &&
test_tick &&
git commit -m "none"
+ ) &&
+ git clone . recursivesuper &&
+ ( cd recursivesuper
+ git submodule add ../super super
)
'
)
'
+supersha1=$(git -C super rev-parse HEAD)
+mergingsha1=$(git -C super/merging rev-parse HEAD)
+nonesha1=$(git -C super/none rev-parse HEAD)
+rebasingsha1=$(git -C super/rebasing rev-parse HEAD)
+submodulesha1=$(git -C super/submodule rev-parse HEAD)
+pwd=$(pwd)
+
+cat <<EOF >expect
+Submodule path '../super': checked out '$supersha1'
+Submodule path '../super/merging': checked out '$mergingsha1'
+Submodule path '../super/none': checked out '$nonesha1'
+Submodule path '../super/rebasing': checked out '$rebasingsha1'
+Submodule path '../super/submodule': checked out '$submodulesha1'
+EOF
+
+cat <<EOF >expect2
+Submodule 'merging' ($pwd/merging) registered for path '../super/merging'
+Submodule 'none' ($pwd/none) registered for path '../super/none'
+Submodule 'rebasing' ($pwd/rebasing) registered for path '../super/rebasing'
+Submodule 'submodule' ($pwd/submodule) registered for path '../super/submodule'
+Cloning into '$pwd/recursivesuper/super/merging'...
+done.
+Cloning into '$pwd/recursivesuper/super/none'...
+done.
+Cloning into '$pwd/recursivesuper/super/rebasing'...
+done.
+Cloning into '$pwd/recursivesuper/super/submodule'...
+done.
+EOF
+
+test_expect_success 'submodule update --init --recursive from subdirectory' '
+ git -C recursivesuper/super reset --hard HEAD^ &&
+ (cd recursivesuper &&
+ mkdir tmp &&
+ cd tmp &&
+ git submodule update --init --recursive ../super >../../actual 2>../../actual2
+ ) &&
+ test_cmp expect actual &&
+ test_cmp expect2 actual2
+'
+
apos="'";
test_expect_success 'submodule update does not fetch already present commits' '
(cd submodule &&
)
'
+cat << EOF >expect
+Execution of 'false $submodulesha1' failed in submodule path 'submodule'
+EOF
+
test_expect_success 'submodule update - command in .git/config catches failure' '
(cd super &&
git config submodule.submodule.update "!false"
) &&
(cd super/submodule &&
- git reset --hard HEAD^
+ git reset --hard $submodulesha1^
) &&
(cd super &&
- test_must_fail git submodule update submodule
- )
+ test_must_fail git submodule update submodule 2>../actual
+ ) &&
+ test_cmp actual expect
+'
+
+cat << EOF >expect
+Execution of 'false $submodulesha1' failed in submodule path '../submodule'
+EOF
+
+test_expect_success 'submodule update - command in .git/config catches failure -- subdirectory' '
+ (cd super &&
+ git config submodule.submodule.update "!false"
+ ) &&
+ (cd super/submodule &&
+ git reset --hard $submodulesha1^
+ ) &&
+ (cd super &&
+ mkdir tmp && cd tmp &&
+ test_must_fail git submodule update ../submodule 2>../../actual
+ ) &&
+ test_cmp actual expect
+'
+
+cat << EOF >expect
+Execution of 'false $submodulesha1' failed in submodule path '../super/submodule'
+Failed to recurse into submodule path '../super'
+EOF
+
+test_expect_success 'recursive submodule update - command in .git/config catches failure -- subdirectory' '
+ (cd recursivesuper &&
+ git submodule update --remote super &&
+ git add super &&
+ git commit -m "update to latest to have more than one commit in submodules"
+ ) &&
+ git -C recursivesuper/super config submodule.submodule.update "!false" &&
+ git -C recursivesuper/super/submodule reset --hard $submodulesha1^ &&
+ (cd recursivesuper &&
+ mkdir -p tmp && cd tmp &&
+ test_must_fail git submodule update --recursive ../super 2>../../actual
+ ) &&
+ test_cmp actual expect
'
test_expect_success 'submodule init does not copy command into .git/config' '
test_i18ncmp expect actual
'
+cat > expect <<EOF
+Entering '../nested1'
+Entering '../nested1/nested2'
+Entering '../nested1/nested2/nested3'
+Entering '../nested1/nested2/nested3/submodule'
+Entering '../sub1'
+Entering '../sub2'
+Entering '../sub3'
+EOF
+
+test_expect_success 'test messages from "foreach --recursive" from subdirectory' '
+ (
+ cd clone2 &&
+ mkdir untracked &&
+ cd untracked &&
+ git submodule foreach --recursive >../../actual
+ ) &&
+ test_i18ncmp expect actual
+'
+
cat > expect <<EOF
nested1-nested1
nested2-nested2
test_cmp expect actual
'
-sed -e "/nested2 /s/.*/+$nested2sha1 nested1\/nested2 (file2~1)/;/sub[1-3]/d" < expect > expect2
-mv -f expect2 expect
+cat > expect <<EOF
+ $nested1sha1 nested1 (heads/master)
++$nested2sha1 nested1/nested2 (file2~1)
+ $nested3sha1 nested1/nested2/nested3 (heads/master)
+ $submodulesha1 nested1/nested2/nested3/submodule (heads/master)
+EOF
test_expect_success 'ensure "status --cached --recursive" preserves the --cached flag' '
(
test_cmp expect actual
'
+nested2sha1=$(git -C clone3/nested1/nested2 rev-parse HEAD)
+
+cat > expect <<EOF
+ $nested1sha1 ../nested1 (heads/master)
++$nested2sha1 ../nested1/nested2 (file2)
+ $nested3sha1 ../nested1/nested2/nested3 (heads/master)
+ $submodulesha1 ../nested1/nested2/nested3/submodule (heads/master)
+ $sub1sha1 ../sub1 ($sub1sha1_short)
+ $sub2sha1 ../sub2 ($sub2sha1_short)
+ $sub3sha1 ../sub3 (heads/master)
+EOF
+
+test_expect_success 'test "status --recursive" from sub directory' '
+ (
+ cd clone3 &&
+ mkdir tmp && cd tmp &&
+ git submodule status --recursive > ../../actual
+ ) &&
+ test_cmp expect actual
+'
+
test_expect_success 'use "git clone --recursive" to checkout all submodules' '
git clone --recursive super clone4 &&
(
+++ /dev/null
-#!/bin/sh
-#
-# Copyright (c) 2016 Jacob Keller
-#
-
-test_description='Basic plumbing support of submodule--helper
-
-This test verifies the submodule--helper plumbing command used to implement
-git-submodule.
-'
-
-. ./test-lib.sh
-
-test_expect_success 'sanitize-config clears configuration' '
- git -c user.name="Some User" submodule--helper sanitize-config >actual &&
- test_must_be_empty actual
-'
-
-sq="'"
-test_expect_success 'sanitize-config keeps credential.helper' '
- git -c credential.helper=helper submodule--helper sanitize-config >actual &&
- echo "${sq}credential.helper=helper${sq}" >expect &&
- test_cmp expect actual
-'
-
-test_done
test_cmp expect msg
'
+test_expect_success '--amend to set message to empty' '
+ echo bata >file &&
+ git add file &&
+ git commit -m "unamended" &&
+ git commit --amend --allow-empty-message -m "" &&
+ git diff-tree -s --format=%s HEAD >msg &&
+ echo "" >expect &&
+ test_cmp expect msg
+'
+
+test_expect_success '--amend to set empty message needs --allow-empty-message' '
+ echo conga >file &&
+ git add file &&
+ git commit -m "unamended" &&
+ test_must_fail git commit --amend -m "" &&
+ git diff-tree -s --format=%s HEAD >msg &&
+ echo "unamended" >expect &&
+ test_cmp expect msg
+'
+
test_expect_success '-m --edit' '
echo amended >expect &&
git commit --allow-empty -m buffer &&
test_cmp expected actual
'
+test_expect_success '--dry-run with conflicts fixed from a merge' '
+ # setup two branches with conflicting information
+ # in the same file, resolve the conflict,
+ # call commit with --dry-run
+ echo "Initial contents, unimportant" >test-file &&
+ git add test-file &&
+ git commit -m "Initial commit" &&
+ echo "commit-1-state" >test-file &&
+ git commit -m "commit 1" -i test-file &&
+ git tag commit-1 &&
+ git checkout -b branch-2 HEAD^1 &&
+ echo "commit-2-state" >test-file &&
+ git commit -m "commit 2" -i test-file &&
+	test_must_fail git merge --no-commit commit-1 &&
+ echo "commit-2-state" >test-file &&
+ git add test-file &&
+ git commit --dry-run &&
+ git commit -m "conflicts fixed from merge."
+'
+
test_done
test_description='verbose commit template'
. ./test-lib.sh
-cat >check-for-diff <<EOF
-#!$SHELL_PATH
-exec grep '^diff --git' "\$1"
+write_script "check-for-diff" <<\EOF &&
+grep '^diff --git' "$1" >out
+exit 0
EOF
-chmod +x check-for-diff
test_set_editor "$PWD/check-for-diff"
cat >message <<'EOF'
'
test_expect_success 'initial commit shows verbose diff' '
- git commit --amend -v
+ git commit --amend -v &&
+ test_line_count = 1 out
'
test_expect_success 'second commit' '
test_expect_success 'verbose diff is stripped out' '
git commit --amend -v &&
- check_message message
+ check_message message &&
+ test_line_count = 1 out
'
test_expect_success 'verbose diff is stripped out (mnemonicprefix)' '
git config diff.mnemonicprefix true &&
git commit --amend -v &&
- check_message message
+ check_message message &&
+ test_line_count = 1 out
'
cat >diff <<'EOF'
test_i18ngrep "Aborting commit due to empty commit message." err
'
+test_expect_success 'status does not verbose without --verbose' '
+ git status >actual &&
+ ! grep "^diff --git" actual
+'
+
+test_expect_success 'setup -v -v' '
+ echo dirty >file
+'
+
+for i in true 1
+do
+ test_expect_success "commit.verbose=$i and --verbose omitted" "
+ git -c commit.verbose=$i commit --amend &&
+ test_line_count = 1 out
+ "
+done
+
+for i in false -2 -1 0
+do
+ test_expect_success "commit.verbose=$i and --verbose omitted" "
+ git -c commit.verbose=$i commit --amend &&
+ test_line_count = 0 out
+ "
+done
+
+for i in 2 3
+do
+ test_expect_success "commit.verbose=$i and --verbose omitted" "
+ git -c commit.verbose=$i commit --amend &&
+ test_line_count = 2 out
+ "
+done
+
+for i in true false -2 -1 0 1 2 3
+do
+ test_expect_success "commit.verbose=$i and --verbose" "
+ git -c commit.verbose=$i commit --amend --verbose &&
+ test_line_count = 1 out
+ "
+
+ test_expect_success "commit.verbose=$i and --no-verbose" "
+ git -c commit.verbose=$i commit --amend --no-verbose &&
+ test_line_count = 0 out
+ "
+
+ test_expect_success "commit.verbose=$i and -v -v" "
+ git -c commit.verbose=$i commit --amend -v -v &&
+ test_line_count = 2 out
+ "
+done
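
In short, a sketch of the mapping the loops above verify: commit.verbose takes
a boolean or an integer, an integer behaves like passing -v that many times,
and explicit command-line options win over the configuration.

	git config commit.verbose 2        # acts like "git commit -v -v"
	git commit --amend --no-verbose    # overrides the config; no diff in the template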
+
+test_expect_success "status ignores commit.verbose=true" '
+ git -c commit.verbose=true status >actual &&
+	! grep "^diff --git" actual
+'
+
test_done
git tag seventh-signed &&
echo 8 >file && test_tick && git commit -a -m eighth -SB7227189 &&
- git tag eighth-signed-alt
+ git tag eighth-signed-alt &&
+
+ # commit.gpgsign is still on but this must not be signed
+ git tag ninth-unsigned $(echo 9 | git commit-tree HEAD^{tree}) &&
+ # explicit -S of course must sign.
+ git tag tenth-signed $(echo 9 | git commit-tree -S HEAD^{tree})
'
test_expect_success GPG 'verify and show signatures' '
(
- for commit in initial second merge fourth-signed fifth-signed sixth-signed seventh-signed
+ for commit in initial second merge fourth-signed \
+ fifth-signed sixth-signed seventh-signed tenth-signed
do
git verify-commit $commit &&
git show --pretty=short --show-signature $commit >actual &&
done
) &&
(
- for commit in merge^2 fourth-unsigned sixth-unsigned seventh-unsigned
+ for commit in merge^2 fourth-unsigned sixth-unsigned \
+ seventh-unsigned ninth-unsigned
do
test_must_fail git verify-commit $commit &&
git show --pretty=short --show-signature $commit >actual &&
git tag c3
'
-test_expect_success 'merge c1 to c2' '
+merge_c1_to_c2_cmds='
git reset --hard c1 &&
git merge -s resolve c2 &&
test "$(git rev-parse c1)" != "$(git rev-parse HEAD)" &&
test 3 = $(git ls-files | wc -l)
'
+test_expect_success 'merge c1 to c2' "$merge_c1_to_c2_cmds"
+
+test_expect_success 'merge c1 to c2, again' "$merge_c1_to_c2_cmds"
+
test_expect_success 'merge c2 to c3 (fails)' '
git reset --hard c2 &&
test_must_fail git merge -s resolve c3
test_expect_success 'untracked files overwritten by merge (fast and non-fast forward)' '
test_must_fail git merge branch 2>out &&
- test_cmp out expect &&
+ test_i18ncmp out expect &&
git commit --allow-empty -m empty &&
(
GIT_MERGE_VERBOSITY=0 &&
export GIT_MERGE_VERBOSITY &&
test_must_fail git merge branch 2>out2
) &&
- test_cmp out2 expect &&
+ test_i18ncmp out2 expect &&
git reset --hard HEAD^
'
four
three
two
-Please, commit your changes or stash them before you can merge.
+Please commit your changes or stash them before you can merge.
error: The following untracked working tree files would be overwritten by merge:
five
Please move or remove them before you can merge.
git add three &&
git add four &&
test_must_fail git merge branch 2>out &&
- test_cmp out expect
+ test_i18ncmp out expect
'
cat >expect <<\EOF
error: Your local changes to the following files would be overwritten by checkout:
rep/one
rep/two
-Please, commit your changes or stash them before you can switch branches.
+Please commit your changes or stash them before you can switch branches.
Aborting
EOF
echo uno >rep/one &&
echo dos >rep/two &&
test_must_fail git checkout branch 2>out &&
- test_cmp out expect
+ test_i18ncmp out expect
'
cat >expect <<\EOF
error: Your local changes to the following files would be overwritten by checkout:
rep/one
rep/two
-Please, commit your changes or stash them before you can switch branches.
+Please commit your changes or stash them before you can switch branches.
Aborting
EOF
test_expect_success 'not uptodate file porcelain checkout error' '
git add rep/one rep/two &&
test_must_fail git checkout branch 2>out &&
- test_cmp out expect
+ test_i18ncmp out expect
'
cat >expect <<\EOF
>rep/untracked-file &&
>rep2/untracked-file &&
test_must_fail git checkout branch 2>out &&
- test_cmp out ../expect
+ test_i18ncmp out ../expect
'
test_done
prompt_given ()
{
prompt="$1"
- test "$prompt" = "Launch 'test-tool' [Y/n]: branch"
+ test "$prompt" = "Launch 'test-tool' [Y/n]? branch"
}
# Create a file on master and change it on branch
grep file2 output
'
+run_dir_diff_test 'difftool --dir-diff with unmerged files' '
+ test_when_finished git reset --hard &&
+ test_config difftool.echo.cmd "echo ok" &&
+ git checkout -B conflict-a &&
+ git checkout -B conflict-b &&
+ git checkout conflict-a &&
+ echo a >>file &&
+ git add file &&
+ git commit -m conflict-a &&
+ git checkout conflict-b &&
+ echo b >>file &&
+ git add file &&
+ git commit -m conflict-b &&
+ git checkout master &&
+ git merge conflict-a &&
+ test_must_fail git merge conflict-b &&
+ cat >expect <<-EOF &&
+ ok
+ EOF
+ git difftool --dir-diff $symlinks -t echo >actual &&
+ test_cmp expect actual
+'
+
write_script .git/CHECK_SYMLINKS <<\EOF
for f in file file2 sub/sub
do
grep "A U Thor" actual
'
+test_expect_success 'blame file with CRLF core.autocrlf=true' '
+ git config core.autocrlf false &&
+ printf "testcase\r\n" >crlfinrepo &&
+ >.gitattributes &&
+ git add crlfinrepo &&
+ git commit -m "add crlfinrepo" &&
+ git config core.autocrlf true &&
+ mv crlfinrepo tmp &&
+ git checkout crlfinrepo &&
+ rm tmp &&
+ git blame crlfinrepo >actual &&
+ grep "A U Thor" actual
+'
+
test_done
name='try a deep --rmdir with a commit'
test_expect_success "$name" '
- git checkout -f -b mybranch ${remotes_git_svn} &&
+ git checkout -f -b mybranch remotes/git-svn &&
mv dir/a/b/c/d/e/file dir/file &&
cp dir/file file &&
git update-index --add --remove dir/a/b/c/d/e/file dir/file file &&
git commit -m "$name" &&
git svn set-tree --find-copies-harder --rmdir \
- ${remotes_git_svn}..mybranch &&
+ remotes/git-svn..mybranch &&
svn_cmd up "$SVN_TREE" &&
test -d "$SVN_TREE"/dir && test ! -d "$SVN_TREE"/dir/a'
git update-index --add dir/file/file &&
git commit -m '$name' &&
test_must_fail git svn set-tree --find-copies-harder --rmdir \
- ${remotes_git_svn}..mybranch
+ remotes/git-svn..mybranch
"
name='detect node change from directory to file #1'
test_expect_success "$name" '
rm -rf dir "$GIT_DIR"/index &&
- git checkout -f -b mybranch2 ${remotes_git_svn} &&
+ git checkout -f -b mybranch2 remotes/git-svn &&
mv bar/zzz zzz &&
rm -rf bar &&
mv zzz bar &&
git update-index --add -- bar &&
git commit -m "$name" &&
test_must_fail git svn set-tree --find-copies-harder --rmdir \
- ${remotes_git_svn}..mybranch2
+ remotes/git-svn..mybranch2
'
name='detect node change from file to directory #2'
test_expect_success "$name" '
rm -f "$GIT_DIR"/index &&
- git checkout -f -b mybranch3 ${remotes_git_svn} &&
+ git checkout -f -b mybranch3 remotes/git-svn &&
rm bar/zzz &&
git update-index --remove bar/zzz &&
mkdir bar/zzz &&
git update-index --add bar/zzz/yyy &&
git commit -m "$name" &&
git svn set-tree --find-copies-harder --rmdir \
- ${remotes_git_svn}..mybranch3 &&
+ remotes/git-svn..mybranch3 &&
svn_cmd up "$SVN_TREE" &&
test -d "$SVN_TREE"/bar/zzz &&
test -e "$SVN_TREE"/bar/zzz/yyy
name='detect node change from directory to file #2'
test_expect_success "$name" '
rm -f "$GIT_DIR"/index &&
- git checkout -f -b mybranch4 ${remotes_git_svn} &&
+ git checkout -f -b mybranch4 remotes/git-svn &&
rm -rf dir &&
git update-index --remove -- dir/file &&
touch dir &&
git update-index --add -- dir &&
git commit -m "$name" &&
test_must_fail git svn set-tree --find-copies-harder --rmdir \
- ${remotes_git_svn}..mybranch4
+ remotes/git-svn..mybranch4
'
name='remove executable bit from a file'
test_expect_success POSIXPERM "$name" '
rm -f "$GIT_DIR"/index &&
- git checkout -f -b mybranch5 ${remotes_git_svn} &&
+ git checkout -f -b mybranch5 remotes/git-svn &&
chmod -x exec.sh &&
git update-index exec.sh &&
git commit -m "$name" &&
git svn set-tree --find-copies-harder --rmdir \
- ${remotes_git_svn}..mybranch5 &&
+ remotes/git-svn..mybranch5 &&
svn_cmd up "$SVN_TREE" &&
test ! -x "$SVN_TREE"/exec.sh'
git update-index exec.sh &&
git commit -m "$name" &&
git svn set-tree --find-copies-harder --rmdir \
- ${remotes_git_svn}..mybranch5 &&
+ remotes/git-svn..mybranch5 &&
svn_cmd up "$SVN_TREE" &&
test -x "$SVN_TREE"/exec.sh'
git update-index exec.sh &&
git commit -m "$name" &&
git svn set-tree --find-copies-harder --rmdir \
- ${remotes_git_svn}..mybranch5 &&
+ remotes/git-svn..mybranch5 &&
svn_cmd up "$SVN_TREE" &&
test -h "$SVN_TREE"/exec.sh'
git update-index --add file exec-2.sh &&
git commit -m "$name" &&
git svn set-tree --find-copies-harder --rmdir \
- ${remotes_git_svn}..mybranch5 &&
+ remotes/git-svn..mybranch5 &&
svn_cmd up "$SVN_TREE" &&
test -x "$SVN_TREE"/file &&
test -h "$SVN_TREE"/exec-2.sh'
git update-index exec-2.sh &&
git commit -m "$name" &&
git svn set-tree --find-copies-harder --rmdir \
- ${remotes_git_svn}..mybranch5 &&
+ remotes/git-svn..mybranch5 &&
svn_cmd up "$SVN_TREE" &&
test -f "$SVN_TREE"/exec-2.sh &&
test ! -h "$SVN_TREE"/exec-2.sh &&
export GIT_SVN_ID
test_expect_success "$name" \
'git svn init "$svnrepo" && git svn fetch &&
- git rev-list --pretty=raw ${remotes_git_svn} | grep ^tree | uniq > a &&
+ git rev-list --pretty=raw remotes/git-svn | grep ^tree | uniq > a &&
git rev-list --pretty=raw remotes/alt | grep ^tree | uniq > b &&
test_cmp a b'
test_expect_success POSIXPERM,SYMLINKS "$name" "test_cmp a expected"
-test_expect_success 'exit if remote refs are ambigious' "
+test_expect_success 'exit if remote refs are ambiguous' '
git config --add svn-remote.svn.fetch \
- bar:refs/${remotes_git_svn} &&
+ bar:refs/remotes/git-svn &&
test_must_fail git svn migrate
-"
+'
test_expect_success 'exit if init-ing a would clobber a URL' '
svnadmin create "${PWD}/svnrepo2" &&
svn mkdir -m "mkdir bar" "${svnrepo}2/bar" &&
git config --unset svn-remote.svn.fetch \
- "^bar:refs/${remotes_git_svn}$" &&
+ "^bar:refs/remotes/git-svn$" &&
test_must_fail git svn init "${svnrepo}2/bar"
'
git config --get svn-remote.svn.fetch \
"^bar:refs/remotes/bar$" &&
git config --get svn-remote.svn.fetch \
- "^:refs/${remotes_git_svn}$"
+ "^:refs/remotes/git-svn$"
'
test_expect_success 'dcommit $rev does not clobber current branch' '
git branch -D my-bar
'
-test_expect_success 'able to dcommit to a subdirectory' "
+test_expect_success 'able to dcommit to a subdirectory' '
git svn fetch -i bar &&
git checkout -b my-bar refs/remotes/bar &&
echo abc > d &&
git update-index --add d &&
- git commit -m '/bar/d should be in the log' &&
+ git commit -m "/bar/d should be in the log" &&
git svn dcommit -i bar &&
- test -z \"\$(git diff refs/heads/my-bar refs/remotes/bar)\" &&
+ test -z "$(git diff refs/heads/my-bar refs/remotes/bar)" &&
mkdir newdir &&
echo new > newdir/dir &&
git update-index --add newdir/dir &&
- git commit -m 'add a new directory' &&
+ git commit -m "add a new directory" &&
git svn dcommit -i bar &&
- test -z \"\$(git diff refs/heads/my-bar refs/remotes/bar)\" &&
+ test -z "$(git diff refs/heads/my-bar refs/remotes/bar)" &&
echo foo >> newdir/dir &&
git update-index newdir/dir &&
- git commit -m 'modify a file in new directory' &&
+ git commit -m "modify a file in new directory" &&
git svn dcommit -i bar &&
- test -z \"\$(git diff refs/heads/my-bar refs/remotes/bar)\"
- "
+ test -z "$(git diff refs/heads/my-bar refs/remotes/bar)"
+'
test_expect_success 'dcommit should not fail with a touched file' '
test_commit "commit-new-file-foo2" foo2 &&
git svn rebase
'
-test_expect_success 'able to set-tree to a subdirectory' "
+test_expect_success 'able to set-tree to a subdirectory' '
echo cba > d &&
git update-index d &&
- git commit -m 'update /bar/d' &&
+ git commit -m "update /bar/d" &&
git svn set-tree -i bar HEAD &&
- test -z \"\$(git diff refs/heads/my-bar refs/remotes/bar)\"
- "
+ test -z "$(git diff refs/heads/my-bar refs/remotes/bar)"
+'
test_expect_success 'git-svn works in a bare repository' '
mkdir bare-repo &&
name='test svn:keywords ignoring'
test_expect_success "$name" \
- 'git checkout -b mybranch ${remotes_git_svn} &&
+ 'git checkout -b mybranch remotes/git-svn &&
echo Hi again >> kw.c &&
git commit -a -m "test keywords ignoring" &&
- git svn set-tree ${remotes_git_svn}..mybranch &&
- git pull . ${remotes_git_svn}'
+ git svn set-tree remotes/git-svn..mybranch &&
+ git pull . remotes/git-svn'
expect='/* $Id$ */'
got="$(sed -ne 2p kw.c)"
test_expect_success 'fetch and pull latest from svn and checkout a new wc' \
'git svn fetch &&
- git pull . ${remotes_git_svn} &&
+ git pull . remotes/git-svn &&
svn_cmd co "$svnrepo" new_wc'
for i in crlf ne_crlf lf ne_lf cr ne_cr empty_cr empty_lf empty empty_crlf
svn_cmd commit -m "propset CRLF on cr files"'
cd ..
test_expect_success 'fetch and pull latest from svn' \
- 'git svn fetch && git pull . ${remotes_git_svn}'
+ 'git svn fetch && git pull . remotes/git-svn'
b_cr="$(git hash-object cr)"
b_ne_cr="$(git hash-object ne_cr)"
EOF
test_expect_success 'test create-ignore' "
- git svn fetch && git pull . ${remotes_git_svn} &&
+ git svn fetch && git pull . remotes/git-svn &&
git svn create-ignore &&
cmp ./.gitignore create-ignore.expect &&
cmp ./deeply/.gitignore create-ignore.expect &&
test_expect_success 'mirror via git svn' '
git svn init "$svnrepo" &&
git svn fetch &&
- git checkout -f -b test-rmdir ${remotes_git_svn}
+ git checkout -f -b test-rmdir remotes/git-svn
'
test_expect_success 'Try a commit on rmdir' '
test_expect_success 'clone repo with git' '
git svn clone -s "$svnrepo" x &&
- test -f x/FOLLOWME &&
- test ! -f x/README
+ test_path_is_file x/FOLLOWME &&
+ test_path_is_missing x/README
'
-test_expect_success 'make sure r2 still has old file' "
- cd x &&
- test -n \"\$(git svn find-rev r1)\" &&
- git reset --hard \$(git svn find-rev r1) &&
- test -f README &&
- test ! -f FOLLOWME &&
- test x\$(git svn find-rev r2) = x
-"
+test_expect_success 'make sure r2 still has old file' '
+ (
+ cd x &&
+ test -n "$(git svn find-rev r1)" &&
+ git reset --hard "$(git svn find-rev r1)" &&
+ test_path_is_file README &&
+ test_path_is_missing FOLLOWME &&
+ test -z "$(git svn find-rev r2)"
+ )
+'
test_done
test_expect_success 'dcommit fails to commit because of conflict' '
git svn init "$svnrepo" &&
git svn fetch &&
- git reset --hard refs/${remotes_git_svn} &&
+ git reset --hard refs/remotes/git-svn &&
svn_cmd co "$svnrepo" t.svn &&
(
cd t.svn &&
'
test_expect_success 'dcommit does the svn equivalent of an index merge' "
- git reset --hard refs/${remotes_git_svn} &&
+ git reset --hard refs/remotes/git-svn &&
echo 'index merge' > file2 &&
git update-index --add file2 &&
git commit -a -m 'index merge' &&
'
test_expect_success 'multiple dcommit from git svn will not clobber svn' "
- git reset --hard refs/${remotes_git_svn} &&
+ git reset --hard refs/remotes/git-svn &&
echo new file >> new-file &&
git update-index --add new-file &&
git commit -a -m 'new file' &&
git svn init "$svnrepo" &&
git svn fetch &&
rm -rf "$GIT_DIR"/svn &&
- git update-ref refs/heads/git-svn-HEAD refs/${remotes_git_svn} &&
- git update-ref refs/heads/svn-HEAD refs/${remotes_git_svn} &&
- git update-ref -d refs/${remotes_git_svn} refs/${remotes_git_svn}
+ git update-ref refs/heads/git-svn-HEAD refs/remotes/git-svn &&
+ git update-ref refs/heads/svn-HEAD refs/remotes/git-svn &&
+ git update-ref -d refs/remotes/git-svn refs/remotes/git-svn
'
-head=$(git rev-parse --verify refs/heads/git-svn-HEAD^0)
-test_expect_success 'git-svn-HEAD is a real HEAD' "test -n '$head'"
+test_expect_success 'git-svn-HEAD is a real HEAD' '
+ git rev-parse --verify refs/heads/git-svn-HEAD^0
+'
svnrepo_escaped=$(echo $svnrepo | sed 's/ /%20/')
echo "$svnrepo" > "$GIT_DIR"/svn/info/url &&
git svn migrate &&
! test -d "$GIT_DIR"/git-svn &&
- git rev-parse --verify refs/${remotes_git_svn}^0 &&
+ git rev-parse --verify refs/remotes/git-svn^0 &&
git rev-parse --verify refs/remotes/svn^0 &&
test "$(git config --get svn-remote.svn.url)" = "$svnrepo_escaped" &&
test $(git config --get svn-remote.svn.fetch) = \
- ":refs/${remotes_git_svn}"
+ ":refs/remotes/git-svn"
'
test_expect_success 'initialize a multi-repository repo' '
"^tags/\*:refs/remotes/origin/tags/\*$" &&
git config --add svn-remote.svn.fetch "branches/a:refs/remotes/origin/a" &&
git config --add svn-remote.svn.fetch "branches/b:refs/remotes/origin/b" &&
- for i in tags/0.1 tags/0.2 tags/0.3; do
+ for i in tags/0.1 tags/0.2 tags/0.3
+ do
git config --add svn-remote.svn.fetch \
- $i:refs/remotes/origin/$i || exit 1; done &&
+ $i:refs/remotes/origin/$i || return 1
+ done &&
git config --get-all svn-remote.svn.fetch > fetch.out &&
grep "^trunk:refs/remotes/origin/trunk$" fetch.out &&
grep "^branches/a:refs/remotes/origin/a$" fetch.out &&
grep "^tags/0\.1:refs/remotes/origin/tags/0\.1$" fetch.out &&
grep "^tags/0\.2:refs/remotes/origin/tags/0\.2$" fetch.out &&
grep "^tags/0\.3:refs/remotes/origin/tags/0\.3$" fetch.out &&
- grep "^:refs/${remotes_git_svn}" fetch.out
+ grep "^:refs/remotes/git-svn" fetch.out
'
# refs should all be different, but the trees should all be the same:
-test_expect_success 'multi-fetch works on partial urls + paths' "
+test_expect_success 'multi-fetch works on partial urls + paths' '
+ refs="trunk a b tags/0.1 tags/0.2 tags/0.3" &&
git svn multi-fetch &&
- for i in trunk a b tags/0.1 tags/0.2 tags/0.3; do
- git rev-parse --verify refs/remotes/origin/\$i^0 >> refs.out || exit 1;
- done &&
- test -z \"\$(sort < refs.out | uniq -d)\" &&
- for i in trunk a b tags/0.1 tags/0.2 tags/0.3; do
- for j in trunk a b tags/0.1 tags/0.2 tags/0.3; do
- if test \$j != \$i; then continue; fi
- test -z \"\$(git diff refs/remotes/origin/\$i \
- refs/remotes/origin/\$j)\" ||exit 1; done; done
- "
+ for i in $refs
+ do
+ git rev-parse --verify refs/remotes/origin/$i^0 || return 1;
+ done >refs.out &&
+ test -z "$(sort <refs.out | uniq -d)" &&
+ for i in $refs
+ do
+ for j in $refs
+ do
+ git diff --exit-code refs/remotes/origin/$i \
+ refs/remotes/origin/$j ||
+ return 1
+ done
+ done
+'
test_expect_success 'migrate --minimize on old inited layout' '
git config --unset-all svn-remote.svn.fetch &&
git config --unset-all svn-remote.svn.url &&
rm -rf "$GIT_DIR"/svn &&
- for i in $(cat fetch.out); do
+ for i in $(cat fetch.out)
+ do
path=$(expr $i : "\([^:]*\):.*$")
ref=$(expr $i : "[^:]*:\(refs/remotes/.*\)$")
if test -z "$ref"; then continue; fi
if test -n "$path"; then path="/$path"; fi
- ( mkdir -p "$GIT_DIR"/svn/$ref/info/ &&
- echo "$svnrepo"$path > "$GIT_DIR"/svn/$ref/info/url ) || exit 1;
+ mkdir -p "$GIT_DIR"/svn/$ref/info/ &&
+ echo "$svnrepo"$path >"$GIT_DIR"/svn/$ref/info/url ||
+ return 1
done &&
git svn migrate --minimize &&
test -z "$(git config -l | grep "^svn-remote\.git-svn\.")" &&
grep "^tags/0\.1:refs/remotes/origin/tags/0\.1$" fetch.out &&
grep "^tags/0\.2:refs/remotes/origin/tags/0\.2$" fetch.out &&
grep "^tags/0\.3:refs/remotes/origin/tags/0\.3$" fetch.out &&
- grep "^:refs/${remotes_git_svn}" fetch.out
+ grep "^:refs/remotes/git-svn" fetch.out
'
test_expect_success ".rev_db auto-converted to .rev_map.UUID" '
bar_url=http://mayonaise/svnrepo/bar
test_expect_success 'verify metadata for /bar' "
git cat-file commit refs/remotes/bar | \
- grep '^${git_svn_id}: $bar_url@12 $uuid$' &&
+ grep '^git-svn-id: $bar_url@12 $uuid$' &&
git cat-file commit refs/remotes/bar~1 | \
- grep '^${git_svn_id}: $bar_url@11 $uuid$' &&
+ grep '^git-svn-id: $bar_url@11 $uuid$' &&
git cat-file commit refs/remotes/bar~2 | \
- grep '^${git_svn_id}: $bar_url@10 $uuid$' &&
+ grep '^git-svn-id: $bar_url@10 $uuid$' &&
git cat-file commit refs/remotes/bar~3 | \
- grep '^${git_svn_id}: $bar_url@9 $uuid$' &&
+ grep '^git-svn-id: $bar_url@9 $uuid$' &&
git cat-file commit refs/remotes/bar~4 | \
- grep '^${git_svn_id}: $bar_url@6 $uuid$' &&
+ grep '^git-svn-id: $bar_url@6 $uuid$' &&
git cat-file commit refs/remotes/bar~5 | \
- grep '^${git_svn_id}: $bar_url@1 $uuid$'
+ grep '^git-svn-id: $bar_url@1 $uuid$'
"
e_url=http://mayonaise/svnrepo/dir/a/b/c/d/e
test_expect_success 'verify metadata for /dir/a/b/c/d/e' "
git cat-file commit refs/remotes/e | \
- grep '^${git_svn_id}: $e_url@1 $uuid$'
+ grep '^git-svn-id: $e_url@1 $uuid$'
"
dir_url=http://mayonaise/svnrepo/dir
test_expect_success 'verify metadata for /dir' "
git cat-file commit refs/remotes/dir | \
- grep '^${git_svn_id}: $dir_url@2 $uuid$' &&
+ grep '^git-svn-id: $dir_url@2 $uuid$' &&
git cat-file commit refs/remotes/dir~1 | \
- grep '^${git_svn_id}: $dir_url@1 $uuid$'
+ grep '^git-svn-id: $dir_url@1 $uuid$'
"
test_expect_success 'find commit based on SVN revision number' "
bar_url=http://mayonaise/svnrepo/bar
test_expect_success 'verify metadata for /bar' "
git cat-file commit refs/remotes/bar | \
- grep '^${git_svn_id}: $bar_url@12 $uuid$' &&
+ grep '^git-svn-id: $bar_url@12 $uuid$' &&
git cat-file commit refs/remotes/bar~1 | \
- grep '^${git_svn_id}: $bar_url@11 $uuid$' &&
+ grep '^git-svn-id: $bar_url@11 $uuid$' &&
git cat-file commit refs/remotes/bar~2 | \
- grep '^${git_svn_id}: $bar_url@10 $uuid$' &&
+ grep '^git-svn-id: $bar_url@10 $uuid$' &&
git cat-file commit refs/remotes/bar~3 | \
- grep '^${git_svn_id}: $bar_url@9 $uuid$' &&
+ grep '^git-svn-id: $bar_url@9 $uuid$' &&
git cat-file commit refs/remotes/bar~4 | \
- grep '^${git_svn_id}: $bar_url@6 $uuid$' &&
+ grep '^git-svn-id: $bar_url@6 $uuid$' &&
git cat-file commit refs/remotes/bar~5 | \
- grep '^${git_svn_id}: $bar_url@1 $uuid$'
+ grep '^git-svn-id: $bar_url@1 $uuid$'
"
e_url=http://mayonaise/svnrepo/dir/a/b/c/d/e
test_expect_success 'verify metadata for /dir/a/b/c/d/e' "
git cat-file commit refs/remotes/e | \
- grep '^${git_svn_id}: $e_url@1 $uuid$'
+ grep '^git-svn-id: $e_url@1 $uuid$'
"
dir_url=http://mayonaise/svnrepo/dir
test_expect_success 'verify metadata for /dir' "
git cat-file commit refs/remotes/dir | \
- grep '^${git_svn_id}: $dir_url@2 $uuid$' &&
+ grep '^git-svn-id: $dir_url@2 $uuid$' &&
git cat-file commit refs/remotes/dir~1 | \
- grep '^${git_svn_id}: $dir_url@1 $uuid$'
+ grep '^git-svn-id: $dir_url@1 $uuid$'
"
test_done
git svn clone "$svnrepo/pr%20ject" clone &&
(
cd clone &&
- git rev-parse refs/${remotes_git_svn}
+ git rev-parse refs/remotes/git-svn
)
'
git svn clone --minimize-url "$svnrepo/pr%20ject/trunk" minimize &&
(
cd minimize &&
- git rev-parse refs/${remotes_git_svn}
+ git rev-parse refs/remotes/git-svn
)
'
git svn clone "$svnrepo/pr%20ject/trunk" trunk &&
(
cd trunk &&
- git rev-parse refs/${remotes_git_svn}
+ git rev-parse refs/remotes/git-svn
)
'
test_expect_success 'init, fetch and checkout repository' '
git svn init --rewrite-root=http://invalid.invalid/ "$svnrepo" &&
git svn fetch &&
- git checkout -b mybranch ${remotes_git_svn}
+ git checkout -b mybranch remotes/git-svn
'
test_expect_success 'remove rev_map' '
test_expect_success 'verify uuid' "
git cat-file commit refs/remotes/git-svn~0 | \
- grep '^${git_svn_id}: .*@2 $uuid$' &&
+ grep '^git-svn-id: .*@2 $uuid$' &&
git cat-file commit refs/remotes/git-svn~1 | \
- grep '^${git_svn_id}: .*@1 $uuid$'
+ grep '^git-svn-id: .*@1 $uuid$'
"
test_done
import sys
import struct
- s = struct.pack(">LL18s",
+ s = struct.pack(b">LL18s",
0x00051607, # AppleDouble
0x00020000, # version 2
- "" # pad to 26 bytes
+ b"" # pad to 26 bytes
)
- sys.stdout.write(s)
+ getattr(sys.stdout, 'buffer', sys.stdout).write(s)
EOF
}
FILE="$1" &&
SIZE="$2" &&
EXPECTED_CONTENT="$3" &&
+ sed -n '1,1 p' "$FILE" | grep "^version " &&
+ sed -n '2,2 p' "$FILE" | grep "^oid " &&
+ sed -n '3,3 p' "$FILE" | grep "^size " &&
+ test_line_count = 3 "$FILE" &&
cat "$FILE" | grep "size $SIZE" &&
HASH=$(cat "$FILE" | grep "oid sha256:" | sed -e "s/oid sha256://g") &&
LFS_FILE=".git/lfs/objects/$(echo "$HASH" | cut -c1-2)/$(echo "$HASH" | cut -c3-4)/$HASH" &&
# We only import HEAD here ("@all" is missing!)
git p4 clone --destination="$git" //depot &&
- test_file_in_lfs file6.bin 13 "content 6 bin 39 bytes XXXXXYYYYYZZZZZ"
+ test_file_in_lfs file6.bin 39 "content 6 bin 39 bytes XXXXXYYYYYZZZZZ" &&
test_file_count_in_dir ".git/lfs/objects" 1 &&
cat >expect <<-\EOF &&
git init . &&
git p4 clone --use-client-spec --destination="$git" //depot@all &&
cat >expect <<-\EOF &&
-Remove file 4
-[git-p4: depot-paths = "//depot/": change = 6]
+ Remove file 4
+ [git-p4: depot-paths = "//depot/": change = 6]
-Remove file 3
-[git-p4: depot-paths = "//depot/": change = 5]
+ Remove file 3
+ [git-p4: depot-paths = "//depot/": change = 5]
-Add file 4
-[git-p4: depot-paths = "//depot/": change = 4]
+ Add file 4
+ [git-p4: depot-paths = "//depot/": change = 4]
-Add file 3
-[git-p4: depot-paths = "//depot/": change = 3]
+ Add file 3
+ [git-p4: depot-paths = "//depot/": change = 3]
-Add file 2
-[git-p4: depot-paths = "//depot/": change = 2]
+ Add file 2
+ [git-p4: depot-paths = "//depot/": change = 2]
-Add file 1
-[git-p4: depot-paths = "//depot/": change = 1]
+ Add file 1
+ [git-p4: depot-paths = "//depot/": change = 1]
EOF
git log --format=%B >actual &&
git config git-p4.keepEmptyCommits true &&
git p4 clone --use-client-spec --destination="$git" //depot@all &&
cat >expect <<-\EOF &&
-Remove file 4
-[git-p4: depot-paths = "//depot/": change = 6]
+ Remove file 4
+ [git-p4: depot-paths = "//depot/": change = 6]
-Remove file 3
-[git-p4: depot-paths = "//depot/": change = 5]
+ Remove file 3
+ [git-p4: depot-paths = "//depot/": change = 5]
-Add file 4
-[git-p4: depot-paths = "//depot/": change = 4]
+ Add file 4
+ [git-p4: depot-paths = "//depot/": change = 4]
-Add file 3
-[git-p4: depot-paths = "//depot/": change = 3]
+ Add file 3
+ [git-p4: depot-paths = "//depot/": change = 3]
-Add file 2
-[git-p4: depot-paths = "//depot/": change = 2]
+ Add file 2
+ [git-p4: depot-paths = "//depot/": change = 2]
-Add file 1
-[git-p4: depot-paths = "//depot/": change = 1]
+ Add file 1
+ [git-p4: depot-paths = "//depot/": change = 1]
EOF
git log --format=%B >actual &&
git init . &&
git p4 clone --use-client-spec --destination="$git" --verbose //depot@all &&
cat >expect <<-\EOF &&
-Remove file 3
-[git-p4: depot-paths = "//depot/": change = 5]
+ Remove file 3
+ [git-p4: depot-paths = "//depot/": change = 5]
-Add file 3
-[git-p4: depot-paths = "//depot/": change = 3]
+ Add file 3
+ [git-p4: depot-paths = "//depot/": change = 3]
-Add file 1
-[git-p4: depot-paths = "//depot/": change = 1]
+ Add file 1
+ [git-p4: depot-paths = "//depot/": change = 1]
EOF
git log --format=%B >actual &&
--- /dev/null
+#!/bin/sh
+
+test_description='git p4 retrieve job info'
+
+. ./lib-git-p4.sh
+
+test_expect_success 'start p4d' '
+ start_p4d
+'
+
+test_expect_success 'add p4 jobs' '
+ (
+ p4_add_job TESTJOB-A &&
+ p4_add_job TESTJOB-B
+ )
+'
+
+test_expect_success 'add p4 files' '
+ client_view "//depot/... //client/..." &&
+ (
+ cd "$cli" &&
+ >file1 &&
+ p4 add file1 &&
+ p4 submit -d "Add file 1"
+ )
+'
+
+test_expect_success 'check log message of changelist with no jobs' '
+ client_view "//depot/... //client/..." &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git init . &&
+ git p4 clone --use-client-spec --destination="$git" //depot@all &&
+ cat >expect <<-\EOF &&
+ Add file 1
+ [git-p4: depot-paths = "//depot/": change = 1]
+
+ EOF
+ git log --format=%B >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'add TESTJOB-A to change 1' '
+ (
+ cd "$cli" &&
+ p4 fix -c 1 TESTJOB-A
+ )
+'
+
+test_expect_success 'check log message of changelist with one job' '
+ client_view "//depot/... //client/..." &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git init . &&
+ git p4 clone --use-client-spec --destination="$git" //depot@all &&
+ cat >expect <<-\EOF &&
+ Add file 1
+ Jobs: TESTJOB-A
+ [git-p4: depot-paths = "//depot/": change = 1]
+
+ EOF
+ git log --format=%B >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'add TESTJOB-B to change 1' '
+ (
+ cd "$cli" &&
+ p4 fix -c 1 TESTJOB-B
+ )
+'
+
+test_expect_success 'check log message of changelist with more jobs' '
+ client_view "//depot/... //client/..." &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git init . &&
+ git p4 clone --use-client-spec --destination="$git" //depot@all &&
+ cat >expect <<-\EOF &&
+ Add file 1
+ Jobs: TESTJOB-A TESTJOB-B
+ [git-p4: depot-paths = "//depot/": change = 1]
+
+ EOF
+ git log --format=%B >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'kill p4d' '
+ kill_p4d
+'
+
+test_done
'
test_expect_success 'prompt - describe detached head - branch' '
- printf " ((b1~1))" >expected &&
+ printf " ((tags/t2~1))" >expected &&
git checkout b1^ &&
test_when_finished "git checkout master" &&
(
test_cmp expect.rev actual.rev
}
-# Print a sequence of numbers or letters in increasing order. This is
-# similar to GNU seq(1), but the latter might not be available
-# everywhere (and does not do letters). It may be used like:
-#
-# for i in $(test_seq 100)
-# do
-# for j in $(test_seq 10 20)
-# do
-# for k in $(test_seq a z)
-# do
-# echo $i-$j-$k
-# done
-# done
-# done
+# Print a sequence of integers in increasing order, either with
+# two arguments (start and end):
+#
+# test_seq 1 5 -- outputs 1 2 3 4 5 one line at a time
+#
+# or with one argument (end), in which case it starts counting
+# from 1.
test_seq () {
case $# in
2) ;;
*) error "bug in the test script: not 1 or 2 parameters to test_seq" ;;
esac
- perl -le 'print for $ARGV[0]..$ARGV[1]' -- "$@"
+ test_seq_counter__=$1
+ while test "$test_seq_counter__" -le "$2"
+ do
+ echo "$test_seq_counter__"
+ test_seq_counter__=$(( $test_seq_counter__ + 1 ))
+ done
}
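A minimal sketch of how the pure-shell test_seq above behaves once this change is in place; the snippet is illustrative only, is not part of any test file, and assumes the test library has been sourced:

	# two arguments: print start..end inclusive, one number per line
	test_seq 10 12                 # prints 10 11 12
	# one argument: count from 1 up to the given end value
	test_seq 3                     # prints 1 2 3
	# typical use inside a test body
	for i in $(test_seq 1 5)
	do
		echo "line $i" >>expect || return 1
	done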
# This function can be used to schedule some commands to be run
}
run_list=$1; shift ;;
--run=*)
- run_list=$(expr "z$1" : 'z[^=]*=\(.*\)'); shift ;;
+ run_list=${1#--*=}; shift ;;
-h|--h|--he|--hel|--help)
help=t; shift ;;
-v|--v|--ve|--ver|--verb|--verbo|--verbos|--verbose)
verbose=t; shift ;;
--verbose-only=*)
- verbose_only=$(expr "z$1" : 'z[^=]*=\(.*\)')
+ verbose_only=${1#--*=}
shift ;;
-q|--q|--qu|--qui|--quie|--quiet)
# Ignore --quiet under a TAP::Harness. Saying how many tests
valgrind=memcheck
shift ;;
--valgrind=*)
- valgrind=$(expr "z$1" : 'z[^=]*=\(.*\)')
+ valgrind=${1#--*=}
shift ;;
--valgrind-only=*)
- valgrind_only=$(expr "z$1" : 'z[^=]*=\(.*\)')
+ valgrind_only=${1#--*=}
shift ;;
--tee)
shift ;; # was handled already
--root=*)
- root=$(expr "z$1" : 'z[^=]*=\(.*\)')
+ root=${1#--*=}
shift ;;
--chain-lint)
GIT_TEST_CHAIN_LINT=1
exec 4>/dev/null 3>/dev/null
fi
+# Send any "-x" output directly to stderr to avoid polluting tests
+# which capture stderr. We can do this unconditionally since it
+# has no effect if tracing isn't turned on.
+#
+# Note that this sets up the trace fd as soon as we assign the variable, so it
+# must come after the creation of descriptor 4 above. Likewise, we must never
+# unset this, as it has the side effect of closing descriptor 4, which we
+# use to show verbose tests to the user.
+#
+# Note also that we don't need or want to export it. The tracing is local to
+# this shell, and we would not want to influence any shells we exec.
+BASH_XTRACEFD=4
+
test_failure=0
test_count=0
test_fixed=0
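As the new comment above explains, assigning BASH_XTRACEFD makes bash (4.1 or later) send "set -x" trace output to that file descriptor instead of stderr, so tests that capture stderr no longer pick up trace lines. A minimal standalone sketch of the mechanism, with illustrative file names:

	#!/bin/bash
	exec 4>trace.out       # open the descriptor before assigning the variable
	BASH_XTRACEFD=4        # from here on, -x traces go to fd 4, not stderr
	set -x
	echo hello 2>err       # err stays free of "+ echo hello" trace lines
	set +x
	# the trace lines end up in trace.out instead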
error "You haven't built things yet, have you?"
}
-if ! test -x "$GIT_BUILD_DIR"/test-chmtime
+if ! test -x "$GIT_BUILD_DIR"/t/helper/test-chmtime
then
echo >&2 'You need to build test-chmtime:'
- echo >&2 'Run "make test-chmtime" in the source (toplevel) directory'
+ echo >&2 'Run "make t/helper/test-chmtime" in the source (toplevel) directory'
exit 1
fi
const char *tag_type = "tag";
+static int run_gpg_verify(const char *buf, unsigned long size, unsigned flags)
+{
+ struct signature_check sigc;
+ size_t payload_size;
+ int ret;
+
+ memset(&sigc, 0, sizeof(sigc));
+
+ payload_size = parse_signature(buf, size);
+
+ if (size == payload_size) {
+ if (flags & GPG_VERIFY_VERBOSE)
+ write_in_full(1, buf, payload_size);
+ return error("no signature found");
+ }
+
+ ret = check_signature(buf, payload_size, buf + payload_size,
+ size - payload_size, &sigc);
+ print_signature_buffer(&sigc, flags);
+
+ signature_check_clear(&sigc);
+ return ret;
+}
+
+int gpg_verify_tag(const unsigned char *sha1, const char *name_to_report,
+ unsigned flags)
+{
+ enum object_type type;
+ char *buf;
+ unsigned long size;
+ int ret;
+
+ type = sha1_object_info(sha1, NULL);
+ if (type != OBJ_TAG)
+ return error("%s: cannot verify a non-tag object of type %s.",
+ name_to_report ?
+ name_to_report :
+ find_unique_abbrev(sha1, DEFAULT_ABBREV),
+ typename(type));
+
+ buf = read_sha1_file(sha1, &type, &size);
+ if (!buf)
+ return error("%s: unable to read file.",
+ name_to_report ?
+ name_to_report :
+ find_unique_abbrev(sha1, DEFAULT_ABBREV));
+
+ ret = run_gpg_verify(buf, size, flags);
+
+ free(buf);
+ return ret;
+}
+
struct object *deref_tag(struct object *o, const char *warn, int warnlen)
{
while (o && o->type == OBJ_TAG)
extern int parse_tag(struct tag *item);
extern struct object *deref_tag(struct object *, const char *, int);
extern struct object *deref_tag_noverify(struct object *);
+extern int gpg_verify_tag(const unsigned char *sha1,
+ const char *name_to_report, unsigned flags);
#endif /* TAG_H */
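The new gpg_verify_tag() helper makes tag-signature verification callable from library code: it checks that the object is a tag, splits the buffer into payload and detached signature with parse_signature(), and hands both to check_signature(), printing the result according to the GPG_VERIFY_* flags. A rough sketch of how this path is typically exercised end to end, assuming a GPG signing key is configured and that git verify-tag is (or becomes) a caller of this helper; the tag name and message are illustrative:

	git tag -s -m "signed release" v1.0 &&
	git verify-tag v1.0 &&                 # non-zero exit on a bad or missing signature
	git verify-tag --verbose v1.0          # also prints the tag contents before verifying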
+++ /dev/null
-/*
- * This program can either change modification time of the given
- * file(s) or just print it. The program does not change atime or
- * ctime (their values are explicitly preserved).
- *
- * The mtime can be changed to an absolute value:
- *
- * test-chmtime =<seconds> file...
- *
- * Relative to the current time as returned by time(3):
- *
- * test-chmtime =+<seconds> (or =-<seconds>) file...
- *
- * Or relative to the current mtime of the file:
- *
- * test-chmtime <seconds> file...
- * test-chmtime +<seconds> (or -<seconds>) file...
- *
- * Examples:
- *
- * To just print the mtime use --verbose and set the file mtime offset to 0:
- *
- * test-chmtime -v +0 file
- *
- * To set the mtime to current time:
- *
- * test-chmtime =+0 file
- *
- */
-#include "git-compat-util.h"
-#include <utime.h>
-
-static const char usage_str[] = "-v|--verbose (+|=|=+|=-|-)<seconds> <file>...";
-
-static int timespec_arg(const char *arg, long int *set_time, int *set_eq)
-{
- char *test;
- const char *timespec = arg;
- *set_eq = (*timespec == '=') ? 1 : 0;
- if (*set_eq) {
- timespec++;
- if (*timespec == '+') {
- *set_eq = 2; /* relative "in the future" */
- timespec++;
- }
- }
- *set_time = strtol(timespec, &test, 10);
- if (*test) {
- fprintf(stderr, "Not a base-10 integer: %s\n", arg + 1);
- return 0;
- }
- if ((*set_eq && *set_time < 0) || *set_eq == 2) {
- time_t now = time(NULL);
- *set_time += now;
- }
- return 1;
-}
-
-int main(int argc, char *argv[])
-{
- static int verbose;
-
- int i = 1;
- /* no mtime change by default */
- int set_eq = 0;
- long int set_time = 0;
-
- if (argc < 3)
- goto usage;
-
- if (strcmp(argv[i], "--verbose") == 0 || strcmp(argv[i], "-v") == 0) {
- verbose = 1;
- ++i;
- }
- if (timespec_arg(argv[i], &set_time, &set_eq))
- ++i;
- else
- goto usage;
-
- for (; i < argc; i++) {
- struct stat sb;
- struct utimbuf utb;
-
- if (stat(argv[i], &sb) < 0) {
- fprintf(stderr, "Failed to stat %s: %s\n",
- argv[i], strerror(errno));
- return 1;
- }
-
-#ifdef GIT_WINDOWS_NATIVE
- if (!(sb.st_mode & S_IWUSR) &&
- chmod(argv[i], sb.st_mode | S_IWUSR)) {
- fprintf(stderr, "Could not make user-writable %s: %s",
- argv[i], strerror(errno));
- return 1;
- }
-#endif
-
- utb.actime = sb.st_atime;
- utb.modtime = set_eq ? set_time : sb.st_mtime + set_time;
-
- if (verbose) {
- uintmax_t mtime = utb.modtime < 0 ? 0: utb.modtime;
- printf("%"PRIuMAX"\t%s\n", mtime, argv[i]);
- }
-
- if (utb.modtime != sb.st_mtime && utime(argv[i], &utb) < 0) {
- fprintf(stderr, "Failed to modify time on %s: %s\n",
- argv[i], strerror(errno));
- return 1;
- }
- }
-
- return 0;
-
-usage:
- fprintf(stderr, "usage: %s %s\n", argv[0], usage_str);
- return 1;
-}
+++ /dev/null
-#include "cache.h"
-#include "string-list.h"
-
-/*
- * This program exposes the C API of the configuration mechanism
- * as a set of simple commands in order to facilitate testing.
- *
- * Reads stdin and prints result of command to stdout:
- *
- * get_value -> prints the value with highest priority for the entered key
- *
- * get_value_multi -> prints all values for the entered key in increasing order
- * of priority
- *
- * get_int -> print integer value for the entered key or die
- *
- * get_bool -> print bool value for the entered key or die
- *
- * get_string -> print string value for the entered key or die
- *
- * configset_get_value -> returns value with the highest priority for the entered key
- * from a config_set constructed from files entered as arguments.
- *
- * configset_get_value_multi -> returns value_list for the entered key sorted in
- * ascending order of priority from a config_set
- * constructed from files entered as arguments.
- *
- * Examples:
- *
- * To print the value with highest priority for key "foo.bAr Baz.rock":
- * test-config get_value "foo.bAr Baz.rock"
- *
- */
-
-
-int main(int argc, char **argv)
-{
- int i, val;
- const char *v;
- const struct string_list *strptr;
- struct config_set cs;
- git_configset_init(&cs);
-
- if (argc < 2) {
- fprintf(stderr, "Please, provide a command name on the command-line\n");
- goto exit1;
- } else if (argc == 3 && !strcmp(argv[1], "get_value")) {
- if (!git_config_get_value(argv[2], &v)) {
- if (!v)
- printf("(NULL)\n");
- else
- printf("%s\n", v);
- goto exit0;
- } else {
- printf("Value not found for \"%s\"\n", argv[2]);
- goto exit1;
- }
- } else if (argc == 3 && !strcmp(argv[1], "get_value_multi")) {
- strptr = git_config_get_value_multi(argv[2]);
- if (strptr) {
- for (i = 0; i < strptr->nr; i++) {
- v = strptr->items[i].string;
- if (!v)
- printf("(NULL)\n");
- else
- printf("%s\n", v);
- }
- goto exit0;
- } else {
- printf("Value not found for \"%s\"\n", argv[2]);
- goto exit1;
- }
- } else if (argc == 3 && !strcmp(argv[1], "get_int")) {
- if (!git_config_get_int(argv[2], &val)) {
- printf("%d\n", val);
- goto exit0;
- } else {
- printf("Value not found for \"%s\"\n", argv[2]);
- goto exit1;
- }
- } else if (argc == 3 && !strcmp(argv[1], "get_bool")) {
- if (!git_config_get_bool(argv[2], &val)) {
- printf("%d\n", val);
- goto exit0;
- } else {
- printf("Value not found for \"%s\"\n", argv[2]);
- goto exit1;
- }
- } else if (argc == 3 && !strcmp(argv[1], "get_string")) {
- if (!git_config_get_string_const(argv[2], &v)) {
- printf("%s\n", v);
- goto exit0;
- } else {
- printf("Value not found for \"%s\"\n", argv[2]);
- goto exit1;
- }
- } else if (!strcmp(argv[1], "configset_get_value")) {
- for (i = 3; i < argc; i++) {
- int err;
- if ((err = git_configset_add_file(&cs, argv[i]))) {
- fprintf(stderr, "Error (%d) reading configuration file %s.\n", err, argv[i]);
- goto exit2;
- }
- }
- if (!git_configset_get_value(&cs, argv[2], &v)) {
- if (!v)
- printf("(NULL)\n");
- else
- printf("%s\n", v);
- goto exit0;
- } else {
- printf("Value not found for \"%s\"\n", argv[2]);
- goto exit1;
- }
- } else if (!strcmp(argv[1], "configset_get_value_multi")) {
- for (i = 3; i < argc; i++) {
- int err;
- if ((err = git_configset_add_file(&cs, argv[i]))) {
- fprintf(stderr, "Error (%d) reading configuration file %s.\n", err, argv[i]);
- goto exit2;
- }
- }
- strptr = git_configset_get_value_multi(&cs, argv[2]);
- if (strptr) {
- for (i = 0; i < strptr->nr; i++) {
- v = strptr->items[i].string;
- if (!v)
- printf("(NULL)\n");
- else
- printf("%s\n", v);
- }
- goto exit0;
- } else {
- printf("Value not found for \"%s\"\n", argv[2]);
- goto exit1;
- }
- }
-
- die("%s: Please check the syntax and the function name", argv[0]);
-
-exit0:
- git_configset_clear(&cs);
- return 0;
-
-exit1:
- git_configset_clear(&cs);
- return 1;
-
-exit2:
- git_configset_clear(&cs);
- return 2;
-}
+++ /dev/null
-#include "cache.h"
-
-static int rc;
-
-static void report_error(const char *class, int ch)
-{
- printf("%s classifies char %d (0x%02x) wrongly\n", class, ch, ch);
- rc = 1;
-}
-
-static int is_in(const char *s, int ch)
-{
- /* We can't find NUL using strchr. It's classless anyway. */
- if (ch == '\0')
- return 0;
- return !!strchr(s, ch);
-}
-
-#define TEST_CLASS(t,s) { \
- int i; \
- for (i = 0; i < 256; i++) { \
- if (is_in(s, i) != t(i)) \
- report_error(#t, i); \
- } \
-}
-
-#define DIGIT "0123456789"
-#define LOWER "abcdefghijklmnopqrstuvwxyz"
-#define UPPER "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
-
-int main(int argc, char **argv)
-{
- TEST_CLASS(isdigit, DIGIT);
- TEST_CLASS(isspace, " \n\r\t");
- TEST_CLASS(isalpha, LOWER UPPER);
- TEST_CLASS(isalnum, LOWER UPPER DIGIT);
- TEST_CLASS(is_glob_special, "*?[\\");
- TEST_CLASS(is_regex_special, "$()*+.?[\\^{|");
- TEST_CLASS(is_pathspec_magic, "!\"#%&',-/:;<=>@_`~");
-
- return rc;
-}
+++ /dev/null
-#include "cache.h"
-
-static const char *usage_msg = "\n"
-" test-date show [time_t]...\n"
-" test-date parse [date]...\n"
-" test-date approxidate [date]...\n";
-
-static void show_dates(char **argv, struct timeval *now)
-{
- struct strbuf buf = STRBUF_INIT;
-
- for (; *argv; argv++) {
- time_t t = atoi(*argv);
- show_date_relative(t, 0, now, &buf);
- printf("%s -> %s\n", *argv, buf.buf);
- }
- strbuf_release(&buf);
-}
-
-static void parse_dates(char **argv, struct timeval *now)
-{
- struct strbuf result = STRBUF_INIT;
-
- for (; *argv; argv++) {
- unsigned long t;
- int tz;
-
- strbuf_reset(&result);
- parse_date(*argv, &result);
- if (sscanf(result.buf, "%lu %d", &t, &tz) == 2)
- printf("%s -> %s\n",
- *argv, show_date(t, tz, DATE_MODE(ISO8601)));
- else
- printf("%s -> bad\n", *argv);
- }
- strbuf_release(&result);
-}
-
-static void parse_approxidate(char **argv, struct timeval *now)
-{
- for (; *argv; argv++) {
- time_t t;
- t = approxidate_relative(*argv, now);
- printf("%s -> %s\n", *argv, show_date(t, 0, DATE_MODE(ISO8601)));
- }
-}
-
-int main(int argc, char **argv)
-{
- struct timeval now;
- const char *x;
-
- x = getenv("TEST_DATE_NOW");
- if (x) {
- now.tv_sec = atoi(x);
- now.tv_usec = 0;
- }
- else
- gettimeofday(&now, NULL);
-
- argv++;
- if (!*argv)
- usage(usage_msg);
- if (!strcmp(*argv, "show"))
- show_dates(argv+1, &now);
- else if (!strcmp(*argv, "parse"))
- parse_dates(argv+1, &now);
- else if (!strcmp(*argv, "approxidate"))
- parse_approxidate(argv+1, &now);
- else
- usage(usage_msg);
- return 0;
-}
+++ /dev/null
-/*
- * test-delta.c: test code to exercise diff-delta.c and patch-delta.c
- *
- * (C) 2005 Nicolas Pitre <nico@fluxnic.net>
- *
- * This code is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include "git-compat-util.h"
-#include "delta.h"
-#include "cache.h"
-
-static const char usage_str[] =
- "test-delta (-d|-p) <from_file> <data_file> <out_file>";
-
-int main(int argc, char *argv[])
-{
- int fd;
- struct stat st;
- void *from_buf, *data_buf, *out_buf;
- unsigned long from_size, data_size, out_size;
-
- if (argc != 5 || (strcmp(argv[1], "-d") && strcmp(argv[1], "-p"))) {
- fprintf(stderr, "usage: %s\n", usage_str);
- return 1;
- }
-
- fd = open(argv[2], O_RDONLY);
- if (fd < 0 || fstat(fd, &st)) {
- perror(argv[2]);
- return 1;
- }
- from_size = st.st_size;
- from_buf = mmap(NULL, from_size, PROT_READ, MAP_PRIVATE, fd, 0);
- if (from_buf == MAP_FAILED) {
- perror(argv[2]);
- close(fd);
- return 1;
- }
- close(fd);
-
- fd = open(argv[3], O_RDONLY);
- if (fd < 0 || fstat(fd, &st)) {
- perror(argv[3]);
- return 1;
- }
- data_size = st.st_size;
- data_buf = mmap(NULL, data_size, PROT_READ, MAP_PRIVATE, fd, 0);
- if (data_buf == MAP_FAILED) {
- perror(argv[3]);
- close(fd);
- return 1;
- }
- close(fd);
-
- if (argv[1][1] == 'd')
- out_buf = diff_delta(from_buf, from_size,
- data_buf, data_size,
- &out_size, 0);
- else
- out_buf = patch_delta(from_buf, from_size,
- data_buf, data_size,
- &out_size);
- if (!out_buf) {
- fprintf(stderr, "delta operation failed (returned NULL)\n");
- return 1;
- }
-
- fd = open (argv[4], O_WRONLY|O_CREAT|O_TRUNC, 0666);
- if (fd < 0 || write_in_full(fd, out_buf, out_size) != out_size) {
- perror(argv[4]);
- return 1;
- }
-
- return 0;
-}
+++ /dev/null
-#include "cache.h"
-#include "tree.h"
-#include "cache-tree.h"
-
-
-static void dump_one(struct cache_tree *it, const char *pfx, const char *x)
-{
- if (it->entry_count < 0)
- printf("%-40s %s%s (%d subtrees)\n",
- "invalid", x, pfx, it->subtree_nr);
- else
- printf("%s %s%s (%d entries, %d subtrees)\n",
- sha1_to_hex(it->sha1), x, pfx,
- it->entry_count, it->subtree_nr);
-}
-
-static int dump_cache_tree(struct cache_tree *it,
- struct cache_tree *ref,
- const char *pfx)
-{
- int i;
- int errs = 0;
-
- if (!it || !ref)
- /* missing in either */
- return 0;
-
- if (it->entry_count < 0) {
- /* invalid */
- dump_one(it, pfx, "");
- dump_one(ref, pfx, "#(ref) ");
- }
- else {
- dump_one(it, pfx, "");
- if (hashcmp(it->sha1, ref->sha1) ||
- ref->entry_count != it->entry_count ||
- ref->subtree_nr != it->subtree_nr) {
- /* claims to be valid but is lying */
- dump_one(ref, pfx, "#(ref) ");
- errs = 1;
- }
- }
-
- for (i = 0; i < it->subtree_nr; i++) {
- char path[PATH_MAX];
- struct cache_tree_sub *down = it->down[i];
- struct cache_tree_sub *rdwn;
-
- rdwn = cache_tree_sub(ref, down->name);
- xsnprintf(path, sizeof(path), "%s%.*s/", pfx, down->namelen, down->name);
- if (dump_cache_tree(down->cache_tree, rdwn->cache_tree, path))
- errs = 1;
- }
- return errs;
-}
-
-int main(int ac, char **av)
-{
- struct index_state istate;
- struct cache_tree *another = cache_tree();
- if (read_cache() < 0)
- die("unable to read index file");
- istate = the_index;
- istate.cache_tree = another;
- cache_tree_update(&istate, WRITE_TREE_DRY_RUN);
- return dump_cache_tree(active_cache_tree, another, "");
-}
+++ /dev/null
-#include "cache.h"
-#include "split-index.h"
-#include "ewah/ewok.h"
-
-static void show_bit(size_t pos, void *data)
-{
- printf(" %d", (int)pos);
-}
-
-int main(int ac, char **av)
-{
- struct split_index *si;
- int i;
-
- do_read_index(&the_index, av[1], 1);
- printf("own %s\n", sha1_to_hex(the_index.sha1));
- si = the_index.split_index;
- if (!si) {
- printf("not a split index\n");
- return 0;
- }
- printf("base %s\n", sha1_to_hex(si->base_sha1));
- for (i = 0; i < the_index.cache_nr; i++) {
- struct cache_entry *ce = the_index.cache[i];
- printf("%06o %s %d\t%s\n", ce->ce_mode,
- sha1_to_hex(ce->sha1), ce_stage(ce), ce->name);
- }
- printf("replacements:");
- if (si->replace_bitmap)
- ewah_each_bit(si->replace_bitmap, show_bit, NULL);
- printf("\ndeletions:");
- if (si->delete_bitmap)
- ewah_each_bit(si->delete_bitmap, show_bit, NULL);
- printf("\n");
- return 0;
-}
+++ /dev/null
-#include "cache.h"
-#include "dir.h"
-
-static int compare_untracked(const void *a_, const void *b_)
-{
- const char *const *a = a_;
- const char *const *b = b_;
- return strcmp(*a, *b);
-}
-
-static int compare_dir(const void *a_, const void *b_)
-{
- const struct untracked_cache_dir *const *a = a_;
- const struct untracked_cache_dir *const *b = b_;
- return strcmp((*a)->name, (*b)->name);
-}
-
-static void dump(struct untracked_cache_dir *ucd, struct strbuf *base)
-{
- int i, len;
- qsort(ucd->untracked, ucd->untracked_nr, sizeof(*ucd->untracked),
- compare_untracked);
- qsort(ucd->dirs, ucd->dirs_nr, sizeof(*ucd->dirs),
- compare_dir);
- len = base->len;
- strbuf_addf(base, "%s/", ucd->name);
- printf("%s %s", base->buf,
- sha1_to_hex(ucd->exclude_sha1));
- if (ucd->recurse)
- fputs(" recurse", stdout);
- if (ucd->check_only)
- fputs(" check_only", stdout);
- if (ucd->valid)
- fputs(" valid", stdout);
- printf("\n");
- for (i = 0; i < ucd->untracked_nr; i++)
- printf("%s\n", ucd->untracked[i]);
- for (i = 0; i < ucd->dirs_nr; i++)
- dump(ucd->dirs[i], base);
- strbuf_setlen(base, len);
-}
-
-int main(int ac, char **av)
-{
- struct untracked_cache *uc;
- struct strbuf base = STRBUF_INIT;
-
- /* Hack to avoid modifying the untracked cache when we read it */
- ignore_untracked_cache_config = 1;
-
- setup_git_directory();
- if (read_cache() < 0)
- die("unable to read index file");
- uc = the_index.untracked;
- if (!uc) {
- printf("no untracked cache\n");
- return 0;
- }
- printf("info/exclude %s\n", sha1_to_hex(uc->ss_info_exclude.sha1));
- printf("core.excludesfile %s\n", sha1_to_hex(uc->ss_excludes_file.sha1));
- printf("exclude_per_dir %s\n", uc->exclude_per_dir);
- printf("flags %08x\n", uc->dir_flags);
- if (uc->root)
- dump(uc->root, &base);
- return 0;
-}
+++ /dev/null
-#include "git-compat-util.h"
-#include "run-command.h"
-#include "strbuf.h"
-
-int main(int argc, char **argv)
-{
- const char *trash_directory = getenv("TRASH_DIRECTORY");
- struct strbuf buf = STRBUF_INIT;
- FILE *f;
- int i;
- const char *child_argv[] = { NULL, NULL };
-
- /* First, print all parameters into $TRASH_DIRECTORY/ssh-output */
- if (!trash_directory)
- die("Need a TRASH_DIRECTORY!");
- strbuf_addf(&buf, "%s/ssh-output", trash_directory);
- f = fopen(buf.buf, "w");
- if (!f)
- die("Could not write to %s", buf.buf);
- for (i = 0; i < argc; i++)
- fprintf(f, "%s%s", i > 0 ? " " : "", i > 0 ? argv[i] : "ssh:");
- fprintf(f, "\n");
- fclose(f);
-
- /* Now, evaluate the *last* parameter */
- if (argc < 2)
- return 0;
- child_argv[0] = argv[argc - 1];
- return run_command_v_opt(child_argv, RUN_USING_SHELL);
-}
+++ /dev/null
-/*
- * Simple random data generator used to create reproducible test files.
- * This is inspired from POSIX.1-2001 implementation example for rand().
- * Copyright (C) 2007 by Nicolas Pitre, licensed under the GPL version 2.
- */
-
-#include "git-compat-util.h"
-
-int main(int argc, char *argv[])
-{
- unsigned long count, next = 0;
- unsigned char *c;
-
- if (argc < 2 || argc > 3) {
- fprintf(stderr, "usage: %s <seed_string> [<size>]\n", argv[0]);
- return 1;
- }
-
- c = (unsigned char *) argv[1];
- do {
- next = next * 11 + *c;
- } while (*c++);
-
- count = (argc == 3) ? strtoul(argv[2], NULL, 0) : -1L;
-
- while (count--) {
- next = next * 1103515245 + 12345;
- if (putchar((next >> 16) & 0xff) == EOF)
- return -1;
- }
-
- return 0;
-}
+++ /dev/null
-#include "git-compat-util.h"
-#include "hashmap.h"
-
-struct test_entry
-{
- struct hashmap_entry ent;
- /* key and value as two \0-terminated strings */
- char key[FLEX_ARRAY];
-};
-
-static const char *get_value(const struct test_entry *e)
-{
- return e->key + strlen(e->key) + 1;
-}
-
-static int test_entry_cmp(const struct test_entry *e1,
- const struct test_entry *e2, const char* key)
-{
- return strcmp(e1->key, key ? key : e2->key);
-}
-
-static int test_entry_cmp_icase(const struct test_entry *e1,
- const struct test_entry *e2, const char* key)
-{
- return strcasecmp(e1->key, key ? key : e2->key);
-}
-
-static struct test_entry *alloc_test_entry(int hash, char *key, int klen,
- char *value, int vlen)
-{
- struct test_entry *entry = malloc(sizeof(struct test_entry) + klen
- + vlen + 2);
- hashmap_entry_init(entry, hash);
- memcpy(entry->key, key, klen + 1);
- memcpy(entry->key + klen + 1, value, vlen + 1);
- return entry;
-}
-
-#define HASH_METHOD_FNV 0
-#define HASH_METHOD_I 1
-#define HASH_METHOD_IDIV10 2
-#define HASH_METHOD_0 3
-#define HASH_METHOD_X2 4
-#define TEST_SPARSE 8
-#define TEST_ADD 16
-#define TEST_SIZE 100000
-
-static unsigned int hash(unsigned int method, unsigned int i, const char *key)
-{
- unsigned int hash = 0;
- switch (method & 3)
- {
- case HASH_METHOD_FNV:
- hash = strhash(key);
- break;
- case HASH_METHOD_I:
- hash = i;
- break;
- case HASH_METHOD_IDIV10:
- hash = i / 10;
- break;
- case HASH_METHOD_0:
- hash = 0;
- break;
- }
-
- if (method & HASH_METHOD_X2)
- hash = 2 * hash;
- return hash;
-}
-
-/*
- * Test performance of hashmap.[ch]
- * Usage: time echo "perfhashmap method rounds" | test-hashmap
- */
-static void perf_hashmap(unsigned int method, unsigned int rounds)
-{
- struct hashmap map;
- char buf[16];
- struct test_entry **entries;
- unsigned int *hashes;
- unsigned int i, j;
-
- entries = malloc(TEST_SIZE * sizeof(struct test_entry *));
- hashes = malloc(TEST_SIZE * sizeof(int));
- for (i = 0; i < TEST_SIZE; i++) {
- snprintf(buf, sizeof(buf), "%i", i);
- entries[i] = alloc_test_entry(0, buf, strlen(buf), "", 0);
- hashes[i] = hash(method, i, entries[i]->key);
- }
-
- if (method & TEST_ADD) {
- /* test adding to the map */
- for (j = 0; j < rounds; j++) {
- hashmap_init(&map, (hashmap_cmp_fn) test_entry_cmp, 0);
-
- /* add entries */
- for (i = 0; i < TEST_SIZE; i++) {
- hashmap_entry_init(entries[i], hashes[i]);
- hashmap_add(&map, entries[i]);
- }
-
- hashmap_free(&map, 0);
- }
- } else {
- /* test map lookups */
- hashmap_init(&map, (hashmap_cmp_fn) test_entry_cmp, 0);
-
- /* fill the map (sparsely if specified) */
- j = (method & TEST_SPARSE) ? TEST_SIZE / 10 : TEST_SIZE;
- for (i = 0; i < j; i++) {
- hashmap_entry_init(entries[i], hashes[i]);
- hashmap_add(&map, entries[i]);
- }
-
- for (j = 0; j < rounds; j++) {
- for (i = 0; i < TEST_SIZE; i++) {
- hashmap_get_from_hash(&map, hashes[i],
- entries[i]->key);
- }
- }
-
- hashmap_free(&map, 0);
- }
-}
-
-#define DELIM " \t\r\n"
-
-/*
- * Read stdin line by line and print result of commands to stdout:
- *
- * hash key -> strhash(key) memhash(key) strihash(key) memihash(key)
- * put key value -> NULL / old value
- * get key -> NULL / value
- * remove key -> NULL / old value
- * iterate -> key1 value1\nkey2 value2\n...
- * size -> tablesize numentries
- *
- * perfhashmap method rounds -> test hashmap.[ch] performance
- */
-int main(int argc, char *argv[])
-{
- char line[1024];
- struct hashmap map;
- int icase;
-
- /* init hash map */
- icase = argc > 1 && !strcmp("ignorecase", argv[1]);
- hashmap_init(&map, (hashmap_cmp_fn) (icase ? test_entry_cmp_icase
- : test_entry_cmp), 0);
-
- /* process commands from stdin */
- while (fgets(line, sizeof(line), stdin)) {
- char *cmd, *p1 = NULL, *p2 = NULL;
- int l1 = 0, l2 = 0, hash = 0;
- struct test_entry *entry;
-
- /* break line into command and up to two parameters */
- cmd = strtok(line, DELIM);
- /* ignore empty lines */
- if (!cmd || *cmd == '#')
- continue;
-
- p1 = strtok(NULL, DELIM);
- if (p1) {
- l1 = strlen(p1);
- hash = icase ? strihash(p1) : strhash(p1);
- p2 = strtok(NULL, DELIM);
- if (p2)
- l2 = strlen(p2);
- }
-
- if (!strcmp("hash", cmd) && l1) {
-
- /* print results of different hash functions */
- printf("%u %u %u %u\n", strhash(p1), memhash(p1, l1),
- strihash(p1), memihash(p1, l1));
-
- } else if (!strcmp("add", cmd) && l1 && l2) {
-
- /* create entry with key = p1, value = p2 */
- entry = alloc_test_entry(hash, p1, l1, p2, l2);
-
- /* add to hashmap */
- hashmap_add(&map, entry);
-
- } else if (!strcmp("put", cmd) && l1 && l2) {
-
- /* create entry with key = p1, value = p2 */
- entry = alloc_test_entry(hash, p1, l1, p2, l2);
-
- /* add / replace entry */
- entry = hashmap_put(&map, entry);
-
- /* print and free replaced entry, if any */
- puts(entry ? get_value(entry) : "NULL");
- free(entry);
-
- } else if (!strcmp("get", cmd) && l1) {
-
- /* lookup entry in hashmap */
- entry = hashmap_get_from_hash(&map, hash, p1);
-
- /* print result */
- if (!entry)
- puts("NULL");
- while (entry) {
- puts(get_value(entry));
- entry = hashmap_get_next(&map, entry);
- }
-
- } else if (!strcmp("remove", cmd) && l1) {
-
- /* setup static key */
- struct hashmap_entry key;
- hashmap_entry_init(&key, hash);
-
- /* remove entry from hashmap */
- entry = hashmap_remove(&map, &key, p1);
-
- /* print result and free entry*/
- puts(entry ? get_value(entry) : "NULL");
- free(entry);
-
- } else if (!strcmp("iterate", cmd)) {
-
- struct hashmap_iter iter;
- hashmap_iter_init(&map, &iter);
- while ((entry = hashmap_iter_next(&iter)))
- printf("%s %s\n", entry->key, get_value(entry));
-
- } else if (!strcmp("size", cmd)) {
-
- /* print table sizes */
- printf("%u %u\n", map.tablesize, map.size);
-
- } else if (!strcmp("intern", cmd) && l1) {
-
- /* test that strintern works */
- const char *i1 = strintern(p1);
- const char *i2 = strintern(p1);
- if (strcmp(i1, p1))
- printf("strintern(%s) returns %s\n", p1, i1);
- else if (i1 == p1)
- printf("strintern(%s) returns input pointer\n", p1);
- else if (i1 != i2)
- printf("strintern(%s) != strintern(%s)", i1, i2);
- else
- printf("%s\n", i1);
-
- } else if (!strcmp("perfhashmap", cmd) && l1 && l2) {
-
- perf_hashmap(atoi(p1), atoi(p2));
-
- } else {
-
- printf("Unknown command %s\n", cmd);
-
- }
- }
-
- hashmap_free(&map, 1);
- return 0;
-}
+++ /dev/null
-#include "cache.h"
-
-int main(int argc, char **argv)
-{
- struct cache_header hdr;
- int version;
-
- memset(&hdr,0,sizeof(hdr));
- if (read(0, &hdr, sizeof(hdr)) != sizeof(hdr))
- return 0;
- version = ntohl(hdr.hdr_version);
- printf("%d\n", version);
- return 0;
-}
+++ /dev/null
-/*
- * test-line-buffer.c: code to exercise the svn importer's input helper
- */
-
-#include "git-compat-util.h"
-#include "strbuf.h"
-#include "vcs-svn/line_buffer.h"
-
-static uint32_t strtouint32(const char *s)
-{
- char *end;
- uintmax_t n = strtoumax(s, &end, 10);
- if (*s == '\0' || *end != '\0')
- die("invalid count: %s", s);
- return (uint32_t) n;
-}
-
-static void handle_command(const char *command, const char *arg, struct line_buffer *buf)
-{
- switch (*command) {
- case 'b':
- if (starts_with(command, "binary ")) {
- struct strbuf sb = STRBUF_INIT;
- strbuf_addch(&sb, '>');
- buffer_read_binary(buf, &sb, strtouint32(arg));
- fwrite(sb.buf, 1, sb.len, stdout);
- strbuf_release(&sb);
- return;
- }
- case 'c':
- if (starts_with(command, "copy ")) {
- buffer_copy_bytes(buf, strtouint32(arg));
- return;
- }
- case 's':
- if (starts_with(command, "skip ")) {
- buffer_skip_bytes(buf, strtouint32(arg));
- return;
- }
- default:
- die("unrecognized command: %s", command);
- }
-}
-
-static void handle_line(const char *line, struct line_buffer *stdin_buf)
-{
- const char *arg = strchr(line, ' ');
- if (!arg)
- die("no argument in line: %s", line);
- handle_command(line, arg + 1, stdin_buf);
-}
-
-int main(int argc, char *argv[])
-{
- struct line_buffer stdin_buf = LINE_BUFFER_INIT;
- struct line_buffer file_buf = LINE_BUFFER_INIT;
- struct line_buffer *input = &stdin_buf;
- const char *filename;
- char *s;
-
- if (argc == 1)
- filename = NULL;
- else if (argc == 2)
- filename = argv[1];
- else
- usage("test-line-buffer [file | &fd] < script");
-
- if (buffer_init(&stdin_buf, NULL))
- die_errno("open error");
- if (filename) {
- if (*filename == '&') {
- if (buffer_fdinit(&file_buf, strtouint32(filename + 1)))
- die_errno("error opening fd %s", filename + 1);
- } else {
- if (buffer_init(&file_buf, filename))
- die_errno("error opening %s", filename);
- }
- input = &file_buf;
- }
-
- while ((s = buffer_read_line(&stdin_buf)))
- handle_line(s, input);
-
- if (filename && buffer_deinit(&file_buf))
- die("error reading from %s", filename);
- if (buffer_deinit(&stdin_buf))
- die("input error");
- if (ferror(stdout))
- die("output error");
- return 0;
-}
+++ /dev/null
-#include "cache.h"
-#include "tree.h"
-
-int main(int ac, char **av)
-{
- unsigned char hash1[20], hash2[20], shifted[20];
- struct tree *one, *two;
-
- setup_git_directory();
-
- if (get_sha1(av[1], hash1))
- die("cannot parse %s as an object name", av[1]);
- if (get_sha1(av[2], hash2))
- die("cannot parse %s as an object name", av[2]);
- one = parse_tree_indirect(hash1);
- if (!one)
- die("not a tree-ish %s", av[1]);
- two = parse_tree_indirect(hash2);
- if (!two)
- die("not a tree-ish %s", av[2]);
-
- shift_tree(one->object.oid.hash, two->object.oid.hash, shifted, -1);
- printf("shifted: %s\n", sha1_to_hex(shifted));
-
- exit(0);
-}
+++ /dev/null
-#include "cache.h"
-#include "mergesort.h"
-
-struct line {
- char *text;
- struct line *next;
-};
-
-static void *get_next(const void *a)
-{
- return ((const struct line *)a)->next;
-}
-
-static void set_next(void *a, void *b)
-{
- ((struct line *)a)->next = b;
-}
-
-static int compare_strings(const void *a, const void *b)
-{
- const struct line *x = a, *y = b;
- return strcmp(x->text, y->text);
-}
-
-int main(int argc, char **argv)
-{
- struct line *line, *p = NULL, *lines = NULL;
- struct strbuf sb = STRBUF_INIT;
-
- for (;;) {
- if (strbuf_getwholeline(&sb, stdin, '\n'))
- break;
- line = xmalloc(sizeof(struct line));
- line->text = strbuf_detach(&sb, NULL);
- if (p) {
- line->next = p->next;
- p->next = line;
- } else {
- line->next = NULL;
- lines = line;
- }
- p = line;
- }
-
- lines = llist_mergesort(lines, get_next, set_next, compare_strings);
-
- while (lines) {
- printf("%s", lines->text);
- lines = lines->next;
- }
- return 0;
-}
+++ /dev/null
-/*
- * test-mktemp.c: code to exercise the creation of temporary files
- */
-#include "git-compat-util.h"
-
-int main(int argc, char *argv[])
-{
- if (argc != 2)
- usage("Expected 1 parameter defining the temporary file template");
-
- xmkstemp(xstrdup(argv[1]));
-
- return 0;
-}
+++ /dev/null
-#include "cache.h"
-#include "parse-options.h"
-#include "string-list.h"
-
-static int boolean = 0;
-static int integer = 0;
-static unsigned long magnitude = 0;
-static unsigned long timestamp;
-static int abbrev = 7;
-static int verbose = 0, dry_run = 0, quiet = 0;
-static char *string = NULL;
-static char *file = NULL;
-static int ambiguous;
-static struct string_list list;
-
-static int length_callback(const struct option *opt, const char *arg, int unset)
-{
- printf("Callback: \"%s\", %d\n",
- (arg ? arg : "not set"), unset);
- if (unset)
- return 1; /* do not support unset */
-
- *(int *)opt->value = strlen(arg);
- return 0;
-}
-
-static int number_callback(const struct option *opt, const char *arg, int unset)
-{
- *(int *)opt->value = strtol(arg, NULL, 10);
- return 0;
-}
-
-int main(int argc, char **argv)
-{
- const char *prefix = "prefix/";
- const char *usage[] = {
- "test-parse-options <options>",
- NULL
- };
- struct option options[] = {
- OPT_BOOL(0, "yes", &boolean, "get a boolean"),
- OPT_BOOL('D', "no-doubt", &boolean, "begins with 'no-'"),
- { OPTION_SET_INT, 'B', "no-fear", &boolean, NULL,
- "be brave", PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
- OPT_COUNTUP('b', "boolean", &boolean, "increment by one"),
- OPT_BIT('4', "or4", &boolean,
- "bitwise-or boolean with ...0100", 4),
- OPT_NEGBIT(0, "neg-or4", &boolean, "same as --no-or4", 4),
- OPT_GROUP(""),
- OPT_INTEGER('i', "integer", &integer, "get a integer"),
- OPT_INTEGER('j', NULL, &integer, "get a integer, too"),
- OPT_MAGNITUDE('m', "magnitude", &magnitude, "get a magnitude"),
- OPT_SET_INT(0, "set23", &integer, "set integer to 23", 23),
- OPT_DATE('t', NULL, ×tamp, "get timestamp of <time>"),
- OPT_CALLBACK('L', "length", &integer, "str",
- "get length of <str>", length_callback),
- OPT_FILENAME('F', "file", &file, "set file to <file>"),
- OPT_GROUP("String options"),
- OPT_STRING('s', "string", &string, "string", "get a string"),
- OPT_STRING(0, "string2", &string, "str", "get another string"),
- OPT_STRING(0, "st", &string, "st", "get another string (pervert ordering)"),
- OPT_STRING('o', NULL, &string, "str", "get another string"),
- OPT_NOOP_NOARG(0, "obsolete"),
- OPT_STRING_LIST(0, "list", &list, "str", "add str to list"),
- OPT_GROUP("Magic arguments"),
- OPT_ARGUMENT("quux", "means --quux"),
- OPT_NUMBER_CALLBACK(&integer, "set integer to NUM",
- number_callback),
- { OPTION_COUNTUP, '+', NULL, &boolean, NULL, "same as -b",
- PARSE_OPT_NOARG | PARSE_OPT_NONEG | PARSE_OPT_NODASH },
- { OPTION_COUNTUP, 0, "ambiguous", &ambiguous, NULL,
- "positive ambiguity", PARSE_OPT_NOARG | PARSE_OPT_NONEG },
- { OPTION_COUNTUP, 0, "no-ambiguous", &ambiguous, NULL,
- "negative ambiguity", PARSE_OPT_NOARG | PARSE_OPT_NONEG },
- OPT_GROUP("Standard options"),
- OPT__ABBREV(&abbrev),
- OPT__VERBOSE(&verbose, "be verbose"),
- OPT__DRY_RUN(&dry_run, "dry run"),
- OPT__QUIET(&quiet, "be quiet"),
- OPT_END(),
- };
- int i;
-
- argc = parse_options(argc, (const char **)argv, prefix, options, usage, 0);
-
- printf("boolean: %d\n", boolean);
- printf("integer: %d\n", integer);
- printf("magnitude: %lu\n", magnitude);
- printf("timestamp: %lu\n", timestamp);
- printf("string: %s\n", string ? string : "(not set)");
- printf("abbrev: %d\n", abbrev);
- printf("verbose: %d\n", verbose);
- printf("quiet: %s\n", quiet ? "yes" : "no");
- printf("dry run: %s\n", dry_run ? "yes" : "no");
- printf("file: %s\n", file ? file : "(not set)");
-
- for (i = 0; i < list.nr; i++)
- printf("list: %s\n", list.items[i].string);
-
- for (i = 0; i < argc; i++)
- printf("arg %02d: %s\n", i, argv[i]);
-
- return 0;
-}
+++ /dev/null
-#include "cache.h"
-#include "string-list.h"
-
-/*
- * A "string_list_each_func_t" function that normalizes an entry from
- * GIT_CEILING_DIRECTORIES. If the path is unusable for some reason,
- * die with an explanation.
- */
-static int normalize_ceiling_entry(struct string_list_item *item, void *unused)
-{
- char *ceil = item->string;
-
- if (!*ceil)
- die("Empty path is not supported");
- if (!is_absolute_path(ceil))
- die("Path \"%s\" is not absolute", ceil);
- if (normalize_path_copy(ceil, ceil) < 0)
- die("Path \"%s\" could not be normalized", ceil);
- return 1;
-}
-
-static void normalize_argv_string(const char **var, const char *input)
-{
- if (!strcmp(input, "<null>"))
- *var = NULL;
- else if (!strcmp(input, "<empty>"))
- *var = "";
- else
- *var = input;
-
- if (*var && (**var == '<' || **var == '('))
- die("Bad value: %s\n", input);
-}
-
-struct test_data {
- const char *from; /* input: transform from this ... */
- const char *to; /* output: ... to this. */
- const char *alternative; /* output: ... or this. */
-};
-
-static int test_function(struct test_data *data, char *(*func)(char *input),
- const char *funcname)
-{
- int failed = 0, i;
- char buffer[1024];
- char *to;
-
- for (i = 0; data[i].to; i++) {
- if (!data[i].from)
- to = func(NULL);
- else {
- xsnprintf(buffer, sizeof(buffer), "%s", data[i].from);
- to = func(buffer);
- }
- if (!strcmp(to, data[i].to))
- continue;
- if (!data[i].alternative)
- error("FAIL: %s(%s) => '%s' != '%s'\n",
- funcname, data[i].from, to, data[i].to);
- else if (!strcmp(to, data[i].alternative))
- continue;
- else
- error("FAIL: %s(%s) => '%s' != '%s', '%s'\n",
- funcname, data[i].from, to, data[i].to,
- data[i].alternative);
- failed = 1;
- }
- return failed;
-}
-
-static struct test_data basename_data[] = {
- /* --- POSIX type paths --- */
- { NULL, "." },
- { "", "." },
- { ".", "." },
- { "..", ".." },
- { "/", "/" },
- { "//", "/", "//" },
- { "///", "/", "//" },
- { "////", "/", "//" },
- { "usr", "usr" },
- { "/usr", "usr" },
- { "/usr/", "usr" },
- { "/usr//", "usr" },
- { "/usr/lib", "lib" },
- { "usr/lib", "lib" },
- { "usr/lib///", "lib" },
-
-#if defined(__MINGW32__) || defined(_MSC_VER)
- /* --- win32 type paths --- */
- { "\\usr", "usr" },
- { "\\usr\\", "usr" },
- { "\\usr\\\\", "usr" },
- { "\\usr\\lib", "lib" },
- { "usr\\lib", "lib" },
- { "usr\\lib\\\\\\", "lib" },
- { "C:/usr", "usr" },
- { "C:/usr", "usr" },
- { "C:/usr/", "usr" },
- { "C:/usr//", "usr" },
- { "C:/usr/lib", "lib" },
- { "C:usr/lib", "lib" },
- { "C:usr/lib///", "lib" },
- { "C:", "." },
- { "C:a", "a" },
- { "C:/", "/" },
- { "C:///", "/" },
- { "\\", "\\", "/" },
- { "\\\\", "\\", "/" },
- { "\\\\\\", "\\", "/" },
-#endif
- { NULL, NULL }
-};
-
-static struct test_data dirname_data[] = {
- /* --- POSIX type paths --- */
- { NULL, "." },
- { "", "." },
- { ".", "." },
- { "..", "." },
- { "/", "/" },
- { "//", "/", "//" },
- { "///", "/", "//" },
- { "////", "/", "//" },
- { "usr", "." },
- { "/usr", "/" },
- { "/usr/", "/" },
- { "/usr//", "/" },
- { "/usr/lib", "/usr" },
- { "usr/lib", "usr" },
- { "usr/lib///", "usr" },
-
-#if defined(__MINGW32__) || defined(_MSC_VER)
- /* --- win32 type paths --- */
- { "\\", "\\" },
- { "\\\\", "\\\\" },
- { "\\usr", "\\" },
- { "\\usr\\", "\\" },
- { "\\usr\\\\", "\\" },
- { "\\usr\\lib", "\\usr" },
- { "usr\\lib", "usr" },
- { "usr\\lib\\\\\\", "usr" },
- { "C:a", "C:." },
- { "C:/", "C:/" },
- { "C:///", "C:/" },
- { "C:/usr", "C:/" },
- { "C:/usr/", "C:/" },
- { "C:/usr//", "C:/" },
- { "C:/usr/lib", "C:/usr" },
- { "C:usr/lib", "C:usr" },
- { "C:usr/lib///", "C:usr" },
- { "\\\\\\", "\\" },
- { "\\\\\\\\", "\\" },
- { "C:", "C:.", "." },
-#endif
- { NULL, NULL }
-};
-
-int main(int argc, char **argv)
-{
- if (argc == 3 && !strcmp(argv[1], "normalize_path_copy")) {
- char *buf = xmallocz(strlen(argv[2]));
- int rv = normalize_path_copy(buf, argv[2]);
- if (rv)
- buf = "++failed++";
- puts(buf);
- return 0;
- }
-
- if (argc >= 2 && !strcmp(argv[1], "real_path")) {
- while (argc > 2) {
- puts(real_path(argv[2]));
- argc--;
- argv++;
- }
- return 0;
- }
-
- if (argc >= 2 && !strcmp(argv[1], "absolute_path")) {
- while (argc > 2) {
- puts(absolute_path(argv[2]));
- argc--;
- argv++;
- }
- return 0;
- }
-
- if (argc == 4 && !strcmp(argv[1], "longest_ancestor_length")) {
- int len;
- struct string_list ceiling_dirs = STRING_LIST_INIT_DUP;
- char *path = xstrdup(argv[2]);
-
- /*
- * We have to normalize the arguments because under
- * Windows, bash mangles arguments that look like
- * absolute POSIX paths or colon-separate lists of
- * absolute POSIX paths into DOS paths (e.g.,
- * "/foo:/foo/bar" might be converted to
- * "D:\Src\msysgit\foo;D:\Src\msysgit\foo\bar"),
- * whereas longest_ancestor_length() requires paths
- * that use forward slashes.
- */
- if (normalize_path_copy(path, path))
- die("Path \"%s\" could not be normalized", argv[2]);
- string_list_split(&ceiling_dirs, argv[3], PATH_SEP, -1);
- filter_string_list(&ceiling_dirs, 0,
- normalize_ceiling_entry, NULL);
- len = longest_ancestor_length(path, &ceiling_dirs);
- string_list_clear(&ceiling_dirs, 0);
- free(path);
- printf("%d\n", len);
- return 0;
- }
-
- if (argc >= 4 && !strcmp(argv[1], "prefix_path")) {
- char *prefix = argv[2];
- int prefix_len = strlen(prefix);
- int nongit_ok;
- setup_git_directory_gently(&nongit_ok);
- while (argc > 3) {
- puts(prefix_path(prefix, prefix_len, argv[3]));
- argc--;
- argv++;
- }
- return 0;
- }
-
- if (argc == 4 && !strcmp(argv[1], "strip_path_suffix")) {
- char *prefix = strip_path_suffix(argv[2], argv[3]);
- printf("%s\n", prefix ? prefix : "(null)");
- return 0;
- }
-
- if (argc == 3 && !strcmp(argv[1], "print_path")) {
- puts(argv[2]);
- return 0;
- }
-
- if (argc == 4 && !strcmp(argv[1], "relative_path")) {
- struct strbuf sb = STRBUF_INIT;
- const char *in, *prefix, *rel;
- normalize_argv_string(&in, argv[2]);
- normalize_argv_string(&prefix, argv[3]);
- rel = relative_path(in, prefix, &sb);
- if (!rel)
- puts("(null)");
- else
- puts(strlen(rel) > 0 ? rel : "(empty)");
- strbuf_release(&sb);
- return 0;
- }
-
- if (argc == 2 && !strcmp(argv[1], "basename"))
- return test_function(basename_data, basename, argv[1]);
-
- if (argc == 2 && !strcmp(argv[1], "dirname"))
- return test_function(dirname_data, dirname, argv[1]);
-
- fprintf(stderr, "%s: unknown function name: %s\n", argv[0],
- argv[1] ? argv[1] : "(there was none)");
- return 1;
-}
+++ /dev/null
-#include "cache.h"
-#include "prio-queue.h"
-
-static int intcmp(const void *va, const void *vb, void *data)
-{
- const int *a = va, *b = vb;
- return *a - *b;
-}
-
-static void show(int *v)
-{
- if (!v)
- printf("NULL\n");
- else
- printf("%d\n", *v);
- free(v);
-}
-
-int main(int argc, char **argv)
-{
- struct prio_queue pq = { intcmp };
-
- while (*++argv) {
- if (!strcmp(*argv, "get"))
- show(prio_queue_get(&pq));
- else if (!strcmp(*argv, "dump")) {
- int *v;
- while ((v = prio_queue_get(&pq)))
- show(v);
- }
- else {
- int *v = malloc(sizeof(*v));
- *v = atoi(*argv);
- prio_queue_put(&pq, v);
- }
- }
-
- return 0;
-}
+++ /dev/null
-#include "cache.h"
-
-int main (int argc, char **argv)
-{
- int i, cnt = 1;
- if (argc == 2)
- cnt = strtol(argv[1], NULL, 0);
- for (i = 0; i < cnt; i++) {
- read_cache();
- discard_cache();
- }
- return 0;
-}
+++ /dev/null
-#include "git-compat-util.h"
-
-int main(int argc, char **argv)
-{
- char *pat = "[^={} \t]+";
- char *str = "={}\nfred";
- regex_t r;
- regmatch_t m[1];
-
- if (regcomp(&r, pat, REG_EXTENDED | REG_NEWLINE))
- die("failed regcomp() for pattern '%s'", pat);
- if (regexec(&r, str, 1, m, 0))
- die("no match of pattern '%s' to string '%s'", pat, str);
-
- /* http://sourceware.org/bugzilla/show_bug.cgi?id=3957 */
- if (m[0].rm_so == 3) /* matches '\n' when it should not */
- die("regex bug confirmed: re-build git with NO_REGEX=1");
-
- exit(0);
-}
+++ /dev/null
-/*
- * test-revision-walking.c: test revision walking API.
- *
- * (C) 2012 Heiko Voigt <hvoigt@hvoigt.net>
- *
- * This code is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include "cache.h"
-#include "commit.h"
-#include "diff.h"
-#include "revision.h"
-
-static void print_commit(struct commit *commit)
-{
- struct strbuf sb = STRBUF_INIT;
- struct pretty_print_context ctx = {0};
- ctx.date_mode.type = DATE_NORMAL;
- format_commit_message(commit, " %m %s", &sb, &ctx);
- printf("%s\n", sb.buf);
- strbuf_release(&sb);
-}
-
-static int run_revision_walk(void)
-{
- struct rev_info rev;
- struct commit *commit;
- const char *argv[] = {NULL, "--all", NULL};
- int argc = ARRAY_SIZE(argv) - 1;
- int got_revision = 0;
-
- init_revisions(&rev, NULL);
- setup_revisions(argc, argv, &rev, NULL);
- if (prepare_revision_walk(&rev))
- die("revision walk setup failed");
-
- while ((commit = get_revision(&rev)) != NULL) {
- print_commit(commit);
- got_revision = 1;
- }
-
- reset_revision_walk();
- return got_revision;
-}
-
-int main(int argc, char **argv)
-{
- if (argc < 2)
- return 1;
-
- setup_git_directory();
-
- if (!strcmp(argv[1], "run-twice")) {
- printf("1st\n");
- if (!run_revision_walk())
- return 1;
- printf("2nd\n");
- if (!run_revision_walk())
- return 1;
-
- return 0;
- }
-
- fprintf(stderr, "check usage\n");
- return 1;
-}
+++ /dev/null
-/*
- * test-run-command.c: test run command API.
- *
- * (C) 2009 Ilari Liusvaara <ilari.liusvaara@elisanet.fi>
- *
- * This code is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include "git-compat-util.h"
-#include "run-command.h"
-#include "argv-array.h"
-#include "strbuf.h"
-#include <string.h>
-#include <errno.h>
-
-static int number_callbacks;
-static int parallel_next(struct child_process *cp,
- struct strbuf *err,
- void *cb,
- void **task_cb)
-{
- struct child_process *d = cb;
- if (number_callbacks >= 4)
- return 0;
-
- argv_array_pushv(&cp->args, d->argv);
- strbuf_addf(err, "preloaded output of a child\n");
- number_callbacks++;
- return 1;
-}
-
-static int no_job(struct child_process *cp,
- struct strbuf *err,
- void *cb,
- void **task_cb)
-{
- strbuf_addf(err, "no further jobs available\n");
- return 0;
-}
-
-static int task_finished(int result,
- struct strbuf *err,
- void *pp_cb,
- void *pp_task_cb)
-{
- strbuf_addf(err, "asking for a quick stop\n");
- return 1;
-}
-
-int main(int argc, char **argv)
-{
- struct child_process proc = CHILD_PROCESS_INIT;
- int jobs;
-
- if (argc < 3)
- return 1;
- proc.argv = (const char **)argv + 2;
-
- if (!strcmp(argv[1], "start-command-ENOENT")) {
- if (start_command(&proc) < 0 && errno == ENOENT)
- return 0;
- fprintf(stderr, "FAIL %s\n", argv[1]);
- return 1;
- }
- if (!strcmp(argv[1], "run-command"))
- exit(run_command(&proc));
-
- jobs = atoi(argv[2]);
- proc.argv = (const char **)argv + 3;
-
- if (!strcmp(argv[1], "run-command-parallel"))
- exit(run_processes_parallel(jobs, parallel_next,
- NULL, NULL, &proc));
-
- if (!strcmp(argv[1], "run-command-abort"))
- exit(run_processes_parallel(jobs, parallel_next,
- NULL, task_finished, &proc));
-
- if (!strcmp(argv[1], "run-command-no-jobs"))
- exit(run_processes_parallel(jobs, no_job,
- NULL, task_finished, &proc));
-
- fprintf(stderr, "check usage\n");
- return 1;
-}
+++ /dev/null
-#include "cache.h"
-#include "lockfile.h"
-#include "tree.h"
-#include "cache-tree.h"
-
-static struct lock_file index_lock;
-
-int main(int ac, char **av)
-{
- hold_locked_index(&index_lock, 1);
- if (read_cache() < 0)
- die("unable to read index file");
- active_cache_tree = NULL;
- if (write_locked_index(&the_index, &index_lock, COMMIT_LOCK))
- die("unable to write index file");
- return 0;
-}
+++ /dev/null
-#include "cache.h"
-#include "sha1-array.h"
-
-static void print_sha1(const unsigned char sha1[20], void *data)
-{
- puts(sha1_to_hex(sha1));
-}
-
-int main(int argc, char **argv)
-{
- struct sha1_array array = SHA1_ARRAY_INIT;
- struct strbuf line = STRBUF_INIT;
-
- while (strbuf_getline(&line, stdin) != EOF) {
- const char *arg;
- unsigned char sha1[20];
-
- if (skip_prefix(line.buf, "append ", &arg)) {
- if (get_sha1_hex(arg, sha1))
- die("not a hexadecimal SHA1: %s", arg);
- sha1_array_append(&array, sha1);
- } else if (skip_prefix(line.buf, "lookup ", &arg)) {
- if (get_sha1_hex(arg, sha1))
- die("not a hexadecimal SHA1: %s", arg);
- printf("%d\n", sha1_array_lookup(&array, sha1));
- } else if (!strcmp(line.buf, "clear"))
- sha1_array_clear(&array);
- else if (!strcmp(line.buf, "for_each_unique"))
- sha1_array_for_each_unique(&array, print_sha1, NULL);
- else
- die("unknown command: %s", line.buf);
- }
- return 0;
-}
+++ /dev/null
-#include "cache.h"
-
-int main(int ac, char **av)
-{
- git_SHA_CTX ctx;
- unsigned char sha1[20];
- unsigned bufsz = 8192;
- int binary = 0;
- char *buffer;
-
- if (ac == 2) {
- if (!strcmp(av[1], "-b"))
- binary = 1;
- else
- bufsz = strtoul(av[1], NULL, 10) * 1024 * 1024;
- }
-
- if (!bufsz)
- bufsz = 8192;
-
- while ((buffer = malloc(bufsz)) == NULL) {
- fprintf(stderr, "bufsz %u is too big, halving...\n", bufsz);
- bufsz /= 2;
- if (bufsz < 1024)
- die("OOPS");
- }
-
- git_SHA1_Init(&ctx);
-
- while (1) {
- ssize_t sz, this_sz;
- char *cp = buffer;
- unsigned room = bufsz;
- this_sz = 0;
- while (room) {
- sz = xread(0, cp, room);
- if (sz == 0)
- break;
- if (sz < 0)
- die_errno("test-sha1");
- this_sz += sz;
- cp += sz;
- room -= sz;
- }
- if (this_sz == 0)
- break;
- git_SHA1_Update(&ctx, buffer, this_sz);
- }
- git_SHA1_Final(sha1, &ctx);
-
- if (binary)
- fwrite(sha1, 1, 20, stdout);
- else
- puts(sha1_to_hex(sha1));
- exit(0);
-}
+++ /dev/null
-#!/bin/sh
-
-dd if=/dev/zero bs=1048576 count=100 2>/dev/null |
-/usr/bin/time ./test-sha1 >/dev/null
-
-while read expect cnt pfx
-do
- case "$expect" in '#'*) continue ;; esac
- actual=$(
- {
- test -z "$pfx" || echo "$pfx"
- dd if=/dev/zero bs=1048576 count=$cnt 2>/dev/null |
- perl -pe 'y/\000/g/'
- } | ./test-sha1 $cnt
- )
- if test "$expect" = "$actual"
- then
- echo "OK: $expect $cnt $pfx"
- else
- echo >&2 "OOPS: $cnt"
- echo >&2 "expect: $expect"
- echo >&2 "actual: $actual"
- exit 1
- fi
-done <<EOF
-da39a3ee5e6b4b0d3255bfef95601890afd80709 0
-3f786850e387550fdab836ed7e6dc881de23001b 0 a
-5277cbb45a15902137d332d97e89cf8136545485 0 ab
-03cfd743661f07975fa2f1220c5194cbaff48451 0 abc
-3330b4373640f9e4604991e73c7e86bfd8da2dc3 0 abcd
-ec11312386ad561674f724b8cca7cf1796e26d1d 0 abcde
-bdc37c074ec4ee6050d68bc133c6b912f36474df 0 abcdef
-69bca99b923859f2dc486b55b87f49689b7358c7 0 abcdefg
-e414af7161c9554089f4106d6f1797ef14a73666 0 abcdefgh
-0707f2970043f9f7c22029482db27733deaec029 0 abcdefghi
-a4dd8aa74a5636728fe52451636e2e17726033aa 1
-9986b45e2f4d7086372533bb6953a8652fa3644a 1 frotz
-23d8d4f788e8526b4877548a32577543cbaaf51f 10
-8cd23f822ab44c7f481b8c92d591f6d1fcad431c 10 frotz
-f3b5604a4e604899c1233edb3bf1cc0ede4d8c32 512
-b095bd837a371593048136e429e9ac4b476e1bb3 512 frotz
-08fa81d6190948de5ccca3966340cc48c10cceac 1200 xyzzy
-e33a291f42c30a159733dd98b8b3e4ff34158ca0 4090 4G
-#a3bf783bc20caa958f6cb24dd140a7b21984838d 9999 nitfol
-EOF
-
-exit
-
-# generating test vectors
-# inputs are number of megabytes followed by some random string to prefix.
-
-while read cnt pfx
-do
- actual=$(
- {
- test -z "$pfx" || echo "$pfx"
- dd if=/dev/zero bs=1048576 count=$cnt 2>/dev/null |
- perl -pe 'y/\000/g/'
- } | sha1sum |
- sed -e 's/ .*//'
- )
- echo "$actual $cnt $pfx"
-done <<EOF
-0
-0 a
-0 ab
-0 abc
-0 abcd
-0 abcde
-0 abcdef
-0 abcdefg
-0 abcdefgh
-0 abcdefghi
-1
-1 frotz
-10
-10 frotz
-512
-512 frotz
-1200 xyzzy
-4090 4G
-9999 nitfol
-EOF
+++ /dev/null
-#include "cache.h"
-#include "sigchain.h"
-
-#define X(f) \
-static void f(int sig) { \
- puts(#f); \
- fflush(stdout); \
- sigchain_pop(sig); \
- raise(sig); \
-}
-X(one)
-X(two)
-X(three)
-#undef X
-
-int main(int argc, char **argv) {
- sigchain_push(SIGTERM, one);
- sigchain_push(SIGTERM, two);
- sigchain_push(SIGTERM, three);
- raise(SIGTERM);
- return 0;
-}
+++ /dev/null
-#include "cache.h"
-#include "string-list.h"
-
-/*
- * Parse an argument into a string list. arg should either be a
- * ':'-separated list of strings, or "-" to indicate an empty string
- * list (as opposed to "", which indicates a string list containing a
- * single empty string). list->strdup_strings must be set.
- */
-static void parse_string_list(struct string_list *list, const char *arg)
-{
- if (!strcmp(arg, "-"))
- return;
-
- (void)string_list_split(list, arg, ':', -1);
-}
-
-static void write_list(const struct string_list *list)
-{
- int i;
- for (i = 0; i < list->nr; i++)
- printf("[%d]: \"%s\"\n", i, list->items[i].string);
-}
-
-static void write_list_compact(const struct string_list *list)
-{
- int i;
- if (!list->nr)
- printf("-\n");
- else {
- printf("%s", list->items[0].string);
- for (i = 1; i < list->nr; i++)
- printf(":%s", list->items[i].string);
- printf("\n");
- }
-}
-
-static int prefix_cb(struct string_list_item *item, void *cb_data)
-{
- const char *prefix = (const char *)cb_data;
- return starts_with(item->string, prefix);
-}
-
-int main(int argc, char **argv)
-{
- if (argc == 5 && !strcmp(argv[1], "split")) {
- struct string_list list = STRING_LIST_INIT_DUP;
- int i;
- const char *s = argv[2];
- int delim = *argv[3];
- int maxsplit = atoi(argv[4]);
-
- i = string_list_split(&list, s, delim, maxsplit);
- printf("%d\n", i);
- write_list(&list);
- string_list_clear(&list, 0);
- return 0;
- }
-
- if (argc == 5 && !strcmp(argv[1], "split_in_place")) {
- struct string_list list = STRING_LIST_INIT_NODUP;
- int i;
- char *s = xstrdup(argv[2]);
- int delim = *argv[3];
- int maxsplit = atoi(argv[4]);
-
- i = string_list_split_in_place(&list, s, delim, maxsplit);
- printf("%d\n", i);
- write_list(&list);
- string_list_clear(&list, 0);
- free(s);
- return 0;
- }
-
- if (argc == 4 && !strcmp(argv[1], "filter")) {
- /*
- * Retain only the items that have the specified prefix.
- * Arguments: list|- prefix
- */
- struct string_list list = STRING_LIST_INIT_DUP;
- const char *prefix = argv[3];
-
- parse_string_list(&list, argv[2]);
- filter_string_list(&list, 0, prefix_cb, (void *)prefix);
- write_list_compact(&list);
- string_list_clear(&list, 0);
- return 0;
- }
-
- if (argc == 3 && !strcmp(argv[1], "remove_duplicates")) {
- struct string_list list = STRING_LIST_INIT_DUP;
-
- parse_string_list(&list, argv[2]);
- string_list_remove_duplicates(&list, 0);
- write_list_compact(&list);
- string_list_clear(&list, 0);
- return 0;
- }
-
- fprintf(stderr, "%s: unknown function name: %s\n", argv[0],
- argv[1] ? argv[1] : "(there was none)");
- return 1;
-}
+++ /dev/null
-#include "cache.h"
-#include "submodule-config.h"
-#include "submodule.h"
-
-static void die_usage(int argc, char **argv, const char *msg)
-{
- fprintf(stderr, "%s\n", msg);
- fprintf(stderr, "Usage: %s [<commit> <submodulepath>] ...\n", argv[0]);
- exit(1);
-}
-
-static int git_test_config(const char *var, const char *value, void *cb)
-{
- return parse_submodule_config_option(var, value);
-}
-
-int main(int argc, char **argv)
-{
- char **arg = argv;
- int my_argc = argc;
- int output_url = 0;
- int lookup_name = 0;
-
- arg++;
- my_argc--;
- while (starts_with(arg[0], "--")) {
- if (!strcmp(arg[0], "--url"))
- output_url = 1;
- if (!strcmp(arg[0], "--name"))
- lookup_name = 1;
- arg++;
- my_argc--;
- }
-
- if (my_argc % 2 != 0)
- die_usage(argc, argv, "Wrong number of arguments.");
-
- setup_git_directory();
- gitmodules_config();
- git_config(git_test_config, NULL);
-
- while (*arg) {
- unsigned char commit_sha1[20];
- const struct submodule *submodule;
- const char *commit;
- const char *path_or_name;
-
- commit = arg[0];
- path_or_name = arg[1];
-
- if (commit[0] == '\0')
- hashcpy(commit_sha1, null_sha1);
- else if (get_sha1(commit, commit_sha1) < 0)
- die_usage(argc, argv, "Commit not found.");
-
- if (lookup_name) {
- submodule = submodule_from_name(commit_sha1, path_or_name);
- } else
- submodule = submodule_from_path(commit_sha1, path_or_name);
- if (!submodule)
- die_usage(argc, argv, "Submodule not found.");
-
- if (output_url)
- printf("Submodule url: '%s' for path '%s'\n",
- submodule->url, submodule->path);
- else
- printf("Submodule name: '%s' for path '%s'\n",
- submodule->name, submodule->path);
-
- arg += 2;
- }
-
- submodule_free();
-
- return 0;
-}
+++ /dev/null
-#include "cache.h"
-#include "run-command.h"
-
-int main(int argc, char **argv)
-{
- struct child_process cp = CHILD_PROCESS_INIT;
- int nogit = 0;
-
- setup_git_directory_gently(&nogit);
- if (nogit)
- die("No git repo found");
- if (argc > 1 && !strcmp(argv[1], "--setup-work-tree")) {
- setup_work_tree();
- argv++;
- }
- cp.git_cmd = 1;
- cp.argv = (const char **)argv + 1;
- return run_command(&cp);
-}
+++ /dev/null
-/*
- * test-svn-fe: Code to exercise the svn import lib
- */
-
-#include "git-compat-util.h"
-#include "vcs-svn/svndump.h"
-#include "vcs-svn/svndiff.h"
-#include "vcs-svn/sliding_window.h"
-#include "vcs-svn/line_buffer.h"
-
-static const char test_svnfe_usage[] =
- "test-svn-fe (<dumpfile> | [-d] <preimage> <delta> <len>)";
-
-static int apply_delta(int argc, char *argv[])
-{
- struct line_buffer preimage = LINE_BUFFER_INIT;
- struct line_buffer delta = LINE_BUFFER_INIT;
- struct sliding_view preimage_view = SLIDING_VIEW_INIT(&preimage, -1);
-
- if (argc != 5)
- usage(test_svnfe_usage);
-
- if (buffer_init(&preimage, argv[2]))
- die_errno("cannot open preimage");
- if (buffer_init(&delta, argv[3]))
- die_errno("cannot open delta");
- if (svndiff0_apply(&delta, (off_t) strtoumax(argv[4], NULL, 0),
- &preimage_view, stdout))
- return 1;
- if (buffer_deinit(&preimage))
- die_errno("cannot close preimage");
- if (buffer_deinit(&delta))
- die_errno("cannot close delta");
- strbuf_release(&preimage_view.buf);
- return 0;
-}
-
-int main(int argc, char *argv[])
-{
- if (argc == 2) {
- if (svndump_init(argv[1]))
- return 1;
- svndump_read(NULL, "refs/heads/master", "refs/notes/svn/revs");
- svndump_deinit();
- svndump_reset();
- return 0;
- }
-
- if (argc >= 2 && !strcmp(argv[1], "-d"))
- return apply_delta(argc, argv);
- usage(test_svnfe_usage);
-}
+++ /dev/null
-#include "git-compat-util.h"
-#include "urlmatch.h"
-
-int main(int argc, char **argv)
-{
- const char usage[] = "test-urlmatch-normalization [-p | -l] <url1> | <url1> <url2>";
- char *url1, *url2;
- int opt_p = 0, opt_l = 0;
-
- /*
- * For one url, succeed if url_normalize succeeds on it, fail otherwise.
- * For two urls, succeed only if url_normalize succeeds on both and
- * the results compare equal with strcmp. If -p is given (one url only)
- * and url_normalize succeeds, print the result followed by "\n". If
- * -l is given (one url only) and url_normalize succeeds, print the
- * returned length in decimal followed by "\n".
- */
-
- if (argc > 1 && !strcmp(argv[1], "-p")) {
- opt_p = 1;
- argc--;
- argv++;
- } else if (argc > 1 && !strcmp(argv[1], "-l")) {
- opt_l = 1;
- argc--;
- argv++;
- }
-
- if (argc < 2 || argc > 3)
- die("%s", usage);
-
- if (argc == 2) {
- struct url_info info;
- url1 = url_normalize(argv[1], &info);
- if (!url1)
- return 1;
- if (opt_p)
- printf("%s\n", url1);
- if (opt_l)
- printf("%u\n", (unsigned)info.url_len);
- return 0;
- }
-
- if (opt_p || opt_l)
- die("%s", usage);
-
- url1 = url_normalize(argv[1], NULL);
- url2 = url_normalize(argv[2], NULL);
- return (url1 && url2 && !strcmp(url1, url2)) ? 0 : 1;
-}
+++ /dev/null
-#include "cache.h"
-
-int main(int argc, char **argv)
-{
- int i;
- for (i = 2; i < argc; i++) {
- if (argv[i][0] == '/')
- die("Forward slash is not allowed at the beginning of the\n"
- "pattern because Windows does not like it. Use `XXX/' instead.");
- else if (!strncmp(argv[i], "XXX/", 4))
- argv[i] += 3;
- }
- if (!strcmp(argv[1], "wildmatch"))
- return !!wildmatch(argv[3], argv[2], WM_PATHNAME, NULL);
- else if (!strcmp(argv[1], "iwildmatch"))
- return !!wildmatch(argv[3], argv[2], WM_PATHNAME | WM_CASEFOLD, NULL);
- else if (!strcmp(argv[1], "pathmatch"))
- return !!wildmatch(argv[3], argv[2], 0, NULL);
- else
- return 1;
-}
}
/*
- * Tries to read read data from source into buffer. If buffer is full,
+ * Tries to read data from source into buffer. If buffer is full,
* no data is read. Returns 0 on success, -1 on error.
*/
static int udt_do_read(struct unidirectional_transfer *t)
bytes = read(t->src, t->buf + t->bufuse, BUFFERSIZE - t->bufuse);
if (bytes < 0 && errno != EWOULDBLOCK && errno != EAGAIN &&
errno != EINTR) {
- error("read(%s) failed: %s", t->src_name, strerror(errno));
+ error_errno("read(%s) failed", t->src_name);
return -1;
} else if (bytes == 0) {
transfer_debug("%s EOF (with %i bytes in buffer)",
transfer_debug("%s is writable", t->dest_name);
bytes = xwrite(t->dest, t->buf, t->bufuse);
if (bytes < 0 && errno != EWOULDBLOCK) {
- error("write(%s) failed: %s", t->dest_name, strerror(errno));
+ error_errno("write(%s) failed", t->dest_name);
return -1;
} else if (bytes > 0) {
t->bufuse -= bytes;
{
int tret;
if (waitpid(pid, &tret, 0) < 0) {
- error("%s process failed to wait: %s", name, strerror(errno));
+ error_errno("%s process failed to wait", name);
return 1;
}
if (!WIFEXITED(tret) || WEXITSTATUS(tret)) {
if (t) {
/* path present in resulting tree */
- sha1 = tree_entry_extract(t, &path, &mode);
+ sha1 = tree_entry_extract(t, &path, &mode)->hash;
pathlen = tree_entry_len(&t->entry);
isdir = S_ISDIR(mode);
} else {
DIFF_STATUS_ADDED;
if (tpi_valid) {
- sha1_i = tp[i].entry.sha1;
+ sha1_i = tp[i].entry.oid->hash;
mode_i = tp[i].entry.mode;
}
else {
/* same rule as in emitthis */
int tpi_valid = tp && !(tp[i].entry.mode & S_IFXMIN_NEQ);
- parents_sha1[i] = tpi_valid ? tp[i].entry.sha1
+ parents_sha1[i] = tpi_valid ? tp[i].entry.oid->hash
: NULL;
}
continue;
/* diff(t,pi) != ø */
- if (hashcmp(t.entry.sha1, tp[i].entry.sha1) ||
+ if (oidcmp(t.entry.oid, tp[i].entry.oid) ||
(t.entry.mode != tp[i].entry.mode))
continue;
/* Initialize the descriptor entry */
desc->entry.path = path;
desc->entry.mode = canon_mode(mode);
- desc->entry.sha1 = (const unsigned char *)(path + len);
+ desc->entry.oid = (const struct object_id *)(path + len);
}
void init_tree_desc(struct tree_desc *desc, const void *buffer, unsigned long size)
void update_tree_entry(struct tree_desc *desc)
{
const void *buf = desc->buffer;
- const unsigned char *end = desc->entry.sha1 + 20;
+ const unsigned char *end = desc->entry.oid->hash + 20;
unsigned long size = desc->size;
unsigned long len = end - (const unsigned char *)buf;
pathlen--;
info->pathlen = pathlen ? pathlen + 1 : 0;
info->name.path = base;
- info->name.sha1 = (void *)(base + pathlen + 1);
+ info->name.oid = (void *)(base + pathlen + 1);
if (pathlen)
info->prev = &dummy;
}
int namelen = strlen(name);
while (t->size) {
const char *entry;
- const unsigned char *sha1;
+ const struct object_id *oid;
int entrylen, cmp;
- sha1 = tree_entry_extract(t, &entry, mode);
+ oid = tree_entry_extract(t, &entry, mode);
entrylen = tree_entry_len(&t->entry);
update_tree_entry(t);
if (entrylen > namelen)
if (cmp < 0)
break;
if (entrylen == namelen) {
- hashcpy(result, sha1);
+ hashcpy(result, oid->hash);
return 0;
}
if (name[entrylen] != '/')
if (!S_ISDIR(*mode))
break;
if (++entrylen == namelen) {
- hashcpy(result, sha1);
+ hashcpy(result, oid->hash);
return 0;
}
- return get_tree_entry(sha1, name + entrylen, result, mode);
+ return get_tree_entry(oid->hash, name + entrylen, result, mode);
}
return -1;
}
#define TREE_WALK_H
struct name_entry {
- const unsigned char *sha1;
+ const struct object_id *oid;
const char *path;
unsigned int mode;
};
unsigned int size;
};
-static inline const unsigned char *tree_entry_extract(struct tree_desc *desc, const char **pathp, unsigned int *modep)
+static inline const struct object_id *tree_entry_extract(struct tree_desc *desc, const char **pathp, unsigned int *modep)
{
*pathp = desc->entry.path;
*modep = desc->entry.mode;
- return desc->entry.sha1;
+ return desc->entry.oid;
}
static inline int tree_entry_len(const struct name_entry *ne)
{
- return (const char *)ne->sha1 - ne->path - 1;
+ return (const char *)ne->oid - ne->path - 1;
}
void update_tree_entry(struct tree_desc *);
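As an aside on the tree-walk API change above: callers now reach an entry's object name through the oid field instead of the old raw sha1 pointer. The following is an illustrative sketch only (not part of the patch), assuming the usual git headers and an already-parsed "struct tree *tree" so that tree->buffer is populated:

    struct tree_desc desc;
    struct name_entry entry;

    init_tree_desc(&desc, tree->buffer, tree->size);
    while (tree_entry(&desc, &entry))
        printf("%06o %s %s\n", entry.mode,
               oid_to_hex(entry.oid), entry.path);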
continue;
}
- switch (fn(entry.sha1, base,
+ switch (fn(entry.oid->hash, base,
entry.path, entry.mode, stage, context)) {
case 0:
continue;
}
if (S_ISDIR(entry.mode))
- hashcpy(sha1, entry.sha1);
+ hashcpy(sha1, entry.oid->hash);
else if (S_ISGITLINK(entry.mode)) {
struct commit *commit;
- commit = lookup_commit(entry.sha1);
+ commit = lookup_commit(entry.oid->hash);
if (!commit)
die("Commit %s in submodule path %s%s not found",
- sha1_to_hex(entry.sha1),
+ oid_to_hex(entry.oid),
base->buf, entry.path);
if (parse_commit(commit))
die("Invalid commit %s in submodule path %s%s",
- sha1_to_hex(entry.sha1),
+ oid_to_hex(entry.oid),
base->buf, entry.path);
hashcpy(sha1, commit->tree->object.oid.hash);
int i;
const char **msgs = opts->msgs;
const char *msg;
- const char *cmd2 = strcmp(cmd, "checkout") ? cmd : "switch branches";
- if (advice_commit_before_merge)
- msg = "Your local changes to the following files would be overwritten by %s:\n%%s"
- "Please, commit your changes or stash them before you can %s.";
+ if (!strcmp(cmd, "checkout"))
+ msg = advice_commit_before_merge
+ ? _("Your local changes to the following files would be overwritten by checkout:\n%%s"
+ "Please commit your changes or stash them before you can switch branches.")
+ : _("Your local changes to the following files would be overwritten by checkout:\n%%s");
+ else if (!strcmp(cmd, "merge"))
+ msg = advice_commit_before_merge
+ ? _("Your local changes to the following files would be overwritten by merge:\n%%s"
+ "Please commit your changes or stash them before you can merge.")
+ : _("Your local changes to the following files would be overwritten by merge:\n%%s");
else
- msg = "Your local changes to the following files would be overwritten by %s:\n%%s";
+ msg = advice_commit_before_merge
+ ? _("Your local changes to the following files would be overwritten by %s:\n%%s"
+ "Please commit your changes or stash them before you can %s.")
+ : _("Your local changes to the following files would be overwritten by %s:\n%%s");
msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] =
- xstrfmt(msg, cmd, cmd2);
+ xstrfmt(msg, cmd, cmd);
msgs[ERROR_NOT_UPTODATE_DIR] =
- "Updating the following directories would lose untracked files in it:\n%s";
-
- if (advice_commit_before_merge)
- msg = "The following untracked working tree files would be %s by %s:\n%%s"
- "Please move or remove them before you can %s.";
+ _("Updating the following directories would lose untracked files in it:\n%s");
+
+ if (!strcmp(cmd, "checkout"))
+ msg = advice_commit_before_merge
+ ? _("The following untracked working tree files would be removed by checkout:\n%%s"
+ "Please move or remove them before you can switch branches.")
+ : _("The following untracked working tree files would be removed by checkout:\n%%s");
+ else if (!strcmp(cmd, "merge"))
+ msg = advice_commit_before_merge
+ ? _("The following untracked working tree files would be removed by merge:\n%%s"
+ "Please move or remove them before you can merge.")
+ : _("The following untracked working tree files would be removed by merge:\n%%s");
else
- msg = "The following untracked working tree files would be %s by %s:\n%%s";
-
- msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = xstrfmt(msg, "removed", cmd, cmd2);
- msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = xstrfmt(msg, "overwritten", cmd, cmd2);
+ msg = advice_commit_before_merge
+ ? _("The following untracked working tree files would be removed by %s:\n%%s"
+ "Please move or remove them before you can %s.")
+ : _("The following untracked working tree files would be removed by %s:\n%%s");
+ msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = xstrfmt(msg, cmd, cmd);
+
+ if (!strcmp(cmd, "checkout"))
+ msg = advice_commit_before_merge
+ ? _("The following untracked working tree files would be overwritten by checkout:\n%%s"
+ "Please move or remove them before you can switch branches.")
+ : _("The following untracked working tree files would be overwritten by checkout:\n%%s");
+ else if (!strcmp(cmd, "merge"))
+ msg = advice_commit_before_merge
+ ? _("The following untracked working tree files would be overwritten by merge:\n%%s"
+ "Please move or remove them before you can merge.")
+ : _("The following untracked working tree files would be overwritten by merge:\n%%s");
+ else
+ msg = advice_commit_before_merge
+ ? _("The following untracked working tree files would be overwritten by %s:\n%%s"
+ "Please move or remove them before you can %s.")
+ : _("The following untracked working tree files would be overwritten by %s:\n%%s");
+ msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = xstrfmt(msg, cmd, cmd);
/*
* Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we
* cannot easily display it as a list.
*/
- msgs[ERROR_BIND_OVERLAP] = "Entry '%s' overlaps with '%s'. Cannot bind.";
+ msgs[ERROR_BIND_OVERLAP] = _("Entry '%s' overlaps with '%s'. Cannot bind.");
msgs[ERROR_SPARSE_NOT_UPTODATE_FILE] =
- "Cannot update sparse checkout: the following entries are not up-to-date:\n%s";
+ _("Cannot update sparse checkout: the following entries are not up-to-date:\n%s");
msgs[ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN] =
- "The following Working tree files would be overwritten by sparse checkout update:\n%s";
+ _("The following Working tree files would be overwritten by sparse checkout update:\n%s");
msgs[ERROR_WOULD_LOSE_ORPHANED_REMOVED] =
- "The following Working tree files would be removed by sparse checkout update:\n%s";
+ _("The following Working tree files would be removed by sparse checkout update:\n%s");
opts->show_all_errors = 1;
/* rejected paths may not have a static buffer */
string_list_clear(rejects, 0);
}
if (something_displayed)
- fprintf(stderr, "Aborting\n");
+ fprintf(stderr, _("Aborting\n"));
}
/*
for (i = 0; i < n; i++, dirmask >>= 1) {
const unsigned char *sha1 = NULL;
if (dirmask & 1)
- sha1 = names[i].sha1;
+ sha1 = names[i].oid->hash;
buf[i] = fill_tree_descriptor(t+i, sha1);
}
ce->ce_mode = create_ce_mode(n->mode);
ce->ce_flags = create_ce_flags(stage);
ce->ce_namelen = len;
- hashcpy(ce->sha1, n->sha1);
+ hashcpy(ce->sha1, n->oid->hash);
make_traverse_path(ce->name, info, n);
return ce;
path = xmemdupz(ce->name, len);
if (lstat(path, &st))
- ret = error("cannot stat '%s': %s", path,
- strerror(errno));
+ ret = error_errno("cannot stat '%s'", path);
else
ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL,
&st, error_type, o);
return ret;
} else if (lstat(ce->name, &st)) {
if (errno != ENOENT)
- return error("cannot stat '%s': %s", ce->name,
- strerror(errno));
+ return error_errno("cannot stat '%s'", ce->name);
return 0;
} else {
return check_ok_to_remove(ce->name, ce_namelen(ce),
if (ret < 0) {
if (errno != EINTR) {
- error("poll failed, resuming: %s",
- strerror(errno));
+ error_errno("poll failed, resuming");
sleep(1);
}
continue;
va_end(params);
}
-void NORETURN die_errno(const char *fmt, ...)
+static const char *fmt_with_err(char *buf, int n, const char *fmt)
{
- va_list params;
- char fmt_with_err[1024];
char str_error[256], *err;
int i, j;
- if (die_is_recursing()) {
- fputs("fatal: recursion detected in die_errno handler\n",
- stderr);
- exit(128);
- }
-
err = strerror(errno);
for (i = j = 0; err[i] && j < sizeof(str_error) - 1; ) {
if ((str_error[j++] = err[i++]) != '%')
}
}
str_error[j] = 0;
- snprintf(fmt_with_err, sizeof(fmt_with_err), "%s: %s", fmt, str_error);
+ snprintf(buf, n, "%s: %s", fmt, str_error);
+ return buf;
+}
+
+void NORETURN die_errno(const char *fmt, ...)
+{
+ char buf[1024];
+ va_list params;
+
+ if (die_is_recursing()) {
+ fputs("fatal: recursion detected in die_errno handler\n",
+ stderr);
+ exit(128);
+ }
va_start(params, fmt);
- die_routine(fmt_with_err, params);
+ die_routine(fmt_with_err(buf, sizeof(buf), fmt), params);
va_end(params);
}
+int error_errno(const char *fmt, ...)
+{
+ char buf[1024];
+ va_list params;
+
+ va_start(params, fmt);
+ error_routine(fmt_with_err(buf, sizeof(buf), fmt), params);
+ va_end(params);
+ return -1;
+}
+
#undef error
int error(const char *err, ...)
{
return -1;
}
+void warning_errno(const char *warn, ...)
+{
+ char buf[1024];
+ va_list params;
+
+ va_start(params, warn);
+ warn_routine(fmt_with_err(buf, sizeof(buf), warn), params);
+ va_end(params);
+}
+
void warning(const char *warn, ...)
{
va_list params;
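The new error_errno() and warning_errno() helpers above centralize the "<message>: <strerror(errno)>" formatting that callers previously did by hand, which is why the conversions elsewhere in this series simply drop the explicit strerror(errno) argument. A minimal before/after sketch, assuming a "path" variable and the declarations from git-compat-util.h:

    struct stat st;

    /* old style: the caller formats errno itself */
    if (lstat(path, &st))
        return error("cannot stat '%s': %s", path, strerror(errno));

    /* new style: error_errno() appends ": <strerror(errno)>" and returns -1 */
    if (lstat(path, &st))
        return error_errno("cannot stat '%s'", path);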
int mbs_chrlen(const char **text, size_t *remainder_p, const char *encoding);
/*
- * Returns true if the the path would match ".git" after HFS case-folding.
+ * Returns true if the path would match ".git" after HFS case-folding.
* The path should be NUL-terminated, but we will match variants of both ".git\0"
* and ".git/..." (but _not_ ".../.git"). This makes it suitable for both fsck
* and verify_path().
{
long pos = ftell(buf->infile);
if (pos < 0)
- return error("ftell error: %s", strerror(errno));
+ return error_errno("ftell error");
if (fseek(buf->infile, 0, SEEK_SET))
- return error("seek error: %s", strerror(errno));
+ return error_errno("seek error");
return pos;
}
{
if (!buffer_ferror(file))
return error("delta preimage ends early");
- return error("cannot read delta preimage: %s", strerror(errno));
+ return error_errno("cannot read delta preimage");
}
static int skip_or_whine(struct line_buffer *file, off_t gap)
{
if (fwrite(sb->buf, 1, sb->len, out) == sb->len) /* Success. */
return 0;
- return error("cannot write delta postimage: %s", strerror(errno));
+ return error_errno("cannot write delta postimage");
}
static int error_short_read(struct line_buffer *input)
{
if (buffer_ferror(input))
- return error("error reading delta: %s", strerror(errno));
+ return error_errno("error reading delta");
return error("invalid delta: unexpected end of file");
}
int svndump_init(const char *filename)
{
if (buffer_init(&input, filename))
- return error("cannot open %s: %s", filename ? filename : "NULL", strerror(errno));
+ return error_errno("cannot open %s", filename ? filename : "NULL");
init(REPORT_FILENO);
return 0;
}
int svndump_init_fd(int in_fd, int back_fd)
{
if(buffer_fdinit(&input, xdup(in_fd)))
- return error("cannot open fd %d: %s", in_fd, strerror(errno));
+ return error_errno("cannot open fd %d", in_fd);
init(xdup(back_fd));
return 0;
}
if (S_ISGITLINK(entry.mode))
continue;
if (S_ISDIR(entry.mode)) {
- struct tree *tree = lookup_tree(entry.sha1);
+ struct tree *tree = lookup_tree(entry.oid->hash);
if (tree)
obj = &tree->object;
}
else {
- struct blob *blob = lookup_blob(entry.sha1);
+ struct blob *blob = lookup_blob(entry.oid->hash);
if (blob)
obj = &blob->object;
}
/*
* Try to advance faster when an asterisk is
* followed by a literal. We know in this case
- * that the the string before the literal
+ * that the string before the literal
* must belong to "*".
* If match_slash is false, do not look past
* the first slash as it cannot belong to '*'.
#include "refs.h"
#include "strbuf.h"
#include "worktree.h"
+#include "dir.h"
+#include "wt-status.h"
void free_worktrees(struct worktree **worktrees)
{
for (i = 0; worktrees[i]; i++) {
free(worktrees[i]->path);
- free(worktrees[i]->git_dir);
+ free(worktrees[i]->id);
free(worktrees[i]->head_ref);
free(worktrees[i]);
}
/*
* read 'path_to_ref' into 'ref'. Also if is_detached is not NULL,
- * set is_detached to 1 (0) if the ref is detatched (is not detached).
+ * set is_detached to 1 (0) if the ref is detached (is not detached).
*
* $GIT_COMMON_DIR/$symref (e.g. HEAD) is practically outside $GIT_DIR so
* for linked worktrees, `resolve_ref_unsafe()` won't work (it uses
struct worktree *worktree = NULL;
struct strbuf path = STRBUF_INIT;
struct strbuf worktree_path = STRBUF_INIT;
- struct strbuf gitdir = STRBUF_INIT;
struct strbuf head_ref = STRBUF_INIT;
int is_bare = 0;
int is_detached = 0;
- strbuf_addf(&gitdir, "%s", absolute_path(get_git_common_dir()));
- strbuf_addbuf(&worktree_path, &gitdir);
+ strbuf_addstr(&worktree_path, absolute_path(get_git_common_dir()));
is_bare = !strbuf_strip_suffix(&worktree_path, "/.git");
if (is_bare)
strbuf_strip_suffix(&worktree_path, "/.");
worktree = xmalloc(sizeof(struct worktree));
worktree->path = strbuf_detach(&worktree_path, NULL);
- worktree->git_dir = strbuf_detach(&gitdir, NULL);
+ worktree->id = NULL;
worktree->is_bare = is_bare;
worktree->head_ref = NULL;
worktree->is_detached = is_detached;
+ worktree->is_current = 0;
add_head_info(&head_ref, worktree);
done:
strbuf_release(&path);
- strbuf_release(&gitdir);
strbuf_release(&worktree_path);
strbuf_release(&head_ref);
return worktree;
struct worktree *worktree = NULL;
struct strbuf path = STRBUF_INIT;
struct strbuf worktree_path = STRBUF_INIT;
- struct strbuf gitdir = STRBUF_INIT;
struct strbuf head_ref = STRBUF_INIT;
int is_detached = 0;
if (!id)
die("Missing linked worktree name");
- strbuf_addf(&gitdir, "%s/worktrees/%s",
- absolute_path(get_git_common_dir()), id);
- strbuf_addf(&path, "%s/gitdir", gitdir.buf);
+ strbuf_git_common_path(&path, "worktrees/%s/gitdir", id);
if (strbuf_read_file(&worktree_path, path.buf, 0) <= 0)
/* invalid gitdir file */
goto done;
worktree = xmalloc(sizeof(struct worktree));
worktree->path = strbuf_detach(&worktree_path, NULL);
- worktree->git_dir = strbuf_detach(&gitdir, NULL);
+ worktree->id = xstrdup(id);
worktree->is_bare = 0;
worktree->head_ref = NULL;
worktree->is_detached = is_detached;
+ worktree->is_current = 0;
add_head_info(&head_ref, worktree);
done:
strbuf_release(&path);
- strbuf_release(&gitdir);
strbuf_release(&worktree_path);
strbuf_release(&head_ref);
return worktree;
}
+static void mark_current_worktree(struct worktree **worktrees)
+{
+ struct strbuf git_dir = STRBUF_INIT;
+ struct strbuf path = STRBUF_INIT;
+ int i;
+
+ strbuf_addstr(&git_dir, absolute_path(get_git_dir()));
+ for (i = 0; worktrees[i]; i++) {
+ struct worktree *wt = worktrees[i];
+ strbuf_addstr(&path, absolute_path(get_worktree_git_dir(wt)));
+ wt->is_current = !fspathcmp(git_dir.buf, path.buf);
+ strbuf_reset(&path);
+ if (wt->is_current)
+ break;
+ }
+ strbuf_release(&git_dir);
+ strbuf_release(&path);
+}
+
struct worktree **get_worktrees(void)
{
struct worktree **list = NULL;
}
ALLOC_GROW(list, counter + 1, alloc);
list[counter] = NULL;
+
+ mark_current_worktree(list);
return list;
}
-char *find_shared_symref(const char *symref, const char *target)
+const char *get_worktree_git_dir(const struct worktree *wt)
+{
+ if (!wt)
+ return get_git_dir();
+ else if (!wt->id)
+ return get_git_common_dir();
+ else
+ return git_common_path("worktrees/%s", wt->id);
+}
+
+int is_worktree_being_rebased(const struct worktree *wt,
+ const char *target)
+{
+ struct wt_status_state state;
+ int found_rebase;
+
+ memset(&state, 0, sizeof(state));
+ found_rebase = wt_status_check_rebase(wt, &state) &&
+ ((state.rebase_in_progress ||
+ state.rebase_interactive_in_progress) &&
+ state.branch &&
+ starts_with(target, "refs/heads/") &&
+ !strcmp(state.branch, target + strlen("refs/heads/")));
+ free(state.branch);
+ free(state.onto);
+ return found_rebase;
+}
+
+int is_worktree_being_bisected(const struct worktree *wt,
+ const char *target)
{
- char *existing = NULL;
+ struct wt_status_state state;
+ int found_rebase;
+
+ memset(&state, 0, sizeof(state));
+ found_rebase = wt_status_check_bisect(wt, &state) &&
+ state.branch &&
+ starts_with(target, "refs/heads/") &&
+ !strcmp(state.branch, target + strlen("refs/heads/"));
+ free(state.branch);
+ return found_rebase;
+}
+
+/*
+ * note: this function should be able to detect shared symref even if
+ * HEAD is temporarily detached (e.g. in the middle of rebase or
+ * bisect). New commands that do similar things should update this
+ * function as well.
+ */
+const struct worktree *find_shared_symref(const char *symref,
+ const char *target)
+{
+ const struct worktree *existing = NULL;
struct strbuf path = STRBUF_INIT;
struct strbuf sb = STRBUF_INIT;
- struct worktree **worktrees = get_worktrees();
+ static struct worktree **worktrees;
int i = 0;
+ if (worktrees)
+ free_worktrees(worktrees);
+ worktrees = get_worktrees();
+
for (i = 0; worktrees[i]; i++) {
+ struct worktree *wt = worktrees[i];
+
+ if (wt->is_detached && !strcmp(symref, "HEAD")) {
+ if (is_worktree_being_rebased(wt, target)) {
+ existing = wt;
+ break;
+ }
+ if (is_worktree_being_bisected(wt, target)) {
+ existing = wt;
+ break;
+ }
+ }
+
strbuf_reset(&path);
strbuf_reset(&sb);
- strbuf_addf(&path, "%s/%s", worktrees[i]->git_dir, symref);
+ strbuf_addf(&path, "%s/%s",
+ get_worktree_git_dir(wt),
+ symref);
if (parse_ref(path.buf, &sb, NULL)) {
continue;
}
if (!strcmp(sb.buf, target)) {
- existing = xstrdup(worktrees[i]->path);
+ existing = wt;
break;
}
}
strbuf_release(&path);
strbuf_release(&sb);
- free_worktrees(worktrees);
return existing;
}
struct worktree {
char *path;
- char *git_dir;
+ char *id;
char *head_ref;
unsigned char head_sha1[20];
int is_detached;
int is_bare;
+ int is_current;
};
/* Functions for acting on the information about worktrees. */
*/
extern struct worktree **get_worktrees(void);
+/*
+ * Return git dir of the worktree. Note that the path may be relative.
+ * If wt is NULL, git dir of current worktree is returned.
+ */
+extern const char *get_worktree_git_dir(const struct worktree *wt);
+
/*
* Free up the memory for worktree(s)
*/
/*
* Check if a per-worktree symref points to a ref in the main worktree
- * or any linked worktree, and return the path to the exising worktree
- * if it is. Returns NULL if there is no existing ref. The caller is
- * responsible for freeing the returned path.
+ * or any linked worktree, and return the worktree that holds the ref,
+ * or NULL otherwise. The result may be destroyed by the next call.
+ */
+extern const struct worktree *find_shared_symref(const char *symref,
+ const char *target);
+
+int is_worktree_being_rebased(const struct worktree *wt, const char *target);
+int is_worktree_being_bisected(const struct worktree *wt, const char *target);
+
+/*
+ * Similar to git_path() but can produce paths for a specified
+ * worktree instead of current one
*/
-extern char *find_shared_symref(const char *symref, const char *target);
+extern const char *worktree_git_path(const struct worktree *wt,
+ const char *fmt, ...)
+ __attribute__((format (printf, 2, 3)));
#endif
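With the reworked worktree API above, find_shared_symref() hands back a struct worktree rather than a freshly allocated path string, and per-worktree paths come from get_worktree_git_dir(). A hedged sketch of a caller, assuming the usual git headers; "branch_refname" is a hypothetical fully qualified ref ("refs/heads/..."):

    struct worktree **worktrees = get_worktrees();
    const struct worktree *wt;
    int i;

    for (i = 0; worktrees[i]; i++)
        printf("%s uses git dir %s\n", worktrees[i]->path,
               get_worktree_git_dir(worktrees[i]));

    wt = find_shared_symref("HEAD", branch_refname);
    if (wt)
        printf("%s is checked out at %s\n", branch_refname, wt->path);

    free_worktrees(worktrees);

Note that the pointer returned by find_shared_symref() belongs to its internal list and may be invalidated by the next call, so callers should copy wt->path if they need it later.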
GITPERLLIB='@@BUILD_DIR@@/perl/blib/lib'"${GITPERLLIB:+:$GITPERLLIB}"
GIT_TEXTDOMAINDIR='@@BUILD_DIR@@/po/build/locale'
PATH='@@BUILD_DIR@@/bin-wrappers:'"$PATH"
+
export GIT_EXEC_PATH GITPERLLIB PATH GIT_TEXTDOMAINDIR
if test -n "$GIT_TEST_GDB"
return mkstemp(path);
}
-/* git_mkstemps() - create tmp file with suffix honoring TMPDIR variable. */
-int git_mkstemps(char *path, size_t len, const char *template, int suffix_len)
-{
- const char *tmp;
- size_t n;
-
- tmp = getenv("TMPDIR");
- if (!tmp)
- tmp = "/tmp";
- n = snprintf(path, len, "%s/%s", tmp, template);
- if (len <= n) {
- errno = ENAMETOOLONG;
- return -1;
- }
- return mkstemps(path, suffix_len);
-}
-
/* Adapted from libiberty's mkstemp.c. */
#undef TMP_MAX
if (!rc || errno == ENOENT)
return 0;
err = errno;
- warning("unable to %s %s: %s", op, file, strerror(errno));
+ warning_errno("unable to %s %s", op, file);
errno = err;
return rc;
}
void warn_on_inaccessible(const char *path)
{
- warning(_("unable to access '%s': %s"), path, strerror(errno));
+ warning_errno(_("unable to access '%s'"), path);
}
static int access_error_is_ok(int err, unsigned flag)
#include "column.h"
#include "strbuf.h"
#include "utf8.h"
+#include "worktree.h"
static const char cut_line[] =
"------------------------ >8 ------------------------\n";
status_printf_ln(s, color,
_(" (fix conflicts and run \"git commit\")"));
} else {
+		s->commitable = 1;
status_printf_ln(s, color,
_("All conflicts fixed but you are still merging."));
if (s->hints)
strbuf_addf(line, "%s", split[i]->buf);
}
}
- for (i = 0; split[i]; i++)
- strbuf_release(split[i]);
-
+ strbuf_list_free(split);
}
static void read_rebase_todolist(const char *fname, struct string_list *lines)
/*
* Extract branch information from rebase/bisect
*/
-static char *read_and_strip_branch(const char *path)
+static char *get_branch(const struct worktree *wt, const char *path)
{
struct strbuf sb = STRBUF_INIT;
unsigned char sha1[20];
const char *branch_name;
- if (strbuf_read_file(&sb, git_path("%s", path), 0) <= 0)
+ if (strbuf_read_file(&sb, worktree_git_path(wt, "%s", path), 0) <= 0)
goto got_nothing;
while (sb.len && sb.buf[sb.len - 1] == '\n')
strbuf_release(&cb.buf);
}
-void wt_status_get_state(struct wt_status_state *state,
- int get_detached_from)
+int wt_status_check_rebase(const struct worktree *wt,
+ struct wt_status_state *state)
{
struct stat st;
- unsigned char sha1[20];
- if (!stat(git_path_merge_head(), &st)) {
- state->merge_in_progress = 1;
- } else if (!stat(git_path("rebase-apply"), &st)) {
- if (!stat(git_path("rebase-apply/applying"), &st)) {
+ if (!stat(worktree_git_path(wt, "rebase-apply"), &st)) {
+ if (!stat(worktree_git_path(wt, "rebase-apply/applying"), &st)) {
state->am_in_progress = 1;
- if (!stat(git_path("rebase-apply/patch"), &st) && !st.st_size)
+ if (!stat(worktree_git_path(wt, "rebase-apply/patch"), &st) && !st.st_size)
state->am_empty_patch = 1;
} else {
state->rebase_in_progress = 1;
- state->branch = read_and_strip_branch("rebase-apply/head-name");
- state->onto = read_and_strip_branch("rebase-apply/onto");
+ state->branch = get_branch(wt, "rebase-apply/head-name");
+ state->onto = get_branch(wt, "rebase-apply/onto");
}
- } else if (!stat(git_path("rebase-merge"), &st)) {
- if (!stat(git_path("rebase-merge/interactive"), &st))
+ } else if (!stat(worktree_git_path(wt, "rebase-merge"), &st)) {
+ if (!stat(worktree_git_path(wt, "rebase-merge/interactive"), &st))
state->rebase_interactive_in_progress = 1;
else
state->rebase_in_progress = 1;
- state->branch = read_and_strip_branch("rebase-merge/head-name");
- state->onto = read_and_strip_branch("rebase-merge/onto");
+ state->branch = get_branch(wt, "rebase-merge/head-name");
+ state->onto = get_branch(wt, "rebase-merge/onto");
+ } else
+ return 0;
+ return 1;
+}
+
+int wt_status_check_bisect(const struct worktree *wt,
+ struct wt_status_state *state)
+{
+ struct stat st;
+
+ if (!stat(worktree_git_path(wt, "BISECT_LOG"), &st)) {
+ state->bisect_in_progress = 1;
+ state->branch = get_branch(wt, "BISECT_START");
+ return 1;
+ }
+ return 0;
+}
+
+void wt_status_get_state(struct wt_status_state *state,
+ int get_detached_from)
+{
+ struct stat st;
+ unsigned char sha1[20];
+
+ if (!stat(git_path_merge_head(), &st)) {
+ state->merge_in_progress = 1;
+ } else if (wt_status_check_rebase(NULL, state)) {
+ ; /* all set */
} else if (!stat(git_path_cherry_pick_head(), &st) &&
!get_sha1("CHERRY_PICK_HEAD", sha1)) {
state->cherry_pick_in_progress = 1;
hashcpy(state->cherry_pick_head_sha1, sha1);
}
- if (!stat(git_path("BISECT_LOG"), &st)) {
- state->bisect_in_progress = 1;
- state->branch = read_and_strip_branch("BISECT_START");
- }
+ wt_status_check_bisect(NULL, state);
if (!stat(git_path_revert_head(), &st) &&
!get_sha1("REVERT_HEAD", sha1)) {
state->revert_in_progress = 1;
#include "color.h"
#include "pathspec.h"
+struct worktree;
+
enum color_wt_status {
WT_STATUS_HEADER = 0,
WT_STATUS_UPDATED,
void wt_status_print(struct wt_status *s);
void wt_status_collect(struct wt_status *s);
void wt_status_get_state(struct wt_status_state *state, int get_detached_from);
+int wt_status_check_rebase(const struct worktree *wt,
+ struct wt_status_state *state);
+int wt_status_check_bisect(const struct worktree *wt,
+ struct wt_status_state *state);
void wt_shortstatus_print(struct wt_status *s);
void wt_porcelain_print(struct wt_status *s);
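The two new wt_status_check_*() entry points above let a caller probe rebase/bisect state for an arbitrary worktree instead of only the current one; this is what worktree.c's is_worktree_being_rebased()/is_worktree_being_bisected() rely on. A minimal sketch, assuming "wt" is a struct worktree obtained from get_worktrees():

    struct wt_status_state state;

    memset(&state, 0, sizeof(state));
    if (wt_status_check_rebase(wt, &state))
        printf("%s is rebasing %s\n", wt->path,
               state.branch ? state.branch : "(detached HEAD)");
    free(state.branch);
    free(state.onto);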
#define XDF_IGNORE_BLANK_LINES (1 << 7)
+#define XDF_COMPACTION_HEURISTIC (1 << 8)
+
#define XDL_EMIT_FUNCNAMES (1 << 0)
#define XDL_EMIT_FUNCCONTEXT (1 << 2)
}
+static int is_blank_line(xrecord_t **recs, long ix, long flags)
+{
+ return xdl_blankline(recs[ix]->ptr, recs[ix]->size, flags);
+}
+
+static int recs_match(xrecord_t **recs, long ixs, long ix, long flags)
+{
+ return (recs[ixs]->ha == recs[ix]->ha &&
+ xdl_recmatch(recs[ixs]->ptr, recs[ixs]->size,
+ recs[ix]->ptr, recs[ix]->size,
+ flags));
+}
+
int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, long flags) {
long ix, ixo, ixs, ixref, grpsiz, nrec = xdf->nrec;
char *rchg = xdf->rchg, *rchgo = xdfo->rchg;
+ unsigned int blank_lines;
xrecord_t **recs = xdf->recs;
/*
do {
grpsiz = ix - ixs;
+ blank_lines = 0;
/*
* If the line before the current change group, is equal to
* the last line of the current change group, shift backward
* the group.
*/
- while (ixs > 0 && recs[ixs - 1]->ha == recs[ix - 1]->ha &&
- xdl_recmatch(recs[ixs - 1]->ptr, recs[ixs - 1]->size, recs[ix - 1]->ptr, recs[ix - 1]->size, flags)) {
+ while (ixs > 0 && recs_match(recs, ixs - 1, ix - 1, flags)) {
rchg[--ixs] = 1;
rchg[--ix] = 0;
* the line next of the current change group, shift forward
* the group.
*/
- while (ix < nrec && recs[ixs]->ha == recs[ix]->ha &&
- xdl_recmatch(recs[ixs]->ptr, recs[ixs]->size, recs[ix]->ptr, recs[ix]->size, flags)) {
+ while (ix < nrec && recs_match(recs, ixs, ix, flags)) {
+ blank_lines += is_blank_line(recs, ix, flags);
+
rchg[ixs++] = 0;
rchg[ix++] = 1;
rchg[--ix] = 0;
while (rchgo[--ixo]);
}
+
+ /*
+ * If a group can be moved back and forth, see if there is a
+ * blank line in the moving space. If there is a blank line,
+ * make sure the last blank line is the end of the group.
+ *
+ * As we already shifted the group forward as far as possible
+ * in the earlier loop, we need to shift it back only if at all.
+ */
+ if ((flags & XDF_COMPACTION_HEURISTIC) && blank_lines) {
+ while (ixs > 0 &&
+ !is_blank_line(recs, ix - 1, flags) &&
+ recs_match(recs, ixs - 1, ix - 1, flags)) {
+ rchg[--ixs] = 1;
+ rchg[--ix] = 0;
+ }
+ }
}
return 0;
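The XDF_COMPACTION_HEURISTIC bit introduced above is consumed by xdl_change_compact(); a front end opts in by setting it in xpparam_t before calling xdl_diff(). A hedged sketch, where "my_output_fn" is a hypothetical xdemitcb_t line consumer and the mmfile_t buffers are assumed to be filled by the caller:

    mmfile_t a, b;                 /* the two sides to compare */
    xpparam_t xpp;
    xdemitconf_t xecfg;
    xdemitcb_t ecb;

    memset(&xpp, 0, sizeof(xpp));
    memset(&xecfg, 0, sizeof(xecfg));
    memset(&ecb, 0, sizeof(ecb));

    xpp.flags = XDF_COMPACTION_HEURISTIC;  /* prefer hunk boundaries at blank lines */
    ecb.outf = my_output_fn;

    if (xdl_diff(&a, &b, &xpp, &xecfg, &ecb) < 0)
        return error("diff generation failed");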