/test-dump-cache-tree
/test-dump-split-index
/test-dump-untracked-cache
+/test-fake-ssh
/test-scrap-cache-tree
/test-genrandom
/test-hashmap
language: c
+sudo: false
+
+cache:
+ directories:
+ - $HOME/travis-cache
+
os:
- linux
- osx
env:
global:
+ - DEVELOPER=1
- P4_VERSION="15.2"
- GIT_LFS_VERSION="1.1.0"
- DEFAULT_TEST_TARGET=prove
- - GIT_PROVE_OPTS="--timer --jobs 3"
+ - GIT_PROVE_OPTS="--timer --jobs 3 --state=failed,slow,save"
- GIT_TEST_OPTS="--verbose --tee"
- - CFLAGS="-g -O2 -Wall -Werror"
- GIT_TEST_CLONE_2GB=YesPlease
# t9810 occasionally fails on Travis CI OS X
# t9816 occasionally fails with "TAP out of sequence errors" on Travis CI OS X
p4 -V | grep Rev.;
echo "$(tput setaf 6)Git-LFS Version$(tput sgr0)";
git-lfs version;
+ mkdir -p $HOME/travis-cache;
+ ln -s $HOME/travis-cache/.prove t/.prove;
before_script: make --jobs=2
- We try to keep to at most 80 characters per line.
+ - As a Git developer, you are assumed to have a reasonably modern
+   compiler, and we recommend that you enable the DEVELOPER makefile
+   knob to ensure your patch is clear of all the compiler warnings we
+   care about, e.g. by running "echo DEVELOPER=1 >>config.mak".
+
- We try to support a wide range of C compilers to compile Git with,
including old ones. That means that you should not use C99
initializers, even if a lot of compilers grok it.
--- /dev/null
+Git 2.8 Release Notes
+=====================
+
+Backward compatibility note
+---------------------------
+
+The rsync:// transport has been removed.
+
+
+Updates since v2.7
+------------------
+
+UI, Workflows & Features
+
+ * It turns out "git clone" over the rsync transport had been broken
+   for a long time when the source repository has packed references,
+   and nobody noticed or complained about it.
+
+ * "branch --delete" has "branch -d" but "push --delete" does not.
+
+ * "git blame" learned to produce the progress eye-candy when it takes
+ too much time before emitting the first line of the result.
+
+ * "git grep" can now be configured (or told from the command line)
+ how many threads to use when searching in the working tree files.
+
+ * Some "git notes" operations, e.g. "git log --notes=<note>", should
+ be able to read notes from any tree-ish that is shaped like a notes
+ tree, but the notes infrastructure required that the argument must
+ be a ref under refs/notes/. Loosen it to require a valid ref only
+ when the operation would update the notes (in which case we must
+ have a place to store the updated notes tree, iow, a ref).
+
+ * "git grep" by default does not fall back to its "--no-index"
+ behaviour outside a directory under Git's control (otherwise the
+ user may by mistake end up running a huge recursive search); with a
+ new configuration (set in $HOME/.gitconfig--by definition this
+ cannot be set in the config file per project), this safety can be
+ disabled.
+
+ * "git pull --rebase" has been extended to allow invoking
+ "rebase -i".
+
+ * "git p4" learned to cope with the type of a file getting changed.
+
+ * "git format-patch" learned to notice format.outputDirectory
+ configuration variable. This allows "-o <dir>" option to be
+ omitted on the command line if you always use the same directory in
+ your workflow.
+
+ * "interpret-trailers" has been taught to optionally update a file in
+ place, instead of always writing the result to the standard output.
+
+ * Many commands that read files that are expected to contain text
+   that is generated (or can be edited) by the end user to control
+   their behaviour (e.g. "git grep -f <filename>") have been updated
+   to be more tolerant of lines that are terminated with CRLF (they
+   used to treat such a line as containing a payload that ends with
+   CR, which is usually not what the users expect).
+
+ * "git notes merge" used to limit the source of the merged notes tree
+ to somewhere under refs/notes/ hierarchy, which was too limiting
+ when inventing a workflow to exchange notes with remote
+ repositories using remote-tracking notes trees (located in e.g.
+ refs/remote-notes/ or somesuch).
+
+ * "git ls-files" learned a new "--eol" option to help diagnose
+ end-of-line problems.
+
+ * "ls-remote" learned an option to show which branch the remote
+ repository advertises as its primary by pointing its HEAD at.
+
+ * The new http.proxyAuthMethod configuration variable can be used to
+   specify which authentication method to use, as a way to work around
+   proxies that do not give the error response expected by libcurl
+   when CURLAUTH_ANY is used. Also, the codepath for proxy
+   authentication has been taught to use the credential API to store
+   the authentication material in the user's keyrings.
+
+ * Update the untracked cache subsystem and change its primary UI from
+ "git update-index" to "git config".
+
+ * There were a few "now I am doing this thing" progress messages in
+   the TCP connection code that can be triggered by setting a verbose
+   option internally in the code, but "git fetch -v" and friends never
+   passed the verbose option down to that codepath; they now do.
+
+ * Clean/smudge filters defined in a configuration file of lower
+ precedence can now be overridden to be a pass-through no-op by
+ setting the variable to an empty string.
+
+ * A new "<branch>^{/!-<pattern>}" notation can be used to name a
+ commit that is reachable from <branch> that does not match the
+ given <pattern>.
+
+ * The "user.useConfigOnly" configuration variable can be used to
+ force the user to always set user.email & user.name configuration
+ variables, serving as a reminder for those who work on multiple
+ projects and do not want to put these in their $HOME/.gitconfig.
+
+ * "git fetch" and friends that make network connections can now be
+ told to only use ipv4 (or ipv6).
+
+ * Some authentication methods do not need username or password, but
+ libcurl needs some hint that it needs to perform authentication.
+ Supplying an empty username and password string is a valid way to
+ do so, but you can set the http.[<url>.]emptyAuth configuration
+ variable to achieve the same, if you find it cleaner.
+
+ * You can now set http.[<url>.]pinnedpubkey to specify the pinned
+ public key when building with recent enough versions of libcURL.
+
+ * The configuration system has been taught to give better error
+   messages describing where it found a bad configuration variable.
+   "git config" learned a new "--show-origin" option to indicate where
+   the values come from.
+
+ * The "credential-cache" daemon process used to run in whatever
+ directory it happened to start in, but this made umount(2)ing the
+ filesystem that houses the repository harder; now the process
+   chdir()s to the directory that houses its own socket on startup.
+
+ * When "git submodule update" did not result in fetching the commit
+ object in the submodule that is referenced by the superproject, the
+ command learned to retry another fetch, specifically asking for
+ that commit that may not be connected to the refs it usually
+ fetches.
+
+ * "git merge-recursive" learned "--no-renames" option to disable its
+ rename detection logic.
+
+ * Across the transition at around Git version 2.0, the user used to
+   get a pretty loud warning when running "git push" without setting
+   the push.default configuration variable. We no longer warn, as the
+   transition was completed a long time ago.
+
+ * README has been renamed to README.md and its contents got tweaked
+ slightly to make it easier on the eyes.
+
+
+Performance, Internal Implementation, Development Support etc.
+
+ * Add a framework to spawn a group of processes in parallel, and use
+ it to run "git fetch --recurse-submodules" in parallel.
+
+ * A slight update to the Makefile to mark phony targets
+   as such correctly (by listing them in ".PHONY").
+
+ * In-core storage of the reverse index for .pack files (which lets
+ you go from a pack offset to an object name) has been streamlined.
+
+ * d95138e6 (setup: set env $GIT_WORK_TREE when work tree is set, like
+ $GIT_DIR, 2015-06-26) attempted to work around a glitch in alias
+ handling by overwriting GIT_WORK_TREE environment variable to
+ affect subprocesses when set_git_work_tree() gets called, which
+ resulted in a rather unpleasant regression to "clone" and "init".
+ Try to address the same issue by always restoring the environment
+ and respawning the real underlying command when handling alias.
+
+ * The low-level code that is used to create symbolic references has
+ been updated to share more code with the code that deals with
+ normal references.
+
+ * strbuf_getline() and friends have been redefined to make it easier
+ to identify which callsite of (new) strbuf_getline_lf() should
+ allow and silently ignore carriage-return at the end of the line to
+ help users on DOSsy systems.
+
+ * "git shortlog" used to accumulate various pieces of information
+ regardless of what was asked to be shown in the final output. It
+   has been optimized by noticing what need not be collected
+ (e.g. there is no need to collect the log messages when showing
+ only the number of changes).
+
+ * "git checkout $branch" (and other operations that share the same
+ underlying machinery) has been optimized.
+
+ * Automated tests in the Travis CI environment have been optimized by
+   persisting runtime statistics of the previous "prove" run and
+   executing tests that take longer before other ones; this reduces
+   the total wallclock time.
+
+ * Test scripts have been updated to remove assumptions that are not
+ portable between Git for POSIX and Git for Windows, or to skip ones
+ with expectations that are not satisfiable on Git for Windows.
+
+ * Some calls to strcpy(3) trigger a false warning from static
+   analysers that are less intelligent than humans, and reducing the
+   number of these false hits helps us notice real issues. A few
+   calls to strcpy(3) in test-path-utils that are already safe have
+   been rewritten to avoid false warnings.
+
+ * Some calls to strcpy(3) trigger a false warning from static
+   analysers that are less intelligent than humans, and reducing the
+   number of these false hits helps us notice real issues. A few
+   calls to strcpy(3) in "git rerere" that are already safe have been
+   rewritten to avoid false warnings.
+
+ * The "name_path" API was an attempt to reduce the need to construct
+ the full path out of a series of path components while walking a
+ tree hierarchy, but over time made less efficient because the path
+ needs to be flattened, e.g. to be compared with another path that
+ is already flat. The API has been removed and its users have been
+ rewritten to simplify the overall code complexity.
+
+ * Help those who debug the http(s) part of the system.
+ (merge 0054045 sp/remote-curl-ssl-strerror later to maint).
+
+ * The internal API to interact with "remote.*" configuration
+ variables has been streamlined.
+
+ * The ref-filter's format-parsing code has been refactored, in
+ preparation for "branch --format" and friends.
+
+ * Traditionally, the tests that try commands that work on the
+ contents in the working tree were named with "worktree" in their
+ filenames, but with the recent addition of "git worktree"
+ subcommand, whose tests are also named similarly, it has become
+ harder to tell them apart. The traditional tests have been renamed
+ to use "work-tree" instead in an attempt to differentiate them.
+ (merge 5549029 mg/work-tree-tests later to maint).
+
+ * Many codepaths forgot to check the return value from
+   git_config_set(); the function is now made to die() to make sure we
+   do not proceed when setting a configuration variable fails.
+ (merge 3d18064 ps/config-error later to maint).
+
+ * Handling of errors while writing into our internal asynchronous
+ process has been made more robust, which reduces flakiness in our
+ tests.
+ (merge 43f3afc jk/epipe-in-async later to maint).
+
+ * There is a new DEVELOPER knob that enables many compiler warning
+ options in the Makefile.
+
+
+Also contains various documentation updates and code clean-ups.
+
+
+Fixes since v2.7
+----------------
+
+Unless otherwise noted, all the fixes since v2.7 in the maintenance
+track are contained in this release (see the maintenance releases'
+notes for details).
+
+ * An earlier change in the 2.5.x era broke users' hooks and aliases
+   by exporting GIT_WORK_TREE to point at the root of the working
+   tree, interfering when they tried to use a different working tree
+   without setting the GIT_WORK_TREE environment variable themselves.
+
+ * The "exclude_list" structure has the usual "alloc, nr" pair of
+ fields to be used by ALLOC_GROW(), but clear_exclude_list() forgot
+ to reset 'alloc' to 0 when it cleared 'nr' to discard the managed
+ array.
+
+ * Paths that the index has been told about with "add -N" are not
+   quite in the index yet, but a few commands behaved as if they
+   already were, in a harmful way.
+
+ * "git send-email" was confused by escaped quotes stored in the alias
+ files saved by "mutt", which has been corrected.
+
+ * A few unportable C constructs have been spotted by the clang
+   compiler and have been fixed.
+
+ * The documentation has been updated to hint at the connection
+   between the '--signoff' option and the DCO.
+
+ * "git reflog" incorrectly assumed that all objects that used to be
+ at the tip of a ref must be commits, which caused it to segfault.
+
+ * The ignore mechanism saw a few regressions around untracked file
+ listing and sparse checkout selection areas in 2.7.0; the change
+ that is responsible for the regression has been reverted.
+
+ * Some codepaths used fopen(3) when opening a fixed path in $GIT_DIR
+ (e.g. COMMIT_EDITMSG) that is meant to be left after the command is
+ done. This however did not work well if the repository is set to
+ be shared with core.sharedRepository and the umask of the previous
+ user is tighter. They have been made to work better by calling
+ unlink(2) and retrying after fopen(3) fails with EPERM.
+
+ * Asking gitweb for a nonexistent commit left a warning in the server
+ log.
+
+ Somebody may want to follow this up with an additional test, perhaps?
+ IIRC, we do test that no Perl warnings are given to the server log,
+ so this should have been caught if our test coverage were good.
+
+ * "git rebase", unlike all other callers of "gc --auto", did not
+ ignore the exit code from "gc --auto".
+
+ * Many codepaths that run "gc --auto" before exiting kept packfiles
+ mapped and left the file descriptors to them open, which was not
+ friendly to systems that cannot remove files that are open. They
+ now close the packs before doing so.
+
+ * A recent optimization to filter-branch in v2.7.0 introduced a
+   regression when the --prune-empty option is used, which has been
+   corrected.
+
+ * The description of the SANITY prerequisite the test suite uses has
+ been clarified both in the comment and in the implementation.
+
+ * "git tag" started listing a tag "foo" as "tags/foo" when a branch
+ named "foo" exists in the same repository; remove this unnecessary
+ disambiguation, which is a regression introduced in v2.7.0.
+
+ * The way "git svn" uses auth parameter was broken by Subversion
+ 1.9.0 and later.
+
+ * The "split" subcommand of "git subtree" (in contrib/) incorrectly
+ skipped merges when it shouldn't, which was corrected.
+
+ * A few options of "git diff" did not work well when the command was
+ run from a subdirectory.
+
+ * The command line completion learned a handful of additional options
+ and command specific syntax.
+
+ * dirname() emulation has been added, as Msys2 lacks it.
+
+ * The underlying machinery used by "ls-files -o" and other commands
+   has been taught not to create an empty submodule ref cache for a
+ directory that is not a submodule. This removes a ton of wasted
+ CPU cycles.
+
+ * "git worktree" had a broken code that attempted to auto-fix
+ possible inconsistency that results from end-users moving a
+ worktree to different places without telling Git (the original
+ repository needs to maintain backpointers to its worktrees, but
+ "mv" run by end-users who are not familiar with that fact will
+ obviously not adjust them), which actually made things worse
+ when triggered.
+
+ * The low-level merge machinery has been taught to use CRLF line
+ termination when inserting conflict markers to merged contents that
+ are themselves CRLF line-terminated.
+
+ * "git push --force-with-lease" has been taught to report if the push
+ needed to force (or fast-forwarded).
+
+ * The emulated "yes" command used in our test scripts has been
+ tweaked not to spend too much time generating unnecessary output
+ that is not used, to help those who test on Windows where it would
+ not stop until it fills the pipe buffer due to lack of SIGPIPE.
+
+ * The documentation for "git clean" has been corrected; it mentioned
+ that .git/modules/* are removed by giving two "-f", which has never
+ been the case.
+
+ * The vimdiff backend for "git mergetool" has been tweaked to arrange
+   and number buffers in the order that would match the expectation of
+   the majority of people, who read left to right, then top down, and
+   "mentally" assign buffers 1 2 3 4 to the local, base, remote and
+   merge windows in that order.
+
+ * "git show 'HEAD:Foo[BAR]Baz'" did not interpret the argument as a
+   rev, i.e. the object named by the pathname with wildcard
+ characters in a tree object.
+ (merge aac4fac nd/dwim-wildcards-as-pathspecs later to maint).
+
+ * "git rev-parse --git-common-dir" used in the worktree feature
+ misbehaved when run from a subdirectory.
+ (merge 17f1365 nd/git-common-dir-fix later to maint).
+
+ * Another try to add support to the ignore mechanism that lets you
+ say "this is excluded" and then later say "oh, no, this part (that
+ is a subset of the previous part) is not excluded".
+
+ * "git worktree add -B <branchname>" did not work.
+
+ * The "v(iew)" subcommand of the interactive "git am -i" command was
+ broken in 2.6.0 timeframe when the command was rewritten in C.
+ (merge 708b8cc jc/am-i-v-fix later to maint).
+
+ * "git merge-tree" used to mishandle "both sides added" conflict with
+ its own "create a fake ancestor file that has the common parts of
+ what both sides have added and do a 3-way merge" logic; this has
+ been updated to use the usual "3-way merge with an empty blob as
+ the fake common ancestor file" approach used in the rest of the
+ system.
+ (merge 907681e jk/no-diff-emit-common later to maint).
+
+ * The memory ownership rule of fill_textconv() API, which was a bit
+ tricky, has been documented a bit better.
+ (merge a64e6a4 jk/more-comments-on-textconv later to maint).
+
+ * Update various codepaths to avoid manually-counted malloc().
+ (merge 08c95df jk/tighten-alloc later to maint).
+
+ * The documentation did not clearly state that the 'simple' mode is
+ now the default for "git push" when push.default configuration is
+ not set.
+ (merge f6b1fb3 mm/push-simple-doc later to maint).
+
+ * Recent versions of GNU grep are pickier when their input contains
+   arbitrary binary data, which some of our tests use. Rewrite the
+ tests to sidestep the problem.
+ (merge 3b1442d jk/grep-binary-workaround-in-test later to maint).
+
+ * A helper function that "git submodule" has used since v2.7.0 to
+   list the modules that match the pathspec argument given to its
+   subcommands (e.g. "submodule add <repo> <path>") has been fixed.
+ (merge 2b56bb7 sb/submodule-module-list-fix later to maint).
+
+ * "git config section.var value" to set a value in per-repository
+ configuration file failed when it was run outside any repository,
+ but didn't say the reason correctly.
+ (merge 638fa62 js/config-set-in-non-repository later to maint).
+
+ * Other minor clean-ups and documentation updates
+ (merge f459823 ak/extract-argv0-last-dir-sep later to maint).
+ (merge 63ca1c0 ak/git-strip-extension-from-dashed-command later to maint).
+ (merge 4867f11 ps/plug-xdl-merge-leak later to maint).
+ (merge 4938686 dt/initial-ref-xn-commit-doc later to maint).
+ (merge 9537f21 ma/update-hooks-sample-typofix later to maint).
iso format is used. For supported values, see the discussion
of the --date option at linkgit:git-log[1].
+--[no-]progress::
+ Progress status is reported on the standard error stream
+ by default when it is attached to a terminal. This flag
+ enables progress reporting even if not attached to a
+	terminal. `--progress` cannot be used together with
+	`--porcelain` or `--incremental`.
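++
+For example, to force the progress display (the path is only
+illustrative):
++
+------------
+$ git blame --progress builtin/blame.c
+------------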
+
-M|<num>|::
Detect moved or copied lines within a file. When a commit
moves or copies a block of lines (e.g. the original file
crawlers and some backup systems).
See linkgit:git-update-index[1]. True by default.
+core.untrackedCache::
+	Determines what to do about the untracked cache feature of the
+	index. It will be kept if this variable is unset or set to
+	`keep`. It will be added automatically if set to `true`, and
+	removed automatically if set to `false`. Before
+ setting it to `true`, you should check that mtime is working
+ properly on your system.
+ See linkgit:git-update-index[1]. `keep` by default.
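++
+For example, to enable the feature in the current repository:
++
+------------
+$ git config core.untrackedCache true
+------------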
+
core.checkStat::
Determines which stat fields to match between the index
and work tree. The user can set this to 'default' or
so that locally committed merge commits will not be flattened
by running 'git pull'.
+
+When the value is `interactive`, the rebase is run in interactive mode.
++
*NOTE*: this is a possibly dangerous operation; do *not* use
it unless you understand the implications (see linkgit:git-rebase[1]
for details).
format-patch is invoked, but in addition can be set to "auto", to
generate a cover-letter only when there's more than one patch.
+format.outputDirectory::
+ Set a custom directory to store the resulting files instead of the
+ current working directory.
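++
+For example, to collect patches in an `outgoing/` directory by default
+(the directory name is only illustrative):
++
+------------
+$ git config format.outputDirectory outgoing/
+$ git format-patch -1
+------------
++
+A `-o` given on the command line still takes precedence, so
+`git format-patch -1 -o .` writes to the current directory.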
+
filter.<driver>.clean::
The command which is used to convert the content of a worktree
file to a blob upon checkin. See linkgit:gitattributes[5] for
option is ignored when the 'grep.patternType' option is set to a value
other than 'default'.
+grep.threads::
+ Number of grep worker threads to use.
+ See `grep.threads` in linkgit:git-grep[1] for more information.
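++
+For example, to use two worker threads by default (the same can be
+requested for a single invocation with `git grep --threads 2`):
++
+------------
+$ git config grep.threads 2
+------------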
+
+grep.fallbackToNoIndex::
+ If set to true, fall back to git grep --no-index if git grep
+ is executed outside of a git repository. Defaults to false.
+
gpg.program::
Use this custom program instead of "gpg" found on $PATH when
making or verifying a PGP signature. The program must support the
http.proxy::
Override the HTTP proxy, normally configured using the 'http_proxy',
- 'https_proxy', and 'all_proxy' environment variables (see
- `curl(1)`). This can be overridden on a per-remote basis; see
- remote.<name>.proxy
+ 'https_proxy', and 'all_proxy' environment variables (see `curl(1)`). In
+ addition to the syntax understood by curl, it is possible to specify a
+ proxy string with a user name but no password, in which case git will
+ attempt to acquire one in the same way it does for other credentials. See
+ linkgit:gitcredentials[7] for more information. The syntax thus is
+ '[protocol://][user[:password]@]proxyhost[:port]'. This can be overridden
+ on a per-remote basis; see remote.<name>.proxy
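++
+For example, to go through a proxy that requires a user name, letting
+Git ask for the password via the credential machinery (host and user
+name are placeholders):
++
+------------
+$ git config http.proxy http://proxyuser@proxy.example.com:8080
+------------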
+
+http.proxyAuthMethod::
+ Set the method with which to authenticate against the HTTP proxy. This
+ only takes effect if the configured proxy string contains a user name part
+ (i.e. is of the form 'user@host' or 'user@host:port'). This can be
+ overridden on a per-remote basis; see `remote.<name>.proxyAuthMethod`.
+ Both can be overridden by the 'GIT_HTTP_PROXY_AUTHMETHOD' environment
+ variable. Possible values are:
++
+--
+* `anyauth` - Automatically pick a suitable authentication method. It is
+ assumed that the proxy answers an unauthenticated request with a 407
+ status code and one or more Proxy-authenticate headers with supported
+ authentication methods. This is the default.
+* `basic` - HTTP Basic authentication
+* `digest` - HTTP Digest authentication; this prevents the password from being
+ transmitted to the proxy in clear text
+* `negotiate` - GSS-Negotiate authentication (compare the --negotiate option
+ of `curl(1)`)
+* `ntlm` - NTLM authentication (compare the --ntlm option of `curl(1)`)
+--
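++
+For example, to force HTTP Basic authentication against the configured
+proxy, or to override the method for a single run via the environment:
++
+------------
+$ git config http.proxyAuthMethod basic
+$ GIT_HTTP_PROXY_AUTHMETHOD=digest git fetch
+------------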
+
+http.emptyAuth::
+ Attempt authentication without seeking a username or password. This
+ can be used to attempt GSS-Negotiate authentication without specifying
+ a username in the URL, as libcurl normally requires a username for
+ authentication.
http.cookieFile::
File containing previously stored cookie lines which should be used
with when fetching or pushing over HTTPS. Can be overridden
by the 'GIT_SSL_CAPATH' environment variable.
+http.pinnedpubkey::
+ Public key of the https service. It may either be the filename of
+ a PEM or DER encoded public key file or a string starting with
+ 'sha256//' followed by the base64 encoded sha256 hash of the
+ public key. See also libcurl 'CURLOPT_PINNEDPUBLICKEY'. git will
+ exit with an error if this option is set but not supported by
+ cURL.
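++
+For example (the key file path is only a placeholder):
++
+------------
+$ git config http.pinnedpubkey /etc/ssl/git-mirror-pubkey.pem
+------------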
+
http.sslTry::
Attempt to use AUTH SSL/TLS and encrypted data transfers
when connecting via regular FTP protocol. This might be needed
larger than 2 GB.
+
If you have an old Git that does not understand the version 2 `*.idx` file,
-cloning or fetching over a non native protocol (e.g. "http" and "rsync")
+cloning or fetching over a non native protocol (e.g. "http")
that will copy both `*.pack` file and corresponding `*.idx` file from the
other side may give you a repository that cannot be accessed with your
older version of Git. If the `*.pack` file is smaller than 2 GB, however,
so that locally committed merge commits will not be flattened
by running 'git pull'.
+
+When the value is `interactive`, the rebase is run in interactive mode.
++
*NOTE*: this is a possibly dangerous operation; do *not* use
it unless you understand the implications (see linkgit:git-rebase[1]
for details).
the proxy to use for that remote. Set to the empty string to
disable proxying for that remote.
+remote.<name>.proxyAuthMethod::
+ For remotes that require curl (http, https and ftp), the method to use for
+ authenticating against the proxy in use (probably set in
+ `remote.<name>.proxy`). See `http.proxyAuthMethod`.
+
remote.<name>.fetch::
The default set of "refspec" for linkgit:git-fetch[1]. See
linkgit:git-fetch[1].
Can be overridden by the 'GIT_AUTHOR_NAME' and 'GIT_COMMITTER_NAME'
environment variables. See linkgit:git-commit-tree[1].
+user.useConfigOnly::
+ Instruct Git to avoid trying to guess defaults for 'user.email'
+ and 'user.name', and instead retrieve the values only from the
+ configuration. For example, if you have multiple email addresses
+ and would like to use a different one for each repository, then
+ with this configuration option set to `true` in the global config
+ along with a name, Git will prompt you to set up an email before
+ making new commits in a newly cloned repository.
+ Defaults to `false`.
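++
+A minimal setup for the situation described above (names and addresses
+are placeholders):
++
+------------
+$ git config --global user.useConfigOnly true
+$ git config --global user.name "Your Name"
+$ cd ~/src/work-project
+$ git config user.email you@work.example
+------------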
+
user.signingKey::
If linkgit:git-tag[1] or linkgit:git-commit[1] is not selecting the
key you want it to automatically when creating a signed tag or
reference to a commit that isn't already in the local submodule
clone.
+-j::
+--jobs=<n>::
+ Number of parallel children to be used for fetching submodules.
+ Each will fetch from different submodules, such that fetching many
+ submodules will be faster. By default submodules will be fetched
+ one at a time.
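++
+For example, to fetch up to four submodules at the same time:
++
+------------
+$ git fetch --recurse-submodules --jobs=4
+------------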
+
--no-recurse-submodules::
Disable recursive fetching of submodules (this has the same effect as
using the '--recurse-submodules=no' option).
by default when it is attached to a terminal, unless -q
is specified. This flag forces progress status even if the
standard error stream is not directed to a terminal.
+
+-4::
+--ipv4::
+ Use IPv4 addresses only, ignoring IPv6 addresses.
+
+-6::
+--ipv6::
+ Use IPv6 addresses only, ignoring IPv4 addresses.
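++
+For example, to force an IPv4-only fetch (assuming a remote named
+`origin`):
++
+------------
+$ git fetch -4 origin
+------------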
[verse]
'git blame' [-c] [-b] [-l] [--root] [-t] [-f] [-n] [-s] [-e] [-p] [-w] [--incremental]
[-L <range>] [-S <revs-file>] [-M] [-C] [-C] [-C] [--since=<date>]
- [--abbrev=<n>] [<rev> | --contents <file> | --reverse <rev>] [--] <file>
+ [--progress] [--abbrev=<n>] [<rev> | --contents <file> | --reverse <rev>]
+ [--] <file>
DESCRIPTION
-----------
Some workflows require that one or more branches of development on one
machine be replicated on another machine, but the two machines cannot
be directly connected, and therefore the interactive Git protocols (git,
-ssh, rsync, http) cannot be used. This command provides support for
+ssh, http) cannot be used. This command provides support for
'git fetch' and 'git pull' to operate by packaging objects and references
in an archive at the originating machine, then importing those into
another repository using 'git fetch' and 'git pull'
linkgit:gitignore[5]
linkgit:gitconfig[5]
linkgit:git-ls-files[1]
+GIT_TRACE_EXCLUDE in linkgit:git[1]
GIT
---
--quiet::
-q::
Operate quietly. Progress is not reported to the standard
- error stream. This flag is also passed to the `rsync'
- command when given.
+ error stream.
--verbose::
-v::
SYNOPSIS
--------
[verse]
-'git config' [<file-option>] [type] [-z|--null] name [value [value_regex]]
+'git config' [<file-option>] [type] [--show-origin] [-z|--null] name [value [value_regex]]
'git config' [<file-option>] [type] --add name value
'git config' [<file-option>] [type] --replace-all name value [value_regex]
-'git config' [<file-option>] [type] [-z|--null] --get name [value_regex]
-'git config' [<file-option>] [type] [-z|--null] --get-all name [value_regex]
-'git config' [<file-option>] [type] [-z|--null] [--name-only] --get-regexp name_regex [value_regex]
+'git config' [<file-option>] [type] [--show-origin] [-z|--null] --get name [value_regex]
+'git config' [<file-option>] [type] [--show-origin] [-z|--null] --get-all name [value_regex]
+'git config' [<file-option>] [type] [--show-origin] [-z|--null] [--name-only] --get-regexp name_regex [value_regex]
'git config' [<file-option>] [type] [-z|--null] --get-urlmatch name URL
'git config' [<file-option>] --unset name [value_regex]
'git config' [<file-option>] --unset-all name [value_regex]
'git config' [<file-option>] --rename-section old_name new_name
'git config' [<file-option>] --remove-section name
-'git config' [<file-option>] [-z|--null] [--name-only] -l | --list
+'git config' [<file-option>] [--show-origin] [-z|--null] [--name-only] -l | --list
'git config' [<file-option>] --get-color name [default]
'git config' [<file-option>] --get-colorbool name [stdout-is-tty]
'git config' [<file-option>] -e | --edit
Output only the names of config variables for `--list` or
`--get-regexp`.
+--show-origin::
+ Augment the output of all queried config options with the
+ origin type (file, standard input, blob, command line) and
+ the actual origin (config file path, ref, or blob id if
+ applicable).
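++
+For example, to see each configured value together with where it comes
+from:
++
+------------
+$ git config --list --show-origin
+$ git config --show-origin --get user.email
+------------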
+
--get-colorbool name [stdout-is-tty]::
Find the color setting for `name` (e.g. `color.diff`) and output
--[no-]includes::
Respect `include.*` directives in config files when looking up
- values. Defaults to on.
+ values. Defaults to `off` when a specific file is given (e.g.,
+ using `--file`, `--global`, etc) and `on` when searching all
+ config files.
[[FILES]]
FILES
cache daemon if one is not started). Defaults to
`~/.git-credential-cache/socket`. If your home directory is on a
network-mounted filesystem, you may need to change this to a
- local filesystem.
+ local filesystem. You must specify an absolute path.
CONTROLLING THE DAEMON
----------------------
align::
Left-, middle-, or right-align the content between
- %(align:...) and %(end). The "align:" is followed by `<width>`
- and `<position>` in any order separated by a comma, where the
- `<position>` is either left, right or middle, default being
- left and `<width>` is the total length of the content with
- alignment. If the contents length is more than the width then
- no alignment is performed. If used with '--quote' everything
- in between %(align:...) and %(end) is quoted, but if nested
- then only the topmost level performs quoting.
+ %(align:...) and %(end). The "align:" is followed by
+ `width=<width>` and `position=<position>` in any order
+ separated by a comma, where the `<position>` is either left,
+ right or middle, default being left and `<width>` is the total
+ length of the content with alignment. For brevity, the
+ "width=" and/or "position=" prefixes may be omitted, and bare
+ <width> and <position> used instead. For instance,
+ `%(align:<width>,<position>)`. If the contents length is more
+ than the width then no alignment is performed. If used with
+ '--quote' everything in between %(align:...) and %(end) is
+ quoted, but if nested then only the topmost level performs
+ quoting.
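++
+For instance, to print branch names padded to 25 columns, followed by
+their abbreviated object names:
++
+------------
+$ git for-each-ref \
+	--format='%(align:width=25)%(refname:short)%(end)%(objectname:short)' \
+	refs/heads
+------------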
In addition to the above, for commit and tag objects, the header
field names (`tree`, `parent`, `object`, `type`, and `tag`) can
output, unless the `--stdout` option is specified.
If `-o` is specified, output files are created in <dir>. Otherwise
-they are created in the current working directory.
+they are created in the current working directory. The default path
+can be set with the 'format.outputDirectory' configuration option.
+The `-o` option takes precedence over `format.outputDirectory`.
+To store patches in the current working directory even when
+`format.outputDirectory` points elsewhere, use `-o .`.
By default, the subject of a single patch is "[PATCH] " followed by
the concatenation of lines from the commit message up to the first blank
[--break] [--heading] [-p | --show-function]
[-A <post-context>] [-B <pre-context>] [-C <context>]
[-W | --function-context]
+ [--threads <num>]
[-f <file>] [-e] <pattern>
[--and|--or|--not|(|)|-e <pattern>...]
[ [--[no-]exclude-standard] [--cached | --no-index | --untracked] | <tree>...]
option is ignored when the 'grep.patternType' option is set to a value
other than 'default'.
+grep.threads::
+ Number of grep worker threads to use. If unset (or set to 0),
+ 8 threads are used by default (for now).
+
grep.fullName::
If set to true, enable '--full-name' option by default.
+grep.fallbackToNoIndex::
+ If set to true, fall back to git grep --no-index if git grep
+ is executed outside of a git repository. Defaults to false.
+
OPTIONS
-------
effectively showing the whole function in which the match was
found.
+--threads <num>::
+ Number of grep worker threads to use.
+ See `grep.threads` in 'CONFIGURATION' for more information.
+
-f <file>::
Read patterns from <file>, one per line.
SYNOPSIS
--------
[verse]
-'git interpret-trailers' [--trim-empty] [(--trailer <token>[(=|:)<value>])...] [<file>...]
+'git interpret-trailers' [--in-place] [--trim-empty] [(--trailer <token>[(=|:)<value>])...] [<file>...]
DESCRIPTION
-----------
OPTIONS
-------
+--in-place::
+ Edit the files in place.
+
--trim-empty::
If the <value> part of any trailer contains only whitespace,
the whole trailer will be removed from the resulting message.
Signed-off-by: Bob <bob@example.com>
------------
+* Use the '--in-place' option to edit a message file in place:
++
+------------
+$ cat msg.txt
+subject
+
+message
+
+Signed-off-by: Bob <bob@example.com>
+$ git interpret-trailers --trailer 'Acked-by: Alice <alice@example.com>' --in-place msg.txt
+$ cat msg.txt
+subject
+
+message
+
+Signed-off-by: Bob <bob@example.com>
+Acked-by: Alice <alice@example.com>
+------------
+
* Extract the last commit as a patch, and add a 'Cc' and a
'Reviewed-by' trailer to it:
+
'git ls-files' [-z] [-t] [-v]
(--[cached|deleted|others|ignored|stage|unmerged|killed|modified])*
(-[c|d|o|i|s|u|k|m])*
+ [--eol]
[-x <pattern>|--exclude=<pattern>]
[-X <file>|--exclude-from=<file>]
[--exclude-per-directory=<file>]
possible for manual inspection; the exact format may change at
any time.
+--eol::
+ Show <eolinfo> and <eolattr> of files.
+ <eolinfo> is the file content identification used by Git when
+ the "text" attribute is "auto" (or not set and core.autocrlf is not false).
+ <eolinfo> is either "-text", "none", "lf", "crlf", "mixed" or "".
++
+"" means the file is not a regular file, it is not in the index or
+not accessible in the working tree.
++
+<eolattr> is the attribute that is used when checking out or committing,
+it is either "", "-text", "text", "text=auto", "text eol=lf", "text eol=crlf".
+Note: Currently Git does not support "text=auto eol=lf" or "text=auto eol=crlf",
+that may change in the future.
++
+Both the <eolinfo> in the index ("i/<eolinfo>")
+and in the working tree ("w/<eolinfo>") are shown for regular files,
+followed by the ("attr/<eolattr>").
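++
+For example (the reported values depend on the contents and attributes
+of the files in your repository):
++
+------------
+$ git ls-files --eol Makefile
+------------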
+
\--::
Do not interpret any more arguments as options.
[<tag> ]<mode> <object> <stage> <file>
+'git ls-files --eol' will show
+ i/<eolinfo><SPACES>w/<eolinfo><SPACES>attr/<eolattr><SPACE*><TAB><file>
+
'git ls-files --unmerged' and 'git ls-files --stage' can be used to examine
detailed information on unmerged paths.
SYNOPSIS
--------
[verse]
-'git ls-remote' [--heads] [--tags] [--upload-pack=<exec>]
- [--exit-code] <repository> [<refs>...]
+'git ls-remote' [--heads] [--tags] [--refs] [--upload-pack=<exec>]
+ [-q | --quiet] [--exit-code] [--get-url]
+ [--symref] [<repository> [<refs>...]]
DESCRIPTION
-----------
both, references stored in refs/heads and refs/tags are
displayed.
+--refs::
+ Do not show peeled tags or pseudorefs like HEAD in the output.
+
+-q::
+--quiet::
+ Do not print remote URL to stderr.
+
--upload-pack=<exec>::
Specify the full path of 'git-upload-pack' on the remote
host. This allows listing references from repositories accessed via
"url.<base>.insteadOf" config setting (See linkgit:git-config[1]) and
exit without talking to the remote.
+--symref::
+	In addition to the object pointed to by it, show the underlying
+	ref it points to when showing a symbolic ref. Currently,
+	upload-pack only shows the symref HEAD, so it will be the only
+	one shown by ls-remote.
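++
+For example, to see which branch the HEAD of a remote named `origin`
+points at:
++
+------------
+$ git ls-remote --symref origin HEAD
+------------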
+
<repository>::
The "remote" repository to query. This parameter can be
either a URL or the name of a remote (see the GIT URLS and
include::merge-options.txt[]
-r::
---rebase[=false|true|preserve]::
+--rebase[=false|true|preserve|interactive]::
When true, rebase the current branch on top of the upstream
branch after fetching. If there is a remote-tracking branch
corresponding to the upstream branch and the upstream branch
+
When false, merge the current branch into the upstream branch.
+
+When `interactive`, enable the interactive mode of rebase.
++
See `pull.rebase`, `branch.<name>.rebase` and `branch.autoSetupRebase` in
linkgit:git-config[1] if you want to make `git pull` always use
`--rebase` instead of merging.
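++
+For example, to fetch from the configured upstream and then reorder or
+squash your local commits on top of it:
++
+------------
+$ git pull --rebase=interactive
+------------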
--------
[verse]
'git push' [--all | --mirror | --tags] [--follow-tags] [--atomic] [-n | --dry-run] [--receive-pack=<git-receive-pack>]
- [--repo=<repository>] [-f | --force] [--prune] [-v | --verbose]
+ [--repo=<repository>] [-f | --force] [-d | --delete] [--prune] [-v | --verbose]
[-u | --set-upstream]
[--[no-]signed|--sign=(true|false|if-asked)]
[--force-with-lease[=<refname>[:<expect>]]]
and if it is not found, honors `push.default` configuration to decide
what to push (See linkgit:git-config[1] for the meaning of `push.default`).
+When neither the command-line nor the configuration specify what to
+push, the default behavior is used, which corresponds to the `simple`
+value for `push.default`: the current branch is pushed to the
+corresponding upstream branch, but as a safety measure, the push is
+aborted if the upstream branch does not have the same name as the
+local one.
+
OPTIONS[[OPTIONS]]
------------------
default is --verify, giving the hook a chance to prevent the
push. With --no-verify, the hook is bypassed completely.
+-4::
+--ipv4::
+ Use IPv4 addresses only, ignoring IPv6 addresses.
+
+-6::
+--ipv6::
+ Use IPv6 addresses only, ignoring IPv4 addresses.
include::urls-remotes.txt[]
'git pack-objects'; this typically results in slightly smaller packs,
but the generated packs are incompatible with versions of Git older than
version 1.4.4. If you need to share your repository with such ancient Git
-versions, either directly or via the dumb http or rsync protocol, then you
+versions, either directly or via the dumb http protocol, then you
need to set the configuration variable `repack.UseDeltaBaseOffset` to
"false" and repack. Access from old Git versions over the native protocol
is unaffected by this option as the conversion is performed on the fly
[--[no-]skip-worktree]
[--ignore-submodules]
[--[no-]split-index]
- [--[no-|force-]untracked-cache]
+ [--[no-|test-|force-]untracked-cache]
[--really-refresh] [--unresolve] [--again | -g]
[--info-only] [--index-info]
[-z] [--stdin] [--index-version <n>]
--untracked-cache::
--no-untracked-cache::
- Enable or disable untracked cache extension. This could speed
- up for commands that involve determining untracked files such
- as `git status`. The underlying operating system and file
- system must change `st_mtime` field of a directory if files
- are added or deleted in that directory.
+	Enable or disable the untracked cache feature. Please use
+	`--test-untracked-cache` before enabling it.
++
+These options take effect regardless of the value of the
+`core.untrackedCache` configuration variable (see
+linkgit:git-config[1]). But a warning is emitted when the change goes
+against the configured value, as the configured value will take effect
+the next time the index is read, and this will undo the intended
+effect of the option.
+
+--test-untracked-cache::
+ Only perform tests on the working directory to make sure
+ untracked cache can be used. You have to manually enable
+ untracked cache using `--untracked-cache` or
+ `--force-untracked-cache` or the `core.untrackedCache`
+ configuration variable afterwards if you really want to use
+	it. If a test fails, the exit code is 1 and a message
+	explains what is not working as needed; otherwise the exit
+	code is 0 and OK is printed.
--force-untracked-cache::
- For safety, `--untracked-cache` performs tests on the working
- directory to make sure untracked cache can be used. These
- tests can take a few seconds. `--force-untracked-cache` can be
- used to skip the tests.
+ Same as `--untracked-cache`. Provided for backwards
+ compatibility with older versions of Git where
+ `--untracked-cache` used to imply `--test-untracked-cache` but
+ this option would enable the extension unconditionally.
\--::
Do not interpret any more arguments as options.
different from assume-unchanged bit's. Skip-worktree also takes
precedence over assume-unchanged bit when both are set.
+Untracked cache
+---------------
+
+This cache is meant to speed up commands that involve determining
+untracked files such as `git status`.
+
+This feature works by recording the mtime of the working tree
+directories and then omitting reading directories and stat calls
+against files in those directories whose mtime hasn't changed. For
+this to work the underlying operating system and file system must
+change the `st_mtime` field of directories if files in the directory
+are added, modified or deleted.
+
+You can test whether the filesystem supports that with the
+`--test-untracked-cache` option. The `--untracked-cache` option used
+to implicitly perform that test in older versions of Git, but that's
+no longer the case.
+
+If you want to enable (or disable) this feature, it is easier to use
+the `core.untrackedCache` configuration variable (see
+linkgit:git-config[1]) than using the `--untracked-cache` option to
+`git update-index` in each repository, especially if you want to do so
+across all repositories you use, because you can set the configuration
+variable to `true` (or `false`) in your `$HOME/.gitconfig` just once
+and have it affect all repositories you touch.
+
+When the `core.untrackedCache` configuration variable is changed, the
+untracked cache is added to or removed from the index the next time a
+command reads the index; while when `--[no-|force-]untracked-cache`
+are used, the untracked cache is immediately added to or removed from
+the index.
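+
+A typical way to adopt the feature, following the advice above:
+
+------------
+$ git update-index --test-untracked-cache
+$ git config core.untrackedCache true
+------------
+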
Configuration
-------------
something outside Git (file system crawlers and backup systems use
ctime for marking files processed) (see linkgit:git-config[1]).
+The untracked cache extension can be enabled by the
+`core.untrackedCache` configuration variable (see
+linkgit:git-config[1]).
SEE ALSO
--------
cloning of shallow repositories.
See 'GIT_TRACE' for available trace output options.
+'GIT_TRACE_EXCLUDE'::
+ Enables trace messages that can help debugging .gitignore
+ processing. See 'GIT_TRACE' for available trace output
+ options.
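++
+For example, to see why particular paths are being ignored, with the
+trace written to standard error:
++
+------------
+$ GIT_TRACE_EXCLUDE=1 git status
+------------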
+
'GIT_LITERAL_PATHSPECS'::
Setting this variable to `1` will cause Git to treat all
pathspecs literally, rather than as glob patterns. For example,
- `ssh`: git over ssh (including `host:path` syntax,
`git+ssh://`, etc).
- - `rsync`: git over rsync
-
- `http`: git over http, both "smart http" and "dumb http".
Note that this does _not_ include `https`; if you want both,
you should specify both as `http:https`.
Again, this can all be simplified with
----------------
-$ git clone rsync://rsync.kernel.org/pub/scm/git/git.git/ my-git
+$ git clone git://git.kernel.org/pub/scm/git/git.git/ my-git
$ cd my-git
$ git checkout
----------------
One of the following transports can be used to name the
repository to download from:
-Rsync::
- `rsync://remote.machine/path/to/repo.git/`
-+
-Rsync transport is usable for both uploading and downloading,
-but is completely unaware of what git does, and can produce
-unexpected results when you download from the public repository
-while the repository owner is uploading into it via `rsync`
-transport. Most notably, it could update the files under
-`refs/` which holds the object name of the topmost commits
-before uploading the files in `objects/` -- the downloader would
-obtain head commit object name while that object itself is still
-not available in the repository. For this reason, it is
-considered deprecated.
-
SSH::
`remote.machine:/path/to/repo.git/` or
+
When a repository is synchronized via `git push` and `git pull`
objects packed in the source repository are usually stored
-unpacked in the destination, unless rsync transport is used.
+unpacked in the destination.
While this allows you to use different packing strategies on
both ends, it also means you may need to repack both
repositories every once in a while.
- An optional prefix "`!`" which negates the pattern; any
matching file excluded by a previous pattern will become
- included again. It is not possible to re-include a file if a parent
- directory of that file is excluded. Git doesn't list excluded
- directories for performance reasons, so any patterns on contained
- files have no effect, no matter where they are defined.
+ included again.
Put a backslash ("`\`") in front of the first "`!`" for patterns
that begin with a literal "`!`", for example, "`\!important!.txt`".
+   It is possible to re-include a file whose parent directory is
+   excluded if certain conditions are met. See section NOTES
+   for details.
- If the pattern ends with a slash, it is removed for the
purpose of the following description, but it would only find
To stop tracking a file that is currently tracked, use
'git rm --cached'.
+To re-include files or directories when their parent directory is
+excluded, the following conditions must be met:
+
+ - The rules to exclude a directory and re-include a subset back must
+ be in the same .gitignore file.
+
+ - The directory part in the re-include rules must be literal (i.e. no
+   wildcards).
+
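+For example, both conditions above are met by the following pair of
+patterns kept in a single .gitignore (the names are only illustrative):
+
+--------------------------------------------------------------
+    logs/
+    !logs/.gitkeep
+--------------------------------------------------------------
+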
EXAMPLES
--------
bob$ git clone alice.org:/home/alice/project myrepo
-------------------------------------
-Alternatively, Git has a native protocol, or can use rsync or http;
+Alternatively, Git has a native protocol, or can use http;
see linkgit:git-pull[1] for details.
Git can also be used in a CVS-like mode, with a central repository
Disables the `renormalize` option. This overrides the
`merge.renormalize` configuration variable.
+no-renames;;
+ Turn off rename detection.
+ See also linkgit:git-diff[1] `--no-renames`.
+
+find-renames[=<n>];;
+ Turn on rename detection, optionally setting the similarity
+ threshold. This is the default.
+ See also linkgit:git-diff[1] `--find-renames`.
+
rename-threshold=<n>;;
- Controls the similarity threshold used for rename detection.
- See also linkgit:git-diff[1] `-M`.
+ Deprecated synonym for `find-renames=<n>`.
subtree[=<path>];;
This option is a more advanced form of 'subtree' strategy, where
commit may be copied to the output.
ifndef::git-rev-list[]
---notes[=<ref>]::
+--notes[=<treeish>]::
Show the notes (see linkgit:git-notes[1]) that annotate the
commit, when showing the commit log message. This is the default
for `git log`, `git show` and `git whatchanged` commands when
'core.notesRef' and 'notes.displayRef' variables (or corresponding
environment overrides). See linkgit:git-config[1] for more details.
+
-With an optional '<ref>' argument, show this notes ref instead of the
-default notes ref(s). The ref specifies the full refname when it begins
+With an optional '<treeish>' argument, use the treeish to find the notes
+to display. The treeish can specify the full refname when it begins
with `refs/notes/`; when it begins with `notes/`, `refs/` and otherwise
`refs/notes/` is prefixed to form a full name of the ref.
+
"--notes --notes=foo --no-notes --notes=bar" will only show notes
from "refs/notes/bar".
---show-notes[=<ref>]::
+--show-notes[=<treeish>]::
--[no-]standard-notes::
These options are deprecated. Use the above --notes/--no-notes
options instead.
'@'::
'@' alone is a shortcut for 'HEAD'.
-'<refname>@\{<date>\}', e.g. 'master@\{yesterday\}', 'HEAD@\{5 minutes ago\}'::
+'<refname>@{<date>}', e.g. 'master@\{yesterday\}', 'HEAD@{5 minutes ago}'::
A ref followed by the suffix '@' with a date specification
enclosed in a brace
- pair (e.g. '\{yesterday\}', '\{1 month 2 weeks 3 days 1 hour 1
- second ago\}' or '\{1979-02-26 18:30:00\}') specifies the value
+ pair (e.g. '\{yesterday\}', '{1 month 2 weeks 3 days 1 hour 1
+ second ago}' or '{1979-02-26 18:30:00}') specifies the value
of the ref at a prior point in time. This suffix may only be
used immediately following a ref name and the ref must have an
existing log ('$GIT_DIR/logs/<ref>'). Note that this looks up the state
'master' branch last week. If you want to look at commits made during
certain times, see '--since' and '--until'.
-'<refname>@\{<n>\}', e.g. 'master@\{1\}'::
+'<refname>@{<n>}', e.g. 'master@\{1\}'::
A ref followed by the suffix '@' with an ordinal specification
enclosed in a brace pair (e.g. '\{1\}', '\{15\}') specifies
the n-th prior value of that ref. For example 'master@\{1\}'
immediately following a ref name and the ref must have an existing
log ('$GIT_DIR/logs/<refname>').
-'@\{<n>\}', e.g. '@\{1\}'::
+'@{<n>}', e.g. '@\{1\}'::
You can use the '@' construct with an empty ref part to get at a
reflog entry of the current branch. For example, if you are on
branch 'blabla' then '@\{1\}' means the same as 'blabla@\{1\}'.
-'@\{-<n>\}', e.g. '@\{-1\}'::
- The construct '@\{-<n>\}' means the <n>th branch/commit checked out
+'@{-<n>}', e.g. '@{-1}'::
+ The construct '@{-<n>}' means the <n>th branch/commit checked out
before the current one.
'<branchname>@\{upstream\}', e.g. 'master@\{upstream\}', '@\{u\}'::
'<rev>{caret}1{caret}1{caret}1'. See below for an illustration of
the usage of this form.
-'<rev>{caret}\{<type>\}', e.g. 'v0.99.8{caret}\{commit\}'::
+'<rev>{caret}{<type>}', e.g. 'v0.99.8{caret}\{commit\}'::
A suffix '{caret}' followed by an object type name enclosed in
brace pair means dereference the object at '<rev>' recursively until
an object of type '<type>' is found or the object cannot be
'rev{caret}\{tag\}' can be used to ensure that 'rev' identifies an
existing tag object.
-'<rev>{caret}\{\}', e.g. 'v0.99.8{caret}\{\}'::
+'<rev>{caret}{}', e.g. 'v0.99.8{caret}{}'::
A suffix '{caret}' followed by an empty brace pair
means the object could be a tag,
and dereference the tag recursively until a non-tag object is
found.
-'<rev>{caret}\{/<text>\}', e.g. 'HEAD^{/fix nasty bug}'::
+'<rev>{caret}{/<text>}', e.g. 'HEAD^{/fix nasty bug}'::
A suffix '{caret}' to a revision parameter, followed by a brace
pair that contains a text led by a slash,
is the same as the ':/fix nasty bug' syntax below except that
A colon, followed by a slash, followed by a text, names
a commit whose commit message matches the specified regular expression.
This name returns the youngest matching commit which is
- reachable from any ref. If the commit message starts with a
- '!' you have to repeat that; the special sequence ':/!',
- followed by something else than '!', is reserved for now.
- The regular expression can match any part of the commit message. To
- match messages starting with a string, one can use e.g. ':/^foo'.
+ reachable from any ref. The regular expression can match any part of the
+ commit message. To match messages starting with a string, one can use
+ e.g. ':/^foo'. The special sequence ':/!' is reserved for modifiers to what
+ is matched. ':/!-foo' performs a negative match, while ':/!!foo' matches a
+ literal '!' character, followed by 'foo'. Any other sequence beginning with
+ ':/!' is reserved for now.
'<rev>:<path>', e.g. 'HEAD:README', ':README', 'master:./README'::
A suffix ':' followed by a path names the blob or tree
`argv_array_clear`::
Free all memory associated with the array and return it to the
initial, empty state.
+
+`argv_array_detach`::
+ Disconnect the `argv` member from the `argv_array` struct and
+ return it. The caller is responsible for freeing the memory used
+ by the array, and by the strings it references. After detaching,
+ the `argv_array` is in a reinitialized state and can be pushed
+ into again.
The proxy to use for curl (http, https, ftp, etc.) URLs.
+`http_proxy_authmethod`::
+
+ The method used for authenticating against `http_proxy`.
+
struct remotes can be found by name with remote_get(), and iterated
through with for_each_remote(). remote_get(NULL) will return the
default remote, given the current branch and configuration.
absent.
Git supports ssh, git, http, and https protocols (in addition, ftp,
-and ftps can be used for fetching and rsync can be used for fetching
-and pushing, but these are inefficient and deprecated; do not use
-them).
+and ftps can be used for fetching, but this is inefficient and
+deprecated; do not use it).
The native transport (i.e. git:// URL) does no authentication and
should be used with caution on unsecured networks.
- git://host.xz{startsb}:port{endsb}/path/to/repo.git/
- http{startsb}s{endsb}://host.xz{startsb}:port{endsb}/path/to/repo.git/
- ftp{startsb}s{endsb}://host.xz{startsb}:port{endsb}/path/to/repo.git/
-- rsync://host.xz/path/to/repo.git/
An alternative scp-like syntax may also be used with the ssh protocol:
instaweb is lighttpd.
See the file gitweb/INSTALL in the Git source tree and
-linkgit:gitweb[1] for instructions on details setting up a permament
+linkgit:gitweb[1] for detailed instructions on setting up a permanent
installation with a CGI or Perl capable server.
[[how-to-get-a-git-repository-with-minimal-history]]
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v2.7.2
+DEF_VER=v2.8.0-rc0
LF='
'
ALL_LDFLAGS = $(LDFLAGS)
STRIP ?= strip
+# Extra compiler warnings for people working on Git itself; enable by
+# passing DEVELOPER=1 to make or by adding "DEVELOPER=1" to config.mak.
+ifdef DEVELOPER
+CFLAGS += -Werror \
+ -Wdeclaration-after-statement \
+ -Wno-format-zero-length \
+ -Wold-style-definition \
+ -Woverflow \
+ -Wpointer-arith \
+ -Wstrict-prototypes \
+ -Wunused \
+ -Wvla
+endif
+
# Create as necessary, replace existing, make ranlib unneeded.
ARFLAGS = rcs
TEST_PROGRAMS_NEED_X += test-dump-cache-tree
TEST_PROGRAMS_NEED_X += test-dump-split-index
TEST_PROGRAMS_NEED_X += test-dump-untracked-cache
+TEST_PROGRAMS_NEED_X += test-fake-ssh
TEST_PROGRAMS_NEED_X += test-genrandom
TEST_PROGRAMS_NEED_X += test-hashmap
TEST_PROGRAMS_NEED_X += test-index-version
export DEFAULT_EDITOR DEFAULT_PAGER
+.PHONY: doc man html info pdf
doc:
$(MAKE) -C Documentation all
$(LOCALIZED_PERL)
mv $@+ $@
+.PHONY: pot
pot: po/git.pot
POFILES := $(wildcard po/*.po)
install_bindir_programs := $(patsubst %,%$X,$(BINDIR_PROGRAMS_NEED_X)) $(BINDIR_PROGRAMS_NO_X)
+.PHONY: profile-install profile-fast-install
profile-install: profile
$(MAKE) install
done && \
./check_bindir "z$$bindir" "z$$execdir" "$$bindir/git-add$X"
+.PHONY: install-gitweb install-doc install-man install-html install-info install-pdf
+.PHONY: quick-install-doc quick-install-man quick-install-html
install-gitweb:
$(MAKE) -C gitweb install
htmldocs = git-htmldocs-$(GIT_VERSION)
manpages = git-manpages-$(GIT_VERSION)
+.PHONY: dist-doc distclean
dist-doc:
$(RM) -r .doc-tmp-dir
mkdir .doc-tmp-dir
ALL_COMMANDS += gitk
ALL_COMMANDS += gitweb
ALL_COMMANDS += git-gui git-citool
+
+.PHONY: check-docs
check-docs::
@(for v in $(ALL_COMMANDS); \
do \
### Test suite coverage testing
#
.PHONY: coverage coverage-clean coverage-compile coverage-test coverage-report
+.PHONY: coverage-untested-functions cover_db cover_db_html
.PHONY: coverage-clean-results
coverage:
+++ /dev/null
-////////////////////////////////////////////////////////////////
-
- Git - the stupid content tracker
-
-////////////////////////////////////////////////////////////////
-
-"git" can mean anything, depending on your mood.
-
- - random three-letter combination that is pronounceable, and not
- actually used by any common UNIX command. The fact that it is a
- mispronunciation of "get" may or may not be relevant.
- - stupid. contemptible and despicable. simple. Take your pick from the
- dictionary of slang.
- - "global information tracker": you're in a good mood, and it actually
- works for you. Angels sing, and a light suddenly fills the room.
- - "goddamn idiotic truckload of sh*t": when it breaks
-
-Git is a fast, scalable, distributed revision control system with an
-unusually rich command set that provides both high-level operations
-and full access to internals.
-
-Git is an Open Source project covered by the GNU General Public
-License version 2 (some parts of it are under different licenses,
-compatible with the GPLv2). It was originally written by Linus
-Torvalds with help of a group of hackers around the net.
-
-Please read the file INSTALL for installation instructions.
-
-See Documentation/gittutorial.txt to get started, then see
-Documentation/giteveryday.txt for a useful minimum set of commands, and
-Documentation/git-commandname.txt for documentation of each command.
-If git has been correctly installed, then the tutorial can also be
-read with "man gittutorial" or "git help tutorial", and the
-documentation of each command with "man git-commandname" or "git help
-commandname".
-
-CVS users may also want to read Documentation/gitcvs-migration.txt
-("man gitcvs-migration" or "git help cvs-migration" if git is
-installed).
-
-Many Git online resources are accessible from http://git-scm.com/
-including full documentation and Git related tools.
-
-The user discussion and development of Git take place on the Git
-mailing list -- everyone is welcome to post bug reports, feature
-requests, comments and patches to git@vger.kernel.org (read
-Documentation/SubmittingPatches for instructions on patch submission).
-To subscribe to the list, send an email with just "subscribe git" in
-the body to majordomo@vger.kernel.org. The mailing list archives are
-available at http://news.gmane.org/gmane.comp.version-control.git/,
-http://marc.info/?l=git and other archival sites.
-
-The maintainer frequently sends the "What's cooking" reports that
-list the current status of various development topics to the mailing
-list. The discussion following them give a good reference for
-project status, development direction and remaining tasks.
--- /dev/null
+Git - fast, scalable, distributed revision control system
+=========================================================
+
+Git is a fast, scalable, distributed revision control system with an
+unusually rich command set that provides both high-level operations
+and full access to internals.
+
+Git is an Open Source project covered by the GNU General Public
+License version 2 (some parts of it are under different licenses,
+compatible with the GPLv2). It was originally written by Linus
+Torvalds with the help of a group of hackers around the net.
+
+Please read the file [INSTALL][] for installation instructions.
+
+Many Git online resources are accessible from http://git-scm.com/
+including full documentation and Git related tools.
+
+See [Documentation/gittutorial.txt][] to get started, then see
+[Documentation/giteveryday.txt][] for a useful minimum set of commands, and
+[Documentation/git-commandname.txt][] for documentation of each command.
+If git has been correctly installed, then the tutorial can also be
+read with "man gittutorial" or "git help tutorial", and the
+documentation of each command with "man git-commandname" or "git help
+commandname".
+
+CVS users may also want to read [Documentation/gitcvs-migration.txt][]
+("man gitcvs-migration" or "git help cvs-migration" if git is
+installed).
+
+The user discussion and development of Git take place on the Git
+mailing list -- everyone is welcome to post bug reports, feature
+requests, comments and patches to git@vger.kernel.org (read
+[Documentation/SubmittingPatches][] for instructions on patch submission).
+To subscribe to the list, send an email with just "subscribe git" in
+the body to majordomo@vger.kernel.org. The mailing list archives are
+available at http://news.gmane.org/gmane.comp.version-control.git/,
+http://marc.info/?l=git and other archival sites.
+
+The maintainer frequently sends the "What's cooking" reports that
+list the current status of various development topics to the mailing
+list. The discussion following them gives a good reference for
+project status, development direction and remaining tasks.
+
+The name "git" was given by Linus Torvalds when he wrote the very
+first version. He described the tool as "the stupid content tracker"
+and the name as (depending on your mood):
+
+ - random three-letter combination that is pronounceable, and not
+ actually used by any common UNIX command. The fact that it is a
+ mispronunciation of "get" may or may not be relevant.
+ - stupid. contemptible and despicable. simple. Take your pick from the
+ dictionary of slang.
+ - "global information tracker": you're in a good mood, and it actually
+ works for you. Angels sing, and a light suddenly fills the room.
+ - "goddamn idiotic truckload of sh*t": when it breaks
+
+[INSTALL]: INSTALL
+[Documentation/gittutorial.txt]: Documentation/gittutorial.txt
+[Documentation/giteveryday.txt]: Documentation/giteveryday.txt
+[Documentation/git-commandname.txt]: Documentation/git-commandname.txt
+[Documentation/gitcvs-migration.txt]: Documentation/gitcvs-migration.txt
+[Documentation/SubmittingPatches]: Documentation/SubmittingPatches
-Documentation/RelNotes/2.7.2.txt
\ No newline at end of file
+Documentation/RelNotes/2.8.0.txt
\ No newline at end of file
int src, dst, count = 0, size = 16;
char quoted = 0;
- *argv = xmalloc(sizeof(**argv) * size);
+ ALLOC_ARRAY(*argv, size);
/* split alias_string */
(*argv)[count++] = cmdline;
unsigned mode, int stage, struct archiver_context *c)
{
struct directory *d;
- size_t len = base->len + 1 + strlen(filename) + 1;
- d = xmalloc(sizeof(*d) + len);
+ size_t len = st_add4(base->len, 1, strlen(filename), 1);
+ d = xmalloc(st_add(sizeof(*d), len));
d->up = c->bottom;
d->baselen = base->len;
d->mode = mode;
}
argv_array_init(array);
}
+
+const char **argv_array_detach(struct argv_array *array)
+{
+ if (array->argv == empty_argv)
+ return xcalloc(1, sizeof(const char *));
+ else {
+ const char **ret = array->argv;
+ argv_array_init(array);
+ return ret;
+ }
+}
void argv_array_pushv(struct argv_array *, const char **);
void argv_array_pop(struct argv_array *);
void argv_array_clear(struct argv_array *);
+const char **argv_array_detach(struct argv_array *);
#endif /* ARGV_ARRAY_H */
if (invalid_attr_name(name, len))
return NULL;
- a = xmalloc(sizeof(*a) + len + 1);
- memcpy(a->name, name, len);
- a->name[len] = 0;
+ FLEX_ALLOC_MEM(a, name, name, len);
a->h = hval;
a->next = git_attr_hash[pos];
a->attr_nr = attr_nr++;
++count;
}
*num = count;
- *check = xmalloc(sizeof(**check) * count);
+ ALLOC_ARRAY(*check, count);
j = 0;
for (i = 0; i < attr_nr; i++) {
const char *value = check_all_attr[i].value;
if (!fp)
die_errno("Could not open file '%s'", filename);
- while (strbuf_getline(&str, fp, '\n') != EOF) {
+ while (strbuf_getline_lf(&str, fp) != EOF) {
strbuf_trim(&str);
if (sq_dequote_to_argv_array(str.buf, array))
die("Badly quoted content in file '%s': %s",
if (!fp)
return 0;
- if (strbuf_getline(&str, fp, '\n') != EOF)
+ if (strbuf_getline_lf(&str, fp) != EOF)
res = !strcmp(str.buf, oid_to_hex(oid));
strbuf_release(&str);
static struct commit **get_bad_and_good_commits(int *rev_nr)
{
- int len = 1 + good_revs.nr;
- struct commit **rev = xmalloc(len * sizeof(*rev));
+ struct commit **rev;
int i, n = 0;
+ ALLOC_ARRAY(rev, 1 + good_revs.nr);
rev[n++] = get_commit_reference(current_bad_oid->hash);
for (i = 0; i < good_revs.nr; i++)
rev[n++] = get_commit_reference(good_revs.sha1[i]);
strerror(errno));
}
} else {
- strbuf_getline(&str, fp, '\n');
+ strbuf_getline_lf(&str, fp);
*read_bad = strbuf_detach(&str, NULL);
- strbuf_getline(&str, fp, '\n');
+ strbuf_getline_lf(&str, fp);
*read_good = strbuf_detach(&str, NULL);
}
strbuf_release(&str);
return 0;
}
-void install_branch_config(int flag, const char *local, const char *origin, const char *remote)
+static const char tracking_advice[] =
+N_("\n"
+"After fixing the error cause you may try to fix up\n"
+"the remote tracking information by invoking\n"
+"\"git branch --set-upstream-to=%s%s%s\".");
+
+int install_branch_config(int flag, const char *local, const char *origin, const char *remote)
{
const char *shortname = NULL;
struct strbuf key = STRBUF_INIT;
&& !origin) {
warning(_("Not setting branch %s as its own upstream."),
local);
- return;
+ return 0;
}
strbuf_addf(&key, "branch.%s.remote", local);
- git_config_set(key.buf, origin ? origin : ".");
+ if (git_config_set_gently(key.buf, origin ? origin : ".") < 0)
+ goto out_err;
strbuf_reset(&key);
strbuf_addf(&key, "branch.%s.merge", local);
- git_config_set(key.buf, remote);
+ if (git_config_set_gently(key.buf, remote) < 0)
+ goto out_err;
if (rebasing) {
strbuf_reset(&key);
strbuf_addf(&key, "branch.%s.rebase", local);
- git_config_set(key.buf, "true");
+ if (git_config_set_gently(key.buf, "true") < 0)
+ goto out_err;
}
strbuf_release(&key);
local, remote);
}
}
+
+ return 0;
+
+out_err:
+ strbuf_release(&key);
+ error(_("Unable to write upstream branch configuration"));
+
+ advise(_(tracking_advice),
+ origin ? origin : "",
+ origin ? "/" : "",
+ shortname ? shortname : remote);
+
+ return -1;
}
/*
* to infer the settings for branch.<new_ref>.{remote,merge} from the
* config.
*/
-static int setup_tracking(const char *new_ref, const char *orig_ref,
- enum branch_track track, int quiet)
+static void setup_tracking(const char *new_ref, const char *orig_ref,
+ enum branch_track track, int quiet)
{
struct tracking tracking;
int config_flags = quiet ? 0 : BRANCH_CONFIG_VERBOSE;
memset(&tracking, 0, sizeof(tracking));
tracking.spec.dst = (char *)orig_ref;
if (for_each_remote(find_tracked_branch, &tracking))
- return 1;
+ return;
if (!tracking.matches)
switch (track) {
case BRANCH_TRACK_OVERRIDE:
break;
default:
- return 1;
+ return;
}
if (tracking.matches > 1)
- return error(_("Not tracking: ambiguous information for ref %s"),
- orig_ref);
+ die(_("Not tracking: ambiguous information for ref %s"),
+ orig_ref);
- install_branch_config(config_flags, new_ref, tracking.remote,
- tracking.src ? tracking.src : orig_ref);
+ if (install_branch_config(config_flags, new_ref, tracking.remote,
+ tracking.src ? tracking.src : orig_ref) < 0)
+ exit(-1);
free(tracking.src);
- return 0;
}
int read_branch_desc(struct strbuf *buf, const char *branch_name)
/*
* Configure local branch "local" as downstream to branch "remote"
* from remote "origin". Used by git branch --set-upstream.
+ * Returns 0 on success.
*/
#define BRANCH_CONFIG_VERBOSE 01
-extern void install_branch_config(int flag, const char *local, const char *origin, const char *remote);
+extern int install_branch_config(int flag, const char *local, const char *origin, const char *remote);
/*
* Read branch description
return !st.st_size;
}
-/**
- * Like strbuf_getline(), but treats both '\n' and "\r\n" as line terminators.
- */
-static int strbuf_getline_crlf(struct strbuf *sb, FILE *fp)
-{
- if (strbuf_getwholeline(sb, fp, '\n'))
- return EOF;
- if (sb->buf[sb->len - 1] == '\n') {
- strbuf_setlen(sb, sb->len - 1);
- if (sb->len > 0 && sb->buf[sb->len - 1] == '\r')
- strbuf_setlen(sb, sb->len - 1);
- }
- return 0;
-}
-
/**
* Returns the length of the first line of msg.
*/
struct strbuf sb = STRBUF_INIT;
const char *str;
- if (strbuf_getline(&sb, fp, '\n'))
+ if (strbuf_getline_lf(&sb, fp))
goto fail;
if (!skip_prefix(sb.buf, key, &str))
fp = xfopen(am_path(state, "rewritten"), "r");
- while (!strbuf_getline(&sb, fp, '\n')) {
+ while (!strbuf_getline_lf(&sb, fp)) {
unsigned char from_obj[GIT_SHA1_RAWSZ], to_obj[GIT_SHA1_RAWSZ];
if (sb.len != GIT_SHA1_HEXSZ * 2 + 1) {
if (regcomp(®ex, header_regex, REG_NOSUB | REG_EXTENDED))
die("invalid pattern: %s", header_regex);
- while (!strbuf_getline_crlf(&sb, fp)) {
+ while (!strbuf_getline(&sb, fp)) {
if (!sb.len)
break; /* End of header */
fp = xfopen(*paths, "r");
- while (!strbuf_getline_crlf(&l1, fp)) {
+ while (!strbuf_getline(&l1, fp)) {
if (l1.len)
break;
}
}
strbuf_reset(&l2);
- strbuf_getline_crlf(&l2, fp);
+ strbuf_getline(&l2, fp);
strbuf_reset(&l3);
- strbuf_getline_crlf(&l3, fp);
+ strbuf_getline(&l3, fp);
/*
* If the second line is empty and the third is a From, Author or Date
struct strbuf sb = STRBUF_INIT;
int subject_printed = 0;
- while (!strbuf_getline(&sb, in, '\n')) {
+ while (!strbuf_getline_lf(&sb, in)) {
const char *str;
if (str_isspace(sb.buf))
return error(_("could not open '%s' for reading: %s"), *paths,
strerror(errno));
- while (!strbuf_getline(&sb, fp, '\n')) {
+ while (!strbuf_getline_lf(&sb, fp)) {
if (*sb.buf == '#')
continue; /* skip comment lines */
{
struct strbuf sb = STRBUF_INIT;
- while (!strbuf_getline(&sb, in, '\n')) {
+ while (!strbuf_getline_lf(&sb, in)) {
const char *str;
if (skip_prefix(sb.buf, "# User ", &str))
/* Extract message and author information */
fp = xfopen(am_path(state, "info"), "r");
- while (!strbuf_getline(&sb, fp, '\n')) {
+ while (!strbuf_getline_lf(&sb, fp)) {
const char *x;
if (skip_prefix(sb.buf, "Subject: ", &x)) {
FILE *fp = xfopen(mail, "r");
const char *x;
- if (strbuf_getline(&sb, fp, '\n'))
+ if (strbuf_getline_lf(&sb, fp))
return -1;
if (!skip_prefix(sb.buf, "From ", &x))
if (!pager)
pager = "cat";
- argv_array_push(&cp.args, pager);
+ prepare_pager_args(&cp, pager);
argv_array_push(&cp.args, am_path(state, "patch"));
run_command(&cp);
}
insert_count = postimage->len;
/* Adjust the contents */
- result = xmalloc(img->len + insert_count - remove_count + 1);
+ result = xmalloc(st_add3(st_sub(img->len, remove_count), insert_count, 1));
memcpy(result, img->buf, applied_at);
memcpy(result + applied_at, postimage->buf, postimage->len);
memcpy(result + applied_at + postimage->len,
return 0;
}
-static int option_parse_z(const struct option *opt,
- const char *arg, int unset)
-{
- if (unset)
- line_termination = '\n';
- else
- line_termination = 0;
- return 0;
-}
-
static int option_parse_space_change(const struct option *opt,
const char *arg, int unset)
{
N_( "attempt three-way merge if a patch does not apply")),
OPT_FILENAME(0, "build-fake-ancestor", &fake_ancestor,
N_("build a temporary index based on embedded index information")),
- { OPTION_CALLBACK, 'z', NULL, NULL, NULL,
- N_("paths are separated with NUL character"),
- PARSE_OPT_NOARG, option_parse_z },
+ /* Think twice before adding "--nul" synonym to this */
+ OPT_SET_INT('z', NULL, &line_termination,
+ N_("paths are separated with NUL character"), '\0'),
OPT_INTEGER('C', NULL, &p_context,
N_("ensure at least <n> lines of context match")),
{ OPTION_CALLBACK, 0, "whitespace", &whitespace_option, N_("action"),
#include "line-range.h"
#include "line-log.h"
#include "dir.h"
+#include "progress.h"
static char blame_usage[] = N_("git blame [<options>] [<rev-opts>] [<rev>] [--] <file>");
static int xdl_opts;
static int abbrev = -1;
static int no_whole_file_rename;
+static int show_progress;
static struct date_mode blame_date_mode = { DATE_ISO8601 };
static size_t blame_date_width;
char path[FLEX_ARRAY];
};
+struct progress_info {
+ struct progress *progress;
+ int blamed_lines;
+};
+
static int diff_hunks(mmfile_t *file_a, mmfile_t *file_b, long ctxlen,
xdl_emit_hunk_consume_func_t hunk_func, void *cb_data)
{
static struct origin *make_origin(struct commit *commit, const char *path)
{
struct origin *o;
- size_t pathlen = strlen(path) + 1;
- o = xcalloc(1, sizeof(*o) + pathlen);
+ FLEX_ALLOC_STR(o, path, path);
o->commit = commit;
o->refcnt = 1;
o->next = commit->util;
commit->util = o;
- memcpy(o->path, path, pathlen); /* includes NUL */
return o;
}
* The blame_entry is found to be guilty for the range.
* Show it in incremental output.
*/
-static void found_guilty_entry(struct blame_entry *ent)
+static void found_guilty_entry(struct blame_entry *ent,
+ struct progress_info *pi)
{
if (incremental) {
struct origin *suspect = ent->suspect;
write_filename_info(suspect->path);
maybe_flush_or_die(stdout, "stdout");
}
+ pi->blamed_lines += ent->num_lines;
+ display_progress(pi->progress, pi->blamed_lines);
}
/*
{
struct rev_info *revs = sb->revs;
struct commit *commit = prio_queue_get(&sb->commits);
+ struct progress_info pi = { NULL, 0 };
+
+ if (show_progress)
+ pi.progress = start_progress_delay(_("Blaming lines"),
+ sb->num_lines, 50, 1);
while (commit) {
struct blame_entry *ent;
suspect->guilty = 1;
for (;;) {
struct blame_entry *next = ent->next;
- found_guilty_entry(ent);
+ found_guilty_entry(ent, &pi);
if (next) {
ent = next;
continue;
if (DEBUG) /* sanity */
sanity_check_refcnt(sb);
}
+
+ stop_progress(&pi.progress);
}
static const char *format_time(unsigned long time, const char *tz_str,
for (p = buf; p < end; p = get_next_line(p, end))
num++;
- sb->lineno = lineno = xmalloc(sizeof(*sb->lineno) * (num + 1));
+ ALLOC_ARRAY(sb->lineno, num + 1);
+ lineno = sb->lineno;
for (p = buf; p < end; p = get_next_line(p, end))
*lineno++ = p - buf;
OPT_BOOL('b', NULL, &blank_boundary, N_("Show blank SHA-1 for boundary commits (Default: off)")),
OPT_BOOL(0, "root", &show_root, N_("Do not treat root commits as boundaries (Default: off)")),
OPT_BOOL(0, "show-stats", &show_stats, N_("Show work cost statistics")),
+ OPT_BOOL(0, "progress", &show_progress, N_("Force progress reporting")),
OPT_BIT(0, "score-debug", &output_option, N_("Show output score for blame entries"), OUTPUT_SHOW_SCORE),
OPT_BIT('f', "show-name", &output_option, N_("Show original filename (Default: auto)"), OUTPUT_SHOW_NAME),
OPT_BIT('n', "show-number", &output_option, N_("Show original linenumber (Default: off)"), OUTPUT_SHOW_NUMBER),
save_commit_buffer = 0;
dashdash_pos = 0;
+ show_progress = -1;
parse_options_start(&ctx, argc, argv, prefix, options,
PARSE_OPT_KEEP_DASHDASH | PARSE_OPT_KEEP_ARGV0);
DIFF_OPT_CLR(&revs.diffopt, FOLLOW_RENAMES);
argc = parse_options_end(&ctx);
+ if (incremental || (output_option & OUTPUT_PORCELAIN)) {
+ if (show_progress > 0)
+ die("--progress can't be used with --incremental or porcelain formats");
+ show_progress = 0;
+ } else if (show_progress < 0)
+ show_progress = isatty(2);
+
if (0 < abbrev)
/* one more abbrev length is needed for the boundary commit */
abbrev++;
read_mailmap(&mailmap, NULL);
+ assign_blame(&sb, opt);
+
if (!incremental)
setup_pager();
- assign_blame(&sb, opt);
-
free(final_commit_name);
if (incremental)
static int edit_branch_description(const char *branch_name)
{
- int status;
struct strbuf buf = STRBUF_INIT;
struct strbuf name = STRBUF_INIT;
strbuf_stripspace(&buf, 1);
strbuf_addf(&name, "branch.%s.description", branch_name);
- status = git_config_set(name.buf, buf.len ? buf.buf : NULL);
+ git_config_set(name.buf, buf.len ? buf.buf : NULL);
strbuf_release(&name);
strbuf_release(&buf);
- return status;
+ return 0;
}
int cmd_branch(int argc, const char **argv, const char *prefix)
save_warning = warn_on_object_refname_ambiguity;
warn_on_object_refname_ambiguity = 0;
- while (strbuf_getline(&buf, stdin, '\n') != EOF) {
+ while (strbuf_getline(&buf, stdin) != EOF) {
if (data.split_on_whitespace) {
/*
* Split at first whitespace, tying off the beginning
static void check_attr_stdin_paths(const char *prefix, int cnt,
struct git_attr_check *check)
{
- struct strbuf buf, nbuf;
- int line_termination = nul_term_line ? 0 : '\n';
-
- strbuf_init(&buf, 0);
- strbuf_init(&nbuf, 0);
- while (strbuf_getline(&buf, stdin, line_termination) != EOF) {
- if (line_termination && buf.buf[0] == '"') {
- strbuf_reset(&nbuf);
- if (unquote_c_style(&nbuf, buf.buf, NULL))
+ struct strbuf buf = STRBUF_INIT;
+ struct strbuf unquoted = STRBUF_INIT;
+ strbuf_getline_fn getline_fn;
+
+ getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
+ while (getline_fn(&buf, stdin) != EOF) {
+ if (!nul_term_line && buf.buf[0] == '"') {
+ strbuf_reset(&unquoted);
+ if (unquote_c_style(&unquoted, buf.buf, NULL))
die("line is badly quoted");
- strbuf_swap(&buf, &nbuf);
+ strbuf_swap(&buf, &unquoted);
}
check_attr(prefix, cnt, check, buf.buf);
maybe_flush_or_die(stdout, "attribute to stdout");
}
strbuf_release(&buf);
- strbuf_release(&nbuf);
+ strbuf_release(&unquoted);
}
static NORETURN void error_with_usage(const char *msg)
static int check_ignore_stdin_paths(struct dir_struct *dir, const char *prefix)
{
- struct strbuf buf, nbuf;
+ struct strbuf buf = STRBUF_INIT;
+ struct strbuf unquoted = STRBUF_INIT;
char *pathspec[2] = { NULL, NULL };
- int line_termination = nul_term_line ? 0 : '\n';
+ strbuf_getline_fn getline_fn;
int num_ignored = 0;
- strbuf_init(&buf, 0);
- strbuf_init(&nbuf, 0);
- while (strbuf_getline(&buf, stdin, line_termination) != EOF) {
- if (line_termination && buf.buf[0] == '"') {
- strbuf_reset(&nbuf);
- if (unquote_c_style(&nbuf, buf.buf, NULL))
+ getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
+ while (getline_fn(&buf, stdin) != EOF) {
+ if (!nul_term_line && buf.buf[0] == '"') {
+ strbuf_reset(&unquoted);
+ if (unquote_c_style(&unquoted, buf.buf, NULL))
die("line is badly quoted");
- strbuf_swap(&buf, &nbuf);
+ strbuf_swap(&buf, &unquoted);
}
pathspec[0] = buf.buf;
num_ignored += check_ignore(dir, prefix,
maybe_flush_or_die(stdout, "check-ignore to stdout");
}
strbuf_release(&buf);
- strbuf_release(&nbuf);
+ strbuf_release(&unquoted);
return num_ignored;
}
if (use_stdin) {
struct strbuf buf = STRBUF_INIT;
- while (strbuf_getline(&buf, stdin, '\n') != EOF) {
+ while (strbuf_getline_lf(&buf, stdin) != EOF) {
check_mailmap(&mailmap, buf.buf);
maybe_flush_or_die(stdout, "stdout");
}
*/
static char *collapse_slashes(const char *refname)
{
- char *ret = xmalloc(strlen(refname) + 1);
+ char *ret = xmallocz(strlen(refname));
char ch;
char prev = '/';
char *cp = ret;
#include "parse-options.h"
#define CHECKOUT_ALL 4
-static int line_termination = '\n';
+static int nul_term_line;
static int checkout_stage; /* default to checkout stage0 */
static int to_tempfile;
static char topath[4][TEMPORARY_FILENAME_LENGTH + 1];
fputs(topath[checkout_stage], stdout);
putchar('\t');
- write_name_quoted_relative(name, prefix, stdout, line_termination);
+ write_name_quoted_relative(name, prefix, stdout,
+ nul_term_line ? '\0' : '\n');
for (i = 0; i < 4; i++) {
topath[i][0] = 0;
static struct lock_file lock_file;
-static int option_parse_u(const struct option *opt,
- const char *arg, int unset)
-{
- int *newfd = opt->value;
-
- state.refresh_cache = 1;
- state.istate = &the_index;
- if (*newfd < 0)
- *newfd = hold_locked_index(&lock_file, 1);
- return 0;
-}
-
-static int option_parse_z(const struct option *opt,
- const char *arg, int unset)
-{
- if (unset)
- line_termination = '\n';
- else
- line_termination = 0;
- return 0;
-}
-
-static int option_parse_prefix(const struct option *opt,
- const char *arg, int unset)
-{
- state.base_dir = arg;
- state.base_dir_len = strlen(arg);
- return 0;
-}
-
static int option_parse_stage(const struct option *opt,
const char *arg, int unset)
{
if ('1' <= ch && ch <= '3')
checkout_stage = arg[0] - '0';
else
- die("stage should be between 1 and 3 or all");
+ die(_("stage should be between 1 and 3 or all"));
}
return 0;
}
int read_from_stdin = 0;
int prefix_length;
int force = 0, quiet = 0, not_new = 0;
+ int index_opt = 0;
struct option builtin_checkout_index_options[] = {
OPT_BOOL('a', "all", &all,
N_("check out all files in the index")),
N_("no warning for existing files and files not in index")),
OPT_BOOL('n', "no-create", ¬_new,
N_("don't checkout new files")),
- { OPTION_CALLBACK, 'u', "index", &newfd, NULL,
- N_("update stat information in the index file"),
- PARSE_OPT_NOARG, option_parse_u },
- { OPTION_CALLBACK, 'z', NULL, NULL, NULL,
- N_("paths are separated with NUL character"),
- PARSE_OPT_NOARG, option_parse_z },
+ OPT_BOOL('u', "index", &index_opt,
+ N_("update stat information in the index file")),
+ OPT_BOOL('z', NULL, &nul_term_line,
+ N_("paths are separated with NUL character")),
OPT_BOOL(0, "stdin", &read_from_stdin,
N_("read list of paths from the standard input")),
OPT_BOOL(0, "temp", &to_tempfile,
N_("write the content to temporary files")),
- OPT_CALLBACK(0, "prefix", NULL, N_("string"),
- N_("when creating files, prepend <string>"),
- option_parse_prefix),
- OPT_CALLBACK(0, "stage", NULL, NULL,
+ OPT_STRING(0, "prefix", &state.base_dir, N_("string"),
+ N_("when creating files, prepend <string>")),
+ { OPTION_CALLBACK, 0, "stage", NULL, "1-3|all",
N_("copy out the files from named stage"),
- option_parse_stage),
+ PARSE_OPT_NONEG, option_parse_stage },
OPT_END()
};
usage_with_options(builtin_checkout_index_usage,
builtin_checkout_index_options);
git_config(git_default_config, NULL);
- state.base_dir = "";
prefix_length = prefix ? strlen(prefix) : 0;
if (read_cache() < 0) {
state.quiet = quiet;
state.not_new = not_new;
- if (state.base_dir_len || to_tempfile) {
- /* when --prefix is specified we do not
- * want to update cache.
- */
- if (state.refresh_cache) {
- rollback_lock_file(&lock_file);
- newfd = -1;
- }
- state.refresh_cache = 0;
+ if (!state.base_dir)
+ state.base_dir = "";
+ state.base_dir_len = strlen(state.base_dir);
+
+ /*
+ * when --prefix is specified we do not want to update cache.
+ */
+ if (index_opt && !state.base_dir_len && !to_tempfile) {
+ state.refresh_cache = 1;
+ state.istate = &the_index;
+ newfd = hold_locked_index(&lock_file, 1);
}
/* Check out named files first */
}
if (read_from_stdin) {
- struct strbuf buf = STRBUF_INIT, nbuf = STRBUF_INIT;
+ struct strbuf buf = STRBUF_INIT;
+ struct strbuf unquoted = STRBUF_INIT;
+ strbuf_getline_fn getline_fn;
if (all)
die("git checkout-index: don't mix '--all' and '--stdin'");
- while (strbuf_getline(&buf, stdin, line_termination) != EOF) {
+ getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
+ while (getline_fn(&buf, stdin) != EOF) {
char *p;
- if (line_termination && buf.buf[0] == '"') {
- strbuf_reset(&nbuf);
- if (unquote_c_style(&nbuf, buf.buf, NULL))
+ if (!nul_term_line && buf.buf[0] == '"') {
+ strbuf_reset(&unquoted);
+ if (unquote_c_style(&unquoted, buf.buf, NULL))
die("line is badly quoted");
- strbuf_swap(&buf, &nbuf);
+ strbuf_swap(&buf, &unquoted);
}
p = prefix_path(prefix, prefix_length, buf.buf);
checkout_file(p, prefix);
free(p);
}
- strbuf_release(&nbuf);
+ strbuf_release(&unquoted);
strbuf_release(&buf);
}
describe_detached_head(_("HEAD is now at"), new->commit);
}
} else if (new->path) { /* Switch branches. */
- create_symref("HEAD", new->path, msg.buf);
+ if (create_symref("HEAD", new->path, msg.buf) < 0)
+ die(_("unable to update HEAD"));
if (!opts->quiet) {
if (old->path && !strcmp(new->path, old->path)) {
if (opts->new_branch_force)
*/
int recover_with_dwim = dwim_new_local_branch_ok;
- if (check_filename(NULL, arg) && !has_dash_dash)
+ if (!has_dash_dash &&
+ (check_filename(NULL, arg) || !no_wildcard(arg)))
recover_with_dwim = 0;
/*
* Accept "git checkout foo" and "git checkout foo --"
int eof = 0;
int i;
- chosen = xmalloc(sizeof(int) * stuff->nr);
+ ALLOC_ARRAY(chosen, stuff->nr);
/* set chosen as uninitialized */
for (i = 0; i < stuff->nr; i++)
chosen[i] = -1;
clean_get_color(CLEAN_COLOR_RESET));
}
- if (strbuf_getline(&choice, stdin, '\n') != EOF) {
+ if (strbuf_getline_lf(&choice, stdin) != EOF) {
strbuf_trim(&choice);
} else {
eof = 1;
nr += chosen[i];
}
- result = xcalloc(nr + 1, sizeof(int));
+ result = xcalloc(st_add(nr, 1), sizeof(int));
for (i = 0; i < stuff->nr && j < nr; i++) {
if (chosen[i])
result[j++] = i;
clean_print_color(CLEAN_COLOR_PROMPT);
printf(_("Input ignore patterns>> "));
clean_print_color(CLEAN_COLOR_RESET);
- if (strbuf_getline(&confirm, stdin, '\n') != EOF)
+ if (strbuf_getline_lf(&confirm, stdin) != EOF)
strbuf_trim(&confirm);
else
putchar('\n');
qname = quote_path_relative(item->string, NULL, &buf);
/* TRANSLATORS: Make sure to keep [y/N] as is */
printf(_("Remove %s [y/N]? "), qname);
- if (strbuf_getline(&confirm, stdin, '\n') != EOF) {
+ if (strbuf_getline_lf(&confirm, stdin) != EOF) {
strbuf_trim(&confirm);
} else {
putchar('\n');
static char *option_upload_pack = "git-upload-pack";
static int option_verbosity;
static int option_progress = -1;
+static enum transport_family family;
static struct string_list option_config;
static struct string_list option_reference;
static int option_dissociate;
N_("separate git dir from working tree")),
OPT_STRING_LIST('c', "config", &option_config, N_("key=value"),
N_("set config inside the new repository")),
+ OPT_SET_INT('4', "ipv4", &family, N_("use IPv4 addresses only"),
+ TRANSPORT_FAMILY_IPV4),
+ OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
+ TRANSPORT_FAMILY_IPV6),
OPT_END()
};
strip_suffix_mem(start, &len, is_bundle ? ".bundle" : ".git");
if (!len || (len == 1 && *start == '/'))
- die("No directory name could be guessed.\n"
- "Please specify a directory on the command line");
+ die(_("No directory name could be guessed.\n"
+ "Please specify a directory on the command line"));
if (is_bare)
dir = xstrfmt("%.*s.git", (int)len, start);
FILE *in = fopen(src->buf, "r");
struct strbuf line = STRBUF_INIT;
- while (strbuf_getline(&line, in, '\n') != EOF) {
+ while (strbuf_getline(&line, in) != EOF) {
char *abs_path;
if (!line.len || line.buf[0] == '#')
continue;
struct strbuf head_ref = STRBUF_INIT;
strbuf_addstr(&head_ref, branch_top);
strbuf_addstr(&head_ref, "HEAD");
- create_symref(head_ref.buf,
- remote_head_points_at->peer_ref->name,
- msg);
+ if (create_symref(head_ref.buf,
+ remote_head_points_at->peer_ref->name,
+ msg) < 0)
+ die(_("unable to update %s"), head_ref.buf);
+ strbuf_release(&head_ref);
}
}
const char *head;
if (our && skip_prefix(our->name, "refs/heads/", &head)) {
/* Local default branch link */
- create_symref("HEAD", our->name, NULL);
+ if (create_symref("HEAD", our->name, NULL) < 0)
+ die(_("unable to update HEAD"));
if (!option_bare) {
update_ref(msg, "HEAD", our->old_oid.hash, NULL, 0,
UPDATE_REFS_DIE_ON_ERR);
static int write_one_config(const char *key, const char *value, void *data)
{
- return git_config_set_multivar(key, value ? value : "true", "^$", 0);
+ return git_config_set_multivar_gently(key, value ? value : "true", "^$", 0);
}
static void write_config(struct string_list *config)
for (i = 0; i < config->nr; i++) {
if (git_config_parse_parameter(config->items[i].string,
write_one_config, NULL) < 0)
- die("unable to write parameters to config file");
+ die(_("unable to write parameters to config file"));
}
}
remote = remote_get(option_origin);
transport = transport_get(remote, remote->url[0]);
transport_set_verbosity(transport, option_verbosity, option_progress);
+ transport->family = family;
path = get_repo_path(remote->url[0], &is_bundle);
is_local = option_local != 0 && path && !is_bundle;
die(_("--command must be the first argument"));
}
finalize_colopts(&colopts, -1);
- while (!strbuf_getline(&sb, stdin, '\n'))
+ while (!strbuf_getline(&sb, stdin))
string_list_append(&list, sb.buf);
print_columns(&list, colopts, &copts);
if (fp == NULL)
die_errno(_("could not open '%s' for reading"),
git_path_merge_head());
- while (strbuf_getline(&m, fp, '\n') != EOF) {
+ while (strbuf_getline_lf(&m, fp) != EOF) {
struct commit *parent;
parent = get_merge_parent(m.buf);
#include "color.h"
#include "parse-options.h"
#include "urlmatch.h"
+#include "quote.h"
static const char *const builtin_config_usage[] = {
N_("git config [<options>]"),
static const char *get_color_slot, *get_colorbool_slot;
static int end_null;
static int respect_includes = -1;
+static int show_origin;
#define ACTION_GET (1<<0)
#define ACTION_GET_ALL (1<<1)
OPT_BOOL('z', "null", &end_null, N_("terminate values with NUL byte")),
OPT_BOOL(0, "name-only", &omit_values, N_("show variable names only")),
OPT_BOOL(0, "includes", &respect_includes, N_("respect include directives on lookup")),
+ OPT_BOOL(0, "show-origin", &show_origin, N_("show origin of config (file, standard input, blob, command line)")),
OPT_END(),
};
usage_with_options(builtin_config_usage, builtin_config_options);
}
+static void show_config_origin(struct strbuf *buf)
+{
+ const char term = end_null ? '\0' : '\t';
+
+ strbuf_addstr(buf, current_config_origin_type());
+ strbuf_addch(buf, ':');
+ if (end_null)
+ strbuf_addstr(buf, current_config_name());
+ else
+ quote_c_style(current_config_name(), buf, NULL, 0);
+ strbuf_addch(buf, term);
+}
+
static int show_all_config(const char *key_, const char *value_, void *cb)
{
+ if (show_origin) {
+ struct strbuf buf = STRBUF_INIT;
+ show_config_origin(&buf);
+ /* Use fwrite as "buf" can contain \0's if "end_null" is set. */
+ fwrite(buf.buf, 1, buf.len, stdout);
+ strbuf_release(&buf);
+ }
if (!omit_values && value_)
printf("%s%c%s%c", key_, delim, value_, term);
else
static int format_config(struct strbuf *buf, const char *key_, const char *value_)
{
+ if (show_origin)
+ show_config_origin(buf);
if (show_keys)
strbuf_addstr(buf, key_);
if (!omit_values) {
static void check_write(void)
{
+ if (!given_config_source.file && !startup_info->have_repository)
+ die("not in a git directory");
+
if (given_config_source.use_stdin)
die("writing to stdin is not supported");
error("--name-only is only applicable to --list or --get-regexp");
usage_with_options(builtin_config_usage, builtin_config_options);
}
+
+ if (show_origin && !(actions &
+ (ACTION_GET|ACTION_GET_ALL|ACTION_GET_REGEXP|ACTION_LIST))) {
+ error("--show-origin is only applicable to --get, --get-all, "
+ "--get-regexp, and --list.");
+ usage_with_options(builtin_config_usage, builtin_config_options);
+ }
+
if (actions == ACTION_LIST) {
check_argc(argc, 0, 0);
if (git_config_with_options(show_all_config, NULL,
check_write();
check_argc(argc, 2, 2);
value = normalize_value(argv[0], argv[1]);
- ret = git_config_set_in_file(given_config_source.file, argv[0], value);
+ ret = git_config_set_in_file_gently(given_config_source.file, argv[0], value);
if (ret == CONFIG_NOTHING_SET)
error("cannot overwrite multiple values with a single value\n"
" Use a regexp, --add or --replace-all to change %s.", argv[0]);
check_write();
check_argc(argc, 2, 3);
value = normalize_value(argv[0], argv[1]);
- return git_config_set_multivar_in_file(given_config_source.file,
- argv[0], value, argv[2], 0);
+ return git_config_set_multivar_in_file_gently(given_config_source.file,
+ argv[0], value, argv[2], 0);
}
else if (actions == ACTION_ADD) {
check_write();
check_argc(argc, 2, 2);
value = normalize_value(argv[0], argv[1]);
- return git_config_set_multivar_in_file(given_config_source.file,
- argv[0], value,
- CONFIG_REGEX_NONE, 0);
+ return git_config_set_multivar_in_file_gently(given_config_source.file,
+ argv[0], value,
+ CONFIG_REGEX_NONE, 0);
}
else if (actions == ACTION_REPLACE_ALL) {
check_write();
check_argc(argc, 2, 3);
value = normalize_value(argv[0], argv[1]);
- return git_config_set_multivar_in_file(given_config_source.file,
- argv[0], value, argv[2], 1);
+ return git_config_set_multivar_in_file_gently(given_config_source.file,
+ argv[0], value, argv[2], 1);
}
else if (actions == ACTION_GET) {
check_argc(argc, 1, 2);
check_write();
check_argc(argc, 1, 2);
if (argc == 2)
- return git_config_set_multivar_in_file(given_config_source.file,
- argv[0], NULL, argv[1], 0);
+ return git_config_set_multivar_in_file_gently(given_config_source.file,
+ argv[0], NULL, argv[1], 0);
else
- return git_config_set_in_file(given_config_source.file,
- argv[0], NULL);
+ return git_config_set_in_file_gently(given_config_source.file,
+ argv[0], NULL);
}
else if (actions == ACTION_UNSET_ALL) {
check_write();
check_argc(argc, 1, 2);
- return git_config_set_multivar_in_file(given_config_source.file,
- argv[0], NULL, argv[1], 1);
+ return git_config_set_multivar_in_file_gently(given_config_source.file,
+ argv[0], NULL, argv[1], 1);
}
else if (actions == ACTION_RENAME_SECTION) {
int ret;
const char **refspecs_str;
int i;
- refspecs_str = xmalloc(sizeof(*refspecs_str) * refspecs_list.nr);
+ ALLOC_ARRAY(refspecs_str, refspecs_list.nr);
for (i = 0; i < refspecs_list.nr; i++)
refspecs_str[i] = refspecs_list.items[i].string;
"[--include-tag] [--upload-pack=<git-upload-pack>] [--depth=<n>] "
"[--no-progress] [--diag-url] [-v] [<host>:]<directory> [<refs>...]";
-static void add_sought_entry_mem(struct ref ***sought, int *nr, int *alloc,
- const char *name, int namelen)
+static void add_sought_entry(struct ref ***sought, int *nr, int *alloc,
+ const char *name)
{
- struct ref *ref = xcalloc(1, sizeof(*ref) + namelen + 1);
+ struct ref *ref;
struct object_id oid;
- const int chunksz = GIT_SHA1_HEXSZ + 1;
- if (namelen > chunksz && name[chunksz - 1] == ' ' &&
- !get_oid_hex(name, &oid)) {
- oidcpy(&ref->old_oid, &oid);
- name += chunksz;
- namelen -= chunksz;
- }
+ if (!get_oid_hex(name, &oid) && name[GIT_SHA1_HEXSZ] == ' ')
+ name += GIT_SHA1_HEXSZ + 1;
+ else
+ oidclr(&oid);
- memcpy(ref->name, name, namelen);
- ref->name[namelen] = '\0';
+ ref = alloc_ref(name);
+ oidcpy(&ref->old_oid, &oid);
(*nr)++;
ALLOC_GROW(*sought, *nr, *alloc);
(*sought)[*nr - 1] = ref;
}
-static void add_sought_entry(struct ref ***sought, int *nr, int *alloc,
- const char *string)
-{
- add_sought_entry_mem(sought, nr, alloc, string, strlen(string));
-}
-
int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
{
int i, ret;
else {
/* read from stdin one ref per line, until EOF */
struct strbuf line = STRBUF_INIT;
- while (strbuf_getline(&line, stdin, '\n') != EOF)
+ while (strbuf_getline_lf(&line, stdin) != EOF)
add_sought_entry(&sought, &nr_sought, &alloc_sought, line.buf);
strbuf_release(&line);
}
static int all, append, dry_run, force, keep, multiple, update_head_ok, verbosity;
static int progress = -1, recurse_submodules = RECURSE_SUBMODULES_DEFAULT;
static int tags = TAGS_DEFAULT, unshallow, update_shallow;
+static int max_children = 1;
+static enum transport_family family;
static const char *depth;
static const char *upload_pack;
static struct strbuf default_rla = STRBUF_INIT;
N_("fetch all tags and associated objects"), TAGS_SET),
OPT_SET_INT('n', NULL, &tags,
N_("do not fetch all tags (--no-tags)"), TAGS_UNSET),
+ OPT_INTEGER('j', "jobs", &max_children,
+ N_("number of submodules fetched in parallel")),
OPT_BOOL('p', "prune", &prune,
N_("prune remote-tracking branches no longer on remote")),
{ OPTION_CALLBACK, 0, "recurse-submodules", NULL, N_("on-demand"),
N_("accept refs that update .git/shallow")),
{ OPTION_CALLBACK, 0, "refmap", NULL, N_("refmap"),
N_("specify fetch refmap"), PARSE_OPT_NONEG, parse_refmap_arg },
+ OPT_SET_INT('4', "ipv4", &family, N_("use IPv4 addresses only"),
+ TRANSPORT_FAMILY_IPV4),
+ OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
+ TRANSPORT_FAMILY_IPV6),
OPT_END()
};
struct transport *transport;
transport = transport_get(remote, NULL);
transport_set_verbosity(transport, verbosity, progress);
+ transport->family = family;
if (upload_pack)
set_option(transport, TRANS_OPT_UPLOADPACK, upload_pack);
if (keep)
git_config(get_remote_group, &g);
if (list->nr == prev_nr) {
- struct remote *remote;
- if (!remote_is_configured(name))
+ struct remote *remote = remote_get(name);
+ if (!remote_is_configured(remote))
return 0;
- remote = remote_get(name);
string_list_append(list, remote->name);
}
return 1;
if (argc > 0) {
int j = 0;
int i;
- refs = xcalloc(argc + 1, sizeof(const char *));
+ refs = xcalloc(st_add(argc, 1), sizeof(const char *));
for (i = 0; i < argc; i++) {
if (!strcmp(argv[i], "tag")) {
i++;
result = fetch_populated_submodules(&options,
submodule_prefix,
recurse_submodules,
- verbosity < 0);
+ verbosity < 0,
+ max_children);
argv_array_clear(&options);
}
NULL
};
-static int use_threads = 1;
+#define GREP_NUM_THREADS_DEFAULT 8
+static int num_threads;
#ifndef NO_PTHREADS
-#define THREADS 8
-static pthread_t threads[THREADS];
+static pthread_t *threads;
/* We use one producer thread and THREADS consumer
* threads. The producer adds struct work_items to 'todo' and the
static inline void grep_lock(void)
{
- if (use_threads)
+ if (num_threads)
pthread_mutex_lock(&grep_mutex);
}
static inline void grep_unlock(void)
{
- if (use_threads)
+ if (num_threads)
pthread_mutex_unlock(&grep_mutex);
}
strbuf_init(&todo[i].out, 0);
}
- for (i = 0; i < ARRAY_SIZE(threads); i++) {
+ threads = xcalloc(num_threads, sizeof(*threads));
+ for (i = 0; i < num_threads; i++) {
int err;
struct grep_opt *o = grep_opt_dup(opt);
o->output = strbuf_out;
pthread_cond_broadcast(&cond_add);
grep_unlock();
- for (i = 0; i < ARRAY_SIZE(threads); i++) {
+ for (i = 0; i < num_threads; i++) {
void *h;
pthread_join(threads[i], &h);
hit |= (int) (intptr_t) h;
}
+ free(threads);
+
pthread_mutex_destroy(&grep_mutex);
pthread_mutex_destroy(&grep_read_mutex);
pthread_mutex_destroy(&grep_attr_mutex);
int st = grep_config(var, value, cb);
if (git_color_default_config(var, value, cb) < 0)
st = -1;
+
+ if (!strcmp(var, "grep.threads")) {
+ num_threads = git_config_int(var, value);
+ if (num_threads < 0)
+ die(_("invalid number of threads specified (%d) for %s"),
+ num_threads, var);
+ }
+
return st;
}
}
#ifndef NO_PTHREADS
- if (use_threads) {
+ if (num_threads) {
add_work(opt, GREP_SOURCE_SHA1, pathbuf.buf, path, sha1);
strbuf_release(&pathbuf);
return 0;
strbuf_addstr(&buf, filename);
#ifndef NO_PTHREADS
- if (use_threads) {
+ if (num_threads) {
add_work(opt, GREP_SOURCE_FILE, buf.buf, filename, filename);
strbuf_release(&buf);
return 0;
static void run_pager(struct grep_opt *opt, const char *prefix)
{
struct string_list *path_list = opt->output_priv;
- const char **argv = xmalloc(sizeof(const char *) * (path_list->nr + 1));
+ struct child_process child = CHILD_PROCESS_INIT;
int i, status;
for (i = 0; i < path_list->nr; i++)
- argv[i] = path_list->items[i].string;
- argv[path_list->nr] = NULL;
+ argv_array_push(&child.args, path_list->items[i].string);
+ child.dir = prefix;
+ child.use_shell = 1;
- status = run_command_v_opt_cd_env(argv, RUN_USING_SHELL, prefix, NULL);
+ status = run_command(&child);
if (status)
exit(status);
- free(argv);
}
static int grep_cache(struct grep_opt *opt, const struct pathspec *pathspec, int cached)
patterns = from_stdin ? stdin : fopen(arg, "r");
if (!patterns)
die_errno(_("cannot open '%s'"), arg);
- while (strbuf_getline(&sb, patterns, '\n') == 0) {
+ while (strbuf_getline(&sb, patterns) == 0) {
/* ignore empty line like grep does */
if (sb.len == 0)
continue;
N_("show <n> context lines before matches")),
OPT_INTEGER('A', "after-context", &opt.post_context,
N_("show <n> context lines after matches")),
+ OPT_INTEGER(0, "threads", &num_threads,
+ N_("use <n> worker threads")),
OPT_NUMBER_CALLBACK(&opt, N_("shortcut for -C NUM"),
context_callback),
OPT_BOOL('p', "show-function", &opt.funcname,
PARSE_OPT_STOP_AT_NON_OPTION);
grep_commit_pattern_type(pattern_type_arg, &opt);
- if (use_index && !startup_info->have_repository)
- /* die the same way as if we did it at the beginning */
- setup_git_directory();
+ if (use_index && !startup_info->have_repository) {
+ int fallback = 0;
+ git_config_get_bool("grep.fallbacktonoindex", &fallback);
+ if (fallback)
+ use_index = 0;
+ else
+ /* die the same way as if we did it at the beginning */
+ setup_git_directory();
+ }
/*
* skip a -- separator; we know it cannot be
opt.output_priv = &path_list;
opt.output = append_path;
string_list_append(&path_list, show_in_pager);
- use_threads = 0;
}
if (!opt.pattern_list)
}
#ifndef NO_PTHREADS
- if (list.nr || cached || online_cpus() == 1)
- use_threads = 0;
+ if (list.nr || cached || show_in_pager)
+ num_threads = 0;
+ else if (num_threads == 0)
+ num_threads = GREP_NUM_THREADS_DEFAULT;
+ else if (num_threads < 0)
+ die(_("invalid number of threads specified (%d)"), num_threads);
#else
- use_threads = 0;
+ num_threads = 0;
#endif
#ifndef NO_PTHREADS
- if (use_threads) {
+ if (num_threads) {
if (!(opt.name_only || opt.unmatch_name_only || opt.count)
&& (opt.pre_context || opt.post_context ||
opt.file_break || opt.funcbody))
hit = grep_objects(&opt, &pathspec, &list);
}
- if (use_threads)
+ if (num_threads)
hit |= wait_all();
if (hit && show_in_pager)
run_pager(&opt, prefix);
static void hash_stdin_paths(const char *type, int no_filters, unsigned flags,
int literally)
{
- struct strbuf buf = STRBUF_INIT, nbuf = STRBUF_INIT;
+ struct strbuf buf = STRBUF_INIT;
+ struct strbuf unquoted = STRBUF_INIT;
- while (strbuf_getline(&buf, stdin, '\n') != EOF) {
+ while (strbuf_getline(&buf, stdin) != EOF) {
if (buf.buf[0] == '"') {
- strbuf_reset(&nbuf);
- if (unquote_c_style(&nbuf, buf.buf, NULL))
+ strbuf_reset(&unquoted);
+ if (unquote_c_style(&unquoted, buf.buf, NULL))
die("line is badly quoted");
- strbuf_swap(&buf, &nbuf);
+ strbuf_swap(&buf, &unquoted);
}
hash_object(buf.buf, type, no_filters ? NULL : buf.buf, flags,
literally);
}
strbuf_release(&buf);
- strbuf_release(&nbuf);
+ strbuf_release(&unquoted);
}
int cmd_hash_object(int argc, const char **argv, const char *prefix)
static void add_man_viewer(const char *name)
{
struct man_viewer_list **p = &man_viewer_list;
- size_t len = strlen(name);
while (*p)
p = &((*p)->next);
- *p = xcalloc(1, (sizeof(**p) + len + 1));
- memcpy((*p)->name, name, len); /* NUL-terminated by xcalloc */
+ FLEX_ALLOC_STR(*p, name, name);
}
static int supported_man_viewer(const char *name, size_t len)
size_t len,
const char *value)
{
- struct man_viewer_info_list *new = xcalloc(1, sizeof(*new) + len + 1);
-
- memcpy(new->name, name, len); /* NUL-terminated by xcalloc */
+ struct man_viewer_info_list *new;
+ FLEX_ALLOC_MEM(new, name, name, len);
new->info = xstrdup(value);
new->next = man_viewer_info_list;
man_viewer_info_list = new;
* before deltas depending on them, a good heuristic is to start
* resolving deltas in the same order as their position in the pack.
*/
- sorted_by_pos = xmalloc(nr_ref_deltas * sizeof(*sorted_by_pos));
+ ALLOC_ARRAY(sorted_by_pos, nr_ref_deltas);
for (i = 0; i < nr_ref_deltas; i++)
sorted_by_pos[i] = &ref_deltas[i];
qsort(sorted_by_pos, nr_ref_deltas, sizeof(*sorted_by_pos), delta_pos_compare);
if (!(off & 0x80000000))
continue;
off = off & 0x7fffffff;
+ check_pack_index_ptr(p, &idx2[off * 2]);
if (idx2[off * 2])
continue;
/*
curr_pack = open_pack_file(pack_name);
parse_pack_header();
- objects = xcalloc(nr_objects + 1, sizeof(struct object_entry));
+ objects = xcalloc(st_add(nr_objects, 1), sizeof(struct object_entry));
if (show_stat)
- obj_stat = xcalloc(nr_objects + 1, sizeof(struct object_stat));
+ obj_stat = xcalloc(st_add(nr_objects, 1), sizeof(struct object_stat));
ofs_deltas = xcalloc(nr_objects, sizeof(struct ofs_delta_entry));
parse_pack_objects(pack_sha1);
resolve_deltas();
if (show_stat)
show_pack_info(stat_only);
- idx_objects = xmalloc((nr_objects) * sizeof(struct pack_idx_entry *));
+ ALLOC_ARRAY(idx_objects, nr_objects);
for (i = 0; i < nr_objects; i++)
idx_objects[i] = &objects[i].idx;
curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_sha1);
git_config_set("core.bare", "false");
/* allow template config file to override the default */
if (log_all_ref_updates == -1)
- git_config_set("core.logallrefupdates", "true");
+ git_config_set("core.logallrefupdates", "true");
if (needs_work_tree_config(get_git_dir(), work_tree))
git_config_set("core.worktree", work_tree);
}
#include "trailer.h"
static const char * const git_interpret_trailers_usage[] = {
- N_("git interpret-trailers [--trim-empty] [(--trailer <token>[(=|:)<value>])...] [<file>...]"),
+ N_("git interpret-trailers [--in-place] [--trim-empty] [(--trailer <token>[(=|:)<value>])...] [<file>...]"),
NULL
};
int cmd_interpret_trailers(int argc, const char **argv, const char *prefix)
{
+ int in_place = 0;
int trim_empty = 0;
struct string_list trailers = STRING_LIST_INIT_DUP;
struct option options[] = {
+ OPT_BOOL(0, "in-place", &in_place, N_("edit files in place")),
OPT_BOOL(0, "trim-empty", &trim_empty, N_("trim empty trailers")),
OPT_STRING_LIST(0, "trailer", &trailers, N_("trailer"),
N_("trailer(s) to add")),
if (argc) {
int i;
for (i = 0; i < argc; i++)
- process_trailers(argv[i], trim_empty, &trailers);
- } else
- process_trailers(NULL, trim_empty, &trailers);
+ process_trailers(argv[i], in_place, trim_empty, &trailers);
+ } else {
+ if (in_place)
+ die(_("no input file given for in-place editing"));
+ process_trailers(NULL, in_place, trim_empty, &trailers);
+ }
string_list_clear(&trailers, 0);
static const char *signature = git_version_string;
static const char *signature_file;
static int config_cover_letter;
+static const char *config_output_directory;
enum {
COVER_UNSET,
config_cover_letter = git_config_bool(var, value) ? COVER_ON : COVER_OFF;
return 0;
}
+ if (!strcmp(var, "format.outputdirectory"))
+ return git_config_string(&config_output_directory, var, value);
return git_log_config(var, value, cb);
}
if (rev.show_notes)
init_display_notes(&rev.notes_opt);
+ if (!output_directory && !use_stdout)
+ output_directory = config_output_directory;
+
if (!use_stdout)
output_directory = set_outdir(prefix, output_directory);
else
static int show_valid_bit;
static int line_terminator = '\n';
static int debug_mode;
+static int show_eol;
static const char *prefix;
static int max_prefix_len;
static const char *tag_skip_worktree = "";
static const char *tag_resolve_undo = "";
+static void write_eolinfo(const struct cache_entry *ce, const char *path)
+{
+ if (!show_eol)
+ return;
+ else {
+ struct stat st;
+ const char *i_txt = "";
+ const char *w_txt = "";
+ const char *a_txt = get_convert_attr_ascii(path);
+ if (ce && S_ISREG(ce->ce_mode))
+ i_txt = get_cached_convert_stats_ascii(ce->name);
+ if (!lstat(path, &st) && S_ISREG(st.st_mode))
+ w_txt = get_wt_convert_stats_ascii(path);
+ printf("i/%-5s w/%-5s attr/%-17s\t", i_txt, w_txt, a_txt);
+ }
+}
+
static void write_name(const char *name)
{
/*
return;
fputs(tag, stdout);
+ write_eolinfo(NULL, ent->name);
write_name(ent->name);
}
find_unique_abbrev(ce->sha1,abbrev),
ce_stage(ce));
}
+ write_eolinfo(ce, ce->name);
write_name(ce->name);
if (debug_mode) {
const struct stat_data *sd = &ce->ce_stat_data;
NULL
};
-static int option_parse_z(const struct option *opt,
- const char *arg, int unset)
-{
- line_terminator = unset ? '\n' : '\0';
-
- return 0;
-}
-
static int option_parse_exclude(const struct option *opt,
const char *arg, int unset)
{
struct exclude_list *el;
struct string_list exclude_list = STRING_LIST_INIT_NODUP;
struct option builtin_ls_files_options[] = {
- { OPTION_CALLBACK, 'z', NULL, NULL, NULL,
- N_("paths are separated with NUL character"),
- PARSE_OPT_NOARG, option_parse_z },
+ /* Think twice before adding "--nul" synonym to this */
+ OPT_SET_INT('z', NULL, &line_terminator,
+ N_("paths are separated with NUL character"), '\0'),
OPT_BOOL('t', NULL, &show_tag,
N_("identify the file status with tags")),
OPT_BOOL('v', NULL, &show_valid_bit,
OPT_BIT(0, "directory", &dir.flags,
N_("show 'other' directories' names only"),
DIR_SHOW_OTHER_DIRECTORIES),
+ OPT_BOOL(0, "eol", &show_eol, N_("show line endings of files")),
OPT_NEGBIT(0, "empty-directory", &dir.flags,
N_("don't show empty directories"),
DIR_HIDE_EMPTY_DIRECTORIES),
#include "transport.h"
#include "remote.h"
-static const char ls_remote_usage[] =
-"git ls-remote [--heads] [--tags] [--upload-pack=<exec>]\n"
-" [-q | --quiet] [--exit-code] [--get-url] [<repository> [<refs>...]]";
+static const char * const ls_remote_usage[] = {
+ N_("git ls-remote [--heads] [--tags] [--refs] [--upload-pack=<exec>]\n"
+ " [-q | --quiet] [--exit-code] [--get-url]\n"
+ " [--symref] [<repository> [<refs>...]]"),
+ NULL
+};
/*
* Is there one among the list of patterns that match the tail part
int cmd_ls_remote(int argc, const char **argv, const char *prefix)
{
- int i;
const char *dest = NULL;
unsigned flags = 0;
int get_url = 0;
int quiet = 0;
int status = 0;
+ int show_symref_target = 0;
const char *uploadpack = NULL;
const char **pattern = NULL;
struct transport *transport;
const struct ref *ref;
- if (argc == 2 && !strcmp("-h", argv[1]))
- usage(ls_remote_usage);
+ struct option options[] = {
+ OPT__QUIET(&quiet, N_("do not print remote URL")),
+ OPT_STRING(0, "upload-pack", &uploadpack, N_("exec"),
+ N_("path of git-upload-pack on the remote host")),
+ { OPTION_STRING, 0, "exec", &uploadpack, N_("exec"),
+ N_("path of git-upload-pack on the remote host"),
+ PARSE_OPT_HIDDEN },
+ OPT_BIT('t', "tags", &flags, N_("limit to tags"), REF_TAGS),
+ OPT_BIT('h', "heads", &flags, N_("limit to heads"), REF_HEADS),
+ OPT_BIT(0, "refs", &flags, N_("do not show peeled tags"), REF_NORMAL),
+ OPT_BOOL(0, "get-url", &get_url,
+ N_("take url.<base>.insteadOf into account")),
+ OPT_SET_INT(0, "exit-code", &status,
+ N_("exit with exit code 2 if no matching refs are found"), 2),
+ OPT_BOOL(0, "symref", &show_symref_target,
+ N_("show underlying ref in addition to the object pointed by it")),
+ OPT_END()
+ };
- for (i = 1; i < argc; i++) {
- const char *arg = argv[i];
+ argc = parse_options(argc, argv, prefix, options, ls_remote_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ dest = argv[0];
- if (*arg == '-') {
- if (starts_with(arg, "--upload-pack=")) {
- uploadpack = arg + 14;
- continue;
- }
- if (starts_with(arg, "--exec=")) {
- uploadpack = arg + 7;
- continue;
- }
- if (!strcmp("--tags", arg) || !strcmp("-t", arg)) {
- flags |= REF_TAGS;
- continue;
- }
- if (!strcmp("--heads", arg) || !strcmp("-h", arg)) {
- flags |= REF_HEADS;
- continue;
- }
- if (!strcmp("--refs", arg)) {
- flags |= REF_NORMAL;
- continue;
- }
- if (!strcmp("--quiet", arg) || !strcmp("-q", arg)) {
- quiet = 1;
- continue;
- }
- if (!strcmp("--get-url", arg)) {
- get_url = 1;
- continue;
- }
- if (!strcmp("--exit-code", arg)) {
- /* return this code if no refs are reported */
- status = 2;
- continue;
- }
- usage(ls_remote_usage);
- }
- dest = arg;
- i++;
- break;
+ if (argc > 1) {
+ int i;
+ pattern = xcalloc(argc, sizeof(const char *));
+ for (i = 1; i < argc; i++)
+ pattern[i - 1] = xstrfmt("*/%s", argv[i]);
}
- if (argv[i]) {
- int j;
- pattern = xcalloc(argc - i + 1, sizeof(const char *));
- for (j = i; j < argc; j++)
- pattern[j - i] = xstrfmt("*/%s", argv[j]);
- }
remote = remote_get(dest);
if (!remote) {
if (dest)
continue;
if (!tail_match(pattern, ref->name))
continue;
- printf("%s %s\n", oid_to_hex(&ref->old_oid), ref->name);
+ if (show_symref_target && ref->symref)
+ printf("ref: %s\t%s\n", ref->symref, ref->name);
+ printf("%s\t%s\n", oid_to_hex(&ref->old_oid), ref->name);
status = 0; /* we found something */
}
return status;
if (argc < 2)
usage_with_options(merge_base_usage, options);
- rev = xmalloc(argc * sizeof(*rev));
+ ALLOC_ARRAY(rev, argc);
while (argc-- > 0)
rev[rev_nr++] = get_commit_reference(*argv++);
return show_merge_base(rev, rev_nr, show_all);
static char *traverse_path(const struct traverse_info *info, const struct name_entry *n)
{
- char *path = xmalloc(traverse_path_len(info, n) + 1);
+ char *path = xmallocz(traverse_path_len(info, n));
return make_traverse_path(path, info, n);
}
if (!branch->merge_nr)
die(_("No default upstream defined for the current branch."));
- args = xcalloc(branch->merge_nr + 1, sizeof(char *));
+ args = xcalloc(st_add(branch->merge_nr, 1), sizeof(char *));
for (i = 0; i < branch->merge_nr; i++) {
if (!branch->merge[i]->dst)
die(_("No remote-tracking branch for %s from %s"),
static void append_to_tree(unsigned mode, unsigned char *sha1, char *path)
{
struct treeent *ent;
- int len = strlen(path);
+ size_t len = strlen(path);
if (strchr(path, '/'))
die("path %s contains slash", path);
- ALLOC_GROW(entries, used + 1, alloc);
- ent = entries[used++] = xmalloc(sizeof(**entries) + len + 1);
+ FLEX_ALLOC_MEM(ent, name, path, len);
ent->mode = mode;
ent->len = len;
hashcpy(ent->sha1, sha1);
- memcpy(ent->name, path, len+1);
+
+ ALLOC_GROW(entries, used + 1, alloc);
+ entries[used++] = ent;
}
static int ent_compare(const void *a_, const void *b_)
NULL
};
-static void mktree_line(char *buf, size_t len, int line_termination, int allow_missing)
+static void mktree_line(char *buf, size_t len, int nul_term_line, int allow_missing)
{
char *ptr, *ntr;
unsigned mode;
*ntr++ = 0; /* now at the beginning of SHA1 */
path = ntr + 41; /* at the beginning of name */
- if (line_termination && path[0] == '"') {
+ if (!nul_term_line && path[0] == '"') {
struct strbuf p_uq = STRBUF_INIT;
if (unquote_c_style(&p_uq, path, NULL))
die("invalid quoting");
{
struct strbuf sb = STRBUF_INIT;
unsigned char sha1[20];
- int line_termination = '\n';
+ int nul_term_line = 0;
int allow_missing = 0;
int is_batch_mode = 0;
int got_eof = 0;
+ strbuf_getline_fn getline_fn;
const struct option option[] = {
- OPT_SET_INT('z', NULL, &line_termination, N_("input is NUL terminated"), '\0'),
+ OPT_BOOL('z', NULL, &nul_term_line, N_("input is NUL terminated")),
OPT_SET_INT( 0 , "missing", &allow_missing, N_("allow missing objects"), 1),
OPT_SET_INT( 0 , "batch", &is_batch_mode, N_("allow creation of more than one tree"), 1),
OPT_END()
};
ac = parse_options(ac, av, prefix, option, mktree_usage, 0);
+ getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
while (!got_eof) {
while (1) {
- if (strbuf_getline(&sb, stdin, line_termination) == EOF) {
+ if (getline_fn(&sb, stdin) == EOF) {
got_eof = 1;
break;
}
break;
die("input format error: (blank line only valid in batch mode)");
}
- mktree_line(sb.buf, sb.len, line_termination, allow_missing);
+ mktree_line(sb.buf, sb.len, nul_term_line, allow_missing);
}
if (is_batch_mode && got_eof && used < 1) {
/*
int count, unsigned flags)
{
int i;
- const char **result = xmalloc((count + 1) * sizeof(const char *));
+ const char **result;
+ ALLOC_ARRAY(result, count + 1);
memcpy(result, pathspec, count * sizeof(const char *));
result[count] = NULL;
for (i = 0; i < count; i++) {
static const char *add_slash(const char *path)
{
- int len = strlen(path);
+ size_t len = strlen(path);
if (path[len - 1] != '/') {
- char *with_slash = xmalloc(len + 2);
+ char *with_slash = xmalloc(st_add(len, 2));
memcpy(with_slash, path, len);
with_slash[len++] = '/';
with_slash[len] = 0;
if (!c)
return 0;
} else {
- init_notes(NULL, NULL, NULL, 0);
+ init_notes(NULL, NULL, NULL, NOTES_INIT_WRITABLE);
t = &default_notes_tree;
}
- while (strbuf_getline(&buf, stdin, '\n') != EOF) {
+ while (strbuf_getline_lf(&buf, stdin) != EOF) {
unsigned char from_obj[20], to_obj[20];
struct strbuf **split;
int err;
return ret;
}
-static struct notes_tree *init_notes_check(const char *subcommand)
+static struct notes_tree *init_notes_check(const char *subcommand,
+ int flags)
{
struct notes_tree *t;
- init_notes(NULL, NULL, NULL, 0);
+ const char *ref;
+ init_notes(NULL, NULL, NULL, flags);
t = &default_notes_tree;
- if (!starts_with(t->ref, "refs/notes/"))
+ ref = (flags & NOTES_INIT_WRITABLE) ? t->update_ref : t->ref;
+ if (!starts_with(ref, "refs/notes/"))
die("Refusing to %s notes in %s (outside of refs/notes/)",
- subcommand, t->ref);
+ subcommand, ref);
return t;
}
usage_with_options(git_notes_list_usage, options);
}
- t = init_notes_check("list");
+ t = init_notes_check("list", 0);
if (argc) {
if (get_sha1(argv[0], object))
die(_("Failed to resolve '%s' as a valid ref."), argv[0]);
if (get_sha1(object_ref, object))
die(_("Failed to resolve '%s' as a valid ref."), object_ref);
- t = init_notes_check("add");
+ t = init_notes_check("add", NOTES_INIT_WRITABLE);
note = get_note(t, object);
if (note) {
if (get_sha1(object_ref, object))
die(_("Failed to resolve '%s' as a valid ref."), object_ref);
- t = init_notes_check("copy");
+ t = init_notes_check("copy", NOTES_INIT_WRITABLE);
note = get_note(t, object);
if (note) {
if (get_sha1(object_ref, object))
die(_("Failed to resolve '%s' as a valid ref."), object_ref);
- t = init_notes_check(argv[0]);
+ t = init_notes_check(argv[0], NOTES_INIT_WRITABLE);
note = get_note(t, object);
prepare_note_data(object, &d, edit ? note : NULL);
if (get_sha1(object_ref, object))
die(_("Failed to resolve '%s' as a valid ref."), object_ref);
- t = init_notes_check("show");
+ t = init_notes_check("show", 0);
note = get_note(t, object);
if (!note)
o.local_ref = default_notes_ref();
strbuf_addstr(&remote_ref, argv[0]);
- expand_notes_ref(&remote_ref);
+ expand_loose_notes_ref(&remote_ref);
o.remote_ref = remote_ref.buf;
- t = init_notes_check("merge");
+ t = init_notes_check("merge", NOTES_INIT_WRITABLE);
if (strategy) {
if (parse_notes_merge_strategy(strategy, &o.strategy)) {
argc = parse_options(argc, argv, prefix, options,
git_notes_remove_usage, 0);
- t = init_notes_check("remove");
+ t = init_notes_check("remove", NOTES_INIT_WRITABLE);
if (!argc && !from_stdin) {
retval = remove_one_note(t, "HEAD", flag);
usage_with_options(git_notes_prune_usage, options);
}
- t = init_notes_check("prune");
+ t = init_notes_check("prune", NOTES_INIT_WRITABLE);
prune_notes(t, (verbose ? NOTES_PRUNE_VERBOSE : 0) |
(show_only ? NOTES_PRUNE_VERBOSE|NOTES_PRUNE_DRYRUN : 0) );
{
unsigned int i, wo_end, last_untagged;
- struct object_entry **wo = xmalloc(to_pack.nr_objects * sizeof(*wo));
+ struct object_entry **wo;
struct object_entry *objects = to_pack.objects;
for (i = 0; i < to_pack.nr_objects; i++) {
* Give the objects in the original recency order until
* we see a tagged tip.
*/
+ ALLOC_ARRAY(wo, to_pack.nr_objects);
for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
if (objects[i].tagged)
break;
if (progress > pack_to_stdout)
progress_state = start_progress(_("Writing objects"), nr_result);
- written_list = xmalloc(to_pack.nr_objects * sizeof(*written_list));
+ ALLOC_ARRAY(written_list, to_pack.nr_objects);
write_order = compute_write_order();
do {
if (!to_pack.nr_objects || !window || !depth)
return;
- delta_list = xmalloc(to_pack.nr_objects * sizeof(*delta_list));
+ ALLOC_ARRAY(delta_list, to_pack.nr_objects);
nr_deltas = n = 0;
for (i = 0; i < to_pack.nr_objects; i++) {
index_commit_for_bitmap(commit);
}
-static void show_object(struct object *obj,
- const struct name_path *path, const char *last,
- void *data)
+static void show_object(struct object *obj, const char *name, void *data)
{
- char *name = path_name(path, last);
-
add_preferred_base_object(name);
add_object_entry(obj->oid.hash, obj->type, name, 0);
obj->flags |= OBJECT_ADDED;
-
- /*
- * We will have generated the hash from the name,
- * but not saved a pointer to it - we can free it
- */
- free((char *)name);
}
static void show_edge(struct commit *commit)
}
static void record_recent_object(struct object *obj,
- const struct name_path *path,
- const char *last,
+ const char *name,
void *data)
{
sha1_array_append(&recent_objects, obj->oid.hash);
free_nodes = free_nodes->next;
} else {
int i = 1;
- new = xmalloc(sizeof(struct llist_item) * BLKSIZE);
+ ALLOC_ARRAY(new, BLKSIZE);
for (; i < BLKSIZE; i++)
llist_item_put(&new[i]);
}
REBASE_INVALID = -1,
REBASE_FALSE = 0,
REBASE_TRUE,
- REBASE_PRESERVE
+ REBASE_PRESERVE,
+ REBASE_INTERACTIVE
};
/**
return REBASE_TRUE;
else if (!strcmp(value, "preserve"))
return REBASE_PRESERVE;
+ else if (!strcmp(value, "interactive"))
+ return REBASE_INTERACTIVE;
if (fatal)
die(_("Invalid value for %s: %s"), key, value);
static char *opt_tags;
static char *opt_prune;
static char *opt_recurse_submodules;
+static char *max_children;
static int opt_dry_run;
static char *opt_keep;
static char *opt_depth;
/* Options passed to git-merge or git-rebase */
OPT_GROUP(N_("Options related to merging")),
{ OPTION_CALLBACK, 'r', "rebase", &opt_rebase,
- "false|true|preserve",
+ "false|true|preserve|interactive",
N_("incorporate changes by rebasing rather than merging"),
PARSE_OPT_OPTARG, parse_opt_rebase },
OPT_PASSTHRU('n', NULL, &opt_diffstat, NULL,
N_("on-demand"),
N_("control recursive fetching of submodules"),
PARSE_OPT_OPTARG),
+ OPT_PASSTHRU('j', "jobs", &max_children, N_("n"),
+ N_("number of submodules pulled in parallel"),
+ PARSE_OPT_OPTARG),
OPT_BOOL(0, "dry-run", &opt_dry_run,
N_("dry run")),
OPT_PASSTHRU('k', "keep", &opt_keep, NULL,
if (!(fp = fopen(filename, "r")))
die_errno(_("could not open '%s' for reading"), filename);
- while (strbuf_getline(&sb, fp, '\n') != EOF) {
+ while (strbuf_getline_lf(&sb, fp) != EOF) {
if (get_sha1_hex(sb.buf, sha1))
continue; /* invalid line: does not start with SHA1 */
if (starts_with(sb.buf + GIT_SHA1_HEXSZ, "\tnot-for-merge\t"))
argv_array_push(&args, opt_prune);
if (opt_recurse_submodules)
argv_array_push(&args, opt_recurse_submodules);
+ if (max_children)
+ argv_array_push(&args, max_children);
if (opt_dry_run)
argv_array_push(&args, "--dry-run");
if (opt_keep)
/* Options passed to git-rebase */
if (opt_rebase == REBASE_PRESERVE)
argv_array_push(&args, "--preserve-merges");
+ else if (opt_rebase == REBASE_INTERACTIVE)
+ argv_array_push(&args, "--interactive");
if (opt_diffstat)
argv_array_push(&args, opt_diffstat);
argv_array_pushv(&args, opt_strategies.argv);
static int verbosity;
static int progress = -1;
static int recurse_submodules = RECURSE_SUBMODULES_DEFAULT;
+static enum transport_family family;
static struct push_cas_option cas;
add_refspec(branch->name);
}
-static char warn_unspecified_push_default_msg[] =
-N_("push.default is unset; its implicit value has changed in\n"
- "Git 2.0 from 'matching' to 'simple'. To squelch this message\n"
- "and maintain the traditional behavior, use:\n"
- "\n"
- " git config --global push.default matching\n"
- "\n"
- "To squelch this message and adopt the new behavior now, use:\n"
- "\n"
- " git config --global push.default simple\n"
- "\n"
- "When push.default is set to 'matching', git will push local branches\n"
- "to the remote branches that already exist with the same name.\n"
- "\n"
- "Since Git 2.0, Git defaults to the more conservative 'simple'\n"
- "behavior, which only pushes the current branch to the corresponding\n"
- "remote branch that 'git pull' uses to update the current branch.\n"
- "\n"
- "See 'git help config' and search for 'push.default' for further information.\n"
- "(the 'simple' mode was introduced in Git 1.7.11. Use the similar mode\n"
- "'current' instead of 'simple' if you sometimes use older versions of Git)");
-
-static void warn_unspecified_push_default_configuration(void)
-{
- static int warn_once;
-
- if (warn_once++)
- return;
- warning("%s\n", _(warn_unspecified_push_default_msg));
-}
-
static int is_workflow_triangular(struct remote *remote)
{
struct remote *fetch_remote = remote_get(NULL);
break;
case PUSH_DEFAULT_UNSPECIFIED:
- warn_unspecified_push_default_configuration();
- /* fallthru */
-
case PUSH_DEFAULT_SIMPLE:
if (triangular)
setup_push_current(remote, branch);
unsigned int reject_reasons;
transport_set_verbosity(transport, verbosity, progress);
+ transport->family = family;
if (receivepack)
transport_set_option(transport,
OPT_BIT( 0 , "all", &flags, N_("push all refs"), TRANSPORT_PUSH_ALL),
OPT_BIT( 0 , "mirror", &flags, N_("mirror all refs"),
(TRANSPORT_PUSH_MIRROR|TRANSPORT_PUSH_FORCE)),
- OPT_BOOL( 0, "delete", &deleterefs, N_("delete refs")),
+ OPT_BOOL('d', "delete", &deleterefs, N_("delete refs")),
OPT_BOOL( 0 , "tags", &tags, N_("push tags (can't be used with --all or --mirror)")),
OPT_BIT('n' , "dry-run", &flags, N_("dry run"), TRANSPORT_PUSH_DRY_RUN),
OPT_BIT( 0, "porcelain", &flags, N_("machine-readable output"), TRANSPORT_PUSH_PORCELAIN),
0, "signed", &push_cert, "yes|no|if-asked", N_("GPG sign the push"),
PARSE_OPT_OPTARG, option_parse_push_signed },
OPT_BIT(0, "atomic", &flags, N_("request atomic transaction on remote side"), TRANSPORT_PUSH_ATOMIC),
+ OPT_SET_INT('4', "ipv4", &family, N_("use IPv4 addresses only"),
+ TRANSPORT_FAMILY_IPV4),
+ OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
+ TRANSPORT_FAMILY_IPV6),
OPT_END()
};
{
struct command *cmd;
int argc;
- const char **argv;
struct child_process proc = CHILD_PROCESS_INIT;
const char *hook;
if (!argc || !hook)
return;
- argv = xmalloc(sizeof(*argv) * (2 + argc));
- argv[0] = hook;
-
- for (argc = 1, cmd = commands; cmd; cmd = cmd->next) {
+ argv_array_push(&proc.args, hook);
+ for (cmd = commands; cmd; cmd = cmd->next) {
if (cmd->error_string || cmd->did_not_exist)
continue;
- argv[argc] = xstrdup(cmd->ref_name);
- argc++;
+ argv_array_push(&proc.args, cmd->ref_name);
}
- argv[argc] = NULL;
proc.no_stdin = 1;
proc.stdout_to_stderr = 1;
proc.err = use_sideband ? -1 : 0;
- proc.argv = argv;
if (!start_command(&proc)) {
if (use_sideband)
refname = line + 82;
reflen = linelen - 82;
- cmd = xcalloc(1, sizeof(struct command) + reflen + 1);
+ cmd = xcalloc(1, st_add3(sizeof(struct command), reflen, 1));
hashcpy(cmd->old_sha1, old_sha1);
hashcpy(cmd->new_sha1, new_sha1);
memcpy(cmd->ref_name, refname, reflen);
{
int i, j, k, bitmap_size = (si->ref->nr + 31) / 32;
- si->used_shallow = xmalloc(sizeof(*si->used_shallow) *
- si->shallow->nr);
+ ALLOC_ARRAY(si->used_shallow, si->shallow->nr);
assign_shallow_commits_to_refs(si, si->used_shallow, NULL);
si->need_reachability_test =
return;
}
- ref_status = xmalloc(sizeof(*ref_status) * ref->nr);
+ ALLOC_ARRAY(ref_status, ref->nr);
assign_shallow_commits_to_refs(si, NULL, ref_status);
for (cmd = commands; cmd; cmd = cmd->next) {
if (is_null_sha1(cmd->new_sha1))
{
struct collected_reflog *e;
struct collect_reflog_cb *cb = cb_data;
- size_t namelen = strlen(ref);
- e = xmalloc(sizeof(*e) + namelen + 1);
+ FLEX_ALLOC_STR(e, reflog, ref);
hashcpy(e->sha1, oid->hash);
- memcpy(e->reflog, ref, namelen + 1);
ALLOC_GROW(cb->e, cb->nr + 1, cb->alloc);
cb->e[cb->nr++] = e;
return 0;
struct reflog_expire_cfg *next;
unsigned long expire_total;
unsigned long expire_unreachable;
- size_t len;
char pattern[FLEX_ARRAY];
} *reflog_expire_cfg, **reflog_expire_cfg_tail;
reflog_expire_cfg_tail = &reflog_expire_cfg;
for (ent = reflog_expire_cfg; ent; ent = ent->next)
- if (ent->len == len &&
- !memcmp(ent->pattern, pattern, len))
+ if (!strncmp(ent->pattern, pattern, len) &&
+ ent->pattern[len] == '\0')
return ent;
- ent = xcalloc(1, (sizeof(*ent) + len));
- memcpy(ent->pattern, pattern, len);
- ent->len = len;
+ FLEX_ALLOC_MEM(ent, pattern, pattern, len);
*reflog_expire_cfg_tail = ent;
reflog_expire_cfg_tail = &(ent->next);
return ent;
}
}
-/* Should be enough... */
-#define MAXARGUMENTS 256
-
-static const char **parse_argv(const char *arg, const char *service)
+static void parse_argv(struct argv_array *out, const char *arg, const char *service)
{
- int arguments = 0;
- int i;
- const char **ret;
- char *temparray[MAXARGUMENTS + 1];
-
while (*arg) {
- char *expanded;
- if (arguments == MAXARGUMENTS)
- die("remote-ext command has too many arguments");
- expanded = strip_escapes(arg, service, &arg);
+ char *expanded = strip_escapes(arg, service, &arg);
if (expanded)
- temparray[arguments++] = expanded;
+ argv_array_push(out, expanded);
+ free(expanded);
}
-
- ret = xmalloc((arguments + 1) * sizeof(char *));
- for (i = 0; i < arguments; i++)
- ret[i] = temparray[i];
- ret[arguments] = NULL;
- return ret;
}
static void send_git_request(int stdin_fd, const char *serv, const char *repo,
child.in = -1;
child.out = -1;
child.err = 0;
- child.argv = parse_argv(arg, service);
+ parse_argv(&child.args, arg, service);
if (start_command(&child) < 0)
die("Can't run specified command");
#define MIRROR_PUSH 2
#define MIRROR_BOTH (MIRROR_FETCH|MIRROR_PUSH)
-static int add_branch(const char *key, const char *branchname,
- const char *remotename, int mirror, struct strbuf *tmp)
+static void add_branch(const char *key, const char *branchname,
+ const char *remotename, int mirror, struct strbuf *tmp)
{
strbuf_reset(tmp);
strbuf_addch(tmp, '+');
else
strbuf_addf(tmp, "refs/heads/%s:refs/remotes/%s/%s",
branchname, remotename, branchname);
- return git_config_set_multivar(key, tmp->buf, "^$", 0);
+ git_config_set_multivar(key, tmp->buf, "^$", 0);
}
static const char mirror_advice[] =
url = argv[1];
remote = remote_get(name);
- if (remote && (remote->url_nr > 1 ||
- (strcmp(name, remote->url[0]) &&
- strcmp(url, remote->url[0])) ||
- remote->fetch_refspec_nr))
+ if (remote_is_configured(remote))
die(_("remote %s already exists."), name);
strbuf_addf(&buf2, "refs/heads/test:refs/remotes/%s/test", name);
die(_("'%s' is not a valid remote name"), name);
strbuf_addf(&buf, "remote.%s.url", name);
- if (git_config_set(buf.buf, url))
- return 1;
+ git_config_set(buf.buf, url);
if (!mirror || mirror & MIRROR_FETCH) {
strbuf_reset(&buf);
if (track.nr == 0)
string_list_append(&track, "*");
for (i = 0; i < track.nr; i++) {
- if (add_branch(buf.buf, track.items[i].string,
- name, mirror, &buf2))
- return 1;
+ add_branch(buf.buf, track.items[i].string,
+ name, mirror, &buf2);
}
}
if (mirror & MIRROR_PUSH) {
strbuf_reset(&buf);
strbuf_addf(&buf, "remote.%s.mirror", name);
- if (git_config_set(buf.buf, "true"))
- return 1;
+ git_config_set(buf.buf, "true");
}
if (fetch_tags != TAGS_DEFAULT) {
strbuf_reset(&buf);
strbuf_addf(&buf, "remote.%s.tagopt", name);
- if (git_config_set(buf.buf,
- fetch_tags == TAGS_SET ? "--tags" : "--no-tags"))
- return 1;
+ git_config_set(buf.buf,
+ fetch_tags == TAGS_SET ? "--tags" : "--no-tags");
}
if (fetch && fetch_remote(name))
struct branch_info {
char *remote_name;
struct string_list merge;
- int rebase;
+ enum { NO_REBASE, NORMAL_REBASE, INTERACTIVE_REBASE } rebase;
};
static struct string_list branch_list;
if (v >= 0)
info->rebase = v;
else if (!strcmp(value, "preserve"))
- info->rebase = 1;
+ info->rebase = NORMAL_REBASE;
+ else if (!strcmp(value, "interactive"))
+ info->rebase = INTERACTIVE_REBASE;
}
}
return 0;
strbuf_addf(&buf, "remote.%s.url", remote->name);
for (i = 0; i < remote->url_nr; i++)
- if (git_config_set_multivar(buf.buf, remote->url[i], "^$", 0))
- return error(_("Could not append '%s' to '%s'"),
- remote->url[i], buf.buf);
+ git_config_set_multivar(buf.buf, remote->url[i], "^$", 0);
strbuf_reset(&buf);
strbuf_addf(&buf, "remote.%s.push", remote->name);
for (i = 0; i < remote->push_refspec_nr; i++)
- if (git_config_set_multivar(buf.buf, remote->push_refspec[i], "^$", 0))
- return error(_("Could not append '%s' to '%s'"),
- remote->push_refspec[i], buf.buf);
+ git_config_set_multivar(buf.buf, remote->push_refspec[i], "^$", 0);
strbuf_reset(&buf);
strbuf_addf(&buf, "remote.%s.fetch", remote->name);
for (i = 0; i < remote->fetch_refspec_nr; i++)
- if (git_config_set_multivar(buf.buf, remote->fetch_refspec[i], "^$", 0))
- return error(_("Could not append '%s' to '%s'"),
- remote->fetch_refspec[i], buf.buf);
+ git_config_set_multivar(buf.buf, remote->fetch_refspec[i], "^$", 0);
if (remote->origin == REMOTE_REMOTES)
unlink_or_warn(git_path("remotes/%s", remote->name));
else if (remote->origin == REMOTE_BRANCHES)
unlink_or_warn(git_path("branches/%s", remote->name));
+
return 0;
}
rename.remote_branches = &remote_branches;
oldremote = remote_get(rename.old);
- if (!oldremote)
+ if (!remote_is_configured(oldremote))
die(_("No such remote: %s"), rename.old);
if (!strcmp(rename.old, rename.new) && oldremote->origin != REMOTE_CONFIG)
return migrate_file(oldremote);
newremote = remote_get(rename.new);
- if (newremote && (newremote->url_nr > 1 || newremote->fetch_refspec_nr))
+ if (remote_is_configured(newremote))
die(_("remote %s already exists."), rename.new);
strbuf_addf(&buf, "refs/heads/test:refs/remotes/%s/test", rename.new);
strbuf_reset(&buf);
strbuf_addf(&buf, "remote.%s.fetch", rename.new);
- if (git_config_set_multivar(buf.buf, NULL, NULL, 1))
- return error(_("Could not remove config section '%s'"), buf.buf);
+ git_config_set_multivar(buf.buf, NULL, NULL, 1);
strbuf_addf(&old_remote_context, ":refs/remotes/%s/", rename.old);
for (i = 0; i < oldremote->fetch_refspec_nr; i++) {
char *ptr;
"\tPlease update the configuration manually if necessary."),
buf2.buf);
- if (git_config_set_multivar(buf.buf, buf2.buf, "^$", 0))
- return error(_("Could not append '%s'"), buf.buf);
+ git_config_set_multivar(buf.buf, buf2.buf, "^$", 0);
}
read_branches();
if (info->remote_name && !strcmp(info->remote_name, rename.old)) {
strbuf_reset(&buf);
strbuf_addf(&buf, "branch.%s.remote", item->string);
- if (git_config_set(buf.buf, rename.new)) {
- return error(_("Could not set '%s'"), buf.buf);
- }
+ git_config_set(buf.buf, rename.new);
}
}
usage_with_options(builtin_remote_rm_usage, options);
remote = remote_get(argv[1]);
- if (!remote)
+ if (!remote_is_configured(remote))
die(_("No such remote: %s"), argv[1]);
known_remotes.to_delete = remote;
strbuf_reset(&buf);
strbuf_addf(&buf, "branch.%s.%s",
item->string, *k);
- if (git_config_set(buf.buf, NULL)) {
- strbuf_release(&buf);
- return -1;
- }
+ git_config_set(buf.buf, NULL);
}
}
}
printf(" %-*s ", show_info->width, item->string);
if (branch_info->rebase) {
- printf_ln(_("rebases onto remote %s"), merge->items[0].string);
+ printf_ln(branch_info->rebase == INTERACTIVE_REBASE ?
+ _("rebases interactively onto remote %s") :
+ _("rebases onto remote %s"), merge->items[0].string);
return 0;
} else if (show_info->any_rebase) {
printf_ln(_(" merges with remote %s"), merge->items[0].string);
static int remove_all_fetch_refspecs(const char *remote, const char *key)
{
- return git_config_set_multivar(key, NULL, NULL, 1);
+ return git_config_set_multivar_gently(key, NULL, NULL, 1);
}
-static int add_branches(struct remote *remote, const char **branches,
- const char *key)
+static void add_branches(struct remote *remote, const char **branches,
+ const char *key)
{
const char *remotename = remote->name;
int mirror = remote->mirror;
struct strbuf refspec = STRBUF_INIT;
for (; *branches; branches++)
- if (add_branch(key, *branches, remotename, mirror, &refspec)) {
- strbuf_release(&refspec);
- return 1;
- }
+ add_branch(key, *branches, remotename, mirror, &refspec);
strbuf_release(&refspec);
- return 0;
}
static int set_remote_branches(const char *remotename, const char **branches,
strbuf_addf(&key, "remote.%s.fetch", remotename);
- if (!remote_is_configured(remotename))
- die(_("No such remote '%s'"), remotename);
remote = remote_get(remotename);
+ if (!remote_is_configured(remote))
+ die(_("No such remote '%s'"), remotename);
if (!add_mode && remove_all_fetch_refspecs(remotename, key.buf)) {
strbuf_release(&key);
return 1;
}
- if (add_branches(remote, branches, key.buf)) {
- strbuf_release(&key);
- return 1;
- }
+ add_branches(remote, branches, key.buf);
strbuf_release(&key);
return 0;
remotename = argv[0];
- if (!remote_is_configured(remotename))
- die(_("No such remote '%s'"), remotename);
remote = remote_get(remotename);
+ if (!remote_is_configured(remote))
+ die(_("No such remote '%s'"), remotename);
url_nr = 0;
if (push_mode) {
if (delete_mode)
oldurl = newurl;
- if (!remote_is_configured(remotename))
- die(_("No such remote '%s'"), remotename);
remote = remote_get(remotename);
+ if (!remote_is_configured(remote))
+ die(_("No such remote '%s'"), remotename);
if (push_mode) {
strbuf_addf(&name_buf, "remote.%s.pushurl", remotename);
if ((!oldurl && !delete_mode) || add_mode) {
if (add_mode)
git_config_set_multivar(name_buf.buf, newurl,
- "^$", 0);
+ "^$", 0);
else
git_config_set(name_buf.buf, newurl);
strbuf_release(&name_buf);
+
return 0;
}
return ret;
out = xfdopen(cmd.out, "r");
- while (strbuf_getline(&line, out, '\n') != EOF) {
+ while (strbuf_getline_lf(&line, out) != EOF) {
if (line.len != 40)
die("repack: Expecting 40 character sha1 lines only from pack-objects.");
string_list_append(&names, line.buf);
free_commit_buffer(commit);
}
-static void finish_object(struct object *obj,
- const struct name_path *path, const char *name,
- void *cb_data)
+static void finish_object(struct object *obj, const char *name, void *cb_data)
{
struct rev_list_info *info = cb_data;
if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid))
parse_object(obj->oid.hash);
}
-static void show_object(struct object *obj,
- const struct name_path *path, const char *component,
- void *cb_data)
+static void show_object(struct object *obj, const char *name, void *cb_data)
{
struct rev_list_info *info = cb_data;
- finish_object(obj, path, component, cb_data);
+ finish_object(obj, name, cb_data);
if (info->flags & REV_LIST_QUIET)
return;
- show_object_with_name(stdout, obj, path, component);
+ show_object_with_name(stdout, obj, name);
}
static void show_edge(struct commit *commit)
/* get the usage up to the first line with a -- on it */
for (;;) {
- if (strbuf_getline(&sb, stdin, '\n') == EOF)
+ if (strbuf_getline(&sb, stdin) == EOF)
die("premature end of input");
ALLOC_GROW(usage, unb + 1, usz);
if (!strcmp("--", sb.buf)) {
}
/* parse: (<short>|<short>,<long>|<long>)[*=?!]*<arghint>? SP+ <help> */
- while (strbuf_getline(&sb, stdin, '\n') != EOF) {
+ while (strbuf_getline(&sb, stdin) != EOF) {
const char *s;
const char *help;
struct option *o;
continue;
}
if (!strcmp(arg, "--git-common-dir")) {
- puts(get_git_common_dir());
+ const char *pfx = prefix ? prefix : "";
+ puts(prefix_filename(pfx, strlen(pfx), get_git_common_dir()));
continue;
}
if (!strcmp(arg, "--resolve-git-dir")) {
argv_array_push(&all_refspecs, buf);
} else {
struct strbuf line = STRBUF_INIT;
- while (strbuf_getline(&line, stdin, '\n') != EOF)
+ while (strbuf_getline(&line, stdin) != EOF)
argv_array_push(&all_refspecs, line.buf);
strbuf_release(&line);
}
NULL
};
-static int compare_by_number(const void *a1, const void *a2)
+/*
+ * The util field of our string_list_items will contain one of two things:
+ *
+ * - if --summary is not in use, it will point to a string list of the
+ * oneline subjects assigned to this author
+ *
+ * - if --summary is in use, we don't need that list; we only need to know
+ * its size. So we abuse the pointer slot to store our integer counter.
+ *
+ * This macro accesses the latter.
+ */
+#define UTIL_TO_INT(x) ((intptr_t)(x)->util)
+
+static int compare_by_counter(const void *a1, const void *a2)
+{
+ const struct string_list_item *i1 = a1, *i2 = a2;
+ return UTIL_TO_INT(i2) - UTIL_TO_INT(i1);
+}
+
+static int compare_by_list(const void *a1, const void *a2)
{
const struct string_list_item *i1 = a1, *i2 = a2;
const struct string_list *l1 = i1->util, *l2 = i2->util;
const char *author,
const char *oneline)
{
- const char *dot3 = log->common_repo_prefix;
- char *buffer, *p;
struct string_list_item *item;
const char *mailbuf, *namebuf;
size_t namelen, maillen;
- const char *eol;
- struct strbuf subject = STRBUF_INIT;
struct strbuf namemailbuf = STRBUF_INIT;
struct ident_split ident;
strbuf_addf(&namemailbuf, " <%.*s>", (int)maillen, mailbuf);
item = string_list_insert(&log->list, namemailbuf.buf);
- if (item->util == NULL)
- item->util = xcalloc(1, sizeof(struct string_list));
-
- /* Skip any leading whitespace, including any blank lines. */
- while (*oneline && isspace(*oneline))
- oneline++;
- eol = strchr(oneline, '\n');
- if (!eol)
- eol = oneline + strlen(oneline);
- if (starts_with(oneline, "[PATCH")) {
- char *eob = strchr(oneline, ']');
- if (eob && (!eol || eob < eol))
- oneline = eob + 1;
- }
- while (*oneline && isspace(*oneline) && *oneline != '\n')
- oneline++;
- format_subject(&subject, oneline, " ");
- buffer = strbuf_detach(&subject, NULL);
-
- if (dot3) {
- int dot3len = strlen(dot3);
- if (dot3len > 5) {
- while ((p = strstr(buffer, dot3)) != NULL) {
- int taillen = strlen(p) - dot3len;
- memcpy(p, "/.../", 5);
- memmove(p + 5, p + dot3len, taillen + 1);
+
+ if (log->summary)
+ item->util = (void *)(UTIL_TO_INT(item) + 1);
+ else {
+ const char *dot3 = log->common_repo_prefix;
+ char *buffer, *p;
+ struct strbuf subject = STRBUF_INIT;
+ const char *eol;
+
+ /* Skip any leading whitespace, including any blank lines. */
+ while (*oneline && isspace(*oneline))
+ oneline++;
+ eol = strchr(oneline, '\n');
+ if (!eol)
+ eol = oneline + strlen(oneline);
+ if (starts_with(oneline, "[PATCH")) {
+ char *eob = strchr(oneline, ']');
+ if (eob && (!eol || eob < eol))
+ oneline = eob + 1;
+ }
+ while (*oneline && isspace(*oneline) && *oneline != '\n')
+ oneline++;
+ format_subject(&subject, oneline, " ");
+ buffer = strbuf_detach(&subject, NULL);
+
+ if (dot3) {
+ int dot3len = strlen(dot3);
+ if (dot3len > 5) {
+ while ((p = strstr(buffer, dot3)) != NULL) {
+ int taillen = strlen(p) - dot3len;
+ memcpy(p, "/.../", 5);
+ memmove(p + 5, p + dot3len, taillen + 1);
+ }
}
}
- }
- string_list_append(item->util, buffer);
+ if (item->util == NULL)
+ item->util = xcalloc(1, sizeof(struct string_list));
+ string_list_append(item->util, buffer);
+ }
}
static void read_from_stdin(struct shortlog *log)
{
- char author[1024], oneline[1024];
+ struct strbuf author = STRBUF_INIT;
+ struct strbuf oneline = STRBUF_INIT;
- while (fgets(author, sizeof(author), stdin) != NULL) {
- if (!(author[0] == 'A' || author[0] == 'a') ||
- !starts_with(author + 1, "uthor: "))
+ while (strbuf_getline_lf(&author, stdin) != EOF) {
+ const char *v;
+ if (!skip_prefix(author.buf, "Author: ", &v) &&
+ !skip_prefix(author.buf, "author ", &v))
continue;
- while (fgets(oneline, sizeof(oneline), stdin) &&
- oneline[0] != '\n')
+ while (strbuf_getline_lf(&oneline, stdin) != EOF &&
+ oneline.len)
; /* discard headers */
- while (fgets(oneline, sizeof(oneline), stdin) &&
- oneline[0] == '\n')
+ while (strbuf_getline_lf(&oneline, stdin) != EOF &&
+ !oneline.len)
; /* discard blanks */
- insert_one_record(log, author + 8, oneline);
+ insert_one_record(log, v, oneline.buf);
}
+ strbuf_release(&author);
+ strbuf_release(&oneline);
}
void shortlog_add_commit(struct shortlog *log, struct commit *commit)
{
- const char *author = NULL, *buffer;
- struct strbuf buf = STRBUF_INIT;
- struct strbuf ufbuf = STRBUF_INIT;
-
- pp_commit_easy(CMIT_FMT_RAW, commit, &buf);
- buffer = buf.buf;
- while (*buffer && *buffer != '\n') {
- const char *eol = strchr(buffer, '\n');
-
- if (eol == NULL)
- eol = buffer + strlen(buffer);
+ struct strbuf author = STRBUF_INIT;
+ struct strbuf oneline = STRBUF_INIT;
+ struct pretty_print_context ctx = {0};
+
+ ctx.fmt = CMIT_FMT_USERFORMAT;
+ ctx.abbrev = log->abbrev;
+ ctx.subject = "";
+ ctx.after_subject = "";
+ ctx.date_mode.type = DATE_NORMAL;
+ ctx.output_encoding = get_log_output_encoding();
+
+ format_commit_message(commit, "%an <%ae>", &author, &ctx);
+ if (!log->summary) {
+ if (log->user_format)
+ pretty_print_commit(&ctx, commit, &oneline);
else
- eol++;
-
- if (starts_with(buffer, "author "))
- author = buffer + 7;
- buffer = eol;
- }
- if (!author) {
- warning(_("Missing author: %s"),
- oid_to_hex(&commit->object.oid));
- return;
+ format_commit_message(commit, "%s", &oneline, &ctx);
}
- if (log->user_format) {
- struct pretty_print_context ctx = {0};
- ctx.fmt = CMIT_FMT_USERFORMAT;
- ctx.abbrev = log->abbrev;
- ctx.subject = "";
- ctx.after_subject = "";
- ctx.date_mode.type = DATE_NORMAL;
- ctx.output_encoding = get_log_output_encoding();
- pretty_print_commit(&ctx, commit, &ufbuf);
- buffer = ufbuf.buf;
- } else if (*buffer) {
- buffer++;
- }
- insert_one_record(log, author, !*buffer ? "<none>" : buffer);
- strbuf_release(&ufbuf);
- strbuf_release(&buf);
+
+ insert_one_record(log, author.buf, oneline.len ? oneline.buf : "<none>");
+
+ strbuf_release(&author);
+ strbuf_release(&oneline);
}
static void get_from_rev(struct rev_info *rev, struct shortlog *log)
if (log->sort_by_number)
qsort(log->list.items, log->list.nr, sizeof(struct string_list_item),
- compare_by_number);
+ log->summary ? compare_by_counter : compare_by_list);
for (i = 0; i < log->list.nr; i++) {
- struct string_list *onelines = log->list.items[i].util;
-
+ const struct string_list_item *item = &log->list.items[i];
if (log->summary) {
- printf("%6d\t%s\n", onelines->nr, log->list.items[i].string);
+ printf("%6d\t%s\n", (int)UTIL_TO_INT(item), item->string);
} else {
- printf("%s (%d):\n", log->list.items[i].string, onelines->nr);
+ struct string_list *onelines = item->util;
+ printf("%s (%d):\n", item->string, onelines->nr);
for (j = onelines->nr - 1; j >= 0; j--) {
const char *msg = onelines->items[j].string;
printf(" %s\n", msg);
}
putchar('\n');
+ onelines->strdup_strings = 1;
+ string_list_clear(onelines, 0);
+ free(onelines);
}
- onelines->strdup_strings = 1;
- string_list_clear(onelines, 0);
- free(onelines);
log->list.items[i].util = NULL;
}
struct module_list *list)
{
int i, result = 0;
- char *max_prefix, *ps_matched = NULL;
- int max_prefix_len;
+ char *ps_matched = NULL;
parse_pathspec(pathspec, 0,
PATHSPEC_PREFER_FULL |
PATHSPEC_STRIP_SUBMODULE_SLASH_CHEAP,
prefix, argv);
- /* Find common prefix for all pathspec's */
- max_prefix = common_prefix(pathspec);
- max_prefix_len = max_prefix ? strlen(max_prefix) : 0;
-
if (pathspec->nr)
ps_matched = xcalloc(pathspec->nr, 1);
if (!S_ISGITLINK(ce->ce_mode) ||
!match_pathspec(pathspec, ce->name, ce_namelen(ce),
- max_prefix_len, ps_matched, 1))
+ 0, ps_matched, 1))
continue;
ALLOC_GROW(list->entries, list->nr + 1, list->alloc);
*/
i++;
}
- free(max_prefix);
if (ps_matched && report_path_error(ps_matched, pathspec, prefix))
result = -1;
#define UNMARK_FLAG 2
static struct strbuf mtime_dir = STRBUF_INIT;
+/* Untracked cache mode */
+enum uc_mode {
+ UC_UNSPECIFIED = -1,
+ UC_DISABLE = 0,
+ UC_ENABLE,
+ UC_TEST,
+ UC_FORCE
+};
+
__attribute__((format (printf, 1, 2)))
static void report(const char *fmt, ...)
{
if (!mkdtemp(mtime_dir.buf))
die_errno("Could not make temporary directory");
- fprintf(stderr, _("Testing "));
+ fprintf(stderr, _("Testing mtime in '%s' "), xgetcwd());
atexit(remove_test_directory);
xstat_mtime_dir(&st);
fill_stat_data(&base, &st);
report("add '%s'", path);
}
-static void read_index_info(int line_termination)
+static void read_index_info(int nul_term_line)
{
struct strbuf buf = STRBUF_INIT;
struct strbuf uq = STRBUF_INIT;
+ strbuf_getline_fn getline_fn;
- while (strbuf_getline(&buf, stdin, line_termination) != EOF) {
+ getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
+ while (getline_fn(&buf, stdin) != EOF) {
char *ptr, *tab;
char *path_name;
unsigned char sha1[20];
goto bad_line;
path_name = ptr;
- if (line_termination && path_name[0] == '"') {
+ if (!nul_term_line && path_name[0] == '"') {
strbuf_reset(&uq);
if (unquote_c_style(&uq, path_name, NULL)) {
die("git update-index: bad quoting of path name");
static int stdin_cacheinfo_callback(struct parse_opt_ctx_t *ctx,
const struct option *opt, int unset)
{
- int *line_termination = opt->value;
+ int *nul_term_line = opt->value;
if (ctx->argc != 1)
return error("option '%s' must be the last argument", opt->long_name);
allow_add = allow_replace = allow_remove = 1;
- read_index_info(*line_termination);
+ read_index_info(*nul_term_line);
return 0;
}
int cmd_update_index(int argc, const char **argv, const char *prefix)
{
- int newfd, entries, has_errors = 0, line_termination = '\n';
- int untracked_cache = -1;
+ int newfd, entries, has_errors = 0, nul_term_line = 0;
+ enum uc_mode untracked_cache = UC_UNSPECIFIED;
int read_from_stdin = 0;
int prefix_length = prefix ? strlen(prefix) : 0;
int preferred_index_format = 0;
int split_index = -1;
struct lock_file *lock_file;
struct parse_opt_ctx_t ctx;
+ strbuf_getline_fn getline_fn;
int parseopt_state = PARSE_OPT_UNKNOWN;
struct option options[] = {
OPT_BIT('q', NULL, &refresh_args.flags,
N_("add to index only; do not add content to object database"), 1),
OPT_SET_INT(0, "force-remove", &force_remove,
N_("remove named paths even if present in worktree"), 1),
- OPT_SET_INT('z', NULL, &line_termination,
- N_("with --stdin: input lines are terminated by null bytes"), '\0'),
+ OPT_BOOL('z', NULL, &nul_term_line,
+ N_("with --stdin: input lines are terminated by null bytes")),
{OPTION_LOWLEVEL_CALLBACK, 0, "stdin", &read_from_stdin, NULL,
N_("read list of paths to be updated from standard input"),
PARSE_OPT_NONEG | PARSE_OPT_NOARG,
(parse_opt_cb *) stdin_callback},
- {OPTION_LOWLEVEL_CALLBACK, 0, "index-info", &line_termination, NULL,
+ {OPTION_LOWLEVEL_CALLBACK, 0, "index-info", &nul_term_line, NULL,
N_("add entries from standard input to the index"),
PARSE_OPT_NONEG | PARSE_OPT_NOARG,
(parse_opt_cb *) stdin_cacheinfo_callback},
N_("enable or disable split index")),
OPT_BOOL(0, "untracked-cache", &untracked_cache,
N_("enable/disable untracked cache")),
+ OPT_SET_INT(0, "test-untracked-cache", &untracked_cache,
+ N_("test if the filesystem supports untracked cache"), UC_TEST),
OPT_SET_INT(0, "force-untracked-cache", &untracked_cache,
- N_("enable untracked cache without testing the filesystem"), 2),
+ N_("enable untracked cache without testing the filesystem"), UC_FORCE),
OPT_END()
};
}
}
argc = parse_options_end(&ctx);
+
+ getline_fn = nul_term_line ? strbuf_getline_nul : strbuf_getline_lf;
if (preferred_index_format) {
if (preferred_index_format < INDEX_FORMAT_LB ||
INDEX_FORMAT_UB < preferred_index_format)
}
if (read_from_stdin) {
- struct strbuf buf = STRBUF_INIT, nbuf = STRBUF_INIT;
+ struct strbuf buf = STRBUF_INIT;
+ struct strbuf unquoted = STRBUF_INIT;
setup_work_tree();
- while (strbuf_getline(&buf, stdin, line_termination) != EOF) {
+ while (getline_fn(&buf, stdin) != EOF) {
char *p;
- if (line_termination && buf.buf[0] == '"') {
- strbuf_reset(&nbuf);
- if (unquote_c_style(&nbuf, buf.buf, NULL))
+ if (!nul_term_line && buf.buf[0] == '"') {
+ strbuf_reset(&unquoted);
+ if (unquote_c_style(&unquoted, buf.buf, NULL))
die("line is badly quoted");
- strbuf_swap(&buf, &nbuf);
+ strbuf_swap(&buf, &unquoted);
}
p = prefix_path(prefix, prefix_length, buf.buf);
update_one(p);
chmod_path(set_executable_bit, p);
free(p);
}
- strbuf_release(&nbuf);
+ strbuf_release(&unquoted);
strbuf_release(&buf);
}
the_index.split_index = NULL;
the_index.cache_changed |= SOMETHING_CHANGED;
}
- if (untracked_cache > 0) {
- struct untracked_cache *uc;
- if (untracked_cache < 2) {
- setup_work_tree();
- if (!test_if_untracked_cache_is_supported())
- return 1;
- }
- if (!the_index.untracked) {
- uc = xcalloc(1, sizeof(*uc));
- strbuf_init(&uc->ident, 100);
- uc->exclude_per_dir = ".gitignore";
- /* should be the same flags used by git-status */
- uc->dir_flags = DIR_SHOW_OTHER_DIRECTORIES | DIR_HIDE_EMPTY_DIRECTORIES;
- the_index.untracked = uc;
- }
- add_untracked_ident(the_index.untracked);
- the_index.cache_changed |= UNTRACKED_CHANGED;
- } else if (!untracked_cache && the_index.untracked) {
- the_index.untracked = NULL;
- the_index.cache_changed |= UNTRACKED_CHANGED;
+ switch (untracked_cache) {
+ case UC_UNSPECIFIED:
+ break;
+ case UC_DISABLE:
+ if (git_config_get_untracked_cache() == 1)
+ warning("core.untrackedCache is set to true; "
+ "remove or change it, if you really want to "
+ "disable the untracked cache");
+ remove_untracked_cache(&the_index);
+ report(_("Untracked cache disabled"));
+ break;
+ case UC_TEST:
+ setup_work_tree();
+ return !test_if_untracked_cache_is_supported();
+ case UC_ENABLE:
+ case UC_FORCE:
+ if (git_config_get_untracked_cache() == 0)
+ warning("core.untrackedCache is set to false; "
+ "remove or change it, if you really want to "
+ "enable the untracked cache");
+ add_untracked_cache(&the_index);
+ report(_("Untracked cache enabled for '%s'"), get_git_work_tree());
+ break;
+ default:
+ die("Bug: bad untracked_cache value: %d", untracked_cache);
}
if (active_cache_changed) {
return 1;
}
len = st.st_size;
- path = xmalloc(len + 1);
+ path = xmallocz(len);
read_in_full(fd, path, len);
close(fd);
while (len && (path[len - 1] == '\n' || path[len - 1] == '\r'))
die(_("'%s' already exists"), path);
/* is 'refname' a branch or commit? */
- if (opts->force_new_branch) /* definitely a branch */
- ;
- else if (!opts->detach && !strbuf_check_branch_ref(&symref, refname) &&
+ if (!opts->detach && !strbuf_check_branch_ref(&symref, refname) &&
ref_exists(symref.buf)) { /* it's a branch */
if (!opts->force)
die_if_checked_out(symref.buf);
branch = ac < 2 ? "HEAD" : av[1];
opts.force_new_branch = !!new_branch_force;
- if (opts.force_new_branch)
+ if (opts.force_new_branch) {
+ struct strbuf symref = STRBUF_INIT;
+
opts.new_branch = new_branch_force;
+ if (!opts.force &&
+ !strbuf_check_branch_ref(&symref, opts.new_branch) &&
+ ref_exists(symref.buf))
+ die_if_checked_out(symref.buf);
+ strbuf_release(&symref);
+ }
+
if (ac < 2 && !opts.new_branch && !opts.detach) {
int n;
const char *s = worktree_basename(path, &n);
ALLOC_GROW(it->down, it->subtree_nr + 1, it->subtree_alloc);
it->subtree_nr++;
- down = xmalloc(sizeof(*down) + pathlen + 1);
+ FLEX_ALLOC_MEM(down, name, path, pathlen);
down->cache_tree = NULL;
down->namelen = pathlen;
- memcpy(down->name, path, pathlen);
- down->name[pathlen] = 0;
if (pos < it->subtree_nr)
memmove(it->down + pos + 1,
#include "convert.h"
#include "trace.h"
#include "string-list.h"
+#include "pack-revindex.h"
#include SHA1_HEADER
#ifndef platform_SHA_CTX
#error "CE_EXTENDED_FLAGS out of range"
#endif
+/* Forward structure decls */
struct pathspec;
+struct child_process;
/*
* Copy the sha1 and stat state of a cache entry from one to
freshened:1,
do_not_close:1;
unsigned char sha1[20];
+ struct revindex_entry *revindex;
/* something like ".git/objects/pack/xxxxx.pack" */
char pack_name[FLEX_ARRAY]; /* more */
} *packed_git;
extern void clear_delta_base_cache(void);
extern struct packed_git *add_packed_git(const char *path, size_t path_len, int local);
+/*
+ * Make sure that a pointer access into an mmap'd index file is within bounds,
+ * and can provide at least 8 bytes of data.
+ *
+ * Note that this is only necessary for variable-length segments of the file
+ * (like the 64-bit extended offset table), as we compare the size to the
+ * fixed-length parts when we open the file.
+ */
+extern void check_pack_index_ptr(const struct packed_git *p, const void *ptr);
+
/*
* Return the SHA-1 of the nth object within the specified packfile.
* Open the index if it is not already open. The return value points
/* git_config_parse_key() returns these negated: */
#define CONFIG_INVALID_KEY 1
#define CONFIG_NO_SECTION_OR_NAME 2
-/* git_config_set(), git_config_set_multivar() return the above or these: */
+/* git_config_set_gently(), git_config_set_multivar_gently() return the above or these: */
#define CONFIG_NO_LOCK -1
#define CONFIG_INVALID_FILE 3
#define CONFIG_NO_WRITE 4
typedef int (*config_fn_t)(const char *, const char *, void *);
extern int git_default_config(const char *, const char *, void *);
extern int git_config_from_file(config_fn_t fn, const char *, void *);
-extern int git_config_from_buf(config_fn_t fn, const char *name,
- const char *buf, size_t len, void *data);
+extern int git_config_from_mem(config_fn_t fn, const char *origin_type,
+ const char *name, const char *buf, size_t len, void *data);
extern void git_config_push_parameter(const char *text);
extern int git_config_from_parameters(config_fn_t fn, void *data);
extern void git_config(config_fn_t fn, void *);
extern int git_config_maybe_bool(const char *, const char *);
extern int git_config_string(const char **, const char *, const char *);
extern int git_config_pathname(const char **, const char *, const char *);
-extern int git_config_set_in_file(const char *, const char *, const char *);
-extern int git_config_set(const char *, const char *);
+extern int git_config_set_in_file_gently(const char *, const char *, const char *);
+extern void git_config_set_in_file(const char *, const char *, const char *);
+extern int git_config_set_gently(const char *, const char *);
+extern void git_config_set(const char *, const char *);
extern int git_config_parse_key(const char *, char **, int *);
extern int git_config_key_is_valid(const char *key);
-extern int git_config_set_multivar(const char *, const char *, const char *, int);
-extern int git_config_set_multivar_in_file(const char *, const char *, const char *, const char *, int);
+extern int git_config_set_multivar_gently(const char *, const char *, const char *, int);
+extern void git_config_set_multivar(const char *, const char *, const char *, int);
+extern int git_config_set_multivar_in_file_gently(const char *, const char *, const char *, const char *, int);
+extern void git_config_set_multivar_in_file(const char *, const char *, const char *, const char *, int);
extern int git_config_rename_section(const char *, const char *);
extern int git_config_rename_section_in_file(const char *, const char *, const char *);
extern const char *git_etc_gitconfig(void);
extern const char *get_commit_output_encoding(void);
extern int git_config_parse_parameter(const char *, config_fn_t fn, void *data);
+extern const char *current_config_origin_type(void);
+extern const char *current_config_name(void);
struct config_include_data {
int depth;
extern int git_config_get_bool_or_int(const char *key, int *is_bool, int *dest);
extern int git_config_get_maybe_bool(const char *key, int *dest);
extern int git_config_get_pathname(const char *key, const char **dest);
+extern int git_config_get_untracked_cache(void);
+
+/*
+ * This is a hack for test programs like test-dump-untracked-cache to
+ * ensure that they do not modify the untracked cache when reading it.
+ * Do not use it otherwise!
+ */
+extern int ignore_untracked_cache_config;
struct key_value_info {
const char *filename;
extern int term_columns(void);
extern int decimal_width(uintmax_t);
extern int check_pager_config(const char *cmd);
+extern void prepare_pager_args(struct child_process *, const char *pager);
extern const char *editor_program;
extern const char *askpass_program;
data.colopts = colopts;
data.opts = *opts;
- data.len = xmalloc(sizeof(*data.len) * list->nr);
+ ALLOC_ARRAY(data.len, list->nr);
for (i = 0; i < list->nr; i++)
data.len[i] = item_length(colopts, list->items[i].string);
if (colopts & COL_DENSE)
shrink_columns(&data);
- empty_cell = xmalloc(initial_width + 1);
+ empty_cell = xmallocz(initial_width);
memset(empty_cell, ' ', initial_width);
- empty_cell[initial_width] = '\0';
for (y = 0; y < data.rows; y++) {
for (x = 0; x < data.cols; x++)
if (display_cell(&data, initial_width, empty_cell, x, y))
* - Else if we have NEW, insert newend lline into base and
* consume newend
*/
- lcs = xcalloc(origbaselen + 1, sizeof(int*));
- directions = xcalloc(origbaselen + 1, sizeof(enum coalesce_direction*));
+ lcs = xcalloc(st_add(origbaselen, 1), sizeof(int*));
+ directions = xcalloc(st_add(origbaselen, 1), sizeof(enum coalesce_direction*));
for (i = 0; i < origbaselen + 1; i++) {
- lcs[i] = xcalloc(lennew + 1, sizeof(int));
- directions[i] = xcalloc(lennew + 1, sizeof(enum coalesce_direction));
+ lcs[i] = xcalloc(st_add(lennew, 1), sizeof(int));
+ directions[i] = xcalloc(st_add(lennew, 1), sizeof(enum coalesce_direction));
directions[i][0] = BASE;
}
for (j = 1; j < lennew + 1; j++)
if (line[len-1] == '\n')
len--;
- lline = xmalloc(sizeof(*lline) + len + 1);
+ FLEX_ALLOC_MEM(lline, line, line, len);
lline->len = len;
lline->next = NULL;
lline->prev = sline->plost.lost_tail;
sline->plost.lost_tail = lline;
sline->plost.len++;
lline->parent_map = this_mask;
- memcpy(lline->line, line, len);
- lline->line[len] = 0;
}
struct combine_diff_state {
elem->mode = canon_mode(S_IFLNK);
result_size = len;
- result = xmalloc(len + 1);
+ result = xmallocz(len);
done = read_in_full(fd, result, len);
if (done < 0)
else if (done < len)
die("early EOF '%s'", elem->path);
- result[len] = 0;
-
/* If not a fake symlink, apply filters, e.g. autocrlf */
if (is_file) {
struct strbuf buf = STRBUF_INIT;
if (result_size && result[result_size-1] != '\n')
cnt++; /* incomplete line */
- sline = xcalloc(cnt+2, sizeof(*sline));
+ sline = xcalloc(st_add(cnt, 2), sizeof(*sline));
sline[0].bol = result;
for (lno = 0, cp = result; cp < result + result_size; cp++) {
if (*cp == '\n') {
/* Even p_lno[cnt+1] is valid -- that is for the end line number
* for deletion hunk at the end.
*/
- sline[0].p_lno = xcalloc((cnt+2) * num_parent, sizeof(unsigned long));
+ sline[0].p_lno = xcalloc(st_mult(st_add(cnt, 2), num_parent), sizeof(unsigned long));
for (lno = 0; lno <= cnt; lno++)
sline[lno+1].p_lno = sline[lno].p_lno + num_parent;
struct diff_filespec *pool;
pair = xmalloc(sizeof(*pair));
- pool = xcalloc(num_parent + 1, sizeof(struct diff_filespec));
+ pool = xcalloc(st_add(num_parent, 1), sizeof(struct diff_filespec));
pair->one = pool + 1;
pair->two = pool;
struct combine_diff_path paths_head;
struct strbuf base;
- parents_sha1 = xmalloc(nparent * sizeof(parents_sha1[0]));
+ ALLOC_ARRAY(parents_sha1, nparent);
for (i = 0; i < nparent; i++)
parents_sha1[i] = parents->sha1[i];
if (opt->orderfile && num_paths) {
struct obj_order *o;
- o = xmalloc(sizeof(*o) * num_paths);
+ ALLOC_ARRAY(o, num_paths);
for (i = 0, p = paths; p; p = p->next, i++)
o[i].obj = p;
order_objects(opt->orderfile, path_path, o, num_paths);
if ((len + 1) % entry_size)
goto bad_graft_data;
i = (len + 1) / entry_size - 1;
- graft = xmalloc(sizeof(*graft) + GIT_SHA1_RAWSZ * i);
+ graft = xmalloc(st_add(sizeof(*graft), st_mult(GIT_SHA1_RAWSZ, i)));
graft->nr_parent = i;
if (get_oid_hex(buf, &graft->oid))
goto bad_graft_data;
work = xcalloc(cnt, sizeof(*work));
redundant = xcalloc(cnt, 1);
- filled_index = xmalloc(sizeof(*filled_index) * (cnt - 1));
+ ALLOC_ARRAY(filled_index, cnt - 1);
for (i = 0; i < cnt; i++)
parse_commit(array[i]);
#include "../run-command.h"
#include "../cache.h"
+#define HCAST(type, handle) ((type)(intptr_t)handle)
+
static const int delay[] = { 0, 1, 10, 20, 40 };
int err_win_to_posix(DWORD winerr)
return (time_t)(filetime_to_hnsec(ft) / 10000000);
}
+/**
+ * Verifies that safe_create_leading_directories() would succeed.
+ */
+static int has_valid_directory_prefix(wchar_t *wfilename)
+{
+ int n = wcslen(wfilename);
+
+ while (n > 0) {
+ wchar_t c = wfilename[--n];
+ DWORD attributes;
+
+ if (!is_dir_sep(c))
+ continue;
+
+ wfilename[n] = L'\0';
+ attributes = GetFileAttributesW(wfilename);
+ wfilename[n] = c;
+ if (attributes == FILE_ATTRIBUTE_DIRECTORY ||
+ attributes == FILE_ATTRIBUTE_DEVICE)
+ return 1;
+ if (attributes == INVALID_FILE_ATTRIBUTES)
+ switch (GetLastError()) {
+ case ERROR_PATH_NOT_FOUND:
+ continue;
+ case ERROR_FILE_NOT_FOUND:
+ /* This implies parent directory exists. */
+ return 1;
+ }
+ return 0;
+ }
+ return 1;
+}
+
/* We keep the do_lstat code in a separate function to avoid recursion.
* When a path ends with a slash, the stat will fail with ENOENT. In
* this case, we strip the trailing slashes and stat again.
case ERROR_NOT_ENOUGH_MEMORY:
errno = ENOMEM;
break;
+ case ERROR_PATH_NOT_FOUND:
+ if (!has_valid_directory_prefix(wfilename)) {
+ errno = ENOTDIR;
+ break;
+ }
+ /* fallthru */
default:
errno = ENOENT;
break;
errno = err_win_to_posix(GetLastError());
return -1;
}
- filedes[0] = _open_osfhandle((int)h[0], O_NOINHERIT);
+ filedes[0] = _open_osfhandle(HCAST(int, h[0]), O_NOINHERIT);
if (filedes[0] < 0) {
CloseHandle(h[0]);
CloseHandle(h[1]);
return -1;
}
- filedes[1] = _open_osfhandle((int)h[1], O_NOINHERIT);
+ filedes[1] = _open_osfhandle(HCAST(int, h[1]), O_NOINHERIT);
if (filedes[1] < 0) {
close(filedes[0]);
CloseHandle(h[1]);
return arg;
/* insert \ where necessary */
- d = q = xmalloc(len+n+3);
+ d = q = xmalloc(st_add3(len, n, 3));
*d++ = '"';
while (*arg) {
if (*arg == '"')
if (!n)
return NULL;
- path = xmalloc((n+1)*sizeof(char *));
+ ALLOC_ARRAY(path, n + 1);
p = envpath;
i = 0;
do {
i++;
/* copy the environment, leaving space for changes */
- tmpenv = xmalloc((size + i) * sizeof(char*));
+ ALLOC_ARRAY(tmpenv, size + i);
memcpy(tmpenv, environ, size * sizeof(char*));
/* merge supplied environment changes into the temporary environment */
free(quoted);
}
- wargs = xmalloc((2 * args.len + 1) * sizeof(wchar_t));
+ ALLOC_ARRAY(wargs, st_add(st_mult(2, args.len), 1));
xutftowcs(wargs, args.buf, 2 * args.len + 1);
strbuf_release(&args);
int argc = 0;
const char **argv2;
while (argv[argc]) argc++;
- argv2 = xmalloc(sizeof(*argv) * (argc+1));
+ ALLOC_ARRAY(argv2, argc + 1);
argv2[0] = (char *)cmd; /* full path to the script file */
memcpy(&argv2[1], &argv[1], sizeof(*argv) * argc);
pid = mingw_spawnv(prog, argv2, 1);
if (gle == ERROR_ACCESS_DENIED &&
(attrs = GetFileAttributesW(wpnew)) != INVALID_FILE_ATTRIBUTES) {
if (attrs & FILE_ATTRIBUTE_DIRECTORY) {
- errno = EISDIR;
+ DWORD attrsold = GetFileAttributesW(wpold);
+ if (attrsold == INVALID_FILE_ATTRIBUTES ||
+ !(attrsold & FILE_ATTRIBUTE_DIRECTORY))
+ errno = EISDIR;
+ else if (!_wrmdir(wpnew))
+ goto repeat;
return -1;
}
if ((attrs & FILE_ATTRIBUTE_READONLY) &&
die("cannot run browser");
printf("Launching default browser to display HTML ...\n");
- r = (int)ShellExecute(NULL, "open", htmlpath, NULL, "\\", SW_SHOWNORMAL);
+ r = HCAST(int, ShellExecute(NULL, "open", htmlpath,
+ NULL, "\\", SW_SHOWNORMAL));
FreeLibrary(shell32);
/* see the MSDN documentation referring to the result codes here */
if (r <= 32) {
return -1;
}
+static void setup_windows_environment(void)
+{
+ char *tmp = getenv("TMPDIR");
+
+ /* on Windows it is TMP and TEMP */
+ if (!tmp) {
+ if (!(tmp = getenv("TMP")))
+ tmp = getenv("TEMP");
+ if (tmp) {
+ setenv("TMPDIR", tmp, 1);
+ tmp = getenv("TMPDIR");
+ }
+ }
+
+ if (tmp) {
+ /*
+ * Convert all dir separators to forward slashes,
+ * to help shell commands called from the Git
+ * executable (by not mistaking the dir separators
+ * for escape characters).
+ */
+ for (; *tmp; tmp++)
+ if (*tmp == '\\')
+ *tmp = '/';
+ }
+
+ /* simulate TERM to enable auto-color (see color.c) */
+ if (!getenv("TERM"))
+ setenv("TERM", "cygwin", 1);
+}
+
/*
* Disable MSVCRT command line wildcard expansion (__getmainargs called from
* mingw startup code, see init.c in mingw runtime).
qsort(environ, i, sizeof(char*), compareenv);
/* fix Windows specific environment settings */
-
- /* on Windows it is TMP and TEMP */
- if (!mingw_getenv("TMPDIR")) {
- const char *tmp = mingw_getenv("TMP");
- if (!tmp)
- tmp = mingw_getenv("TEMP");
- if (tmp)
- setenv("TMPDIR", tmp, 1);
- }
-
- /* simulate TERM to enable auto-color (see color.c) */
- if (!getenv("TERM"))
- setenv("TERM", "cygwin", 1);
+ setup_windows_environment();
/* initialize critical section for waitpid pinfo_t list */
InitializeCriticalSection(&pinfo_cs);
+#ifdef __MINGW64_VERSION_MAJOR
+#include <stdint.h>
+#include <wchar.h>
+typedef _sigset_t sigset_t;
+#endif
#include <winsock2.h>
#include <ws2tcpip.h>
+/* MinGW-w64 claims to have flockfile, but it does not actually provide it. */
+#ifdef __MINGW64_VERSION_MAJOR
+#undef _POSIX_THREAD_SAFE_FUNCTIONS
+#endif
+
/*
* things that are not available in header files
*/
-typedef int pid_t;
typedef int uid_t;
typedef int socklen_t;
+#ifndef __MINGW64_VERSION_MAJOR
+typedef int pid_t;
#define hstrerror strerror
+#endif
#define S_IFLNK 0120000 /* Symbolic link */
#define S_ISLNK(x) (((x) & S_IFMT) == S_IFLNK)
#define S_ISSOCK(x) 0
+#ifndef S_IRWXG
#define S_IRGRP 0
#define S_IWGRP 0
#define S_IXGRP 0
#define S_IRWXG (S_IRGRP | S_IWGRP | S_IXGRP)
+#endif
+#ifndef S_IRWXO
#define S_IROTH 0
#define S_IWOTH 0
#define S_IXOTH 0
#define S_IRWXO (S_IROTH | S_IWOTH | S_IXOTH)
+#endif
#define S_ISUID 0004000
#define S_ISGID 0002000
{ errno = ENOSYS; return -1; }
static inline int fchmod(int fildes, mode_t mode)
{ errno = ENOSYS; return -1; }
+#ifndef __MINGW64_VERSION_MAJOR
static inline pid_t fork(void)
{ errno = ENOSYS; return -1; }
+#endif
static inline unsigned int alarm(unsigned int seconds)
{ return 0; }
static inline int fsync(int fd)
unsigned int sleep (unsigned int seconds);
int mkstemp(char *template);
int gettimeofday(struct timeval *tv, void *tz);
+#ifndef __MINGW64_VERSION_MAJOR
struct tm *gmtime_r(const time_t *timep, struct tm *result);
struct tm *localtime_r(const time_t *timep, struct tm *result);
+#endif
int getpagesize(void); /* defined in MinGW's libgcc.a */
struct passwd *getpwuid(uid_t uid);
int setitimer(int type, struct itimerval *in, struct itimerval *out);
/*
* Use mingw specific stat()/lstat()/fstat() implementations on Windows.
*/
+#ifndef __MINGW64_VERSION_MAJOR
#define off_t off64_t
#define lseek _lseeki64
+#endif
/* use struct stat with 64 bit st_size */
#ifdef stat
int mingw_offset_1st_component(const char *path);
#define offset_1st_component mingw_offset_1st_component
#define PATH_SEP ';'
+#ifndef __MINGW64_VERSION_MAJOR
#define PRIuMAX "I64u"
#define PRId64 "I64d"
+#else
+#include <inttypes.h>
+#endif
void mingw_open_html(const char *path);
#define open_html mingw_open_html
inlining are defined as macros, so these aren't used for them.
*/
+#ifdef __MINGW64_VERSION_MAJOR
+#undef FORCEINLINE
+#endif
#ifndef FORCEINLINE
#if defined(__GNUC__)
#define FORCEINLINE __inline __attribute__ ((always_inline))
/*** Atomic operations ***/
#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
+ #undef _ReadWriteBarrier
#define _ReadWriteBarrier() __sync_synchronize()
#else
static __inline__ __attribute__((always_inline)) long __sync_lock_test_and_set(volatile long * const Target, const long Value)
volatile long threadid;
};
+static inline int return_0(int i) { return 0; }
#define MLOCK_T struct win32_mlock_t
#define CURRENT_THREAD win32_getcurrentthreadid()
-#define INITIAL_LOCK(sl) (memset(sl, 0, sizeof(MLOCK_T)), 0)
+#define INITIAL_LOCK(sl) (memset(sl, 0, sizeof(MLOCK_T)), return_0(0))
#define ACQUIRE_LOCK(sl) win32_acquire_lock(sl)
#define RELEASE_LOCK(sl) win32_release_lock(sl)
#define TRY_LOCK(sl) win32_try_lock(sl)
#ifdef WIN32_NATIVE
-#define IsConsoleHandle(h) (((long) (h) & 3) == 3)
+#define IsConsoleHandle(h) (((long) (intptr_t) (h) & 3) == 3)
static BOOL
IsSocketHandle (HANDLE h)
close(output_fd);
git_path_buf(&path, "%s", auml_nfd);
precomposed_unicode = access(path.buf, R_OK) ? 0 : 1;
- git_config_set("core.precomposeunicode", precomposed_unicode ? "true" : "false");
+ git_config_set("core.precomposeunicode",
+ precomposed_unicode ? "true" : "false");
git_path_buf(&path, "%s", auml_nfc);
if (unlink(path.buf))
die_errno(_("failed to unlink '%s'"), path.buf);
void git_qsort(void *b, size_t n, size_t s,
int (*cmp)(const void *, const void *))
{
- const size_t size = n * s;
+ const size_t size = st_mult(n, s);
char buf[1024];
if (size < sizeof(buf)) {
namelen = strlen(name);
valuelen = strlen(value);
- envstr = malloc((namelen + valuelen + 2));
+ envstr = malloc(st_add3(namelen, valuelen, 2));
if (!envstr) {
errno = ENOMEM;
return -1;
fputs(prompt, output_fh);
fflush(output_fh);
- r = strbuf_getline(&buf, input_fh, '\n');
+ r = strbuf_getline_lf(&buf, input_fh);
if (!echo) {
putc('\n', output_fh);
fflush(output_fh);
*/
#define pthread_mutex_t CRITICAL_SECTION
-#define pthread_mutex_init(a,b) (InitializeCriticalSection((a)), 0)
+static inline int return_0(int i) {
+ return 0;
+}
+#define pthread_mutex_init(a,b) return_0((InitializeCriticalSection((a)), 0))
#define pthread_mutex_destroy(a) DeleteCriticalSection((a))
#define pthread_mutex_lock EnterCriticalSection
#define pthread_mutex_unlock LeaveCriticalSection
static inline void NORETURN pthread_exit(void *ret)
{
- ExitThread((DWORD)ret);
+ ExitThread((DWORD)(intptr_t)ret);
}
typedef DWORD pthread_key_t;
return;
}
- str = malloc(str_len + 1);
+ str = malloc(st_add(str_len, 1));
if (!str) {
warning("malloc failed: '%s'", strerror(errno));
return;
va_end(ap);
while ((pos = strstr(str, "%1")) != NULL) {
- str = realloc(str, ++str_len + 1);
+ str = realloc(str, st_add(++str_len, 1));
if (!str) {
warning("realloc failed: '%s'", strerror(errno));
return;
static HANDLE hconsole1, hconsole2;
#ifdef __MINGW32__
+#if !defined(__MINGW64_VERSION_MAJOR) || __MINGW64_VERSION_MAJOR < 5
typedef struct _CONSOLE_FONT_INFOEX {
ULONG cbSize;
DWORD nFont;
WCHAR FaceName[LF_FACESIZE];
} CONSOLE_FONT_INFOEX, *PCONSOLE_FONT_INFOEX;
#endif
+#endif
typedef BOOL (WINAPI *PGETCURRENTCONSOLEFONTEX)(HANDLE, BOOL,
PCONSOLE_FONT_INFOEX);
HANDLE hresult, hproc = GetCurrentProcess();
if (!DuplicateHandle(hproc, hnd, hproc, &hresult, 0, TRUE,
DUPLICATE_SAME_ACCESS))
- die_lasterr("DuplicateHandle(%li) failed", (long) hnd);
+ die_lasterr("DuplicateHandle(%li) failed",
+ (long) (intptr_t) hnd);
return hresult;
}
size_t pos;
} buf;
} u;
+ const char *origin_type;
const char *name;
const char *path;
int die_on_error;
break;
}
if (cf->die_on_error)
- die(_("bad config file line %d in %s"), cf->linenr, cf->name);
+ die(_("bad config line %d in %s %s"), cf->linenr, cf->origin_type, cf->name);
else
- return error(_("bad config file line %d in %s"), cf->linenr, cf->name);
+ return error(_("bad config line %d in %s %s"), cf->linenr, cf->origin_type, cf->name);
}
static int parse_unit_factor(const char *end, uintmax_t *val)
if (!value)
value = "";
- if (cf && cf->name)
- die(_("bad numeric config value '%s' for '%s' in %s: %s"),
- value, name, cf->name, reason);
+ if (cf && cf->origin_type && cf->name)
+ die(_("bad numeric config value '%s' for '%s' in %s %s: %s"),
+ value, name, cf->origin_type, cf->name, reason);
die(_("bad numeric config value '%s' for '%s': %s"), value, name, reason);
}
}
static int do_config_from_file(config_fn_t fn,
- const char *name, const char *path, FILE *f, void *data)
+ const char *origin_type, const char *name, const char *path, FILE *f,
+ void *data)
{
struct config_source top;
top.u.file = f;
+ top.origin_type = origin_type;
top.name = name;
top.path = path;
top.die_on_error = 1;
static int git_config_from_stdin(config_fn_t fn, void *data)
{
- return do_config_from_file(fn, "<stdin>", NULL, stdin, data);
+ return do_config_from_file(fn, "standard input", "", NULL, stdin, data);
}
int git_config_from_file(config_fn_t fn, const char *filename, void *data)
f = fopen(filename, "r");
if (f) {
flockfile(f);
- ret = do_config_from_file(fn, filename, filename, f, data);
+ ret = do_config_from_file(fn, "file", filename, filename, f, data);
funlockfile(f);
fclose(f);
}
return ret;
}
-int git_config_from_buf(config_fn_t fn, const char *name, const char *buf,
- size_t len, void *data)
+int git_config_from_mem(config_fn_t fn, const char *origin_type,
+ const char *name, const char *buf, size_t len, void *data)
{
struct config_source top;
top.u.buf.buf = buf;
top.u.buf.len = len;
top.u.buf.pos = 0;
+ top.origin_type = origin_type;
top.name = name;
top.path = NULL;
top.die_on_error = 0;
return error("reference '%s' does not point to a blob", name);
}
- ret = git_config_from_buf(fn, name, buf, size, data);
+ ret = git_config_from_mem(fn, "blob", name, buf, size, data);
free(buf);
return ret;
return ret;
}
+int git_config_get_untracked_cache(void)
+{
+ int val = -1;
+ const char *v;
+
+ /* Hack for test programs like test-dump-untracked-cache */
+ if (ignore_untracked_cache_config)
+ return -1;
+
+ if (!git_config_get_maybe_bool("core.untrackedcache", &val))
+ return val;
+
+ if (!git_config_get_value("core.untrackedcache", &v)) {
+ if (!strcasecmp(v, "keep"))
+ return -1;
+
+ error("unknown core.untrackedCache value '%s'; "
+ "using 'keep' default value", v);
+ return -1;
+ }
+
+ return -1; /* default value */
+}
+
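A minimal sketch of how a caller might act on the tri-state result of git_config_get_untracked_cache() above; the wrapper name is hypothetical, and add_untracked_cache()/remove_untracked_cache() are the index-level helpers introduced later in this series:

	/* hypothetical caller: apply the core.untrackedCache setting to the index */
	static void maybe_toggle_untracked_cache(struct index_state *istate)
	{
		int want = git_config_get_untracked_cache();

		if (want < 0)
			return;				/* "keep": leave the index as it is */
		if (want)
			add_untracked_cache(istate);	/* core.untrackedCache = true */
		else
			remove_untracked_cache(istate);	/* core.untrackedCache = false */
	}
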
NORETURN
void git_die_config_linenr(const char *key, const char *filename, int linenr)
{
return offset;
}
-int git_config_set_in_file(const char *config_filename,
- const char *key, const char *value)
+int git_config_set_in_file_gently(const char *config_filename,
+ const char *key, const char *value)
{
- return git_config_set_multivar_in_file(config_filename, key, value, NULL, 0);
+ return git_config_set_multivar_in_file_gently(config_filename, key, value, NULL, 0);
}
-int git_config_set(const char *key, const char *value)
+void git_config_set_in_file(const char *config_filename,
+ const char *key, const char *value)
{
- return git_config_set_multivar(key, value, NULL, 0);
+ git_config_set_multivar_in_file(config_filename, key, value, NULL, 0);
+}
+
+int git_config_set_gently(const char *key, const char *value)
+{
+ return git_config_set_multivar_gently(key, value, NULL, 0);
+}
+
+void git_config_set(const char *key, const char *value)
+{
+ git_config_set_multivar(key, value, NULL, 0);
}
/*
* Validate the key and while at it, lower case it for matching.
*/
if (store_key)
- *store_key = xmalloc(strlen(key) + 1);
+ *store_key = xmallocz(strlen(key));
dot = 0;
for (i = 0; key[i]; i++) {
if (store_key)
(*store_key)[i] = c;
}
- if (store_key)
- (*store_key)[i] = 0;
return 0;
* - the config file is removed and the lock file rename()d to it.
*
*/
-int git_config_set_multivar_in_file(const char *config_filename,
- const char *key, const char *value,
- const char *value_regex, int multi_replace)
+int git_config_set_multivar_in_file_gently(const char *config_filename,
+ const char *key, const char *value,
+ const char *value_regex,
+ int multi_replace)
{
int fd = -1, in_fd = -1;
int ret;
}
-int git_config_set_multivar(const char *key, const char *value,
- const char *value_regex, int multi_replace)
+void git_config_set_multivar_in_file(const char *config_filename,
+ const char *key, const char *value,
+ const char *value_regex, int multi_replace)
+{
+ if (git_config_set_multivar_in_file_gently(config_filename, key, value,
+ value_regex, multi_replace) < 0)
+ die(_("Could not set '%s' to '%s'"), key, value);
+}
+
+int git_config_set_multivar_gently(const char *key, const char *value,
+ const char *value_regex, int multi_replace)
{
- return git_config_set_multivar_in_file(NULL, key, value, value_regex,
- multi_replace);
+ return git_config_set_multivar_in_file_gently(NULL, key, value, value_regex,
+ multi_replace);
+}
+
+void git_config_set_multivar(const char *key, const char *value,
+ const char *value_regex, int multi_replace)
+{
+ git_config_set_multivar_in_file(NULL, key, value, value_regex,
+ multi_replace);
}
static int section_name_match (const char *buf, const char *name)
return 0;
}
+
+const char *current_config_origin_type(void)
+{
+ return cf && cf->origin_type ? cf->origin_type : "command line";
+}
+
+const char *current_config_name(void)
+{
+ return cf && cf->name ? cf->name : "";
+}
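A short sketch of how the two accessors above could be combined when reporting a problem outside the parser; the helper is hypothetical and merely mirrors the "<origin type> <name>" phrasing used by the parser's own messages:

	/* hypothetical helper: e.g. "bad key 'foo.bar' (in file .git/config)" */
	static int report_bad_key(const char *key)
	{
		return error("bad key '%s' (in %s %s)", key,
			     current_config_origin_type(),
			     current_config_name());
	}
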
ifeq ($(shell expr "$(uname_R)" : '[15]\.'),2)
NO_STRLCPY = YesPlease
endif
+ ifeq ($(shell test "`expr "$(uname_R)" : '\([0-9][0-9]*\)\.'`" -eq 10 && echo 1),1)
+ CC = clang
+ endif
ifeq ($(shell test "`expr "$(uname_R)" : '\([0-9][0-9]*\)\.'`" -ge 11 && echo 1),1)
HAVE_GETDELIM = YesPlease
endif
NO_INET_NTOP = YesPlease
NO_POSIX_GOODIES = UnfortunatelyYes
DEFAULT_HELP_FORMAT = html
- COMPAT_CFLAGS += -D__USE_MINGW_ACCESS -D_USE_32BIT_TIME_T -DNOGDI -Icompat -Icompat/win32
+ COMPAT_CFLAGS += -DNOGDI -Icompat -Icompat/win32
COMPAT_CFLAGS += -DSTRIP_EXTENSION=\".exe\"
COMPAT_OBJS += compat/mingw.o compat/winansi.o \
compat/win32/pthread.o compat/win32/syslog.o \
compat/win32/dirent.o
BASIC_CFLAGS += -DPROTECT_NTFS_DEFAULT=1
- BASIC_LDFLAGS += -Wl,--large-address-aware
EXTLIBS += -lws2_32
GITLIBS += git.res
PTHREAD_LIBS =
INTERNAL_QSORT = YesPlease
HAVE_LIBCHARSET_H = YesPlease
NO_GETTEXT = YesPlease
+ COMPAT_CFLAGS += -D__USE_MINGW_ACCESS
else
- NO_CURL = YesPlease
+ ifeq ($(shell expr "$(uname_R)" : '2\.'),2)
+ # MSys2
+ prefix = /usr/
+ ifeq (MINGW32,$(MSYSTEM))
+ prefix = /mingw32
+ endif
+ ifeq (MINGW64,$(MSYSTEM))
+ prefix = /mingw64
+ else
+ COMPAT_CFLAGS += -D_USE_32BIT_TIME_T
+ BASIC_LDFLAGS += -Wl,--large-address-aware
+ endif
+ CC = gcc
+ COMPAT_CFLAGS += -D__USE_MINGW_ANSI_STDIO=0
+ INSTALL = /bin/install
+ NO_R_TO_GCC_LINKER = YesPlease
+ INTERNAL_QSORT = YesPlease
+ HAVE_LIBCHARSET_H = YesPlease
+ NO_GETTEXT =
+ USE_GETTEXT_SCHEME = fallthrough
+ USE_LIBPCRE= YesPlease
+ NO_CURL =
+ USE_NED_ALLOCATOR = YesPlease
+ else
+ COMPAT_CFLAGS += -D__USE_MINGW_ANSI_STDIO
+ NO_CURL = YesPlease
+ endif
endif
endif
ifeq ($(uname_S),QNX)
port = "<none>";
memset(&hints, 0, sizeof(hints));
+ if (flags & CONNECT_IPV4)
+ hints.ai_family = AF_INET;
+ else if (flags & CONNECT_IPV6)
+ hints.ai_family = AF_INET6;
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = IPPROTO_TCP;
}
argv_array_push(&conn->args, ssh);
+ if (flags & CONNECT_IPV4)
+ argv_array_push(&conn->args, "-4");
+ else if (flags & CONNECT_IPV6)
+ argv_array_push(&conn->args, "-6");
if (tortoiseplink)
argv_array_push(&conn->args, "-batch");
if (port) {
#define CONNECT_VERBOSE (1u << 0)
#define CONNECT_DIAG_URL (1u << 1)
+#define CONNECT_IPV4 (1u << 2)
+#define CONNECT_IPV6 (1u << 3)
extern struct child_process *git_connect(int fd[2], const char *url, const char *prog, int flags);
extern int finish_connect(struct child_process *conn);
extern int git_connection_is_socket(struct child_process *conn);
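A hedged sketch of how the new flags are meant to be used: they are simply OR'ed into git_connect()'s existing flags argument, like CONNECT_VERBOSE; the wrapper below is hypothetical:

	/* hypothetical helper: ask for an IPv4-only connection */
	static struct child_process *connect_ipv4_only(int fd[2], const char *url)
	{
		return git_connect(fd, url, "git-upload-pack", CONNECT_IPV4);
	}
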
--full-name --line-number
--extended-regexp --basic-regexp --fixed-strings
--perl-regexp
+ --threads
--files-with-matches --name-only
--files-without-match
--max-depth
return
;;
branch.*.rebase)
- __gitcomp "false true"
+ __gitcomp "false true preserve interactive"
return
;;
remote.pushdefault)
core.sparseCheckout
core.symlinks
core.trustctime
+ core.untrackedCache
core.warnAmbiguousRefs
core.whitespace
core.worktree
if test "$templatefile" != ""
then
# Test whether this is just the unaltered template.
- if cnt=`sed -e '/^#/d' < "$templatefile" |
+ if cnt=$(sed -e '/^#/d' < "$templatefile" |
git stripspace |
diff "$GIT_DIR"/COMMIT_BAREMSG - |
- wc -l` &&
+ wc -l) &&
test 0 -lt $cnt
then
have_commitmsg=t
fi
if test -z "$quiet"
then
- commit=`git diff-tree --always --shortstat --pretty="format:%h: %s"\
- --abbrev --summary --root HEAD --`
+ commit=$(git diff-tree --always --shortstat --pretty="format:%h: %s"\
+ --abbrev --summary --root HEAD --)
echo "Created${initial_commit:+ initial} commit $commit"
fi
fi
reflist=$(get_remote_refs_for_fetch "$@")
if test "$tags"
then
- taglist=`IFS=' ' &&
+ taglist=$(IFS=' ' &&
echo "$ls_remote_result" |
git show-ref --exclude-existing=refs/tags/ |
while read sha1 name
do
echo ".${name}:${name}"
- done` || exit
+ done) || exit
if test "$#" -gt 1
then
# remote URL plus explicit refspecs; we need to merge them.
if test "$exit" -eq 1
then
- cnt=`{
+ cnt=$({
git diff-files --name-only
git ls-files --unmerged
- } | wc -l`
+ } | wc -l)
if test $best_cnt -le 0 || test $cnt -le $best_cnt
then
best_strategy=$strategy
,t,)
args= existing=
if [ -d "$PACKDIR" ]; then
- for e in `cd "$PACKDIR" && find . -type f -name '*.pack' \
- | sed -e 's/^\.\///' -e 's/\.pack$//'`
+ for e in $(cd "$PACKDIR" && find . -type f -name '*.pack' \
+ | sed -e 's/^\.\///' -e 's/\.pack$//')
do
if [ -e "$PACKDIR/$e.keep" ]; then
: keep
}'
logmsg=$(git show -s --pretty=raw --encoding="$encoding" "$commit")
- set_author_env=`echo "$logmsg" |
- LANG=C LC_ALL=C sed -ne "$pick_author_script"`
+ set_author_env=$(echo "$logmsg" |
+ LANG=C LC_ALL=C sed -ne "$pick_author_script")
eval "$set_author_env"
export GIT_AUTHOR_NAME
export GIT_AUTHOR_EMAIL
esac >.msg
eval GITHEAD_$head=HEAD
-eval GITHEAD_$next='`git show -s \
+eval GITHEAD_$next='$(git show -s \
--pretty=oneline --encoding="$encoding" "$commit" |
- sed -e "s/^[^ ]* //"`'
+ sed -e "s/^[^ ]* //")'
export GITHEAD_$head GITHEAD_$next
# This three way merge is an interesting one. We are at
GIT_SUBTREE_XML := git-subtree.xml
GIT_SUBTREE_TXT := git-subtree.txt
GIT_SUBTREE_HTML := git-subtree.html
+GIT_SUBTREE_TEST := ../../git-subtree
all:: $(GIT_SUBTREE)
$(ASCIIDOC) -b xhtml11 -d manpage -f $(ASCIIDOC_CONF) \
-agit_version=$(GIT_VERSION) $^
-test:
+$(GIT_SUBTREE_TEST): $(GIT_SUBTREE)
+ cp $< $@
+
+test: $(GIT_SUBTREE_TEST)
$(MAKE) -C t/ test
clean:
case "$a" in
START) sq="$b" ;;
git-subtree-mainline:) main="$b" ;;
- git-subtree-split:) sub="$b" ;;
+ git-subtree-split:)
+ sub="$(git rev-parse "$b^0")" ||
+ die "could not rev-parse split hash $b from commit $sq"
+ ;;
END)
if [ -n "$sub" ]; then
if [ -n "$main" ]; then
case "$a" in
START) sq="$b" ;;
git-subtree-mainline:) main="$b" ;;
- git-subtree-split:) sub="$b" ;;
+ git-subtree-split:)
+ sub="$(git rev-parse "$b^0")" ||
+ die "could not rev-parse split hash $b from commit $sq"
+ ;;
END)
debug " Main is: '$main'"
if [ -z "$main" -a -n "$sub" ]; then
CMT_MSG=$(sed -e '1,/^$/d' -e '/^---$/,$d' "${PATCH}")
DIFF=$(sed -e '1,/^---$/d' "${PATCH}")
-CCS=`echo -e "$CMT_MSG\n$HEADERS" | sed -n -e 's/^Cc: \(.*\)$/\1,/gp' \
- -e 's/^Signed-off-by: \(.*\)/\1,/gp'`
+CCS=$(echo -e "$CMT_MSG\n$HEADERS" | sed -n -e 's/^Cc: \(.*\)$/\1,/gp' \
+ -e 's/^Signed-off-by: \(.*\)/\1,/gp')
echo "$SUBJECT" > $1
echo "Cc: $CCS" >> $1
* translation when the "text" attribute or "auto_crlf" option is set.
*/
+/* Stat bits: When BIN is set, the txt bits are unset */
+#define CONVERT_STAT_BITS_TXT_LF 0x1
+#define CONVERT_STAT_BITS_TXT_CRLF 0x2
+#define CONVERT_STAT_BITS_BIN 0x4
+
enum crlf_action {
- CRLF_GUESS = -1,
- CRLF_BINARY = 0,
+ CRLF_UNDEFINED,
+ CRLF_BINARY,
CRLF_TEXT,
- CRLF_INPUT,
- CRLF_CRLF,
- CRLF_AUTO
+ CRLF_TEXT_INPUT,
+ CRLF_TEXT_CRLF,
+ CRLF_AUTO,
+ CRLF_AUTO_INPUT,
+ CRLF_AUTO_CRLF
};
struct text_stat {
/* NUL, CR, LF and CRLF counts */
- unsigned nul, cr, lf, crlf;
+ unsigned nul, lonecr, lonelf, crlf;
/* These are just approximations! */
unsigned printable, nonprintable;
for (i = 0; i < size; i++) {
unsigned char c = buf[i];
if (c == '\r') {
- stats->cr++;
- if (i+1 < size && buf[i+1] == '\n')
+ if (i+1 < size && buf[i+1] == '\n') {
stats->crlf++;
+ i++;
+ } else
+ stats->lonecr++;
continue;
}
if (c == '\n') {
- stats->lf++;
+ stats->lonelf++;
continue;
}
if (c == 127)
/*
* The same heuristics as diff.c::mmfile_is_binary()
+ * We treat files with bare CR as binary
*/
-static int is_binary(unsigned long size, struct text_stat *stats)
+static int convert_is_binary(unsigned long size, const struct text_stat *stats)
{
-
+ if (stats->lonecr)
+ return 1;
if (stats->nul)
return 1;
if ((stats->printable >> 7) < stats->nonprintable)
return 1;
- /*
- * Other heuristics? Average line length might be relevant,
- * as might LF vs CR vs CRLF counts..
- *
- * NOTE! It might be normal to have a low ratio of CRLF to LF
- * (somebody starts with a LF-only file and edits it with an editor
- * that adds CRLF only to lines that are added..). But do we
- * want to support CR-only? Probably not.
- */
+ return 0;
+}
+
+static unsigned int gather_convert_stats(const char *data, unsigned long size)
+{
+ struct text_stat stats;
+ int ret = 0;
+ if (!data || !size)
+ return 0;
+ gather_stats(data, size, &stats);
+ if (convert_is_binary(size, &stats))
+ ret |= CONVERT_STAT_BITS_BIN;
+ if (stats.crlf)
+ ret |= CONVERT_STAT_BITS_TXT_CRLF;
+ if (stats.lonelf)
+ ret |= CONVERT_STAT_BITS_TXT_LF;
+
+ return ret;
+}
+
+static const char *gather_convert_stats_ascii(const char *data, unsigned long size)
+{
+ unsigned int convert_stats = gather_convert_stats(data, size);
+
+ if (convert_stats & CONVERT_STAT_BITS_BIN)
+ return "-text";
+ switch (convert_stats) {
+ case CONVERT_STAT_BITS_TXT_LF:
+ return "lf";
+ case CONVERT_STAT_BITS_TXT_CRLF:
+ return "crlf";
+ case CONVERT_STAT_BITS_TXT_LF | CONVERT_STAT_BITS_TXT_CRLF:
+ return "mixed";
+ default:
+ return "none";
+ }
+}
+
+const char *get_cached_convert_stats_ascii(const char *path)
+{
+ const char *ret;
+ unsigned long sz;
+ void *data = read_blob_data_from_cache(path, &sz);
+ ret = gather_convert_stats_ascii(data, sz);
+ free(data);
+ return ret;
+}
+
+const char *get_wt_convert_stats_ascii(const char *path)
+{
+ const char *ret = "";
+ struct strbuf sb = STRBUF_INIT;
+ if (strbuf_read_file(&sb, path, 0) >= 0)
+ ret = gather_convert_stats_ascii(sb.buf, sb.len);
+ strbuf_release(&sb);
+ return ret;
+}
+
+static int text_eol_is_crlf(void)
+{
+ if (auto_crlf == AUTO_CRLF_TRUE)
+ return 1;
+ else if (auto_crlf == AUTO_CRLF_INPUT)
+ return 0;
+ if (core_eol == EOL_CRLF)
+ return 1;
+ if (core_eol == EOL_UNSET && EOL_NATIVE == EOL_CRLF)
+ return 1;
return 0;
}
switch (crlf_action) {
case CRLF_BINARY:
return EOL_UNSET;
- case CRLF_CRLF:
+ case CRLF_TEXT_CRLF:
return EOL_CRLF;
- case CRLF_INPUT:
+ case CRLF_TEXT_INPUT:
return EOL_LF;
- case CRLF_GUESS:
- if (!auto_crlf)
- return EOL_UNSET;
- /* fall through */
+ case CRLF_UNDEFINED:
+ case CRLF_AUTO_CRLF:
+ case CRLF_AUTO_INPUT:
case CRLF_TEXT:
case CRLF_AUTO:
- if (auto_crlf == AUTO_CRLF_TRUE)
- return EOL_CRLF;
- else if (auto_crlf == AUTO_CRLF_INPUT)
- return EOL_LF;
- else if (core_eol == EOL_UNSET)
- return EOL_NATIVE;
+ /* fall through */
+ return text_eol_is_crlf() ? EOL_CRLF : EOL_LF;
}
+ warning("Illegal crlf_action %d\n", (int)crlf_action);
return core_eol;
}
* CRLFs would be added by checkout:
* check if we have "naked" LFs
*/
- if (stats->lf != stats->crlf) {
+ if (stats->lonelf) {
if (checksafe == SAFE_CRLF_WARN)
warning("LF will be replaced by CRLF in %s.\nThe file will have its original line endings in your working directory.", path);
else /* i.e. SAFE_CRLF_FAIL */
char *dst;
if (crlf_action == CRLF_BINARY ||
- (crlf_action == CRLF_GUESS && auto_crlf == AUTO_CRLF_FALSE) ||
(src && !len))
return 0;
gather_stats(src, len, &stats);
- if (crlf_action == CRLF_AUTO || crlf_action == CRLF_GUESS) {
- /*
- * We're currently not going to even try to convert stuff
- * that has bare CR characters. Does anybody do that crazy
- * stuff?
- */
- if (stats.cr != stats.crlf)
- return 0;
-
- /*
- * And add some heuristics for binary vs text, of course...
- */
- if (is_binary(len, &stats))
+ if (crlf_action == CRLF_AUTO || crlf_action == CRLF_AUTO_INPUT || crlf_action == CRLF_AUTO_CRLF) {
+ if (convert_is_binary(len, &stats))
return 0;
- if (crlf_action == CRLF_GUESS) {
+ if (crlf_action == CRLF_AUTO_INPUT || crlf_action == CRLF_AUTO_CRLF) {
/*
* If the file in the index has any CR in it, do not convert.
* This is the new safer autocrlf handling.
check_safe_crlf(path, crlf_action, &stats, checksafe);
- /* Optimization: No CR? Nothing to convert, regardless. */
- if (!stats.cr)
+ /* Optimization: No CRLF? Nothing to convert, regardless. */
+ if (!stats.crlf)
return 0;
/*
if (strbuf_avail(buf) + buf->len < len)
strbuf_grow(buf, len - buf->len);
dst = buf->buf;
- if (crlf_action == CRLF_AUTO || crlf_action == CRLF_GUESS) {
+ if (crlf_action == CRLF_AUTO || crlf_action == CRLF_AUTO_INPUT || crlf_action == CRLF_AUTO_CRLF) {
/*
* If we guessed, we already know we rejected a file with
* lone CR, and we can strip a CR without looking at what
gather_stats(src, len, &stats);
- /* No LF? Nothing to convert, regardless. */
- if (!stats.lf)
+ /* No "naked" LF? Nothing to convert, regardless. */
+ if (!stats.lonelf)
return 0;
- /* Was it already in CRLF format? */
- if (stats.lf == stats.crlf)
- return 0;
-
- if (crlf_action == CRLF_AUTO || crlf_action == CRLF_GUESS) {
- if (crlf_action == CRLF_GUESS) {
+ if (crlf_action == CRLF_AUTO || crlf_action == CRLF_AUTO_INPUT || crlf_action == CRLF_AUTO_CRLF) {
+ if (crlf_action == CRLF_AUTO_INPUT || crlf_action == CRLF_AUTO_CRLF) {
/* If we have any CR or CRLF line endings, we do not touch it */
/* This is the new safer autocrlf-handling */
- if (stats.cr > 0 || stats.crlf > 0)
+ if (stats.lonecr || stats.crlf)
return 0;
}
- /* If we have any bare CR characters, we're not going to touch it */
- if (stats.cr != stats.crlf)
- return 0;
-
- if (is_binary(len, &stats))
+ if (convert_is_binary(len, &stats))
return 0;
}
if (src == buf->buf)
to_free = strbuf_detach(buf, NULL);
- strbuf_grow(buf, len + stats.lf - stats.crlf);
+ strbuf_grow(buf, len + stats.lonelf);
for (;;) {
const char *nl = memchr(src, '\n', len);
if (!nl)
struct async async;
struct filter_params params;
- if (!cmd)
+ if (!cmd || !*cmd)
return 0;
if (!dst)
return 1;
}
-static enum crlf_action git_path_check_crlf(const char *path, struct git_attr_check *check)
+static enum crlf_action git_path_check_crlf(struct git_attr_check *check)
{
const char *value = check->value;
else if (ATTR_UNSET(value))
;
else if (!strcmp(value, "input"))
- return CRLF_INPUT;
+ return CRLF_TEXT_INPUT;
else if (!strcmp(value, "auto"))
return CRLF_AUTO;
- return CRLF_GUESS;
+ return CRLF_UNDEFINED;
}
-static enum eol git_path_check_eol(const char *path, struct git_attr_check *check)
+static enum eol git_path_check_eol(struct git_attr_check *check)
{
const char *value = check->value;
return EOL_UNSET;
}
-static struct convert_driver *git_path_check_convert(const char *path,
- struct git_attr_check *check)
+static struct convert_driver *git_path_check_convert(struct git_attr_check *check)
{
const char *value = check->value;
struct convert_driver *drv;
return NULL;
}
-static int git_path_check_ident(const char *path, struct git_attr_check *check)
+static int git_path_check_ident(struct git_attr_check *check)
{
const char *value = check->value;
return !!ATTR_TRUE(value);
}
-static enum crlf_action input_crlf_action(enum crlf_action text_attr, enum eol eol_attr)
-{
- if (text_attr == CRLF_BINARY)
- return CRLF_BINARY;
- if (eol_attr == EOL_LF)
- return CRLF_INPUT;
- if (eol_attr == EOL_CRLF)
- return CRLF_CRLF;
- return text_attr;
-}
-
struct conv_attrs {
struct convert_driver *drv;
- enum crlf_action crlf_action;
- enum eol eol_attr;
+ enum crlf_action attr_action; /* What attr says */
+ enum crlf_action crlf_action; /* When no attr is set, use core.autocrlf */
int ident;
};
}
if (!git_check_attr(path, NUM_CONV_ATTRS, ccheck)) {
- ca->crlf_action = git_path_check_crlf(path, ccheck + 4);
- if (ca->crlf_action == CRLF_GUESS)
- ca->crlf_action = git_path_check_crlf(path, ccheck + 0);
- ca->ident = git_path_check_ident(path, ccheck + 1);
- ca->drv = git_path_check_convert(path, ccheck + 2);
- ca->eol_attr = git_path_check_eol(path, ccheck + 3);
+ ca->crlf_action = git_path_check_crlf(ccheck + 4);
+ if (ca->crlf_action == CRLF_UNDEFINED)
+ ca->crlf_action = git_path_check_crlf(ccheck + 0);
+ ca->attr_action = ca->crlf_action;
+ ca->ident = git_path_check_ident(ccheck + 1);
+ ca->drv = git_path_check_convert(ccheck + 2);
+ if (ca->crlf_action != CRLF_BINARY) {
+ enum eol eol_attr = git_path_check_eol(ccheck + 3);
+ if (eol_attr == EOL_LF)
+ ca->crlf_action = CRLF_TEXT_INPUT;
+ else if (eol_attr == EOL_CRLF)
+ ca->crlf_action = CRLF_TEXT_CRLF;
+ }
+ ca->attr_action = ca->crlf_action;
} else {
ca->drv = NULL;
- ca->crlf_action = CRLF_GUESS;
- ca->eol_attr = EOL_UNSET;
+ ca->crlf_action = CRLF_UNDEFINED;
ca->ident = 0;
}
+ if (ca->crlf_action == CRLF_TEXT)
+ ca->crlf_action = text_eol_is_crlf() ? CRLF_TEXT_CRLF : CRLF_TEXT_INPUT;
+ if (ca->crlf_action == CRLF_UNDEFINED && auto_crlf == AUTO_CRLF_FALSE)
+ ca->crlf_action = CRLF_BINARY;
+ if (ca->crlf_action == CRLF_UNDEFINED && auto_crlf == AUTO_CRLF_TRUE)
+ ca->crlf_action = CRLF_AUTO_CRLF;
+ if (ca->crlf_action == CRLF_UNDEFINED && auto_crlf == AUTO_CRLF_INPUT)
+ ca->crlf_action = CRLF_AUTO_INPUT;
}
int would_convert_to_git_filter_fd(const char *path)
return apply_filter(path, NULL, 0, -1, NULL, ca.drv->clean);
}
+const char *get_convert_attr_ascii(const char *path)
+{
+ struct conv_attrs ca;
+
+ convert_attrs(&ca, path);
+ switch (ca.attr_action) {
+ case CRLF_UNDEFINED:
+ return "";
+ case CRLF_BINARY:
+ return "-text";
+ case CRLF_TEXT:
+ return "text";
+ case CRLF_TEXT_INPUT:
+ return "text eol=lf";
+ case CRLF_TEXT_CRLF:
+ return "text eol=crlf";
+ case CRLF_AUTO:
+ return "text=auto";
+ case CRLF_AUTO_CRLF:
+ return "text=auto eol=crlf"; /* This is not supported yet */
+ case CRLF_AUTO_INPUT:
+ return "text=auto eol=lf"; /* This is not supported yet */
+ }
+ return "";
+}
+
int convert_to_git(const char *path, const char *src, size_t len,
struct strbuf *dst, enum safe_crlf checksafe)
{
src = dst->buf;
len = dst->len;
}
- ca.crlf_action = input_crlf_action(ca.crlf_action, ca.eol_attr);
ret |= crlf_to_git(path, src, len, dst, ca.crlf_action, checksafe);
if (ret && dst) {
src = dst->buf;
if (!apply_filter(path, NULL, 0, fd, dst, ca.drv->clean))
die("%s: clean filter '%s' failed", path, ca.drv->name);
- ca.crlf_action = input_crlf_action(ca.crlf_action, ca.eol_attr);
crlf_to_git(path, dst->buf, dst->len, dst, ca.crlf_action, checksafe);
ident_to_git(path, dst->buf, dst->len, dst, ca.ident);
}
* is a smudge filter. The filter might expect CRLFs.
*/
if (filter || !normalizing) {
- ca.crlf_action = input_crlf_action(ca.crlf_action, ca.eol_attr);
ret |= crlf_to_worktree(path, src, len, dst, ca.crlf_action);
if (ret) {
src = dst->buf;
if (ca.ident)
filter = ident_filter(sha1);
- crlf_action = input_crlf_action(ca.crlf_action, ca.eol_attr);
+ crlf_action = ca.crlf_action;
- if ((crlf_action == CRLF_BINARY) || (crlf_action == CRLF_INPUT) ||
- (crlf_action == CRLF_GUESS && auto_crlf == AUTO_CRLF_FALSE))
+ if ((crlf_action == CRLF_BINARY) ||
+ crlf_action == CRLF_AUTO_INPUT ||
+ (crlf_action == CRLF_TEXT_INPUT))
filter = cascade_filter(filter, &null_filter_singleton);
else if (output_eol(crlf_action) == EOL_CRLF &&
- !(crlf_action == CRLF_AUTO || crlf_action == CRLF_GUESS))
+ !(crlf_action == CRLF_AUTO || crlf_action == CRLF_AUTO_CRLF))
filter = cascade_filter(filter, lf_to_crlf_filter());
return filter;
};
extern enum eol core_eol;
+extern const char *get_cached_convert_stats_ascii(const char *path);
+extern const char *get_wt_convert_stats_ascii(const char *path);
+extern const char *get_convert_attr_ascii(const char *path);
/* returns 1 if *dst was used */
extern int convert_to_git(const char *path, const char *src, size_t len,
static struct strbuf item = STRBUF_INIT;
const char *p;
- strbuf_getline(&item, fh, '\n');
+ strbuf_getline_lf(&item, fh);
if (!skip_prefix(item.buf, "action=", &p))
return error("client sent bogus action line: %s", item.buf);
strbuf_addstr(action, p);
- strbuf_getline(&item, fh, '\n');
+ strbuf_getline_lf(&item, fh);
if (!skip_prefix(item.buf, "timeout=", &p))
return error("client sent bogus timeout line: %s", item.buf);
*timeout = atoi(p);
"users may be able to read your cached credentials. Consider running:\n"
"\n"
" chmod 0700 %s";
-static void check_socket_directory(const char *path)
+static void init_socket_directory(const char *path)
{
struct stat st;
char *path_copy = xstrdup(path);
if (!stat(dir, &st)) {
if (st.st_mode & 077)
die(permissions_advice, dir);
- free(path_copy);
- return;
+ } else {
+ /*
+ * We must be sure to create the directory with the correct mode,
+ * not just chmod it after the fact; otherwise, there is a race
+ * condition in which somebody can chdir to it, sleep, then try to open
+ * our protected socket.
+ */
+ if (safe_create_leading_directories_const(dir) < 0)
+ die_errno("unable to create directories for '%s'", dir);
+ if (mkdir(dir, 0700) < 0)
+ die_errno("unable to mkdir '%s'", dir);
}
- /*
- * We must be sure to create the directory with the correct mode,
- * not just chmod it after the fact; otherwise, there is a race
- * condition in which somebody can chdir to it, sleep, then try to open
- * our protected socket.
- */
- if (safe_create_leading_directories_const(dir) < 0)
- die_errno("unable to create directories for '%s'", dir);
- if (mkdir(dir, 0700) < 0)
- die_errno("unable to mkdir '%s'", dir);
+ if (chdir(dir))
+ /*
+ * We don't actually care what our cwd is; we chdir here just to
+ * be a friendly daemon and avoid tying up our original cwd.
+ * If this fails, it's OK to just continue without that benefit.
+ */
+ ;
+
free(path_copy);
}
if (!socket_path)
usage_with_options(usage, options);
- check_socket_directory(socket_path);
+ if (!is_absolute_path(socket_path))
+ die("socket directory must be an absolute path");
+
+ init_socket_directory(socket_path);
register_tempfile(&socket_file, socket_path);
if (ignore_sighup)
return found_credential;
}
- while (strbuf_getline(&line, fh, '\n') != EOF) {
+ while (strbuf_getline_lf(&line, fh) != EOF) {
credential_from_url(&entry, line.buf);
if (entry.username && entry.password &&
credential_match(c, &entry)) {
{
struct strbuf line = STRBUF_INIT;
- while (strbuf_getline(&line, fp, '\n') != EOF) {
+ while (strbuf_getline_lf(&line, fp) != EOF) {
char *key = line.buf;
char *value = strchr(key, '=');
return;
}
- while (strbuf_getline(&line, fp, '\n') != EOF) {
+ while (strbuf_getline_lf(&line, fp) != EOF) {
logerror("%s", line.buf);
strbuf_setlen(&line, 0);
}
cradle = &blanket->next;
}
-static char **cld_argv;
+static struct argv_array cld_argv = ARGV_ARRAY_INIT;
static void handle(int incoming, struct sockaddr *addr, socklen_t addrlen)
{
struct child_process cld = CHILD_PROCESS_INIT;
#endif
}
- cld.argv = (const char **)cld_argv;
+ cld.argv = cld_argv.argv;
cld.in = incoming;
cld.out = dup(incoming);
write_file(pid_file, "%"PRIuMAX, (uintmax_t) getpid());
/* prepare argv for serving-processes */
- cld_argv = xmalloc(sizeof (char *) * (argc + 2));
- cld_argv[0] = argv[0]; /* git-daemon */
- cld_argv[1] = "--serve";
+ argv_array_push(&cld_argv, argv[0]); /* git-daemon */
+ argv_array_push(&cld_argv, "--serve");
for (i = 1; i < argc; ++i)
- cld_argv[i+1] = argv[i];
- cld_argv[argc+1] = NULL;
+ argv_array_push(&cld_argv, argv[i]);
return serve(&listen_addr, listen_port, cred);
}
struct diff_filespec *alloc_filespec(const char *path)
{
- int namelen = strlen(path);
- struct diff_filespec *spec = xmalloc(sizeof(*spec) + namelen + 1);
+ struct diff_filespec *spec;
- memset(spec, 0, sizeof(*spec));
- spec->path = (char *)(spec + 1);
- memcpy(spec->path, path, namelen+1);
+ FLEXPTR_ALLOC_STR(spec, path, path);
spec->count = 1;
spec->is_binary = -1;
return spec;
static int diff_populate_gitlink(struct diff_filespec *s, int size_only)
{
- int len;
- char *data = xmalloc(100), *dirty = "";
+ struct strbuf buf = STRBUF_INIT;
+ char *dirty = "";
/* Are we looking at the work tree? */
if (s->dirty_submodule)
dirty = "-dirty";
- len = snprintf(data, 100,
- "Subproject commit %s%s\n", sha1_to_hex(s->sha1), dirty);
- s->data = data;
- s->size = len;
- s->should_free = 1;
+ strbuf_addf(&buf, "Subproject commit %s%s\n", sha1_to_hex(s->sha1), dirty);
+ s->size = buf.len;
if (size_only) {
s->data = NULL;
- free(data);
+ strbuf_release(&buf);
+ } else {
+ s->data = strbuf_detach(&buf, NULL);
+ s->should_free = 1;
}
return 0;
}
{
size_t size;
- if (!driver || !driver->textconv) {
+ if (!driver) {
if (!DIFF_FILE_VALID(df)) {
*outbuf = "";
return 0;
return df->size;
}
+ if (!driver->textconv)
+ die("BUG: fill_textconv called with non-textconv driver");
+
if (driver->textconv_cache && df->sha1_valid) {
*outbuf = notes_cache_get(driver->textconv_cache, df->sha1,
&size);
} parent[FLEX_ARRAY];
};
#define combine_diff_path_size(n, l) \
- (sizeof(struct combine_diff_path) + \
- sizeof(struct combine_diff_parent) * (n) + (l) + 1)
+ st_add4(sizeof(struct combine_diff_path), (l), 1, \
+ st_mult(sizeof(struct combine_diff_parent), (n)))
extern void show_combined_diff(struct combine_diff_path *elem, int num_parent,
int dense, struct rev_info *);
extern int index_differs_from(const char *def, int diff_flags);
+/*
+ * Fill the contents of the filespec "df", respecting any textconv defined by
+ * its userdiff driver. The "driver" parameter must come from a
+ * previous call to get_textconv(), and therefore should either be NULL or have
+ * textconv enabled.
+ *
+ * Note that the memory ownership of the resulting buffer depends on whether
+ * the driver field is NULL. If it is, then the memory belongs to the filespec
+ * struct. If it is non-NULL, then "outbuf" points to a newly allocated buffer
+ * that should be freed by the caller.
+ */
extern size_t fill_textconv(struct userdiff_driver *driver,
struct diff_filespec *df,
char **outbuf);
+/*
+ * Look up the userdiff driver for the given filespec, and return it if
+ * and only if it has textconv enabled (otherwise return NULL). The result
+ * can be passed to fill_textconv().
+ */
extern struct userdiff_driver *get_textconv(struct diff_filespec *one);
extern int parse_rename_score(const char **cp_p);
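A brief sketch (hypothetical caller) of the ownership rule spelled out above: the output buffer is only the caller's to free when a textconv driver was actually used:

	static void emit_filespec(struct diff_filespec *df)
	{
		struct userdiff_driver *driver = get_textconv(df);
		char *buf;
		size_t len = fill_textconv(driver, df, &buf);

		write_in_full(1, buf, len);
		if (driver)
			free(buf);	/* non-NULL driver: buf was newly allocated */
	}
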
int osz = 1 << orig->alloc_log2;
int sz = osz << 1;
- new = xmalloc(sizeof(*orig) + sizeof(struct spanhash) * sz);
+ new = xmalloc(st_add(sizeof(*orig),
+ st_mult(sizeof(struct spanhash), sz)));
new->alloc_log2 = orig->alloc_log2 + 1;
new->free = INITIAL_FREE(new->alloc_log2);
memset(new->data, 0, sizeof(struct spanhash) * sz);
int is_text = !diff_filespec_is_binary(one);
i = INITIAL_HASH_SIZE;
- hash = xmalloc(sizeof(*hash) + sizeof(struct spanhash) * (1<<i));
+ hash = xmalloc(st_add(sizeof(*hash),
+ st_mult(sizeof(struct spanhash), 1<<i)));
hash->alloc_log2 = i;
hash->free = INITIAL_FREE(i);
memset(hash->data, 0, sizeof(struct spanhash) * (1<<i));
}
if (pass == 0) {
order_cnt = cnt;
- order = xmalloc(sizeof(*order) * cnt);
+ ALLOC_ARRAY(order, cnt);
}
}
}
if (!q->nr)
return;
- o = xmalloc(sizeof(*o) * q->nr);
+ ALLOC_ARRAY(o, q->nr);
for (i = 0; i < q->nr; i++)
o[i].obj = q->queue[i];
order_objects(orderfile, pair_pathtwo, o, q->nr);
rename_dst_nr * rename_src_nr, 50, 1);
}
- mx = xcalloc(num_create * NUM_CANDIDATE_PER_DST, sizeof(*mx));
+ mx = xcalloc(st_mult(num_create, NUM_CANDIDATE_PER_DST), sizeof(*mx));
for (dst_cnt = i = 0; i < rename_dst_nr; i++) {
struct diff_filespec *two = rename_dst[i].two;
struct diff_score *m;
int check_only, const struct path_simplify *simplify);
static int get_dtype(struct dirent *de, const char *path, int len);
+static struct trace_key trace_exclude = TRACE_KEY_INIT(EXCLUDE);
+
/* helper string functions with support for the ignore_case flag */
int strcmp_icase(const char *a, const char *b)
{
parse_exclude_pattern(&string, &patternlen, &flags, &nowildcardlen);
if (flags & EXC_FLAG_MUSTBEDIR) {
- char *s;
- x = xmalloc(sizeof(*x) + patternlen + 1);
- s = (char *)(x+1);
- memcpy(s, string, patternlen);
- s[patternlen] = '\0';
- x->pattern = s;
+ FLEXPTR_ALLOC_MEM(x, pattern, string, patternlen);
} else {
x = xmalloc(sizeof(*x));
x->pattern = string;
x->baselen = baselen;
x->flags = flags;
x->srcpos = srcpos;
+ string_list_init(&x->sticky_paths, 1);
ALLOC_GROW(el->excludes, el->nr + 1, el->alloc);
el->excludes[el->nr++] = x;
x->el = el;
{
int i;
- for (i = 0; i < el->nr; i++)
+ for (i = 0; i < el->nr; i++) {
+ string_list_clear(&el->excludes[i]->sticky_paths, 0);
free(el->excludes[i]);
+ }
free(el->excludes);
free(el->filebuf);
}
uc->dir_created++;
- d = xmalloc(sizeof(*d) + len + 1);
- memset(d, 0, sizeof(*d));
- memcpy(d->name, name, len);
- d->name[len] = '\0';
+ FLEX_ALLOC_MEM(d, name, name, len);
ALLOC_GROW(dir->dirs, dir->dirs_nr + 1, dir->dirs_alloc);
memmove(dir->dirs + first + 1, dir->dirs + first,
return 0;
}
if (buf[size-1] != '\n') {
- buf = xrealloc(buf, size+1);
+ buf = xrealloc(buf, st_add(size, 1));
buf[size++] = '\n';
}
} else {
close(fd);
return 0;
}
- buf = xmalloc(size+1);
+ buf = xmallocz(size);
if (read_in_full(fd, buf, size) != size) {
free(buf);
close(fd);
* then our prefix match is all we need; we
* do not need to call fnmatch at all.
*/
- if (!patternlen && !namelen)
+ if (!patternlen && (!namelen || *name == '/'))
return 1;
}
WM_PATHNAME) == 0;
}
+static void add_sticky(struct exclude *exc, const char *pathname, int pathlen)
+{
+ struct strbuf sb = STRBUF_INIT;
+ int i;
+
+ for (i = exc->sticky_paths.nr - 1; i >= 0; i--) {
+ const char *sticky = exc->sticky_paths.items[i].string;
+ int len = strlen(sticky);
+
+ if (pathlen < len && sticky[pathlen] == '/' &&
+ !strncmp(pathname, sticky, pathlen))
+ return;
+ }
+
+ strbuf_add(&sb, pathname, pathlen);
+ string_list_append_nodup(&exc->sticky_paths, strbuf_detach(&sb, NULL));
+}
+
+static int match_sticky(struct exclude *exc, const char *pathname, int pathlen, int dtype)
+{
+ int i;
+
+ for (i = exc->sticky_paths.nr - 1; i >= 0; i--) {
+ const char *sticky = exc->sticky_paths.items[i].string;
+ int len = strlen(sticky);
+
+ if (pathlen == len && dtype == DT_DIR &&
+ !strncmp(pathname, sticky, len))
+ return 1;
+
+ if (pathlen > len && pathname[len] == '/' &&
+ !strncmp(pathname, sticky, len))
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int different_decisions(const struct exclude *a,
+ const struct exclude *b)
+{
+ return (a->flags & EXC_FLAG_NEGATIVE) != (b->flags & EXC_FLAG_NEGATIVE);
+}
+
+/*
+ * Return non-zero if pathname is a directory and an ancestor of the
+ * literal path in a pattern.
+ */
+static int match_directory_part(const char *pathname, int pathlen,
+ int *dtype, struct exclude *x)
+{
+ const char *base = x->base;
+ int baselen = x->baselen ? x->baselen - 1 : 0;
+ const char *pattern = x->pattern;
+ int prefix = x->nowildcardlen;
+ int patternlen = x->patternlen;
+
+ if (*dtype == DT_UNKNOWN)
+ *dtype = get_dtype(NULL, pathname, pathlen);
+ if (*dtype != DT_DIR)
+ return 0;
+
+ if (*pattern == '/') {
+ pattern++;
+ patternlen--;
+ prefix--;
+ }
+
+ if (baselen) {
+ if (((pathlen < baselen && base[pathlen] == '/') ||
+ pathlen == baselen) &&
+ !strncmp_icase(pathname, base, pathlen))
+ return 1;
+ pathname += baselen + 1;
+ pathlen -= baselen + 1;
+ }
+
+ if (prefix &&
+ (((pathlen < prefix && pattern[pathlen] == '/') ||
+ pathlen == prefix) &&
+ !strncmp_icase(pathname, pattern, pathlen)))
+ return 1;
+
+ return 0;
+}
+
+static struct exclude *should_descend(const char *pathname, int pathlen,
+ int *dtype, struct exclude_list *el,
+ struct exclude *exc)
+{
+ int i;
+
+ for (i = el->nr - 1; 0 <= i; i--) {
+ struct exclude *x = el->excludes[i];
+
+ if (x == exc)
+ break;
+
+ if (!(x->flags & EXC_FLAG_NODIR) &&
+ different_decisions(x, exc) &&
+ match_directory_part(pathname, pathlen, dtype, x))
+ return x;
+ }
+ return NULL;
+}
+
/*
* Scan the given exclude list in reverse to see whether pathname
* should be ignored. The first match (i.e. the last on the list), if
struct exclude_list *el)
{
struct exclude *exc = NULL; /* undecided */
- int i;
+ int i, maybe_descend = 0;
if (!el->nr)
return NULL; /* undefined */
+ trace_printf_key(&trace_exclude, "exclude: from %s\n", el->src);
+
for (i = el->nr - 1; 0 <= i; i--) {
struct exclude *x = el->excludes[i];
const char *exclude = x->pattern;
int prefix = x->nowildcardlen;
+ if (!maybe_descend && i < el->nr - 1 &&
+ different_decisions(x, el->excludes[i+1]))
+ maybe_descend = 1;
+
+ if (x->sticky_paths.nr) {
+ if (*dtype == DT_UNKNOWN)
+ *dtype = get_dtype(NULL, pathname, pathlen);
+ if (match_sticky(x, pathname, pathlen, *dtype)) {
+ exc = x;
+ break;
+ }
+ continue;
+ }
+
if (x->flags & EXC_FLAG_MUSTBEDIR) {
if (*dtype == DT_UNKNOWN)
*dtype = get_dtype(NULL, pathname, pathlen);
break;
}
}
+
+ if (!exc) {
+ trace_printf_key(&trace_exclude, "exclude: %.*s => n/a\n",
+ pathlen, pathname);
+ return NULL;
+ }
+
+ /*
+ * We have found a matching pattern "exc" that may exclude a whole
+ * directory. We also found that there may be a pattern that matches
+ * something inside the directory and reincludes stuff.
+ *
+ * Go through the patterns again, find that pattern and double check.
+ * If it's true, return "undecided" and keep descending in. "exc" is
+ * marked sticky so that it continues to match inside the directory.
+ */
+ if (!(exc->flags & EXC_FLAG_NEGATIVE) && maybe_descend) {
+ struct exclude *x;
+
+ if (*dtype == DT_UNKNOWN)
+ *dtype = get_dtype(NULL, pathname, pathlen);
+
+ if (*dtype == DT_DIR &&
+ (x = should_descend(pathname, pathlen, dtype, el, exc))) {
+ add_sticky(exc, pathname, pathlen);
+ trace_printf_key(&trace_exclude,
+ "exclude: %.*s vs %s at line %d => %s,"
+ " forced open by %s at line %d => n/a\n",
+ pathlen, pathname, exc->pattern, exc->srcpos,
+ exc->flags & EXC_FLAG_NEGATIVE ? "no" : "yes",
+ x->pattern, x->srcpos);
+ return NULL;
+ }
+ }
+
+ trace_printf_key(&trace_exclude, "exclude: %.*s vs %s at line %d => %s%s\n",
+ pathlen, pathname, exc->pattern, exc->srcpos,
+ exc->flags & EXC_FLAG_NEGATIVE ? "no" : "yes",
+ exc->sticky_paths.nr ? " (stuck)" : "");
return exc;
}
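To illustrate the case the comment above describes, with hypothetical patterns: given an exclude file containing "dir/" followed by "!dir/keep.txt", the positive pattern used to exclude dir/ wholesale before the negative one could ever apply. With this change the traversal notices the later, opposite-decision pattern, records "dir" as a sticky path for the excluding pattern, and keeps descending, so "!dir/keep.txt" can re-include that one file while everything else under dir/ remains excluded.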
{
struct dir_entry *ent;
- ent = xmalloc(sizeof(*ent) + len + 1);
+ FLEX_ALLOC_MEM(ent, name, pathname, len);
ent->len = len;
- memcpy(ent->name, pathname, len);
- ent->name[len] = 0;
return ent;
}
struct cached_dir cdir;
enum path_treatment state, subdir_state, dir_state = path_none;
struct strbuf path = STRBUF_INIT;
+ static int level = 0;
strbuf_add(&path, base, baselen);
+ trace_printf_key(&trace_exclude, "exclude: [%d] enter '%.*s'\n",
+ level++, baselen, base);
+
if (open_cached_dir(&cdir, dir, untracked, &path, check_only))
goto out;
}
close_cached_dir(&cdir);
out:
+ trace_printf_key(&trace_exclude, "exclude: [%d] leave '%.*s'\n",
+ --level, baselen, base);
strbuf_release(&path);
return dir_state;
return sb.buf;
if (uname(&uts) < 0)
die_errno(_("failed to get kernel name and information"));
- strbuf_addf(&sb, "Location %s, system %s %s %s", get_git_work_tree(),
- uts.sysname, uts.release, uts.version);
+ strbuf_addf(&sb, "Location %s, system %s", get_git_work_tree(),
+ uts.sysname);
return sb.buf;
}
static int ident_in_untracked(const struct untracked_cache *uc)
{
- const char *end = uc->ident.buf + uc->ident.len;
- const char *p = uc->ident.buf;
+ /*
+ * Previous git versions may have saved many NUL separated
+ * strings in the "ident" field, but it is insane to manage
+ * many locations, so just take care of the first one.
+ */
- for (p = uc->ident.buf; p < end; p += strlen(p) + 1)
- if (!strcmp(p, get_ident_string()))
- return 1;
- return 0;
+ return !strcmp(uc->ident.buf, get_ident_string());
}
-void add_untracked_ident(struct untracked_cache *uc)
+static void set_untracked_ident(struct untracked_cache *uc)
{
- if (ident_in_untracked(uc))
- return;
+ strbuf_reset(&uc->ident);
strbuf_addstr(&uc->ident, get_ident_string());
- /* this strbuf contains a list of strings, save NUL too */
+
+ /*
+ * This strbuf used to contain a list of NUL separated
+ * strings, so save NUL too for backward compatibility.
+ */
strbuf_addch(&uc->ident, 0);
}
+static void new_untracked_cache(struct index_state *istate)
+{
+ struct untracked_cache *uc = xcalloc(1, sizeof(*uc));
+ strbuf_init(&uc->ident, 100);
+ uc->exclude_per_dir = ".gitignore";
+ /* should be the same flags used by git-status */
+ uc->dir_flags = DIR_SHOW_OTHER_DIRECTORIES | DIR_HIDE_EMPTY_DIRECTORIES;
+ set_untracked_ident(uc);
+ istate->untracked = uc;
+ istate->cache_changed |= UNTRACKED_CHANGED;
+}
+
+void add_untracked_cache(struct index_state *istate)
+{
+ if (!istate->untracked) {
+ new_untracked_cache(istate);
+ } else {
+ if (!ident_in_untracked(istate->untracked)) {
+ free_untracked_cache(istate->untracked);
+ new_untracked_cache(istate);
+ }
+ }
+}
+
+void remove_untracked_cache(struct index_state *istate)
+{
+ if (istate->untracked) {
+ free_untracked_cache(istate->untracked);
+ istate->untracked = NULL;
+ istate->cache_changed |= UNTRACKED_CHANGED;
+ }
+}
+
static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *dir,
int base_len,
const struct pathspec *pathspec)
return NULL;
if (!ident_in_untracked(dir->untracked)) {
- warning(_("Untracked cache is disabled on this system."));
+ warning(_("Untracked cache is disabled on this system or location."));
return NULL;
}
return root;
}
+static void clear_sticky(struct dir_struct *dir)
+{
+ struct exclude_list_group *g;
+ struct exclude_list *el;
+ struct exclude *x;
+ int i, j, k;
+
+ for (i = EXC_CMDL; i <= EXC_FILE; i++) {
+ g = &dir->exclude_list_group[i];
+ for (j = g->nr - 1; j >= 0; j--) {
+ el = &g->el[j];
+ for (k = el->nr - 1; 0 <= k; k--) {
+ x = el->excludes[k];
+ string_list_clear(&x->sticky_paths, 0);
+ }
+ }
+ }
+}
+
int read_directory(struct dir_struct *dir, const char *path, int len, const struct pathspec *pathspec)
{
struct path_simplify *simplify;
if (has_symlink_leading_path(path, len))
return dir->nr;
+ /*
+ * Stay on the safe side. If read_directory() has run once on
+ * "dir", some sticky flags may have been left behind. Clear them all.
+ */
+ clear_sticky(dir);
+
/*
* exclude patterns are treated like positive ones in
* create_simplify. Usually exclude patterns should be a
struct ondisk_untracked_cache *ouc;
struct write_data wd;
unsigned char varbuf[16];
- int len = 0, varint_len;
- if (untracked->exclude_per_dir)
- len = strlen(untracked->exclude_per_dir);
- ouc = xmalloc(sizeof(*ouc) + len + 1);
+ int varint_len;
+ size_t len = strlen(untracked->exclude_per_dir);
+
+ FLEX_ALLOC_MEM(ouc, exclude_per_dir, untracked->exclude_per_dir, len);
stat_data_to_disk(&ouc->info_exclude_stat, &untracked->ss_info_exclude.stat);
stat_data_to_disk(&ouc->excludes_file_stat, &untracked->ss_excludes_file.stat);
hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.sha1);
hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.sha1);
ouc->dir_flags = htonl(untracked->dir_flags);
- memcpy(ouc->exclude_per_dir, untracked->exclude_per_dir, len + 1);
varint_len = encode_varint(untracked->ident.len, varbuf);
strbuf_add(out, varbuf, varint_len);
ud.untracked_alloc = value;
ud.untracked_nr = value;
if (ud.untracked_nr)
- ud.untracked = xmalloc(sizeof(*ud.untracked) * ud.untracked_nr);
+ ALLOC_ARRAY(ud.untracked, ud.untracked_nr);
data = next;
next = data;
ud.dirs_alloc = ud.dirs_nr = decode_varint(&next);
if (next > end)
return -1;
- ud.dirs = xmalloc(sizeof(*ud.dirs) * ud.dirs_nr);
+ ALLOC_ARRAY(ud.dirs, ud.dirs_nr);
data = next;
len = strlen((const char *)data);
next = data + len + 1;
if (next > rd->end)
return -1;
- *untracked_ = untracked = xmalloc(sizeof(*untracked) + len);
+ *untracked_ = untracked = xmalloc(st_add(sizeof(*untracked), len));
memcpy(untracked, &ud, sizeof(ud));
memcpy(untracked->name, data, len + 1);
data = next;
rd.data = next;
rd.end = end;
rd.index = 0;
- rd.ucd = xmalloc(sizeof(*rd.ucd) * len);
+ ALLOC_ARRAY(rd.ucd, len);
if (read_one_dir(&uc->root, &rd) || rd.index != len)
goto done;
/* See Documentation/technical/api-directory-listing.txt */
#include "strbuf.h"
+#include "string-list.h"
struct dir_entry {
unsigned int len;
* and from -1 decrementing for patterns from CLI args.
*/
int srcpos;
+
+ struct string_list sticky_paths;
};
/*
void free_untracked_cache(struct untracked_cache *);
struct untracked_cache *read_untracked_extension(const void *data, unsigned long sz);
void write_untracked_extension(struct strbuf *out, struct untracked_cache *untracked);
-void add_untracked_ident(struct untracked_cache *);
+void add_untracked_cache(struct index_state *istate);
+void remove_untracked_cache(struct index_state *istate);
#endif
static void create_directories(const char *path, int path_len,
const struct checkout *state)
{
- char *buf = xmalloc(path_len + 1);
+ char *buf = xmallocz(path_len);
int len = 0;
while (len < path_len) {
/* Parallel index stat data preload? */
int core_preload_index = 1;
+/*
+ * This is a hack for test programs like test-dump-untracked-cache to
+ * ensure that they do not modify the untracked cache when reading it.
+ * Do not use it otherwise!
+ */
+int ignore_untracked_cache_config;
+
/* This is set by setup_git_dir_gently() and/or git_default_config() */
char *git_work_tree_cfg;
static char *work_tree;
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
-#include "git-compat-util.h"
+#include "cache.h"
#include "ewok.h"
#define EWAH_MASK(x) ((eword_t)1 << (x % BITS_IN_EWORD))
struct bitmap *bitmap_new(void)
{
- struct bitmap *bitmap = ewah_malloc(sizeof(struct bitmap));
- bitmap->words = ewah_calloc(32, sizeof(eword_t));
+ struct bitmap *bitmap = xmalloc(sizeof(struct bitmap));
+ bitmap->words = xcalloc(32, sizeof(eword_t));
bitmap->word_alloc = 32;
return bitmap;
}
if (block >= self->word_alloc) {
size_t old_size = self->word_alloc;
self->word_alloc = block * 2;
- self->words = ewah_realloc(self->words,
- self->word_alloc * sizeof(eword_t));
-
+ REALLOC_ARRAY(self->words, self->word_alloc);
memset(self->words + old_size, 0x0,
(self->word_alloc - old_size) * sizeof(eword_t));
}
ewah_iterator_init(&it, ewah);
while (ewah_iterator_next(&blowup, &it)) {
- if (i >= bitmap->word_alloc) {
- bitmap->word_alloc *= 1.5;
- bitmap->words = ewah_realloc(
- bitmap->words, bitmap->word_alloc * sizeof(eword_t));
- }
-
+ ALLOC_GROW(bitmap->words, i + 1, bitmap->word_alloc);
bitmap->words[i++] = blowup;
}
if (self->word_alloc < other_final) {
self->word_alloc = other_final;
- self->words = ewah_realloc(self->words,
- self->word_alloc * sizeof(eword_t));
+ REALLOC_ARRAY(self->words, self->word_alloc);
memset(self->words + original_size, 0x0,
(self->word_alloc - original_size) * sizeof(eword_t));
}
return;
self->alloc_size = new_size;
- self->buffer = ewah_realloc(self->buffer,
- self->alloc_size * sizeof(eword_t));
+ REALLOC_ARRAY(self->buffer, self->alloc_size);
self->rlw = self->buffer + (rlw_offset / sizeof(eword_t));
}
{
struct ewah_bitmap *self;
- self = ewah_malloc(sizeof(struct ewah_bitmap));
- if (self == NULL)
- return NULL;
-
- self->buffer = ewah_malloc(32 * sizeof(eword_t));
+ self = xmalloc(sizeof(struct ewah_bitmap));
self->alloc_size = 32;
+ ALLOC_ARRAY(self->buffer, self->alloc_size);
ewah_clear(self);
return self;
self->buffer_size = self->alloc_size = get_be32(ptr);
ptr += sizeof(uint32_t);
- self->buffer = ewah_realloc(self->buffer,
- self->alloc_size * sizeof(eword_t));
-
- if (!self->buffer)
- return -1;
+ REALLOC_ARRAY(self->buffer, self->alloc_size);
/*
* Copy the raw data for the bitmap as a whole chunk;
return -1;
self->buffer_size = self->alloc_size = (size_t)ntohl(word_count);
- self->buffer = ewah_realloc(self->buffer,
- self->alloc_size * sizeof(eword_t));
-
- if (!self->buffer)
- return -1;
+ REALLOC_ARRAY(self->buffer, self->alloc_size);
/** 64 bit x N -- compressed words */
buffer = self->buffer;
#ifndef __EWOK_BITMAP_H__
#define __EWOK_BITMAP_H__
-#ifndef ewah_malloc
-# define ewah_malloc xmalloc
-#endif
-#ifndef ewah_realloc
-# define ewah_realloc xrealloc
-#endif
-#ifndef ewah_calloc
-# define ewah_calloc xcalloc
-#endif
-
struct strbuf;
typedef uint64_t eword_t;
#define BITS_IN_EWORD (sizeof(eword_t) * 8)
#include "cache.h"
#include "exec_cmd.h"
#include "quote.h"
+#include "argv-array.h"
#define MAX_ARGS 32
static const char *argv_exec_path;
if (!argv0 || !*argv0)
return NULL;
- slash = argv0 + strlen(argv0);
- while (argv0 <= slash && !is_dir_sep(*slash))
- slash--;
+ slash = find_last_dir_sep(argv0);
- if (slash >= argv0) {
+ if (slash) {
argv0_path = xstrndup(argv0, slash - argv0);
return slash + 1;
}
strbuf_release(&new_path);
}
-const char **prepare_git_cmd(const char **argv)
+const char **prepare_git_cmd(struct argv_array *out, const char **argv)
{
- int argc;
- const char **nargv;
-
- for (argc = 0; argv[argc]; argc++)
- ; /* just counting */
- nargv = xmalloc(sizeof(*nargv) * (argc + 2));
-
- nargv[0] = "git";
- for (argc = 0; argv[argc]; argc++)
- nargv[argc + 1] = argv[argc];
- nargv[argc + 1] = NULL;
- return nargv;
+ argv_array_push(out, "git");
+ argv_array_pushv(out, argv);
+ return out->argv;
}
int execv_git_cmd(const char **argv) {
- const char **nargv = prepare_git_cmd(argv);
- trace_argv_printf(nargv, "trace: exec:");
+ struct argv_array nargv = ARGV_ARRAY_INIT;
+
+ prepare_git_cmd(&nargv, argv);
+ trace_argv_printf(nargv.argv, "trace: exec:");
/* execvp() can only ever return if it fails */
- sane_execvp("git", (char **)nargv);
+ sane_execvp("git", (char **)nargv.argv);
trace_printf("trace: exec failed: %s\n", strerror(errno));
- free(nargv);
+ argv_array_clear(&nargv);
return -1;
}
#ifndef GIT_EXEC_CMD_H
#define GIT_EXEC_CMD_H
+struct argv_array;
+
extern void git_set_argv_exec_path(const char *exec_path);
extern const char *git_extract_argv0_path(const char *path);
extern const char *git_exec_path(void);
extern void setup_path(void);
-extern const char **prepare_git_cmd(const char **argv);
+extern const char **prepare_git_cmd(struct argv_array *out, const char **argv);
extern int execv_git_cmd(const char **argv); /* NULL terminated */
LAST_ARG_MUST_BE_NULL
extern int execl_git_cmd(const char *cmd, ...);
return xmalloc(len);
}
total_allocd += sizeof(struct mem_pool) + mem_pool_alloc;
- p = xmalloc(sizeof(struct mem_pool) + mem_pool_alloc);
+ p = xmalloc(st_add(sizeof(struct mem_pool), mem_pool_alloc));
p->next_pool = mem_pool;
p->next_free = (char *) p->space;
p->end = p->next_free + mem_pool_alloc;
if (!avail_tree_entry) {
unsigned int n = tree_entry_alloc;
total_allocd += n * sizeof(struct tree_entry);
- avail_tree_entry = e = xmalloc(n * sizeof(struct tree_entry));
+ ALLOC_ARRAY(e, n);
+ avail_tree_entry = e;
while (n-- > 1) {
*((void**)e) = e + 1;
e++;
{
static char tmp_file[PATH_MAX];
struct packed_git *p;
- int namelen;
struct pack_header hdr;
int pack_fd;
pack_fd = odb_mkstemp(tmp_file, sizeof(tmp_file),
"pack/tmp_pack_XXXXXX");
- namelen = strlen(tmp_file) + 2;
- p = xcalloc(1, sizeof(*p) + namelen);
- xsnprintf(p->pack_name, namelen, "%s", tmp_file);
+ FLEX_ALLOC_STR(p, pack_name, tmp_file);
p->pack_fd = pack_fd;
p->do_not_close = 1;
pack_file = sha1fd(pack_fd, p->pack_name);
struct object_entry_pool *o;
/* Build the table of object IDs. */
- idx = xmalloc(object_count * sizeof(*idx));
+ ALLOC_ARRAY(idx, object_count);
c = idx;
for (o = blocks; o; o = o->next_pool)
for (e = o->next_free; e-- != o->entries;)
struct recent_command *rc;
strbuf_detach(&command_buf, NULL);
- stdin_eof = strbuf_getline(&command_buf, stdin, '\n');
+ stdin_eof = strbuf_getline_lf(&command_buf, stdin);
if (stdin_eof)
return EOF;
strbuf_detach(&command_buf, NULL);
for (;;) {
- if (strbuf_getline(&command_buf, stdin, '\n') == EOF)
+ if (strbuf_getline_lf(&command_buf, stdin) == EOF)
die("EOF in data (terminator '%s' not found)", term);
if (term_len == command_buf.len
&& !strcmp(term, command_buf.buf))
#include "version.h"
#include "prio-queue.h"
#include "sha1-array.h"
+#include "sigchain.h"
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int sideband_demux(int in, int out, void *data)
{
int *xd = data;
+ int ret;
- int ret = recv_sideband("fetch-pack", xd[0], out);
+ sigchain_push(SIGPIPE, SIG_IGN);
+ ret = recv_sideband("fetch-pack", xd[0], out);
close(out);
+ sigchain_pop(SIGPIPE);
return ret;
}
if (!options->msg_type) {
int i;
- int *msg_type = xmalloc(sizeof(int) * FSCK_MSG_MAX);
+ int *msg_type;
+ ALLOC_ARRAY(msg_type, FSCK_MSG_MAX);
for (i = 0; i < FSCK_MSG_MAX; i++)
msg_type[i] = fsck_msg_type(i, options);
options->msg_type = msg_type;
#define unsigned_add_overflows(a, b) \
((b) > maximum_unsigned_value_of_type(a) - (a))
+/*
+ * Returns true if the multiplication of "a" and "b" will
+ * overflow. The types of "a" and "b" must match and must be unsigned.
+ * Note that this macro evaluates "a" twice!
+ */
+#define unsigned_mult_overflows(a, b) \
+ ((a) && (b) > maximum_unsigned_value_of_type(a) / (a))
+
#ifdef __GNUC__
#define TYPEOF(x) (__typeof__(x))
#else
#define _PATH_DEFPATH "/usr/local/bin:/usr/bin:/bin"
#endif
-#ifndef STRIP_EXTENSION
-#define STRIP_EXTENSION ""
-#endif
-
#ifndef has_dos_drive_prefix
static inline int git_has_dos_drive_prefix(const char *path)
{
#ifdef __GLIBC_PREREQ
#if __GLIBC_PREREQ(2, 1)
#define HAVE_STRCHRNUL
-#define HAVE_MEMPCPY
#endif
#endif
}
#endif
-#ifndef HAVE_MEMPCPY
-#define mempcpy gitmempcpy
-static inline void *gitmempcpy(void *dest, const void *src, size_t n)
-{
- return (char *)memcpy(dest, src, n) + n;
-}
-#endif
-
#ifdef NO_INET_PTON
int inet_pton(int af, const char *src, void *dst);
#endif
typedef void (*try_to_free_t)(size_t);
extern try_to_free_t set_try_to_free_routine(try_to_free_t);
+static inline size_t st_add(size_t a, size_t b)
+{
+ if (unsigned_add_overflows(a, b))
+ die("size_t overflow: %"PRIuMAX" + %"PRIuMAX,
+ (uintmax_t)a, (uintmax_t)b);
+ return a + b;
+}
+#define st_add3(a,b,c) st_add((a),st_add((b),(c)))
+#define st_add4(a,b,c,d) st_add((a),st_add3((b),(c),(d)))
+
+static inline size_t st_mult(size_t a, size_t b)
+{
+ if (unsigned_mult_overflows(a, b))
+ die("size_t overflow: %"PRIuMAX" * %"PRIuMAX,
+ (uintmax_t)a, (uintmax_t)b);
+ return a * b;
+}
+
+static inline size_t st_sub(size_t a, size_t b)
+{
+ if (a < b)
+ die("size_t underflow: %"PRIuMAX" - %"PRIuMAX,
+ (uintmax_t)a, (uintmax_t)b);
+ return a - b;
+}
+
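(Illustration, not part of the patch: the overflow-checking helpers above are meant to wrap every arithmetic step that feeds an allocation size, so a bogus or hostile count dies with "size_t overflow" instead of silently wrapping and producing an undersized buffer. A minimal sketch, with a made-up struct name:)

#include "git-compat-util.h"

struct rec { int key; int val; };

/* room for "nr" records plus one sentinel; dies on size_t overflow */
static struct rec *alloc_records(size_t nr)
{
	return xmalloc(st_mult(sizeof(struct rec), st_add(nr, 1)));
}
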
#ifdef HAVE_ALLOCA_H
# include <alloca.h>
# define xalloca(size) (alloca(size))
extern char *xgetcwd(void);
extern FILE *fopen_for_writing(const char *path);
-#define REALLOC_ARRAY(x, alloc) (x) = xrealloc((x), (alloc) * sizeof(*(x)))
+#define ALLOC_ARRAY(x, alloc) (x) = xmalloc(st_mult(sizeof(*(x)), (alloc)))
+#define REALLOC_ARRAY(x, alloc) (x) = xrealloc((x), st_mult(sizeof(*(x)), (alloc)))
+
+/*
+ * These functions help you allocate structs with flex arrays, and copy
+ * the data directly into the array. For example, if you had:
+ *
+ * struct foo {
+ * int bar;
+ * char name[FLEX_ARRAY];
+ * };
+ *
+ * you can do:
+ *
+ * struct foo *f;
+ * FLEX_ALLOC_MEM(f, name, src, len);
+ *
+ * to allocate a "foo" with the contents of "src" in the "name" field.
+ * The resulting struct is automatically zero'd, and the flex-array field
+ * is NUL-terminated (whether the incoming src buffer was or not).
+ *
+ * The FLEXPTR_* variants operate on structs that don't use flex-arrays,
+ * but do want to store a pointer to some extra data in the same allocated
+ * block. For example, if you have:
+ *
+ * struct foo {
+ * char *name;
+ * int bar;
+ * };
+ *
+ * you can do:
+ *
+ * struct foo *f;
+ * FLEXPTR_ALLOC_STR(f, name, src);
+ *
+ * and "name" will point to a block of memory after the struct, which will be
+ * freed along with the struct (but the pointer can be repointed anywhere).
+ *
+ * The *_STR variants accept a string parameter rather than a ptr/len
+ * combination.
+ *
+ * Note that these macros will evaluate the first parameter multiple
+ * times, and it must be assignable as an lvalue.
+ */
+#define FLEX_ALLOC_MEM(x, flexname, buf, len) do { \
+ (x) = NULL; /* silence -Wuninitialized for offset calculation */ \
+ (x) = xalloc_flex(sizeof(*(x)), (char *)(&((x)->flexname)) - (char *)(x), (buf), (len)); \
+} while (0)
+#define FLEXPTR_ALLOC_MEM(x, ptrname, buf, len) do { \
+ (x) = xalloc_flex(sizeof(*(x)), sizeof(*(x)), (buf), (len)); \
+ (x)->ptrname = (void *)((x)+1); \
+} while(0)
+#define FLEX_ALLOC_STR(x, flexname, str) \
+ FLEX_ALLOC_MEM((x), flexname, (str), strlen(str))
+#define FLEXPTR_ALLOC_STR(x, ptrname, str) \
+ FLEXPTR_ALLOC_MEM((x), ptrname, (str), strlen(str))
+
+static inline void *xalloc_flex(size_t base_len, size_t offset,
+ const void *src, size_t src_len)
+{
+ unsigned char *ret = xcalloc(1, st_add3(base_len, src_len, 1));
+ memcpy(ret + offset, src, src_len);
+ return ret;
+}
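(Illustration, not part of the patch: the pattern these helpers enable is a single allocation covering the struct, the copied string and its NUL terminator; later hunks in this series, such as the name-hash.c dir_entry conversion below, follow it exactly. A minimal sketch with a made-up struct:)

struct dir_ent {
	unsigned int namelen;
	char name[FLEX_ARRAY]; /* struct and name share one allocation */
};

static struct dir_ent *new_dir_ent(const char *name)
{
	struct dir_ent *d;

	FLEX_ALLOC_STR(d, name, name); /* zeroed, NUL-terminated copy */
	d->namelen = strlen(name);
	return d;
}
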
static inline char *xstrdup_or_null(const char *str)
{
if [ $# -eq 0 ]
then
cat <<!
-Usage: `basename $0` git-gui-glossary.txt > git-gui-glossary.pot
+Usage: $(basename $0) git-gui-glossary.txt > git-gui-glossary.pot
!
exit 1;
fi
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
-"POT-Creation-Date: `date +'%Y-%m-%d %H:%M%z'`\n"
+"POT-Creation-Date: $(date +'%Y-%m-%d %H:%M%z')\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
case "$1" in
'')
echo "Added $4 in both, but differently."
- orig=$(git-unpack-file $2)
- create_virtual_base "$orig" "$src2"
+ orig=$(git-unpack-file e69de29bb2d1d6434b8b29ae775ad8c2e48c5391)
;;
*)
echo "Auto-merging $4"
def p4_delete(f):
p4_system(["delete", wildcard_encode(f)])
-def p4_edit(f):
- p4_system(["edit", wildcard_encode(f)])
+def p4_edit(f, *options):
+ p4_system(["edit"] + list(options) + [wildcard_encode(f)])
def p4_revert(f):
p4_system(["revert", wildcard_encode(f)])
diff = read_pipe_lines("git diff-tree -r %s \"%s^\" \"%s\"" % (self.diffOpts, id, id))
filesToAdd = set()
+ filesToChangeType = set()
filesToDelete = set()
editedFiles = set()
pureRenameCopy = set()
os.unlink(dest)
filesToDelete.add(src)
editedFiles.add(dest)
+ elif modifier == "T":
+ filesToChangeType.add(path)
else:
die("unknown modifier %s for %s" % (modifier, path))
#
system(applyPatchCmd)
+ for f in filesToChangeType:
+ p4_edit(f, "-t", "auto")
for f in filesToAdd:
p4_add(f)
for f in filesToDelete:
done
}
+is_tip_reachable () (
+ clear_local_git_env
+ cd "$1" &&
+ rev=$(git rev-list -n 1 "$2" --not --all 2>/dev/null) &&
+ test -z "$rev"
+)
+
+fetch_in_submodule () (
+ clear_local_git_env
+ cd "$1" &&
+ case "$2" in
+ '')
+ git fetch ;;
+ *)
+ git fetch $(get_default_remote) "$2" ;;
+ esac
+)
+
#
# Update each submodule path to correct revision, using clone and checkout as needed
#
then
# Run fetch only if $sha1 isn't present or it
# is not reachable from a ref.
- (clear_local_git_env; cd "$sm_path" &&
- ( (rev=$(git rev-list -n 1 $sha1 --not --all 2>/dev/null) &&
- test -z "$rev") || git-fetch)) ||
+ is_tip_reachable "$sm_path" "$sha1" ||
+ fetch_in_submodule "$sm_path" ||
die "$(eval_gettext "Unable to fetch in submodule path '\$displaypath'")"
+
+ # Now we tried the usual fetch, but $sha1 may
+ # not be reachable from any of the refs
+ is_tip_reachable "$sm_path" "$sha1" ||
+ fetch_in_submodule "$sm_path" "$sha1" ||
+ die "$(eval_gettext "Fetched in submodule path '\$displaypath', but it did not contain $sha1. Direct fetching of that commit failed.")"
fi
# Is this something we just cloned?
GIT_PREFIX_ENVIRONMENT
};
static char *orig_env[4];
-static int saved_environment;
+static int save_restore_env_balance;
-static void save_env(void)
+static void save_env_before_alias(void)
{
int i;
- if (saved_environment)
- return;
- saved_environment = 1;
+
+ assert(save_restore_env_balance == 0);
+ save_restore_env_balance = 1;
orig_cwd = xgetcwd();
for (i = 0; i < ARRAY_SIZE(env_names); i++) {
orig_env[i] = getenv(env_names[i]);
}
}
-static void restore_env(void)
+static void restore_env(int external_alias)
{
int i;
- if (orig_cwd && chdir(orig_cwd))
+
+ assert(save_restore_env_balance == 1);
+ save_restore_env_balance = 0;
+ if (!external_alias && orig_cwd && chdir(orig_cwd))
die_errno("could not move to %s", orig_cwd);
free(orig_cwd);
for (i = 0; i < ARRAY_SIZE(env_names); i++) {
- if (orig_env[i])
+ if (external_alias &&
+ !strcmp(env_names[i], GIT_PREFIX_ENVIRONMENT))
+ continue;
+ if (orig_env[i]) {
setenv(env_names[i], orig_env[i], 1);
- else
+ free(orig_env[i]);
+ } else {
unsetenv(env_names[i]);
+ }
}
}
static int handle_alias(int *argcp, const char ***argv)
{
int envchanged = 0, ret = 0, saved_errno = errno;
- const char *subdir;
int count, option_count;
const char **new_argv;
const char *alias_command;
char *alias_string;
int unused_nongit;
- subdir = setup_git_directory_gently(&unused_nongit);
+ save_env_before_alias();
+ setup_git_directory_gently(&unused_nongit);
alias_command = (*argv)[0];
alias_string = alias_lookup(alias_command);
if (alias_string) {
if (alias_string[0] == '!') {
- const char **alias_argv;
- int argc = *argcp, i;
+ struct child_process child = CHILD_PROCESS_INIT;
commit_pager_choice();
+ restore_env(1);
- /* build alias_argv */
- alias_argv = xmalloc(sizeof(*alias_argv) * (argc + 1));
- alias_argv[0] = alias_string + 1;
- for (i = 1; i < argc; ++i)
- alias_argv[i] = (*argv)[i];
- alias_argv[argc] = NULL;
+ child.use_shell = 1;
+ argv_array_push(&child.args, alias_string + 1);
+ argv_array_pushv(&child.args, (*argv) + 1);
- ret = run_command_v_opt(alias_argv, RUN_USING_SHELL);
+ ret = run_command(&child);
if (ret >= 0) /* normal exit */
exit(ret);
ret = 1;
}
- if (subdir && chdir(subdir))
- die_errno("Cannot change to '%s'", subdir);
+ restore_env(0);
errno = saved_errno;
* RUN_SETUP for reading from the configuration file.
*/
#define NEED_WORK_TREE (1<<3)
-#define NO_SETUP (1<<4)
struct cmd_struct {
const char *cmd;
{ "cherry", cmd_cherry, RUN_SETUP },
{ "cherry-pick", cmd_cherry_pick, RUN_SETUP | NEED_WORK_TREE },
{ "clean", cmd_clean, RUN_SETUP | NEED_WORK_TREE },
- { "clone", cmd_clone, NO_SETUP },
+ { "clone", cmd_clone },
{ "column", cmd_column, RUN_SETUP_GENTLY },
{ "commit", cmd_commit, RUN_SETUP | NEED_WORK_TREE },
{ "commit-tree", cmd_commit_tree, RUN_SETUP },
{ "hash-object", cmd_hash_object },
{ "help", cmd_help },
{ "index-pack", cmd_index_pack, RUN_SETUP_GENTLY },
- { "init", cmd_init_db, NO_SETUP },
- { "init-db", cmd_init_db, NO_SETUP },
+ { "init", cmd_init_db },
+ { "init-db", cmd_init_db },
{ "interpret-trailers", cmd_interpret_trailers, RUN_SETUP_GENTLY },
{ "log", cmd_log, RUN_SETUP },
{ "ls-files", cmd_ls_files, RUN_SETUP },
return !!get_builtin(s);
}
+#ifdef STRIP_EXTENSION
+static void strip_extension(const char **argv)
+{
+ size_t len;
+
+ if (strip_suffix(argv[0], STRIP_EXTENSION, &len))
+ argv[0] = xmemdupz(argv[0], len);
+}
+#else
+#define strip_extension(cmd)
+#endif
+
static void handle_builtin(int argc, const char **argv)
{
- const char *cmd = argv[0];
- int i;
- static const char ext[] = STRIP_EXTENSION;
+ const char *cmd;
struct cmd_struct *builtin;
- if (sizeof(ext) > 1) {
- i = strlen(argv[0]) - strlen(ext);
- if (i > 0 && !strcmp(argv[0] + i, ext)) {
- char *argv0 = xstrdup(argv[0]);
- argv[0] = cmd = argv0;
- argv0[i] = '\0';
- }
- }
+ strip_extension(argv);
+ cmd = argv[0];
/* Turn "git cmd --help" into "git help cmd" */
if (argc > 1 && !strcmp(argv[1], "--help")) {
}
builtin = get_builtin(cmd);
- if (builtin) {
- if (saved_environment && (builtin->option & NO_SETUP))
- restore_env();
- else
- exit(run_builtin(builtin, argc, argv));
- }
+ if (builtin)
+ exit(run_builtin(builtin, argc, argv));
}
static void execv_dashed_external(const char **argv)
int done_alias = 0;
while (1) {
- /* See if it's a builtin */
- handle_builtin(*argcp, *argv);
+ /*
+ * If we tried alias and futzed with our environment,
+ * it no longer is safe to invoke builtins directly in
+ * general. We have to spawn them as dashed externals.
+ *
+ * NEEDSWORK: if we can figure out cases
+ * where it is safe to do, we can avoid spawning a new
+ * process.
+ */
+ if (!done_alias)
+ handle_builtin(*argcp, *argv);
/* .. then try the external ones */
execv_dashed_external(*argv);
*/
if (done_alias)
break;
- save_env();
if (!handle_alias(argcp, argv))
break;
done_alias = 1;
* We'll automatically grow columns later if we need more room.
*/
graph->column_capacity = 30;
- graph->columns = xmalloc(sizeof(struct column) *
- graph->column_capacity);
- graph->new_columns = xmalloc(sizeof(struct column) *
- graph->column_capacity);
- graph->mapping = xmalloc(sizeof(int) * 2 * graph->column_capacity);
- graph->new_mapping = xmalloc(sizeof(int) * 2 * graph->column_capacity);
+ ALLOC_ARRAY(graph->columns, graph->column_capacity);
+ ALLOC_ARRAY(graph->new_columns, graph->column_capacity);
+ ALLOC_ARRAY(graph->mapping, 2 * graph->column_capacity);
+ ALLOC_ARRAY(graph->new_mapping, 2 * graph->column_capacity);
/*
* The diff output prefix callback, with this we can make
i = open(filename, O_RDONLY);
if (i < 0)
goto err_ret;
- data = xmalloc(size + 1);
+ data = xmallocz(size);
if (st.st_size != read_in_full(i, data, size)) {
error(_("'%s': short read %s"), filename, strerror(errno));
close(i);
return -1;
}
close(i);
- data[size] = 0;
gs->buf = data;
gs->size = size;
e = hashmap_get(&map, &key, data);
if (!e) {
/* not found: create it */
- e = xmallocz(sizeof(struct pool_entry) + len);
+ FLEX_ALLOC_MEM(e, data, data, len);
hashmap_entry_init(e, key.ent.hash);
e->len = len;
- memcpy(e->data, data, len);
hashmap_add(&map, e);
}
return e->data;
void add_cmdname(struct cmdnames *cmds, const char *name, int len)
{
- struct cmdname *ent = xmalloc(sizeof(*ent) + len + 1);
-
+ struct cmdname *ent;
+ FLEX_ALLOC_MEM(ent, name, name, len);
ent->len = len;
- memcpy(ent->name, name, len);
- ent->name[len] = 0;
ALLOC_GROW(cmds->names, cmds->cnt + 1, cmds->alloc);
cmds->names[cmds->cnt++] = ent;
}
static struct object_list **process_blob(struct blob *blob,
- struct object_list **p,
- struct name_path *path,
- const char *name)
+ struct object_list **p)
{
struct object *obj = &blob->object;
}
static struct object_list **process_tree(struct tree *tree,
- struct object_list **p,
- struct name_path *path,
- const char *name)
+ struct object_list **p)
{
struct object *obj = &tree->object;
struct tree_desc desc;
struct name_entry entry;
- struct name_path me;
obj->flags |= LOCAL;
die("bad tree object %s", oid_to_hex(&obj->oid));
obj->flags |= SEEN;
- name = xstrdup(name);
p = add_one_object(obj, p);
- me.up = path;
- me.elem = name;
- me.elem_len = strlen(name);
init_tree_desc(&desc, tree->buffer, tree->size);
while (tree_entry(&desc, &entry))
switch (object_type(entry.mode)) {
case OBJ_TREE:
- p = process_tree(lookup_tree(entry.sha1), p, &me, name);
+ p = process_tree(lookup_tree(entry.sha1), p);
break;
case OBJ_BLOB:
- p = process_blob(lookup_blob(entry.sha1), p, &me, name);
+ p = process_blob(lookup_blob(entry.sha1), p);
break;
default:
/* Subproject commit - not in this repository */
int count = 0;
while ((commit = get_revision(revs)) != NULL) {
- p = process_tree(commit->tree, p, NULL, "");
+ p = process_tree(commit->tree, p);
commit->object.flags |= LOCAL;
if (!(commit->object.flags & UNINTERESTING))
count += add_send_request(&commit->object, lock);
continue;
}
if (obj->type == OBJ_TREE) {
- p = process_tree((struct tree *)obj, p, NULL, name);
+ p = process_tree((struct tree *)obj, p);
continue;
}
if (obj->type == OBJ_BLOB) {
- p = process_blob((struct blob *)obj, p, NULL, name);
+ p = process_blob((struct blob *)obj, p);
continue;
}
die("unknown pending object %s (%s)", oid_to_hex(&obj->oid), name);
#include "gettext.h"
#include "transport.h"
+#if LIBCURL_VERSION_NUM >= 0x070a08
+long int git_curl_ipresolve = CURL_IPRESOLVE_WHATEVER;
+#else
+long int git_curl_ipresolve;
+#endif
int active_requests;
int http_is_verbose;
size_t http_post_buffer = 16 * LARGE_PACKET_MAX;
#if LIBCURL_VERSION_NUM >= 0x070908
static const char *ssl_capath;
#endif
+#if LIBCURL_VERSION_NUM >= 0x072c00
+static const char *ssl_pinnedkey;
+#endif
static const char *ssl_cainfo;
static long curl_low_speed_limit = -1;
static long curl_low_speed_time = -1;
static int curl_ftp_no_epsv;
static const char *curl_http_proxy;
+static const char *http_proxy_authmethod;
+static struct {
+ const char *name;
+ long curlauth_param;
+} proxy_authmethods[] = {
+ { "basic", CURLAUTH_BASIC },
+ { "digest", CURLAUTH_DIGEST },
+ { "negotiate", CURLAUTH_GSSNEGOTIATE },
+ { "ntlm", CURLAUTH_NTLM },
+#ifdef LIBCURL_CAN_HANDLE_AUTH_ANY
+ { "anyauth", CURLAUTH_ANY },
+#endif
+ /*
+ * CURLAUTH_DIGEST_IE has no corresponding command-line option in
+ * curl(1) and is not included in CURLAUTH_ANY, so we leave it out
+ * here, too
+ */
+};
+static struct credential proxy_auth = CREDENTIAL_INIT;
+static const char *curl_proxyuserpwd;
static const char *curl_cookie_file;
static int curl_save_cookies;
struct credential http_auth = CREDENTIAL_INIT;
static int http_proactive_auth;
static const char *user_agent;
+static int curl_empty_auth;
#if LIBCURL_VERSION_NUM >= 0x071700
/* Use CURLOPT_KEYPASSWD as is */
#else
slot->results->auth_avail = 0;
#endif
+
+ curl_easy_getinfo(slot->curl, CURLINFO_HTTP_CONNECTCODE,
+ &slot->results->http_connectcode);
}
/* Run callback if appropriate */
if (!strcmp("http.proxy", var))
return git_config_string(&curl_http_proxy, var, value);
+ if (!strcmp("http.proxyauthmethod", var))
+ return git_config_string(&http_proxy_authmethod, var, value);
+
if (!strcmp("http.cookiefile", var))
return git_config_string(&curl_cookie_file, var, value);
if (!strcmp("http.savecookies", var)) {
if (!strcmp("http.useragent", var))
return git_config_string(&user_agent, var, value);
+ if (!strcmp("http.emptyauth", var)) {
+ curl_empty_auth = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (!strcmp("http.pinnedpubkey", var)) {
+#if LIBCURL_VERSION_NUM >= 0x072c00
+ return git_config_pathname(&ssl_pinnedkey, var, value);
+#else
+ warning(_("Public key pinning not supported with cURL < 7.44.0"));
+ return 0;
+#endif
+ }
+
/* Fall back on the default ones */
return git_default_config(var, value, cb);
}
static void init_curl_http_auth(CURL *result)
{
- if (!http_auth.username)
+ if (!http_auth.username) {
+ if (curl_empty_auth)
+ curl_easy_setopt(result, CURLOPT_USERPWD, ":");
return;
+ }
credential_fill(&http_auth);
#endif
}
+/* *var must be free-able */
+static void var_override(const char **var, char *value)
+{
+ if (value) {
+ free((void *)*var);
+ *var = xstrdup(value);
+ }
+}
+
+static void set_proxyauth_name_password(CURL *result)
+{
+#if LIBCURL_VERSION_NUM >= 0x071301
+ curl_easy_setopt(result, CURLOPT_PROXYUSERNAME,
+ proxy_auth.username);
+ curl_easy_setopt(result, CURLOPT_PROXYPASSWORD,
+ proxy_auth.password);
+#else
+ struct strbuf s = STRBUF_INIT;
+
+ strbuf_addstr_urlencode(&s, proxy_auth.username, 1);
+ strbuf_addch(&s, ':');
+ strbuf_addstr_urlencode(&s, proxy_auth.password, 1);
+ curl_proxyuserpwd = strbuf_detach(&s, NULL);
+ curl_easy_setopt(result, CURLOPT_PROXYUSERPWD, curl_proxyuserpwd);
+#endif
+}
+
+static void init_curl_proxy_auth(CURL *result)
+{
+ if (proxy_auth.username) {
+ if (!proxy_auth.password)
+ credential_fill(&proxy_auth);
+ set_proxyauth_name_password(result);
+ }
+
+ var_override(&http_proxy_authmethod, getenv("GIT_HTTP_PROXY_AUTHMETHOD"));
+
+#if LIBCURL_VERSION_NUM >= 0x070a07 /* CURLOPT_PROXYAUTH and CURLAUTH_ANY */
+ if (http_proxy_authmethod) {
+ int i;
+ for (i = 0; i < ARRAY_SIZE(proxy_authmethods); i++) {
+ if (!strcmp(http_proxy_authmethod, proxy_authmethods[i].name)) {
+ curl_easy_setopt(result, CURLOPT_PROXYAUTH,
+ proxy_authmethods[i].curlauth_param);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(proxy_authmethods)) {
+ warning("unsupported proxy authentication method %s: using anyauth",
+ http_proxy_authmethod);
+ curl_easy_setopt(result, CURLOPT_PROXYAUTH, CURLAUTH_ANY);
+ }
+ }
+ else
+ curl_easy_setopt(result, CURLOPT_PROXYAUTH, CURLAUTH_ANY);
+#endif
+}
+
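(Usage note, not part of the patch: with the above, a user behind an authenticating proxy can pin the scheme with e.g. "git config http.proxyAuthMethod digest", or override it for one invocation via the GIT_HTTP_PROXY_AUTHMETHOD environment variable; an unrecognized method only triggers the warning above and falls back to CURLAUTH_ANY.)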
static int has_cert_password(void)
{
if (ssl_cert == NULL || ssl_cert_password_required != 1)
#if LIBCURL_VERSION_NUM >= 0x070908
if (ssl_capath != NULL)
curl_easy_setopt(result, CURLOPT_CAPATH, ssl_capath);
+#endif
+#if LIBCURL_VERSION_NUM >= 0x072c00
+ if (ssl_pinnedkey != NULL)
+ curl_easy_setopt(result, CURLOPT_PINNEDPUBLICKEY, ssl_pinnedkey);
#endif
if (ssl_cainfo != NULL)
curl_easy_setopt(result, CURLOPT_CAINFO, ssl_cainfo);
curl_easy_setopt(result, CURLOPT_USE_SSL, CURLUSESSL_TRY);
#endif
+ /*
+ * CURL also examines these variables as a fallback; but we need to query
+ * them here in order to decide whether to prompt for missing password (cf.
+ * init_curl_proxy_auth()).
+ *
+ * Unlike many other common environment variables, these are historically
+ * lowercase only. It appears that CURL did not know this and implemented
+ * only uppercase variants, which was later corrected to take both - with
+ * the exception of http_proxy, which is lowercase only also in CURL. As
+ * the lowercase versions are the historical quasi-standard, they take
+ * precedence here, as in CURL.
+ */
+ if (!curl_http_proxy) {
+ if (!strcmp(http_auth.protocol, "https")) {
+ var_override(&curl_http_proxy, getenv("HTTPS_PROXY"));
+ var_override(&curl_http_proxy, getenv("https_proxy"));
+ } else {
+ var_override(&curl_http_proxy, getenv("http_proxy"));
+ }
+ if (!curl_http_proxy) {
+ var_override(&curl_http_proxy, getenv("ALL_PROXY"));
+ var_override(&curl_http_proxy, getenv("all_proxy"));
+ }
+ }
+
if (curl_http_proxy) {
curl_easy_setopt(result, CURLOPT_PROXY, curl_http_proxy);
#if LIBCURL_VERSION_NUM >= 0x071800
curl_easy_setopt(result,
CURLOPT_PROXYTYPE, CURLPROXY_SOCKS4);
#endif
+ if (strstr(curl_http_proxy, "://"))
+ credential_from_url(&proxy_auth, curl_http_proxy);
+ else {
+ struct strbuf url = STRBUF_INIT;
+ strbuf_addf(&url, "http://%s", curl_http_proxy);
+ credential_from_url(&proxy_auth, url.buf);
+ strbuf_release(&url);
+ }
+
+ curl_easy_setopt(result, CURLOPT_PROXY, proxy_auth.host);
}
-#if LIBCURL_VERSION_NUM >= 0x070a07
- curl_easy_setopt(result, CURLOPT_PROXYAUTH, CURLAUTH_ANY);
-#endif
+ init_curl_proxy_auth(result);
set_curl_keepalive(result);
if (remote && remote->http_proxy)
curl_http_proxy = xstrdup(remote->http_proxy);
+ if (remote)
+ var_override(&http_proxy_authmethod, remote->http_proxy_authmethod);
+
pragma_header = curl_slist_append(pragma_header, "Pragma: no-cache");
no_pragma_header = curl_slist_append(no_pragma_header, "Pragma:");
curl_http_proxy = NULL;
}
+ if (proxy_auth.password) {
+ memset(proxy_auth.password, 0, strlen(proxy_auth.password));
+ free(proxy_auth.password);
+ proxy_auth.password = NULL;
+ }
+
+ free((void *)curl_proxyuserpwd);
+ curl_proxyuserpwd = NULL;
+
+ free((void *)http_proxy_authmethod);
+ http_proxy_authmethod = NULL;
+
if (cert_auth.password != NULL) {
memset(cert_auth.password, 0, strlen(cert_auth.password));
free(cert_auth.password);
curl_easy_setopt(slot->curl, CURLOPT_HTTPGET, 1);
curl_easy_setopt(slot->curl, CURLOPT_FAILONERROR, 1);
curl_easy_setopt(slot->curl, CURLOPT_RANGE, NULL);
+
+#if LIBCURL_VERSION_NUM >= 0x070a08
+ curl_easy_setopt(slot->curl, CURLOPT_IPRESOLVE, git_curl_ipresolve);
+#endif
#ifdef LIBCURL_CAN_HANDLE_AUTH_ANY
curl_easy_setopt(slot->curl, CURLOPT_HTTPAUTH, http_auth_methods);
#endif
- if (http_auth.password)
+ if (http_auth.password || curl_empty_auth)
init_curl_http_auth(slot->curl);
return slot;
if (results->curl_result == CURLE_OK) {
credential_approve(&http_auth);
+ if (proxy_auth.password)
+ credential_approve(&proxy_auth);
return HTTP_OK;
} else if (missing_target(results))
return HTTP_MISSING_TARGET;
return HTTP_REAUTH;
}
} else {
+ if (results->http_connectcode == 407)
+ credential_reject(&proxy_auth);
#if LIBCURL_VERSION_NUM >= 0x070c00
if (!curl_errorstr[0])
strlcpy(curl_errorstr,
CURLcode curl_result;
long http_code;
long auth_avail;
+ long http_connectcode;
};
struct active_request_slot {
int proactive_auth);
extern void http_cleanup(void);
+extern long int git_curl_ipresolve;
extern int active_requests;
extern int http_is_verbose;
extern size_t http_post_buffer;
static int default_email_is_bogus;
static int default_name_is_bogus;
+static int ident_use_config_only;
+
#define IDENT_NAME_GIVEN 01
#define IDENT_MAIL_GIVEN 02
#define IDENT_ALL_GIVEN (IDENT_NAME_GIVEN|IDENT_MAIL_GIVEN)
static int committer_ident_explicitly_given;
static int author_ident_explicitly_given;
+static int ident_config_given;
#ifdef NO_GECOS_IN_PWENT
#define get_gecos(ignored) "&"
strerror(errno));
return -1;
}
- if (strbuf_getline(&mailnamebuf, mailname, '\n') == EOF) {
+ if (strbuf_getline(&mailnamebuf, mailname) == EOF) {
if (ferror(mailname))
warning("cannot read /etc/mailname: %s",
strerror(errno));
int want_date = !(flag & IDENT_NO_DATE);
int want_name = !(flag & IDENT_NO_NAME);
- if (want_name && !name)
- name = ident_default_name();
- if (!email)
- email = ident_default_email();
-
- if (want_name && !*name) {
- struct passwd *pw;
-
- if (strict) {
- if (name == git_default_name.buf)
+ if (want_name) {
+ int using_default = 0;
+ if (!name) {
+ name = ident_default_name();
+ using_default = 1;
+ if (strict && default_name_is_bogus) {
fputs(env_hint, stderr);
- die("empty ident name (for <%s>) not allowed", email);
+ die("unable to auto-detect name (got '%s')", name);
+ }
+ if (strict && ident_use_config_only
+ && !(ident_config_given & IDENT_NAME_GIVEN))
+ die("user.useConfigOnly set but no name given");
+ }
+ if (!*name) {
+ struct passwd *pw;
+ if (strict) {
+ if (using_default)
+ fputs(env_hint, stderr);
+ die("empty ident name (for <%s>) not allowed", email);
+ }
+ pw = xgetpwuid_self(NULL);
+ name = pw->pw_name;
}
- pw = xgetpwuid_self(NULL);
- name = pw->pw_name;
- }
-
- if (want_name && strict &&
- name == git_default_name.buf && default_name_is_bogus) {
- fputs(env_hint, stderr);
- die("unable to auto-detect name (got '%s')", name);
}
- if (strict && email == git_default_email.buf && default_email_is_bogus) {
- fputs(env_hint, stderr);
- die("unable to auto-detect email address (got '%s')", email);
+ if (!email) {
+ email = ident_default_email();
+ if (strict && default_email_is_bogus) {
+ fputs(env_hint, stderr);
+ die("unable to auto-detect email address (got '%s')", email);
+ }
+ if (strict && ident_use_config_only
+ && !(ident_config_given & IDENT_MAIL_GIVEN))
+ die("user.useConfigOnly set but no mail given");
}
strbuf_reset(&ident);
int git_ident_config(const char *var, const char *value, void *data)
{
+ if (!strcmp(var, "user.useconfigonly")) {
+ ident_use_config_only = git_config_bool(var, value);
+ return 0;
+ }
+
if (!strcmp(var, "user.name")) {
if (!value)
return config_error_nonbool(var);
strbuf_addstr(&git_default_name, value);
committer_ident_explicitly_given |= IDENT_NAME_GIVEN;
author_ident_explicitly_given |= IDENT_NAME_GIVEN;
+ ident_config_given |= IDENT_NAME_GIVEN;
return 0;
}
strbuf_addstr(&git_default_email, value);
committer_ident_explicitly_given |= IDENT_MAIL_GIVEN;
author_ident_explicitly_given |= IDENT_MAIL_GIVEN;
+ ident_config_given |= IDENT_MAIL_GIVEN;
return 0;
}
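(Usage note, not part of the patch: the new user.useConfigOnly knob, e.g. "git config --global user.useConfigOnly true", makes the strict ident path above refuse to fall back to a guessed name or email, failing with the "user.useConfigOnly set but no name/mail given" messages until user.name and user.email are actually configured.)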
response = xstrfmt("%s %s", user, hex);
resp_len = strlen(response) + 1;
- response_64 = xmalloc(ENCODED_SIZE(resp_len) + 1);
+ response_64 = xmallocz(ENCODED_SIZE(resp_len));
encoded_len = EVP_EncodeBlock((unsigned char *)response_64,
(unsigned char *)response, resp_len);
if (encoded_len < 0)
die("EVP_EncodeBlock error");
- response_64[encoded_len] = '\0';
return (char *)response_64;
}
j++;
}
- new = xmalloc(j + 1);
+ new = xmallocz(j);
/*
* Second pass: write the new string. Note that this loop is
if (new_n_buckets < 4) new_n_buckets = 4; \
if (h->size >= (khint_t)(new_n_buckets * __ac_HASH_UPPER + 0.5)) j = 0; /* requested size is too small */ \
else { /* hash table size to be changed (shrink or expand); rehash */ \
- new_flags = (khint32_t*)xmalloc(__ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
+ ALLOC_ARRAY(new_flags, __ac_fsize(new_n_buckets)); \
if (!new_flags) return -1; \
memset(new_flags, 0xaa, __ac_fsize(new_n_buckets) * sizeof(khint32_t)); \
if (h->n_buckets < new_n_buckets) { /* expand */ \
int w, int s, int a, int d)
{
int len1 = strlen(string1), len2 = strlen(string2);
- int *row0 = xmalloc(sizeof(int) * (len2 + 1));
- int *row1 = xmalloc(sizeof(int) * (len2 + 1));
- int *row2 = xmalloc(sizeof(int) * (len2 + 1));
+ int *row0, *row1, *row2;
int i, j;
+ ALLOC_ARRAY(row0, len2 + 1);
+ ALLOC_ARRAY(row1, len2 + 1);
+ ALLOC_ARRAY(row2, len2 + 1);
+
for (j = 0; j <= len2; j++)
row1[j] = j * a;
for (i = 0; i < len1; i++) {
#include "graph.h"
#include "userdiff.h"
#include "line-log.h"
+#include "argv-array.h"
static void range_set_grow(struct range_set *rs, size_t extra)
{
if (diff_populate_filespec(spec, 0))
die("Cannot read blob %s", sha1_to_hex(spec->sha1));
- ends = xmalloc(size * sizeof(*ends));
+ ALLOC_ARRAY(ends, size);
ends[cur++] = 0;
data = spec->data;
while (num < spec->size) {
add_line_range(rev, commit, range);
if (!rev->diffopt.detect_rename) {
- int i, count = 0;
- struct line_log_data *r = range;
+ struct line_log_data *r;
+ struct argv_array array = ARGV_ARRAY_INIT;
const char **paths;
- while (r) {
- count++;
- r = r->next;
- }
- paths = xmalloc((count+1)*sizeof(char *));
- r = range;
- for (i = 0; i < count; i++) {
- paths[i] = xstrdup(r->path);
- r = r->next;
- }
- paths[count] = NULL;
+
+ for (r = range; r; r = r->next)
+ argv_array_push(&array, r->path);
+ paths = argv_array_detach(&array);
+
parse_pathspec(&rev->diffopt.pathspec, 0,
PATHSPEC_PREFER_FULL, "", paths);
+ /* strings are now owned by pathspec */
free(paths);
}
}
if (nparents > 1 && rev->first_parent_only)
nparents = 1;
- diffqueues = xmalloc(nparents * sizeof(*diffqueues));
- cand = xmalloc(nparents * sizeof(*cand));
- parents = xmalloc(nparents * sizeof(*parents));
+ ALLOC_ARRAY(diffqueues, nparents);
+ ALLOC_ARRAY(cand, nparents);
+ ALLOC_ARRAY(parents, nparents);
p = commit->parents;
for (i = 0; i < nparents; i++) {
static void process_blob(struct rev_info *revs,
struct blob *blob,
show_object_fn show,
- struct name_path *path,
+ struct strbuf *path,
const char *name,
void *cb_data)
{
struct object *obj = &blob->object;
+ size_t pathlen;
if (!revs->blob_objects)
return;
if (obj->flags & (UNINTERESTING | SEEN))
return;
obj->flags |= SEEN;
- show(obj, path, name, cb_data);
+
+ pathlen = path->len;
+ strbuf_addstr(path, name);
+ show(obj, path->buf, cb_data);
+ strbuf_setlen(path, pathlen);
}
/*
static void process_gitlink(struct rev_info *revs,
const unsigned char *sha1,
show_object_fn show,
- struct name_path *path,
+ struct strbuf *path,
const char *name,
void *cb_data)
{
static void process_tree(struct rev_info *revs,
struct tree *tree,
show_object_fn show,
- struct name_path *path,
struct strbuf *base,
const char *name,
void *cb_data)
struct object *obj = &tree->object;
struct tree_desc desc;
struct name_entry entry;
- struct name_path me;
enum interesting match = revs->diffopt.pathspec.nr == 0 ?
all_entries_interesting: entry_not_interesting;
int baselen = base->len;
return;
die("bad tree object %s", oid_to_hex(&obj->oid));
}
+
obj->flags |= SEEN;
- show(obj, path, name, cb_data);
- me.up = path;
- me.elem = name;
- me.elem_len = strlen(name);
-
- if (!match) {
- strbuf_addstr(base, name);
- if (base->len)
- strbuf_addch(base, '/');
- }
+ strbuf_addstr(base, name);
+ show(obj, base->buf, cb_data);
+ if (base->len)
+ strbuf_addch(base, '/');
init_tree_desc(&desc, tree->buffer, tree->size);
if (S_ISDIR(entry.mode))
process_tree(revs,
lookup_tree(entry.sha1),
- show, &me, base, entry.path,
+ show, base, entry.path,
cb_data);
else if (S_ISGITLINK(entry.mode))
process_gitlink(revs, entry.sha1,
- show, &me, entry.path,
+ show, base, entry.path,
cb_data);
else
process_blob(revs,
lookup_blob(entry.sha1),
- show, &me, entry.path,
+ show, base, entry.path,
cb_data);
}
strbuf_setlen(base, baselen);
continue;
if (obj->type == OBJ_TAG) {
obj->flags |= SEEN;
- show_object(obj, NULL, name, data);
+ show_object(obj, name, data);
continue;
}
if (!path)
path = "";
if (obj->type == OBJ_TREE) {
process_tree(revs, (struct tree *)obj, show_object,
- NULL, &base, path, data);
+ &base, path, data);
continue;
}
if (obj->type == OBJ_BLOB) {
process_blob(revs, (struct blob *)obj, show_object,
- NULL, path, data);
+ &base, path, data);
continue;
}
die("unknown pending object %s (%s)",
#define LIST_OBJECTS_H
typedef void (*show_commit_fn)(struct commit *, void *);
-typedef void (*show_object_fn)(struct object *, const struct name_path *, const char *, void *);
+typedef void (*show_object_fn)(struct object *, const char *, void *);
void traverse_commit_list(struct rev_info *, show_commit_fn, show_object_fn, void *);
typedef void (*show_edge_fn)(struct commit *);
if (fstat(fd, &st))
goto close_bad;
result->size = st.st_size;
- result->ptr = xmalloc(result->size + 1);
+ result->ptr = xmallocz(result->size);
if (read_in_full(fd, result->ptr, result->size) != result->size) {
free(result->ptr);
result->ptr = NULL;
void add_name_decoration(enum decoration_type type, const char *name, struct object *obj)
{
- int nlen = strlen(name);
- struct name_decoration *res = xmalloc(sizeof(*res) + nlen + 1);
- memcpy(res->name, name, nlen + 1);
+ struct name_decoration *res;
+ FLEX_ALLOC_STR(res, name, name);
res->type = type;
res->next = add_decoration(&name_decoration, obj, res);
}
struct strbuf continuation = STRBUF_INIT;
/* Get the first part of the line. */
- if (strbuf_getline(line, in, '\n'))
+ if (strbuf_getline_lf(line, in))
return 0;
/*
peek = fgetc(in); ungetc(peek, in);
if (peek != ' ' && peek != '\t')
break;
- if (strbuf_getline(&continuation, in, '\n'))
+ if (strbuf_getline_lf(&continuation, in))
break;
continuation.buf[0] = ' ';
strbuf_rtrim(&continuation);
static int find_boundary(struct mailinfo *mi, struct strbuf *line)
{
- while (!strbuf_getline(line, mi->input, '\n')) {
+ while (!strbuf_getline_lf(line, mi->input)) {
if (*(mi->content_top) && is_multipart_boundary(mi, line))
return 1;
}
strbuf_release(&newline);
/* replenish line */
- if (strbuf_getline(line, mi->input, '\n'))
+ if (strbuf_getline_lf(line, mi->input))
return 0;
strbuf_addch(line, '\n');
return 1;
return res.ptr;
}
-static int common_outf(void *priv_, mmbuffer_t *mb, int nbuf)
-{
- int i;
- mmfile_t *dst = priv_;
-
- for (i = 0; i < nbuf; i++) {
- memcpy(dst->ptr + dst->size, mb[i].ptr, mb[i].size);
- dst->size += mb[i].size;
- }
- return 0;
-}
-
-static int generate_common_file(mmfile_t *res, mmfile_t *f1, mmfile_t *f2)
-{
- unsigned long size = f1->size < f2->size ? f1->size : f2->size;
- void *ptr = xmalloc(size);
- xpparam_t xpp;
- xdemitconf_t xecfg;
- xdemitcb_t ecb;
-
- memset(&xpp, 0, sizeof(xpp));
- xpp.flags = 0;
- memset(&xecfg, 0, sizeof(xecfg));
- xecfg.ctxlen = 3;
- xecfg.flags = XDL_EMIT_COMMON;
- ecb.outf = common_outf;
-
- res->ptr = ptr;
- res->size = 0;
-
- ecb.priv = res;
- return xdi_diff(f1, f2, &xpp, &xecfg, &ecb);
-}
-
void *merge_blobs(const char *path, struct blob *base, struct blob *our, struct blob *their, unsigned long *size)
{
void *res = NULL;
if (fill_mmfile_blob(&common, base) < 0)
goto out_free_f2_f1;
} else {
- if (generate_common_file(&common, &f1, &f2) < 0)
- goto out_free_f2_f1;
+ common.ptr = xstrdup("");
+ common.size = 0;
}
res = three_way_filemerge(path, &common, &f1, &f2, size);
free_mmfile(&common);
struct diff_options opts;
renames = xcalloc(1, sizeof(struct string_list));
+ if (!o->detect_rename)
+ return renames;
+
diff_setup(&opts);
DIFF_OPT_SET(&opts, RECURSIVE);
DIFF_OPT_CLR(&opts, RENAME_EMPTY);
o->diff_rename_limit = -1;
o->merge_rename_limit = -1;
o->renormalize = 0;
+ o->detect_rename = 1;
merge_recursive_config(o);
if (getenv("GIT_MERGE_VERBOSITY"))
o->verbosity =
o->renormalize = 1;
else if (!strcmp(s, "no-renormalize"))
o->renormalize = 0;
- else if (skip_prefix(s, "rename-threshold=", &arg)) {
+ else if (!strcmp(s, "no-renames"))
+ o->detect_rename = 0;
+ else if (!strcmp(s, "find-renames")) {
+ o->detect_rename = 1;
+ o->rename_score = 0;
+ }
+ else if (skip_prefix(s, "find-renames=", &arg) ||
+ skip_prefix(s, "rename-threshold=", &arg)) {
if ((o->rename_score = parse_rename_score(&arg)) == -1 || *arg != 0)
return -1;
+ o->detect_rename = 1;
}
else
return -1;
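(Usage note, not part of the patch: these reach users as the recursive strategy's -X options, so "git merge -Xno-renames" turns rename detection off for the merge, "git merge -Xfind-renames" turns it back on with the default threshold, and "-Xfind-renames=<n>" sets a custom similarity score; the older "-Xrename-threshold=<n>" spelling is kept as a synonym.)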
unsigned renormalize : 1;
long xdl_opts;
int verbosity;
+ int detect_rename;
int diff_rename_limit;
int merge_rename_limit;
int rename_score;
dir = find_dir_entry(istate, ce->name, namelen);
if (!dir) {
/* not found, create it and add to hash table */
- dir = xcalloc(1, sizeof(struct dir_entry) + namelen + 1);
+ FLEX_ALLOC_MEM(dir, name, ce->name, namelen);
hashmap_entry_init(dir, memihash(ce->name, namelen));
dir->namelen = namelen;
- strncpy(dir->name, ce->name, namelen);
hashmap_add(&istate->dir_hash, dir);
/* recursively add missing parent directories */
const char *validity)
{
struct strbuf ref = STRBUF_INIT;
- int flags = 0;
+ int flags = NOTES_INIT_WRITABLE;
memset(c, 0, sizeof(*c));
c->validity = xstrdup(validity);
strbuf_addf(&ref, "refs/notes/%s", name);
if (!notes_cache_match_validity(ref.buf, validity))
- flags = NOTES_INIT_EMPTY;
+ flags |= NOTES_INIT_EMPTY;
init_notes(&c->tree, ref.buf, combine_notes_overwrite, flags);
strbuf_release(&ref);
}
unsigned char tree_sha1[20];
unsigned char commit_sha1[20];
- if (!c || !c->tree.initialized || !c->tree.ref || !*c->tree.ref)
+ if (!c || !c->tree.initialized || !c->tree.update_ref ||
+ !*c->tree.update_ref)
return -1;
if (!c->tree.dirty)
return 0;
if (commit_tree(c->validity, strlen(c->validity), tree_sha1, NULL,
commit_sha1, NULL, NULL) < 0)
return -1;
- if (update_ref("update notes cache", c->tree.ref, commit_sha1, NULL,
- 0, UPDATE_REFS_QUIET_ON_ERR) < 0)
+ if (update_ref("update notes cache", c->tree.update_ref, commit_sha1,
+ NULL, 0, UPDATE_REFS_QUIET_ON_ERR) < 0)
return -1;
return 0;
if (!t)
t = &default_notes_tree;
- if (!t->initialized || !t->ref || !*t->ref)
+ if (!t->initialized || !t->update_ref || !*t->update_ref)
die(_("Cannot commit uninitialized/unreferenced notes tree"));
if (!t->dirty)
return; /* don't have to commit an unchanged tree */
create_notes_commit(t, NULL, buf.buf, buf.len, commit_sha1);
strbuf_insert(&buf, 0, "notes: ", 7); /* commit message starts at index 7 */
- update_ref(buf.buf, t->ref, commit_sha1, NULL, 0,
+ update_ref(buf.buf, t->update_ref, commit_sha1, NULL, 0,
UPDATE_REFS_DIE_ON_ERR);
strbuf_release(&buf);
free(c);
return NULL;
}
- c->trees = load_notes_trees(c->refs);
+ c->trees = load_notes_trees(c->refs, NOTES_INIT_WRITABLE);
string_list_clear(c->refs, 0);
free(c->refs);
return c;
t->first_non_note = NULL;
t->prev_non_note = NULL;
t->ref = xstrdup_or_null(notes_ref);
+ t->update_ref = (flags & NOTES_INIT_WRITABLE) ? t->ref : NULL;
t->combine_notes = combine_notes;
t->initialized = 1;
t->dirty = 0;
if (flags & NOTES_INIT_EMPTY || !notes_ref ||
- read_ref(notes_ref, object_sha1))
+ get_sha1_treeish(notes_ref, object_sha1))
return;
+ if (flags & NOTES_INIT_WRITABLE && read_ref(notes_ref, object_sha1))
+ die("Cannot use notes ref %s", notes_ref);
if (get_tree_entry(object_sha1, "", sha1, &mode))
die("Failed to read notes tree referenced by %s (%s)",
notes_ref, sha1_to_hex(object_sha1));
load_subtree(t, &root_tree, t->root, 0);
}
-struct notes_tree **load_notes_trees(struct string_list *refs)
+struct notes_tree **load_notes_trees(struct string_list *refs, int flags)
{
struct string_list_item *item;
int counter = 0;
struct notes_tree **trees;
- trees = xmalloc((refs->nr+1) * sizeof(struct notes_tree *));
+ ALLOC_ARRAY(trees, refs->nr + 1);
for_each_string_list_item(item, refs) {
struct notes_tree *t = xcalloc(1, sizeof(struct notes_tree));
- init_notes(t, item->string, combine_notes_ignore, 0);
+ init_notes(t, item->string, combine_notes_ignore, flags);
trees[counter++] = t;
}
trees[counter] = NULL;
item->string);
}
- display_notes_trees = load_notes_trees(&display_notes_refs);
+ display_notes_trees = load_notes_trees(&display_notes_refs, 0);
string_list_clear(&display_notes_refs, 0);
}
else
strbuf_insert(sb, 0, "refs/notes/", 11);
}
+
+void expand_loose_notes_ref(struct strbuf *sb)
+{
+ unsigned char object[20];
+
+ if (get_sha1(sb->buf, object)) {
+ /* fallback to expand_notes_ref */
+ expand_notes_ref(sb);
+ }
+}
struct int_node *root;
struct non_note *first_non_note, *prev_non_note;
char *ref;
+ char *update_ref;
combine_notes_fn combine_notes;
int initialized;
int dirty;
*/
#define NOTES_INIT_EMPTY 1
+/*
+ * By default, the notes tree is only readable, and the notes ref can be
+ * any treeish. The notes tree can however be made writable with this flag,
+ * in which case only strict ref names can be used.
+ */
+#define NOTES_INIT_WRITABLE 2
+
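(Illustration, not part of the patch: a minimal sketch of the new distinction, with a made-up ref name:)

struct notes_tree t;

/* Read-only use: the ref may be any tree-ish shaped like a notes tree. */
init_notes(&t, "refs/notes/review", combine_notes_ignore, 0);

/*
 * For a tree that will be committed back, pass NOTES_INIT_WRITABLE
 * instead; init_notes() then insists on a real ref and records it in
 * t.update_ref for the later update_ref() call.
 */
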
/*
* Initialize the given notes_tree with the notes tree structure at the given
* ref. If given ref is NULL, the value of the $GIT_NOTES_REF environment
* Load the notes tree from each ref listed in 'refs'. The output is
* an array of notes_tree*, terminated by a NULL.
*/
-struct notes_tree **load_notes_trees(struct string_list *refs);
+struct notes_tree **load_notes_trees(struct string_list *refs, int flags);
/*
* Add all refs that match 'glob' to the 'list'.
/* Expand inplace a note ref like "foo" or "notes/foo" into "refs/notes/foo" */
void expand_notes_ref(struct strbuf *sb);
+/*
+ * Similar to expand_notes_ref, but will check whether the ref can be located
+ * via get_sha1 first, and only falls back to expand_notes_ref in the case
+ * where get_sha1 fails.
+ */
+void expand_loose_notes_ref(struct strbuf *sb);
+
#endif
return entry->in_pack_pos;
}
-static void show_object(struct object *object, const struct name_path *path,
- const char *last, void *data)
+static void show_object(struct object *object, const char *name, void *data)
{
struct bitmap *base = data;
bitmap_set(base, find_object_pos(object->oid.hash));
/* Packfile to which this bitmap index belongs to */
struct packed_git *pack;
- /* reverse index for the packfile */
- struct pack_revindex *reverse_index;
-
/*
* Mark the first `reuse_objects` in the packfile as reused:
* they will be sent as-is without using them for repacking
bitmap_git.bitmaps = kh_init_sha1();
bitmap_git.ext_index.positions = kh_init_sha1_pos();
- bitmap_git.reverse_index = revindex_for_pack(bitmap_git.pack);
+ load_pack_revindex(bitmap_git.pack);
if (!(bitmap_git.commits = read_bitmap_1(&bitmap_git)) ||
!(bitmap_git.trees = read_bitmap_1(&bitmap_git)) ||
if (!offset)
return -1;
- return find_revindex_position(bitmap_git.reverse_index, offset);
+ return find_revindex_position(bitmap_git.pack, offset);
}
static int bitmap_position(const unsigned char *sha1)
return bitmap_pos + bitmap_git.pack->num_objects;
}
-static void show_object(struct object *object, const struct name_path *path,
- const char *last, void *data)
+static void show_object(struct object *object, const char *name, void *data)
{
struct bitmap *base = data;
int bitmap_pos;
bitmap_pos = bitmap_position(object->oid.hash);
- if (bitmap_pos < 0) {
- char *name = path_name(path, last);
+ if (bitmap_pos < 0)
bitmap_pos = ext_index_add_object(object, name);
- free(name);
- }
bitmap_set(base, bitmap_pos);
}
if (pos + offset < bitmap_git.reuse_objects)
continue;
- entry = &bitmap_git.reverse_index->revindex[pos + offset];
+ entry = &bitmap_git.pack->revindex[pos + offset];
sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr);
if (bitmap_git.hashes)
return -1;
bitmap_git.reuse_objects = *entries = reuse_objects;
- *up_to = bitmap_git.reverse_index->revindex[reuse_objects].offset;
+ *up_to = bitmap_git.pack->revindex[reuse_objects].offset;
*packfile = bitmap_git.pack;
return 0;
size_t seen;
};
-static void test_show_object(struct object *object,
- const struct name_path *path,
- const char *last, void *data)
+static void test_show_object(struct object *object, const char *name,
+ void *data)
{
struct bitmap_test_data *tdata = data;
int bitmap_pos;
struct revindex_entry *entry;
struct object_entry *oe;
- entry = &bitmap_git.reverse_index->revindex[i];
+ entry = &bitmap_git.pack->revindex[i];
sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr);
oe = packlist_find(mapping, sha1, NULL);
* we do not do scan-streaming check on the pack file.
*/
nr_objects = p->num_objects;
- entries = xmalloc((nr_objects + 1) * sizeof(*entries));
+ ALLOC_ARRAY(entries, nr_objects + 1);
entries[nr_objects].offset = pack_sig_ofs;
/* first sort entries by pack offset, since unpacking them is more efficient that way */
for (i = 0; i < nr_objects; i++) {
* size is easily available by examining the pack entry header). It is
* also rather expensive to find the sha1 for an object given its offset.
*
- * We build a hashtable of existing packs (pack_revindex), and keep reverse
- * index here -- pack index file is sorted by object name mapping to offset;
- * this pack_revindex[].revindex array is a list of offset/index_nr pairs
+ * The pack index file is sorted by object name mapping to offset;
+ * this revindex array is a list of offset/index_nr pairs
* ordered by offset, so if you know the offset of an object, next offset
* is where its packed representation ends and the index_nr can be used to
* get the object sha1 from the main index.
*/
-static struct pack_revindex *pack_revindex;
-static int pack_revindex_hashsz;
-
-static int pack_revindex_ix(struct packed_git *p)
-{
- unsigned long ui = (unsigned long)(intptr_t)p;
- int i;
-
- ui = ui ^ (ui >> 16); /* defeat structure alignment */
- i = (int)(ui % pack_revindex_hashsz);
- while (pack_revindex[i].p) {
- if (pack_revindex[i].p == p)
- return i;
- if (++i == pack_revindex_hashsz)
- i = 0;
- }
- return -1 - i;
-}
-
-static void init_pack_revindex(void)
-{
- int num;
- struct packed_git *p;
-
- for (num = 0, p = packed_git; p; p = p->next)
- num++;
- if (!num)
- return;
- pack_revindex_hashsz = num * 11;
- pack_revindex = xcalloc(pack_revindex_hashsz, sizeof(*pack_revindex));
- for (p = packed_git; p; p = p->next) {
- num = pack_revindex_ix(p);
- num = - 1 - num;
- pack_revindex[num].p = p;
- }
- /* revindex elements are lazily initialized */
-}
-
/*
* This is a least-significant-digit radix sort.
*
* keep track of them with alias pointers, always sorting from "from"
* to "to".
*/
- struct revindex_entry *tmp = xmalloc(n * sizeof(*tmp));
- struct revindex_entry *from = entries, *to = tmp;
+ struct revindex_entry *tmp, *from, *to;
int bits;
- unsigned *pos = xmalloc(BUCKETS * sizeof(*pos));
+ unsigned *pos;
+
+ ALLOC_ARRAY(pos, BUCKETS);
+ ALLOC_ARRAY(tmp, n);
+ from = entries;
+ to = tmp;
/*
* If (max >> bits) is zero, then we know that the radix digit we are
/*
* Ordered list of offsets of objects in the pack.
*/
-static void create_pack_revindex(struct pack_revindex *rix)
+static void create_pack_revindex(struct packed_git *p)
{
- struct packed_git *p = rix->p;
unsigned num_ent = p->num_objects;
unsigned i;
const char *index = p->index_data;
- rix->revindex = xmalloc(sizeof(*rix->revindex) * (num_ent + 1));
+ ALLOC_ARRAY(p->revindex, num_ent + 1);
index += 4 * 256;
if (p->index_version > 1) {
for (i = 0; i < num_ent; i++) {
uint32_t off = ntohl(*off_32++);
if (!(off & 0x80000000)) {
- rix->revindex[i].offset = off;
+ p->revindex[i].offset = off;
} else {
- rix->revindex[i].offset =
+ p->revindex[i].offset =
((uint64_t)ntohl(*off_64++)) << 32;
- rix->revindex[i].offset |=
+ p->revindex[i].offset |=
ntohl(*off_64++);
}
- rix->revindex[i].nr = i;
+ p->revindex[i].nr = i;
}
} else {
for (i = 0; i < num_ent; i++) {
uint32_t hl = *((uint32_t *)(index + 24 * i));
- rix->revindex[i].offset = ntohl(hl);
- rix->revindex[i].nr = i;
+ p->revindex[i].offset = ntohl(hl);
+ p->revindex[i].nr = i;
}
}
/* This knows the pack format -- the 20-byte trailer
* follows immediately after the last object data.
*/
- rix->revindex[num_ent].offset = p->pack_size - 20;
- rix->revindex[num_ent].nr = -1;
- sort_revindex(rix->revindex, num_ent, p->pack_size);
+ p->revindex[num_ent].offset = p->pack_size - 20;
+ p->revindex[num_ent].nr = -1;
+ sort_revindex(p->revindex, num_ent, p->pack_size);
}
-struct pack_revindex *revindex_for_pack(struct packed_git *p)
+void load_pack_revindex(struct packed_git *p)
{
- int num;
- struct pack_revindex *rix;
-
- if (!pack_revindex_hashsz)
- init_pack_revindex();
-
- num = pack_revindex_ix(p);
- if (num < 0)
- die("internal error: pack revindex fubar");
-
- rix = &pack_revindex[num];
- if (!rix->revindex)
- create_pack_revindex(rix);
-
- return rix;
+ if (!p->revindex)
+ create_pack_revindex(p);
}
-int find_revindex_position(struct pack_revindex *pridx, off_t ofs)
+int find_revindex_position(struct packed_git *p, off_t ofs)
{
int lo = 0;
- int hi = pridx->p->num_objects + 1;
- struct revindex_entry *revindex = pridx->revindex;
+ int hi = p->num_objects + 1;
+ struct revindex_entry *revindex = p->revindex;
do {
unsigned mi = lo + (hi - lo) / 2;
struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs)
{
- struct pack_revindex *pridx = revindex_for_pack(p);
- int pos = find_revindex_position(pridx, ofs);
+ int pos;
+
+ load_pack_revindex(p);
+ pos = find_revindex_position(p, ofs);
if (pos < 0)
return NULL;
- return pridx->revindex + pos;
+ return p->revindex + pos;
}
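(Illustration, not part of the patch: the typical consumer pattern after this change, with a made-up helper name. Because the revindex now hangs off struct packed_git itself and is ordered by offset with a sentinel entry at the end, the size of an object's packed representation is simply the distance to the next entry:)

static off_t packed_entry_size(struct packed_git *p, off_t ofs)
{
	struct revindex_entry *e = find_pack_revindex(p, ofs);

	if (!e)
		return -1;	/* "ofs" is not the start of an entry */
	return (e + 1)->offset - e->offset;
}
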
#ifndef PACK_REVINDEX_H
#define PACK_REVINDEX_H
+struct packed_git;
+
struct revindex_entry {
off_t offset;
unsigned int nr;
};
-struct pack_revindex {
- struct packed_git *p;
- struct revindex_entry *revindex;
-};
-
-struct pack_revindex *revindex_for_pack(struct packed_git *p);
-int find_revindex_position(struct pack_revindex *pridx, off_t ofs);
+void load_pack_revindex(struct packed_git *p);
+int find_revindex_position(struct packed_git *p, off_t ofs);
struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs);
* something different on Windows.
*/
-static const char *pager_argv[] = { NULL, NULL };
static struct child_process pager_process = CHILD_PROCESS_INIT;
static void wait_for_pager(int in_signal)
return pager;
}
+void prepare_pager_args(struct child_process *pager_process, const char *pager)
+{
+ argv_array_push(&pager_process->args, pager);
+ pager_process->use_shell = 1;
+ if (!getenv("LESS"))
+ argv_array_push(&pager_process->env_array, "LESS=FRX");
+ if (!getenv("LV"))
+ argv_array_push(&pager_process->env_array, "LV=-c");
+}
+
void setup_pager(void)
{
const char *pager = git_pager(isatty(1));
setenv("GIT_PAGER_IN_USE", "true", 1);
/* spawn the pager */
- pager_argv[0] = pager;
- pager_process.use_shell = 1;
- pager_process.argv = pager_argv;
+ prepare_pager_args(&pager_process, pager);
pager_process.in = -1;
- if (!getenv("LESS"))
- argv_array_push(&pager_process.env_array, "LESS=FRX");
- if (!getenv("LV"))
- argv_array_push(&pager_process.env_array, "LV=-c");
argv_array_push(&pager_process.env_array, "GIT_PAGER_IN_USE");
if (start_command(&pager_process))
return;
n++;
pathspec->nr = n;
- pathspec->items = item = xmalloc(sizeof(*item) * n);
+ ALLOC_ARRAY(pathspec->items, n);
+ item = pathspec->items;
pathspec->_raw = argv;
prefixlen = prefix ? strlen(prefix) : 0;
void copy_pathspec(struct pathspec *dst, const struct pathspec *src)
{
*dst = *src;
- dst->items = xmalloc(sizeof(struct pathspec_item) * dst->nr);
+ ALLOC_ARRAY(dst->items, dst->nr);
memcpy(dst->items, src->items,
sizeof(struct pathspec_item) * dst->nr);
}
};
if ($dir) {
- $dir =~ m#^/# or $dir = $opts{Directory} . '/' . $dir;
+ _verify_require();
+ File::Spec->file_name_is_absolute($dir) or $dir = $opts{Directory} . '/' . $dir;
$opts{Repository} = abs_path($dir);
# If --git-dir went ok, this shouldn't die either.
"$self->{svn_path}/" : '';
$self->{config} = $opts->{config};
$self->{mergeinfo} = $opts->{mergeinfo};
+ $self->{pathnameencoding} = Git::config('svn.pathnameencoding');
return $self;
}
sub url_path {
my ($self, $path) = @_;
+ $path = $self->repo_path($path);
if ($self->{url} =~ m#^https?://#) {
# characters are taken from subversion/libsvn_subr/path.c
$path =~ s#([^~a-zA-Z0-9_./!$&'()*+,-])#sprintf("%%%02X",ord($1))#eg;
}
- $self->{url} . '/' . $self->repo_path($path);
+ $self->{url} . '/' . $path;
}
sub rmdirs {
size_t len = strlen(msg) + 5;
struct throughput *tp = progress->throughput;
- bufp = (len < sizeof(buf)) ? buf : xmalloc(len + 1);
+ bufp = (len < sizeof(buf)) ? buf : xmallocz(len);
if (tp) {
unsigned int rate = !tp->avg_misecs ? 0 :
tp->avg_bytes / tp->avg_misecs;
* The traversal will have already marked us as SEEN, so we
* only need to handle any progress reporting here.
*/
-static void mark_object(struct object *obj, const struct name_path *path,
- const char *name, void *data)
+static void mark_object(struct object *obj, const char *name, void *data)
{
update_progress(data);
}
static void mark_commit(struct commit *c, void *data)
{
- mark_object(&c->object, NULL, NULL, data);
+ mark_object(&c->object, NULL, data);
}
struct recent_data {
}
}
+static void tweak_untracked_cache(struct index_state *istate)
+{
+ switch (git_config_get_untracked_cache()) {
+ case -1: /* keep: do nothing */
+ break;
+ case 0: /* false */
+ remove_untracked_cache(istate);
+ break;
+ case 1: /* true */
+ add_untracked_cache(istate);
+ break;
+ default: /* unknown value: do nothing */
+ break;
+ }
+}
+
+static void post_read_index_from(struct index_state *istate)
+{
+ check_ce_order(istate);
+ tweak_untracked_cache(istate);
+}
+
/* remember to discard_cache() before reading a different cache! */
int do_read_index(struct index_state *istate, const char *path, int must_exist)
{
return istate->cache_nr;
ret = do_read_index(istate, path, 0);
+
split_index = istate->split_index;
if (!split_index || is_null_sha1(split_index->base_sha1)) {
- check_ce_order(istate);
+ post_read_index_from(istate);
return ret;
}
sha1_to_hex(split_index->base_sha1)),
sha1_to_hex(split_index->base->sha1));
merge_base_index(istate);
- check_ce_order(istate);
+ post_read_index_from(istate);
return ret;
}
typedef enum { FIELD_STR, FIELD_ULONG, FIELD_TIME } cmp_type;
+struct align {
+ align_type position;
+ unsigned int width;
+};
+
+/*
+ * An atom is a valid field atom listed below, possibly prefixed with
+ * a "*" to denote deref_tag().
+ *
+ * We parse given format string and sort specifiers, and make a list
+ * of properties that we need to extract out of objects. ref_array_item
+ * structure will hold an array of values extracted that can be
+ * indexed with the "atom number", which is an index into this
+ * array.
+ */
+static struct used_atom {
+ const char *name;
+ cmp_type type;
+ union {
+ char color[COLOR_MAXLEN];
+ struct align align;
+ enum { RR_NORMAL, RR_SHORTEN, RR_TRACK, RR_TRACKSHORT }
+ remote_ref;
+ struct {
+ enum { C_BARE, C_BODY, C_BODY_DEP, C_LINES, C_SIG, C_SUB } option;
+ unsigned int nlines;
+ } contents;
+ enum { O_FULL, O_SHORT } objectname;
+ } u;
+} *used_atom;
+static int used_atom_cnt, need_tagged, need_symref;
+static int need_color_reset_at_eol;
+
+static void color_atom_parser(struct used_atom *atom, const char *color_value)
+{
+ if (!color_value)
+ die(_("expected format: %%(color:<color>)"));
+ if (color_parse(color_value, atom->u.color) < 0)
+ die(_("unrecognized color: %%(color:%s)"), color_value);
+}
+
+static void remote_ref_atom_parser(struct used_atom *atom, const char *arg)
+{
+ if (!arg)
+ atom->u.remote_ref = RR_NORMAL;
+ else if (!strcmp(arg, "short"))
+ atom->u.remote_ref = RR_SHORTEN;
+ else if (!strcmp(arg, "track"))
+ atom->u.remote_ref = RR_TRACK;
+ else if (!strcmp(arg, "trackshort"))
+ atom->u.remote_ref = RR_TRACKSHORT;
+ else
+ die(_("unrecognized format: %%(%s)"), atom->name);
+}
+
+static void body_atom_parser(struct used_atom *atom, const char *arg)
+{
+ if (arg)
+ die(_("%%(body) does not take arguments"));
+ atom->u.contents.option = C_BODY_DEP;
+}
+
+static void subject_atom_parser(struct used_atom *atom, const char *arg)
+{
+ if (arg)
+ die(_("%%(subject) does not take arguments"));
+ atom->u.contents.option = C_SUB;
+}
+
+static void contents_atom_parser(struct used_atom *atom, const char *arg)
+{
+ if (!arg)
+ atom->u.contents.option = C_BARE;
+ else if (!strcmp(arg, "body"))
+ atom->u.contents.option = C_BODY;
+ else if (!strcmp(arg, "signature"))
+ atom->u.contents.option = C_SIG;
+ else if (!strcmp(arg, "subject"))
+ atom->u.contents.option = C_SUB;
+ else if (skip_prefix(arg, "lines=", &arg)) {
+ atom->u.contents.option = C_LINES;
+ if (strtoul_ui(arg, 10, &atom->u.contents.nlines))
+ die(_("positive value expected contents:lines=%s"), arg);
+ } else
+ die(_("unrecognized %%(contents) argument: %s"), arg);
+}
+
+static void objectname_atom_parser(struct used_atom *atom, const char *arg)
+{
+ if (!arg)
+ atom->u.objectname = O_FULL;
+ else if (!strcmp(arg, "short"))
+ atom->u.objectname = O_SHORT;
+ else
+ die(_("unrecognized %%(objectname) argument: %s"), arg);
+}
+
+static align_type parse_align_position(const char *s)
+{
+ if (!strcmp(s, "right"))
+ return ALIGN_RIGHT;
+ else if (!strcmp(s, "middle"))
+ return ALIGN_MIDDLE;
+ else if (!strcmp(s, "left"))
+ return ALIGN_LEFT;
+ return -1;
+}
+
+static void align_atom_parser(struct used_atom *atom, const char *arg)
+{
+ struct align *align = &atom->u.align;
+ struct string_list params = STRING_LIST_INIT_DUP;
+ int i;
+ unsigned int width = ~0U;
+
+ if (!arg)
+ die(_("expected format: %%(align:<width>,<position>)"));
+
+ align->position = ALIGN_LEFT;
+
+ string_list_split(&params, arg, ',', -1);
+ for (i = 0; i < params.nr; i++) {
+ const char *s = params.items[i].string;
+ int position;
+
+ if (skip_prefix(s, "position=", &s)) {
+ position = parse_align_position(s);
+ if (position < 0)
+ die(_("unrecognized position:%s"), s);
+ align->position = position;
+ } else if (skip_prefix(s, "width=", &s)) {
+ if (strtoul_ui(s, 10, &width))
+ die(_("unrecognized width:%s"), s);
+ } else if (!strtoul_ui(s, 10, &width))
+ ;
+ else if ((position = parse_align_position(s)) >= 0)
+ align->position = position;
+ else
+ die(_("unrecognized %%(align) argument: %s"), s);
+ }
+
+ if (width == ~0U)
+ die(_("positive width expected with the %%(align) atom"));
+ align->width = width;
+ string_list_clear(&params, 0);
+}
+
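(Usage note, not part of the patch: with the parsers above, an atom's argument is everything after the colon, so a format such as "%(align:width=25,position=middle)%(refname)%(end) %(contents:lines=1)" is accepted; %(align:...) also takes its width and position positionally, e.g. %(align:25,middle), and unrecognized arguments now die with the messages shown instead of being silently ignored.)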
static struct {
const char *name;
cmp_type cmp_type;
+ void (*parser)(struct used_atom *atom, const char *arg);
} valid_atom[] = {
{ "refname" },
{ "objecttype" },
{ "objectsize", FIELD_ULONG },
- { "objectname" },
+ { "objectname", FIELD_STR, objectname_atom_parser },
{ "tree" },
{ "parent" },
{ "numparent", FIELD_ULONG },
{ "taggerdate", FIELD_TIME },
{ "creator" },
{ "creatordate", FIELD_TIME },
- { "subject" },
- { "body" },
- { "contents" },
- { "upstream" },
- { "push" },
+ { "subject", FIELD_STR, subject_atom_parser },
+ { "body", FIELD_STR, body_atom_parser },
+ { "contents", FIELD_STR, contents_atom_parser },
+ { "upstream", FIELD_STR, remote_ref_atom_parser },
+ { "push", FIELD_STR, remote_ref_atom_parser },
{ "symref" },
{ "flag" },
{ "HEAD" },
- { "color" },
- { "align" },
+ { "color", FIELD_STR, color_atom_parser },
+ { "align", FIELD_STR, align_atom_parser },
{ "end" },
};
#define REF_FORMATTING_STATE_INIT { 0, NULL }
-struct align {
- align_type position;
- unsigned int width;
-};
-
-struct contents {
- unsigned int lines;
- struct object_id oid;
-};
-
struct ref_formatting_stack {
struct ref_formatting_stack *prev;
struct strbuf output;
const char *s;
union {
struct align align;
- struct contents contents;
} u;
void (*handler)(struct atom_value *atomv, struct ref_formatting_state *state);
unsigned long ul; /* used for sorting when not FIELD_STR */
};
-/*
- * An atom is a valid field atom listed above, possibly prefixed with
- * a "*" to denote deref_tag().
- *
- * We parse given format string and sort specifiers, and make a list
- * of properties that we need to extract out of objects. ref_array_item
- * structure will hold an array of values extracted that can be
- * indexed with the "atom number", which is an index into this
- * array.
- */
-static const char **used_atom;
-static cmp_type *used_atom_type;
-static int used_atom_cnt, need_tagged, need_symref;
-static int need_color_reset_at_eol;
-
/*
* Used to parse format string and sort specifiers
*/
int parse_ref_filter_atom(const char *atom, const char *ep)
{
const char *sp;
+ const char *arg;
int i, at;
sp = atom;
if (*sp == '*' && sp < ep)
sp++; /* deref */
if (ep <= sp)
- die("malformed field name: %.*s", (int)(ep-atom), atom);
+ die(_("malformed field name: %.*s"), (int)(ep-atom), atom);
/* Do we have the atom already used elsewhere? */
for (i = 0; i < used_atom_cnt; i++) {
- int len = strlen(used_atom[i]);
- if (len == ep - atom && !memcmp(used_atom[i], atom, len))
+ int len = strlen(used_atom[i].name);
+ if (len == ep - atom && !memcmp(used_atom[i].name, atom, len))
return i;
}
/* Is the atom a valid one? */
for (i = 0; i < ARRAY_SIZE(valid_atom); i++) {
int len = strlen(valid_atom[i].name);
+
/*
* If the atom name has a colon, strip it and everything after
* it off - it specifies the format for this entry, and
* shouldn't be used for checking against the valid_atom
* table.
*/
- const char *formatp = strchr(sp, ':');
- if (!formatp || ep < formatp)
- formatp = ep;
- if (len == formatp - sp && !memcmp(valid_atom[i].name, sp, len))
+ arg = memchr(sp, ':', ep - sp);
+ if (len == (arg ? arg : ep) - sp &&
+ !memcmp(valid_atom[i].name, sp, len))
break;
}
if (ARRAY_SIZE(valid_atom) <= i)
- die("unknown field name: %.*s", (int)(ep-atom), atom);
+ die(_("unknown field name: %.*s"), (int)(ep-atom), atom);
/* Add it in, including the deref prefix */
at = used_atom_cnt;
used_atom_cnt++;
REALLOC_ARRAY(used_atom, used_atom_cnt);
- REALLOC_ARRAY(used_atom_type, used_atom_cnt);
- used_atom[at] = xmemdupz(atom, ep - atom);
- used_atom_type[at] = valid_atom[i].cmp_type;
+ used_atom[at].name = xmemdupz(atom, ep - atom);
+ used_atom[at].type = valid_atom[i].cmp_type;
+ if (arg)
+ arg = used_atom[at].name + (arg - atom) + 1;
+ memset(&used_atom[at].u, 0, sizeof(used_atom[at].u));
+ if (valid_atom[i].parser)
+ valid_atom[i].parser(&used_atom[at], arg);
if (*atom == '*')
need_tagged = 1;
- if (!strcmp(used_atom[at], "symref"))
+ if (!strcmp(used_atom[at].name, "symref"))
need_symref = 1;
return at;
}
pop_stack_element(&state->stack);
}
-static int match_atom_name(const char *name, const char *atom_name, const char **val)
-{
- const char *body;
-
- if (!skip_prefix(name, atom_name, &body))
- return 0; /* doesn't even begin with "atom_name" */
- if (!body[0]) {
- *val = NULL; /* %(atom_name) and no customization */
- return 1;
- }
- if (body[0] != ':')
- return 0; /* "atom_namefoo" is not "atom_name" or "atom_name:..." */
- *val = body + 1; /* "atom_name:val" */
- return 1;
-}
-
/*
* In a format string, find the next occurrence of %(atom).
*/
int at;
if (!ep)
- return error("malformed format string %s", sp);
+ return error(_("malformed format string %s"), sp);
/* sp points at "%(" and ep points at the closing ")" */
at = parse_ref_filter_atom(sp + 2, ep);
cp = ep + 1;
- if (skip_prefix(used_atom[at], "color:", &color))
+ if (skip_prefix(used_atom[at].name, "color:", &color))
need_color_reset_at_eol = !!strcmp(color, "reset");
}
return 0;
}
static int grab_objectname(const char *name, const unsigned char *sha1,
- struct atom_value *v)
+ struct atom_value *v, struct used_atom *atom)
{
- if (!strcmp(name, "objectname")) {
- v->s = xstrdup(sha1_to_hex(sha1));
- return 1;
- }
- if (!strcmp(name, "objectname:short")) {
- v->s = xstrdup(find_unique_abbrev(sha1, DEFAULT_ABBREV));
- return 1;
+ if (starts_with(name, "objectname")) {
+ if (atom->u.objectname == O_SHORT) {
+ v->s = xstrdup(find_unique_abbrev(sha1, DEFAULT_ABBREV));
+ return 1;
+ } else if (atom->u.objectname == O_FULL) {
+ v->s = xstrdup(sha1_to_hex(sha1));
+ return 1;
+ } else
+ die("BUG: unknown %%(objectname) option");
}
return 0;
}
int i;
for (i = 0; i < used_atom_cnt; i++) {
- const char *name = used_atom[i];
+ const char *name = used_atom[i].name;
struct atom_value *v = &val[i];
if (!!deref != (*name == '*'))
continue;
v->s = xstrfmt("%lu", sz);
}
else if (deref)
- grab_objectname(name, obj->oid.hash, v);
+ grab_objectname(name, obj->oid.hash, v, &used_atom[i]);
}
}
struct tag *tag = (struct tag *) obj;
for (i = 0; i < used_atom_cnt; i++) {
- const char *name = used_atom[i];
+ const char *name = used_atom[i].name;
struct atom_value *v = &val[i];
if (!!deref != (*name == '*'))
continue;
struct commit *commit = (struct commit *) obj;
for (i = 0; i < used_atom_cnt; i++) {
- const char *name = used_atom[i];
+ const char *name = used_atom[i].name;
struct atom_value *v = &val[i];
if (!!deref != (*name == '*'))
continue;
const char *wholine = NULL;
for (i = 0; i < used_atom_cnt; i++) {
- const char *name = used_atom[i];
+ const char *name = used_atom[i].name;
struct atom_value *v = &val[i];
if (!!deref != (*name == '*'))
continue;
if (!wholine)
return;
for (i = 0; i < used_atom_cnt; i++) {
- const char *name = used_atom[i];
+ const char *name = used_atom[i].name;
struct atom_value *v = &val[i];
if (!!deref != (*name == '*'))
continue;
unsigned long sublen = 0, bodylen = 0, nonsiglen = 0, siglen = 0;
for (i = 0; i < used_atom_cnt; i++) {
- const char *name = used_atom[i];
+ struct used_atom *atom = &used_atom[i];
+ const char *name = atom->name;
struct atom_value *v = &val[i];
- const char *valp = NULL;
if (!!deref != (*name == '*'))
continue;
if (deref)
name++;
if (strcmp(name, "subject") &&
strcmp(name, "body") &&
- strcmp(name, "contents") &&
- strcmp(name, "contents:subject") &&
- strcmp(name, "contents:body") &&
- strcmp(name, "contents:signature") &&
- !starts_with(name, "contents:lines="))
+ !starts_with(name, "contents"))
continue;
if (!subpos)
find_subpos(buf, sz,
&bodypos, &bodylen, &nonsiglen,
&sigpos, &siglen);
- if (!strcmp(name, "subject"))
- v->s = copy_subject(subpos, sublen);
- else if (!strcmp(name, "contents:subject"))
+ if (atom->u.contents.option == C_SUB)
v->s = copy_subject(subpos, sublen);
- else if (!strcmp(name, "body"))
+ else if (atom->u.contents.option == C_BODY_DEP)
v->s = xmemdupz(bodypos, bodylen);
- else if (!strcmp(name, "contents:body"))
+ else if (atom->u.contents.option == C_BODY)
v->s = xmemdupz(bodypos, nonsiglen);
- else if (!strcmp(name, "contents:signature"))
+ else if (atom->u.contents.option == C_SIG)
v->s = xmemdupz(sigpos, siglen);
- else if (!strcmp(name, "contents"))
- v->s = xstrdup(subpos);
- else if (skip_prefix(name, "contents:lines=", &valp)) {
+ else if (atom->u.contents.option == C_LINES) {
struct strbuf s = STRBUF_INIT;
const char *contents_end = bodylen + bodypos - siglen;
- if (strtoul_ui(valp, 10, &v->u.contents.lines))
- die(_("positive value expected contents:lines=%s"), valp);
/* Size is the length of the message after removing the signature */
- append_lines(&s, subpos, contents_end - subpos, v->u.contents.lines);
+ append_lines(&s, subpos, contents_end - subpos, atom->u.contents.nlines);
v->s = strbuf_detach(&s, NULL);
- }
+ } else if (atom->u.contents.option == C_BARE)
+ v->s = xstrdup(subpos);
}
}
const char *start = refname;
if (nr < 1 || *end != '\0')
- die(":strip= requires a positive integer argument");
+ die(_(":strip= requires a positive integer argument"));
while (remaining) {
switch (*start++) {
case '\0':
- die("ref '%s' does not have %ld components to :strip",
+ die(_("ref '%s' does not have %ld components to :strip"),
refname, nr);
case '/':
remaining--;
return start;
}
+static void fill_remote_ref_details(struct used_atom *atom, const char *refname,
+ struct branch *branch, const char **s)
+{
+ int num_ours, num_theirs;
+ if (atom->u.remote_ref == RR_SHORTEN)
+ *s = shorten_unambiguous_ref(refname, warn_ambiguous_refs);
+ else if (atom->u.remote_ref == RR_TRACK) {
+ if (stat_tracking_info(branch, &num_ours,
+ &num_theirs, NULL))
+ return;
+
+ if (!num_ours && !num_theirs)
+ *s = "";
+ else if (!num_ours)
+ *s = xstrfmt("[behind %d]", num_theirs);
+ else if (!num_theirs)
+ *s = xstrfmt("[ahead %d]", num_ours);
+ else
+ *s = xstrfmt("[ahead %d, behind %d]",
+ num_ours, num_theirs);
+ } else if (atom->u.remote_ref == RR_TRACKSHORT) {
+ if (stat_tracking_info(branch, &num_ours,
+ &num_theirs, NULL))
+ return;
+
+ if (!num_ours && !num_theirs)
+ *s = "=";
+ else if (!num_ours)
+ *s = "<";
+ else if (!num_theirs)
+ *s = ">";
+ else
+ *s = "<>";
+ } else /* RR_NORMAL */
+ *s = refname;
+}
+
/*
* Parse the object referred by ref, and grab needed value.
*/
/* Fill in specials first */
for (i = 0; i < used_atom_cnt; i++) {
- const char *name = used_atom[i];
+ struct used_atom *atom = &used_atom[i];
+ const char *name = used_atom[i].name;
struct atom_value *v = &ref->value[i];
int deref = 0;
const char *refname;
const char *formatp;
- const char *valp;
struct branch *branch = NULL;
v->handler = append_atom;
branch = branch_get(branch_name);
refname = branch_get_upstream(branch, NULL);
- if (!refname)
- continue;
+ if (refname)
+ fill_remote_ref_details(atom, refname, branch, &v->s);
+ continue;
} else if (starts_with(name, "push")) {
const char *branch_name;
if (!skip_prefix(ref->refname, "refs/heads/",
refname = branch_get_push(branch, NULL);
if (!refname)
continue;
- } else if (match_atom_name(name, "color", &valp)) {
- char color[COLOR_MAXLEN] = "";
-
- if (!valp)
- die(_("expected format: %%(color:<color>)"));
- if (color_parse(valp, color) < 0)
- die(_("unable to parse format"));
- v->s = xstrdup(color);
+ fill_remote_ref_details(atom, refname, branch, &v->s);
+ continue;
+ } else if (starts_with(name, "color:")) {
+ v->s = atom->u.color;
continue;
} else if (!strcmp(name, "flag")) {
char buf[256], *cp = buf;
v->s = xstrdup(buf + 1);
}
continue;
- } else if (!deref && grab_objectname(name, ref->objectname, v)) {
+ } else if (!deref && grab_objectname(name, ref->objectname, v, atom)) {
continue;
} else if (!strcmp(name, "HEAD")) {
const char *head;
else
v->s = " ";
continue;
- } else if (match_atom_name(name, "align", &valp)) {
- struct align *align = &v->u.align;
- struct strbuf **s, **to_free;
- int width = -1;
-
- if (!valp)
- die(_("expected format: %%(align:<width>,<position>)"));
-
- /*
- * TODO: Implement a function similar to strbuf_split_str()
- * which would omit the separator from the end of each value.
- */
- s = to_free = strbuf_split_str(valp, ',', 0);
-
- align->position = ALIGN_LEFT;
-
- while (*s) {
- /* Strip trailing comma */
- if (s[1])
- strbuf_setlen(s[0], s[0]->len - 1);
- if (!strtoul_ui(s[0]->buf, 10, (unsigned int *)&width))
- ;
- else if (!strcmp(s[0]->buf, "left"))
- align->position = ALIGN_LEFT;
- else if (!strcmp(s[0]->buf, "right"))
- align->position = ALIGN_RIGHT;
- else if (!strcmp(s[0]->buf, "middle"))
- align->position = ALIGN_MIDDLE;
- else
- die(_("improper format entered align:%s"), s[0]->buf);
- s++;
- }
-
- if (width < 0)
- die(_("positive width expected with the %%(align) atom"));
- align->width = width;
- strbuf_list_free(to_free);
+ } else if (starts_with(name, "align")) {
+ v->u.align = atom->u.align;
v->handler = align_atom_handler;
continue;
} else if (!strcmp(name, "end")) {
formatp = strchr(name, ':');
if (formatp) {
- int num_ours, num_theirs;
const char *arg;
formatp++;
warn_ambiguous_refs);
else if (skip_prefix(formatp, "strip=", &arg))
refname = strip_ref_components(refname, arg);
- else if (!strcmp(formatp, "track") &&
- (starts_with(name, "upstream") ||
- starts_with(name, "push"))) {
-
- if (stat_tracking_info(branch, &num_ours,
- &num_theirs, NULL))
- continue;
-
- if (!num_ours && !num_theirs)
- v->s = "";
- else if (!num_ours)
- v->s = xstrfmt("[behind %d]", num_theirs);
- else if (!num_theirs)
- v->s = xstrfmt("[ahead %d]", num_ours);
- else
- v->s = xstrfmt("[ahead %d, behind %d]",
- num_ours, num_theirs);
- continue;
- } else if (!strcmp(formatp, "trackshort") &&
- (starts_with(name, "upstream") ||
- starts_with(name, "push"))) {
- assert(branch);
-
- if (stat_tracking_info(branch, &num_ours,
- &num_theirs, NULL))
- continue;
-
- if (!num_ours && !num_theirs)
- v->s = "=";
- else if (!num_ours)
- v->s = "<";
- else if (!num_theirs)
- v->s = ">";
- else
- v->s = "<>";
- continue;
- } else
- die("unknown %.*s format %s",
+ else
+ die(_("unknown %.*s format %s"),
(int)(formatp - name), name, formatp);
}
need_obj:
buf = get_obj(ref->objectname, &obj, &size, &eaten);
if (!buf)
- die("missing object %s for %s",
+ die(_("missing object %s for %s"),
sha1_to_hex(ref->objectname), ref->refname);
if (!obj)
- die("parse_object_buffer failed on %s for %s",
+ die(_("parse_object_buffer failed on %s for %s"),
sha1_to_hex(ref->objectname), ref->refname);
grab_values(ref->value, 0, obj, buf, size);
*/
buf = get_obj(tagged, &obj, &size, &eaten);
if (!buf)
- die("missing object %s for %s",
+ die(_("missing object %s for %s"),
sha1_to_hex(tagged), ref->refname);
if (!obj)
- die("parse_object_buffer failed on %s for %s",
+ die(_("parse_object_buffer failed on %s for %s"),
sha1_to_hex(tagged), ref->refname);
grab_values(ref->value, 1, obj, buf, size);
if (!eaten)
const unsigned char *objectname,
int flag)
{
- size_t len = strlen(refname);
- struct ref_array_item *ref = xcalloc(1, sizeof(struct ref_array_item) + len + 1);
- memcpy(ref->refname, refname, len);
- ref->refname[len] = '\0';
+ struct ref_array_item *ref;
+ FLEX_ALLOC_STR(ref, refname, refname);
hashcpy(ref->objectname, objectname);
ref->flag = flag;
unsigned int kind;
if (flag & REF_BAD_NAME) {
- warning("ignoring ref with broken name %s", refname);
+ warning(_("ignoring ref with broken name %s"), refname);
return 0;
}
if (flag & REF_ISBROKEN) {
- warning("ignoring broken ref %s", refname);
+ warning(_("ignoring broken ref %s"), refname);
return 0;
}
{
struct atom_value *va, *vb;
int cmp;
- cmp_type cmp_type = used_atom_type[s->atom];
+ cmp_type cmp_type = used_atom[s->atom].type;
get_ref_atom_value(a, s->atom, &va);
get_ref_atom_value(b, s->atom, &vb);
char *buf;
int result;
- buf = xmalloc(strlen(refname) + 1);
+ buf = xmallocz(strlen(refname));
/*
* Does the refname try to escape refs/?
* For example: refs/foo/../bar is safe but refs/foo/../../bar
static struct ref_update *add_update(struct ref_transaction *transaction,
const char *refname)
{
- size_t len = strlen(refname) + 1;
- struct ref_update *update = xcalloc(1, sizeof(*update) + len);
-
- memcpy((char *)update->refname, refname, len); /* includes NUL */
+ struct ref_update *update;
+ FLEX_ALLOC_STR(update, refname, refname);
ALLOC_GROW(transaction->updates, transaction->nr + 1, transaction->alloc);
transaction->updates[transaction->nr++] = update;
return update;
/* -2 for strlen("%.*s") - strlen("%s"); +1 for NUL */
total_len += strlen(ref_rev_parse_rules[nr_rules]) - 2 + 1;
- scanf_fmts = xmalloc(nr_rules * sizeof(char *) + total_len);
+ scanf_fmts = xmalloc(st_add(st_mult(nr_rules, sizeof(char *)), total_len));
offset = 0;
for (i = 0; i < nr_rules; i++) {
* If this succeeds, the ref updates will have taken place and
* the transaction cannot be rolled back.
*
+ * - Instead of `ref_transaction_commit`, use
+ * `initial_ref_transaction_commit()` if the ref database is known
+ * to be empty (e.g. during clone). This is likely to be much
+ * faster.
+ *
* - At any time call `ref_transaction_free()` to discard the
* transaction and free associated resources. In particular,
* this rolls back the transaction if it has not been
*
* The message is appended to err without first clearing err.
* err will not be '\n' terminated.
+ *
+ * Caveats
+ * -------
+ *
+ * Note that no locks are taken, and no refs are read, until
+ * `ref_transaction_commit` is called. So `ref_transaction_verify`
+ * won't report a verification failure until the commit is attempted.
*/
struct ref_transaction;
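
(Editorial note, not part of the patch: the comment above describes the intended calling
sequence for ref transactions. A minimal sketch of a caller is shown below, assuming the
usual ref_transaction_begin()/ref_transaction_update()/ref_transaction_commit()/
ref_transaction_free() entry points declared in this header; the refname, the sha1
buffers and the reflog message are placeholders, and error handling is abbreviated.)

	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *t = ref_transaction_begin(&err);

	if (!t ||
	    ref_transaction_update(t, "refs/heads/topic",
				   new_sha1, old_sha1,	/* placeholders */
				   0, "example: move topic", &err) ||
	    ref_transaction_commit(t, &err))
		error("%s", err.buf);
	/* per the caveat above, nothing is locked or read before the commit step */
	ref_transaction_free(t);
	strbuf_release(&err);
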
/** rename ref, return 0 on success **/
extern int rename_ref(const char *oldref, const char *newref, const char *logmsg);
-extern int create_symref(const char *ref, const char *refs_heads_master, const char *logmsg);
+extern int create_symref(const char *refname, const char *target, const char *logmsg);
enum action_on_err {
UPDATE_REFS_MSG_ON_ERR,
const unsigned char *sha1, int flag,
int check_name)
{
- int len;
struct ref_entry *ref;
if (check_name &&
check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
die("Reference has invalid format: '%s'", refname);
- len = strlen(refname) + 1;
- ref = xmalloc(sizeof(struct ref_entry) + len);
+ FLEX_ALLOC_STR(ref, name, refname);
hashcpy(ref->u.value.oid.hash, sha1);
oidclr(&ref->u.value.peeled);
- memcpy(ref->name, refname, len);
ref->flag = flag;
return ref;
}
int incomplete)
{
struct ref_entry *direntry;
- direntry = xcalloc(1, sizeof(struct ref_entry) + len + 1);
- memcpy(direntry->name, dirname, len);
- direntry->name[len] = '\0';
+ FLEX_ALLOC_MEM(direntry, name, dirname, len);
direntry->u.subdir.ref_cache = ref_cache;
direntry->flag = REF_DIR | (incomplete ? REF_INCOMPLETE : 0);
return direntry;
*/
static struct ref_cache *create_ref_cache(const char *submodule)
{
- int len;
struct ref_cache *refs;
if (!submodule)
submodule = "";
- len = strlen(submodule) + 1;
- refs = xcalloc(1, sizeof(struct ref_cache) + len);
- memcpy(refs->name, submodule, len);
+ FLEX_ALLOC_STR(refs, name, submodule);
refs->next = submodule_ref_caches;
submodule_ref_caches = refs;
return refs;
if (read_ref_full(lock->ref_name,
mustexist ? RESOLVE_REF_READING : 0,
lock->old_oid.hash, NULL)) {
- int save_errno = errno;
- strbuf_addf(err, "can't verify ref %s", lock->ref_name);
- errno = save_errno;
- return -1;
+ if (old_sha1) {
+ int save_errno = errno;
+ strbuf_addf(err, "can't verify ref %s", lock->ref_name);
+ errno = save_errno;
+ return -1;
+ } else {
+ hashclr(lock->old_oid.hash);
+ return 0;
+ }
}
- if (hashcmp(lock->old_oid.hash, old_sha1)) {
+ if (old_sha1 && hashcmp(lock->old_oid.hash, old_sha1)) {
strbuf_addf(err, "ref %s is at %s but expected %s",
lock->ref_name,
sha1_to_hex(lock->old_oid.hash),
const char *orig_refname = refname;
struct ref_lock *lock;
int last_errno = 0;
- int type, lflags;
+ int type;
+ int lflags = 0;
int mustexist = (old_sha1 && !is_null_sha1(old_sha1));
int resolve_flags = 0;
int attempts_remaining = 3;
if (mustexist)
resolve_flags |= RESOLVE_REF_READING;
- if (flags & REF_DELETING) {
+ if (flags & REF_DELETING)
resolve_flags |= RESOLVE_REF_ALLOW_BAD_NAME;
- if (flags & REF_NODEREF)
- resolve_flags |= RESOLVE_REF_NO_RECURSE;
+ if (flags & REF_NODEREF) {
+ resolve_flags |= RESOLVE_REF_NO_RECURSE;
+ lflags |= LOCK_NO_DEREF;
}
refname = resolve_ref_unsafe(refname, resolve_flags,
goto error_return;
}
+
+ if (flags & REF_NODEREF)
+ refname = orig_refname;
+
/*
* If the ref did not exist and we are creating it, make sure
* there is no existing packed ref whose name begins with our
lock->lk = xcalloc(1, sizeof(struct lock_file));
- lflags = 0;
- if (flags & REF_NODEREF) {
- refname = orig_refname;
- lflags |= LOCK_NO_DEREF;
- }
lock->ref_name = xstrdup(refname);
lock->orig_ref_name = xstrdup(orig_refname);
strbuf_git_path(&ref_file, "%s", refname);
goto error_return;
}
}
- if (old_sha1 && verify_lock(lock, old_sha1, mustexist, err)) {
+ if (verify_lock(lock, old_sha1, mustexist, err)) {
last_errno = errno;
goto error_return;
}
/* Schedule the loose reference for pruning if requested. */
if ((cb->flags & PACK_REFS_PRUNE)) {
- int namelen = strlen(entry->name) + 1;
- struct ref_to_prune *n = xcalloc(1, sizeof(*n) + namelen);
+ struct ref_to_prune *n;
+ FLEX_ALLOC_STR(n, name, entry->name);
hashcpy(n->sha1, entry->u.value.oid.hash);
- memcpy(n->name, entry->name, namelen); /* includes NUL */
n->next = cb->ref_to_prune;
cb->ref_to_prune = n;
}
return 0;
}
-int create_symref(const char *ref_target, const char *refs_heads_master,
- const char *logmsg)
+static int create_ref_symlink(struct ref_lock *lock, const char *target)
{
- char *lockpath = NULL;
- char ref[1000];
- int fd, len, written;
- char *git_HEAD = git_pathdup("%s", ref_target);
- unsigned char old_sha1[20], new_sha1[20];
- struct strbuf err = STRBUF_INIT;
-
- if (logmsg && read_ref(ref_target, old_sha1))
- hashclr(old_sha1);
-
- if (safe_create_leading_directories(git_HEAD) < 0)
- return error("unable to create directory for %s", git_HEAD);
-
+ int ret = -1;
#ifndef NO_SYMLINK_HEAD
- if (prefer_symlink_refs) {
- unlink(git_HEAD);
- if (!symlink(refs_heads_master, git_HEAD))
- goto done;
+ char *ref_path = get_locked_file_path(lock->lk);
+ unlink(ref_path);
+ ret = symlink(target, ref_path);
+ free(ref_path);
+
+ if (ret)
fprintf(stderr, "no symlink - falling back to symbolic ref\n");
- }
#endif
+ return ret;
+}
- len = snprintf(ref, sizeof(ref), "ref: %s\n", refs_heads_master);
- if (sizeof(ref) <= len) {
- error("refname too long: %s", refs_heads_master);
- goto error_free_return;
- }
- lockpath = mkpathdup("%s.lock", git_HEAD);
- fd = open(lockpath, O_CREAT | O_EXCL | O_WRONLY, 0666);
- if (fd < 0) {
- error("Unable to open %s for writing", lockpath);
- goto error_free_return;
- }
- written = write_in_full(fd, ref, len);
- if (close(fd) != 0 || written != len) {
- error("Unable to write to %s", lockpath);
- goto error_unlink_return;
- }
- if (rename(lockpath, git_HEAD) < 0) {
- error("Unable to create %s", git_HEAD);
- goto error_unlink_return;
- }
- if (adjust_shared_perm(git_HEAD)) {
- error("Unable to fix permissions on %s", lockpath);
- error_unlink_return:
- unlink_or_warn(lockpath);
- error_free_return:
- free(lockpath);
- free(git_HEAD);
- return -1;
+static void update_symref_reflog(struct ref_lock *lock, const char *refname,
+ const char *target, const char *logmsg)
+{
+ struct strbuf err = STRBUF_INIT;
+ unsigned char new_sha1[20];
+ if (logmsg && !read_ref(target, new_sha1) &&
+ log_ref_write(refname, lock->old_oid.hash, new_sha1, logmsg, 0, &err)) {
+ error("%s", err.buf);
+ strbuf_release(&err);
}
- free(lockpath);
+}
-#ifndef NO_SYMLINK_HEAD
- done:
-#endif
- if (logmsg && !read_ref(refs_heads_master, new_sha1) &&
- log_ref_write(ref_target, old_sha1, new_sha1, logmsg, 0, &err)) {
+static int create_symref_locked(struct ref_lock *lock, const char *refname,
+ const char *target, const char *logmsg)
+{
+ if (prefer_symlink_refs && !create_ref_symlink(lock, target)) {
+ update_symref_reflog(lock, refname, target, logmsg);
+ return 0;
+ }
+
+ if (!fdopen_lock_file(lock->lk, "w"))
+ return error("unable to fdopen %s: %s",
+ lock->lk->tempfile.filename.buf, strerror(errno));
+
+ update_symref_reflog(lock, refname, target, logmsg);
+
+ /* no error check; commit_ref will check ferror */
+ fprintf(lock->lk->tempfile.fp, "ref: %s\n", target);
+ if (commit_ref(lock) < 0)
+ return error("unable to write symref for %s: %s", refname,
+ strerror(errno));
+ return 0;
+}
+
+int create_symref(const char *refname, const char *target, const char *logmsg)
+{
+ struct strbuf err = STRBUF_INIT;
+ struct ref_lock *lock;
+ int ret;
+
+ lock = lock_ref_sha1_basic(refname, NULL, NULL, NULL, REF_NODEREF, NULL,
+ &err);
+ if (!lock) {
error("%s", err.buf);
strbuf_release(&err);
+ return -1;
}
- free(git_HEAD);
- return 0;
+ ret = create_symref_locked(lock, refname, target, logmsg);
+ unlock_ref(lock);
+ return ret;
}
int reflog_exists(const char *refname)
else
return -1;
return 0;
+
+#if LIBCURL_VERSION_NUM >= 0x070a08
+ } else if (!strcmp(name, "family")) {
+ if (!strcmp(value, "ipv4"))
+ git_curl_ipresolve = CURL_IPRESOLVE_V4;
+ else if (!strcmp(value, "ipv6"))
+ git_curl_ipresolve = CURL_IPRESOLVE_V6;
+ else if (!strcmp(value, "all"))
+ git_curl_ipresolve = CURL_IPRESOLVE_WHATEVER;
+ else
+ return -1;
+ return 0;
+#endif /* LIBCURL_VERSION_NUM >= 0x070a08 */
} else {
return 1 /* unsupported */;
}
err = run_one_slot(slot, results);
if (err != HTTP_OK && err != HTTP_REAUTH) {
- error("RPC failed; result=%d, HTTP code = %ld",
- results->curl_result, results->http_code);
+ struct strbuf msg = STRBUF_INIT;
+ if (results->http_code && results->http_code != 200)
+ strbuf_addf(&msg, "HTTP %ld", results->http_code);
+ if (results->curl_result != CURLE_OK) {
+ if (msg.len)
+ strbuf_addch(&msg, ' ');
+ strbuf_addf(&msg, "curl %d", results->curl_result);
+ if (curl_errorstr[0]) {
+ strbuf_addch(&msg, ' ');
+ strbuf_addstr(&msg, curl_errorstr);
+ }
+ }
+ error("RPC failed; %s", msg.buf);
+ strbuf_release(&msg);
}
return err;
static int fetch_dumb(int nr_heads, struct ref **to_fetch)
{
struct walker *walker;
- char **targets = xmalloc(nr_heads * sizeof(char*));
+ char **targets;
int ret, i;
+ ALLOC_ARRAY(targets, nr_heads);
if (options.depth)
die("dumb http transport does not support --depth");
for (i = 0; i < nr_heads; i++)
die("http transport does not support %s", buf->buf);
strbuf_reset(buf);
- if (strbuf_getline(buf, stdin, '\n') == EOF)
+ if (strbuf_getline_lf(buf, stdin) == EOF)
return;
if (!*buf->buf)
break;
static int push_dav(int nr_spec, char **specs)
{
- const char **argv = xmalloc((10 + nr_spec) * sizeof(char*));
- int argc = 0, i;
+ struct child_process child = CHILD_PROCESS_INIT;
+ size_t i;
- argv[argc++] = "http-push";
- argv[argc++] = "--helper-status";
+ child.git_cmd = 1;
+ argv_array_push(&child.args, "http-push");
+ argv_array_push(&child.args, "--helper-status");
if (options.dry_run)
- argv[argc++] = "--dry-run";
+ argv_array_push(&child.args, "--dry-run");
if (options.verbosity > 1)
- argv[argc++] = "--verbose";
- argv[argc++] = url.buf;
+ argv_array_push(&child.args, "--verbose");
+ argv_array_push(&child.args, url.buf);
for (i = 0; i < nr_spec; i++)
- argv[argc++] = specs[i];
- argv[argc++] = NULL;
+ argv_array_push(&child.args, specs[i]);
- if (run_command_v_opt(argv, RUN_GIT_CMD))
- die("git-%s failed", argv[0]);
- free(argv);
+ if (run_command(&child))
+ die("git-http-push failed");
return 0;
}
die("http transport does not support %s", buf->buf);
strbuf_reset(buf);
- if (strbuf_getline(buf, stdin, '\n') == EOF)
+ if (strbuf_getline_lf(buf, stdin) == EOF)
goto free_specs;
if (!*buf->buf)
break;
do {
const char *arg;
- if (strbuf_getline(&buf, stdin, '\n') == EOF) {
+ if (strbuf_getline_lf(&buf, stdin) == EOF) {
if (ferror(stdin))
error("remote-curl: error reading command stream from git");
return 1;
fclose(marksfile);
} else {
strbuf_addf(&sb, ":%d ", latestrev);
- while (strbuf_getline(&line, marksfile, '\n') != EOF) {
+ while (strbuf_getline_lf(&line, marksfile) != EOF) {
if (starts_with(line.buf, sb.buf)) {
found++;
break;
marksfilename = marksfilename_sb.buf;
while (1) {
- if (strbuf_getline(&buf, stdin, '\n') == EOF) {
+ if (strbuf_getline_lf(&buf, stdin) == EOF) {
if (ferror(stdin))
die("Error reading command stream");
else
if (!f)
return;
remote->origin = REMOTE_REMOTES;
- while (strbuf_getline(&buf, f, '\n') != EOF) {
+ while (strbuf_getline(&buf, f) != EOF) {
const char *v;
strbuf_rtrim(&buf);
if (!f)
return;
- strbuf_getline(&buf, f, '\n');
+ strbuf_getline_lf(&buf, f);
fclose(f);
strbuf_trim(&buf);
if (!buf.len) {
static int handle_config(const char *key, const char *value, void *cb)
{
const char *name;
+ int namelen;
const char *subkey;
struct remote *remote;
struct branch *branch;
- if (starts_with(key, "branch.")) {
- name = key + 7;
- subkey = strrchr(name, '.');
- if (!subkey)
+ if (parse_config_key(key, "branch", &name, &namelen, &subkey) >= 0) {
+ if (!name)
return 0;
- branch = make_branch(name, subkey - name);
- if (!strcmp(subkey, ".remote")) {
+ branch = make_branch(name, namelen);
+ if (!strcmp(subkey, "remote")) {
return git_config_string(&branch->remote_name, key, value);
- } else if (!strcmp(subkey, ".pushremote")) {
+ } else if (!strcmp(subkey, "pushremote")) {
return git_config_string(&branch->pushremote_name, key, value);
- } else if (!strcmp(subkey, ".merge")) {
+ } else if (!strcmp(subkey, "merge")) {
if (!value)
return config_error_nonbool(key);
add_merge(branch, xstrdup(value));
}
return 0;
}
- if (starts_with(key, "url.")) {
+ if (parse_config_key(key, "url", &name, &namelen, &subkey) >= 0) {
struct rewrite *rewrite;
- name = key + 4;
- subkey = strrchr(name, '.');
- if (!subkey)
+ if (!name)
return 0;
- if (!strcmp(subkey, ".insteadof")) {
- rewrite = make_rewrite(&rewrites, name, subkey - name);
+ if (!strcmp(subkey, "insteadof")) {
+ rewrite = make_rewrite(&rewrites, name, namelen);
if (!value)
return config_error_nonbool(key);
add_instead_of(rewrite, xstrdup(value));
- } else if (!strcmp(subkey, ".pushinsteadof")) {
- rewrite = make_rewrite(&rewrites_push, name, subkey - name);
+ } else if (!strcmp(subkey, "pushinsteadof")) {
+ rewrite = make_rewrite(&rewrites_push, name, namelen);
if (!value)
return config_error_nonbool(key);
add_instead_of(rewrite, xstrdup(value));
}
}
- if (!starts_with(key, "remote."))
+ if (parse_config_key(key, "remote", &name, &namelen, &subkey) < 0)
return 0;
- name = key + 7;
/* Handle remote.* variables */
- if (!strcmp(name, "pushdefault"))
+ if (!name && !strcmp(subkey, "pushdefault"))
return git_config_string(&pushremote_name, key, value);
+ if (!name)
+ return 0;
/* Handle remote.<name>.* variables */
if (*name == '/') {
warning("Config remote shorthand cannot begin with '/': %s",
name);
return 0;
}
- subkey = strrchr(name, '.');
- if (!subkey)
- return 0;
- remote = make_remote(name, subkey - name);
+ remote = make_remote(name, namelen);
remote->origin = REMOTE_CONFIG;
- if (!strcmp(subkey, ".mirror"))
+ if (!strcmp(subkey, "mirror"))
remote->mirror = git_config_bool(key, value);
- else if (!strcmp(subkey, ".skipdefaultupdate"))
+ else if (!strcmp(subkey, "skipdefaultupdate"))
remote->skip_default_update = git_config_bool(key, value);
- else if (!strcmp(subkey, ".skipfetchall"))
+ else if (!strcmp(subkey, "skipfetchall"))
remote->skip_default_update = git_config_bool(key, value);
- else if (!strcmp(subkey, ".prune"))
+ else if (!strcmp(subkey, "prune"))
remote->prune = git_config_bool(key, value);
- else if (!strcmp(subkey, ".url")) {
+ else if (!strcmp(subkey, "url")) {
const char *v;
if (git_config_string(&v, key, value))
return -1;
add_url(remote, v);
- } else if (!strcmp(subkey, ".pushurl")) {
+ } else if (!strcmp(subkey, "pushurl")) {
const char *v;
if (git_config_string(&v, key, value))
return -1;
add_pushurl(remote, v);
- } else if (!strcmp(subkey, ".push")) {
+ } else if (!strcmp(subkey, "push")) {
const char *v;
if (git_config_string(&v, key, value))
return -1;
add_push_refspec(remote, v);
- } else if (!strcmp(subkey, ".fetch")) {
+ } else if (!strcmp(subkey, "fetch")) {
const char *v;
if (git_config_string(&v, key, value))
return -1;
add_fetch_refspec(remote, v);
- } else if (!strcmp(subkey, ".receivepack")) {
+ } else if (!strcmp(subkey, "receivepack")) {
const char *v;
if (git_config_string(&v, key, value))
return -1;
remote->receivepack = v;
else
error("more than one receivepack given, using the first");
- } else if (!strcmp(subkey, ".uploadpack")) {
+ } else if (!strcmp(subkey, "uploadpack")) {
const char *v;
if (git_config_string(&v, key, value))
return -1;
remote->uploadpack = v;
else
error("more than one uploadpack given, using the first");
- } else if (!strcmp(subkey, ".tagopt")) {
+ } else if (!strcmp(subkey, "tagopt")) {
if (!strcmp(value, "--no-tags"))
remote->fetch_tags = -1;
else if (!strcmp(value, "--tags"))
remote->fetch_tags = 2;
- } else if (!strcmp(subkey, ".proxy")) {
+ } else if (!strcmp(subkey, "proxy")) {
return git_config_string((const char **)&remote->http_proxy,
key, value);
- } else if (!strcmp(subkey, ".vcs")) {
+ } else if (!strcmp(subkey, "proxyauthmethod")) {
+ return git_config_string((const char **)&remote->http_proxy_authmethod,
+ key, value);
+ } else if (!strcmp(subkey, "vcs")) {
return git_config_string(&remote->foreign_vcs, key, value);
}
return 0;
return remote_get_1(name, pushremote_for_branch);
}
-int remote_is_configured(const char *name)
+int remote_is_configured(struct remote *remote)
{
- struct remotes_hash_key lookup;
- struct hashmap_entry lookup_entry;
- read_config();
-
- init_remotes_hash();
- lookup.str = name;
- lookup.len = strlen(name);
- hashmap_entry_init(&lookup_entry, memhash(name, lookup.len));
-
- return hashmap_get(&remotes_hash, &lookup_entry, &lookup) != NULL;
+ return remote && remote->origin;
}
int for_each_remote(each_remote_fn fn, void *priv)
const char *name)
{
size_t len = strlen(name);
- struct ref *ref = xcalloc(1, sizeof(struct ref) + prefixlen + len + 1);
+ struct ref *ref = xcalloc(1, st_add4(sizeof(*ref), prefixlen, len, 1));
memcpy(ref->name, prefix, prefixlen);
memcpy(ref->name + prefixlen, name, len);
return ref;
size_t len;
if (!ref)
return NULL;
- len = strlen(ref->name);
- cpy = xmalloc(sizeof(struct ref) + len + 1);
- memcpy(cpy, ref, sizeof(struct ref) + len + 1);
+ len = st_add3(sizeof(struct ref), strlen(ref->name), 1);
+ cpy = xmalloc(len);
+ memcpy(cpy, ref, len);
cpy->next = NULL;
cpy->symref = xstrdup_or_null(ref->symref);
cpy->remote_status = xstrdup_or_null(ref->remote_status);
{
struct ref ***local_tail = cb_data;
struct ref *ref;
- int len;
/* we already know it starts with refs/ to get here */
if (check_refname_format(refname + 5, 0))
return 0;
- len = strlen(refname) + 1;
- ref = xcalloc(1, sizeof(*ref) + len);
+ ref = alloc_ref(refname);
oidcpy(&ref->new_oid, oid);
- memcpy(ref->name, refname, len);
**local_tail = ref;
*local_tail = &ref->next;
return 0;
#include "hashmap.h"
enum {
+ REMOTE_UNCONFIGURED = 0,
REMOTE_CONFIG,
REMOTE_REMOTES,
REMOTE_BRANCHES
* for curl remotes only
*/
char *http_proxy;
+ char *http_proxy_authmethod;
};
struct remote *remote_get(const char *name);
struct remote *pushremote_get(const char *name);
-int remote_is_configured(const char *name);
+int remote_is_configured(struct remote *remote);
typedef int each_remote_fn(struct remote *remote, void *priv);
int for_each_remote(each_remote_fn fn, void *priv);
static struct rerere_id *new_rerere_id_hex(char *hex)
{
struct rerere_id *id = xmalloc(sizeof(*id));
- strcpy(id->hex, hex);
+ xsnprintf(id->hex, sizeof(id->hex), "%s", hex);
return id;
}
static struct rerere_id *dirname_to_id(const char *name)
{
static struct rerere_id id;
- strcpy(id.hex, name);
+ xsnprintf(id.hex, sizeof(id.hex), "%s", name);
return &id;
}
static const char *term_bad;
static const char *term_good;
-char *path_name(const struct name_path *path, const char *name)
-{
- const struct name_path *p;
- char *n, *m;
- int nlen = strlen(name);
- int len = nlen + 1;
-
- for (p = path; p; p = p->up) {
- if (p->elem_len)
- len += p->elem_len + 1;
- }
- n = xmalloc(len);
- m = n + len - (nlen + 1);
- memcpy(m, name, nlen + 1);
- for (p = path; p; p = p->up) {
- if (p->elem_len) {
- m -= p->elem_len + 1;
- memcpy(m, p->elem, p->elem_len);
- m[p->elem_len] = '/';
- }
- }
- return n;
-}
-
-static int show_path_component_truncated(FILE *out, const char *name, int len)
-{
- int cnt;
- for (cnt = 0; cnt < len; cnt++) {
- int ch = name[cnt];
- if (!ch || ch == '\n')
- return -1;
- fputc(ch, out);
- }
- return len;
-}
-
-static int show_path_truncated(FILE *out, const struct name_path *path)
-{
- int emitted, ours;
-
- if (!path)
- return 0;
- emitted = show_path_truncated(out, path->up);
- if (emitted < 0)
- return emitted;
- if (emitted)
- fputc('/', out);
- ours = show_path_component_truncated(out, path->elem, path->elem_len);
- if (ours < 0)
- return ours;
- return ours || emitted;
-}
-
-void show_object_with_name(FILE *out, struct object *obj,
- const struct name_path *path, const char *component)
+void show_object_with_name(FILE *out, struct object *obj, const char *name)
{
- struct name_path leaf;
- leaf.up = (struct name_path *)path;
- leaf.elem = component;
- leaf.elem_len = strlen(component);
+ const char *p;
fprintf(out, "%s ", oid_to_hex(&obj->oid));
- show_path_truncated(out, &leaf);
+ for (p = name; *p && *p != '\n'; p++)
+ fputc(*p, out);
fputc('\n', out);
}
static struct treesame_state *initialise_treesame(struct rev_info *revs, struct commit *commit)
{
unsigned n = commit_list_count(commit->parents);
- struct treesame_state *st = xcalloc(1, sizeof(*st) + n);
+ struct treesame_state *st = xcalloc(1, st_add(sizeof(*st), n));
st->nparents = n;
add_decoration(&revs->treesame, &commit->object, st);
return st;
static void read_pathspec_from_stdin(struct rev_info *revs, struct strbuf *sb,
struct cmdline_pathspec *prune)
{
- while (strbuf_getwholeline(sb, stdin, '\n') != EOF) {
- int len = sb->len;
- if (len && sb->buf[len - 1] == '\n')
- sb->buf[--len] = '\0';
+ while (strbuf_getline(sb, stdin) != EOF) {
ALLOC_GROW(prune->path, prune->nr + 1, prune->alloc);
prune->path[prune->nr++] = xstrdup(sb->buf);
}
warn_on_object_refname_ambiguity = 0;
strbuf_init(&sb, 1000);
- while (strbuf_getwholeline(&sb, stdin, '\n') != EOF) {
+ while (strbuf_getline(&sb, stdin) != EOF) {
int len = sb.len;
- if (len && sb.buf[len - 1] == '\n')
- sb.buf[--len] = '\0';
if (!len)
break;
if (sb.buf[0] == '-') {
extern void mark_parents_uninteresting(struct commit *commit);
extern void mark_tree_uninteresting(struct tree *tree);
-struct name_path {
- struct name_path *up;
- int elem_len;
- const char *elem;
-};
-
-char *path_name(const struct name_path *path, const char *name);
+char *path_name(struct strbuf *path, const char *name);
-extern void show_object_with_name(FILE *, struct object *,
- const struct name_path *, const char *);
+extern void show_object_with_name(FILE *, struct object *, const char *);
extern void add_pending_object(struct rev_info *revs,
struct object *obj, const char *name);
#include "exec_cmd.h"
#include "sigchain.h"
#include "argv-array.h"
+#include "thread-utils.h"
+#include "strbuf.h"
void child_process_init(struct child_process *child)
{
return -1;
}
-static const char **prepare_shell_cmd(const char **argv)
+static const char **prepare_shell_cmd(struct argv_array *out, const char **argv)
{
- int argc, nargc = 0;
- const char **nargv;
-
- for (argc = 0; argv[argc]; argc++)
- ; /* just counting */
- /* +1 for NULL, +3 for "sh -c" plus extra $0 */
- nargv = xmalloc(sizeof(*nargv) * (argc + 1 + 3));
-
- if (argc < 1)
+ if (!argv[0])
die("BUG: shell command is empty");
if (strcspn(argv[0], "|&;<>()$`\\\"' \t\n*?[#~=%") != strlen(argv[0])) {
#ifndef GIT_WINDOWS_NATIVE
- nargv[nargc++] = SHELL_PATH;
+ argv_array_push(out, SHELL_PATH);
#else
- nargv[nargc++] = "sh";
+ argv_array_push(out, "sh");
#endif
- nargv[nargc++] = "-c";
-
- if (argc < 2)
- nargv[nargc++] = argv[0];
- else {
- struct strbuf arg0 = STRBUF_INIT;
- strbuf_addf(&arg0, "%s \"$@\"", argv[0]);
- nargv[nargc++] = strbuf_detach(&arg0, NULL);
- }
- }
+ argv_array_push(out, "-c");
- for (argc = 0; argv[argc]; argc++)
- nargv[nargc++] = argv[argc];
- nargv[nargc] = NULL;
+ /*
+ * If we have no extra arguments, we do not even need to
+ * bother with the "$@" magic.
+ */
+ if (!argv[1])
+ argv_array_push(out, argv[0]);
+ else
+ argv_array_pushf(out, "%s \"$@\"", argv[0]);
+ }
- return nargv;
+ argv_array_pushv(out, argv);
+ return out->argv;
}
#ifndef GIT_WINDOWS_NATIVE
static int execv_shell_cmd(const char **argv)
{
- const char **nargv = prepare_shell_cmd(argv);
- trace_argv_printf(nargv, "trace: exec:");
- sane_execvp(nargv[0], (char **)nargv);
- free(nargv);
+ struct argv_array nargv = ARGV_ARRAY_INIT;
+ prepare_shell_cmd(&nargv, argv);
+ trace_argv_printf(nargv.argv, "trace: exec:");
+ sane_execvp(nargv.argv[0], (char **)nargv.argv);
+ argv_array_clear(&nargv);
return -1;
}
#endif
error("waitpid is confused (%s)", argv0);
} else if (WIFSIGNALED(status)) {
code = WTERMSIG(status);
- if (code != SIGINT && code != SIGQUIT)
+ if (code != SIGINT && code != SIGQUIT && code != SIGPIPE)
error("%s died of signal %d", argv0, code);
/*
* This return value is chosen so that code & 0xff
{
int fhin = 0, fhout = 1, fherr = 2;
const char **sargv = cmd->argv;
+ struct argv_array nargv = ARGV_ARRAY_INIT;
if (cmd->no_stdin)
fhin = open("/dev/null", O_RDWR);
fhout = dup(cmd->out);
if (cmd->git_cmd)
- cmd->argv = prepare_git_cmd(cmd->argv);
+ cmd->argv = prepare_git_cmd(&nargv, cmd->argv);
else if (cmd->use_shell)
- cmd->argv = prepare_shell_cmd(cmd->argv);
+ cmd->argv = prepare_shell_cmd(&nargv, cmd->argv);
cmd->pid = mingw_spawnvpe(cmd->argv[0], cmd->argv, (char**) cmd->env,
cmd->dir, fhin, fhout, fherr);
if (cmd->clean_on_exit && cmd->pid >= 0)
mark_child_for_cleanup(cmd->pid);
- if (cmd->git_cmd)
- free(cmd->argv);
-
+ argv_array_clear(&nargv);
cmd->argv = sargv;
if (fhin != 0)
close(fhin);
return !pthread_equal(main_thread, pthread_self());
}
+void NORETURN async_exit(int code)
+{
+ pthread_exit((void *)(intptr_t)code);
+}
+
#else
static struct {
return process_is_async;
}
+void NORETURN async_exit(int code)
+{
+ exit(code);
+}
+
#endif
int start_async(struct async *async)
close(cmd->out);
return finish_command(cmd);
}
+
+enum child_state {
+ GIT_CP_FREE,
+ GIT_CP_WORKING,
+ GIT_CP_WAIT_CLEANUP,
+};
+
+struct parallel_processes {
+ void *data;
+
+ int max_processes;
+ int nr_processes;
+
+ get_next_task_fn get_next_task;
+ start_failure_fn start_failure;
+ task_finished_fn task_finished;
+
+ struct {
+ enum child_state state;
+ struct child_process process;
+ struct strbuf err;
+ void *data;
+ } *children;
+ /*
+ * The struct pollfd is logically part of *children,
+ * but the system call expects it as its own array.
+ */
+ struct pollfd *pfd;
+
+ unsigned shutdown : 1;
+
+ int output_owner;
+ struct strbuf buffered_output; /* of finished children */
+};
+
+static int default_start_failure(struct strbuf *err,
+ void *pp_cb,
+ void *pp_task_cb)
+{
+ return 0;
+}
+
+static int default_task_finished(int result,
+ struct strbuf *err,
+ void *pp_cb,
+ void *pp_task_cb)
+{
+ return 0;
+}
+
+static void kill_children(struct parallel_processes *pp, int signo)
+{
+ int i, n = pp->max_processes;
+
+ for (i = 0; i < n; i++)
+ if (pp->children[i].state == GIT_CP_WORKING)
+ kill(pp->children[i].process.pid, signo);
+}
+
+static struct parallel_processes *pp_for_signal;
+
+static void handle_children_on_signal(int signo)
+{
+ kill_children(pp_for_signal, signo);
+ sigchain_pop(signo);
+ raise(signo);
+}
+
+static void pp_init(struct parallel_processes *pp,
+ int n,
+ get_next_task_fn get_next_task,
+ start_failure_fn start_failure,
+ task_finished_fn task_finished,
+ void *data)
+{
+ int i;
+
+ if (n < 1)
+ n = online_cpus();
+
+ pp->max_processes = n;
+
+ trace_printf("run_processes_parallel: preparing to run up to %d tasks", n);
+
+ pp->data = data;
+ if (!get_next_task)
+ die("BUG: you need to specify a get_next_task function");
+ pp->get_next_task = get_next_task;
+
+ pp->start_failure = start_failure ? start_failure : default_start_failure;
+ pp->task_finished = task_finished ? task_finished : default_task_finished;
+
+ pp->nr_processes = 0;
+ pp->output_owner = 0;
+ pp->shutdown = 0;
+ pp->children = xcalloc(n, sizeof(*pp->children));
+ pp->pfd = xcalloc(n, sizeof(*pp->pfd));
+ strbuf_init(&pp->buffered_output, 0);
+
+ for (i = 0; i < n; i++) {
+ strbuf_init(&pp->children[i].err, 0);
+ child_process_init(&pp->children[i].process);
+ pp->pfd[i].events = POLLIN | POLLHUP;
+ pp->pfd[i].fd = -1;
+ }
+
+ pp_for_signal = pp;
+ sigchain_push_common(handle_children_on_signal);
+}
+
+static void pp_cleanup(struct parallel_processes *pp)
+{
+ int i;
+
+ trace_printf("run_processes_parallel: done");
+ for (i = 0; i < pp->max_processes; i++) {
+ strbuf_release(&pp->children[i].err);
+ child_process_clear(&pp->children[i].process);
+ }
+
+ free(pp->children);
+ free(pp->pfd);
+
+ /*
+ * When get_next_task added messages to the buffer in its last
+ * iteration, the buffered output is non empty.
+ */
+ fputs(pp->buffered_output.buf, stderr);
+ strbuf_release(&pp->buffered_output);
+
+ sigchain_pop_common();
+}
+
+/* returns
+ * 0 if a new task was started.
+ * 1 if no new job was started (get_next_task ran out of work, or there was
+ * a non-critical problem with starting a new command)
+ * <0 no new job was started, user wishes to shut down early. Use negative code
+ * to signal the children.
+ */
+static int pp_start_one(struct parallel_processes *pp)
+{
+ int i, code;
+
+ for (i = 0; i < pp->max_processes; i++)
+ if (pp->children[i].state == GIT_CP_FREE)
+ break;
+ if (i == pp->max_processes)
+ die("BUG: bookkeeping is hard");
+
+ code = pp->get_next_task(&pp->children[i].process,
+ &pp->children[i].err,
+ pp->data,
+ &pp->children[i].data);
+ if (!code) {
+ strbuf_addbuf(&pp->buffered_output, &pp->children[i].err);
+ strbuf_reset(&pp->children[i].err);
+ return 1;
+ }
+ pp->children[i].process.err = -1;
+ pp->children[i].process.stdout_to_stderr = 1;
+ pp->children[i].process.no_stdin = 1;
+
+ if (start_command(&pp->children[i].process)) {
+ code = pp->start_failure(&pp->children[i].err,
+ pp->data,
+ &pp->children[i].data);
+ strbuf_addbuf(&pp->buffered_output, &pp->children[i].err);
+ strbuf_reset(&pp->children[i].err);
+ if (code)
+ pp->shutdown = 1;
+ return code;
+ }
+
+ pp->nr_processes++;
+ pp->children[i].state = GIT_CP_WORKING;
+ pp->pfd[i].fd = pp->children[i].process.err;
+ return 0;
+}
+
+static void pp_buffer_stderr(struct parallel_processes *pp, int output_timeout)
+{
+ int i;
+
+ while ((i = poll(pp->pfd, pp->max_processes, output_timeout)) < 0) {
+ if (errno == EINTR)
+ continue;
+ pp_cleanup(pp);
+ die_errno("poll");
+ }
+
+ /* Buffer output from all pipes. */
+ for (i = 0; i < pp->max_processes; i++) {
+ if (pp->children[i].state == GIT_CP_WORKING &&
+ pp->pfd[i].revents & (POLLIN | POLLHUP)) {
+ int n = strbuf_read_once(&pp->children[i].err,
+ pp->children[i].process.err, 0);
+ if (n == 0) {
+ close(pp->children[i].process.err);
+ pp->children[i].state = GIT_CP_WAIT_CLEANUP;
+ } else if (n < 0)
+ if (errno != EAGAIN)
+ die_errno("read");
+ }
+ }
+}
+
+static void pp_output(struct parallel_processes *pp)
+{
+ int i = pp->output_owner;
+ if (pp->children[i].state == GIT_CP_WORKING &&
+ pp->children[i].err.len) {
+ fputs(pp->children[i].err.buf, stderr);
+ strbuf_reset(&pp->children[i].err);
+ }
+}
+
+static int pp_collect_finished(struct parallel_processes *pp)
+{
+ int i, code;
+ int n = pp->max_processes;
+ int result = 0;
+
+ while (pp->nr_processes > 0) {
+ for (i = 0; i < pp->max_processes; i++)
+ if (pp->children[i].state == GIT_CP_WAIT_CLEANUP)
+ break;
+ if (i == pp->max_processes)
+ break;
+
+ code = finish_command(&pp->children[i].process);
+
+ code = pp->task_finished(code,
+ &pp->children[i].err, pp->data,
+ &pp->children[i].data);
+
+ if (code)
+ result = code;
+ if (code < 0)
+ break;
+
+ pp->nr_processes--;
+ pp->children[i].state = GIT_CP_FREE;
+ pp->pfd[i].fd = -1;
+ child_process_init(&pp->children[i].process);
+
+ if (i != pp->output_owner) {
+ strbuf_addbuf(&pp->buffered_output, &pp->children[i].err);
+ strbuf_reset(&pp->children[i].err);
+ } else {
+ fputs(pp->children[i].err.buf, stderr);
+ strbuf_reset(&pp->children[i].err);
+
+ /* Output all other finished child processes */
+ fputs(pp->buffered_output.buf, stderr);
+ strbuf_reset(&pp->buffered_output);
+
+ /*
+ * Pick next process to output live.
+ * NEEDSWORK:
+ * For now we pick it randomly by doing a round
+ * robin. Later we may want to pick the one with
+ * the most output or the longest or shortest
+ * running process time.
+ */
+ for (i = 0; i < n; i++)
+ if (pp->children[(pp->output_owner + i) % n].state == GIT_CP_WORKING)
+ break;
+ pp->output_owner = (pp->output_owner + i) % n;
+ }
+ }
+ return result;
+}
+
+int run_processes_parallel(int n,
+ get_next_task_fn get_next_task,
+ start_failure_fn start_failure,
+ task_finished_fn task_finished,
+ void *pp_cb)
+{
+ int i, code;
+ int output_timeout = 100;
+ int spawn_cap = 4;
+ struct parallel_processes pp;
+
+ pp_init(&pp, n, get_next_task, start_failure, task_finished, pp_cb);
+ while (1) {
+ for (i = 0;
+ i < spawn_cap && !pp.shutdown &&
+ pp.nr_processes < pp.max_processes;
+ i++) {
+ code = pp_start_one(&pp);
+ if (!code)
+ continue;
+ if (code < 0) {
+ pp.shutdown = 1;
+ kill_children(&pp, -code);
+ }
+ break;
+ }
+ if (!pp.nr_processes)
+ break;
+ pp_buffer_stderr(&pp, output_timeout);
+ pp_output(&pp);
+ code = pp_collect_finished(&pp);
+ if (code) {
+ pp.shutdown = 1;
+ if (code < 0)
+ kill_children(&pp, -code);
+ }
+ }
+
+ pp_cleanup(&pp);
+ return 0;
+}
int start_async(struct async *async);
int finish_async(struct async *async);
int in_async(void);
+void NORETURN async_exit(int code);
+
+/**
+ * This callback should initialize the child process and preload the
+ * error channel if desired. Preloading it is useful if you want to
+ * have a message printed directly before the output of the child process.
+ * pp_cb is the callback cookie as passed to run_processes_parallel.
+ * You can store a child process specific callback cookie in pp_task_cb.
+ *
+ * Even after returning 0 to indicate that there are no more processes,
+ * this function will be called again until there are no more running
+ * child processes.
+ *
+ * Return 1 if the next child is ready to run.
+ * Return 0 if there are currently no more tasks to be processed.
+ * To send a signal to other child processes for abortion,
+ * return the negative signal number.
+ */
+typedef int (*get_next_task_fn)(struct child_process *cp,
+ struct strbuf *err,
+ void *pp_cb,
+ void **pp_task_cb);
+
+/**
+ * This callback is called whenever there are problems starting
+ * a new process.
+ *
+ * You must not write to stdout or stderr in this function. Add your
+ * message to the strbuf err instead, which will be printed without
+ * messing up the output of the other parallel processes.
+ *
+ * pp_cb is the callback cookie as passed into run_processes_parallel,
+ * pp_task_cb is the callback cookie as passed into get_next_task_fn.
+ *
+ * Return 0 to continue the parallel processing. To abort return non zero.
+ * To send a signal to other child processes for abortion, return
+ * the negative signal number.
+ */
+typedef int (*start_failure_fn)(struct strbuf *err,
+ void *pp_cb,
+ void *pp_task_cb);
+
+/**
+ * This callback is called on every child process that finished processing.
+ *
+ * You must not write to stdout or stderr in this function. Add your
+ * message to the strbuf err instead, which will be printed without
+ * messing up the output of the other parallel processes.
+ *
+ * pp_cb is the callback cookie as passed into run_processes_parallel,
+ * pp_task_cb is the callback cookie as passed into get_next_task_fn.
+ *
+ * Return 0 to continue the parallel processing. To abort return non zero.
+ * To send a signal to other child processes for abortion, return
+ * the negative signal number.
+ */
+typedef int (*task_finished_fn)(int result,
+ struct strbuf *err,
+ void *pp_cb,
+ void *pp_task_cb);
+
+/**
+ * Runs up to n processes at the same time. Whenever a process can be
+ * started, the callback get_next_task_fn is called to obtain the data
+ * required to start another child process.
+ *
+ * The children started via this function run in parallel. Their output
+ * (both stdout and stderr) is routed to stderr in a manner that output
+ * from different tasks does not interleave.
+ *
+ * start_failure_fn and task_finished_fn can be NULL to omit any
+ * special handling.
+ */
+int run_processes_parallel(int n,
+ get_next_task_fn,
+ start_failure_fn,
+ task_finished_fn,
+ void *pp_cb);
#endif
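
(Editorial note, not part of the patch: to make the callback contract above concrete,
here is a rough usage sketch. The struct name_list, the next_task() callback and the
command being spawned are made up for illustration; only run_processes_parallel() and
the callback signatures come from the header above.)

	struct name_list {
		const char **names;	/* NULL-terminated, hypothetical input */
		int next;
	};

	static int next_task(struct child_process *cp, struct strbuf *err,
			     void *pp_cb, void **pp_task_cb)
	{
		struct name_list *list = pp_cb;
		const char *name = list->names[list->next];

		if (!name)
			return 0;	/* no more tasks; may be called again until children finish */
		list->next++;

		cp->git_cmd = 1;
		argv_array_push(&cp->args, "rev-parse");
		argv_array_push(&cp->args, "--verify");
		argv_array_push(&cp->args, name);
		strbuf_addf(err, "checking %s\n", name);	/* preloaded message */
		return 1;	/* the next child is ready to run */
	}

	...
	struct name_list list = { names, 0 };
	/* start_failure and task_finished may be NULL to get the default handling */
	run_processes_parallel(4, next_task, NULL, NULL, &list);
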
struct commit_message {
char *parent_label;
- const char *label;
- const char *subject;
+ char *label;
+ char *subject;
const char *message;
};
static int get_message(struct commit *commit, struct commit_message *out)
{
const char *abbrev, *subject;
- int abbrev_len, subject_len;
- char *q;
-
- if (!git_commit_encoding)
- git_commit_encoding = "UTF-8";
+ int subject_len;
- out->message = logmsg_reencode(commit, NULL, git_commit_encoding);
+ out->message = logmsg_reencode(commit, NULL, get_commit_output_encoding());
abbrev = find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV);
- abbrev_len = strlen(abbrev);
subject_len = find_commit_subject(out->message, &subject);
- out->parent_label = xmalloc(strlen("parent of ") + abbrev_len +
- strlen("... ") + subject_len + 1);
- q = out->parent_label;
- q = mempcpy(q, "parent of ", strlen("parent of "));
- out->label = q;
- q = mempcpy(q, abbrev, abbrev_len);
- q = mempcpy(q, "... ", strlen("... "));
- out->subject = q;
- q = mempcpy(q, subject, subject_len);
- *q = '\0';
+ out->subject = xmemdupz(subject, subject_len);
+ out->label = xstrfmt("%s... %s", abbrev, out->subject);
+ out->parent_label = xstrfmt("parent of %s", out->label);
+
return 0;
}
static void free_message(struct commit *commit, struct commit_message *msg)
{
free(msg->parent_label);
+ free(msg->label);
+ free(msg->subject);
unuse_commit_buffer(commit, msg->message);
}
if (!f)
return error(_("cannot open %s: %s"), git_path_head_file(),
strerror(errno));
- if (strbuf_getline(&buf, f, '\n')) {
+ if (strbuf_getline_lf(&buf, f)) {
error(_("cannot read %s: %s"), git_path_head_file(),
ferror(f) ? strerror(errno) : _("unexpected end of file"));
fclose(f);
const char *orig = path;
char *sanitized;
if (is_absolute_path(orig)) {
- sanitized = xmalloc(strlen(path) + 1);
+ sanitized = xmallocz(strlen(path));
if (remaining_prefix)
*remaining_prefix = 0;
if (normalize_path_copy_len(sanitized, path, remaining_prefix)) {
if (arg[2] == '\0') /* ":/" is root dir, always exists */
return 1;
name = arg + 2;
- } else if (!no_wildcard(arg))
- return 1;
- else if (prefix)
+ } else if (prefix)
name = prefix_filename(prefix, strlen(prefix), arg);
else
name = arg;
{
if (*arg == '-')
die("bad flag '%s' used after filename", arg);
- if (check_filename(prefix, arg))
+ if (check_filename(prefix, arg) || !no_wildcard(arg))
return;
die_verify_filename(prefix, arg, diagnose_misspelt_rev);
}
error_code = READ_GITFILE_ERR_OPEN_FAILED;
goto cleanup_return;
}
- buf = xmalloc(st.st_size + 1);
+ buf = xmallocz(st.st_size);
len = read_in_full(fd, buf, st.st_size);
close(fd);
if (len != st.st_size) {
error_code = READ_GITFILE_ERR_READ_FAILED;
goto cleanup_return;
}
- buf[len] = '\0';
if (!starts_with(buf, "gitdir: ")) {
error_code = READ_GITFILE_ERR_INVALID_FORMAT;
goto cleanup_return;
{
struct alternate_object_database *ent;
struct alternate_object_database *alt;
- int pfxlen, entlen;
+ size_t pfxlen, entlen;
struct strbuf pathbuf = STRBUF_INIT;
if (!is_absolute_path(entry) && relative_base) {
while (pfxlen && pathbuf.buf[pfxlen-1] == '/')
pfxlen -= 1;
- entlen = pfxlen + 43; /* '/' + 2 hex + '/' + 38 hex + NUL */
- ent = xmalloc(sizeof(*ent) + entlen);
+ entlen = st_add(pfxlen, 43); /* '/' + 2 hex + '/' + 38 hex + NUL */
+ ent = xmalloc(st_add(sizeof(*ent), entlen));
memcpy(ent->base, pathbuf.buf, pfxlen);
strbuf_release(&pathbuf);
struct strbuf line = STRBUF_INIT;
int found = 0;
- while (strbuf_getline(&line, in, '\n') != EOF) {
+ while (strbuf_getline(&line, in) != EOF) {
if (!strcmp(reference, line.buf)) {
found = 1;
break;
die("packfile %s cannot be accessed", p->pack_name);
if (offset > (p->pack_size - 20))
die("offset beyond end of packfile (truncated pack?)");
+ if (offset < 0)
+ die(_("offset before end of packfile (broken .idx?)"));
if (!win || !in_window(win, offset)) {
if (win)
static struct packed_git *alloc_packed_git(int extra)
{
- struct packed_git *p = xmalloc(sizeof(*p) + extra);
+ struct packed_git *p = xmalloc(st_add(sizeof(*p), extra));
memset(p, 0, sizeof(*p));
p->pack_fd = -1;
return p;
* ".pack" is long enough to hold any suffix we're adding (and
* the use xsnprintf double-checks that)
*/
- alloc = path_len + strlen(".pack") + 1;
+ alloc = st_add3(path_len, strlen(".pack"), 1);
p = alloc_packed_git(alloc);
memcpy(p->pack_name, path, path_len);
struct packed_git *parse_pack_index(unsigned char *sha1, const char *idx_path)
{
const char *path = sha1_pack_name(sha1);
- int alloc = strlen(path) + 1;
+ size_t alloc = st_add(strlen(path), 1);
struct packed_git *p = alloc_packed_git(alloc);
memcpy(p->pack_name, path, alloc); /* includes NUL */
{
unsigned i;
for (i = 0; i < p->num_bad_objects; i++)
- if (!hashcmp(sha1, p->bad_object_sha1 + 20 * i))
+ if (!hashcmp(sha1, p->bad_object_sha1 + GIT_SHA1_RAWSZ * i))
return;
- p->bad_object_sha1 = xrealloc(p->bad_object_sha1, 20 * (p->num_bad_objects + 1));
- hashcpy(p->bad_object_sha1 + 20 * p->num_bad_objects, sha1);
+ p->bad_object_sha1 = xrealloc(p->bad_object_sha1,
+ st_mult(GIT_SHA1_RAWSZ,
+ st_add(p->num_bad_objects, 1)));
+ hashcpy(p->bad_object_sha1 + GIT_SHA1_RAWSZ * p->num_bad_objects, sha1);
p->num_bad_objects++;
}
/* Push the object we're going to leave behind */
if (poi_stack_nr >= poi_stack_alloc && poi_stack == small_poi_stack) {
poi_stack_alloc = alloc_nr(poi_stack_nr);
- poi_stack = xmalloc(sizeof(off_t)*poi_stack_alloc);
+ ALLOC_ARRAY(poi_stack, poi_stack_alloc);
memcpy(poi_stack, small_poi_stack, sizeof(off_t)*poi_stack_nr);
} else {
ALLOC_GROW(poi_stack, poi_stack_nr+1, poi_stack_alloc);
if (delta_stack_nr >= delta_stack_alloc
&& delta_stack == small_delta_stack) {
delta_stack_alloc = alloc_nr(delta_stack_nr);
- delta_stack = xmalloc(sizeof(*delta_stack)*delta_stack_alloc);
+ ALLOC_ARRAY(delta_stack, delta_stack_alloc);
memcpy(delta_stack, small_delta_stack,
sizeof(*delta_stack)*delta_stack_nr);
} else {
}
}
+void check_pack_index_ptr(const struct packed_git *p, const void *vptr)
+{
+ const unsigned char *ptr = vptr;
+ const unsigned char *start = p->index_data;
+ const unsigned char *end = start + p->index_size;
+ if (ptr < start)
+ die(_("offset before start of pack index for %s (corrupt index?)"),
+ p->pack_name);
+ /* No need to check for underflow; .idx files must be at least 8 bytes */
+ if (ptr >= end - 8)
+ die(_("offset beyond end of pack index for %s (truncated index?)"),
+ p->pack_name);
+}
+
off_t nth_packed_object_offset(const struct packed_git *p, uint32_t n)
{
const unsigned char *index = p->index_data;
if (!(off & 0x80000000))
return off;
index += p->num_objects * 4 + (off & 0x7fffffff) * 8;
+ check_pack_index_ptr(p, index);
return (((uint64_t)ntohl(*((uint32_t *)(index + 0)))) << 32) |
ntohl(*((uint32_t *)(index + 4)));
}
* object databases including our own.
*/
const char *objdir = get_object_directory();
- int objdir_len = strlen(objdir);
- int entlen = objdir_len + 43;
- fakeent = xmalloc(sizeof(*fakeent) + entlen);
+ size_t objdir_len = strlen(objdir);
+ fakeent = xmalloc(st_add3(sizeof(*fakeent), objdir_len, 43));
memcpy(fakeent->base, objdir, objdir_len);
fakeent->name = fakeent->base + objdir_len + 1;
fakeent->name[-1] = '/';
* through history and returning the first commit whose message starts
* the given regular expression.
*
- * For future extension, ':/!' is reserved. If you want to match a message
- * beginning with a '!', you have to repeat the exclamation mark.
+ * For negative-matching, prefix the pattern-part with '!-', like: ':/!-WIP'.
+ *
+ * For a literal '!' character at the beginning of a pattern, you have to repeat
+ * that, like: ':/!!foo'
+ *
+ * For future extension, all other sequences beginning with ':/!' are reserved.
*/
/* Remember to update object flag allocation in object.h */
{
struct commit_list *backup = NULL, *l;
int found = 0;
+ int negative = 0;
regex_t regex;
if (prefix[0] == '!') {
- if (prefix[1] != '!')
- die ("Invalid search pattern: %s", prefix);
prefix++;
+
+ if (prefix[0] == '-') {
+ prefix++;
+ negative = 1;
+ } else if (prefix[0] != '!') {
+ return -1;
+ }
}
if (regcomp(&regex, prefix, REG_EXTENDED))
- die("Invalid search pattern: %s", prefix);
+ return -1;
for (l = list; l; l = l->next) {
l->item->object.flags |= ONELINE_SEEN;
continue;
buf = get_commit_buffer(commit, NULL);
p = strstr(buf, "\n\n");
- matches = p && !regexec(&regex, p + 2, 0, NULL, 0);
+ matches = negative ^ (p && !regexec(&regex, p + 2, 0, NULL, 0));
unuse_commit_buffer(commit, buf);
if (matches) {
info->shallow = sa;
if (!sa)
return;
- info->ours = xmalloc(sizeof(*info->ours) * sa->nr);
- info->theirs = xmalloc(sizeof(*info->theirs) * sa->nr);
+ ALLOC_ARRAY(info->ours, sa->nr);
+ ALLOC_ARRAY(info->theirs, sa->nr);
for (i = 0; i < sa->nr; i++) {
if (has_sha1_file(sa->sha1[i])) {
struct commit_graft *graft;
unsigned int i, nr;
struct commit_list *head = NULL;
int bitmap_nr = (info->nr_bits + 31) / 32;
- int bitmap_size = bitmap_nr * sizeof(uint32_t);
+ size_t bitmap_size = st_mult(bitmap_nr, sizeof(uint32_t));
uint32_t *tmp = xmalloc(bitmap_size); /* to be freed before return */
uint32_t *bitmap = paint_alloc(info);
struct commit *c = lookup_commit_reference_gently(sha1, 1);
struct paint_info pi;
trace_printf_key(&trace_shallow, "shallow: assign_shallow_commits_to_refs\n");
- shallow = xmalloc(sizeof(*shallow) * (info->nr_ours + info->nr_theirs));
+ ALLOC_ARRAY(shallow, info->nr_ours + info->nr_theirs);
for (i = 0; i < info->nr_ours; i++)
shallow[nr_shallow++] = info->ours[i];
for (i = 0; i < info->nr_theirs; i++)
int count;
fprintf(stderr, "git> ");
- if (strbuf_getline(&line, stdin, '\n') == EOF) {
+ if (strbuf_getline_lf(&line, stdin) == EOF) {
fprintf(stderr, "\n");
strbuf_release(&line);
break;
unsigned char sha1[20];
uint32_t crc;
uint32_t off;
- } *entries = xmalloc(nr * sizeof(entries[0]));
+ } *entries;
+ ALLOC_ARRAY(entries, nr);
for (i = 0; i < nr; i++)
if (fread(entries[i].sha1, 20, 1, stdin) != 1)
die("unable to read sha1 %u/%u", i, nr);
sigchain_push(SIGQUIT, f);
sigchain_push(SIGPIPE, f);
}
+
+void sigchain_pop_common(void)
+{
+ sigchain_pop(SIGPIPE);
+ sigchain_pop(SIGQUIT);
+ sigchain_pop(SIGTERM);
+ sigchain_pop(SIGHUP);
+ sigchain_pop(SIGINT);
+}
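sigchain_pop_common() is the counterpart to the existing
sigchain_push_common(): it unwinds the handlers for the usual deadly signals
(INT, HUP, TERM, QUIT, PIPE) in one call. A hypothetical usage sketch, not
part of the patch (cleanup_on_signal() and the work in between are made up):

	static void cleanup_on_signal(int signo)
	{
		/* undo temporary state, then let the previous handler run */
		sigchain_pop(signo);
		raise(signo);
	}

	static void do_guarded_work(void)
	{
		sigchain_push_common(cleanup_on_signal);
		/* ... work that must be cleaned up on common deadly signals ... */
		sigchain_pop_common();	/* restores everything pushed above */
	}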
int sigchain_pop(int sig);
void sigchain_push_common(sigchain_fun f);
+void sigchain_pop_common(void);
#endif /* SIGCHAIN_H */
return sb->len - oldlen;
}
+ssize_t strbuf_read_once(struct strbuf *sb, int fd, size_t hint)
+{
+ ssize_t cnt;
+
+ strbuf_grow(sb, hint ? hint : 8192);
+ cnt = xread(fd, sb->buf + sb->len, sb->alloc - sb->len - 1);
+ if (cnt > 0)
+ strbuf_setlen(sb, sb->len + cnt);
+ return cnt;
+}
+
#define STRBUF_MAXLINK (2*PATH_MAX)
int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint)
}
#endif
-int strbuf_getline(struct strbuf *sb, FILE *fp, int term)
+static int strbuf_getdelim(struct strbuf *sb, FILE *fp, int term)
{
if (strbuf_getwholeline(sb, fp, term))
return EOF;
- if (sb->buf[sb->len-1] == term)
- strbuf_setlen(sb, sb->len-1);
+ if (sb->buf[sb->len - 1] == term)
+ strbuf_setlen(sb, sb->len - 1);
return 0;
}
+int strbuf_getline(struct strbuf *sb, FILE *fp)
+{
+ if (strbuf_getwholeline(sb, fp, '\n'))
+ return EOF;
+ if (sb->buf[sb->len - 1] == '\n') {
+ strbuf_setlen(sb, sb->len - 1);
+ if (sb->len && sb->buf[sb->len - 1] == '\r')
+ strbuf_setlen(sb, sb->len - 1);
+ }
+ return 0;
+}
+
+int strbuf_getline_lf(struct strbuf *sb, FILE *fp)
+{
+ return strbuf_getdelim(sb, fp, '\n');
+}
+
+int strbuf_getline_nul(struct strbuf *sb, FILE *fp)
+{
+ return strbuf_getdelim(sb, fp, '\0');
+}
+
int strbuf_getwholeline_fd(struct strbuf *sb, int fd, int term)
{
strbuf_reset(sb);
size_t len, i;
len = strlen(string);
- result = xmalloc(len + 1);
+ result = xmallocz(len);
for (i = 0; i < len; i++)
result[i] = tolower(string[i]);
result[i] = '\0';
*
* NOTE: The buffer is rewound if the read fails. If -1 is returned,
* `errno` must be consulted, like you would do for `read(3)`.
- * `strbuf_read()`, `strbuf_read_file()` and `strbuf_getline()` has the
- * same behaviour as well.
+ * `strbuf_read()`, `strbuf_read_file()` and `strbuf_getline_*()`
+ * family of functions have the same behaviour as well.
*/
extern size_t strbuf_fread(struct strbuf *, size_t, FILE *);
*/
extern ssize_t strbuf_read(struct strbuf *, int fd, size_t hint);
+/**
+ * Read the contents of a given file descriptor partially by using only one
+ * attempt of xread. The third argument can be used to give a hint about the
+ * file size, to avoid reallocs. Returns the number of new bytes appended to
+ * the sb.
+ */
+extern ssize_t strbuf_read_once(struct strbuf *, int fd, size_t hint);
+
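Unlike strbuf_read(), which loops until EOF, strbuf_read_once() makes a single
xread() attempt, so a caller can interleave reads with other work (for
instance between poll() wakeups). A minimal sketch that simply drains a
descriptor with it; drain_fd() is a made-up helper, not part of the patch:

	static int drain_fd(int fd, struct strbuf *sb)
	{
		for (;;) {
			ssize_t got = strbuf_read_once(sb, fd, 0);
			if (got < 0)
				return -1;	/* errno from xread() describes the failure */
			if (!got)
				return 0;	/* EOF */
		}
	}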
/**
* Read the contents of a file, specified by its path. The third argument
* can be used to give a hint about the file size, to avoid reallocs.
extern int strbuf_readlink(struct strbuf *sb, const char *path, size_t hint);
/**
- * Read a line from a FILE *, overwriting the existing contents
- * of the strbuf. The second argument specifies the line
- * terminator character, typically `'\n'`.
+ * Read a line from a FILE *, overwriting the existing contents of
+ * the strbuf. The strbuf_getline*() family of functions share
+ * this signature, but have different line termination conventions.
+ *
* Reading stops after the terminator or at EOF. The terminator
* is removed from the buffer before returning. Returns 0 unless
* there was nothing left before EOF, in which case it returns `EOF`.
*/
-extern int strbuf_getline(struct strbuf *, FILE *, int);
+typedef int (*strbuf_getline_fn)(struct strbuf *, FILE *);
+
+/* Uses LF as the line terminator */
+extern int strbuf_getline_lf(struct strbuf *sb, FILE *fp);
+
+/* Uses NUL as the line terminator */
+extern int strbuf_getline_nul(struct strbuf *sb, FILE *fp);
+
+/*
+ * Similar to strbuf_getline_lf(), but additionally treats a CR that
+ * comes immediately before the LF as part of the terminator.
+ * This is the most friendly version to be used to read "text" files
+ * that can come from platforms whose native text format is CRLF
+ * terminated.
+ */
+extern int strbuf_getline(struct strbuf *, FILE *);
+
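In other words, strbuf_getline() is now the forgiving reader for user-edited
text (it strips "\r\n" as well as "\n"), while the _lf() and _nul() variants
pin an exact terminator for machine-generated streams. A sketch of reading a
list file line by line; process_line() and the path are hypothetical:

	static int read_user_list(const char *path)
	{
		struct strbuf line = STRBUF_INIT;
		FILE *fp = fopen(path, "r");

		if (!fp)
			return -1;
		while (strbuf_getline(&line, fp) != EOF)
			process_line(line.buf);	/* buffer holds neither LF nor CRLF */
		fclose(fp);
		strbuf_release(&line);
		return 0;
	}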
/**
* Like `strbuf_getline`, but keeps the trailing terminator (if
parameter.commit_sha1 = commit_sha1;
parameter.gitmodules_sha1 = sha1;
parameter.overwrite = 0;
- git_config_from_buf(parse_config, rev.buf, config, config_size,
- &parameter);
+ git_config_from_mem(parse_config, "submodule-blob", rev.buf,
+ config, config_size, &parameter);
free(config);
switch (lookup_type) {
#include "sha1-array.h"
#include "argv-array.h"
#include "blob.h"
+#include "thread-utils.h"
static int config_fetch_recurse_submodules = RECURSE_SUBMODULES_ON_DEMAND;
static struct string_list changed_submodule_paths;
strbuf_addstr(&entry, "submodule.");
strbuf_addstr(&entry, submodule->name);
strbuf_addstr(&entry, ".path");
- if (git_config_set_in_file(".gitmodules", entry.buf, newpath) < 0) {
+ if (git_config_set_in_file_gently(".gitmodules", entry.buf, newpath) < 0) {
/* Maybe the user already did that, don't error out here */
warning(_("Could not update .gitmodules entry %s"), entry.buf);
strbuf_release(&entry);
struct strbuf objects_directory = STRBUF_INIT;
struct alternate_object_database *alt_odb;
int ret = 0;
- int alloc;
+ size_t alloc;
strbuf_git_path_submodule(&objects_directory, path, "objects/");
if (!is_directory(objects_directory.buf)) {
objects_directory.len))
goto done;
- alloc = objects_directory.len + 42; /* for "12/345..." sha1 */
- alt_odb = xmalloc(sizeof(*alt_odb) + alloc);
+ alloc = st_add(objects_directory.len, 42); /* for "12/345..." sha1 */
+ alt_odb = xmalloc(st_add(sizeof(*alt_odb), alloc));
alt_odb->next = alt_odb_list;
xsnprintf(alt_odb->base, alloc, "%s", objects_directory.buf);
alt_odb->name = alt_odb->base + objects_directory.len;
initialized_fetch_ref_tips = 0;
}
-int fetch_populated_submodules(const struct argv_array *options,
- const char *prefix, int command_line_option,
- int quiet)
+struct submodule_parallel_fetch {
+ int count;
+ struct argv_array args;
+ const char *work_tree;
+ const char *prefix;
+ int command_line_option;
+ int quiet;
+ int result;
+};
+#define SPF_INIT {0, ARGV_ARRAY_INIT, NULL, NULL, 0, 0, 0}
+
+static int get_next_submodule(struct child_process *cp,
+ struct strbuf *err, void *data, void **task_cb)
{
- int i, result = 0;
- struct child_process cp = CHILD_PROCESS_INIT;
- struct argv_array argv = ARGV_ARRAY_INIT;
- const char *work_tree = get_git_work_tree();
- if (!work_tree)
- goto out;
-
- if (read_cache() < 0)
- die("index file corrupt");
-
- argv_array_push(&argv, "fetch");
- for (i = 0; i < options->argc; i++)
- argv_array_push(&argv, options->argv[i]);
- argv_array_push(&argv, "--recurse-submodules-default");
- /* default value, "--submodule-prefix" and its value are added later */
-
- cp.env = local_repo_env;
- cp.git_cmd = 1;
- cp.no_stdin = 1;
-
- calculate_changed_submodule_paths();
+ int ret = 0;
+ struct submodule_parallel_fetch *spf = data;
- for (i = 0; i < active_nr; i++) {
+ for (; spf->count < active_nr; spf->count++) {
struct strbuf submodule_path = STRBUF_INIT;
struct strbuf submodule_git_dir = STRBUF_INIT;
struct strbuf submodule_prefix = STRBUF_INIT;
- const struct cache_entry *ce = active_cache[i];
+ const struct cache_entry *ce = active_cache[spf->count];
const char *git_dir, *default_argv;
const struct submodule *submodule;
submodule = submodule_from_name(null_sha1, ce->name);
default_argv = "yes";
- if (command_line_option == RECURSE_SUBMODULES_DEFAULT) {
+ if (spf->command_line_option == RECURSE_SUBMODULES_DEFAULT) {
if (submodule &&
submodule->fetch_recurse !=
RECURSE_SUBMODULES_NONE) {
default_argv = "on-demand";
}
}
- } else if (command_line_option == RECURSE_SUBMODULES_ON_DEMAND) {
+ } else if (spf->command_line_option == RECURSE_SUBMODULES_ON_DEMAND) {
if (!unsorted_string_list_lookup(&changed_submodule_paths, ce->name))
continue;
default_argv = "on-demand";
}
- strbuf_addf(&submodule_path, "%s/%s", work_tree, ce->name);
+ strbuf_addf(&submodule_path, "%s/%s", spf->work_tree, ce->name);
strbuf_addf(&submodule_git_dir, "%s/.git", submodule_path.buf);
- strbuf_addf(&submodule_prefix, "%s%s/", prefix, ce->name);
+ strbuf_addf(&submodule_prefix, "%s%s/", spf->prefix, ce->name);
git_dir = read_gitfile(submodule_git_dir.buf);
if (!git_dir)
git_dir = submodule_git_dir.buf;
if (is_directory(git_dir)) {
- if (!quiet)
- printf("Fetching submodule %s%s\n", prefix, ce->name);
- cp.dir = submodule_path.buf;
- argv_array_push(&argv, default_argv);
- argv_array_push(&argv, "--submodule-prefix");
- argv_array_push(&argv, submodule_prefix.buf);
- cp.argv = argv.argv;
- if (run_command(&cp))
- result = 1;
- argv_array_pop(&argv);
- argv_array_pop(&argv);
- argv_array_pop(&argv);
+ child_process_init(cp);
+ cp->dir = strbuf_detach(&submodule_path, NULL);
+ cp->env = local_repo_env;
+ cp->git_cmd = 1;
+ if (!spf->quiet)
+ strbuf_addf(err, "Fetching submodule %s%s\n",
+ spf->prefix, ce->name);
+ argv_array_init(&cp->args);
+ argv_array_pushv(&cp->args, spf->args.argv);
+ argv_array_push(&cp->args, default_argv);
+ argv_array_push(&cp->args, "--submodule-prefix");
+ argv_array_push(&cp->args, submodule_prefix.buf);
+ ret = 1;
}
strbuf_release(&submodule_path);
strbuf_release(&submodule_git_dir);
strbuf_release(&submodule_prefix);
+ if (ret) {
+ spf->count++;
+ return 1;
+ }
}
- argv_array_clear(&argv);
+ return 0;
+}
+
+static int fetch_start_failure(struct strbuf *err,
+ void *cb, void *task_cb)
+{
+ struct submodule_parallel_fetch *spf = cb;
+
+ spf->result = 1;
+
+ return 0;
+}
+
+static int fetch_finish(int retvalue, struct strbuf *err,
+ void *cb, void *task_cb)
+{
+ struct submodule_parallel_fetch *spf = cb;
+
+ if (retvalue)
+ spf->result = 1;
+
+ return 0;
+}
+
+int fetch_populated_submodules(const struct argv_array *options,
+ const char *prefix, int command_line_option,
+ int quiet, int max_parallel_jobs)
+{
+ int i;
+ struct submodule_parallel_fetch spf = SPF_INIT;
+
+ spf.work_tree = get_git_work_tree();
+ spf.command_line_option = command_line_option;
+ spf.quiet = quiet;
+ spf.prefix = prefix;
+
+ if (!spf.work_tree)
+ goto out;
+
+ if (read_cache() < 0)
+ die("index file corrupt");
+
+ argv_array_push(&spf.args, "fetch");
+ for (i = 0; i < options->argc; i++)
+ argv_array_push(&spf.args, options->argv[i]);
+ argv_array_push(&spf.args, "--recurse-submodules-default");
+ /* default value, "--submodule-prefix" and its value are added later */
+
+ calculate_changed_submodule_paths();
+ run_processes_parallel(max_parallel_jobs,
+ get_next_submodule,
+ fetch_start_failure,
+ fetch_finish,
+ &spf);
+
+ argv_array_clear(&spf.args);
out:
string_list_clear(&changed_submodule_paths, 1);
- return result;
+ return spf.result;
}
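For reference, the callback contract the rewritten fetch code relies on:
get_next_task fills in a child_process and returns 1, or returns 0 once
nothing is left to schedule; the start-failure and task-finished callbacks
return 0 to keep going and non-zero to abort the whole run; the final argument
is an opaque pointer handed back to every callback (here &spf). A
stripped-down sketch with made-up task data, not part of the patch and
assuming git's run-command.h:

	struct demo_state { int scheduled, total; };

	static int demo_next(struct child_process *cp, struct strbuf *err,
			     void *cb, void **task_cb)
	{
		struct demo_state *st = cb;
		if (st->scheduled >= st->total)
			return 0;		/* no more tasks */
		child_process_init(cp);
		cp->git_cmd = 1;
		argv_array_push(&cp->args, "version");	/* stand-in for real work */
		st->scheduled++;
		return 1;			/* task is ready to run */
	}

	static int demo_failed(struct strbuf *err, void *cb, void *task_cb)
	{
		return 0;			/* non-zero would abort the run */
	}

	static int demo_finished(int result, struct strbuf *err, void *cb, void *task_cb)
	{
		return 0;
	}

	static void demo_run(void)
	{
		struct demo_state st = { 0, 4 };
		run_processes_parallel(2, demo_next, demo_failed, demo_finished, &st);
	}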
unsigned is_submodule_modified(const char *path, int ignore_untracked)
/* Update core.worktree setting */
strbuf_reset(&file_name);
strbuf_addf(&file_name, "%s/config", git_dir);
- if (git_config_set_in_file(file_name.buf, "core.worktree",
- relative_path(real_work_tree, git_dir,
- &rel_path)))
- die(_("Could not set core.worktree in %s"),
- file_name.buf);
+ git_config_set_in_file(file_name.buf, "core.worktree",
+ relative_path(real_work_tree, git_dir,
+ &rel_path));
strbuf_release(&file_name);
strbuf_release(&rel_path);
void check_for_new_submodule_commits(unsigned char new_sha1[20]);
int fetch_populated_submodules(const struct argv_array *options,
const char *prefix, int command_line_option,
- int quiet);
+ int quiet, int max_parallel_jobs);
unsigned is_submodule_modified(const char *path, int ignore_untracked);
int submodule_uses_gitfile(const char *path);
int ok_to_remove_submodule(const char *path);
TEST_RESULTS_DIRECTORY_SQ = $(subst ','\'',$(TEST_RESULTS_DIRECTORY))
T = $(sort $(wildcard t[0-9][0-9][0-9][0-9]-*.sh))
-TSVN = $(sort $(wildcard t91[0-9][0-9]-*.sh))
TGITWEB = $(sort $(wildcard t95[0-9][0-9]-*.sh))
THELPERS = $(sort $(filter-out $(T),$(wildcard *.sh)))
echo "$$f"; \
done | '$(SHELL_PATH_SQ)' ./aggregate-results.sh
-# we can test NO_OPTIMIZE_COMMITS independently of LC_ALL
-full-svn-test:
- $(MAKE) $(TSVN) GIT_SVN_NO_OPTIMIZE_COMMITS=1 LC_ALL=C
- $(MAKE) $(TSVN) GIT_SVN_NO_OPTIMIZE_COMMITS=0 LC_ALL=en_US.UTF-8
-
gitweb-test:
$(MAKE) $(TGITWEB)
test_done
fi
+if test_have_prereq !PIPE
+then
+ test_skip_or_die $GIT_TEST_GIT_DAEMON "file system does not support FIFOs"
+fi
+
LIB_GIT_DAEMON_PORT=${LIB_GIT_DAEMON_PORT-${this_test#t}}
GIT_DAEMON_PID=
--listen-host 127.0.0.1 &
}
+prepare_a_utf8_locale () {
+ a_utf8_locale=$(locale -a | sed -n '/\.[uU][tT][fF]-*8$/{
+ p
+ q
+}')
+ if test -n "$a_utf8_locale"
+ then
+ test_set_prereq UTF8
+ else
+ say "# UTF-8 locale not available, some tests are skipped"
+ fi
+}
test_skip_or_die $GIT_TEST_HTTPD "no web server found at '$LIB_HTTPD_PATH'"
fi
-HTTPD_VERSION=`$LIB_HTTPD_PATH -v | \
- sed -n 's/^Server version: Apache\/\([0-9]*\)\..*$/\1/p; q'`
+HTTPD_VERSION=$($LIB_HTTPD_PATH -v | \
+ sed -n 's/^Server version: Apache\/\([0-9]*\)\..*$/\1/p; q')
if test -n "$HTTPD_VERSION"
then
<IfModule !mod_mpm_prefork.c>
LoadModule mpm_prefork_module modules/mod_mpm_prefork.so
</IfModule>
+<IfModule !mod_unixd.c>
+ LoadModule unixd_module modules/mod_unixd.so
+</IfModule>
</IfVersion>
PassEnv GIT_VALGRIND
check_config bare-ancestor-aliased.git/plain-nested/.git false unset
'
+test_expect_success 'No extra GIT_* on alias scripts' '
+ (
+ env | sed -ne "/^GIT_/s/=.*//p" &&
+ echo GIT_PREFIX && # setup.c
+ echo GIT_TEXTDOMAINDIR # wrapper-for-bin.sh
+ ) | sort | uniq >expected &&
+ cat <<-\EOF >script &&
+ #!/bin/sh
+ env | sed -ne "/^GIT_/s/=.*//p" | sort >actual
+ exit 0
+ EOF
+ chmod 755 script &&
+ git config alias.script \!./script &&
+ ( mkdir sub && cd sub && git script ) &&
+ test_cmp expected actual
+'
+
test_expect_success 'plain with GIT_WORK_TREE' '
mkdir plain-wt &&
test_must_fail env GIT_WORK_TREE="$(pwd)/plain-wt" git init plain-wt
test "$SHA" = "$(git rev-list HEAD)"
'
-test_expect_failure 'setup_git_dir twice in subdir' '
+test_expect_success 'setup_git_dir twice in subdir' '
git init sgd &&
(
cd sgd &&
. ./test-lib.sh
init_vars () {
- global_excludes="$(pwd)/global-excludes"
+ global_excludes="global-excludes"
}
enable_global_excludes () {
test_cmp expected filtered-empty-in-repo
'
+test_expect_success 'disable filter with empty override' '
+ test_config_global filter.disable.smudge false &&
+ test_config_global filter.disable.clean false &&
+ test_config filter.disable.smudge false &&
+ test_config filter.disable.clean false &&
+
+ echo "*.disable filter=disable" >.gitattributes &&
+
+ echo test >test.disable &&
+ git -c filter.disable.clean= add test.disable 2>err &&
+ test_must_be_empty err &&
+ rm -f test.disable &&
+ git -c filter.disable.smudge= checkout -- test.disable 2>err &&
+ test_must_be_empty err
+'
+
test_done
pfx=$1
exp=$2.expect
act=$pfx.actual.$3
- tr '\015\000' QN <"$2" >"$exp" &&
- tr '\015\000' QN <"$3" >"$act" &&
- test_cmp $exp $act &&
- rm $exp $act
+ tr '\015\000abcdef0123456789' QN00000000000000000 <"$2" >"$exp" &&
+ tr '\015\000abcdef0123456789' QN00000000000000000 <"$3" >"$act" &&
+ test_cmp "$exp" "$act" &&
+ rm "$exp" "$act"
}
create_gitattributes () {
- attr=$1
- case "$attr" in
- auto)
- echo "*.txt text=auto" >.gitattributes
- ;;
- text)
- echo "*.txt text" >.gitattributes
- ;;
- -text)
- echo "*.txt -text" >.gitattributes
- ;;
- crlf)
- echo "*.txt eol=crlf" >.gitattributes
- ;;
- lf)
- echo "*.txt eol=lf" >.gitattributes
- ;;
- "")
- echo >.gitattributes
- ;;
- *)
- echo >&2 invalid attribute: $attr
- exit 1
- ;;
- esac
+ {
+ while test "$#" != 0
+ do
+ case "$1" in
+ auto) echo '*.txt text=auto' ;;
+ ident) echo '*.txt ident' ;;
+ text) echo '*.txt text' ;;
+ -text) echo '*.txt -text' ;;
+ crlf) echo '*.txt eol=crlf' ;;
+ lf) echo '*.txt eol=lf' ;;
+ "") ;;
+ *)
+ echo >&2 invalid attribute: "$1"
+ exit 1
+ ;;
+ esac &&
+ shift
+ done
+ } >.gitattributes
}
create_NNO_files () {
- lfname=$1
- crlfname=$2
- lfmixcrlf=$3
- lfmixcr=$4
- crlfnul=$5
for crlf in false true input
do
for attr in "" auto text -text lf crlf
do
pfx=NNO_${crlf}_attr_${attr} &&
- cp $lfname ${pfx}_LF.txt &&
- cp $crlfname ${pfx}_CRLF.txt &&
- cp $lfmixcrlf ${pfx}_CRLF_mix_LF.txt &&
- cp $lfmixcr ${pfx}_LF_mix_CR.txt &&
- cp $crlfnul ${pfx}_CRLF_nul.txt
+ cp CRLF_mix_LF ${pfx}_LF.txt &&
+ cp CRLF_mix_LF ${pfx}_CRLF.txt &&
+ cp CRLF_mix_LF ${pfx}_CRLF_mix_LF.txt &&
+ cp CRLF_mix_LF ${pfx}_LF_mix_CR.txt &&
+ cp CRLF_mix_LF ${pfx}_CRLF_nul.txt
done
done
}
crlfnul=$7
pfx=crlf_${crlf}_attr_${attr}
create_gitattributes "$attr" &&
- for f in LF CRLF repoMIX LF_mix_CR CRLF_mix_LF LF_nul CRLF_nul
+ for f in LF CRLF LF_mix_CR CRLF_mix_LF LF_nul CRLF_nul
do
fname=${pfx}_$f.txt &&
cp $f $fname &&
'
}
+stats_ascii () {
+ case "$1" in
+ LF)
+ echo lf
+ ;;
+ CRLF)
+ echo crlf
+ ;;
+ CRLF_mix_LF)
+ echo mixed
+ ;;
+ LF_mix_CR|CRLF_nul|LF_nul|CRLF_mix_CR)
+ echo "-text"
+ ;;
+ *)
+ echo error_invalid $1
+ ;;
+ esac
+
+}
+
+
+# construct the attr/ returned by git ls-files --eol
+# Take none (=empty), one or two args
+attr_ascii () {
+ case $1,$2 in
+ -text,*) echo "-text" ;;
+ text,) echo "text" ;;
+ text,lf) echo "text eol=lf" ;;
+ text,crlf) echo "text eol=crlf" ;;
+ auto,) echo "text=auto" ;;
+ auto,lf) echo "text=auto eol=lf" ;;
+ auto,crlf) echo "text=auto eol=crlf" ;;
+ lf,) echo "text eol=lf" ;;
+ crlf,) echo "text eol=crlf" ;;
+ ,) echo "" ;;
+ *) echo invalid_attr "$1,$2" ;;
+ esac
+}
+
check_files_in_repo () {
crlf=$1
attr=$2
}
checkout_files () {
- eol=$1
- crlf=$2
- attr=$3
- lfname=$4
- crlfname=$5
- lfmixcrlf=$6
- lfmixcr=$7
- crlfnul=$8
- create_gitattributes $attr &&
+ attr=$1 ; shift
+ ident=$1; shift
+ aeol=$1 ; shift
+ crlf=$1 ; shift
+ ceol=$1 ; shift
+ lfname=$1 ; shift
+ crlfname=$1 ; shift
+ lfmixcrlf=$1 ; shift
+ lfmixcr=$1 ; shift
+ crlfnul=$1 ; shift
+ create_gitattributes "$attr" "$ident" &&
git config core.autocrlf $crlf &&
- pfx=eol_${eol}_crlf_${crlf}_attr_${attr}_ &&
- src=crlf_false_attr__ &&
+ pfx=eol_${ceol}_crlf_${crlf}_attr_${attr}_ &&
for f in LF CRLF LF_mix_CR CRLF_mix_LF LF_nul
do
- rm $src$f.txt &&
- if test -z "$eol"; then
- git checkout $src$f.txt
+ rm crlf_false_attr__$f.txt &&
+ if test -z "$ceol"; then
+ git checkout crlf_false_attr__$f.txt
else
- git -c core.eol=$eol checkout $src$f.txt
+ git -c core.eol=$ceol checkout crlf_false_attr__$f.txt
fi
done
- test_expect_success "checkout core.eol=$eol core.autocrlf=$crlf gitattributes=$attr file=LF" "
- compare_ws_file $pfx $lfname ${src}LF.txt
+ test_expect_success "ls-files --eol attr=$attr $ident $aeol core.autocrlf=$crlf core.eol=$ceol" '
+ test_when_finished "rm expect actual" &&
+ sort <<-EOF >expect &&
+ i/crlf w/$(stats_ascii $crlfname) attr/$(attr_ascii $attr $aeol) crlf_false_attr__CRLF.txt
+ i/mixed w/$(stats_ascii $lfmixcrlf) attr/$(attr_ascii $attr $aeol) crlf_false_attr__CRLF_mix_LF.txt
+ i/lf w/$(stats_ascii $lfname) attr/$(attr_ascii $attr $aeol) crlf_false_attr__LF.txt
+ i/-text w/$(stats_ascii $lfmixcr) attr/$(attr_ascii $attr $aeol) crlf_false_attr__LF_mix_CR.txt
+ i/-text w/$(stats_ascii $crlfnul) attr/$(attr_ascii $attr $aeol) crlf_false_attr__CRLF_nul.txt
+ i/-text w/$(stats_ascii $crlfnul) attr/$(attr_ascii $attr $aeol) crlf_false_attr__LF_nul.txt
+ EOF
+ git ls-files --eol crlf_false_attr__* |
+ sed -e "s/ / /g" -e "s/ */ /g" |
+ sort >actual &&
+ test_cmp expect actual
+ '
+ test_expect_success "checkout $ident $attr $aeol core.autocrlf=$crlf core.eol=$ceol file=LF" "
+ compare_ws_file $pfx $lfname crlf_false_attr__LF.txt
"
- test_expect_success "checkout core.eol=$eol core.autocrlf=$crlf gitattributes=$attr file=CRLF" "
- compare_ws_file $pfx $crlfname ${src}CRLF.txt
+ test_expect_success "checkout $ident $attr $aeol core.autocrlf=$crlf core.eol=$ceol file=CRLF" "
+ compare_ws_file $pfx $crlfname crlf_false_attr__CRLF.txt
"
- test_expect_success "checkout core.eol=$eol core.autocrlf=$crlf gitattributes=$attr file=CRLF_mix_LF" "
- compare_ws_file $pfx $lfmixcrlf ${src}CRLF_mix_LF.txt
+ test_expect_success "checkout $ident $attr $aeol core.autocrlf=$crlf core.eol=$ceol file=CRLF_mix_LF" "
+ compare_ws_file $pfx $lfmixcrlf crlf_false_attr__CRLF_mix_LF.txt
"
- test_expect_success "checkout core.eol=$eol core.autocrlf=$crlf gitattributes=$attr file=LF_mix_CR" "
- compare_ws_file $pfx $lfmixcr ${src}LF_mix_CR.txt
+ test_expect_success "checkout $ident $attr $aeol core.autocrlf=$crlf core.eol=$ceol file=LF_mix_CR" "
+ compare_ws_file $pfx $lfmixcr crlf_false_attr__LF_mix_CR.txt
"
- test_expect_success "checkout core.eol=$eol core.autocrlf=$crlf gitattributes=$attr file=LF_nul" "
- compare_ws_file $pfx $crlfnul ${src}LF_nul.txt
+ test_expect_success "checkout $ident $attr $aeol core.autocrlf=$crlf core.eol=$ceol file=LF_nul" "
+ compare_ws_file $pfx $crlfnul crlf_false_attr__LF_nul.txt
"
}
-#######
+# Test control characters
+# NUL SOH CR EOF==^Z
+test_expect_success 'ls-files --eol -o Text/Binary' '
+ test_when_finished "rm expect actual TeBi_*" &&
+ STRT=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA &&
+ STR=$STRT$STRT$STRT$STRT &&
+ printf "${STR}BBB\001" >TeBi_127_S &&
+ printf "${STR}BBBB\001">TeBi_128_S &&
+ printf "${STR}BBB\032" >TeBi_127_E &&
+ printf "\032${STR}BBB" >TeBi_E_127 &&
+ printf "${STR}BBBB\000">TeBi_128_N &&
+ printf "${STR}BBB\012">TeBi_128_L &&
+ printf "${STR}BBB\015">TeBi_127_C &&
+ printf "${STR}BB\015\012" >TeBi_126_CL &&
+ printf "${STR}BB\015\012\015" >TeBi_126_CLC &&
+ sort <<-\EOF >expect &&
+ i/ w/-text TeBi_127_S
+ i/ w/none TeBi_128_S
+ i/ w/none TeBi_127_E
+ i/ w/-text TeBi_E_127
+ i/ w/-text TeBi_128_N
+ i/ w/lf TeBi_128_L
+ i/ w/-text TeBi_127_C
+ i/ w/crlf TeBi_126_CL
+ i/ w/-text TeBi_126_CLC
+ EOF
+ git ls-files --eol -o |
+ sed -n -e "/TeBi_/{s!attr/[ ]*!!g
+ s! ! !g
+ s! *! !g
+ p
+ }" | sort >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'setup master' '
echo >.gitattributes &&
git checkout -b master &&
git add .gitattributes &&
git commit -m "add .gitattributes" "" &&
- printf "line1\nline2\nline3" >LF &&
- printf "line1\r\nline2\r\nline3" >CRLF &&
- printf "line1\r\nline2\nline3" >repoMIX &&
- printf "line1\r\nline2\nline3" >CRLF_mix_LF &&
- printf "line1\nline2\rline3" >LF_mix_CR &&
- printf "line1\r\nline2\rline3" >CRLF_mix_CR &&
- printf "line1Q\r\nline2\r\nline3" | q_to_nul >CRLF_nul &&
- printf "line1Q\nline2\nline3" | q_to_nul >LF_nul &&
+ printf "\$Id: 0000000000000000000000000000000000000000 \$\nLINEONE\nLINETWO\nLINETHREE" >LF &&
+ printf "\$Id: 0000000000000000000000000000000000000000 \$\r\nLINEONE\r\nLINETWO\r\nLINETHREE" >CRLF &&
+ printf "\$Id: 0000000000000000000000000000000000000000 \$\nLINEONE\r\nLINETWO\nLINETHREE" >CRLF_mix_LF &&
+ printf "\$Id: 0000000000000000000000000000000000000000 \$\nLINEONE\nLINETWO\rLINETHREE" >LF_mix_CR &&
+ printf "\$Id: 0000000000000000000000000000000000000000 \$\r\nLINEONE\r\nLINETWO\rLINETHREE" >CRLF_mix_CR &&
+ printf "\$Id: 0000000000000000000000000000000000000000 \$\r\nLINEONEQ\r\nLINETWO\r\nLINETHREE" | q_to_nul >CRLF_nul &&
+ printf "\$Id: 0000000000000000000000000000000000000000 \$\nLINEONEQ\nLINETWO\nLINETHREE" | q_to_nul >LF_nul &&
create_NNO_files CRLF_mix_LF CRLF_mix_LF CRLF_mix_LF CRLF_mix_LF CRLF_mix_LF &&
git -c core.autocrlf=false add NNO_*.txt &&
git commit -m "mixed line endings" &&
# How to read the table below:
# - checkout_files will check multiple files with a combination of settings
# and attributes (core.autocrlf=input is forbidden with core.eol=crlf)
-# - parameter $1 : core.eol lf | crlf
-# - parameter $2 : core.autocrlf false | true | input
-# - parameter $3 : text in .gitattributs "" (empty) | auto | text | -text
-# - parameter $4 : reference for a file with only LF in the repo
-# - parameter $5 : reference for a file with only CRLF in the repo
-# - parameter $6 : reference for a file with mixed LF and CRLF in the repo
-# - parameter $7 : reference for a file with LF and CR in the repo (does somebody uses this ?)
-# - parameter $8 : reference for a file with CRLF and a NUL (should be handled as binary when auto)
-
-# What we have in the repo:
-# ----------------- EOL in repo ----------------
-# LF CRLF CRLF_mix_LF LF_mix_CR CRLF_nul
-# settings with checkout:
-# core. core. .gitattr
-# eol acrlf
-# ----------------------------------------------
-# What we want to have in the working tree:
+#
+# - parameter $1 : text in .gitattributes "" (empty) | auto | text | -text
+# - parameter $2 : ident "" | i (i == ident)
+# - parameter $3 : eol in .gitattributes "" (empty) | lf | crlf
+# - parameter $4 : core.autocrlf false | true | input
+# - parameter $5 : core.eol "" | lf | crlf | "native"
+# - parameter $6 : reference for a file with only LF in the repo
+# - parameter $7 : reference for a file with only CRLF in the repo
+# - parameter $8 : reference for a file with mixed LF and CRLF in the repo
+# - parameter $9 : reference for a file with LF and CR in the repo
+# - parameter $10 : reference for a file with CRLF and a NUL (should be handled as binary when auto)
+
if test_have_prereq NATIVE_CRLF
then
MIX_CRLF_LF=CRLF
fi
export CRLF_MIX_LF_CR MIX NL
-checkout_files lf false "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files lf true "" CRLF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files lf input "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files lf false "auto" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files lf true "auto" CRLF CRLF CRLF LF_mix_CR LF_nul
-checkout_files lf input "auto" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files lf false "text" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files lf true "text" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
-checkout_files lf input "text" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files lf false "-text" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files lf true "-text" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files lf input "-text" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files lf false "lf" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files lf true "lf" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files lf input "lf" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files lf false "crlf" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
-checkout_files lf true "crlf" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
-checkout_files lf input "crlf" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
-
-checkout_files crlf false "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files crlf true "" CRLF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files crlf false "auto" CRLF CRLF CRLF LF_mix_CR LF_nul
-checkout_files crlf true "auto" CRLF CRLF CRLF LF_mix_CR LF_nul
-checkout_files crlf false "text" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
-checkout_files crlf true "text" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
-checkout_files crlf false "-text" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files crlf true "-text" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files crlf false "lf" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files crlf true "lf" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files crlf false "crlf" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
-checkout_files crlf true "crlf" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
-
-checkout_files "" false "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" true "" CRLF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" input "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" false "auto" $NL CRLF $MIX_CRLF_LF LF_mix_CR LF_nul
-checkout_files "" true "auto" CRLF CRLF CRLF LF_mix_CR LF_nul
-checkout_files "" input "auto" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" false "text" $NL CRLF $MIX_CRLF_LF $MIX_LF_CR $LFNUL
-checkout_files "" true "text" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
-checkout_files "" input "text" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" false "-text" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" true "-text" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" input "-text" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" false "lf" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" true "lf" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" input "lf" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files "" false "crlf" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
-checkout_files "" true "crlf" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
-checkout_files "" input "crlf" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
-
-checkout_files native false "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files native true "" CRLF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files native false "auto" $NL CRLF $MIX_CRLF_LF LF_mix_CR LF_nul
-checkout_files native true "auto" CRLF CRLF CRLF LF_mix_CR LF_nul
-checkout_files native false "text" $NL CRLF $MIX_CRLF_LF $MIX_LF_CR $LFNUL
-checkout_files native true "text" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
-checkout_files native false "-text" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files native true "-text" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files native false "lf" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files native true "lf" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
-checkout_files native false "crlf" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
-checkout_files native true "crlf" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+checkout_files "" "" "" false "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" "" "" false crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" "" "" false lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" "" "" false native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" "" "" input "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" "" "" input lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" "" "" true "" CRLF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" "" "" true crlf CRLF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" "" "" true lf CRLF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" "" "" true native CRLF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" ident "" false "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" ident "" false crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" ident "" false lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" ident "" false native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" ident "" input "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" ident "" input lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" ident "" true "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" ident "" true crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" ident "" true lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "" ident "" true native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "auto" "" "" false "" $NL CRLF $MIX_CRLF_LF LF_mix_CR LF_nul
+checkout_files "auto" "" "" false crlf CRLF CRLF CRLF LF_mix_CR LF_nul
+checkout_files "auto" "" "" false lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "auto" "" "" false native $NL CRLF $MIX_CRLF_LF LF_mix_CR LF_nul
+checkout_files "auto" "" "" input "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "auto" "" "" input lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "auto" "" "" true "" CRLF CRLF CRLF LF_mix_CR LF_nul
+checkout_files "auto" "" "" true crlf CRLF CRLF CRLF LF_mix_CR LF_nul
+checkout_files "auto" "" "" true lf CRLF CRLF CRLF LF_mix_CR LF_nul
+checkout_files "auto" "" "" true native CRLF CRLF CRLF LF_mix_CR LF_nul
+checkout_files "auto" ident "" false "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "auto" ident "" false crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "auto" ident "" false lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "auto" ident "" false native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "auto" ident "" input "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "auto" ident "" input lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "auto" ident "" true "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "auto" ident "" true crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "auto" ident "" true lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+checkout_files "auto" ident "" true native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+
+for id in "" ident;
+do
+ checkout_files "crlf" "$id" "" false "" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ checkout_files "crlf" "$id" "" false crlf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ checkout_files "crlf" "$id" "" false lf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ checkout_files "crlf" "$id" "" false native CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ checkout_files "crlf" "$id" "" input "" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ checkout_files "crlf" "$id" "" input lf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ checkout_files "crlf" "$id" "" true "" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ checkout_files "crlf" "$id" "" true crlf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ checkout_files "crlf" "$id" "" true lf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ checkout_files "crlf" "$id" "" true native CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ checkout_files "lf" "$id" "" false "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "lf" "$id" "" false crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "lf" "$id" "" false lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "lf" "$id" "" false native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "lf" "$id" "" input "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "lf" "$id" "" input lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "lf" "$id" "" true "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "lf" "$id" "" true crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "lf" "$id" "" true lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "lf" "$id" "" true native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "text" "$id" "" false "" $NL CRLF $MIX_CRLF_LF $MIX_LF_CR $LFNUL
+ checkout_files "text" "$id" "" false crlf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ checkout_files "text" "$id" "" false lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "text" "$id" "" false native $NL CRLF $MIX_CRLF_LF $MIX_LF_CR $LFNUL
+ checkout_files "text" "$id" "" input "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "text" "$id" "" input lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "text" "$id" "" true "" CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ checkout_files "text" "$id" "" true crlf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ checkout_files "text" "$id" "" true lf CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ checkout_files "text" "$id" "" true native CRLF CRLF CRLF CRLF_mix_CR CRLF_nul
+ checkout_files "-text" "$id" "" false "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "-text" "$id" "" false crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "-text" "$id" "" false lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "-text" "$id" "" false native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "-text" "$id" "" input "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "-text" "$id" "" input lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "-text" "$id" "" true "" LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "-text" "$id" "" true crlf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "-text" "$id" "" true lf LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+ checkout_files "-text" "$id" "" true native LF CRLF CRLF_mix_LF LF_mix_CR LF_nul
+done
+
+# Should be the last test case: remove some files from the worktree
+test_expect_success 'ls-files --eol -d -z' '
+ rm crlf_false_attr__CRLF.txt crlf_false_attr__CRLF_mix_LF.txt crlf_false_attr__LF.txt .gitattributes &&
+ cat >expect <<-\EOF &&
+ i/crlf w/ crlf_false_attr__CRLF.txt
+ i/lf w/ .gitattributes
+ i/lf w/ crlf_false_attr__LF.txt
+ i/mixed w/ crlf_false_attr__CRLF_mix_LF.txt
+ EOF
+ git ls-files --eol -d |
+ sed -e "s!attr/[^ ]*!!g" -e "s/ / /g" -e "s/ */ /g" |
+ sort >actual &&
+ test_cmp expect actual
+'
test_done
rootoff= # we are on Unix
else
rootoff=$(($rootoff-1))
+ # In MSYS2, the root directory "/" is translated into a Windows
+ # directory *with* trailing slash. Let's test for that and adjust
+ # our expected longest ancestor length accordingly.
+ case "$(test-path-utils print_path /)" in
+ */) rootslash=1;;
+ *) rootslash=0;;
+ esac
fi
ancestor() {
# We do some math with the expected ancestor length.
expected=$3
if test -n "$rootoff" && test "x$expected" != x-1; then
+ expected=$(($expected-$rootslash))
+ test $expected -lt 0 ||
expected=$(($expected+$rootoff))
fi
test_expect_success "longest ancestor: $1 $2 => $expected" \
test_cmp expect actual
'
+cat >expect <<-EOF
+preloaded output of a child
+Hello
+World
+preloaded output of a child
+Hello
+World
+preloaded output of a child
+Hello
+World
+preloaded output of a child
+Hello
+World
+EOF
+
+test_expect_success 'run_command runs in parallel with more jobs available than tasks' '
+ test-run-command run-command-parallel 5 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'run_command runs in parallel with as many jobs as tasks' '
+ test-run-command run-command-parallel 4 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'run_command runs in parallel with more tasks than jobs available' '
+ test-run-command run-command-parallel 3 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+ test_cmp expect actual
+'
+
+cat >expect <<-EOF
+preloaded output of a child
+asking for a quick stop
+preloaded output of a child
+asking for a quick stop
+preloaded output of a child
+asking for a quick stop
+EOF
+
+test_expect_success 'run_command is asked to abort gracefully' '
+ test-run-command run-command-abort 3 false 2>actual &&
+ test_cmp expect actual
+'
+
+cat >expect <<-EOF
+no further jobs available
+EOF
+
+test_expect_success 'run_command outputs ' '
+ test-run-command run-command-no-jobs 3 sh -c "printf \"%s\n%s\n\" Hello World" 2>actual &&
+ test_cmp expect actual
+'
+
test_done
GIT_COMMITTER_NAME="Committer Name" \
GIT_COMMITTER_EMAIL="committer@email" \
GIT_COMMITTER_DATE="2005-05-26 23:30" \
- TZ=GMT git commit-tree `cat treeid` >commitid 2>/dev/null'
+ TZ=GMT git commit-tree $(cat treeid) >commitid 2>/dev/null'
test_expect_success \
'read commit' \
- 'git cat-file commit `cat commitid` >commit'
+ 'git cat-file commit $(cat commitid) >commit'
test_expect_success \
'compare commit' \
git config aninvalid.unit >actual &&
test_cmp expect actual &&
cat >expect <<-\EOF &&
- fatal: bad numeric config value '\''1auto'\'' for '\''aninvalid.unit'\'' in .git/config: invalid unit
+ fatal: bad numeric config value '\''1auto'\'' for '\''aninvalid.unit'\'' in file .git/config: invalid unit
EOF
test_must_fail git config --int --get aninvalid.unit 2>actual &&
test_i18ncmp expect actual
'
+test_expect_success 'invalid stdin config' '
+ echo "fatal: bad config line 1 in standard input " >expect &&
+ echo "[broken" | test_must_fail git config --list --file - >output 2>&1 &&
+ test_cmp expect output
+'
+
cat > expect << EOF
true
false
Qsection.sub=section.val5Q
EOF
test_expect_success '--null --list' '
- git config --null --list | nul_to_q >result &&
+ git config --null --list >result.raw &&
+ nul_to_q <result.raw >result &&
echo >>result &&
test_cmp expect result
'
test_expect_success '--null --get-regexp' '
- git config --null --get-regexp "val[0-9]" | nul_to_q >result &&
+ git config --null --get-regexp "val[0-9]" >result.raw &&
+ nul_to_q <result.raw >result &&
echo >>result &&
test_cmp expect result
'
"die q(badrename) if ((stat(q(.git/config)))[2] & 07777) != 0600"
'
+test_expect_success 'set up --show-origin tests' '
+ INCLUDE_DIR="$HOME/include" &&
+ mkdir -p "$INCLUDE_DIR" &&
+ cat >"$INCLUDE_DIR"/absolute.include <<-\EOF &&
+ [user]
+ absolute = include
+ EOF
+ cat >"$INCLUDE_DIR"/relative.include <<-\EOF &&
+ [user]
+ relative = include
+ EOF
+ cat >"$HOME"/.gitconfig <<-EOF &&
+ [user]
+ global = true
+ override = global
+ [include]
+ path = "$INCLUDE_DIR/absolute.include"
+ EOF
+ cat >.git/config <<-\EOF
+ [user]
+ local = true
+ override = local
+ [include]
+ path = ../include/relative.include
+ EOF
+'
+
+test_expect_success '--show-origin with --list' '
+ cat >expect <<-EOF &&
+ file:$HOME/.gitconfig user.global=true
+ file:$HOME/.gitconfig user.override=global
+ file:$HOME/.gitconfig include.path=$INCLUDE_DIR/absolute.include
+ file:$INCLUDE_DIR/absolute.include user.absolute=include
+ file:.git/config user.local=true
+ file:.git/config user.override=local
+ file:.git/config include.path=../include/relative.include
+ file:.git/../include/relative.include user.relative=include
+ command line: user.cmdline=true
+ EOF
+ git -c user.cmdline=true config --list --show-origin >output &&
+ test_cmp expect output
+'
+
+test_expect_success '--show-origin with --list --null' '
+ cat >expect <<-EOF &&
+ file:$HOME/.gitconfigQuser.global
+ trueQfile:$HOME/.gitconfigQuser.override
+ globalQfile:$HOME/.gitconfigQinclude.path
+ $INCLUDE_DIR/absolute.includeQfile:$INCLUDE_DIR/absolute.includeQuser.absolute
+ includeQfile:.git/configQuser.local
+ trueQfile:.git/configQuser.override
+ localQfile:.git/configQinclude.path
+ ../include/relative.includeQfile:.git/../include/relative.includeQuser.relative
+ includeQcommand line:Quser.cmdline
+ trueQ
+ EOF
+ git -c user.cmdline=true config --null --list --show-origin >output.raw &&
+ nul_to_q <output.raw >output &&
+ # The here-doc above adds a newline that the --null output would not
+ # include. Add it here to make the two comparable.
+ echo >>output &&
+ test_cmp expect output
+'
+
+test_expect_success '--show-origin with single file' '
+ cat >expect <<-\EOF &&
+ file:.git/config user.local=true
+ file:.git/config user.override=local
+ file:.git/config include.path=../include/relative.include
+ EOF
+ git config --local --list --show-origin >output &&
+ test_cmp expect output
+'
+
+test_expect_success '--show-origin with --get-regexp' '
+ cat >expect <<-EOF &&
+ file:$HOME/.gitconfig user.global true
+ file:.git/config user.local true
+ EOF
+ git config --show-origin --get-regexp "user\.[g|l].*" >output &&
+ test_cmp expect output
+'
+
+test_expect_success '--show-origin getting a single key' '
+ cat >expect <<-\EOF &&
+ file:.git/config local
+ EOF
+ git config --show-origin user.override >output &&
+ test_cmp expect output
+'
+
+test_expect_success 'set up custom config file' '
+ CUSTOM_CONFIG_FILE="file\" (dq) and spaces.conf" &&
+ cat >"$CUSTOM_CONFIG_FILE" <<-\EOF
+ [user]
+ custom = true
+ EOF
+'
+
+test_expect_success '--show-origin escape special file name characters' '
+ cat >expect <<-\EOF &&
+ file:"file\" (dq) and spaces.conf" user.custom=true
+ EOF
+ git config --file "$CUSTOM_CONFIG_FILE" --show-origin --list >output &&
+ test_cmp expect output
+'
+
+test_expect_success '--show-origin stdin' '
+ cat >expect <<-\EOF &&
+ standard input: user.custom=true
+ EOF
+ git config --file - --show-origin --list <"$CUSTOM_CONFIG_FILE" >output &&
+ test_cmp expect output
+'
+
+test_expect_success '--show-origin stdin with file include' '
+ cat >"$INCLUDE_DIR"/stdin.include <<-EOF &&
+ [user]
+ stdin = include
+ EOF
+ cat >expect <<-EOF &&
+ file:$INCLUDE_DIR/stdin.include include
+ EOF
+ echo "[include]path=\"$INCLUDE_DIR\"/stdin.include" \
+ | git config --show-origin --includes --file - user.stdin >output &&
+ test_cmp expect output
+'
+
+test_expect_success '--show-origin blob' '
+ cat >expect <<-\EOF &&
+ blob:a9d9f9e555b5c6f07cbe09d3f06fe3df11e09c08 user.custom=true
+ EOF
+ blob=$(git hash-object -w "$CUSTOM_CONFIG_FILE") &&
+ git config --blob=$blob --show-origin --list >output &&
+ test_cmp expect output
+'
+
+test_expect_success '--show-origin blob ref' '
+ cat >expect <<-\EOF &&
+ blob:"master:file\" (dq) and spaces.conf" user.custom=true
+ EOF
+ git add "$CUSTOM_CONFIG_FILE" &&
+ git commit -m "new config file" &&
+ git config --blob=master:"$CUSTOM_CONFIG_FILE" --show-origin --list >output &&
+ test_cmp expect output
+'
+
test_done
cp .git/config .git/config.old &&
test_when_finished "mv .git/config.old .git/config" &&
echo "[" >>.git/config &&
- echo "fatal: bad config file line 34 in .git/config" >expect &&
+ echo "fatal: bad config line 34 in file .git/config" >expect &&
test_expect_code 128 test-config get_value foo.bar 2>actual &&
test_cmp expect actual
'
test_expect_success 'proper error on error in custom config files' '
echo "[" >>syntax-error &&
- echo "fatal: bad config file line 1 in syntax-error" >expect &&
+ echo "fatal: bad config line 1 in file syntax-error" >expect &&
test_expect_code 128 test-config configset_get_value foo.bar syntax-error 2>actual &&
test_cmp expect actual
'
test_i18ngrep "fatal: .*alias\.br.*\.git/config.*line 2" result
'
+test_expect_success 'error on modifying repo config without repo' '
+ mkdir no-repo &&
+ (
+ GIT_CEILING_DIRECTORIES=$(pwd) &&
+ export GIT_CEILING_DIRECTORIES &&
+ cd no-repo &&
+ test_must_fail git config a.b c 2>err &&
+ grep "not in a git directory" err
+ )
+'
+
test_done
test_expect_success 'symbolic-ref refuses bare sha1' '
echo content >file && git add file && git commit -m one &&
- test_must_fail git symbolic-ref HEAD `git rev-parse HEAD`
+ test_must_fail git symbolic-ref HEAD $(git rev-parse HEAD)
'
reset_to_sane
test_cmp expect actual
'
+test_expect_success 'symbolic-ref does not create ref d/f conflicts' '
+ git checkout -b df &&
+ test_commit df &&
+ test_must_fail git symbolic-ref refs/heads/df/conflict refs/heads/df &&
+ git pack-refs --all --prune &&
+ test_must_fail git symbolic-ref refs/heads/df/conflict refs/heads/df
+'
+
+test_expect_success 'symbolic-ref handles existing pointer to invalid name' '
+ head=$(git rev-parse HEAD) &&
+ git symbolic-ref HEAD refs/heads/outer &&
+ git update-ref refs/heads/outer/inner $head &&
+ git symbolic-ref HEAD refs/heads/unrelated
+'
+
test_done
git add . &&
test_tick && git commit -m rabbit &&
- H=`git rev-parse --verify HEAD` &&
- A=`git rev-parse --verify HEAD:A` &&
- B=`git rev-parse --verify HEAD:A/B` &&
- C=`git rev-parse --verify HEAD:C` &&
- D=`git rev-parse --verify HEAD:A/D` &&
- E=`git rev-parse --verify HEAD:A/B/E` &&
+ H=$(git rev-parse --verify HEAD) &&
+ A=$(git rev-parse --verify HEAD:A) &&
+ B=$(git rev-parse --verify HEAD:A/B) &&
+ C=$(git rev-parse --verify HEAD:C) &&
+ D=$(git rev-parse --verify HEAD:A/D) &&
+ E=$(git rev-parse --verify HEAD:A/B/E) &&
check_fsck &&
test_chmod +x C &&
git add C &&
test_tick && git commit -m dragon &&
- L=`git rev-parse --verify HEAD` &&
+ L=$(git rev-parse --verify HEAD) &&
check_fsck &&
rm -f C A/B/E &&
echo horse >A/G &&
git add F A/G &&
test_tick && git commit -a -m sheep &&
- F=`git rev-parse --verify HEAD:F` &&
- G=`git rev-parse --verify HEAD:A/G` &&
- I=`git rev-parse --verify HEAD:A` &&
- J=`git rev-parse --verify HEAD` &&
+ F=$(git rev-parse --verify HEAD:F) &&
+ G=$(git rev-parse --verify HEAD:A/G) &&
+ I=$(git rev-parse --verify HEAD:A) &&
+ J=$(git rev-parse --verify HEAD) &&
check_fsck &&
rm -f A/G &&
test_tick && git commit -a -m monkey &&
- K=`git rev-parse --verify HEAD` &&
+ K=$(git rev-parse --verify HEAD) &&
check_fsck &&
check_have A B C D E F G H I J K L &&
--- /dev/null
+#!/bin/sh
+
+test_description='test separate work tree'
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ EMPTY_TREE=$(git write-tree) &&
+ EMPTY_BLOB=$(git hash-object -t blob --stdin </dev/null) &&
+ CHANGED_BLOB=$(echo changed | git hash-object -t blob --stdin) &&
+ EMPTY_BLOB7=$(echo $EMPTY_BLOB | sed "s/\(.......\).*/\1/") &&
+ CHANGED_BLOB7=$(echo $CHANGED_BLOB | sed "s/\(.......\).*/\1/") &&
+
+ mkdir -p work/sub/dir &&
+ mkdir -p work2 &&
+ mv .git repo.git
+'
+
+test_expect_success 'setup: helper for testing rev-parse' '
+ test_rev_parse() {
+ echo $1 >expected.bare &&
+ echo $2 >expected.inside-git &&
+ echo $3 >expected.inside-worktree &&
+ if test $# -ge 4
+ then
+ echo $4 >expected.prefix
+ fi &&
+
+ git rev-parse --is-bare-repository >actual.bare &&
+ git rev-parse --is-inside-git-dir >actual.inside-git &&
+ git rev-parse --is-inside-work-tree >actual.inside-worktree &&
+ if test $# -ge 4
+ then
+ git rev-parse --show-prefix >actual.prefix
+ fi &&
+
+ test_cmp expected.bare actual.bare &&
+ test_cmp expected.inside-git actual.inside-git &&
+ test_cmp expected.inside-worktree actual.inside-worktree &&
+ if test $# -ge 4
+ then
+ # rev-parse --show-prefix should output
+ # a single newline when at the top of the work tree,
+ # but we test for that separately.
+ test -z "$4" && ! test -s actual.prefix ||
+ test_cmp expected.prefix actual.prefix
+ fi
+ }
+'
+
+test_expect_success 'setup: core.worktree = relative path' '
+ sane_unset GIT_WORK_TREE &&
+ GIT_DIR=repo.git &&
+ GIT_CONFIG="$(pwd)"/$GIT_DIR/config &&
+ export GIT_DIR GIT_CONFIG &&
+ git config core.worktree ../work
+'
+
+test_expect_success 'outside' '
+ test_rev_parse false false false
+'
+
+test_expect_success 'inside work tree' '
+ (
+ cd work &&
+ GIT_DIR=../repo.git &&
+ GIT_CONFIG="$(pwd)"/$GIT_DIR/config &&
+ test_rev_parse false false true ""
+ )
+'
+
+test_expect_success 'empty prefix is actually written out' '
+ echo >expected &&
+ (
+ cd work &&
+ GIT_DIR=../repo.git &&
+ GIT_CONFIG="$(pwd)"/$GIT_DIR/config &&
+ git rev-parse --show-prefix >../actual
+ ) &&
+ test_cmp expected actual
+'
+
+test_expect_success 'subdir of work tree' '
+ (
+ cd work/sub/dir &&
+ GIT_DIR=../../../repo.git &&
+ GIT_CONFIG="$(pwd)"/$GIT_DIR/config &&
+ test_rev_parse false false true sub/dir/
+ )
+'
+
+test_expect_success 'setup: core.worktree = absolute path' '
+ sane_unset GIT_WORK_TREE &&
+ GIT_DIR=$(pwd)/repo.git &&
+ GIT_CONFIG=$GIT_DIR/config &&
+ export GIT_DIR GIT_CONFIG &&
+ git config core.worktree "$(pwd)/work"
+'
+
+test_expect_success 'outside' '
+ test_rev_parse false false false &&
+ (
+ cd work2 &&
+ test_rev_parse false false false
+ )
+'
+
+test_expect_success 'inside work tree' '
+ (
+ cd work &&
+ test_rev_parse false false true ""
+ )
+'
+
+test_expect_success 'subdir of work tree' '
+ (
+ cd work/sub/dir &&
+ test_rev_parse false false true sub/dir/
+ )
+'
+
+test_expect_success 'setup: GIT_WORK_TREE=relative (override core.worktree)' '
+ GIT_DIR=$(pwd)/repo.git &&
+ GIT_CONFIG=$GIT_DIR/config &&
+ git config core.worktree non-existent &&
+ GIT_WORK_TREE=work &&
+ export GIT_DIR GIT_CONFIG GIT_WORK_TREE
+'
+
+test_expect_success 'outside' '
+ test_rev_parse false false false &&
+ (
+ cd work2 &&
+ test_rev_parse false false false
+ )
+'
+
+test_expect_success 'inside work tree' '
+ (
+ cd work &&
+ GIT_WORK_TREE=. &&
+ test_rev_parse false false true ""
+ )
+'
+
+test_expect_success 'subdir of work tree' '
+ (
+ cd work/sub/dir &&
+ GIT_WORK_TREE=../.. &&
+ test_rev_parse false false true sub/dir/
+ )
+'
+
+test_expect_success 'setup: GIT_WORK_TREE=absolute, below git dir' '
+ mv work repo.git/work &&
+ mv work2 repo.git/work2 &&
+ GIT_DIR=$(pwd)/repo.git &&
+ GIT_CONFIG=$GIT_DIR/config &&
+ GIT_WORK_TREE=$(pwd)/repo.git/work &&
+ export GIT_DIR GIT_CONFIG GIT_WORK_TREE
+'
+
+test_expect_success 'outside' '
+ echo outside &&
+ test_rev_parse false false false
+'
+
+test_expect_success 'in repo.git' '
+ (
+ cd repo.git &&
+ test_rev_parse false true false
+ ) &&
+ (
+ cd repo.git/objects &&
+ test_rev_parse false true false
+ ) &&
+ (
+ cd repo.git/work2 &&
+ test_rev_parse false true false
+ )
+'
+
+test_expect_success 'inside work tree' '
+ (
+ cd repo.git/work &&
+ test_rev_parse false true true ""
+ )
+'
+
+test_expect_success 'subdir of work tree' '
+ (
+ cd repo.git/work/sub/dir &&
+ test_rev_parse false true true sub/dir/
+ )
+'
+
+test_expect_success 'find work tree from repo' '
+ echo sub/dir/untracked >expected &&
+ cat <<-\EOF >repo.git/work/.gitignore &&
+ expected.*
+ actual.*
+ .gitignore
+ EOF
+ >repo.git/work/sub/dir/untracked &&
+ (
+ cd repo.git &&
+ git ls-files --others --exclude-standard >../actual
+ ) &&
+ test_cmp expected actual
+'
+
+test_expect_success 'find work tree from work tree' '
+ echo sub/dir/tracked >expected &&
+ >repo.git/work/sub/dir/tracked &&
+ (
+ cd repo.git/work/sub/dir &&
+ git --git-dir=../../.. add tracked
+ ) &&
+ (
+ cd repo.git &&
+ git ls-files >../actual
+ ) &&
+ test_cmp expected actual
+'
+
+test_expect_success '_gently() groks relative GIT_DIR & GIT_WORK_TREE' '
+ (
+ cd repo.git/work/sub/dir &&
+ GIT_DIR=../../.. &&
+ GIT_WORK_TREE=../.. &&
+ GIT_PAGER= &&
+ export GIT_DIR GIT_WORK_TREE GIT_PAGER &&
+
+ git diff --exit-code tracked &&
+ echo changed >tracked &&
+ test_must_fail git diff --exit-code tracked
+ )
+'
+
+test_expect_success 'diff-index respects work tree under .git dir' '
+ cat >diff-index-cached.expected <<-EOF &&
+ :000000 100644 $_z40 $EMPTY_BLOB A sub/dir/tracked
+ EOF
+ cat >diff-index.expected <<-EOF &&
+ :000000 100644 $_z40 $_z40 A sub/dir/tracked
+ EOF
+
+ (
+ GIT_DIR=repo.git &&
+ GIT_WORK_TREE=repo.git/work &&
+ export GIT_DIR GIT_WORK_TREE &&
+ git diff-index $EMPTY_TREE >diff-index.actual &&
+ git diff-index --cached $EMPTY_TREE >diff-index-cached.actual
+ ) &&
+ test_cmp diff-index.expected diff-index.actual &&
+ test_cmp diff-index-cached.expected diff-index-cached.actual
+'
+
+test_expect_success 'diff-files respects work tree under .git dir' '
+ cat >diff-files.expected <<-EOF &&
+ :100644 100644 $EMPTY_BLOB $_z40 M sub/dir/tracked
+ EOF
+
+ (
+ GIT_DIR=repo.git &&
+ GIT_WORK_TREE=repo.git/work &&
+ export GIT_DIR GIT_WORK_TREE &&
+ git diff-files >diff-files.actual
+ ) &&
+ test_cmp diff-files.expected diff-files.actual
+'
+
+test_expect_success 'git diff respects work tree under .git dir' '
+ cat >diff-TREE.expected <<-EOF &&
+ diff --git a/sub/dir/tracked b/sub/dir/tracked
+ new file mode 100644
+ index 0000000..$CHANGED_BLOB7
+ --- /dev/null
+ +++ b/sub/dir/tracked
+ @@ -0,0 +1 @@
+ +changed
+ EOF
+ cat >diff-TREE-cached.expected <<-EOF &&
+ diff --git a/sub/dir/tracked b/sub/dir/tracked
+ new file mode 100644
+ index 0000000..$EMPTY_BLOB7
+ EOF
+ cat >diff-FILES.expected <<-EOF &&
+ diff --git a/sub/dir/tracked b/sub/dir/tracked
+ index $EMPTY_BLOB7..$CHANGED_BLOB7 100644
+ --- a/sub/dir/tracked
+ +++ b/sub/dir/tracked
+ @@ -0,0 +1 @@
+ +changed
+ EOF
+
+ (
+ GIT_DIR=repo.git &&
+ GIT_WORK_TREE=repo.git/work &&
+ export GIT_DIR GIT_WORK_TREE &&
+ git diff $EMPTY_TREE >diff-TREE.actual &&
+ git diff --cached $EMPTY_TREE >diff-TREE-cached.actual &&
+ git diff >diff-FILES.actual
+ ) &&
+ test_cmp diff-TREE.expected diff-TREE.actual &&
+ test_cmp diff-TREE-cached.expected diff-TREE-cached.actual &&
+ test_cmp diff-FILES.expected diff-FILES.actual
+'
+
+test_expect_success 'git grep' '
+ echo dir/tracked >expected.grep &&
+ (
+ cd repo.git/work/sub &&
+ GIT_DIR=../.. &&
+ GIT_WORK_TREE=.. &&
+ export GIT_DIR GIT_WORK_TREE &&
+ git grep -l changed >../../../actual.grep
+ ) &&
+ test_cmp expected.grep actual.grep
+'
+
+test_expect_success 'git commit' '
+ (
+ cd repo.git &&
+ GIT_DIR=. GIT_WORK_TREE=work git commit -a -m done
+ )
+'
+
+test_expect_success 'absolute pathspec should fail gracefully' '
+ (
+ cd repo.git &&
+ test_might_fail git config --unset core.worktree &&
+ test_must_fail git log HEAD -- /home
+ )
+'
+
+test_expect_success 'make_relative_path handles double slashes in GIT_DIR' '
+ >dummy_file &&
+ echo git --git-dir="$(pwd)//repo.git" --work-tree="$(pwd)" add dummy_file &&
+ git --git-dir="$(pwd)//repo.git" --work-tree="$(pwd)" add dummy_file
+'
+
+test_expect_success 'relative $GIT_WORK_TREE and git subprocesses' '
+ GIT_DIR=repo.git GIT_WORK_TREE=repo.git/work \
+ test-subprocess --setup-work-tree rev-parse --show-toplevel >actual &&
+ echo "$(pwd)/repo.git/work" >expected &&
+ test_cmp expected actual
+'
+
+test_expect_success 'Multi-worktree setup' '
+ mkdir work &&
+ mkdir -p repo.git/repos/foo &&
+ cp repo.git/HEAD repo.git/index repo.git/repos/foo &&
+ test_might_fail cp repo.git/sharedindex.* repo.git/repos/foo &&
+ sane_unset GIT_DIR GIT_CONFIG GIT_WORK_TREE
+'
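+# repo.git/repos/foo stands in for a per-worktree git dir: the "commondir"
+# file written by the tests below points back at the shared repository,
+# which is what "git rev-parse --git-common-dir" should resolve to.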
+
+test_expect_success 'GIT_DIR set (1)' '
+ echo "gitdir: repo.git/repos/foo" >gitfile &&
+ echo ../.. >repo.git/repos/foo/commondir &&
+ (
+ cd work &&
+ GIT_DIR=../gitfile git rev-parse --git-common-dir >actual &&
+ test-path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'GIT_DIR set (2)' '
+ echo "gitdir: repo.git/repos/foo" >gitfile &&
+ echo "$(pwd)/repo.git" >repo.git/repos/foo/commondir &&
+ (
+ cd work &&
+ GIT_DIR=../gitfile git rev-parse --git-common-dir >actual &&
+ test-path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'Auto discovery' '
+ echo "gitdir: repo.git/repos/foo" >.git &&
+ echo ../.. >repo.git/repos/foo/commondir &&
+ (
+ cd work &&
+ git rev-parse --git-common-dir >actual &&
+ test-path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
+ test_cmp expect actual &&
+ echo haha >data1 &&
+ git add data1 &&
+ git ls-files --full-name :/ | grep data1 >actual &&
+ echo work/data1 >expect &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success '$GIT_DIR/common overrides core.worktree' '
+ mkdir elsewhere &&
+ git --git-dir=repo.git config core.worktree "$TRASH_DIRECTORY/elsewhere" &&
+ echo "gitdir: repo.git/repos/foo" >.git &&
+ echo ../.. >repo.git/repos/foo/commondir &&
+ (
+ cd work &&
+ git rev-parse --git-common-dir >actual &&
+ test-path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
+ test_cmp expect actual &&
+ echo haha >data2 &&
+ git add data2 &&
+ git ls-files --full-name :/ | grep data2 >actual &&
+ echo work/data2 >expect &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success '$GIT_WORK_TREE overrides $GIT_DIR/common' '
+ echo "gitdir: repo.git/repos/foo" >.git &&
+ echo ../.. >repo.git/repos/foo/commondir &&
+ (
+ cd work &&
+ echo haha >data3 &&
+ git --git-dir=../.git --work-tree=. add data3 &&
+ git ls-files --full-name -- :/ | grep data3 >actual &&
+ echo data3 >expect &&
+ test_cmp expect actual
+ )
+'
+
+test_done
+++ /dev/null
-#!/bin/sh
-
-test_description='test separate work tree'
-. ./test-lib.sh
-
-test_expect_success 'setup' '
- EMPTY_TREE=$(git write-tree) &&
- EMPTY_BLOB=$(git hash-object -t blob --stdin </dev/null) &&
- CHANGED_BLOB=$(echo changed | git hash-object -t blob --stdin) &&
- EMPTY_BLOB7=$(echo $EMPTY_BLOB | sed "s/\(.......\).*/\1/") &&
- CHANGED_BLOB7=$(echo $CHANGED_BLOB | sed "s/\(.......\).*/\1/") &&
-
- mkdir -p work/sub/dir &&
- mkdir -p work2 &&
- mv .git repo.git
-'
-
-test_expect_success 'setup: helper for testing rev-parse' '
- test_rev_parse() {
- echo $1 >expected.bare &&
- echo $2 >expected.inside-git &&
- echo $3 >expected.inside-worktree &&
- if test $# -ge 4
- then
- echo $4 >expected.prefix
- fi &&
-
- git rev-parse --is-bare-repository >actual.bare &&
- git rev-parse --is-inside-git-dir >actual.inside-git &&
- git rev-parse --is-inside-work-tree >actual.inside-worktree &&
- if test $# -ge 4
- then
- git rev-parse --show-prefix >actual.prefix
- fi &&
-
- test_cmp expected.bare actual.bare &&
- test_cmp expected.inside-git actual.inside-git &&
- test_cmp expected.inside-worktree actual.inside-worktree &&
- if test $# -ge 4
- then
- # rev-parse --show-prefix should output
- # a single newline when at the top of the work tree,
- # but we test for that separately.
- test -z "$4" && ! test -s actual.prefix ||
- test_cmp expected.prefix actual.prefix
- fi
- }
-'
-
-test_expect_success 'setup: core.worktree = relative path' '
- sane_unset GIT_WORK_TREE &&
- GIT_DIR=repo.git &&
- GIT_CONFIG="$(pwd)"/$GIT_DIR/config &&
- export GIT_DIR GIT_CONFIG &&
- git config core.worktree ../work
-'
-
-test_expect_success 'outside' '
- test_rev_parse false false false
-'
-
-test_expect_success 'inside work tree' '
- (
- cd work &&
- GIT_DIR=../repo.git &&
- GIT_CONFIG="$(pwd)"/$GIT_DIR/config &&
- test_rev_parse false false true ""
- )
-'
-
-test_expect_success 'empty prefix is actually written out' '
- echo >expected &&
- (
- cd work &&
- GIT_DIR=../repo.git &&
- GIT_CONFIG="$(pwd)"/$GIT_DIR/config &&
- git rev-parse --show-prefix >../actual
- ) &&
- test_cmp expected actual
-'
-
-test_expect_success 'subdir of work tree' '
- (
- cd work/sub/dir &&
- GIT_DIR=../../../repo.git &&
- GIT_CONFIG="$(pwd)"/$GIT_DIR/config &&
- test_rev_parse false false true sub/dir/
- )
-'
-
-test_expect_success 'setup: core.worktree = absolute path' '
- sane_unset GIT_WORK_TREE &&
- GIT_DIR=$(pwd)/repo.git &&
- GIT_CONFIG=$GIT_DIR/config &&
- export GIT_DIR GIT_CONFIG &&
- git config core.worktree "$(pwd)/work"
-'
-
-test_expect_success 'outside' '
- test_rev_parse false false false &&
- (
- cd work2 &&
- test_rev_parse false false false
- )
-'
-
-test_expect_success 'inside work tree' '
- (
- cd work &&
- test_rev_parse false false true ""
- )
-'
-
-test_expect_success 'subdir of work tree' '
- (
- cd work/sub/dir &&
- test_rev_parse false false true sub/dir/
- )
-'
-
-test_expect_success 'setup: GIT_WORK_TREE=relative (override core.worktree)' '
- GIT_DIR=$(pwd)/repo.git &&
- GIT_CONFIG=$GIT_DIR/config &&
- git config core.worktree non-existent &&
- GIT_WORK_TREE=work &&
- export GIT_DIR GIT_CONFIG GIT_WORK_TREE
-'
-
-test_expect_success 'outside' '
- test_rev_parse false false false &&
- (
- cd work2 &&
- test_rev_parse false false false
- )
-'
-
-test_expect_success 'inside work tree' '
- (
- cd work &&
- GIT_WORK_TREE=. &&
- test_rev_parse false false true ""
- )
-'
-
-test_expect_success 'subdir of work tree' '
- (
- cd work/sub/dir &&
- GIT_WORK_TREE=../.. &&
- test_rev_parse false false true sub/dir/
- )
-'
-
-test_expect_success 'setup: GIT_WORK_TREE=absolute, below git dir' '
- mv work repo.git/work &&
- mv work2 repo.git/work2 &&
- GIT_DIR=$(pwd)/repo.git &&
- GIT_CONFIG=$GIT_DIR/config &&
- GIT_WORK_TREE=$(pwd)/repo.git/work &&
- export GIT_DIR GIT_CONFIG GIT_WORK_TREE
-'
-
-test_expect_success 'outside' '
- echo outside &&
- test_rev_parse false false false
-'
-
-test_expect_success 'in repo.git' '
- (
- cd repo.git &&
- test_rev_parse false true false
- ) &&
- (
- cd repo.git/objects &&
- test_rev_parse false true false
- ) &&
- (
- cd repo.git/work2 &&
- test_rev_parse false true false
- )
-'
-
-test_expect_success 'inside work tree' '
- (
- cd repo.git/work &&
- test_rev_parse false true true ""
- )
-'
-
-test_expect_success 'subdir of work tree' '
- (
- cd repo.git/work/sub/dir &&
- test_rev_parse false true true sub/dir/
- )
-'
-
-test_expect_success 'find work tree from repo' '
- echo sub/dir/untracked >expected &&
- cat <<-\EOF >repo.git/work/.gitignore &&
- expected.*
- actual.*
- .gitignore
- EOF
- >repo.git/work/sub/dir/untracked &&
- (
- cd repo.git &&
- git ls-files --others --exclude-standard >../actual
- ) &&
- test_cmp expected actual
-'
-
-test_expect_success 'find work tree from work tree' '
- echo sub/dir/tracked >expected &&
- >repo.git/work/sub/dir/tracked &&
- (
- cd repo.git/work/sub/dir &&
- git --git-dir=../../.. add tracked
- ) &&
- (
- cd repo.git &&
- git ls-files >../actual
- ) &&
- test_cmp expected actual
-'
-
-test_expect_success '_gently() groks relative GIT_DIR & GIT_WORK_TREE' '
- (
- cd repo.git/work/sub/dir &&
- GIT_DIR=../../.. &&
- GIT_WORK_TREE=../.. &&
- GIT_PAGER= &&
- export GIT_DIR GIT_WORK_TREE GIT_PAGER &&
-
- git diff --exit-code tracked &&
- echo changed >tracked &&
- test_must_fail git diff --exit-code tracked
- )
-'
-
-test_expect_success 'diff-index respects work tree under .git dir' '
- cat >diff-index-cached.expected <<-EOF &&
- :000000 100644 $_z40 $EMPTY_BLOB A sub/dir/tracked
- EOF
- cat >diff-index.expected <<-EOF &&
- :000000 100644 $_z40 $_z40 A sub/dir/tracked
- EOF
-
- (
- GIT_DIR=repo.git &&
- GIT_WORK_TREE=repo.git/work &&
- export GIT_DIR GIT_WORK_TREE &&
- git diff-index $EMPTY_TREE >diff-index.actual &&
- git diff-index --cached $EMPTY_TREE >diff-index-cached.actual
- ) &&
- test_cmp diff-index.expected diff-index.actual &&
- test_cmp diff-index-cached.expected diff-index-cached.actual
-'
-
-test_expect_success 'diff-files respects work tree under .git dir' '
- cat >diff-files.expected <<-EOF &&
- :100644 100644 $EMPTY_BLOB $_z40 M sub/dir/tracked
- EOF
-
- (
- GIT_DIR=repo.git &&
- GIT_WORK_TREE=repo.git/work &&
- export GIT_DIR GIT_WORK_TREE &&
- git diff-files >diff-files.actual
- ) &&
- test_cmp diff-files.expected diff-files.actual
-'
-
-test_expect_success 'git diff respects work tree under .git dir' '
- cat >diff-TREE.expected <<-EOF &&
- diff --git a/sub/dir/tracked b/sub/dir/tracked
- new file mode 100644
- index 0000000..$CHANGED_BLOB7
- --- /dev/null
- +++ b/sub/dir/tracked
- @@ -0,0 +1 @@
- +changed
- EOF
- cat >diff-TREE-cached.expected <<-EOF &&
- diff --git a/sub/dir/tracked b/sub/dir/tracked
- new file mode 100644
- index 0000000..$EMPTY_BLOB7
- EOF
- cat >diff-FILES.expected <<-EOF &&
- diff --git a/sub/dir/tracked b/sub/dir/tracked
- index $EMPTY_BLOB7..$CHANGED_BLOB7 100644
- --- a/sub/dir/tracked
- +++ b/sub/dir/tracked
- @@ -0,0 +1 @@
- +changed
- EOF
-
- (
- GIT_DIR=repo.git &&
- GIT_WORK_TREE=repo.git/work &&
- export GIT_DIR GIT_WORK_TREE &&
- git diff $EMPTY_TREE >diff-TREE.actual &&
- git diff --cached $EMPTY_TREE >diff-TREE-cached.actual &&
- git diff >diff-FILES.actual
- ) &&
- test_cmp diff-TREE.expected diff-TREE.actual &&
- test_cmp diff-TREE-cached.expected diff-TREE-cached.actual &&
- test_cmp diff-FILES.expected diff-FILES.actual
-'
-
-test_expect_success 'git grep' '
- echo dir/tracked >expected.grep &&
- (
- cd repo.git/work/sub &&
- GIT_DIR=../.. &&
- GIT_WORK_TREE=.. &&
- export GIT_DIR GIT_WORK_TREE &&
- git grep -l changed >../../../actual.grep
- ) &&
- test_cmp expected.grep actual.grep
-'
-
-test_expect_success 'git commit' '
- (
- cd repo.git &&
- GIT_DIR=. GIT_WORK_TREE=work git commit -a -m done
- )
-'
-
-test_expect_success 'absolute pathspec should fail gracefully' '
- (
- cd repo.git &&
- test_might_fail git config --unset core.worktree &&
- test_must_fail git log HEAD -- /home
- )
-'
-
-test_expect_success 'make_relative_path handles double slashes in GIT_DIR' '
- >dummy_file &&
- echo git --git-dir="$(pwd)//repo.git" --work-tree="$(pwd)" add dummy_file &&
- git --git-dir="$(pwd)//repo.git" --work-tree="$(pwd)" add dummy_file
-'
-
-test_expect_success 'relative $GIT_WORK_TREE and git subprocesses' '
- GIT_DIR=repo.git GIT_WORK_TREE=repo.git/work \
- test-subprocess --setup-work-tree rev-parse --show-toplevel >actual &&
- echo "$(pwd)/repo.git/work" >expected &&
- test_cmp expected actual
-'
-
-test_expect_success 'Multi-worktree setup' '
- mkdir work &&
- mkdir -p repo.git/repos/foo &&
- cp repo.git/HEAD repo.git/index repo.git/repos/foo &&
- test_might_fail cp repo.git/sharedindex.* repo.git/repos/foo &&
- sane_unset GIT_DIR GIT_CONFIG GIT_WORK_TREE
-'
-
-test_expect_success 'GIT_DIR set (1)' '
- echo "gitdir: repo.git/repos/foo" >gitfile &&
- echo ../.. >repo.git/repos/foo/commondir &&
- (
- cd work &&
- GIT_DIR=../gitfile git rev-parse --git-common-dir >actual &&
- test-path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
- test_cmp expect actual
- )
-'
-
-test_expect_success 'GIT_DIR set (2)' '
- echo "gitdir: repo.git/repos/foo" >gitfile &&
- echo "$(pwd)/repo.git" >repo.git/repos/foo/commondir &&
- (
- cd work &&
- GIT_DIR=../gitfile git rev-parse --git-common-dir >actual &&
- test-path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
- test_cmp expect actual
- )
-'
-
-test_expect_success 'Auto discovery' '
- echo "gitdir: repo.git/repos/foo" >.git &&
- echo ../.. >repo.git/repos/foo/commondir &&
- (
- cd work &&
- git rev-parse --git-common-dir >actual &&
- test-path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
- test_cmp expect actual &&
- echo haha >data1 &&
- git add data1 &&
- git ls-files --full-name :/ | grep data1 >actual &&
- echo work/data1 >expect &&
- test_cmp expect actual
- )
-'
-
-test_expect_success '$GIT_DIR/common overrides core.worktree' '
- mkdir elsewhere &&
- git --git-dir=repo.git config core.worktree "$TRASH_DIRECTORY/elsewhere" &&
- echo "gitdir: repo.git/repos/foo" >.git &&
- echo ../.. >repo.git/repos/foo/commondir &&
- (
- cd work &&
- git rev-parse --git-common-dir >actual &&
- test-path-utils real_path "$TRASH_DIRECTORY/repo.git" >expect &&
- test_cmp expect actual &&
- echo haha >data2 &&
- git add data2 &&
- git ls-files --full-name :/ | grep data2 >actual &&
- echo work/data2 >expect &&
- test_cmp expect actual
- )
-'
-
-test_expect_success '$GIT_WORK_TREE overrides $GIT_DIR/common' '
- echo "gitdir: repo.git/repos/foo" >.git &&
- echo ../.. >repo.git/repos/foo/commondir &&
- (
- cd work &&
- echo haha >data3 &&
- git --git-dir=../.git --work-tree=. add data3 &&
- git ls-files --full-name -- :/ | grep data3 >actual &&
- echo data3 >expect &&
- test_cmp expect actual
- )
-'
-
-test_done
git checkout -b upstream-branch &&
test_commit upstream-one &&
test_commit upstream-two &&
- git checkout -b @/at-test &&
+ if test_have_prereq !MINGW
+ then
+ git checkout -b @/at-test
+ fi &&
git checkout -b @@/at-test &&
git checkout -b @at-test &&
git checkout -b old-branch &&
check "@" commit new-two
check "@@{u}" ref refs/heads/upstream-branch
check "@@/at-test" ref refs/heads/@@/at-test
+test_have_prereq MINGW ||
check "@/at-test" ref refs/heads/@/at-test
check "@at-test" ref refs/heads/@at-test
nonsense "@{u}@{-1}"
--- /dev/null
+#!/bin/sh
+
+test_description='Test Git when git repository is located at root
+
+This test requires write access in root. Do not bother if you do not
+have a throwaway chroot or VM.
+
+Script t1509/prepare-chroot.sh may help you setup chroot, then you
+can chroot in and execute this test from there.
+'
+
+. ./test-lib.sh
+
+test_cmp_val() {
+ echo "$1" > expected
+ echo "$2" > result
+ test_cmp expected result
+}
+
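+# test_vars splices its arguments into the generated test bodies at call
+# time: the single-quoted body is closed, "$2"/"$3"/"$4" are expanded, and
+# the quote is reopened, so each test compares rev-parse output against a
+# literal expected value.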
+test_vars() {
+ test_expect_success "$1: gitdir" '
+ test_cmp_val "'"$2"'" "$(git rev-parse --git-dir)"
+ '
+
+ test_expect_success "$1: worktree" '
+ test_cmp_val "'"$3"'" "$(git rev-parse --show-toplevel)"
+ '
+
+ test_expect_success "$1: prefix" '
+ test_cmp_val "'"$4"'" "$(git rev-parse --show-prefix)"
+ '
+}
+
+test_foobar_root() {
+ test_expect_success 'add relative' '
+ test -z "$(cd / && git ls-files)" &&
+ git add foo/foome &&
+ git add foo/bar/barme &&
+ git add me &&
+ ( cd / && git ls-files --stage ) > result &&
+ test_cmp /ls.expected result &&
+ rm "$(git rev-parse --git-dir)/index"
+ '
+
+ test_expect_success 'add absolute' '
+ test -z "$(cd / && git ls-files)" &&
+ git add /foo/foome &&
+ git add /foo/bar/barme &&
+ git add /me &&
+ ( cd / && git ls-files --stage ) > result &&
+ test_cmp /ls.expected result &&
+ rm "$(git rev-parse --git-dir)/index"
+ '
+
+}
+
+test_foobar_foo() {
+ test_expect_success 'add relative' '
+ test -z "$(cd / && git ls-files)" &&
+ git add foome &&
+ git add bar/barme &&
+ git add ../me &&
+ ( cd / && git ls-files --stage ) > result &&
+ test_cmp /ls.expected result &&
+ rm "$(git rev-parse --git-dir)/index"
+ '
+
+ test_expect_success 'add absolute' '
+ test -z "$(cd / && git ls-files)" &&
+ git add /foo/foome &&
+ git add /foo/bar/barme &&
+ git add /me &&
+ ( cd / && git ls-files --stage ) > result &&
+ test_cmp /ls.expected result &&
+ rm "$(git rev-parse --git-dir)/index"
+ '
+}
+
+test_foobar_foobar() {
+ test_expect_success 'add relative' '
+ test -z "$(cd / && git ls-files)" &&
+ git add ../foome &&
+ git add barme &&
+ git add ../../me &&
+ ( cd / && git ls-files --stage ) > result &&
+ test_cmp /ls.expected result &&
+ rm "$(git rev-parse --git-dir)/index"
+ '
+
+ test_expect_success 'add absolute' '
+ test -z "$(cd / && git ls-files)" &&
+ git add /foo/foome &&
+ git add /foo/bar/barme &&
+ git add /me &&
+ ( cd / && git ls-files --stage ) > result &&
+ test_cmp /ls.expected result &&
+ rm "$(git rev-parse --git-dir)/index"
+ '
+}
+
+if ! test -w /
+then
+ skip_all="Test requiring writable / skipped. Read this test if you want to run it"
+ test_done
+fi
+
+if test -e /refs || test -e /objects || test -e /info || test -e /hooks ||
+ test -e /.git || test -e /foo || test -e /me
+then
+ skip_all="Skip test that clobbers existing files in /"
+ test_done
+fi
+
+if [ "$IKNOWWHATIAMDOING" != "YES" ]; then
+ skip_all="You must set env var IKNOWWHATIAMDOING=YES in order to run this test"
+ test_done
+fi
+
+if ! test_have_prereq NOT_ROOT
+then
+ skip_all="No you can't run this as root"
+ test_done
+fi
+
+ONE_SHA1=d00491fd7e5bb6fa28c517a0bb32b8b506539d4d
+
+test_expect_success 'setup' '
+ rm -rf /foo &&
+ mkdir /foo &&
+ mkdir /foo/bar &&
+ echo 1 > /foo/foome &&
+ echo 1 > /foo/bar/barme &&
+ echo 1 > /me
+'
+
+say "GIT_DIR absolute, GIT_WORK_TREE set"
+
+test_expect_success 'go to /' 'cd /'
+
+cat >ls.expected <<EOF
+100644 $ONE_SHA1 0 foo/bar/barme
+100644 $ONE_SHA1 0 foo/foome
+100644 $ONE_SHA1 0 me
+EOF
+
+GIT_DIR="$TRASH_DIRECTORY/.git" && export GIT_DIR
+GIT_WORK_TREE=/ && export GIT_WORK_TREE
+
+test_vars 'abs gitdir, root' "$GIT_DIR" "/" ""
+test_foobar_root
+
+test_expect_success 'go to /foo' 'cd /foo'
+
+test_vars 'abs gitdir, foo' "$GIT_DIR" "/" "foo/"
+test_foobar_foo
+
+test_expect_success 'go to /foo/bar' 'cd /foo/bar'
+
+test_vars 'abs gitdir, foo/bar' "$GIT_DIR" "/" "foo/bar/"
+test_foobar_foobar
+
+say "GIT_DIR relative, GIT_WORK_TREE set"
+
+test_expect_success 'go to /' 'cd /'
+
+GIT_DIR="$(echo $TRASH_DIRECTORY|sed 's,^/,,')/.git" && export GIT_DIR
+GIT_WORK_TREE=/ && export GIT_WORK_TREE
+
+test_vars 'rel gitdir, root' "$GIT_DIR" "/" ""
+test_foobar_root
+
+test_expect_success 'go to /foo' 'cd /foo'
+
+GIT_DIR="../$TRASH_DIRECTORY/.git" && export GIT_DIR
+GIT_WORK_TREE=/ && export GIT_WORK_TREE
+
+test_vars 'rel gitdir, foo' "$TRASH_DIRECTORY/.git" "/" "foo/"
+test_foobar_foo
+
+test_expect_success 'go to /foo/bar' 'cd /foo/bar'
+
+GIT_DIR="../../$TRASH_DIRECTORY/.git" && export GIT_DIR
+GIT_WORK_TREE=/ && export GIT_WORK_TREE
+
+test_vars 'rel gitdir, foo/bar' "$TRASH_DIRECTORY/.git" "/" "foo/bar/"
+test_foobar_foobar
+
+say "GIT_DIR relative, GIT_WORK_TREE relative"
+
+test_expect_success 'go to /' 'cd /'
+
+GIT_DIR="$(echo $TRASH_DIRECTORY|sed 's,^/,,')/.git" && export GIT_DIR
+GIT_WORK_TREE=. && export GIT_WORK_TREE
+
+test_vars 'rel gitdir, root' "$GIT_DIR" "/" ""
+test_foobar_root
+
+test_expect_success 'go to /foo' 'cd /foo'
+
+GIT_DIR="../$TRASH_DIRECTORY/.git" && export GIT_DIR
+GIT_WORK_TREE=.. && export GIT_WORK_TREE
+
+test_vars 'rel gitdir, foo' "$TRASH_DIRECTORY/.git" "/" "foo/"
+test_foobar_foo
+
+test_expect_success 'go to /foo/bar' 'cd /foo/bar'
+
+GIT_DIR="../../$TRASH_DIRECTORY/.git" && export GIT_DIR
+GIT_WORK_TREE=../.. && export GIT_WORK_TREE
+
+test_vars 'rel gitdir, foo/bar' "$TRASH_DIRECTORY/.git" "/" "foo/bar/"
+test_foobar_foobar
+
+say ".git at root"
+
+unset GIT_DIR
+unset GIT_WORK_TREE
+
+test_expect_success 'go to /' 'cd /'
+test_expect_success 'setup' '
+ rm -rf /.git &&
+ echo "Initialized empty Git repository in /.git/" > expected &&
+ git init > result &&
+ test_cmp expected result
+'
+
+test_vars 'auto gitdir, root' ".git" "/" ""
+test_foobar_root
+
+test_expect_success 'go to /foo' 'cd /foo'
+test_vars 'auto gitdir, foo' "/.git" "/" "foo/"
+test_foobar_foo
+
+test_expect_success 'go to /foo/bar' 'cd /foo/bar'
+test_vars 'auto gitdir, foo/bar' "/.git" "/" "foo/bar/"
+test_foobar_foobar
+
+test_expect_success 'cleanup' 'rm -rf /.git'
+
+say "auto bare gitdir"
+
+# DESTROYYYYY!!!!!
+test_expect_success 'setup' '
+ rm -rf /refs /objects /info /hooks &&
+ rm -f /expected /ls.expected /me /result &&
+ cd / &&
+ echo "Initialized empty Git repository in /" > expected &&
+ git init --bare > result &&
+ test_cmp expected result
+'
+
+test_vars 'auto gitdir, root' "." "" ""
+
+test_expect_success 'go to /foo' 'cd /foo'
+
+test_vars 'auto gitdir, root' "/" "" ""
+
+test_done
+++ /dev/null
-#!/bin/sh
-
-test_description='Test Git when git repository is located at root
-
-This test requires write access in root. Do not bother if you do not
-have a throwaway chroot or VM.
-
-Script t1509/prepare-chroot.sh may help you setup chroot, then you
-can chroot in and execute this test from there.
-'
-
-. ./test-lib.sh
-
-test_cmp_val() {
- echo "$1" > expected
- echo "$2" > result
- test_cmp expected result
-}
-
-test_vars() {
- test_expect_success "$1: gitdir" '
- test_cmp_val "'"$2"'" "$(git rev-parse --git-dir)"
- '
-
- test_expect_success "$1: worktree" '
- test_cmp_val "'"$3"'" "$(git rev-parse --show-toplevel)"
- '
-
- test_expect_success "$1: prefix" '
- test_cmp_val "'"$4"'" "$(git rev-parse --show-prefix)"
- '
-}
-
-test_foobar_root() {
- test_expect_success 'add relative' '
- test -z "$(cd / && git ls-files)" &&
- git add foo/foome &&
- git add foo/bar/barme &&
- git add me &&
- ( cd / && git ls-files --stage ) > result &&
- test_cmp /ls.expected result &&
- rm "$(git rev-parse --git-dir)/index"
- '
-
- test_expect_success 'add absolute' '
- test -z "$(cd / && git ls-files)" &&
- git add /foo/foome &&
- git add /foo/bar/barme &&
- git add /me &&
- ( cd / && git ls-files --stage ) > result &&
- test_cmp /ls.expected result &&
- rm "$(git rev-parse --git-dir)/index"
- '
-
-}
-
-test_foobar_foo() {
- test_expect_success 'add relative' '
- test -z "$(cd / && git ls-files)" &&
- git add foome &&
- git add bar/barme &&
- git add ../me &&
- ( cd / && git ls-files --stage ) > result &&
- test_cmp /ls.expected result &&
- rm "$(git rev-parse --git-dir)/index"
- '
-
- test_expect_success 'add absolute' '
- test -z "$(cd / && git ls-files)" &&
- git add /foo/foome &&
- git add /foo/bar/barme &&
- git add /me &&
- ( cd / && git ls-files --stage ) > result &&
- test_cmp /ls.expected result &&
- rm "$(git rev-parse --git-dir)/index"
- '
-}
-
-test_foobar_foobar() {
- test_expect_success 'add relative' '
- test -z "$(cd / && git ls-files)" &&
- git add ../foome &&
- git add barme &&
- git add ../../me &&
- ( cd / && git ls-files --stage ) > result &&
- test_cmp /ls.expected result &&
- rm "$(git rev-parse --git-dir)/index"
- '
-
- test_expect_success 'add absolute' '
- test -z "$(cd / && git ls-files)" &&
- git add /foo/foome &&
- git add /foo/bar/barme &&
- git add /me &&
- ( cd / && git ls-files --stage ) > result &&
- test_cmp /ls.expected result &&
- rm "$(git rev-parse --git-dir)/index"
- '
-}
-
-if ! test -w /
-then
- skip_all="Test requiring writable / skipped. Read this test if you want to run it"
- test_done
-fi
-
-if test -e /refs || test -e /objects || test -e /info || test -e /hooks ||
- test -e /.git || test -e /foo || test -e /me
-then
- skip_all="Skip test that clobbers existing files in /"
- test_done
-fi
-
-if [ "$IKNOWWHATIAMDOING" != "YES" ]; then
- skip_all="You must set env var IKNOWWHATIAMDOING=YES in order to run this test"
- test_done
-fi
-
-if ! test_have_prereq NOT_ROOT
-then
- skip_all="No you can't run this as root"
- test_done
-fi
-
-ONE_SHA1=d00491fd7e5bb6fa28c517a0bb32b8b506539d4d
-
-test_expect_success 'setup' '
- rm -rf /foo &&
- mkdir /foo &&
- mkdir /foo/bar &&
- echo 1 > /foo/foome &&
- echo 1 > /foo/bar/barme &&
- echo 1 > /me
-'
-
-say "GIT_DIR absolute, GIT_WORK_TREE set"
-
-test_expect_success 'go to /' 'cd /'
-
-cat >ls.expected <<EOF
-100644 $ONE_SHA1 0 foo/bar/barme
-100644 $ONE_SHA1 0 foo/foome
-100644 $ONE_SHA1 0 me
-EOF
-
-GIT_DIR="$TRASH_DIRECTORY/.git" && export GIT_DIR
-GIT_WORK_TREE=/ && export GIT_WORK_TREE
-
-test_vars 'abs gitdir, root' "$GIT_DIR" "/" ""
-test_foobar_root
-
-test_expect_success 'go to /foo' 'cd /foo'
-
-test_vars 'abs gitdir, foo' "$GIT_DIR" "/" "foo/"
-test_foobar_foo
-
-test_expect_success 'go to /foo/bar' 'cd /foo/bar'
-
-test_vars 'abs gitdir, foo/bar' "$GIT_DIR" "/" "foo/bar/"
-test_foobar_foobar
-
-say "GIT_DIR relative, GIT_WORK_TREE set"
-
-test_expect_success 'go to /' 'cd /'
-
-GIT_DIR="$(echo $TRASH_DIRECTORY|sed 's,^/,,')/.git" && export GIT_DIR
-GIT_WORK_TREE=/ && export GIT_WORK_TREE
-
-test_vars 'rel gitdir, root' "$GIT_DIR" "/" ""
-test_foobar_root
-
-test_expect_success 'go to /foo' 'cd /foo'
-
-GIT_DIR="../$TRASH_DIRECTORY/.git" && export GIT_DIR
-GIT_WORK_TREE=/ && export GIT_WORK_TREE
-
-test_vars 'rel gitdir, foo' "$TRASH_DIRECTORY/.git" "/" "foo/"
-test_foobar_foo
-
-test_expect_success 'go to /foo/bar' 'cd /foo/bar'
-
-GIT_DIR="../../$TRASH_DIRECTORY/.git" && export GIT_DIR
-GIT_WORK_TREE=/ && export GIT_WORK_TREE
-
-test_vars 'rel gitdir, foo/bar' "$TRASH_DIRECTORY/.git" "/" "foo/bar/"
-test_foobar_foobar
-
-say "GIT_DIR relative, GIT_WORK_TREE relative"
-
-test_expect_success 'go to /' 'cd /'
-
-GIT_DIR="$(echo $TRASH_DIRECTORY|sed 's,^/,,')/.git" && export GIT_DIR
-GIT_WORK_TREE=. && export GIT_WORK_TREE
-
-test_vars 'rel gitdir, root' "$GIT_DIR" "/" ""
-test_foobar_root
-
-test_expect_success 'go to /' 'cd /foo'
-
-GIT_DIR="../$TRASH_DIRECTORY/.git" && export GIT_DIR
-GIT_WORK_TREE=.. && export GIT_WORK_TREE
-
-test_vars 'rel gitdir, foo' "$TRASH_DIRECTORY/.git" "/" "foo/"
-test_foobar_foo
-
-test_expect_success 'go to /foo/bar' 'cd /foo/bar'
-
-GIT_DIR="../../$TRASH_DIRECTORY/.git" && export GIT_DIR
-GIT_WORK_TREE=../.. && export GIT_WORK_TREE
-
-test_vars 'rel gitdir, foo/bar' "$TRASH_DIRECTORY/.git" "/" "foo/bar/"
-test_foobar_foobar
-
-say ".git at root"
-
-unset GIT_DIR
-unset GIT_WORK_TREE
-
-test_expect_success 'go to /' 'cd /'
-test_expect_success 'setup' '
- rm -rf /.git &&
- echo "Initialized empty Git repository in /.git/" > expected &&
- git init > result &&
- test_cmp expected result
-'
-
-test_vars 'auto gitdir, root' ".git" "/" ""
-test_foobar_root
-
-test_expect_success 'go to /foo' 'cd /foo'
-test_vars 'auto gitdir, foo' "/.git" "/" "foo/"
-test_foobar_foo
-
-test_expect_success 'go to /foo/bar' 'cd /foo/bar'
-test_vars 'auto gitdir, foo/bar' "/.git" "/" "foo/bar/"
-test_foobar_foobar
-
-test_expect_success 'cleanup' 'rm -rf /.git'
-
-say "auto bare gitdir"
-
-# DESTROYYYYY!!!!!
-test_expect_success 'setup' '
- rm -rf /refs /objects /info /hooks &&
- rm -f /expected /ls.expected /me /result &&
- cd / &&
- echo "Initialized empty Git repository in /" > expected &&
- git init --bare > result &&
- test_cmp expected result
-'
-
-test_vars 'auto gitdir, root' "." "" ""
-
-test_expect_success 'go to /foo' 'cd /foo'
-
-test_vars 'auto gitdir, root' "/" "" ""
-
-test_done
test_expect_success 'setup' '
echo blob >a-blob &&
- git tag -a -m blob blob-tag `git hash-object -w a-blob` &&
+ git tag -a -m blob blob-tag $(git hash-object -w a-blob) &&
mkdir a-tree &&
echo moreblobs >a-tree/another-blob &&
git add . &&
- TREE_SHA1=`git write-tree` &&
+ TREE_SHA1=$(git write-tree) &&
git tag -a -m tree tree-tag "$TREE_SHA1" &&
git commit -m Initial &&
git tag -a -m commit commit-tag &&
git checkout master &&
echo modified >>a-blob &&
git add -u &&
- git commit -m Modified
+ git commit -m Modified &&
+ git branch modref &&
+ echo changed! >>a-blob &&
+ git add -u &&
+ git commit -m !Exp &&
+ git branch expref &&
+ echo changed >>a-blob &&
+ git add -u &&
+ git commit -m Changed &&
+ echo changed-again >>a-blob &&
+ git add -u &&
+ git commit -m Changed-again
'
test_expect_success 'ref^{non-existent}' '
test_cmp expected actual
'
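+# <rev>^{/!-<text>} names the youngest commit reachable from <rev> whose log
+# message does not match <text>; a "!" followed by anything other than "-"
+# is reserved, except that a leading "!!" escapes a message that itself
+# starts with "!" (here the commit created above with -m "!Exp" on expref).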
+test_expect_success 'ref^{/!Exp}' '
+ test_must_fail git rev-parse master^{/!Exp}
+'
+
+test_expect_success 'ref^{/!}' '
+ test_must_fail git rev-parse master^{/!}
+'
+
+test_expect_success 'ref^{/!!Exp}' '
+ git rev-parse expref >expected &&
+ git rev-parse master^{/!!Exp} >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'ref^{/!-}' '
+ test_must_fail git rev-parse master^{/!-}
+'
+
+test_expect_success 'ref^{/!-.}' '
+ test_must_fail git rev-parse master^{/!-.}
+'
+
+test_expect_success 'ref^{/!-non-existent}' '
+ git rev-parse master >expected &&
+ git rev-parse master^{/!-non-existent} >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'ref^{/!-Changed}' '
+ git rev-parse expref >expected &&
+ git rev-parse master^{/!-Changed} >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'ref^{/!-!Exp}' '
+ git rev-parse modref >expected &&
+ git rev-parse expref^{/!-!Exp} >actual &&
+ test_cmp expected actual
+'
+
test_done
test_expect_success 'ambiguous 40-hex ref' '
TREE=$(git mktree </dev/null) &&
- REF=`git rev-parse HEAD` &&
+ REF=$(git rev-parse HEAD) &&
VAL=$(git commit-tree $TREE </dev/null) &&
git update-ref refs/heads/$REF $VAL &&
- test `git rev-parse $REF 2>err` = $REF &&
+ test $(git rev-parse $REF 2>err) = $REF &&
grep "refname.*${REF}.*ambiguous" err
'
test_expect_success 'ambiguous short sha1 ref' '
TREE=$(git mktree </dev/null) &&
- REF=`git rev-parse --short HEAD` &&
+ REF=$(git rev-parse --short HEAD) &&
VAL=$(git commit-tree $TREE </dev/null) &&
git update-ref refs/heads/$REF $VAL &&
- test `git rev-parse $REF 2>err` = $VAL &&
+ test $(git rev-parse $REF 2>err) = $VAL &&
grep "refname.*${REF}.*ambiguous" err
'
EOF
test_cmp ls-files.expect ls-files.actual &&
- BASE=`test-dump-split-index .git/index | grep "^own" | sed "s/own/base/"` &&
+ BASE=$(test-dump-split-index .git/index | grep "^own" | sed "s/own/base/") &&
test-dump-split-index .git/index | sed "/^own/d" >actual &&
cat >expect <<EOF &&
not a split index
git checkout master --
'
+test_expect_success 'checkout notices failure to lock HEAD' '
+ test_when_finished "rm -f .git/HEAD.lock" &&
+ >.git/HEAD.lock &&
+ test_must_fail git checkout -b other
+'
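+# A pre-existing .git/HEAD.lock looks like a lock held by another process,
+# so "git checkout -b" must notice that it cannot lock HEAD and abort.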
+
+test_expect_success 'create ref directory/file conflict scenario' '
+ git update-ref refs/heads/outer/inner master &&
+
+ # do not rely on symbolic-ref to get a known state,
+ # as it may use the same code we are testing
+ reset_to_df () {
+ echo "ref: refs/heads/outer" >.git/HEAD
+ }
+'
+
+test_expect_success 'checkout away from d/f HEAD (unpacked, to branch)' '
+ reset_to_df &&
+ git checkout master
+'
+
+test_expect_success 'checkout away from d/f HEAD (unpacked, to detached)' '
+ reset_to_df &&
+ git checkout --detach master
+'
+
+test_expect_success 'pack refs' '
+ git pack-refs --all --prune
+'
+
+test_expect_success 'checkout away from d/f HEAD (packed, to branch)' '
+ reset_to_df &&
+ git checkout master
+'
+
+test_expect_success 'checkout away from d/f HEAD (packed, to detached)' '
+ reset_to_df &&
+ git checkout --detach master
+'
test_done
test_i18ngrep ! "^HEAD is now at" stderr
'
-test_expect_success 'wildcard ambiguation, paths win' '
- git init ambi &&
- (
- cd ambi &&
- echo a >a.c &&
- git add a.c &&
- echo b >a.c &&
- git checkout "*.c" &&
- echo a >expect &&
- test_cmp expect a.c
- )
-'
-
-test_expect_success !MINGW 'wildcard ambiguation, refs lose' '
- git init ambi2 &&
- (
- cd ambi2 &&
- echo a >"*.c" &&
- git add . &&
- test_must_fail git show :"*.c" &&
- git show :"*.c" -- >actual &&
- echo a >expect &&
- test_cmp expect actual
- )
-'
-
test_done
test_expect_success 'checkout with grafts' '
test_when_finished rm .git/info/grafts &&
test_commit abc &&
- SHA1=`git rev-parse HEAD` &&
+ SHA1=$(git rev-parse HEAD) &&
test_commit def &&
test_commit xyz &&
- echo "`git rev-parse HEAD` $SHA1" >.git/info/grafts &&
+ echo "$(git rev-parse HEAD) $SHA1" >.git/info/grafts &&
cat >expected <<-\EOF &&
xyz
abc
test_must_fail git worktree add -B poodle --detach bamboo master
'
+test_expect_success '"add -B" fails if the branch is checked out' '
+ git rev-parse newmaster >before &&
+ test_must_fail git worktree add -B newmaster bamboo master &&
+ git rev-parse newmaster >after &&
+ test_cmp before after
+'
+
+test_expect_success 'add -B' '
+ git worktree add -B poodle bamboo2 master^ &&
+ git -C bamboo2 symbolic-ref HEAD >actual &&
+ echo refs/heads/poodle >expected &&
+ test_cmp expected actual &&
+ test_cmp_rev master^ poodle
+'
+
test_expect_success 'local clone from linked checkout' '
git clone --local here here-clone &&
( cd here-clone && git fsck )
test_commit init
'
+test_expect_success 'rev-parse --git-common-dir on main worktree' '
+ git rev-parse --git-common-dir >actual &&
+ echo .git >expected &&
+ test_cmp expected actual &&
+ mkdir sub &&
+ git -C sub rev-parse --git-common-dir >actual2 &&
+ echo sub/.git >expected2 &&
+ test_cmp expected2 actual2
+'
+
test_expect_success '"list" all worktrees from main' '
echo "$(git rev-parse --show-toplevel) $(git rev-parse --short HEAD) [$(git symbolic-ref --short HEAD)]" >expect &&
test_when_finished "rm -rf here && git worktree prune" &&
test_expect_success \
'the index entry must still be a symbolic link' '
-case "`git ls-files --stage --cached symlink`" in
+case "$(git ls-files --stage --cached symlink)" in
120000" "*symlink) echo pass;;
*) echo fail; git ls-files --stage --cached symlink; (exit 1);;
esac'
grep "^a.1" output
'
-test_expect_success 'excluded directory overrides content patterns' '
+test_expect_success 'excluded directory does not override content patterns' '
git ls-files --others --exclude="one" --exclude="!one/a.1" >output &&
- if grep "^one/a.1" output
- then
- false
- fi
+ grep "^one/a.1" output
'
test_expect_success 'negated directory doesn'\''t affect content patterns' '
--- /dev/null
+#!/bin/sh
+
+test_description='test re-include patterns'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ mkdir -p fooo foo/bar tmp &&
+ touch abc foo/def foo/bar/ghi foo/bar/bar
+'
+
+test_expect_success 'no match, do not enter subdir and waste cycles' '
+ cat >.gitignore <<-\EOF &&
+ /tmp
+ /foo
+ !fooo/bar/bar
+ EOF
+ GIT_TRACE_EXCLUDE="$(pwd)/tmp/trace" git ls-files -o --exclude-standard >tmp/actual &&
+ ! grep "enter .foo/.\$" tmp/trace &&
+ cat >tmp/expected <<-\EOF &&
+ .gitignore
+ abc
+ EOF
+ test_cmp tmp/expected tmp/actual
+'
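+# GIT_TRACE_EXCLUDE dumps the exclude machinery's decisions to the named
+# file; the trace must not show the directory "foo/" being entered, i.e.
+# the whole directory is skipped instead of being filtered file by file.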
+
+test_expect_success 'match, excluded by literal pathname pattern' '
+ cat >.gitignore <<-\EOF &&
+ /tmp
+ /fooo
+ /foo
+ !foo/bar/bar
+ EOF
+ cat >fooo/.gitignore <<-\EOF &&
+ !/*
+	EOF
+	git ls-files -o --exclude-standard >tmp/actual &&
+ cat >tmp/expected <<-\EOF &&
+ .gitignore
+ abc
+ foo/bar/bar
+ EOF
+ test_cmp tmp/expected tmp/actual
+'
+
+test_expect_success 'match, excluded by wildcard pathname pattern' '
+ cat >.gitignore <<-\EOF &&
+ /tmp
+ /fooo
+ /fo?
+ !foo/bar/bar
+ EOF
+ git ls-files -o --exclude-standard >tmp/actual &&
+ cat >tmp/expected <<-\EOF &&
+ .gitignore
+ abc
+ foo/bar/bar
+ EOF
+ test_cmp tmp/expected tmp/actual
+'
+
+test_expect_success 'match, excluded by literal basename pattern' '
+ cat >.gitignore <<-\EOF &&
+ /tmp
+ /fooo
+ foo
+ !foo/bar/bar
+ EOF
+ git ls-files -o --exclude-standard >tmp/actual &&
+ cat >tmp/expected <<-\EOF &&
+ .gitignore
+ abc
+ foo/bar/bar
+ EOF
+ test_cmp tmp/expected tmp/actual
+'
+
+test_expect_success 'match, excluded by wildcard basename pattern' '
+ cat >.gitignore <<-\EOF &&
+ /tmp
+ /fooo
+ fo?
+ !foo/bar/bar
+ EOF
+ git ls-files -o --exclude-standard >tmp/actual &&
+ cat >tmp/expected <<-\EOF &&
+ .gitignore
+ abc
+ foo/bar/bar
+ EOF
+ test_cmp tmp/expected tmp/actual
+'
+
+test_expect_success 'match, excluded by literal mustbedir, basename pattern' '
+ cat >.gitignore <<-\EOF &&
+ /tmp
+ /fooo
+ foo/
+ !foo/bar/bar
+ EOF
+ git ls-files -o --exclude-standard >tmp/actual &&
+ cat >tmp/expected <<-\EOF &&
+ .gitignore
+ abc
+ foo/bar/bar
+ EOF
+ test_cmp tmp/expected tmp/actual
+'
+
+test_expect_success 'match, excluded by literal mustbedir, pathname pattern' '
+ cat >.gitignore <<-\EOF &&
+ /tmp
+ /fooo
+ /foo/
+ !foo/bar/bar
+ EOF
+ git ls-files -o --exclude-standard >tmp/actual &&
+ cat >tmp/expected <<-\EOF &&
+ .gitignore
+ abc
+ foo/bar/bar
+ EOF
+ test_cmp tmp/expected tmp/actual
+'
+
+test_expect_success 'prepare for nested negatives' '
+ cat >.git/info/exclude <<-\EOF &&
+ /.gitignore
+ /tmp
+ /foo
+ /abc
+ EOF
+ git ls-files -o --exclude-standard >tmp/actual &&
+ test_must_be_empty tmp/actual &&
+ mkdir -p 1/2/3/4 &&
+ touch 1/f 1/2/f 1/2/3/f 1/2/3/4/f
+'
+
+test_expect_success 'match, literal pathname, nested negatives' '
+ cat >.gitignore <<-\EOF &&
+ /1
+ !1/2
+ 1/2/3
+ !1/2/3/4
+ EOF
+ git ls-files -o --exclude-standard >tmp/actual &&
+ cat >tmp/expected <<-\EOF &&
+ 1/2/3/4/f
+ 1/2/f
+ EOF
+ test_cmp tmp/expected tmp/actual
+'
+
+test_done
test_ln_s_add e a &&
test_tick &&
git commit -m "rename a->e, symlink a->e" &&
- oln=`printf e | git hash-object --stdin`
+ oln=$(printf e | git hash-object --stdin)
'
test_expect_success 'setup 9' '
+++ /dev/null
-#!/bin/sh
-
-test_description='merge-recursive options
-
-* [master] Clarify
- ! [remote] Remove cruft
---
- + [remote] Remove cruft
-* [master] Clarify
-*+ [remote^] Initial revision
-* ok 1: setup
-'
-
-. ./test-lib.sh
-
-test_have_prereq SED_STRIPS_CR && SED_OPTIONS=-b
-if test_have_prereq GREP_STRIPS_CR
-then
- GREP_OPTIONS=-U
- export GREP_OPTIONS
-fi
-
-test_expect_success 'setup' '
- conflict_hunks () {
- sed $SED_OPTIONS -n -e "
- /^<<<</ b conflict
- b
- : conflict
- p
- /^>>>>/ b
- n
- b conflict
- " "$@"
- } &&
-
- cat <<-\EOF >text.txt &&
- Hope, he says, cherishes the soul of him who lives in
- justice and holiness and is the nurse of his age and the
- companion of his journey;--hope which is mightiest to sway
- the restless soul of man.
-
- How admirable are his words! And the great blessing of riches, I do
- not say to every man, but to a good man, is, that he has had no
- occasion to deceive or to defraud others, either intentionally or
- unintentionally; and when he departs to the world below he is not in
- any apprehension about offerings due to the gods or debts which he owes
- to men. Now to this peace of mind the possession of wealth greatly
- contributes; and therefore I say, that, setting one thing against
- another, of the many advantages which wealth has to give, to a man of
- sense this is in my opinion the greatest.
-
- Well said, Cephalus, I replied; but as concerning justice, what is
- it?--to speak the truth and to pay your debts--no more than this? And
- even to this are there not exceptions? Suppose that a friend when in
- his right mind has deposited arms with me and he asks for them when he
- is not in his right mind, ought I to give them back to him? No one
- would say that I ought or that I should be right in doing so, any more
- than they would say that I ought always to speak the truth to one who
- is in his condition.
-
- You are quite right, he replied.
-
- But then, I said, speaking the truth and paying your debts is not a
- correct definition of justice.
-
- CEPHALUS - SOCRATES - POLEMARCHUS
-
- Quite correct, Socrates, if Simonides is to be believed, said
- Polemarchus interposing.
-
- I fear, said Cephalus, that I must go now, for I have to look after the
- sacrifices, and I hand over the argument to Polemarchus and the company.
- EOF
- git add text.txt &&
- test_tick &&
- git commit -m "Initial revision" &&
-
- git checkout -b remote &&
- sed -e "
- s/\. /\. /g
- s/[?] /? /g
- s/ / /g
- s/--/---/g
- s/but as concerning/but as con cerning/
- /CEPHALUS - SOCRATES - POLEMARCHUS/ d
- " text.txt >text.txt+ &&
- mv text.txt+ text.txt &&
- git commit -a -m "Remove cruft" &&
-
- git checkout master &&
- sed -e "
- s/\(not in his right mind\),\(.*\)/\1;\2Q/
- s/Quite correct\(.*\)/It is too correct\1Q/
- s/unintentionally/un intentionally/
- /un intentionally/ s/$/Q/
- s/Polemarchus interposing./Polemarchus, interposing.Q/
- /justice and holiness/ s/$/Q/
- /pay your debts/ s/$/Q/
- " text.txt | q_to_cr >text.txt+ &&
- mv text.txt+ text.txt &&
- git commit -a -m "Clarify" &&
- git show-branch --all
-'
-
-test_expect_success 'naive merge fails' '
- git read-tree --reset -u HEAD &&
- test_must_fail git merge-recursive HEAD^ -- HEAD remote &&
- test_must_fail git update-index --refresh &&
- grep "<<<<<<" text.txt
-'
-
-test_expect_success '--ignore-space-change makes merge succeed' '
- git read-tree --reset -u HEAD &&
- git merge-recursive --ignore-space-change HEAD^ -- HEAD remote
-'
-
-test_expect_success 'naive cherry-pick fails' '
- git read-tree --reset -u HEAD &&
- test_must_fail git cherry-pick --no-commit remote &&
- git read-tree --reset -u HEAD &&
- test_must_fail git cherry-pick remote &&
- test_must_fail git update-index --refresh &&
- grep "<<<<<<" text.txt
-'
-
-test_expect_success '-Xignore-space-change makes cherry-pick succeed' '
- git read-tree --reset -u HEAD &&
- git cherry-pick --no-commit -Xignore-space-change remote
-'
-
-test_expect_success '--ignore-space-change: our w/s-only change wins' '
- q_to_cr <<-\EOF >expected &&
- justice and holiness and is the nurse of his age and theQ
- EOF
-
- git read-tree --reset -u HEAD &&
- git merge-recursive --ignore-space-change HEAD^ -- HEAD remote &&
- grep "justice and holiness" text.txt >actual &&
- test_cmp expected actual
-'
-
-test_expect_success '--ignore-space-change: their real change wins over w/s' '
- cat <<-\EOF >expected &&
- it?---to speak the truth and to pay your debts---no more than this? And
- EOF
-
- git read-tree --reset -u HEAD &&
- git merge-recursive --ignore-space-change HEAD^ -- HEAD remote &&
- grep "pay your debts" text.txt >actual &&
- test_cmp expected actual
-'
-
-test_expect_success '--ignore-space-change: does not ignore new spaces' '
- cat <<-\EOF >expected1 &&
- Well said, Cephalus, I replied; but as con cerning justice, what is
- EOF
- q_to_cr <<-\EOF >expected2 &&
- un intentionally; and when he departs to the world below he is not inQ
- EOF
-
- git read-tree --reset -u HEAD &&
- git merge-recursive --ignore-space-change HEAD^ -- HEAD remote &&
- grep "Well said" text.txt >actual1 &&
- grep "when he departs" text.txt >actual2 &&
- test_cmp expected1 actual1 &&
- test_cmp expected2 actual2
-'
-
-test_expect_success '--ignore-all-space drops their new spaces' '
- cat <<-\EOF >expected &&
- Well said, Cephalus, I replied; but as concerning justice, what is
- EOF
-
- git read-tree --reset -u HEAD &&
- git merge-recursive --ignore-all-space HEAD^ -- HEAD remote &&
- grep "Well said" text.txt >actual &&
- test_cmp expected actual
-'
-
-test_expect_success '--ignore-all-space keeps our new spaces' '
- q_to_cr <<-\EOF >expected &&
- un intentionally; and when he departs to the world below he is not inQ
- EOF
-
- git read-tree --reset -u HEAD &&
- git merge-recursive --ignore-all-space HEAD^ -- HEAD remote &&
- grep "when he departs" text.txt >actual &&
- test_cmp expected actual
-'
-
-test_expect_success '--ignore-space-at-eol' '
- q_to_cr <<-\EOF >expected &&
- <<<<<<< HEAD
- is not in his right mind; ought I to give them back to him? No oneQ
- =======
- is not in his right mind, ought I to give them back to him? No one
- >>>>>>> remote
- EOF
-
- git read-tree --reset -u HEAD &&
- test_must_fail git merge-recursive --ignore-space-at-eol \
- HEAD^ -- HEAD remote &&
- conflict_hunks text.txt >actual &&
- test_cmp expected actual
-'
-
-test_done
--- /dev/null
+#!/bin/sh
+
+test_description='merge-recursive space options
+
+* [master] Clarify
+ ! [remote] Remove cruft
+--
+ + [remote] Remove cruft
+* [master] Clarify
+*+ [remote^] Initial revision
+* ok 1: setup
+'
+
+. ./test-lib.sh
+
+test_have_prereq SED_STRIPS_CR && SED_OPTIONS=-b
+if test_have_prereq GREP_STRIPS_CR
+then
+ GREP_OPTIONS=-U
+ export GREP_OPTIONS
+fi
+
+test_expect_success 'setup' '
+ conflict_hunks () {
+ sed $SED_OPTIONS -n -e "
+ /^<<<</ b conflict
+ b
+ : conflict
+ p
+ /^>>>>/ b
+ n
+ b conflict
+ " "$@"
+ } &&
+
+ cat <<-\EOF >text.txt &&
+ Hope, he says, cherishes the soul of him who lives in
+ justice and holiness and is the nurse of his age and the
+ companion of his journey;--hope which is mightiest to sway
+ the restless soul of man.
+
+ How admirable are his words! And the great blessing of riches, I do
+ not say to every man, but to a good man, is, that he has had no
+ occasion to deceive or to defraud others, either intentionally or
+ unintentionally; and when he departs to the world below he is not in
+ any apprehension about offerings due to the gods or debts which he owes
+ to men. Now to this peace of mind the possession of wealth greatly
+ contributes; and therefore I say, that, setting one thing against
+ another, of the many advantages which wealth has to give, to a man of
+ sense this is in my opinion the greatest.
+
+ Well said, Cephalus, I replied; but as concerning justice, what is
+ it?--to speak the truth and to pay your debts--no more than this? And
+ even to this are there not exceptions? Suppose that a friend when in
+ his right mind has deposited arms with me and he asks for them when he
+ is not in his right mind, ought I to give them back to him? No one
+ would say that I ought or that I should be right in doing so, any more
+ than they would say that I ought always to speak the truth to one who
+ is in his condition.
+
+ You are quite right, he replied.
+
+ But then, I said, speaking the truth and paying your debts is not a
+ correct definition of justice.
+
+ CEPHALUS - SOCRATES - POLEMARCHUS
+
+ Quite correct, Socrates, if Simonides is to be believed, said
+ Polemarchus interposing.
+
+ I fear, said Cephalus, that I must go now, for I have to look after the
+ sacrifices, and I hand over the argument to Polemarchus and the company.
+ EOF
+ git add text.txt &&
+ test_tick &&
+ git commit -m "Initial revision" &&
+
+ git checkout -b remote &&
+ sed -e "
+ s/\. /\. /g
+ s/[?] /? /g
+ s/ / /g
+ s/--/---/g
+ s/but as concerning/but as con cerning/
+ /CEPHALUS - SOCRATES - POLEMARCHUS/ d
+ " text.txt >text.txt+ &&
+ mv text.txt+ text.txt &&
+ git commit -a -m "Remove cruft" &&
+
+ git checkout master &&
+ sed -e "
+ s/\(not in his right mind\),\(.*\)/\1;\2Q/
+ s/Quite correct\(.*\)/It is too correct\1Q/
+ s/unintentionally/un intentionally/
+ /un intentionally/ s/$/Q/
+ s/Polemarchus interposing./Polemarchus, interposing.Q/
+ /justice and holiness/ s/$/Q/
+ /pay your debts/ s/$/Q/
+ " text.txt | q_to_cr >text.txt+ &&
+ mv text.txt+ text.txt &&
+ git commit -a -m "Clarify" &&
+ git show-branch --all
+'
+
+test_expect_success 'naive merge fails' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive HEAD^ -- HEAD remote &&
+ test_must_fail git update-index --refresh &&
+ grep "<<<<<<" text.txt
+'
+
+test_expect_success '--ignore-space-change makes merge succeed' '
+ git read-tree --reset -u HEAD &&
+ git merge-recursive --ignore-space-change HEAD^ -- HEAD remote
+'
+
+test_expect_success 'naive cherry-pick fails' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git cherry-pick --no-commit remote &&
+ git read-tree --reset -u HEAD &&
+ test_must_fail git cherry-pick remote &&
+ test_must_fail git update-index --refresh &&
+ grep "<<<<<<" text.txt
+'
+
+test_expect_success '-Xignore-space-change makes cherry-pick succeed' '
+ git read-tree --reset -u HEAD &&
+ git cherry-pick --no-commit -Xignore-space-change remote
+'
+
+test_expect_success '--ignore-space-change: our w/s-only change wins' '
+ q_to_cr <<-\EOF >expected &&
+ justice and holiness and is the nurse of his age and theQ
+ EOF
+
+ git read-tree --reset -u HEAD &&
+ git merge-recursive --ignore-space-change HEAD^ -- HEAD remote &&
+ grep "justice and holiness" text.txt >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success '--ignore-space-change: their real change wins over w/s' '
+ cat <<-\EOF >expected &&
+ it?---to speak the truth and to pay your debts---no more than this? And
+ EOF
+
+ git read-tree --reset -u HEAD &&
+ git merge-recursive --ignore-space-change HEAD^ -- HEAD remote &&
+ grep "pay your debts" text.txt >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success '--ignore-space-change: does not ignore new spaces' '
+ cat <<-\EOF >expected1 &&
+ Well said, Cephalus, I replied; but as con cerning justice, what is
+ EOF
+ q_to_cr <<-\EOF >expected2 &&
+ un intentionally; and when he departs to the world below he is not inQ
+ EOF
+
+ git read-tree --reset -u HEAD &&
+ git merge-recursive --ignore-space-change HEAD^ -- HEAD remote &&
+ grep "Well said" text.txt >actual1 &&
+ grep "when he departs" text.txt >actual2 &&
+ test_cmp expected1 actual1 &&
+ test_cmp expected2 actual2
+'
+
+test_expect_success '--ignore-all-space drops their new spaces' '
+ cat <<-\EOF >expected &&
+ Well said, Cephalus, I replied; but as concerning justice, what is
+ EOF
+
+ git read-tree --reset -u HEAD &&
+ git merge-recursive --ignore-all-space HEAD^ -- HEAD remote &&
+ grep "Well said" text.txt >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success '--ignore-all-space keeps our new spaces' '
+ q_to_cr <<-\EOF >expected &&
+ un intentionally; and when he departs to the world below he is not inQ
+ EOF
+
+ git read-tree --reset -u HEAD &&
+ git merge-recursive --ignore-all-space HEAD^ -- HEAD remote &&
+ grep "when he departs" text.txt >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success '--ignore-space-at-eol' '
+ q_to_cr <<-\EOF >expected &&
+ <<<<<<< HEAD
+ is not in his right mind; ought I to give them back to him? No oneQ
+ =======
+ is not in his right mind, ought I to give them back to him? No one
+ >>>>>>> remote
+ EOF
+
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive --ignore-space-at-eol \
+ HEAD^ -- HEAD remote &&
+ conflict_hunks text.txt >actual &&
+ test_cmp expected actual
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description='merge-recursive rename options
+
+Test rename detection by examining rename/delete conflicts.
+
+* (HEAD -> rename) rename
+| * (master) delete
+|/
+* base
+
+git diff --name-status base master
+D 0-old
+D 1-old
+D 2-old
+D 3-old
+
+git diff --name-status -M01 base rename
+R025 0-old 0-new
+R050 1-old 1-new
+R075 2-old 2-new
+R100 3-old 3-new
+
+Actual similarity indices are parsed from diff output. We rely on the fact that
+they are rounded down (see, e.g., Documentation/diff-generate-patch.txt, which
+mentions this in a different context).
+'
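+# With the four-line files created in the setup below, changing the first k
+# lines leaves 4-k of 4 lines intact, so diff reports a similarity of
+# roughly (4-k)/4: 0-new keeps one line (~25%), 1-new two (~50%), 2-new
+# three (~75%), and 3-new is an exact copy (100%).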
+
+. ./test-lib.sh
+
+get_expected_stages () {
+ git checkout rename -- $1-new &&
+ git ls-files --stage $1-new >expected-stages-undetected-$1 &&
+ sed "s/ 0 / 2 /" <expected-stages-undetected-$1 \
+ >expected-stages-detected-$1 &&
+ git read-tree -u --reset HEAD
+}
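+# The rename branch's entries start out at stage 0.  When the rename is
+# detected, the rename/delete conflict keeps "our" copy of the new path at
+# stage 2, so the detected case expects the same entries with the stage
+# rewritten from 0 to 2.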
+
+rename_detected () {
+ git ls-files --stage $1-old $1-new >stages-actual-$1 &&
+ test_cmp expected-stages-detected-$1 stages-actual-$1
+}
+
+rename_undetected () {
+ git ls-files --stage $1-old $1-new >stages-actual-$1 &&
+ test_cmp expected-stages-undetected-$1 stages-actual-$1
+}
+
+check_common () {
+ git ls-files --stage >stages-actual &&
+ test_line_count = 4 stages-actual
+}
+
+check_threshold_0 () {
+ check_common &&
+ rename_detected 0 &&
+ rename_detected 1 &&
+ rename_detected 2 &&
+ rename_detected 3
+}
+
+check_threshold_1 () {
+ check_common &&
+ rename_undetected 0 &&
+ rename_detected 1 &&
+ rename_detected 2 &&
+ rename_detected 3
+}
+
+check_threshold_2 () {
+ check_common &&
+ rename_undetected 0 &&
+ rename_undetected 1 &&
+ rename_detected 2 &&
+ rename_detected 3
+}
+
+check_exact_renames () {
+ check_common &&
+ rename_undetected 0 &&
+ rename_undetected 1 &&
+ rename_undetected 2 &&
+ rename_detected 3
+}
+
+check_no_renames () {
+ check_common &&
+ rename_undetected 0 &&
+ rename_undetected 1 &&
+ rename_undetected 2 &&
+ rename_undetected 3
+}
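+# check_threshold_N is the expected outcome when the rename threshold equals
+# file N's similarity: renames less similar than that (files 0..N-1) remain
+# undetected, the rest are detected.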
+
+test_expect_success 'setup repo' '
+ cat <<-\EOF >3-old &&
+ 33a
+ 33b
+ 33c
+ 33d
+ EOF
+ sed s/33/22/ <3-old >2-old &&
+ sed s/33/11/ <3-old >1-old &&
+ sed s/33/00/ <3-old >0-old &&
+ git add [0-3]-old &&
+ git commit -m base &&
+ git rm [0-3]-old &&
+ git commit -m delete &&
+ git checkout -b rename HEAD^ &&
+ cp 3-old 3-new &&
+ sed 1,1s/./x/ <2-old >2-new &&
+ sed 1,2s/./x/ <1-old >1-new &&
+ sed 1,3s/./x/ <0-old >0-new &&
+ git add [0-3]-new &&
+ git rm [0-3]-old &&
+ git commit -m rename &&
+ get_expected_stages 0 &&
+ get_expected_stages 1 &&
+ get_expected_stages 2 &&
+ get_expected_stages 3 &&
+ check_50="false" &&
+ tail="HEAD^ -- HEAD master"
+'
+
+test_expect_success 'setup thresholds' '
+ git diff --name-status -M01 HEAD^ HEAD >diff-output &&
+ test_debug "cat diff-output" &&
+ test_line_count = 4 diff-output &&
+ grep "R[0-9][0-9][0-9] \([0-3]\)-old \1-new" diff-output \
+ >grep-output &&
+ test_cmp diff-output grep-output &&
+ th0=$(sed -n "s/R\(...\) 0-old 0-new/\1/p" <diff-output) &&
+ th1=$(sed -n "s/R\(...\) 1-old 1-new/\1/p" <diff-output) &&
+ th2=$(sed -n "s/R\(...\) 2-old 2-new/\1/p" <diff-output) &&
+ th3=$(sed -n "s/R\(...\) 3-old 3-new/\1/p" <diff-output) &&
+ test "$th0" -lt "$th1" &&
+ test "$th1" -lt "$th2" &&
+ test "$th2" -lt "$th3" &&
+ test "$th3" = 100 &&
+ if test 50 -le "$th0"
+ then
+ check_50=check_threshold_0
+ elif test 50 -le "$th1"
+ then
+ check_50=check_threshold_1
+ elif test 50 -le "$th2"
+ then
+ check_50=check_threshold_2
+ fi &&
+ th0="$th0%" &&
+ th1="$th1%" &&
+ th2="$th2%" &&
+ th3="$th3%"
+'
+
+test_expect_success 'assumption for tests: rename detection with diff' '
+ git diff --name-status -M$th0 --diff-filter=R HEAD^ HEAD \
+ >diff-output-0 &&
+ git diff --name-status -M$th1 --diff-filter=R HEAD^ HEAD \
+ >diff-output-1 &&
+ git diff --name-status -M$th2 --diff-filter=R HEAD^ HEAD \
+ >diff-output-2 &&
+ git diff --name-status -M100% --diff-filter=R HEAD^ HEAD \
+ >diff-output-3 &&
+ test_line_count = 4 diff-output-0 &&
+ test_line_count = 3 diff-output-1 &&
+ test_line_count = 2 diff-output-2 &&
+ test_line_count = 1 diff-output-3
+'
+
+test_expect_success 'default similarity threshold is 50%' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive $tail &&
+ $check_50
+'
+
+test_expect_success 'low rename threshold' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive --find-renames=$th0 $tail &&
+ check_threshold_0
+'
+
+test_expect_success 'medium rename threshold' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive --find-renames=$th1 $tail &&
+ check_threshold_1
+'
+
+test_expect_success 'high rename threshold' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive --find-renames=$th2 $tail &&
+ check_threshold_2
+'
+
+test_expect_success 'exact renames only' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive --find-renames=100% $tail &&
+ check_exact_renames
+'
+
+test_expect_success 'rename threshold is truncated' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive --find-renames=200% $tail &&
+ check_exact_renames
+'
+
+test_expect_success 'disabled rename detection' '
+ git read-tree --reset -u HEAD &&
+ git merge-recursive --no-renames $tail &&
+ check_no_renames
+'
+
+test_expect_success 'last wins in --find-renames=<m> --find-renames=<n>' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive \
+ --find-renames=$th0 --find-renames=$th2 $tail &&
+ check_threshold_2
+'
+
+test_expect_success '--find-renames resets threshold' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive \
+ --find-renames=$th0 --find-renames $tail &&
+ $check_50
+'
+
+test_expect_success 'last wins in --no-renames --find-renames' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive --no-renames --find-renames $tail &&
+ $check_50
+'
+
+test_expect_success 'last wins in --find-renames --no-renames' '
+ git read-tree --reset -u HEAD &&
+ git merge-recursive --find-renames --no-renames $tail &&
+ check_no_renames
+'
+
+test_expect_success 'assumption for further tests: trivial merge succeeds' '
+ git read-tree --reset -u HEAD &&
+ git merge-recursive HEAD -- HEAD HEAD &&
+ git diff --quiet --cached &&
+ git merge-recursive --find-renames=$th0 HEAD -- HEAD HEAD &&
+ git diff --quiet --cached &&
+ git merge-recursive --find-renames=$th2 HEAD -- HEAD HEAD &&
+ git diff --quiet --cached &&
+ git merge-recursive --find-renames=100% HEAD -- HEAD HEAD &&
+ git diff --quiet --cached &&
+ git merge-recursive --no-renames HEAD -- HEAD HEAD &&
+ git diff --quiet --cached
+'
+
+test_expect_success '--find-renames rejects negative argument' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive --find-renames=-25 \
+ HEAD -- HEAD HEAD &&
+ git diff --quiet --cached
+'
+
+test_expect_success '--find-renames rejects non-numbers' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive --find-renames=0xf \
+ HEAD -- HEAD HEAD &&
+ git diff --quiet --cached
+'
+
+test_expect_success 'rename-threshold=<n> is a synonym for find-renames=<n>' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive --rename-threshold=$th0 $tail &&
+ check_threshold_0
+'
+
+test_expect_success 'last wins in --no-renames --rename-threshold=<n>' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive --no-renames --rename-threshold=$th0 $tail &&
+ check_threshold_0
+'
+
+test_expect_success 'last wins in --rename-threshold=<n> --no-renames' '
+ git read-tree --reset -u HEAD &&
+ git merge-recursive --rename-threshold=$th0 --no-renames $tail &&
+ check_no_renames
+'
+
+test_expect_success '--rename-threshold=<n> rejects negative argument' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive --rename-threshold=-25 \
+ HEAD -- HEAD HEAD &&
+ git diff --quiet --cached
+'
+
+test_expect_success '--rename-threshold=<n> rejects non-numbers' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive --rename-threshold=0xf \
+ HEAD -- HEAD HEAD &&
+ git diff --quiet --cached
+'
+
+test_expect_success 'last wins in --rename-threshold=<m> --find-renames=<n>' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive \
+ --rename-threshold=$th0 --find-renames=$th2 $tail &&
+ check_threshold_2
+'
+
+test_expect_success 'last wins in --find-renames=<m> --rename-threshold=<n>' '
+ git read-tree --reset -u HEAD &&
+ test_must_fail git merge-recursive \
+ --find-renames=$th2 --rename-threshold=$th0 $tail &&
+ check_threshold_0
+'
+
+test_done
echo Mi >path2/baz/b &&
find path? \( -type f -o -type l \) -print |
xargs git update-index --add &&
- tree=`git write-tree` &&
+ tree=$(git write-tree) &&
echo $tree'
test_output () {
echo 222 >path3/2.txt &&
find *.txt path* \( -type f -o -type l \) -print |
xargs git update-index --add &&
- tree=`git write-tree` &&
+ tree=$(git write-tree) &&
echo $tree
'
test_must_fail git branch --set-upstream-to HEAD^{}
'
+test_expect_success '--set-upstream-to fails on locked config' '
+ test_when_finished "rm -f .git/config.lock" &&
+ >.git/config.lock &&
+ git branch locked &&
+ test_must_fail git branch --set-upstream-to locked
+'
+
test_expect_success 'use --set-upstream-to modify HEAD' '
test_config branch.master.remote foo &&
test_config branch.master.merge foo &&
test_must_fail git branch --unset-upstream i-dont-exist
'
+test_expect_success '--unset-upstream should fail if config is locked' '
+ test_when_finished "rm -f .git/config.lock" &&
+ git branch --set-upstream-to locked &&
+ >.git/config.lock &&
+ test_must_fail git branch --unset-upstream
+'
+
test_expect_success 'test --unset-upstream on HEAD' '
git branch my14 &&
test_config branch.master.remote foo &&
git config remote.ambi1.fetch refs/heads/lalala:refs/heads/master &&
git config remote.ambi2.url lilili &&
git config remote.ambi2.fetch refs/heads/lilili:refs/heads/master &&
- git branch all1 master &&
+ test_must_fail git branch all1 master &&
test -z "$(git config branch.all1.merge)"
'
test_expect_success \
'see if git show-ref works as expected' \
'git branch a &&
- SHA1=`cat .git/refs/heads/a` &&
+ SHA1=$(cat .git/refs/heads/a) &&
echo "$SHA1 refs/heads/a" >expect &&
git show-ref a >result &&
test_cmp expect result'
HT=' '
+test_have_prereq MINGW ||
echo 2>/dev/null > "Name with an${HT}HT"
if ! test -f "Name with an${HT}HT"
then
test_must_fail git notes show HEAD^
'
+test_expect_success 'show notes from treeish' '
+ test "b3" = "$(git notes --ref commits^{tree} show)" &&
+ test "b4" = "$(git notes --ref commits@{1} show)"
+'
+
+test_expect_success 'cannot edit notes from non-ref' '
+ test_must_fail git notes --ref commits^{tree} edit &&
+ test_must_fail git notes --ref commits@{1} edit
+'
+
test_expect_success 'cannot "git notes add -m" where notes already exists' '
test_must_fail git notes add -m "b2" &&
test_path_is_missing .git/NOTES_EDITMSG &&
git notes add -m "Notes on 1st commit" 1st &&
git notes add -m "Notes on 2nd commit" 2nd &&
git notes add -m "Notes on 3rd commit" 3rd &&
- git notes add -m "Notes on 4th commit" 4th
+ git notes add -m "Notes on 4th commit" 4th &&
+ # Copy notes to remote-notes
+ git fetch . refs/notes/*:refs/remote-notes/origin/*
'
commit_sha1=$(git rev-parse 1st^{commit})
'
cp expect_notes_x expect_notes_y
+cp expect_notes_x expect_notes_v
cp expect_log_x expect_log_y
+cp expect_log_x expect_log_v
test_expect_success 'fail to merge empty notes ref into empty notes ref (z => y)' '
test_must_fail git -c "core.notesRef=refs/notes/y" notes merge z
test_must_fail git -c "core.notesRef=refs/notes/foo^{bar" notes merge x
'
-test_expect_success 'fail to merge various non-note-trees' '
- git config core.notesRef refs/notes/y &&
- test_must_fail git notes merge refs/notes &&
- test_must_fail git notes merge refs/notes/ &&
- test_must_fail git notes merge refs/notes/dir &&
- test_must_fail git notes merge refs/notes/dir/ &&
- test_must_fail git notes merge refs/heads/master &&
- test_must_fail git notes merge x: &&
- test_must_fail git notes merge x:foo &&
- test_must_fail git notes merge foo^{bar
+test_expect_success 'merge non-notes ref into empty notes ref (remote-notes/origin/x => v)' '
+ git config core.notesRef refs/notes/v &&
+ git notes merge refs/remote-notes/origin/x &&
+ verify_notes v &&
+ # refs/remote-notes/origin/x and v should point to the same notes commit
+ test "$(git rev-parse refs/remote-notes/origin/x)" = "$(git rev-parse refs/notes/v)"
'
test_expect_success 'merge notes into empty notes ref (x => y)' '
'
test_expect_success 'merge and reference trees equal' '
- test -z "`git diff-tree skip-merge skip-reference`"
+ test -z "$(git diff-tree skip-merge skip-reference)"
'
test_expect_success 'moved back to branch correctly' '
test_expect_success 'cherry-pick -x inserts blank line after one line subject' '
pristine_detach initial &&
- sha1=`git rev-parse mesg-one-line^0` &&
+ sha1=$(git rev-parse mesg-one-line^0) &&
git cherry-pick -x mesg-one-line &&
cat <<-EOF >expect &&
$mesg_one_line
test_expect_success 'cherry-pick -x inserts blank line when conforming footer not found' '
pristine_detach initial &&
- sha1=`git rev-parse mesg-no-footer^0` &&
+ sha1=$(git rev-parse mesg-no-footer^0) &&
git cherry-pick -x mesg-no-footer &&
cat <<-EOF >expect &&
$mesg_no_footer
test_expect_success 'cherry-pick -x -s inserts blank line when conforming footer not found' '
pristine_detach initial &&
- sha1=`git rev-parse mesg-no-footer^0` &&
+ sha1=$(git rev-parse mesg-no-footer^0) &&
git cherry-pick -x -s mesg-no-footer &&
cat <<-EOF >expect &&
$mesg_no_footer
test_expect_success 'cherry-pick -x -s adds sob when last sob doesnt match committer' '
pristine_detach initial &&
- sha1=`git rev-parse mesg-with-footer^0` &&
+ sha1=$(git rev-parse mesg-with-footer^0) &&
git cherry-pick -x -s mesg-with-footer &&
cat <<-EOF >expect &&
$mesg_with_footer
test_expect_success 'cherry-pick -x -s adds sob even when trailing sob exists for committer' '
pristine_detach initial &&
- sha1=`git rev-parse mesg-with-footer-sob^0` &&
+ sha1=$(git rev-parse mesg-with-footer-sob^0) &&
git cherry-pick -x -s mesg-with-footer-sob &&
cat <<-EOF >expect &&
$mesg_with_footer_sob
test_expect_success 'cherry-pick -x treats "(cherry picked from..." line as part of footer' '
pristine_detach initial &&
- sha1=`git rev-parse mesg-with-cherry-footer^0` &&
+ sha1=$(git rev-parse mesg-with-cherry-footer^0) &&
git cherry-pick -x mesg-with-cherry-footer &&
cat <<-EOF >expect &&
$mesg_with_cherry_footer
test_expect_success 'cherry-pick -x -s treats "(cherry picked from..." line as part of footer' '
pristine_detach initial &&
- sha1=`git rev-parse mesg-with-cherry-footer^0` &&
+ sha1=$(git rev-parse mesg-with-cherry-footer^0) &&
git cherry-pick -x -s mesg-with-cherry-footer &&
cat <<-EOF >expect &&
$mesg_with_cherry_footer
git add -- foo bar baz 'space embedded' -q &&
git commit -m 'add normal files'"
-if touch -- 'tab embedded' 'newline
+if test_have_prereq !MINGW && touch -- 'tab embedded' 'newline
embedded' 2>/dev/null
then
test_set_prereq FUNNYNAMES
git add test-file &&
git commit -m "add file for rm test" &&
git rm test-file > rm-output &&
- test `grep "^rm " rm-output | wc -l` = 1 &&
+ test $(grep "^rm " rm-output | wc -l) = 1 &&
rm -f test-file rm-output &&
git commit -m "remove file from rm test"
'
git add test-file &&
git commit -m "add file for rm --quiet test" &&
git rm --quiet test-file > rm-output &&
- test `wc -l < rm-output` = 0 &&
+ test $(wc -l < rm-output) = 0 &&
rm -f test-file rm-output &&
git commit -m "remove file from rm --quiet test"
'
echo foo >xfoo1 &&
chmod 755 xfoo1 &&
git add xfoo1 &&
- case "`git ls-files --stage xfoo1`" in
+ case "$(git ls-files --stage xfoo1)" in
100644" "*xfoo1) echo pass;;
*) echo fail; git ls-files --stage xfoo1; (exit 1);;
esac'
test_expect_success 'git add: filemode=0 should not get confused by symlink' '
rm -f xfoo1 &&
test_ln_s_add foo xfoo1 &&
- case "`git ls-files --stage xfoo1`" in
+ case "$(git ls-files --stage xfoo1)" in
120000" "*xfoo1) echo pass;;
*) echo fail; git ls-files --stage xfoo1; (exit 1);;
esac
echo foo >xfoo2 &&
chmod 755 xfoo2 &&
git update-index --add xfoo2 &&
- case "`git ls-files --stage xfoo2`" in
+ case "$(git ls-files --stage xfoo2)" in
100644" "*xfoo2) echo pass;;
*) echo fail; git ls-files --stage xfoo2; (exit 1);;
esac'
test_expect_success 'git add: filemode=0 should not get confused by symlink' '
rm -f xfoo2 &&
test_ln_s_add foo xfoo2 &&
- case "`git ls-files --stage xfoo2`" in
+ case "$(git ls-files --stage xfoo2)" in
120000" "*xfoo2) echo pass;;
*) echo fail; git ls-files --stage xfoo2; (exit 1);;
esac
'git update-index --add: Test that executable bit is not used...' \
'git config core.filemode 0 &&
test_ln_s_add xfoo2 xfoo3 && # runs git update-index --add
- case "`git ls-files --stage xfoo3`" in
+ case "$(git ls-files --stage xfoo3)" in
120000" "*xfoo3) echo pass;;
*) echo fail; git ls-files --stage xfoo3; (exit 1);;
esac'
test_expect_success 'git add --refresh' '
>foo && git add foo && git commit -a -m "commit all" &&
- test -z "`git diff-index HEAD -- foo`" &&
+ test -z "$(git diff-index HEAD -- foo)" &&
git read-tree HEAD &&
- case "`git diff-index HEAD -- foo`" in
+ case "$(git diff-index HEAD -- foo)" in
:100644" "*"M foo") echo pass;;
*) echo fail; (exit 1);;
esac &&
git add --refresh -- foo &&
- test -z "`git diff-index HEAD -- foo`"
+ test -z "$(git diff-index HEAD -- foo)"
'
test_expect_success 'git add --refresh with pathspec' '
add 'sub/foo'
EOF
-if mkdir ":" 2>/dev/null
+if test_have_prereq !MINGW && mkdir ":" 2>/dev/null
then
test_set_prereq COLON_DIR
fi
HT=' '
DQ='"'
+test_have_prereq MINGW ||
echo foo 2>/dev/null > "Name and an${HT}HT"
if ! test -f "Name and an${HT}HT"
then
test_cmp from filtered
'
+test_expect_success 'format-patch format.outputDirectory option' '
+ test_config format.outputDirectory patches &&
+ rm -fr patches &&
+ git format-patch master..side &&
+ test $(git rev-list master..side | wc -l) -eq $(ls patches | wc -l)
+'
+
+test_expect_success 'format-patch -o overrides format.outputDirectory' '
+ test_config format.outputDirectory patches &&
+ rm -fr patches patchset &&
+ git format-patch master..side -o patchset &&
+ test_path_is_missing patches &&
+ test_path_is_dir patchset
+'
+
test_done
P2='pathname with SP'
P3='pathname
with LF'
+test_have_prereq !MINGW &&
echo 2>/dev/null >"$P1" && test -f "$P1" && rm -f "$P1" || {
skip_all='Your filesystem does not allow tabs in filenames'
test_done
test_when_finished "rm -f \"tab embedded.txt\"" &&
test_when_finished "rm -f '\''\"quoteembedded\".txt'\''" &&
- if touch -- "tab embedded.txt" '\''"quoteembedded".txt'\''
+ if test_have_prereq !MINGW &&
+ touch -- "tab embedded.txt" '\''"quoteembedded".txt'\''
then
test_set_prereq FUNNYNAMES
fi
test_cmp expect out
'
+test_expect_success !MINGW 'shortlog can read --format=raw output' '
+ git log --format=raw HEAD >log &&
+ GIT_DIR=non-existing git shortlog -w <log >out &&
+ test_cmp expect out
+'
+
test_expect_success 'shortlog should add newline when input line matches wraplen' '
cat >expect <<\EOF &&
A U Thor (2):
git shortlog HEAD~2.. > out &&
test_cmp expect out'
-test_expect_success 'shortlog ignores commits with missing authors' '
- git commit --allow-empty -m normal &&
- git commit --allow-empty -m soon-to-be-broken &&
- git cat-file commit HEAD >commit.tmp &&
- sed "/^author/d" commit.tmp >broken.tmp &&
- commit=$(git hash-object -w -t commit --stdin <broken.tmp) &&
- git update-ref HEAD $commit &&
- cat >expect <<-\EOF &&
- A U Thor (1):
- normal
-
- EOF
- git shortlog HEAD~2.. >actual &&
- test_cmp expect actual
-'
-
test_expect_success 'shortlog with revision pseudo options' '
git shortlog --all &&
git shortlog --branches &&
test_expect_success 'split sample box' \
'git mailsplit -o. "$TEST_DIRECTORY"/t5100/sample.mbox >last &&
- last=`cat last` &&
+ last=$(cat last) &&
echo total is $last &&
- test `cat last` = 17'
+ test $(cat last) = 17'
check_mailinfo () {
mail=$1 opt=$2
}
-for mail in `echo 00*`
+for mail in 00*
do
test_expect_success "mailinfo $mail" '
check_mailinfo $mail "" &&
'mkdir rfc2047 &&
git mailsplit -orfc2047 "$TEST_DIRECTORY"/t5100/rfc2047-samples.mbox \
>rfc2047/last &&
- last=`cat rfc2047/last` &&
+ last=$(cat rfc2047/last) &&
echo total is $last &&
- test `cat rfc2047/last` = 11'
+ test $(cat rfc2047/last) = 11'
-for mail in `echo rfc2047/00*`
+for mail in rfc2047/00*
do
test_expect_success "mailinfo $mail" '
git mailinfo -u $mail-msg $mail-patch <$mail >$mail-info &&
'
. ./test-lib.sh
-TRASH=`pwd`
+TRASH=$(pwd)
test_expect_success \
'setup' \
test-genrandom "seed b" 2097152 > b_big &&
git update-index --add a a_big b b_big c &&
cat c >d && echo foo >>d && git update-index --add d &&
- tree=`git write-tree` &&
- commit=`git commit-tree $tree </dev/null` && {
+ tree=$(git write-tree) &&
+ commit=$(git commit-tree $tree </dev/null) && {
echo $tree &&
echo $commit &&
git ls-tree $tree | sed -e "s/.* \\([0-9a-f]*\\) .*/\\1/"
git diff-tree --root -p $commit &&
while read object
do
- t=`git cat-file -t $object` &&
+ t=$(git cat-file -t $object) &&
git cat-file $t $object || return 1
done <obj-list
} >expect'
git diff-tree --root -p $commit &&
while read object
do
- t=`git cat-file -t $object` &&
+ t=$(git cat-file -t $object) &&
git cat-file $t $object || return 1
done <obj-list
} >current &&
git diff-tree --root -p $commit &&
while read object
do
- t=`git cat-file -t $object` &&
+ t=$(git cat-file -t $object) &&
git cat-file $t $object || return 1
done <obj-list
} >current &&
git diff-tree --root -p $commit &&
while read object
do
- t=`git cat-file -t $object` &&
+ t=$(git cat-file -t $object) &&
git cat-file $t $object || return 1
done <obj-list
} >current &&
test_expect_success \
'verify-pack catches a corrupted sum of the index file itself' \
- 'l=`wc -c <test-3.idx` &&
- l=`expr $l - 20` &&
+ 'l=$(wc -c <test-3.idx) &&
+ l=$(expr $l - 20) &&
cat test-1-${packname_1}.pack >test-3.pack &&
printf "%20s" "" | dd of=test-3.idx count=20 bs=1 conv=notrunc seek=$l &&
if git verify-pack test-3.pack
git update-index --add $i || return 1
done &&
echo d >d && cat c >>d && git update-index --add d &&
- tree=`git write-tree` &&
- commit1=`git commit-tree $tree </dev/null` &&
+ tree=$(git write-tree) &&
+ commit1=$(git commit-tree $tree </dev/null) &&
git update-ref HEAD $commit1 &&
git repack -a -d &&
- test "`git count-objects`" = "0 objects, 0 kilobytes" &&
- pack1=`ls .git/objects/pack/*.pack` &&
+ test "$(git count-objects)" = "0 objects, 0 kilobytes" &&
+ pack1=$(ls .git/objects/pack/*.pack) &&
test -f "$pack1"'
test_expect_success \
'repack -a -d, packedGit{WindowSize,Limit} == 1 page' \
'git config core.packedGitWindowSize 512 &&
git config core.packedGitLimit 512 &&
- commit2=`git commit-tree $tree -p $commit1 </dev/null` &&
+ commit2=$(git commit-tree $tree -p $commit1 </dev/null) &&
git update-ref HEAD $commit2 &&
git repack -a -d &&
- test "`git count-objects`" = "0 objects, 0 kilobytes" &&
- pack2=`ls .git/objects/pack/*.pack` &&
+ test "$(git count-objects)" = "0 objects, 0 kilobytes" &&
+ pack2=$(ls .git/objects/pack/*.pack) &&
test -f "$pack2" &&
test "$pack1" \!= "$pack2"'
i=1 &&
while test $i -le 100
do
- iii=`printf '%03i' $i`
+ iii=$(printf '%03i' $i)
test-genrandom "bar" 200 > wide_delta_$iii &&
test-genrandom "baz $iii" 50 >> wide_delta_$iii &&
test-genrandom "foo"$i 100 > deep_delta_$iii &&
- test-genrandom "foo"`expr $i + 1` 100 >> deep_delta_$iii &&
- test-genrandom "foo"`expr $i + 2` 100 >> deep_delta_$iii &&
+ test-genrandom "foo"$(expr $i + 1) 100 >> deep_delta_$iii &&
+ test-genrandom "foo"$(expr $i + 2) 100 >> deep_delta_$iii &&
echo $iii >file_$iii &&
test-genrandom "$iii" 8192 >>file_$iii &&
git update-index --add file_$iii deep_delta_$iii wide_delta_$iii &&
- i=`expr $i + 1` || return 1
+ i=$(expr $i + 1) || return 1
done &&
{ echo 101 && test-genrandom 100 8192; } >file_101 &&
git update-index --add file_101 &&
- tree=`git write-tree` &&
- commit=`git commit-tree $tree </dev/null` && {
+ tree=$(git write-tree) &&
+ commit=$(git commit-tree $tree </dev/null) && {
echo $tree &&
git ls-tree $tree | sed -e "s/.* \\([0-9a-f]*\\) .*/\\1/"
} >obj-list &&
'[index v1] 2) create a stealth corruption in a delta base reference' \
'# This test assumes file_101 is a delta smaller than 16 bytes.
# It should be against file_100 but we substitute its base for file_099
- sha1_101=`git hash-object file_101` &&
- sha1_099=`git hash-object file_099` &&
- offs_101=`index_obj_offset 1.idx $sha1_101` &&
- nr_099=`index_obj_nr 1.idx $sha1_099` &&
+ sha1_101=$(git hash-object file_101) &&
+ sha1_099=$(git hash-object file_099) &&
+ offs_101=$(index_obj_offset 1.idx $sha1_101) &&
+ nr_099=$(index_obj_nr 1.idx $sha1_099) &&
chmod +w ".git/objects/pack/pack-${pack1}.pack" &&
dd of=".git/objects/pack/pack-${pack1}.pack" seek=$(($offs_101 + 1)) \
if=".git/objects/pack/pack-${pack1}.idx" \
'[index v2] 2) create a stealth corruption in a delta base reference' \
'# This test assumes file_101 is a delta smaller than 16 bytes.
# It should be against file_100 but we substitute its base for file_099
- sha1_101=`git hash-object file_101` &&
- sha1_099=`git hash-object file_099` &&
- offs_101=`index_obj_offset 1.idx $sha1_101` &&
- nr_099=`index_obj_nr 1.idx $sha1_099` &&
+ sha1_101=$(git hash-object file_101) &&
+ sha1_099=$(git hash-object file_099) &&
+ offs_101=$(index_obj_offset 1.idx $sha1_101) &&
+ nr_099=$(index_obj_nr 1.idx $sha1_099) &&
chmod +w ".git/objects/pack/pack-${pack1}.pack" &&
dd of=".git/objects/pack/pack-${pack1}.pack" seek=$(($offs_101 + 1)) \
if=".git/objects/pack/pack-${pack1}.idx" \
'rm -f .git/objects/pack/* &&
git index-pack --index-version=2 --stdin < "test-1-${pack1}.pack" &&
git verify-pack ".git/objects/pack/pack-${pack1}.pack" &&
- obj=`git hash-object file_001` &&
- nr=`index_obj_nr ".git/objects/pack/pack-${pack1}.idx" $obj` &&
+ obj=$(git hash-object file_001) &&
+ nr=$(index_obj_nr ".git/objects/pack/pack-${pack1}.idx" $obj) &&
chmod +w ".git/objects/pack/pack-${pack1}.idx" &&
printf xxxx | dd of=".git/objects/pack/pack-${pack1}.idx" conv=notrunc \
- bs=1 count=4 seek=$((8 + 256 * 4 + `wc -l <obj-list` * 20 + $nr * 4)) &&
+ bs=1 count=4 seek=$((8 + 256 * 4 + $(wc -l <obj-list) * 20 + $nr * 4)) &&
( while read obj
do git cat-file -p $obj >/dev/null || exit 1
done <obj-list ) &&
create_new_pack() {
rm -rf .git &&
git init &&
- blob_1=`git hash-object -t blob -w file_1` &&
- blob_2=`git hash-object -t blob -w file_2` &&
- blob_3=`git hash-object -t blob -w file_3` &&
- pack=`printf "$blob_1\n$blob_2\n$blob_3\n" |
- git pack-objects $@ .git/objects/pack/pack` &&
+ blob_1=$(git hash-object -t blob -w file_1) &&
+ blob_2=$(git hash-object -t blob -w file_2) &&
+ blob_3=$(git hash-object -t blob -w file_3) &&
+ pack=$(printf "$blob_1\n$blob_2\n$blob_3\n" |
+ git pack-objects $@ .git/objects/pack/pack) &&
pack=".git/objects/pack/pack-${pack}" &&
git verify-pack -v ${pack}.pack
}
do_repack() {
- pack=`printf "$blob_1\n$blob_2\n$blob_3\n" |
- git pack-objects $@ .git/objects/pack/pack` &&
+ pack=$(printf "$blob_1\n$blob_2\n$blob_3\n" |
+ git pack-objects $@ .git/objects/pack/pack) &&
pack=".git/objects/pack/pack-${pack}"
}
do_corrupt_object() {
- ofs=`git show-index < ${pack}.idx | grep $1 | cut -f1 -d" "` &&
+ ofs=$(git show-index < ${pack}.idx | grep $1 | cut -f1 -d" ") &&
ofs=$(($ofs + $2)) &&
chmod +w ${pack}.pack &&
dd of=${pack}.pack bs=1 conv=notrunc seek=$ofs &&
'
test_expect_success 'prune .git/shallow' '
- SHA1=`echo hi|git commit-tree HEAD^{tree}` &&
+ SHA1=$(echo hi|git commit-tree HEAD^{tree}) &&
echo $SHA1 >.git/shallow &&
git prune --dry-run >out &&
grep $SHA1 .git/shallow &&
test_description='git pack-object --include-tag'
. ./test-lib.sh
-TRASH=`pwd`
+TRASH=$(pwd)
test_expect_success setup '
echo c >d &&
git update-index --add d &&
- tree=`git write-tree` &&
- commit=`git commit-tree $tree </dev/null` &&
+ tree=$(git write-tree) &&
+ commit=$(git commit-tree $tree </dev/null) &&
echo "object $commit" >sig &&
echo "type commit" >>sig &&
echo "tag mytag" >>sig &&
echo "tagger $(git var GIT_COMMITTER_IDENT)" >>sig &&
echo >>sig &&
echo "our test tag" >>sig &&
- tag=`git mktag <sig` &&
+ tag=$(git mktag <sig) &&
rm d sig &&
git update-ref refs/tags/mytag $tag && {
echo $tree &&
--- /dev/null
+#!/bin/sh
+
+test_description='bounds-checking of access to mmapped on-disk file formats'
+. ./test-lib.sh
+
+clear_base () {
+ test_when_finished 'restore_base' &&
+ rm -f $base
+}
+
+restore_base () {
+ cp base-backup/* .git/objects/pack/
+}
+
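+# do_pack <objects> [pack-objects options]
+# Build a pack (and .idx) from the space-separated object list, passing
+# any further arguments to pack-objects, and record the resulting paths
+# in $pack and $idx for later munging.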
+do_pack () {
+ pack_objects=$1; shift
+ sha1=$(
+ for i in $pack_objects
+ do
+ echo $i
+ done | git pack-objects "$@" .git/objects/pack/pack
+ ) &&
+ pack=.git/objects/pack/pack-$sha1.pack &&
+ idx=.git/objects/pack/pack-$sha1.idx &&
+ chmod +w $pack $idx &&
+ test_when_finished 'rm -f "$pack" "$idx"'
+}
+
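+# munge <file> <offset> <bytes>
+# Overwrite the bytes at <offset> in <file> with the printf-interpreted
+# <bytes>, leaving the rest of the file untouched.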
+munge () {
+ printf "$3" | dd of="$1" bs=1 conv=notrunc seek=$2
+}
+
+# Offset in a v2 .idx to its initial and extended offset tables. For an index
+# with "nr" objects, this is:
+#
+# magic(4) + version(4) + fan-out(4*256) + sha1s(20*nr) + crc(4*nr),
+#
+# for the initial, and another ofs(4*nr) past that for the extended.
+#
+ofs_table () {
+ echo $((4 + 4 + 4*256 + 20*$1 + 4*$1))
+}
+extended_table () {
+ echo $(($(ofs_table "$1") + 4*$1))
+}
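+# For example, with a single object ofs_table is 4 + 4 + 1024 + 20 + 4 =
+# 1056 and extended_table is 1060; with two objects they are 1080 and
+# 1088 respectively.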
+
+test_expect_success 'set up base packfile and variables' '
+ # the hash of this content starts with ff, which
+ # makes some later computations much simpler
+ echo 74 >file &&
+ git add file &&
+ git commit -m base &&
+ git repack -ad &&
+ base=$(echo .git/objects/pack/*) &&
+ chmod +w $base &&
+ mkdir base-backup &&
+ cp $base base-backup/ &&
+ object=$(git rev-parse HEAD:file)
+'
+
+test_expect_success 'pack/index object count mismatch' '
+ do_pack $object &&
+ munge $pack 8 "\377\0\0\0" &&
+ clear_base &&
+
+ # We enumerate the objects from the completely-fine
+ # .idx, but notice later that the .pack is bogus
+ # and fail to show any data.
+ echo "$object missing" >expect &&
+ git cat-file --batch-all-objects --batch-check >actual &&
+ test_cmp expect actual &&
+
+ # ...and here fail to load the object (without segfaulting),
+ # but fallback to a good copy if available.
+ test_must_fail git cat-file blob $object &&
+ restore_base &&
+ git cat-file blob $object >actual &&
+ test_cmp file actual &&
+
+ # ...and make sure that index-pack --verify, which has its
+ # own reading routines, does not segfault.
+ test_must_fail git index-pack --verify $pack
+'
+
+test_expect_success 'matched bogus object count' '
+ do_pack $object &&
+ munge $pack 8 "\377\0\0\0" &&
+ munge $idx $((255 * 4)) "\377\0\0\0" &&
+ clear_base &&
+
+ # Unlike above, we should notice early that the .idx is totally
+ # bogus, and not even enumerate its contents.
+ >expect &&
+ git cat-file --batch-all-objects --batch-check >actual &&
+ test_cmp expect actual &&
+
+ # But as before, we can do the same object-access checks.
+ test_must_fail git cat-file blob $object &&
+ restore_base &&
+ git cat-file blob $object >actual &&
+ test_cmp file actual &&
+
+ test_must_fail git index-pack --verify $pack
+'
+
+# Note that we cannot check the fallback case for these
+# further .idx tests, as we notice the problem in functions
+# whose interface doesn't allow an error return (like use_pack()),
+# and thus we just die().
+#
+# There's also no point in doing enumeration tests, as
+# we are munging offsets here, which are about looking up
+# specific objects.
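+#
+# For reference: a v1 .idx has no header and stores a 4-byte offset
+# before each sha1, right after the 256-entry fan-out; a v2 .idx keeps
+# 31-bit offsets in the table located by ofs_table(), and an entry with
+# its most significant bit set indexes the 64-bit extended table
+# instead.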
+
+test_expect_success 'bogus object offset (v1)' '
+ do_pack $object --index-version=1 &&
+ munge $idx $((4 * 256)) "\377\0\0\0" &&
+ clear_base &&
+ test_must_fail git cat-file blob $object &&
+ test_must_fail git index-pack --verify $pack
+'
+
+test_expect_success 'bogus object offset (v2, no msb)' '
+ do_pack $object --index-version=2 &&
+ munge $idx $(ofs_table 1) "\0\377\0\0" &&
+ clear_base &&
+ test_must_fail git cat-file blob $object &&
+ test_must_fail git index-pack --verify $pack
+'
+
+test_expect_success 'bogus offset into v2 extended table' '
+ do_pack $object --index-version=2 &&
+ munge $idx $(ofs_table 1) "\377\0\0\0" &&
+ clear_base &&
+ test_must_fail git cat-file blob $object &&
+ test_must_fail git index-pack --verify $pack
+'
+
+test_expect_success 'bogus offset inside v2 extended table' '
+ # We need two objects here, so we can plausibly require
+ # an extended table (if the first object were larger than 2^31).
+ do_pack "$object $(git rev-parse HEAD)" --index-version=2 &&
+
+ # We have to make extra room for the table, so we cannot
+ # just munge in place as usual.
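+ # (The 0x80000000 entry written below has its most significant bit
+ # set, so it points at extended slot 0, which we fill with a bogus
+ # 0xff00000000000000 offset.)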
+ {
+ dd if=$idx bs=1 count=$(($(ofs_table 2) + 4)) &&
+ printf "\200\0\0\0" &&
+ printf "\377\0\0\0\0\0\0\0" &&
+ dd if=$idx bs=1 skip=$(extended_table 2)
+ } >tmp &&
+ mv tmp "$idx" &&
+ clear_base &&
+ test_must_fail git cat-file blob $object &&
+ test_must_fail git index-pack --verify $pack
+'
+
+test_expect_success 'bogus OFS_DELTA in packfile' '
+ # Generate a pack with a delta in it.
+ base=$(test-genrandom foo 3000 | git hash-object --stdin -w) &&
+ delta=$(test-genrandom foo 2000 | git hash-object --stdin -w) &&
+ do_pack "$base $delta" --delta-base-offset &&
+ rm -f .git/objects/??/* &&
+
+ # Double check that we have the delta we expect.
+ echo $base >expect &&
+ echo $delta | git cat-file --batch-check="%(deltabase)" >actual &&
+ test_cmp expect actual &&
+
+ # Now corrupt it. We assume the varint size for the delta is small
+ # enough to fit in the first byte (which it should be, since it
+ # is a pure deletion from the base), and that original ofs_delta
+ # takes 2 bytes (which it should, as it should be ~3000).
+ ofs=$(git show-index <$idx | grep $delta | cut -d" " -f1) &&
+ munge $pack $(($ofs + 1)) "\177\377" &&
+ test_must_fail git cat-file blob $delta >/dev/null
+'
+
+test_done
add () {
name=$1 &&
text="$@" &&
- branch=`echo $name | sed -e 's/^\(.\).*$/\1/'` &&
+ branch=$(echo $name | sed -e 's/^\(.\).*$/\1/') &&
parents="" &&
shift &&
case "$heads" in *B*)
echo $BTIP > .git/refs/heads/B;;
esac &&
- git symbolic-ref HEAD refs/heads/`echo $heads \
- | sed -e "s/^\(.\).*$/\1/"` &&
+ git symbolic-ref HEAD refs/heads/$(echo $heads \
+ | sed -e "s/^\(.\).*$/\1/") &&
git fsck --full &&
mv .git/objects/pack/pack-* . &&
- p=`ls -1 pack-*.pack` &&
+ p=$(ls -1 pack-*.pack) &&
git unpack-objects <$p &&
git fsck --full &&
- idx=`echo pack-*.idx` &&
- pack_count=`git show-index <$idx | wc -l` &&
+ idx=$(echo pack-*.idx) &&
+ pack_count=$(git show-index <$idx | wc -l) &&
test $pack_count = $count &&
rm -f pack-*
)
test_expect_success 'clone shallow depth 1' '
git clone --no-single-branch --depth 1 "file://$(pwd)/." shallow0 &&
- test "`git --git-dir=shallow0/.git rev-list --count HEAD`" = 1
+ test "$(git --git-dir=shallow0/.git rev-list --count HEAD)" = 1
'
test_expect_success 'clone shallow depth 1 with fsck' '
git config --global fetch.fsckobjects true &&
git clone --no-single-branch --depth 1 "file://$(pwd)/." shallow0fsck &&
- test "`git --git-dir=shallow0fsck/.git rev-list --count HEAD`" = 1 &&
+ test "$(git --git-dir=shallow0fsck/.git rev-list --count HEAD)" = 1 &&
git config --global --unset fetch.fsckobjects
'
'
test_expect_success 'clone shallow depth count' '
- test "`git --git-dir=shallow/.git rev-list --count HEAD`" = 2
+ test "$(git --git-dir=shallow/.git rev-list --count HEAD)" = 2
'
test_expect_success 'clone shallow object count' '
'
test_expect_success 'clone shallow depth count' '
- test "`git --git-dir=shallow/.git rev-list --count HEAD`" = 11
+ test "$(git --git-dir=shallow/.git rev-list --count HEAD)" = 11
'
test_expect_success 'clone shallow object count' '
git config transfer.fsckobjects false
) &&
test_must_fail ok=sigpipe git push --porcelain dst master:refs/heads/test >act &&
- test_cmp exp act
+ {
+ test_cmp exp act ||
+ ! test -s act
+ }
'
test_expect_success 'push with transfer.fsckobjects' '
git clone one test
'
+test_expect_success 'add remote whose URL agrees with url.<...>.insteadOf' '
+ test_config url.git@host.com:team/repo.git.insteadOf myremote &&
+ git remote add myremote git@host.com:team/repo.git
+'
+
test_expect_success C_LOCALE_OUTPUT 'remote information for the origin' '
(
cd test &&
test_expect_success 'remote forces tracking branches' '
(
cd test &&
- case `git config remote.second.fetch` in
+ case $(git config remote.second.fetch) in
+*) true ;;
*) false ;;
esac
)
'
+test_expect_success 'remove errors out early when deleting non-existent remote' '
+ (
+ cd test &&
+ echo "fatal: No such remote: foo" >expect &&
+ test_must_fail git remote rm foo 2>actual &&
+ test_i18ncmp expect actual
+ )
+'
+
+test_expect_success 'rename errors out early when renaming non-existent remote' '
+ (
+ cd test &&
+ echo "fatal: No such remote: foo" >expect &&
+ test_must_fail git remote rename foo bar 2>actual &&
+ test_i18ncmp expect actual
+ )
+'
+
+test_expect_success 'add existing foreign_vcs remote' '
+ test_config remote.foo.vcs bar &&
+ echo "fatal: remote foo already exists." >expect &&
+ test_must_fail git remote add foo bar 2>actual &&
+ test_i18ncmp expect actual
+'
+
+test_expect_success 'rename to an existing foreign_vcs remote' '
+ test_config remote.foo.vcs bar &&
+ test_config remote.bar.vcs bar &&
+ echo "fatal: remote bar already exists." >expect &&
+ test_must_fail git remote rename foo bar 2>actual &&
+ test_i18ncmp expect actual
+'
+
cat >test/expect <<EOF
* remote origin
Fetch URL: $(pwd)/one
echo foo | get_url_test --push --all someremote
'
+test_expect_success 'remote set-url with locked config' '
+ test_when_finished "rm -f .git/config.lock" &&
+ git config --get-all remote.someremote.url >expect &&
+ >.git/config.lock &&
+ test_must_fail git remote set-url someremote baz &&
+ git config --get-all remote.someremote.url >actual &&
+ cmp expect actual
+'
+
test_expect_success 'remote set-url bar' '
git remote set-url someremote bar &&
echo bar >expect &&
}
repo_fetched() {
- if test "`git log -1 --pretty=format:%s $1 --`" = "`cat mark`"; then
+ if test "$(git log -1 --pretty=format:%s $1 --)" = "$(cat mark)"; then
echo >&2 "repo was fetched: $1"
return 0
fi
. ./test-lib.sh
-D=`pwd`
+D=$(pwd)
test_bundle_object_count () {
git verify-pack -v "$1" >verify.out &&
cd two &&
git fetch &&
test -f .git/refs/heads/one &&
- mine=`git rev-parse refs/heads/one` &&
- his=`cd ../one && git rev-parse refs/heads/master` &&
+ mine=$(git rev-parse refs/heads/one) &&
+ his=$(cd ../one && git rev-parse refs/heads/master) &&
test "z$mine" = "z$his"
'
git fetch &&
test -f .git/refs/heads/two &&
test -f .git/refs/heads/one &&
- master_in_two=`cd ../two && git rev-parse master` &&
- one_in_two=`cd ../two && git rev-parse one` &&
+ master_in_two=$(cd ../two && git rev-parse master) &&
+ one_in_two=$(cd ../two && git rev-parse one) &&
{
echo "$one_in_two "
echo "$master_in_two not-for-merge"
'
-! rsync --help > /dev/null 2> /dev/null &&
-say 'Skipping rsync tests because rsync was not found' || {
-test_expect_success 'fetch via rsync' '
- git pack-refs &&
- mkdir rsynced &&
- (cd rsynced &&
- git init --bare &&
- git fetch "rsync:../.git" master:refs/heads/master &&
- git gc --prune &&
- test $(git rev-parse master) = $(cd .. && git rev-parse master) &&
- git fsck --full)
-'
-
-test_expect_success 'push via rsync' '
- mkdir rsynced2 &&
- (cd rsynced2 &&
- git init) &&
- (cd rsynced &&
- git push "rsync:../rsynced2/.git" master) &&
- (cd rsynced2 &&
- git gc --prune &&
- test $(git rev-parse master) = $(cd .. && git rev-parse master) &&
- git fsck --full)
-'
-
-test_expect_success 'push via rsync' '
- mkdir rsynced3 &&
- (cd rsynced3 &&
- git init) &&
- git push --all "rsync:rsynced3/.git" &&
- (cd rsynced3 &&
- test $(git rev-parse master) = $(cd .. && git rev-parse master) &&
- git fsck --full)
-'
-}
-
test_expect_success 'fetch with a non-applying branch.<name>.merge' '
git config branch.master.remote yeti &&
git config branch.master.merge refs/heads/bigfoot &&
grep refs/tags/magic actual
'
+test_expect_success 'ls-remote --symref' '
+ cat >expect <<-\EOF &&
+ ref: refs/heads/master HEAD
+ 1bd44cb9d13204b0fe1958db0082f5028a16eb3a HEAD
+ 1bd44cb9d13204b0fe1958db0082f5028a16eb3a refs/heads/master
+ 1bd44cb9d13204b0fe1958db0082f5028a16eb3a refs/remotes/origin/HEAD
+ 1bd44cb9d13204b0fe1958db0082f5028a16eb3a refs/remotes/origin/master
+ 1bd44cb9d13204b0fe1958db0082f5028a16eb3a refs/tags/mark
+ EOF
+ git ls-remote --symref >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'ls-remote with filtered symref (refname)' '
+ cat >expect <<-\EOF &&
+ ref: refs/heads/master HEAD
+ 1bd44cb9d13204b0fe1958db0082f5028a16eb3a HEAD
+ EOF
+ git ls-remote --symref . HEAD >actual &&
+ test_cmp expect actual
+'
+
+test_expect_failure 'ls-remote with filtered symref (--heads)' '
+ git symbolic-ref refs/heads/foo refs/tags/mark &&
+ cat >expect <<-\EOF &&
+ ref: refs/tags/mark refs/heads/foo
+ 1bd44cb9d13204b0fe1958db0082f5028a16eb3a refs/heads/foo
+ 1bd44cb9d13204b0fe1958db0082f5028a16eb3a refs/heads/master
+ EOF
+ git ls-remote --symref --heads . >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'ls-remote --symref omits filtered-out matches' '
+ cat >expect <<-\EOF &&
+ 1bd44cb9d13204b0fe1958db0082f5028a16eb3a refs/heads/foo
+ 1bd44cb9d13204b0fe1958db0082f5028a16eb3a refs/heads/master
+ EOF
+ git ls-remote --symref --heads . >actual &&
+ test_cmp expect actual &&
+ git ls-remote --symref . "refs/heads/*" >actual &&
+ test_cmp expect actual
+'
+
+
test_done
case "$cmd" in
'' | '#'*) continue ;;
esac
- test=`echo "$cmd" | sed -e 's|[/ ][/ ]*|_|g'`
- pfx=`printf "%04d" $test_count`
+ test=$(echo "$cmd" | sed -e 's|[/ ][/ ]*|_|g')
+ pfx=$(printf "%04d" $test_count)
expect_f="$TEST_DIRECTORY/t5515/fetch.$test"
actual_f="$pfx-fetch.$test"
expect_r="$TEST_DIRECTORY/t5515/refs.$test"
. ./test-lib.sh
-D=`pwd`
+D=$(pwd)
mk_empty () {
repo_name="$1"
test_expect_success 'push sha1 with non-existent, incomplete dest' '
mk_test testrepo &&
- test_must_fail git push testrepo `git rev-parse master`:foo
+ test_must_fail git push testrepo $(git rev-parse master):foo
'
. ./test-lib.sh
-D=`pwd`
+D=$(pwd)
invert () {
if "$@"; then
test "$(git rev-parse HEAD^2)" = "$(git rev-parse keep-merge)"
'
+test_expect_success 'pull.rebase=interactive' '
+ write_script "$TRASH_DIRECTORY/fake-editor" <<-\EOF &&
+ echo I was here >fake.out &&
+ false
+ EOF
+ test_set_editor "$TRASH_DIRECTORY/fake-editor" &&
+ test_must_fail git pull --rebase=interactive . copy &&
+ test "I was here" = "$(cat fake.out)"
+'
+
test_expect_success 'pull.rebase=invalid fails' '
git reset --hard before-preserve-rebase &&
test_config pull.rebase invalid &&
# git rev-parse --show-cdup printed a path relative to
# clone-repo/subdir/, not subdir-link/. Git rev-parse --show-cdup
# used the correct .git, but when the git pull shell script did
-# "cd `git rev-parse --show-cdup`", it ended up in the wrong
+# "cd $(git rev-parse --show-cdup)", it ended up in the wrong
# directory. A POSIX shell's "cd" works a little differently
# than chdir() in C; "cd -P" is much closer to chdir().
#
git add subfile &&
git commit -m new subfile &&
head2=$(git rev-parse --short HEAD) &&
- echo "From $pwd/submodule" > ../expect.err &&
+ echo "Fetching submodule submodule" > ../expect.err &&
+ echo "From $pwd/submodule" >> ../expect.err &&
echo " $head1..$head2 master -> origin/master" >> ../expect.err
) &&
(
git add deepsubfile &&
git commit -m new deepsubfile &&
head2=$(git rev-parse --short HEAD) &&
+ echo "Fetching submodule submodule/subdir/deepsubmodule" >> ../expect.err
echo "From $pwd/deepsubmodule" >> ../expect.err &&
echo " $head1..$head2 master -> origin/master" >> ../expect.err
)
(
cd downstream &&
git submodule update --init --recursive
- ) &&
- echo "Fetching submodule submodule" > expect.out &&
- echo "Fetching submodule submodule/subdir/deepsubmodule" >> expect.out
+ )
'
test_expect_success "fetch --recurse-submodules recurses into submodules" '
cd downstream &&
git fetch --recurse-submodules >../actual.out 2>../actual.err
) &&
- test_i18ncmp expect.out actual.out &&
+ test_must_be_empty actual.out &&
test_i18ncmp expect.err actual.err
'
+test_expect_success "fetch --recurse-submodules -j2 has the same output behaviour" '
+ add_upstream_commit &&
+ (
+ cd downstream &&
+ GIT_TRACE=$(pwd)/../trace.out git fetch --recurse-submodules -j2 2>../actual.err
+ ) &&
+ test_must_be_empty actual.out &&
+ test_i18ncmp expect.err actual.err &&
+ grep "2 tasks" trace.out
+'
+
test_expect_success "fetch alone only fetches superproject" '
add_upstream_commit &&
(
git config -f .gitmodules submodule.submodule.fetchRecurseSubmodules true &&
git fetch >../actual.out 2>../actual.err
) &&
- test_i18ncmp expect.out actual.out &&
+ test_must_be_empty actual.out &&
test_i18ncmp expect.err actual.err
'
git config --unset -f .gitmodules submodule.submodule.fetchRecurseSubmodules &&
git config --unset submodule.submodule.fetchRecurseSubmodules
) &&
- test_i18ncmp expect.out actual.out &&
+ test_must_be_empty actual.out &&
test_i18ncmp expect.err actual.err
'
! test -s actual.err
'
+test_expect_success "--quiet propagates to parallel submodules" '
+ (
+ cd downstream &&
+ git fetch --recurse-submodules -j 2 --quiet >../actual.out 2>../actual.err
+ ) &&
+ ! test -s actual.out &&
+ ! test -s actual.err
+'
+
test_expect_success "--dry-run propagates to submodules" '
add_upstream_commit &&
(
cd downstream &&
git fetch --recurse-submodules --dry-run >../actual.out 2>../actual.err
) &&
- test_i18ncmp expect.out actual.out &&
+ test_must_be_empty actual.out &&
test_i18ncmp expect.err actual.err
'
cd downstream &&
git fetch --recurse-submodules >../actual.out 2>../actual.err
) &&
- test_i18ncmp expect.out actual.out &&
+ test_must_be_empty actual.out &&
test_i18ncmp expect.err actual.err
'
git config fetch.recurseSubmodules true
git fetch >../actual.out 2>../actual.err
) &&
- test_i18ncmp expect.out actual.out &&
+ test_must_be_empty actual.out &&
test_i18ncmp expect.err actual.err
'
) &&
git fetch --recurse-submodules >../actual.out 2>../actual.err
) &&
- test_i18ncmp expect.out actual.out &&
+ test_must_be_empty actual.out &&
test_i18ncmp expect.err actual.err
'
git add submodule &&
git commit -m "new submodule" &&
head2=$(git rev-parse --short HEAD) &&
- echo "Fetching submodule submodule" > expect.out.sub &&
echo "From $pwd/." > expect.err.sub &&
echo " $head1..$head2 master -> origin/master" >>expect.err.sub &&
- head -2 expect.err >> expect.err.sub &&
+ head -3 expect.err >> expect.err.sub &&
(
cd downstream &&
git fetch >../actual.out 2>../actual.err
) &&
test_i18ncmp expect.err.sub actual.err &&
- test_i18ncmp expect.out.sub actual.out
+ test_must_be_empty actual.out
'
test_expect_success "Recursion doesn't happen when new superproject commits don't change any submodules" '
)
) &&
test_i18ncmp expect.err.sub actual.err &&
- test_i18ncmp expect.out actual.out
+ test_must_be_empty actual.out
'
test_expect_success "Recursion picks up all submodules when necessary" '
git add subdir/deepsubmodule &&
git commit -m "new deepsubmodule"
head2=$(git rev-parse --short HEAD) &&
- echo "From $pwd/submodule" > ../expect.err.sub &&
+ echo "Fetching submodule submodule" > ../expect.err.sub &&
+ echo "From $pwd/submodule" >> ../expect.err.sub &&
echo " $head1..$head2 master -> origin/master" >> ../expect.err.sub
) &&
head1=$(git rev-parse --short HEAD) &&
echo "From $pwd/." > expect.err.2 &&
echo " $head1..$head2 master -> origin/master" >> expect.err.2 &&
cat expect.err.sub >> expect.err.2 &&
- tail -2 expect.err >> expect.err.2 &&
+ tail -3 expect.err >> expect.err.2 &&
(
cd downstream &&
git fetch >../actual.out 2>../actual.err
) &&
test_i18ncmp expect.err.2 actual.err &&
- test_i18ncmp expect.out actual.out
+ test_must_be_empty actual.out
'
test_expect_success "'--recurse-submodules=on-demand' doesn't recurse when no new commits are fetched in the superproject (and ignores config)" '
git add subdir/deepsubmodule &&
git commit -m "new deepsubmodule" &&
head2=$(git rev-parse --short HEAD) &&
- echo "From $pwd/submodule" > ../expect.err.sub &&
+ echo "Fetching submodule submodule" > ../expect.err.sub &&
+ echo "From $pwd/submodule" >> ../expect.err.sub &&
echo " $head1..$head2 master -> origin/master" >> ../expect.err.sub
) &&
(
git add submodule &&
git commit -m "new submodule" &&
head2=$(git rev-parse --short HEAD) &&
- tail -2 expect.err > expect.err.deepsub &&
+ tail -3 expect.err > expect.err.deepsub &&
echo "From $pwd/." > expect.err &&
echo " $head1..$head2 master -> origin/master" >>expect.err &&
cat expect.err.sub >> expect.err &&
git config --unset -f .gitmodules submodule.subdir/deepsubmodule.fetchRecursive
)
) &&
- test_i18ncmp expect.out actual.out &&
+ test_must_be_empty actual.out &&
test_i18ncmp expect.err actual.err
'
head2=$(git rev-parse --short HEAD) &&
echo "From $pwd/." > expect.err.2 &&
echo " $head1..$head2 master -> origin/master" >>expect.err.2 &&
- head -2 expect.err >> expect.err.2 &&
+ head -3 expect.err >> expect.err.2 &&
(
cd downstream &&
git config fetch.recurseSubmodules on-demand &&
cd downstream &&
git config --unset fetch.recurseSubmodules
) &&
- test_i18ncmp expect.out.sub actual.out &&
+ test_must_be_empty actual.out &&
test_i18ncmp expect.err.2 actual.err
'
head2=$(git rev-parse --short HEAD) &&
echo "From $pwd/." > expect.err.2 &&
echo " $head1..$head2 master -> origin/master" >>expect.err.2 &&
- head -2 expect.err >> expect.err.2 &&
+ head -3 expect.err >> expect.err.2 &&
(
cd downstream &&
git config submodule.submodule.fetchRecurseSubmodules on-demand &&
cd downstream &&
git config --unset submodule.submodule.fetchRecurseSubmodules
) &&
- test_i18ncmp expect.out.sub actual.out &&
+ test_must_be_empty actual.out &&
test_i18ncmp expect.err.2 actual.err
'
. ./test-lib.sh
-D=`pwd`
+D=$(pwd)
corrupt_repo () {
object_sha1=$(git rev-parse "$1") &&
cat >proxy <<'EOF'
#!/bin/sh
echo >&2 "proxying for $*"
-cmd=`"$PERL_PATH" -e '
+cmd=$("$PERL_PATH" -e '
read(STDIN, $buf, 4);
my $n = hex($buf) - 4;
read(STDIN, $buf, $n);
# drop absolute-path on repo name
$cmd =~ s{ /}{ };
print $cmd;
-'`
+')
echo >&2 "Running '$cmd'"
exec $cmd
EOF
test_expect_success 'fetch something upstream has but hidden by clients shallow boundaries' '
# the blob "1" is available in .git but hidden by the
# shallow2/.git/shallow and it should be resent
- ! git --git-dir=shallow2/.git cat-file blob `echo 1|git hash-object --stdin` >/dev/null &&
+ ! git --git-dir=shallow2/.git cat-file blob $(echo 1|git hash-object --stdin) >/dev/null &&
echo 1 >1.t &&
git add 1.t &&
git commit -m add-1-back &&
EOF
test_cmp expect actual
) &&
- git --git-dir=shallow2/.git cat-file blob `echo 1|git hash-object --stdin` >/dev/null
+ git --git-dir=shallow2/.git cat-file blob $(echo 1|git hash-object --stdin) >/dev/null
'
'
test_expect_success 'push from full to shallow' '
- ! git --git-dir=shallow2/.git cat-file blob `echo 1|git hash-object --stdin` &&
+ ! git --git-dir=shallow2/.git cat-file blob $(echo 1|git hash-object --stdin) &&
commit 1 &&
git push shallow2/.git +master:refs/remotes/top/master &&
(
3
EOF
test_cmp expect actual &&
- git cat-file blob `echo 1|git hash-object --stdin` >/dev/null
+ git cat-file blob $(echo 1|git hash-object --stdin) >/dev/null
)
'
test_done
test_expect_success 'fetch notices corrupt pack' '
cp -R "$HTTPD_DOCUMENT_ROOT_PATH"/repo_pack.git "$HTTPD_DOCUMENT_ROOT_PATH"/repo_bad1.git &&
(cd "$HTTPD_DOCUMENT_ROOT_PATH"/repo_bad1.git &&
- p=`ls objects/pack/pack-*.pack` &&
+ p=$(ls objects/pack/pack-*.pack) &&
chmod u+w $p &&
printf %0256d 0 | dd of=$p bs=256 count=1 seek=1 conv=notrunc
) &&
(cd repo_bad1.git &&
git --bare init &&
test_must_fail git --bare fetch $HTTPD_URL/dumb/repo_bad1.git &&
- test 0 = `ls objects/pack/pack-*.pack | wc -l`
+ test 0 = $(ls objects/pack/pack-*.pack | wc -l)
)
'
test_expect_success 'fetch notices corrupt idx' '
cp -R "$HTTPD_DOCUMENT_ROOT_PATH"/repo_pack.git "$HTTPD_DOCUMENT_ROOT_PATH"/repo_bad2.git &&
(cd "$HTTPD_DOCUMENT_ROOT_PATH"/repo_bad2.git &&
- p=`ls objects/pack/pack-*.idx` &&
+ p=$(ls objects/pack/pack-*.idx) &&
chmod u+w $p &&
printf %0256d 0 | dd of=$p bs=256 count=1 seek=1 conv=notrunc
) &&
(cd repo_bad2.git &&
git --bare init &&
test_must_fail git --bare fetch $HTTPD_URL/dumb/repo_bad2.git &&
- test 0 = `ls objects/pack | wc -l`
+ test 0 = $(ls objects/pack | wc -l)
)
'
. "$TEST_DIRECTORY"/lib-git-daemon.sh
start_git_daemon
+check_verbose_connect () {
+ grep -F "Looking up 127.0.0.1 ..." stderr &&
+ grep -F "Connecting to 127.0.0.1 (port " stderr &&
+ grep -F "done." stderr
+}
+
test_expect_success 'setup repository' '
git config push.default matching &&
echo content >file &&
'
test_expect_success 'clone git repository' '
- git clone "$GIT_DAEMON_URL/repo.git" clone &&
+ git clone -v "$GIT_DAEMON_URL/repo.git" clone 2>stderr &&
+ check_verbose_connect &&
test_cmp file clone/file
'
echo content >>file &&
git commit -a -m two &&
git push public &&
- (cd clone && git pull) &&
+ (cd clone && git pull -v) 2>stderr &&
+ check_verbose_connect &&
test_cmp file clone/file
'
+test_expect_success 'no-op fetch -v stderr is as expected' '
+ (cd clone && git fetch -v) 2>stderr &&
+ check_verbose_connect
+'
+
+test_expect_success 'no-op fetch without "-v" is quiet' '
+ (cd clone && git fetch) 2>stderr &&
+ ! test -s stderr
+'
+
test_expect_success 'remote detects correct HEAD' '
git push public master:other &&
(cd clone &&
test_expect_success 'fetch notices corrupt pack' '
cp -R "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_pack.git "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_bad1.git &&
(cd "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_bad1.git &&
- p=`ls objects/pack/pack-*.pack` &&
+ p=$(ls objects/pack/pack-*.pack) &&
chmod u+w $p &&
printf %0256d 0 | dd of=$p bs=256 count=1 seek=1 conv=notrunc
) &&
(cd repo_bad1.git &&
git --bare init &&
test_must_fail git --bare fetch "$GIT_DAEMON_URL/repo_bad1.git" &&
- test 0 = `ls objects/pack/pack-*.pack | wc -l`
+ test 0 = $(ls objects/pack/pack-*.pack | wc -l)
)
'
test_expect_success 'fetch notices corrupt idx' '
cp -R "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_pack.git "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_bad2.git &&
(cd "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_bad2.git &&
- p=`ls objects/pack/pack-*.idx` &&
+ p=$(ls objects/pack/pack-*.idx) &&
chmod u+w $p &&
printf %0256d 0 | dd of=$p bs=256 count=1 seek=1 conv=notrunc
) &&
(cd repo_bad2.git &&
git --bare init &&
test_must_fail git --bare fetch "$GIT_DAEMON_URL/repo_bad2.git" &&
- test 0 = `ls objects/pack | wc -l`
+ test 0 = $(ls objects/pack | wc -l)
)
'
. ./test-lib.sh
+X=
+test_have_prereq !MINGW || X=.exe
+
test_expect_success setup '
rm -fr .git &&
'
+test_expect_success 'clone from hooks' '
+
+ test_create_repo r0 &&
+ cd r0 &&
+ test_commit initial &&
+ cd .. &&
+ git init r1 &&
+ cd r1 &&
+ cat >.git/hooks/pre-commit <<-\EOF &&
+ #!/bin/sh
+ git clone ../r0 ../r2
+ exit 1
+ EOF
+ chmod u+x .git/hooks/pre-commit &&
+ : >file &&
+ git add file &&
+ test_must_fail git commit -m invoke-hook &&
+ cd .. &&
+ test_cmp r0/.git/HEAD r2/.git/HEAD &&
+ test_cmp r0/initial.t r2/initial.t
+
+'
+
test_expect_success 'clone creates intermediate directories' '
git clone src long/path/to/dst &&
'
test_expect_success 'clone separate gitdir: output' '
- echo "gitdir: `pwd`/realgitdir" >expected &&
+ echo "gitdir: $(pwd)/realgitdir" >expected &&
test_cmp expected dst/.git
'
setup_ssh_wrapper () {
test_expect_success 'setup ssh wrapper' '
- write_script "$TRASH_DIRECTORY/ssh-wrapper" <<-\EOF &&
- echo >>"$TRASH_DIRECTORY/ssh-output" "ssh: $*" &&
- # throw away all but the last argument, which should be the
- # command
- while test $# -gt 1; do shift; done
- eval "$1"
- EOF
- GIT_SSH="$TRASH_DIRECTORY/ssh-wrapper" &&
+ cp "$GIT_BUILD_DIR/test-fake-ssh$X" \
+ "$TRASH_DIRECTORY/ssh-wrapper$X" &&
+ GIT_SSH="$TRASH_DIRECTORY/ssh-wrapper$X" &&
export GIT_SSH &&
export TRASH_DIRECTORY &&
>"$TRASH_DIRECTORY"/ssh-output
}
copy_ssh_wrapper_as () {
- cp "$TRASH_DIRECTORY/ssh-wrapper" "$1" &&
- GIT_SSH="$1" &&
+ cp "$TRASH_DIRECTORY/ssh-wrapper$X" "${1%$X}$X" &&
+ GIT_SSH="${1%$X}$X" &&
export GIT_SSH
}
test_description='test clone --reference'
. ./test-lib.sh
-base_dir=`pwd`
+base_dir=$(pwd)
U=$base_dir/UPLOAD_LOG
test_line_count = 0 fsck.log
}
-base_dir=`pwd`
+base_dir=$(pwd)
test_expect_success 'preparing first repository' \
'test_create_repo A && cd A &&
make_bare() {
git init --bare "$1" &&
(cd "$1" &&
- tree=`git hash-object -w -t tree /dev/null` &&
+ tree=$(git hash-object -w -t tree /dev/null) &&
commit=$(echo "$1" | git commit-tree $tree) &&
git update-ref HEAD $commit
)
echo >subdir/fileB fileB &&
git add fileA subdir/fileB &&
git commit -a -m "Initial in one history." &&
- A0=`git rev-parse --verify HEAD` &&
+ A0=$(git rev-parse --verify HEAD) &&
echo >fileA fileA modified &&
git commit -a -m "Second in one history." &&
- A1=`git rev-parse --verify HEAD` &&
+ A1=$(git rev-parse --verify HEAD) &&
echo >subdir/fileB fileB modified &&
git commit -a -m "Third in one history." &&
- A2=`git rev-parse --verify HEAD` &&
+ A2=$(git rev-parse --verify HEAD) &&
rm -f .git/refs/heads/master .git/index &&
echo >subdir/fileB fileB again &&
git add fileA subdir/fileB &&
git commit -a -m "Initial in alternate history." &&
- B0=`git rev-parse --verify HEAD` &&
+ B0=$(git rev-parse --verify HEAD) &&
echo >fileA fileA modified in alternate history &&
git commit -a -m "Second in alternate history." &&
- B1=`git rev-parse --verify HEAD` &&
+ B1=$(git rev-parse --verify HEAD) &&
echo >subdir/fileB fileB modified in alternate history &&
git commit -a -m "Third in alternate history." &&
- B2=`git rev-parse --verify HEAD` &&
+ B2=$(git rev-parse --verify HEAD) &&
: done
'
# Test if bisection size is close to half of list size within
# tolerance.
#
- _bisect_err=`expr $_list_size - $_bisection_size \* 2`
- test "$_bisect_err" -lt 0 && _bisect_err=`expr 0 - $_bisect_err`
- _bisect_err=`expr $_bisect_err / 2` ; # floor
+ _bisect_err=$(expr $_list_size - $_bisection_size \* 2)
+ test "$_bisect_err" -lt 0 && _bisect_err=$(expr 0 - $_bisect_err)
+ _bisect_err=$(expr $_bisect_err / 2) ; # floor
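+ # e.g. a list of 9 entries and a bisection of 4: |9 - 2*4| = 1, halved (floor) to 0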
test_expect_success \
"bisection diff $_bisect_option $_head $* <= $_max_diff" \
test_expect_success 'set up --show-all --parents test' '
test_commit one foo.txt &&
- commit1=`git rev-list -1 HEAD` &&
+ commit1=$(git rev-list -1 HEAD) &&
test_commit two bar.txt &&
- commit2=`git rev-list -1 HEAD` &&
+ commit2=$(git rev-list -1 HEAD) &&
test_commit three foo.txt &&
- commit3=`git rev-list -1 HEAD`
+ commit3=$(git rev-list -1 HEAD)
'
test_expect_success '--parents rewrites TREESAME parents correctly' '
make_text() {
echo $1: $2
- for i in `count 20`; do
+ for i in $(count 20); do
echo $1: $i
done
echo $1: $3
test_expect_success 'setup' '
for p in file sub/file sub/sub/file sub/file2 sub/sub/sub/file sub2/file; do
if echo $p | grep /; then
- mkdir -p `dirname $p`
+ mkdir -p $(dirname $p)
fi &&
: >$p &&
git add $p &&
--- /dev/null
+#!/bin/sh
+
+test_description='test dwim of revs versus pathspecs in revision parser'
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ test_commit base &&
+ echo content >"br[ack]ets" &&
+ git add . &&
+ test_tick &&
+ git commit -m brackets
+'
+
+test_expect_success 'non-rev wildcard dwims to pathspec' '
+ git log -- "*.t" >expect &&
+ git log "*.t" >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'tree:path with metacharacters dwims to rev' '
+ git show "HEAD:br[ack]ets" -- >expect &&
+ git show "HEAD:br[ack]ets" >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '^{foo} with metacharacters dwims to rev' '
+ git log "HEAD^{/b.*}" -- >expect &&
+ git log "HEAD^{/b.*}" >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success '@{foo} with metacharacters dwims to rev' '
+ git log "HEAD@{now [or thereabouts]}" -- >expect &&
+ git log "HEAD@{now [or thereabouts]}" >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success ':/*.t from a subdir dwims to a pathspec' '
+ mkdir subdir &&
+ (
+ cd subdir &&
+ git log -- ":/*.t" >expect &&
+ git log ":/*.t" >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_done
test_cmp expect actual
'
+cat >expect <<-\EOF
+|       refname is refs/heads/master       |refs/heads/master
+|        refname is refs/heads/side        |refs/heads/side
+|         refname is refs/odd/spot         |refs/odd/spot
+|     refname is refs/tags/double-tag      |refs/tags/double-tag
+|        refname is refs/tags/four         |refs/tags/four
+|         refname is refs/tags/one         |refs/tags/one
+|     refname is refs/tags/signed-tag      |refs/tags/signed-tag
+|        refname is refs/tags/three        |refs/tags/three
+|         refname is refs/tags/two         |refs/tags/two
+EOF
+
+test_align_permutations() {
+ while read -r option
+ do
+ test_expect_success "align:$option" '
+ git for-each-ref --format="|%(align:$option)refname is %(refname)%(end)|%(refname)" >actual &&
+ test_cmp expect actual
+ '
+ done
+}
+
+test_align_permutations <<-\EOF
+ middle,42
+ 42,middle
+ position=middle,42
+ 42,position=middle
+ middle,width=42
+ width=42,middle
+ position=middle,width=42
+ width=42,position=middle
+EOF
+
+# Last one wins (silently) when multiple arguments of the same type are given
+
+test_align_permutations <<-\EOF
+ 32,width=42,middle
+ width=30,42,middle
+ width=42,position=right,middle
+ 42,right,position=middle
+EOF
+
# Individual atoms inside %(align:...) and %(end) must not be quoted.
test_expect_success 'alignment with format quote' "
test_expect_success \
'adding another file' \
- 'cp "$TEST_DIRECTORY"/../README path0/README &&
+ 'cp "$TEST_DIRECTORY"/../README.md path0/README &&
git add path0/README &&
git commit -m add2 -a'
echo b > partA/outline.txt &&
echo c > papers/unsorted/_another &&
git add papers partA &&
- T1=`git write-tree` &&
+ T1=$(git write-tree) &&
git mv papers/unsorted/Thesis.pdf papers/all-papers/moo-blah.pdf &&
- T=`git write-tree` &&
+ T=$(git write-tree) &&
git ls-tree -r $T | verbose grep partA/outline.txt
'
git branch original HEAD
'
-orig_head=`git show-ref --hash --head HEAD`
+orig_head=$(git show-ref --hash --head HEAD)
test_expect_success 'rewrite submodule with another content' '
git filter-branch --tree-filter "test -d submod && {
mkdir submod &&
: > submod/file
} || :" HEAD &&
- test $orig_head != `git show-ref --hash --head HEAD`
+ test $orig_head != $(git show-ref --hash --head HEAD)
'
test_expect_success 'replace submodule revision' '
"if git ls-files --error-unmatch -- submod > /dev/null 2>&1
then git update-index --cacheinfo 160000 0123456789012345678901234567890123456789 submod
fi" HEAD &&
- test $orig_head != `git show-ref --hash --head HEAD`
+ test $orig_head != $(git show-ref --hash --head HEAD)
'
test_expect_success 'filter commit message without trailing newline' '
'
test_expect_success 'listing all tags in an empty tree should output nothing' '
- test `git tag -l | wc -l` -eq 0 &&
- test `git tag | wc -l` -eq 0
+ test $(git tag -l | wc -l) -eq 0 &&
+ test $(git tag | wc -l) -eq 0
'
test_expect_success 'looking for a tag in an empty tree should fail' \
'
test_expect_success 'listing all tags if one exists should output that tag' '
- test `git tag -l` = mytag &&
- test `git tag` = mytag
+ test $(git tag -l) = mytag &&
+ test $(git tag) = mytag
'
# pattern matching:
test_expect_success \
'listing a tag using a matching pattern should output that tag' \
- 'test `git tag -l mytag` = mytag'
+ 'test $(git tag -l mytag) = mytag'
# todo: git tag -l currently always returns zero; when fixed, change this test
test_expect_success \
test_expect_success \
'listing tags using a non-matching pattern should output nothing' \
- 'test `git tag -l xxx | wc -l` -eq 0'
+ 'test $(git tag -l xxx | wc -l) -eq 0'
# special cases for creating tags:
test_expect_success \
'trying to create a tag with a non-valid name should fail' '
- test `git tag -l | wc -l` -eq 1 &&
+ test $(git tag -l | wc -l) -eq 1 &&
test_must_fail git tag "" &&
test_must_fail git tag .othertag &&
test_must_fail git tag "other tag" &&
test_must_fail git tag "othertag^" &&
test_must_fail git tag "other~tag" &&
- test `git tag -l | wc -l` -eq 1
+ test $(git tag -l | wc -l) -eq 1
'
test_expect_success 'creating a tag using HEAD directly should succeed' '
echo "foo:initial" >expect &&
>actual &&
test_config pager.external "sed s/^/foo:/ >actual" &&
- test_terminal git --exec-path="`pwd`" external log --format=%s -1 &&
+ test_terminal git --exec-path="$(pwd)" external log --format=%s -1 &&
test_cmp expect actual
'
test_cmp expect actual &&
echo "b diff" >.gitattributes &&
echo "b:binQary" >expect &&
- git grep bin b | nul_to_q >actual &&
+ git grep bin b >actual.raw &&
+ nul_to_q <actual.raw >actual &&
test_cmp expect actual
'
sleep 1
}
-# It's fine if git update-index returns an error code other than one,
-# it'll be caught in the first test.
test_lazy_prereq UNTRACKED_CACHE '
- { git update-index --untracked-cache; ret=$?; } &&
+ { git update-index --test-untracked-cache; ret=$?; } &&
test $ret -ne 1
'
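The prerequisite now uses the dedicated probe option; a minimal sketch of checking support by hand (the exit status is expected to indicate whether the untracked cache can be used in this location):

    git update-index --test-untracked-cache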
test_done
fi
+test_expect_success 'core.untrackedCache is unset' '
+ test_must_fail git config --get core.untrackedCache
+'
+
test_expect_success 'setup' '
git init worktree &&
cd worktree &&
test_expect_success 'untracked cache is empty' '
test-dump-untracked-cache >../actual &&
- cat >../expect <<EOF &&
+ cat >../expect-empty <<EOF &&
info/exclude 0000000000000000000000000000000000000000
core.excludesfile 0000000000000000000000000000000000000000
exclude_per_dir .gitignore
flags 00000006
EOF
- test_cmp ../expect ../actual
+ test_cmp ../expect-empty ../actual
'
cat >../status.expect <<EOF &&
test_expect_success 'verify untracked cache dump (sparse/subdirs)' '
test-dump-untracked-cache >../actual &&
- cat >../expect <<EOF &&
+ cat >../expect-from-test-dump <<EOF &&
info/exclude 13263c0978fb9fad16b2d580fb800b6d811c3ff0
core.excludesfile 0000000000000000000000000000000000000000
exclude_per_dir .gitignore
/dtwo/ 0000000000000000000000000000000000000000 recurse check_only valid
two
EOF
- test_cmp ../expect ../actual
+ test_cmp ../expect-from-test-dump ../actual
'
test_expect_success 'test sparse status again with untracked cache and subdir' '
test_cmp ../status.expect ../status.actual
'
+test_expect_success '--no-untracked-cache removes the cache' '
+ git update-index --no-untracked-cache &&
+ test-dump-untracked-cache >../actual &&
+ echo "no untracked cache" >../expect-no-uc &&
+ test_cmp ../expect-no-uc ../actual
+'
+
+test_expect_success 'git status does not change anything' '
+ git status &&
+ test-dump-untracked-cache >../actual &&
+ test_cmp ../expect-no-uc ../actual
+'
+
+test_expect_success 'setting core.untrackedCache to true and using git status creates the cache' '
+ git config core.untrackedCache true &&
+ test-dump-untracked-cache >../actual &&
+ test_cmp ../expect-no-uc ../actual &&
+ git status &&
+ test-dump-untracked-cache >../actual &&
+ test_cmp ../expect-from-test-dump ../actual
+'
+
+test_expect_success 'using --no-untracked-cache does not fail when core.untrackedCache is true' '
+ git update-index --no-untracked-cache &&
+ test-dump-untracked-cache >../actual &&
+ test_cmp ../expect-no-uc ../actual &&
+ git update-index --untracked-cache &&
+ test-dump-untracked-cache >../actual &&
+ test_cmp ../expect-empty ../actual
+'
+
+test_expect_success 'setting core.untrackedCache to false and using git status removes the cache' '
+ git config core.untrackedCache false &&
+ test-dump-untracked-cache >../actual &&
+ test_cmp ../expect-empty ../actual &&
+ git status &&
+ test-dump-untracked-cache >../actual &&
+ test_cmp ../expect-no-uc ../actual
+'
+
+test_expect_success 'using --untracked-cache does not fail when core.untrackedCache is false' '
+ git update-index --untracked-cache &&
+ test-dump-untracked-cache >../actual &&
+ test_cmp ../expect-empty ../actual
+'
+
+test_expect_success 'setting core.untrackedCache to keep' '
+ git config core.untrackedCache keep &&
+ git update-index --untracked-cache &&
+ test-dump-untracked-cache >../actual &&
+ test_cmp ../expect-empty ../actual &&
+ git status &&
+ test-dump-untracked-cache >../actual &&
+ test_cmp ../expect-from-test-dump ../actual &&
+ git update-index --no-untracked-cache &&
+ test-dump-untracked-cache >../actual &&
+ test_cmp ../expect-no-uc ../actual &&
+ git update-index --force-untracked-cache &&
+ test-dump-untracked-cache >../actual &&
+ test_cmp ../expect-empty ../actual &&
+ git status &&
+ test-dump-untracked-cache >../actual &&
+ test_cmp ../expect-from-test-dump ../actual
+'
+
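The sequence above pins down how the configuration and the command-line switches interact; a hedged summary using the same commands:

    git config core.untrackedCache keep      # neither add nor drop the cache automatically
    git update-index --untracked-cache       # enable the extension explicitly
    git status                               # fills the cache while it is enabled
    git update-index --no-untracked-cache    # drop it again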
+test_expect_success 'test ident field is working' '
+ mkdir ../other_worktree &&
+ cp -R done dthree dtwo four three ../other_worktree &&
+ GIT_WORK_TREE=../other_worktree git status 2>../err &&
+ echo "warning: Untracked cache is disabled on this system or location." >../expect &&
+ test_cmp ../expect ../err
+'
+
test_done
test_expect_success '"soft" reset is allowed in bare' '
git reset --soft HEAD^ &&
- test "`git show --pretty=format:%s | head -n 1`" = "one"
+ test "$(git show --pretty=format:%s | head -n 1)" = "one"
'
test_done
)
'
+test_expect_success 'submodule helper list is not confused by common prefixes' '
+ mkdir -p dir1/b &&
+ (
+ cd dir1/b &&
+ git init &&
+ echo hi >testfile2 &&
+ git add . &&
+ git commit -m "test1"
+ ) &&
+ mkdir -p dir2/b &&
+ (
+ cd dir2/b &&
+ git init &&
+ echo hello >testfile1 &&
+ git add . &&
+ git commit -m "test2"
+ ) &&
+ git submodule add /dir1/b dir1/b &&
+ git submodule add /dir2/b dir2/b &&
+ git commit -m "first submodule commit" &&
+ git submodule--helper list dir1/b |cut -c51- >actual &&
+ echo "dir1/b" >expect &&
+ test_cmp expect actual
+'
+
test_done
compare_head()
{
- sha_master=`git rev-list --max-count=1 master`
- sha_head=`git rev-list --max-count=1 HEAD`
+ sha_master=$(git rev-list --max-count=1 master)
+ sha_head=$(git rev-list --max-count=1 HEAD)
test "$sha_master" = "$sha_head"
}
test_description='test clone --reference'
. ./test-lib.sh
-base_dir=`pwd`
+base_dir=$(pwd)
U=$base_dir/UPLOAD_LOG
--- /dev/null
+#!/bin/sh
+#
+# Copyright (c) 2012 Daniel Graña
+#
+
+test_description='Test submodules on detached working tree
+
+This test verifies that "git submodule" initialization, update and addition work
+on detached working trees
+'
+
+TEST_NO_CREATE_REPO=1
+. ./test-lib.sh
+
+test_expect_success 'submodule on detached working tree' '
+ git init --bare remote &&
+ test_create_repo bundle1 &&
+ (
+ cd bundle1 &&
+ test_commit "shoot" &&
+ git rev-parse --verify HEAD >../expect
+ ) &&
+ mkdir home &&
+ (
+ cd home &&
+ GIT_WORK_TREE="$(pwd)" &&
+ GIT_DIR="$(pwd)/.dotfiles" &&
+ export GIT_WORK_TREE GIT_DIR &&
+ git clone --bare ../remote .dotfiles &&
+ git submodule add ../bundle1 .vim/bundle/sogood &&
+ test_commit "sogood" &&
+ (
+ unset GIT_WORK_TREE GIT_DIR &&
+ cd .vim/bundle/sogood &&
+ git rev-parse --verify HEAD >actual &&
+ test_cmp ../../../../expect actual
+ ) &&
+ git push origin master
+ ) &&
+ mkdir home2 &&
+ (
+ cd home2 &&
+ git clone --bare ../remote .dotfiles &&
+ GIT_WORK_TREE="$(pwd)" &&
+ GIT_DIR="$(pwd)/.dotfiles" &&
+ export GIT_WORK_TREE GIT_DIR &&
+ git checkout master &&
+ git submodule update --init &&
+ (
+ unset GIT_WORK_TREE GIT_DIR &&
+ cd .vim/bundle/sogood &&
+ git rev-parse --verify HEAD >actual &&
+ test_cmp ../../../../expect actual
+ )
+ )
+'
+
+test_expect_success 'submodule on detached working pointed by core.worktree' '
+ mkdir home3 &&
+ (
+ cd home3 &&
+ GIT_DIR="$(pwd)/.dotfiles" &&
+ export GIT_DIR &&
+ git clone --bare ../remote "$GIT_DIR" &&
+ git config core.bare false &&
+ git config core.worktree .. &&
+ git checkout master &&
+ git submodule add ../bundle1 .vim/bundle/dupe &&
+ test_commit "dupe" &&
+ git push origin master
+ ) &&
+ (
+ cd home &&
+ GIT_DIR="$(pwd)/.dotfiles" &&
+ export GIT_DIR &&
+ git config core.bare false &&
+ git config core.worktree .. &&
+ git pull &&
+ git submodule update --init &&
+ test -f .vim/bundle/dupe/shoot.t
+ )
+'
+
+test_done
+++ /dev/null
-#!/bin/sh
-#
-# Copyright (c) 2012 Daniel Graña
-#
-
-test_description='Test submodules on detached working tree
-
-This test verifies that "git submodule" initialization, update and addition works
-on detahced working trees
-'
-
-TEST_NO_CREATE_REPO=1
-. ./test-lib.sh
-
-test_expect_success 'submodule on detached working tree' '
- git init --bare remote &&
- test_create_repo bundle1 &&
- (
- cd bundle1 &&
- test_commit "shoot" &&
- git rev-parse --verify HEAD >../expect
- ) &&
- mkdir home &&
- (
- cd home &&
- GIT_WORK_TREE="$(pwd)" &&
- GIT_DIR="$(pwd)/.dotfiles" &&
- export GIT_WORK_TREE GIT_DIR &&
- git clone --bare ../remote .dotfiles &&
- git submodule add ../bundle1 .vim/bundle/sogood &&
- test_commit "sogood" &&
- (
- unset GIT_WORK_TREE GIT_DIR &&
- cd .vim/bundle/sogood &&
- git rev-parse --verify HEAD >actual &&
- test_cmp ../../../../expect actual
- ) &&
- git push origin master
- ) &&
- mkdir home2 &&
- (
- cd home2 &&
- git clone --bare ../remote .dotfiles &&
- GIT_WORK_TREE="$(pwd)" &&
- GIT_DIR="$(pwd)/.dotfiles" &&
- export GIT_WORK_TREE GIT_DIR &&
- git checkout master &&
- git submodule update --init &&
- (
- unset GIT_WORK_TREE GIT_DIR &&
- cd .vim/bundle/sogood &&
- git rev-parse --verify HEAD >actual &&
- test_cmp ../../../../expect actual
- )
- )
-'
-
-test_expect_success 'submodule on detached working pointed by core.worktree' '
- mkdir home3 &&
- (
- cd home3 &&
- GIT_DIR="$(pwd)/.dotfiles" &&
- export GIT_DIR &&
- git clone --bare ../remote "$GIT_DIR" &&
- git config core.bare false &&
- git config core.worktree .. &&
- git checkout master &&
- git submodule add ../bundle1 .vim/bundle/dupe &&
- test_commit "dupe" &&
- git push origin master
- ) &&
- (
- cd home &&
- GIT_DIR="$(pwd)/.dotfiles" &&
- export GIT_DIR &&
- git config core.bare false &&
- git config core.worktree .. &&
- git pull &&
- git submodule update --init &&
- test -f .vim/bundle/dupe/shoot.t
- )
-'
-
-test_done
chmod +x "$HOOK"
commit_msg_is () {
- test "`git log --pretty=format:%s%b -1`" = "$1"
+ test "$(git log --pretty=format:%s%b -1)" = "$1"
}
test_expect_success 'hook edits commit message' '
echo "more" >> file &&
git add file &&
git commit -m "more" &&
- test "`git log -1 --pretty=format:%s`" = "message (no editor)"
+ test "$(git log -1 --pretty=format:%s)" = "message (no editor)"
'
echo "more" >> file &&
git add file &&
GIT_EDITOR="\"\$FAKE_EDITOR\"" git commit -e -m "more more" &&
- test "`git log -1 --pretty=format:%s`" = message
+ test "$(git log -1 --pretty=format:%s)" = message
'
echo "more" >> file &&
git add file &&
git commit -t "$(git rev-parse --git-dir)/template" &&
- test "`git log -1 --pretty=format:%s`" = template
+ test "$(git log -1 --pretty=format:%s)" = template
'
echo "more" >> file &&
git add file &&
(echo more | git commit -F -) &&
- test "`git log -1 --pretty=format:%s`" = "message (no editor)"
+ test "$(git log -1 --pretty=format:%s)" = "message (no editor)"
'
echo "more" >> file &&
git add file &&
(echo more more | GIT_EDITOR="\"\$FAKE_EDITOR\"" git commit -e -F -) &&
- test "`git log -1 --pretty=format:%s`" = message
+ test "$(git log -1 --pretty=format:%s)" = message
'
test_expect_success 'with hook (-C)' '
- head=`git rev-parse HEAD` &&
+ head=$(git rev-parse HEAD) &&
echo "more" >> file &&
git add file &&
git commit -C $head &&
- test "`git log -1 --pretty=format:%s`" = "$head (no editor)"
+ test "$(git log -1 --pretty=format:%s)" = "$head (no editor)"
'
echo "more more" >> file &&
git add file &&
GIT_EDITOR="\"\$FAKE_EDITOR\"" git commit &&
- test "`git log -1 --pretty=format:%s`" = default
+ test "$(git log -1 --pretty=format:%s)" = default
'
test_expect_success 'with hook (--amend)' '
- head=`git rev-parse HEAD` &&
+ head=$(git rev-parse HEAD) &&
echo "more" >> file &&
git add file &&
GIT_EDITOR="\"\$FAKE_EDITOR\"" git commit --amend &&
- test "`git log -1 --pretty=format:%s`" = "$head"
+ test "$(git log -1 --pretty=format:%s)" = "$head"
'
test_expect_success 'with hook (-c)' '
- head=`git rev-parse HEAD` &&
+ head=$(git rev-parse HEAD) &&
echo "more" >> file &&
git add file &&
GIT_EDITOR="\"\$FAKE_EDITOR\"" git commit -c $head &&
- test "`git log -1 --pretty=format:%s`" = "$head"
+ test "$(git log -1 --pretty=format:%s)" = "$head"
'
git commit -m other &&
git checkout - &&
git merge --no-ff other &&
- test "`git log -1 --pretty=format:%s`" = "merge (no editor)"
+ test "$(git log -1 --pretty=format:%s)" = "merge (no editor)"
'
test_expect_success 'with hook and editor (merge)' '
git commit -m other &&
git checkout - &&
env GIT_EDITOR="\"\$FAKE_EDITOR\"" git merge --no-ff -e other &&
- test "`git log -1 --pretty=format:%s`" = "merge"
+ test "$(git log -1 --pretty=format:%s)" = "merge"
'
cat > "$HOOK" <<'EOF'
test_expect_success 'with failing hook' '
test_when_finished "git checkout -f master" &&
- head=`git rev-parse HEAD` &&
+ head=$(git rev-parse HEAD) &&
echo "more" >> file &&
git add file &&
test_must_fail env GIT_EDITOR="\"\$FAKE_EDITOR\"" git commit -c $head
test_expect_success 'with failing hook (--no-verify)' '
test_when_finished "git checkout -f master" &&
- head=`git rev-parse HEAD` &&
+ head=$(git rev-parse HEAD) &&
echo "more" >> file &&
git add file &&
test_must_fail env GIT_EDITOR="\"\$FAKE_EDITOR\"" git commit --no-verify -c $head
test_cmp expected actual
'
+test_expect_success 'in-place editing with basic patch' '
+ cat basic_message >message &&
+ cat basic_patch >>message &&
+ cat basic_message >expected &&
+ echo >>expected &&
+ cat basic_patch >>expected &&
+ git interpret-trailers --in-place message &&
+ test_cmp expected message
+'
+
+test_expect_success 'in-place editing with additional trailer' '
+ cat basic_message >message &&
+ cat basic_patch >>message &&
+ cat basic_message >expected &&
+ echo >>expected &&
+ cat >>expected <<-\EOF &&
+ Reviewed-by: Alice
+ EOF
+ cat basic_patch >>expected &&
+ git interpret-trailers --trailer "Reviewed-by: Alice" --in-place message &&
+ test_cmp expected message
+'
+
+test_expect_success 'in-place editing on stdin disallowed' '
+ test_must_fail git interpret-trailers --trailer "Reviewed-by: Alice" --in-place < basic_message
+'
+
+test_expect_success 'in-place editing on non-existing file' '
+ test_must_fail git interpret-trailers --trailer "Reviewed-by: Alice" --in-place nonexisting &&
+ test_path_is_missing nonexisting
+'
+
+test_expect_success POSIXPERM,SANITY "in-place editing doesn't clobber original file on error" '
+ cat basic_message >message &&
+ chmod -r message &&
+ test_must_fail git interpret-trailers --trailer "Reviewed-by: Alice" --in-place message &&
+ chmod +r message &&
+ test_cmp message basic_message
+'
+
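These tests cover the new --in-place mode mentioned in the release notes; a minimal usage sketch (the file name is hypothetical):

    git interpret-trailers --trailer "Reviewed-by: Alice" --in-place commit-msg.txt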
test_expect_success 'using "where = before"' '
git config trailer.bug.where "before" &&
cat complex_message_body >expected &&
--- /dev/null
+#!/bin/sh
+#
+# Copyright (c) 2016 Dan Aloni
+# Copyright (c) 2016 Jeff King
+#
+
+test_description='per-repo forced setting of email address'
+
+. ./test-lib.sh
+
+test_expect_success 'setup a likely user.useConfigOnly use case' '
+ # we want to make sure a reflog is written, since that needs
+ # a non-strict ident. So be sure we have an actual commit.
+ test_commit foo &&
+
+ sane_unset GIT_AUTHOR_NAME GIT_AUTHOR_EMAIL &&
+ sane_unset GIT_COMMITTER_NAME GIT_COMMITTER_EMAIL &&
+ git config user.name "test" &&
+ git config --global user.useConfigOnly true
+'
+
+test_expect_success 'fails committing if clone email is not set' '
+ test_must_fail git commit --allow-empty -m msg
+'
+
+test_expect_success 'fails committing if clone email is not set, but EMAIL set' '
+ test_must_fail env EMAIL=test@fail.com git commit --allow-empty -m msg
+'
+
+test_expect_success 'succeeds committing if clone email is set' '
+ test_config user.email "test@ok.com" &&
+ git commit --allow-empty -m msg
+'
+
+test_expect_success 'succeeds cloning if global email is not set' '
+ git clone . clone
+'
+
+test_done
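As a hedged illustration of what the tests above verify (the address is a placeholder): with user.useConfigOnly set, Git refuses to guess an identity, so committing succeeds only once one is configured explicitly:

    git config --global user.useConfigOnly true
    git config user.email "you@example.com"
    git commit --allow-empty -m msg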
git add c$i.c &&
git commit -m c$i &&
git tag c$i &&
- i=`expr $i + 1` || return 1
+ i=$(expr $i + 1) || return 1
done
'
while test $i -le 30
do
refs="$refs c$i"
- i=`expr $i + 1`
+ i=$(expr $i + 1)
done &&
git merge $refs &&
test "$(git rev-parse c1)" != "$(git rev-parse HEAD)" &&
while test $i -le 30
do
test "$(git rev-parse c$i)" = "$(git rev-parse HEAD^$i)" &&
- i=`expr $i + 1` || return 1
+ i=$(expr $i + 1) || return 1
done &&
git diff --exit-code &&
i=1 &&
while test $i -le 30
do
test -f c$i.c &&
- i=`expr $i + 1` || return 1
+ i=$(expr $i + 1) || return 1
done
'
test_expect_success 'loose objects in alternate ODB are not repacked' '
mkdir alt_objects &&
- echo `pwd`/alt_objects > .git/objects/info/alternates &&
+ echo $(pwd)/alt_objects > .git/objects/info/alternates &&
echo content3 > file3 &&
objsha1=$(GIT_OBJECT_DIRECTORY=alt_objects git hash-object -w file3) &&
git add file3 &&
'
test_expect_success 'local packed unreachable obs that exist in alternate ODB are not loosened' '
- echo `pwd`/alt_objects > .git/objects/info/alternates &&
+ echo $(pwd)/alt_objects > .git/objects/info/alternates &&
echo "$csha1" | git pack-objects --non-empty --all --reflog pack &&
rm -f .git/objects/pack/* &&
mv pack-* .git/objects/pack/ &&
test_expect_success PERL,SYMLINKS 'difftool --dir-diff --symlink without unstaged changes' '
cat >expect <<-EOF &&
file
- $(pwd)/file
+ $PWD/file
file2
- $(pwd)/file2
+ $PWD/file2
sub/sub
- $(pwd)/sub/sub
+ $PWD/sub/sub
EOF
git difftool --dir-diff --symlink \
--extcmd "./.git/CHECK_SYMLINKS" branch HEAD &&
run_dir_diff_test 'difftool --dir-diff syncs worktree with unstaged change' '
test_when_finished git reset --hard &&
echo "orig content" >file &&
- git difftool -d $symlinks --extcmd "$(pwd)/modify-right-file" branch &&
+ git difftool -d $symlinks --extcmd "$PWD/modify-right-file" branch &&
echo "new content" >expect &&
test_cmp expect file
'
run_dir_diff_test 'difftool --dir-diff syncs worktree without unstaged change' '
test_when_finished git reset --hard &&
- git difftool -d $symlinks --extcmd "$(pwd)/modify-right-file" branch &&
+ git difftool -d $symlinks --extcmd "$PWD/modify-right-file" branch &&
echo "new content" >expect &&
test_cmp expect file
'
test_expect_success PERL 'difftool --no-symlinks does not overwrite working tree file ' '
echo "orig content" >file &&
- git difftool --dir-diff --no-symlinks --extcmd "$(pwd)/modify-file" branch &&
+ git difftool --dir-diff --no-symlinks --extcmd "$PWD/modify-file" branch &&
echo "new content" >expect &&
test_cmp expect file
'
TMPDIR=$TRASH_DIRECTORY &&
export TMPDIR &&
echo "orig content" >file &&
- test_must_fail git difftool --dir-diff --no-symlinks --extcmd "$(pwd)/modify-both-files" branch &&
+ test_must_fail git difftool --dir-diff --no-symlinks --extcmd "$PWD/modify-both-files" branch &&
echo "wt content" >expect &&
test_cmp expect file &&
echo "tmp content" >expect &&
} >non/expect.full &&
echo file2:world >non/expect.sub &&
(
- GIT_CEILING_DIRECTORIES="$(pwd)/non/git" &&
+ GIT_CEILING_DIRECTORIES="$(pwd)/non" &&
export GIT_CEILING_DIRECTORIES &&
cd non/git &&
test_must_fail git grep o &&
git grep --no-index o >../actual.full &&
- test_cmp ../expect.full ../actual.full
+ test_cmp ../expect.full ../actual.full &&
cd sub &&
test_must_fail git grep o &&
git grep --no-index o >../../actual.sub &&
echo ".*o*" >non/git/.gitignore &&
(
- GIT_CEILING_DIRECTORIES="$(pwd)/non/git" &&
+ GIT_CEILING_DIRECTORIES="$(pwd)/non" &&
export GIT_CEILING_DIRECTORIES &&
cd non/git &&
test_must_fail git grep o &&
test_cmp ../expect.full ../actual.full &&
{
- echo ".gitignore:.*o*"
+ echo ".gitignore:.*o*" &&
cat ../expect.full
} >../expect.with.ignored &&
git grep --no-index --no-exclude o >../actual.full &&
)
'
+test_expect_success 'outside of git repository with fallbackToNoIndex' '
+ rm -fr non &&
+ mkdir -p non/git/sub &&
+ echo hello >non/git/file1 &&
+ echo world >non/git/sub/file2 &&
+ cat <<-\EOF >non/expect.full &&
+ file1:hello
+ sub/file2:world
+ EOF
+ echo file2:world >non/expect.sub &&
+ (
+ GIT_CEILING_DIRECTORIES="$(pwd)/non" &&
+ export GIT_CEILING_DIRECTORIES &&
+ cd non/git &&
+ test_must_fail git -c grep.fallbackToNoIndex=false grep o &&
+ git -c grep.fallbackToNoIndex=true grep o >../actual.full &&
+ test_cmp ../expect.full ../actual.full &&
+ cd sub &&
+ test_must_fail git -c grep.fallbackToNoIndex=false grep o &&
+ git -c grep.fallbackToNoIndex=true grep o >../../actual.sub &&
+ test_cmp ../../expect.sub ../../actual.sub
+ ) &&
+
+ echo ".*o*" >non/git/.gitignore &&
+ (
+ GIT_CEILING_DIRECTORIES="$(pwd)/non" &&
+ export GIT_CEILING_DIRECTORIES &&
+ cd non/git &&
+ test_must_fail git -c grep.fallbackToNoIndex=false grep o &&
+ git -c grep.fallbackToNoIndex=true grep --exclude-standard o >../actual.full &&
+ test_cmp ../expect.full ../actual.full &&
+
+ {
+ echo ".gitignore:.*o*" &&
+ cat ../expect.full
+ } >../expect.with.ignored &&
+ git -c grep.fallbackToNoIndex grep --no-exclude o >../actual.full &&
+ test_cmp ../expect.with.ignored ../actual.full
+ )
+'
+
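A short sketch of the configuration the new test covers (pattern illustrative): outside any repository, "git grep" only falls back to its --no-index behaviour when grep.fallbackToNoIndex allows it:

    git -c grep.fallbackToNoIndex=true grep hello
    git -c grep.fallbackToNoIndex=false grep hello   # expected to fail outside a repository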
test_expect_success 'inside git repository but with --no-index' '
rm -fr is &&
mkdir -p is/git/sub &&
'
test_expect_success 'blame to a commit with no author name' '
- TREE=`git rev-parse HEAD:` &&
+ TREE=$(git rev-parse HEAD:) &&
cat >badcommit <<EOF &&
tree $TREE
author <noname> 1234567890 +0000
some message
EOF
- COMMIT=`git hash-object -t commit -w badcommit` &&
+ COMMIT=$(git hash-object -t commit -w badcommit) &&
git --no-pager blame $COMMIT -- uno >/dev/null
'
summary $SJIS_MSG
EOF
+filter_author_summary () {
+ sed -n -e '/^author /p' -e '/^summary /p' "$@"
+}
+
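The helper above replaces the per-test pipelines; as a sketch, it is equivalent to trimming blame's incremental output down to the author and summary lines:

    git blame --incremental file | sed -n -e '/^author /p' -e '/^summary /p'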
test_expect_success !MINGW \
'blame respects i18n.commitencoding' '
- git blame --incremental file | \
- egrep "^(author|summary) " > actual &&
- test_cmp actual expected
+ git blame --incremental file >output &&
+ filter_author_summary output >actual &&
+ test_cmp expected actual
'
cat >expected <<EOF
test_expect_success !MINGW \
'blame respects i18n.logoutputencoding' '
git config i18n.logoutputencoding eucJP &&
- git blame --incremental file | \
- egrep "^(author|summary) " > actual &&
- test_cmp actual expected
+ git blame --incremental file >output &&
+ filter_author_summary output >actual &&
+ test_cmp expected actual
'
cat >expected <<EOF
test_expect_success !MINGW \
'blame respects --encoding=UTF-8' '
- git blame --incremental --encoding=UTF-8 file | \
- egrep "^(author|summary) " > actual &&
- test_cmp actual expected
+ git blame --incremental --encoding=UTF-8 file >output &&
+ filter_author_summary output >actual &&
+ test_cmp expected actual
'
cat >expected <<EOF
test_expect_success !MINGW \
'blame respects --encoding=none' '
- git blame --incremental --encoding=none file | \
- egrep "^(author|summary) " > actual &&
- test_cmp actual expected
+ git blame --incremental --encoding=none file >output &&
+ filter_author_summary output >actual &&
+ test_cmp expected actual
'
test_done
}
test_expect_success $PREREQ 'Extract patches' '
- patches=`git format-patch -s --cc="One <one@example.com>" --cc=two@example.com -n HEAD^1`
+ patches=$(git format-patch -s --cc="One <one@example.com>" --cc=two@example.com -n HEAD^1)
'
# Test no confirm early to ensure remaining tests will not hang
'
test_expect_success $PREREQ 'patches To headers are used by default' '
- patch=`git format-patch -1 --to="bodies@example.com"` &&
+ patch=$(git format-patch -1 --to="bodies@example.com") &&
test_when_finished "rm $patch" &&
git send-email \
--dry-run \
'
test_expect_success $PREREQ 'patches To headers are appended to' '
- patch=`git format-patch -1 --to="bodies@example.com"` &&
+ patch=$(git format-patch -1 --to="bodies@example.com") &&
test_when_finished "rm $patch" &&
git send-email \
--dry-run \
'
test_expect_success $PREREQ 'To headers from files reset each patch' '
- patch1=`git format-patch -1 --to="bodies@example.com"` &&
- patch2=`git format-patch -1 --to="other@example.com" HEAD~` &&
+ patch1=$(git format-patch -1 --to="bodies@example.com") &&
+ patch2=$(git format-patch -1 --to="other@example.com" HEAD~) &&
test_when_finished "rm $patch1 && rm $patch2" &&
git send-email \
--dry-run \
clean_fake_sendmail &&
rm -fr outdir &&
git format-patch --cover-letter -2 -o outdir &&
- cover=`echo outdir/0000-*.patch` &&
+ cover=$(echo outdir/0000-*.patch) &&
mv $cover cover-to-edit.patch &&
perl -pe "s/^From:/$header: extra\@address.com\nFrom:/" cover-to-edit.patch >"$cover" &&
git send-email \
--- /dev/null
+#!/bin/sh
+
+test_description='help.autocorrect finding a match'
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ # An alias
+ git config alias.lgf "log --format=%s --first-parent" &&
+
+ # A random user-defined command
+ write_script git-distimdistim <<-EOF &&
+ echo distimdistim was called
+ EOF
+
+ PATH="$PATH:." &&
+ export PATH &&
+
+ git commit --allow-empty -m "a single log entry" &&
+
+ # Sanity check
+ git lgf >actual &&
+ echo "a single log entry" >expect &&
+ test_cmp expect actual &&
+
+ git distimdistim >actual &&
+ echo "distimdistim was called" >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'autocorrect showing candidates' '
+ git config help.autocorrect 0 &&
+
+ test_must_fail git lfg 2>actual &&
+ sed -e "1,/^Did you mean this/d" actual | grep lgf &&
+
+ test_must_fail git distimdist 2>actual &&
+ sed -e "1,/^Did you mean this/d" actual | grep distimdistim
+'
+
+test_expect_success 'autocorrect running commands' '
+ git config help.autocorrect -1 &&
+
+ git lfg >actual &&
+ echo "a single log entry" >expect &&
+ test_cmp expect actual &&
+
+ git distimdist >actual &&
+ echo "distimdistim was called" >expect &&
+ test_cmp expect actual
+'
+
+test_done
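A hedged sketch of the help.autocorrect values exercised above: 0 only lists candidates for a mistyped command, while a negative value runs the single best match immediately:

    git config help.autocorrect 0    # list suggestions, do not run them
    git config help.autocorrect -1   # run the unique suggestion right away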
name='remove executable bit from a file'
-test_expect_success "$name" '
+test_expect_success POSIXPERM "$name" '
rm -f "$GIT_DIR"/index &&
git checkout -f -b mybranch5 ${remotes_git_svn} &&
chmod -x exec.sh &&
name='add executable bit back file'
-test_expect_success "$name" '
+test_expect_success POSIXPERM "$name" '
chmod +x exec.sh &&
git update-index exec.sh &&
git commit -m "$name" &&
name='executable file becomes a symlink to file'
-test_expect_success "$name" '
+test_expect_success SYMLINKS "$name" '
rm exec.sh &&
ln -s file exec.sh &&
git update-index exec.sh &&
name='new symlink is added to a file that was also just made executable'
-test_expect_success "$name" '
+test_expect_success POSIXPERM,SYMLINKS "$name" '
chmod +x file &&
ln -s file exec-2.sh &&
git update-index --add file exec-2.sh &&
test -h "$SVN_TREE"/exec-2.sh'
name='modify a symlink to become a file'
-test_expect_success "$name" '
+test_expect_success POSIXPERM,SYMLINKS "$name" '
echo git help >help &&
rm exec-2.sh &&
cp help exec-2.sh &&
name="commit with UTF-8 message: locale: $GIT_SVN_LC_ALL"
LC_ALL="$GIT_SVN_LC_ALL"
export LC_ALL
-test_expect_success UTF8 "$name" "
+# This test relies on the previous test, hence requires POSIXPERM,SYMLINKS
+test_expect_success UTF8,POSIXPERM,SYMLINKS "$name" "
echo '# hello' >> exec-2.sh &&
git update-index exec-2.sh &&
git commit -m 'éï∏' &&
tree 8f51f74cf0163afc9ad68a4b1537288c4558b5a4
EOF
-test_expect_success "$name" "test_cmp a expected"
+test_expect_success POSIXPERM,SYMLINKS "$name" "test_cmp a expected"
test_expect_success 'exit if remote refs are ambiguous' "
git config --add svn-remote.svn.fetch \
git update-index --add d &&
git commit -m '/bar/d should be in the log' &&
git svn dcommit -i bar &&
- test -z \"\`git diff refs/heads/my-bar refs/remotes/bar\`\" &&
+ test -z \"\$(git diff refs/heads/my-bar refs/remotes/bar)\" &&
mkdir newdir &&
echo new > newdir/dir &&
git update-index --add newdir/dir &&
git commit -m 'add a new directory' &&
git svn dcommit -i bar &&
- test -z \"\`git diff refs/heads/my-bar refs/remotes/bar\`\" &&
+ test -z \"\$(git diff refs/heads/my-bar refs/remotes/bar)\" &&
echo foo >> newdir/dir &&
git update-index newdir/dir &&
git commit -m 'modify a file in new directory' &&
git svn dcommit -i bar &&
- test -z \"\`git diff refs/heads/my-bar refs/remotes/bar\`\"
+ test -z \"\$(git diff refs/heads/my-bar refs/remotes/bar)\"
"
test_expect_success 'dcommit should not fail with a touched file' '
git update-index d &&
git commit -m 'update /bar/d' &&
git svn set-tree -i bar HEAD &&
- test -z \"\`git diff refs/heads/my-bar refs/remotes/bar\`\"
+ test -z \"\$(git diff refs/heads/my-bar refs/remotes/bar)\"
"
test_expect_success 'git-svn works in a bare repository' '
EOF
printf "Hello\r\nWorld\r\n" > crlf
- a_crlf=`git hash-object -w crlf`
+ a_crlf=$(git hash-object -w crlf)
printf "Hello\rWorld\r" > cr
- a_cr=`git hash-object -w cr`
+ a_cr=$(git hash-object -w cr)
printf "Hello\nWorld\n" > lf
- a_lf=`git hash-object -w lf`
+ a_lf=$(git hash-object -w lf)
printf "Hello\r\nWorld" > ne_crlf
- a_ne_crlf=`git hash-object -w ne_crlf`
+ a_ne_crlf=$(git hash-object -w ne_crlf)
printf "Hello\nWorld" > ne_lf
- a_ne_lf=`git hash-object -w ne_lf`
+ a_ne_lf=$(git hash-object -w ne_lf)
printf "Hello\rWorld" > ne_cr
- a_ne_cr=`git hash-object -w ne_cr`
+ a_ne_cr=$(git hash-object -w ne_cr)
touch empty
- a_empty=`git hash-object -w empty`
+ a_empty=$(git hash-object -w empty)
printf "\n" > empty_lf
- a_empty_lf=`git hash-object -w empty_lf`
+ a_empty_lf=$(git hash-object -w empty_lf)
printf "\r" > empty_cr
- a_empty_cr=`git hash-object -w empty_cr`
+ a_empty_cr=$(git hash-object -w empty_cr)
printf "\r\n" > empty_crlf
- a_empty_crlf=`git hash-object -w empty_crlf`
+ a_empty_crlf=$(git hash-object -w empty_crlf)
svn_cmd import --no-auto-props -m 'import for git svn' . "$svnrepo" >/dev/null
cd ..
git pull . ${remotes_git_svn}'
expect='/* $Id$ */'
-got="`sed -ne 2p kw.c`"
+got="$(sed -ne 2p kw.c)"
test_expect_success 'raw $Id$ found in kw.c' "test '$expect' = '$got'"
test_expect_success "propset CR on crlf files" '
cd test_wc
printf '$Id$\rHello\rWorld\r' > cr
printf '$Id$\rHello\rWorld' > ne_cr
- a_cr=`printf '$Id$\r\nHello\r\nWorld\r\n' | git hash-object --stdin`
- a_ne_cr=`printf '$Id$\r\nHello\r\nWorld' | git hash-object --stdin`
+ a_cr=$(printf '$Id$\r\nHello\r\nWorld\r\n' | git hash-object --stdin)
+ a_ne_cr=$(printf '$Id$\r\nHello\r\nWorld' | git hash-object --stdin)
test_expect_success 'Set CRLF on cr files' \
'svn_cmd propset svn:eol-style CRLF cr &&
svn_cmd propset svn:eol-style CRLF ne_cr &&
test_expect_success 'fetch and pull latest from svn' \
'git svn fetch && git pull . ${remotes_git_svn}'
-b_cr="`git hash-object cr`"
-b_ne_cr="`git hash-object ne_cr`"
+b_cr="$(git hash-object cr)"
+b_ne_cr="$(git hash-object ne_cr)"
test_expect_success 'CRLF + $Id$' "test '$a_cr' = '$b_cr'"
test_expect_success 'CRLF + $Id$ (no newline)' "test '$a_ne_cr' = '$b_ne_cr'"
test_expect_success 'init and fetch a moved directory' '
git svn init --minimize-url -i thunk "$svnrepo"/thunk &&
git svn fetch -i thunk &&
- test "`git rev-parse --verify refs/remotes/thunk@2`" \
- = "`git rev-parse --verify refs/remotes/thunk~1`" &&
- test "`git cat-file blob refs/remotes/thunk:readme |\
- sed -n -e "3p"`" = goodbye &&
- test -z "`git config --get svn-remote.svn.fetch \
- "^trunk:refs/remotes/thunk@2$"`"
+ test "$(git rev-parse --verify refs/remotes/thunk@2)" \
+ = "$(git rev-parse --verify refs/remotes/thunk~1)" &&
+ test "$(git cat-file blob refs/remotes/thunk:readme |\
+ sed -n -e "3p")" = goodbye &&
+ test -z "$(git config --get svn-remote.svn.fetch \
+ "^trunk:refs/remotes/thunk@2$")"
'
test_expect_success 'init and fetch from one svn-remote' '
git config --add svn-remote.svn.fetch \
thunk:refs/remotes/svn/thunk &&
git svn fetch -i svn/thunk &&
- test "`git rev-parse --verify refs/remotes/svn/trunk`" \
- = "`git rev-parse --verify refs/remotes/svn/thunk~1`" &&
- test "`git cat-file blob refs/remotes/svn/thunk:readme |\
- sed -n -e "3p"`" = goodbye
+ test "$(git rev-parse --verify refs/remotes/svn/trunk)" \
+ = "$(git rev-parse --verify refs/remotes/svn/thunk~1)" &&
+ test "$(git cat-file blob refs/remotes/svn/thunk:readme |\
+ sed -n -e "3p")" = goodbye
'
test_expect_success 'follow deleted parent' '
junk:refs/remotes/svn/junk &&
git svn fetch -i svn/thunk &&
git svn fetch -i svn/junk &&
- test -z "`git diff svn/junk svn/trunk`" &&
- test "`git merge-base svn/junk svn/trunk`" \
- = "`git rev-parse svn/trunk`"
+ test -z "$(git diff svn/junk svn/trunk)" &&
+ test "$(git merge-base svn/junk svn/trunk)" \
+ = "$(git rev-parse svn/trunk)"
'
test_expect_success 'follow larger parent' '
git rev-parse --verify refs/remotes/larger &&
git rev-parse --verify \
refs/remotes/larger-parent &&
- test "`git merge-base \
+ test "$(git merge-base \
refs/remotes/larger-parent \
- refs/remotes/larger`" = \
- "`git rev-parse refs/remotes/larger`"
+ refs/remotes/larger)" = \
+ "$(git rev-parse refs/remotes/larger)"
'
test_expect_success 'follow higher-level parent' '
svn_cmd rm -m "remove glob" "$svnrepo"/glob &&
git svn init --minimize-url -i glob "$svnrepo"/glob &&
git svn fetch -i glob &&
- test "`git cat-file blob refs/remotes/glob:blob/bye`" = hi &&
- test "`git ls-tree refs/remotes/glob | wc -l `" -eq 1
+ test "$(git cat-file blob refs/remotes/glob:blob/bye)" = hi &&
+ test "$(git ls-tree refs/remotes/glob | wc -l )" -eq 1
'
# ref: r9270 of the Subversion repository: (http://svn.collab.net/repos/svn)
git svn init --minimize-url -i r9270-t \
"$svnrepo"/r9270/trunk/subversion/bindings/swig/perl/native/t &&
git svn fetch -i r9270-t &&
- test `git rev-list r9270-t | wc -l` -eq 2 &&
- test "`git ls-tree --name-only r9270-t~1`" = \
- "`git ls-tree --name-only r9270-t`"
+ test $(git rev-list r9270-t | wc -l) -eq 2 &&
+ test "$(git ls-tree --name-only r9270-t~1)" = \
+ "$(git ls-tree --name-only r9270-t)"
'
test_expect_success "track initial change if it was only made to parent" '
git svn init --minimize-url -i r9270-d \
"$svnrepo"/r9270/drunk/subversion/bindings/swig/perl/native/t &&
git svn fetch -i r9270-d &&
- test `git rev-list r9270-d | wc -l` -eq 3 &&
- test "`git ls-tree --name-only r9270-t`" = \
- "`git ls-tree --name-only r9270-d`" &&
- test "`git rev-parse r9270-t`" = \
- "`git rev-parse r9270-d~1`"
+ test $(git rev-list r9270-d | wc -l) -eq 3 &&
+ test "$(git ls-tree --name-only r9270-t)" = \
+ "$(git ls-tree --name-only r9270-d)" &&
+ test "$(git rev-parse r9270-t)" = \
+ "$(git rev-parse r9270-d~1)"
'
test_expect_success "follow-parent is atomic" '
git svn fetch -i stunk &&
git svn init --minimize-url -i flunked "$svnrepo"/flunked &&
git svn fetch -i flunked &&
- test "`git rev-parse --verify refs/remotes/flunk@18`" \
- = "`git rev-parse --verify refs/remotes/stunk`" &&
- test "`git rev-parse --verify refs/remotes/flunk~1`" \
- = "`git rev-parse --verify refs/remotes/stunk`" &&
- test "`git rev-parse --verify refs/remotes/flunked~1`" \
- = "`git rev-parse --verify refs/remotes/stunk~1`"
+ test "$(git rev-parse --verify refs/remotes/flunk@18)" \
+ = "$(git rev-parse --verify refs/remotes/stunk)" &&
+ test "$(git rev-parse --verify refs/remotes/flunk~1)" \
+ = "$(git rev-parse --verify refs/remotes/stunk)" &&
+ test "$(git rev-parse --verify refs/remotes/flunked~1)" \
+ = "$(git rev-parse --verify refs/remotes/stunk~1)"
'
test_expect_success "track multi-parent paths" '
svn_cmd cp -m "resurrect /glob" "$svnrepo"/r9270 "$svnrepo"/glob &&
git svn multi-fetch &&
- test `git cat-file commit refs/remotes/glob | \
- grep "^parent " | wc -l` -eq 2
+ test $(git cat-file commit refs/remotes/glob | \
+ grep "^parent " | wc -l) -eq 2
'
test_expect_success "multi-fetch continues to work" "
git commit -a -m "another"
'
-head=`git rev-parse --verify HEAD^0`
-prev=`git rev-parse --verify HEAD^1`
+head=$(git rev-parse --verify HEAD^0)
+prev=$(git rev-parse --verify HEAD^1)
# the internals of the commit-diff command are the same as the regular
# commit, so only a basic test of functionality is needed since we've
git update-ref -d refs/${remotes_git_svn} refs/${remotes_git_svn}
'
-head=`git rev-parse --verify refs/heads/git-svn-HEAD^0`
+head=$(git rev-parse --verify refs/heads/git-svn-HEAD^0)
test_expect_success 'git-svn-HEAD is a real HEAD' "test -n '$head'"
-svnrepo_escaped=`echo $svnrepo | sed 's/ /%20/'`
+svnrepo_escaped=$(echo $svnrepo | sed 's/ /%20/')
test_expect_success 'initialize old-style (v0) git svn layout' '
mkdir -p "$GIT_DIR"/git-svn/info "$GIT_DIR"/svn/info &&
git rev-parse --verify refs/${remotes_git_svn}^0 &&
git rev-parse --verify refs/remotes/svn^0 &&
test "$(git config --get svn-remote.svn.url)" = "$svnrepo_escaped" &&
- test `git config --get svn-remote.svn.fetch` = \
+ test $(git config --get svn-remote.svn.fetch) = \
":refs/${remotes_git_svn}"
'
git svn init "$svnrepo" -T trunk -t tags -b branches &&
git config --get-all svn-remote.svn.fetch > fetch.out &&
grep "^trunk:refs/remotes/origin/trunk$" fetch.out &&
- test -n "`git config --get svn-remote.svn.branches \
- "^branches/\*:refs/remotes/origin/\*$"`" &&
- test -n "`git config --get svn-remote.svn.tags \
- "^tags/\*:refs/remotes/origin/tags/\*$"`" &&
+ test -n "$(git config --get svn-remote.svn.branches \
+ "^branches/\*:refs/remotes/origin/\*$")" &&
+ test -n "$(git config --get svn-remote.svn.tags \
+ "^tags/\*:refs/remotes/origin/tags/\*$")" &&
git config --unset svn-remote.svn.branches \
"^branches/\*:refs/remotes/origin/\*$" &&
git config --unset svn-remote.svn.tags \
for i in trunk a b tags/0.1 tags/0.2 tags/0.3; do
git rev-parse --verify refs/remotes/origin/\$i^0 >> refs.out || exit 1;
done &&
- test -z \"\`sort < refs.out | uniq -d\`\" &&
+ test -z \"\$(sort < refs.out | uniq -d)\" &&
for i in trunk a b tags/0.1 tags/0.2 tags/0.3; do
for j in trunk a b tags/0.1 tags/0.2 tags/0.3; do
if test \$j != \$i; then continue; fi
- test -z \"\`git diff refs/remotes/origin/\$i \
- refs/remotes/origin/\$j\`\" ||exit 1; done; done
+ test -z \"\$(git diff refs/remotes/origin/\$i \
+ refs/remotes/origin/\$j)\" ||exit 1; done; done
"
test_expect_success 'migrate --minimize on old inited layout' '
git config --unset-all svn-remote.svn.fetch &&
git config --unset-all svn-remote.svn.url &&
rm -rf "$GIT_DIR"/svn &&
- for i in `cat fetch.out`; do
- path=`expr $i : "\([^:]*\):.*$"`
- ref=`expr $i : "[^:]*:\(refs/remotes/.*\)$"`
+ for i in $(cat fetch.out); do
+ path=$(expr $i : "\([^:]*\):.*$")
+ ref=$(expr $i : "[^:]*:\(refs/remotes/.*\)$")
if test -z "$ref"; then continue; fi
if test -n "$path"; then path="/$path"; fi
( mkdir -p "$GIT_DIR"/svn/$ref/info/ &&
echo "$svnrepo"$path > "$GIT_DIR"/svn/$ref/info/url ) || exit 1;
done &&
git svn migrate --minimize &&
- test -z "`git config -l | grep "^svn-remote\.git-svn\."`" &&
+ test -z "$(git config -l | grep "^svn-remote\.git-svn\.")" &&
git config --get-all svn-remote.svn.fetch > fetch.out &&
grep "^trunk:refs/remotes/origin/trunk$" fetch.out &&
grep "^branches/a:refs/remotes/origin/a$" fetch.out &&
git log --pretty=oneline refs/remotes/tags/end | \
sed -e "s/^.\{41\}//" > output.end &&
test_cmp expect.end output.end &&
- test "`git rev-parse refs/remotes/tags/end~1`" = \
- "`git rev-parse refs/remotes/branches/start`" &&
- test "`git rev-parse refs/remotes/branches/start~2`" = \
- "`git rev-parse refs/remotes/trunk`" &&
+ test "$(git rev-parse refs/remotes/tags/end~1)" = \
+ "$(git rev-parse refs/remotes/branches/start)" &&
+ test "$(git rev-parse refs/remotes/branches/start~2)" = \
+ "$(git rev-parse refs/remotes/trunk)" &&
test_must_fail git rev-parse refs/remotes/tags/end@3
'
svn_cmd commit -m "try to try"
) &&
git svn fetch two &&
- test `git rev-list refs/remotes/two/tags/end | wc -l` -eq 6 &&
- test `git rev-list refs/remotes/two/branches/start | wc -l` -eq 3 &&
- test `git rev-parse refs/remotes/two/branches/start~2` = \
- `git rev-parse refs/remotes/two/trunk` &&
- test `git rev-parse refs/remotes/two/tags/end~3` = \
- `git rev-parse refs/remotes/two/branches/start` &&
+ test $(git rev-list refs/remotes/two/tags/end | wc -l) -eq 6 &&
+ test $(git rev-list refs/remotes/two/branches/start | wc -l) -eq 3 &&
+ test $(git rev-parse refs/remotes/two/branches/start~2) = \
+ $(git rev-parse refs/remotes/two/trunk) &&
+ test $(git rev-parse refs/remotes/two/tags/end~3) = \
+ $(git rev-parse refs/remotes/two/branches/start) &&
git log --pretty=oneline refs/remotes/two/tags/end | \
sed -e "s/^.\{41\}//" > output.two &&
test_cmp expect.two output.two
git log --pretty=oneline refs/remotes/tags/end | \
sed -e "s/^.\{41\}//" > output.end &&
test_cmp expect.end output.end &&
- test "`git rev-parse refs/remotes/tags/end~1`" = \
- "`git rev-parse refs/remotes/branches/v1/start`" &&
- test "`git rev-parse refs/remotes/branches/v1/start~2`" = \
- "`git rev-parse refs/remotes/trunk`" &&
+ test "$(git rev-parse refs/remotes/tags/end~1)" = \
+ "$(git rev-parse refs/remotes/branches/v1/start)" &&
+ test "$(git rev-parse refs/remotes/branches/v1/start~2)" = \
+ "$(git rev-parse refs/remotes/trunk)" &&
test_must_fail git rev-parse refs/remotes/tags/end@3
'
svn_cmd commit -m "try to try"
) &&
git svn fetch two &&
- test `git rev-list refs/remotes/two/tags/end | wc -l` -eq 6 &&
- test `git rev-list refs/remotes/two/branches/v1/start | wc -l` -eq 3 &&
- test `git rev-parse refs/remotes/two/branches/v1/start~2` = \
- `git rev-parse refs/remotes/two/trunk` &&
- test `git rev-parse refs/remotes/two/tags/end~3` = \
- `git rev-parse refs/remotes/two/branches/v1/start` &&
+ test $(git rev-list refs/remotes/two/tags/end | wc -l) -eq 6 &&
+ test $(git rev-list refs/remotes/two/branches/v1/start | wc -l) -eq 3 &&
+ test $(git rev-parse refs/remotes/two/branches/v1/start~2) = \
+ $(git rev-parse refs/remotes/two/trunk) &&
+ test $(git rev-parse refs/remotes/two/tags/end~3) = \
+ $(git rev-parse refs/remotes/two/branches/v1/start) &&
git log --pretty=oneline refs/remotes/two/tags/end | \
sed -e "s/^.\{41\}//" > output.two &&
test_cmp expect.two output.two
git config --add svn-remote.four.tags \
"tags/*:refs/remotes/four/tags/*" &&
git svn fetch four &&
- test `git rev-list refs/remotes/four/tags/next | wc -l` -eq 5 &&
- test `git rev-list refs/remotes/four/branches/v2/start | wc -l` -eq 3 &&
- test `git rev-parse refs/remotes/four/branches/v2/start~2` = \
- `git rev-parse refs/remotes/four/trunk` &&
- test `git rev-parse refs/remotes/four/tags/next~2` = \
- `git rev-parse refs/remotes/four/branches/v2/start` &&
+ test $(git rev-list refs/remotes/four/tags/next | wc -l) -eq 5 &&
+ test $(git rev-list refs/remotes/four/branches/v2/start | wc -l) -eq 3 &&
+ test $(git rev-parse refs/remotes/four/branches/v2/start~2) = \
+ $(git rev-parse refs/remotes/four/trunk) &&
+ test $(git rev-parse refs/remotes/four/tags/next~2) = \
+ $(git rev-parse refs/remotes/four/branches/v2/start) &&
git log --pretty=oneline refs/remotes/four/tags/next | \
sed -e "s/^.\{41\}//" > output.four &&
test_cmp expect.four output.four
test_expect_success 'find commit based on SVN revision number' "
git svn find-rev r12 |
- grep `git rev-parse HEAD`
+ grep $(git rev-parse HEAD)
"
test_expect_success 'empty rebase' "
test_debug 'gitk --all & sleep 1'
test_expect_success 'verify pre-merge ancestry' "
- test x\`git rev-parse --verify refs/heads/svn^2\` = \
- x\`git rev-parse --verify refs/heads/merge\` &&
+ test x\$(git rev-parse --verify refs/heads/svn^2) = \
+ x\$(git rev-parse --verify refs/heads/merge) &&
git cat-file commit refs/heads/svn^ | grep '^friend$'
"
test_debug 'gitk --all & sleep 1'
test_expect_success 'verify post-merge ancestry' "
- test x\`git rev-parse --verify refs/heads/svn\` = \
- x\`git rev-parse --verify refs/remotes/origin/trunk \` &&
- test x\`git rev-parse --verify refs/heads/svn^2\` = \
- x\`git rev-parse --verify refs/heads/merge\` &&
+ test x\$(git rev-parse --verify refs/heads/svn) = \
+ x\$(git rev-parse --verify refs/remotes/origin/trunk) &&
+ test x\$(git rev-parse --verify refs/heads/svn^2) = \
+ x\$(git rev-parse --verify refs/heads/merge) &&
git cat-file commit refs/heads/svn^ | grep '^friend$'
"
'
test_expect_success 'git svn rebase works inside a fresh-cloned repository' '
- cd test-rebase &&
+ (
+ cd test-rebase &&
git svn rebase &&
test -e test-rebase-main &&
test -e test-rebase
- '
+ )'
+
+# Without this, LC_ALL=C (as set in test-lib.sh) stays in effect, and Cygwin
+# converts non-ASCII characters in filenames unexpectedly, which causes errors.
+# https://cygwin.com/cygwin-ug-net/using-specialnames.html#pathnames-specialchars
+# > Some characters are disallowed in filenames on Windows filesystems. ...
+# ...
+# > ... All of the above characters, except for the backslash, are converted
+# > to special UNICODE characters in the range 0xf000 to 0xf0ff (the
+# > "Private use area") when creating or accessing files.
+prepare_a_utf8_locale
+test_expect_success UTF8 'svn.pathnameencoding=cp932 new file on dcommit' '
+ LC_ALL=$a_utf8_locale &&
+ export LC_ALL &&
+ neq=$(printf "\201\202") &&
+ git config svn.pathnameencoding cp932 &&
+ echo neq >"$neq" &&
+ git add "$neq" &&
+ git commit -m "neq" &&
+ git svn dcommit
+'
+
+# See the comment on the above test for setting of LC_ALL.
+test_expect_success 'svn.pathnameencoding=cp932 rename on dcommit' '
+ LC_ALL=$a_utf8_locale &&
+ export LC_ALL &&
+ inf=$(printf "\201\207") &&
+ git config svn.pathnameencoding cp932 &&
+ echo inf >"$inf" &&
+ git add "$inf" &&
+ git commit -m "inf" &&
+ git svn dcommit &&
+ git mv "$inf" inf &&
+ git commit -m "inf rename" &&
+ git svn dcommit
+'
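A hedged sketch of the setting these tests depend on (the byte sequences in the tests are just sample cp932 characters): with svn.pathnameencoding set, git svn is expected to convert path names to that encoding when talking to the Subversion repository:

    git config svn.pathnameencoding cp932
    git svn dcommit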
stop_httpd
"$svnrepo/pr ject/branches/$scary_uri" &&
svn_cmd cp -m "leading dot" "$svnrepo/pr ject/trunk" \
"$svnrepo/pr ject/branches/.leading_dot" &&
- svn_cmd cp -m "trailing dot" "$svnrepo/pr ject/trunk" \
- "$svnrepo/pr ject/branches/trailing_dot." &&
+ if test_have_prereq !MINGW
+ then
+ svn_cmd cp -m "trailing dot" "$svnrepo/pr ject/trunk" \
+ "$svnrepo/pr ject/branches/trailing_dot."
+ fi &&
svn_cmd cp -m "trailing .lock" "$svnrepo/pr ject/trunk" \
"$svnrepo/pr ject/branches/trailing_dotlock.lock" &&
svn_cmd cp -m "reflog" "$svnrepo/pr ject/trunk" \
# SVN 1.7 will truncate "not-a%40{0]" to just "not-a".
# Look at what SVN wound up naming the branch and use that.
# Be sure to escape the @ if it shows up.
-non_reflog=`svn_cmd ls "$svnrepo/pr ject/branches" | grep not-a | sed 's/\///' | sed 's/@/%40/'`
+non_reflog=$(svn_cmd ls "$svnrepo/pr ject/branches" | grep not-a | sed 's/\///' | sed 's/@/%40/')
test_expect_success 'test clone with funky branch names' '
git svn clone -s "$svnrepo/pr ject" project &&
git rev-parse "refs/remotes/origin/more%20fun%20plugin!" &&
git rev-parse "refs/remotes/origin/$scary_ref" &&
git rev-parse "refs/remotes/origin/%2Eleading_dot" &&
- git rev-parse "refs/remotes/origin/trailing_dot%2E" &&
+ if test_have_prereq !MINGW
+ then
+ git rev-parse "refs/remotes/origin/trailing_dot%2E"
+ fi &&
git rev-parse "refs/remotes/origin/trailing_dotlock%2Elock" &&
git rev-parse "refs/remotes/origin/$non_reflog"
)
# Tested with: svn, version 1.4.4 (r25188)
# Tested with: svn, version 1.6.[12345689]
-v=`svn_cmd --version | sed -n -e 's/^svn, version \(1\.[0-9]*\.[0-9]*\).*$/\1/p'`
+v=$(svn_cmd --version | sed -n -e 's/^svn, version \(1\.[0-9]*\.[0-9]*\).*$/\1/p')
case $v in
1.[456].*)
;;
'
test_expect_success 'add files matching auto-props' '
- echo "#!$SHELL_PATH" >exec1.sh &&
- chmod +x exec1.sh &&
+ write_script exec1.sh </dev/null &&
echo "hello" >hello.txt &&
echo bar >bar &&
git add exec1.sh hello.txt bar &&
'
test_expect_success 'add files matching disabled auto-props' '
- echo "#$SHELL_PATH" >exec2.sh &&
- chmod +x exec2.sh &&
+ write_script exec2.sh </dev/null &&
echo "world" >world.txt &&
echo zot >zot &&
git add exec2.sh world.txt zot &&
cd svnrepo &&
# Check properties from first commit.
- test "x$(svn_cmd propget svn:executable exec1.sh)" = "x*" &&
+ if test_have_prereq POSIXPERM
+ then
+ test "x$(svn_cmd propget svn:executable exec1.sh)" = "x*"
+ fi &&
test "x$(svn_cmd propget svn:mime-type exec1.sh)" = \
"xapplication/x-shellscript" &&
test "x$(svn_cmd propget svn:mime-type hello.txt)" = "xtext/plain" &&
test "x$(svn_cmd propget svn:mime-type bar)" = "x" &&
# Check properties from second commit.
- test "x$(svn_cmd propget svn:executable exec2.sh)" = "x*" &&
+ if test_have_prereq POSIXPERM
+ then
+ test "x$(svn_cmd propget svn:executable exec2.sh)" = "x*"
+ fi &&
test "x$(svn_cmd propget svn:mime-type exec2.sh)" = "x" &&
test "x$(svn_cmd propget svn:mime-type world.txt)" = "x" &&
test "x$(svn_cmd propget svn:eol-style world.txt)" = "x" &&
. ./lib-git-svn.sh
compare_git_head_with () {
- nr=`wc -l < "$1"`
+ nr=$(wc -l < "$1")
a=7
b=$(($a + $nr - 1))
git cat-file commit HEAD | sed -ne "$a,${b}p" >current &&
test_cmp current "$1"
}
-a_utf8_locale=$(locale -a | sed -n '/\.[uU][tT][fF]-*8$/{
- p
- q
-}')
-
-if test -n "$a_utf8_locale"
-then
- test_set_prereq UTF8
-else
- say "# UTF-8 locale not available, some tests are skipped"
-fi
+prepare_a_utf8_locale
compare_svn_head_with () {
# extract just the log message and strip out committer info.
# don't use --limit here since svn 1.1.x doesn't have it,
- LC_ALL="$a_utf8_locale" svn log `git svn info --url` | perl -w -e '
+ LC_ALL="$a_utf8_locale" svn log $(git svn info --url) | perl -w -e '
use bytes;
$/ = ("-"x72) . "\n";
my @x = <STDIN>;
test_expect_success 'imported 2 revisions successfully' '
(
cd x
- test "`git rev-list refs/remotes/git-svn | wc -l`" -eq 2 &&
+ test "$(git rev-list refs/remotes/git-svn | wc -l)" -eq 2 &&
git rev-list -1 --pretty=raw refs/remotes/git-svn | \
grep "^author BBBBBBB BBBBBBB <bb@example\.com> " &&
git rev-list -1 --pretty=raw refs/remotes/git-svn~1 | \
(
cd x
git svn fetch --authors-file=../svn-authors &&
- test "`git rev-list refs/remotes/git-svn | wc -l`" -eq 4 &&
+ test "$(git rev-list refs/remotes/git-svn | wc -l)" -eq 4 &&
git rev-list -1 --pretty=raw refs/remotes/git-svn | \
grep "^author DDDDDDD DDDDDDD <dd@example\.com> " &&
git rev-list -1 --pretty=raw refs/remotes/git-svn~1 | \
test_expect_success 'failure happened without negative side effects' '
(
cd aa-work &&
- test 6 -eq "`tmp_config_get svn-remote.svn.branches-maxRev`" &&
- test 6 -eq "`tmp_config_get svn-remote.svn.tags-maxRev`"
+ test 6 -eq "$(tmp_config_get svn-remote.svn.branches-maxRev)" &&
+ test 6 -eq "$(tmp_config_get svn-remote.svn.tags-maxRev)"
)
'
(
cd aa-work &&
git svn fetch --authors-file=../svn-authors &&
- test 8 -eq "`tmp_config_get svn-remote.svn.branches-maxRev`" &&
- test 8 -eq "`tmp_config_get svn-remote.svn.tags-maxRev`"
+ test 8 -eq "$(tmp_config_get svn-remote.svn.branches-maxRev)" &&
+ test 8 -eq "$(tmp_config_get svn-remote.svn.tags-maxRev)"
)
'
-test_expect_success 'fresh clone with svn.authors-file in config' '
+test_expect_success !MINGW 'fresh clone with svn.authors-file in config' '
(
rm -r "$GIT_DIR" &&
test x = x"$(git config svn.authorsfile)" &&
test_expect_success SYMLINKS '"bar" is a symlink that points to "asdf"' '
test -L x/bar &&
- (cd x && test xasdf = x"`git cat-file blob HEAD:bar`")
+ (cd x && test xasdf = x"$(git cat-file blob HEAD:bar)")
'
test_expect_success 'get "bar" => symlink fix from svn' '
test_expect_success SYMLINKS '"bar" remains a proper symlink' '
test -L x/bar &&
- (cd x && test xdoink = x"`git cat-file blob HEAD:bar`")
+ (cd x && test xdoink = x"$(git cat-file blob HEAD:bar)")
'
test_done
'
test_expect_success '(supposedly) non-conflicting change from SVN' '
- test x"`sed -n -e 58p < file`" = x58 &&
- test x"`sed -n -e 61p < file`" = x61 &&
+ test x"$(sed -n -e 58p < file)" = x58 &&
+ test x"$(sed -n -e 61p < file)" = x61 &&
svn_cmd co "$svnrepo" tmp &&
(cd tmp &&
perl -i.bak -p -e "s/^58$/5588/" file &&
perl -i.bak -p -e "s/^61$/6611/" file &&
poke file &&
- test x"`sed -n -e 58p < file`" = x5588 &&
- test x"`sed -n -e 61p < file`" = x6611 &&
+ test x"$(sed -n -e 58p < file)" = x5588 &&
+ test x"$(sed -n -e 61p < file)" = x6611 &&
svn_cmd commit -m "58 => 5588, 61 => 6611"
)
'
"
test_expect_success 'change file but in unrelated area' "
- test x\"\`sed -n -e 4p < file\`\" = x4 &&
- test x\"\`sed -n -e 7p < file\`\" = x7 &&
+ test x\"\$(sed -n -e 4p < file)\" = x4 &&
+ test x\"\$(sed -n -e 7p < file)\" = x7 &&
perl -i.bak -p -e 's/^4\$/4444/' file &&
perl -i.bak -p -e 's/^7\$/7777/' file &&
- test x\"\`sed -n -e 4p < file\`\" = x4444 &&
- test x\"\`sed -n -e 7p < file\`\" = x7777 &&
+ test x\"\$(sed -n -e 4p < file)\" = x4444 &&
+ test x\"\$(sed -n -e 7p < file)\" = x7777 &&
git commit -m '4 => 4444, 7 => 7777' file &&
git svn dcommit &&
svn_cmd up tmp &&
cd tmp &&
- test x\"\`sed -n -e 4p < file\`\" = x4444 &&
- test x\"\`sed -n -e 7p < file\`\" = x7777 &&
- test x\"\`sed -n -e 58p < file\`\" = x5588 &&
- test x\"\`sed -n -e 61p < file\`\" = x6611
+ test x\"\$(sed -n -e 4p < file)\" = x4444 &&
+ test x\"\$(sed -n -e 7p < file)\" = x7777 &&
+ test x\"\$(sed -n -e 58p < file)\" = x5588 &&
+ test x\"\$(sed -n -e 61p < file)\" = x6611
"
test_expect_success 'attempt to dcommit with a dirty index' '
test_expect_success 'imported 6 revisions successfully' '
(
cd x
- test "`git rev-list refs/remotes/git-svn | wc -l`" -eq 6
+ test "$(git rev-list refs/remotes/git-svn | wc -l)" -eq 6
)
'
git svn clone -s "$svnrepo" g &&
(
cd g &&
- test x`git rev-parse --verify refs/remotes/origin/trunk^0` = \
- x`git rev-parse --verify refs/heads/master^0`
+ test x$(git rev-parse --verify refs/remotes/origin/trunk^0) = \
+ x$(git rev-parse --verify refs/heads/master^0)
)
'
uuid=b48289b2-9c08-4d72-af37-0358a40b9c15
test_expect_success 'svk merges were represented coming in' "
- [ `git cat-file commit HEAD | grep parent | wc -l` -eq 2 ]
+ [ $(git cat-file commit HEAD | grep parent | wc -l) -eq 2 ]
"
test_done
check_entries () {
# $1 == directory, $2 == expected
- grep '^/' "$1/CVS/Entries" | sort | cut -d/ -f2,3,5 >actual
+ sed -ne '/^\//p' "$1/CVS/Entries" | sort | cut -d/ -f2,3,5 >actual
if test -z "$2"
then
>expected
then
# This test contains UTF-8 characters
-test_expect_success \
+test_expect_success !MINGW \
'File with non-ascii file name' \
'mkdir -p Å/goo/a/b/c/d/e/f/g/h/i/j/k/l/m/n/o/p/q/r/s/t/u/v/w/x/y/z/å/ä/ö &&
echo Foo >Å/goo/a/b/c/d/e/f/g/h/i/j/k/l/m/n/o/p/q/r/s/t/u/v/w/x/y/z/å/ä/ö/gårdetsågårdet.txt &&
test_expect_success 'A: verify marks output' '
cat >expect <<-EOF &&
- :2 `git rev-parse --verify master:file2`
- :3 `git rev-parse --verify master:file3`
- :4 `git rev-parse --verify master:file4`
- :5 `git rev-parse --verify master^0`
+ :2 $(git rev-parse --verify master:file2)
+ :3 $(git rev-parse --verify master:file3)
+ :4 $(git rev-parse --verify master:file4)
+ :5 $(git rev-parse --verify master^0)
EOF
test_cmp expect marks.out
'
EOF
git diff-tree -M -r master verify--import-marks >actual &&
compare_diff_raw expect actual &&
- test `git rev-parse --verify master:file2` \
- = `git rev-parse --verify verify--import-marks:copy-of-file2`
+ test $(git rev-parse --verify master:file2) \
+ = $(git rev-parse --verify verify--import-marks:copy-of-file2)
'
test_expect_success 'A: export marks with large values' '
git prune" &&
git fast-import <input &&
test -f .git/TEMP_TAG &&
- test `git rev-parse master` = `git rev-parse TEMP_TAG^`
+ test $(git rev-parse master) = $(git rev-parse TEMP_TAG^)
'
test_expect_success 'B: accept empty committer' '
###
test_expect_success 'C: incremental import create pack from stdin' '
- newf=`echo hi newf | git hash-object -w --stdin` &&
- oldf=`git rev-parse --verify master:file2` &&
+ newf=$(echo hi newf | git hash-object -w --stdin) &&
+ oldf=$(git rev-parse --verify master:file2) &&
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/branch
'
test_expect_success 'C: validate reuse existing blob' '
- test $newf = `git rev-parse --verify branch:file2/newf` &&
- test $oldf = `git rev-parse --verify branch:file2/oldf`
+ test $newf = $(git rev-parse --verify branch:file2/newf) &&
+ test $oldf = $(git rev-parse --verify branch:file2/oldf)
'
test_expect_success 'C: verify commit' '
cat >expect <<-EOF &&
- parent `git rev-parse --verify master^0`
+ parent $(git rev-parse --verify master^0)
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
###
test_expect_success 'F: non-fast-forward update skips' '
- old_branch=`git rev-parse --verify branch^0` &&
+ old_branch=$(git rev-parse --verify branch^0) &&
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/branch
test_must_fail git fast-import <input &&
# branch must remain unaffected
- test $old_branch = `git rev-parse --verify branch^0`
+ test $old_branch = $(git rev-parse --verify branch^0)
'
test_expect_success 'F: verify pack' '
test_expect_success 'F: verify other commit' '
cat >expect <<-EOF &&
- tree `git rev-parse branch~1^{tree}`
- parent `git rev-parse branch~1`
+ tree $(git rev-parse branch~1^{tree})
+ parent $(git rev-parse branch~1)
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
###
test_expect_success 'G: non-fast-forward update forced' '
- old_branch=`git rev-parse --verify branch^0` &&
+ old_branch=$(git rev-parse --verify branch^0) &&
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/branch
'
test_expect_success 'G: branch changed, but logged' '
- test $old_branch != `git rev-parse --verify branch^0` &&
- test $old_branch = `git rev-parse --verify branch@{1}`
+ test $old_branch != $(git rev-parse --verify branch^0) &&
+ test $old_branch = $(git rev-parse --verify branch@{1})
'
###
test_expect_success 'I: verify edge list' '
cat >expect <<-EOF &&
- .git/objects/pack/pack-.pack: `git rev-parse --verify export-boundary`
+ .git/objects/pack/pack-.pack: $(git rev-parse --verify export-boundary)
EOF
sed -e s/pack-.*pack/pack-.pack/ edges.list >actual &&
test_cmp expect actual
git fast-import <input
'
test_expect_success 'J: branch has 1 commit, empty tree' '
- test 1 = `git rev-list J | wc -l` &&
- test 0 = `git ls-tree J | wc -l`
+ test 1 = $(git rev-list J | wc -l) &&
+ test 0 = $(git ls-tree J | wc -l)
'
test_expect_success 'J: tag must fail on empty branch' '
git fast-import <input
'
test_expect_success 'K: verify K^1 = branch^1' '
- test `git rev-parse --verify branch^1` \
- = `git rev-parse --verify K^1`
+ test $(git rev-parse --verify branch^1) \
+ = $(git rev-parse --verify K^1)
'
###
git ls-tree L2 g/b/ >tmp &&
cat tmp | cut -f 2 >actual &&
test_cmp expect actual &&
- git fsck `git rev-parse L2`
+ git fsck $(git rev-parse L2)
'
###
INPUT_END
git fast-import <input &&
- test `git rev-parse N2^{tree}` = `git rev-parse N3^{tree}`
+ test $(git rev-parse N2^{tree}) = $(git rev-parse N3^{tree})
'
test_expect_success 'N: copy directory by id' '
INPUT_END
git fast-import <input &&
- test `git rev-parse N3` = `git rev-parse O1`
+ test $(git rev-parse N3) = $(git rev-parse O1)
'
test_expect_success 'O: blank lines not necessary after data commands' '
INPUT_END
git fast-import <input &&
- test `git rev-parse N3` = `git rev-parse O2`
+ test $(git rev-parse N3) = $(git rev-parse O2)
'
test_expect_success 'O: repack before next test' '
INPUT_END
git fast-import <input &&
- test 8 = `find .git/objects/pack -type f | wc -l` &&
- test `git rev-parse refs/tags/O3-2nd` = `git rev-parse O3^` &&
+ test 8 = $(find .git/objects/pack -type f | wc -l) &&
+ test $(git rev-parse refs/tags/O3-2nd) = $(git rev-parse O3^) &&
git log --reverse --pretty=oneline O3 | sed s/^.*z// >actual &&
test_cmp expect actual
'
data <<DATAEND
[submodule "sub"]
path = sub
- url = "`pwd`/sub"
+ url = "$(pwd)/sub"
DATAEND
commit refs/heads/subuse1
data <<DATAEND
[submodule "sub"]
path = sub
- url = "`pwd`/sub"
+ url = "$(pwd)/sub"
DATAEND
commit refs/heads/subuse2
test_expect_success 'Q: verify second notes commit' '
cat >expect <<-EOF &&
- parent `git rev-parse --verify refs/notes/foobar~2`
+ parent $(git rev-parse --verify refs/notes/foobar~2)
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
test_expect_success 'Q: verify fourth notes commit' '
cat >expect <<-EOF &&
- parent `git rev-parse --verify refs/notes/foobar^`
+ parent $(git rev-parse --verify refs/notes/foobar^)
author $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
git add file &&
git commit -m sub_initial
) &&
- git submodule add "`pwd`/sub" sub &&
+ git submodule add "$(pwd)/sub" sub &&
git commit -m initial &&
test_tick &&
(
test_expect_success 'set-up a few more tags for tag export tests' '
git checkout -f master &&
- HEAD_TREE=`git show -s --pretty=raw HEAD | grep tree | sed "s/tree //"` &&
+ HEAD_TREE=$(git show -s --pretty=raw HEAD | grep tree | sed "s/tree //") &&
git tag tree_tag -m "tagging a tree" $HEAD_TREE &&
git tag -a tree_tag-obj -m "tagging a tree" $HEAD_TREE &&
git tag tag-obj_tag -m "tagging a tag" tree_tag-obj &&
test_expect_success 'fast-export quotes pathnames' '
git init crazy-paths &&
(cd crazy-paths &&
- blob=`echo foo | git hash-object -w --stdin` &&
+ blob=$(echo foo | git hash-object -w --stdin) &&
git update-index --add \
--cacheinfo 100644 $blob "$(printf "path with\\nnewline")" \
--cacheinfo 100644 $blob "path with \"quote\"" \
test_done
}
-WORKDIR=$(pwd)
-SERVERDIR=$(pwd)/gitcvs.git
+WORKDIR=$PWD
+SERVERDIR=$PWD/gitcvs.git
git_config="$SERVERDIR/config"
CVSROOT=":fork:$SERVERDIR"
-CVSWORK="$(pwd)/cvswork"
+CVSWORK="$PWD/cvswork"
CVS_SERVER=git-cvsserver
export CVSROOT CVS_SERVER
}
unset GIT_DIR GIT_CONFIG
-WORKDIR=$(pwd)
-SERVERDIR=$(pwd)/gitcvs.git
+WORKDIR=$PWD
+SERVERDIR=$PWD/gitcvs.git
git_config="$SERVERDIR/config"
CVSROOT=":fork:$SERVERDIR"
-CVSWORK="$(pwd)/cvswork"
+CVSWORK="$PWD/cvswork"
CVS_SERVER=git-cvsserver
export CVSROOT CVS_SERVER
}
unset GIT_DIR GIT_CONFIG
-WORKDIR=$(pwd)
-SERVERDIR=$(pwd)/gitcvs.git
+WORKDIR=$PWD
+SERVERDIR=$PWD/gitcvs.git
git_config="$SERVERDIR/config"
CVSROOT=":fork:$SERVERDIR"
-CVSWORK="$(pwd)/cvswork"
+CVSWORK="$PWD/cvswork"
CVS_SERVER=git-cvsserver
export CVSROOT CVS_SERVER
echo object > tag-object &&
git add tag-object &&
test_tick && git commit -m "Object to be tagged" &&
- git tag tagged-object `git hash-object tag-object` &&
+ git tag tagged-object $(git hash-object tag-object) &&
gitweb_run "p=.git;a=snapshot;h=tagged-object;sf=tgz" &&
grep "400 - Object is not a tree-ish" gitweb.output
'
test_debug 'cat gitweb.output'
test_expect_success 'snapshots: good object id' '
- ID=`git rev-parse --verify HEAD` &&
+ ID=$(git rev-parse --verify HEAD) &&
gitweb_run "p=.git;a=snapshot;h=$ID;sf=tgz" &&
grep "Status: 200 OK" gitweb.output
'
test_debug 'cat gitweb.headers'
test_expect_success DATE_PARSER 'modification: tree snapshot' '
- ID=`git rev-parse --verify HEAD^{tree}` &&
+ ID=$(git rev-parse --verify HEAD^{tree}) &&
HTTP_IF_MODIFIED_SINCE="Wed, 6 Apr 2005 22:14:13 +0000" &&
export HTTP_IF_MODIFIED_SINCE &&
test_when_finished "unset HTTP_IF_MODIFIED_SINCE" &&
use Cwd;
use File::Basename;
+sub adjust_dirsep {
+ my $path = shift;
+ $path =~ s{\\}{/}g;
+ return $path;
+}
+
BEGIN { use_ok('Git') }
# set up
is($r->config_int("test.nonexistent"), undef, "config_int: nonexistent");
ok($r->config_bool("test.booltrue"), "config_bool: true");
ok(!$r->config_bool("test.boolfalse"), "config_bool: false");
-is($r->config_path("test.path"), $r->config("test.pathexpanded"),
+is(adjust_dirsep($r->config_path("test.path")), $r->config("test.pathexpanded"),
"config_path: ~/foo expansion");
is_deeply([$r->config_path("test.pathmulti")], ["foo", "bar"],
"config_path: multiple values");
--- /dev/null
+#!/bin/sh
+
+test_description='git p4 support for file type change'
+
+. ./lib-git-p4.sh
+
+test_expect_success 'start p4d' '
+ start_p4d
+'
+
+test_expect_success 'create files' '
+ (
+ cd "$cli" &&
+ p4 client -o | sed "/LineEnd/s/:.*/:unix/" | p4 client -i &&
+ cat >file1 <<-EOF &&
+ text without any funny substitution business
+ EOF
+ cat >file2 <<-EOF &&
+ second file whose type will change
+ EOF
+ p4 add file1 file2 &&
+ p4 submit -d "add files"
+ )
+'
+
+test_expect_success SYMLINKS 'change file to symbolic link' '
+ git p4 clone --dest="$git" //depot@all &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git config git-p4.skipSubmitEdit true &&
+
+ rm file2 &&
+ ln -s file1 file2 &&
+ git add file2 &&
+ git commit -m "symlink file1 to file2" &&
+ git p4 submit &&
+ p4 filelog -m 1 //depot/file2 >filelog &&
+ grep "(symlink)" filelog
+ )
+'
+
+test_expect_success SYMLINKS 'change symbolic link to file' '
+ git p4 clone --dest="$git" //depot@all &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git config git-p4.skipSubmitEdit true &&
+
+ rm file2 &&
+ cat >file2 <<-EOF &&
+ This is new content for the second file.
+ EOF
+ git add file2 &&
+ git commit -m "re-write file2" &&
+ git p4 submit &&
+ p4 filelog -m 1 //depot/file2 >filelog &&
+ grep "(text)" filelog
+ )
+'
+
+test_expect_success 'kill p4d' '
+ kill_p4d
+'
+
+test_done
echo fake: "$@"
EOF
chmod +x "fake browser" &&
- git config browser.w3m.path "`pwd`/fake browser" &&
+ git config browser.w3m.path "$(pwd)/fake browser" &&
test_web_browse w3m http://example.com/foo
'
with
newline'
-if mkdir "$repo_with_newline" 2>/dev/null
+if test_have_prereq !MINGW && mkdir "$repo_with_newline" 2>/dev/null
then
test_set_prereq FUNNYNAMES
else
return 0
elif test $exit_code -gt 129 && test $exit_code -le 192
then
- echo >&2 "test_must_fail: died by signal: $*"
+ echo >&2 "test_must_fail: died by signal $(($exit_code - 128)): $*"
return 1
elif test $exit_code -eq 127
then
test_lazy_prereq PIPE '
# test whether the filesystem supports FIFOs
case $(uname -s) in
- CYGWIN*)
+ CYGWIN*|MINGW*)
false
;;
*)
#!/bin/sh
#
-# An example hook script to blocks unannotated tags from entering.
+# An example hook script to block unannotated tags from entering.
# Called by "git receive-pack" with arguments: refname sha1-old sha1-new
#
# To enable this hook, rename this file to "update".
{
struct untracked_cache *uc;
struct strbuf base = STRBUF_INIT;
+
+ /* Hack to avoid modifying the untracked cache when we read it */
+ ignore_untracked_cache_config = 1;
+
setup_git_directory();
if (read_cache() < 0)
die("unable to read index file");
--- /dev/null
+#include "git-compat-util.h"
+#include "run-command.h"
+#include "strbuf.h"
+
+int main(int argc, char **argv)
+{
+ const char *trash_directory = getenv("TRASH_DIRECTORY");
+ struct strbuf buf = STRBUF_INIT;
+ FILE *f;
+ int i;
+ const char *child_argv[] = { NULL, NULL };
+
+ /* First, print all parameters into $TRASH_DIRECTORY/ssh-output */
+ if (!trash_directory)
+ die("Need a TRASH_DIRECTORY!");
+ strbuf_addf(&buf, "%s/ssh-output", trash_directory);
+ f = fopen(buf.buf, "w");
+ if (!f)
+ die("Could not write to %s", buf.buf);
+ for (i = 0; i < argc; i++)
+ fprintf(f, "%s%s", i > 0 ? " " : "", i > 0 ? argv[i] : "ssh:");
+ fprintf(f, "\n");
+ fclose(f);
+
+ /* Now, evaluate the *last* parameter */
+ if (argc < 2)
+ return 0;
+ child_argv[0] = argv[argc - 1];
+ return run_command_v_opt(child_argv, RUN_USING_SHELL);
+}
*/
static int normalize_ceiling_entry(struct string_list_item *item, void *unused)
{
- const char *ceil = item->string;
- int len = strlen(ceil);
- char buf[PATH_MAX+1];
+ char *ceil = item->string;
- if (len == 0)
+ if (!*ceil)
die("Empty path is not supported");
- if (len > PATH_MAX)
- die("Path \"%s\" is too long", ceil);
if (!is_absolute_path(ceil))
die("Path \"%s\" is not absolute", ceil);
- if (normalize_path_copy(buf, ceil) < 0)
+ if (normalize_path_copy(ceil, ceil) < 0)
die("Path \"%s\" could not be normalized", ceil);
- len = strlen(buf);
- free(item->string);
- item->string = xstrdup(buf);
return 1;
}
if (!data[i].from)
to = func(NULL);
else {
- strcpy(buffer, data[i].from);
+ xsnprintf(buffer, sizeof(buffer), "%s", data[i].from);
to = func(buffer);
}
if (!strcmp(to, data[i].to))
int main(int argc, char **argv)
{
if (argc == 3 && !strcmp(argv[1], "normalize_path_copy")) {
- char *buf = xmalloc(PATH_MAX + 1);
+ char *buf = xmallocz(strlen(argv[2]));
int rv = normalize_path_copy(buf, argv[2]);
if (rv)
buf = "++failed++";
#include "git-compat-util.h"
#include "run-command.h"
+#include "argv-array.h"
+#include "strbuf.h"
#include <string.h>
#include <errno.h>
+static int number_callbacks;
+static int parallel_next(struct child_process *cp,
+ struct strbuf *err,
+ void *cb,
+ void **task_cb)
+{
+ struct child_process *d = cb;
+ if (number_callbacks >= 4)
+ return 0;
+
+ argv_array_pushv(&cp->args, d->argv);
+ strbuf_addf(err, "preloaded output of a child\n");
+ number_callbacks++;
+ return 1;
+}
+
+static int no_job(struct child_process *cp,
+ struct strbuf *err,
+ void *cb,
+ void **task_cb)
+{
+ strbuf_addf(err, "no further jobs available\n");
+ return 0;
+}
+
+static int task_finished(int result,
+ struct strbuf *err,
+ void *pp_cb,
+ void *pp_task_cb)
+{
+ strbuf_addf(err, "asking for a quick stop\n");
+ return 1;
+}
+
int main(int argc, char **argv)
{
struct child_process proc = CHILD_PROCESS_INIT;
+ int jobs;
if (argc < 3)
return 1;
- proc.argv = (const char **)argv+2;
+ proc.argv = (const char **)argv + 2;
if (!strcmp(argv[1], "start-command-ENOENT")) {
if (start_command(&proc) < 0 && errno == ENOENT)
if (!strcmp(argv[1], "run-command"))
exit(run_command(&proc));
+ jobs = atoi(argv[2]);
+ proc.argv = (const char **)argv + 3;
+
+ if (!strcmp(argv[1], "run-command-parallel"))
+ exit(run_processes_parallel(jobs, parallel_next,
+ NULL, NULL, &proc));
+
+ if (!strcmp(argv[1], "run-command-abort"))
+ exit(run_processes_parallel(jobs, parallel_next,
+ NULL, task_finished, &proc));
+
+ if (!strcmp(argv[1], "run-command-no-jobs"))
+ exit(run_processes_parallel(jobs, no_job,
+ NULL, task_finished, &proc));
+
fprintf(stderr, "check usage\n");
return 1;
}
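
The test helper above exercises the run_processes_parallel() API added in this series: the get-next-task callback fills a child_process and returns 1 while work remains (0 once there is none), and a non-zero return from the task-finished callback asks the machinery to wind down early. Below is a minimal sketch of a caller using the same callback shapes shown in the hunk; the directory list and the names next_task()/run_example() are made up for illustration and are not part of the patch.

#include "git-compat-util.h"
#include "run-command.h"
#include "argv-array.h"
#include "strbuf.h"

static const char *dirs[] = { "repo-a", "repo-b", "repo-c", NULL };
static int next_dir;

/* Fill *cp and return 1 while there is work left; return 0 when done. */
static int next_task(struct child_process *cp, struct strbuf *err,
		     void *pp_cb, void **pp_task_cb)
{
	if (!dirs[next_dir])
		return 0;
	argv_array_pushl(&cp->args, "git", "-C", dirs[next_dir], "fsck", NULL);
	strbuf_addf(err, "checking %s\n", dirs[next_dir]);
	next_dir++;
	return 1;
}

int run_example(void)
{
	/* At most two children at a time; NULL picks the default handlers. */
	return run_processes_parallel(2, next_task, NULL, NULL, NULL);
}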
struct sha1_array array = SHA1_ARRAY_INIT;
struct strbuf line = STRBUF_INIT;
- while (strbuf_getline(&line, stdin, '\n') != EOF) {
+ while (strbuf_getline(&line, stdin) != EOF) {
const char *arg;
unsigned char sha1[20];
while read expect cnt pfx
do
case "$expect" in '#'*) continue ;; esac
- actual=`
+ actual=$(
{
test -z "$pfx" || echo "$pfx"
dd if=/dev/zero bs=1048576 count=$cnt 2>/dev/null |
perl -pe 'y/\000/g/'
} | ./test-sha1 $cnt
- `
+ )
if test "$expect" = "$actual"
then
echo "OK: $expect $cnt $pfx"
while read cnt pfx
do
- actual=`
+ actual=$(
{
test -z "$pfx" || echo "$pfx"
dd if=/dev/zero bs=1048576 count=$cnt 2>/dev/null |
perl -pe 'y/\000/g/'
} | sha1sum |
sed -e 's/ .*//'
- `
+ )
echo "$actual $cnt $pfx"
done <<EOF
0
#include "string-list.h"
#include "run-command.h"
#include "commit.h"
+#include "tempfile.h"
#include "trailer.h"
/*
* Copyright (c) 2013, 2014 Christian Couder <chriscool@tuxfamily.org>
return '\0';
}
-static void print_tok_val(const char *tok, const char *val)
+static void print_tok_val(FILE *outfile, const char *tok, const char *val)
{
char c = last_non_space_char(tok);
if (!c)
return;
if (strchr(separators, c))
- printf("%s%s\n", tok, val);
+ fprintf(outfile, "%s%s\n", tok, val);
else
- printf("%s%c %s\n", tok, separators[0], val);
+ fprintf(outfile, "%s%c %s\n", tok, separators[0], val);
}
-static void print_all(struct trailer_item *first, int trim_empty)
+static void print_all(FILE *outfile, struct trailer_item *first, int trim_empty)
{
struct trailer_item *item;
for (item = first; item; item = item->next) {
if (!trim_empty || strlen(item->value) > 0)
- print_tok_val(item->token, item->value);
+ print_tok_val(outfile, item->token, item->value);
}
}
cp.use_shell = 1;
if (capture_command(&cp, &buf, 1024)) {
- error("running trailer command '%s' failed", cmd.buf);
+ error(_("running trailer command '%s' failed"), cmd.buf);
strbuf_release(&buf);
result = xstrdup("");
} else {
return 0;
}
-static void print_lines(struct strbuf **lines, int start, int end)
+static void print_lines(FILE *outfile, struct strbuf **lines, int start, int end)
{
int i;
for (i = start; lines[i] && i < end; i++)
- printf("%s", lines[i]->buf);
+ fprintf(outfile, "%s", lines[i]->buf);
}
-static int process_input_file(struct strbuf **lines,
+static int process_input_file(FILE *outfile,
+ struct strbuf **lines,
struct trailer_item **in_tok_first,
struct trailer_item **in_tok_last)
{
trailer_start = find_trailer_start(lines, trailer_end);
/* Print lines before the trailers as is */
- print_lines(lines, 0, trailer_start);
+ print_lines(outfile, lines, 0, trailer_start);
if (!has_blank_line_before(lines, trailer_start - 1))
- printf("\n");
+ fprintf(outfile, "\n");
/* Parse trailer lines */
for (i = trailer_start; i < trailer_end; i++) {
}
}
-void process_trailers(const char *file, int trim_empty, struct string_list *trailers)
+static struct tempfile trailers_tempfile;
+
+static FILE *create_in_place_tempfile(const char *file)
+{
+ struct stat st;
+ struct strbuf template = STRBUF_INIT;
+ const char *tail;
+ FILE *outfile;
+
+ if (stat(file, &st))
+ die_errno(_("could not stat %s"), file);
+ if (!S_ISREG(st.st_mode))
+ die(_("file %s is not a regular file"), file);
+ if (!(st.st_mode & S_IWUSR))
+ die(_("file %s is not writable by user"), file);
+
+ /* Create temporary file in the same directory as the original */
+ tail = strrchr(file, '/');
+ if (tail != NULL)
+ strbuf_add(&template, file, tail - file + 1);
+ strbuf_addstr(&template, "git-interpret-trailers-XXXXXX");
+
+ xmks_tempfile_m(&trailers_tempfile, template.buf, st.st_mode);
+ strbuf_release(&template);
+ outfile = fdopen_tempfile(&trailers_tempfile, "w");
+ if (!outfile)
+ die_errno(_("could not open temporary file"));
+
+ return outfile;
+}
+
+void process_trailers(const char *file, int in_place, int trim_empty, struct string_list *trailers)
{
struct trailer_item *in_tok_first = NULL;
struct trailer_item *in_tok_last = NULL;
struct trailer_item *arg_tok_first;
struct strbuf **lines;
int trailer_end;
+ FILE *outfile = stdout;
/* Default config must be setup first */
git_config(git_trailer_default_config, NULL);
lines = read_input_file(file);
+ if (in_place)
+ outfile = create_in_place_tempfile(file);
+
/* Print the lines before the trailers */
- trailer_end = process_input_file(lines, &in_tok_first, &in_tok_last);
+ trailer_end = process_input_file(outfile, lines, &in_tok_first, &in_tok_last);
arg_tok_first = process_command_line_args(trailers);
process_trailers_lists(&in_tok_first, &in_tok_last, &arg_tok_first);
- print_all(in_tok_first, trim_empty);
+ print_all(outfile, in_tok_first, trim_empty);
free_all(&in_tok_first);
/* Print the lines after the trailers as is */
- print_lines(lines, trailer_end, INT_MAX);
+ print_lines(outfile, lines, trailer_end, INT_MAX);
+
+ if (in_place)
+ if (rename_tempfile(&trailers_tempfile, file))
+ die_errno(_("could not rename temporary file to %s"), file);
strbuf_list_free(lines);
}
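
The --in-place support above follows the usual atomic-replace idiom: the rewritten message is written to a temporary file created next to the original (so the final rename cannot cross a filesystem boundary and a failure never leaves a half-written target), then renamed over it. A stripped-down standalone sketch of the same idiom with plain POSIX calls follows; replace_file() is a hypothetical name, and unlike the code above it does not carry over the original file mode.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int replace_file(const char *path, const char *new_contents)
{
	char tmp[4096];
	int fd;
	FILE *out;

	/* Keep the temporary file in the same directory as the target. */
	if (snprintf(tmp, sizeof(tmp), "%s.XXXXXX", path) >= (int)sizeof(tmp))
		return -1;
	fd = mkstemp(tmp);
	if (fd < 0)
		return -1;
	out = fdopen(fd, "w");
	if (!out) {
		close(fd);
		unlink(tmp);
		return -1;
	}
	fputs(new_contents, out);
	if (fclose(out) || rename(tmp, path)) {
		unlink(tmp);
		return -1;
	}
	return 0;
}

int main(void)
{
	return replace_file("example.txt", "rewritten contents\n") ? 1 : 0;
}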
#ifndef TRAILER_H
#define TRAILER_H
-void process_trailers(const char *file, int trim_empty, struct string_list *trailers);
+void process_trailers(const char *file, int in_place, int trim_empty,
+ struct string_list *trailers);
#endif /* TRAILER_H */
strbuf_reset(buffer);
if (debug)
fprintf(stderr, "Debug: Remote helper: Waiting...\n");
- if (strbuf_getline(buffer, helper, '\n') == EOF) {
+ if (strbuf_getline(buffer, helper) == EOF) {
if (debug)
fprintf(stderr, "Debug: Remote helper quit.\n");
return 1;
data->no_disconnect_req = 0;
/*
- * Open the output as FILE* so strbuf_getline() can be used.
+ * Open the output as FILE* so strbuf_getline_*() family of
+ * functions can be used.
* Do this with duped fd because fclose() will close the fd,
* and stuff like taking over will require the fd to remain.
*/
if (n >= sizeof(buf))
die("impossibly large verbosity value");
set_helper_option(t, "verbosity", buf);
+
+ switch (t->family) {
+ case TRANSPORT_FAMILY_ALL:
+ /*
+ * this is already the default,
+ * do not break old remote helpers by setting "all" here
+ */
+ break;
+ case TRANSPORT_FAMILY_IPV4:
+ set_helper_option(t, "family", "ipv4");
+ break;
+ case TRANSPORT_FAMILY_IPV6:
+ set_helper_option(t, "family", "ipv6");
+ break;
+ }
}
static int release_helper(struct transport *transport)
#include "sha1-array.h"
#include "sigchain.h"
-/* rsync support */
-
-/*
- * We copy packed-refs and refs/ into a temporary file, then read the
- * loose refs recursively (sorting whenever possible), and then inserting
- * those packed refs that are not yet in the list (not validating, but
- * assuming that the file is sorted).
- *
- * Appears refactoring this from refs.c is too cumbersome.
- */
-
-static int str_cmp(const void *a, const void *b)
-{
- const char *s1 = a;
- const char *s2 = b;
-
- return strcmp(s1, s2);
-}
-
-/* path->buf + name_offset is expected to point to "refs/" */
-
-static int read_loose_refs(struct strbuf *path, int name_offset,
- struct ref **tail)
-{
- DIR *dir = opendir(path->buf);
- struct dirent *de;
- struct {
- char **entries;
- int nr, alloc;
- } list;
- int i, pathlen;
-
- if (!dir)
- return -1;
-
- memset (&list, 0, sizeof(list));
-
- while ((de = readdir(dir))) {
- if (is_dot_or_dotdot(de->d_name))
- continue;
- ALLOC_GROW(list.entries, list.nr + 1, list.alloc);
- list.entries[list.nr++] = xstrdup(de->d_name);
- }
- closedir(dir);
-
- /* sort the list */
-
- qsort(list.entries, list.nr, sizeof(char *), str_cmp);
-
- pathlen = path->len;
- strbuf_addch(path, '/');
-
- for (i = 0; i < list.nr; i++, strbuf_setlen(path, pathlen + 1)) {
- strbuf_addstr(path, list.entries[i]);
- if (read_loose_refs(path, name_offset, tail)) {
- int fd = open(path->buf, O_RDONLY);
- char buffer[40];
- struct ref *next;
-
- if (fd < 0)
- continue;
- next = alloc_ref(path->buf + name_offset);
- if (read_in_full(fd, buffer, 40) != 40 ||
- get_oid_hex(buffer, &next->old_oid)) {
- close(fd);
- free(next);
- continue;
- }
- close(fd);
- (*tail)->next = next;
- *tail = next;
- }
- }
- strbuf_setlen(path, pathlen);
-
- for (i = 0; i < list.nr; i++)
- free(list.entries[i]);
- free(list.entries);
-
- return 0;
-}
-
-/* insert the packed refs for which no loose refs were found */
-
-static void insert_packed_refs(const char *packed_refs, struct ref **list)
-{
- FILE *f = fopen(packed_refs, "r");
- static char buffer[PATH_MAX];
-
- if (!f)
- return;
-
- for (;;) {
- int cmp = 0; /* assigned before used */
- int len;
-
- if (!fgets(buffer, sizeof(buffer), f)) {
- fclose(f);
- return;
- }
-
- if (!isxdigit(buffer[0]))
- continue;
- len = strlen(buffer);
- if (len && buffer[len - 1] == '\n')
- buffer[--len] = '\0';
- if (len < 41)
- continue;
- while ((*list)->next &&
- (cmp = strcmp(buffer + 41,
- (*list)->next->name)) > 0)
- list = &(*list)->next;
- if (!(*list)->next || cmp < 0) {
- struct ref *next = alloc_ref(buffer + 41);
- buffer[40] = '\0';
- if (get_oid_hex(buffer, &next->old_oid)) {
- warning ("invalid SHA-1: %s", buffer);
- free(next);
- continue;
- }
- next->next = (*list)->next;
- (*list)->next = next;
- list = &(*list)->next;
- }
- }
-}
-
static void set_upstreams(struct transport *transport, struct ref *refs,
int pretend)
{
}
}
-static const char *rsync_url(const char *url)
-{
- if (!starts_with(url, "rsync://"))
- skip_prefix(url, "rsync:", &url);
- return url;
-}
-
-static struct ref *get_refs_via_rsync(struct transport *transport, int for_push)
-{
- struct strbuf buf = STRBUF_INIT, temp_dir = STRBUF_INIT;
- struct ref dummy = {NULL}, *tail = &dummy;
- struct child_process rsync = CHILD_PROCESS_INIT;
- const char *args[5];
- int temp_dir_len;
-
- if (for_push)
- return NULL;
-
- /* copy the refs to the temporary directory */
-
- strbuf_addstr(&temp_dir, git_path("rsync-refs-XXXXXX"));
- if (!mkdtemp(temp_dir.buf))
- die_errno ("Could not make temporary directory");
- temp_dir_len = temp_dir.len;
-
- strbuf_addstr(&buf, rsync_url(transport->url));
- strbuf_addstr(&buf, "/refs");
-
- rsync.argv = args;
- rsync.stdout_to_stderr = 1;
- args[0] = "rsync";
- args[1] = (transport->verbose > 1) ? "-rv" : "-r";
- args[2] = buf.buf;
- args[3] = temp_dir.buf;
- args[4] = NULL;
-
- if (run_command(&rsync))
- die ("Could not run rsync to get refs");
-
- strbuf_reset(&buf);
- strbuf_addstr(&buf, rsync_url(transport->url));
- strbuf_addstr(&buf, "/packed-refs");
-
- args[2] = buf.buf;
-
- if (run_command(&rsync))
- die ("Could not run rsync to get refs");
-
- /* read the copied refs */
-
- strbuf_addstr(&temp_dir, "/refs");
- read_loose_refs(&temp_dir, temp_dir_len + 1, &tail);
- strbuf_setlen(&temp_dir, temp_dir_len);
-
- tail = &dummy;
- strbuf_addstr(&temp_dir, "/packed-refs");
- insert_packed_refs(temp_dir.buf, &tail);
- strbuf_setlen(&temp_dir, temp_dir_len);
-
- if (remove_dir_recursively(&temp_dir, 0))
- warning ("Error removing temporary directory %s.",
- temp_dir.buf);
-
- strbuf_release(&buf);
- strbuf_release(&temp_dir);
-
- return dummy.next;
-}
-
-static int fetch_objs_via_rsync(struct transport *transport,
- int nr_objs, struct ref **to_fetch)
-{
- struct child_process rsync = CHILD_PROCESS_INIT;
-
- rsync.stdout_to_stderr = 1;
- argv_array_push(&rsync.args, "rsync");
- argv_array_push(&rsync.args, (transport->verbose > 1) ? "-rv" : "-r");
- argv_array_push(&rsync.args, "--ignore-existing");
- argv_array_push(&rsync.args, "--exclude");
- argv_array_push(&rsync.args, "info");
- argv_array_pushf(&rsync.args, "%s/objects/", rsync_url(transport->url));
- argv_array_push(&rsync.args, get_object_directory());
-
- /* NEEDSWORK: handle one level of alternates */
- return run_command(&rsync);
-}
-
-static int write_one_ref(const char *name, const struct object_id *oid,
- int flags, void *data)
-{
- struct strbuf *buf = data;
- int len = buf->len;
-
- /* when called via for_each_ref(), flags is non-zero */
- if (flags && !starts_with(name, "refs/heads/") &&
- !starts_with(name, "refs/tags/"))
- return 0;
-
- strbuf_addstr(buf, name);
- if (safe_create_leading_directories(buf->buf) ||
- write_file_gently(buf->buf, "%s", oid_to_hex(oid)))
- return error("problems writing temporary file %s: %s",
- buf->buf, strerror(errno));
- strbuf_setlen(buf, len);
- return 0;
-}
-
-static int write_refs_to_temp_dir(struct strbuf *temp_dir,
- int refspec_nr, const char **refspec)
-{
- int i;
-
- for (i = 0; i < refspec_nr; i++) {
- struct object_id oid;
- char *ref;
-
- if (dwim_ref(refspec[i], strlen(refspec[i]), oid.hash, &ref) != 1)
- return error("Could not get ref %s", refspec[i]);
-
- if (write_one_ref(ref, &oid, 0, temp_dir)) {
- free(ref);
- return -1;
- }
- free(ref);
- }
- return 0;
-}
-
-static int rsync_transport_push(struct transport *transport,
- int refspec_nr, const char **refspec, int flags)
-{
- struct strbuf buf = STRBUF_INIT, temp_dir = STRBUF_INIT;
- int result = 0, i;
- struct child_process rsync = CHILD_PROCESS_INIT;
- const char *args[10];
-
- if (flags & TRANSPORT_PUSH_MIRROR)
- return error("rsync transport does not support mirror mode");
-
- /* first push the objects */
-
- strbuf_addstr(&buf, rsync_url(transport->url));
- strbuf_addch(&buf, '/');
-
- rsync.argv = args;
- rsync.stdout_to_stderr = 1;
- i = 0;
- args[i++] = "rsync";
- args[i++] = "-a";
- if (flags & TRANSPORT_PUSH_DRY_RUN)
- args[i++] = "--dry-run";
- if (transport->verbose > 1)
- args[i++] = "-v";
- args[i++] = "--ignore-existing";
- args[i++] = "--exclude";
- args[i++] = "info";
- args[i++] = get_object_directory();
- args[i++] = buf.buf;
- args[i++] = NULL;
-
- if (run_command(&rsync))
- return error("Could not push objects to %s",
- rsync_url(transport->url));
-
- /* copy the refs to the temporary directory; they could be packed. */
-
- strbuf_addstr(&temp_dir, git_path("rsync-refs-XXXXXX"));
- if (!mkdtemp(temp_dir.buf))
- die_errno ("Could not make temporary directory");
- strbuf_addch(&temp_dir, '/');
-
- if (flags & TRANSPORT_PUSH_ALL) {
- if (for_each_ref(write_one_ref, &temp_dir))
- return -1;
- } else if (write_refs_to_temp_dir(&temp_dir, refspec_nr, refspec))
- return -1;
-
- i = 2;
- if (flags & TRANSPORT_PUSH_DRY_RUN)
- args[i++] = "--dry-run";
- if (!(flags & TRANSPORT_PUSH_FORCE))
- args[i++] = "--ignore-existing";
- args[i++] = temp_dir.buf;
- args[i++] = rsync_url(transport->url);
- args[i++] = NULL;
- if (run_command(&rsync))
- result = error("Could not push to %s",
- rsync_url(transport->url));
-
- if (remove_dir_recursively(&temp_dir, 0))
- warning ("Could not remove temporary directory %s.",
- temp_dir.buf);
-
- strbuf_release(&buf);
- strbuf_release(&temp_dir);
-
- return result;
-}
-
struct bundle_transport_data {
int fd;
struct bundle_header header;
return 1;
}
-static int connect_setup(struct transport *transport, int for_push, int verbose)
+static int connect_setup(struct transport *transport, int for_push)
{
struct git_transport_data *data = transport->data;
+ int flags = transport->verbose > 0 ? CONNECT_VERBOSE : 0;
if (data->conn)
return 0;
+ switch (transport->family) {
+ case TRANSPORT_FAMILY_ALL: break;
+ case TRANSPORT_FAMILY_IPV4: flags |= CONNECT_IPV4; break;
+ case TRANSPORT_FAMILY_IPV6: flags |= CONNECT_IPV6; break;
+ }
+
data->conn = git_connect(data->fd, transport->url,
for_push ? data->options.receivepack :
data->options.uploadpack,
- verbose ? CONNECT_VERBOSE : 0);
+ flags);
return 0;
}
struct git_transport_data *data = transport->data;
struct ref *refs;
- connect_setup(transport, for_push, 0);
+ connect_setup(transport, for_push);
get_remote_heads(data->fd[0], NULL, 0, &refs,
for_push ? REF_NORMAL : 0,
&data->extra_have,
args.update_shallow = data->options.update_shallow;
if (!data->got_remote_heads) {
- connect_setup(transport, 0, 0);
+ connect_setup(transport, 0);
get_remote_heads(data->fd[0], NULL, 0, &refs_tmp, 0,
NULL, &data->shallow);
data->got_remote_heads = 1;
if (!data->got_remote_heads) {
struct ref *tmp_refs;
- connect_setup(transport, 1, 0);
+ connect_setup(transport, 1);
get_remote_heads(data->fd[0], NULL, 0, &tmp_refs, REF_NORMAL,
NULL, &data->shallow);
if (helper) {
transport_helper_init(ret, helper);
} else if (starts_with(url, "rsync:")) {
- transport_check_allowed("rsync");
- ret->get_refs_list = get_refs_via_rsync;
- ret->fetch = fetch_objs_via_rsync;
- ret->push = rsync_transport_push;
- ret->smart_options = NULL;
+ die("git-over-rsync is no longer supported");
} else if (url_is_local_not_ssh(url) && is_file(url) && is_bundle(url, 1)) {
struct bundle_transport_data *data = xcalloc(1, sizeof(*data));
transport_check_allowed("file");
* This condition shouldn't be met in a non-deepening fetch
* (see builtin/fetch.c:quickfetch()).
*/
- heads = xmalloc(nr_refs * sizeof(*heads));
+ ALLOC_ARRAY(heads, nr_refs);
for (rm = refs; rm; rm = rm->next)
heads[nr_heads++] = rm;
}
*/
char *transport_anonymize_url(const char *url)
{
- char *anon_url, *scheme_prefix, *anon_part;
+ char *scheme_prefix, *anon_part;
size_t anon_len, prefix_len = 0;
anon_part = strchr(url, '@');
goto literal_copy;
prefix_len = scheme_prefix - url + 3;
}
- anon_url = xcalloc(1, 1 + prefix_len + anon_len);
- memcpy(anon_url, url, prefix_len);
- memcpy(anon_url + prefix_len, anon_part, anon_len);
- return anon_url;
+ return xstrfmt("%.*s%.*s", (int)prefix_len, url,
+ (int)anon_len, anon_part);
literal_copy:
return xstrdup(url);
}
struct push_cas_option *cas;
};
+enum transport_family {
+ TRANSPORT_FAMILY_ALL = 0,
+ TRANSPORT_FAMILY_IPV4,
+ TRANSPORT_FAMILY_IPV6
+};
+
struct transport {
struct remote *remote;
const char *url;
* actually turns out to be smart.
*/
struct git_transport_options *smart_options;
+
+ enum transport_family family;
};
#define TRANSPORT_PUSH_ALL 1
unsigned mode, const unsigned char *sha1)
{
struct combine_diff_path *p;
- int len = base->len + pathlen;
- int alloclen = combine_diff_path_size(nparent, len);
+ size_t len = st_add(base->len, pathlen);
+ size_t alloclen = combine_diff_path_size(nparent, len);
/* if last->next is !NULL - it is a pre-allocated memory, we can reuse */
p = last->next;
struct tree_desc_x *tx = xcalloc(n, sizeof(*tx));
struct strbuf base = STRBUF_INIT;
int interesting = 1;
+ char *traverse_path;
for (i = 0; i < n; i++)
tx[i].d = t[i];
make_traverse_path(base.buf, info->prev, &info->name);
base.buf[info->pathlen-1] = '/';
strbuf_setlen(&base, info->pathlen);
+ traverse_path = xstrndup(base.buf, info->pathlen);
+ } else {
+ traverse_path = xstrndup(info->name.path, info->pathlen);
}
+ info->traverse_path = traverse_path;
for (;;) {
int trees_used;
unsigned long mask, dirmask;
for (i = 0; i < n; i++)
free_extended_entry(tx + i);
free(tx);
+ free(traverse_path);
+ info->traverse_path = NULL;
strbuf_release(&base);
return error;
}
enum follow_symlinks_result get_tree_entry_follow_symlinks(unsigned char *tree_sha1, const char *name, unsigned char *result, struct strbuf *result_path, unsigned *mode);
struct traverse_info {
+ const char *traverse_path;
struct traverse_info *prev;
struct name_entry name;
int pathlen;
#!/bin/sh
-echo >&2 "fatal: git was built without support for `basename $0` (@@REASON@@)."
+echo >&2 "fatal: git was built without support for $(basename $0) (@@REASON@@)."
exit 128
* itself - the caller needs to do the final check for the cache
* entry having more data at the end!
*/
-static int do_compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
+static int do_compare_entry_piecewise(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
int len, pathlen, ce_len;
const char *ce_name;
if (info->prev) {
- int cmp = do_compare_entry(ce, info->prev, &info->name);
+ int cmp = do_compare_entry_piecewise(ce, info->prev,
+ &info->name);
if (cmp)
return cmp;
}
return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}
+static int do_compare_entry(const struct cache_entry *ce,
+ const struct traverse_info *info,
+ const struct name_entry *n)
+{
+ int len, pathlen, ce_len;
+ const char *ce_name;
+ int cmp;
+
+ /*
+ * If we have not precomputed the traverse path, it is quicker
+ * to avoid doing so. But if we have precomputed it,
+ * it is quicker to use the precomputed version.
+ */
+ if (!info->traverse_path)
+ return do_compare_entry_piecewise(ce, info, n);
+
+ cmp = strncmp(ce->name, info->traverse_path, info->pathlen);
+ if (cmp)
+ return cmp;
+
+ pathlen = info->pathlen;
+ ce_len = ce_namelen(ce);
+
+ if (ce_len < pathlen)
+ return -1;
+
+ ce_len -= pathlen;
+ ce_name = ce->name + pathlen;
+
+ len = tree_entry_len(n);
+ return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
+}
+
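+
The fast path above relies on info->traverse_path holding the precomputed directory prefix of the level being walked, so one strncmp() on that prefix replaces the recursive per-parent comparison in do_compare_entry_piecewise(). A toy standalone illustration of the idea follows; the names are hypothetical and it glosses over the D/F-conflict-aware df_name_compare() used by the real code.

#include <stdio.h>
#include <string.h>

/* Compare an index entry name against a tree entry at the current level. */
static int compare_against_level(const char *index_name,
				 const char *traverse_path, size_t pathlen,
				 const char *tree_entry)
{
	int cmp = strncmp(index_name, traverse_path, pathlen);
	if (cmp)
		return cmp;	/* differs already within the directory prefix */
	return strcmp(index_name + pathlen, tree_entry);
}

int main(void)
{
	/* "sub/dir/" is the precomputed path of the level being walked. */
	printf("%d\n", compare_against_level("sub/dir/file.c", "sub/dir/", 8,
					     "file.c"));
	return 0;
}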
static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
int cmp = do_compare_entry(ce, info, n);
++o->cache_bottom;
continue;
}
- if (!ce_in_traverse_path(ce, info))
+ if (!ce_in_traverse_path(ce, info)) {
+ /*
+ * Check if we can skip future cache checks
+ * (because we're already past all possible
+ * entries in the traverse path).
+ */
+ if (info->traverse_path) {
+ if (strncmp(ce->name, info->traverse_path,
+ info->pathlen) > 0)
+ break;
+ }
continue;
+ }
ce_name = ce->name + pfxlen;
ce_slash = strchr(ce_name, '/');
if (ce_slash)
struct userdiff_driver *userdiff_find_by_name(const char *name);
struct userdiff_driver *userdiff_find_by_path(const char *path);
+/*
+ * Initialize any textconv-related fields in the driver and return it, or NULL
+ * if it does not have textconv enabled at all.
+ */
struct userdiff_driver *userdiff_get_textconv(struct userdiff_driver *driver);
#endif /* USERDIFF */
char *rf_one = NULL;
char *tg_one;
- if (strbuf_getline(&buf, stdin, '\n') == EOF)
+ if (strbuf_getline_lf(&buf, stdin) == EOF)
break;
tg_one = buf.buf;
rf_one = strchr(tg_one, '\t');
{
void *ret;
+ if (unsigned_mult_overflows(nmemb, size))
+ die("data too large to fit into virtual memory space");
+
memory_limit_check(size * nmemb, 0);
ret = calloc(nmemb, size);
if (!ret && (!nmemb || !size))
len = MAX_IO_SIZE;
while (1) {
nr = read(fd, buf, len);
- if ((nr < 0) && (errno == EAGAIN || errno == EINTR))
- continue;
+ if (nr < 0) {
+ if (errno == EINTR)
+ continue;
+ if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ struct pollfd pfd;
+ pfd.events = POLLIN;
+ pfd.fd = fd;
+ /*
+ * it is OK if this poll() failed; we
+ * want to leave this infinite loop
+ * only when read() returns with
+ * success, or an expected failure,
+ * which would be checked by the next
+ * call to read(2).
+ */
+ poll(&pfd, 1, -1);
+ }
+ }
return nr;
}
}
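
As the comment in the hunk above explains, a non-blocking descriptor that reports EAGAIN or EWOULDBLOCK is now parked in poll() until data arrives, instead of busy-looping around read(). Here is a self-contained sketch of the same retry pattern on a plain POSIX descriptor; read_retry() is a hypothetical name, not the wrapper in the patch.

#include <errno.h>
#include <poll.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t read_retry(int fd, void *buf, size_t len)
{
	for (;;) {
		ssize_t nr = read(fd, buf, len);
		if (nr >= 0)
			return nr;
		if (errno == EINTR)
			continue;		/* interrupted by a signal */
		if (errno == EAGAIN || errno == EWOULDBLOCK) {
			struct pollfd pfd;
			pfd.fd = fd;
			pfd.events = POLLIN;
			/* Block until readable; a poll() error is harmless,
			 * the next read() decides what to do. */
			poll(&pfd, 1, -1);
			continue;
		}
		return nr;		/* genuine error */
	}
}

int main(void)
{
	char buf[256];
	return read_retry(0, buf, sizeof(buf)) < 0;
}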
#include "cache.h"
+#include "run-command.h"
static void check_pipe(int err)
{
if (err == EPIPE) {
+ if (in_async())
+ async_exit(141);
+
signal(SIGPIPE, SIG_DFL);
raise(SIGPIPE);
/* Should never happen, but just in case... */
strbuf_release(&buf);
return NULL;
}
- strbuf_getline(&buf, fp, '\n');
+ strbuf_getline_lf(&buf, fp);
if (!fclose(fp)) {
return strbuf_detach(&buf, NULL);
} else {
if (!f)
die_errno("Could not open file %s for reading",
git_path("%s", fname));
- while (!strbuf_getline(&line, f, '\n')) {
+ while (!strbuf_getline_lf(&line, f)) {
if (line.len && line.buf[0] == comment_line_char)
continue;
strbuf_trim(&line);
for (i = 0, regs->nr = 1; value[i]; i++)
if (value[i] == '\n')
regs->nr++;
- regs->array = xmalloc(regs->nr * sizeof(struct ff_reg));
+ ALLOC_ARRAY(regs->array, regs->nr);
for (i = 0; i < regs->nr; i++) {
struct ff_reg *reg = regs->array + i;
const char *ep = strchr(value, '\n'), *expression;
#define XDF_IGNORE_BLANK_LINES (1 << 7)
#define XDL_EMIT_FUNCNAMES (1 << 0)
-#define XDL_EMIT_COMMON (1 << 1)
#define XDL_EMIT_FUNCCONTEXT (1 << 2)
#define XDL_MMB_READONLY (1 << 0)
return -1;
}
-static int xdl_emit_common(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
- xdemitconf_t const *xecfg) {
- xdfile_t *xdf = &xe->xdf2;
- const char *rchg = xdf->rchg;
- long ix;
-
- for (ix = 0; ix < xdf->nrec; ix++) {
- if (rchg[ix])
- continue;
- if (xdl_emit_record(xdf, ix, "", ecb))
- return -1;
- }
- return 0;
-}
-
struct func_line {
long len;
char buf[80];
long funclineprev = -1;
struct func_line func_line = { 0 };
- if (xecfg->flags & XDL_EMIT_COMMON)
- return xdl_emit_common(xe, xscr, ecb, xecfg);
-
for (xch = xscr; xch; xch = xche->next) {
xche = xdl_get_hunk(&xch, xecfg);
if (!xch)
result->ptr = NULL;
result->size = 0;
- if (xdl_do_diff(orig, mf1, xpp, &xe1) < 0 ||
- xdl_do_diff(orig, mf2, xpp, &xe2) < 0) {
+ if (xdl_do_diff(orig, mf1, xpp, &xe1) < 0) {
+ return -1;
+ }
+ if (xdl_do_diff(orig, mf2, xpp, &xe2) < 0) {
+ xdl_free_env(&xe1);
return -1;
}
if (xdl_change_compact(&xe1.xdf1, &xe1.xdf2, xpp->flags) < 0 ||
if (xdl_change_compact(&xe2.xdf1, &xe2.xdf2, xpp->flags) < 0 ||
xdl_change_compact(&xe2.xdf2, &xe2.xdf1, xpp->flags) < 0 ||
xdl_build_script(&xe2, &xscr2) < 0) {
+ xdl_free_script(xscr1);
+ xdl_free_env(&xe1);
xdl_free_env(&xe2);
return -1;
}