Code clean-up.
* rs/resolve-ref-optional-result:
refs: pass NULL to resolve_refdup() if hash is not needed
refs: pass NULL to refs_resolve_refdup() if hash is not needed
--- /dev/null
+# Defaults
+
+# Use tabs whenever we need to fill whitespace that spans at least from one tab
+# stop to the next one.
+UseTab: Always
+TabWidth: 8
+IndentWidth: 8
+ContinuationIndentWidth: 8
+ColumnLimit: 80
+
+# C Language specifics
+Language: Cpp
+
+# Align parameters on the open bracket
+# someLongFunction(argument1,
+#                  argument2);
+AlignAfterOpenBracket: Align
+
+# Don't align consecutive assignments
+# int aaaa = 12;
+# int b = 14;
+AlignConsecutiveAssignments: false
+
+# Don't align consecutive declarations
+# int aaaa = 12;
+# double b = 3.14;
+AlignConsecutiveDeclarations: false
+
+# Align escaped newlines as far left as possible
+# #define A   \
+#   int aaaa; \
+#   int b;    \
+#   int cccccccc;
+AlignEscapedNewlines: Left
+
+# Align operands of binary and ternary expressions
+# int aaa = bbbbbbbbbbb +
+#           cccccc;
+AlignOperands: true
+
+# Don't align trailing comments
+# int a; // Comment a
+# int b = 2; // Comment b
+AlignTrailingComments: false
+
+# By default don't allow putting parameters onto the next line
+# myFunction(foo, bar, baz);
+AllowAllParametersOfDeclarationOnNextLine: false
+
+# Don't allow short braced statements to be on a single line
+# if (a)            not       if (a) return;
+#   return;
+AllowShortBlocksOnASingleLine: false
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: false
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+
+# By default don't add a line break after the return type of top-level functions
+# int foo();
+AlwaysBreakAfterReturnType: None
+
+# Pack as many parameters or arguments onto the same line as possible
+# int myFunction(int aaaaaaaaaaaa, int bbbbbbbb,
+#                int cccc);
+BinPackArguments: true
+BinPackParameters: true
+
+# Attach braces to surrounding context except break before braces on function
+# definitions.
+# void foo()
+# {
+#    if (true) {
+#    } else {
+#    }
+# }
+BreakBeforeBraces: Linux
+
+# Break after operators
+# int value = aaaaaaaaaaaaa +
+#             bbbbbb -
+#             ccccccccccc;
+BreakBeforeBinaryOperators: None
+BreakBeforeTernaryOperators: false
+
+# Don't break string literals
+BreakStringLiterals: false
+
+# Use the same indentation level as for the switch statement.
+# Switch statement body is always indented one level more than case labels.
+IndentCaseLabels: false
+
+# Don't indent a function definition or declaration if it is wrapped after the
+# type
+IndentWrappedFunctionNames: false
+
+# Align pointer to the right
+# int *a;
+PointerAlignment: Right
+
+# Don't insert a space after a cast
+# x = (int32)y; not x = (int32) y;
+SpaceAfterCStyleCast: false
+
+# Insert spaces before and after assignment operators
+# int a = 5;    not    int a=5;
+# a += 42;             a+=42;
+SpaceBeforeAssignmentOperators: true
+
+# Put a space before opening parentheses only after control statement keywords.
+# void f() {
+#   if (true) {
+#     f();
+#   }
+# }
+SpaceBeforeParens: ControlStatements
+
+# Don't insert spaces inside empty '()'
+SpaceInEmptyParentheses: false
+
+# The number of spaces before trailing line comments (// - comments).
+# This does not affect trailing block comments (/* - comments).
+SpacesBeforeTrailingComments: 1
+
+# Don't insert spaces in casts
+# x = (int32) y; not x = ( int32 ) y;
+SpacesInCStyleCastParentheses: false
+
+# Don't insert spaces inside container literals
+# var arr = [1, 2, 3]; not var arr = [ 1, 2, 3 ];
+SpacesInContainerLiterals: false
+
+# Don't insert spaces after '(' or before ')'
+# f(arg); not f( arg );
+SpacesInParentheses: false
+
+# Don't insert spaces after '[' or before ']'
+# int a[5]; not int a[ 5 ];
+SpacesInSquareBrackets: false
+
+# Insert a space after '{' and before '}' in struct initializers
+Cpp11BracedListStyle: false
+
+# A list of macros that should be interpreted as foreach loops instead of as
+# function calls.
+ForEachMacros: ['for_each_string_list_item']
+
+# The maximum number of consecutive empty lines to keep.
+MaxEmptyLinesToKeep: 1
+
+# No empty line at the start of a block.
+KeepEmptyLinesAtTheStartOfBlocks: false
+
+# Penalties
+# This decides what order things should be done if a line is too long
+PenaltyBreakAssignment: 10
+PenaltyBreakBeforeFirstCallParameter: 30
+PenaltyBreakComment: 10
+PenaltyBreakFirstLessLess: 0
+PenaltyBreakString: 10
+PenaltyExcessCharacter: 100
+PenaltyReturnTypeOnItsOwnLine: 5
+
+# Don't sort #include's
+SortIncludes: false
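A quick way to try the configuration out is the `style` Makefile target added later in this series; a rough by-hand equivalent (the file name is only an example) would be:

------------
# show, without applying, the reformatting clang-format would do to the
# C and header files touched by your current changes
git clang-format --style file --diff --extensions c,h

# or check one file directly against the .clang-format above
clang-format --style=file builtin/branch.c | diff -u builtin/branch.c -
------------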
services:
- docker
before_install:
- - docker pull daald/ubuntu32:xenial
before_script:
- script:
- - >
- docker run
- --interactive
- --env DEVELOPER
- --env DEFAULT_TEST_TARGET
- --env GIT_PROVE_OPTS
- --env GIT_TEST_OPTS
- --env GIT_TEST_CLONE_2GB
- --volume "${PWD}:/usr/src/git"
- daald/ubuntu32:xenial
- /usr/src/git/ci/run-linux32-build.sh $(id -u $USER)
- # Use the following command to debug the docker build locally:
- # $ docker run -itv "${PWD}:/usr/src/git" --entrypoint /bin/bash daald/ubuntu32:xenial
- # root@container:/# /usr/src/git/ci/run-linux32-build.sh
+ script: ci/run-linux32-docker.sh
- env: Static Analysis
os: linux
compiler:
packages:
- coccinelle
before_install:
- script:
- # "before_script" that builds Git is inherited from base job
- - make coccicheck
+ # "before_script" that builds Git is inherited from base job
+ script: ci/run-static-analysis.sh
after_failure:
- env: Documentation
os: linux
- asciidoc
- xmlto
before_install:
- before_script: gem install asciidoctor
+ before_script:
script: ci/test-documentation.sh
after_failure:
-before_install:
- - >
- case "${TRAVIS_OS_NAME:-linux}" in
- linux)
- export GIT_TEST_HTTPD=YesPlease
-
- mkdir --parents custom/p4
- pushd custom/p4
- wget --quiet http://filehost.perforce.com/perforce/r$LINUX_P4_VERSION/bin.linux26x86_64/p4d
- wget --quiet http://filehost.perforce.com/perforce/r$LINUX_P4_VERSION/bin.linux26x86_64/p4
- chmod u+x p4d
- chmod u+x p4
- export PATH="$(pwd):$PATH"
- popd
- mkdir --parents custom/git-lfs
- pushd custom/git-lfs
- wget --quiet https://github.com/github/git-lfs/releases/download/v$LINUX_GIT_LFS_VERSION/git-lfs-linux-amd64-$LINUX_GIT_LFS_VERSION.tar.gz
- tar --extract --gunzip --file "git-lfs-linux-amd64-$LINUX_GIT_LFS_VERSION.tar.gz"
- cp git-lfs-$LINUX_GIT_LFS_VERSION/git-lfs .
- export PATH="$(pwd):$PATH"
- popd
- ;;
- osx)
- brew update --quiet
- # Uncomment this if you want to run perf tests:
- # brew install gnu-time
- brew install git-lfs gettext
- brew link --force gettext
- brew install caskroom/cask/perforce
- ;;
- esac;
- echo "$(tput setaf 6)Perforce Server Version$(tput sgr0)";
- p4d -V | grep Rev.;
- echo "$(tput setaf 6)Perforce Client Version$(tput sgr0)";
- p4 -V | grep Rev.;
- echo "$(tput setaf 6)Git-LFS Version$(tput sgr0)";
- git-lfs version;
-
-before_script: make --jobs=2
-
-script:
- - >
- mkdir -p $HOME/travis-cache;
- ln -s $HOME/travis-cache/.prove t/.prove;
- make --quiet test;
-
-after_failure:
- - >
- : '<-- Click here to see detailed test output! ';
- for TEST_EXIT in t/test-results/*.exit;
- do
- if [ "$(cat "$TEST_EXIT")" != "0" ];
- then
- TEST_OUT="${TEST_EXIT%exit}out";
- echo "------------------------------------------------------------------------";
- echo "$(tput setaf 1)${TEST_OUT}...$(tput sgr0)";
- echo "------------------------------------------------------------------------";
- cat "${TEST_OUT}";
- fi;
- done;
+before_install: ci/install-dependencies.sh
+before_script: ci/run-build.sh
+script: ci/run-tests.sh
+after_failure: ci/print-test-failures.sh
notifications:
email: false
--- /dev/null
+Git v2.10.5 Release Notes
+=========================
+
+Fixes since v2.10.4
+-------------------
+
+ * "git cvsserver" no longer is invoked by "git daemon" by default,
+ as it is old and largely unmaintained.
+
+ * Various Perl scripts did not use safe_pipe_capture() instead of
+ backticks, leaving them susceptible to end-user input. They have
+ been corrected.
+
+Credits go to joernchen <joernchen@phenoelit.de> for finding the
+unsafe constructs in "git cvsserver", and to Jeff King at GitHub for
+finding and fixing instances of the same issue in other scripts.
+
--- /dev/null
+Git v2.11.4 Release Notes
+=========================
+
+Fixes since v2.11.3
+-------------------
+
+ * "git cvsserver" no longer is invoked by "git daemon" by default,
+ as it is old and largely unmaintained.
+
+ * Various Perl scripts did not use safe_pipe_capture() instead of
+ backticks, leaving them susceptible to end-user input. They have
+ been corrected.
+
+Credits go to joernchen <joernchen@phenoelit.de> for finding the
+unsafe constructs in "git cvsserver", and to Jeff King at GitHub for
+finding and fixing instances of the same issue in other scripts.
+
--- /dev/null
+Git v2.12.5 Release Notes
+=========================
+
+Fixes since v2.12.4
+-------------------
+
+ * "git cvsserver" no longer is invoked by "git daemon" by default,
+ as it is old and largely unmaintained.
+
+ * Various Perl scripts did not use safe_pipe_capture() instead of
+ backticks, leaving them susceptible to end-user input. They have
+ been corrected.
+
+Credits go to joernchen <joernchen@phenoelit.de> for finding the
+unsafe constructs in "git cvsserver", and to Jeff King at GitHub for
+finding and fixing instances of the same issue in other scripts.
+
--- /dev/null
+Git v2.13.6 Release Notes
+=========================
+
+Fixes since v2.13.5
+-------------------
+
+ * "git cvsserver" no longer is invoked by "git daemon" by default,
+ as it is old and largely unmaintained.
+
+ * Various Perl scripts did not use safe_pipe_capture() instead of
+ backticks, leaving them susceptible to end-user input. They have
+ been corrected.
+
+Credits go to joernchen <joernchen@phenoelit.de> for finding the
+unsafe constructs in "git cvsserver", and to Jeff King at GitHub for
+finding and fixing instances of the same issue in other scripts.
+
* "git archive" did not work well with pathspecs and the
export-ignore attribute.
+ * "git cvsserver" no longer is invoked by "git daemon" by default,
+ as it is old and largely unmaintained.
+
+ * Various Perl scripts did not use safe_pipe_capture() instead of
+ backticks, leaving them susceptible to end-user input. They have
+ been corrected.
+
Also contains various documentation updates and code clean-ups.
+
+Credits go to joernchen <joernchen@phenoelit.de> for finding the
+unsafe constructs in "git cvsserver", and to Jeff King at GitHub for
+finding and fixing instances of the same issue in other scripts.
* The codepath to call external process filter for smudge/clean
operation learned to show the progress meter.
+ * "git rev-parse" learned "--is-shallow-repository", that is to be
+ used in a way similar to existing "--is-bare-repository" and
+ friends.
+
+ * "git describe --match <pattern>" has been taught to play well with
+ the "--all" option.
+
+ * "git branch" learned "-c/-C" to create a new branch by copying an
+ existing one.
+
+ * Some commands (most notably "git status") make an opportunistic
+ update when performing a read-only operation to help optimize later
+ operations in the same repository. The new "--no-optional-locks"
+ option can be passed to Git to disable them.
+
Performance, Internal Implementation, Development Support etc.
* Many leaks of strbuf have been fixed.
+ * "git imap-send" has our own implementation of the protocol and also
+ can use more recent libCurl with the imap protocol support. Update
+ the latter so that it can use the credential subsystem, and then
+ make it the default option to use, so that we can eventually
+ deprecate and remove the former.
+
+ * "make style" runs git-clang-format to help developers by pointing
+ out coding style issues.
+
+ * A test to demonstrate "git mv" failing to adjust nested submodules
+ has been added.
+ (merge c514167df2 hv/mv-nested-submodules-test later to maint).
+
+ * On Cygwin, "ulimit -s" does not report failure but it does not work
+ at all, which causes an unexpected success of some tests that
+ expect failures under a limited stack situation. This has been
+ fixed.
+
+ * Many codepaths have been updated to squelch -Wimplicit-fallthrough
+   warnings from GCC 7 (which is good code hygiene).
+
+ * Add a helper for DLL loading in anticipation of its need in a
+ future topic RSN.
+
+ * "git status --ignored", when noticing that a directory without any
+ tracked path is ignored, still enumerated all the ignored paths in
+ the directory, which is unnecessary. The codepath has been
+ optimized to avoid this overhead.
+
+ * The final batch of "git rebase -i" updates to move more code from
+ the shell script to C has been merged.
+
+ * Operations that do not touch (majority of) packed refs have been
+ optimized by making accesses to packed-refs file lazy; we no longer
+ pre-parse everything, and an access to a single ref in the
+ packed-refs does not touch majority of irrelevant refs, either.
Also contains various documentation updates and code clean-ups.
to match the behaviour of the former.
(merge c818e74332 rk/commit-tree-make-F-verbatim later to maint).
+ * Many codepaths did not diagnose write failures correctly when disks
+ go full, due to their misuse of write_in_full() helper function,
+ which have been corrected.
+ (merge f48ecd38cb jk/write-in-full-fix later to maint).
+
+ * "git help co" now says "co is aliased to ...", not "git co is".
+ (merge b3a8076e0d ks/help-alias-label later to maint).
+
+ * "git archive", especially when used with pathspec, stored an empty
+ directory in its output, even though Git itself never does so.
+ This has been fixed.
+ (merge 4318094047 rs/archive-excluded-directory later to maint).
+
+ * API error-proofing which happens to also squelch warnings from GCC.
+ (merge c788c54cde tg/refs-allowed-flags later to maint).
+
+ * The explanation of the cut-line in the commit log editor has been
+ slightly tweaked.
+ (merge 8c4b1a3593 ks/commit-do-not-touch-cut-line later to maint).
+
+ * "git gc" tries to avoid running two instances at the same time by
+ reading and writing pid/host from and to a lock file; it used to
+ use an incorrect fscanf() format when reading, which has been
+ corrected.
+ (merge afe2fab72c aw/gc-lockfile-fscanf-fix later to maint).
+
+ * The scripts to drive TravisCI have been reorganized and then an
+ optimization to avoid spending cycles on a branch whose tip is
+ tagged has been implemented.
+ (merge 8376eb4a8f ls/travis-scriptify later to maint).
+
+ * The test linter has been taught that we do not like "echo -e".
+ (merge 1a6d46895d tb/test-lint-echo-e later to maint).
+
+ * Code cmp.std.c nitpick.
+ (merge ac7da78ede mh/for-each-string-list-item-empty-fix later to maint).
+
+ * A regression fix for 2.11 that made the code to read the list of
+ alternate object stores overrun the end of the string.
+ (merge f0f7bebef7 jk/info-alternates-fix later to maint).
+
+ * "git describe --match" learned to take multiple patterns in v2.13
+ series, but the feature ignored the patterns after the first one
+ and did not work at all. This has been fixed.
+ (merge da769d2986 jk/describe-omit-some-refs later to maint).
+
+ * "git filter-branch" cannot reproduce a history with a tag without
+ the tagger field, which only ancient versions of Git allowed to be
+ created. This has been corrected.
+ (merge b2c1ca6b4b ic/fix-filter-branch-to-handle-tag-without-tagger later to maint).
+
+ * "git cat-file --textconv" started segfaulting recently, which
+ has been corrected.
+ (merge cc0ea7c9e5 jk/diff-blob later to maint).
+
+ * The built-in pattern to detect the "function header" for HTML did
+ not match <H1>..<H6> elements without any attributes, which has
+ been fixed.
+ (merge 9c03caca2c ik/userdiff-html-h-element-fix later to maint).
+
+ * "git mailinfo" was loose in decoding quoted printable and produced
+ garbage when the two letters after the equal sign are not
+ hexadecimal. This has been fixed.
+ (merge c8cf423eab rs/mailinfo-qp-decode-fix later to maint).
+
+ * The machinery to create xdelta used in pack files received the
+ sizes of the data in size_t, but lost the higher bits of them by
+ storing them in "unsigned int" during the computation, which is
+ fixed.
+
+ * The delta format used in the packfile cannot reference data at an
+   offset larger than what can be expressed in 4 bytes, but the
+ generator for the data failed to make sure the offset does not
+ overflow. This has been corrected.
+
+ * The documentation for '-X<option>' for merges was misleadingly
+ written to suggest that "-s theirs" exists, which is not the case.
+ (merge c25d98b2a7 jc/merge-x-theirs-docfix later to maint).
+
+ * "git fast-export" with -M/-C option issued "copy" instruction on a
+ path that is simultaneously modified, which was incorrect.
+ (merge b3e8ca89cf jt/fast-export-copy-modify-fix later to maint).
+
+ * Many codepaths have been updated to squelch -Wsign-compare
+ warnings.
+ (merge 071bcaab64 rj/no-sign-compare later to maint).
+
+ * Memory leaks in various codepaths have been plugged.
+ (merge 4d01a7fa65 ma/leakplugs later to maint).
+
+ * Recent versions of "git rev-parse --parseopt" did not parse the
+ option specification that does not have the optional flags (*=?!)
+ correctly, which has been corrected.
+ (merge a6304fa4c2 bc/rev-parse-parseopt-fix later to maint).
+
* Other minor doc, test and build updates and code cleanups.
(merge f094b89a4d ma/parse-maybe-bool later to maint).
(merge 39b00fa4d4 jk/drop-sha1-entry-pos later to maint).
(merge 276d0e35c0 ma/split-symref-update-fix later to maint).
(merge 3bc4b8f7c7 bb/doc-eol-dirty later to maint).
(merge c1bb33c99c jk/system-path-cleanup later to maint).
+ (merge ab46e6fc72 cc/subprocess-handshake-missing-capabilities later to maint).
+ (merge f7a32dd97f kd/doc-for-each-ref later to maint).
+ (merge be94568bc7 ez/doc-duplicated-words-fix later to maint).
+ (merge 01e4be6c3d ks/test-readme-phrasofix later to maint).
+ (merge 217bb56d4f hn/typofix later to maint).
+ (merge c08fd6388c jk/doc-read-tree-table-asciidoctor-fix later to maint).
+ (merge c3342b362e ks/doc-use-camelcase-for-config-name later to maint).
+ (merge 0bca165fdb jk/validate-headref-fix later to maint).
+ (merge 93dbefb389 mr/doc-negative-pathspec later to maint).
+ (merge 5e633326e4 ad/doc-markup-fix later to maint).
See linkgit:git-submodule[1] and linkgit:gitmodules[5] for details.
submodule.<name>.update::
- The default update procedure for a submodule. This variable
- is populated by `git submodule init` from the
- linkgit:gitmodules[5] file. See description of 'update'
- command in linkgit:git-submodule[1].
+ The method by which a submodule is updated by 'git submodule update',
+	which is the only command affected by it; others, such as
+	'git checkout --recurse-submodules', are unaffected. It exists for
+ historical reasons, when 'git submodule' was the only command to
+ interact with submodules; settings like `submodule.active`
+ and `pull.rebase` are more specific. It is populated by
+ `git submodule init` from the linkgit:gitmodules[5] file.
+ See description of 'update' command in linkgit:git-submodule[1].
submodule.<name>.branch::
The remote branch name for a submodule, used by `git submodule
the working tree. Note that older versions of Git used
to ignore removed files; use `--no-all` option if you want
to add modified or new files but ignore removed ones.
++
+For more details about the <pathspec> syntax, see the 'pathspec' entry
+in linkgit:gitglossary[7].
-n::
--dry-run::
'git branch' (--set-upstream-to=<upstream> | -u <upstream>) [<branchname>]
'git branch' --unset-upstream [<branchname>]
'git branch' (-m | -M) [<oldbranch>] <newbranch>
+'git branch' (-c | -C) [<oldbranch>] <newbranch>
'git branch' (-d | -D) [-r] <branchname>...
'git branch' --edit-description [<branchname>]
renaming. If <newbranch> exists, -M must be used to force the rename
to happen.
+The `-c` and `-C` options have the exact same semantics as `-m` and
+`-M`, except that instead of being renamed, the branch, along with its
+config and reflog, is copied to a new name.
+
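A short illustration (the branch names are made up): copying leaves the original branch in place while duplicating its reflog and `branch.<name>.*` configuration:

------------
$ git branch -c topic topic-backup   # copy 'topic' to 'topic-backup'
$ git branch -C topic topic-backup   # same, but overwrite an existing 'topic-backup'
------------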
With a `-d` or `-D` option, `<branchname>` will be deleted. You may
specify more than one branch for deletion. If the branch currently
has a reflog then the reflog will also be deleted.
all changes made to the branch ref, enabling use of date
based sha1 expressions such as "<branchname>@\{yesterday}".
Note that in non-bare repositories, reflogs are usually
- enabled by default by the `core.logallrefupdates` config option.
+ enabled by default by the `core.logAllRefUpdates` config option.
The negated form `--no-create-reflog` only overrides an earlier
`--create-reflog`, but currently does not negate the setting of
- `core.logallrefupdates`.
+ `core.logAllRefUpdates`.
-f::
--force::
In combination with `-d` (or `--delete`), allow deleting the
branch irrespective of its merged status. In combination with
`-m` (or `--move`), allow renaming the branch even if the new
- branch name already exists.
+	branch name already exists; the same applies to `-c` (or `--copy`).
-m::
--move::
-M::
Shortcut for `--move --force`.
+-c::
+--copy::
+ Copy a branch and the corresponding reflog.
+
+-C::
+ Shortcut for `--copy --force`.
+
--color[=<when>]::
Color branches to highlight current, local, and
remote-tracking branches.
The 40-hex object name of the object.
`objecttype`::
- The type of of the object (the same as `cat-file -t` reports).
+ The type of the object (the same as `cat-file -t` reports).
`objectsize`::
The size, in bytes, of the object (the same as `cat-file -s`
------------
+
You could omit <branch>, in which case the command degenerates to
-"check out the current branch", which is a glorified no-op with a
+"check out the current branch", which is a glorified no-op with
rather expensive side-effects to show only the tracking information,
if exists, for the current branch.
--match <pattern>::
Only consider tags matching the given `glob(7)` pattern,
- excluding the "refs/tags/" prefix. This can be used to avoid
- leaking private tags from the repository. If given multiple times, a
- list of patterns will be accumulated, and tags matching any of the
- patterns will be considered. Use `--no-match` to clear and reset the
- list of patterns.
+ excluding the "refs/tags/" prefix. If used with `--all`, it also
+ considers local branches and remote-tracking references matching the
+ pattern, excluding respectively "refs/heads/" and "refs/remotes/"
+ prefix; references of other types are never considered. If given
+ multiple times, a list of patterns will be accumulated, and tags
+ matching any of the patterns will be considered. Use `--no-match` to
+ clear and reset the list of patterns.
--exclude <pattern>::
Do not consider tags matching the given `glob(7)` pattern, excluding
- the "refs/tags/" prefix. This can be used to narrow the tag space and
- find only tags matching some meaningful criteria. If given multiple
- times, a list of patterns will be accumulated and tags matching any
- of the patterns will be excluded. When combined with --match a tag will
- be considered when it matches at least one --match pattern and does not
+ the "refs/tags/" prefix. If used with `--all`, it also does not consider
+ local branches and remote-tracking references matching the pattern,
+ excluding respectively "refs/heads/" and "refs/remotes/" prefix;
+ references of other types are never considered. If given multiple times,
+ a list of patterns will be accumulated and tags matching any of the
+ patterns will be excluded. When combined with --match a tag will be
+ considered when it matches at least one --match pattern and does not
match any of the --exclude patterns. Use `--no-exclude` to clear and
reset the list of patterns.
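For example (the patterns and ref names are made up), the patterns apply to branches and remote-tracking refs as well once `--all` is given:

------------
$ git describe --match 'v[0-9]*'                        # tags only
$ git describe --all --match 'topic/*' --exclude 'topic/wip-*'
------------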
[--commit-filter <command>] [--tag-name-filter <command>]
[--subdirectory-filter <directory>] [--prune-empty]
[--original <namespace>] [-d <directory>] [-f | --force]
- [--] [<rev-list options>...]
+ [--state-branch <branch>] [--] [<rev-list options>...]
DESCRIPTION
-----------
directory or when there are already refs starting with
'refs/original/', unless forced.
+--state-branch <branch>::
+ This option will cause the mapping from old to new objects to
+	be loaded from the named branch upon startup and saved as a new
+	commit to that branch upon exit, enabling incremental rewriting of
+	large trees. If '<branch>' does not exist it will be created.
+
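A rough usage sketch (the state branch name and the filter are illustrative, not part of this patch); re-running the same command later resumes from the saved old-to-new mapping instead of starting over:

------------
$ git filter-branch --state-branch refs/heads/filter-state \
	--index-filter 'git rm --cached --ignore-unmatch secrets.txt' HEAD
------------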
<rev-list options>...::
Arguments for 'git rev-list'. All positive refs included by
these options are rewritten. You may also specify options
[verse]
'git for-each-ref' [--count=<count>] [--shell|--perl|--python|--tcl]
[(--sort=<key>)...] [--format=<format>] [<pattern>...]
- [--points-at <object>] [(--merged | --no-merged) [<object>]]
- [--contains [<object>]] [--no-contains [<object>]]
+ [--points-at=<object>]
+ (--merged[=<object>] | --no-merged[=<object>])
+ [--contains[=<object>]] [--no-contains[=<object>]]
DESCRIPTION
-----------
OPTIONS
-------
-<count>::
+<pattern>...::
+ If one or more patterns are given, only refs are shown that
+ match against at least one pattern, either using fnmatch(3) or
+ literally, in the latter case matching completely or from the
+ beginning up to a slash.
+
+--count=<count>::
By default the command shows all refs that match
`<pattern>`. This option makes it stop after showing
that many refs.
-<key>::
+--sort=<key>::
A field name to sort on. Prefix `-` to sort in
descending order of the value. When unspecified,
`refname` is used. You may use the --sort=<key> option
multiple times, in which case the last key becomes the primary
key.
-<format>::
+--format=<format>::
A string that interpolates `%(fieldname)` from a ref being shown
and the object it points at. If `fieldname`
is prefixed with an asterisk (`*`) and the ref points
`xx`; for example `%00` interpolates to `\0` (NUL),
`%09` to `\t` (TAB) and `%0a` to `\n` (LF).
-<pattern>...::
- If one or more patterns are given, only refs are shown that
- match against at least one pattern, either using fnmatch(3) or
- literally, in the latter case matching completely or from the
- beginning up to a slash.
-
--shell::
--perl::
--python::
the specified host language. This is meant to produce
a scriptlet that can directly be `eval`ed.
---points-at <object>::
+--points-at=<object>::
	Only list refs which point at the given object.
---merged [<object>]::
+--merged[=<object>]::
Only list refs whose tips are reachable from the
specified commit (HEAD if not specified),
incompatible with `--no-merged`.
---no-merged [<object>]::
+--no-merged[=<object>]::
Only list refs whose tips are not reachable from the
specified commit (HEAD if not specified),
incompatible with `--merged`.
---contains [<object>]::
+--contains[=<object>]::
Only list refs which contain the specified commit (HEAD if not
specified).
---no-contains [<object>]::
+--no-contains[=<object>]::
Only list refs which don't contain the specified commit (HEAD
if not specified).
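Putting a few of the options together (the format fields and pattern are just an example):

------------
$ git for-each-ref --count=5 --sort=-committerdate \
	--format='%(refname:short) %(objectname:short)' 'refs/heads/*'
------------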
<pathspec>...::
If given, limit the search to paths matching at least one pattern.
Both leading paths match and glob(7) patterns are supported.
++
+For more details about the <pathspec> syntax, see the 'pathspec' entry
+in linkgit:gitglossary[7].
Examples
--------
Looks for a line that has `NODE` or `Unexpected` in
files that have lines that match both.
+`git grep solution -- :^Documentation`::
+ Looks for `solution`, excluding files in `Documentation`.
+
GIT
---
Part of the linkgit:git[1] suite
object that does not have notes attached to it.
--stdin::
- Also read the object names to remove notes from from the standard
+ Also read the object names to remove notes from the standard
input (there is no reason you cannot combine this with object
names from the command line).
"clean" means that index and work tree coincide, and "exists"/"nothing"
refer to the presence of a path in the specified commit:
+....
I H M Result
-------------------------------------------------------
0 nothing nothing nothing (does not happen)
19 no no yes exists exists keep index
20 yes yes no exists exists use M
21 no yes no exists exists fail
+....
In all "keep index" cases, the index entry stays as in the
original index file. If the entry is not up to date,
--autosquash::
--no-autosquash::
When the commit log message begins with "squash! ..." (or
- "fixup! ..."), and there is a commit whose title begins with
- the same ..., automatically modify the todo list of rebase -i
- so that the commit marked for squashing comes right after the
- commit to be modified, and change the action of the moved
- commit from `pick` to `squash` (or `fixup`). Ignores subsequent
- "fixup! " or "squash! " after the first, in case you referred to an
- earlier fixup/squash with `git commit --fixup/--squash`.
+	"fixup! ..."), and there is already a commit in the todo list that
+	matches the same `...`, automatically modify the todo list of
+	`rebase -i` so that the commit marked for squashing comes right
+	after the commit to be modified, and change the action of the moved
+	commit from `pick` to `squash` (or `fixup`). A commit matches the
+	`...` if the commit subject matches, or if the `...` refers to the
+	commit's hash. As a fall-back, partial matches of the commit subject
+	work, too. The recommended way to create fixup/squash commits is by
+	using the `--fixup`/`--squash` options of linkgit:git-commit[1].
+
This option is only valid when the `--interactive` option is used.
+
--is-bare-repository::
When the repository is bare print "true", otherwise "false".
+--is-shallow-repository::
+ When the repository is shallow print "true", otherwise "false".
+
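Scripts can branch on the answer the same way they already do for `--is-bare-repository`; for instance, in an ordinary (non-shallow) clone:

------------
$ git rev-parse --is-shallow-repository
false
------------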
--resolve-git-dir <path>::
Check if <path> is a valid repository or a gitfile that
points at a valid repository, and print the location of the
$ chmod +x $HOME/git-shell-commands/no-interactive-login
----------------
+To enable git-cvsserver access (which should generally have the
+`no-interactive-login` example above as a prerequisite, as creating
+the git-shell-commands directory allows interactive logins):
+
+----------------
+$ cat >$HOME/git-shell-commands/cvs <<\EOF
+if ! test $# = 1 || test "$1" != "server"
+then
+ echo >&2 "git-cvsserver only handles \"server\""
+ exit 1
+fi
+exec git cvsserver server
+EOF
+$ chmod +x $HOME/git-shell-commands/cvs
+----------------
+
SEE ALSO
--------
ssh(1),
without options are equivalent to 'always' and 'never'
respectively.
+<pathspec>...::
+ See the 'pathspec' entry in linkgit:gitglossary[7].
OUTPUT
------
`core.logAllRefUpdates` in linkgit:git-config[1].
The negated form `--no-create-reflog` only overrides an earlier
`--create-reflog`, but currently does not negate the setting of
- `core.logallrefupdates`.
+ `core.logAllRefUpdates`.
<tagname>::
The name of the tag to create, delete, or describe.
+
Version 4 performs a simple pathname compression that reduces index
size by 30%-50% on large repositories, which results in faster load
-time. Version 4 is relatively young (first released in in 1.8.0 in
+time. Version 4 is relatively young (first released in 1.8.0 in
October 2012). Other Git implementations such as JGit and libgit2
may not support it yet.
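To opt in, use the standard `git update-index` knob (not something introduced here):

------------
$ git update-index --index-version 4
------------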
Note that omitting the `=` in `git -c foo.bar ...` is allowed and sets
`foo.bar` to the boolean true value (just like `[foo]bar` would in a
config file). Including the equals but with an empty value (like `git -c
-foo.bar= ...`) sets `foo.bar` to the empty string which ` git config
+foo.bar= ...`) sets `foo.bar` to the empty string which `git config
--bool` will convert to `false`.
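A small illustration, reusing the placeholder key from the text above:

------------
$ git -c foo.bar config --bool foo.bar    # prints "true"
$ git -c foo.bar= config --bool foo.bar   # prints "false"
------------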
--exec-path[=<path>]::
Add "icase" magic to all pathspec. This is equivalent to setting
the `GIT_ICASE_PATHSPECS` environment variable to `1`.
+--no-optional-locks::
+ Do not perform optional operations that require locks. This is
+ equivalent to setting the `GIT_OPTIONAL_LOCKS` to `0`.
+
GIT COMMANDS
------------
which feed potentially-untrusted URLs to git commands. See
linkgit:git-config[1] for more details.
+`GIT_OPTIONAL_LOCKS`::
+ If set to `0`, Git will complete any requested operation without
+ performing any optional sub-operations that require taking a lock.
+ For example, this will prevent `git status` from refreshing the
+ index as a side effect. This is useful for processes running in
+ the background which do not want to cause lock contention with
+ other operations on the repository. Defaults to `1`.
+
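For example, a background poller (the `--porcelain` flag is merely a typical choice for scripted use) can avoid contending for `index.lock`:

------------
$ git --no-optional-locks status --porcelain
$ GIT_OPTIONAL_LOCKS=0 git status --porcelain    # equivalent, via the environment
------------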
Discussion[[Discussion]]
------------------------
commit-msg
~~~~~~~~~~
-This hook is invoked by 'git commit', and can be bypassed
-with the `--no-verify` option. It takes a single parameter, the
-name of the file that holds the proposed commit log message.
-Exiting with a non-zero status causes the 'git commit' to
-abort.
+This hook is invoked by 'git commit' and 'git merge', and can be
+bypassed with the `--no-verify` option. It takes a single parameter,
+the name of the file that holds the proposed commit log message.
+Exiting with a non-zero status causes the command to abort.
The hook is allowed to edit the message file in place, and can be used
to normalize the message into some project standard format. It
exclude;;
After a path matches any non-exclude pathspec, it will be run
- through all exclude pathspec (magic signature: `!` or its
+ through all exclude pathspecs (magic signature: `!` or its
synonym `^`). If it matches, the path is ignored. When there
is no non-exclude pathspec, the exclusion is applied to the
result set as if invoked without any pathspec.
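For instance (the paths are made up), either spelling of the exclude magic works:

------------
$ git log --oneline -- . ':(exclude)Documentation'
$ git log --oneline -- . ':!Documentation'
------------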
the other tree did, declaring 'our' history contains all that happened in it.
theirs;;
- This is the opposite of 'ours'.
+ This is the opposite of 'ours'; note that, unlike 'ours', there is
+	no 'theirs' merge strategy to confuse this merge option with.
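In other words (branch name made up):

------------
$ git merge -X theirs topic   # option to the default 'recursive' strategy
$ git merge -s theirs topic   # fails: there is no 'theirs' merge strategy
------------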
patience;;
With this option, 'merge-recursive' spends a little extra time
+++ /dev/null
-string-list API
-===============
-
-The string_list API offers a data structure and functions to handle
-sorted and unsorted string lists. A "sorted" list is one whose
-entries are sorted by string value in `strcmp()` order.
-
-The 'string_list' struct used to be called 'path_list', but was renamed
-because it is not specific to paths.
-
-The caller:
-
-. Allocates and clears a `struct string_list` variable.
-
-. Initializes the members. You might want to set the flag `strdup_strings`
- if the strings should be strdup()ed. For example, this is necessary
- when you add something like git_path("..."), since that function returns
- a static buffer that will change with the next call to git_path().
-+
-If you need something advanced, you can manually malloc() the `items`
-member (you need this if you add things later) and you should set the
-`nr` and `alloc` members in that case, too.
-
-. Adds new items to the list, using `string_list_append`,
- `string_list_append_nodup`, `string_list_insert`,
- `string_list_split`, and/or `string_list_split_in_place`.
-
-. Can check if a string is in the list using `string_list_has_string` or
- `unsorted_string_list_has_string` and get it from the list using
- `string_list_lookup` for sorted lists.
-
-. Can sort an unsorted list using `string_list_sort`.
-
-. Can remove duplicate items from a sorted list using
- `string_list_remove_duplicates`.
-
-. Can remove individual items of an unsorted list using
- `unsorted_string_list_delete_item`.
-
-. Can remove items not matching a criterion from a sorted or unsorted
- list using `filter_string_list`, or remove empty strings using
- `string_list_remove_empty_items`.
-
-. Finally it should free the list using `string_list_clear`.
-
-Example:
-
-----
-struct string_list list = STRING_LIST_INIT_NODUP;
-int i;
-
-string_list_append(&list, "foo");
-string_list_append(&list, "bar");
-for (i = 0; i < list.nr; i++)
- printf("%s\n", list.items[i].string)
-----
-
-NOTE: It is more efficient to build an unsorted list and sort it
-afterwards, instead of building a sorted list (`O(n log n)` instead of
-`O(n^2)`).
-+
-However, if you use the list to check if a certain string was added
-already, you should not do that (using unsorted_string_list_has_string()),
-because the complexity would be quadratic again (but with a worse factor).
-
-Functions
----------
-
-* General ones (works with sorted and unsorted lists as well)
-
-`string_list_init`::
-
- Initialize the members of the string_list, set `strdup_strings`
- member according to the value of the second parameter.
-
-`filter_string_list`::
-
- Apply a function to each item in a list, retaining only the
- items for which the function returns true. If free_util is
- true, call free() on the util members of any items that have
- to be deleted. Preserve the order of the items that are
- retained.
-
-`string_list_remove_empty_items`::
-
- Remove any empty strings from the list. If free_util is true,
- call free() on the util members of any items that have to be
- deleted. Preserve the order of the items that are retained.
-
-`print_string_list`::
-
- Dump a string_list to stdout, useful mainly for debugging purposes. It
- can take an optional header argument and it writes out the
- string-pointer pairs of the string_list, each one in its own line.
-
-`string_list_clear`::
-
- Free a string_list. The `string` pointer of the items will be freed in
- case the `strdup_strings` member of the string_list is set. The second
- parameter controls if the `util` pointer of the items should be freed
- or not.
-
-* Functions for sorted lists only
-
-`string_list_has_string`::
-
- Determine if the string_list has a given string or not.
-
-`string_list_insert`::
-
- Insert a new element to the string_list. The returned pointer can be
- handy if you want to write something to the `util` pointer of the
- string_list_item containing the just added string. If the given
- string already exists the insertion will be skipped and the
- pointer to the existing item returned.
-+
-Since this function uses xrealloc() (which die()s if it fails) if the
-list needs to grow, it is safe not to check the pointer. I.e. you may
-write `string_list_insert(...)->util = ...;`.
-
-`string_list_lookup`::
-
- Look up a given string in the string_list, returning the containing
- string_list_item. If the string is not found, NULL is returned.
-
-`string_list_remove_duplicates`::
-
- Remove all but the first of consecutive entries that have the
- same string value. If free_util is true, call free() on the
- util members of any items that have to be deleted.
-
-* Functions for unsorted lists only
-
-`string_list_append`::
-
- Append a new string to the end of the string_list. If
- `strdup_string` is set, then the string argument is copied;
- otherwise the new `string_list_entry` refers to the input
- string.
-
-`string_list_append_nodup`::
-
- Append a new string to the end of the string_list. The new
- `string_list_entry` always refers to the input string, even if
- `strdup_string` is set. This function can be used to hand
- ownership of a malloc()ed string to a `string_list` that has
- `strdup_string` set.
-
-`string_list_sort`::
-
- Sort the list's entries by string value in `strcmp()` order.
-
-`unsorted_string_list_has_string`::
-
- It's like `string_list_has_string()` but for unsorted lists.
-
-`unsorted_string_list_lookup`::
-
- It's like `string_list_lookup()` but for unsorted lists.
-+
-The above two functions need to look through all items, as opposed to their
-counterpart for sorted lists, which performs a binary search.
-
-`unsorted_string_list_delete_item`::
-
- Remove an item from a string_list. The `string` pointer of the items
- will be freed in case the `strdup_strings` member of the string_list
- is set. The third parameter controls if the `util` pointer of the
- items should be freed or not.
-
-`string_list_split`::
-`string_list_split_in_place`::
-
- Split a string into substrings on a delimiter character and
- append the substrings to a `string_list`. If `maxsplit` is
- non-negative, then split at most `maxsplit` times. Return the
- number of substrings appended to the list.
-+
-`string_list_split` requires a `string_list` that has `strdup_strings`
-set to true; it leaves the input string untouched and makes copies of
-the substrings in newly-allocated memory.
-`string_list_split_in_place` requires a `string_list` that has
-`strdup_strings` set to false; it splits the input string in place,
-overwriting the delimiter characters with NULs and creating new
-string_list_items that point into the original string (the original
-string must therefore not be modified or freed while the `string_list`
-is in use).
-
-
-Data structures
----------------
-
-* `struct string_list_item`
-
-Represents an item of the list. The `string` member is a pointer to the
-string, and you may use the `util` member for any purpose, if you want.
-
-* `struct string_list`
-
-Represents the list itself.
-
-. The array of items are available via the `items` member.
-. The `nr` member contains the number of items stored in the list.
-. The `alloc` member is used to avoid reallocating at every insertion.
- You should not tamper with it.
-. Setting the `strdup_strings` member to 1 will strdup() the strings
- before adding them, see above.
-. The `compare_strings_fn` member is used to specify a custom compare
- function, otherwise `strcmp()` is used as the default function.
#
# Define NO_MMAP if you want to avoid mmap.
#
+# Define MMAP_PREVENTS_DELETE if a file that is currently mmapped cannot be
+# deleted or cannot be replaced using rename().
+#
# Define NO_SYS_POLL_H if you don't have sys/poll.h.
#
# Define NO_POLL if you do not have or don't want to use poll().
COMPAT_OBJS += compat/win32mmap.o
endif
endif
+ifdef MMAP_PREVENTS_DELETE
+ BASIC_CFLAGS += -DMMAP_PREVENTS_DELETE
+endif
ifdef OBJECT_CREATION_USES_RENAMES
COMPAT_CFLAGS += -DOBJECT_CREATION_MODE=1
endif
.PHONY: sparse $(SP_OBJ)
sparse: $(SP_OBJ)
+.PHONY: style
+style:
+ git clang-format --style file --diff --extensions c,h
+
check: common-cmds.h
@if sparse; \
then \
return retval;
}
+/*
+ * Resolve `path` into an absolute, cleaned-up path. The return value
+ * comes from a shared buffer.
+ */
const char *real_path(const char *path)
{
static struct strbuf realpath = STRBUF_INIT;
if (plen && (ws_rule & WS_BLANK_AT_EOF) &&
ws_blank_line(patch + 1, plen, ws_rule))
is_blank_context = 1;
+ /* fallthrough */
case '-':
memcpy(old, patch + 1, plen);
add_line_info(&preimage, old, plen,
old += plen;
if (first == '-')
break;
- /* Fall-through for ' ' */
+ /* fallthrough */
case '+':
/* --no-add does not add new lines */
if (first == '+' && state->no_add)
return check && ATTR_TRUE(check->items[1].value);
}
-static int should_queue_directories(const struct archiver_args *args)
-{
- return args->pathspec.has_wildcard;
-}
-
static int write_archive_entry(const unsigned char *sha1, const char *base,
int baselen, const char *filename, unsigned mode, int stage,
void *context)
strbuf_addch(&path, '/');
path_without_prefix = path.buf + args->baselen;
- if (!S_ISDIR(mode) || !should_queue_directories(args)) {
+ if (!S_ISDIR(mode)) {
const struct attr_check *check;
check = get_archive_attrs(path_without_prefix);
if (check_attr_export_ignore(check))
return write_entry(args, sha1, path.buf, path.len, mode);
}
-static int write_archive_entry_buf(const unsigned char *sha1, struct strbuf *base,
- const char *filename, unsigned mode, int stage,
- void *context)
-{
- return write_archive_entry(sha1, base->buf, base->len,
- filename, mode, stage, context);
-}
-
static void queue_directory(const unsigned char *sha1,
struct strbuf *base, const char *filename,
unsigned mode, int stage, struct archiver_context *c)
}
err = read_tree_recursive(args->tree, "", 0, 0, &args->pathspec,
- should_queue_directories(args) ?
- queue_or_write_archive_entry :
- write_archive_entry_buf,
+ queue_or_write_archive_entry,
&context);
if (err == READ_TREE_RECURSIVE)
err = 0;
/* Clean up objects used, as they will be reused. */
clear_commit_marks_for_object_array(&pending_copy, ALL_REV_FLAGS);
- free(pending_copy.objects);
+
+ object_array_clear(&pending_copy);
return res;
}
N_("git branch [<options>] [-l] [-f] <branch-name> [<start-point>]"),
N_("git branch [<options>] [-r] (-d | -D) <branch-name>..."),
N_("git branch [<options>] (-m | -M) [<old-branch>] <new-branch>"),
+ N_("git branch [<options>] (-c | -C) [<old-branch>] <new-branch>"),
N_("git branch [<options>] [-r | -a] [--points-at]"),
N_("git branch [<options>] [-r | -a] [--format]"),
NULL
free_worktrees(worktrees);
}
-static void rename_branch(const char *oldname, const char *newname, int force)
+static void copy_or_rename_branch(const char *oldname, const char *newname, int copy, int force)
{
struct strbuf oldref = STRBUF_INIT, newref = STRBUF_INIT, logmsg = STRBUF_INIT;
struct strbuf oldsection = STRBUF_INIT, newsection = STRBUF_INIT;
int recovery = 0;
int clobber_head_ok;
- if (!oldname)
- die(_("cannot rename the current branch while not on any."));
+ if (!oldname) {
+ if (copy)
+ die(_("cannot copy the current branch while not on any."));
+ else
+ die(_("cannot rename the current branch while not on any."));
+ }
if (strbuf_check_branch_ref(&oldref, oldname)) {
/*
reject_rebase_or_bisect_branch(oldref.buf);
- strbuf_addf(&logmsg, "Branch: renamed %s to %s",
- oldref.buf, newref.buf);
+ if (copy)
+ strbuf_addf(&logmsg, "Branch: copied %s to %s",
+ oldref.buf, newref.buf);
+ else
+ strbuf_addf(&logmsg, "Branch: renamed %s to %s",
+ oldref.buf, newref.buf);
- if (rename_ref(oldref.buf, newref.buf, logmsg.buf))
+ if (!copy && rename_ref(oldref.buf, newref.buf, logmsg.buf))
die(_("Branch rename failed"));
+ if (copy && copy_existing_ref(oldref.buf, newref.buf, logmsg.buf))
+ die(_("Branch copy failed"));
- if (recovery)
- warning(_("Renamed a misnamed branch '%s' away"), oldref.buf + 11);
+ if (recovery) {
+ if (copy)
+ warning(_("Copied a misnamed branch '%s' away"),
+ oldref.buf + 11);
+ else
+ warning(_("Renamed a misnamed branch '%s' away"),
+ oldref.buf + 11);
+ }
- if (replace_each_worktree_head_symref(oldref.buf, newref.buf, logmsg.buf))
+ if (!copy &&
+ replace_each_worktree_head_symref(oldref.buf, newref.buf, logmsg.buf))
die(_("Branch renamed to %s, but HEAD is not updated!"), newname);
strbuf_release(&logmsg);
strbuf_release(&oldref);
strbuf_addf(&newsection, "branch.%s", newref.buf + 11);
strbuf_release(&newref);
- if (git_config_rename_section(oldsection.buf, newsection.buf) < 0)
+ if (!copy && git_config_rename_section(oldsection.buf, newsection.buf) < 0)
die(_("Branch is renamed, but update of config-file failed"));
+ if (copy && strcmp(oldname, newname) && git_config_copy_section(oldsection.buf, newsection.buf) < 0)
+ die(_("Branch is copied, but update of config-file failed"));
strbuf_release(&oldsection);
strbuf_release(&newsection);
}
int cmd_branch(int argc, const char **argv, const char *prefix)
{
- int delete = 0, rename = 0, force = 0, list = 0;
+ int delete = 0, rename = 0, copy = 0, force = 0, list = 0;
int reflog = 0, edit_description = 0;
int quiet = 0, unset_upstream = 0;
const char *new_upstream = NULL;
OPT_BIT('D', NULL, &delete, N_("delete branch (even if not merged)"), 2),
OPT_BIT('m', "move", &rename, N_("move/rename a branch and its reflog"), 1),
OPT_BIT('M', NULL, &rename, N_("move/rename a branch, even if target exists"), 2),
+		OPT_BIT('c', "copy", &copy, N_("copy a branch and its reflog"), 1),
+		OPT_BIT('C', NULL, &copy, N_("copy a branch, even if target exists"), 2),
OPT_BOOL(0, "list", &list, N_("list branch names")),
OPT_BOOL('l', "create-reflog", &reflog, N_("create the branch's reflog")),
OPT_BOOL(0, "edit-description", &edit_description,
argc = parse_options(argc, argv, prefix, options, builtin_branch_usage,
0);
- if (!delete && !rename && !edit_description && !new_upstream && !unset_upstream && argc == 0)
+ if (!delete && !rename && !copy && !edit_description && !new_upstream && !unset_upstream && argc == 0)
list = 1;
if (filter.with_commit || filter.merge != REF_FILTER_MERGED_NONE || filter.points_at.nr ||
filter.no_commit)
list = 1;
- if (!!delete + !!rename + !!new_upstream +
+ if (!!delete + !!rename + !!copy + !!new_upstream +
list + unset_upstream > 1)
usage_with_options(builtin_branch_usage, options);
if (force) {
delete *= 2;
rename *= 2;
+ copy *= 2;
}
if (delete) {
if (edit_branch_description(branch_name))
return 1;
+ } else if (copy) {
+ if (!argc)
+ die(_("branch name required"));
+ else if (argc == 1)
+ copy_or_rename_branch(head, argv[0], 1, copy > 1);
+ else if (argc == 2)
+ copy_or_rename_branch(argv[0], argv[1], 1, copy > 1);
+ else
+ die(_("too many branches for a copy operation"));
} else if (rename) {
if (!argc)
die(_("branch name required"));
else if (argc == 1)
- rename_branch(head, argv[0], rename > 1);
+ copy_or_rename_branch(head, argv[0], 0, rename > 1);
else if (argc == 2)
- rename_branch(argv[0], argv[1], rename > 1);
+ copy_or_rename_branch(argv[0], argv[1], 0, rename > 1);
else
die(_("too many branches for a rename operation"));
} else if (new_upstream) {
return !has_object_file(&oid);
case 'w':
- if (!path[0])
+ if (!path)
die("git cat-file --filters %s: <object> must be "
"<sha1:path>", obj_name);
break;
case 'c':
- if (!path[0])
+ if (!path)
die("git cat-file --textconv %s: <object> must be <sha1:path>",
obj_name);
if (textconv_object(path, obj_context.mode, &oid, 1, &buf, &size))
break;
+ /* else fallthrough */
case 'p':
type = sha1_object_info(oid.hash, NULL);
* update paths in the work tree, and we cannot revert
* them.
*/
+ /* fallthrough */
case 0:
return 0;
default:
for_each_ref(add_pending_uninteresting_ref, &revs);
add_pending_oid(&revs, "HEAD", &new->object.oid, UNINTERESTING);
+ /* Save pending objects, so they can be cleaned up later. */
refs = revs.pending;
revs.leak_pending = 1;
+ /*
+ * prepare_revision_walk (together with .leak_pending = 1) makes us
+ * the sole owner of the list of pending objects.
+ */
if (prepare_revision_walk(&revs))
die(_("internal error in revision walk"));
if (!(old->object.flags & UNINTERESTING))
else
describe_detached_head(_("Previous HEAD position was"), old);
+ /* Clean up objects used, as they will be reused. */
clear_commit_marks_for_object_array(&refs, ALL_REV_FLAGS);
- free(refs.objects);
+
+ object_array_clear(&refs);
}
static int switch_branches(const struct checkout_opts *opts,
static const char *prepare_index(int argc, const char **argv, const char *prefix,
const struct commit *current_head, int is_status)
{
- struct string_list partial;
+ struct string_list partial = STRING_LIST_INIT_DUP;
struct pathspec pathspec;
int refresh_flags = REFRESH_QUIET;
const char *ret;
warning(_("Failed to update main cache tree"));
commit_style = COMMIT_NORMAL;
- return get_lock_file_path(&index_lock);
+ ret = get_lock_file_path(&index_lock);
+ goto out;
}
/*
if (write_locked_index(&the_index, &index_lock, CLOSE_LOCK))
die(_("unable to write new_index file"));
commit_style = COMMIT_NORMAL;
- return get_lock_file_path(&index_lock);
+ ret = get_lock_file_path(&index_lock);
+ goto out;
}
/*
rollback_lock_file(&index_lock);
}
commit_style = COMMIT_AS_IS;
- return get_index_file();
+ ret = get_index_file();
+ goto out;
}
/*
die(_("cannot do a partial commit during a cherry-pick."));
}
- string_list_init(&partial, 1);
if (list_paths(&partial, !current_head ? NULL : "HEAD", prefix, &pathspec))
exit(1);
discard_cache();
ret = get_lock_file_path(&false_lock);
read_cache_from(ret);
+out:
+ string_list_clear(&partial, 0);
+ clear_pathspec(&pathspec);
return ret;
}
read_cache_preload(&s.pathspec);
refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, &s.pathspec, NULL, NULL);
- fd = hold_locked_index(&index_lock, 0);
+ if (use_optional_locks())
+ fd = hold_locked_index(&index_lock, 0);
+ else
+ fd = -1;
s.is_initial = get_oid(s.reference, &oid) ? 1 : 0;
if (!s.is_initial)
static int get_name(const char *path, const struct object_id *oid, int flag, void *cb_data)
{
- int is_tag = starts_with(path, "refs/tags/");
+ int is_tag = 0;
struct object_id peeled;
int is_annotated, prio;
-
- /* Reject anything outside refs/tags/ unless --all */
- if (!all && !is_tag)
+ const char *path_to_match = NULL;
+
+ if (skip_prefix(path, "refs/tags/", &path_to_match)) {
+ is_tag = 1;
+ } else if (all) {
+ if ((exclude_patterns.nr || patterns.nr) &&
+ !skip_prefix(path, "refs/heads/", &path_to_match) &&
+ !skip_prefix(path, "refs/remotes/", &path_to_match)) {
+ /* Only accept reference of known type if there are match/exclude patterns */
+ return 0;
+ }
+ } else {
+ /* Reject anything outside refs/tags/ unless --all */
return 0;
+ }
/*
* If we're given exclude patterns, first exclude any tag which match
if (exclude_patterns.nr) {
struct string_list_item *item;
- if (!is_tag)
- return 0;
-
for_each_string_list_item(item, &exclude_patterns) {
- if (!wildmatch(item->string, path + 10, 0))
+ if (!wildmatch(item->string, path_to_match, 0))
return 0;
}
}
* pattern.
*/
if (patterns.nr) {
+ int found = 0;
struct string_list_item *item;
- if (!is_tag)
- return 0;
-
for_each_string_list_item(item, &patterns) {
- if (!wildmatch(item->string, path + 10, 0))
+ if (!wildmatch(item->string, path_to_match, 0)) {
+ found = 1;
break;
+ }
+ }
- /* If we get here, no pattern matched. */
+ if (!found)
return 0;
- }
}
/* Is it annotated? */
struct diff_options *options, void *data)
{
int i;
+ struct string_list *changed = data;
/*
* Handle files below a directory first, in case they are all deleted
case DIFF_STATUS_DELETED:
printf("D ");
print_path(spec->path);
+ string_list_insert(changed, spec->path);
putchar('\n');
break;
case DIFF_STATUS_COPIED:
case DIFF_STATUS_RENAMED:
- printf("%c ", q->queue[i]->status);
- print_path(ospec->path);
- putchar(' ');
- print_path(spec->path);
- putchar('\n');
-
- if (!oidcmp(&ospec->oid, &spec->oid) &&
- ospec->mode == spec->mode)
- break;
+ /*
+ * If a change in the file corresponding to ospec->path
+ * has been observed, we cannot trust its contents
+ * because the diff is calculated based on the prior
+ * contents, not the current contents. So, declare a
+ * copy or rename only if there was no change observed.
+ */
+ if (!string_list_has_string(changed, ospec->path)) {
+ printf("%c ", q->queue[i]->status);
+ print_path(ospec->path);
+ putchar(' ');
+ print_path(spec->path);
+ string_list_insert(changed, spec->path);
+ putchar('\n');
+
+ if (!oidcmp(&ospec->oid, &spec->oid) &&
+ ospec->mode == spec->mode)
+ break;
+ }
/* fallthrough */
case DIFF_STATUS_TYPE_CHANGED:
get_object_mark(object));
}
print_path(spec->path);
+ string_list_insert(changed, spec->path);
putchar('\n');
break;
*end = out->buf + out->len;
}
-static void handle_commit(struct commit *commit, struct rev_info *rev)
+static void handle_commit(struct commit *commit, struct rev_info *rev,
+ struct string_list *paths_of_changed_objects)
{
int saved_output_format = rev->diffopt.output_format;
const char *commit_buffer;
if (full_tree)
printf("deleteall\n");
log_tree_diff_flush(rev);
+ string_list_clear(paths_of_changed_objects, 0);
rev->diffopt.output_format = saved_output_format;
printf("\n");
return strbuf_detach(&out, len);
}
-static void handle_tail(struct object_array *commits, struct rev_info *revs)
+static void handle_tail(struct object_array *commits, struct rev_info *revs,
+ struct string_list *paths_of_changed_objects)
{
struct commit *commit;
while (commits->nr) {
- commit = (struct commit *)commits->objects[commits->nr - 1].item;
+ commit = (struct commit *)object_array_pop(commits);
if (has_unshown_parent(commit))
return;
- handle_commit(commit, revs);
- commits->nr--;
+ handle_commit(commit, revs, paths_of_changed_objects);
}
}
char *export_filename = NULL, *import_filename = NULL;
uint32_t lastimportid;
struct string_list refspecs_list = STRING_LIST_INIT_NODUP;
+ struct string_list paths_of_changed_objects = STRING_LIST_INIT_DUP;
struct option options[] = {
OPT_INTEGER(0, "progress", &progress,
N_("show progress after <n> objects")),
if (prepare_revision_walk(&revs))
die("revision walk setup failed");
revs.diffopt.format_callback = show_filemodify;
+ revs.diffopt.format_callback_data = &paths_of_changed_objects;
DIFF_OPT_SET(&revs.diffopt, RECURSIVE);
while ((commit = get_revision(&revs))) {
if (has_unshown_parent(commit)) {
add_object_array(&commit->object, NULL, &commits);
}
else {
- handle_commit(commit, &revs);
- handle_tail(&commits, &revs);
+ handle_commit(commit, &revs, &paths_of_changed_objects);
+ handle_tail(&commits, &revs, &paths_of_changed_objects);
}
}
if (show_progress)
progress = start_delayed_progress(_("Checking connectivity"), 0);
while (pending.nr) {
- struct object_array_entry *entry;
- struct object *obj;
-
- entry = pending.objects + --pending.nr;
- obj = entry->item;
- result |= traverse_one_object(obj);
+ result |= traverse_one_object(object_array_pop(&pending));
display_progress(progress, ++nr);
}
stop_progress(&progress);
int should_exit;
if (!scan_fmt)
- scan_fmt = xstrfmt("%s %%%dc", "%"SCNuMAX, HOST_NAME_MAX);
+ scan_fmt = xstrfmt("%s %%%ds", "%"SCNuMAX, HOST_NAME_MAX);
fp = fopen(pidfile_path, "r");
memset(locking_host, 0, sizeof(locking_host));
should_exit =
usage(builtin_get_tar_commit_id_usage);
n = read_in_full(0, buffer, HEADERSIZE);
- if (n < HEADERSIZE)
- die("git get-tar-commit-id: read error");
+ if (n < 0)
+ die_errno("git get-tar-commit-id: read error");
+ if (n != HEADERSIZE)
+ die_errno("git get-tar-commit-id: EOF before reading tar header");
if (header->typeflag[0] != 'g')
return 1;
if (!skip_prefix(content, "52 comment=", &comment))
return 1;
- n = write_in_full(1, comment, 41);
- if (n < 41)
+ if (write_in_full(1, comment, 41) < 0)
die_errno("git get-tar-commit-id: write error");
return 0;
alias = alias_lookup(cmd);
if (alias) {
- printf_ln(_("`git %s' is aliased to `%s'"), cmd, alias);
+ printf_ln(_("'%s' is aliased to '%s'"), cmd, alias);
free(alias);
exit(0);
}
return want;
}
- for (entry = packed_git_mru->head; entry; entry = entry->next) {
+ for (entry = packed_git_mru.head; entry; entry = entry->next) {
struct packed_git *p = entry->item;
off_t offset;
}
want = want_found_object(exclude, p);
if (!exclude && want > 0)
- mru_mark(packed_git_mru, entry);
+ mru_mark(&packed_git_mru, entry);
if (want != -1)
return want;
}
};
struct in_pack {
- int alloc;
- int nr;
+ unsigned int alloc;
+ unsigned int nr;
struct in_pack_object *array;
};
int cmd_rebase__helper(int argc, const char **argv, const char *prefix)
{
struct replay_opts opts = REPLAY_OPTS_INIT;
+ int keep_empty = 0;
enum {
- CONTINUE = 1, ABORT
+ CONTINUE = 1, ABORT, MAKE_SCRIPT, SHORTEN_SHA1S, EXPAND_SHA1S,
+ CHECK_TODO_LIST, SKIP_UNNECESSARY_PICKS, REARRANGE_SQUASH
} command = 0;
struct option options[] = {
OPT_BOOL(0, "ff", &opts.allow_ff, N_("allow fast-forward")),
+ OPT_BOOL(0, "keep-empty", &keep_empty, N_("keep empty commits")),
OPT_CMDMODE(0, "continue", &command, N_("continue rebase"),
CONTINUE),
OPT_CMDMODE(0, "abort", &command, N_("abort rebase"),
ABORT),
+ OPT_CMDMODE(0, "make-script", &command,
+ N_("make rebase script"), MAKE_SCRIPT),
+ OPT_CMDMODE(0, "shorten-ids", &command,
+ N_("shorten SHA-1s in the todo list"), SHORTEN_SHA1S),
+ OPT_CMDMODE(0, "expand-ids", &command,
+ N_("expand SHA-1s in the todo list"), EXPAND_SHA1S),
+ OPT_CMDMODE(0, "check-todo-list", &command,
+ N_("check the todo list"), CHECK_TODO_LIST),
+ OPT_CMDMODE(0, "skip-unnecessary-picks", &command,
+ N_("skip unnecessary picks"), SKIP_UNNECESSARY_PICKS),
+ OPT_CMDMODE(0, "rearrange-squash", &command,
+ N_("rearrange fixup/squash lines"), REARRANGE_SQUASH),
OPT_END()
};
return !!sequencer_continue(&opts);
if (command == ABORT && argc == 1)
return !!sequencer_remove_state(&opts);
+ if (command == MAKE_SCRIPT && argc > 1)
+ return !!sequencer_make_script(keep_empty, stdout, argc, argv);
+ if (command == SHORTEN_SHA1S && argc == 1)
+ return !!transform_todo_ids(1);
+ if (command == EXPAND_SHA1S && argc == 1)
+ return !!transform_todo_ids(0);
+ if (command == CHECK_TODO_LIST && argc == 1)
+ return !!check_todo_list();
+ if (command == SKIP_UNNECESSARY_PICKS && argc == 1)
+ return !!skip_unnecessary_picks();
+ if (command == REARRANGE_SQUASH && argc == 1)
+ return !!rearrange_squash();
usage_with_options(builtin_rebase_helper_usage, options);
}
size_t n;
if (feed(feed_state, &buf, &n))
break;
- if (write_in_full(proc.in, buf, n) != n)
+ if (write_in_full(proc.in, buf, n) < 0)
break;
}
close(proc.in);
struct commit *c;
struct commit_list *parent;
- c = (struct commit *)study.objects[--study.nr].item;
+ c = (struct commit *)object_array_pop(&study);
if (!c->object.parsed && !parse_object(&c->object.oid))
c->object.flags |= INCOMPLETE;
found.objects[i].item->flags |= SEEN;
}
/* free object arrays */
- free(study.objects);
- free(found.objects);
+ object_array_clear(&study);
+ object_array_clear(&found);
return !is_incomplete;
}
special = str[rpos];
if (rpos == 1)
break;
- /* Fall-through to error. */
+ /* fallthrough */
default:
die("Bad remote-ext placeholder '%%%c'.",
str[rpos]);
{
int i;
for (i = 0; i < nbuf; i++)
- if (write_in_full(1, ptr[i].ptr, ptr[i].size) != ptr[i].size)
+ if (write_in_full(1, ptr[i].ptr, ptr[i].size) < 0)
return -1;
return 0;
}
return s;
}
+static char *findspace(const char *s)
+{
+ for (; *s; s++)
+ if (isspace(*s))
+ return (char*)s;
+ return NULL;
+}
+
static int cmd_parseopt(int argc, const char **argv, const char *prefix)
{
static int keep_dashdash = 0, stop_at_non_option = 0;
/* parse: (<short>|<short>,<long>|<long>)[*=?!]*<arghint>? SP+ <help> */
while (strbuf_getline(&sb, stdin) != EOF) {
const char *s;
- const char *help;
+ char *help;
struct option *o;
if (!sb.len)
memset(opts + onb, 0, sizeof(opts[onb]));
o = &opts[onb++];
- help = strchr(sb.buf, ' ');
- if (!help || *sb.buf == ' ') {
+ help = findspace(sb.buf);
+ if (!help || sb.buf == help) {
o->type = OPTION_GROUP;
o->help = xstrdup(skipspaces(sb.buf));
continue;
}
+ *help = '\0';
+
o->type = OPTION_CALLBACK;
- o->help = xstrdup(skipspaces(help));
+ o->help = xstrdup(skipspaces(help+1));
o->value = &parsed;
o->flags = PARSE_OPT_NOARG;
o->callback = &parseopt_dump;
: "false");
continue;
}
+ if (!strcmp(arg, "--is-shallow-repository")) {
+ printf("%s\n", is_repository_shallow() ? "true"
+ : "false");
+ continue;
+ }
if (!strcmp(arg, "--shared-index-path")) {
if (read_cache() < 0)
die(_("Could not read the index"));
break;
die("HEAD does not match the named branch in the superproject");
}
+ /* fallthrough */
default:
die("src refspec '%s' must name a ref",
rs->src);
xsnprintf(path, sizeof(path), ".merge_file_XXXXXX");
fd = xmkstemp(path);
- if (write_in_full(fd, buf, size) != size)
+ if (write_in_full(fd, buf, size) < 0)
die_errno("unable to write temp-file");
close(fd);
return path;
{
struct stat st;
char *path;
- int fd, len;
+ int fd;
+ size_t len;
+ ssize_t read_result;
if (!is_directory(git_path("worktrees/%s", id))) {
strbuf_addf(reason, _("Removing worktrees/%s: not a valid directory"), id);
id, strerror(errno));
return 1;
}
- len = st.st_size;
+ len = xsize_t(st.st_size);
path = xmallocz(len);
- read_in_full(fd, path, len);
+
+ read_result = read_in_full(fd, path, len);
+ if (read_result < 0) {
+ strbuf_addf(reason, _("Removing worktrees/%s: unable to read gitdir file (%s)"),
+ id, strerror(errno));
+ close(fd);
+ free(path);
+ return 1;
+ }
close(fd);
+
+ if (read_result != len) {
+ strbuf_addf(reason,
+ _("Removing worktrees/%s: short read (expected %"PRIuMAX" bytes, read %"PRIuMAX")"),
+ id, (uintmax_t)len, (uintmax_t)read_result);
+ free(path);
+ return 1;
+ }
while (len && (path[len - 1] == '\n' || path[len - 1] == '\r'))
len--;
if (!len) {
if (size && !s.avail_in) {
ssize_t rsize = size < sizeof(ibuf) ? size : sizeof(ibuf);
- if (read_in_full(fd, ibuf, rsize) != rsize)
+ ssize_t read_result = read_in_full(fd, ibuf, rsize);
+ if (read_result < 0)
+ die_errno("failed to read from '%s'", path);
+ if (read_result != rsize)
die("failed to read %d bytes from '%s'",
(int)rsize, path);
offset += rsize;
req_nr = revs.pending.nr;
setup_revisions(2, argv, &revs, NULL);
+ /* Save pending objects, so they can be cleaned up later. */
refs = revs.pending;
revs.leak_pending = 1;
+ /*
+ * prepare_revision_walk (together with .leak_pending = 1) makes us
+ * the sole owner of the list of pending objects.
+ */
if (prepare_revision_walk(&revs))
die(_("revision walk setup failed"));
refs.objects[i].name);
}
+ /* Clean up objects used, as they will be reused. */
clear_commit_marks_for_object_array(&refs, ALL_REV_FLAGS);
- free(refs.objects);
+
+ object_array_clear(&refs);
if (verbose) {
struct ref_list *r;
#include "git-compat-util.h"
#include "strbuf.h"
#include "hashmap.h"
+#include "mru.h"
#include "advice.h"
#include "gettext.h"
#include "convert.h"
#define GIT_NOGLOB_PATHSPECS_ENVIRONMENT "GIT_NOGLOB_PATHSPECS"
#define GIT_ICASE_PATHSPECS_ENVIRONMENT "GIT_ICASE_PATHSPECS"
#define GIT_QUARANTINE_ENVIRONMENT "GIT_QUARANTINE_PATH"
+#define GIT_OPTIONAL_LOCKS_ENVIRONMENT "GIT_OPTIONAL_LOCKS"
/*
* This environment variable is expected to contain a boolean indicating
*/
extern int ref_paranoia;
+/*
+ * Returns the boolean value of $GIT_OPTIONAL_LOCKS (or the default value).
+ */
+int use_optional_locks(void);
+
/*
* The character that begins a commented line in user-editable file
* that is subject to stripspace.
*/
static inline int hex2chr(const char *s)
{
- int val = hexval(s[0]);
- return (val < 0) ? val : (val << 4) | hexval(s[1]);
+ unsigned int val = hexval(s[0]);
+ return (val & ~0xf) ? val : (val << 4) | hexval(s[1]);
}
/* Convert to/from hex/sha1 representation */
* A most-recently-used ordered version of the packed_git list, which can
* be iterated instead of packed_git (and marked via mru_mark).
*/
-struct mru;
-extern struct mru *packed_git_mru;
+extern struct mru packed_git_mru;
struct pack_entry {
off_t offset;
--- /dev/null
+#!/usr/bin/env bash
+#
+# Install dependencies required to build and test Git on Linux and macOS
+#
+
+. ${0%/*}/lib-travisci.sh
+
+P4WHENCE=http://filehost.perforce.com/perforce/r$LINUX_P4_VERSION
+LFSWHENCE=https://github.com/github/git-lfs/releases/download/v$LINUX_GIT_LFS_VERSION
+
+case "${TRAVIS_OS_NAME:-linux}" in
+linux)
+ export GIT_TEST_HTTPD=YesPlease
+
+ mkdir --parents custom/p4
+ pushd custom/p4
+ wget --quiet "$P4WHENCE/bin.linux26x86_64/p4d"
+ wget --quiet "$P4WHENCE/bin.linux26x86_64/p4"
+ chmod u+x p4d
+ chmod u+x p4
+ export PATH="$(pwd):$PATH"
+ popd
+ mkdir --parents custom/git-lfs
+ pushd custom/git-lfs
+ wget --quiet "$LFSWHENCE/git-lfs-linux-amd64-$LINUX_GIT_LFS_VERSION.tar.gz"
+ tar --extract --gunzip --file "git-lfs-linux-amd64-$LINUX_GIT_LFS_VERSION.tar.gz"
+ cp git-lfs-$LINUX_GIT_LFS_VERSION/git-lfs .
+ export PATH="$(pwd):$PATH"
+ popd
+ ;;
+osx)
+ brew update --quiet
+ # Uncomment this if you want to run perf tests:
+ # brew install gnu-time
+ brew install git-lfs gettext
+ brew link --force gettext
+ brew install caskroom/cask/perforce
+ ;;
+esac
+
+echo "$(tput setaf 6)Perforce Server Version$(tput sgr0)"
+p4d -V | grep Rev.
+echo "$(tput setaf 6)Perforce Client Version$(tput sgr0)"
+p4 -V | grep Rev.
+echo "$(tput setaf 6)Git-LFS Version$(tput sgr0)"
+git-lfs version
--- /dev/null
+# Library of functions shared by all CI scripts
+
+skip_branch_tip_with_tag () {
+ # Sometimes, a branch is pushed at the same time the tag that points
+ # at the same commit as the tip of the branch is pushed, and building
+ # both at the same time is a waste.
+ #
+ # Travis gives a tagname e.g. v2.14.0 in $TRAVIS_BRANCH when
+ # the build is triggered by a push to a tag. Let's see if
+ # $TRAVIS_BRANCH is exactly at a tag, and if so, if it is
+ # different from $TRAVIS_BRANCH. That way, we can tell if
+ # we are building the tip of a branch that is tagged and
+ # we can skip the build because we won't be skipping a build
+ # of a tag.
+
+ if TAG=$(git describe --exact-match "$TRAVIS_BRANCH" 2>/dev/null) &&
+ test "$TAG" != "$TRAVIS_BRANCH"
+ then
+ echo "Tip of $TRAVIS_BRANCH is exactly at $TAG"
+ exit 0
+ fi
+}
+
+# Set 'exit on error' for all CI scripts to let the caller know that
+# something went wrong
+set -e
+
+skip_branch_tip_with_tag
--- /dev/null
+#!/bin/sh
+#
+# Print output of failing tests
+#
+
+. ${0%/*}/lib-travisci.sh
+
+for TEST_EXIT in t/test-results/*.exit
+do
+ if [ "$(cat "$TEST_EXIT")" != "0" ]
+ then
+ TEST_OUT="${TEST_EXIT%exit}out"
+ echo "------------------------------------------------------------------------"
+ echo "$(tput setaf 1)${TEST_OUT}...$(tput sgr0)"
+ echo "------------------------------------------------------------------------"
+ cat "${TEST_OUT}"
+ fi
+done
--- /dev/null
+#!/bin/sh
+#
+# Build Git
+#
+
+. ${0%/*}/lib-travisci.sh
+
+make --jobs=2
--- /dev/null
+#!/bin/sh
+#
+# Download and run Docker image to build and test 32-bit Git
+#
+
+. ${0%/*}/lib-travisci.sh
+
+docker pull daald/ubuntu32:xenial
+
+# Use the following command to debug the docker build locally:
+# $ docker run -itv "${PWD}:/usr/src/git" --entrypoint /bin/bash daald/ubuntu32:xenial
+# root@container:/# /usr/src/git/ci/run-linux32-build.sh
+
+docker run \
+ --interactive \
+ --env DEVELOPER \
+ --env DEFAULT_TEST_TARGET \
+ --env GIT_PROVE_OPTS \
+ --env GIT_TEST_OPTS \
+ --env GIT_TEST_CLONE_2GB \
+ --volume "${PWD}:/usr/src/git" \
+ daald/ubuntu32:xenial \
+ /usr/src/git/ci/run-linux32-build.sh $(id -u $USER)
--- /dev/null
+#!/bin/sh
+#
+# Perform various static code analysis checks
+#
+
+. ${0%/*}/lib-travisci.sh
+
+make coccicheck
--- /dev/null
+#!/bin/sh
+#
+# Test Git
+#
+
+. ${0%/*}/lib-travisci.sh
+
+mkdir -p $HOME/travis-cache
+ln -s $HOME/travis-cache/.prove t/.prove
+make --quiet test
# supported) and a commit hash.
#
+. ${0%/*}/lib-travisci.sh
+
test $# -ne 2 && echo "Unexpected number of parameters" && exit 1
test -z "$GFW_CI_TOKEN" && echo "GFW_CI_TOKEN not defined" && exit
# Perform sanity checks on documentation and build it.
#
-set -e
+. ${0%/*}/lib-travisci.sh
+
+gem install asciidoctor
make check-builtins
make check-docs
\
static MAYBE_UNUSED void clear_ ##slabname(struct slabname *s) \
{ \
- int i; \
+ unsigned int i; \
for (i = 0; i < s->slab_count; i++) \
free(s->slab[i]); \
s->slab_count = 0; \
const struct commit *c, \
int add_if_missing) \
{ \
- int nth_slab, nth_slot; \
+ unsigned int nth_slab, nth_slot; \
\
nth_slab = c->index / s->slab_size; \
nth_slot = c->index % s->slab_size; \
\
if (s->slab_count <= nth_slab) { \
- int i; \
+ unsigned int i; \
if (!add_if_missing) \
return NULL; \
REALLOC_ARRAY(s->slab, nth_slab + 1); \
num_head = remove_redundant(array, num_head);
for (i = 0; i < num_head; i++)
tail = &commit_list_insert(array[i], tail)->next;
+ free(array);
return result;
}
--- /dev/null
+#ifndef LAZYLOAD_H
+#define LAZYLOAD_H
+
+/*
+ * A pair of macros to simplify loading of DLL functions. Example:
+ *
+ * DECLARE_PROC_ADDR(kernel32.dll, BOOL, CreateHardLinkW,
+ * LPCWSTR, LPCWSTR, LPSECURITY_ATTRIBUTES);
+ *
+ * if (!INIT_PROC_ADDR(CreateHardLinkW))
+ * return error("Could not find CreateHardLinkW() function");
+ *
+ * if (!CreateHardLinkW(source, target, NULL))
+ * return error("could not create hardlink from %S to %S",
+ * source, target);
+ */
+
+struct proc_addr {
+ const char *const dll;
+ const char *const function;
+ FARPROC pfunction;
+ unsigned initialized : 1;
+};
+
+/* Declares a function to be loaded dynamically from a DLL. */
+#define DECLARE_PROC_ADDR(dll, rettype, function, ...) \
+ static struct proc_addr proc_addr_##function = \
+ { #dll, #function, NULL, 0 }; \
+ static rettype (WINAPI *function)(__VA_ARGS__)
+
+/*
+ * Loads a function from a DLL (once-only).
+ * Returns non-NULL function pointer on success.
+ * Returns NULL + errno == ENOSYS on failure.
+ * This function is not thread-safe.
+ */
+#define INIT_PROC_ADDR(function) \
+ (function = get_proc_addr(&proc_addr_##function))
+
+static inline void *get_proc_addr(struct proc_addr *proc)
+{
+ /* only do this once */
+ if (!proc->initialized) {
+ HANDLE hnd;
+ proc->initialized = 1;
+ hnd = LoadLibraryExA(proc->dll, NULL,
+ LOAD_LIBRARY_SEARCH_SYSTEM32);
+ if (hnd)
+ proc->pfunction = GetProcAddress(hnd, proc->function);
+ }
+ /* set ENOSYS if DLL or function was not found */
+ if (!proc->pfunction)
+ errno = ENOSYS;
+ return proc->pfunction;
+}
+
+#endif
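A hedged end-to-end sketch of the pattern this header enables, mirroring the usage shown in its own comment (the try_hardlink() wrapper and its argument names are illustrative assumptions; windows.h and this header are assumed to be included already):

    DECLARE_PROC_ADDR(kernel32.dll, BOOL, CreateHardLinkW,
                      LPCWSTR, LPCWSTR, LPSECURITY_ATTRIBUTES);

    static int try_hardlink(LPCWSTR newpath, LPCWSTR existing)
    {
        /* resolves the symbol once; sets errno to ENOSYS if it is missing */
        if (!INIT_PROC_ADDR(CreateHardLinkW))
            return -1;
        if (!CreateHardLinkW(newpath, existing, NULL))
            return -1;
        return 0;
    }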
size_t *offset;
unsigned int offset_alloc;
enum { START, SECTION_SEEN, SECTION_END_SEEN, KEY_SEEN } state;
- int seen;
+ unsigned int seen;
} store;
static int matches(const char *key, const char *value)
return 4;
}
-static int store_write_section(int fd, const char *key)
+static struct strbuf store_create_section(const char *key)
{
const char *dot;
- int i, success;
+ int i;
struct strbuf sb = STRBUF_INIT;
dot = memchr(key, '.', store.baselen);
strbuf_addf(&sb, "[%.*s]\n", store.baselen, key);
}
- success = write_in_full(fd, sb.buf, sb.len) == sb.len;
+ return sb;
+}
+
+static ssize_t write_section(int fd, const char *key)
+{
+ struct strbuf sb = store_create_section(key);
+ ssize_t ret;
+
+ ret = write_in_full(fd, sb.buf, sb.len);
strbuf_release(&sb);
- return success;
+ return ret;
}
-static int store_write_pair(int fd, const char *key, const char *value)
+static ssize_t write_pair(int fd, const char *key, const char *value)
{
- int i, success;
+ int i;
+ ssize_t ret;
int length = strlen(key + store.baselen + 1);
const char *quote = "";
struct strbuf sb = STRBUF_INIT;
case '"':
case '\\':
strbuf_addch(&sb, '\\');
+ /* fallthrough */
default:
strbuf_addch(&sb, value[i]);
break;
}
strbuf_addf(&sb, "%s\n", quote);
- success = write_in_full(fd, sb.buf, sb.len) == sb.len;
+ ret = write_in_full(fd, sb.buf, sb.len);
strbuf_release(&sb);
- return success;
+ return ret;
}
static ssize_t find_beginning_of_line(const char *contents, size_t size,
}
store.key = (char *)key;
- if (!store_write_section(fd, key) ||
- !store_write_pair(fd, key, value))
+ if (write_section(fd, key) < 0 ||
+ write_pair(fd, key, value) < 0)
goto write_err_out;
} else {
struct stat st;
/* write the first part of the config */
if (copy_end > copy_begin) {
if (write_in_full(fd, contents + copy_begin,
- copy_end - copy_begin) <
- copy_end - copy_begin)
+ copy_end - copy_begin) < 0)
goto write_err_out;
if (new_line &&
- write_str_in_full(fd, "\n") != 1)
+ write_str_in_full(fd, "\n") < 0)
goto write_err_out;
}
copy_begin = store.offset[i];
/* write the pair (value == NULL means unset) */
if (value != NULL) {
if (store.state == START) {
- if (!store_write_section(fd, key))
+ if (write_section(fd, key) < 0)
goto write_err_out;
}
- if (!store_write_pair(fd, key, value))
+ if (write_pair(fd, key, value) < 0)
goto write_err_out;
}
/* write the rest of the config */
if (copy_begin < contents_sz)
if (write_in_full(fd, contents + copy_begin,
- contents_sz - copy_begin) <
- contents_sz - copy_begin)
+ contents_sz - copy_begin) < 0)
goto write_err_out;
munmap(contents, contents_sz);
}
/* if new_name == NULL, the section is removed instead */
-int git_config_rename_section_in_file(const char *config_filename,
- const char *old_name, const char *new_name)
+static int git_config_copy_or_rename_section_in_file(const char *config_filename,
+ const char *old_name, const char *new_name, int copy)
{
int ret = 0, remove = 0;
char *filename_buf = NULL;
char buf[1024];
FILE *config_file = NULL;
struct stat st;
+ struct strbuf copystr = STRBUF_INIT;
if (new_name && !section_name_is_ok(new_name)) {
ret = error("invalid section name: %s", new_name);
while (fgets(buf, sizeof(buf), config_file)) {
int i;
int length;
+ int is_section = 0;
char *output = buf;
for (i = 0; buf[i] && isspace(buf[i]); i++)
; /* do nothing */
if (buf[i] == '[') {
/* it's a section */
- int offset = section_name_match(&buf[i], old_name);
+ int offset;
+ is_section = 1;
+
+ /*
+ * When encountering a new section under -c we
+ * need to flush out any section we're already
+ * copying and begin anew. There might be
+ * multiple [branch "$name"] sections.
+ */
+ if (copystr.len > 0) {
+ if (write_in_full(out_fd, copystr.buf, copystr.len) != copystr.len) {
+ ret = write_error(get_lock_file_path(lock));
+ goto out;
+ }
+ strbuf_reset(&copystr);
+ }
+
+ offset = section_name_match(&buf[i], old_name);
if (offset > 0) {
ret++;
if (new_name == NULL) {
continue;
}
store.baselen = strlen(new_name);
- if (!store_write_section(out_fd, new_name)) {
- ret = write_error(get_lock_file_path(lock));
- goto out;
- }
- /*
- * We wrote out the new section, with
- * a newline, now skip the old
- * section's length
- */
- output += offset + i;
- if (strlen(output) > 0) {
+ if (!copy) {
+ if (write_section(out_fd, new_name) < 0) {
+ ret = write_error(get_lock_file_path(lock));
+ goto out;
+ }
/*
- * More content means there's
- * a declaration to put on the
- * next line; indent with a
- * tab
+ * We wrote out the new section, with
+ * a newline, now skip the old
+ * section's length
*/
- output -= 1;
- output[0] = '\t';
+ output += offset + i;
+ if (strlen(output) > 0) {
+ /*
+ * More content means there's
+ * a declaration to put on the
+ * next line; indent with a
+ * tab
+ */
+ output -= 1;
+ output[0] = '\t';
+ }
+ } else {
+ copystr = store_create_section(new_name);
}
}
remove = 0;
if (remove)
continue;
length = strlen(output);
- if (write_in_full(out_fd, output, length) != length) {
+
+ if (!is_section && copystr.len > 0) {
+ strbuf_add(&copystr, output, length);
+ }
+
+ if (write_in_full(out_fd, output, length) < 0) {
ret = write_error(get_lock_file_path(lock));
goto out;
}
}
+
+ /*
+ * Copy a trailing section at the end of the config; it won't be
+ * flushed by the usual "flush because we have a new section"
+ * logic in the loop above.
+ */
+ if (copystr.len > 0) {
+ if (write_in_full(out_fd, copystr.buf, copystr.len) != copystr.len) {
+ ret = write_error(get_lock_file_path(lock));
+ goto out;
+ }
+ strbuf_reset(&copystr);
+ }
+
fclose(config_file);
config_file = NULL;
commit_and_out:
return ret;
}
+int git_config_rename_section_in_file(const char *config_filename,
+ const char *old_name, const char *new_name)
+{
+ return git_config_copy_or_rename_section_in_file(config_filename,
+ old_name, new_name, 0);
+}
+
int git_config_rename_section(const char *old_name, const char *new_name)
{
return git_config_rename_section_in_file(NULL, old_name, new_name);
}
+int git_config_copy_section_in_file(const char *config_filename,
+ const char *old_name, const char *new_name)
+{
+ return git_config_copy_or_rename_section_in_file(config_filename,
+ old_name, new_name, 1);
+}
+
+int git_config_copy_section(const char *old_name, const char *new_name)
+{
+ return git_config_copy_section_in_file(NULL, old_name, new_name);
+}
+
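The new copy entry points follow the same calling convention as the rename ones. A minimal caller sketch (the copy_branch_config() wrapper, section names, and error handling below are illustrative assumptions, not part of this patch):

    static void copy_branch_config(const char *oldname, const char *newname)
    {
        struct strbuf oldsection = STRBUF_INIT, newsection = STRBUF_INIT;

        strbuf_addf(&oldsection, "branch.%s", oldname);
        strbuf_addf(&newsection, "branch.%s", newname);
        /* every matching [branch "<oldname>"] section is duplicated; the old one stays */
        if (git_config_copy_section(oldsection.buf, newsection.buf) < 0)
            die(_("could not copy config section '%s'"), oldsection.buf);
        strbuf_release(&oldsection);
        strbuf_release(&newsection);
    }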
/*
* Call this to report error for your variable that should not
* get a boolean value (i.e. "[my] var" means "true").
extern void git_config_set_multivar_in_file(const char *, const char *, const char *, const char *, int);
extern int git_config_rename_section(const char *, const char *);
extern int git_config_rename_section_in_file(const char *, const char *, const char *);
+extern int git_config_copy_section(const char *, const char *);
+extern int git_config_copy_section_in_file(const char *, const char *, const char *);
extern const char *git_etc_gitconfig(void);
extern int git_env_bool(const char *, int);
extern unsigned long git_env_ulong(const char *, unsigned long);
UNRELIABLE_FSTAT = UnfortunatelyYes
SPARSE_FLAGS = -isystem /usr/include/w32api -Wno-one-bit-signed-bitfield
OBJECT_CREATION_USES_RENAMES = UnfortunatelyNeedsTo
+ MMAP_PREVENTS_DELETE = UnfortunatelyYes
COMPAT_OBJS += compat/cygwin.o
FREAD_READS_DIRECTORIES = UnfortunatelyYes
endif
NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
NO_NSEC = YesPlease
USE_WIN32_MMAP = YesPlease
+ MMAP_PREVENTS_DELETE = UnfortunatelyYes
# USE_NED_ALLOCATOR = YesPlease
UNRELIABLE_FSTAT = UnfortunatelyYes
OBJECT_CREATION_USES_RENAMES = UnfortunatelyNeedsTo
NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
NO_NSEC = YesPlease
USE_WIN32_MMAP = YesPlease
+ MMAP_PREVENTS_DELETE = UnfortunatelyYes
USE_NED_ALLOCATOR = YesPlease
UNRELIABLE_FSTAT = UnfortunatelyYes
OBJECT_CREATION_USES_RENAMES = UnfortunatelyNeedsTo
switch (ident->state) {
default:
strbuf_add(&ident->left, head, ident->state);
+ /* fallthrough */
case IDENT_SKIPPING:
- /* fallthru */
+ /* fallthrough */
case IDENT_DRAINING:
ident_drain(ident, &output, osize_p);
}
if (ret < 0)
die_errno("%s: sha1 file read error", f->name);
- if (ret < count)
+ if (ret != count)
die("%s: sha1 file truncated", f->name);
if (memcmp(buf, check_buffer, count))
die("sha1 file '%s' validation error", f->name);
const void *trg_buf, unsigned long trg_size,
unsigned long *delta_size, unsigned long max_size)
{
- unsigned int i, outpos, outsize, moff, msize, val;
+ unsigned int i, val;
+ off_t outpos, moff;
+ size_t l, outsize, msize;
int inscnt;
const unsigned char *ref_data, *ref_top, *data, *top;
unsigned char *out;
return NULL;
/* store reference buffer size */
- i = index->src_size;
- while (i >= 0x80) {
- out[outpos++] = i | 0x80;
- i >>= 7;
+ l = index->src_size;
+ while (l >= 0x80) {
+ out[outpos++] = l | 0x80;
+ l >>= 7;
}
- out[outpos++] = i;
+ out[outpos++] = l;
/* store target buffer size */
- i = trg_size;
- while (i >= 0x80) {
- out[outpos++] = i | 0x80;
- i >>= 7;
+ l = trg_size;
+ while (l >= 0x80) {
+ out[outpos++] = l | 0x80;
+ l >>= 7;
}
- out[outpos++] = i;
+ out[outpos++] = l;
ref_data = index->src_buf;
ref_top = ref_data + index->src_size;
moff += msize;
msize = left;
+ if (moff > 0xffffffff)
+ msize = 0;
+
if (msize < 4096) {
int j;
val = 0;
rev.diffopt.flags |= diff_flags;
rev.diffopt.ita_invisible_in_index = ita_invisible_in_index;
run_diff_index(&rev, 1);
- if (rev.pending.alloc)
- free(rev.pending.objects);
+ object_array_clear(&rev.pending);
return (DIFF_OPT_TST(&rev.diffopt, HAS_CHANGES) != 0);
}
struct diff_words_buffer {
mmfile_t text;
- long alloc;
+ unsigned long alloc;
struct diff_words_orig {
const char *begin, *end;
} *orig;
blob = buf.buf;
size = buf.len;
}
- if (write_in_full(temp->tempfile->fd, blob, size) != size ||
+ if (write_in_full(temp->tempfile->fd, blob, size) < 0 ||
close_tempfile_gently(temp->tempfile))
die_errno("unable to write temp-file");
temp->name = get_tempfile_path(temp->tempfile);
strbuf_addch(&sb, ' ');
quote_c_style(p->two->path, &sb, NULL, 0);
}
+ strbuf_addch(&sb, '\n');
emit_diff_symbol(opt, DIFF_SYMBOL_SUMMARY,
sb.buf, sb.len, 0);
strbuf_release(&sb);
static enum path_treatment read_directory_recursive(struct dir_struct *dir,
struct index_state *istate, const char *path, int len,
struct untracked_cache_dir *untracked,
- int check_only, const struct pathspec *pathspec);
+ int check_only, int stop_at_first_file, const struct pathspec *pathspec);
static int get_dtype(struct dirent *de, struct index_state *istate,
const char *path, int len);
untracked = lookup_untracked(dir->untracked, untracked,
dirname + baselen, len - baselen);
+
+ /*
+ * If this is an excluded directory, then we only need to check if
+ * the directory contains any files.
+ */
return read_directory_recursive(dir, istate, dirname, len,
- untracked, 1, pathspec);
+ untracked, 1, exclude, pathspec);
}
/*
* with check_only set.
*/
return read_directory_recursive(dir, istate, path->buf, path->len,
- cdir->ucd, 1, pathspec);
+ cdir->ucd, 1, 0, pathspec);
/*
* We get path_recurse in the first run when
* directory_exists_in_index() returns index_nonexistent. We
* Also, we ignore the name ".git" (even if it is not a directory).
* That likely will not change.
*
+ * If 'stop_at_first_file' is specified, 'path_excluded' is returned
+ * to signal that a file was found. This is the least significant value
+ * that indicates a file was encountered, and it does not depend on
+ * whether an untracked or excluded path was encountered first.
+ *
* Returns the most significant path_treatment value encountered in the scan.
+ * If 'stop_at_first_file' is specified, `path_excluded` is the most
+ * significant path_treatment value that will be returned.
*/
+
static enum path_treatment read_directory_recursive(struct dir_struct *dir,
struct index_state *istate, const char *base, int baselen,
struct untracked_cache_dir *untracked, int check_only,
- const struct pathspec *pathspec)
+ int stop_at_first_file, const struct pathspec *pathspec)
{
struct cached_dir cdir;
enum path_treatment state, subdir_state, dir_state = path_none;
subdir_state =
read_directory_recursive(dir, istate, path.buf,
path.len, ud,
- check_only, pathspec);
+ check_only, stop_at_first_file, pathspec);
if (subdir_state > dir_state)
dir_state = subdir_state;
}
if (check_only) {
+ if (stop_at_first_file) {
+ /*
+ * If stopping at first file, then
+ * signal that a file was found by
+ * returning `path_excluded`. This is
+ * to return a consistent value
+ * regardless of whether an ignored or
+ * excluded file happened to be
+ * encountered first.
+ *
+ * In current usage, the
+ * `stop_at_first_file` is passed when
+ * an ancestor directory has matched
+ * an exclude pattern, so any found
+ * files will be excluded.
+ */
+ if (dir_state >= path_excluded) {
+ dir_state = path_excluded;
+ break;
+ }
+ }
+
/* abort early if maximum state has been reached */
if (dir_state == path_untracked) {
if (cdir.fdir)
*/
dir->untracked = NULL;
if (!len || treat_leading_path(dir, istate, path, len, pathspec))
- read_directory_recursive(dir, istate, path, len, untracked, 0, pathspec);
+ read_directory_recursive(dir, istate, path, len, untracked, 0, 0, pathspec);
QSORT(dir->entries, dir->nr, cmp_dir_entry);
QSORT(dir->ignored, dir->ignored_nr, cmp_dir_entry);
char *new;
struct strbuf buf = STRBUF_INIT;
unsigned long size;
- size_t wrote, newsize = 0;
+ ssize_t wrote;
+ size_t newsize = 0;
struct stat st;
const struct submodule *sub;
fstat_done = fstat_output(fd, state, &st);
close(fd);
free(new);
- if (wrote != size)
+ if (wrote < 0)
return error("unable to write file %s", path);
break;
case S_IFGITLINK:
{
need_shared_repository_from_config = 1;
}
+
+int use_optional_locks(void)
+{
+ return git_env_bool(GIT_OPTIONAL_LOCKS_ENVIRONMENT, 1);
+}
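A hedged sketch of the intended call pattern, assuming a command that only opportunistically refreshes the index (the maybe_refresh_index() wrapper and its lock/refresh calls are illustrative, not taken from this hunk):

    static void maybe_refresh_index(void)
    {
        static struct lock_file lock_file;
        int fd = -1;

        /* skip taking the optional lock when --no-optional-locks is in effect */
        if (use_optional_locks())
            fd = hold_locked_index(&lock_file, 0);
        if (fd >= 0)
            update_index_if_able(&the_index, &lock_file);
    }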
static void cat_blob_write(const char *buf, unsigned long size)
{
- if (write_in_full(cat_blob_fd, buf, size) != size)
+ if (write_in_full(cat_blob_fd, buf, size) < 0)
die_errno("Write to frontend failed");
}
checkpoint_requested = 0;
if (object_count) {
cycle_packfile();
- dump_branches();
- dump_tags();
- dump_marks();
}
+ dump_branches();
+ dump_tags();
+ dump_marks();
}
static void parse_checkpoint(void)
case S_IFREG | 0664:
if (!options->strict)
break;
+ /* fallthrough */
default:
has_bad_modes = 1;
}
# check that we actually know about the branch
next unless -e "$git_dir/refs/heads/$branch";
- my $mergebase = `git-merge-base $branch $ps->{branch}`;
+ my $mergebase = safe_pipe_capture(qw(git-merge-base), $branch, $ps->{branch});
if ($?) {
# Don't die here, Arch supports one-way cherry-picking
# between branches with no common base (or any relationship
sub git_rev_parse {
my $name = shift;
- my $val = `git-rev-parse $name`;
+ my $val = safe_pipe_capture(qw(git-rev-parse), $name);
die "Error: git-rev-parse $name" if $?;
chomp $val;
return $val;
static inline size_t xsize_t(off_t len)
{
- if (len > (size_t) len)
+ size_t size = (size_t) len;
+
+ if (len != (off_t) size)
die("Cannot handle files this big");
- return (size_t)len;
+ return size;
}
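The rewritten check verifies that the value survives a round trip through size_t instead of relying on the old signed/unsigned comparison. A small hedged example of the intended use, mirroring the worktree-prune hunk earlier (the read_whole_file() helper and its error messages are illustrative only):

    static char *read_whole_file(int fd)
    {
        struct stat st;
        size_t len;
        char *buf;

        if (fstat(fd, &st))
            die_errno("cannot stat input");
        len = xsize_t(st.st_size);    /* dies instead of silently truncating */
        buf = xmallocz(len);
        if (read_in_full(fd, buf, len) < 0)
            die_errno("cannot read input");
        return buf;
    }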
__attribute__((format (printf, 3, 4)))
sub get_headref ($) {
my $name = shift;
+ $name =~ s/'/'\\''/g;
my $r = `git rev-parse --verify '$name' 2>/dev/null`;
return undef unless $? == 0;
chomp $r;
return 0;
}
- my @gitvars = `git config -l`;
+ my @gitvars = safe_pipe_capture(qw(git config -l));
if ($?) {
print "E problems executing git-config on the server -- this is not a git repository or the PATH is not set correctly.\n";
print "E \n";
# Save the file data in $state
$state->{entries}{$state->{directory}.$data}{modified_filename} = $filename;
$state->{entries}{$state->{directory}.$data}{modified_mode} = $mode;
- $state->{entries}{$state->{directory}.$data}{modified_hash} = `git hash-object $filename`;
+ $state->{entries}{$state->{directory}.$data}{modified_hash} = safe_pipe_capture('git','hash-object',$filename);
$state->{entries}{$state->{directory}.$data}{modified_hash} =~ s/\s.*$//s;
#$log->debug("req_Modified : file=$data mode=$mode size=$size");
# Provide list of modules, if -c was used.
if (exists $state->{opt}{c}) {
- my $showref = `git show-ref --heads`;
+ my $showref = safe_pipe_capture(qw(git show-ref --heads));
for my $line (split '\n', $showref) {
if ( $line =~ m% refs/heads/(.*)$% ) {
print "M $1\t$1\n";
# projects (heads in this case) to checkout.
#
if ($state->{module} eq '') {
- my $showref = `git show-ref --heads`;
+ my $showref = safe_pipe_capture(qw(git show-ref --heads));
print "E cvs update: Updating .\n";
for my $line (split '\n', $showref) {
if ( $line =~ m% refs/heads/(.*)$% ) {
# transmit file, format is single integer on a line by itself (file
# size) followed by the file contents
# TODO : we should copy files in blocks
- my $data = `cat $mergedFile`;
+ my $data = safe_pipe_capture('cat', $mergedFile);
$log->debug("File size : " . length($data));
print length($data) . "\n";
print $data;
$branchRef = "refs/heads/$stickyInfo->{tag}";
}
- $parenthash = `git show-ref -s $branchRef`;
+ $parenthash = safe_pipe_capture('git', 'show-ref', '-s', $branchRef);
chomp $parenthash;
if ($parenthash !~ /^[0-9a-f]{40}$/)
{
return;
}
- my $treehash = `git write-tree`;
+ my $treehash = safe_pipe_capture(qw(git write-tree));
chomp $treehash;
$log->debug("Treehash : $treehash, Parenthash : $parenthash");
}
close $msg_fh;
- my $commithash = `git commit-tree $treehash -p $parenthash < $msg_filename`;
+ my $commithash = safe_pipe_capture('git', 'commit-tree', $treehash, '-p', $parenthash, '-F', $msg_filename);
chomp($commithash);
$log->info("Commit hash : $commithash");
die "Need filehash" unless ( defined ( $filehash ) and $filehash =~ /^[a-zA-Z0-9]{40}$/ );
- my $type = `git cat-file -t $filehash`;
+ my $type = safe_pipe_capture('git', 'cat-file', '-t', $filehash);
chomp $type;
die ( "Invalid type '$type' (expected 'blob')" ) unless ( defined ( $type ) and $type eq "blob" );
- my $size = `git cat-file -s $filehash`;
+ my $size = safe_pipe_capture('git', 'cat-file', '-s', $filehash);
chomp $size;
$log->debug("transmitfile($filehash) size=$size, type=$type");
chdir $work->{emptyDir} or
die "Unable to chdir to $work->{emptyDir}\n";
- my $ver = `git show-ref -s refs/heads/$state->{module}`;
+ my $ver = safe_pipe_capture('git', 'show-ref', '-s', "refs/heads/$state->{module}");
chomp $ver;
if ($ver !~ /^[0-9a-f]{40}$/)
{
die "Need filehash\n";
}
- my $type = `git cat-file -t $name`;
+ my $type = safe_pipe_capture('git', 'cat-file', '-t', $name);
chomp $type;
unless ( defined ( $type ) and $type eq "blob" )
die ( "Invalid type '$type' (expected 'blob')" )
}
- my $size = `git cat-file -s $name`;
+ my $size = safe_pipe_capture('git', 'cat-file', '-s', $name);
chomp $size;
$log->debug("open_blob_or_die($name) size=$size, type=$type");
return $out;
}
+# an alternative to `command` that allows input to be passed as an array
+# to work around shell problems with weird characters in arguments
+
+sub safe_pipe_capture {
+
+ my @output;
+
+ if (my $pid = open my $child, '-|') {
+ @output = (<$child>);
+ close $child or die join(' ',@_).": $! $?";
+ } else {
+ exec(@_) or die "$! $?"; # exec() can fail if the executable can't be found
+ }
+ return wantarray ? @output : join('',@output);
+}
+
package GITCVS::log;
# first lets get the commit list
$ENV{GIT_DIR} = $self->{git_path};
- my $commitsha1 = `git rev-parse $self->{module}`;
+ my $commitsha1 = ::safe_pipe_capture('git', 'rev-parse', $self->{module});
chomp $commitsha1;
- my $commitinfo = `git cat-file commit $self->{module} 2>&1`;
+ my $commitinfo = ::safe_pipe_capture('git', 'cat-file', 'commit', $self->{module});
unless ( $commitinfo =~ /tree\s+[a-zA-Z0-9]{40}/ )
{
die("Invalid module '$self->{module}'");
# several candidate merge bases. let's assume
# that the first one is the best one.
my $base = eval {
- safe_pipe_capture('git', 'merge-base',
+ ::safe_pipe_capture('git', 'merge-base',
$lastpicked, $parent);
};
# The two branches may not be related at all,
return $retVal;
}
- my($fileHash)=safe_pipe_capture("git","rev-parse","$revCommit:$filename");
+ my($fileHash) = ::safe_pipe_capture("git","rev-parse","$revCommit:$filename");
chomp $fileHash;
if(!($fileHash=~/^[0-9a-f]{40}$/))
{
return $commitHash;
}
- $commitHash=safe_pipe_capture("git","rev-parse","--verify","--quiet",
- $self->unescapeRefName($ref));
+ $commitHash = ::safe_pipe_capture("git","rev-parse","--verify","--quiet",
+ $self->unescapeRefName($ref));
$commitHash=~s/\s*$//;
if(!($commitHash=~/^[0-9a-f]{40}$/))
{
if( defined($commitHash) )
{
- my $type=safe_pipe_capture("git","cat-file","-t",$commitHash);
+ my $type = ::safe_pipe_capture("git","cat-file","-t",$commitHash);
if( ! ($type=~/^commit\s*$/ ) )
{
$commitHash=undef;
return $message;
}
- my @lines = safe_pipe_capture("git", "cat-file", "commit", $commithash);
+ my @lines = ::safe_pipe_capture("git", "cat-file", "commit", $commithash);
shift @lines while ( $lines[0] =~ /\S/ );
$message = join("",@lines);
$message .= " " if ( $message =~ /\n$/ );
return $retval;
}
-=head2 safe_pipe_capture
-
-an alternative to `command` that allows input to be passed as an array
-to work around shell problems with weird characters in arguments
-
-=cut
-sub safe_pipe_capture {
-
- my @output;
-
- if (my $pid = open my $child, '-|') {
- @output = (<$child>);
- close $child or die join(' ',@_).": $! $?";
- } else {
- exec(@_) or die "$! $?"; # exec() can fail the executable can't be found
- }
- return wantarray ? @output : join('',@output);
-}
-
=head2 mangle_dirname
create a string from a directory name that is suitable to use as
[--parent-filter <command>] [--msg-filter <command>]
[--commit-filter <command>] [--tag-name-filter <command>]
[--subdirectory-filter <directory>] [--original <namespace>]
- [-d <directory>] [-f | --force]
+ [-d <directory>] [-f | --force] [--state-branch <branch>]
[--] [<rev-list options>...]"
OPTIONS_SPEC=
filter_commit=
filter_tag_name=
filter_subdir=
+state_branch=
orig_namespace=refs/original/
force=
prune_empty=
--original)
orig_namespace=$(expr "$OPTARG/" : '\(.*[^/]\)/*$')/
;;
+ --state-branch)
+ state_branch="$OPTARG"
+ ;;
*)
usage
;;
ORIG_GIT_DIR="$GIT_DIR"
ORIG_GIT_WORK_TREE="$GIT_WORK_TREE"
ORIG_GIT_INDEX_FILE="$GIT_INDEX_FILE"
+ORIG_GIT_AUTHOR_NAME="$GIT_AUTHOR_NAME"
+ORIG_GIT_AUTHOR_EMAIL="$GIT_AUTHOR_EMAIL"
+ORIG_GIT_AUTHOR_DATE="$GIT_AUTHOR_DATE"
+ORIG_GIT_COMMITTER_NAME="$GIT_COMMITTER_NAME"
+ORIG_GIT_COMMITTER_EMAIL="$GIT_COMMITTER_EMAIL"
+ORIG_GIT_COMMITTER_DATE="$GIT_COMMITTER_DATE"
+
GIT_WORK_TREE=.
export GIT_DIR GIT_WORK_TREE
# map old->new commit ids for rewriting parents
mkdir ../map || die "Could not create map/ directory"
+if test -n "$state_branch"
+then
+ state_commit=$(git rev-parse --no-flags --revs-only "$state_branch")
+ if test -n "$state_commit"
+ then
+ echo "Populating map from $state_branch ($state_commit)" 1>&2
+ perl -e'open(MAP, "-|", "git show $ARGV[0]:filter.map") or die;
+ while (<MAP>) {
+ m/(.*):(.*)/ or die;
+ open F, ">../map/$1" or die;
+ print F "$2" or die;
+ close(F) or die;
+ }
+ close(MAP) or die;' "$state_commit" \
+ || die "Unable to load state from $state_branch:filter.map"
+ else
+ echo "Branch $state_branch does not exist. Will create" 1>&2
+ fi
+fi
+
# we need "--" only if there are no path arguments in $@
nonrevs=$(git rev-parse --no-revs "$@") || exit
if test -z "$nonrevs"
}' \
-e '/^-----BEGIN PGP SIGNATURE-----/q' \
-e 'p' ) |
- git mktag) ||
+ git hash-object -t tag -w --stdin) ||
die "Could not create new tag object for $ref"
if git cat-file tag "$ref" | \
sane_grep '^-----BEGIN PGP SIGNATURE-----' >/dev/null 2>&1
done
fi
-cd "$orig_dir"
-rm -rf "$tempdir"
-
-trap - 0
-
unset GIT_DIR GIT_WORK_TREE GIT_INDEX_FILE
+unset GIT_AUTHOR_NAME GIT_AUTHOR_EMAIL GIT_AUTHOR_DATE
+unset GIT_COMMITTER_NAME GIT_COMMITTER_EMAIL GIT_COMMITTER_DATE
test -z "$ORIG_GIT_DIR" || {
GIT_DIR="$ORIG_GIT_DIR" && export GIT_DIR
}
GIT_INDEX_FILE="$ORIG_GIT_INDEX_FILE" &&
export GIT_INDEX_FILE
}
+test -z "$ORIG_GIT_AUTHOR_NAME" || {
+ GIT_AUTHOR_NAME="$ORIG_GIT_AUTHOR_NAME" &&
+ export GIT_AUTHOR_NAME
+}
+test -z "$ORIG_GIT_AUTHOR_EMAIL" || {
+ GIT_AUTHOR_EMAIL="$ORIG_GIT_AUTHOR_EMAIL" &&
+ export GIT_AUTHOR_EMAIL
+}
+test -z "$ORIG_GIT_AUTHOR_DATE" || {
+ GIT_AUTHOR_DATE="$ORIG_GIT_AUTHOR_DATE" &&
+ export GIT_AUTHOR_DATE
+}
+test -z "$ORIG_GIT_COMMITTER_NAME" || {
+ GIT_COMMITTER_NAME="$ORIG_GIT_COMMITTER_NAME" &&
+ export GIT_COMMITTER_NAME
+}
+test -z "$ORIG_GIT_COMMITTER_EMAIL" || {
+ GIT_COMMITTER_EMAIL="$ORIG_GIT_COMMITTER_EMAIL" &&
+ export GIT_COMMITTER_EMAIL
+}
+test -z "$ORIG_GIT_COMMITTER_DATE" || {
+ GIT_COMMITTER_DATE="$ORIG_GIT_COMMITTER_DATE" &&
+ export GIT_COMMITTER_DATE
+}
+
+if test -n "$state_branch"
+then
+ echo "Saving rewrite state to $state_branch" 1>&2
+ state_blob=$(
+ perl -e'opendir D, "../map" or die;
+ open H, "|-", "git hash-object -w --stdin" or die;
+ foreach (sort readdir(D)) {
+ next if m/^\.\.?$/;
+ open F, "<../map/$_" or die;
+ chomp($f = <F>);
+ print H "$_:$f\n" or die;
+ }
+ close(H) or die;' || die "Unable to save state")
+ state_tree=$(/bin/echo -e "100644 blob $state_blob\tfilter.map" | git mktree)
+ if test -n "$state_commit"
+ then
+ state_commit=$(/bin/echo "Sync" | git commit-tree "$state_tree" -p "$state_commit")
+ else
+ state_commit=$(/bin/echo "Sync" | git commit-tree "$state_tree" )
+ fi
+ git update-ref "$state_branch" "$state_commit"
+fi
+
+cd "$orig_dir"
+rm -rf "$tempdir"
+
+trap - 0
if [ "$(is_bare_repository)" = false ]; then
git read-tree -u -m HEAD || exit
append_todo_help () {
gettext "
Commands:
- p, pick = use commit
- r, reword = use commit, but edit the commit message
- e, edit = use commit, but stop for amending
- s, squash = use commit, but meld into previous commit
- f, fixup = like \"squash\", but discard this commit's log message
- x, exec = run command (the rest of the line) using shell
- d, drop = remove commit
+p, pick = use commit
+r, reword = use commit, but edit the commit message
+e, edit = use commit, but stop for amending
+s, squash = use commit, but meld into previous commit
+f, fixup = like \"squash\", but discard this commit's log message
+x, exec = run command (the rest of the line) using shell
+d, drop = remove commit
These lines can be re-ordered; they are executed from top to bottom.
" | git stripspace --comment-lines >>"$todo"
done
}
-# skip picking commits whose parents are unchanged
-skip_unnecessary_picks () {
- fd=3
- while read -r command rest
- do
- # fd=3 means we skip the command
- case "$fd,$command" in
- 3,pick|3,p)
- # pick a commit whose parent is current $onto -> skip
- sha1=${rest%% *}
- case "$(git rev-parse --verify --quiet "$sha1"^)" in
- "$onto"*)
- onto=$sha1
- ;;
- *)
- fd=1
- ;;
- esac
- ;;
- 3,"$comment_char"*|3,)
- # copy comments
- ;;
- *)
- fd=1
- ;;
- esac
- printf '%s\n' "$command${rest:+ }$rest" >&$fd
- done <"$todo" >"$todo.new" 3>>"$done" &&
- mv -f "$todo".new "$todo" &&
- case "$(peek_next_command)" in
- squash|s|fixup|f)
- record_in_rewritten "$onto"
- ;;
- esac ||
- die "$(gettext "Could not skip unnecessary pick commands")"
-}
-
-transform_todo_ids () {
- while read -r command rest
- do
- case "$command" in
- "$comment_char"* | exec)
- # Be careful for oddball commands like 'exec'
- # that do not have a SHA-1 at the beginning of $rest.
- ;;
- *)
- sha1=$(git rev-parse --verify --quiet "$@" ${rest%%[ ]*}) &&
- rest="$sha1 ${rest#*[ ]}"
- ;;
- esac
- printf '%s\n' "$command${rest:+ }$rest"
- done <"$todo" >"$todo.new" &&
- mv -f "$todo.new" "$todo"
-}
-
expand_todo_ids() {
- transform_todo_ids
+ git rebase--helper --expand-ids
}
collapse_todo_ids() {
- transform_todo_ids --short
-}
-
-# Rearrange the todo list that has both "pick sha1 msg" and
-# "pick sha1 fixup!/squash! msg" appears in it so that the latter
-# comes immediately after the former, and change "pick" to
-# "fixup"/"squash".
-#
-# Note that if the config has specified a custom instruction format
-# each log message will be re-retrieved in order to normalize the
-# autosquash arrangement
-rearrange_squash () {
- # extract fixup!/squash! lines and resolve any referenced sha1's
- while read -r pick sha1 message
- do
- test -z "${format}" || message=$(git log -n 1 --format="%s" ${sha1})
- case "$message" in
- "squash! "*|"fixup! "*)
- action="${message%%!*}"
- rest=$message
- prefix=
- # skip all squash! or fixup! (but save for later)
- while :
- do
- case "$rest" in
- "squash! "*|"fixup! "*)
- prefix="$prefix${rest%%!*},"
- rest="${rest#*! }"
- ;;
- *)
- break
- ;;
- esac
- done
- printf '%s %s %s %s\n' "$sha1" "$action" "$prefix" "$rest"
- # if it's a single word, try to resolve to a full sha1 and
- # emit a second copy. This allows us to match on both message
- # and on sha1 prefix
- if test "${rest#* }" = "$rest"; then
- fullsha="$(git rev-parse -q --verify "$rest" 2>/dev/null)"
- if test -n "$fullsha"; then
- # prefix the action to uniquely identify this line as
- # intended for full sha1 match
- echo "$sha1 +$action $prefix $fullsha"
- fi
- fi
- esac
- done >"$1.sq" <"$1"
- test -s "$1.sq" || return
-
- used=
- while read -r pick sha1 message
- do
- case " $used" in
- *" $sha1 "*) continue ;;
- esac
- printf '%s\n' "$pick $sha1 $message"
- test -z "${format}" || message=$(git log -n 1 --format="%s" ${sha1})
- used="$used$sha1 "
- while read -r squash action msg_prefix msg_content
- do
- case " $used" in
- *" $squash "*) continue ;;
- esac
- emit=0
- case "$action" in
- +*)
- action="${action#+}"
- # full sha1 prefix test
- case "$msg_content" in "$sha1"*) emit=1;; esac ;;
- *)
- # message prefix test
- case "$message" in "$msg_content"*) emit=1;; esac ;;
- esac
- if test $emit = 1; then
- if test -n "${format}"
- then
- msg_content=$(git log -n 1 --format="${format}" ${squash})
- else
- msg_content="$(echo "$msg_prefix" | sed "s/,/! /g")$msg_content"
- fi
- printf '%s\n' "$action $squash $msg_content"
- used="$used$squash "
- fi
- done <"$1.sq"
- done >"$1.rearranged" <"$1"
- cat "$1.rearranged" >"$1"
- rm -f "$1.sq" "$1.rearranged"
+ git rebase--helper --shorten-ids
}
# Add commands after a pick or after a squash/fixup serie
mv "$1.new" "$1"
}
-# Check if the SHA-1 passed as an argument is a
-# correct one, if not then print $2 in "$todo".badsha
-# $1: the SHA-1 to test
-# $2: the line number of the input
-# $3: the input filename
-check_commit_sha () {
- badsha=0
- if test -z "$1"
- then
- badsha=1
- else
- sha1_verif="$(git rev-parse --verify --quiet $1^{commit})"
- if test -z "$sha1_verif"
- then
- badsha=1
- fi
- fi
-
- if test $badsha -ne 0
- then
- line="$(sed -n -e "${2}p" "$3")"
- warn "$(eval_gettext "\
-Warning: the SHA-1 is missing or isn't a commit in the following line:
- - \$line")"
- warn
- fi
-
- return $badsha
-}
-
-# prints the bad commits and bad commands
-# from the todolist in stdin
-check_bad_cmd_and_sha () {
- retval=0
- lineno=0
- while read -r command rest
- do
- lineno=$(( $lineno + 1 ))
- case $command in
- "$comment_char"*|''|noop|x|exec)
- # Doesn't expect a SHA-1
- ;;
- "$cr")
- # Work around CR left by "read" (e.g. with Git for
- # Windows' Bash).
- ;;
- pick|p|drop|d|reword|r|edit|e|squash|s|fixup|f)
- if ! check_commit_sha "${rest%%[ ]*}" "$lineno" "$1"
- then
- retval=1
- fi
- ;;
- *)
- line="$(sed -n -e "${lineno}p" "$1")"
- warn "$(eval_gettext "\
-Warning: the command isn't recognized in the following line:
- - \$line")"
- warn
- retval=1
- ;;
- esac
- done <"$1"
- return $retval
-}
-
-# Print the list of the SHA-1 of the commits
-# from stdin to stdout
-todo_list_to_sha_list () {
- git stripspace --strip-comments |
- while read -r command sha1 rest
- do
- case $command in
- "$comment_char"*|''|noop|x|"exec")
- ;;
- *)
- long_sha=$(git rev-list --no-walk "$sha1" 2>/dev/null)
- printf "%s\n" "$long_sha"
- ;;
- esac
- done
-}
-
-# Use warn for each line in stdin
-warn_lines () {
- while read -r line
- do
- warn " - $line"
- done
-}
-
# Switch to the branch in $into and notify it in the reflog
checkout_onto () {
GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $onto_name"
printf '%s' "$check_level" | tr 'A-Z' 'a-z'
}
-# Check if the user dropped some commits by mistake
-# Behaviour determined by rebase.missingCommitsCheck.
-# Check if there is an unrecognized command or a
-# bad SHA-1 in a command.
-check_todo_list () {
- raise_error=f
-
- check_level=$(get_missing_commit_check_level)
-
- case "$check_level" in
- warn|error)
- # Get the SHA-1 of the commits
- todo_list_to_sha_list <"$todo".backup >"$todo".oldsha1
- todo_list_to_sha_list <"$todo" >"$todo".newsha1
-
- # Sort the SHA-1 and compare them
- sort -u "$todo".oldsha1 >"$todo".oldsha1+
- mv "$todo".oldsha1+ "$todo".oldsha1
- sort -u "$todo".newsha1 >"$todo".newsha1+
- mv "$todo".newsha1+ "$todo".newsha1
- comm -2 -3 "$todo".oldsha1 "$todo".newsha1 >"$todo".miss
-
- # Warn about missing commits
- if test -s "$todo".miss
- then
- test "$check_level" = error && raise_error=t
-
- warn "$(gettext "\
-Warning: some commits may have been dropped accidentally.
-Dropped commits (newer to older):")"
-
- # Make the list user-friendly and display
- opt="--no-walk=sorted --format=oneline --abbrev-commit --stdin"
- git rev-list $opt <"$todo".miss | warn_lines
-
- warn "$(gettext "\
-To avoid this message, use \"drop\" to explicitly remove a commit.
-
-Use 'git config rebase.missingCommitsCheck' to change the level of warnings.
-The possible behaviours are: ignore, warn, error.")"
- warn
- fi
- ;;
- ignore)
- ;;
- *)
- warn "$(eval_gettext "Unrecognized setting \$check_level for option rebase.missingCommitsCheck. Ignoring.")"
- ;;
- esac
-
- if ! check_bad_cmd_and_sha "$todo"
- then
- raise_error=t
- fi
-
- if test $raise_error = t
- then
- # Checkout before the first commit of the
- # rebase: this way git rebase --continue
- # will work correctly as it expects HEAD to be
- # placed before the commit of the next action
- checkout_onto
-
- warn "$(gettext "You can fix this with 'git rebase --edit-todo' and then run 'git rebase --continue'.")"
- die "$(gettext "Or you can abort the rebase with 'git rebase --abort'.")"
- fi
-}
-
# The whole contents of this file is run by dot-sourcing it from
# inside a shell function. It used to be that "return"s we see
# below were not inside any function, and expected to return
revisions=$onto...$orig_head
shortrevisions=$shorthead
fi
-format=$(git config --get rebase.instructionFormat)
-# the 'rev-list .. | sed' requires %m to parse; the instruction requires %H to parse
-git rev-list $merges_option --format="%m%H ${format:-%s}" \
- --reverse --left-right --topo-order \
- $revisions ${restrict_revision+^$restrict_revision} | \
- sed -n "s/^>//p" |
-while read -r sha1 rest
-do
-
- if test -z "$keep_empty" && is_empty_commit $sha1 && ! is_merge_commit $sha1
- then
- comment_out="$comment_char "
- else
- comment_out=
- fi
+if test t != "$preserve_merges"
+then
+ git rebase--helper --make-script ${keep_empty:+--keep-empty} \
+ $revisions ${restrict_revision+^$restrict_revision} >"$todo"
+else
+ format=$(git config --get rebase.instructionFormat)
+ # the 'rev-list .. | sed' requires %m to parse; the instruction requires %H to parse
+ git rev-list $merges_option --format="%m%H ${format:-%s}" \
+ --reverse --left-right --topo-order \
+ $revisions ${restrict_revision+^$restrict_revision} | \
+ sed -n "s/^>//p" |
+ while read -r sha1 rest
+ do
+
+ if test -z "$keep_empty" && is_empty_commit $sha1 && ! is_merge_commit $sha1
+ then
+ comment_out="$comment_char "
+ else
+ comment_out=
+ fi
- if test t != "$preserve_merges"
- then
- printf '%s\n' "${comment_out}pick $sha1 $rest" >>"$todo"
- else
if test -z "$rebase_root"
then
preserve=t
touch "$rewritten"/$sha1
printf '%s\n' "${comment_out}pick $sha1 $rest" >>"$todo"
fi
- fi
-done
+ done
+fi
# Watch for commits that been dropped by --cherry-pick
if test t = "$preserve_merges"
fi
test -s "$todo" || echo noop >> "$todo"
-test -n "$autosquash" && rearrange_squash "$todo"
+test -z "$autosquash" || git rebase--helper --rearrange-squash || exit
test -n "$cmd" && add_exec_commands "$todo"
todocount=$(git stripspace --strip-comments <"$todo" | wc -l)
has_action "$todo" ||
return 2
-check_todo_list
+git rebase--helper --check-todo-list || {
+ ret=$?
+ checkout_onto
+ exit $ret
+}
expand_todo_ids
-test -d "$rewritten" || test -n "$force_rebase" || skip_unnecessary_picks
+test -d "$rewritten" || test -n "$force_rebase" ||
+onto="$(git rebase--helper --skip-unnecessary-picks)" ||
+die "Could not skip unnecessary pick commands"
checkout_onto
if test -z "$rebase_root" && test ! -d "$rewritten"
shift
break
;;
+ *)
+ usage
+ ;;
esac
shift
done
setenv(GIT_ICASE_PATHSPECS_ENVIRONMENT, "1", 1);
if (envchanged)
*envchanged = 1;
+ } else if (!strcmp(cmd, "--no-optional-locks")) {
+ setenv(GIT_OPTIONAL_LOCKS_ENVIRONMENT, "0", 1);
+ if (envchanged)
+ *envchanged = 1;
} else if (!strcmp(cmd, "--shallow-file")) {
(*argv)++;
(*argc)--;
die("zlib error inflating request, result %d", ret);
n = stream.total_out - cnt;
- if (write_in_full(out, out_buf, n) != n)
+ if (write_in_full(out, out_buf, n) < 0)
die("%s aborted reading request", prog_name);
cnt += n;
ssize_t n = read_request(0, &buf);
if (n < 0)
die_errno("error reading request body");
- if (write_in_full(out, buf, n) != n)
+ if (write_in_full(out, buf, n) < 0)
die("%s aborted reading request", prog_name);
close(out);
free(buf);
break;
case HTTP_ERROR:
error("unable to access '%s': %s", url, curl_errorstr);
+ /* fallthrough */
default:
ret = -1;
}
switch (type) {
case CURLINFO_TEXT:
trace_printf_key(&trace_curl, "== Info: %s", data);
- default: /* we ignore unknown types by default */
- return 0;
-
+ break;
case CURLINFO_HEADER_OUT:
text = "=> Send header";
curl_dump_header(text, (unsigned char *)data, size, DO_FILTER);
text = "<= Recv SSL data";
curl_dump_data(text, (unsigned char *)data, size);
break;
+
+ default: /* we ignore unknown types by default */
+ return 0;
}
return 0;
}
#include "http.h"
#endif
-#if defined(USE_CURL_FOR_IMAP_SEND) && defined(NO_OPENSSL)
-/* only available option */
+#if defined(USE_CURL_FOR_IMAP_SEND)
+/* Always default to curl if it's available. */
#define USE_CURL_DEFAULT 1
#else
-/* strictly opt in */
+/* We don't have curl, so continue to use the historical implementation */
#define USE_CURL_DEFAULT 0
#endif
return 0;
}
+static void server_fill_credential(struct imap_server_conf *srvc, struct credential *cred)
+{
+ if (srvc->user && srvc->pass)
+ return;
+
+ cred->protocol = xstrdup(srvc->use_ssl ? "imaps" : "imap");
+ cred->host = xstrdup(srvc->host);
+
+ cred->username = xstrdup_or_null(srvc->user);
+ cred->password = xstrdup_or_null(srvc->pass);
+
+ credential_fill(cred);
+
+ if (!srvc->user)
+ srvc->user = xstrdup(cred->username);
+ if (!srvc->pass)
+ srvc->pass = xstrdup(cred->password);
+}
+
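For context, server_fill_credential() leans on the credential API's fill/approve/reject lifecycle. A hedged stand-alone sketch of that lifecycle (do_login() is a hypothetical stand-in for the actual IMAP login, and the host name is made up):

    static int imap_login_with_credential(void)
    {
        struct credential cred = CREDENTIAL_INIT;
        int ret;

        cred.protocol = xstrdup("imap");
        cred.host = xstrdup("imap.example.com");
        credential_fill(&cred);        /* fill missing username/password from config/helpers */
        ret = do_login(cred.username, cred.password);    /* hypothetical login call */
        if (!ret)
            credential_approve(&cred); /* remember a password that worked */
        else
            credential_reject(&cred);  /* forget one that was refused */
        credential_clear(&cred);
        return ret;
    }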
static struct imap_store *imap_open_store(struct imap_server_conf *srvc, char *folder)
{
struct credential cred = CREDENTIAL_INIT;
}
#endif
imap_info("Logging in...\n");
- if (!srvc->user || !srvc->pass) {
- cred.protocol = xstrdup(srvc->use_ssl ? "imaps" : "imap");
- cred.host = xstrdup(srvc->host);
-
- cred.username = xstrdup_or_null(srvc->user);
- cred.password = xstrdup_or_null(srvc->pass);
-
- credential_fill(&cred);
-
- if (!srvc->user)
- srvc->user = xstrdup(cred.username);
- if (!srvc->pass)
- srvc->pass = xstrdup(cred.password);
- }
+ server_fill_credential(srvc, &cred);
if (srvc->auth_method) {
struct imap_cmd_cb cb;
}
#ifdef USE_CURL_FOR_IMAP_SEND
-static CURL *setup_curl(struct imap_server_conf *srvc)
+static CURL *setup_curl(struct imap_server_conf *srvc, struct credential *cred)
{
CURL *curl;
struct strbuf path = STRBUF_INIT;
if (!curl)
die("curl_easy_init failed");
+ server_fill_credential(&server, cred);
curl_easy_setopt(curl, CURLOPT_USERNAME, server.user);
curl_easy_setopt(curl, CURLOPT_PASSWORD, server.pass);
struct buffer msgbuf = { STRBUF_INIT, 0 };
CURL *curl;
CURLcode res = CURLE_OK;
+ struct credential cred = CREDENTIAL_INIT;
- curl = setup_curl(server);
+ curl = setup_curl(server, &cred);
curl_easy_setopt(curl, CURLOPT_READDATA, &msgbuf);
fprintf(stderr, "sending %d message%s\n", total, (total != 1) ? "s" : "");
curl_easy_cleanup(curl);
curl_global_cleanup();
- return 0;
+ if (cred.username) {
+ if (res == CURLE_OK)
+ credential_approve(&cred);
+#if LIBCURL_VERSION_NUM >= 0x070d01
+ else if (res == CURLE_LOGIN_DENIED)
+#else
+ else
+#endif
+ credential_reject(&cred);
+ }
+
+ credential_clear(&cred);
+
+ return res != CURLE_OK;
}
#endif
*/
static void range_set_check_invariants(struct range_set *rs)
{
- int i;
+ unsigned int i;
if (!rs)
return;
*/
void sort_and_merge_range_set(struct range_set *rs)
{
- int i;
- int o = 0; /* output cursor */
+ unsigned int i;
+ unsigned int o = 0; /* output cursor */
QSORT(rs->ranges, rs->nr, range_cmp);
static void range_set_union(struct range_set *out,
struct range_set *a, struct range_set *b)
{
- int i = 0, j = 0;
+ unsigned int i = 0, j = 0;
struct range *ra = a->ranges;
struct range *rb = b->ranges;
/* cannot make an alias of out->ranges: it may change during grow */
static void range_set_difference(struct range_set *out,
struct range_set *a, struct range_set *b)
{
- int i, j = 0;
+ unsigned int i, j = 0;
for (i = 0; i < a->nr; i++) {
long start = a->ranges[i].start;
long end = a->ranges[i].end;
struct diff_ranges *diff,
struct range_set *rs)
{
- int i, j = 0;
+ unsigned int i, j = 0;
assert(out->target.nr == 0);
struct range_set *rs,
struct diff_ranges *diff)
{
- int i, j = 0;
+ unsigned int i, j = 0;
long offset = 0;
struct range *src = rs->ranges;
struct range *target = diff->target.ranges;
static void dump_diff_hacky_one(struct rev_info *rev, struct line_log_data *range)
{
- int i, j = 0;
+ unsigned int i, j = 0;
long p_lines, t_lines;
unsigned long *p_ends = NULL, *t_ends = NULL;
struct diff_filepair *pair = range->pair;
long t_start = range->ranges.ranges[i].start;
long t_end = range->ranges.ranges[i].end;
long t_cur = t_start;
- int j_last;
+ unsigned int j_last;
while (j < diff->target.nr && diff->target.ranges[j].end < t_start)
j++;
/* A set of ranges. The ranges must always be disjoint and sorted. */
struct range_set {
- int alloc, nr;
+ unsigned int alloc, nr;
struct range *ranges;
};
xsnprintf(path, len, ".merge_file_XXXXXX");
fd = xmkstemp(path);
- if (write_in_full(fd, src->ptr, src->size) != src->size)
+ if (write_in_full(fd, src->ptr, src->size) < 0)
die_errno("unable to write temp-file");
close(fd);
}
while ((c = *in++) != 0) {
if (c == '=') {
- int d = *in++;
+ int ch, d = *in;
if (d == '\n' || !d)
break; /* drop trailing newline */
- strbuf_addch(out, (hexval(d) << 4) | hexval(*in++));
- continue;
+ ch = hex2chr(in);
+ if (ch >= 0) {
+ strbuf_addch(out, ch);
+ in += 2;
+ continue;
+ }
+ /* garbage -- fall through */
}
if (rfc2047 && c == '_') /* rfc2047 4.2 (2) */
c = 0x20;
if (!handle_commit_msg(mi, line))
break;
mi->filter_stage++;
+ /* fallthrough */
case 1:
handle_patch(mi, line);
break;
fd = xopen(path, O_WRONLY | O_EXCL | O_CREAT, 0666);
while (size > 0) {
- long ret = write_in_full(fd, buf, size);
+ ssize_t ret = write_in_full(fd, buf, size);
if (ret < 0) {
/* Ignore epipe */
if (errno == EPIPE)
break;
die_errno("notes-merge");
- } else if (!ret) {
- die("notes-merge: disk full?");
}
size -= ret;
buf += ret;
free(ent->path);
}
+struct object *object_array_pop(struct object_array *array)
+{
+ struct object *ret;
+
+ if (!array->nr)
+ return NULL;
+
+ ret = array->objects[array->nr - 1].item;
+ object_array_release_entry(&array->objects[array->nr - 1]);
+ array->nr--;
+ return ret;
+}
+
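A minimal usage sketch of the new helper (not part of the patch): pop entries until the array is empty, then release the backing storage. handle_object() is a hypothetical per-entry callback.

	static void drain_array(struct object_array *array)
	{
		struct object *obj;

		while ((obj = object_array_pop(array)) != NULL)
			handle_object(obj);	/* hypothetical callback */

		/*
		 * nr is now zero, but the entry storage may still be
		 * allocated; object_array_clear() releases it.
		 */
		object_array_clear(array);
	}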
void object_array_filter(struct object_array *array,
object_array_each_func_t want, void *cb_data)
{
void add_object_array(struct object *obj, const char *name, struct object_array *array);
void add_object_array_with_path(struct object *obj, const char *name, struct object_array *array, unsigned mode, const char *path);
+/*
+ * Returns NULL if the array is empty. Otherwise, returns the last object
+ * after removing its entry from the array. Other resources associated
+ * with that object are left in an unspecified state and should not be
+ * examined.
+ */
+struct object *object_array_pop(struct object_array *array);
+
typedef int (*object_array_each_func_t)(struct object_array_entry *, void *);
/*
traverse_commit_list(&revs, show_commit, show_object, base);
- revs.pending.nr = 0;
- revs.pending.alloc = 0;
- revs.pending.objects = NULL;
+ object_array_clear(&revs.pending);
stored->bitmap = bitmap_to_ewah(base);
need_reset = 0;
int prepare_bitmap_walk(struct rev_info *revs)
{
unsigned int i;
- unsigned int pending_nr = revs->pending.nr;
- struct object_array_entry *pending_e = revs->pending.objects;
struct object_list *wants = NULL;
struct object_list *haves = NULL;
return -1;
}
- for (i = 0; i < pending_nr; ++i) {
- struct object *object = pending_e[i].item;
+ for (i = 0; i < revs->pending.nr; ++i) {
+ struct object *object = revs->pending.objects[i].item;
if (object->type == OBJ_NONE)
parse_object_or_die(&object->oid, NULL);
if (!bitmap_git.loaded && load_pack_bitmap() < 0)
return -1;
- revs->pending.nr = 0;
- revs->pending.alloc = 0;
- revs->pending.objects = NULL;
+ object_array_clear(&revs->pending);
if (haves) {
revs->ignore_missing_links = 1;
git_SHA_CTX old_sha1_ctx, new_sha1_ctx;
struct pack_header hdr;
char *buf;
+ ssize_t read_result;
git_SHA1_Init(&old_sha1_ctx);
git_SHA1_Init(&new_sha1_ctx);
if (lseek(pack_fd, 0, SEEK_SET) != 0)
die_errno("Failed seeking to start of '%s'", pack_name);
- if (read_in_full(pack_fd, &hdr, sizeof(hdr)) != sizeof(hdr))
+ read_result = read_in_full(pack_fd, &hdr, sizeof(hdr));
+ if (read_result < 0)
die_errno("Unable to reread header of '%s'", pack_name);
+ else if (read_result != sizeof(hdr))
+ die_errno("Unexpected short read for header of '%s'",
+ pack_name);
if (lseek(pack_fd, 0, SEEK_SET) != 0)
die_errno("Failed seeking to start of '%s'", pack_name);
git_SHA1_Update(&old_sha1_ctx, &hdr, sizeof(hdr));
static size_t peak_pack_mapped;
static size_t pack_mapped;
struct packed_git *packed_git;
-
-static struct mru packed_git_mru_storage;
-struct mru *packed_git_mru = &packed_git_mru_storage;
+struct mru packed_git_mru;
#define SZ_FMT PRIuMAX
static inline uintmax_t sz_fmt(size_t s) { return s; }
unsigned char sha1[20];
unsigned char *idx_sha1;
long fd_flag;
+ ssize_t read_result;
if (!p->index_data && open_pack_index(p))
return error("packfile %s index unavailable", p->pack_name);
return error("cannot set FD_CLOEXEC");
/* Verify we recognize this pack file format. */
- if (read_in_full(p->pack_fd, &hdr, sizeof(hdr)) != sizeof(hdr))
+ read_result = read_in_full(p->pack_fd, &hdr, sizeof(hdr));
+ if (read_result < 0)
+ return error_errno("error reading from %s", p->pack_name);
+ if (read_result != sizeof(hdr))
return error("file %s is far too short to be a packfile", p->pack_name);
if (hdr.hdr_signature != htonl(PACK_SIGNATURE))
return error("file %s is not a GIT packfile", p->pack_name);
p->num_objects);
if (lseek(p->pack_fd, p->pack_size - sizeof(sha1), SEEK_SET) == -1)
return error("end of packfile %s is unavailable", p->pack_name);
- if (read_in_full(p->pack_fd, sha1, sizeof(sha1)) != sizeof(sha1))
+ read_result = read_in_full(p->pack_fd, sha1, sizeof(sha1));
+ if (read_result < 0)
+ return error_errno("error reading from %s", p->pack_name);
+ if (read_result != sizeof(sha1))
return error("packfile %s signature is unavailable", p->pack_name);
idx_sha1 = ((unsigned char *)p->index_data) + p->index_size - 40;
if (hashcmp(sha1, idx_sha1))
{
struct packed_git *p;
- mru_clear(packed_git_mru);
+ mru_clear(&packed_git_mru);
for (p = packed_git; p; p = p->next)
- mru_append(packed_git_mru, p);
+ mru_append(&packed_git_mru, p);
}
static int prepare_packed_git_run_once = 0;
if (!packed_git)
return 0;
- for (p = packed_git_mru->head; p; p = p->next) {
+ for (p = packed_git_mru.head; p; p = p->next) {
if (fill_pack_entry(sha1, e, p->item)) {
- mru_mark(packed_git_mru, p);
+ mru_mark(&packed_git_mru, p);
return 1;
}
}
const struct option *opts, int full, int err)
{
FILE *outfile = err ? stderr : stdout;
+ int need_newline;
if (!usagestr)
return PARSE_OPT_HELP;
if (**usagestr)
fprintf_ln(outfile, _(" %s"), _(*usagestr));
else
- putchar('\n');
+ fputc('\n', outfile);
usagestr++;
}
- if (opts->type != OPTION_GROUP)
- fputc('\n', outfile);
+ need_newline = 1;
for (; opts->type != OPTION_END; opts++) {
size_t pos;
if (opts->type == OPTION_GROUP) {
fputc('\n', outfile);
+ need_newline = 0;
if (*opts->help)
fprintf(outfile, "%s\n", _(opts->help));
continue;
if (!full && (opts->flags & PARSE_OPT_HIDDEN))
continue;
+ if (need_newline) {
+ fputc('\n', outfile);
+ need_newline = 0;
+ }
+
pos = fprintf(outfile, " ");
if (opts->short_name) {
if (opts->flags & PARSE_OPT_NODASH)
int validate_headref(const char *path)
{
struct stat st;
- char *buf, buffer[256];
- unsigned char sha1[20];
+ char buffer[256];
+ const char *refname;
+ struct object_id oid;
int fd;
ssize_t len;
len = read_in_full(fd, buffer, sizeof(buffer)-1);
close(fd);
+ if (len < 0)
+ return -1;
+ buffer[len] = '\0';
+
/*
* Is it a symbolic ref?
*/
- if (len < 4)
- return -1;
- if (!memcmp("ref:", buffer, 4)) {
- buf = buffer + 4;
- len -= 4;
- while (len && isspace(*buf))
- buf++, len--;
- if (len >= 5 && !memcmp("refs/", buf, 5))
+ if (skip_prefix(buffer, "ref:", &refname)) {
+ while (isspace(*refname))
+ refname++;
+ if (starts_with(refname, "refs/"))
return 0;
}
/*
* Is this a detached HEAD?
*/
- if (!get_sha1_hex(buffer, sha1))
+ if (!get_oid_hex(buffer, &oid))
return 0;
return -1;
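For illustration, the two well-formed HEAD contents that validate_headref() accepts after this rewrite are a symref line or a detached-head object name (the hash below is made up):

	ref: refs/heads/master
	3f786850e387550fdab836ed7e6dc881de23001b

Anything else, including a short or failed read, now falls through to the final return of -1.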
pattern, sb.buf);
}
-/*
- * Given command line arguments and a prefix, convert the input to
- * pathspec. die() if any magic in magic_mask is used.
- */
void parse_pathspec(struct pathspec *pathspec,
unsigned magic_mask, unsigned flags,
const char *prefix, const char **argv)
*/
#define PATHSPEC_LITERAL_PATH (1<<6)
+/*
+ * Given command line arguments and a prefix, convert the input to
+ * pathspec. die() if any magic in magic_mask is used.
+ *
+ * Any arguments used are copied. It is safe for the caller to modify
+ * or free 'prefix' and 'args' after calling this function.
+ */
extern void parse_pathspec(struct pathspec *pathspec,
unsigned magic_mask,
unsigned flags,
int packet_flush_gently(int fd)
{
packet_trace("0000", 4, 1);
- if (write_in_full(fd, "0000", 4) == 4)
- return 0;
- return error("flush packet write failed");
+ if (write_in_full(fd, "0000", 4) < 0)
+ return error("flush packet write failed");
+ return 0;
}
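The conversions in this and the following hunks rely on write_in_full() returning the full count on success and -1 on any error, so callers only need to test for a negative result. A minimal sketch of the idiom (write_message() is a made-up caller, not from the patch):

	static int write_message(int fd, const struct strbuf *msg)
	{
		/*
		 * write_in_full() retries short writes internally; any
		 * return value < 0 means the write ultimately failed.
		 */
		if (write_in_full(fd, msg->buf, msg->len) < 0)
			return error_errno("could not write message");
		return 0;
	}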
void packet_buf_flush(struct strbuf *buf)
const char *fmt, va_list args)
{
static struct strbuf buf = STRBUF_INIT;
- ssize_t count;
strbuf_reset(&buf);
format_packet(&buf, fmt, args);
- count = write_in_full(fd, buf.buf, buf.len);
- if (count == buf.len)
- return 0;
-
- if (!gently) {
- check_pipe(errno);
- die_errno("packet write with format failed");
+ if (write_in_full(fd, buf.buf, buf.len) < 0) {
+ if (!gently) {
+ check_pipe(errno);
+ die_errno("packet write with format failed");
+ }
+ return error("packet write with format failed");
}
- return error("packet write with format failed");
+
+ return 0;
}
void packet_write_fmt(int fd, const char *fmt, ...)
packet_size = size + 4;
set_packet_header(packet_write_buffer, packet_size);
memcpy(packet_write_buffer + 4, buf, size);
- if (write_in_full(fd_out, packet_write_buffer, packet_size) == packet_size)
- return 0;
- return error("packet write failed");
+ if (write_in_full(fd_out, packet_write_buffer, packet_size) < 0)
+ return error("packet write failed");
+ return 0;
}
void packet_buf_write(struct strbuf *buf, const char *fmt, ...)
}
/* And complain if we didn't get enough bytes to satisfy the read. */
- if (ret < size) {
+ if (ret != size) {
if (options & PACKET_READ_GENTLE_ON_EOF)
return -1;
case '{': case '}':
case '$': case '\\': case '"':
strbuf_addch(sb, '\\');
+ /* fallthrough */
default:
strbuf_addch(sb, c);
break;
case S_IFDIR:
if (S_ISGITLINK(ce->ce_mode))
return ce_compare_gitlink(ce) ? DATA_CHANGED : 0;
+ /* else fallthrough */
default:
return TYPE_CHANGED;
}
unsigned int buffered = write_buffer_len;
if (buffered) {
git_SHA1_Update(context, write_buffer, buffered);
- if (write_in_full(fd, write_buffer, buffered) != buffered)
+ if (write_in_full(fd, write_buffer, buffered) < 0)
return -1;
write_buffer_len = 0;
}
/* Flush first if not enough space for SHA1 signature */
if (left + 20 > WRITE_BUFFER_SIZE) {
- if (write_in_full(fd, write_buffer, left) != left)
+ if (write_in_full(fd, write_buffer, left) < 0)
return -1;
left = 0;
}
git_SHA1_Final(write_buffer + left, context);
hashcpy(sha1, write_buffer + left);
left += 20;
- return (write_in_full(fd, write_buffer, left) != left) ? -1 : 0;
+ return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0;
}
static void ce_smudge_racily_clean_entry(struct cache_entry *ce)
if (!result)
result = ce_write(c, fd, to_remove_vi, prefix_size);
if (!result)
- result = ce_write(c, fd, ce->name + common, ce_namelen(ce) - common + 1);
+ result = ce_write(c, fd, ce->name + common, ce_namelen(ce) - common);
+ if (!result)
+ result = ce_write(c, fd, padding, 1);
strbuf_splice(previous_name, common, to_remove,
ce->name + common, ce_namelen(ce) - common);
}
}
- if (write_in_full(fd, buf.buf, buf.len) != buf.len) {
+ if (write_in_full(fd, buf.buf, buf.len) < 0) {
strbuf_addf(err, "could not write to '%s'", filename);
rollback_lock_file(&lock);
goto done;
return -1;
}
+ flags &= REF_TRANSACTION_UPDATE_ALLOWED_FLAGS;
+
flags |= (new_sha1 ? REF_HAVE_NEW : 0) | (old_sha1 ? REF_HAVE_OLD : 0);
ref_transaction_add_update(transaction, refname, flags,
if (trim)
iter = prefix_ref_iterator_begin(iter, "", trim);
+ /* Sanity check for subclasses: */
+ if (!iter->ordered)
+ BUG("reference iterator is not ordered");
+
return iter;
}
return do_for_each_ref(get_main_ref_store(),
git_replace_ref_base, fn,
strlen(git_replace_ref_base),
- 0, cb_data);
+ DO_FOR_EACH_INCLUDE_BROKEN, cb_data);
}
int for_each_namespaced_ref(each_ref_fn fn, void *cb_data)
int refs_peel_ref(struct ref_store *refs, const char *refname,
unsigned char *sha1)
{
- return refs->be->peel_ref(refs, refname, sha1);
+ int flag;
+ unsigned char base[20];
+
+ if (current_ref_iter && current_ref_iter->refname == refname) {
+ struct object_id peeled;
+
+ if (ref_iterator_peel(current_ref_iter, &peeled))
+ return -1;
+ hashcpy(sha1, peeled.hash);
+ return 0;
+ }
+
+ if (refs_read_ref_full(refs, refname,
+ RESOLVE_REF_READING, base, &flag))
+ return -1;
+
+ return peel_object(base, sha1);
}
int peel_ref(const char *refname, unsigned char *sha1)
{
return refs_rename_ref(get_main_ref_store(), oldref, newref, logmsg);
}
+
+int refs_copy_existing_ref(struct ref_store *refs, const char *oldref,
+ const char *newref, const char *logmsg)
+{
+ return refs->be->copy_ref(refs, oldref, newref, logmsg);
+}
+
+int copy_existing_ref(const char *oldref, const char *newref, const char *logmsg)
+{
+ return refs_copy_existing_ref(get_main_ref_store(), oldref, newref, logmsg);
+}
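A hypothetical caller of the new entry point (the actual user of this API, e.g. a branch-copying command, is outside this excerpt):

	if (copy_existing_ref("refs/heads/topic", "refs/heads/topic-backup",
			      "branch: copy for safekeeping"))
		die("could not copy 'refs/heads/topic'");

On success the new ref exists alongside the old one; unlike rename_ref(), the source ref and its reflog are left in place.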
#define REF_NODEREF 0x01
#define REF_FORCE_CREATE_REFLOG 0x40
+/*
+ * Flags that can be passed in to ref_transaction_update
+ */
+#define REF_TRANSACTION_UPDATE_ALLOWED_FLAGS \
+ REF_ISPRUNING | \
+ REF_FORCE_CREATE_REFLOG | \
+ REF_NODEREF
+
/*
* Setup reflog before using. Fill in err and return -1 on failure.
*/
/** rename ref, return 0 on success **/
int refs_rename_ref(struct ref_store *refs, const char *oldref,
const char *newref, const char *logmsg);
-int rename_ref(const char *oldref, const char *newref, const char *logmsg);
+int rename_ref(const char *oldref, const char *newref,
+ const char *logmsg);
+
+/** copy ref, return 0 on success **/
+int refs_copy_existing_ref(struct ref_store *refs, const char *oldref,
+ const char *newref, const char *logmsg);
+int copy_existing_ref(const char *oldref, const char *newref,
+ const char *logmsg);
int refs_create_symref(struct ref_store *refs, const char *refname,
const char *target, const char *logmsg);
return ret;
}
-static int files_peel_ref(struct ref_store *ref_store,
- const char *refname, unsigned char *sha1)
-{
- struct files_ref_store *refs =
- files_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
- "peel_ref");
- int flag;
- unsigned char base[20];
-
- if (current_ref_iter && current_ref_iter->refname == refname) {
- struct object_id peeled;
-
- if (ref_iterator_peel(current_ref_iter, &peeled))
- return -1;
- hashcpy(sha1, peeled.hash);
- return 0;
- }
-
- if (refs_read_ref_full(ref_store, refname,
- RESOLVE_REF_READING, base, &flag))
- return -1;
-
- /*
- * If the reference is packed, read its ref_entry from the
- * cache in the hope that we already know its peeled value.
- * We only try this optimization on packed references because
- * (a) forcing the filling of the loose reference cache could
- * be expensive and (b) loose references anyway usually do not
- * have REF_KNOWS_PEELED.
- */
- if (flag & REF_ISPACKED &&
- !refs_peel_ref(refs->packed_ref_store, refname, sha1))
- return 0;
-
- return peel_object(base, sha1);
-}
-
struct files_ref_iterator {
struct ref_iterator base;
const char *prefix, unsigned int flags)
{
struct files_ref_store *refs;
- struct ref_iterator *loose_iter, *packed_iter;
+ struct ref_iterator *loose_iter, *packed_iter, *overlay_iter;
struct files_ref_iterator *iter;
struct ref_iterator *ref_iterator;
unsigned int required_flags = REF_STORE_READ;
refs = files_downcast(ref_store, required_flags, "ref_iterator_begin");
- iter = xcalloc(1, sizeof(*iter));
- ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable);
-
/*
* We must make sure that all loose refs are read before
* accessing the packed-refs file; this avoids a race
refs->packed_ref_store, prefix, 0,
DO_FOR_EACH_INCLUDE_BROKEN);
- iter->iter0 = overlay_ref_iterator_begin(loose_iter, packed_iter);
+ overlay_iter = overlay_ref_iterator_begin(loose_iter, packed_iter);
+
+ iter = xcalloc(1, sizeof(*iter));
+ ref_iterator = &iter->base;
+ base_ref_iterator_init(ref_iterator, &files_ref_iterator_vtable,
+ overlay_iter->ordered);
+ iter->iter0 = overlay_iter;
iter->flags = flags;
return ref_iterator;
const struct object_id *oid, const char *logmsg,
struct strbuf *err);
-static int files_rename_ref(struct ref_store *ref_store,
+static int files_copy_or_rename_ref(struct ref_store *ref_store,
const char *oldrefname, const char *newrefname,
- const char *logmsg)
+ const char *logmsg, int copy)
{
struct files_ref_store *refs =
files_downcast(ref_store, REF_STORE_WRITE, "rename_ref");
}
if (flag & REF_ISSYMREF) {
- ret = error("refname %s is a symbolic ref, renaming it is not supported",
- oldrefname);
+ if (copy)
+ ret = error("refname %s is a symbolic ref, copying it is not supported",
+ oldrefname);
+ else
+ ret = error("refname %s is a symbolic ref, renaming it is not supported",
+ oldrefname);
goto out;
}
if (!refs_rename_ref_available(&refs->base, oldrefname, newrefname)) {
goto out;
}
- if (log && rename(sb_oldref.buf, tmp_renamed_log.buf)) {
+ if (!copy && log && rename(sb_oldref.buf, tmp_renamed_log.buf)) {
ret = error("unable to move logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
oldrefname, strerror(errno));
goto out;
}
- if (refs_delete_ref(&refs->base, logmsg, oldrefname,
+ if (copy && log && copy_file(tmp_renamed_log.buf, sb_oldref.buf, 0644)) {
+ ret = error("unable to copy logfile logs/%s to logs/"TMP_RENAMED_LOG": %s",
+ oldrefname, strerror(errno));
+ goto out;
+ }
+
+ if (!copy && refs_delete_ref(&refs->base, logmsg, oldrefname,
orig_oid.hash, REF_NODEREF)) {
error("unable to delete old %s", oldrefname);
goto rollback;
* the safety anyway; we want to delete the reference whatever
* its current value.
*/
- if (!refs_read_ref_full(&refs->base, newrefname,
+ if (!copy && !refs_read_ref_full(&refs->base, newrefname,
RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
oid.hash, NULL) &&
refs_delete_ref(&refs->base, NULL, newrefname,
lock = lock_ref_sha1_basic(refs, newrefname, NULL, NULL, NULL,
REF_NODEREF, NULL, &err);
if (!lock) {
- error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf);
+ if (copy)
+ error("unable to copy '%s' to '%s': %s", oldrefname, newrefname, err.buf);
+ else
+ error("unable to rename '%s' to '%s': %s", oldrefname, newrefname, err.buf);
strbuf_release(&err);
goto rollback;
}
return ret;
}
+static int files_rename_ref(struct ref_store *ref_store,
+ const char *oldrefname, const char *newrefname,
+ const char *logmsg)
+{
+ return files_copy_or_rename_ref(ref_store, oldrefname,
+ newrefname, logmsg, 0);
+}
+
+static int files_copy_ref(struct ref_store *ref_store,
+ const char *oldrefname, const char *newrefname,
+ const char *logmsg)
+{
+ return files_copy_or_rename_ref(ref_store, oldrefname,
+ newrefname, logmsg, 1);
+}
+
static int close_ref_gently(struct ref_lock *lock)
{
if (close_lock_file_gently(&lock->lk))
written = len <= maxlen ? write_in_full(fd, logrec, len) : -1;
free(logrec);
- if (written != len)
+ if (written < 0)
return -1;
return 0;
return -1;
}
fd = get_lock_file_fd(&lock->lk);
- if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
- write_in_full(fd, &term, 1) != 1 ||
+ if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) < 0 ||
+ write_in_full(fd, &term, 1) < 0 ||
close_ref_gently(lock) < 0) {
strbuf_addf(err,
"couldn't write '%s'", get_lock_file_path(&lock->lk));
struct ref_iterator *ref_iterator = &iter->base;
struct strbuf sb = STRBUF_INIT;
- base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable);
+ base_ref_iterator_init(ref_iterator, &files_reflog_iterator_vtable, 0);
strbuf_addf(&sb, "%s/logs", gitdir);
iter->dir_iterator = dir_iterator_begin(sb.buf);
iter->ref_store = ref_store;
return reflog_iterator_begin(ref_store, refs->gitcommondir);
} else {
return merge_ref_iterator_begin(
+ 0,
reflog_iterator_begin(ref_store, refs->gitdir),
reflog_iterator_begin(ref_store, refs->gitcommondir),
reflog_iterator_select, refs);
rollback_lock_file(&reflog_lock);
} else if (update &&
(write_in_full(get_lock_file_fd(&lock->lk),
- oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) != GIT_SHA1_HEXSZ ||
- write_str_in_full(get_lock_file_fd(&lock->lk), "\n") != 1 ||
+ oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) < 0 ||
+ write_str_in_full(get_lock_file_fd(&lock->lk), "\n") < 0 ||
close_ref_gently(lock) < 0)) {
status |= error("couldn't write %s",
get_lock_file_path(&lock->lk));
files_initial_transaction_commit,
files_pack_refs,
- files_peel_ref,
files_create_symref,
files_delete_refs,
files_rename_ref,
+ files_copy_ref,
files_ref_iterator_begin,
files_read_raw_ref,
}
void base_ref_iterator_init(struct ref_iterator *iter,
- struct ref_iterator_vtable *vtable)
+ struct ref_iterator_vtable *vtable,
+ int ordered)
{
iter->vtable = vtable;
+ iter->ordered = !!ordered;
iter->refname = NULL;
iter->oid = NULL;
iter->flags = 0;
struct empty_ref_iterator *iter = xcalloc(1, sizeof(*iter));
struct ref_iterator *ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &empty_ref_iterator_vtable);
+ base_ref_iterator_init(ref_iterator, &empty_ref_iterator_vtable, 1);
return ref_iterator;
}
};
struct ref_iterator *merge_ref_iterator_begin(
+ int ordered,
struct ref_iterator *iter0, struct ref_iterator *iter1,
ref_iterator_select_fn *select, void *cb_data)
{
* references through only if they exist in both iterators.
*/
- base_ref_iterator_init(ref_iterator, &merge_ref_iterator_vtable);
+ base_ref_iterator_init(ref_iterator, &merge_ref_iterator_vtable, ordered);
iter->iter0 = iter0;
iter->iter1 = iter1;
iter->select = select;
} else if (is_empty_ref_iterator(back)) {
ref_iterator_abort(back);
return front;
+ } else if (!front->ordered || !back->ordered) {
+ BUG("overlay_ref_iterator requires ordered inputs");
}
- return merge_ref_iterator_begin(front, back,
+ return merge_ref_iterator_begin(1, front, back,
overlay_iterator_select, NULL);
}
int trim;
};
+/* Return -1, 0, 1 if refname is before, inside, or after the prefix. */
+static int compare_prefix(const char *refname, const char *prefix)
+{
+ while (*prefix) {
+ if (*refname != *prefix)
+ return ((unsigned char)*refname < (unsigned char)*prefix) ? -1 : +1;
+
+ refname++;
+ prefix++;
+ }
+
+ return 0;
+}
+
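Illustrative expectations for compare_prefix(), assuming the function is visible in the same translation unit (the refnames are arbitrary examples):

	#include <assert.h>

	static void compare_prefix_examples(void)
	{
		assert(compare_prefix("refs/heads/master", "refs/heads/") == 0); /* inside */
		assert(compare_prefix("refs/a", "refs/heads/") < 0);             /* before */
		assert(compare_prefix("refs/tags/v1.0", "refs/heads/") > 0);     /* after */
	}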
static int prefix_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
struct prefix_ref_iterator *iter =
int ok;
while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
- if (!starts_with(iter->iter0->refname, iter->prefix))
+ int cmp = compare_prefix(iter->iter0->refname, iter->prefix);
+
+ if (cmp < 0)
continue;
+ if (cmp > 0) {
+ /*
+ * If the source iterator is ordered, then we
+ * can stop the iteration as soon as we see a
+ * refname that comes after the prefix:
+ */
+ if (iter->iter0->ordered) {
+ ok = ref_iterator_abort(iter->iter0);
+ break;
+ } else {
+ continue;
+ }
+ }
+
if (iter->trim) {
/*
* It is nonsense to trim off characters that
iter = xcalloc(1, sizeof(*iter));
ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &prefix_ref_iterator_vtable);
+ base_ref_iterator_init(ref_iterator, &prefix_ref_iterator_vtable, iter0->ordered);
iter->iter0 = iter0;
iter->prefix = xstrdup(prefix);
#include "../config.h"
#include "../refs.h"
#include "refs-internal.h"
-#include "ref-cache.h"
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"
-struct packed_ref_cache {
- struct ref_cache *cache;
+enum mmap_strategy {
+ /*
+ * Don't use mmap() at all for reading `packed-refs`.
+ */
+ MMAP_NONE,
/*
- * Count of references to the data structure in this instance,
- * including the pointer from files_ref_store::packed if any.
- * The data will not be freed as long as the reference count
- * is nonzero.
+ * Can use mmap() for reading `packed-refs`, but the file must
+ * not remain mmapped. This is the usual option on Windows,
+ * where you cannot rename a new version of a file onto a file
+ * that is currently mmapped.
*/
- unsigned int referrers;
+ MMAP_TEMPORARY,
- /* The metadata from when this packed-refs cache was read */
- struct stat_validity validity;
+ /*
+ * It is OK to leave the `packed-refs` file mmapped while
+ * arbitrary other code is running.
+ */
+ MMAP_OK
};
-/*
- * Increment the reference count of *packed_refs.
- */
-static void acquire_packed_ref_cache(struct packed_ref_cache *packed_refs)
-{
- packed_refs->referrers++;
-}
+#if defined(NO_MMAP)
+static enum mmap_strategy mmap_strategy = MMAP_NONE;
+#elif defined(MMAP_PREVENTS_DELETE)
+static enum mmap_strategy mmap_strategy = MMAP_TEMPORARY;
+#else
+static enum mmap_strategy mmap_strategy = MMAP_OK;
+#endif
+
+struct packed_ref_store;
/*
- * Decrease the reference count of *packed_refs. If it goes to zero,
- * free *packed_refs and return true; otherwise return false.
+ * A `snapshot` represents one snapshot of a `packed-refs` file.
+ *
+ * Normally, this will be a mmapped view of the contents of the
+ * `packed-refs` file at the time the snapshot was created. However,
+ * if the `packed-refs` file was not sorted, this might point at heap
+ * memory holding the contents of the `packed-refs` file with its
+ * records sorted by refname.
+ *
+ * `snapshot` instances are reference counted (via
+ * `acquire_snapshot()` and `release_snapshot()`). This is to prevent
+ * an instance from disappearing while an iterator is still iterating
+ * over it. Instances are garbage collected when their `referrers`
+ * count goes to zero.
+ *
+ * The most recent `snapshot`, if available, is referenced by the
+ * `packed_ref_store`. Its freshness is checked whenever
+ * `get_snapshot()` is called; if the existing snapshot is obsolete, a
+ * new snapshot is taken.
*/
-static int release_packed_ref_cache(struct packed_ref_cache *packed_refs)
-{
- if (!--packed_refs->referrers) {
- free_ref_cache(packed_refs->cache);
- stat_validity_clear(&packed_refs->validity);
- free(packed_refs);
- return 1;
- } else {
- return 0;
- }
-}
+struct snapshot {
+ /*
+ * A back-pointer to the packed_ref_store with which this
+ * snapshot is associated:
+ */
+ struct packed_ref_store *refs;
+
+ /* Is the `packed-refs` file currently mmapped? */
+ int mmapped;
+
+ /*
+ * The contents of the `packed-refs` file. If the file was
+ * already sorted, this points at the mmapped contents of the
+ * file. If not, this points at heap-allocated memory
+ * containing the contents, sorted. If there were no contents
+ * (e.g., because the file didn't exist), `buf` and `eof` are
+ * both NULL.
+ */
+ char *buf, *eof;
+
+ /* The size of the header line, if any; otherwise, 0: */
+ size_t header_len;
+
+ /*
+ * What is the peeled state of the `packed-refs` file that
+ * this snapshot represents? (This is usually determined from
+ * the file's header.)
+ */
+ enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled;
+
+ /*
+ * Count of references to this instance, including the pointer
+ * from `packed_ref_store::snapshot`, if any. The instance
+ * will not be freed as long as the reference count is
+ * nonzero.
+ */
+ unsigned int referrers;
+
+ /*
+ * The metadata of the `packed-refs` file from which this
+ * snapshot was created, used to tell if the file has been
+ * replaced since we read it.
+ */
+ struct stat_validity validity;
+};
/*
- * A container for `packed-refs`-related data. It is not (yet) a
- * `ref_store`.
+ * A `ref_store` representing references stored in a `packed-refs`
+ * file. It implements the `ref_store` interface, though it has some
+ * limitations:
+ *
+ * - It cannot store symbolic references.
+ *
+ * - It cannot store reflogs.
+ *
+ * - It does not support reference renaming (though it could).
+ *
+ * On the other hand, it can be locked outside of a reference
+ * transaction. In that case, it remains locked even after the
+ * transaction is done and the new `packed-refs` file is activated.
*/
struct packed_ref_store {
struct ref_store base;
char *path;
/*
- * A cache of the values read from the `packed-refs` file, if
- * it might still be current; otherwise, NULL.
+ * A snapshot of the values read from the `packed-refs` file,
+ * if it might still be current; otherwise, NULL.
*/
- struct packed_ref_cache *cache;
+ struct snapshot *snapshot;
/*
* Lock used for the "packed-refs" file. Note that this (and
struct tempfile *tempfile;
};
+/*
+ * Increment the reference count of `*snapshot`.
+ */
+static void acquire_snapshot(struct snapshot *snapshot)
+{
+ snapshot->referrers++;
+}
+
+/*
+ * If the buffer in `snapshot` is active, then either munmap the
+ * memory and close the file, or free the memory. Then set the buffer
+ * pointers to NULL.
+ */
+static void clear_snapshot_buffer(struct snapshot *snapshot)
+{
+ if (snapshot->mmapped) {
+ if (munmap(snapshot->buf, snapshot->eof - snapshot->buf))
+ die_errno("error ummapping packed-refs file %s",
+ snapshot->refs->path);
+ snapshot->mmapped = 0;
+ } else {
+ free(snapshot->buf);
+ }
+ snapshot->buf = snapshot->eof = NULL;
+ snapshot->header_len = 0;
+}
+
+/*
+ * Decrease the reference count of `*snapshot`. If it goes to zero,
+ * free `*snapshot` and return true; otherwise return false.
+ */
+static int release_snapshot(struct snapshot *snapshot)
+{
+ if (!--snapshot->referrers) {
+ stat_validity_clear(&snapshot->validity);
+ clear_snapshot_buffer(snapshot);
+ free(snapshot);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
struct ref_store *packed_ref_store_create(const char *path,
unsigned int store_flags)
{
return refs;
}
-static void clear_packed_ref_cache(struct packed_ref_store *refs)
+static void clear_snapshot(struct packed_ref_store *refs)
{
- if (refs->cache) {
- struct packed_ref_cache *cache = refs->cache;
+ if (refs->snapshot) {
+ struct snapshot *snapshot = refs->snapshot;
- refs->cache = NULL;
- release_packed_ref_cache(cache);
+ refs->snapshot = NULL;
+ release_snapshot(snapshot);
}
}
-/* The length of a peeled reference line in packed-refs, including EOL: */
-#define PEELED_LINE_LENGTH 42
+static NORETURN void die_unterminated_line(const char *path,
+ const char *p, size_t len)
+{
+ if (len < 80)
+ die("unterminated line in %s: %.*s", path, (int)len, p);
+ else
+ die("unterminated line in %s: %.75s...", path, p);
+}
+
+static NORETURN void die_invalid_line(const char *path,
+ const char *p, size_t len)
+{
+ const char *eol = memchr(p, '\n', len);
+
+ if (!eol)
+ die_unterminated_line(path, p, len);
+ else if (eol - p < 80)
+ die("unexpected line in %s: %.*s", path, (int)(eol - p), p);
+ else
+ die("unexpected line in %s: %.75s...", path, p);
+}
+
+struct snapshot_record {
+ const char *start;
+ size_t len;
+};
+
+static int cmp_packed_ref_records(const void *v1, const void *v2)
+{
+ const struct snapshot_record *e1 = v1, *e2 = v2;
+ const char *r1 = e1->start + GIT_SHA1_HEXSZ + 1;
+ const char *r2 = e2->start + GIT_SHA1_HEXSZ + 1;
+
+ while (1) {
+ if (*r1 == '\n')
+ return *r2 == '\n' ? 0 : -1;
+ if (*r1 != *r2) {
+ if (*r2 == '\n')
+ return 1;
+ else
+ return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
+ }
+ r1++;
+ r2++;
+ }
+}
/*
- * Parse one line from a packed-refs file. Write the SHA1 to sha1.
- * Return a pointer to the refname within the line (null-terminated),
- * or NULL if there was a problem.
+ * Compare a snapshot record at `rec` to the specified NUL-terminated
+ * refname.
*/
-static const char *parse_ref_line(struct strbuf *line, struct object_id *oid)
+static int cmp_record_to_refname(const char *rec, const char *refname)
{
- const char *ref;
+ const char *r1 = rec + GIT_SHA1_HEXSZ + 1;
+ const char *r2 = refname;
+
+ while (1) {
+ if (*r1 == '\n')
+ return *r2 ? -1 : 0;
+ if (!*r2)
+ return 1;
+ if (*r1 != *r2)
+ return (unsigned char)*r1 < (unsigned char)*r2 ? -1 : +1;
+ r1++;
+ r2++;
+ }
+}
- if (parse_oid_hex(line->buf, oid, &ref) < 0)
- return NULL;
- if (!isspace(*ref++))
- return NULL;
+/*
+ * `snapshot->buf` is not known to be sorted. Check whether it is, and
+ * if not, sort it into new memory and munmap/free the old storage.
+ */
+static void sort_snapshot(struct snapshot *snapshot)
+{
+ struct snapshot_record *records = NULL;
+ size_t alloc = 0, nr = 0;
+ int sorted = 1;
+ const char *pos, *eof, *eol;
+ size_t len, i;
+ char *new_buffer, *dst;
- if (isspace(*ref))
- return NULL;
+ pos = snapshot->buf + snapshot->header_len;
+ eof = snapshot->eof;
+ len = eof - pos;
- if (line->buf[line->len - 1] != '\n')
- return NULL;
- line->buf[--line->len] = 0;
+ if (!len)
+ return;
+
+ /*
+ * Initialize records based on a crude estimate of the number
+ * of references in the file (we'll grow it below if needed):
+ */
+ ALLOC_GROW(records, len / 80 + 20, alloc);
+
+ while (pos < eof) {
+ eol = memchr(pos, '\n', eof - pos);
+ if (!eol)
+ /* The safety check should prevent this. */
+ BUG("unterminated line found in packed-refs");
+ if (eol - pos < GIT_SHA1_HEXSZ + 2)
+ die_invalid_line(snapshot->refs->path,
+ pos, eof - pos);
+ eol++;
+ if (eol < eof && *eol == '^') {
+ /*
+ * Keep any peeled line together with its
+ * reference:
+ */
+ const char *peeled_start = eol;
+
+ eol = memchr(peeled_start, '\n', eof - peeled_start);
+ if (!eol)
+ /* The safety check should prevent this. */
+ BUG("unterminated peeled line found in packed-refs");
+ eol++;
+ }
+
+ ALLOC_GROW(records, nr + 1, alloc);
+ records[nr].start = pos;
+ records[nr].len = eol - pos;
+ nr++;
+
+ if (sorted &&
+ nr > 1 &&
+ cmp_packed_ref_records(&records[nr - 2],
+ &records[nr - 1]) >= 0)
+ sorted = 0;
+
+ pos = eol;
+ }
+
+ if (sorted)
+ goto cleanup;
+
+ /* We need to sort the memory. First we sort the records array: */
+ QSORT(records, nr, cmp_packed_ref_records);
+
+ /*
+ * Allocate a new chunk of memory, and copy the old memory to
+ * the new in the order indicated by `records` (not bothering
+ * with the header line):
+ */
+ new_buffer = xmalloc(len);
+ for (dst = new_buffer, i = 0; i < nr; i++) {
+ memcpy(dst, records[i].start, records[i].len);
+ dst += records[i].len;
+ }
+
+ /*
+ * Now munmap the old buffer and use the sorted buffer in its
+ * place:
+ */
+ clear_snapshot_buffer(snapshot);
+ snapshot->buf = new_buffer;
+ snapshot->eof = new_buffer + len;
+ snapshot->header_len = 0;
+
+cleanup:
+ free(records);
+}
+
+/*
+ * Return a pointer to the start of the record that contains the
+ * character `*p` (which must be within the buffer). If no other
+ * record start is found, return `buf`.
+ */
+static const char *find_start_of_record(const char *buf, const char *p)
+{
+ while (p > buf && (p[-1] != '\n' || p[0] == '^'))
+ p--;
+ return p;
+}
+
+/*
+ * Return a pointer to the start of the record following the record
+ * that contains `*p`. If none is found before `end`, return `end`.
+ */
+static const char *find_end_of_record(const char *p, const char *end)
+{
+ while (++p < end && (p[-1] != '\n' || p[0] == '^'))
+ ;
+ return p;
+}
+
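A small sketch of how the two helpers above treat a peeled '^' line as part of the record that precedes it. The object names and refnames are invented, and the two static helpers are assumed visible in the same file:

	#include <assert.h>

	static void record_helper_example(void)
	{
		static const char buf[] =
			"aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d refs/tags/v1.0\n"
			"^89e6c98d92887913cadf06b2adb97f26cde4849b\n"
			"c1a5298f939e87e8f962a5edfc206918ae1c2441 refs/tags/v1.1\n";
		const char *end = buf + sizeof(buf) - 1;
		const char *in_peeled = buf + 60;	/* somewhere inside the '^' line */

		/* The '^' line belongs to the "refs/tags/v1.0" record... */
		assert(find_start_of_record(buf, in_peeled) == buf);
		/* ...and the next record starts right after that '^' line. */
		assert(find_end_of_record(in_peeled, end) == buf + 56 + 42);
	}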
+/*
+ * We want to be able to compare mmapped reference records quickly,
+ * without totally parsing them. We can do so because the records are
+ * LF-terminated, and the refname should start exactly (GIT_SHA1_HEXSZ
+ * + 1) bytes past the beginning of the record.
+ *
+ * But what if the `packed-refs` file contains garbage? We're willing
+ * to tolerate not detecting the problem, as long as we don't produce
+ * totally garbled output (we can't afford to check the integrity of
+ * the whole file during every Git invocation). But we do want to be
+ * sure that we never read past the end of the buffer in memory and
+ * perform an illegal memory access.
+ *
+ * Guarantee that minimum level of safety by verifying that the last
+ * record in the file is LF-terminated, and that it has at least
+ * (GIT_SHA1_HEXSZ + 1) characters before the LF. Die if either of
+ * these checks fails.
+ */
+static void verify_buffer_safe(struct snapshot *snapshot)
+{
+ const char *buf = snapshot->buf + snapshot->header_len;
+ const char *eof = snapshot->eof;
+ const char *last_line;
+
+ if (buf == eof)
+ return;
+
+ last_line = find_start_of_record(buf, eof - 1);
+ if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
+ die_invalid_line(snapshot->refs->path,
+ last_line, eof - last_line);
+}
- return ref;
+/*
+ * Depending on `mmap_strategy`, either mmap or read the contents of
+ * the `packed-refs` file into the snapshot. Return 1 if the file
+ * existed and was read, or 0 if the file was absent. Die on errors.
+ */
+static int load_contents(struct snapshot *snapshot)
+{
+ int fd;
+ struct stat st;
+ size_t size;
+ ssize_t bytes_read;
+
+ fd = open(snapshot->refs->path, O_RDONLY);
+ if (fd < 0) {
+ if (errno == ENOENT) {
+ /*
+ * This is OK; it just means that no
+ * "packed-refs" file has been written yet,
+ * which is equivalent to it being empty,
+ * which is its state when initialized with
+ * zeros.
+ */
+ return 0;
+ } else {
+ die_errno("couldn't read %s", snapshot->refs->path);
+ }
+ }
+
+ stat_validity_update(&snapshot->validity, fd);
+
+ if (fstat(fd, &st) < 0)
+ die_errno("couldn't stat %s", snapshot->refs->path);
+ size = xsize_t(st.st_size);
+
+ switch (mmap_strategy) {
+ case MMAP_NONE:
+ snapshot->buf = xmalloc(size);
+ bytes_read = read_in_full(fd, snapshot->buf, size);
+ if (bytes_read < 0 || bytes_read != size)
+ die_errno("couldn't read %s", snapshot->refs->path);
+ snapshot->eof = snapshot->buf + size;
+ snapshot->mmapped = 0;
+ break;
+ case MMAP_TEMPORARY:
+ case MMAP_OK:
+ snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+ snapshot->eof = snapshot->buf + size;
+ snapshot->mmapped = 1;
+ break;
+ }
+ close(fd);
+
+ return 1;
}
/*
- * Read from `packed_refs_file` into a newly-allocated
- * `packed_ref_cache` and return it. The return value will already
- * have its reference count incremented.
+ * Find the place in `snapshot->buf` where the start of the record for
+ * `refname` starts. If `mustexist` is true and the reference doesn't
+ * exist, then return NULL. If `mustexist` is false and the reference
+ * doesn't exist, then return the point where that reference would be
+ * inserted. In the latter mode, `refname` doesn't have to be a proper
+ * reference name; for example, one could search for "refs/replace/"
+ * to find the start of any replace references.
+ *
+ * The record is sought using a binary search, so `snapshot->buf` must
+ * be sorted.
+ */
+static const char *find_reference_location(struct snapshot *snapshot,
+ const char *refname, int mustexist)
+{
+ /*
+ * This is not *quite* a garden-variety binary search, because
+ * the data we're searching is made up of records, and we
+ * always need to find the beginning of a record to do a
+ * comparison. A "record" here is one line for the reference
+ * itself and zero or one peel lines that start with '^'. Our
+ * loop invariant is described in the next two comments.
+ */
+
+ /*
+ * A pointer to the character at the start of a record whose
+ * preceding records all have reference names that come
+ * *before* `refname`.
+ */
+ const char *lo = snapshot->buf + snapshot->header_len;
+
+ /*
+ * A pointer to the first character of a record whose
+ * reference name comes *after* `refname`.
+ */
+ const char *hi = snapshot->eof;
+
+ while (lo < hi) {
+ const char *mid, *rec;
+ int cmp;
+
+ mid = lo + (hi - lo) / 2;
+ rec = find_start_of_record(lo, mid);
+ cmp = cmp_record_to_refname(rec, refname);
+ if (cmp < 0) {
+ lo = find_end_of_record(mid, hi);
+ } else if (cmp > 0) {
+ hi = rec;
+ } else {
+ return rec;
+ }
+ }
+
+ if (mustexist)
+ return NULL;
+ else
+ return lo;
+}
+
+/*
+ * Create a newly-allocated `snapshot` of the `packed-refs` file in
+ * its current state and return it. The return value will already have
+ * its reference count incremented.
*
* A comment line of the form "# pack-refs with: " may contain zero or
* more traits. We interpret the traits as follows:
*
- * No traits:
+ * Neither `peeled` nor `fully-peeled`:
*
* Probably no references are peeled. But if the file contains a
* peeled value for a reference, we will use it.
*
- * peeled:
+ * `peeled`:
*
* References under "refs/tags/", if they *can* be peeled, *are*
* peeled in this file. References outside of "refs/tags/" are
* probably not peeled even if they could have been, but if we find
* a peeled value for such a reference we will use it.
*
- * fully-peeled:
+ * `fully-peeled`:
*
* All references in the file that can be peeled are peeled.
* Inversely (and this is more important), any references in the
* trait should typically be written alongside "peeled" for
* compatibility with older clients, but we do not require it
* (i.e., "peeled" is a no-op if "fully-peeled" is set).
+ *
+ * `sorted`:
+ *
+ * The references in this file are known to be sorted by refname.
*/
-static struct packed_ref_cache *read_packed_refs(const char *packed_refs_file)
+static struct snapshot *create_snapshot(struct packed_ref_store *refs)
{
- FILE *f;
- struct packed_ref_cache *packed_refs = xcalloc(1, sizeof(*packed_refs));
- struct ref_entry *last = NULL;
- struct strbuf line = STRBUF_INIT;
- enum { PEELED_NONE, PEELED_TAGS, PEELED_FULLY } peeled = PEELED_NONE;
- struct ref_dir *dir;
-
- acquire_packed_ref_cache(packed_refs);
- packed_refs->cache = create_ref_cache(NULL, NULL);
- packed_refs->cache->root->flag &= ~REF_INCOMPLETE;
-
- f = fopen(packed_refs_file, "r");
- if (!f) {
- if (errno == ENOENT) {
- /*
- * This is OK; it just means that no
- * "packed-refs" file has been written yet,
- * which is equivalent to it being empty.
- */
- return packed_refs;
- } else {
- die_errno("couldn't read %s", packed_refs_file);
- }
- }
+ struct snapshot *snapshot = xcalloc(1, sizeof(*snapshot));
+ int sorted = 0;
- stat_validity_update(&packed_refs->validity, fileno(f));
+ snapshot->refs = refs;
+ acquire_snapshot(snapshot);
+ snapshot->peeled = PEELED_NONE;
- dir = get_ref_dir(packed_refs->cache->root);
- while (strbuf_getwholeline(&line, f, '\n') != EOF) {
- struct object_id oid;
- const char *refname;
- const char *traits;
+ if (!load_contents(snapshot))
+ return snapshot;
- if (!line.len || line.buf[line.len - 1] != '\n')
- die("unterminated line in %s: %s", packed_refs_file, line.buf);
+ /* If the file has a header line, process it: */
+ if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
+ struct strbuf tmp = STRBUF_INIT;
+ char *p;
+ const char *eol;
+ struct string_list traits = STRING_LIST_INIT_NODUP;
- if (skip_prefix(line.buf, "# pack-refs with:", &traits)) {
- if (strstr(traits, " fully-peeled "))
- peeled = PEELED_FULLY;
- else if (strstr(traits, " peeled "))
- peeled = PEELED_TAGS;
- /* perhaps other traits later as well */
- continue;
- }
+ eol = memchr(snapshot->buf, '\n',
+ snapshot->eof - snapshot->buf);
+ if (!eol)
+ die_unterminated_line(refs->path,
+ snapshot->buf,
+ snapshot->eof - snapshot->buf);
- refname = parse_ref_line(&line, &oid);
- if (refname) {
- int flag = REF_ISPACKED;
+ strbuf_add(&tmp, snapshot->buf, eol - snapshot->buf);
- if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL)) {
- if (!refname_is_safe(refname))
- die("packed refname is dangerous: %s", refname);
- oidclr(&oid);
- flag |= REF_BAD_NAME | REF_ISBROKEN;
- }
- last = create_ref_entry(refname, &oid, flag);
- if (peeled == PEELED_FULLY ||
- (peeled == PEELED_TAGS && starts_with(refname, "refs/tags/")))
- last->flag |= REF_KNOWS_PEELED;
- add_ref_entry(dir, last);
- } else if (last &&
- line.buf[0] == '^' &&
- line.len == PEELED_LINE_LENGTH &&
- line.buf[PEELED_LINE_LENGTH - 1] == '\n' &&
- !get_oid_hex(line.buf + 1, &oid)) {
- oidcpy(&last->u.value.peeled, &oid);
- /*
- * Regardless of what the file header said,
- * we definitely know the value of *this*
- * reference:
- */
- last->flag |= REF_KNOWS_PEELED;
- } else {
- strbuf_setlen(&line, line.len - 1);
- die("unexpected line in %s: %s", packed_refs_file, line.buf);
- }
+ if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
+ die_invalid_line(refs->path,
+ snapshot->buf,
+ snapshot->eof - snapshot->buf);
+
+ string_list_split_in_place(&traits, p, ' ', -1);
+
+ if (unsorted_string_list_has_string(&traits, "fully-peeled"))
+ snapshot->peeled = PEELED_FULLY;
+ else if (unsorted_string_list_has_string(&traits, "peeled"))
+ snapshot->peeled = PEELED_TAGS;
+
+ sorted = unsorted_string_list_has_string(&traits, "sorted");
+
+ /* perhaps other traits later as well */
+
+ /* The "+ 1" is for the LF character. */
+ snapshot->header_len = eol + 1 - snapshot->buf;
+
+ string_list_clear(&traits, 0);
+ strbuf_release(&tmp);
}
- fclose(f);
- strbuf_release(&line);
+ verify_buffer_safe(snapshot);
- return packed_refs;
+ if (!sorted) {
+ sort_snapshot(snapshot);
+
+ /*
+ * Reordering the records might have moved a short one
+ * to the end of the buffer, so verify the buffer's
+ * safety again:
+ */
+ verify_buffer_safe(snapshot);
+ }
+
+ if (mmap_strategy != MMAP_OK && snapshot->mmapped) {
+ /*
+ * We don't want to leave the file mmapped, so we are
+ * forced to make a copy now:
+ */
+ size_t size = snapshot->eof -
+ (snapshot->buf + snapshot->header_len);
+ char *buf_copy = xmalloc(size);
+
+ memcpy(buf_copy, snapshot->buf + snapshot->header_len, size);
+ clear_snapshot_buffer(snapshot);
+ snapshot->buf = buf_copy;
+ snapshot->eof = buf_copy + size;
+ }
+
+ return snapshot;
}
/*
- * Check that the packed refs cache (if any) still reflects the
- * contents of the file. If not, clear the cache.
+ * Check that `refs->snapshot` (if present) still reflects the
+ * contents of the `packed-refs` file. If not, clear the snapshot.
*/
-static void validate_packed_ref_cache(struct packed_ref_store *refs)
+static void validate_snapshot(struct packed_ref_store *refs)
{
- if (refs->cache &&
- !stat_validity_check(&refs->cache->validity, refs->path))
- clear_packed_ref_cache(refs);
+ if (refs->snapshot &&
+ !stat_validity_check(&refs->snapshot->validity, refs->path))
+ clear_snapshot(refs);
}
/*
- * Get the packed_ref_cache for the specified packed_ref_store,
- * creating and populating it if it hasn't been read before or if the
- * file has been changed (according to its `validity` field) since it
- * was last read. On the other hand, if we hold the lock, then assume
- * that the file hasn't been changed out from under us, so skip the
- * extra `stat()` call in `stat_validity_check()`.
+ * Get the `snapshot` for the specified packed_ref_store, creating and
+ * populating it if it hasn't been read before or if the file has been
+ * changed (according to its `validity` field) since it was last read.
+ * On the other hand, if we hold the lock, then assume that the file
+ * hasn't been changed out from under us, so skip the extra `stat()`
+ * call in `stat_validity_check()`. This function does *not* increase
+ * the snapshot's reference count on behalf of the caller.
*/
-static struct packed_ref_cache *get_packed_ref_cache(struct packed_ref_store *refs)
+static struct snapshot *get_snapshot(struct packed_ref_store *refs)
{
if (!is_lock_file_locked(&refs->lock))
- validate_packed_ref_cache(refs);
+ validate_snapshot(refs);
- if (!refs->cache)
- refs->cache = read_packed_refs(refs->path);
+ if (!refs->snapshot)
+ refs->snapshot = create_snapshot(refs);
- return refs->cache;
-}
-
-static struct ref_dir *get_packed_ref_dir(struct packed_ref_cache *packed_ref_cache)
-{
- return get_ref_dir(packed_ref_cache->cache->root);
-}
-
-static struct ref_dir *get_packed_refs(struct packed_ref_store *refs)
-{
- return get_packed_ref_dir(get_packed_ref_cache(refs));
-}
-
-/*
- * Return the ref_entry for the given refname from the packed
- * references. If it does not exist, return NULL.
- */
-static struct ref_entry *get_packed_ref(struct packed_ref_store *refs,
- const char *refname)
-{
- return find_ref_entry(get_packed_refs(refs), refname);
+ return refs->snapshot;
}
static int packed_read_raw_ref(struct ref_store *ref_store,
{
struct packed_ref_store *refs =
packed_downcast(ref_store, REF_STORE_READ, "read_raw_ref");
-
- struct ref_entry *entry;
+ struct snapshot *snapshot = get_snapshot(refs);
+ const char *rec;
*type = 0;
- entry = get_packed_ref(refs, refname);
- if (!entry) {
+ rec = find_reference_location(snapshot, refname, 1);
+
+ if (!rec) {
+ /* refname is not a packed reference. */
errno = ENOENT;
return -1;
}
- hashcpy(sha1, entry->u.value.oid.hash);
+ if (get_sha1_hex(rec, sha1))
+ die_invalid_line(refs->path, rec, snapshot->eof - rec);
+
*type = REF_ISPACKED;
return 0;
}
-static int packed_peel_ref(struct ref_store *ref_store,
- const char *refname, unsigned char *sha1)
-{
- struct packed_ref_store *refs =
- packed_downcast(ref_store, REF_STORE_READ | REF_STORE_ODB,
- "peel_ref");
- struct ref_entry *r = get_packed_ref(refs, refname);
-
- if (!r || peel_entry(r, 0))
- return -1;
-
- hashcpy(sha1, r->u.value.peeled.hash);
- return 0;
-}
+/*
+ * This value is set in `base.flags` if the peeled value of the
+ * current reference is known. In that case, `peeled` contains the
+ * correct peeled value for the reference, which might be `null_sha1`
+ * if the reference is not a tag or if it is broken.
+ */
+#define REF_KNOWS_PEELED 0x40
+/*
+ * An iterator over a snapshot of a `packed-refs` file.
+ */
struct packed_ref_iterator {
struct ref_iterator base;
- struct packed_ref_cache *cache;
- struct ref_iterator *iter0;
+ struct snapshot *snapshot;
+
+ /* The current position in the snapshot's buffer: */
+ const char *pos;
+
+ /* The end of the part of the buffer that will be iterated over: */
+ const char *eof;
+
+ /* Scratch space for current values: */
+ struct object_id oid, peeled;
+ struct strbuf refname_buf;
+
unsigned int flags;
};
+/*
+ * Move the iterator to the next record in the snapshot, without
+ * respect for whether the record is actually required by the current
+ * iteration. Adjust the fields in `iter` and return `ITER_OK` or
+ * `ITER_DONE`. This function does not free the iterator in the case
+ * of `ITER_DONE`.
+ */
+static int next_record(struct packed_ref_iterator *iter)
+{
+ const char *p = iter->pos, *eol;
+
+ strbuf_reset(&iter->refname_buf);
+
+ if (iter->pos == iter->eof)
+ return ITER_DONE;
+
+ iter->base.flags = REF_ISPACKED;
+
+ if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
+ parse_oid_hex(p, &iter->oid, &p) ||
+ !isspace(*p++))
+ die_invalid_line(iter->snapshot->refs->path,
+ iter->pos, iter->eof - iter->pos);
+
+ eol = memchr(p, '\n', iter->eof - p);
+ if (!eol)
+ die_unterminated_line(iter->snapshot->refs->path,
+ iter->pos, iter->eof - iter->pos);
+
+ strbuf_add(&iter->refname_buf, p, eol - p);
+ iter->base.refname = iter->refname_buf.buf;
+
+ if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
+ if (!refname_is_safe(iter->base.refname))
+ die("packed refname is dangerous: %s",
+ iter->base.refname);
+ oidclr(&iter->oid);
+ iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
+ }
+ if (iter->snapshot->peeled == PEELED_FULLY ||
+ (iter->snapshot->peeled == PEELED_TAGS &&
+ starts_with(iter->base.refname, "refs/tags/")))
+ iter->base.flags |= REF_KNOWS_PEELED;
+
+ iter->pos = eol + 1;
+
+ if (iter->pos < iter->eof && *iter->pos == '^') {
+ p = iter->pos + 1;
+ if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
+ parse_oid_hex(p, &iter->peeled, &p) ||
+ *p++ != '\n')
+ die_invalid_line(iter->snapshot->refs->path,
+ iter->pos, iter->eof - iter->pos);
+ iter->pos = p;
+
+ /*
+ * Regardless of what the file header said, we
+ * definitely know the value of *this* reference. But
+ * we suppress it if the reference is broken:
+ */
+ if ((iter->base.flags & REF_ISBROKEN)) {
+ oidclr(&iter->peeled);
+ iter->base.flags &= ~REF_KNOWS_PEELED;
+ } else {
+ iter->base.flags |= REF_KNOWS_PEELED;
+ }
+ } else {
+ oidclr(&iter->peeled);
+ }
+
+ return ITER_OK;
+}
+
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
struct packed_ref_iterator *iter =
(struct packed_ref_iterator *)ref_iterator;
int ok;
- while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
+ while ((ok = next_record(iter)) == ITER_OK) {
if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
- ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
+ ref_type(iter->base.refname) != REF_TYPE_PER_WORKTREE)
continue;
if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
- !ref_resolves_to_object(iter->iter0->refname,
- iter->iter0->oid,
- iter->iter0->flags))
+ !ref_resolves_to_object(iter->base.refname, &iter->oid,
+ iter->flags))
continue;
- iter->base.refname = iter->iter0->refname;
- iter->base.oid = iter->iter0->oid;
- iter->base.flags = iter->iter0->flags;
return ITER_OK;
}
- iter->iter0 = NULL;
if (ref_iterator_abort(ref_iterator) != ITER_DONE)
ok = ITER_ERROR;
struct packed_ref_iterator *iter =
(struct packed_ref_iterator *)ref_iterator;
- return ref_iterator_peel(iter->iter0, peeled);
+ if ((iter->base.flags & REF_KNOWS_PEELED)) {
+ oidcpy(peeled, &iter->peeled);
+ return is_null_oid(&iter->peeled) ? -1 : 0;
+ } else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
+ return -1;
+ } else {
+ return !!peel_object(iter->oid.hash, peeled->hash);
+ }
}
static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
(struct packed_ref_iterator *)ref_iterator;
int ok = ITER_DONE;
- if (iter->iter0)
- ok = ref_iterator_abort(iter->iter0);
-
- release_packed_ref_cache(iter->cache);
+ strbuf_release(&iter->refname_buf);
+ release_snapshot(iter->snapshot);
base_ref_iterator_free(ref_iterator);
return ok;
}
const char *prefix, unsigned int flags)
{
struct packed_ref_store *refs;
+ struct snapshot *snapshot;
+ const char *start;
struct packed_ref_iterator *iter;
struct ref_iterator *ref_iterator;
unsigned int required_flags = REF_STORE_READ;
required_flags |= REF_STORE_ODB;
refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");
+ /*
+ * Note that `get_snapshot()` internally checks whether the
+ * snapshot is up to date with what is on disk, and re-reads
+ * it if not.
+ */
+ snapshot = get_snapshot(refs);
+
+ if (!snapshot->buf)
+ return empty_ref_iterator_begin();
+
iter = xcalloc(1, sizeof(*iter));
ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable);
+ base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);
- /*
- * Note that get_packed_ref_cache() internally checks whether
- * the packed-ref cache is up to date with what is on disk,
- * and re-reads it if not.
- */
+ iter->snapshot = snapshot;
+ acquire_snapshot(snapshot);
- iter->cache = get_packed_ref_cache(refs);
- acquire_packed_ref_cache(iter->cache);
- iter->iter0 = cache_ref_iterator_begin(iter->cache->cache, prefix, 0);
+ if (prefix && *prefix)
+ start = find_reference_location(snapshot, prefix, 0);
+ else
+ start = snapshot->buf + snapshot->header_len;
+
+ iter->pos = start;
+ iter->eof = snapshot->eof;
+ strbuf_init(&iter->refname_buf, 0);
+
+ iter->base.oid = &iter->oid;
iter->flags = flags;
+ if (prefix && *prefix)
+ /* Stop iteration after we've gone *past* prefix: */
+ ref_iterator = prefix_ref_iterator_begin(ref_iterator, prefix, 0);
+
return ref_iterator;
}
/*
* Now that we hold the `packed-refs` lock, make sure that our
- * cache matches the current version of the file. Normally
- * `get_packed_ref_cache()` does that for us, but that
- * function assumes that when the file is locked, any existing
- * cache is still valid. We've just locked the file, but it
- * might have changed the moment *before* we locked it.
+ * snapshot matches the current version of the file. Normally
+ * `get_snapshot()` does that for us, but that function
+ * assumes that when the file is locked, any existing snapshot
+ * is still valid. We've just locked the file, but it might
+ * have changed the moment *before* we locked it.
*/
- validate_packed_ref_cache(refs);
+ validate_snapshot(refs);
/*
* Now make sure that the packed-refs file as it exists in the
- * locked state is loaded into the cache:
+ * locked state is loaded into the snapshot:
*/
- get_packed_ref_cache(refs);
+ get_snapshot(refs);
return 0;
}
}
/*
- * The packed-refs header line that we write out. Perhaps other
- * traits will be added later. The trailing space is required.
+ * The packed-refs header line that we write out. Perhaps other traits
+ * will be added later.
+ *
+ * Note that earlier versions of Git used to parse these traits by
+ * looking for " trait " in the line. For this reason, the space after
+ * the colon and the trailing space are required.
*/
static const char PACKED_REFS_HEADER[] =
- "# pack-refs with: peeled fully-peeled \n";
+ "# pack-refs with: peeled fully-peeled sorted \n";
static int packed_init_db(struct ref_store *ref_store, struct strbuf *err)
{
}
/*
- * Write the packed-refs from the cache to the packed-refs tempfile,
- * incorporating any changes from `updates`. `updates` must be a
- * sorted string list whose keys are the refnames and whose util
+ * Write the packed refs from the current snapshot to the packed-refs
+ * tempfile, incorporating any changes from `updates`. `updates` must
+ * be a sorted string list whose keys are the refnames and whose util
* values are `struct ref_update *`. On error, rollback the tempfile,
* write an error message to `err`, and return a nonzero value.
*
/*
* Note that we *don't* skip transactions with zero updates,
* because such a transaction might be executed for the side
- * effect of ensuring that all of the references are peeled.
- * If the caller wants to optimize away empty transactions, it
- * should do so itself.
+ * effect of ensuring that all of the references are peeled or
+ * ensuring that the `packed-refs` file is sorted. If the
+ * caller wants to optimize away empty transactions, it should
+ * do so itself.
*/
data = xcalloc(1, sizeof(*data));
int ret = TRANSACTION_GENERIC_ERROR;
char *packed_refs_path;
+ clear_snapshot(refs);
+
packed_refs_path = get_locked_file_path(&refs->lock);
if (rename_tempfile(&refs->tempfile, packed_refs_path)) {
strbuf_addf(err, "error replacing %s: %s",
goto cleanup;
}
- clear_packed_ref_cache(refs);
ret = 0;
cleanup:
die("BUG: packed reference store does not support renaming references");
}
+static int packed_copy_ref(struct ref_store *ref_store,
+ const char *oldrefname, const char *newrefname,
+ const char *logmsg)
+{
+ die("BUG: packed reference store does not support copying references");
+}
+
static struct ref_iterator *packed_reflog_iterator_begin(struct ref_store *ref_store)
{
return empty_ref_iterator_begin();
packed_initial_transaction_commit,
packed_pack_refs,
- packed_peel_ref,
packed_create_symref,
packed_delete_refs,
packed_rename_ref,
+ packed_copy_ref,
packed_ref_iterator_begin,
packed_read_raw_ref,
FLEX_ALLOC_STR(ref, name, refname);
oidcpy(&ref->u.value.oid, oid);
- oidclr(&ref->u.value.peeled);
ref->flag = flag;
return ref;
}
}
}
-enum peel_status peel_entry(struct ref_entry *entry, int repeel)
-{
- enum peel_status status;
-
- if (entry->flag & REF_KNOWS_PEELED) {
- if (repeel) {
- entry->flag &= ~REF_KNOWS_PEELED;
- oidclr(&entry->u.value.peeled);
- } else {
- return is_null_oid(&entry->u.value.peeled) ?
- PEEL_NON_TAG : PEEL_PEELED;
- }
- }
- if (entry->flag & REF_ISBROKEN)
- return PEEL_BROKEN;
- if (entry->flag & REF_ISSYMREF)
- return PEEL_IS_SYMREF;
-
- status = peel_object(entry->u.value.oid.hash, entry->u.value.peeled.hash);
- if (status == PEEL_PEELED || status == PEEL_NON_TAG)
- entry->flag |= REF_KNOWS_PEELED;
- return status;
-}
-
static int cache_ref_iterator_peel(struct ref_iterator *ref_iterator,
struct object_id *peeled)
{
- struct cache_ref_iterator *iter =
- (struct cache_ref_iterator *)ref_iterator;
- struct cache_ref_iterator_level *level;
- struct ref_entry *entry;
-
- level = &iter->levels[iter->levels_nr - 1];
-
- if (level->index == -1)
- die("BUG: peel called before advance for cache iterator");
-
- entry = level->dir->entries[level->index];
-
- if (peel_entry(entry, 0))
- return -1;
- oidcpy(peeled, &entry->u.value.peeled);
- return 0;
+ return peel_object(ref_iterator->oid->hash, peeled->hash);
}
static int cache_ref_iterator_abort(struct ref_iterator *ref_iterator)
iter = xcalloc(1, sizeof(*iter));
ref_iterator = &iter->base;
- base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable);
+ base_ref_iterator_init(ref_iterator, &cache_ref_iterator_vtable, 1);
ALLOC_GROW(iter->levels, 10, iter->levels_alloc);
iter->levels_nr = 1;
* referred to by the last reference in the symlink chain.
*/
struct object_id oid;
-
- /*
- * If REF_KNOWS_PEELED, then this field holds the peeled value
- * of this reference, or null if the reference is known not to
- * be peelable. See the documentation for peel_ref() for an
- * exact definition of "peelable".
- */
- struct object_id peeled;
};
/*
* public values; see refs.h.
*/
-/*
- * The field ref_entry->u.value.peeled of this value entry contains
- * the correct peeled value for the reference, which might be
- * null_sha1 if the reference is not a tag or if it is broken.
- */
-#define REF_KNOWS_PEELED 0x10
-
/* ref_entry represents a directory of references */
-#define REF_DIR 0x20
+#define REF_DIR 0x10
/*
* Entry has not yet been read from disk (used only for REF_DIR
* entries representing loose references)
*/
-#define REF_INCOMPLETE 0x40
+#define REF_INCOMPLETE 0x20
/*
* A ref_entry represents either a reference or a "subdirectory" of
* Start iterating over references in `cache`. If `prefix` is
* specified, only include references whose names start with that
* prefix. If `prime_dir` is true, then fill any incomplete
- * directories before beginning the iteration.
+ * directories before beginning the iteration. The output is ordered
+ * by refname.
*/
struct ref_iterator *cache_ref_iterator_begin(struct ref_cache *cache,
const char *prefix,
int prime_dir);
-/*
- * Peel the entry (if possible) and return its new peel_status. If
- * repeel is true, re-peel the entry even if there is an old peeled
- * value that is already stored in it.
- *
- * It is OK to call this function with a packed reference entry that
- * might be stale and might even refer to an object that has since
- * been garbage-collected. In such a case, if the entry has
- * REF_KNOWS_PEELED then leave the status unchanged and return
- * PEEL_PEELED or PEEL_NON_TAG; otherwise, return PEEL_INVALID.
- */
-enum peel_status peel_entry(struct ref_entry *entry, int repeel);
-
#endif /* REFS_REF_CACHE_H */
*/
struct ref_iterator {
struct ref_iterator_vtable *vtable;
+
+ /*
+ * Does this `ref_iterator` iterate over references in order
+ * by refname?
+ */
+ unsigned int ordered : 1;
+
const char *refname;
const struct object_id *oid;
unsigned int flags;
* which the refname begins with prefix. If trim is non-zero, then
* trim that many characters off the beginning of each refname. flags
* can be DO_FOR_EACH_INCLUDE_BROKEN to include broken references in
- * the iteration.
+ * the iteration. The output is ordered by refname.
*/
struct ref_iterator *refs_ref_iterator_begin(
struct ref_store *refs,
* Iterate over the entries from iter0 and iter1, with the values
* interleaved as directed by the select function. The iterator takes
* ownership of iter0 and iter1 and frees them when the iteration is
- * over.
+ * over. A derived class should set `ordered` to 1 or 0 based on
+ * whether it generates its output in order by reference name.
*/
struct ref_iterator *merge_ref_iterator_begin(
+ int ordered,
struct ref_iterator *iter0, struct ref_iterator *iter1,
ref_iterator_select_fn *select, void *cb_data);
* As an convenience to callers, if prefix is the empty string and
* trim is zero, this function returns iter0 directly, without
* wrapping it.
+ *
+ * The resulting ref_iterator is ordered if iter0 is.
*/
struct ref_iterator *prefix_ref_iterator_begin(struct ref_iterator *iter0,
const char *prefix,
/*
* Base class constructor for ref_iterators. Initialize the
* ref_iterator part of iter, setting its vtable pointer as specified.
+ * `ordered` should be set to 1 if the iterator will iterate over
+ * references in order by refname; otherwise it should be set to 0.
* This is meant to be called only by the initializers of derived
* classes.
*/
void base_ref_iterator_init(struct ref_iterator *iter,
- struct ref_iterator_vtable *vtable);
+ struct ref_iterator_vtable *vtable,
+ int ordered);
/*
* Base class destructor for ref_iterators. Destroy the ref_iterator
struct strbuf *err);
typedef int pack_refs_fn(struct ref_store *ref_store, unsigned int flags);
-typedef int peel_ref_fn(struct ref_store *ref_store,
- const char *refname, unsigned char *sha1);
typedef int create_symref_fn(struct ref_store *ref_store,
const char *ref_target,
const char *refs_heads_master,
typedef int rename_ref_fn(struct ref_store *ref_store,
const char *oldref, const char *newref,
const char *logmsg);
+typedef int copy_ref_fn(struct ref_store *ref_store,
+ const char *oldref, const char *newref,
+ const char *logmsg);
/*
* Iterate over the references in `ref_store` whose names start with
* `prefix`. `prefix` is matched as a literal string, without regard
* for path separators. If prefix is NULL or the empty string, iterate
- * over all references in `ref_store`.
+ * over all references in `ref_store`. The output is ordered by
+ * refname.
*/
typedef struct ref_iterator *ref_iterator_begin_fn(
struct ref_store *ref_store,
ref_transaction_commit_fn *initial_transaction_commit;
pack_refs_fn *pack_refs;
- peel_ref_fn *peel_ref;
create_symref_fn *create_symref;
delete_refs_fn *delete_refs;
rename_ref_fn *rename_ref;
+ copy_ref_fn *copy_ref;
ref_iterator_begin_fn *iterator_begin;
read_raw_ref_fn *read_raw_ref;
rerere_id_hex(id),
rr->items[i].string, 0);
- if (write_in_full(out_fd, buf.buf, buf.len) != buf.len)
+ if (write_in_full(out_fd, buf.buf, buf.len) < 0)
die("unable to write rerere record");
strbuf_release(&buf);
#include "bisect.h"
#include "packfile.h"
#include "worktree.h"
+#include "argv-array.h"
volatile show_early_output_fn_t show_early_output;
unsigned flags)
{
struct rev_cmdline_info *info = &revs->cmdline;
- int nr = info->nr;
+ unsigned int nr = info->nr;
ALLOC_GROW(info->rev, nr + 1, info->alloc);
info->rev[nr].item = item;
return 0;
}
-struct cmdline_pathspec {
- int alloc;
- int nr;
- const char **path;
-};
-
-static void append_prune_data(struct cmdline_pathspec *prune, const char **av)
-{
- while (*av) {
- ALLOC_GROW(prune->path, prune->nr + 1, prune->alloc);
- prune->path[prune->nr++] = *(av++);
- }
-}
-
static void read_pathspec_from_stdin(struct rev_info *revs, struct strbuf *sb,
- struct cmdline_pathspec *prune)
+ struct argv_array *prune)
{
- while (strbuf_getline(sb, stdin) != EOF) {
- ALLOC_GROW(prune->path, prune->nr + 1, prune->alloc);
- prune->path[prune->nr++] = xstrdup(sb->buf);
- }
+ while (strbuf_getline(sb, stdin) != EOF)
+ argv_array_push(prune, sb->buf);
}
static void read_revisions_from_stdin(struct rev_info *revs,
- struct cmdline_pathspec *prune)
+ struct argv_array *prune)
{
struct strbuf sb;
int seen_dashdash = 0;
int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct setup_revision_opt *opt)
{
int i, flags, left, seen_dashdash, read_from_stdin, got_rev_arg = 0, revarg_opt;
- struct cmdline_pathspec prune_data;
+ struct argv_array prune_data = ARGV_ARRAY_INIT;
const char *submodule = NULL;
- memset(&prune_data, 0, sizeof(prune_data));
if (opt)
submodule = opt->submodule;
argv[i] = NULL;
argc = i;
if (argv[i + 1])
- append_prune_data(&prune_data, argv + i + 1);
+ argv_array_pushv(&prune_data, argv + i + 1);
seen_dashdash = 1;
break;
}
for (j = i; j < argc; j++)
verify_filename(revs->prefix, argv[j], j == i);
- append_prune_data(&prune_data, argv + i);
+ argv_array_pushv(&prune_data, argv + i);
break;
}
else
got_rev_arg = 1;
}
- if (prune_data.nr) {
+ if (prune_data.argc) {
/*
* If we need to introduce the magic "a lone ':' means no
* pathspec whatsoever", here is the place to do so.
* call init_pathspec() to set revs->prune_data here.
* }
*/
- ALLOC_GROW(prune_data.path, prune_data.nr + 1, prune_data.alloc);
- prune_data.path[prune_data.nr++] = NULL;
parse_pathspec(&revs->prune_data, 0, 0,
- revs->prefix, prune_data.path);
+ revs->prefix, prune_data.argv);
}
+ argv_array_clear(&prune_data);
if (revs->def == NULL)
revs->def = opt ? opt->def : NULL;
date_mode_explicit:1,
preserve_subject:1;
unsigned int disable_stdin:1;
+ /*
+ * Set `leak_pending` to prevent `prepare_revision_walk()` from clearing
+ * the array of pending objects (`pending`). It will still forget about
+ * the array and its entries, so they really are leaked. This can be
+ * useful if the `struct object_array` `pending` is copied before
+ * calling `prepare_revision_walk()`. By setting `leak_pending`, you
+ * effectively claim ownership of the old array, so you should most
+ * likely call `object_array_clear(&pending_copy)` once you are done.
+ * Observe that this is about ownership of the array and its entries,
+ * not the commits referenced by those entries.
+ */
unsigned int leak_pending:1;
/* --show-linear-break */
unsigned int track_linear:1,
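A minimal sketch of the usage pattern the leak_pending comment above describes;
the function and variable names (walk_without_losing_pending, pending_copy) are
illustrative, not part of the API:

#include "cache.h"
#include "revision.h"

static void walk_without_losing_pending(void)
{
	struct rev_info revs;
	struct object_array pending_copy;

	init_revisions(&revs, NULL);
	/* ... add_pending_object() calls would populate revs.pending ... */

	/* Keep a shallow copy of the pending array for later use. */
	pending_copy = revs.pending;
	revs.leak_pending = 1;

	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");

	/* ... inspect pending_copy while or after walking ... */

	/* We claimed ownership of the old array, so release it ourselves. */
	object_array_clear(&pending_copy);
}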
strbuf_release(&cap_buf);
return atomic_push_failure(args, remote_refs, ref);
}
- /* Fallthrough for non atomic case. */
+ /* else fallthrough */
default:
continue;
}
#include "trailer.h"
#include "log-tree.h"
#include "wt-status.h"
+#include "hashmap.h"
#define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
strbuf_release(&sob);
}
+
+int sequencer_make_script(int keep_empty, FILE *out,
+ int argc, const char **argv)
+{
+ char *format = NULL;
+ struct pretty_print_context pp = {0};
+ struct strbuf buf = STRBUF_INIT;
+ struct rev_info revs;
+ struct commit *commit;
+
+ init_revisions(&revs, NULL);
+ revs.verbose_header = 1;
+ revs.max_parents = 1;
+ revs.cherry_pick = 1;
+ revs.limited = 1;
+ revs.reverse = 1;
+ revs.right_only = 1;
+ revs.sort_order = REV_SORT_IN_GRAPH_ORDER;
+ revs.topo_order = 1;
+
+ revs.pretty_given = 1;
+ git_config_get_string("rebase.instructionFormat", &format);
+ if (!format || !*format) {
+ free(format);
+ format = xstrdup("%s");
+ }
+ get_commit_format(format, &revs);
+ free(format);
+ pp.fmt = revs.commit_format;
+ pp.output_encoding = get_log_output_encoding();
+
+ if (setup_revisions(argc, argv, &revs, NULL) > 1)
+ return error(_("make_script: unhandled options"));
+
+ if (prepare_revision_walk(&revs) < 0)
+ return error(_("make_script: error preparing revisions"));
+
+ while ((commit = get_revision(&revs))) {
+ strbuf_reset(&buf);
+ if (!keep_empty && is_original_commit_empty(commit))
+ strbuf_addf(&buf, "%c ", comment_line_char);
+ strbuf_addf(&buf, "pick %s ", oid_to_hex(&commit->object.oid));
+ pretty_print_commit(&pp, commit, &buf);
+ strbuf_addch(&buf, '\n');
+ fputs(buf.buf, out);
+ }
+ strbuf_release(&buf);
+ return 0;
+}
+
+int transform_todo_ids(int shorten_ids)
+{
+ const char *todo_file = rebase_path_todo();
+ struct todo_list todo_list = TODO_LIST_INIT;
+ int fd, res, i;
+ FILE *out;
+
+ strbuf_reset(&todo_list.buf);
+ fd = open(todo_file, O_RDONLY);
+ if (fd < 0)
+ return error_errno(_("could not open '%s'"), todo_file);
+ if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
+ close(fd);
+ return error(_("could not read '%s'."), todo_file);
+ }
+ close(fd);
+
+ res = parse_insn_buffer(todo_list.buf.buf, &todo_list);
+ if (res) {
+ todo_list_release(&todo_list);
+ return error(_("unusable todo list: '%s'"), todo_file);
+ }
+
+ out = fopen(todo_file, "w");
+ if (!out) {
+ todo_list_release(&todo_list);
+ return error(_("unable to open '%s' for writing"), todo_file);
+ }
+ for (i = 0; i < todo_list.nr; i++) {
+ struct todo_item *item = todo_list.items + i;
+ int bol = item->offset_in_buf;
+ const char *p = todo_list.buf.buf + bol;
+ int eol = i + 1 < todo_list.nr ?
+ todo_list.items[i + 1].offset_in_buf :
+ todo_list.buf.len;
+
+ if (item->command >= TODO_EXEC && item->command != TODO_DROP)
+ fwrite(p, eol - bol, 1, out);
+ else {
+ const char *id = shorten_ids ?
+ short_commit_name(item->commit) :
+ oid_to_hex(&item->commit->object.oid);
+ int len;
+
+ p += strspn(p, " \t"); /* left-trim command */
+ len = strcspn(p, " \t"); /* length of command */
+
+ fprintf(out, "%.*s %s %.*s\n",
+ len, p, id, item->arg_len, item->arg);
+ }
+ }
+ fclose(out);
+ todo_list_release(&todo_list);
+ return 0;
+}
+
+enum check_level {
+ CHECK_IGNORE = 0, CHECK_WARN, CHECK_ERROR
+};
+
+static enum check_level get_missing_commit_check_level(void)
+{
+ const char *value;
+
+ if (git_config_get_value("rebase.missingcommitscheck", &value) ||
+ !strcasecmp("ignore", value))
+ return CHECK_IGNORE;
+ if (!strcasecmp("warn", value))
+ return CHECK_WARN;
+ if (!strcasecmp("error", value))
+ return CHECK_ERROR;
+ warning(_("unrecognized setting %s for option"
+ "rebase.missingCommitsCheck. Ignoring."), value);
+ return CHECK_IGNORE;
+}
+
+/*
+ * Check if the user accidentally dropped some commits; the behaviour is
+ * determined by rebase.missingCommitsCheck. Also check for unrecognized
+ * commands and bad SHA-1s in commands.
+ */
+int check_todo_list(void)
+{
+ enum check_level check_level = get_missing_commit_check_level();
+ struct strbuf todo_file = STRBUF_INIT;
+ struct todo_list todo_list = TODO_LIST_INIT;
+ struct strbuf missing = STRBUF_INIT;
+ int advise_to_edit_todo = 0, res = 0, fd, i;
+
+ strbuf_addstr(&todo_file, rebase_path_todo());
+ fd = open(todo_file.buf, O_RDONLY);
+ if (fd < 0) {
+ res = error_errno(_("could not open '%s'"), todo_file.buf);
+ goto leave_check;
+ }
+ if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
+ close(fd);
+ res = error(_("could not read '%s'."), todo_file.buf);
+ goto leave_check;
+ }
+ close(fd);
+ advise_to_edit_todo = res =
+ parse_insn_buffer(todo_list.buf.buf, &todo_list);
+
+ if (res || check_level == CHECK_IGNORE)
+ goto leave_check;
+
+ /* Mark the commits in git-rebase-todo as seen */
+ for (i = 0; i < todo_list.nr; i++) {
+ struct commit *commit = todo_list.items[i].commit;
+ if (commit)
+ commit->util = (void *)1;
+ }
+
+ todo_list_release(&todo_list);
+ strbuf_addstr(&todo_file, ".backup");
+ fd = open(todo_file.buf, O_RDONLY);
+ if (fd < 0) {
+ res = error_errno(_("could not open '%s'"), todo_file.buf);
+ goto leave_check;
+ }
+ if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
+ close(fd);
+ res = error(_("could not read '%s'."), todo_file.buf);
+ goto leave_check;
+ }
+ close(fd);
+ strbuf_release(&todo_file);
+ res = !!parse_insn_buffer(todo_list.buf.buf, &todo_list);
+
+ /* Find commits in git-rebase-todo.backup yet unseen */
+ for (i = todo_list.nr - 1; i >= 0; i--) {
+ struct todo_item *item = todo_list.items + i;
+ struct commit *commit = item->commit;
+ if (commit && !commit->util) {
+ strbuf_addf(&missing, " - %s %.*s\n",
+ short_commit_name(commit),
+ item->arg_len, item->arg);
+ commit->util = (void *)1;
+ }
+ }
+
+ /* Warn about missing commits */
+ if (!missing.len)
+ goto leave_check;
+
+ if (check_level == CHECK_ERROR)
+ advise_to_edit_todo = res = 1;
+
+ fprintf(stderr,
+ _("Warning: some commits may have been dropped accidentally.\n"
+ "Dropped commits (newer to older):\n"));
+
+ /* Make the list user-friendly and display it */
+ fputs(missing.buf, stderr);
+ strbuf_release(&missing);
+
+ fprintf(stderr, _("To avoid this message, use \"drop\" to "
+ "explicitly remove a commit.\n\n"
+ "Use 'git config rebase.missingCommitsCheck' to change "
+ "the level of warnings.\n"
+ "The possible behaviours are: ignore, warn, error.\n\n"));
+
+leave_check:
+ strbuf_release(&todo_file);
+ todo_list_release(&todo_list);
+
+ if (advise_to_edit_todo)
+ fprintf(stderr,
+ _("You can fix this with 'git rebase --edit-todo' "
+ "and then run 'git rebase --continue'.\n"
+ "Or you can abort the rebase with 'git rebase"
+ " --abort'.\n"));
+
+ return res;
+}
+
+/* skip picking commits whose parents are unchanged */
+int skip_unnecessary_picks(void)
+{
+ const char *todo_file = rebase_path_todo();
+ struct strbuf buf = STRBUF_INIT;
+ struct todo_list todo_list = TODO_LIST_INIT;
+ struct object_id onto_oid, *oid = &onto_oid, *parent_oid;
+ int fd, i;
+
+ if (!read_oneliner(&buf, rebase_path_onto(), 0))
+ return error(_("could not read 'onto'"));
+ if (get_oid(buf.buf, &onto_oid)) {
+ strbuf_release(&buf);
+ return error(_("need a HEAD to fixup"));
+ }
+ strbuf_release(&buf);
+
+ fd = open(todo_file, O_RDONLY);
+ if (fd < 0) {
+ return error_errno(_("could not open '%s'"), todo_file);
+ }
+ if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
+ close(fd);
+ return error(_("could not read '%s'."), todo_file);
+ }
+ close(fd);
+ if (parse_insn_buffer(todo_list.buf.buf, &todo_list) < 0) {
+ todo_list_release(&todo_list);
+ return -1;
+ }
+
+ for (i = 0; i < todo_list.nr; i++) {
+ struct todo_item *item = todo_list.items + i;
+
+ if (item->command >= TODO_NOOP)
+ continue;
+ if (item->command != TODO_PICK)
+ break;
+ if (parse_commit(item->commit)) {
+ todo_list_release(&todo_list);
+ return error(_("could not parse commit '%s'"),
+ oid_to_hex(&item->commit->object.oid));
+ }
+ if (!item->commit->parents)
+ break; /* root commit */
+ if (item->commit->parents->next)
+ break; /* merge commit */
+ parent_oid = &item->commit->parents->item->object.oid;
+ if (hashcmp(parent_oid->hash, oid->hash))
+ break;
+ oid = &item->commit->object.oid;
+ }
+ if (i > 0) {
+ int offset = i < todo_list.nr ?
+ todo_list.items[i].offset_in_buf : todo_list.buf.len;
+ const char *done_path = rebase_path_done();
+
+ fd = open(done_path, O_CREAT | O_WRONLY | O_APPEND, 0666);
+ if (fd < 0) {
+ error_errno(_("could not open '%s' for writing"),
+ done_path);
+ todo_list_release(&todo_list);
+ return -1;
+ }
+ if (write_in_full(fd, todo_list.buf.buf, offset) < 0) {
+ error_errno(_("could not write to '%s'"), done_path);
+ todo_list_release(&todo_list);
+ close(fd);
+ return -1;
+ }
+ close(fd);
+
+ fd = open(rebase_path_todo(), O_WRONLY, 0666);
+ if (fd < 0) {
+ error_errno(_("could not open '%s' for writing"),
+ rebase_path_todo());
+ todo_list_release(&todo_list);
+ return -1;
+ }
+ if (write_in_full(fd, todo_list.buf.buf + offset,
+ todo_list.buf.len - offset) < 0) {
+ error_errno(_("could not write to '%s'"),
+ rebase_path_todo());
+ close(fd);
+ todo_list_release(&todo_list);
+ return -1;
+ }
+ if (ftruncate(fd, todo_list.buf.len - offset) < 0) {
+ error_errno(_("could not truncate '%s'"),
+ rebase_path_todo());
+ todo_list_release(&todo_list);
+ close(fd);
+ return -1;
+ }
+ close(fd);
+
+ todo_list.current = i;
+ if (is_fixup(peek_command(&todo_list, 0)))
+ record_in_rewritten(oid, peek_command(&todo_list, 0));
+ }
+
+ todo_list_release(&todo_list);
+ printf("%s\n", oid_to_hex(oid));
+
+ return 0;
+}
+
+struct subject2item_entry {
+ struct hashmap_entry entry;
+ int i;
+ char subject[FLEX_ARRAY];
+};
+
+static int subject2item_cmp(const void *fndata,
+ const struct subject2item_entry *a,
+ const struct subject2item_entry *b, const void *key)
+{
+ return key ? strcmp(a->subject, key) : strcmp(a->subject, b->subject);
+}
+
+/*
+ * Rearrange the todo list that has both "pick commit-id msg" and "pick
+ * commit-id fixup!/squash! msg" in it so that the latter is put immediately
+ * after the former, and change "pick" to "fixup"/"squash".
+ *
+ * Note that if the config has specified a custom instruction format, each log
+ * message will have to be retrieved from the commit (as the oneline in the
+ * script cannot be trusted) in order to normalize the autosquash arrangement.
+ */
+int rearrange_squash(void)
+{
+ const char *todo_file = rebase_path_todo();
+ struct todo_list todo_list = TODO_LIST_INIT;
+ struct hashmap subject2item;
+ int res = 0, rearranged = 0, *next, *tail, fd, i;
+ char **subjects;
+
+ fd = open(todo_file, O_RDONLY);
+ if (fd < 0)
+ return error_errno(_("could not open '%s'"), todo_file);
+ if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
+ close(fd);
+ return error(_("could not read '%s'."), todo_file);
+ }
+ close(fd);
+ if (parse_insn_buffer(todo_list.buf.buf, &todo_list) < 0) {
+ todo_list_release(&todo_list);
+ return -1;
+ }
+
+ /*
+ * The hashmap maps onelines to the respective todo list index.
+ *
+ * If any items need to be rearranged, the next[i] value will indicate
+ * which item was moved directly after the i'th.
+ *
+ * In that case, tail[i] will indicate the index of the latest item to
+ * be moved to appear after the i'th.
+ */
+ hashmap_init(&subject2item, (hashmap_cmp_fn) subject2item_cmp,
+ NULL, todo_list.nr);
+ ALLOC_ARRAY(next, todo_list.nr);
+ ALLOC_ARRAY(tail, todo_list.nr);
+ ALLOC_ARRAY(subjects, todo_list.nr);
+ for (i = 0; i < todo_list.nr; i++) {
+ struct strbuf buf = STRBUF_INIT;
+ struct todo_item *item = todo_list.items + i;
+ const char *commit_buffer, *subject, *p;
+ size_t subject_len;
+ int i2 = -1;
+ struct subject2item_entry *entry;
+
+ next[i] = tail[i] = -1;
+ if (item->command >= TODO_EXEC) {
+ subjects[i] = NULL;
+ continue;
+ }
+
+ if (is_fixup(item->command)) {
+ todo_list_release(&todo_list);
+ return error(_("the script was already rearranged."));
+ }
+
+ item->commit->util = item;
+
+ parse_commit(item->commit);
+ commit_buffer = get_commit_buffer(item->commit, NULL);
+ find_commit_subject(commit_buffer, &subject);
+ format_subject(&buf, subject, " ");
+ subject = subjects[i] = strbuf_detach(&buf, &subject_len);
+ unuse_commit_buffer(item->commit, commit_buffer);
+ if ((skip_prefix(subject, "fixup! ", &p) ||
+ skip_prefix(subject, "squash! ", &p))) {
+ struct commit *commit2;
+
+ for (;;) {
+ while (isspace(*p))
+ p++;
+ if (!skip_prefix(p, "fixup! ", &p) &&
+ !skip_prefix(p, "squash! ", &p))
+ break;
+ }
+
+ if ((entry = hashmap_get_from_hash(&subject2item,
+ strhash(p), p)))
+ /* found by title */
+ i2 = entry->i;
+ else if (!strchr(p, ' ') &&
+ (commit2 =
+ lookup_commit_reference_by_name(p)) &&
+ commit2->util)
+ /* found by commit name */
+ i2 = (struct todo_item *)commit2->util
+ - todo_list.items;
+ else {
+ /* copy can be a prefix of the commit subject */
+ for (i2 = 0; i2 < i; i2++)
+ if (subjects[i2] &&
+ starts_with(subjects[i2], p))
+ break;
+ if (i2 == i)
+ i2 = -1;
+ }
+ }
+ if (i2 >= 0) {
+ rearranged = 1;
+ todo_list.items[i].command =
+ starts_with(subject, "fixup!") ?
+ TODO_FIXUP : TODO_SQUASH;
+ if (next[i2] < 0)
+ next[i2] = i;
+ else
+ next[tail[i2]] = i;
+ tail[i2] = i;
+ } else if (!hashmap_get_from_hash(&subject2item,
+ strhash(subject), subject)) {
+ FLEX_ALLOC_MEM(entry, subject, subject, subject_len);
+ entry->i = i;
+ hashmap_entry_init(entry, strhash(entry->subject));
+ hashmap_put(&subject2item, entry);
+ }
+ }
+
+ if (rearranged) {
+ struct strbuf buf = STRBUF_INIT;
+
+ for (i = 0; i < todo_list.nr; i++) {
+ enum todo_command command = todo_list.items[i].command;
+ int cur = i;
+
+ /*
+ * Initially, all commands are 'pick's. If it is a
+ * fixup or a squash now, we have rearranged it.
+ */
+ if (is_fixup(command))
+ continue;
+
+ while (cur >= 0) {
+ int offset = todo_list.items[cur].offset_in_buf;
+ int end_offset = cur + 1 < todo_list.nr ?
+ todo_list.items[cur + 1].offset_in_buf :
+ todo_list.buf.len;
+ char *bol = todo_list.buf.buf + offset;
+ char *eol = todo_list.buf.buf + end_offset;
+
+ /* replace 'pick' by 'fixup' or 'squash' */
+ command = todo_list.items[cur].command;
+ if (is_fixup(command)) {
+ strbuf_addstr(&buf,
+ todo_command_info[command].str);
+ bol += strcspn(bol, " \t");
+ }
+
+ strbuf_add(&buf, bol, eol - bol);
+
+ cur = next[cur];
+ }
+ }
+
+ fd = open(todo_file, O_WRONLY);
+ if (fd < 0)
+ res = error_errno(_("could not open '%s'"), todo_file);
+ else if (write(fd, buf.buf, buf.len) < 0)
+ res = error_errno(_("could not read '%s'."), todo_file);
+ else if (ftruncate(fd, buf.len) < 0)
+ res = error_errno(_("could not finish '%s'"),
+ todo_file);
+ close(fd);
+ strbuf_release(&buf);
+ }
+
+ free(next);
+ free(tail);
+ for (i = 0; i < todo_list.nr; i++)
+ free(subjects[i]);
+ free(subjects);
+ hashmap_free(&subject2item, 1);
+ todo_list_release(&todo_list);
+
+ return res;
+}
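To make the rearrangement described above concrete, here is a hedged example
with made-up commit names and subjects: a todo list such as

	pick 1111111 Add frotz
	pick 2222222 Add nitfol
	pick 3333333 fixup! Add frotz

would be rewritten by rearrange_squash() to

	pick 1111111 Add frotz
	fixup 3333333 fixup! Add frotz
	pick 2222222 Add nitfol

so that the fixup is moved directly after the commit it amends and its command
is changed from "pick" to "fixup".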
int sequencer_rollback(struct replay_opts *opts);
int sequencer_remove_state(struct replay_opts *opts);
+int sequencer_make_script(int keep_empty, FILE *out,
+ int argc, const char **argv);
+
+int transform_todo_ids(int shorten_ids);
+int check_todo_list(void);
+int skip_unnecessary_picks(void);
+int rearrange_squash(void);
+
extern const char sign_off_header[];
void append_signoff(struct strbuf *msgbuf, int ignore_footer, unsigned flag);
/*
* Try to read the location of the git directory from the .git file,
- * return path to git directory if found.
+ * return the path to the git directory if found. The return value
+ * comes from a shared buffer.
*
* On failure, if return_error_code is not NULL, return_error_code
* will be set to an error code and NULL will be returned. If
return end;
}
-static void link_alt_odb_entries(const char *alt, int len, int sep,
+static void link_alt_odb_entries(const char *alt, int sep,
const char *relative_base, int depth)
{
struct strbuf objdirbuf = STRBUF_INIT;
static void read_info_alternates(const char * relative_base, int depth)
{
- char *map;
- size_t mapsz;
- struct stat st;
char *path;
- int fd;
+ struct strbuf buf = STRBUF_INIT;
path = xstrfmt("%s/info/alternates", relative_base);
- fd = git_open(path);
- free(path);
- if (fd < 0)
- return;
- if (fstat(fd, &st) || (st.st_size == 0)) {
- close(fd);
+ if (strbuf_read_file(&buf, path, 1024) < 0) {
+ warn_on_fopen_errors(path);
+ free(path);
return;
}
- mapsz = xsize_t(st.st_size);
- map = xmmap(NULL, mapsz, PROT_READ, MAP_PRIVATE, fd, 0);
- close(fd);
-
- link_alt_odb_entries(map, mapsz, '\n', relative_base, depth);
- munmap(map, mapsz);
+ link_alt_odb_entries(buf.buf, '\n', relative_base, depth);
+ strbuf_release(&buf);
+ free(path);
}
struct alternate_object_database *alloc_alt_odb(const char *dir)
if (commit_lock_file(lock))
die_errno("unable to move new alternates file into place");
if (alt_odb_tail)
- link_alt_odb_entries(reference, strlen(reference), '\n', NULL, 0);
+ link_alt_odb_entries(reference, '\n', NULL, 0);
}
free(alts);
}
*/
prepare_alt_odb();
- link_alt_odb_entries(reference, strlen(reference), '\n', NULL, 0);
+ link_alt_odb_entries(reference, '\n', NULL, 0);
}
/*
if (!alt) alt = "";
alt_odb_tail = &alt_odb_list;
- link_alt_odb_entries(alt, strlen(alt), PATH_SEP, NULL, 0);
+ link_alt_odb_entries(alt, PATH_SEP, NULL, 0);
read_info_alternates(get_object_directory(), 0);
}
ret = index_mem(sha1, "", size, type, path, flags);
} else if (size <= SMALL_FILE_SIZE) {
char *buf = xmalloc(size);
- if (size == read_in_full(fd, buf, size))
- ret = index_mem(sha1, buf, size, type, path, flags);
+ ssize_t read_result = read_in_full(fd, buf, size);
+ if (read_result < 0)
+ ret = error_errno("read error while indexing %s",
+ path ? path : "<unknown>");
+ else if (read_result != size)
+ ret = error("short read while indexing %s",
+ path ? path : "<unknown>");
else
- ret = error_errno("short read");
+ ret = index_mem(sha1, buf, size, type, path, flags);
free(buf);
} else {
void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
int read_pack_header(int fd, struct pack_header *header)
{
- if (read_in_full(fd, header, sizeof(*header)) < sizeof(*header))
+ if (read_in_full(fd, header, sizeof(*header)) != sizeof(*header))
/* "eof before pack header was fully read" */
return PH_ERROR_EOF;
cur_depth = 0;
} else {
commit = (struct commit *)
- stack.objects[--stack.nr].item;
+ object_array_pop(&stack);
cur_depth = *(int *)commit->util;
}
}
if (write_shallow_commits(&sb, 0, extra)) {
temp = xmks_tempfile(git_path("shallow_XXXXXX"));
- if (write_in_full(temp->fd, sb.buf, sb.len) != sb.len ||
+ if (write_in_full(temp->fd, sb.buf, sb.len) < 0 ||
close_tempfile_gently(temp) < 0)
die_errno("failed to write to %s",
get_tempfile_path(temp));
LOCK_DIE_ON_ERROR);
check_shallow_file_for_update();
if (write_shallow_commits(&sb, 0, extra)) {
- if (write_in_full(fd, sb.buf, sb.len) != sb.len)
+ if (write_in_full(fd, sb.buf, sb.len) < 0)
die_errno("failed to write to %s",
get_lock_file_path(shallow_lock));
*alternate_shallow_file = get_lock_file_path(shallow_lock);
LOCK_DIE_ON_ERROR);
check_shallow_file_for_update();
if (write_shallow_commits_1(&sb, 0, NULL, SEEN_ONLY)) {
- if (write_in_full(fd, sb.buf, sb.len) != sb.len)
+ if (write_in_full(fd, sb.buf, sb.len) < 0)
die_errno("failed to write to %s",
get_lock_file_path(&shallow_lock));
commit_lock_file(&shallow_lock);
return execv_git_cmd(my_argv);
}
-static int do_cvs_cmd(const char *me, char *arg)
-{
- const char *cvsserver_argv[3] = {
- "cvsserver", "server", NULL
- };
-
- if (!arg || strcmp(arg, "server"))
- die("git-cvsserver only handles server: %s", arg);
-
- setup_path();
- return execv_git_cmd(cvsserver_argv);
-}
-
static int is_valid_cmd_name(const char *cmd)
{
/* Test command contains no . or / characters */
{ "git-receive-pack", do_generic_cmd },
{ "git-upload-pack", do_generic_cmd },
{ "git-upload-archive", do_generic_cmd },
- { "cvs", do_cvs_cmd },
{ NULL },
};
kept = 0;
wrote = write_in_full(fd, buf, readlen);
- if (wrote != readlen)
+ if (wrote < 0)
goto close_and_exit;
}
if (kept && (lseek(fd, kept - 1, SEEK_CUR) == (off_t) -1 ||
#ifndef STRING_LIST_H
#define STRING_LIST_H
+/**
+ * The string_list API offers a data structure and functions to handle
+ * sorted and unsorted arrays of strings. A "sorted" list is one whose
+ * entries are sorted by string value in `strcmp()` order.
+ *
+ * The caller:
+ *
+ * . Allocates and clears a `struct string_list` variable.
+ *
+ * . Initializes the members. You might want to set the flag `strdup_strings`
+ * if the strings should be strdup()ed. For example, this is necessary
+ * when you add something like git_path("..."), since that function returns
+ * a static buffer that will change with the next call to git_path().
+ *
+ * If you need something advanced, you can manually malloc() the `items`
+ * member (you need this if you add things later) and you should set the
+ * `nr` and `alloc` members in that case, too.
+ *
+ * . Adds new items to the list, using `string_list_append`,
+ * `string_list_append_nodup`, `string_list_insert`,
+ * `string_list_split`, and/or `string_list_split_in_place`.
+ *
+ * . Can check if a string is in the list using `string_list_has_string` or
+ * `unsorted_string_list_has_string` and get it from the list using
+ * `string_list_lookup` for sorted lists.
+ *
+ * . Can sort an unsorted list using `string_list_sort`.
+ *
+ * . Can remove duplicate items from a sorted list using
+ * `string_list_remove_duplicates`.
+ *
+ * . Can remove individual items of an unsorted list using
+ * `unsorted_string_list_delete_item`.
+ *
+ * . Can remove items not matching a criterion from a sorted or unsorted
+ * list using `filter_string_list`, or remove empty strings using
+ * `string_list_remove_empty_items`.
+ *
+ * . Finally it should free the list using `string_list_clear`.
+ *
+ * Example:
+ *
+ * struct string_list list = STRING_LIST_INIT_NODUP;
+ * int i;
+ *
+ * string_list_append(&list, "foo");
+ * string_list_append(&list, "bar");
+ * for (i = 0; i < list.nr; i++)
+ * printf("%s\n", list.items[i].string)
+ *
+ * NOTE: It is more efficient to build an unsorted list and sort it
+ * afterwards, instead of building a sorted list (`O(n log n)` instead of
+ * `O(n^2)`).
+ *
+ * However, if you need to check whether a certain string has already been
+ * added while you are building the list, do not take this shortcut
+ * (i.e. do not rely on unsorted_string_list_has_string()), because the
+ * complexity would become quadratic again (and with a worse factor).
+ */
+
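As a complement to the unsorted example above, here is a hedged sketch of the
sorted-list workflow; demo_sorted_lookup is an illustrative name, not part of
the API:

#include "cache.h"
#include "string-list.h"

static void demo_sorted_lookup(void)
{
	struct string_list list = STRING_LIST_INIT_DUP;

	/* Build unsorted, then sort once: cheaper than keeping it sorted. */
	string_list_append(&list, "refs/heads/next");
	string_list_append(&list, "refs/heads/master");
	string_list_sort(&list);

	/* The sorted-only helpers are now safe to use. */
	if (string_list_has_string(&list, "refs/heads/master"))
		printf("found\n");
	string_list_insert(&list, "refs/heads/maint")->util = NULL;

	string_list_clear(&list, 0);
}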
+/**
+ * Represents an item of the list. The `string` member is a pointer to the
+ * string, and you may use the `util` member for any purpose, if you want.
+ */
struct string_list_item {
char *string;
void *util;
typedef int (*compare_strings_fn)(const char *, const char *);
+/**
+ * Represents the list itself.
+ *
+ * . The array of items are available via the `items` member.
+ * . The `nr` member contains the number of items stored in the list.
+ * . The `alloc` member is used to avoid reallocating at every insertion.
+ * You should not tamper with it.
+ * . Setting the `strdup_strings` member to 1 will strdup() the strings
+ * before adding them, see above.
+ * . The `compare_strings_fn` member is used to specify a custom compare
+ * function, otherwise `strcmp()` is used as the default function.
+ */
struct string_list {
struct string_list_item *items;
unsigned int nr, alloc;
#define STRING_LIST_INIT_NODUP { NULL, 0, 0, 0, NULL }
#define STRING_LIST_INIT_DUP { NULL, 0, 0, 1, NULL }
+/* General functions which work with both sorted and unsorted lists. */
+
+/**
+ * Initialize the members of the string_list, set `strdup_strings`
+ * member according to the value of the second parameter.
+ */
void string_list_init(struct string_list *list, int strdup_strings);
+/** Callback function type for for_each_string_list */
+typedef int (*string_list_each_func_t)(struct string_list_item *, void *);
+
+/**
+ * Apply `want` to each item in `list`, retaining only the ones for which
+ * the function returns true. If `free_util` is true, call free() on
+ * the util members of any items that have to be deleted. Preserve
+ * the order of the items that are retained.
+ */
+void filter_string_list(struct string_list *list, int free_util,
+ string_list_each_func_t want, void *cb_data);
+
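A small hedged example of the filtering described above; is_branch_ref is an
illustrative callback, not an existing helper:

#include "cache.h"
#include "string-list.h"

/* Keep only items whose string names a local branch ref. */
static int is_branch_ref(struct string_list_item *item, void *cb_data)
{
	return starts_with(item->string, "refs/heads/");
}

With a populated `list`, calling filter_string_list(&list, 0, is_branch_ref,
NULL) then drops every item for which the callback returns false, preserving
the order of the items that remain.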
+/**
+ * Dump a string_list to stdout, useful mainly for debugging
+ * purposes. It can take an optional header argument and it writes out
+ * the string-pointer pairs of the string_list, each one in its own
+ * line.
+ */
void print_string_list(const struct string_list *p, const char *text);
+
+/**
+ * Free a string_list. The `string` pointer of the items will be freed
+ * in case the `strdup_strings` member of the string_list is set. The
+ * second parameter controls if the `util` pointer of the items should
+ * be freed or not.
+ */
void string_list_clear(struct string_list *list, int free_util);
-/* Use this function to call a custom clear function on each util pointer */
-/* The string associated with the util pointer is passed as the second argument */
+/**
+ * Callback type for `string_list_clear_func`. The string associated
+ * with the util pointer is passed as the second argument.
+ */
typedef void (*string_list_clear_func_t)(void *p, const char *str);
+
+/** Call a custom clear function on each util pointer */
void string_list_clear_func(struct string_list *list, string_list_clear_func_t clearfunc);
-/* Use this function or the macro below to iterate over each item */
-typedef int (*string_list_each_func_t)(struct string_list_item *, void *);
+/**
+ * Apply `func` to each item. If `func` returns nonzero, the
+ * iteration aborts and the return value is propagated.
+ */
int for_each_string_list(struct string_list *list,
- string_list_each_func_t, void *cb_data);
-#define for_each_string_list_item(item,list) \
- for (item = (list)->items; item < (list)->items + (list)->nr; ++item)
+ string_list_each_func_t func, void *cb_data);
-/*
- * Apply want to each item in list, retaining only the ones for which
- * the function returns true. If free_util is true, call free() on
- * the util members of any items that have to be deleted. Preserve
- * the order of the items that are retained.
- */
-void filter_string_list(struct string_list *list, int free_util,
- string_list_each_func_t want, void *cb_data);
+/** Iterate over each item, as a macro. */
+#define for_each_string_list_item(item,list) \
+ for (item = (list)->items; \
+ item && item < (list)->items + (list)->nr; \
+ ++item)
-/*
+/**
* Remove any empty strings from the list. If free_util is true, call
* free() on the util members of any items that have to be deleted.
* Preserve the order of the items that are retained.
void string_list_remove_empty_items(struct string_list *list, int free_util);
/* Use these functions only on sorted lists: */
+
+/** Determine if the string_list has a given string or not. */
int string_list_has_string(const struct string_list *list, const char *string);
int string_list_find_insert_index(const struct string_list *list, const char *string,
int negative_existing_index);
-/*
- * Inserts the given string into the sorted list.
- * If the string already exists, the list is not altered.
- * Returns the string_list_item, the string is part of.
+
+/**
+ * Insert a new element to the string_list. The returned pointer can
+ * be handy if you want to write something to the `util` pointer of
+ * the string_list_item containing the just added string. If the given
+ * string already exists the insertion will be skipped and the pointer
+ * to the existing item returned.
+ *
+ * Since this function uses xrealloc() (which die()s if it fails) if the
+ * list needs to grow, it is safe not to check the pointer. I.e. you may
+ * write `string_list_insert(...)->util = ...;`.
*/
struct string_list_item *string_list_insert(struct string_list *list, const char *string);
-/*
- * Removes the given string from the sorted list.
- * If the string doesn't exist, the list is not altered.
+/**
+ * Remove the given string from the sorted list. If the string
+ * doesn't exist, the list is not altered.
*/
extern void string_list_remove(struct string_list *list, const char *string,
int free_util);
-/*
- * Checks if the given string is part of a sorted list. If it is part of the list,
+/**
+ * Check if the given string is part of a sorted list. If it is part of the list,
+ * return the corresponding string_list_item, NULL otherwise.
*/
struct string_list_item *string_list_lookup(struct string_list *list, const char *string);
/* Use these functions only on unsorted lists: */
-/*
+/**
* Add string to the end of list. If list->strdup_string is set, then
* string is copied; otherwise the new string_list_entry refers to the
* input string.
*/
struct string_list_item *string_list_append(struct string_list *list, const char *string);
-/*
+/**
* Like string_list_append(), except string is never copied. When
* list->strdup_strings is set, this function can be used to hand
* ownership of a malloc()ed string to list without making an extra
*/
struct string_list_item *string_list_append_nodup(struct string_list *list, char *string);
+/**
+ * Sort the list's entries by string value in `strcmp()` order.
+ */
void string_list_sort(struct string_list *list);
+
+/**
+ * Like `string_list_has_string()` but for unsorted lists. Linear in
+ * size of the list.
+ */
int unsorted_string_list_has_string(struct string_list *list, const char *string);
+
+/**
+ * Like `string_list_lookup()` but for unsorted lists. Linear in size
+ * of the list.
+ */
struct string_list_item *unsorted_string_list_lookup(struct string_list *list,
const char *string);
-
+/**
+ * Remove an item from a string_list. The `string` pointer of the
+ * items will be freed in case the `strdup_strings` member of the
+ * string_list is set. The third parameter controls if the `util`
+ * pointer of the items should be freed or not.
+ */
void unsorted_string_list_delete_item(struct string_list *list, int i, int free_util);
-/*
- * Split string into substrings on character delim and append the
- * substrings to list. The input string is not modified.
+/**
+ * Split string into substrings on character `delim` and append the
+ * substrings to `list`. The input string is not modified.
* list->strdup_strings must be set, as new memory needs to be
* allocated to hold the substrings. If maxsplit is non-negative,
* then split at most maxsplit times. Return the number of substrings
if (supported_capabilities)
*supported_capabilities |= capabilities[i].flag;
} else {
- warning("subprocess '%s' requested unsupported capability '%s'",
- process->argv[0], p);
+ die("subprocess '%s' requested unsupported capability '%s'",
+ process->argv[0], p);
}
}
if (add_submodule_odb(path)) {
if (!message)
- message = "(not initialized)";
+ message = "(commits not present)";
goto output_header;
}
return 0;
}
+struct has_commit_data {
+ int result;
+ const char *path;
+};
+
static int check_has_commit(const struct object_id *oid, void *data)
{
- int *has_commit = data;
+ struct has_commit_data *cb = data;
- if (!lookup_commit_reference(oid))
- *has_commit = 0;
+ enum object_type type = sha1_object_info(oid->hash, NULL);
- return 0;
+ switch (type) {
+ case OBJ_COMMIT:
+ return 0;
+ case OBJ_BAD:
+ /*
+ * Object is missing or invalid. If invalid, an error message
+ * has already been printed.
+ */
+ cb->result = 0;
+ return 0;
+ default:
+ die(_("submodule entry '%s' (%s) is a %s, not a commit"),
+ cb->path, oid_to_hex(oid), typename(type));
+ }
}
static int submodule_has_commits(const char *path, struct oid_array *commits)
{
- int has_commit = 1;
+ struct has_commit_data has_commit = { 1, path };
/*
* Perform a cheap, but incorrect check for the existence of 'commits'.
oid_array_for_each_unique(commits, check_has_commit, &has_commit);
- if (has_commit) {
+ if (has_commit.result) {
/*
* Even if the submodule is checked out and the commit is
* present, make sure it exists in the submodule's object store
cp.dir = path;
if (capture_command(&cp, &out, GIT_MAX_HEXSZ + 1) || out.len)
- has_commit = 0;
+ has_commit.result = 0;
strbuf_release(&out);
}
- return has_commit;
+ return has_commit.result;
}
static int submodule_needs_pushing(const char *path, struct oid_array *commits)
add_object_array(merges.objects[i].item, NULL, result);
}
- free(merges.objects);
+ object_array_clear(&merges);
return result->nr;
}
print_commit((struct commit *) merges.objects[i].item);
}
- free(merges.objects);
+ object_array_clear(&merges);
return 0;
}
return ret;
}
+/*
+ * Put the gitdir for a submodule (given relative to the main
+ * repository worktree) into `buf`, or return -1 on error.
+ */
int submodule_to_gitdir(struct strbuf *buf, const char *submodule)
{
const struct submodule *sub;
/*
* Prepare the "env_array" parameter of a "struct child_process" for executing
- * a submodule by clearing any repo-specific envirionment variables, but
+ * a submodule by clearing any repo-specific environment variables, but
* retaining any config in the environment.
*/
extern void prepare_submodule_repo_env(struct argv_array *out);
$ sh ./t9200-git-cvsexport-commit.sh --run='-3 21'
-As noted above, the test set is built going though items left to
-right, so this:
+As noted above, the test set is built by going through the items
+from left to right, so this:
$ sh ./t9200-git-cvsexport-commit.sh --run='1-4 !3'
-will run tests 1, 2, and 4. Items that comes later have higher
+will run tests 1, 2, and 4. Items that come later have higher
precedence. It means that this:
$ sh ./t9200-git-cvsexport-commit.sh --run='!3 1-4'
while (<>) {
chomp;
/\bsed\s+-i/ and err 'sed -i is not portable';
- /\becho\s+-n/ and err 'echo -n is not portable (please use printf)';
+ /\becho\s+-[neE]/ and err 'echo with option is not portable (please use printf)';
/^\s*declare\s+/ and err 'arrays/declare not portable';
/^\s*[^#]\s*which\s/ and err 'which is not portable (please use type)';
/\btest\s+[^=]*==/ and err '"test a == b" is not portable (please use =)';
/test-svn-fe
/test-urlmatch-normalization
/test-wildmatch
+/test-write-cache
}
fd = open (argv[4], O_WRONLY|O_CREAT|O_TRUNC, 0666);
- if (fd < 0 || write_in_full(fd, out_buf, out_size) != out_size) {
+ if (fd < 0 || write_in_full(fd, out_buf, out_size) < 0) {
perror(argv[4]);
return 1;
}
static void handle_command(const char *command, const char *arg, struct line_buffer *buf)
{
- switch (*command) {
- case 'b':
- if (starts_with(command, "binary ")) {
- struct strbuf sb = STRBUF_INIT;
- strbuf_addch(&sb, '>');
- buffer_read_binary(buf, &sb, strtouint32(arg));
- fwrite(sb.buf, 1, sb.len, stdout);
- strbuf_release(&sb);
- return;
- }
- case 'c':
- if (starts_with(command, "copy ")) {
- buffer_copy_bytes(buf, strtouint32(arg));
- return;
- }
- case 's':
- if (starts_with(command, "skip ")) {
- buffer_skip_bytes(buf, strtouint32(arg));
- return;
- }
- default:
+ if (starts_with(command, "binary ")) {
+ struct strbuf sb = STRBUF_INIT;
+ strbuf_addch(&sb, '>');
+ buffer_read_binary(buf, &sb, strtouint32(arg));
+ fwrite(sb.buf, 1, sb.len, stdout);
+ strbuf_release(&sb);
+ } else if (starts_with(command, "copy ")) {
+ buffer_copy_bytes(buf, strtouint32(arg));
+ } else if (starts_with(command, "skip ")) {
+ buffer_skip_bytes(buf, strtouint32(arg));
+ } else {
die("unrecognized command: %s", command);
}
}
const char *prefix = "prefix/";
const char *usage[] = {
"test-parse-options <options>",
+ "",
+ "A helper function for the parse-options API.",
NULL
};
struct string_list expect = STRING_LIST_INIT_NODUP;
cat >expect <<\EOF
usage: test-parse-options <options>
+ A helper function for the parse-options API.
+
--yes get a boolean
-D, --no-doubt begins with 'no-'
-B, --no-fear be brave
(ulimit -n 32 && "$@")
}
-test_lazy_prereq ULIMIT_FILE_DESCRIPTORS 'run_with_limited_open_files true'
+test_lazy_prereq ULIMIT_FILE_DESCRIPTORS '
+ test_have_prereq !MINGW,!CYGWIN &&
+ run_with_limited_open_files true
+'
test_expect_success ULIMIT_FILE_DESCRIPTORS 'large transaction creating branches does not burst open file limit' '
(
test_cmp expect actual
'
+test_expect_success 'rev-parse --is-shallow-repository in shallow repo' '
+ test_commit test_commit &&
+ echo true >expect &&
+ git clone --depth 1 --no-local . shallow &&
+ test_when_finished "rm -rf shallow" &&
+ git -C shallow rev-parse --is-shallow-repository >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'rev-parse --is-shallow-repository in non-shallow repo' '
+ echo false >expect &&
+ git rev-parse --is-shallow-repository >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'showing the superproject correctly' '
git rev-parse --show-superproject-working-tree >out &&
test_must_be_empty out &&
|g,fluf?path short and long option optional argument
|longest=very-long-argument-hint a very long argument hint
|pair=key=value with an equals sign in the hint
+|aswitch help te=t contains? fl*g characters!`
+|bswitch=hint hint has trailing tab character
+|cswitch switch has trailing tab character
|short-hint=a with a one symbol hint
|
|Extras
EOF
'
+test_expect_success 'setup optionspec-no-switches' '
+ sed -e "s/^|//" >optionspec_no_switches <<\EOF
+|some-command [options] <args>...
+|
+|some-command does foo and bar!
+|--
+EOF
+'
+
+test_expect_success 'setup optionspec-only-hidden-switches' '
+ sed -e "s/^|//" >optionspec_only_hidden_switches <<\EOF
+|some-command [options] <args>...
+|
+|some-command does foo and bar!
+|--
+|hidden1* A hidden switch
+EOF
+'
+
test_expect_success 'test --parseopt help output' '
sed -e "s/^|//" >expect <<\END_EXPECT &&
|cat <<\EOF
| --longest <very-long-argument-hint>
| a very long argument hint
| --pair <key=value> with an equals sign in the hint
+| --aswitch help te=t contains? fl*g characters!`
+| --bswitch <hint> hint has trailing tab character
+| --cswitch switch has trailing tab character
| --short-hint <a> with a one symbol hint
|
|Extras
test_i18ncmp expect output
'
+test_expect_success 'test --parseopt help output no switches' '
+ sed -e "s/^|//" >expect <<\END_EXPECT &&
+|cat <<\EOF
+|usage: some-command [options] <args>...
+|
+| some-command does foo and bar!
+|
+|EOF
+END_EXPECT
+ test_expect_code 129 git rev-parse --parseopt -- -h > output < optionspec_no_switches &&
+ test_i18ncmp expect output
+'
+
+test_expect_success 'test --parseopt help output hidden switches' '
+ sed -e "s/^|//" >expect <<\END_EXPECT &&
+|cat <<\EOF
+|usage: some-command [options] <args>...
+|
+| some-command does foo and bar!
+|
+|EOF
+END_EXPECT
+ test_expect_code 129 git rev-parse --parseopt -- -h > output < optionspec_only_hidden_switches &&
+ test_i18ncmp expect output
+'
+
+test_expect_success 'test --parseopt help-all output hidden switches' '
+ sed -e "s/^|//" >expect <<\END_EXPECT &&
+|cat <<\EOF
+|usage: some-command [options] <args>...
+|
+| some-command does foo and bar!
+|
+| --hidden1 A hidden switch
+|
+|EOF
+END_EXPECT
+ test_expect_code 129 git rev-parse --parseopt -- --help-all > output < optionspec_only_hidden_switches &&
+ test_i18ncmp expect output
+'
+
+test_expect_success 'test --parseopt invalid switch help output' '
+ sed -e "s/^|//" >expect <<\END_EXPECT &&
+|error: unknown option `does-not-exist'\''
+|usage: some-command [options] <args>...
+|
+| some-command does foo and bar!
+|
+| -h, --help show the help
+| --foo some nifty option --foo
+| --bar ... some cool option --bar with an argument
+| -b, --baz a short and long option
+|
+|An option group Header
+| -C[...] option C with an optional argument
+| -d, --data[=...] short and long option with an optional argument
+|
+|Argument hints
+| -B <arg> short option required argument
+| --bar2 <arg> long option required argument
+| -e, --fuz <with-space>
+| short and long option required argument
+| -s[<some>] short option optional argument
+| --long[=<data>] long option optional argument
+| -g, --fluf[=<path>] short and long option optional argument
+| --longest <very-long-argument-hint>
+| a very long argument hint
+| --pair <key=value> with an equals sign in the hint
+| --aswitch help te=t contains? fl*g characters!`
+| --bswitch <hint> hint has trailing tab character
+| --cswitch switch has trailing tab character
+| --short-hint <a> with a one symbol hint
+|
+|Extras
+| --extra1 line above used to cause a segfault but no longer does
+|
+END_EXPECT
+ test_expect_code 129 git rev-parse --parseopt -- --does-not-exist 1>/dev/null 2>output < optionspec &&
+ test_i18ncmp expect output
+'
+
test_expect_success 'setup expect.1' "
cat > expect <<EOF
-set -- --foo --bar 'ham' -b -- 'arg'
+set -- --foo --bar 'ham' -b --aswitch -- 'arg'
EOF
"
test_expect_success 'test --parseopt' '
- git rev-parse --parseopt -- --foo --bar=ham --baz arg < optionspec > output &&
+ git rev-parse --parseopt -- --foo --bar=ham --baz --aswitch arg < optionspec > output &&
test_cmp expect output
'
test_expect_success 'test --parseopt with mixed options and arguments' '
- git rev-parse --parseopt -- --foo arg --bar=ham --baz < optionspec > output &&
+ git rev-parse --parseopt -- --foo arg --bar=ham --baz --aswitch < optionspec > output &&
test_cmp expect output
'
test_must_fail git config branch.s/s.dummy
'
+test_expect_success 'git branch -m correctly renames multiple config sections' '
+ test_when_finished "git checkout master" &&
+ git checkout -b source master &&
+
+ # Assert that a config file with multiple config sections has
+ # those sections preserved...
+ cat >expect <<-\EOF &&
+ branch.dest.key1=value1
+ some.gar.b=age
+ branch.dest.key2=value2
+ EOF
+ cat >config.branch <<\EOF &&
+;; Note the lack of -\EOF above & mixed indenting here. This is
+;; intentional, we are also testing that the formatting of copied
+;; sections is preserved.
+
+;; Comment for source. Tabs
+[branch "source"]
+ ;; Comment for the source value
+ key1 = value1
+;; Comment for some.gar. Spaces
+[some "gar"]
+ ;; Comment for the some.gar value
+ b = age
+;; Comment for source, again. Mixed tabs/spaces.
+[branch "source"]
+ ;; Comment for the source value, again
+ key2 = value2
+EOF
+ cat config.branch >>.git/config &&
+ git branch -m source dest &&
+ git config -f .git/config -l | grep -F -e source -e dest -e some.gar >actual &&
+ test_cmp expect actual &&
+
+ # ...and that the comments for those sections are also
+ # preserved.
+ sed "s/\"source\"/\"dest\"/" config.branch >expect &&
+ sed -n -e "/Note the lack/,\$p" .git/config >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'git branch -c dumps usage' '
+ test_expect_code 128 git branch -c 2>err &&
+ test_i18ngrep "branch name required" err
+'
+
+test_expect_success 'git branch --copy dumps usage' '
+ test_expect_code 128 git branch --copy 2>err &&
+ test_i18ngrep "branch name required" err
+'
+
+test_expect_success 'git branch -c d e should work' '
+ git branch -l d &&
+ git reflog exists refs/heads/d &&
+ git config branch.d.dummy Hello &&
+ git branch -c d e &&
+ git reflog exists refs/heads/d &&
+ git reflog exists refs/heads/e &&
+ echo Hello >expect &&
+ git config branch.e.dummy >actual &&
+ test_cmp expect actual &&
+ echo Hello >expect &&
+ git config branch.d.dummy >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'git branch --copy is a synonym for -c' '
+ git branch -l copy &&
+ git reflog exists refs/heads/copy &&
+ git config branch.copy.dummy Hello &&
+ git branch --copy copy copy-to &&
+ git reflog exists refs/heads/copy &&
+ git reflog exists refs/heads/copy-to &&
+ echo Hello >expect &&
+ git config branch.copy.dummy >actual &&
+ test_cmp expect actual &&
+ echo Hello >expect &&
+ git config branch.copy-to.dummy >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'git branch -c ee ef should copy ee to create branch ef' '
+ git checkout -b ee &&
+ git reflog exists refs/heads/ee &&
+ git config branch.ee.dummy Hello &&
+ git branch -c ee ef &&
+ git reflog exists refs/heads/ee &&
+ git reflog exists refs/heads/ef &&
+ test $(git config branch.ee.dummy) = Hello &&
+ test $(git config branch.ef.dummy) = Hello &&
+ test $(git rev-parse --abbrev-ref HEAD) = ee
+'
+
+test_expect_success 'git branch -c f/f g/g should work' '
+ git branch -l f/f &&
+ git reflog exists refs/heads/f/f &&
+ git config branch.f/f.dummy Hello &&
+ git branch -c f/f g/g &&
+ git reflog exists refs/heads/f/f &&
+ git reflog exists refs/heads/g/g &&
+ test $(git config branch.f/f.dummy) = Hello &&
+ test $(git config branch.g/g.dummy) = Hello
+'
+
+test_expect_success 'git branch -c m2 m2 should work' '
+ git branch -l m2 &&
+ git reflog exists refs/heads/m2 &&
+ git config branch.m2.dummy Hello &&
+ git branch -c m2 m2 &&
+ git reflog exists refs/heads/m2 &&
+ test $(git config branch.m2.dummy) = Hello
+'
+
+test_expect_success 'git branch -c zz zz/zz should fail' '
+ git branch -l zz &&
+ git reflog exists refs/heads/zz &&
+ test_must_fail git branch -c zz zz/zz
+'
+
+test_expect_success 'git branch -c b/b b should fail' '
+ git branch -l b/b &&
+ test_must_fail git branch -c b/b b
+'
+
+test_expect_success 'git branch -C o/q o/p should work when o/p exists' '
+ git branch -l o/q &&
+ git reflog exists refs/heads/o/q &&
+ git reflog exists refs/heads/o/p &&
+ git branch -C o/q o/p
+'
+
+test_expect_success 'git branch -c -f o/q o/p should work when o/p exists' '
+ git reflog exists refs/heads/o/q &&
+ git reflog exists refs/heads/o/p &&
+ git branch -c -f o/q o/p
+'
+
+test_expect_success 'git branch -c qq rr/qq should fail when rr exists' '
+ git branch qq &&
+ git branch rr &&
+ test_must_fail git branch -c qq rr/qq
+'
+
+test_expect_success 'git branch -C b1 b2 should fail when b2 is checked out' '
+ git branch b1 &&
+ git checkout -b b2 &&
+ test_must_fail git branch -C b1 b2
+'
+
+test_expect_success 'git branch -C c1 c2 should succeed when c1 is checked out' '
+ git checkout -b c1 &&
+ git branch c2 &&
+ git branch -C c1 c2 &&
+ test $(git rev-parse --abbrev-ref HEAD) = c1
+'
+
+test_expect_success 'git branch -C c1 c2 should never touch HEAD' '
+ msg="Branch: copied refs/heads/c1 to refs/heads/c2" &&
+ ! grep "$msg$" .git/logs/HEAD
+'
+
+test_expect_success 'git branch -C master should work when master is checked out' '
+ git checkout master &&
+ git branch -C master
+'
+
+test_expect_success 'git branch -C master master should work when master is checked out' '
+ git checkout master &&
+ git branch -C master master
+'
+
+test_expect_success 'git branch -C master5 master5 should work when master is checked out' '
+ git checkout master &&
+ git branch master5 &&
+ git branch -C master5 master5
+'
+
+test_expect_success 'git branch -C ab cd should overwrite existing config for cd' '
+ git branch -l cd &&
+ git reflog exists refs/heads/cd &&
+ git config branch.cd.dummy CD &&
+ git branch -l ab &&
+ git reflog exists refs/heads/ab &&
+ git config branch.ab.dummy AB &&
+ git branch -C ab cd &&
+ git reflog exists refs/heads/ab &&
+ git reflog exists refs/heads/cd &&
+ test $(git config branch.ab.dummy) = AB &&
+ test $(git config branch.cd.dummy) = AB
+'
+
+test_expect_success 'git branch -c correctly copies multiple config sections' '
+ FOO=1 &&
+ export FOO &&
+ test_when_finished "git checkout master" &&
+ git checkout -b source2 master &&
+
+ # Assert that a config file with multiple config sections has
+ # those sections preserved...
+ cat >expect <<-\EOF &&
+ branch.source2.key1=value1
+ branch.dest2.key1=value1
+ more.gar.b=age
+ branch.source2.key2=value2
+ branch.dest2.key2=value2
+ EOF
+ cat >config.branch <<\EOF &&
+;; Note the lack of -\EOF above & mixed indenting here. This is
+;; intentional, we are also testing that the formatting of copied
+;; sections is preserved.
+
+;; Comment for source2. Tabs
+[branch "source2"]
+ ;; Comment for the source2 value
+ key1 = value1
+;; Comment for more.gar. Spaces
+[more "gar"]
+ ;; Comment for the more.gar value
+ b = age
+;; Comment for source2, again. Mixed tabs/spaces.
+[branch "source2"]
+ ;; Comment for the source2 value, again
+ key2 = value2
+EOF
+ cat config.branch >>.git/config &&
+ git branch -c source2 dest2 &&
+ git config -f .git/config -l | grep -F -e source2 -e dest2 -e more.gar >actual &&
+ test_cmp expect actual &&
+
+ # ...and that the comments and formatting for those sections
+ # are also preserved.
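+ # (a copied section also brings along the comment lines that trail it,
+ # up to the next section header, so some comments appear twice below)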
+ cat >expect <<\EOF &&
+;; Comment for source2. Tabs
+[branch "source2"]
+ ;; Comment for the source2 value
+ key1 = value1
+;; Comment for more.gar. Spaces
+[branch "dest2"]
+ ;; Comment for the source2 value
+ key1 = value1
+;; Comment for more.gar. Spaces
+[more "gar"]
+ ;; Comment for the more.gar value
+ b = age
+;; Comment for source2, again. Mixed tabs/spaces.
+[branch "source2"]
+ ;; Comment for the source2 value, again
+ key2 = value2
+[branch "dest2"]
+ ;; Comment for the source2 value, again
+ key2 = value2
+EOF
+ sed -n -e "/Comment for source2/,\$p" .git/config >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'deleting a symref' '
git branch target &&
git symbolic-ref refs/heads/symref refs/heads/target &&
test B = $(git cat-file commit HEAD^ | sed -ne \$p)
'
-cat >expect <<EOF
-Warning: the command isn't recognized in the following line:
- - badcmd $(git rev-list --oneline -1 master~1)
-
-You can fix this with 'git rebase --edit-todo' and then run 'git rebase --continue'.
-Or you can abort the rebase with 'git rebase --abort'.
-EOF
-
test_expect_success 'static check of bad command' '
rebase_setup_and_clean bad-cmd &&
set_fake_editor &&
test_must_fail env FAKE_LINES="1 2 3 bad 4 5" \
git rebase -i --root 2>actual &&
- test_i18ncmp expect actual &&
+ test_i18ngrep "badcmd $(git rev-list --oneline -1 master~1)" actual &&
+ test_i18ngrep "You can fix this with .git rebase --edit-todo.." actual &&
FAKE_LINES="1 2 3 drop 4 5" git rebase --edit-todo &&
git rebase --continue &&
test E = $(git cat-file commit HEAD | sed -ne \$p) &&
test E = $(git cat-file commit HEAD | sed -ne \$p)
'
-cat >expect <<EOF
-Warning: the SHA-1 is missing or isn't a commit in the following line:
- - edit XXXXXXX False commit
-
-You can fix this with 'git rebase --edit-todo' and then run 'git rebase --continue'.
-Or you can abort the rebase with 'git rebase --abort'.
-EOF
-
test_expect_success 'static check of bad SHA-1' '
rebase_setup_and_clean bad-sha &&
set_fake_editor &&
test_must_fail env FAKE_LINES="1 2 edit fakesha 3 4 5 #" \
git rebase -i --root 2>actual &&
- test_i18ncmp expect actual &&
+ test_i18ngrep "edit XXXXXXX False commit" actual &&
+ test_i18ngrep "You can fix this with .git rebase --edit-todo.." actual &&
FAKE_LINES="1 2 4 5 6" git rebase --edit-todo &&
git rebase --continue &&
test E = $(git cat-file commit HEAD | sed -ne \$p)
test 2 = $(git cat-file commit HEAD^ | grep squash | wc -l)
'
+test_expect_success 'autosquash with empty custom instructionFormat' '
+ git reset --hard base &&
+ test_commit empty-instructionFormat-test &&
+ (
+ set_cat_todo_editor &&
+ test_must_fail git -c rebase.instructionFormat= \
+ rebase --autosquash --force -i HEAD^ >actual &&
+ git log -1 --format="pick %h %s" >expect &&
+ test_cmp expect actual
+ )
+'
+
set_backup_editor () {
write_script backup-editor.sh <<-\EOF
cp "$1" .git/backup-"$(basename "$1")"
test_set_editor "$PWD/backup-editor.sh"
}
-test_expect_failure 'autosquash with multiple empty patches' '
+test_expect_success 'autosquash with multiple empty patches' '
test_tick &&
git commit --allow-empty -m "empty" &&
test_tick &&
test $base = $parent
'
+test_expect_success 'wrapped original subject' '
+ if test -d .git/rebase-merge; then git rebase --abort; fi &&
+ base=$(git rev-parse HEAD) &&
+ echo "wrapped subject" >wrapped &&
+ git add wrapped &&
+ test_tick &&
+ git commit --allow-empty -m "$(printf "To\nfixup")" &&
+ test_tick &&
+ git commit --allow-empty -m "fixup! To fixup" &&
+ git rebase -i --autosquash --keep-empty HEAD~2 &&
+ parent=$(git rev-parse HEAD^) &&
+ test $base = $parent
+'
+
test_done
git commit -m "Rearranged lines in dir/sub" &&
git checkout master &&
+ GIT_AUTHOR_DATE="2006-06-26 00:06:00 +0000" &&
+ GIT_COMMITTER_DATE="2006-06-26 00:06:00 +0000" &&
+ export GIT_AUTHOR_DATE GIT_COMMITTER_DATE &&
+ git checkout -b mode initial &&
+ git update-index --chmod=+x file0 &&
+ git commit -m "update mode" &&
+ git checkout -f master &&
+
git config diff.renames false &&
git show-branch
diff-tree --pretty -p side
diff-tree --pretty --patch-with-stat side
+diff-tree initial mode
+diff-tree --stat initial mode
+diff-tree --summary initial mode
+
diff-tree master
diff-tree -p master
diff-tree -p -m master
--- /dev/null
+$ git diff-tree --stat initial mode
+ file0 | 0
+ 1 file changed, 0 insertions(+), 0 deletions(-)
+$
--- /dev/null
+$ git diff-tree --summary initial mode
+ mode change 100644 => 100755 file0
+$
--- /dev/null
+$ git diff-tree initial mode
+:100644 100755 01e79c32a8c99c557f0757da7cb6d65b3414466d 01e79c32a8c99c557f0757da7cb6d65b3414466d M file0
+$
$ git log --decorate=full --all
+commit b7e0bc69303b488b47deca799a7d723971dfa6cd (refs/heads/mode)
+Author: A U Thor <author@example.com>
+Date: Mon Jun 26 00:06:00 2006 +0000
+
+ update mode
+
commit cd4e72fd96faed3f0ba949dc42967430374e2290 (refs/heads/rearrange)
Author: A U Thor <author@example.com>
Date: Mon Jun 26 00:06:00 2006 +0000
$ git log --decorate --all
+commit b7e0bc69303b488b47deca799a7d723971dfa6cd (mode)
+Author: A U Thor <author@example.com>
+Date: Mon Jun 26 00:06:00 2006 +0000
+
+ update mode
+
commit cd4e72fd96faed3f0ba949dc42967430374e2290 (rearrange)
Author: A U Thor <author@example.com>
Date: Mon Jun 26 00:06:00 2006 +0000
git clone . sm3 &&
git -C sm3 diff-tree -p --no-commit-id --submodule=log HEAD >actual &&
cat >expected <<-EOF &&
- Submodule sm1 $smhead1...$smhead2 (not initialized)
+ Submodule sm1 $smhead1...$smhead2 (commits not present)
EOF
test_cmp expected actual
'
test_expect_missing archive-pathspec/ignored-by-tree.d
test_expect_missing archive-pathspec/ignored-by-tree.d/file
test_expect_exists archive-pathspec/ignored-by-worktree
-test_expect_missing archive-pathspec/excluded-by-pathspec.d failure
+test_expect_missing archive-pathspec/excluded-by-pathspec.d
test_expect_missing archive-pathspec/excluded-by-pathspec.d/file
test_expect_success 'git archive with wildcard pathspec' '
test_expect_missing archive/deep/and/slashless/foo &&
test_expect_missing archive/deep/with/wildcard/ &&
test_expect_missing archive/deep/with/wildcard/foo &&
-test_expect_exists archive/one-level-lower/
+test_expect_missing archive/one-level-lower/
test_expect_missing archive/one-level-lower/two-levels-lower/ignored-only-if-dir/
test_expect_missing archive/one-level-lower/two-levels-lower/ignored-ony-if-dir/ignored-by-ignored-dir
git archive --format=tar $root_tree >subtree-all.tar &&
make_dir extract &&
"$TAR" xf subtree-all.tar -C extract &&
- check_dir extract sub
+ check_dir extract
'
test_expect_success 'archive empty subtree by direct pathspec' '
git archive --format=tar $root_tree -- sub >subtree-path.tar &&
make_dir extract &&
"$TAR" xf subtree-path.tar -C extract &&
- check_dir extract sub
+ check_dir extract
'
ZIPINFO=zipinfo
)
'
+test_expect_success 'submodule entry pointing at a tag is error' '
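+ # record a tag object (not a commit) as the gitlink entry for gar/bage
+ # and check that a push with submodule recursion refuses it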
+ git -C work/gar/bage tag -a test1 -m "tag" &&
+ tag=$(git -C work/gar/bage rev-parse test1^{tag}) &&
+ git -C work update-index --cacheinfo 160000 "$tag" gar/bage &&
+ git -C work commit -m "bad commit" &&
+ test_when_finished "git -C work reset --hard HEAD^" &&
+ test_must_fail git -C work push --recurse-submodules=on-demand ../pub.git master 2>err &&
+ test_i18ngrep "is a tag, not a commit" err
+'
+
test_expect_success 'push fails if recurse submodules option passed as yes' '
(
cd work/gar/bage &&
check_describe "test2-lightweight-*" --long --tags --match="test2-*" HEAD^
-check_describe "test1-lightweight-*" --long --tags --match="test1-*" --match="test2-*" HEAD^
+check_describe "test2-lightweight-*" --long --tags --match="test1-*" --match="test2-*" HEAD^
check_describe "test2-lightweight-*" --long --tags --match="test1-*" --no-match --match="test2-*" HEAD^
+check_describe "test1-lightweight-*" --long --tags --match="test1-*" --match="test3-*" HEAD
+
+check_describe "test1-lightweight-*" --long --tags --match="test3-*" --match="test1-*" HEAD
+
+test_expect_success 'set-up branches' '
+ git branch branch_A A &&
+ git branch branch_C c &&
+ git update-ref refs/remotes/origin/remote_branch_A "A^{commit}" &&
+ git update-ref refs/remotes/origin/remote_branch_C "c^{commit}" &&
+ git update-ref refs/original/original_branch_A test-annotated~2
+'
+
+check_describe "heads/branch_A*" --all --match="branch_*" --exclude="branch_C" HEAD
+
+check_describe "remotes/origin/remote_branch_A*" --all --match="origin/remote_branch_*" --exclude="origin/remote_branch_C" HEAD
+
+check_describe "original/original_branch_A*" --all test-annotated~1
+
+test_expect_success '--match does not work for other types' '
+ test_must_fail git describe --all --match="*original_branch_*" test-annotated~1
+'
+
+test_expect_success '--exclude does not work for other types' '
+ R=$(git describe --all --exclude="any_pattern_even_not_matching" test-annotated~1) &&
+ case "$R" in
+ *original_branch_A*) echo "fail: Found unknown reference $R with --exclude"
+ false;;
+ *) echo ok: Found some known type;;
+ esac
+'
+
test_expect_success 'name-rev with exact tags' '
echo A >expect &&
tag_object=$(git rev-parse refs/tags/A) &&
test_cmp expect actual
'
+test_expect_success 'name-rev --all' '
+ >expect.unsorted &&
+ for rev in $(git rev-list --all)
+ do
+ git name-rev $rev >>expect.unsorted
+ done &&
+ sort <expect.unsorted >expect &&
+ git name-rev --all >actual.unsorted &&
+ sort <actual.unsorted >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success 'name-rev --stdin' '
+ >expect.unsorted &&
+ for rev in $(git rev-list --all)
+ do
+ name=$(git name-rev --name-only $rev) &&
+ echo "$rev ($name)" >>expect.unsorted
+ done &&
+ sort <expect.unsorted >expect &&
+ git rev-list --all | git name-rev --stdin >actual.unsorted &&
+ sort <actual.unsorted >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'describe --contains with the exact tags' '
echo "A^0" >expect &&
tag_object=$(git rev-parse refs/tags/A) &&
'
test_expect_success 'describe ignoring a broken submodule' '
git describe --broken >out &&
+ test_when_finished "mv .git/modules/sub_moved .git/modules/sub1" &&
grep broken out
'
+test_expect_failure ULIMIT_STACK_SIZE 'name-rev works in a deep repo' '
+ i=1 &&
+ while test $i -lt 8000
+ do
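+ # emit one fast-import "commit" command per iteration; the stream is
+ # piped into git fast-import below to build a long linear history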
+ echo "commit refs/heads/master
+committer A U Thor <author@example.com> $((1000000000 + $i * 100)) +0200
+data <<EOF
+commit #$i
+EOF"
+ test $i = 1 && echo "from refs/heads/master^0"
+ i=$(($i + 1))
+ done | git fast-import &&
+ git checkout master &&
+ git tag far-far-away HEAD^ &&
+ echo "HEAD~4000 tags/far-far-away~3999" >expect &&
+ git name-rev HEAD~4000 >actual &&
+ test_cmp expect actual &&
+ run_with_limited_stack git name-rev HEAD~4000 >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success ULIMIT_STACK_SIZE 'describe works in a deep repo' '
+ git tag -f far-far-away HEAD~7999 &&
+ echo "far-far-away" >expect &&
+ git describe --tags --abbrev=0 HEAD~4000 >actual &&
+ test_cmp expect actual &&
+ run_with_limited_stack git describe --tags --abbrev=0 HEAD~4000 >actual &&
+ test_cmp expect actual
+'
+
test_done
test_cmp expect actual
'
-test_expect_success 'exclude only no longer errors out' '
+test_expect_success 'exclude only pathspec uses default implicit pathspec' '
git log --oneline --format=%s -- . ":(exclude)sub" >expect &&
git log --oneline --format=%s -- ":(exclude)sub" >actual &&
test_cmp expect actual
test_cmp expect actual
'
+test_expect_success 'multiple exclusions' '
+ git ls-files -- ":^*/file2" ":^sub2" >actual &&
+ cat <<-\EOF >expect &&
+ file
+ sub/file
+ sub/sub/file
+ sub/sub/sub/file
+ EOF
+ test_cmp expect actual
+'
+
test_done
test_cmp actual expect
'
+test_expect_failure 'moving nested submodules' '
+ git commit -am "cleanup commit" &&
+ mkdir sub_nested_nested &&
+ (cd sub_nested_nested &&
+ touch nested_level2 &&
+ git init &&
+ git add . &&
+ git commit -m "nested level 2"
+ ) &&
+ mkdir sub_nested &&
+ (cd sub_nested &&
+ touch nested_level1 &&
+ git init &&
+ git add . &&
+ git commit -m "nested level 1"
+ git submodule add ../sub_nested_nested &&
+ git commit -m "add nested level 2"
+ ) &&
+ git submodule add ./sub_nested nested_move &&
+ git commit -m "add nested_move" &&
+ git submodule update --init --recursive &&
+ git mv nested_move sub_nested_moved &&
+ git status
+'
+
test_done
git tag -l --sort=version:refname
'
-run_with_limited_stack () {
- (ulimit -s 128 && "$@")
-}
-
-test_lazy_prereq ULIMIT_STACK_SIZE 'run_with_limited_stack true'
-
-# we require ulimit, this excludes Windows
test_expect_success ULIMIT_STACK_SIZE '--contains and --no-contains work in a deep repo' '
>expect &&
i=1 &&
)
'
+test_expect_success 'submodule update - command in .gitmodules is ignored' '
+ test_when_finished "git -C super reset --hard HEAD^" &&
+ git -C super config -f .gitmodules submodule.submodule.update "!false" &&
+ git -C super commit -a -m "add command to .gitmodules file" &&
+ git -C super/submodule reset --hard $submodulesha1^ &&
+ git -C super submodule update submodule
+'
+
cat << EOF >expect
Execution of 'false $submodulesha1' failed in submodule path 'submodule'
EOF
test_i18ngrep ! "Initial commit" output
'
+test_expect_success '--no-optional-locks prevents index update' '
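+ # backdate the index, then check that "git status" leaves its mtime
+ # alone when optional locks are disabled, but refreshes it otherwise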
+ test-chmtime =1234567890 .git/index &&
+ git --no-optional-locks status &&
+ test-chmtime -v +0 .git/index >out &&
+ grep ^1234567890 out &&
+ git status &&
+ test-chmtime -v +0 .git/index >out &&
+ ! grep ^1234567890 out
+'
+
test_done
grep "path.*needs.*filters" err
'
+test_expect_success '--textconv/--filters complain without path' '
+ test_must_fail git cat-file --textconv HEAD &&
+ test_must_fail git cat-file --filters HEAD
+'
+
test_expect_success 'cat-file --textconv --batch works' '
sha1=$(git rev-parse -q --verify HEAD:world.txt) &&
test_config diff.txt.textconv "tr A-Za-z N-ZA-Mn-za-m <" &&
. ./test-lib.sh
+if test_have_prereq !PIPE
+then
+ skip_all="svn dumpfile importer testing requires the PIPE prerequisite"
+ test_done
+fi
+
reinit_git () {
- if ! test_declared_prereq PIPE
- then
- echo >&4 "reinit_git: need to declare PIPE prerequisite"
- return 127
- fi
rm -fr .git &&
rm -f stream backflow &&
git init &&
>empty
-test_expect_success PIPE 'empty dump' '
+test_expect_success 'empty dump' '
reinit_git &&
echo "SVN-fs-dump-format-version: 2" >input &&
try_dump input
'
-test_expect_success PIPE 'v4 dumps not supported' '
+test_expect_success 'v4 dumps not supported' '
reinit_git &&
echo "SVN-fs-dump-format-version: 4" >v4.dump &&
try_dump v4.dump must_fail
'
-test_expect_failure PIPE 'empty revision' '
+test_expect_failure 'empty revision' '
reinit_git &&
printf "rev <nobody, nobody@local>: %s\n" "" "" >expect &&
cat >emptyrev.dump <<-\EOF &&
test_cmp expect actual
'
-test_expect_success PIPE 'empty properties' '
+test_expect_success 'empty properties' '
reinit_git &&
printf "rev <nobody, nobody@local>: %s\n" "" "" >expect &&
cat >emptyprop.dump <<-\EOF &&
test_cmp expect actual
'
-test_expect_success PIPE 'author name and commit message' '
+test_expect_success 'author name and commit message' '
reinit_git &&
echo "<author@example.com, author@example.com@local>" >expect.author &&
cat >message <<-\EOF &&
test_cmp expect.author actual.author
'
-test_expect_success PIPE 'unsupported properties are ignored' '
+test_expect_success 'unsupported properties are ignored' '
reinit_git &&
echo author >expect &&
cat >extraprop.dump <<-\EOF &&
test_cmp expect actual
'
-test_expect_failure PIPE 'timestamp and empty file' '
+test_expect_failure 'timestamp and empty file' '
echo author@example.com >expect.author &&
echo 1999-01-01 >expect.date &&
echo file >expect.files &&
test_cmp empty file
'
-test_expect_success PIPE 'directory with files' '
+test_expect_success 'directory with files' '
reinit_git &&
printf "%s\n" directory/file1 directory/file2 >expect.files &&
echo hi >hi &&
test_cmp hi directory/file2
'
-test_expect_success PIPE 'branch name with backslash' '
+test_expect_success 'branch name with backslash' '
reinit_git &&
sort <<-\EOF >expect.branch-files &&
trunk/file1
test_cmp expect.branch-files actual.branch-files
'
-test_expect_success PIPE 'node without action' '
+test_expect_success 'node without action' '
reinit_git &&
cat >inaction.dump <<-\EOF &&
SVN-fs-dump-format-version: 3
try_dump inaction.dump must_fail
'
-test_expect_success PIPE 'action: add node without text' '
+test_expect_success 'action: add node without text' '
reinit_git &&
cat >textless.dump <<-\EOF &&
SVN-fs-dump-format-version: 3
try_dump textless.dump must_fail
'
-test_expect_failure PIPE 'change file mode but keep old content' '
+test_expect_failure 'change file mode but keep old content' '
reinit_git &&
cat >expect <<-\EOF &&
OBJID
test_cmp hello actual.target
'
-test_expect_success PIPE 'NUL in property value' '
+test_expect_success 'NUL in property value' '
reinit_git &&
echo "commit message" >expect.message &&
{
test_cmp expect.message actual.message
'
-test_expect_success PIPE 'NUL in log message, file content, and property name' '
+test_expect_success 'NUL in log message, file content, and property name' '
# Caveat: svnadmin 1.6.16 (r1073529) truncates at \0 in the
# svn:specialQnotreally example.
reinit_git &&
test_cmp expect.hello2 actual.hello2
'
-test_expect_success PIPE 'change file mode and reiterate content' '
+test_expect_success 'change file mode and reiterate content' '
reinit_git &&
cat >expect <<-\EOF &&
OBJID
test_cmp hello actual.target
'
-test_expect_success PIPE 'deltas supported' '
+test_expect_success 'deltas supported' '
reinit_git &&
{
# (old) h + (inline) ello + (old) \n
try_dump delta.dump
'
-test_expect_success PIPE 'property deltas supported' '
+test_expect_success 'property deltas supported' '
reinit_git &&
cat >expect <<-\EOF &&
OBJID
test_cmp expect actual
'
-test_expect_success PIPE 'properties on /' '
+test_expect_success 'properties on /' '
reinit_git &&
cat <<-\EOF >expect &&
OBJID
test_cmp expect actual
'
-test_expect_success PIPE 'deltas for typechange' '
+test_expect_success 'deltas for typechange' '
reinit_git &&
cat >expect <<-\EOF &&
OBJID
test_cmp expect actual
'
-test_expect_success PIPE 'deltas need not consume the whole preimage' '
+test_expect_success 'deltas need not consume the whole preimage' '
reinit_git &&
cat >expect <<-\EOF &&
OBJID
test_cmp expect.3 actual.3
'
-test_expect_success PIPE 'no hang for delta trying to read past end of preimage' '
+test_expect_success 'no hang for delta trying to read past end of preimage' '
reinit_git &&
{
# COPY 1
fi
'
-test_expect_success SVNREPO,PIPE 't9135/svn.dump' '
+test_expect_success SVNREPO 't9135/svn.dump' '
mkdir -p simple-git &&
(
cd simple-git &&
compare_diff_raw expect actual
'
+###
+### series V (checkpoint)
+###
+
+# The commands in input_file should not produce any output on the file
+# descriptor set with --cat-blob-fd (or stdout if unspecified).
+#
+# To make sure you're observing the side effects of checkpoint *before*
+# fast-import terminates (and thus writes out its state), check that the
+# fast-import process is still running using background_import_still_running
+# *after* evaluating the test conditions.
+background_import_then_checkpoint () {
+ options=$1
+ input_file=$2
+
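+ # open each FIFO read-write on a numbered descriptor and unlink it
+ # right away; the descriptor keeps the pipe usable without leaving a
+ # file behind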
+ mkfifo V.input
+ exec 8<>V.input
+ rm V.input
+
+ mkfifo V.output
+ exec 9<>V.output
+ rm V.output
+
+ git fast-import $options <&8 >&9 &
+ echo $! >V.pid
+ # We don't mind if fast-import has already died by the time the test
+ # ends.
+ test_when_finished "exec 8>&-; exec 9>&-; kill $(cat V.pid) || true"
+
+ # Start in the background to ensure we adhere strictly to (blocking)
+ # pipes writing sequence. We want to assume that the write below could
+ # block, e.g. if fast-import blocks writing its own output to &9
+ # because there is no reader on &9 yet.
+ (
+ cat "$input_file"
+ echo "checkpoint"
+ echo "progress checkpoint"
+ ) >&8 &
+
+ error=1 ;# assume the worst
+ while read output <&9
+ do
+ if test "$output" = "progress checkpoint"
+ then
+ error=0
+ break
+ fi
+ # otherwise ignore cruft
+ echo >&2 "cruft: $output"
+ done
+
+ if test $error -eq 1
+ then
+ false
+ fi
+}
+
+background_import_still_running () {
+ if ! kill -0 "$(cat V.pid)"
+ then
+ echo >&2 "background fast-import terminated too early"
+ false
+ fi
+}
+
+test_expect_success PIPE 'V: checkpoint helper does not get stuck with extra output' '
+ cat >input <<-INPUT_END &&
+ progress foo
+ progress bar
+
+ INPUT_END
+
+ background_import_then_checkpoint "" input &&
+ background_import_still_running
+'
+
+test_expect_success PIPE 'V: checkpoint updates refs after reset' '
+ cat >input <<-\INPUT_END &&
+ reset refs/heads/V
+ from refs/heads/U
+
+ INPUT_END
+
+ background_import_then_checkpoint "" input &&
+ test "$(git rev-parse --verify V)" = "$(git rev-parse --verify U)" &&
+ background_import_still_running
+'
+
+test_expect_success PIPE 'V: checkpoint updates refs and marks after commit' '
+ cat >input <<-INPUT_END &&
+ commit refs/heads/V
+ mark :1
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data 0
+ from refs/heads/U
+
+ INPUT_END
+
+ background_import_then_checkpoint "--export-marks=marks.actual" input &&
+
+ echo ":1 $(git rev-parse --verify V)" >marks.expected &&
+
+ test "$(git rev-parse --verify V^)" = "$(git rev-parse --verify U)" &&
+ test_cmp marks.expected marks.actual &&
+ background_import_still_running
+'
+
+# Re-create the exact same commit, but on a different branch: no new object is
+# created in the database, but the refs and marks still need to be updated.
+test_expect_success PIPE 'V: checkpoint updates refs and marks after commit (no new objects)' '
+ cat >input <<-INPUT_END &&
+ commit refs/heads/V2
+ mark :2
+ committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data 0
+ from refs/heads/U
+
+ INPUT_END
+
+ background_import_then_checkpoint "--export-marks=marks.actual" input &&
+
+ echo ":2 $(git rev-parse --verify V2)" >marks.expected &&
+
+ test "$(git rev-parse --verify V2)" = "$(git rev-parse --verify V)" &&
+ test_cmp marks.expected marks.actual &&
+ background_import_still_running
+'
+
+test_expect_success PIPE 'V: checkpoint updates tags after tag' '
+ cat >input <<-INPUT_END &&
+ tag Vtag
+ from refs/heads/V
+ tagger $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
+ data 0
+
+ INPUT_END
+
+ background_import_then_checkpoint "" input &&
+ git show-ref -d Vtag &&
+ background_import_still_running
+'
+
test_done
mkdir new &&
git --git-dir=new/.git init &&
git fast-export -C -C --signed-tags=strip --all > output &&
- grep "^C file6 file7\$" output &&
+ grep "^C file2 file4\$" output &&
cat output |
(cd new &&
git fast-import &&
test_cmp expected actual
'
+test_expect_success 'when using -C, do not declare copy when source of copy is also modified' '
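+ # copy file.txt to file2.txt and modify file.txt in the same commit,
+ # then make sure an export with copy detection round-trips through
+ # fast-import unchanged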
+ test_create_repo src &&
+ echo a_line >src/file.txt &&
+ git -C src add file.txt &&
+ git -C src commit -m 1st_commit &&
+
+ cp src/file.txt src/file2.txt &&
+ echo another_line >>src/file.txt &&
+ git -C src add file.txt file2.txt &&
+ git -C src commit -m 2nd_commit &&
+
+ test_create_repo dst &&
+ git -C src fast-export --all -C | git -C dst fast-import &&
+ git -C src show >expected &&
+ git -C dst show >actual &&
+ test_cmp expected actual
+'
+
test_done
test_cmp ../expect ../actual
'
+#------------
+# running via git-shell
+#------------
+
+cd "$WORKDIR"
+
+test_expect_success 'create remote-cvs helper' '
+ write_script remote-cvs <<-\EOF
+ exec git shell -c "cvs server"
+ EOF
+'
+
+test_expect_success 'cvs server does not run with vanilla git-shell' '
+ (
+ cd cvswork &&
+ CVS_SERVER=$WORKDIR/remote-cvs &&
+ export CVS_SERVER &&
+ test_must_fail cvs log merge
+ )
+'
+
+test_expect_success 'configure git shell to run cvs server' '
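+ # git-shell only runs extra commands that exist as executables in
+ # ~/git-shell-commands, so provide a "cvs" wrapper there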
+ mkdir "$HOME"/git-shell-commands &&
+
+ write_script "$HOME"/git-shell-commands/cvs <<-\EOF &&
+ if ! test $# = 1 && test "$1" = "server"
+ then
+ echo >&2 "git-cvsserver only handles \"server\""
+ exit 1
+ fi
+ exec git cvsserver server
+ EOF
+
+ # Should not be used, but part of the recommended setup
+ write_script "$HOME"/git-shell-commands/no-interactive-login <<-\EOF
+ echo Interactive login forbidden
+ EOF
+'
+
+test_expect_success 'cvs server can run with recommended config' '
+ (
+ cd cvswork &&
+ CVS_SERVER=$WORKDIR/remote-cvs &&
+ export CVS_SERVER &&
+ cvs log merge
+ )
+'
+
test_done
test_lazy_prereq PIPE '
# test whether the filesystem supports FIFOs
- case $(uname -s) in
- CYGWIN*|MINGW*)
- false
- ;;
- *)
- rm -f testfifo && mkfifo testfifo
- ;;
- esac
+ test_have_prereq !MINGW,!CYGWIN &&
+ rm -f testfifo && mkfifo testfifo
'
test_lazy_prereq SYMLINKS '
(ulimit -s 128 && "$@")
}
-test_lazy_prereq CMDLINE_LIMIT 'run_with_limited_cmdline true'
+test_lazy_prereq CMDLINE_LIMIT '
+ test_have_prereq !MINGW,!CYGWIN &&
+ run_with_limited_cmdline true
+'
+
+run_with_limited_stack () {
+ (ulimit -s 128 && "$@")
+}
+
+test_lazy_prereq ULIMIT_STACK_SIZE '
+ test_have_prereq !MINGW,!CYGWIN &&
+ run_with_limited_stack true
+'
build_option () {
git version --build-options |
{
if (debug)
fprintf(stderr, "Debug: Remote helper: -> %s", buffer->buf);
- if (write_in_full(helper->helper->in, buffer->buf, buffer->len)
- != buffer->len)
+ if (write_in_full(helper->helper->in, buffer->buf, buffer->len) < 0)
die_errno("Full write to remote helper failed");
}
{
if (debug)
fprintf(stderr, "Debug: Remote helper: -> %s", str);
- if (write_in_full(fd, str, strlen(str)) != strlen(str))
+ if (write_in_full(fd, str, strlen(str)) < 0)
die_errno("Full write to remote helper failed");
}
int retval = MISSING_OBJECT;
struct dir_state *parents = NULL;
size_t parents_alloc = 0;
- ssize_t parents_nr = 0;
+ size_t i, parents_nr = 0;
unsigned char current_tree_sha1[20];
struct strbuf namebuf = STRBUF_INIT;
struct tree_desc t;
int follows_remaining = GET_TREE_ENTRY_FOLLOW_SYMLINKS_MAX_LINKS;
- int i;
init_tree_desc(&t, NULL, 0UL);
strbuf_addstr(&namebuf, name);
}
shallow_nr += shallows.nr;
- free(shallows.objects);
+ object_array_clear(&shallows);
}
/* return non-zero if the ref is hidden, otherwise 0 */
"|//|\\*\\*|::|[/<>=]="),
IPATTERN("fountain", "^((\\.[^.]|(int|ext|est|int\\.?/ext|i/e)[. ]).*)$",
"[^ \t-]+"),
-PATTERNS("html", "^[ \t]*(<[Hh][1-6][ \t].*>.*)$",
+PATTERNS("html", "^[ \t]*(<[Hh][1-6]([ \t].*)?>.*)$",
"[^<>= \t]+"),
PATTERNS("java",
"!^[ \t]*(catch|do|for|if|instanceof|new|return|switch|throw|while)\n"
void write_file_buf(const char *path, const char *buf, size_t len)
{
int fd = xopen(path, O_WRONLY | O_CREAT | O_TRUNC, 0666);
- if (write_in_full(fd, buf, len) != len)
+ if (write_in_full(fd, buf, len) < 0)
die_errno(_("could not write to %s"), path);
if (close(fd))
die_errno(_("could not close %s"), path);
void wt_status_add_cut_line(FILE *fp)
{
- const char *explanation = _("Do not touch the line above.\nEverything below will be removed.");
+ const char *explanation = _("Do not modify or remove the line above.\nEverything below it will be ignored.");
struct strbuf buf = STRBUF_INIT;
fprintf(fp, "%c %s", comment_line_char, cut_line);