/git-init-db
/git-interpret-trailers
/git-instaweb
+/git-legacy-rebase
/git-log
/git-ls-files
/git-ls-remote
/git-read-tree
/git-rebase
/git-rebase--am
+/git-rebase--common
/git-rebase--interactive
/git-rebase--merge
/git-rebase--preserve-merges
is used as a short-hand for "--create-reflog" and warns about the
future repurposing of it when it is used.
+ * The userdiff pattern for .php has been updated.
+
+ * The content-transfer-encoding of the message "git send-email" sends
+ out by default was 8bit, which can cause trouble when there is an
+ overlong line that busts the RFC 5322/2822 limit. A new option 'auto' to
+ automatically switch to quoted-printable when there is such a line
+ in the payload has been introduced and is made the default.
+
+ * "git checkout" and "git worktree add" learned to honor
+ checkout.defaultRemote when auto-vivifying a local branch out of a
+ remote tracking branch in a repository with multiple remotes that
+ have tracking branches that share the same names.
+ (merge 8d7b558bae ab/checkout-default-remote later to maint).
+
+ * "git grep" learned the "--only-matching" option.
+
+ * "git rebase --rebase-merges" mode now handles octopus merges as
+ well.
+
+ * Add a server-side knob to skip commits in exponential/Fibonacci
+ stride in an attempt to cover a wider swath of history with a smaller
+ number of iterations, potentially accepting a larger packfile
+ transfer, instead of going back one commit at a time during common
+ ancestor discovery in the "git fetch" transaction.
+ (merge 42cc7485a2 jt/fetch-negotiator-skipping later to maint).
+
Performance, Internal Implementation, Development Support etc.
file, even though it shares the same syntax with configuration
files, to read random configuration items from it.
+ * "git fast-import" has been updated to avoid attempting to create
+ a delta against a zero-byte-long string, which is pointless.
+
+ * The codebase has been updated to compile cleanly with the -pedantic
+ option.
+ (merge 2b647a05d7 bb/pedantic later to maint).
+
+ * The character display width table has been updated to match the
+ latest Unicode standard.
+ (merge 570951eea2 bb/unicode-11-width later to maint).
+
+ * test-lint now looks for broken use of "VAR=VAL shell_func" in test
+ scripts.
+
+ * Conversion from uchar[40] to struct object_id continues.
+
+ * Recent "security fix" to pay attention to contents of ".gitmodules"
+ while accepting "git push" was a bit overly strict than necessary,
+ which has been adjusted.
+
+ * "git fsck" learns to make sure the optional commit-graph file is in
+ a sane state.
+
+ * "git diff --color-moved" feature has further been tweaked.
+
+ * Code restructuring and a small fix to transport protocol v2 during
+ fetching.
+
+ * Parsing of the -L[<N>][,[<M>]] parameters that "git blame" and
+ "git log" take has been tweaked.
+
+ * lookup_commit_reference() and friends have been updated to find
+ in-core objects for a specific in-core repository instance.
+
+ * Various glitches in the heuristics of merge-recursive strategy have
+ been documented in new tests.
+
+ * "git fetch" learned a new option "--negotiation-tip" to limit the
+ set of commits it tells the other end as "have", to reduce wasted
+ bandwidth and cycles, which would be helpful when the receiving
+ repository has a lot of refs that have little to do with the
+ history at the remote it is fetching from.
+
+ * For a large tree, the index needs to hold many cache entries
+ allocated on heap. These cache entries are now allocated out of a
+ dedicated memory pool to amortize malloc(3) overhead.
+
+ * Tests to cover various conflicting cases have been added for
+ merge-recursive.
+
+ * Tests to cover conflict cases that involve submodules have been
+ added for merge-recursive.
+
+ * Look for broken "&&" chains that are hidden in subshells, many of
+ which have been found and corrected.
+
+ * The singleton commit-graph in-core instance is made per in-core
+ repository instance.
+
Fixes since v2.18
-----------------
not turn a case-incapable filesystem into a case-capable one.
(merge 48294b512a ms/core-icase-doc later to maint).
+ * "fsck.skipList" did not prevent a blob object listed there from
+ being inspected for its contents (e.g. we recently started to
+ inspect the contents of ".gitmodules" for certain malicious
+ patterns), which has been corrected.
+ (merge fb16287719 rj/submodule-fsck-skip later to maint).
+
+ * "git checkout --recurse-submodules another-branch" did not report
+ in which submodule it failed to update the working tree, which
+ resulted in an unhelpful error message.
+ (merge ba95d4e4bd sb/submodule-move-head-error-msg later to maint).
+
+ * "git rebase" behaved slightly differently depending on which one of
+ the three backends gets used; this has been documented and an
+ effort to make them more uniform has begun.
+ (merge b00bf1c9a8 en/rebase-consistency later to maint).
+
+ * The "--ignore-case" option of "git for-each-ref" (and its friends)
+ did not work correctly, which has been fixed.
+ (merge e674eb2528 jk/for-each-ref-icase later to maint).
+
+ * "git fetch" failed to correctly validate the set of objects it
+ received when making a shallow history deeper, which has been
+ corrected.
+ (merge cf1e7c0770 jt/connectivity-check-after-unshallow later to maint).
+
+ * Partial clone support of "git clone" has been updated to correctly
+ validate the objects it receives from the other side. The server
+ side has been corrected to send objects that are directly
+ requested, even if they may match the filtering criteria (e.g. when
+ doing a "lazy blob" partial clone).
+ (merge a7e67c11b8 jt/partial-clone-fsck-connectivity later to maint).
+
+ * Handling of an empty range by "git cherry-pick" was inconsistent
+ depending on how the range ended up being empty, which has been
+ corrected.
+ (merge c5e358d073 jk/empty-pick-fix later to maint).
+
+ * "git reset --merge" (hence "git merge ---abort") and "git reset --hard"
+ had trouble working correctly in a sparsely checked out working
+ tree after a conflict, which has been corrected.
+ (merge b33fdfc34c mk/merge-in-sparse-checkout later to maint).
+
+ * Correct a broken use of "VAR=VAL shell_func" in a test.
+ (merge 650161a277 jc/t3404-one-shot-export-fix later to maint).
+
+ * "git rev-parse ':/substring'" did not consider the history leading
+ only to HEAD when looking for a commit with the given substring,
+ when the HEAD is detached. This has been fixed.
+ (merge 6b3351e799 wc/find-commit-with-pattern-on-detached-head later to maint).
+
+ * Build doc update for Windows.
+ (merge ede8d89bb1 nd/command-list later to maint).
+
+ * core.commentchar is now honored when preparing the list of commits
+ to replay in "rebase -i".
+
+ * "git pull --rebase" on a corrupt HEAD caused a segfault. In
+ general we substitute an empty tree object when running the in-core
+ equivalent of the diff-index command, and the codepath has been
+ corrected to do so as well to fix this issue.
+ (merge 3506dc9445 jk/has-uncommitted-changes-fix later to maint).
+
+ * The httpd tests saw occasional breakage due to the way their access
+ log gets inspected, which has been updated to make them less flaky.
+ (merge e8b3b2e275 sg/httpd-test-unflake later to maint).
+
+ * Tests to cover more D/F conflict cases have been added for
+ merge-recursive.
+
+ * "git gc --auto" opens file descriptors for the packfiles before
+ spawning "git repack/prune", which would upset Windows that does
+ not want a process to work on a file that is open by another
+ process. The issue has been worked around.
+ (merge 12e73a3ce4 kg/gc-auto-windows-workaround later to maint).
+
+ * The recursive merge strategy did not properly ensure there was no
+ change between HEAD and the index before performing its operation,
+ which has been corrected.
+ (merge 55f39cf755 en/dirty-merge-fixes later to maint).
+
+ * "git rebase" started exporting GIT_DIR environment variable and
+ exposing it to hook scripts when part of it got rewritten in C.
+ Instead of matching the old scripted Porcelains' behaviour,
+ compensate by also exporting the GIT_WORK_TREE environment variable
+ to lessen the damage. This can harm existing hooks that want to
+ operate on a different repository, but the current behaviour is
+ already broken for them anyway.
+ (merge ab5e67d751 bc/sequencer-export-work-tree-as-well later to maint).
+
+ * "git send-email" when using in a batched mode that limits the
+ number of messages sent in a single SMTP session lost the contents
+ of the variable used to choose between tls/ssl, unable to send the
+ second and later batches, which has been fixed.
+ (merge 636f3d7ac5 jm/send-email-tls-auth-on-batch later to maint).
+
+ * The lazy clone support had a few places where missing but promised
+ objects were not correctly tolerated, which have been fixed.
+
* Code cleanup, docfix, build fix, etc.
(merge aee9be2ebe sg/update-ref-stdin-cleanup later to maint).
(merge 037714252f jc/clean-after-sanity-tests later to maint).
(merge 51d1863168 tz/exclude-doc-smallfixes later to maint).
(merge a9aa3c0927 ds/commit-graph later to maint).
(merge 5cf8e06474 js/enhanced-version-info later to maint).
+ (merge 6aaded5509 tb/config-default later to maint).
+ (merge 022d2ac1f3 sb/blame-color later to maint).
+ (merge 5a06a20e0c bp/test-drop-caches-for-windows later to maint).
Advice shown when you used linkgit:git-checkout[1] to
move to the detached HEAD state, to instruct how to create
a local branch after the fact.
+ checkoutAmbiguousRemoteBranchName::
+ Advice shown when the argument to
+ linkgit:git-checkout[1] ambiguously resolves to a
+ remote tracking branch on more than one remote in
+ situations where an unambiguous argument would have
+ otherwise caused a remote-tracking branch to be
+ checked out. See the `checkout.defaultRemote`
+ configuration variable for how to set a given remote
+ to be used by default in some situations where this
+ advice would be printed.
amWorkDir::
Advice that shows the location of the patch file when
linkgit:git-am[1] fails to apply it.
This setting defaults to "refs/notes/commits", and it can be overridden by
the `GIT_NOTES_REF` environment variable. See linkgit:git-notes[1].
-core.commitGraph::
- Enable git commit graph feature. Allows reading from the
- commit-graph file.
+gc.commitGraph::
+ If true, then gc will rewrite the commit-graph file when
+ linkgit:git-gc[1] is run. When using linkgit:git-gc[1]
+ '--auto' the commit-graph will be updated if housekeeping is
+ required. Default is false. See linkgit:git-commit-graph[1]
+ for details.
core.sparseCheckout::
Enable "sparse checkout" feature. See section "Sparse checkout" in
browse HTML help (see `-w` option in linkgit:git-help[1]) or a
working repository in gitweb (see linkgit:git-instaweb[1]).
+checkout.defaultRemote::
+ When you run 'git checkout <something>' and only have one
+ remote, it may implicitly fall back on checking out and
+ tracking e.g. 'origin/<something>'. This stops working as soon
+ as you have more than one remote with a '<something>'
+ reference. This setting allows for setting the name of a
+ preferred remote that should always win when it comes to
+ disambiguation. The typical use-case is to set this to
+ `origin`.
++
+Currently this is used by linkgit:git-checkout[1] when 'git checkout
+<something>' will check out the '<something>' branch on another remote,
+and by linkgit:git-worktree[1] when 'git worktree add' refers to a
+remote branch. This setting might be used for other checkout-like
+commands or functionality in the future.
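++
+For example (a sketch; it assumes you have a remote named `origin`),
+this preference can be recorded with:
++
+------------
+$ git config checkout.defaultRemote origin
+------------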
+
clean.requireForce::
A boolean to make git-clean do nothing unless given -f,
-i or -n. Defaults to true.
true the default color mode will be used. When set to false,
moved lines are not colored.
+diff.colorMovedWS::
+ When moved lines are colored using e.g. the `diff.colorMoved` setting,
+ this option controls how whitespace differences are treated (the
+ `<mode>`); for details of valid modes see '--color-moved-ws' in
+ linkgit:git-diff[1].
+
color.diff.<slot>::
Use customized color for diff colorization. `<slot>` specifies
which part of the patch to use the specified color, and is one
`full` and `compact`. Default value is `full`. See section
OUTPUT in linkgit:git-fetch[1] for detail.
+fetch.negotiationAlgorithm::
+ Control how information about the commits in the local repository is
+ sent when negotiating the contents of the packfile to be sent by the
+ server. Set to "skipping" to use an algorithm that skips commits in an
+ effort to converge faster, but may result in a larger-than-necessary
+ packfile; any other value instructs Git to use the default algorithm
+ that never skips commits (unless the server has acknowledged it or one
+ of its descendants).
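++
+For example (illustrative only), the skipping algorithm can be enabled
+for a repository with:
++
+------------
+$ git config fetch.negotiationAlgorithm skipping
+------------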
+
format.attach::
Enable multipart/mixed attachments as the default for
'format-patch'. The value can also be a double quoted string
repository-level config (this is a safety measure against fetching from
untrusted repositories).
+uploadpack.allowRefInWant::
+ If this option is set, `upload-pack` will support the `ref-in-want`
+ feature of the protocol version 2 `fetch` command. This feature
+ is intended for the benefit of load-balanced servers which may
+ not have the same view of what OIDs their refs point to due to
+ replication delay.
+
url.<base>.insteadOf::
Any URL that starts with this value will be rewritten to
start, instead, with <base>. In cases where some site serves a
that are added somewhere else in the diff. This mode picks up any
moved line, but it is not very useful in a review to determine
if a block of code was moved without permutation.
-zebra::
+blocks::
Blocks of moved text of at least 20 alphanumeric characters
are detected greedily. The detected blocks are
- painted using either the 'color.diff.{old,new}Moved' color or
+ painted using the 'color.diff.{old,new}Moved' color.
+ Adjacent blocks cannot be told apart.
+zebra::
+ Blocks of moved text are detected as in 'blocks' mode. The blocks
+ are painted using either the 'color.diff.{old,new}Moved' color or
'color.diff.{old,new}MovedAlternative'. The change between
the two colors indicates that a new block was detected.
dimmed_zebra::
blocks are considered interesting, the rest is uninteresting.
--
+--color-moved-ws=<modes>::
+ This configures how whitespace is ignored when performing the
+ move detection for `--color-moved`.
+ifdef::git-diff[]
+ It can be set by the `diff.colorMovedWS` configuration setting.
+endif::git-diff[]
+ These modes can be given as a comma-separated list (example below):
++
+--
+ignore-space-at-eol::
+ Ignore changes in whitespace at EOL.
+ignore-space-change::
+ Ignore changes in amount of whitespace. This ignores whitespace
+ at line end, and considers all other sequences of one or
+ more whitespace characters to be equivalent.
+ignore-all-space::
+ Ignore whitespace when comparing lines. This ignores differences
+ even if one line has whitespace where the other line has none.
+allow-indentation-change::
+ Initially ignore any whitespace in the move detection, then
+ group the moved code blocks into a block only if the change in
+ whitespace is the same per line. This is incompatible with the
+ other modes.
+--
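++
+For example (a sketch; the mode chosen here is only illustrative), moved
+code that was merely re-indented can be spotted with:
++
+------------
+$ git diff --color-moved=blocks --color-moved-ws=allow-indentation-change
+------------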
+
--word-diff[=<mode>]::
Show a word diff, using the <mode> to delimit changed words.
By default, words are delimited by whitespace; see
.git/shallow. This option updates .git/shallow and accepts such
refs.
+--negotiation-tip=<commit|glob>::
+ By default, Git will report, to the server, commits reachable
+ from all local refs to find common commits in an attempt to
+ reduce the size of the to-be-received packfile. If specified,
+ Git will only report commits reachable from the given tips.
+ This is useful to speed up fetches when the user knows which
+ local ref is likely to have commits in common with the
+ upstream ref being fetched.
++
+This option may be specified more than once; if so, Git will report
+commits reachable from any of the given commits.
++
+The argument to this option may be a glob on ref names, a ref, or the (possibly
+abbreviated) SHA-1 of a commit. Specifying a glob is equivalent to specifying
+this option multiple times, one for each matching ref name.
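++
+For example (names are placeholders; it assumes a remote `origin` and a
+local branch `master` that shares history with it):
++
+------------
+$ git fetch --negotiation-tip=master origin
+------------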
+
ifndef::git-pull[]
--dry-run::
Show what would be done, without making any changes.
$ git checkout -b <branch> --track <remote>/<branch>
------------
+
+If the branch exists in multiple remotes and one of them is named by
+the `checkout.defaultRemote` configuration variable, we'll use that
+one for the purposes of disambiguation, even if the `<branch>` isn't
+unique across all remotes. Set it to
+e.g. `checkout.defaultRemote=origin` to always check out remote
+branches from there if `<branch>` is ambiguous but exists on the
+'origin' remote. See also `checkout.defaultRemote` in
+linkgit:git-config[1].
++
You could omit <branch>, in which case the command degenerates to
"check out the current branch", which is a glorified no-op with
rather expensive side-effects to show only the tracking information,
--------
[verse]
'git commit-graph read' [--object-dir <dir>]
+'git commit-graph verify' [--object-dir <dir>]
'git commit-graph write' <options> [--object-dir <dir>]
+
With the `--stdin-packs` option, generate the new commit graph by
walking objects only in the specified pack-indexes. (Cannot be combined
-with --stdin-commits.)
+with `--stdin-commits` or `--reachable`.)
+
With the `--stdin-commits` option, generate the new commit graph by
walking commits starting at the commits specified in stdin as a list
of OIDs in hex, one OID per line. (Cannot be combined with
---stdin-packs.)
+`--stdin-packs` or `--reachable`.)
++
+With the `--reachable` option, generate the new commit graph by walking
+commits starting at all refs. (Cannot be combined with `--stdin-commits`
+or `--stdin-packs`.)
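++
+For example (a sketch), the commit graph covering everything reachable
+from the refs can be (re)written and then checked with:
++
+------------
+$ git commit-graph write --reachable
+$ git commit-graph verify
+------------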
+
With the `--append` option, include all commits that are present in the
existing commit-graph file.
Read a graph file given by the commit-graph file and output basic
details about the graph file. Used for debugging purposes.
+'verify'::
+
+Read the commit-graph file and verify its contents against the object
+database. Used to check for corrupted data.
+
EXAMPLES
--------
(i.e., you can just remove them and do an 'rsync' with some other site in
the hopes that somebody else has the object you have corrupted).
+If core.commitGraph is true, the commit-graph file will also be inspected
+using 'git commit-graph verify'. See linkgit:git-commit-graph[1].
+
Extracted Diagnostics
---------------------
it within all non-bare repos or it can be set to a boolean value.
This defaults to true.
+The optional configuration variable `gc.commitGraph` determines if
+'git gc' should run 'git commit-graph write'. This can be set to a
+boolean value. This defaults to false.
+
The optional configuration variable `gc.aggressiveWindow` controls how
much time is spent optimizing the delta compression of the objects in
the repository when the --aggressive option is specified. The larger
[-l | --files-with-matches] [-L | --files-without-match]
[(-O | --open-files-in-pager) [<pager>]]
[-z | --null]
- [-c | --count] [--all-match] [-q | --quiet]
+ [-o | --only-matching] [-c | --count] [--all-match] [-q | --quiet]
[--max-depth <depth>]
[--color[=<when>] | --no-color]
[--break] [--heading] [-p | --show-function]
Output \0 instead of the character that normally follows a
file name.
+-o::
+--only-matching::
+ Print only the matched (non-empty) parts of a matching line, with each such
+ part on a separate output line.
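++
+For example (the pattern is only illustrative), to list just the
+dotted version numbers that occur in tracked files:
++
+------------
+$ git grep -o -E '[0-9]+\.[0-9]+\.[0-9]+'
+------------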
+
-c::
--count::
Instead of showing every matched line, show the number of
'git merge' [-n] [--stat] [--no-commit] [--squash] [--[no-]edit]
[-s <strategy>] [-X <strategy-option>] [-S[<keyid>]]
[--[no-]allow-unrelated-histories]
- [--[no-]rerere-autoupdate] [-m <msg>] [<commit>...]
+ [--[no-]rerere-autoupdate] [-m <msg>] [-F <file>] [<commit>...]
'git merge' --abort
'git merge' --continue
used to give a good default for automated 'git merge'
invocations. The automated message can include the branch description.
+-F <file>::
+--file=<file>::
+ Read the commit message to be used for the merge commit (in
+ case one is created).
++
+If `--log` is specified, a shortlog of the commits being merged
+will be appended to the specified message.
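++
+For example (file and branch names are placeholders), to merge branch
+`topic` using a prepared message:
++
+------------
+$ git merge -F merge-message.txt topic
+------------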
+
--[no-]rerere-autoupdate::
Allow the rerere mechanism to update the index with the
result of auto-conflict resolution if possible.
To avoid recording unrelated changes in the merge commit,
'git pull' and 'git merge' will also abort if there are any changes
-registered in the index relative to the `HEAD` commit. (One
-exception is when the changed index entries are in the state that
-would result from the merge already.)
+registered in the index relative to the `HEAD` commit. (Special
+narrow exceptions to this rule may exist depending on which merge
+strategy is in use, but generally, the index must match HEAD.)
If all named commits are already ancestors of `HEAD`, 'git merge'
will exit early with the message "Already up to date."
--keep-empty::
Keep the commits that do not change anything from their
parents in the result.
++
+See also INCOMPATIBLE OPTIONS below.
--allow-empty-message::
By default, rebasing commits with an empty message will fail.
This option overrides that behavior, allowing commits with empty
messages to be rebased.
++
+See also INCOMPATIBLE OPTIONS below.
--skip::
Restart the rebasing process by skipping the current patch.
conflict happens, the side reported as 'ours' is the so-far rebased
series, starting with <upstream>, and 'theirs' is the working branch. In
other words, the sides are swapped.
++
+See also INCOMPATIBLE OPTIONS below.
-s <strategy>::
--strategy=<strategy>::
+
Because 'git rebase' replays each commit from the working branch
on top of the <upstream> branch using the given strategy, using
-the 'ours' strategy simply discards all patches from the <branch>,
+the 'ours' strategy simply empties all patches from the <branch>,
which makes little sense.
++
+See also INCOMPATIBLE OPTIONS below.
-X <strategy-option>::
--strategy-option=<strategy-option>::
This implies `--merge` and, if no strategy has been
specified, `-s recursive`. Note the reversal of 'ours' and
'theirs' as noted above for the `-m` option.
++
+See also INCOMPATIBLE OPTIONS below.
-S[<keyid>]::
--gpg-sign[=<keyid>]::
and after each change. When fewer lines of surrounding
context exist they all must match. By default no context is
ever ignored.
++
+See also INCOMPATIBLE OPTIONS below.
--f::
+--no-ff::
--force-rebase::
- Force a rebase even if the current branch is up to date and
- the command without `--force` would return without doing anything.
+-f::
+ Individually replay all rebased commits instead of fast-forwarding
+ over the unchanged ones. This ensures that the entire history of
+ the rebased branch is composed of new commits.
+
-You may find this (or --no-ff with an interactive rebase) helpful after
-reverting a topic branch merge, as this option recreates the topic branch with
-fresh commits so it can be remerged successfully without needing to "revert
-the reversion" (see the
-link:howto/revert-a-faulty-merge.html[revert-a-faulty-merge How-To] for details).
+You may find this helpful after reverting a topic branch merge, as this option
+recreates the topic branch with fresh commits so it can be remerged
+successfully without needing to "revert the reversion" (see the
+link:howto/revert-a-faulty-merge.html[revert-a-faulty-merge How-To] for
+details).
--fork-point::
--no-fork-point::
--whitespace=<option>::
These flags are passed to the 'git apply' program
(see linkgit:git-apply[1]) that applies the patch.
- Incompatible with the --interactive option.
++
+See also INCOMPATIBLE OPTIONS below.
--committer-date-is-author-date::
--ignore-date::
These flags are passed to 'git am' to easily change the dates
of the rebased commits (see linkgit:git-am[1]).
- Incompatible with the --interactive option.
++
+See also INCOMPATIBLE OPTIONS below.
--signoff::
Add a Signed-off-by: trailer to all the rebased commits. Note
that if `--interactive` is given then only commits marked to be
- picked, edited or reworded will have the trailer added. Incompatible
- with the `--preserve-merges` option.
+ picked, edited or reworded will have the trailer added.
++
+See also INCOMPATIBLE OPTIONS below.
-i::
--interactive::
The commit list format can be changed by setting the configuration option
rebase.instructionFormat. A customized instruction format will automatically
have the long commit hash prepended to the format.
++
+See also INCOMPATIBLE OPTIONS below.
-r::
--rebase-merges[=(rebase-cousins|no-rebase-cousins)]::
`recursive` merge strategy; different merge strategies can be used only via
explicit `exec git merge -s <strategy> [...]` commands.
+
-See also REBASING MERGES below.
+See also REBASING MERGES and INCOMPATIBLE OPTIONS below.
-p::
--preserve-merges::
This uses the `--interactive` machinery internally, but combining it
with the `--interactive` option explicitly is generally not a good
idea unless you know what you are doing (see BUGS below).
++
+See also INCOMPATIBLE OPTIONS below.
-x <cmd>::
--exec <cmd>::
+
This uses the `--interactive` machinery internally, but it can be run
without an explicit `--interactive`.
++
+See also INCOMPATIBLE OPTIONS below.
--root::
Rebase all commits reachable from <branch>, instead of
When used together with both --onto and --preserve-merges,
'all' root commits will be rewritten to have <newbase> as parent
instead.
++
+See also INCOMPATIBLE OPTIONS below.
--autosquash::
--no-autosquash::
too. The recommended way to create fixup/squash commits is by using
the `--fixup`/`--squash` options of linkgit:git-commit[1].
+
-This option is only valid when the `--interactive` option is used.
-+
If the `--autosquash` option is enabled by default using the
configuration variable `rebase.autoSquash`, this option can be
used to override and disable this setting.
++
+See also INCOMPATIBLE OPTIONS below.
--autostash::
--no-autostash::
with care: the final stash application after a successful
rebase might result in non-trivial conflicts.
---no-ff::
- With --interactive, cherry-pick all rebased commits instead of
- fast-forwarding over the unchanged ones. This ensures that the
- entire history of the rebased branch is composed of new commits.
-+
-Without --interactive, this is a synonym for --force-rebase.
-+
-You may find this helpful after reverting a topic branch merge, as this option
-recreates the topic branch with fresh commits so it can be remerged
-successfully without needing to "revert the reversion" (see the
-link:howto/revert-a-faulty-merge.html[revert-a-faulty-merge How-To] for details).
+INCOMPATIBLE OPTIONS
+--------------------
+
+git-rebase has many flags that are incompatible with each other,
+predominantly due to the fact that it has three different underlying
+implementations:
+
+ * one based on linkgit:git-am[1] (the default)
+ * one based on git-merge-recursive (merge backend)
+ * one based on linkgit:git-cherry-pick[1] (interactive backend)
+
+Flags only understood by the am backend:
+
+ * --committer-date-is-author-date
+ * --ignore-date
+ * --whitespace
+ * --ignore-whitespace
+ * -C
+
+Flags understood by both merge and interactive backends:
+
+ * --merge
+ * --strategy
+ * --strategy-option
+ * --allow-empty-message
+
+Flags only understood by the interactive backend:
+
+ * --[no-]autosquash
+ * --rebase-merges
+ * --preserve-merges
+ * --interactive
+ * --exec
+ * --keep-empty
+ * --edit-todo
+ * --root when used in combination with --onto
+
+Other incompatible flag pairs:
+
+ * --preserve-merges and --interactive
+ * --preserve-merges and --signoff
+ * --preserve-merges and --rebase-merges
+ * --rebase-merges and --strategy
+ * --rebase-merges and --strategy-option
+
+BEHAVIORAL DIFFERENCES
+-----------------------
+
+ * empty commits:
+
+ am-based rebase will drop any "empty" commits, whether the
+ commit started empty (had no changes relative to its parent to
+ start with) or ended empty (all changes were already applied
+ upstream in other commits).
+
+ merge-based rebase does the same.
+
+ interactive-based rebase will by default drop commits that
+ started empty and halt if it hits a commit that ended up empty.
+ The `--keep-empty` option exists for interactive rebases to allow
+ it to keep commits that started empty.
+
+ * directory rename detection:
+
+ merge-based and interactive-based rebases work fine with
+ directory rename detection. am-based rebases sometimes do not.
include::merge-strategies.txt[]
(this typically happens when a `reset` command was inserted into the todo
list manually and contains a typo).
-The `merge` command will merge the specified revision into whatever is
-HEAD at that time. With `-C <original-commit>`, the commit message of
+The `merge` command will merge the specified revision(s) into whatever
+is HEAD at that time. With `-C <original-commit>`, the commit message of
the specified merge commit will be used. When the `-C` is changed to
a lower-case `-c`, the message will be opened in an editor after a
successful merge so that the user can edit the message.
when the merge operation did not even start), it is rescheduled immediately.
At this time, the `merge` command will *always* use the `recursive`
-merge strategy, with no way to choose a different one. To work around
+merge strategy for regular merges, and `octopus` for octopus merges,
+with no way to choose a different one. To work around
this, an `exec` command can be used to call `git merge` explicitly,
using the fact that the labels are worktree-local refs (the ref
`refs/rewritten/onto` would correspond to the label `onto`, for example).
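+
+For example (a sketch; the label name is only illustrative), a generated
+`merge` line for the label `topic` could be replaced with:
+
+------------
+exec git merge -s resolve --no-edit refs/rewritten/topic
+------------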
Specify encoding of compose message. Default is the value of the
'sendemail.composeencoding'; if that is unspecified, UTF-8 is assumed.
---transfer-encoding=(7bit|8bit|quoted-printable|base64)::
+--transfer-encoding=(7bit|8bit|quoted-printable|base64|auto)::
Specify the transfer encoding to be used to send the message over SMTP.
7bit will fail upon encountering a non-ASCII message. quoted-printable
can be useful when the repository contains files that contain carriage
returns, but makes the raw patch email file (as saved from a MUA) much
harder to inspect manually. base64 is even more foolproof, but also
- even more opaque. Default is the value of the `sendemail.transferEncoding`
- configuration value; if that is unspecified, git will use 8bit and not
- add a Content-Transfer-Encoding header.
+ even more opaque. auto will use 8bit when possible, and quoted-printable
+ otherwise.
++
+Default is the value of the `sendemail.transferEncoding` configuration
+value; if that is unspecified, it defaults to `auto`.
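++
+For example (addresses and file names are placeholders), to make the
+'auto' fallback behaviour explicit for a series:
++
+------------
+$ git send-email --transfer-encoding=auto --to=list@example.org outgoing/*.patch
+------------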
--xmailer::
--no-xmailer::
+
--
* Invoke the sendemail-validate hook if present (see linkgit:githooks[5]).
- * Warn of patches that contain lines longer than 998 characters; this
- is due to SMTP limits as described by http://www.ietf.org/rfc/rfc2821.txt.
+ * Warn of patches that contain lines longer than
+ 998 characters unless a suitable transfer encoding
+ ('auto', 'base64', or 'quoted-printable') is used;
+ this is due to SMTP limits as described by
+ http://www.ietf.org/rfc/rfc5322.txt.
--
+
Default is the value of `sendemail.validate`; if this is not set,
$ git worktree add --track -b <branch> <path> <remote>/<branch>
------------
+
+If the branch exists in multiple remotes and one of them is named by
+the `checkout.defaultRemote` configuration variable, we'll use that
+one for the purposes of disambiguation, even if the `<branch>` isn't
+unique across all remotes. Set it to
+e.g. `checkout.defaultRemote=origin` to always check out remote
+branches from there if `<branch>` is ambiguous but exists on the
+'origin' remote. See also `checkout.defaultRemote` in
+linkgit:git-config[1].
++
If `<commit-ish>` is omitted and neither `-b` nor `-B` nor `--detach` used,
then, as a convenience, the new worktree is associated with a branch
(call it `<branch>`) named after `$(basename <path>)`. If `<branch>`
A colon, followed by a slash, followed by a text, names
a commit whose commit message matches the specified regular expression.
This name returns the youngest matching commit which is
- reachable from any ref. The regular expression can match any part of the
+ reachable from any ref, including HEAD.
+ The regular expression can match any part of the
commit message. To match messages starting with a string, one can use
e.g. ':/^foo'. The special sequence ':/!' is reserved for modifiers to what
is matched. ':/!-foo' performs a negative match, while ':/!!foo' matches a
- The commit graph feature currently does not honor commit grafts. This can
be remedied by duplicating or refactoring the current graft logic.
-- The 'commit-graph' subcommand does not have a "verify" mode that is
- necessary for integration with fsck.
-
- After computing and storing generation numbers, we must make graph
walks aware of generation numbers to gain the performance benefits they
enable. This will mostly be accomplished by swapping a commit-date-ordered
- 'log --topo-order'
- 'tag --merged'
-- Currently, parse_commit_gently() requires filling in the root tree
- object for a commit. This passes through lookup_tree() and consequently
- lookup_object(). Also, it calls lookup_commit() when loading the parents.
- These method calls check the ODB for object existence, even if the
- consumer does not need the content. For example, we do not need the
- tree contents when computing merge bases. Now that commit parsing is
- removed from the computation time, these lookup operations are the
- slowest operations keeping graph walks from being fast. Consider
- loading these objects without verifying their existence in the ODB and
- only loading them fully when consumers need them. Consider a method
- such as "ensure_tree_loaded(commit)" that fully loads a tree before
- using commit->tree.
-
-- The current design uses the 'commit-graph' subcommand to generate the graph.
- When this feature stabilizes enough to recommend to most users, we should
- add automatic graph writes to common operations that create many commits.
- For example, one could compute a graph on 'clone', 'fetch', or 'repack'
- commands.
-
- A server could provide a commit graph file as part of the network protocol
to avoid extra calculations by clients. This feature is only of benefit if
the user is willing to trust the file, because verifying the file is correct
--- /dev/null
+Directory rename detection
+==========================
+
+Rename detection logic in diffcore-rename that checks for renames of
+individual files is aggregated and analyzed in merge-recursive for cases
+where combinations of renames indicate that a full directory has been
+renamed.
+
+Scope of abilities
+------------------
+
+It is perhaps easiest to start with an example:
+
+ * When all of x/a, x/b and x/c have moved to z/a, z/b and z/c, it is
+ likely that x/d added in the meantime would also want to move to z/d by
+ taking the hint that the entire directory 'x' moved to 'z'.
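+
+A minimal sketch of that first case (repository, branch, and file names
+are arbitrary, and command output is omitted):
+
+------------
+$ git init demo && cd demo
+$ mkdir x && for f in a b c; do echo $f >x/$f; done
+$ git add x && git commit -m 'add x/'
+$ git checkout -b rename-side
+$ git mv x z && git commit -m 'rename x/ to z/'
+$ git checkout -b add-side master
+$ echo d >x/d && git add x/d && git commit -m 'add x/d'
+$ git merge --no-edit rename-side   # x/d ends up as z/d
+------------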
+
+More interesting possibilities exist, though, such as:
+
+ * one side of history renames x -> z, and the other renames some file to
+ x/e, causing the need for the merge to do a transitive rename.
+
+ * one side of history renames x -> z, but also renames all files within
+ x. For example, x/a -> z/alpha, x/b -> z/bravo, etc.
+
+ * both 'x' and 'y' being merged into a single directory 'z', with a
+ directory rename being detected for both x->z and y->z.
+
+ * not all files in a directory being renamed to the same location;
+ i.e. perhaps most of the files in 'x' are now found under 'z', but a few
+ are found under 'w'.
+
+ * a directory being renamed, which also contained a subdirectory that was
+ renamed to some entirely different location. (And perhaps the inner
+ directory itself contained inner directories that were renamed to yet
+ other locations).
+
+ * combinations of the above; see t/t6043-merge-rename-directories.sh for
+ various interesting cases.
+
+Limitations -- applicability of directory renames
+-------------------------------------------------
+
+In order to prevent edge and corner cases resulting in either conflicts
+that cannot be represented in the index or which might be too complex for
+users to try to understand and resolve, a couple of basic rules limit when
+directory rename detection applies:
+
+ 1) If a given directory still exists on both sides of a merge, we do
+ not consider it to have been renamed.
+
+ 2) If a subset of to-be-renamed files have a file or directory in the
+ way (or would be in the way of each other), "turn off" the directory
+ rename for those specific sub-paths and report the conflict to the
+ user.
+
+ 3) If the other side of history did a directory rename to a path that
+ your side of history renamed away, then ignore that particular
+ rename from the other side of history for any implicit directory
+ renames (but warn the user).
+
+Limitations -- detailed rules and testcases
+-------------------------------------------
+
+t/t6043-merge-rename-directories.sh contains extensive tests and commentary
+which generate and explore the rules listed above. It also lists a few
+additional rules:
+
+ a) If renames split a directory into two or more others, the directory
+ with the most renames "wins".
+
+ b) Avoid directory-rename-detection for a path, if that path is the
+ source of a rename on either side of a merge.
+
+ c) Only apply implicit directory renames to directories if the other side
+ of history is the one doing the renaming.
+
+Limitations -- support in different commands
+--------------------------------------------
+
+Directory rename detection is supported by 'merge' and 'cherry-pick'.
+Other git commands in which users might be surprised to see limited or
+no directory rename detection support:
+
+ * diff
+
+ Folks have requested in the past that `git diff` detect directory
+ renames and somehow simplify its output. It is not clear whether this
+ would be desirable or how the output should be simplified, so this was
+ simply not implemented. Further, to implement this, directory rename
+ detection logic would need to move from merge-recursive to
+ diffcore-rename.
+
+ * am
+
+ git-am tries to avoid a full three way merge, instead calling
+ git-apply. That prevents us from detecting renames at all, which may
+ defeat the directory rename detection. There is a fallback, though; if
+ the initial git-apply fails and the user has specified the -3 option,
+ git-am will fall back to a three way merge. However, git-am lacks the
+ necessary information to do a "real" three way merge. Instead, it has
+ to use build_fake_ancestor() to get a merge base that is missing files
+ whose rename may have been important to detect for directory rename
+ detection to function.
+
+ * rebase
+
+ Since am-based rebases work by first generating a bunch of patches
+ (which no longer record what the original commits were and thus don't
+ have the necessary info from which we can find a real merge-base), and
+ then calling git-am, this implies that am-based rebases will not always
+ successfully detect directory renames either (see the 'am' section
+ above). merge-based rebases (rebase -m) and cherry-pick-based rebases
+ (rebase -i) are not affected by this shortcoming, and fully support
+ directory rename detection.
The client can optionally request that pack-objects omit various
objects from the packfile using one of several filtering techniques.
These are intended for use with partial clone and partial fetch
-operations. See `rev-list` for possible "filter-spec" values.
+operations. An object that does not meet a filter-spec value is
+omitted unless explicitly requested in a 'want' line. See `rev-list`
+for possible filter-spec values.
Once all the 'want's and 'shallow's (and optional 'deepen') are
transferred, clients MUST send a flush-pkt, to tell the server side
for use with partial clone and partial fetch operations. See
`rev-list` for possible "filter-spec" values.
+If the 'ref-in-want' feature is advertised, the following argument can
+be included in the client's request; in addition, the server's response
+may include a 'wanted-refs' section as explained below.
+
+ want-ref <ref>
+ Indicates to the server that the client wants to retrieve a
+ particular ref, where <ref> is the full name of a ref on the
+ server.
+
The response of `fetch` is broken into a number of sections separated by
delimiter packets (0001), with each section beginning with its section
header.
output = *section
- section = (acknowledgments | shallow-info | packfile)
+ section = (acknowledgments | shallow-info | wanted-refs | packfile)
(flush-pkt | delim-pkt)
acknowledgments = PKT-LINE("acknowledgments" LF)
shallow = "shallow" SP obj-id
unshallow = "unshallow" SP obj-id
+ wanted-refs = PKT-LINE("wanted-refs" LF)
+ *PKT-LINE(wanted-ref LF)
+ wanted-ref = obj-id SP refname
+
packfile = PKT-LINE("packfile" LF)
*PKT-LINE(%x01-03 *%x00-ff)
* This section is only included if a packfile section is also
included in the response.
+ wanted-refs section
+ * This section is only included if the client has requested a
+ ref using a 'want-ref' line and if a packfile section is also
+ included in the response.
+
+ * Always begins with the section header "wanted-refs".
+
+ * The server will send a ref listing ("<oid> <refname>") for
+ each reference requested using 'want-ref' lines.
+
+ * The server MUST NOT send any refs which were not requested
+ using 'want-ref' lines.
+
packfile section
* This section is only included if the client has sent 'want'
lines in its request and either requested that no more
SCRIPT_SH += git-merge-resolve.sh
SCRIPT_SH += git-mergetool.sh
SCRIPT_SH += git-quiltimport.sh
-SCRIPT_SH += git-rebase.sh
+SCRIPT_SH += git-legacy-rebase.sh
SCRIPT_SH += git-remote-testgit.sh
SCRIPT_SH += git-request-pull.sh
SCRIPT_SH += git-stash.sh
SCRIPT_LIB += git-mergetool--lib
SCRIPT_LIB += git-parse-remote
SCRIPT_LIB += git-rebase--am
+SCRIPT_LIB += git-rebase--common
SCRIPT_LIB += git-rebase--preserve-merges
SCRIPT_LIB += git-rebase--merge
SCRIPT_LIB += git-sh-setup
TEST_BUILTINS_OBJS += test-read-cache.o
TEST_BUILTINS_OBJS += test-ref-store.o
TEST_BUILTINS_OBJS += test-regex.o
+TEST_BUILTINS_OBJS += test-repository.o
TEST_BUILTINS_OBJS += test-revision-walking.o
TEST_BUILTINS_OBJS += test-run-command.o
TEST_BUILTINS_OBJS += test-scrap-cache-tree.o
LIB_OBJS += ewah/ewah_io.o
LIB_OBJS += ewah/ewah_rlw.o
LIB_OBJS += exec-cmd.o
+LIB_OBJS += fetch-negotiator.o
LIB_OBJS += fetch-object.o
LIB_OBJS += fetch-pack.o
LIB_OBJS += fsck.o
LIB_OBJS += merge-recursive.o
LIB_OBJS += mergesort.o
LIB_OBJS += name-hash.o
+LIB_OBJS += negotiator/default.o
+LIB_OBJS += negotiator/skipping.o
LIB_OBJS += notes.o
LIB_OBJS += notes-cache.o
LIB_OBJS += notes-merge.o
BUILTIN_OBJS += builtin/pull.o
BUILTIN_OBJS += builtin/push.o
BUILTIN_OBJS += builtin/read-tree.o
+BUILTIN_OBJS += builtin/rebase.o
BUILTIN_OBJS += builtin/rebase--interactive.o
BUILTIN_OBJS += builtin/receive-pack.o
BUILTIN_OBJS += builtin/reflog.o
int advice_ignored_hook = 1;
int advice_waiting_for_editor = 1;
int advice_graft_file_deprecated = 1;
+int advice_checkout_ambiguous_remote_branch_name = 1;
static int advice_use_color = -1;
static char advice_colors[][COLOR_MAXLEN] = {
{ "ignoredHook", &advice_ignored_hook },
{ "waitingForEditor", &advice_waiting_for_editor },
{ "graftFileDeprecated", &advice_graft_file_deprecated },
+ { "checkoutAmbiguousRemoteBranchName", &advice_checkout_ambiguous_remote_branch_name },
/* make this an alias for backward compatibility */
{ "pushNonFastForward", &advice_push_update_rejected }
extern int advice_ignored_hook;
extern int advice_waiting_for_editor;
extern int advice_graft_file_deprecated;
+extern int advice_checkout_ambiguous_remote_branch_name;
int git_default_advice_config(const char *var, const char *value);
__attribute__((format (printf, 1, 2)))
return get_oid_hex(p->old_sha1_prefix, oid);
}
-/* Build an index that contains the just the files needed for a 3way merge */
+/* Build an index that contains just the files needed for a 3way merge */
static int build_fake_ancestor(struct apply_state *state, struct patch *list)
{
struct patch *patch;
return error(_("sha1 information is lacking or useless "
"(%s)."), name);
- ce = make_cache_entry(patch->old_mode, oid.hash, name, 0, 0);
+ ce = make_cache_entry(&result, patch->old_mode, &oid, name, 0, 0);
if (!ce)
return error(_("make_cache_entry failed for path '%s'"),
name);
if (add_index_entry(&result, ce, ADD_CACHE_OK_TO_ADD)) {
- free(ce);
+ discard_cache_entry(ce);
return error(_("could not add %s to temporary index"),
name);
}
struct stat st;
struct cache_entry *ce;
int namelen = strlen(path);
- unsigned ce_size = cache_entry_size(namelen);
- ce = xcalloc(1, ce_size);
+ ce = make_empty_cache_entry(&the_index, namelen);
memcpy(ce->name, path, namelen);
ce->ce_mode = create_ce_mode(mode);
ce->ce_flags = create_ce_flags(0);
if (!skip_prefix(buf, "Subproject commit ", &s) ||
get_oid_hex(s, &ce->oid)) {
- free(ce);
- return error(_("corrupt patch for submodule %s"), path);
+ discard_cache_entry(ce);
+ return error(_("corrupt patch for submodule %s"), path);
}
} else {
if (!state->cached) {
if (lstat(path, &st) < 0) {
- free(ce);
+ discard_cache_entry(ce);
return error_errno(_("unable to stat newly "
"created file '%s'"),
path);
fill_stat_cache_info(ce, &st);
}
if (write_object_file(buf, size, blob_type, &ce->oid) < 0) {
- free(ce);
+ discard_cache_entry(ce);
return error(_("unable to create backing store "
"for newly created file %s"), path);
}
}
if (add_cache_entry(ce, ADD_CACHE_OK_TO_ADD) < 0) {
- free(ce);
+ discard_cache_entry(ce);
return error(_("unable to add cache entry for %s"), path);
}
struct patch *patch)
{
int stage, namelen;
- unsigned ce_size, mode;
+ unsigned mode;
struct cache_entry *ce;
if (!state->update_index)
return 0;
namelen = strlen(patch->new_name);
- ce_size = cache_entry_size(namelen);
mode = patch->new_mode ? patch->new_mode : (S_IFREG | 0644);
remove_file_from_cache(patch->new_name);
for (stage = 1; stage < 4; stage++) {
if (is_null_oid(&patch->threeway_stage[stage - 1]))
continue;
- ce = xcalloc(1, ce_size);
+ ce = make_empty_cache_entry(&the_index, namelen);
memcpy(ce->name, patch->new_name, namelen);
ce->ce_mode = create_ce_mode(mode);
ce->ce_flags = create_ce_flags(stage);
ce->ce_namelen = namelen;
oidcpy(&ce->oid, &patch->threeway_stage[stage - 1]);
if (add_cache_entry(ce, ADD_CACHE_OK_TO_ADD) < 0) {
- free(ce);
+ discard_cache_entry(ce);
return error(_("unable to add cache entry for %s"),
patch->new_name);
}
if (get_oid(name, &oid))
die("Not a valid object name");
- commit = lookup_commit_reference_gently(&oid, 1);
+ commit = lookup_commit_reference_gently(the_repository, &oid, 1);
if (commit) {
commit_sha1 = commit->object.oid.hash;
archive_time = commit->date;
static struct commit *get_commit_reference(const struct object_id *oid)
{
- struct commit *r = lookup_commit_reference(oid);
+ struct commit *r = lookup_commit_reference(the_repository, oid);
if (!r)
die(_("Not a valid commit name %s"), oid_to_hex(oid));
return r;
{
struct commit *parent;
- parent = lookup_commit_reference(oid);
+ parent = lookup_commit_reference(the_repository, oid);
if (!parent)
die("no such commit %s", oid_to_hex(oid));
return &commit_list_insert(parent, tail)->next;
{
size_t len;
void *buf = strbuf_detach(sb, &len);
- set_commit_buffer(c, buf, len);
+ set_commit_buffer(the_repository, c, buf, len);
}
/*
struct strbuf buf = STRBUF_INIT;
const char *ident;
time_t now;
- int size, len;
+ int len;
struct cache_entry *ce;
unsigned mode;
struct strbuf msg = STRBUF_INIT;
/* Let's not bother reading from HEAD tree */
mode = S_IFREG | 0644;
}
- size = cache_entry_size(len);
- ce = xcalloc(1, size);
+ ce = make_empty_cache_entry(&the_index, len);
oidcpy(&ce->oid, &origin->blob_oid);
memcpy(ce->name, path, len);
ce->ce_flags = create_ce_flags(0);
struct object *obj = revs->pending.objects[i].item;
if (obj->flags & UNINTERESTING)
continue;
- obj = deref_tag(obj, NULL, 0);
+ obj = deref_tag(the_repository, obj, NULL, 0);
if (obj->type != OBJ_COMMIT)
die("Non commit %s?", revs->pending.objects[i].name);
if (found)
/* Is that sole rev a committish? */
obj = revs->pending.objects[0].item;
- obj = deref_tag(obj, NULL, 0);
+ obj = deref_tag(the_repository, obj, NULL, 0);
if (obj->type != OBJ_COMMIT)
return NULL;
/* Do we have HEAD? */
if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL))
return NULL;
- head_commit = lookup_commit_reference_gently(&head_oid, 1);
+ head_commit = lookup_commit_reference_gently(the_repository,
+ &head_oid, 1);
if (!head_commit)
return NULL;
struct object *obj = revs->pending.objects[i].item;
if (!(obj->flags & UNINTERESTING))
continue;
- obj = deref_tag(obj, NULL, 0);
+ obj = deref_tag(the_repository, obj, NULL, 0);
if (obj->type != OBJ_COMMIT)
die("Non commit %s?", revs->pending.objects[i].name);
if (found)
const char *blob_type = "blob";
-struct blob *lookup_blob(const struct object_id *oid)
+struct blob *lookup_blob(struct repository *r, const struct object_id *oid)
{
- struct object *obj = lookup_object(oid->hash);
+ struct object *obj = lookup_object(r, oid->hash);
if (!obj)
- return create_object(the_repository, oid->hash,
- alloc_blob_node(the_repository));
- return object_as_type(obj, OBJ_BLOB, 0);
+ return create_object(r, oid->hash,
+ alloc_blob_node(r));
+ return object_as_type(r, obj, OBJ_BLOB, 0);
}
int parse_blob_buffer(struct blob *item, void *buffer, unsigned long size)
struct object object;
};
-struct blob *lookup_blob(const struct object_id *oid);
+struct blob *lookup_blob(struct repository *r, const struct object_id *oid);
int parse_blob_buffer(struct blob *item, void *buffer, unsigned long size);
break;
}
- if ((commit = lookup_commit_reference(&oid)) == NULL)
+ if ((commit = lookup_commit_reference(the_repository, &oid)) == NULL)
die(_("Not a valid branch point: '%s'."), start_name);
oidcpy(&oid, &commit->object.oid);
extern int cmd_pull(int argc, const char **argv, const char *prefix);
extern int cmd_push(int argc, const char **argv, const char *prefix);
extern int cmd_read_tree(int argc, const char **argv, const char *prefix);
+extern int cmd_rebase(int argc, const char **argv, const char *prefix);
extern int cmd_rebase__interactive(int argc, const char **argv, const char *prefix);
extern int cmd_receive_pack(int argc, const char **argv, const char *prefix);
extern int cmd_reflog(int argc, const char **argv, const char *prefix);
#include "apply.h"
#include "string-list.h"
#include "packfile.h"
+#include "repository.h"
/**
* Returns 1 if the file is empty or does not exist, 0 otherwise.
FILE *fp;
if (!get_oid_tree("HEAD", &head))
- tree = lookup_tree(&head);
+ tree = lookup_tree(the_repository, &head);
else
- tree = lookup_tree(the_hash_algo->empty_tree);
+ tree = lookup_tree(the_repository,
+ the_repository->hash_algo->empty_tree);
fp = xfopen(am_path(state, "patch"), "w");
init_revisions(&rev_info, NULL);
if (!get_oid_commit("HEAD", &parent)) {
old_oid = &parent;
- commit_list_insert(lookup_commit(&parent), &parents);
+ commit_list_insert(lookup_commit(the_repository, &parent),
+ &parents);
} else {
old_oid = NULL;
say(state, stderr, _("applying to an empty history"));
refresh_and_write_cache();
- if (index_has_changes(&sb)) {
+ if (index_has_changes(&the_index, NULL, &sb)) {
write_state_bool(state, "dirtyindex", 1);
die(_("Dirty index: cannot apply patches (dirty: %s)"), sb.buf);
}
* Applying the patch to an earlier tree and merging
* the result may have produced the same tree as ours.
*/
- if (!apply_status && !index_has_changes(NULL)) {
+ if (!apply_status &&
+ !index_has_changes(&the_index, NULL, NULL)) {
say(state, stdout, _("No changes -- Patch already applied."));
goto next;
}
say(state, stdout, _("Applying: %.*s"), linelen(state->msg), state->msg);
- if (!index_has_changes(NULL)) {
+ if (!index_has_changes(&the_index, NULL, NULL)) {
printf_ln(_("No changes - did you forget to use 'git add'?\n"
"If there is nothing left to stage, chances are that something else\n"
"already introduced the same changes; you might want to skip this patch."));
nth_line_cb, &sb, lno, anchor,
&bottom, &top, sb.path))
usage(blame_usage);
- if (lno < top || ((lno || bottom) && lno < bottom))
+ if ((!lno && (top || bottom)) || lno < bottom)
die(Q_("file %s has only %lu line",
"file %s has only %lu lines",
lno), path, lno);
if (bottom < 1)
bottom = 1;
- if (top < 1)
+ if (top < 1 || lno < top)
top = lno;
bottom--;
range_set_append_unsafe(&ranges, bottom, top);
find_alignment(&sb, &output_option);
if (!*repeated_meta_color &&
(output_option & OUTPUT_COLOR_LINE))
- strcpy(repeated_meta_color, GIT_COLOR_CYAN);
+ xsnprintf(repeated_meta_color,
+ sizeof(repeated_meta_color),
+ "%s", GIT_COLOR_CYAN);
}
if (output_option & OUTPUT_ANNOTATE_COMPAT)
output_option &= ~(OUTPUT_COLOR_LINE | OUTPUT_SHOW_AGE_WITH_COLOR);
(reference_name = reference_name_to_free =
resolve_refdup(upstream, RESOLVE_REF_READING,
&oid, NULL)) != NULL)
- reference_rev = lookup_commit_reference(&oid);
+ reference_rev = lookup_commit_reference(the_repository,
+ &oid);
}
if (!reference_rev)
reference_rev = head_rev;
const struct object_id *oid, struct commit *head_rev,
int kinds, int force)
{
- struct commit *rev = lookup_commit_reference(oid);
+ struct commit *rev = lookup_commit_reference(the_repository, oid);
if (!rev) {
error(_("Couldn't look up commit object for '%s'"), refname);
return -1;
}
if (!force) {
- head_rev = lookup_commit_reference(&head_oid);
+ head_rev = lookup_commit_reference(the_repository, &head_oid);
if (!head_rev)
die(_("Couldn't look up commit object for HEAD"));
}
#include "resolve-undo.h"
#include "submodule-config.h"
#include "submodule.h"
+#include "advice.h"
static const char * const checkout_usage[] = {
N_("git checkout [<options>] <branch>"),
return READ_TREE_RECURSIVE;
len = base->len + strlen(pathname);
- ce = xcalloc(1, cache_entry_size(len));
+ ce = make_empty_cache_entry(&the_index, len);
oidcpy(&ce->oid, oid);
memcpy(ce->name, base->buf, base->len);
memcpy(ce->name + base->len, pathname, len - base->len);
if (ce->ce_mode == old->ce_mode &&
!oidcmp(&ce->oid, &old->oid)) {
old->ce_flags |= CE_UPDATE;
- free(ce);
+ discard_cache_entry(ce);
return 0;
}
}
if (write_object_file(result_buf.ptr, result_buf.size, blob_type, &oid))
die(_("Unable to add merge result for '%s'"), path);
free(result_buf.ptr);
- ce = make_cache_entry(mode, oid.hash, path, 2, 0);
+ ce = make_transient_cache_entry(mode, &oid, path, 2);
if (!ce)
die(_("make_cache_entry failed for path '%s'"), path);
status = checkout_entry(ce, state, NULL);
- free(ce);
+ discard_cache_entry(ce);
return status;
}
die(_("unable to write new index file"));
read_ref_full("HEAD", 0, &rev, NULL);
- head = lookup_commit_reference_gently(&rev, 1);
+ head = lookup_commit_reference_gently(the_repository, &rev, 1);
errs |= post_checkout_hook(head, head, 0);
return errs;
memset(&old_branch_info, 0, sizeof(old_branch_info));
old_branch_info.path = path_to_free = resolve_refdup("HEAD", 0, &rev, &flag);
if (old_branch_info.path)
- old_branch_info.commit = lookup_commit_reference_gently(&rev, 1);
+ old_branch_info.commit = lookup_commit_reference_gently(the_repository, &rev, 1);
if (!(flag & REF_ISSYMREF))
old_branch_info.path = NULL;
int dwim_new_local_branch_ok,
struct branch_info *new_branch_info,
struct checkout_opts *opts,
- struct object_id *rev)
+ struct object_id *rev,
+ int *dwim_remotes_matched)
{
struct tree **source_tree = &opts->source_tree;
const char **new_branch = &opts->new_branch;
* (b) If <something> is _not_ a commit, either "--" is present
* or <something> is not a path, no -t or -b was given, and
* and there is a tracking branch whose name is <something>
- * in one and only one remote, then this is a short-hand to
- * fork local <something> from that remote-tracking branch.
+ * in one and only one remote (or if the branch exists on the
+ * remote named in checkout.defaultRemote), then this is a
+ * short-hand to fork local <something> from that
+ * remote-tracking branch.
*
* (c) Otherwise, if "--" is present, treat it like case (1).
*
recover_with_dwim = 0;
if (recover_with_dwim) {
- const char *remote = unique_tracking_name(arg, rev);
+ const char *remote = unique_tracking_name(arg, rev,
+ dwim_remotes_matched);
if (remote) {
*new_branch = arg;
arg = remote;
else
new_branch_info->path = NULL; /* not an existing branch */
- new_branch_info->commit = lookup_commit_reference_gently(rev, 1);
+ new_branch_info->commit = lookup_commit_reference_gently(the_repository, rev, 1);
if (!new_branch_info->commit) {
/* not a commit */
*source_tree = parse_tree_indirect(rev);
struct branch_info new_branch_info;
char *conflict_style = NULL;
int dwim_new_local_branch = 1;
+ int dwim_remotes_matched = 0;
struct option options[] = {
OPT__QUIET(&opts.quiet, N_("suppress progress reporting")),
OPT_STRING('b', NULL, &opts.new_branch, N_("branch"),
opts.track == BRANCH_TRACK_UNSPECIFIED &&
!opts.new_branch;
int n = parse_branchname_arg(argc, argv, dwim_ok,
- &new_branch_info, &opts, &rev);
+ &new_branch_info, &opts, &rev,
+ &dwim_remotes_matched);
argv += n;
argc -= n;
}
}
UNLEAK(opts);
- if (opts.patch_mode || opts.pathspec.nr)
- return checkout_paths(&opts, new_branch_info.name);
- else
+ if (opts.patch_mode || opts.pathspec.nr) {
+ int ret = checkout_paths(&opts, new_branch_info.name);
+ if (ret && dwim_remotes_matched > 1 &&
+ advice_checkout_ambiguous_remote_branch_name)
+ advise(_("'%s' matched more than one remote tracking branch.\n"
+ "We found %d remotes with a reference that matched. So we fell back\n"
+ "on trying to resolve the argument as a path, but failed there too!\n"
+ "\n"
+ "If you meant to check out a remote tracking branch on, e.g. 'origin',\n"
+ "you can do so by fully qualifying the name with the --track option:\n"
+ "\n"
+ " git checkout --track origin/<name>\n"
+ "\n"
+ "If you'd like to always have checkouts of an ambiguous <name> prefer\n"
+ "one remote, e.g. the 'origin' remote, consider setting\n"
+ "checkout.defaultRemote=origin in your config."),
+ argv[0],
+ dwim_remotes_matched);
+ return ret;
+ } else {
return checkout_branch(&opts, &new_branch_info);
+ }
}
install_branch_config(0, head, option_origin, our->name);
}
} else if (our) {
- struct commit *c = lookup_commit_reference(&our->old_oid);
+ struct commit *c = lookup_commit_reference(the_repository,
+ &our->old_oid);
/* --branch specifies a non-branch (i.e. tags), detach HEAD */
update_ref(msg, "HEAD", &c->object.oid, NULL, REF_NO_DEREF,
UPDATE_REFS_DIE_ON_ERR);
}
if (!is_local && !complete_refs_before_fetch)
- transport_fetch_refs(transport, mapped_refs);
+ transport_fetch_refs(transport, mapped_refs, NULL);
remote_head = find_ref_by_name(refs, "HEAD");
remote_head_points_at =
if (is_local)
clone_local(path, git_dir);
else if (refs && complete_refs_before_fetch)
- transport_fetch_refs(transport, mapped_refs);
+ transport_fetch_refs(transport, mapped_refs, NULL);
update_remote_refs(refs, mapped_refs, remote_head_points_at,
branch_top.buf, reflog_msg.buf, transport,
- !is_local && !filter_options.choice);
+ !is_local);
update_head(our_head_points_at, remote_head, reflog_msg.buf);
#include "dir.h"
#include "lockfile.h"
#include "parse-options.h"
+#include "repository.h"
#include "commit-graph.h"
static char const * const builtin_commit_graph_usage[] = {
N_("git commit-graph [--object-dir <objdir>]"),
N_("git commit-graph read [--object-dir <objdir>]"),
- N_("git commit-graph write [--object-dir <objdir>] [--append] [--stdin-packs|--stdin-commits]"),
+ N_("git commit-graph verify [--object-dir <objdir>]"),
+ N_("git commit-graph write [--object-dir <objdir>] [--append] [--reachable|--stdin-packs|--stdin-commits]"),
+ NULL
+};
+
+static const char * const builtin_commit_graph_verify_usage[] = {
+ N_("git commit-graph verify [--object-dir <objdir>]"),
NULL
};
};
static const char * const builtin_commit_graph_write_usage[] = {
- N_("git commit-graph write [--object-dir <objdir>] [--append] [--stdin-packs|--stdin-commits]"),
+ N_("git commit-graph write [--object-dir <objdir>] [--append] [--reachable|--stdin-packs|--stdin-commits]"),
NULL
};
static struct opts_commit_graph {
const char *obj_dir;
+ int reachable;
int stdin_packs;
int stdin_commits;
int append;
} opts;
+
+static int graph_verify(int argc, const char **argv)
+{
+ struct commit_graph *graph = NULL;
+ char *graph_name;
+
+ static struct option builtin_commit_graph_verify_options[] = {
+ OPT_STRING(0, "object-dir", &opts.obj_dir,
+ N_("dir"),
+ N_("The object directory to store the graph")),
+ OPT_END(),
+ };
+
+ argc = parse_options(argc, argv, NULL,
+ builtin_commit_graph_verify_options,
+ builtin_commit_graph_verify_usage, 0);
+
+ if (!opts.obj_dir)
+ opts.obj_dir = get_object_directory();
+
+ graph_name = get_commit_graph_filename(opts.obj_dir);
+ graph = load_commit_graph_one(graph_name);
+ FREE_AND_NULL(graph_name);
+
+ if (!graph)
+ return 0;
+
+ return verify_commit_graph(the_repository, graph);
+}
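+
+/*
+ * Illustrative note, not part of the upstream change: because a
+ * missing graph file makes graph_verify() return 0 early, e.g.
+ *
+ *     git commit-graph verify --object-dir .git/objects
+ *
+ * succeeds in a repository that never wrote a commit-graph, and only
+ * reports a non-zero status when an existing graph fails
+ * verify_commit_graph().
+ */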
+
static int graph_read(int argc, const char **argv)
{
struct commit_graph *graph = NULL;
graph_name = get_commit_graph_filename(opts.obj_dir);
graph = load_commit_graph_one(graph_name);
- if (!graph)
+ if (!graph) {
+ UNLEAK(graph_name);
die("graph file %s does not exist", graph_name);
+ }
+
FREE_AND_NULL(graph_name);
printf("header: %08x %d %d %d %d\n",
printf(" large_edges");
printf("\n");
+ free_commit_graph(graph);
+
return 0;
}
static int graph_write(int argc, const char **argv)
{
- const char **pack_indexes = NULL;
- int packs_nr = 0;
- const char **commit_hex = NULL;
- int commits_nr = 0;
- const char **lines = NULL;
- int lines_nr = 0;
- int lines_alloc = 0;
+ struct string_list *pack_indexes = NULL;
+ struct string_list *commit_hex = NULL;
+ struct string_list lines;
static struct option builtin_commit_graph_write_options[] = {
OPT_STRING(0, "object-dir", &opts.obj_dir,
N_("dir"),
N_("The object directory to store the graph")),
+ OPT_BOOL(0, "reachable", &opts.reachable,
+ N_("start walk at all refs")),
OPT_BOOL(0, "stdin-packs", &opts.stdin_packs,
N_("scan pack-indexes listed by stdin for commits")),
OPT_BOOL(0, "stdin-commits", &opts.stdin_commits,
builtin_commit_graph_write_options,
builtin_commit_graph_write_usage, 0);
- if (opts.stdin_packs && opts.stdin_commits)
- die(_("cannot use both --stdin-commits and --stdin-packs"));
+ if (opts.reachable + opts.stdin_packs + opts.stdin_commits > 1)
+ die(_("use at most one of --reachable, --stdin-commits, or --stdin-packs"));
if (!opts.obj_dir)
opts.obj_dir = get_object_directory();
+ if (opts.reachable) {
+ write_commit_graph_reachable(opts.obj_dir, opts.append);
+ return 0;
+ }
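+
+ /*
+ * Illustrative note, not part of the upstream change: the three
+ * input modes are mutually exclusive, e.g.
+ *
+ *     git commit-graph write --reachable
+ *     git rev-parse HEAD | git commit-graph write --stdin-commits
+ *
+ * are both fine on their own, while combining them dies with
+ * "use at most one of --reachable, --stdin-commits, or --stdin-packs".
+ */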
+
+ string_list_init(&lines, 0);
if (opts.stdin_packs || opts.stdin_commits) {
struct strbuf buf = STRBUF_INIT;
- lines_nr = 0;
- lines_alloc = 128;
- ALLOC_ARRAY(lines, lines_alloc);
-
- while (strbuf_getline(&buf, stdin) != EOF) {
- ALLOC_GROW(lines, lines_nr + 1, lines_alloc);
- lines[lines_nr++] = strbuf_detach(&buf, NULL);
- }
-
- if (opts.stdin_packs) {
- pack_indexes = lines;
- packs_nr = lines_nr;
- }
- if (opts.stdin_commits) {
- commit_hex = lines;
- commits_nr = lines_nr;
- }
+
+ while (strbuf_getline(&buf, stdin) != EOF)
+ string_list_append(&lines, strbuf_detach(&buf, NULL));
+
+ if (opts.stdin_packs)
+ pack_indexes = &lines;
+ if (opts.stdin_commits)
+ commit_hex = &lines;
}
write_commit_graph(opts.obj_dir,
pack_indexes,
- packs_nr,
commit_hex,
- commits_nr,
opts.append);
+ string_list_clear(&lines, 0);
return 0;
}
if (argc > 0) {
if (!strcmp(argv[0], "read"))
return graph_read(argc, argv);
+ if (!strcmp(argv[0], "verify"))
+ return graph_verify(argc, argv);
if (!strcmp(argv[0], "write"))
return graph_write(argc, argv);
}
#include "cache.h"
#include "config.h"
#include "object-store.h"
+#include "repository.h"
#include "commit.h"
#include "tree.h"
#include "builtin.h"
if (get_oid_commit(argv[i], &oid))
die("Not a valid object name %s", argv[i]);
assert_oid_type(&oid, OBJ_COMMIT);
- new_parent(lookup_commit(&oid), &parents);
+ new_parent(lookup_commit(the_repository, &oid),
+ &parents);
continue;
}
{ OPTION_CALLBACK, (s), (l), (v), NULL, (h), PARSE_OPT_NOARG | \
PARSE_OPT_NONEG, option_parse_type, (i) }
-static struct option builtin_config_options[];
+static NORETURN void usage_builtin_config(void);
static int option_parse_type(const struct option *opt, const char *arg,
int unset)
* --type=int'.
*/
error("only one type at a time.");
- usage_with_options(builtin_config_usage,
- builtin_config_options);
+ usage_builtin_config();
}
*to_type = new_type;
OPT_END(),
};
+static NORETURN void usage_builtin_config(void)
+{
+ usage_with_options(builtin_config_usage, builtin_config_options);
+}
+
static void check_argc(int argc, int min, int max) {
if (argc >= min && argc <= max)
return;
error("wrong number of arguments");
- usage_with_options(builtin_config_usage, builtin_config_options);
+ usage_builtin_config();
}
static void show_config_origin(struct strbuf *buf)
if (use_global_config + use_system_config + use_local_config +
!!given_config_source.file + !!given_config_source.blob > 1) {
error("only one config file at a time.");
- usage_with_options(builtin_config_usage, builtin_config_options);
+ usage_builtin_config();
}
if (use_local_config && nongit)
if ((actions & (ACTION_GET_COLOR|ACTION_GET_COLORBOOL)) && type) {
error("--get-color and variable type are incoherent");
- usage_with_options(builtin_config_usage, builtin_config_options);
+ usage_builtin_config();
}
if (HAS_MULTI_BITS(actions)) {
error("only one action at a time.");
- usage_with_options(builtin_config_usage, builtin_config_options);
+ usage_builtin_config();
}
if (actions == 0)
switch (argc) {
case 2: actions = ACTION_SET; break;
case 3: actions = ACTION_SET_ALL; break;
default:
- usage_with_options(builtin_config_usage, builtin_config_options);
+ usage_builtin_config();
}
if (omit_values &&
!(actions == ACTION_LIST || actions == ACTION_GET_REGEXP)) {
error("--name-only is only applicable to --list or --get-regexp");
- usage_with_options(builtin_config_usage, builtin_config_options);
+ usage_builtin_config();
}
if (show_origin && !(actions &
(ACTION_GET|ACTION_GET_ALL|ACTION_GET_REGEXP|ACTION_LIST))) {
error("--show-origin is only applicable to --get, --get-all, "
"--get-regexp, and --list.");
- usage_with_options(builtin_config_usage, builtin_config_options);
+ usage_builtin_config();
}
if (default_value && !(actions & ACTION_GET)) {
error("--default is only applicable to --get");
- usage_with_options(builtin_config_usage,
- builtin_config_options);
+ usage_builtin_config();
}
if (actions & PAGING_ACTIONS)
struct tag *t;
if (!e->tag) {
- t = lookup_tag(&e->oid);
+ t = lookup_tag(the_repository, &e->oid);
if (!t || parse_tag(t))
return 1;
e->tag = t;
}
- t = lookup_tag(oid);
+ t = lookup_tag(the_repository, oid);
if (!t || parse_tag(t))
return 0;
*tag = t;
static void append_name(struct commit_name *n, struct strbuf *dst)
{
if (n->prio == 2 && !n->tag) {
- n->tag = lookup_tag(&n->oid);
+ n->tag = lookup_tag(the_repository, &n->oid);
if (!n->tag || parse_tag(n->tag))
die(_("annotated tag %s not available"), n->path);
}
unsigned long seen_commits = 0;
unsigned int unannotated_cnt = 0;
- cmit = lookup_commit_reference(oid);
+ cmit = lookup_commit_reference(the_repository, oid);
n = find_commit_name(&cmit->object.oid);
if (n && (tags || all || n->prio == 2)) {
init_commit_names(&commit_names);
n = hashmap_iter_first(&names, &iter);
for (; n; n = hashmap_iter_next(&iter)) {
- c = lookup_commit_reference_gently(&n->peeled, 1);
+ c = lookup_commit_reference_gently(the_repository,
+ &n->peeled, 1);
if (c)
*commit_names_at(&commit_names, c) = n;
}
if (get_oid(arg, &oid))
die(_("Not a valid object name %s"), arg);
- cmit = lookup_commit_reference_gently(&oid, 1);
+ cmit = lookup_commit_reference_gently(the_repository, &oid, 1);
if (cmit)
describe_commit(&oid, &sb);
#include "log-tree.h"
#include "builtin.h"
#include "submodule.h"
+#include "repository.h"
static struct rev_info log_tree_opt;
static int diff_tree_commit_oid(const struct object_id *oid)
{
- struct commit *commit = lookup_commit_reference(oid);
+ struct commit *commit = lookup_commit_reference(the_repository, oid);
if (!commit)
return -1;
return log_tree_commit(&log_tree_opt, commit);
/* Graft the fake parents locally to the commit */
while (isspace(*p++) && !parse_oid_hex(p, &oid, &p)) {
- struct commit *parent = lookup_commit(&oid);
+ struct commit *parent = lookup_commit(the_repository, &oid);
if (!pptr) {
/* Free the real parent list */
free_commit_list(commit->parents);
struct tree *tree2;
if (!isspace(*p++) || parse_oid_hex(p, &oid, &p) || *p)
return error("Need exactly two trees, separated by a space");
- tree2 = lookup_tree(&oid);
+ tree2 = lookup_tree(the_repository, &oid);
if (!tree2 || parse_tree(tree2))
return -1;
printf("%s %s\n", oid_to_hex(&tree1->object.oid),
line[len-1] = 0;
if (parse_oid_hex(line, &oid, &p))
return -1;
- obj = parse_object(&oid);
+ obj = parse_object(the_repository, &oid);
if (!obj)
return -1;
if (obj->type == OBJ_COMMIT)
add_head_to_pending(&rev);
if (!rev.pending.nr) {
struct tree *tree;
- tree = lookup_tree(the_hash_algo->empty_tree);
+ tree = lookup_tree(the_repository,
+ the_repository->hash_algo->empty_tree);
add_pending_object(&rev, &tree->object, "HEAD");
}
break;
const char *name = entry->name;
int flags = (obj->flags & UNINTERESTING);
if (!obj->parsed)
- obj = parse_object(&obj->oid);
- obj = deref_tag(obj, NULL, 0);
+ obj = parse_object(the_repository, &obj->oid);
+ obj = deref_tag(the_repository, obj, NULL, 0);
if (!obj)
die(_("invalid object '%s' given."), name);
if (obj->type == OBJ_COMMIT)
struct cache_entry *ce;
int ret;
- ce = make_cache_entry(mode, oid->hash, path, 0, 0);
+ ce = make_transient_cache_entry(mode, oid, path, 0);
ret = checkout_entry(ce, state, NULL);
- free(ce);
+ discard_cache_entry(ce);
return ret;
}
* index.
*/
struct cache_entry *ce2 =
- make_cache_entry(rmode, roid.hash,
+ make_cache_entry(&wtindex, rmode, &roid,
dst_path, 0, 0);
add_index_entry(&wtindex, ce2,
if (is_null_oid(oid))
return;
- object = lookup_object(oid->hash);
+ object = lookup_object(the_repository, oid->hash);
if (object && object->flags & SHOWN)
return;
if (anonymize) {
buf = anonymize_blob(&size);
- object = (struct object *)lookup_blob(oid);
+ object = (struct object *)lookup_blob(the_repository, oid);
eaten = 0;
} else {
buf = read_object_file(oid, &type, &size);
die ("Could not read blob %s", oid_to_hex(oid));
if (check_object_signature(oid, buf, size, type_name(type)) < 0)
die("sha1 mismatch in blob %s", oid_to_hex(oid));
- object = parse_object_buffer(oid, type, size, buf, &eaten);
+ object = parse_object_buffer(the_repository, oid, type,
+ size, buf, &eaten);
}
if (!object)
anonymize_sha1(&spec->oid) :
spec->oid.hash));
else {
- struct object *object = lookup_object(spec->oid.hash);
+ struct object *object = lookup_object(the_repository,
+ spec->oid.hash);
printf("M %06o :%d ", spec->mode,
get_object_mark(object));
}
/* handle nested tags */
while (tag && tag->object.type == OBJ_TAG) {
- parse_object(&tag->object.oid);
+ parse_object(the_repository, &tag->object.oid);
string_list_append(&extra_refs, full_name)->util = tag;
tag = (struct tag *)tag->tagged;
}
/* only commits */
continue;
- commit = lookup_commit(&oid);
+ commit = lookup_commit(the_repository, &oid);
if (!commit)
die("not a commit? can't happen: %s", oid_to_hex(&oid));
static struct refspec refmap = REFSPEC_INIT_FETCH;
static struct list_objects_filter_options filter_options;
static struct string_list server_options = STRING_LIST_INIT_DUP;
+static struct string_list negotiation_tip = STRING_LIST_INIT_NODUP;
static int git_fetch_config(const char *k, const char *v, void *cb)
{
TRANSPORT_FAMILY_IPV4),
OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
TRANSPORT_FAMILY_IPV6),
+ OPT_STRING_LIST(0, "negotiation-tip", &negotiation_tip, N_("revision"),
+ N_("report that we have only objects reachable from this object")),
OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
OPT_END()
};
return 0;
}
-static void find_non_local_tags(struct transport *transport,
- struct ref **head,
- struct ref ***tail)
+static void find_non_local_tags(const struct ref *refs,
+ struct ref **head,
+ struct ref ***tail)
{
struct string_list existing_refs = STRING_LIST_INIT_DUP;
struct string_list remote_refs = STRING_LIST_INIT_NODUP;
struct string_list_item *item = NULL;
for_each_ref(add_existing, &existing_refs);
- for (ref = transport_get_remote_refs(transport, NULL); ref; ref = ref->next) {
+ for (ref = refs; ref; ref = ref->next) {
if (!starts_with(ref->name, "refs/tags/"))
continue;
string_list_clear(&remote_refs, 0);
}
-static struct ref *get_ref_map(struct transport *transport,
+static struct ref *get_ref_map(struct remote *remote,
+ const struct ref *remote_refs,
struct refspec *rs,
int tags, int *autotags)
{
struct ref *rm;
struct ref *ref_map = NULL;
struct ref **tail = &ref_map;
- struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
/* opportunistically-updated references: */
struct ref *orefs = NULL, **oref_tail = &orefs;
- const struct ref *remote_refs;
-
- if (rs->nr)
- refspec_ref_prefixes(rs, &ref_prefixes);
- else if (transport->remote && transport->remote->fetch.nr)
- refspec_ref_prefixes(&transport->remote->fetch, &ref_prefixes);
-
- if (ref_prefixes.argc &&
- (tags == TAGS_SET || (tags == TAGS_DEFAULT && !rs->nr))) {
- argv_array_push(&ref_prefixes, "refs/tags/");
- }
-
- remote_refs = transport_get_remote_refs(transport, &ref_prefixes);
-
- argv_array_clear(&ref_prefixes);
+ struct string_list existing_refs = STRING_LIST_INIT_DUP;
if (rs->nr) {
struct refspec *fetch_refspec;
if (refmap.nr)
fetch_refspec = &refmap;
else
- fetch_refspec = &transport->remote->fetch;
+ fetch_refspec = &remote->fetch;
for (i = 0; i < fetch_refspec->nr; i++)
get_fetch_map(ref_map, &fetch_refspec->items[i], &oref_tail, 1);
die("--refmap option is only meaningful with command-line refspec(s).");
} else {
/* Use the defaults */
- struct remote *remote = transport->remote;
struct branch *branch = branch_get(NULL);
int has_merge = branch_has_merge_config(branch);
if (remote &&
/* also fetch all tags */
get_fetch_map(remote_refs, tag_refspec, &tail, 0);
else if (tags == TAGS_DEFAULT && *autotags)
- find_non_local_tags(transport, &ref_map, &tail);
+ find_non_local_tags(remote_refs, &ref_map, &tail);
/* Now append any refs to be updated opportunistically: */
*tail = orefs;
tail = &rm->next;
}
- return ref_remove_duplicates(ref_map);
+ ref_map = ref_remove_duplicates(ref_map);
+
+ for_each_ref(add_existing, &existing_refs);
+ for (rm = ref_map; rm; rm = rm->next) {
+ if (rm->peer_ref) {
+ struct string_list_item *peer_item =
+ string_list_lookup(&existing_refs,
+ rm->peer_ref->name);
+ if (peer_item) {
+ struct object_id *old_oid = peer_item->util;
+ oidcpy(&rm->peer_ref->old_oid, old_oid);
+ }
+ }
+ }
+ string_list_clear(&existing_refs, 1);
+
+ return ref_map;
}
#define STORE_REF_ERROR_OTHER 1
return r;
}
- current = lookup_commit_reference_gently(&ref->old_oid, 1);
- updated = lookup_commit_reference_gently(&ref->new_oid, 1);
+ current = lookup_commit_reference_gently(the_repository,
+ &ref->old_oid, 1);
+ updated = lookup_commit_reference_gently(the_repository,
+ &ref->new_oid, 1);
if (!current || !updated) {
const char *msg;
const char *what;
}
static int store_updated_refs(const char *raw_url, const char *remote_name,
- struct ref *ref_map)
+ int connectivity_checked, struct ref *ref_map)
{
FILE *fp;
struct commit *commit;
else
url = xstrdup("foreign");
- rm = ref_map;
- if (check_connected(iterate_ref_map, &rm, NULL)) {
- rc = error(_("%s did not send all necessary objects\n"), url);
- goto abort;
+ if (!connectivity_checked) {
+ rm = ref_map;
+ if (check_connected(iterate_ref_map, &rm, NULL)) {
+ rc = error(_("%s did not send all necessary objects\n"), url);
+ goto abort;
+ }
}
prepare_format_display(ref_map);
continue;
}
- commit = lookup_commit_reference_gently(&rm->old_oid,
+ commit = lookup_commit_reference_gently(the_repository,
+ &rm->old_oid,
1);
if (!commit)
rm->fetch_head_status = FETCH_HEAD_NOT_FOR_MERGE;
return check_connected(iterate_ref_map, &rm, &opt);
}
-static int fetch_refs(struct transport *transport, struct ref *ref_map)
+static int fetch_refs(struct transport *transport, struct ref *ref_map,
+ struct ref **updated_remote_refs)
{
int ret = quickfetch(ref_map);
if (ret)
- ret = transport_fetch_refs(transport, ref_map);
+ ret = transport_fetch_refs(transport, ref_map,
+ updated_remote_refs);
if (!ret)
- ret |= store_updated_refs(transport->url,
- transport->remote->name,
- ref_map);
+ /*
+ * Keep the new pack's ".keep" file around to allow the caller
+ * time to update refs to reference the new objects.
+ */
+ return 0;
+ transport_unlock_pack(transport);
+ return ret;
+}
+
+/* Update local refs based on the ref values fetched from a remote */
+static int consume_refs(struct transport *transport, struct ref *ref_map)
+{
+ int connectivity_checked = transport->smart_options
+ ? transport->smart_options->connectivity_checked : 0;
+ int ret = store_updated_refs(transport->url,
+ transport->remote->name,
+ connectivity_checked,
+ ref_map);
transport_unlock_pack(transport);
return ret;
}
name, transport->url);
}
+
+static int add_oid(const char *refname, const struct object_id *oid, int flags,
+ void *cb_data)
+{
+ struct oid_array *oids = cb_data;
+
+ oid_array_append(oids, oid);
+ return 0;
+}
+
+static void add_negotiation_tips(struct git_transport_options *smart_options)
+{
+ struct oid_array *oids = xcalloc(1, sizeof(*oids));
+ int i;
+
+ for (i = 0; i < negotiation_tip.nr; i++) {
+ const char *s = negotiation_tip.items[i].string;
+ int old_nr;
+ if (!has_glob_specials(s)) {
+ struct object_id oid;
+ if (get_oid(s, &oid))
+ die("%s is not a valid object", s);
+ oid_array_append(oids, &oid);
+ continue;
+ }
+ old_nr = oids->nr;
+ for_each_glob_ref(add_oid, s, oids);
+ if (old_nr == oids->nr)
+ warning("Ignoring --negotiation-tip=%s because it does not match any refs",
+ s);
+ }
+ smart_options->negotiation_tips = oids;
+}
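+
+/*
+ * Illustrative note, not part of the upstream change: each tip may be
+ * a single revision or a glob, e.g.
+ *
+ *     git fetch --negotiation-tip=master origin
+ *     git fetch --negotiation-tip='refs/heads/release-*' origin
+ *
+ * A glob that matches no ref only triggers the warning above, whereas
+ * an unresolvable single revision dies in add_negotiation_tips().
+ */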
+
static struct transport *prepare_transport(struct remote *remote, int deepen)
{
struct transport *transport;
filter_options.filter_spec);
set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
}
+ if (negotiation_tip.nr) {
+ if (transport->smart_options)
+ add_negotiation_tips(transport->smart_options);
+ else
+ warning("Ignoring --negotiation-tip because the protocol does not support it.");
+ }
return transport;
}
transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, NULL);
transport_set_option(transport, TRANS_OPT_DEPTH, "0");
transport_set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, NULL);
- fetch_refs(transport, ref_map);
+ if (!fetch_refs(transport, ref_map, NULL))
+ consume_refs(transport, ref_map);
if (gsecondary) {
transport_disconnect(gsecondary);
static int do_fetch(struct transport *transport,
struct refspec *rs)
{
- struct string_list existing_refs = STRING_LIST_INIT_DUP;
struct ref *ref_map;
- struct ref *rm;
int autotags = (transport->remote->fetch_tags == 1);
int retcode = 0;
-
- for_each_ref(add_existing, &existing_refs);
+ const struct ref *remote_refs;
+ struct ref *updated_remote_refs = NULL;
+ struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
if (tags == TAGS_DEFAULT) {
if (transport->remote->fetch_tags == 2)
goto cleanup;
}
- ref_map = get_ref_map(transport, rs, tags, &autotags);
- if (!update_head_ok)
- check_not_current_branch(ref_map);
+ if (rs->nr)
+ refspec_ref_prefixes(rs, &ref_prefixes);
+ else if (transport->remote && transport->remote->fetch.nr)
+ refspec_ref_prefixes(&transport->remote->fetch, &ref_prefixes);
- for (rm = ref_map; rm; rm = rm->next) {
- if (rm->peer_ref) {
- struct string_list_item *peer_item =
- string_list_lookup(&existing_refs,
- rm->peer_ref->name);
- if (peer_item) {
- struct object_id *old_oid = peer_item->util;
- oidcpy(&rm->peer_ref->old_oid, old_oid);
- }
- }
+ if (ref_prefixes.argc &&
+ (tags == TAGS_SET || (tags == TAGS_DEFAULT && !rs->nr))) {
+ argv_array_push(&ref_prefixes, "refs/tags/");
}
+ remote_refs = transport_get_remote_refs(transport, &ref_prefixes);
+ argv_array_clear(&ref_prefixes);
+
+ ref_map = get_ref_map(transport->remote, remote_refs, rs,
+ tags, &autotags);
+ if (!update_head_ok)
+ check_not_current_branch(ref_map);
+
if (tags == TAGS_DEFAULT && autotags)
transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, "1");
if (prune) {
transport->url);
}
}
- if (fetch_refs(transport, ref_map)) {
+
+ if (fetch_refs(transport, ref_map, &updated_remote_refs)) {
+ free_refs(ref_map);
+ retcode = 1;
+ goto cleanup;
+ }
+ if (updated_remote_refs) {
+ /*
+ * Regenerate ref_map using the updated remote refs. This is
+ * to account for additional information which may be provided
+ * by the transport (e.g. shallow info).
+ */
+ free_refs(ref_map);
+ ref_map = get_ref_map(transport->remote, updated_remote_refs, rs,
+ tags, &autotags);
+ free_refs(updated_remote_refs);
+ }
+ if (consume_refs(transport, ref_map)) {
free_refs(ref_map);
retcode = 1;
goto cleanup;
if (tags == TAGS_DEFAULT && autotags) {
struct ref **tail = &ref_map;
ref_map = NULL;
- find_non_local_tags(transport, &ref_map, &tail);
+ find_non_local_tags(remote_refs, &ref_map, &tail);
if (ref_map)
backfill_tags(transport, ref_map);
free_refs(ref_map);
}
cleanup:
- string_list_clear(&existing_refs, 1);
return retcode;
}
#include "branch.h"
#include "fmt-merge-msg.h"
#include "gpg-interface.h"
+#include "repository.h"
static const char * const fmt_merge_msg_usage[] = {
N_("git fmt-merge-msg [-m <message>] [--log[=<n>] | --no-log] [--file <file>]"),
struct string_list_item *item;
int pulling_head = 0;
struct object_id oid;
+ const unsigned hexsz = the_hash_algo->hexsz;
- if (len < GIT_SHA1_HEXSZ + 3 || line[GIT_SHA1_HEXSZ] != '\t')
+ if (len < hexsz + 3 || line[hexsz] != '\t')
return 1;
- if (starts_with(line + GIT_SHA1_HEXSZ + 1, "not-for-merge"))
+ if (starts_with(line + hexsz + 1, "not-for-merge"))
return 0;
- if (line[GIT_SHA1_HEXSZ + 1] != '\t')
+ if (line[hexsz + 1] != '\t')
return 2;
i = get_oid_hex(line, &oid);
if (line[len - 1] == '\n')
line[len - 1] = 0;
- line += GIT_SHA1_HEXSZ + 2;
+ line += hexsz + 2;
/*
* At this point, line points at the beginning of comment e.g.
const struct object_id *oid = &origin_data->oid;
int limit = opts->shortlog_len;
- branch = deref_tag(parse_object(oid), oid_to_hex(oid), GIT_SHA1_HEXSZ);
+ branch = deref_tag(the_repository, parse_object(the_repository, oid),
+ oid_to_hex(oid),
+ the_hash_algo->hexsz);
if (!branch || branch->type != OBJ_COMMIT)
return;
int len;
char *p = in->buf + pos;
char *newline = strchr(p, '\n');
+ const char *q;
struct object_id oid;
struct commit *parent;
struct object *obj;
len = newline ? newline - p : strlen(p);
pos += len + !!newline;
- if (len < GIT_SHA1_HEXSZ + 3 ||
- get_oid_hex(p, &oid) ||
- p[GIT_SHA1_HEXSZ] != '\t' ||
- p[GIT_SHA1_HEXSZ + 1] != '\t')
+ if (parse_oid_hex(p, &oid, &q) ||
+ q[0] != '\t' ||
+ q[1] != '\t')
continue; /* skip not-for-merge */
/*
* Do not use get_merge_parent() here; we do not have
* "name" here and we do not want to contaminate its
* util field yet.
*/
- obj = parse_object(&oid);
+ obj = parse_object(the_repository, &oid);
parent = (struct commit *)peel_to_type(NULL, 0, obj, OBJ_COMMIT);
if (!parent)
continue;
commit_list_insert(parent, &parents);
add_merge_parent(result, &obj->oid, &parent->object.oid);
}
- head_commit = lookup_commit(head);
+ head_commit = lookup_commit(the_repository, head);
if (head_commit)
commit_list_insert(head_commit, &parents);
reduce_heads_replace(&parents);
#include "decorate.h"
#include "packfile.h"
#include "object-store.h"
+#include "run-command.h"
#define REACHABLE 0x0001
#define SEEN 0x0002
#define ERROR_REACHABLE 02
#define ERROR_PACK 04
#define ERROR_REFS 010
+#define ERROR_COMMIT_GRAPH 020
static const char *describe_object(struct object *obj)
{
enum object_type type = oid_object_info(the_repository,
&obj->oid, NULL);
if (type > 0)
- object_as_type(obj, type, 0);
+ object_as_type(the_repository, obj, type, 0);
}
ret = type_name(obj->type);
* verify_packfile(), data_valid variable for details.
*/
struct object *obj;
- obj = parse_object_buffer(oid, type, size, buffer, eaten);
+ obj = parse_object_buffer(the_repository, oid, type, size, buffer,
+ eaten);
if (!obj) {
errors_found |= ERROR_OBJECT;
return error("%s: object corrupt or missing", oid_to_hex(oid));
struct object *obj;
if (!is_null_oid(oid)) {
- obj = lookup_object(oid->hash);
+ obj = lookup_object(the_repository, oid->hash);
if (obj && (obj->flags & HAS_OBJ)) {
if (timestamp && name_objects)
add_decoration(fsck_walk_options.object_names,
{
struct object *obj;
- obj = parse_object(oid);
+ obj = parse_object(the_repository, oid);
if (!obj) {
if (is_promisor_object(oid)) {
/*
if (!contents && type != OBJ_BLOB)
BUG("read_loose_object streamed a non-blob");
- obj = parse_object_buffer(oid, type, size, contents, &eaten);
+ obj = parse_object_buffer(the_repository, oid, type, size,
+ contents, &eaten);
+
if (!obj) {
errors_found |= ERROR_OBJECT;
error("%s: object could not be parsed: %s",
fprintf(stderr, "Checking cache tree\n");
if (0 <= it->entry_count) {
- struct object *obj = parse_object(&it->oid);
+ struct object *obj = parse_object(the_repository, &it->oid);
if (!obj) {
error("%s: invalid sha1 pointer in cache-tree",
oid_to_hex(&it->oid));
const char *arg = argv[i];
struct object_id oid;
if (!get_oid(arg, &oid)) {
- struct object *obj = lookup_object(oid.hash);
+ struct object *obj = lookup_object(the_repository,
+ oid.hash);
if (!obj || !(obj->flags & HAS_OBJ)) {
if (is_promisor_object(&oid))
mode = active_cache[i]->ce_mode;
if (S_ISGITLINK(mode))
continue;
- blob = lookup_blob(&active_cache[i]->oid);
+ blob = lookup_blob(the_repository,
+ &active_cache[i]->oid);
if (!blob)
continue;
obj = &blob->object;
}
check_connectivity();
+
+ if (!git_config_get_bool("core.commitgraph", &i) && i) {
+ struct child_process commit_graph_verify = CHILD_PROCESS_INIT;
+ const char *verify_argv[] = { "commit-graph", "verify", NULL, NULL, NULL };
+
+ commit_graph_verify.argv = verify_argv;
+ commit_graph_verify.git_cmd = 1;
+ if (run_command(&commit_graph_verify))
+ errors_found |= ERROR_COMMIT_GRAPH;
+
+ prepare_alt_odb(the_repository);
+ for (alt = the_repository->objects->alt_odb_list; alt; alt = alt->next) {
+ verify_argv[2] = "--object-dir";
+ verify_argv[3] = alt->path;
+ if (run_command(&commit_graph_verify))
+ errors_found |= ERROR_COMMIT_GRAPH;
+ }
+ }
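+
+ /*
+ * Illustrative note, not part of the upstream change: this extra
+ * check only runs when core.commitgraph is enabled, e.g. after
+ *
+ *     git config core.commitgraph true
+ *     git commit-graph write --reachable
+ *
+ * "git fsck" also spawns "git commit-graph verify" for the local
+ * object directory and for each alternate.
+ */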
+
return errors_found;
}
#include "sigchain.h"
#include "argv-array.h"
#include "commit.h"
+#include "commit-graph.h"
#include "packfile.h"
#include "object-store.h"
#include "pack.h"
static int aggressive_window = 250;
static int gc_auto_threshold = 6700;
static int gc_auto_pack_limit = 50;
+static int gc_write_commit_graph;
static int detach_auto = 1;
static timestamp_t gc_log_expire_time;
static const char *gc_log_expire = "1.day.ago";
git_config_get_int("gc.aggressivedepth", &aggressive_depth);
git_config_get_int("gc.auto", &gc_auto_threshold);
git_config_get_int("gc.autopacklimit", &gc_auto_pack_limit);
+ git_config_get_bool("gc.writecommitgraph", &gc_write_commit_graph);
git_config_get_bool("gc.autodetach", &detach_auto);
git_config_get_expiry("gc.pruneexpire", &prune_expire);
git_config_get_expiry("gc.worktreepruneexpire", &prune_worktrees_expire);
return -1;
if (!repository_format_precious_objects) {
+ close_all_packs(the_repository->objects);
if (run_command_v_opt(repack.argv, RUN_GIT_CMD))
return error(FAILED_RUN, repack.argv[0]);
if (pack_garbage.nr > 0)
clean_pack_garbage();
+ if (gc_write_commit_graph)
+ write_commit_graph_reachable(get_object_directory(), 0);
+
if (auto_gc && too_many_loose_objects())
warning(_("There are too many unreachable loose objects; "
"run 'git prune' to remove them."));
for (i = 0; i < nr; i++) {
struct object *real_obj;
- real_obj = deref_tag(list->objects[i].item, NULL, 0);
+ real_obj = deref_tag(the_repository, list->objects[i].item,
+ NULL, 0);
/* load the gitmodules file for this rev */
if (recurse_submodules) {
OPT_BOOL_F('z', "null", &opt.null_following_name,
N_("print NUL after filenames"),
PARSE_OPT_NOCOMPLETE),
+ OPT_BOOL('o', "only-matching", &opt.only_matching,
+ N_("show only matching parts of a line")),
OPT_BOOL('c', "count", &opt.count,
N_("show the number of matches instead of matching lines")),
OPT__COLOR(&opt.color, N_("highlight matches")),
if (!opt.pattern_list)
die(_("no pattern given."));
+ /* --only-matching has no effect with --invert. */
+ if (opt.invert)
+ opt.only_matching = 0;
+
/*
* We have to find "--" in a separate pass, because its presence
* influences how we will parse arguments that come before it.
if (strict || do_fsck_object) {
read_lock();
if (type == OBJ_BLOB) {
- struct blob *blob = lookup_blob(oid);
+ struct blob *blob = lookup_blob(the_repository, oid);
if (blob)
blob->object.flags |= FLAG_CHECKED;
else
* we do not need to free the memory here, as the
* buf is deleted by the caller.
*/
- obj = parse_object_buffer(oid, type, size, buf,
+ obj = parse_object_buffer(the_repository, oid, type,
+ size, buf,
&eaten);
if (!obj)
die(_("invalid %s"), type_name(type));
#include "gpg-interface.h"
#include "progress.h"
#include "commit-slab.h"
+#include "repository.h"
#define MAIL_DEFAULT_WRAP 72
rev.shown_one = 1;
if (ret)
break;
- o = parse_object(&t->tagged->oid);
+ o = parse_object(the_repository, &t->tagged->oid);
if (!o)
ret = error(_("Could not read object %s"),
oid_to_hex(&t->tagged->oid));
o2 = rev->pending.objects[1].item;
flags1 = o1->flags;
flags2 = o2->flags;
- c1 = lookup_commit_reference(&o1->oid);
- c2 = lookup_commit_reference(&o2->oid);
+ c1 = lookup_commit_reference(the_repository, &o1->oid);
+ c2 = lookup_commit_reference(the_repository, &o2->oid);
if ((flags1 & UNINTERESTING) == (flags2 & UNINTERESTING))
die(_("Not a range."));
{
struct object_id oid;
if (get_oid(arg, &oid) == 0) {
- struct commit *commit = lookup_commit_reference(&oid);
+ struct commit *commit = lookup_commit_reference(the_repository,
+ &oid);
if (commit) {
commit->object.flags |= flags;
add_pending_object(revs, &commit->object, arg);
#include "diff.h"
#include "revision.h"
#include "parse-options.h"
+#include "repository.h"
static int show_merge_base(struct commit **rev, int rev_nr, int show_all)
{
if (get_oid(arg, &revkey))
die("Not a valid object name %s", arg);
- r = lookup_commit_reference(&revkey);
+ r = lookup_commit_reference(the_repository, &revkey);
if (!r)
die("Not a valid commit name %s", arg);
return 1;
}
-struct rev_collect {
- struct commit **commit;
- int nr;
- int alloc;
- unsigned int initial : 1;
-};
-
-static void add_one_commit(struct object_id *oid, struct rev_collect *revs)
-{
- struct commit *commit;
-
- if (is_null_oid(oid))
- return;
-
- commit = lookup_commit(oid);
- if (!commit ||
- (commit->object.flags & TMP_MARK) ||
- parse_commit(commit))
- return;
-
- ALLOC_GROW(revs->commit, revs->nr + 1, revs->alloc);
- revs->commit[revs->nr++] = commit;
- commit->object.flags |= TMP_MARK;
-}
-
-static int collect_one_reflog_ent(struct object_id *ooid, struct object_id *noid,
- const char *ident, timestamp_t timestamp,
- int tz, const char *message, void *cbdata)
-{
- struct rev_collect *revs = cbdata;
-
- if (revs->initial) {
- revs->initial = 0;
- add_one_commit(ooid, revs);
- }
- add_one_commit(noid, revs);
- return 0;
-}
-
static int handle_fork_point(int argc, const char **argv)
{
struct object_id oid;
char *refname;
+ struct commit *derived, *fork_point;
const char *commitname;
- struct rev_collect revs;
- struct commit *derived;
- struct commit_list *bases;
- int i, ret = 0;
switch (dwim_ref(argv[0], strlen(argv[0]), &oid, &refname)) {
case 0:
if (get_oid(commitname, &oid))
die("Not a valid object name: '%s'", commitname);
- derived = lookup_commit_reference(&oid);
- memset(&revs, 0, sizeof(revs));
- revs.initial = 1;
- for_each_reflog_ent(refname, collect_one_reflog_ent, &revs);
+ derived = lookup_commit_reference(the_repository, &oid);
- if (!revs.nr && !get_oid(refname, &oid))
- add_one_commit(&oid, &revs);
+ fork_point = get_fork_point(refname, derived);
- for (i = 0; i < revs.nr; i++)
- revs.commit[i]->object.flags &= ~TMP_MARK;
-
- bases = get_merge_bases_many_dirty(derived, revs.nr, revs.commit);
-
- /*
- * There should be one and only one merge base, when we found
- * a common ancestor among reflog entries.
- */
- if (!bases || bases->next) {
- ret = 1;
- goto cleanup_return;
- }
-
- /* And the found one must be one of the reflog entries */
- for (i = 0; i < revs.nr; i++)
- if (&bases->item->object == &revs.commit[i]->object)
- break; /* found */
- if (revs.nr <= i) {
- ret = 1; /* not found */
- goto cleanup_return;
- }
-
- printf("%s\n", oid_to_hex(&bases->item->object.oid));
+ if (!fork_point)
+ return 1;
-cleanup_return:
- free_commit_list(bases);
- return ret;
+ printf("%s\n", oid_to_hex(&fork_point->object.oid));
+ return 0;
}
int cmd_merge_base(int argc, const char **argv, const char *prefix)
static const char *better_branch_name(const char *branch)
{
- static char githead_env[8 + GIT_SHA1_HEXSZ + 1];
+ static char githead_env[8 + GIT_MAX_HEXSZ + 1];
char *name;
- if (strlen(branch) != GIT_SHA1_HEXSZ)
+ if (strlen(branch) != the_hash_algo->hexsz)
return branch;
xsnprintf(githead_env, sizeof(githead_env), "GITHEAD_%s", branch);
name = getenv(githead_env);
#include "tree-walk.h"
#include "xdiff-interface.h"
#include "object-store.h"
+#include "repository.h"
#include "blob.h"
#include "exec-cmd.h"
#include "merge-blobs.h"
res->stage = stage;
res->path = path;
res->mode = mode;
- res->blob = lookup_blob(oid);
+ res->blob = lookup_blob(the_repository, oid);
return res;
}
return 0;
}
+static int option_read_message(struct parse_opt_ctx_t *ctx,
+ const struct option *opt, int unset)
+{
+ struct strbuf *buf = opt->value;
+ const char *arg;
+
+ if (unset)
+ BUG("-F cannot be negated");
+
+ if (ctx->opt) {
+ arg = ctx->opt;
+ ctx->opt = NULL;
+ } else if (ctx->argc > 1) {
+ ctx->argc--;
+ arg = *++ctx->argv;
+ } else
+ return opterror(opt, "requires a value", 0);
+
+ if (buf->len)
+ strbuf_addch(buf, '\n');
+ if (ctx->prefix && !is_absolute_path(arg))
+ arg = prefix_filename(ctx->prefix, arg);
+ if (strbuf_read_file(buf, arg, 0) < 0)
+ return error(_("could not read file '%s'"), arg);
+ have_message = 1;
+
+ return 0;
+}
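+
+/*
+ * Illustrative note, not part of the upstream change: with this
+ * low-level callback,
+ *
+ *     git merge -F msg.txt topic
+ *
+ * reads the merge message from msg.txt (resolved against the prefix
+ * when the path is relative), and repeating -F concatenates the
+ * files with a newline in between.
+ */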
+
static struct strategy *get_strategy(const char *name)
{
int i;
OPT_CALLBACK('m', "message", &merge_msg, N_("message"),
N_("merge commit message (for a non-fast-forward merge)"),
option_parse_message),
+ { OPTION_LOWLEVEL_CALLBACK, 'F', "file", &merge_msg, N_("path"),
+ N_("read message from file"), PARSE_OPT_NONEG,
+ (parse_opt_cb *) option_read_message },
OPT__VERBOSITY(&verbosity),
OPT_BOOL(0, "abort", &abort_current_merge,
N_("abort the current in-progress merge")),
const char *filename;
int fd, pos, npos;
struct strbuf fetch_head_file = STRBUF_INIT;
+ const unsigned hexsz = the_hash_algo->hexsz;
if (!merge_names)
merge_names = &fetch_head_file;
else
npos = merge_names->len;
- if (npos - pos < GIT_SHA1_HEXSZ + 2 ||
+ if (npos - pos < hexsz + 2 ||
get_oid_hex(merge_names->buf + pos, &oid))
commit = NULL; /* bad */
- else if (memcmp(merge_names->buf + pos + GIT_SHA1_HEXSZ, "\t\t", 2))
+ else if (memcmp(merge_names->buf + pos + hexsz, "\t\t", 2))
continue; /* not-for-merge */
else {
- char saved = merge_names->buf[pos + GIT_SHA1_HEXSZ];
- merge_names->buf[pos + GIT_SHA1_HEXSZ] = '\0';
+ char saved = merge_names->buf[pos + hexsz];
+ merge_names->buf[pos + hexsz] = '\0';
commit = get_merge_parent(merge_names->buf + pos);
- merge_names->buf[pos + GIT_SHA1_HEXSZ] = saved;
+ merge_names->buf[pos + hexsz] = saved;
}
if (!commit) {
if (ptr)
#include "builtin.h"
#include "cache.h"
+#include "repository.h"
#include "config.h"
#include "commit.h"
#include "tag.h"
static int name_ref(const char *path, const struct object_id *oid, int flags, void *cb_data)
{
- struct object *o = parse_object(oid);
+ struct object *o = parse_object(the_repository, oid);
struct name_ref_data *data = cb_data;
int can_abbreviate_output = data->tags_only && data->name_only;
int deref = 0;
struct tag *t = (struct tag *) o;
if (!t->tagged)
break; /* broken repository */
- o = parse_object(&t->tagged->oid);
+ o = parse_object(the_repository, &t->tagged->oid);
deref = 1;
taggerdate = t->date;
}
*(p+1) = 0;
if (!get_oid(p - (GIT_SHA1_HEXSZ - 1), &oid)) {
struct object *o =
- lookup_object(oid.hash);
+ lookup_object(the_repository,
+ oid.hash);
if (o)
name = get_rev_name(o, &buf);
}
}
commit = NULL;
- object = parse_object(&oid);
+ object = parse_object(the_repository, &oid);
if (object) {
- struct object *peeled = deref_tag(object, *argv, 0);
+ struct object *peeled = deref_tag(the_repository,
+ object, *argv, 0);
if (peeled && peeled->type == OBJ_COMMIT)
commit = (struct commit *)peeled;
}
#include "builtin.h"
#include "notes.h"
#include "object-store.h"
+#include "repository.h"
#include "blob.h"
#include "pretty.h"
#include "refs.h"
if (get_oid("NOTES_MERGE_PARTIAL", &oid))
die(_("failed to read ref NOTES_MERGE_PARTIAL"));
- else if (!(partial = lookup_commit_reference(&oid)))
+ else if (!(partial = lookup_commit_reference(the_repository, &oid)))
die(_("could not find commit from NOTES_MERGE_PARTIAL."));
else if (parse_commit(partial))
die(_("could not parse commit from NOTES_MERGE_PARTIAL."));
if (packlist_find(&to_pack, oid->hash, NULL))
return;
- tag = lookup_tag(oid);
+ tag = lookup_tag(the_repository, oid);
while (1) {
if (!tag || parse_tag(tag) || !tag->tagged)
die("unable to pack objects reachable from tag %s",
* Do we know about this object?
* It must have been reachable
*/
- if (lookup_object(oid->hash))
+ if (lookup_object(the_repository, oid->hash))
return 0;
if (lstat(fullpath, &st)) {
{
struct commit_list *revs = NULL, *result;
- commit_list_insert(lookup_commit_reference(curr_head), &revs);
- commit_list_insert(lookup_commit_reference(merge_head), &revs);
+ commit_list_insert(lookup_commit_reference(the_repository, curr_head),
+ &revs);
+ commit_list_insert(lookup_commit_reference(the_repository, merge_head),
+ &revs);
if (!is_null_oid(fork_point))
- commit_list_insert(lookup_commit_reference(fork_point), &revs);
+ commit_list_insert(lookup_commit_reference(the_repository, fork_point),
+ &revs);
result = get_octopus_merge_bases(revs);
free_commit_list(revs);
struct commit_list *list = NULL;
struct commit *merge_head, *head;
- head = lookup_commit_reference(&orig_head);
+ head = lookup_commit_reference(the_repository,
+ &orig_head);
commit_list_insert(head, &list);
- merge_head = lookup_commit_reference(&merge_heads.oid[0]);
+ merge_head = lookup_commit_reference(the_repository,
+ &merge_heads.oid[0]);
if (is_descendant_of(merge_head, list)) {
/* we can fast-forward this without invoking rebase */
opt_ff = "--ff-only";
--- /dev/null
+/*
+ * "git rebase" builtin command
+ *
+ * Copyright (c) 2018 Pratik Karki
+ */
+
+#include "builtin.h"
+#include "run-command.h"
+#include "exec-cmd.h"
+#include "argv-array.h"
+#include "dir.h"
+#include "packfile.h"
+#include "refs.h"
+#include "quote.h"
+#include "config.h"
+#include "cache-tree.h"
+#include "unpack-trees.h"
+#include "lockfile.h"
+#include "parse-options.h"
+#include "commit.h"
+#include "diff.h"
+#include "wt-status.h"
+#include "revision.h"
+#include "rerere.h"
+
+static char const * const builtin_rebase_usage[] = {
+ N_("git rebase [-i] [options] [--exec <cmd>] [--onto <newbase>] "
+ "[<upstream>] [<branch>]"),
+ N_("git rebase [-i] [options] [--exec <cmd>] [--onto <newbase>] "
+ "--root [<branch>]"),
+ N_("git rebase --continue | --abort | --skip | --edit-todo"),
+ NULL
+};
+
+static GIT_PATH_FUNC(apply_dir, "rebase-apply")
+static GIT_PATH_FUNC(merge_dir, "rebase-merge")
+
+enum rebase_type {
+ REBASE_UNSPECIFIED = -1,
+ REBASE_AM,
+ REBASE_MERGE,
+ REBASE_INTERACTIVE,
+ REBASE_PRESERVE_MERGES
+};
+
+static int use_builtin_rebase(void)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ struct strbuf out = STRBUF_INIT;
+ int ret;
+
+ argv_array_pushl(&cp.args,
+ "config", "--bool", "rebase.usebuiltin", NULL);
+ cp.git_cmd = 1;
+ if (capture_command(&cp, &out, 6)) {
+ strbuf_release(&out);
+ return 0;
+ }
+
+ strbuf_trim(&out);
+ ret = !strcmp("true", out.buf);
+ strbuf_release(&out);
+ return ret;
+}
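+
+/*
+ * Illustrative note, not part of the upstream change: the builtin is
+ * opt-in at this point, e.g.
+ *
+ *     git config rebase.usebuiltin true
+ *
+ * makes use_builtin_rebase() return 1; any other value, or an unset
+ * variable, yields 0.
+ */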
+
+struct rebase_options {
+ enum rebase_type type;
+ const char *state_dir;
+ struct commit *upstream;
+ const char *upstream_name;
+ const char *upstream_arg;
+ char *head_name;
+ struct object_id orig_head;
+ struct commit *onto;
+ const char *onto_name;
+ const char *revisions;
+ const char *switch_to;
+ int root;
+ struct object_id *squash_onto;
+ struct commit *restrict_revision;
+ int dont_finish_rebase;
+ enum {
+ REBASE_NO_QUIET = 1<<0,
+ REBASE_VERBOSE = 1<<1,
+ REBASE_DIFFSTAT = 1<<2,
+ REBASE_FORCE = 1<<3,
+ REBASE_INTERACTIVE_EXPLICIT = 1<<4,
+ } flags;
+ struct strbuf git_am_opt;
+ const char *action;
+ int signoff;
+ int allow_rerere_autoupdate;
+ int keep_empty;
+ int autosquash;
+ char *gpg_sign_opt;
+ int autostash;
+ char *cmd;
+ int allow_empty_message;
+ int rebase_merges, rebase_cousins;
+ char *strategy, *strategy_opts;
+ struct strbuf git_format_patch_opt;
+};
+
+static int is_interactive(struct rebase_options *opts)
+{
+ return opts->type == REBASE_INTERACTIVE ||
+ opts->type == REBASE_PRESERVE_MERGES;
+}
+
+static void imply_interactive(struct rebase_options *opts, const char *option)
+{
+ switch (opts->type) {
+ case REBASE_AM:
+ die(_("%s requires an interactive rebase"), option);
+ break;
+ case REBASE_INTERACTIVE:
+ case REBASE_PRESERVE_MERGES:
+ break;
+ case REBASE_MERGE:
+ /* we silently *upgrade* --merge to --interactive if needed */
+ default:
+ opts->type = REBASE_INTERACTIVE; /* implied */
+ break;
+ }
+}
+
+/* Returns the filename prefixed by the state_dir */
+static const char *state_dir_path(const char *filename, struct rebase_options *opts)
+{
+ static struct strbuf path = STRBUF_INIT;
+ static size_t prefix_len;
+
+ if (!prefix_len) {
+ strbuf_addf(&path, "%s/", opts->state_dir);
+ prefix_len = path.len;
+ }
+
+ strbuf_setlen(&path, prefix_len);
+ strbuf_addstr(&path, filename);
+ return path.buf;
+}
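+
+/*
+ * Illustrative note, not part of the upstream change: with the merge
+ * backend, state_dir_path("onto", opts) evaluates to something like
+ * ".git/rebase-merge/onto". The buffer is static, so each call
+ * invalidates the previously returned string.
+ */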
+
+/* Read one file, then strip line endings */
+static int read_one(const char *path, struct strbuf *buf)
+{
+ if (strbuf_read_file(buf, path, 0) < 0)
+ return error_errno(_("could not read '%s'"), path);
+ strbuf_trim_trailing_newline(buf);
+ return 0;
+}
+
+/* Initialize the rebase options from the state directory. */
+static int read_basic_state(struct rebase_options *opts)
+{
+ struct strbuf head_name = STRBUF_INIT;
+ struct strbuf buf = STRBUF_INIT;
+ struct object_id oid;
+
+ if (read_one(state_dir_path("head-name", opts), &head_name) ||
+ read_one(state_dir_path("onto", opts), &buf))
+ return -1;
+ opts->head_name = starts_with(head_name.buf, "refs/") ?
+ xstrdup(head_name.buf) : NULL;
+ strbuf_release(&head_name);
+ if (get_oid(buf.buf, &oid))
+ return error(_("could not get 'onto': '%s'"), buf.buf);
+ opts->onto = lookup_commit_or_die(&oid, buf.buf);
+
+ /*
+ * We always write to orig-head, but interactive rebase used to write to
+ * head. Fall back to reading from head to cover for the case that the
+ * user upgraded git with an ongoing interactive rebase.
+ */
+ strbuf_reset(&buf);
+ if (file_exists(state_dir_path("orig-head", opts))) {
+ if (read_one(state_dir_path("orig-head", opts), &buf))
+ return -1;
+ } else if (read_one(state_dir_path("head", opts), &buf))
+ return -1;
+ if (get_oid(buf.buf, &opts->orig_head))
+ return error(_("invalid orig-head: '%s'"), buf.buf);
+
+ strbuf_reset(&buf);
+ if (read_one(state_dir_path("quiet", opts), &buf))
+ return -1;
+ if (buf.len)
+ opts->flags &= ~REBASE_NO_QUIET;
+ else
+ opts->flags |= REBASE_NO_QUIET;
+
+ if (file_exists(state_dir_path("verbose", opts)))
+ opts->flags |= REBASE_VERBOSE;
+
+ if (file_exists(state_dir_path("signoff", opts))) {
+ opts->signoff = 1;
+ opts->flags |= REBASE_FORCE;
+ }
+
+ if (file_exists(state_dir_path("allow_rerere_autoupdate", opts))) {
+ strbuf_reset(&buf);
+ if (read_one(state_dir_path("allow_rerere_autoupdate", opts),
+ &buf))
+ return -1;
+ if (!strcmp(buf.buf, "--rerere-autoupdate"))
+ opts->allow_rerere_autoupdate = 1;
+ else if (!strcmp(buf.buf, "--no-rerere-autoupdate"))
+ opts->allow_rerere_autoupdate = 0;
+ else
+ warning(_("ignoring invalid allow_rerere_autoupdate: "
+ "'%s'"), buf.buf);
+ } else
+ opts->allow_rerere_autoupdate = -1;
+
+ if (file_exists(state_dir_path("gpg_sign_opt", opts))) {
+ strbuf_reset(&buf);
+ if (read_one(state_dir_path("gpg_sign_opt", opts),
+ &buf))
+ return -1;
+ free(opts->gpg_sign_opt);
+ opts->gpg_sign_opt = xstrdup(buf.buf);
+ }
+
+ if (file_exists(state_dir_path("strategy", opts))) {
+ strbuf_reset(&buf);
+ if (read_one(state_dir_path("strategy", opts), &buf))
+ return -1;
+ free(opts->strategy);
+ opts->strategy = xstrdup(buf.buf);
+ }
+
+ if (file_exists(state_dir_path("strategy_opts", opts))) {
+ strbuf_reset(&buf);
+ if (read_one(state_dir_path("strategy_opts", opts), &buf))
+ return -1;
+ free(opts->strategy_opts);
+ opts->strategy_opts = xstrdup(buf.buf);
+ }
+
+ strbuf_release(&buf);
+
+ return 0;
+}
+
+static int apply_autostash(struct rebase_options *opts)
+{
+ const char *path = state_dir_path("autostash", opts);
+ struct strbuf autostash = STRBUF_INIT;
+ struct child_process stash_apply = CHILD_PROCESS_INIT;
+
+ if (!file_exists(path))
+ return 0;
+
+ if (read_one(path, &autostash))
+ return error(_("Could not read '%s'"), path);
+ argv_array_pushl(&stash_apply.args,
+ "stash", "apply", autostash.buf, NULL);
+ stash_apply.git_cmd = 1;
+ stash_apply.no_stderr = stash_apply.no_stdout =
+ stash_apply.no_stdin = 1;
+ if (!run_command(&stash_apply))
+ printf(_("Applied autostash.\n"));
+ else {
+ struct argv_array args = ARGV_ARRAY_INIT;
+ int res = 0;
+
+ argv_array_pushl(&args,
+ "stash", "store", "-m", "autostash", "-q",
+ autostash.buf, NULL);
+ if (run_command_v_opt(args.argv, RUN_GIT_CMD))
+ res = error(_("Cannot store %s"), autostash.buf);
+ argv_array_clear(&args);
+ strbuf_release(&autostash);
+ if (res)
+ return res;
+
+ fprintf(stderr,
+ _("Applying autostash resulted in conflicts.\n"
+ "Your changes are safe in the stash.\n"
+ "You can run \"git stash pop\" or \"git stash drop\" "
+ "at any time.\n"));
+ }
+
+ strbuf_release(&autostash);
+ return 0;
+}
+
+static int finish_rebase(struct rebase_options *opts)
+{
+ struct strbuf dir = STRBUF_INIT;
+ const char *argv_gc_auto[] = { "gc", "--auto", NULL };
+
+ delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
+ apply_autostash(opts);
+ close_all_packs(the_repository->objects);
+ /*
+ * We ignore the exit code of 'gc --auto', since any errors
+ * will already have been shown to the user.
+ */
+ run_command_v_opt(argv_gc_auto, RUN_GIT_CMD);
+ strbuf_addstr(&dir, opts->state_dir);
+ remove_dir_recursively(&dir, 0);
+ strbuf_release(&dir);
+
+ return 0;
+}
+
+static struct commit *peel_committish(const char *name)
+{
+ struct object *obj;
+ struct object_id oid;
+
+ if (get_oid(name, &oid))
+ return NULL;
+ obj = parse_object(the_repository, &oid);
+ return (struct commit *)peel_to_type(name, 0, obj, OBJ_COMMIT);
+}
+
+static void add_var(struct strbuf *buf, const char *name, const char *value)
+{
+ if (!value)
+ strbuf_addf(buf, "unset %s; ", name);
+ else {
+ strbuf_addf(buf, "%s=", name);
+ sq_quote_buf(buf, value);
+ strbuf_addstr(buf, "; ");
+ }
+}
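+
+/*
+ * Illustrative note, not part of the upstream change:
+ * add_var(&buf, "head_name", "refs/heads/topic") appends
+ * "head_name='refs/heads/topic'; " (shell-quoted), while a NULL value
+ * appends "unset head_name; " so the scripted backend sees the
+ * variable as unset.
+ */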
+
+static int run_specific_rebase(struct rebase_options *opts)
+{
+ const char *argv[] = { NULL, NULL };
+ struct strbuf script_snippet = STRBUF_INIT;
+ int status;
+ const char *backend, *backend_func;
+
+ add_var(&script_snippet, "GIT_DIR", absolute_path(get_git_dir()));
+ add_var(&script_snippet, "state_dir", opts->state_dir);
+
+ add_var(&script_snippet, "upstream_name", opts->upstream_name);
+ add_var(&script_snippet, "upstream", opts->upstream ?
+ oid_to_hex(&opts->upstream->object.oid) : NULL);
+ add_var(&script_snippet, "head_name",
+ opts->head_name ? opts->head_name : "detached HEAD");
+ add_var(&script_snippet, "orig_head", oid_to_hex(&opts->orig_head));
+ add_var(&script_snippet, "onto", opts->onto ?
+ oid_to_hex(&opts->onto->object.oid) : NULL);
+ add_var(&script_snippet, "onto_name", opts->onto_name);
+ add_var(&script_snippet, "revisions", opts->revisions);
+ add_var(&script_snippet, "restrict_revision", opts->restrict_revision ?
+ oid_to_hex(&opts->restrict_revision->object.oid) : NULL);
+ add_var(&script_snippet, "GIT_QUIET",
+ opts->flags & REBASE_NO_QUIET ? "" : "t");
+ add_var(&script_snippet, "git_am_opt", opts->git_am_opt.buf);
+ add_var(&script_snippet, "verbose",
+ opts->flags & REBASE_VERBOSE ? "t" : "");
+ add_var(&script_snippet, "diffstat",
+ opts->flags & REBASE_DIFFSTAT ? "t" : "");
+ add_var(&script_snippet, "force_rebase",
+ opts->flags & REBASE_FORCE ? "t" : "");
+ if (opts->switch_to)
+ add_var(&script_snippet, "switch_to", opts->switch_to);
+ add_var(&script_snippet, "action", opts->action ? opts->action : "");
+ add_var(&script_snippet, "signoff", opts->signoff ? "--signoff" : "");
+ add_var(&script_snippet, "allow_rerere_autoupdate",
+ opts->allow_rerere_autoupdate < 0 ? "" :
+ opts->allow_rerere_autoupdate ?
+ "--rerere-autoupdate" : "--no-rerere-autoupdate");
+ add_var(&script_snippet, "keep_empty", opts->keep_empty ? "yes" : "");
+ add_var(&script_snippet, "autosquash", opts->autosquash ? "t" : "");
+ add_var(&script_snippet, "gpg_sign_opt", opts->gpg_sign_opt);
+ add_var(&script_snippet, "cmd", opts->cmd);
+ add_var(&script_snippet, "allow_empty_message",
+ opts->allow_empty_message ? "--allow-empty-message" : "");
+ add_var(&script_snippet, "rebase_merges",
+ opts->rebase_merges ? "t" : "");
+ add_var(&script_snippet, "rebase_cousins",
+ opts->rebase_cousins ? "t" : "");
+ add_var(&script_snippet, "strategy", opts->strategy);
+ add_var(&script_snippet, "strategy_opts", opts->strategy_opts);
+ add_var(&script_snippet, "rebase_root", opts->root ? "t" : "");
+ add_var(&script_snippet, "squash_onto",
+ opts->squash_onto ? oid_to_hex(opts->squash_onto) : "");
+ add_var(&script_snippet, "git_format_patch_opt",
+ opts->git_format_patch_opt.buf);
+
+ if (is_interactive(opts) &&
+ !(opts->flags & REBASE_INTERACTIVE_EXPLICIT)) {
+ strbuf_addstr(&script_snippet,
+ "GIT_EDITOR=:; export GIT_EDITOR; ");
+ opts->autosquash = 0;
+ }
+
+ switch (opts->type) {
+ case REBASE_AM:
+ backend = "git-rebase--am";
+ backend_func = "git_rebase__am";
+ break;
+ case REBASE_INTERACTIVE:
+ backend = "git-rebase--interactive";
+ backend_func = "git_rebase__interactive";
+ break;
+ case REBASE_MERGE:
+ backend = "git-rebase--merge";
+ backend_func = "git_rebase__merge";
+ break;
+ case REBASE_PRESERVE_MERGES:
+ backend = "git-rebase--preserve-merges";
+ backend_func = "git_rebase__preserve_merges";
+ break;
+ default:
+ BUG("Unhandled rebase type %d", opts->type);
+ break;
+ }
+
+ strbuf_addf(&script_snippet,
+ ". git-sh-setup && . git-rebase--common &&"
+ " . %s && %s", backend, backend_func);
+ argv[0] = script_snippet.buf;
+
+ status = run_command_v_opt(argv, RUN_USING_SHELL);
+ if (opts->dont_finish_rebase)
+ ; /* do nothing */
+ else if (status == 0) {
+ if (!file_exists(state_dir_path("stopped-sha", opts)))
+ finish_rebase(opts);
+ } else if (status == 2) {
+ struct strbuf dir = STRBUF_INIT;
+
+ apply_autostash(opts);
+ strbuf_addstr(&dir, opts->state_dir);
+ remove_dir_recursively(&dir, 0);
+ strbuf_release(&dir);
+ die("Nothing to do");
+ }
+
+ strbuf_release(&script_snippet);
+
+ return status ? -1 : 0;
+}
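+
+/*
+ * Illustrative note, not part of the upstream change: the snippet
+ * handed to the shell ends up looking roughly like
+ *
+ *     GIT_DIR='...'; state_dir='...'; upstream='...'; ...
+ *     . git-sh-setup && . git-rebase--common &&
+ *     . git-rebase--am && git_rebase__am
+ *
+ * so the scripted backend runs with the same shell variables the old
+ * git-rebase.sh used to set up itself.
+ */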
+
+#define GIT_REFLOG_ACTION_ENVIRONMENT "GIT_REFLOG_ACTION"
+
+static int reset_head(struct object_id *oid, const char *action,
+ const char *switch_to_branch, int detach_head,
+ const char *reflog_orig_head, const char *reflog_head)
+{
+ struct object_id head_oid;
+ struct tree_desc desc;
+ struct lock_file lock = LOCK_INIT;
+ struct unpack_trees_options unpack_tree_opts;
+ struct tree *tree;
+ const char *reflog_action;
+ struct strbuf msg = STRBUF_INIT;
+ size_t prefix_len;
+ struct object_id *orig = NULL, oid_orig,
+ *old_orig = NULL, oid_old_orig;
+ int ret = 0;
+
+ if (switch_to_branch && !starts_with(switch_to_branch, "refs/"))
+ BUG("Not a fully qualified branch: '%s'", switch_to_branch);
+
+ if (hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0)
+ return -1;
+
+ if (!oid) {
+ if (get_oid("HEAD", &head_oid)) {
+ rollback_lock_file(&lock);
+ return error(_("could not determine HEAD revision"));
+ }
+ oid = &head_oid;
+ }
+
+ memset(&unpack_tree_opts, 0, sizeof(unpack_tree_opts));
+ setup_unpack_trees_porcelain(&unpack_tree_opts, action);
+ unpack_tree_opts.head_idx = 1;
+ unpack_tree_opts.src_index = the_repository->index;
+ unpack_tree_opts.dst_index = the_repository->index;
+ unpack_tree_opts.fn = oneway_merge;
+ unpack_tree_opts.update = 1;
+ unpack_tree_opts.merge = 1;
+ if (!detach_head)
+ unpack_tree_opts.reset = 1;
+
+ if (read_index_unmerged(the_repository->index) < 0) {
+ rollback_lock_file(&lock);
+ return error(_("could not read index"));
+ }
+
+ if (!fill_tree_descriptor(&desc, oid)) {
+ error(_("failed to find tree of %s"), oid_to_hex(oid));
+ rollback_lock_file(&lock);
+ free((void *)desc.buffer);
+ return -1;
+ }
+
+ if (unpack_trees(1, &desc, &unpack_tree_opts)) {
+ rollback_lock_file(&lock);
+ free((void *)desc.buffer);
+ return -1;
+ }
+
+ tree = parse_tree_indirect(oid);
+ prime_cache_tree(the_repository->index, tree);
+
+ if (write_locked_index(the_repository->index, &lock, COMMIT_LOCK) < 0)
+ ret = error(_("could not write index"));
+ free((void *)desc.buffer);
+
+ if (ret)
+ return ret;
+
+ reflog_action = getenv(GIT_REFLOG_ACTION_ENVIRONMENT);
+ strbuf_addf(&msg, "%s: ", reflog_action ? reflog_action : "rebase");
+ prefix_len = msg.len;
+
+ if (!get_oid("ORIG_HEAD", &oid_old_orig))
+ old_orig = &oid_old_orig;
+ if (!get_oid("HEAD", &oid_orig)) {
+ orig = &oid_orig;
+ if (!reflog_orig_head) {
+ strbuf_addstr(&msg, "updating ORIG_HEAD");
+ reflog_orig_head = msg.buf;
+ }
+ update_ref(reflog_orig_head, "ORIG_HEAD", orig, old_orig, 0,
+ UPDATE_REFS_MSG_ON_ERR);
+ } else if (old_orig)
+ delete_ref(NULL, "ORIG_HEAD", old_orig, 0);
+ if (!reflog_head) {
+ strbuf_setlen(&msg, prefix_len);
+ strbuf_addstr(&msg, "updating HEAD");
+ reflog_head = msg.buf;
+ }
+ if (!switch_to_branch)
+ ret = update_ref(reflog_head, "HEAD", oid, orig, REF_NO_DEREF,
+ UPDATE_REFS_MSG_ON_ERR);
+ else {
+ ret = create_symref("HEAD", switch_to_branch, msg.buf);
+ if (!ret)
+ ret = update_ref(reflog_head, "HEAD", oid, NULL, 0,
+ UPDATE_REFS_MSG_ON_ERR);
+ }
+
+ strbuf_release(&msg);
+ return ret;
+}
+
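+/*
+ * Read rebase.* (and commit.gpgsign) configuration into
+ * struct rebase_options.
+ */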
+static int rebase_config(const char *var, const char *value, void *data)
+{
+ struct rebase_options *opts = data;
+
+ if (!strcmp(var, "rebase.stat")) {
+ if (git_config_bool(var, value))
+ opts->flags |= REBASE_DIFFSTAT;
+ else
+ opts->flags &= ~REBASE_DIFFSTAT;
+ return 0;
+ }
+
+ if (!strcmp(var, "rebase.autosquash")) {
+ opts->autosquash = git_config_bool(var, value);
+ return 0;
+ }
+
+ if (!strcmp(var, "commit.gpgsign")) {
+ free(opts->gpg_sign_opt);
+ opts->gpg_sign_opt = git_config_bool(var, value) ?
+ xstrdup("-S") : NULL;
+ return 0;
+ }
+
+ if (!strcmp(var, "rebase.autostash")) {
+ opts->autostash = git_config_bool(var, value);
+ return 0;
+ }
+
+ return git_default_config(var, value, data);
+}
+
+/*
+ * Determines whether the commits in from..to are linear, i.e. contain
+ * no merge commits. This function *expects* `from` to be an ancestor of
+ * `to`.
+ */
+static int is_linear_history(struct commit *from, struct commit *to)
+{
+ while (to && to != from) {
+ parse_commit(to);
+ if (!to->parents)
+ return 1;
+ if (to->parents->next)
+ return 0;
+ to = to->parents->item;
+ }
+ return 1;
+}
+
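+/*
+ * Returns 1 if and only if `onto` is the sole merge base of `onto` and
+ * `head_oid` and the commits in between contain no merges, i.e. HEAD
+ * already sits linearly on top of `onto`.  The merge base that was found
+ * (or the null OID) is written to `merge_base`.
+ */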
+static int can_fast_forward(struct commit *onto, struct object_id *head_oid,
+ struct object_id *merge_base)
+{
+ struct commit *head = lookup_commit(the_repository, head_oid);
+ struct commit_list *merge_bases;
+ int res;
+
+ if (!head)
+ return 0;
+
+ merge_bases = get_merge_bases(onto, head);
+ if (merge_bases && !merge_bases->next) {
+ oidcpy(merge_base, &merge_bases->item->object.oid);
+ res = !oidcmp(merge_base, &onto->object.oid);
+ } else {
+ oidcpy(merge_base, &null_oid);
+ res = 0;
+ }
+ free_commit_list(merge_bases);
+ return res && is_linear_history(onto, head);
+}
+
+/* -i followed by -m is still -i */
+static int parse_opt_merge(const struct option *opt, const char *arg, int unset)
+{
+ struct rebase_options *opts = opt->value;
+
+ if (!is_interactive(opts))
+ opts->type = REBASE_MERGE;
+
+ return 0;
+}
+
+/* -i followed by -p is still explicitly interactive, but -p alone is not */
+static int parse_opt_interactive(const struct option *opt, const char *arg,
+ int unset)
+{
+ struct rebase_options *opts = opt->value;
+
+ opts->type = REBASE_INTERACTIVE;
+ opts->flags |= REBASE_INTERACTIVE_EXPLICIT;
+
+ return 0;
+}
+
+static void NORETURN error_on_missing_default_upstream(void)
+{
+ struct branch *current_branch = branch_get(NULL);
+
+ printf(_("%s\n"
+ "Please specify which branch you want to rebase against.\n"
+ "See git-rebase(1) for details.\n"
+ "\n"
+ " git rebase '<branch>'\n"
+ "\n"),
+ current_branch ? _("There is no tracking information for "
+ "the current branch.") :
+ _("You are not currently on a branch."));
+
+ if (current_branch) {
+ const char *remote = current_branch->remote_name;
+
+ if (!remote)
+ remote = _("<remote>");
+
+ printf(_("If you wish to set tracking information for this "
+ "branch you can do so with:\n"
+ "\n"
+ " git branch --set-upstream-to=%s/<branch> %s\n"
+ "\n"),
+ remote, current_branch->name);
+ }
+ exit(1);
+}
+
+int cmd_rebase(int argc, const char **argv, const char *prefix)
+{
+ struct rebase_options options = {
+ .type = REBASE_UNSPECIFIED,
+ .flags = REBASE_NO_QUIET,
+ .git_am_opt = STRBUF_INIT,
+ .allow_rerere_autoupdate = -1,
+ .allow_empty_message = 1,
+ .git_format_patch_opt = STRBUF_INIT,
+ };
+ const char *branch_name;
+ int ret, flags, total_argc, in_progress = 0;
+ int ok_to_skip_pre_rebase = 0;
+ struct strbuf msg = STRBUF_INIT;
+ struct strbuf revisions = STRBUF_INIT;
+ struct strbuf buf = STRBUF_INIT;
+ struct object_id merge_base;
+ enum {
+ NO_ACTION,
+ ACTION_CONTINUE,
+ ACTION_SKIP,
+ ACTION_ABORT,
+ ACTION_QUIT,
+ ACTION_EDIT_TODO,
+ ACTION_SHOW_CURRENT_PATCH,
+ } action = NO_ACTION;
+ int committer_date_is_author_date = 0;
+ int ignore_date = 0;
+ int ignore_whitespace = 0;
+ const char *gpg_sign = NULL;
+ int opt_c = -1;
+ struct string_list whitespace = STRING_LIST_INIT_NODUP;
+ struct string_list exec = STRING_LIST_INIT_NODUP;
+ const char *rebase_merges = NULL;
+ int fork_point = -1;
+ struct string_list strategy_options = STRING_LIST_INIT_NODUP;
+ struct object_id squash_onto;
+ char *squash_onto_name = NULL;
+ struct option builtin_rebase_options[] = {
+ OPT_STRING(0, "onto", &options.onto_name,
+ N_("revision"),
+ N_("rebase onto given branch instead of upstream")),
+ OPT_BOOL(0, "no-verify", &ok_to_skip_pre_rebase,
+ N_("allow pre-rebase hook to run")),
+ OPT_NEGBIT('q', "quiet", &options.flags,
+ N_("be quiet. implies --no-stat"),
+ REBASE_NO_QUIET | REBASE_VERBOSE | REBASE_DIFFSTAT),
+ OPT_BIT('v', "verbose", &options.flags,
+ N_("display a diffstat of what changed upstream"),
+ REBASE_NO_QUIET | REBASE_VERBOSE | REBASE_DIFFSTAT),
+ {OPTION_NEGBIT, 'n', "no-stat", &options.flags, NULL,
+ N_("do not show diffstat of what changed upstream"),
+ PARSE_OPT_NOARG, NULL, REBASE_DIFFSTAT },
+ OPT_BOOL(0, "ignore-whitespace", &ignore_whitespace,
+ N_("passed to 'git apply'")),
+ OPT_BOOL(0, "signoff", &options.signoff,
+ N_("add a Signed-off-by: line to each commit")),
+ OPT_BOOL(0, "committer-date-is-author-date",
+ &committer_date_is_author_date,
+ N_("passed to 'git am'")),
+ OPT_BOOL(0, "ignore-date", &ignore_date,
+ N_("passed to 'git am'")),
+ OPT_BIT('f', "force-rebase", &options.flags,
+ N_("cherry-pick all commits, even if unchanged"),
+ REBASE_FORCE),
+ OPT_BIT(0, "no-ff", &options.flags,
+ N_("cherry-pick all commits, even if unchanged"),
+ REBASE_FORCE),
+ OPT_CMDMODE(0, "continue", &action, N_("continue"),
+ ACTION_CONTINUE),
+ OPT_CMDMODE(0, "skip", &action,
+ N_("skip current patch and continue"), ACTION_SKIP),
+ OPT_CMDMODE(0, "abort", &action,
+ N_("abort and check out the original branch"),
+ ACTION_ABORT),
+ OPT_CMDMODE(0, "quit", &action,
+ N_("abort but keep HEAD where it is"), ACTION_QUIT),
+ OPT_CMDMODE(0, "edit-todo", &action, N_("edit the todo list "
+ "during an interactive rebase"), ACTION_EDIT_TODO),
+ OPT_CMDMODE(0, "show-current-patch", &action,
+ N_("show the patch file being applied or merged"),
+ ACTION_SHOW_CURRENT_PATCH),
+ { OPTION_CALLBACK, 'm', "merge", &options, NULL,
+ N_("use merging strategies to rebase"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG,
+ parse_opt_merge },
+ { OPTION_CALLBACK, 'i', "interactive", &options, NULL,
+ N_("let the user edit the list of commits to rebase"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG,
+ parse_opt_interactive },
+ OPT_SET_INT('p', "preserve-merges", &options.type,
+ N_("try to recreate merges instead of ignoring "
+ "them"), REBASE_PRESERVE_MERGES),
+ OPT_BOOL(0, "rerere-autoupdate",
+ &options.allow_rerere_autoupdate,
+ N_("allow rerere to update index with resolved "
+ "conflict")),
+ OPT_BOOL('k', "keep-empty", &options.keep_empty,
+ N_("preserve empty commits during rebase")),
+ OPT_BOOL(0, "autosquash", &options.autosquash,
+ N_("move commits that begin with "
+ "squash!/fixup! under -i")),
+ { OPTION_STRING, 'S', "gpg-sign", &gpg_sign, N_("key-id"),
+ N_("GPG-sign commits"),
+ PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
+ OPT_STRING_LIST(0, "whitespace", &whitespace,
+ N_("whitespace"), N_("passed to 'git apply'")),
+ OPT_INTEGER('C', NULL, &opt_c,
+ N_("passed to 'git apply'")),
+ OPT_BOOL(0, "autostash", &options.autostash,
+ N_("automatically stash/stash pop before and after")),
+ OPT_STRING_LIST('x', "exec", &exec, N_("exec"),
+ N_("add exec lines after each commit of the "
+ "editable list")),
+ OPT_BOOL(0, "allow-empty-message",
+ &options.allow_empty_message,
+ N_("allow rebasing commits with empty messages")),
+ {OPTION_STRING, 'r', "rebase-merges", &rebase_merges,
+ N_("mode"),
+ N_("try to rebase merges instead of skipping them"),
+ PARSE_OPT_OPTARG, NULL, (intptr_t)""},
+ OPT_BOOL(0, "fork-point", &fork_point,
+ N_("use 'merge-base --fork-point' to refine upstream")),
+ OPT_STRING('s', "strategy", &options.strategy,
+ N_("strategy"), N_("use the given merge strategy")),
+ OPT_STRING_LIST('X', "strategy-option", &strategy_options,
+ N_("option"),
+ N_("pass the argument through to the merge "
+ "strategy")),
+ OPT_BOOL(0, "root", &options.root,
+ N_("rebase all reachable commits up to the root(s)")),
+ OPT_END(),
+ };
+
+ /*
+ * NEEDSWORK: Once the builtin rebase has been tested enough
+ * and git-legacy-rebase.sh is retired to contrib/, this preamble
+ * can be removed.
+ */
+
+ if (!use_builtin_rebase()) {
+ const char *path = mkpath("%s/git-legacy-rebase",
+ git_exec_path());
+
+ if (sane_execvp(path, (char **)argv) < 0)
+ die_errno(_("could not exec %s"), path);
+ else
+ BUG("sane_execvp() returned???");
+ }
+
+ if (argc == 2 && !strcmp(argv[1], "-h"))
+ usage_with_options(builtin_rebase_usage,
+ builtin_rebase_options);
+
+ prefix = setup_git_directory();
+ trace_repo_setup(prefix);
+ setup_work_tree();
+
+ git_config(rebase_config, &options);
+
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "%s/applying", apply_dir());
+ if (file_exists(buf.buf))
+ die(_("It looks like 'git am' is in progress. Cannot rebase."));
+
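+ /*
+ * Detect a rebase that is already in progress by looking at which
+ * state directory exists, and remember the backend that owns it.
+ */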
+ if (is_directory(apply_dir())) {
+ options.type = REBASE_AM;
+ options.state_dir = apply_dir();
+ } else if (is_directory(merge_dir())) {
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "%s/rewritten", merge_dir());
+ if (is_directory(buf.buf)) {
+ options.type = REBASE_PRESERVE_MERGES;
+ options.flags |= REBASE_INTERACTIVE_EXPLICIT;
+ } else {
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "%s/interactive", merge_dir());
+ if (file_exists(buf.buf)) {
+ options.type = REBASE_INTERACTIVE;
+ options.flags |= REBASE_INTERACTIVE_EXPLICIT;
+ } else
+ options.type = REBASE_MERGE;
+ }
+ options.state_dir = merge_dir();
+ }
+
+ if (options.type != REBASE_UNSPECIFIED)
+ in_progress = 1;
+
+ total_argc = argc;
+ argc = parse_options(argc, argv, prefix,
+ builtin_rebase_options,
+ builtin_rebase_usage, 0);
+
+ if (action != NO_ACTION && total_argc != 2) {
+ usage_with_options(builtin_rebase_usage,
+ builtin_rebase_options);
+ }
+
+ if (argc > 2)
+ usage_with_options(builtin_rebase_usage,
+ builtin_rebase_options);
+
+ if (action != NO_ACTION && !in_progress)
+ die(_("No rebase in progress?"));
+
+ if (action == ACTION_EDIT_TODO && !is_interactive(&options))
+ die(_("The --edit-todo action can only be used during "
+ "interactive rebase."));
+
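+ /* Handle --continue, --skip, --abort, etc. before starting a new rebase. */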
+ switch (action) {
+ case ACTION_CONTINUE: {
+ struct object_id head;
+ struct lock_file lock_file = LOCK_INIT;
+ int fd;
+
+ options.action = "continue";
+
+ /* Sanity check */
+ if (get_oid("HEAD", &head))
+ die(_("Cannot read HEAD"));
+
+ fd = hold_locked_index(&lock_file, 0);
+ if (read_index(the_repository->index) < 0)
+ die(_("could not read index"));
+ refresh_index(the_repository->index, REFRESH_QUIET, NULL, NULL,
+ NULL);
+ if (0 <= fd)
+ update_index_if_able(the_repository->index,
+ &lock_file);
+ rollback_lock_file(&lock_file);
+
+ if (has_unstaged_changes(1)) {
+ puts(_("You must edit all merge conflicts and then\n"
+ "mark them as resolved using git add"));
+ exit(1);
+ }
+ if (read_basic_state(&options))
+ exit(1);
+ goto run_rebase;
+ }
+ case ACTION_SKIP: {
+ struct string_list merge_rr = STRING_LIST_INIT_DUP;
+
+ options.action = "skip";
+
+ rerere_clear(&merge_rr);
+ string_list_clear(&merge_rr, 1);
+
+ if (reset_head(NULL, "reset", NULL, 0, NULL, NULL) < 0)
+ die(_("could not discard worktree changes"));
+ if (read_basic_state(&options))
+ exit(1);
+ goto run_rebase;
+ }
+ case ACTION_ABORT: {
+ struct string_list merge_rr = STRING_LIST_INIT_DUP;
+ options.action = "abort";
+
+ rerere_clear(&merge_rr);
+ string_list_clear(&merge_rr, 1);
+
+ if (read_basic_state(&options))
+ exit(1);
+ if (reset_head(&options.orig_head, "reset",
+ options.head_name, 0, NULL, NULL) < 0)
+ die(_("could not move back to %s"),
+ oid_to_hex(&options.orig_head));
+ ret = finish_rebase(&options);
+ goto cleanup;
+ }
+ case ACTION_QUIT: {
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, options.state_dir);
+ ret = !!remove_dir_recursively(&buf, 0);
+ if (ret)
+ die(_("could not remove '%s'"), options.state_dir);
+ goto cleanup;
+ }
+ case ACTION_EDIT_TODO:
+ options.action = "edit-todo";
+ options.dont_finish_rebase = 1;
+ goto run_rebase;
+ case ACTION_SHOW_CURRENT_PATCH:
+ options.action = "show-current-patch";
+ options.dont_finish_rebase = 1;
+ goto run_rebase;
+ case NO_ACTION:
+ break;
+ default:
+ BUG("action: %d", action);
+ }
+
+ /* Make sure no rebase is in progress */
+ if (in_progress) {
+ const char *last_slash = strrchr(options.state_dir, '/');
+ const char *state_dir_base =
+ last_slash ? last_slash + 1 : options.state_dir;
+ const char *cmd_live_rebase =
+ "git rebase (--continue | --abort | --skip)";
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "rm -fr \"%s\"", options.state_dir);
+ die(_("It seems that there is already a %s directory, and\n"
+ "I wonder if you are in the middle of another rebase. "
+ "If that is the\n"
+ "case, please try\n\t%s\n"
+ "If that is not the case, please\n\t%s\n"
+ "and run me again. I am stopping in case you still "
+ "have something\n"
+ "valuable there.\n"),
+ state_dir_base, cmd_live_rebase, buf.buf);
+ }
+
+ if (!(options.flags & REBASE_NO_QUIET))
+ strbuf_addstr(&options.git_am_opt, " -q");
+
+ if (committer_date_is_author_date) {
+ strbuf_addstr(&options.git_am_opt,
+ " --committer-date-is-author-date");
+ options.flags |= REBASE_FORCE;
+ }
+
+ if (ignore_whitespace)
+ strbuf_addstr(&options.git_am_opt, " --ignore-whitespace");
+
+ if (ignore_date) {
+ strbuf_addstr(&options.git_am_opt, " --ignore-date");
+ options.flags |= REBASE_FORCE;
+ }
+
+ if (options.keep_empty)
+ imply_interactive(&options, "--keep-empty");
+
+ if (gpg_sign) {
+ free(options.gpg_sign_opt);
+ options.gpg_sign_opt = xstrfmt("-S%s", gpg_sign);
+ }
+
+ if (opt_c >= 0)
+ strbuf_addf(&options.git_am_opt, " -C%d", opt_c);
+
+ if (whitespace.nr) {
+ int i;
+
+ for (i = 0; i < whitespace.nr; i++) {
+ const char *item = whitespace.items[i].string;
+
+ strbuf_addf(&options.git_am_opt, " --whitespace=%s",
+ item);
+
+ if ((!strcmp(item, "fix")) || (!strcmp(item, "strip")))
+ options.flags |= REBASE_FORCE;
+ }
+ }
+
+ if (exec.nr) {
+ int i;
+
+ imply_interactive(&options, "--exec");
+
+ strbuf_reset(&buf);
+ for (i = 0; i < exec.nr; i++)
+ strbuf_addf(&buf, "exec %s\n", exec.items[i].string);
+ options.cmd = xstrdup(buf.buf);
+ }
+
+ if (rebase_merges) {
+ if (!*rebase_merges)
+ ; /* default mode; do nothing */
+ else if (!strcmp("rebase-cousins", rebase_merges))
+ options.rebase_cousins = 1;
+ else if (strcmp("no-rebase-cousins", rebase_merges))
+ die(_("Unknown mode: %s"), rebase_merges);
+ options.rebase_merges = 1;
+ imply_interactive(&options, "--rebase-merges");
+ }
+
+ if (strategy_options.nr) {
+ int i;
+
+ if (!options.strategy)
+ options.strategy = "recursive";
+
+ strbuf_reset(&buf);
+ for (i = 0; i < strategy_options.nr; i++)
+ strbuf_addf(&buf, " --%s",
+ strategy_options.items[i].string);
+ options.strategy_opts = xstrdup(buf.buf);
+ }
+
+ if (options.strategy) {
+ options.strategy = xstrdup(options.strategy);
+ switch (options.type) {
+ case REBASE_AM:
+ die(_("--strategy requires --merge or --interactive"));
+ case REBASE_MERGE:
+ case REBASE_INTERACTIVE:
+ case REBASE_PRESERVE_MERGES:
+ /* compatible */
+ break;
+ case REBASE_UNSPECIFIED:
+ options.type = REBASE_MERGE;
+ break;
+ default:
+ BUG("unhandled rebase type (%d)", options.type);
+ }
+ }
+
+ if (options.root && !options.onto_name)
+ imply_interactive(&options, "--root without --onto");
+
+ if (isatty(2) && options.flags & REBASE_NO_QUIET)
+ strbuf_addstr(&options.git_format_patch_opt, " --progress");
+
+ switch (options.type) {
+ case REBASE_MERGE:
+ case REBASE_INTERACTIVE:
+ case REBASE_PRESERVE_MERGES:
+ options.state_dir = merge_dir();
+ break;
+ case REBASE_AM:
+ options.state_dir = apply_dir();
+ break;
+ default:
+ /* the default rebase backend is `--am` */
+ options.type = REBASE_AM;
+ options.state_dir = apply_dir();
+ break;
+ }
+
+ if (options.git_am_opt.len) {
+ const char *p;
+
+ /* all am options except -q are compatible only with --am */
+ strbuf_reset(&buf);
+ strbuf_addbuf(&buf, &options.git_am_opt);
+ strbuf_addch(&buf, ' ');
+ while ((p = strstr(buf.buf, " -q ")))
+ strbuf_splice(&buf, p - buf.buf, 4, " ", 1);
+ strbuf_trim(&buf);
+
+ if (is_interactive(&options) && buf.len)
+ die(_("error: cannot combine interactive options "
+ "(--interactive, --exec, --rebase-merges, "
+ "--preserve-merges, --keep-empty, --root + "
+ "--onto) with am options (%s)"), buf.buf);
+ if (options.type == REBASE_MERGE && buf.len)
+ die(_("error: cannot combine merge options (--merge, "
+ "--strategy, --strategy-option) with am options "
+ "(%s)"), buf.buf);
+ }
+
+ if (options.signoff) {
+ if (options.type == REBASE_PRESERVE_MERGES)
+ die("cannot combine '--signoff' with "
+ "'--preserve-merges'");
+ strbuf_addstr(&options.git_am_opt, " --signoff");
+ options.flags |= REBASE_FORCE;
+ }
+
+ if (options.type == REBASE_PRESERVE_MERGES)
+ /*
+ * Note: incompatibility with --signoff handled in signoff block above
+ * Note: incompatibility with --interactive is just a strong warning;
+ * git-rebase.txt caveats with "unless you know what you are doing"
+ */
+ if (options.rebase_merges)
+ die(_("error: cannot combine '--preserve_merges' with "
+ "'--rebase-merges'"));
+
+ if (options.rebase_merges) {
+ if (strategy_options.nr)
+ die(_("error: cannot combine '--rebase_merges' with "
+ "'--strategy-option'"));
+ if (options.strategy)
+ die(_("error: cannot combine '--rebase_merges' with "
+ "'--strategy'"));
+ }
+
+ if (!options.root) {
+ if (argc < 1) {
+ struct branch *branch;
+
+ branch = branch_get(NULL);
+ options.upstream_name = branch_get_upstream(branch,
+ NULL);
+ if (!options.upstream_name)
+ error_on_missing_default_upstream();
+ if (fork_point < 0)
+ fork_point = 1;
+ } else {
+ options.upstream_name = argv[0];
+ argc--;
+ argv++;
+ if (!strcmp(options.upstream_name, "-"))
+ options.upstream_name = "@{-1}";
+ }
+ options.upstream = peel_committish(options.upstream_name);
+ if (!options.upstream)
+ die(_("invalid upstream '%s'"), options.upstream_name);
+ options.upstream_arg = options.upstream_name;
+ } else {
+ if (!options.onto_name) {
+ if (commit_tree("", 0, the_hash_algo->empty_tree, NULL,
+ &squash_onto, NULL, NULL) < 0)
+ die(_("Could not create new root commit"));
+ options.squash_onto = &squash_onto;
+ options.onto_name = squash_onto_name =
+ xstrdup(oid_to_hex(&squash_onto));
+ }
+ options.upstream_name = NULL;
+ options.upstream = NULL;
+ if (argc > 1)
+ usage_with_options(builtin_rebase_usage,
+ builtin_rebase_options);
+ options.upstream_arg = "--root";
+ }
+
+ /* Make sure the branch to rebase onto is valid. */
+ if (!options.onto_name)
+ options.onto_name = options.upstream_name;
+ if (strstr(options.onto_name, "...")) {
+ if (get_oid_mb(options.onto_name, &merge_base) < 0)
+ die(_("'%s': need exactly one merge base"),
+ options.onto_name);
+ options.onto = lookup_commit_or_die(&merge_base,
+ options.onto_name);
+ } else {
+ options.onto = peel_committish(options.onto_name);
+ if (!options.onto)
+ die(_("Does not point to a valid commit '%s'"),
+ options.onto_name);
+ }
+
+ /*
+ * If the branch to rebase is given, that is the branch we will rebase;
+ * otherwise, we rebase whatever HEAD currently points at.
+ *
+ * branch_name -- branch/commit being rebased, or
+ * HEAD (already detached)
+ * orig_head -- commit object name of tip of the branch before rebasing
+ * head_name -- refs/heads/<that-branch> or NULL (detached HEAD)
+ */
+ if (argc == 1) {
+ /* Is it "rebase other branchname" or "rebase other commit"? */
+ branch_name = argv[0];
+ options.switch_to = argv[0];
+
+ /* Is it a local branch? */
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "refs/heads/%s", branch_name);
+ if (!read_ref(buf.buf, &options.orig_head))
+ options.head_name = xstrdup(buf.buf);
+ /* If not is it a valid ref (branch or commit)? */
+ else if (!get_oid(branch_name, &options.orig_head))
+ options.head_name = NULL;
+ else
+ die(_("fatal: no such branch/commit '%s'"),
+ branch_name);
+ } else if (argc == 0) {
+ /* Do not need to switch branches, we are already on it. */
+ options.head_name =
+ xstrdup_or_null(resolve_ref_unsafe("HEAD", 0, NULL,
+ &flags));
+ if (!options.head_name)
+ die(_("No such ref: %s"), "HEAD");
+ if (flags & REF_ISSYMREF) {
+ if (!skip_prefix(options.head_name,
+ "refs/heads/", &branch_name))
+ branch_name = options.head_name;
+
+ } else {
+ free(options.head_name);
+ options.head_name = NULL;
+ branch_name = "HEAD";
+ }
+ if (get_oid("HEAD", &options.orig_head))
+ die(_("Could not resolve HEAD to a revision"));
+ } else
+ BUG("unexpected number of arguments left to parse");
+
+ if (fork_point > 0) {
+ struct commit *head =
+ lookup_commit_reference(the_repository,
+ &options.orig_head);
+ options.restrict_revision =
+ get_fork_point(options.upstream_name, head);
+ }
+
+ if (read_index(the_repository->index) < 0)
+ die(_("could not read index"));
+
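+ /*
+ * With --autostash, stash away local modifications (recording the
+ * stash in the state directory) and reset to a clean worktree first.
+ */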
+ if (options.autostash) {
+ struct lock_file lock_file = LOCK_INIT;
+ int fd;
+
+ fd = hold_locked_index(&lock_file, 0);
+ refresh_cache(REFRESH_QUIET);
+ if (0 <= fd)
+ update_index_if_able(&the_index, &lock_file);
+ rollback_lock_file(&lock_file);
+
+ if (has_unstaged_changes(0) || has_uncommitted_changes(0)) {
+ const char *autostash =
+ state_dir_path("autostash", &options);
+ struct child_process stash = CHILD_PROCESS_INIT;
+ struct object_id oid;
+ struct commit *head =
+ lookup_commit_reference(the_repository,
+ &options.orig_head);
+
+ argv_array_pushl(&stash.args,
+ "stash", "create", "autostash", NULL);
+ stash.git_cmd = 1;
+ stash.no_stdin = 1;
+ strbuf_reset(&buf);
+ if (capture_command(&stash, &buf, GIT_MAX_HEXSZ))
+ die(_("Cannot autostash"));
+ strbuf_trim_trailing_newline(&buf);
+ if (get_oid(buf.buf, &oid))
+ die(_("Unexpected stash response: '%s'"),
+ buf.buf);
+ strbuf_reset(&buf);
+ strbuf_add_unique_abbrev(&buf, &oid, DEFAULT_ABBREV);
+
+ if (safe_create_leading_directories_const(autostash))
+ die(_("Could not create directory for '%s'"),
+ options.state_dir);
+ write_file(autostash, "%s", buf.buf);
+ printf(_("Created autostash: %s\n"), buf.buf);
+ if (reset_head(&head->object.oid, "reset --hard",
+ NULL, 0, NULL, NULL) < 0)
+ die(_("could not reset --hard"));
+ printf(_("HEAD is now at %s"),
+ find_unique_abbrev(&head->object.oid,
+ DEFAULT_ABBREV));
+ strbuf_reset(&buf);
+ pp_commit_easy(CMIT_FMT_ONELINE, head, &buf);
+ if (buf.len > 0)
+ printf(" %s", buf.buf);
+ putchar('\n');
+
+ if (discard_index(the_repository->index) < 0 ||
+ read_index(the_repository->index) < 0)
+ die(_("could not read index"));
+ }
+ }
+
+ if (require_clean_work_tree("rebase",
+ _("Please commit or stash them."), 1, 1)) {
+ ret = 1;
+ goto cleanup;
+ }
+
+ /*
+ * Now we are rebasing commits upstream..orig_head (or with --root,
+ * everything leading up to orig_head) on top of onto.
+ */
+
+ /*
+ * Check if we are already based on onto with linear history,
+ * but this should be done only when upstream and onto are the same
+ * and if this is not an interactive rebase.
+ */
+ if (can_fast_forward(options.onto, &options.orig_head, &merge_base) &&
+ !is_interactive(&options) && !options.restrict_revision &&
+ options.upstream &&
+ !oidcmp(&options.upstream->object.oid, &options.onto->object.oid)) {
+ int flag;
+
+ if (!(options.flags & REBASE_FORCE)) {
+ /* Lazily switch to the target branch if needed... */
+ if (options.switch_to) {
+ struct object_id oid;
+
+ if (get_oid(options.switch_to, &oid) < 0) {
+ ret = !!error(_("could not parse '%s'"),
+ options.switch_to);
+ goto cleanup;
+ }
+
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "rebase: checkout %s",
+ options.switch_to);
+ if (reset_head(&oid, "checkout",
+ options.head_name, 0,
+ NULL, NULL) < 0) {
+ ret = !!error(_("could not switch to "
+ "%s"),
+ options.switch_to);
+ goto cleanup;
+ }
+ }
+
+ if (!(options.flags & REBASE_NO_QUIET))
+ ; /* be quiet */
+ else if (!strcmp(branch_name, "HEAD") &&
+ resolve_ref_unsafe("HEAD", 0, NULL, &flag))
+ puts(_("HEAD is up to date."));
+ else
+ printf(_("Current branch %s is up to date.\n"),
+ branch_name);
+ ret = !!finish_rebase(&options);
+ goto cleanup;
+ } else if (!(options.flags & REBASE_NO_QUIET))
+ ; /* be quiet */
+ else if (!strcmp(branch_name, "HEAD") &&
+ resolve_ref_unsafe("HEAD", 0, NULL, &flag))
+ puts(_("HEAD is up to date, rebase forced."));
+ else
+ printf(_("Current branch %s is up to date, rebase "
+ "forced.\n"), branch_name);
+ }
+
+ /* If a hook exists, give it a chance to interrupt */
+ if (!ok_to_skip_pre_rebase &&
+ run_hook_le(NULL, "pre-rebase", options.upstream_arg,
+ argc ? argv[0] : NULL, NULL))
+ die(_("The pre-rebase hook refused to rebase."));
+
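+ /* Show a diffstat of what changed between the merge base and onto. */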
+ if (options.flags & REBASE_DIFFSTAT) {
+ struct diff_options opts;
+
+ if (options.flags & REBASE_VERBOSE)
+ printf(_("Changes from %s to %s:\n"),
+ oid_to_hex(&merge_base),
+ oid_to_hex(&options.onto->object.oid));
+
+ /* We want color (if set), but no pager */
+ diff_setup(&opts);
+ opts.stat_width = -1; /* use full terminal width */
+ opts.stat_graph_width = -1; /* respect statGraphWidth config */
+ opts.output_format |=
+ DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
+ opts.detect_rename = DIFF_DETECT_RENAME;
+ diff_setup_done(&opts);
+ diff_tree_oid(&merge_base, &options.onto->object.oid,
+ "", &opts);
+ diffcore_std(&opts);
+ diff_flush(&opts);
+ }
+
+ if (is_interactive(&options))
+ goto run_rebase;
+
+ /* Detach HEAD and reset the tree */
+ if (options.flags & REBASE_NO_QUIET)
+ printf(_("First, rewinding head to replay your work on top of "
+ "it...\n"));
+
+ strbuf_addf(&msg, "rebase: checkout %s", options.onto_name);
+ if (reset_head(&options.onto->object.oid, "checkout", NULL, 1,
+ NULL, msg.buf))
+ die(_("Could not detach HEAD"));
+ strbuf_release(&msg);
+
+ /*
+ * If the onto is a proper descendant of the tip of the branch, then
+ * we just fast-forwarded.
+ */
+ strbuf_reset(&msg);
+ if (!oidcmp(&merge_base, &options.orig_head)) {
+ printf(_("Fast-forwarded %s to %s. \n"),
+ branch_name, options.onto_name);
+ strbuf_addf(&msg, "rebase finished: %s onto %s",
+ options.head_name ? options.head_name : "detached HEAD",
+ oid_to_hex(&options.onto->object.oid));
+ reset_head(NULL, "Fast-forwarded", options.head_name, 0,
+ "HEAD", msg.buf);
+ strbuf_release(&msg);
+ ret = !!finish_rebase(&options);
+ goto cleanup;
+ }
+
+ strbuf_addf(&revisions, "%s..%s",
+ options.root ? oid_to_hex(&options.onto->object.oid) :
+ (options.restrict_revision ?
+ oid_to_hex(&options.restrict_revision->object.oid) :
+ oid_to_hex(&options.upstream->object.oid)),
+ oid_to_hex(&options.orig_head));
+
+ options.revisions = revisions.buf;
+
+run_rebase:
+ ret = !!run_specific_rebase(&options);
+
+cleanup:
+ strbuf_release(&revisions);
+ free(options.head_name);
+ free(options.gpg_sign_opt);
+ free(options.cmd);
+ free(squash_onto_name);
+ return ret;
+}
return;
if (!already_done) {
- struct strbuf gpg_output = STRBUF_INIT;
- struct strbuf gpg_status = STRBUF_INIT;
int bogs /* beginning_of_gpg_sig */;
already_done = 1;
oidclr(&push_cert_oid);
memset(&sigcheck, '\0', sizeof(sigcheck));
- sigcheck.result = 'N';
bogs = parse_signature(push_cert.buf, push_cert.len);
- if (verify_signed_buffer(push_cert.buf, bogs,
- push_cert.buf + bogs, push_cert.len - bogs,
- &gpg_output, &gpg_status) < 0) {
- ; /* error running gpg */
- } else {
- sigcheck.payload = push_cert.buf;
- sigcheck.gpg_output = gpg_output.buf;
- sigcheck.gpg_status = gpg_status.buf;
- parse_gpg_output(&sigcheck);
- }
+ check_signature(push_cert.buf, bogs, push_cert.buf + bogs,
+ push_cert.len - bogs, &sigcheck);
- strbuf_release(&gpg_output);
- strbuf_release(&gpg_status);
nonce_status = check_nonce(push_cert.buf, bogs);
}
if (!is_null_oid(&push_cert_oid)) {
struct object *old_object, *new_object;
struct commit *old_commit, *new_commit;
- old_object = parse_object(old_oid);
- new_object = parse_object(new_oid);
+ old_object = parse_object(the_repository, old_oid);
+ new_object = parse_object(the_repository, new_oid);
if (!old_object || !new_object ||
old_object->type != OBJ_COMMIT ||
if (is_null_oid(new_oid)) {
struct strbuf err = STRBUF_INIT;
- if (!parse_object(old_oid)) {
+ if (!parse_object(the_repository, old_oid)) {
old_oid = NULL;
if (ref_exists(name)) {
rp_warning("Allowing deletion of corrupt ref.");
#include "config.h"
#include "lockfile.h"
#include "object-store.h"
+#include "repository.h"
#include "commit.h"
#include "refs.h"
#include "dir.h"
int complete;
struct tree *tree;
- tree = lookup_tree(oid);
+ tree = lookup_tree(the_repository, oid);
if (!tree)
return 0;
if (tree->object.flags & SEEN)
struct commit_list *parent;
c = (struct commit *)object_array_pop(&study);
- if (!c->object.parsed && !parse_object(&c->object.oid))
+ if (!c->object.parsed && !parse_object(the_repository, &c->object.oid))
c->object.flags |= INCOMPLETE;
if (c->object.flags & INCOMPLETE) {
if (is_null_oid(oid))
return 1;
- commit = lookup_commit_reference_gently(oid, 1);
+ commit = lookup_commit_reference_gently(the_repository, oid, 1);
if (!commit)
return 0;
if (is_null_oid(oid))
return 0;
- commit = lookup_commit_reference_gently(oid, 1);
+ commit = lookup_commit_reference_gently(the_repository, oid,
+ 1);
/* Not a commit -- keep it */
if (!commit)
struct commit *tip_commit;
if (flags & REF_ISSYMREF)
return 0;
- tip_commit = lookup_commit_reference_gently(oid, 1);
+ tip_commit = lookup_commit_reference_gently(the_repository, oid, 1);
if (!tip_commit)
return 0;
commit_list_insert(tip_commit, list);
cb->tip_commit = NULL;
cb->unreachable_expire_kind = UE_HEAD;
} else {
- cb->tip_commit = lookup_commit_reference_gently(oid, 1);
+ cb->tip_commit = lookup_commit_reference_gently(the_repository,
+ oid, 1);
if (!cb->tip_commit)
cb->unreachable_expire_kind = UE_ALWAYS;
else
return error(_("Not a valid object name: '%s'"),
argv[i]);
}
- if (!lookup_commit_reference(&oid)) {
+ if (!lookup_commit_reference(the_repository, &oid)) {
strbuf_release(&new_parents);
return error(_("could not parse %s"), argv[i]);
}
int i;
hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &tag_oid);
- tag = lookup_tag(&tag_oid);
+ tag = lookup_tag(the_repository, &tag_oid);
if (!tag)
return error(_("bad mergetag in commit '%s'"), ref);
- if (parse_tag_buffer(tag, extra->value, extra->len))
+ if (parse_tag_buffer(the_repository, tag, extra->value, extra->len))
return error(_("malformed mergetag in commit '%s'"), ref);
/* iterate over new parents */
if (get_oid(old_ref, &old_oid) < 0)
return error(_("Not a valid object name: '%s'"), old_ref);
- commit = lookup_commit_reference(&old_oid);
+ commit = lookup_commit_reference(the_repository, &old_oid);
if (!commit)
return error(_("could not parse %s"), old_ref);
continue;
}
- ce = make_cache_entry(one->mode, one->oid.hash, one->path,
+ ce = make_cache_entry(&the_index, one->mode, &one->oid, one->path,
0, 0);
if (!ce)
die(_("make_cache_entry failed for path '%s'"),
struct commit *commit;
if (get_oid_committish(rev, &oid))
die(_("Failed to resolve '%s' as a valid revision."), rev);
- commit = lookup_commit_reference(&oid);
+ commit = lookup_commit_reference(the_repository, &oid);
if (!commit)
die(_("Could not parse object '%s'."), rev);
oidcpy(&oid, &commit->object.oid);
update_ref_status = reset_refs(rev, &oid);
if (reset_type == HARD && !update_ref_status && !quiet)
- print_new_head_line(lookup_commit_reference(&oid));
+ print_new_head_line(lookup_commit_reference(the_repository, &oid));
}
if (!pathspec.nr)
remove_branch_state();
return 1;
}
if (info->revs->verify_objects && !obj->parsed && obj->type != OBJ_COMMIT)
- parse_object(&obj->oid);
+ parse_object(the_repository, &obj->oid);
return 0;
}
if (symmetric) {
struct commit_list *exclude;
struct commit *a, *b;
- a = lookup_commit_reference(&start_oid);
- b = lookup_commit_reference(&end_oid);
+ a = lookup_commit_reference(the_repository, &start_oid);
+ b = lookup_commit_reference(the_repository, &end_oid);
if (!a || !b) {
*dotdot = '.';
return 0;
*dotdot = 0;
if (get_oid_committish(arg, &oid) ||
- !(commit = lookup_commit_reference(&oid))) {
+ !(commit = lookup_commit_reference(the_repository, &oid))) {
*dotdot = '^';
return 0;
}
static int append_ref(const char *refname, const struct object_id *oid,
int allow_dups)
{
- struct commit *commit = lookup_commit_reference_gently(oid, 1);
+ struct commit *commit = lookup_commit_reference_gently(the_repository,
+ oid, 1);
int i;
if (!commit)
MAX_REVS), MAX_REVS);
if (get_oid(ref_name[num_rev], &revkey))
die(_("'%s' is not a valid ref."), ref_name[num_rev]);
- commit = lookup_commit_reference(&revkey);
+ commit = lookup_commit_reference(the_repository, &revkey);
if (!commit)
die(_("cannot find commit %s (%s)"),
ref_name[num_rev], oid_to_hex(&revkey));
}
free(buf);
- if ((c = lookup_commit_reference(oid)) != NULL)
+ if ((c = lookup_commit_reference(the_repository, oid)) != NULL)
strbuf_addf(sb, ", %s", show_date(c->date, 0, DATE_MODE(SHORT)));
break;
case OBJ_TREE:
added_object(nr, type, buf, size);
free(buf);
- blob = lookup_blob(&obj_list[nr].oid);
+ blob = lookup_blob(the_repository, &obj_list[nr].oid);
if (blob)
blob->object.flags |= FLAG_WRITTEN;
else
int eaten;
hash_object_file(buf, size, type_name(type), &obj_list[nr].oid);
added_object(nr, type, buf, size);
- obj = parse_object_buffer(&obj_list[nr].oid, type, size, buf,
+ obj = parse_object_buffer(the_repository, &obj_list[nr].oid,
+ type, size, buf,
&eaten);
if (!obj)
die("invalid %s", type_name(type));
{
struct object *obj;
struct obj_buffer *obj_buffer;
- obj = lookup_object(base->hash);
+ obj = lookup_object(the_repository, base->hash);
if (!obj)
return 0;
obj_buffer = lookup_object_buffer(obj);
static int add_one_path(const struct cache_entry *old, const char *path, int len, struct stat *st)
{
- int option, size;
+ int option;
struct cache_entry *ce;
/* Was the old index entry already up-to-date? */
if (old && !ce_stage(old) && !ce_match_stat(old, st, 0))
return 0;
- size = cache_entry_size(len);
- ce = xcalloc(1, size);
+ ce = make_empty_cache_entry(&the_index, len);
memcpy(ce->name, path, len);
ce->ce_flags = create_ce_flags(0);
ce->ce_namelen = len;
if (index_path(&ce->oid, path, st,
info_only ? 0 : HASH_WRITE_OBJECT)) {
- free(ce);
+ discard_cache_entry(ce);
return -1;
}
option = allow_add ? ADD_CACHE_OK_TO_ADD : 0;
option |= allow_replace ? ADD_CACHE_OK_TO_REPLACE : 0;
if (add_cache_entry(ce, option)) {
- free(ce);
+ discard_cache_entry(ce);
return error("%s: cannot add to the index - missing --add option?", path);
}
return 0;
static int add_cacheinfo(unsigned int mode, const struct object_id *oid,
const char *path, int stage)
{
- int size, len, option;
+ int len, option;
struct cache_entry *ce;
if (!verify_path(path, mode))
return error("Invalid path '%s'", path);
len = strlen(path);
- size = cache_entry_size(len);
- ce = xcalloc(1, size);
+ ce = make_empty_cache_entry(&the_index, len);
oidcpy(&ce->oid, oid);
memcpy(ce->name, path, len);
static void read_index_info(int nul_term_line)
{
+ const int hexsz = the_hash_algo->hexsz;
struct strbuf buf = STRBUF_INIT;
struct strbuf uq = STRBUF_INIT;
strbuf_getline_fn getline_fn;
mode = ul;
tab = strchr(ptr, '\t');
- if (!tab || tab - ptr < GIT_SHA1_HEXSZ + 1)
+ if (!tab || tab - ptr < hexsz + 1)
goto bad_line;
if (tab[-2] == ' ' && '0' <= tab[-1] && tab[-1] <= '3') {
ptr = tab + 1; /* point at the head of path */
}
- if (get_oid_hex(tab - GIT_SHA1_HEXSZ, &oid) ||
- tab[-(GIT_SHA1_HEXSZ + 1)] != ' ')
+ if (get_oid_hex(tab - hexsz, &oid) ||
+ tab[-(hexsz + 1)] != ' ')
goto bad_line;
path_name = ptr;
* ptr[-1] points at tab,
- * ptr[-41] is at the beginning of sha1
+ * ptr[-(hexsz + 1)] is at the beginning of the object name
*/
- ptr[-(GIT_SHA1_HEXSZ + 2)] = ptr[-1] = 0;
+ ptr[-(hexsz + 2)] = ptr[-1] = 0;
if (add_cacheinfo(mode, &oid, path_name, stage))
die("git update-index: unable to update %s",
path_name);
{
unsigned mode;
struct object_id oid;
- int size;
struct cache_entry *ce;
if (get_tree_entry(ent, path, &oid, &mode)) {
error("%s: not a blob in %s branch.", path, which);
return NULL;
}
- size = cache_entry_size(namelen);
- ce = xcalloc(1, size);
+ ce = make_empty_cache_entry(&the_index, namelen);
oidcpy(&ce->oid, &oid);
memcpy(ce->name, path, namelen);
error("%s: cannot add their version to the index.", path);
ret = -1;
free_return:
- free(ce_2);
- free(ce_3);
+ discard_cache_entry(ce_2);
+ discard_cache_entry(ce_3);
return ret;
}
ce->name, ce_namelen(ce), 0);
if (old && ce->ce_mode == old->ce_mode &&
!oidcmp(&ce->oid, &old->oid)) {
- free(old);
+ discard_cache_entry(old);
continue; /* unchanged */
}
/* Be careful. The working tree may not have the
path = xstrdup(ce->name);
update_one(path);
free(path);
- free(old);
+ discard_cache_entry(old);
if (save_nr != active_nr)
goto redo;
}
{
unsigned long ul;
char *endp;
+ const char *p;
if (!arg)
return -1;
return -1; /* not a new-style cacheinfo */
*mode = ul;
endp++;
- if (get_oid_hex(endp, oid) || endp[GIT_SHA1_HEXSZ] != ',')
+ if (parse_oid_hex(endp, oid, &p) || *p != ',')
return -1;
- *path = endp + GIT_SHA1_HEXSZ + 1;
+ *path = p + 1;
return 0;
}
#include "config.h"
#include "builtin.h"
#include "object-store.h"
+#include "repository.h"
#include "commit.h"
#include "run-command.h"
#include <signal.h>
memset(&signature_check, 0, sizeof(signature_check));
- ret = check_commit_signature(lookup_commit(oid), &signature_check);
+ ret = check_commit_signature(lookup_commit(the_repository, oid),
+ &signature_check);
print_signature_buffer(&signature_check, flags);
signature_check_clear(&signature_check);
if (guess_remote) {
struct object_id oid;
const char *remote =
- unique_tracking_name(*new_branch, &oid);
+ unique_tracking_name(*new_branch, &oid, NULL);
return remote;
}
return NULL;
commit = lookup_commit_reference_by_name(branch);
if (!commit) {
- remote = unique_tracking_name(branch, &oid);
+ remote = unique_tracking_name(branch, &oid, NULL);
if (remote) {
new_branch = branch;
branch = remote;
#include "lockfile.h"
#include "bundle.h"
#include "object-store.h"
+#include "repository.h"
#include "object.h"
#include "commit.h"
#include "diff.h"
init_revisions(&revs, NULL);
for (i = 0; i < p->nr; i++) {
struct ref_list_entry *e = p->list + i;
- struct object *o = parse_object(&e->oid);
+ struct object *o = parse_object(the_repository, &e->oid);
if (o) {
o->flags |= PREREQ_MARK;
add_pending_object(&revs, o, e->name);
for (i = 0; i < p->nr; i++) {
struct ref_list_entry *e = p->list + i;
- struct object *o = parse_object(&e->oid);
+ struct object *o = parse_object(the_repository, &e->oid);
assert(o); /* otherwise we'd have returned early */
if (o->flags & SHOWN)
continue;
/* Clean up objects used, as they will be reused. */
for (i = 0; i < p->nr; i++) {
struct ref_list_entry *e = p->list + i;
- commit = lookup_commit_reference_gently(&e->oid, 1);
+ commit = lookup_commit_reference_gently(the_repository, &e->oid, 1);
if (commit)
clear_commit_marks(commit, ALL_REV_FLAGS);
}
* in terms of a tag (e.g. v2.0 from the range
* "v1.0..v2.0")?
*/
- struct commit *one = lookup_commit_reference(&oid);
+ struct commit *one = lookup_commit_reference(the_repository,
+ &oid);
struct object *obj;
if (e->item == &(one->object)) {
cnt++;
else {
struct cache_tree_sub *sub;
- struct tree *subtree = lookup_tree(entry.oid);
+ struct tree *subtree = lookup_tree(the_repository,
+ entry.oid);
if (!subtree->object.parsed)
parse_tree(subtree);
sub = cache_tree_sub(it, entry.path);
#include "path.h"
#include "sha1-array.h"
#include "repository.h"
+#include "mem-pool.h"
#include <zlib.h>
typedef struct git_zstream {
struct stat_data ce_stat_data;
unsigned int ce_mode;
unsigned int ce_flags;
+ unsigned int mem_pool_allocated;
unsigned int ce_namelen;
unsigned int index; /* for link extension */
struct object_id oid;
/* Forward structure decls */
struct pathspec;
struct child_process;
+struct tree;
/*
* Copy the sha1 and stat state of a cache entry from one to
const struct cache_entry *src)
{
unsigned int state = dst->ce_flags & CE_HASHED;
+ int mem_pool_allocated = dst->mem_pool_allocated;
/* Don't copy hash chain and name */
memcpy(&dst->ce_stat_data, &src->ce_stat_data,
/* Restore the hash state */
dst->ce_flags = (dst->ce_flags & ~CE_HASHED) | state;
+
+ /* Restore the mem_pool_allocated flag */
+ dst->mem_pool_allocated = mem_pool_allocated;
}
static inline unsigned create_ce_flags(unsigned stage)
struct untracked_cache *untracked;
uint64_t fsmonitor_last_update;
struct ewah_bitmap *fsmonitor_dirty;
+ struct mem_pool *ce_mem_pool;
};
extern struct index_state the_index;
extern void free_name_hash(struct index_state *istate);
+/* Cache entry creation and cleanup */
+
+/*
+ * Create cache_entry intended for use in the specified index. Caller
+ * is responsible for discarding the cache_entry with
+ * `discard_cache_entry`.
+ */
+struct cache_entry *make_cache_entry(struct index_state *istate,
+ unsigned int mode,
+ const struct object_id *oid,
+ const char *path,
+ int stage,
+ unsigned int refresh_options);
+
+struct cache_entry *make_empty_cache_entry(struct index_state *istate,
+ size_t name_len);
+
+/*
+ * Create a cache_entry that is not intended to be added to an index.
+ * Caller is responsible for discarding the cache_entry
+ * with `discard_cache_entry`.
+ */
+struct cache_entry *make_transient_cache_entry(unsigned int mode,
+ const struct object_id *oid,
+ const char *path,
+ int stage);
+
+struct cache_entry *make_empty_transient_cache_entry(size_t name_len);
+
+/*
+ * Discard cache entry.
+ */
+void discard_cache_entry(struct cache_entry *ce);
+
+/*
+ * Check configuration if we should perform extra validation on cache
+ * entries.
+ */
+int should_validate_cache_entries(void);
+
+/*
+ * Duplicate a cache_entry. Allocate memory for the new entry from a
+ * memory_pool. Takes into account cache_entry fields that are meant
+ * for managing the underlying memory allocation of the cache_entry.
+ */
+struct cache_entry *dup_cache_entry(const struct cache_entry *ce, struct index_state *istate);
+
+/*
+ * Validate the cache entries in the index. This is an internal
+ * consistency check that the cache_entry structs are allocated from
+ * the expected memory pool.
+ */
+void validate_cache_entries(const struct index_state *istate);
+
#ifndef NO_THE_INDEX_COMPATIBILITY_MACROS
#define active_cache (the_index.cache)
#define active_nr (the_index.cache_nr)
extern int unmerged_index(const struct index_state *);
/**
- * Returns 1 if the index differs from HEAD, 0 otherwise. When on an unborn
- * branch, returns 1 if there are entries in the index, 0 otherwise. If an
- * strbuf is provided, the space-separated list of files that differ will be
- * appended to it.
+ * Returns 1 if istate differs from tree, 0 otherwise. If tree is NULL,
+ * compares istate to HEAD. If tree is NULL and on an unborn branch,
+ * returns 1 if there are entries in istate, 0 otherwise. If an strbuf is
+ * provided, the space-separated list of files that differ will be appended
+ * to it.
*/
-extern int index_has_changes(struct strbuf *sb);
+extern int index_has_changes(const struct index_state *istate,
+ struct tree *tree,
+ struct strbuf *sb);
extern int verify_path(const char *path, unsigned mode);
extern int strcmp_offset(const char *s1, const char *s2, size_t *first_change);
extern int add_to_index(struct index_state *, const char *path, struct stat *, int flags);
extern int add_file_to_index(struct index_state *, const char *path, int flags);
-extern struct cache_entry *make_cache_entry(unsigned int mode, const unsigned char *sha1, const char *path, int stage, unsigned int refresh_options);
extern int chmod_index_entry(struct index_state *, struct cache_entry *ce, char flip);
extern int ce_same_name(const struct cache_entry *a, const struct cache_entry *b);
extern void set_object_name_for_intent_to_add_entry(struct cache_entry *ce);
#define REFRESH_IGNORE_SUBMODULES 0x0010 /* ignore submodules */
#define REFRESH_IN_PORCELAIN 0x0020 /* user friendly output, not "needs update" */
extern int refresh_index(struct index_state *, unsigned int flags, const struct pathspec *pathspec, char *seen, const char *header_msg);
-extern struct cache_entry *refresh_cache_entry(struct cache_entry *, unsigned int);
+extern struct cache_entry *refresh_cache_entry(struct index_state *, struct cache_entry *, unsigned int);
/*
* Opportunistically update the index but do not complain if we can't.
extern int fsync_object_files;
extern int core_preload_index;
-extern int core_commit_graph;
extern int core_apply_sparse_checkout;
extern int precomposed_unicode;
extern int protect_hfs;
static inline int hashcmp(const unsigned char *sha1, const unsigned char *sha2)
{
- return memcmp(sha1, sha2, GIT_SHA1_RAWSZ);
+ return memcmp(sha1, sha2, the_hash_algo->rawsz);
}
static inline int oidcmp(const struct object_id *oid1, const struct object_id *oid2)
static inline void hashcpy(unsigned char *sha_dst, const unsigned char *sha_src)
{
- memcpy(sha_dst, sha_src, GIT_SHA1_RAWSZ);
+ memcpy(sha_dst, sha_src, the_hash_algo->rawsz);
}
static inline void oidcpy(struct object_id *dst, const struct object_id *src)
static inline void hashclr(unsigned char *hash)
{
- memset(hash, 0, GIT_SHA1_RAWSZ);
+ memset(hash, 0, the_hash_algo->rawsz);
}
static inline void oidclr(struct object_id *oid)
#include "remote.h"
#include "refspec.h"
#include "checkout.h"
+#include "config.h"
struct tracking_name_data {
/* const */ char *src_ref;
char *dst_ref;
struct object_id *dst_oid;
- int unique;
+ int num_matches;
+ const char *default_remote;
+ char *default_dst_ref;
+ struct object_id *default_dst_oid;
};
+#define TRACKING_NAME_DATA_INIT { NULL, NULL, NULL, 0, NULL, NULL, NULL }
+
static int check_tracking_name(struct remote *remote, void *cb_data)
{
struct tracking_name_data *cb = cb_data;
free(query.dst);
return 0;
}
+ cb->num_matches++;
+ if (cb->default_remote && !strcmp(remote->name, cb->default_remote)) {
+ struct object_id *dst = xmalloc(sizeof(*cb->default_dst_oid));
+ cb->default_dst_ref = xstrdup(query.dst);
+ oidcpy(dst, cb->dst_oid);
+ cb->default_dst_oid = dst;
+ }
if (cb->dst_ref) {
free(query.dst);
- cb->unique = 0;
return 0;
}
cb->dst_ref = query.dst;
return 0;
}
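+/*
+ * Find the remote-tracking branch that "refs/heads/<name>" maps to.  If
+ * more than one remote has such a branch, fall back to the remote named
+ * by checkout.defaultRemote, if configured; otherwise return NULL.  The
+ * number of matching remotes is reported via dwim_remotes_matched.
+ */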
-const char *unique_tracking_name(const char *name, struct object_id *oid)
+const char *unique_tracking_name(const char *name, struct object_id *oid,
+ int *dwim_remotes_matched)
{
- struct tracking_name_data cb_data = { NULL, NULL, NULL, 1 };
+ struct tracking_name_data cb_data = TRACKING_NAME_DATA_INIT;
+ const char *default_remote = NULL;
+ if (!git_config_get_string_const("checkout.defaultremote", &default_remote))
+ cb_data.default_remote = default_remote;
cb_data.src_ref = xstrfmt("refs/heads/%s", name);
cb_data.dst_oid = oid;
for_each_remote(check_tracking_name, &cb_data);
+ if (dwim_remotes_matched)
+ *dwim_remotes_matched = cb_data.num_matches;
free(cb_data.src_ref);
- if (cb_data.unique)
+ free((char *)default_remote);
+ if (cb_data.num_matches == 1) {
+ free(cb_data.default_dst_ref);
+ free(cb_data.default_dst_oid);
return cb_data.dst_ref;
+ }
free(cb_data.dst_ref);
+ if (cb_data.default_dst_ref) {
+ oidcpy(oid, cb_data.default_dst_oid);
+ free(cb_data.default_dst_oid);
+ return cb_data.default_dst_ref;
+ }
return NULL;
}
* tracking branch. Return the name of the remote if such a branch
* exists, NULL otherwise.
*/
-extern const char *unique_tracking_name(const char *name, struct object_id *oid);
+extern const char *unique_tracking_name(const char *name,
+ struct object_id *oid,
+ int *dwim_remotes_matched);
#endif /* CHECKOUT_H */
#include "packfile.h"
#include "commit.h"
#include "object.h"
+#include "refs.h"
#include "revision.h"
#include "sha1-lookup.h"
#include "commit-graph.h"
#include "object-store.h"
+#include "alloc.h"
#define GRAPH_SIGNATURE 0x43475048 /* "CGPH" */
#define GRAPH_CHUNKID_OIDFANOUT 0x4f494446 /* "OIDF" */
#define GRAPH_LAST_EDGE 0x80000000
+#define GRAPH_HEADER_SIZE 8
#define GRAPH_FANOUT_SIZE (4 * 256)
#define GRAPH_CHUNKLOOKUP_WIDTH 12
-#define GRAPH_MIN_SIZE (5 * GRAPH_CHUNKLOOKUP_WIDTH + GRAPH_FANOUT_SIZE + \
- GRAPH_OID_LEN + 8)
+#define GRAPH_MIN_SIZE (GRAPH_HEADER_SIZE + 4 * GRAPH_CHUNKLOOKUP_WIDTH \
+ + GRAPH_FANOUT_SIZE + GRAPH_OID_LEN)
char *get_commit_graph_filename(const char *obj_dir)
{
exit(1);
}
-/* global storage */
-static struct commit_graph *commit_graph = NULL;
-
-static void prepare_commit_graph_one(const char *obj_dir)
+static void prepare_commit_graph_one(struct repository *r, const char *obj_dir)
{
char *graph_name;
- if (commit_graph)
+ if (r->objects->commit_graph)
return;
graph_name = get_commit_graph_filename(obj_dir);
- commit_graph = load_commit_graph_one(graph_name);
+ r->objects->commit_graph =
+ load_commit_graph_one(graph_name);
FREE_AND_NULL(graph_name);
}
-static int prepare_commit_graph_run_once = 0;
-static void prepare_commit_graph(void)
+/*
+ * Return 1 if commit_graph is non-NULL, and 0 otherwise.
+ *
+ * On the first invocation, this function attempts to load the commit
+ * graph if the_repository is configured to have one.
+ */
+static int prepare_commit_graph(struct repository *r)
{
struct alternate_object_database *alt;
char *obj_dir;
+ int config_value;
+
+ if (r->objects->commit_graph_attempted)
+ return !!r->objects->commit_graph;
+ r->objects->commit_graph_attempted = 1;
+
+ if (repo_config_get_bool(r, "core.commitgraph", &config_value) ||
+ !config_value)
+ /*
+ * This repository is not configured to use commit graphs, so
+ * do not load one. (But report commit_graph_attempted anyway
+ * so that commit graph loading is not attempted again for this
+ * repository.)
+ */
+ return 0;
- if (prepare_commit_graph_run_once)
- return;
- prepare_commit_graph_run_once = 1;
-
- obj_dir = get_object_directory();
- prepare_commit_graph_one(obj_dir);
- prepare_alt_odb(the_repository);
- for (alt = the_repository->objects->alt_odb_list;
- !commit_graph && alt;
+ obj_dir = r->objects->objectdir;
+ prepare_commit_graph_one(r, obj_dir);
+ prepare_alt_odb(r);
+ for (alt = r->objects->alt_odb_list;
+ !r->objects->commit_graph && alt;
alt = alt->next)
- prepare_commit_graph_one(alt->path);
+ prepare_commit_graph_one(r, alt->path);
+ return !!r->objects->commit_graph;
}
static void close_commit_graph(void)
{
- if (!commit_graph)
- return;
-
- if (commit_graph->graph_fd >= 0) {
- munmap((void *)commit_graph->data, commit_graph->data_len);
- commit_graph->data = NULL;
- close(commit_graph->graph_fd);
- }
-
- FREE_AND_NULL(commit_graph);
+ free_commit_graph(the_repository->objects->commit_graph);
+ the_repository->objects->commit_graph = NULL;
}
static int bsearch_graph(struct commit_graph *g, struct object_id *oid, uint32_t *pos)
{
struct commit *c;
struct object_id oid;
+
+ if (pos >= g->num_commits)
+ die("invalid parent position %"PRIu64, pos);
+
hashcpy(oid.hash, g->chunk_oid_lookup + g->hash_len * pos);
- c = lookup_commit(&oid);
+ c = lookup_commit(the_repository, &oid);
if (!c)
die("could not find commit %s", oid_to_hex(&oid));
c->graph_pos = pos;
}
}
-int parse_commit_in_graph(struct commit *item)
+static int parse_commit_in_graph_one(struct commit_graph *g, struct commit *item)
{
uint32_t pos;
- if (!core_commit_graph)
- return 0;
if (item->object.parsed)
return 1;
- prepare_commit_graph();
- if (commit_graph && find_commit_in_graph(item, commit_graph, &pos))
- return fill_commit_in_graph(item, commit_graph, pos);
+
+ if (find_commit_in_graph(item, g, &pos))
+ return fill_commit_in_graph(item, g, pos);
+
return 0;
}
-void load_commit_graph_info(struct commit *item)
+int parse_commit_in_graph(struct repository *r, struct commit *item)
+{
+ if (!prepare_commit_graph(r))
+ return 0;
+ return parse_commit_in_graph_one(r->objects->commit_graph, item);
+}
+
+void load_commit_graph_info(struct repository *r, struct commit *item)
{
uint32_t pos;
- if (!core_commit_graph)
+ if (!prepare_commit_graph(r))
return;
- prepare_commit_graph();
- if (commit_graph && find_commit_in_graph(item, commit_graph, &pos))
- fill_commit_graph_info(item, commit_graph, pos);
+ if (find_commit_in_graph(item, r->objects->commit_graph, &pos))
+ fill_commit_graph_info(item, r->objects->commit_graph, pos);
}
static struct tree *load_tree_for_commit(struct commit_graph *g, struct commit *c)
GRAPH_DATA_WIDTH * (c->graph_pos);
hashcpy(oid.hash, commit_data);
- c->maybe_tree = lookup_tree(&oid);
+ c->maybe_tree = lookup_tree(the_repository, &oid);
return c->maybe_tree;
}
-struct tree *get_commit_tree_in_graph(const struct commit *c)
+static struct tree *get_commit_tree_in_graph_one(struct commit_graph *g,
+ const struct commit *c)
{
if (c->maybe_tree)
return c->maybe_tree;
if (c->graph_pos == COMMIT_NOT_FROM_GRAPH)
- BUG("get_commit_tree_in_graph called from non-commit-graph commit");
+ BUG("get_commit_tree_in_graph_one called from non-commit-graph commit");
- return load_tree_for_commit(commit_graph, (struct commit *)c);
+ return load_tree_for_commit(g, (struct commit *)c);
+}
+
+struct tree *get_commit_tree_in_graph(struct repository *r, const struct commit *c)
+{
+ return get_commit_tree_in_graph_one(r->objects->commit_graph, c);
}
static void write_graph_chunk_fanout(struct hashfile *f,
struct commit *commit;
for (i = 0; i < oids->nr; i++) {
- commit = lookup_commit(&oids->list[i]);
+ commit = lookup_commit(the_repository, &oids->list[i]);
if (commit)
commit->object.flags |= UNINTERESTING;
}
* closure.
*/
for (i = 0; i < oids->nr; i++) {
- commit = lookup_commit(&oids->list[i]);
+ commit = lookup_commit(the_repository, &oids->list[i]);
if (commit && !parse_commit(commit))
add_missing_parents(oids, commit);
}
for (i = 0; i < oids->nr; i++) {
- commit = lookup_commit(&oids->list[i]);
+ commit = lookup_commit(the_repository, &oids->list[i]);
if (commit)
commit->object.flags &= ~UNINTERESTING;
}
}
+static int add_ref_to_list(const char *refname,
+ const struct object_id *oid,
+ int flags, void *cb_data)
+{
+ struct string_list *list = (struct string_list *)cb_data;
+
+ string_list_append(list, oid_to_hex(oid));
+ return 0;
+}
+
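+/* Write a commit-graph covering every commit reachable from any ref. */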
+void write_commit_graph_reachable(const char *obj_dir, int append)
+{
+ struct string_list list;
+
+ string_list_init(&list, 1);
+ for_each_ref(add_ref_to_list, &list);
+ write_commit_graph(obj_dir, NULL, &list, append);
+}
+
void write_commit_graph(const char *obj_dir,
- const char **pack_indexes,
- int nr_packs,
- const char **commit_hex,
- int nr_commits,
+ struct string_list *pack_indexes,
+ struct string_list *commit_hex,
int append)
{
struct packed_oid_list oids;
oids.alloc = approximate_object_count() / 4;
if (append) {
- prepare_commit_graph_one(obj_dir);
- if (commit_graph)
- oids.alloc += commit_graph->num_commits;
+ prepare_commit_graph_one(the_repository, obj_dir);
+ if (the_repository->objects->commit_graph)
+ oids.alloc += the_repository->objects->commit_graph->num_commits;
}
if (oids.alloc < 1024)
oids.alloc = 1024;
ALLOC_ARRAY(oids.list, oids.alloc);
- if (append && commit_graph) {
+ if (append && the_repository->objects->commit_graph) {
+ struct commit_graph *commit_graph =
+ the_repository->objects->commit_graph;
for (i = 0; i < commit_graph->num_commits; i++) {
const unsigned char *hash = commit_graph->chunk_oid_lookup +
commit_graph->hash_len * i;
int dirlen;
strbuf_addf(&packname, "%s/pack/", obj_dir);
dirlen = packname.len;
- for (i = 0; i < nr_packs; i++) {
+ for (i = 0; i < pack_indexes->nr; i++) {
struct packed_git *p;
strbuf_setlen(&packname, dirlen);
- strbuf_addstr(&packname, pack_indexes[i]);
+ strbuf_addstr(&packname, pack_indexes->items[i].string);
p = add_packed_git(packname.buf, packname.len, 1);
if (!p)
die("error adding pack %s", packname.buf);
}
if (commit_hex) {
- for (i = 0; i < nr_commits; i++) {
+ for (i = 0; i < commit_hex->nr; i++) {
const char *end;
struct object_id oid;
struct commit *result;
- if (commit_hex[i] && parse_oid_hex(commit_hex[i], &oid, &end))
+ if (commit_hex->items[i].string &&
+ parse_oid_hex(commit_hex->items[i].string, &oid, &end))
continue;
- result = lookup_commit_reference_gently(&oid, 1);
+ result = lookup_commit_reference_gently(the_repository, &oid, 1);
if (result) {
ALLOC_GROW(oids.list, oids.nr + 1, oids.alloc);
if (i > 0 && !oidcmp(&oids.list[i-1], &oids.list[i]))
continue;
- commits.list[commits.nr] = lookup_commit(&oids.list[i]);
+ commits.list[commits.nr] = lookup_commit(the_repository, &oids.list[i]);
parse_commit(commits.list[commits.nr]);
for (parent = commits.list[commits.nr]->parents;
oids.alloc = 0;
oids.nr = 0;
}
+
+#define VERIFY_COMMIT_GRAPH_ERROR_HASH 2
+static int verify_commit_graph_error;
+
+static void graph_report(const char *fmt, ...)
+{
+ va_list ap;
+
+ verify_commit_graph_error = 1;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ fprintf(stderr, "\n");
+ va_end(ap);
+}
+
+#define GENERATION_ZERO_EXISTS 1
+#define GENERATION_NUMBER_EXISTS 2
+
+int verify_commit_graph(struct repository *r, struct commit_graph *g)
+{
+ uint32_t i, cur_fanout_pos = 0;
+ struct object_id prev_oid, cur_oid, checksum;
+ int generation_zero = 0;
+ struct hashfile *f;
+ int devnull;
+
+ if (!g) {
+ graph_report("no commit-graph file loaded");
+ return 1;
+ }
+
+ verify_commit_graph_error = 0;
+
+ if (!g->chunk_oid_fanout)
+ graph_report("commit-graph is missing the OID Fanout chunk");
+ if (!g->chunk_oid_lookup)
+ graph_report("commit-graph is missing the OID Lookup chunk");
+ if (!g->chunk_commit_data)
+ graph_report("commit-graph is missing the Commit Data chunk");
+
+ if (verify_commit_graph_error)
+ return verify_commit_graph_error;
+
+ devnull = open("/dev/null", O_WRONLY);
+ f = hashfd(devnull, NULL);
+ hashwrite(f, g->data, g->data_len - g->hash_len);
+ finalize_hashfile(f, checksum.hash, CSUM_CLOSE);
+ if (hashcmp(checksum.hash, g->data + g->data_len - g->hash_len)) {
+ graph_report(_("the commit-graph file has incorrect checksum and is likely corrupt"));
+ verify_commit_graph_error = VERIFY_COMMIT_GRAPH_ERROR_HASH;
+ }
+
+ for (i = 0; i < g->num_commits; i++) {
+ struct commit *graph_commit;
+
+ hashcpy(cur_oid.hash, g->chunk_oid_lookup + g->hash_len * i);
+
+ if (i && oidcmp(&prev_oid, &cur_oid) >= 0)
+ graph_report("commit-graph has incorrect OID order: %s then %s",
+ oid_to_hex(&prev_oid),
+ oid_to_hex(&cur_oid));
+
+ oidcpy(&prev_oid, &cur_oid);
+
+ while (cur_oid.hash[0] > cur_fanout_pos) {
+ uint32_t fanout_value = get_be32(g->chunk_oid_fanout + cur_fanout_pos);
+
+ if (i != fanout_value)
+ graph_report("commit-graph has incorrect fanout value: fanout[%d] = %u != %u",
+ cur_fanout_pos, fanout_value, i);
+ cur_fanout_pos++;
+ }
+
+ graph_commit = lookup_commit(r, &cur_oid);
+ if (!parse_commit_in_graph_one(g, graph_commit))
+ graph_report("failed to parse %s from commit-graph",
+ oid_to_hex(&cur_oid));
+ }
+
+ while (cur_fanout_pos < 256) {
+ uint32_t fanout_value = get_be32(g->chunk_oid_fanout + cur_fanout_pos);
+
+ if (g->num_commits != fanout_value)
+ graph_report("commit-graph has incorrect fanout value: fanout[%d] = %u != %u",
+ cur_fanout_pos, fanout_value, i);
+
+ cur_fanout_pos++;
+ }
+
+ if (verify_commit_graph_error & ~VERIFY_COMMIT_GRAPH_ERROR_HASH)
+ return verify_commit_graph_error;
+
+ for (i = 0; i < g->num_commits; i++) {
+ struct commit *graph_commit, *odb_commit;
+ struct commit_list *graph_parents, *odb_parents;
+ uint32_t max_generation = 0;
+
+ hashcpy(cur_oid.hash, g->chunk_oid_lookup + g->hash_len * i);
+
+ graph_commit = lookup_commit(r, &cur_oid);
+ odb_commit = (struct commit *)create_object(r, cur_oid.hash, alloc_commit_node(r));
+ if (parse_commit_internal(odb_commit, 0, 0)) {
+ graph_report("failed to parse %s from object database",
+ oid_to_hex(&cur_oid));
+ continue;
+ }
+
+ if (oidcmp(&get_commit_tree_in_graph_one(g, graph_commit)->object.oid,
+ get_commit_tree_oid(odb_commit)))
+ graph_report("root tree OID for commit %s in commit-graph is %s != %s",
+ oid_to_hex(&cur_oid),
+ oid_to_hex(get_commit_tree_oid(graph_commit)),
+ oid_to_hex(get_commit_tree_oid(odb_commit)));
+
+ graph_parents = graph_commit->parents;
+ odb_parents = odb_commit->parents;
+
+ while (graph_parents) {
+ if (odb_parents == NULL) {
+ graph_report("commit-graph parent list for commit %s is too long",
+ oid_to_hex(&cur_oid));
+ break;
+ }
+
+ if (oidcmp(&graph_parents->item->object.oid, &odb_parents->item->object.oid))
+ graph_report("commit-graph parent for %s is %s != %s",
+ oid_to_hex(&cur_oid),
+ oid_to_hex(&graph_parents->item->object.oid),
+ oid_to_hex(&odb_parents->item->object.oid));
+
+ if (graph_parents->item->generation > max_generation)
+ max_generation = graph_parents->item->generation;
+
+ graph_parents = graph_parents->next;
+ odb_parents = odb_parents->next;
+ }
+
+ if (odb_parents != NULL)
+ graph_report("commit-graph parent list for commit %s terminates early",
+ oid_to_hex(&cur_oid));
+
+ if (!graph_commit->generation) {
+ if (generation_zero == GENERATION_NUMBER_EXISTS)
+ graph_report("commit-graph has generation number zero for commit %s, but non-zero elsewhere",
+ oid_to_hex(&cur_oid));
+ generation_zero = GENERATION_ZERO_EXISTS;
+ } else if (generation_zero == GENERATION_ZERO_EXISTS)
+ graph_report("commit-graph has non-zero generation number for commit %s, but zero elsewhere",
+ oid_to_hex(&cur_oid));
+
+ if (generation_zero == GENERATION_ZERO_EXISTS)
+ continue;
+
+ /*
+ * If one of our parents has generation GENERATION_NUMBER_MAX, then
+ * our generation is also GENERATION_NUMBER_MAX. Decrement to avoid
+ * extra logic in the following condition.
+ */
+ if (max_generation == GENERATION_NUMBER_MAX)
+ max_generation--;
+
+ if (graph_commit->generation != max_generation + 1)
+ graph_report("commit-graph generation for commit %s is %u != %u",
+ oid_to_hex(&cur_oid),
+ graph_commit->generation,
+ max_generation + 1);
+
+ if (graph_commit->date != odb_commit->date)
+ graph_report("commit date for commit %s in commit-graph is %"PRItime" != %"PRItime,
+ oid_to_hex(&cur_oid),
+ graph_commit->date,
+ odb_commit->date);
+ }
+
+ return verify_commit_graph_error;
+}
+
+void free_commit_graph(struct commit_graph *g)
+{
+ if (!g)
+ return;
+ if (g->graph_fd >= 0) {
+ munmap((void *)g->data, g->data_len);
+ g->data = NULL;
+ close(g->graph_fd);
+ }
+ free(g);
+}
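
verify_commit_graph() needs only a loaded graph and a repository; a minimal
sketch of the load/verify/free cycle a "verify" style caller might use,
assuming git.git internals (the obj_dir parameter is only illustrative):

#include "commit-graph.h"
#include "repository.h"

static int verify_graph_example(struct repository *r, const char *obj_dir)
{
	char *graph_name = get_commit_graph_filename(obj_dir);
	struct commit_graph *g = load_commit_graph_one(graph_name);
	int ret;

	free(graph_name);
	/* a NULL graph is reported as "no commit-graph file loaded" */
	ret = verify_commit_graph(r, g);
	free_commit_graph(g);
	return ret;
}
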
#define COMMIT_GRAPH_H
#include "git-compat-util.h"
+#include "repository.h"
+#include "string-list.h"
+
+struct commit;
char *get_commit_graph_filename(const char *obj_dir);
*
* See parse_commit_buffer() for the fallback after this call.
*/
-int parse_commit_in_graph(struct commit *item);
+int parse_commit_in_graph(struct repository *r, struct commit *item);
/*
* It is possible that we loaded commit contents from the commit buffer,
* checked and filled. Fill the graph_pos and generation members of
* the given commit.
*/
-void load_commit_graph_info(struct commit *item);
+void load_commit_graph_info(struct repository *r, struct commit *item);
-struct tree *get_commit_tree_in_graph(const struct commit *c);
+struct tree *get_commit_tree_in_graph(struct repository *r,
+ const struct commit *c);
struct commit_graph {
int graph_fd;
struct commit_graph *load_commit_graph_one(const char *graph_file);
+void write_commit_graph_reachable(const char *obj_dir, int append);
void write_commit_graph(const char *obj_dir,
- const char **pack_indexes,
- int nr_packs,
- const char **commit_hex,
- int nr_commits,
+ struct string_list *pack_indexes,
+ struct string_list *commit_hex,
int append);
+int verify_commit_graph(struct repository *r, struct commit_graph *g);
+
+void free_commit_graph(struct commit_graph *);
+
#endif
#define implement_commit_slab(slabname, elemtype, scope) \
\
-static int stat_ ##slabname## realloc; \
- \
scope void init_ ##slabname## _with_stride(struct slabname *s, \
unsigned stride) \
{ \
if (!add_if_missing) \
return NULL; \
REALLOC_ARRAY(s->slab, nth_slab + 1); \
- stat_ ##slabname## realloc++; \
for (i = s->slab_count; i <= nth_slab; i++) \
s->slab[i] = NULL; \
s->slab_count = nth_slab + 1; \
#include "sha1-lookup.h"
#include "wt-status.h"
#include "advice.h"
+#include "refs.h"
static struct commit_extra_header *read_commit_extra_header_lines(const char *buf, size_t len, const char **);
const char *commit_type = "commit";
-struct commit *lookup_commit_reference_gently(const struct object_id *oid,
- int quiet)
+struct commit *lookup_commit_reference_gently(struct repository *r,
+ const struct object_id *oid, int quiet)
{
- struct object *obj = deref_tag(parse_object(oid), NULL, 0);
+ struct object *obj = deref_tag(r,
+ parse_object(r, oid),
+ NULL, 0);
if (!obj)
return NULL;
- return object_as_type(obj, OBJ_COMMIT, quiet);
+ return object_as_type(r, obj, OBJ_COMMIT, quiet);
}
-struct commit *lookup_commit_reference(const struct object_id *oid)
+struct commit *lookup_commit_reference(struct repository *r, const struct object_id *oid)
{
- return lookup_commit_reference_gently(oid, 0);
+ return lookup_commit_reference_gently(r, oid, 0);
}
struct commit *lookup_commit_or_die(const struct object_id *oid, const char *ref_name)
{
- struct commit *c = lookup_commit_reference(oid);
+ struct commit *c = lookup_commit_reference(the_repository, oid);
if (!c)
die(_("could not parse %s"), ref_name);
if (oidcmp(oid, &c->object.oid)) {
return c;
}
-struct commit *lookup_commit(const struct object_id *oid)
+struct commit *lookup_commit(struct repository *r, const struct object_id *oid)
{
- struct object *obj = lookup_object(oid->hash);
+ struct object *obj = lookup_object(r, oid->hash);
if (!obj)
- return create_object(the_repository, oid->hash,
- alloc_commit_node(the_repository));
- return object_as_type(obj, OBJ_COMMIT, 0);
+ return create_object(r, oid->hash,
+ alloc_commit_node(r));
+ return object_as_type(r, obj, OBJ_COMMIT, 0);
}
struct commit *lookup_commit_reference_by_name(const char *name)
if (get_oid_committish(name, &oid))
return NULL;
- commit = lookup_commit_reference(&oid);
+ commit = lookup_commit_reference(the_repository, &oid);
if (parse_commit(commit))
return NULL;
return commit;
unsigned long size;
};
define_commit_slab(buffer_slab, struct commit_buffer);
-static struct buffer_slab buffer_slab = COMMIT_SLAB_INIT(1, buffer_slab);
-void set_commit_buffer(struct commit *commit, void *buffer, unsigned long size)
+struct buffer_slab *allocate_commit_buffer_slab(void)
{
- struct commit_buffer *v = buffer_slab_at(&buffer_slab, commit);
+ struct buffer_slab *bs = xmalloc(sizeof(*bs));
+ init_buffer_slab(bs);
+ return bs;
+}
+
+void free_commit_buffer_slab(struct buffer_slab *bs)
+{
+ clear_buffer_slab(bs);
+ free(bs);
+}
+
+void set_commit_buffer(struct repository *r, struct commit *commit, void *buffer, unsigned long size)
+{
+ struct commit_buffer *v = buffer_slab_at(
+ r->parsed_objects->buffer_slab, commit);
v->buffer = buffer;
v->size = size;
}
-const void *get_cached_commit_buffer(const struct commit *commit, unsigned long *sizep)
+const void *get_cached_commit_buffer(struct repository *r, const struct commit *commit, unsigned long *sizep)
{
- struct commit_buffer *v = buffer_slab_peek(&buffer_slab, commit);
+ struct commit_buffer *v = buffer_slab_peek(
+ r->parsed_objects->buffer_slab, commit);
if (!v) {
if (sizep)
*sizep = 0;
const void *get_commit_buffer(const struct commit *commit, unsigned long *sizep)
{
- const void *ret = get_cached_commit_buffer(commit, sizep);
+ const void *ret = get_cached_commit_buffer(the_repository, commit, sizep);
if (!ret) {
enum object_type type;
unsigned long size;
void unuse_commit_buffer(const struct commit *commit, const void *buffer)
{
- struct commit_buffer *v = buffer_slab_peek(&buffer_slab, commit);
+ struct commit_buffer *v = buffer_slab_peek(
+ the_repository->parsed_objects->buffer_slab, commit);
if (!(v && v->buffer == buffer))
free((void *)buffer);
}
void free_commit_buffer(struct commit *commit)
{
- struct commit_buffer *v = buffer_slab_peek(&buffer_slab, commit);
+ struct commit_buffer *v = buffer_slab_peek(
+ the_repository->parsed_objects->buffer_slab, commit);
if (v) {
FREE_AND_NULL(v->buffer);
v->size = 0;
if (commit->graph_pos == COMMIT_NOT_FROM_GRAPH)
BUG("commit has NULL tree, but was not loaded from commit-graph");
- return get_commit_tree_in_graph(commit);
+ return get_commit_tree_in_graph(the_repository, commit);
}
struct object_id *get_commit_tree_oid(const struct commit *commit)
const void *detach_commit_buffer(struct commit *commit, unsigned long *sizep)
{
- struct commit_buffer *v = buffer_slab_peek(&buffer_slab, commit);
+ struct commit_buffer *v = buffer_slab_peek(
+ the_repository->parsed_objects->buffer_slab, commit);
void *ret;
if (!v) {
return ret;
}
-int parse_commit_buffer(struct commit *item, const void *buffer, unsigned long size, int check_graph)
+int parse_commit_buffer(struct repository *r, struct commit *item, const void *buffer, unsigned long size, int check_graph)
{
const char *tail = buffer;
const char *bufptr = buffer;
struct object_id parent;
struct commit_list **pptr;
struct commit_graft *graft;
- const int tree_entry_len = GIT_SHA1_HEXSZ + 5;
- const int parent_entry_len = GIT_SHA1_HEXSZ + 7;
+ const int tree_entry_len = the_hash_algo->hexsz + 5;
+ const int parent_entry_len = the_hash_algo->hexsz + 7;
if (item->object.parsed)
return 0;
if (get_oid_hex(bufptr + 5, &parent) < 0)
return error("bad tree pointer in commit %s",
oid_to_hex(&item->object.oid));
- item->maybe_tree = lookup_tree(&parent);
+ item->maybe_tree = lookup_tree(r, &parent);
bufptr += tree_entry_len + 1; /* "tree " + "hex sha1" + "\n" */
pptr = &item->parents;
- graft = lookup_commit_graft(the_repository, &item->object.oid);
+ graft = lookup_commit_graft(r, &item->object.oid);
while (bufptr + parent_entry_len < tail && !memcmp(bufptr, "parent ", 7)) {
struct commit *new_parent;
*/
if (graft && (graft->nr_parent < 0 || grafts_replace_parents))
continue;
- new_parent = lookup_commit(&parent);
+ new_parent = lookup_commit(r, &parent);
if (new_parent)
pptr = &commit_list_insert(new_parent, pptr)->next;
}
int i;
struct commit *new_parent;
for (i = 0; i < graft->nr_parent; i++) {
- new_parent = lookup_commit(&graft->parent[i]);
+ new_parent = lookup_commit(r,
+ &graft->parent[i]);
if (!new_parent)
continue;
pptr = &commit_list_insert(new_parent, pptr)->next;
item->date = parse_commit_date(bufptr, tail);
if (check_graph)
- load_commit_graph_info(item);
+ load_commit_graph_info(the_repository, item);
return 0;
}
-int parse_commit_gently(struct commit *item, int quiet_on_missing)
+int parse_commit_internal(struct commit *item, int quiet_on_missing, int use_commit_graph)
{
enum object_type type;
void *buffer;
return -1;
if (item->object.parsed)
return 0;
- if (parse_commit_in_graph(item))
+ if (use_commit_graph && parse_commit_in_graph(the_repository, item))
return 0;
buffer = read_object_file(&item->object.oid, &type, &size);
if (!buffer)
return error("Object %s not a commit",
oid_to_hex(&item->object.oid));
}
- ret = parse_commit_buffer(item, buffer, size, 0);
+
+ ret = parse_commit_buffer(the_repository, item, buffer, size, 0);
if (save_commit_buffer && !ret) {
- set_commit_buffer(item, buffer, size);
+ set_commit_buffer(the_repository, item, buffer, size);
return 0;
}
free(buffer);
return ret;
}
+int parse_commit_gently(struct commit *item, int quiet_on_missing)
+{
+ return parse_commit_internal(item, quiet_on_missing, 1);
+}
+
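
parse_commit_gently() keeps its old behaviour by delegating with
use_commit_graph=1; a caller that must not trust the graph data, such as the
verifier above, can pass 0 instead. A minimal sketch, assuming a commit that
has already been looked up:

#include "commit.h"

static int parse_bypassing_graph_example(struct commit *c)
{
	/* quiet on missing object, and do not consult the commit-graph */
	if (parse_commit_internal(c, 1, 0))
		return -1;
	return 0;
}
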
void parse_commit_or_die(struct commit *item)
{
if (parse_commit(item))
return result;
}
+struct rev_collect {
+ struct commit **commit;
+ int nr;
+ int alloc;
+ unsigned int initial : 1;
+};
+
+static void add_one_commit(struct object_id *oid, struct rev_collect *revs)
+{
+ struct commit *commit;
+
+ if (is_null_oid(oid))
+ return;
+
+ commit = lookup_commit(the_repository, oid);
+ if (!commit ||
+ (commit->object.flags & TMP_MARK) ||
+ parse_commit(commit))
+ return;
+
+ ALLOC_GROW(revs->commit, revs->nr + 1, revs->alloc);
+ revs->commit[revs->nr++] = commit;
+ commit->object.flags |= TMP_MARK;
+}
+
+static int collect_one_reflog_ent(struct object_id *ooid, struct object_id *noid,
+ const char *ident, timestamp_t timestamp,
+ int tz, const char *message, void *cbdata)
+{
+ struct rev_collect *revs = cbdata;
+
+ if (revs->initial) {
+ revs->initial = 0;
+ add_one_commit(ooid, revs);
+ }
+ add_one_commit(noid, revs);
+ return 0;
+}
+
+struct commit *get_fork_point(const char *refname, struct commit *commit)
+{
+ struct object_id oid;
+ struct rev_collect revs;
+ struct commit_list *bases;
+ int i;
+ struct commit *ret = NULL;
+
+ memset(&revs, 0, sizeof(revs));
+ revs.initial = 1;
+ for_each_reflog_ent(refname, collect_one_reflog_ent, &revs);
+
+ if (!revs.nr && !get_oid(refname, &oid))
+ add_one_commit(&oid, &revs);
+
+ for (i = 0; i < revs.nr; i++)
+ revs.commit[i]->object.flags &= ~TMP_MARK;
+
+ bases = get_merge_bases_many(commit, revs.nr, revs.commit);
+
+ /*
+ * There should be one and only one merge base, when we found
+ * a common ancestor among reflog entries.
+ */
+ if (!bases || bases->next)
+ goto cleanup_return;
+
+ /* And the found one must be one of the reflog entries */
+ for (i = 0; i < revs.nr; i++)
+ if (&bases->item->object == &revs.commit[i]->object)
+ break; /* found */
+ if (revs.nr <= i)
+ goto cleanup_return;
+
+ ret = bases->item;
+
+cleanup_return:
+ free_commit_list(bases);
+ return ret;
+}
+
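
get_fork_point() collects the commits recorded in the reflog of the given
ref and accepts a merge base with the given commit only if that base itself
appears among those reflog entries. A minimal usage sketch, assuming git.git
internals; the ref name is only an example:

#include "cache.h"
#include "commit.h"

static void fork_point_example(void)
{
	struct commit *head = lookup_commit_reference_by_name("HEAD");
	struct commit *fork_point;

	if (!head)
		return;
	fork_point = get_fork_point("refs/remotes/origin/master", head);
	if (fork_point)
		printf("forked at %s\n", oid_to_hex(&fork_point->object.oid));
}
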
struct commit_list *get_octopus_merge_bases(struct commit_list *in)
{
struct commit_list *i, *j, *k, *ret = NULL;
struct object_id oid;
if (get_oid(name, &oid))
return NULL;
- obj = parse_object(&oid);
+ obj = parse_object(the_repository, &oid);
commit = (struct commit *)peel_to_type(name, 0, obj, OBJ_COMMIT);
if (commit && !merge_remote_util(commit))
set_merge_remote_desc(commit, name, obj);
void add_name_decoration(enum decoration_type type, const char *name, struct object *obj);
const struct name_decoration *get_name_decoration(const struct object *obj);
-struct commit *lookup_commit(const struct object_id *oid);
-struct commit *lookup_commit_reference(const struct object_id *oid);
-struct commit *lookup_commit_reference_gently(const struct object_id *oid,
+struct commit *lookup_commit(struct repository *r, const struct object_id *oid);
+struct commit *lookup_commit_reference(struct repository *r,
+ const struct object_id *oid);
+struct commit *lookup_commit_reference_gently(struct repository *r,
+ const struct object_id *oid,
int quiet);
struct commit *lookup_commit_reference_by_name(const char *name);
*/
struct commit *lookup_commit_or_die(const struct object_id *oid, const char *ref_name);
-int parse_commit_buffer(struct commit *item, const void *buffer, unsigned long size, int check_graph);
+int parse_commit_buffer(struct repository *r, struct commit *item, const void *buffer, unsigned long size, int check_graph);
+int parse_commit_internal(struct commit *item, int quiet_on_missing, int use_commit_graph);
int parse_commit_gently(struct commit *item, int quiet_on_missing);
static inline int parse_commit(struct commit *item)
{
}
void parse_commit_or_die(struct commit *item);
+struct buffer_slab;
+struct buffer_slab *allocate_commit_buffer_slab(void);
+void free_commit_buffer_slab(struct buffer_slab *bs);
+
/*
* Associate an object buffer with the commit. The ownership of the
* memory is handed over to the commit, and must be free()-able.
*/
-void set_commit_buffer(struct commit *, void *buffer, unsigned long size);
+void set_commit_buffer(struct repository *r, struct commit *, void *buffer, unsigned long size);
/*
* Get any cached object buffer associated with the commit. Returns NULL
* if none. The resulting memory should not be freed.
*/
-const void *get_cached_commit_buffer(const struct commit *, unsigned long *size);
+const void *get_cached_commit_buffer(struct repository *, const struct commit *, unsigned long *size);
/*
* Get the commit's object contents, either from cache or by reading the object
/* To be used only when object flags after this call no longer matter */
extern struct commit_list *get_merge_bases_many_dirty(struct commit *one, int n, struct commit **twos);
+struct commit *get_fork_point(const char *refname, struct commit *commit);
+
/* largest positive number a signed 32-bit integer can contain */
#define INFINITE_DEPTH 0x7fffffff
the git operations.
3. Inside Git's directory run the command:
- make common-cmds.h
- to generate the common-cmds.h file needed to compile git.
+ make command-list.h
+ to generate the command-list.h file needed to compile git.
4. Then either build Git with the GNU Make Makefile in the Git projects
root
enum config_origin_type origin_type;
const char *name;
const char *path;
- int die_on_error;
+ enum config_error_action default_error_action;
int linenr;
int eof;
struct strbuf value;
cf->linenr, cf->name);
}
- if (cf->die_on_error)
+ switch (opts && opts->error_action ?
+ opts->error_action :
+ cf->default_error_action) {
+ case CONFIG_ERROR_DIE:
die("%s", error_msg);
- else
+ break;
+ case CONFIG_ERROR_ERROR:
error_return = error("%s", error_msg);
+ break;
+ case CONFIG_ERROR_SILENT:
+ error_return = -1;
+ break;
+ case CONFIG_ERROR_UNSET:
+ BUG("config error action unset");
+ }
free(error_msg);
return error_return;
return 0;
}
- if (!strcmp(var, "core.commitgraph")) {
- core_commit_graph = git_config_bool(var, value);
- return 0;
- }
-
if (!strcmp(var, "core.sparsecheckout")) {
core_apply_sparse_checkout = git_config_bool(var, value);
return 0;
top.origin_type = origin_type;
top.name = name;
top.path = path;
- top.die_on_error = 1;
+ top.default_error_action = CONFIG_ERROR_DIE;
top.do_fgetc = config_file_fgetc;
top.do_ungetc = config_file_ungetc;
top.do_ftell = config_file_ftell;
return git_config_from_file_with_options(fn, filename, data, NULL);
}
-int git_config_from_mem(config_fn_t fn, const enum config_origin_type origin_type,
- const char *name, const char *buf, size_t len, void *data)
+int git_config_from_mem(config_fn_t fn,
+ const enum config_origin_type origin_type,
+ const char *name, const char *buf, size_t len,
+ void *data, const struct config_options *opts)
{
struct config_source top;
top.origin_type = origin_type;
top.name = name;
top.path = NULL;
- top.die_on_error = 0;
+ top.default_error_action = CONFIG_ERROR_ERROR;
top.do_fgetc = config_buf_fgetc;
top.do_ungetc = config_buf_ungetc;
top.do_ftell = config_buf_ftell;
- return do_config_from(&top, fn, data, NULL);
+ return do_config_from(&top, fn, data, opts);
}
int git_config_from_blob_oid(config_fn_t fn,
return error("reference '%s' does not point to a blob", name);
}
- ret = git_config_from_mem(fn, CONFIG_ORIGIN_BLOB, name, buf, size, data);
+ ret = git_config_from_mem(fn, CONFIG_ORIGIN_BLOB, name, buf, size,
+ data, NULL);
free(buf);
return ret;
const char *git_dir;
config_parser_event_fn_t event_fn;
void *event_fn_data;
+ enum config_error_action {
+ CONFIG_ERROR_UNSET = 0, /* use source-specific default */
+ CONFIG_ERROR_DIE, /* die() on error */
+ CONFIG_ERROR_ERROR, /* error() on error, return -1 */
+ CONFIG_ERROR_SILENT, /* return -1 */
+ } error_action;
};
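
With error_action in config_options, a caller of git_config_from_mem() can
override the source's default behaviour on malformed input, e.g. to fail
silently instead of printing error(). A minimal sketch, assuming git.git
internals; the callback, buffer name and the CONFIG_ORIGIN_BLOB origin are
only illustrative:

#include "cache.h"
#include "config.h"

static int count_entries(const char *key, const char *value, void *data)
{
	int *n = data;
	(*n)++;
	return 0;
}

static int parse_config_quietly(const char *buf, size_t len)
{
	struct config_options opts;
	int n = 0;

	memset(&opts, 0, sizeof(opts));
	opts.error_action = CONFIG_ERROR_SILENT;
	if (git_config_from_mem(count_entries, CONFIG_ORIGIN_BLOB,
				"in-memory config", buf, len, &n, &opts) < 0)
		return -1;	/* syntax error, nothing printed */
	return n;
}
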
typedef int (*config_fn_t)(const char *, const char *, void *);
extern int git_config_from_file_with_options(config_fn_t fn, const char *,
void *,
const struct config_options *);
-extern int git_config_from_mem(config_fn_t fn, const enum config_origin_type,
- const char *name, const char *buf, size_t len, void *data);
+extern int git_config_from_mem(config_fn_t fn,
+ const enum config_origin_type,
+ const char *name,
+ const char *buf, size_t len,
+ void *data, const struct config_options *opts);
extern int git_config_from_blob_oid(config_fn_t fn, const char *name,
const struct object_id *oid, void *data);
extern void git_config_push_parameter(const char *text);
#ifndef CONNECT_H
#define CONNECT_H
+#include "protocol.h"
+
#define CONNECT_VERBOSE (1u << 0)
#define CONNECT_DIAG_URL (1u << 1)
#define CONNECT_IPV4 (1u << 2)
argv_array_push(&rev_list.args, "--stdin");
if (repository_format_partial_clone)
argv_array_push(&rev_list.args, "--exclude-promisor-objects");
- argv_array_push(&rev_list.args, "--not");
- argv_array_push(&rev_list.args, "--all");
+ if (!opt->is_deepening_fetch) {
+ argv_array_push(&rev_list.args, "--not");
+ argv_array_push(&rev_list.args, "--all");
+ }
argv_array_push(&rev_list.args, "--quiet");
if (opt->progress)
argv_array_pushf(&rev_list.args, "--progress=%s",
* Insert these variables into the environment of the child process.
*/
const char **env;
+
+ /*
+ * If non-zero, check the ancestry chain completely, not stopping at
+ * any existing ref. This is necessary when deepening existing refs
+ * during a fetch.
+ */
+ unsigned is_deepening_fetch : 1;
};
#define CHECK_CONNECTED_INIT { 0 }
// These excluded functions must access c->maybe_tree directly.
@@
-identifier f !~ "^(get_commit_tree|get_commit_tree_in_graph|load_tree_for_commit)$";
+identifier f !~ "^(get_commit_tree|get_commit_tree_in_graph_one|load_tree_for_commit)$";
expression c;
@@
f(...) {...
strbuf_addf(&trace, "%s (%s, considered %s):\n", context, path, encoding);
for (i = 0; i < len && buf; ++i) {
strbuf_addf(
- &trace,"| \e[2m%2i:\e[0m %2x \e[2m%c\e[0m%c",
+ &trace, "| \033[2m%2i:\033[0m %2x \033[2m%c\033[0m%c",
i,
(unsigned char) buf[i],
(buf[i] > 32 && buf[i] < 127 ? buf[i] : ' '),
struct object_array_entry *ent;
uint64_t start = getnanotime();
+ if (revs->pending.nr != 1)
+ BUG("run_diff_index must be passed exactly one tree");
+
ent = revs->pending.objects;
if (diff_cache(revs, &ent->item->oid, ent->name, cached))
exit(128);
static int diff_suppress_blank_empty;
static int diff_use_color_default = -1;
static int diff_color_moved_default;
+static int diff_color_moved_ws_default;
static int diff_context_default = 3;
static int diff_interhunk_context_default;
static const char *diff_word_regex_cfg;
return COLOR_MOVED_NO;
else if (!strcmp(arg, "plain"))
return COLOR_MOVED_PLAIN;
+ else if (!strcmp(arg, "blocks"))
+ return COLOR_MOVED_BLOCKS;
else if (!strcmp(arg, "zebra"))
return COLOR_MOVED_ZEBRA;
else if (!strcmp(arg, "default"))
else if (!strcmp(arg, "dimmed_zebra"))
return COLOR_MOVED_ZEBRA_DIM;
else
- return error(_("color moved setting must be one of 'no', 'default', 'zebra', 'dimmed_zebra', 'plain'"));
+ return error(_("color moved setting must be one of 'no', 'default', 'blocks', 'zebra', 'dimmed_zebra', 'plain'"));
+}
+
+static int parse_color_moved_ws(const char *arg)
+{
+ int ret = 0;
+ struct string_list l = STRING_LIST_INIT_DUP;
+ struct string_list_item *i;
+
+ string_list_split(&l, arg, ',', -1);
+
+ for_each_string_list_item(i, &l) {
+ struct strbuf sb = STRBUF_INIT;
+ strbuf_addstr(&sb, i->string);
+ strbuf_trim(&sb);
+
+ if (!strcmp(sb.buf, "ignore-space-change"))
+ ret |= XDF_IGNORE_WHITESPACE_CHANGE;
+ else if (!strcmp(sb.buf, "ignore-space-at-eol"))
+ ret |= XDF_IGNORE_WHITESPACE_AT_EOL;
+ else if (!strcmp(sb.buf, "ignore-all-space"))
+ ret |= XDF_IGNORE_WHITESPACE;
+ else if (!strcmp(sb.buf, "allow-indentation-change"))
+ ret |= COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE;
+ else
+ error(_("ignoring unknown color-moved-ws mode '%s'"), sb.buf);
+
+ strbuf_release(&sb);
+ }
+
+ if ((ret & COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE) &&
+ (ret & XDF_WHITESPACE_FLAGS))
+ die(_("color-moved-ws: allow-indentation-change cannot be combined with other white space modes"));
+
+ string_list_clear(&l, 0);
+
+ return ret;
}
int git_diff_ui_config(const char *var, const char *value, void *cb)
diff_color_moved_default = cm;
return 0;
}
+ if (!strcmp(var, "diff.colormovedws")) {
+ int cm = parse_color_moved_ws(value);
+ if (cm < 0)
+ return -1;
+ diff_color_moved_ws_default = cm;
+ return 0;
+ }
if (!strcmp(var, "diff.context")) {
diff_context_default = git_config_int(var, value);
if (diff_context_default < 0)
struct hashmap_entry ent;
const struct emitted_diff_symbol *es;
struct moved_entry *next_line;
+ struct ws_delta *wsd;
};
-static int moved_entry_cmp(const struct diff_options *diffopt,
- const struct moved_entry *a,
- const struct moved_entry *b,
+/**
+ * The struct ws_delta holds white space differences between moved lines, i.e.
+ * between '+' and '-' lines that have been detected to be a move.
+ * The string contains the difference in leading white spaces, before the
+ * rest of the line is compared using the white space config for move
+ * coloring. The current_longer indicates if the first string in the
+ * comparison is longer than the second.
+ */
+struct ws_delta {
+ char *string;
+ unsigned int current_longer : 1;
+};
+#define WS_DELTA_INIT { NULL, 0 }
+
+static int compute_ws_delta(const struct emitted_diff_symbol *a,
+ const struct emitted_diff_symbol *b,
+ struct ws_delta *out)
+{
+ const struct emitted_diff_symbol *longer = a->len > b->len ? a : b;
+ const struct emitted_diff_symbol *shorter = a->len > b->len ? b : a;
+ int d = longer->len - shorter->len;
+
+ out->string = xmemdupz(longer->line, d);
+ out->current_longer = (a == longer);
+
+ return !strncmp(longer->line + d, shorter->line, shorter->len);
+}
+
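
The effect of compute_ws_delta() can be restated without the surrounding
machinery: treat the extra leading white space of the longer line as the
delta and require the remainders to match. A minimal standalone restatement
of that idea (not the patch's own helper):

#include <string.h>

static int same_after_indent_change(const char *a, const char *b)
{
	size_t la = strlen(a), lb = strlen(b);
	const char *longer = la > lb ? a : b;
	const char *shorter = la > lb ? b : a;
	size_t d = la > lb ? la - lb : lb - la;

	/* e.g. "\t\tfoo();" vs "\tfoo();": delta is one "\t", bodies match */
	return !strncmp(longer + d, shorter, strlen(shorter));
}
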
+static int cmp_in_block_with_wsd(const struct diff_options *o,
+ const struct moved_entry *cur,
+ const struct moved_entry *match,
+ struct moved_entry *pmb,
+ int n)
+{
+ struct emitted_diff_symbol *l = &o->emitted_symbols->buf[n];
+ int al = cur->es->len, cl = l->len;
+ const char *a = cur->es->line,
+ *b = match->es->line,
+ *c = l->line;
+
+ int wslen;
+
+ /*
+ * We need to check if 'cur' is equal to 'match'.
+ * As those are from the same (+/-) side, we do not need to adjust for
+ * indent changes. However, these were found using fuzzy matching,
+ * so we do have to check if they are equal.
+ */
+ if (strcmp(a, b))
+ return 1;
+
+ if (!pmb->wsd)
+ /*
+ * No white space delta was carried forward? This can happen
+ * when we exit early in this function and do not carry
+ * forward ws.
+ */
+ return 1;
+
+ /*
+ * The indent changes of the block are known and carried forward in
+ * pmb->wsd; however we need to check if the indent changes of the
+ * current line are still the same as before.
+ *
+ * To do so we need to compare 'l' to 'cur', adjusting
+ * one of them for the white space, depending on which was longer.
+ */
+
+ wslen = strlen(pmb->wsd->string);
+ if (pmb->wsd->current_longer) {
+ c += wslen;
+ cl -= wslen;
+ } else {
+ a += wslen;
+ al -= wslen;
+ }
+
+ if (strcmp(a, c))
+ return 1;
+
+ return 0;
+}
+
+static int moved_entry_cmp(const void *hashmap_cmp_fn_data,
+ const void *entry,
+ const void *entry_or_key,
const void *keydata)
{
+ const struct diff_options *diffopt = hashmap_cmp_fn_data;
+ const struct moved_entry *a = entry;
+ const struct moved_entry *b = entry_or_key;
+ unsigned flags = diffopt->color_moved_ws_handling
+ & XDF_WHITESPACE_FLAGS;
+
+ if (diffopt->color_moved_ws_handling &
+ COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE)
+ /*
+ * As no specific white space config is given,
+ * we need to check for a new block, so ignore all
+ * white space. The setup of the white space
+ * configuration for the next block is done elsewhere.
+ */
+ flags |= XDF_IGNORE_WHITESPACE;
+
return !xdiff_compare_lines(a->es->line, a->es->len,
b->es->line, b->es->len,
- diffopt->xdl_opts);
+ flags);
}
static struct moved_entry *prepare_entry(struct diff_options *o,
{
struct moved_entry *ret = xmalloc(sizeof(*ret));
struct emitted_diff_symbol *l = &o->emitted_symbols->buf[line_no];
+ unsigned flags = o->color_moved_ws_handling & XDF_WHITESPACE_FLAGS;
- ret->ent.hash = xdiff_hash_string(l->line, l->len, o->xdl_opts);
+ ret->ent.hash = xdiff_hash_string(l->line, l->len, flags);
ret->es = l;
ret->next_line = NULL;
+ ret->wsd = NULL;
return ret;
}
}
}
+static void pmb_advance_or_null(struct diff_options *o,
+ struct moved_entry *match,
+ struct hashmap *hm,
+ struct moved_entry **pmb,
+ int pmb_nr)
+{
+ int i;
+ for (i = 0; i < pmb_nr; i++) {
+ struct moved_entry *prev = pmb[i];
+ struct moved_entry *cur = (prev && prev->next_line) ?
+ prev->next_line : NULL;
+ if (cur && !hm->cmpfn(o, cur, match, NULL)) {
+ pmb[i] = cur;
+ } else {
+ pmb[i] = NULL;
+ }
+ }
+}
+
+static void pmb_advance_or_null_multi_match(struct diff_options *o,
+ struct moved_entry *match,
+ struct hashmap *hm,
+ struct moved_entry **pmb,
+ int pmb_nr, int n)
+{
+ int i;
+ char *got_match = xcalloc(1, pmb_nr);
+
+ for (; match; match = hashmap_get_next(hm, match)) {
+ for (i = 0; i < pmb_nr; i++) {
+ struct moved_entry *prev = pmb[i];
+ struct moved_entry *cur = (prev && prev->next_line) ?
+ prev->next_line : NULL;
+ if (!cur)
+ continue;
+ if (!cmp_in_block_with_wsd(o, cur, match, pmb[i], n))
+ got_match[i] |= 1;
+ }
+ }
+
+ for (i = 0; i < pmb_nr; i++) {
+ if (got_match[i]) {
+ /* Carry the white space delta forward */
+ pmb[i]->next_line->wsd = pmb[i]->wsd;
+ pmb[i] = pmb[i]->next_line;
+ } else
+ pmb[i] = NULL;
+ }
+}
+
static int shrink_potential_moved_blocks(struct moved_entry **pmb,
int pmb_nr)
{
if (lp < pmb_nr && rp > -1 && lp < rp) {
pmb[lp] = pmb[rp];
+ if (pmb[rp]->wsd) {
+ free(pmb[rp]->wsd->string);
+ FREE_AND_NULL(pmb[rp]->wsd);
+ }
pmb[rp] = NULL;
rp--;
lp++;
struct moved_entry *key;
struct moved_entry *match = NULL;
struct emitted_diff_symbol *l = &o->emitted_symbols->buf[n];
- int i;
switch (l->s) {
case DIFF_SYMBOL_PLUS:
hm = del_lines;
key = prepare_entry(o, n);
- match = hashmap_get(hm, key, o);
+ match = hashmap_get(hm, key, NULL);
free(key);
break;
case DIFF_SYMBOL_MINUS:
hm = add_lines;
key = prepare_entry(o, n);
- match = hashmap_get(hm, key, o);
+ match = hashmap_get(hm, key, NULL);
free(key);
break;
default:
if (o->color_moved == COLOR_MOVED_PLAIN)
continue;
- /* Check any potential block runs, advance each or nullify */
- for (i = 0; i < pmb_nr; i++) {
- struct moved_entry *p = pmb[i];
- struct moved_entry *pnext = (p && p->next_line) ?
- p->next_line : NULL;
- if (pnext && !hm->cmpfn(o, pnext, match, NULL)) {
- pmb[i] = p->next_line;
- } else {
- pmb[i] = NULL;
- }
- }
+ if (o->color_moved_ws_handling &
+ COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE)
+ pmb_advance_or_null_multi_match(o, match, hm, pmb, pmb_nr, n);
+ else
+ pmb_advance_or_null(o, match, hm, pmb, pmb_nr);
pmb_nr = shrink_potential_moved_blocks(pmb, pmb_nr);
*/
for (; match; match = hashmap_get_next(hm, match)) {
ALLOC_GROW(pmb, pmb_nr + 1, pmb_alloc);
- pmb[pmb_nr++] = match;
+ if (o->color_moved_ws_handling &
+ COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE) {
+ struct ws_delta *wsd = xmalloc(sizeof(*match->wsd));
+ if (compute_ws_delta(l, match->es, wsd)) {
+ match->wsd = wsd;
+ pmb[pmb_nr++] = match;
+ } else
+ free(wsd);
+ } else {
+ pmb[pmb_nr++] = match;
+ }
}
flipped_block = (flipped_block + 1) % 2;
block_length++;
- if (flipped_block)
+ if (flipped_block && o->color_moved != COLOR_MOVED_BLOCKS)
l->flags |= DIFF_SYMBOL_MOVED_LINE_ALT;
}
adjust_last_block(o, n, block_length);
char *hex = oid_to_hex(oid);
if (abbrev < 0)
abbrev = FALLBACK_DEFAULT_ABBREV;
- if (abbrev > GIT_SHA1_HEXSZ)
+ if (abbrev > the_hash_algo->hexsz)
BUG("oid abbreviation out of range: %d", abbrev);
if (abbrev)
hex[abbrev] = '\0';
}
options->color_moved = diff_color_moved_default;
+ options->color_moved_ws_handling = diff_color_moved_ws_default;
}
void diff_setup_done(struct diff_options *options)
if (cm < 0)
die("bad --color-moved argument: %s", arg);
options->color_moved = cm;
+ } else if (skip_prefix(arg, "--color-moved-ws=", &arg)) {
+ options->color_moved_ws_handling = parse_color_moved_ws(arg);
} else if (skip_to_optional_arg_default(arg, "--color-words", &options->word_regex, NULL)) {
options->use_color = 1;
options->word_diff = DIFF_WORDS_COLOR;
const char *abbrev;
/* Do we want all 40 hex characters? */
- if (len == GIT_SHA1_HEXSZ)
+ if (len == the_hash_algo->hexsz)
return oid_to_hex(oid);
/* An abbreviated value is fine, possibly followed by an ellipsis. */
* the automatic sizing is supposed to give abblen that ensures
* uniqueness across all objects (statistically speaking).
*/
- if (abblen < GIT_SHA1_HEXSZ - 3) {
+ if (abblen < the_hash_algo->hexsz - 3) {
static char hex[GIT_MAX_HEXSZ + 1];
if (len < abblen && abblen <= len + 2)
xsnprintf(hex, sizeof(hex), "%s%.*s", abbrev, len+3-abblen, "..");
if (o->color_moved) {
struct hashmap add_lines, del_lines;
- hashmap_init(&del_lines,
- (hashmap_cmp_fn)moved_entry_cmp, o, 0);
- hashmap_init(&add_lines,
- (hashmap_cmp_fn)moved_entry_cmp, o, 0);
+ if (o->color_moved_ws_handling &
+ COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE)
+ o->color_moved_ws_handling |= XDF_IGNORE_WHITESPACE;
+
+ hashmap_init(&del_lines, moved_entry_cmp, o, 0);
+ hashmap_init(&add_lines, moved_entry_cmp, o, 0);
add_lines_to_move_detection(o, &add_lines, &del_lines);
mark_color_as_moved(o, &add_lines, &del_lines);
enum {
COLOR_MOVED_NO = 0,
COLOR_MOVED_PLAIN = 1,
- COLOR_MOVED_ZEBRA = 2,
- COLOR_MOVED_ZEBRA_DIM = 3,
+ COLOR_MOVED_BLOCKS = 2,
+ COLOR_MOVED_ZEBRA = 3,
+ COLOR_MOVED_ZEBRA_DIM = 4,
} color_moved;
#define COLOR_MOVED_DEFAULT COLOR_MOVED_ZEBRA
#define COLOR_MOVED_MIN_ALNUM_COUNT 20
+
+ /* XDF_WHITESPACE_FLAGS regarding block detection are set at 2, 3, 4 */
+ #define COLOR_MOVED_WS_ALLOW_INDENTATION_CHANGE (1<<5)
+ int color_moved_ws_handling;
};
void diff_emit_submodule_del(struct diff_options *o, const char *line);
enum object_creation_mode object_creation_mode = OBJECT_CREATION_MODE;
char *notes_ref_name;
int grafts_replace_parents = 1;
-int core_commit_graph;
int core_apply_sparse_checkout;
int merge_log_config = -1;
int precomposed_unicode = -1; /* see probe_utf8_pathname_composition() */
return 1;
}
- if (last && last->data.buf && last->depth < max_depth
+ if (last && last->data.len && last->data.buf && last->depth < max_depth
&& dat->len > the_hash_algo->rawsz) {
delta_count_attempts_by_type[type]++;
if (!force_update && !is_null_oid(&old_oid)) {
struct commit *old_cmit, *new_cmit;
- old_cmit = lookup_commit_reference_gently(&old_oid, 0);
- new_cmit = lookup_commit_reference_gently(&b->oid, 0);
+ old_cmit = lookup_commit_reference_gently(the_repository,
+ &old_oid, 0);
+ new_cmit = lookup_commit_reference_gently(the_repository,
+ &b->oid, 0);
if (!old_cmit || !new_cmit)
return error("Branch %s is missing commits.", b->name);
--- /dev/null
+#include "git-compat-util.h"
+#include "fetch-negotiator.h"
+#include "negotiator/default.h"
+#include "negotiator/skipping.h"
+
+void fetch_negotiator_init(struct fetch_negotiator *negotiator,
+ const char *algorithm)
+{
+ if (algorithm && !strcmp(algorithm, "skipping")) {
+ skipping_negotiator_init(negotiator);
+ return;
+ }
+ default_negotiator_init(negotiator);
+}
--- /dev/null
+#ifndef FETCH_NEGOTIATOR
+#define FETCH_NEGOTIATOR
+
+struct commit;
+
+/*
+ * An object that supplies the information needed to negotiate the contents of
+ * the to-be-sent packfile during a fetch.
+ *
+ * To set up the negotiator, call fetch_negotiator_init(), then known_common()
+ * (0 or more times), then add_tip() (0 or more times).
+ *
+ * Then, when "have" lines are required, call next(). Call ack() to report what
+ * the server tells us.
+ *
+ * Once negotiation is done, call release(). The negotiator then cannot be used
+ * (unless reinitialized with fetch_negotiator_init()).
+ */
+struct fetch_negotiator {
+ /*
+ * Before negotiation starts, indicate that the server is known to have
+ * this commit.
+ */
+ void (*known_common)(struct fetch_negotiator *, struct commit *);
+
+ /*
+ * Once this function is invoked, known_common() cannot be invoked any
+ * more.
+ *
+ * Indicate that this commit and all its ancestors are to be checked
+ * for commonality with the server.
+ */
+ void (*add_tip)(struct fetch_negotiator *, struct commit *);
+
+ /*
+ * Once this function is invoked, known_common() and add_tip() cannot
+ * be invoked any more.
+ *
+ * Return the next commit that the client should send as a "have" line.
+ */
+ const struct object_id *(*next)(struct fetch_negotiator *);
+
+ /*
+ * Inform the negotiator that the server has the given commit. This
+ * method must only be called on commits returned by next().
+ */
+ int (*ack)(struct fetch_negotiator *, struct commit *);
+
+ void (*release)(struct fetch_negotiator *);
+
+ /* internal use */
+ void *data;
+};
+
+void fetch_negotiator_init(struct fetch_negotiator *negotiator,
+ const char *algorithm);
+
+#endif
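
The call sequence documented in fetch-negotiator.h above, as a minimal
sketch assuming git.git internals; the known-common and tip commits are
hypothetical, and actually sending the "have" lines to the server is
elided:

#include "cache.h"
#include "repository.h"
#include "commit.h"
#include "fetch-negotiator.h"

static void negotiate_example(struct commit *known, struct commit *tip)
{
	struct fetch_negotiator negotiator;
	const struct object_id *oid;

	fetch_negotiator_init(&negotiator, NULL); /* NULL: default algorithm */
	negotiator.known_common(&negotiator, known);
	negotiator.add_tip(&negotiator, tip);

	while ((oid = negotiator.next(&negotiator))) {
		/*
		 * A real caller sends "have <oid>" here and only acks
		 * the commits the server reports back as common.
		 */
		negotiator.ack(&negotiator, lookup_commit(the_repository, oid));
	}
	negotiator.release(&negotiator);
}
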
transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
transport_set_option(transport, TRANS_OPT_NO_DEPENDENTS, "1");
- transport_fetch_refs(transport, ref);
+ transport_fetch_refs(transport, ref, NULL);
fetch_if_missing = original_fetch_if_missing;
}
#include "connect.h"
#include "transport.h"
#include "version.h"
-#include "prio-queue.h"
#include "sha1-array.h"
#include "oidset.h"
#include "packfile.h"
#include "object-store.h"
+#include "connected.h"
+#include "fetch-negotiator.h"
static int transfer_unpack_limit = -1;
static int fetch_unpack_limit = -1;
static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;
+static char *negotiation_algorithm;
/* Remember to update object flag allocation in object.h */
#define COMPLETE (1U << 0)
-#define COMMON (1U << 1)
-#define COMMON_REF (1U << 2)
-#define SEEN (1U << 3)
-#define POPPED (1U << 4)
-#define ALTERNATE (1U << 5)
-
-static int marked;
+#define ALTERNATE (1U << 1)
/*
* After sending this many "have"s if we do not get any new ACK , we
*/
#define MAX_IN_VAIN 256
-static struct prio_queue rev_list = { compare_commits_by_commit_date };
-static int non_common_revs, multi_ack, use_sideband;
+static int multi_ack, use_sideband;
/* Allow specifying sha1 if it is a ref tip. */
#define ALLOW_TIP_SHA1 01
/* Allow request of a sha1 if it is reachable from a ref (possibly hidden ref). */
void *vcache)
{
struct alternate_object_cache *cache = vcache;
- struct object *obj = parse_object(oid);
+ struct object *obj = parse_object(the_repository, oid);
if (!obj || (obj->flags & ALTERNATE))
return;
cache->items[cache->nr++] = obj;
}
-static void for_each_cached_alternate(void (*cb)(struct object *))
+static void for_each_cached_alternate(struct fetch_negotiator *negotiator,
+ void (*cb)(struct fetch_negotiator *,
+ struct object *))
{
static int initialized;
static struct alternate_object_cache cache;
}
for (i = 0; i < cache.nr; i++)
- cb(cache.items[i]);
-}
-
-static void rev_list_push(struct commit *commit, int mark)
-{
- if (!(commit->object.flags & mark)) {
- commit->object.flags |= mark;
-
- if (parse_commit(commit))
- return;
-
- prio_queue_put(&rev_list, commit);
-
- if (!(commit->object.flags & COMMON))
- non_common_revs++;
- }
+ cb(negotiator, cache.items[i]);
}
-static int rev_list_insert_ref(const char *refname, const struct object_id *oid)
+static int rev_list_insert_ref(struct fetch_negotiator *negotiator,
+ const char *refname,
+ const struct object_id *oid)
{
- struct object *o = deref_tag(parse_object(oid), refname, 0);
+ struct object *o = deref_tag(the_repository,
+ parse_object(the_repository, oid),
+ refname, 0);
if (o && o->type == OBJ_COMMIT)
- rev_list_push((struct commit *)o, SEEN);
+ negotiator->add_tip(negotiator, (struct commit *)o);
return 0;
}
static int rev_list_insert_ref_oid(const char *refname, const struct object_id *oid,
int flag, void *cb_data)
{
- return rev_list_insert_ref(refname, oid);
-}
-
-static int clear_marks(const char *refname, const struct object_id *oid,
- int flag, void *cb_data)
-{
- struct object *o = deref_tag(parse_object(oid), refname, 0);
-
- if (o && o->type == OBJ_COMMIT)
- clear_commit_marks((struct commit *)o,
- COMMON | COMMON_REF | SEEN | POPPED);
- return 0;
-}
-
-/*
- This function marks a rev and its ancestors as common.
- In some cases, it is desirable to mark only the ancestors (for example
- when only the server does not yet know that they are common).
-*/
-
-static void mark_common(struct commit *commit,
- int ancestors_only, int dont_parse)
-{
- if (commit != NULL && !(commit->object.flags & COMMON)) {
- struct object *o = (struct object *)commit;
-
- if (!ancestors_only)
- o->flags |= COMMON;
-
- if (!(o->flags & SEEN))
- rev_list_push(commit, SEEN);
- else {
- struct commit_list *parents;
-
- if (!ancestors_only && !(o->flags & POPPED))
- non_common_revs--;
- if (!o->parsed && !dont_parse)
- if (parse_commit(commit))
- return;
-
- for (parents = commit->parents;
- parents;
- parents = parents->next)
- mark_common(parents->item, 0, dont_parse);
- }
- }
-}
-
-/*
- Get the next rev to send, ignoring the common.
-*/
-
-static const struct object_id *get_rev(void)
-{
- struct commit *commit = NULL;
-
- while (commit == NULL) {
- unsigned int mark;
- struct commit_list *parents;
-
- if (rev_list.nr == 0 || non_common_revs == 0)
- return NULL;
-
- commit = prio_queue_get(&rev_list);
- parse_commit(commit);
- parents = commit->parents;
-
- commit->object.flags |= POPPED;
- if (!(commit->object.flags & COMMON))
- non_common_revs--;
-
- if (commit->object.flags & COMMON) {
- /* do not send "have", and ignore ancestors */
- commit = NULL;
- mark = COMMON | SEEN;
- } else if (commit->object.flags & COMMON_REF)
- /* send "have", and ignore ancestors */
- mark = COMMON | SEEN;
- else
- /* send "have", also for its ancestors */
- mark = SEEN;
-
- while (parents) {
- if (!(parents->item->object.flags & SEEN))
- rev_list_push(parents->item, mark);
- if (mark & COMMON)
- mark_common(parents->item, 1, 0);
- parents = parents->next;
- }
- }
-
- return &commit->object.oid;
+ return rev_list_insert_ref(cb_data, refname, oid);
}
enum ack_type {
write_or_die(fd, buf->buf, buf->len);
}
-static void insert_one_alternate_object(struct object *obj)
+static void insert_one_alternate_object(struct fetch_negotiator *negotiator,
+ struct object *obj)
{
- rev_list_insert_ref(NULL, &obj->oid);
+ rev_list_insert_ref(negotiator, NULL, &obj->oid);
}
#define INITIAL_FLUSH 16
return count;
}
-static int find_common(struct fetch_pack_args *args,
+static void mark_tips(struct fetch_negotiator *negotiator,
+ const struct oid_array *negotiation_tips)
+{
+ int i;
+
+ if (!negotiation_tips) {
+ for_each_ref(rev_list_insert_ref_oid, negotiator);
+ return;
+ }
+
+ for (i = 0; i < negotiation_tips->nr; i++)
+ rev_list_insert_ref(negotiator, NULL,
+ &negotiation_tips->oid[i]);
+ return;
+}
+
+static int find_common(struct fetch_negotiator *negotiator,
+ struct fetch_pack_args *args,
int fd[2], struct object_id *result_oid,
struct ref *refs)
{
if (args->stateless_rpc && multi_ack == 1)
die(_("--stateless-rpc requires multi_ack_detailed"));
- if (marked)
- for_each_ref(clear_marks, NULL);
- marked = 1;
- for_each_ref(rev_list_insert_ref_oid, NULL);
- for_each_cached_alternate(insert_one_alternate_object);
+ mark_tips(negotiator, args->negotiation_tips);
+ for_each_cached_alternate(negotiator, insert_one_alternate_object);
fetching = 0;
for ( ; refs ; refs = refs->next) {
* interested in the case we *know* the object is
* reachable and we have already scanned it.
*/
- if (((o = lookup_object(remote->hash)) != NULL) &&
+ if (((o = lookup_object(the_repository, remote->hash)) != NULL) &&
(o->flags & COMPLETE)) {
continue;
}
if (skip_prefix(line, "unshallow ", &arg)) {
if (get_oid_hex(arg, &oid))
die(_("invalid unshallow line: %s"), line);
- if (!lookup_object(oid.hash))
+ if (!lookup_object(the_repository, oid.hash))
die(_("object not found: %s"), line);
/* make sure that it is parsed as shallow */
- if (!parse_object(&oid))
+ if (!parse_object(the_repository, &oid))
die(_("error in object: %s"), line);
if (unregister_shallow(&oid))
die(_("no shallow found: %s"), line);
retval = -1;
if (args->no_dependents)
goto done;
- while ((oid = get_rev())) {
+ while ((oid = negotiator->next(negotiator))) {
packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
print_verbose(args, "have %s", oid_to_hex(oid));
in_vain++;
case ACK_ready:
case ACK_continue: {
struct commit *commit =
- lookup_commit(result_oid);
+ lookup_commit(the_repository,
+ result_oid);
+ int was_common;
+
if (!commit)
die(_("invalid commit %s"), oid_to_hex(result_oid));
+ was_common = negotiator->ack(negotiator, commit);
if (args->stateless_rpc
&& ack == ACK_common
- && !(commit->object.flags & COMMON)) {
+ && !was_common) {
/* We need to replay the have for this object
* on the next RPC request so the peer knows
* it is in common with us.
} else if (!args->stateless_rpc
|| ack != ACK_common)
in_vain = 0;
- mark_common(commit, 0, 1);
retval = 0;
got_continue = 1;
- if (ack == ACK_ready) {
- clear_prio_queue(&rev_list);
+ if (ack == ACK_ready)
got_ready = 1;
- }
break;
}
}
print_verbose(args, _("giving up"));
break; /* give up */
}
+ if (got_ready)
+ break;
}
}
done:
static int mark_complete(const struct object_id *oid)
{
- struct object *o = parse_object(oid);
+ struct object *o = parse_object(the_repository, oid);
while (o && o->type == OBJ_TAG) {
struct tag *t = (struct tag *) o;
if (!t->tagged)
break; /* broken repository */
o->flags |= COMPLETE;
- o = parse_object(&t->tagged->oid);
+ o = parse_object(the_repository, &t->tagged->oid);
}
if (o && o->type == OBJ_COMMIT) {
struct commit *commit = (struct commit *)o;
*refs = newlist;
}
-static void mark_alternate_complete(struct object *obj)
+static void mark_alternate_complete(struct fetch_negotiator *unused,
+ struct object *obj)
{
mark_complete(&obj->oid);
}
return 0;
}
-static int everything_local(struct fetch_pack_args *args,
- struct ref **refs,
- struct ref **sought, int nr_sought)
+/*
+ * Mark recent commits available locally and reachable from a local ref as
+ * COMPLETE. If args->no_dependents is false, also mark COMPLETE remote refs as
+ * COMMON_REF (otherwise, we are not planning to participate in negotiation, and
+ * thus do not need COMMON_REF marks).
+ *
+ * The cutoff time for recency is determined by this heuristic: it is the
+ * earliest commit time of the objects in refs that are commits and that we know
+ * the commit time of.
+ */
+static void mark_complete_and_common_ref(struct fetch_negotiator *negotiator,
+ struct fetch_pack_args *args,
+ struct ref **refs)
{
struct ref *ref;
- int retval;
int old_save_commit_buffer = save_commit_buffer;
timestamp_t cutoff = 0;
struct oidset loose_oid_set = OIDSET_INIT;
if (!has_object_file_with_flags(&ref->old_oid, flags))
continue;
- o = parse_object(&ref->old_oid);
+ o = parse_object(the_repository, &ref->old_oid);
if (!o)
continue;
if (!args->no_dependents) {
if (!args->deepen) {
for_each_ref(mark_complete_oid, NULL);
- for_each_cached_alternate(mark_alternate_complete);
+ for_each_cached_alternate(NULL, mark_alternate_complete);
commit_list_sort_by_date(&complete);
if (cutoff)
mark_recent_complete_commits(args, cutoff);
* Don't mark them common yet; the server has to be told so first.
*/
for (ref = *refs; ref; ref = ref->next) {
- struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
+ struct object *o = deref_tag(the_repository,
+ lookup_object(the_repository,
+ ref->old_oid.hash),
NULL, 0);
if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
continue;
- if (!(o->flags & SEEN)) {
- rev_list_push((struct commit *)o, COMMON_REF | SEEN);
-
- mark_common((struct commit *)o, 1, 1);
- }
+ negotiator->known_common(negotiator,
+ (struct commit *)o);
}
}
- filter_refs(args, refs, sought, nr_sought);
+ save_commit_buffer = old_save_commit_buffer;
+}
+
+/*
+ * Returns 1 if every object pointed to by the given remote refs is available
+ * locally and reachable from a local ref, and 0 otherwise.
+ */
+static int everything_local(struct fetch_pack_args *args,
+ struct ref **refs)
+{
+ struct ref *ref;
+ int retval;
for (retval = 1, ref = *refs; ref ; ref = ref->next) {
const struct object_id *remote = &ref->old_oid;
struct object *o;
- o = lookup_object(remote->hash);
+ o = lookup_object(the_repository, remote->hash);
if (!o || !(o->flags & COMPLETE)) {
retval = 0;
print_verbose(args, "want %s (%s)", oid_to_hex(remote),
ref->name);
}
- save_commit_buffer = old_save_commit_buffer;
-
return retval;
}
struct object_id oid;
const char *agent_feature;
int agent_len;
+ struct fetch_negotiator negotiator;
+ fetch_negotiator_init(&negotiator, negotiation_algorithm);
sort_ref_list(&ref, ref_compare_name);
QSORT(sought, nr_sought, cmp_ref_by_name);
if (!server_supports("deepen-relative") && args->deepen_relative)
die(_("Server does not support --deepen"));
- if (everything_local(args, &ref, sought, nr_sought)) {
+ mark_complete_and_common_ref(&negotiator, args, &ref);
+ filter_refs(args, &ref, sought, nr_sought);
+ if (everything_local(args, &ref)) {
packet_flush(fd[1]);
goto all_done;
}
- if (find_common(args, fd, &oid, ref) < 0)
+ if (find_common(&negotiator, args, fd, &oid, ref) < 0)
if (!args->keep_pack)
/* When cloning, it is not unusual to have
* no common commit.
die(_("git fetch-pack: fetch failed."));
all_done:
+ negotiator.release(&negotiator);
return ref;
}
static void add_wants(const struct ref *wants, struct strbuf *req_buf)
{
+ int use_ref_in_want = server_supports_feature("fetch", "ref-in-want", 0);
+
for ( ; wants ; wants = wants->next) {
const struct object_id *remote = &wants->old_oid;
- const char *remote_hex;
struct object *o;
/*
* interested in the case we *know* the object is
* reachable and we have already scanned it.
*/
- if (((o = lookup_object(remote->hash)) != NULL) &&
+ if (((o = lookup_object(the_repository, remote->hash)) != NULL) &&
(o->flags & COMPLETE)) {
continue;
}
- remote_hex = oid_to_hex(remote);
- packet_buf_write(req_buf, "want %s\n", remote_hex);
+ if (!use_ref_in_want || wants->exact_oid)
+ packet_buf_write(req_buf, "want %s\n", oid_to_hex(remote));
+ else
+ packet_buf_write(req_buf, "want-ref %s\n", wants->name);
}
}
}
}
-static int add_haves(struct strbuf *req_buf, int *haves_to_send, int *in_vain)
+static int add_haves(struct fetch_negotiator *negotiator,
+ struct strbuf *req_buf,
+ int *haves_to_send, int *in_vain)
{
int ret = 0;
int haves_added = 0;
const struct object_id *oid;
- while ((oid = get_rev())) {
+ while ((oid = negotiator->next(negotiator))) {
packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
if (++haves_added >= *haves_to_send)
break;
return ret;
}
-static int send_fetch_request(int fd_out, const struct fetch_pack_args *args,
+static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
+ const struct fetch_pack_args *args,
const struct ref *wants, struct oidset *common,
int *haves_to_send, int *in_vain)
{
add_common(&req_buf, common);
/* Add initial haves */
- ret = add_haves(&req_buf, haves_to_send, in_vain);
+ ret = add_haves(negotiator, &req_buf, haves_to_send, in_vain);
}
/* Send request */
return ret;
}
-static int process_acks(struct packet_reader *reader, struct oidset *common)
+static int process_acks(struct fetch_negotiator *negotiator,
+ struct packet_reader *reader,
+ struct oidset *common)
{
/* received */
int received_ready = 0;
if (!get_oid_hex(arg, &oid)) {
struct commit *commit;
oidset_insert(common, &oid);
- commit = lookup_commit(&oid);
- mark_common(commit, 0, 1);
+ commit = lookup_commit(the_repository, &oid);
+ negotiator->ack(negotiator, commit);
}
continue;
}
if (!strcmp(reader->line, "ready")) {
- clear_prio_queue(&rev_list);
received_ready = 1;
continue;
}
if (skip_prefix(reader->line, "unshallow ", &arg)) {
if (get_oid_hex(arg, &oid))
die(_("invalid unshallow line: %s"), reader->line);
- if (!lookup_object(oid.hash))
+ if (!lookup_object(the_repository, oid.hash))
die(_("object not found: %s"), reader->line);
/* make sure that it is parsed as shallow */
- if (!parse_object(&oid))
+ if (!parse_object(the_repository, &oid))
die(_("error in object: %s"), reader->line);
if (unregister_shallow(&oid))
die(_("no shallow found: %s"), reader->line);
args->deepen = 1;
}
+static void receive_wanted_refs(struct packet_reader *reader, struct ref *refs)
+{
+ process_section_header(reader, "wanted-refs", 0);
+ while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+ struct object_id oid;
+ const char *end;
+ struct ref *r = NULL;
+
+ if (parse_oid_hex(reader->line, &oid, &end) || *end++ != ' ')
+ die("expected wanted-ref, got '%s'", reader->line);
+
+ for (r = refs; r; r = r->next) {
+ if (!strcmp(end, r->name)) {
+ oidcpy(&r->old_oid, &oid);
+ break;
+ }
+ }
+
+ if (!r)
+ die("unexpected wanted-ref: '%s'", reader->line);
+ }
+
+ if (reader->status != PACKET_READ_DELIM)
+ die("error processing wanted refs: %d", reader->status);
+}
+
enum fetch_state {
FETCH_CHECK_LOCAL = 0,
FETCH_SEND_REQUEST,
struct packet_reader reader;
int in_vain = 0;
int haves_to_send = INITIAL_FLUSH;
+ struct fetch_negotiator negotiator;
+ fetch_negotiator_init(&negotiator, negotiation_algorithm);
packet_reader_init(&reader, fd[0], NULL, 0,
PACKET_READ_CHOMP_NEWLINE);
if (args->depth > 0 || args->deepen_since || args->deepen_not)
args->deepen = 1;
- if (marked)
- for_each_ref(clear_marks, NULL);
- marked = 1;
-
- for_each_ref(rev_list_insert_ref_oid, NULL);
- for_each_cached_alternate(insert_one_alternate_object);
-
/* Filter 'ref' by 'sought' and those that aren't local */
- if (everything_local(args, &ref, sought, nr_sought))
+ mark_complete_and_common_ref(&negotiator, args, &ref);
+ filter_refs(args, &ref, sought, nr_sought);
+ if (everything_local(args, &ref))
state = FETCH_DONE;
else
state = FETCH_SEND_REQUEST;
+
+ mark_tips(&negotiator, args->negotiation_tips);
+ for_each_cached_alternate(&negotiator,
+ insert_one_alternate_object);
break;
case FETCH_SEND_REQUEST:
- if (send_fetch_request(fd[1], args, ref, &common,
+ if (send_fetch_request(&negotiator, fd[1], args, ref,
+ &common,
&haves_to_send, &in_vain))
state = FETCH_GET_PACK;
else
break;
case FETCH_PROCESS_ACKS:
/* Process ACKs/NAKs */
- switch (process_acks(&reader, &common)) {
+ switch (process_acks(&negotiator, &reader, &common)) {
case 2:
state = FETCH_GET_PACK;
break;
if (process_section_header(&reader, "shallow-info", 1))
receive_shallow_info(args, &reader);
+ if (process_section_header(&reader, "wanted-refs", 1))
+ receive_wanted_refs(&reader, ref);
+
/* get the pack */
process_section_header(&reader, "packfile", 0);
if (get_pack(args, fd, pack_lockfile))
}
}
+ negotiator.release(&negotiator);
oidset_clear(&common);
return ref;
}
git_config_get_bool("repack.usedeltabaseoffset", &prefer_ofs_delta);
git_config_get_bool("fetch.fsckobjects", &fetch_fsck_objects);
git_config_get_bool("transfer.fsckobjects", &transfer_fsck_objects);
+ git_config_get_string("fetch.negotiationalgorithm",
+ &negotiation_algorithm);
git_config(git_default_config, NULL);
}
}
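
As a rough usage sketch (not part of the patch; the remote and ref names are placeholders), the negotiation knobs wired up above can be exercised from the command line and from configuration:

	# advertise only this tip and its ancestors as "have" lines
	$ git fetch --negotiation-tip=refs/heads/master origin

	# opt into the skipping negotiator read from fetch.negotiationalgorithm
	$ git config fetch.negotiationAlgorithm skipping
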
static void update_shallow(struct fetch_pack_args *args,
- struct ref **sought, int nr_sought,
+ struct ref *refs,
struct shallow_info *si)
{
struct oid_array ref = OID_ARRAY_INIT;
int *status;
int i;
+ struct ref *r;
if (args->deepen && alternate_shallow_file) {
if (*alternate_shallow_file == '\0') { /* --unshallow */
remove_nonexistent_theirs_shallow(si);
if (!si->nr_ours && !si->nr_theirs)
return;
- for (i = 0; i < nr_sought; i++)
- oid_array_append(&ref, &sought[i]->old_oid);
+ for (r = refs; r; r = r->next)
+ oid_array_append(&ref, &r->old_oid);
si->ref = &ref;
if (args->update_shallow) {
* remote is also shallow, check what ref is safe to update
* without updating .git/shallow
*/
- status = xcalloc(nr_sought, sizeof(*status));
+ status = xcalloc(ref.nr, sizeof(*status));
assign_shallow_commits_to_refs(si, NULL, status);
if (si->nr_ours || si->nr_theirs) {
- for (i = 0; i < nr_sought; i++)
+ for (r = refs, i = 0; r; r = r->next, i++)
if (status[i])
- sought[i]->status = REF_STATUS_REJECT_SHALLOW;
+ r->status = REF_STATUS_REJECT_SHALLOW;
}
free(status);
oid_array_clear(&ref);
}
+static int iterate_ref_map(void *cb_data, struct object_id *oid)
+{
+ struct ref **rm = cb_data;
+ struct ref *ref = *rm;
+
+ if (!ref)
+ return -1; /* end of the list */
+ *rm = ref->next;
+ oidcpy(oid, &ref->old_oid);
+ return 0;
+}
+
struct ref *fetch_pack(struct fetch_pack_args *args,
int fd[], struct child_process *conn,
const struct ref *ref,
ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
&si, pack_lockfile);
reprepare_packed_git(the_repository);
- update_shallow(args, sought, nr_sought, &si);
+
+ if (!args->cloning && args->deepen) {
+ struct check_connected_options opt = CHECK_CONNECTED_INIT;
+ struct ref *iterator = ref_cpy;
+ opt.shallow_file = alternate_shallow_file;
+ if (args->deepen)
+ opt.is_deepening_fetch = 1;
+ if (check_connected(iterate_ref_map, &iterator, &opt)) {
+ error(_("remote did not send all necessary objects"));
+ free_refs(ref_cpy);
+ ref_cpy = NULL;
+ rollback_lock_file(&shallow_lock);
+ goto cleanup;
+ }
+ args->connectivity_checked = 1;
+ }
+
+ update_shallow(args, ref_cpy, &si);
+cleanup:
clear_shallow_info(&si);
return ref_cpy;
}
const struct string_list *deepen_not;
struct list_objects_filter_options filter_options;
const struct string_list *server_options;
+
+ /*
+ * If not NULL, during packfile negotiation, fetch-pack will send "have"
+ * lines only with these tips and their ancestors.
+ */
+ const struct oid_array *negotiation_tips;
+
unsigned deepen_relative:1;
unsigned quiet:1;
unsigned keep_pack:1;
* regardless of which object flags it uses (if any).
*/
unsigned no_dependents:1;
+
+ /*
+ * Because fetch_pack() overwrites the shallow file upon a
+ * successful deepening non-clone fetch, if this struct
+ * specifies such a fetch, fetch_pack() needs to perform a
+ * connectivity check before deciding if a fetch is successful
+ * (and overwriting the shallow file). fetch_pack() sets this
+ * field to 1 if such a connectivity check was performed.
+ *
+ * This is different from check_self_contained_and_connected
+ * in that the former allows existing objects in the
+ * repository to satisfy connectivity needs, whereas the
+ * latter doesn't.
+ */
+ unsigned connectivity_checked:1;
};
/*
#include "cache.h"
#include "object-store.h"
+#include "repository.h"
#include "object.h"
#include "blob.h"
#include "tree.h"
FUNC(ZERO_PADDED_DATE, ERROR) \
FUNC(GITMODULES_MISSING, ERROR) \
FUNC(GITMODULES_BLOB, ERROR) \
- FUNC(GITMODULES_PARSE, ERROR) \
+ FUNC(GITMODULES_LARGE, ERROR) \
FUNC(GITMODULES_NAME, ERROR) \
FUNC(GITMODULES_SYMLINK, ERROR) \
/* warnings */ \
FUNC(ZERO_PADDED_FILEMODE, WARN) \
FUNC(NUL_IN_COMMIT, WARN) \
/* infos (reported as warnings, but ignored by default) */ \
+ FUNC(GITMODULES_PARSE, INFO) \
FUNC(BAD_TAG_NAME, INFO) \
FUNC(MISSING_TAGGER_ENTRY, INFO)
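
The severity of individual fsck messages can be tuned per <msg-id>, so the reclassification above could be overridden roughly like this (a sketch only; the camelCased name follows the usual msg-id naming convention):

	# treat an unparseable .gitmodules as an error again instead of
	# the new informational default
	$ git config fsck.gitmodulesParse error
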
strbuf_addstr(sb, ": ");
}
+static int object_on_skiplist(struct fsck_options *opts, struct object *obj)
+{
+ if (opts && opts->skiplist && obj)
+ return oid_array_lookup(opts->skiplist, &obj->oid) >= 0;
+ return 0;
+}
+
__attribute__((format (printf, 4, 5)))
static int report(struct fsck_options *options, struct object *object,
enum fsck_msg_id id, const char *fmt, ...)
if (msg_type == FSCK_IGNORE)
return 0;
- if (options->skiplist && object &&
- oid_array_lookup(options->skiplist, &object->oid) >= 0)
+ if (object_on_skiplist(options, object))
return 0;
if (msg_type == FSCK_FATAL)
continue;
if (S_ISDIR(entry.mode)) {
- obj = (struct object *)lookup_tree(entry.oid);
+ obj = (struct object *)lookup_tree(the_repository, entry.oid);
if (name && obj)
put_object_name(options, obj, "%s%s/", name,
entry.path);
result = options->walk(obj, OBJ_TREE, data, options);
}
else if (S_ISREG(entry.mode) || S_ISLNK(entry.mode)) {
- obj = (struct object *)lookup_blob(entry.oid);
+ obj = (struct object *)lookup_blob(the_repository, entry.oid);
if (name && obj)
put_object_name(options, obj, "%s%s", name,
entry.path);
return -1;
if (obj->type == OBJ_NONE)
- parse_object(&obj->oid);
+ parse_object(the_repository, &obj->oid);
switch (obj->type) {
case OBJ_BLOB:
unsigned long size, struct fsck_options *options)
{
struct fsck_gitmodules_data data;
+ struct config_options config_opts = { 0 };
if (!oidset_contains(&gitmodules_found, &blob->object.oid))
return 0;
oidset_insert(&gitmodules_done, &blob->object.oid);
+ if (object_on_skiplist(options, &blob->object))
+ return 0;
+
if (!buf) {
/*
* A missing buffer here is a sign that the caller found the
* that an error.
*/
return report(options, &blob->object,
- FSCK_MSG_GITMODULES_PARSE,
+ FSCK_MSG_GITMODULES_LARGE,
".gitmodules too large to parse");
}
data.obj = &blob->object;
data.options = options;
data.ret = 0;
+ config_opts.error_action = CONFIG_ERROR_SILENT;
if (git_config_from_mem(fsck_gitmodules_fn, CONFIG_ORIGIN_BLOB,
- ".gitmodules", buf, size, &data))
+ ".gitmodules", buf, size, &data, &config_opts))
data.ret |= report(options, &blob->object,
FSCK_MSG_GITMODULES_PARSE,
"could not parse gitmodules blob");
if (oidset_contains(&gitmodules_done, oid))
continue;
- blob = lookup_blob(oid);
+ blob = lookup_blob(the_repository, oid);
if (!blob) {
struct object *obj = lookup_unknown_object(oid->hash);
ret |= report(options, obj,
--- /dev/null
+#!/bin/sh
+#
+# Copyright (c) 2005 Junio C Hamano.
+#
+
+SUBDIRECTORY_OK=Yes
+OPTIONS_KEEPDASHDASH=
+OPTIONS_STUCKLONG=t
+OPTIONS_SPEC="\
+git rebase [-i] [options] [--exec <cmd>] [--onto <newbase>] [<upstream>] [<branch>]
+git rebase [-i] [options] [--exec <cmd>] [--onto <newbase>] --root [<branch>]
+git rebase --continue | --abort | --skip | --edit-todo
+--
+ Available options are
+v,verbose! display a diffstat of what changed upstream
+q,quiet! be quiet. implies --no-stat
+autostash automatically stash/stash pop before and after
+fork-point use 'merge-base --fork-point' to refine upstream
+onto=! rebase onto given branch instead of upstream
+r,rebase-merges? try to rebase merges instead of skipping them
+p,preserve-merges! try to recreate merges instead of ignoring them
+s,strategy=! use the given merge strategy
+X,strategy-option=! pass the argument through to the merge strategy
+no-ff! cherry-pick all commits, even if unchanged
+f,force-rebase! cherry-pick all commits, even if unchanged
+m,merge! use merging strategies to rebase
+i,interactive! let the user edit the list of commits to rebase
+x,exec=! add exec lines after each commit of the editable list
+k,keep-empty preserve empty commits during rebase
+allow-empty-message allow rebasing commits with empty messages
+stat! display a diffstat of what changed upstream
+n,no-stat! do not show diffstat of what changed upstream
+verify allow pre-rebase hook to run
+rerere-autoupdate allow rerere to update index with resolved conflicts
+root! rebase all reachable commits up to the root(s)
+autosquash move commits that begin with squash!/fixup! under -i
+signoff add a Signed-off-by: line to each commit
+committer-date-is-author-date! passed to 'git am'
+ignore-date! passed to 'git am'
+whitespace=! passed to 'git apply'
+ignore-whitespace! passed to 'git apply'
+C=! passed to 'git apply'
+S,gpg-sign? GPG-sign commits
+ Actions:
+continue! continue
+abort! abort and check out the original branch
+skip! skip current patch and continue
+edit-todo! edit the todo list during an interactive rebase
+quit! abort but keep HEAD where it is
+show-current-patch! show the patch file being applied or merged
+"
+. git-sh-setup
+set_reflog_action rebase
+require_work_tree_exists
+cd_to_toplevel
+
+LF='
+'
+ok_to_skip_pre_rebase=
+
+squash_onto=
+unset onto
+unset restrict_revision
+cmd=
+strategy=
+strategy_opts=
+do_merge=
+merge_dir="$GIT_DIR"/rebase-merge
+apply_dir="$GIT_DIR"/rebase-apply
+verbose=
+diffstat=
+test "$(git config --bool rebase.stat)" = true && diffstat=t
+autostash="$(git config --bool rebase.autostash || echo false)"
+fork_point=auto
+git_am_opt=
+git_format_patch_opt=
+rebase_root=
+force_rebase=
+allow_rerere_autoupdate=
+# Non-empty if a rebase was in progress when 'git rebase' was invoked
+in_progress=
+# One of {am, merge, interactive}
+type=
+# One of {"$GIT_DIR"/rebase-apply, "$GIT_DIR"/rebase-merge}
+state_dir=
+# One of {'', continue, skip, abort}, as parsed from command line
+action=
+rebase_merges=
+rebase_cousins=
+preserve_merges=
+autosquash=
+keep_empty=
+allow_empty_message=--allow-empty-message
+signoff=
+test "$(git config --bool rebase.autosquash)" = "true" && autosquash=t
+case "$(git config --bool commit.gpgsign)" in
+true) gpg_sign_opt=-S ;;
+*) gpg_sign_opt= ;;
+esac
+. git-rebase--common
+
+read_basic_state () {
+ test -f "$state_dir/head-name" &&
+ test -f "$state_dir/onto" &&
+ head_name=$(cat "$state_dir"/head-name) &&
+ onto=$(cat "$state_dir"/onto) &&
+ # We always write to orig-head, but interactive rebase used to write to
+ # head. Fall back to reading from head to cover for the case that the
+ # user upgraded git with an ongoing interactive rebase.
+ if test -f "$state_dir"/orig-head
+ then
+ orig_head=$(cat "$state_dir"/orig-head)
+ else
+ orig_head=$(cat "$state_dir"/head)
+ fi &&
+ GIT_QUIET=$(cat "$state_dir"/quiet) &&
+ test -f "$state_dir"/verbose && verbose=t
+ test -f "$state_dir"/strategy && strategy="$(cat "$state_dir"/strategy)"
+ test -f "$state_dir"/strategy_opts &&
+ strategy_opts="$(cat "$state_dir"/strategy_opts)"
+ test -f "$state_dir"/allow_rerere_autoupdate &&
+ allow_rerere_autoupdate="$(cat "$state_dir"/allow_rerere_autoupdate)"
+ test -f "$state_dir"/gpg_sign_opt &&
+ gpg_sign_opt="$(cat "$state_dir"/gpg_sign_opt)"
+ test -f "$state_dir"/signoff && {
+ signoff="$(cat "$state_dir"/signoff)"
+ force_rebase=t
+ }
+}
+
+finish_rebase () {
+ rm -f "$(git rev-parse --git-path REBASE_HEAD)"
+ apply_autostash &&
+ { git gc --auto || true; } &&
+ rm -rf "$state_dir"
+}
+
+run_interactive () {
+ GIT_CHERRY_PICK_HELP="$resolvemsg"
+ export GIT_CHERRY_PICK_HELP
+
+ test -n "$keep_empty" && keep_empty="--keep-empty"
+ test -n "$rebase_merges" && rebase_merges="--rebase-merges"
+ test -n "$rebase_cousins" && rebase_cousins="--rebase-cousins"
+ test -n "$autosquash" && autosquash="--autosquash"
+ test -n "$verbose" && verbose="--verbose"
+ test -n "$force_rebase" && force_rebase="--no-ff"
+ test -n "$restrict_revision" && \
+ restrict_revision="--restrict-revision=^$restrict_revision"
+ test -n "$upstream" && upstream="--upstream=$upstream"
+ test -n "$onto" && onto="--onto=$onto"
+ test -n "$squash_onto" && squash_onto="--squash-onto=$squash_onto"
+ test -n "$onto_name" && onto_name="--onto-name=$onto_name"
+ test -n "$head_name" && head_name="--head-name=$head_name"
+ test -n "$strategy" && strategy="--strategy=$strategy"
+ test -n "$strategy_opts" && strategy_opts="--strategy-opts=$strategy_opts"
+ test -n "$switch_to" && switch_to="--switch-to=$switch_to"
+ test -n "$cmd" && cmd="--cmd=$cmd"
+ test -n "$action" && action="--$action"
+
+ exec git rebase--interactive "$action" "$keep_empty" "$rebase_merges" "$rebase_cousins" \
+ "$upstream" "$onto" "$squash_onto" "$restrict_revision" \
+ "$allow_empty_message" "$autosquash" "$verbose" \
+ "$force_rebase" "$onto_name" "$head_name" "$strategy" \
+ "$strategy_opts" "$cmd" "$switch_to" \
+ "$allow_rerere_autoupdate" "$gpg_sign_opt" "$signoff"
+}
+
+run_specific_rebase () {
+ if [ "$interactive_rebase" = implied ]; then
+ GIT_EDITOR=:
+ export GIT_EDITOR
+ autosquash=
+ fi
+
+ if test -n "$interactive_rebase" -a -z "$preserve_merges"
+ then
+ run_interactive
+ else
+ . git-rebase--$type
+
+ if test -z "$preserve_merges"
+ then
+ git_rebase__$type
+ else
+ git_rebase__preserve_merges
+ fi
+ fi
+
+ ret=$?
+ if test $ret -eq 0
+ then
+ finish_rebase
+ elif test $ret -eq 2 # special exit status for rebase -p
+ then
+ apply_autostash &&
+ rm -rf "$state_dir" &&
+ die "Nothing to do"
+ fi
+ exit $ret
+}
+
+run_pre_rebase_hook () {
+ if test -z "$ok_to_skip_pre_rebase" &&
+ test -x "$(git rev-parse --git-path hooks/pre-rebase)"
+ then
+ "$(git rev-parse --git-path hooks/pre-rebase)" ${1+"$@"} ||
+ die "$(gettext "The pre-rebase hook refused to rebase.")"
+ fi
+}
+
+test -f "$apply_dir"/applying &&
+ die "$(gettext "It looks like 'git am' is in progress. Cannot rebase.")"
+
+if test -d "$apply_dir"
+then
+ type=am
+ state_dir="$apply_dir"
+elif test -d "$merge_dir"
+then
+ if test -d "$merge_dir"/rewritten
+ then
+ type=preserve-merges
+ interactive_rebase=explicit
+ preserve_merges=t
+ elif test -f "$merge_dir"/interactive
+ then
+ type=interactive
+ interactive_rebase=explicit
+ else
+ type=merge
+ fi
+ state_dir="$merge_dir"
+fi
+test -n "$type" && in_progress=t
+
+total_argc=$#
+while test $# != 0
+do
+ case "$1" in
+ --no-verify)
+ ok_to_skip_pre_rebase=yes
+ ;;
+ --verify)
+ ok_to_skip_pre_rebase=
+ ;;
+ --continue|--skip|--abort|--quit|--edit-todo|--show-current-patch)
+ test $total_argc -eq 2 || usage
+ action=${1##--}
+ ;;
+ --onto=*)
+ onto="${1#--onto=}"
+ ;;
+ --exec=*)
+ cmd="${cmd}exec ${1#--exec=}${LF}"
+ test -z "$interactive_rebase" && interactive_rebase=implied
+ ;;
+ --interactive)
+ interactive_rebase=explicit
+ ;;
+ --keep-empty)
+ keep_empty=yes
+ ;;
+ --allow-empty-message)
+ allow_empty_message=--allow-empty-message
+ ;;
+ --no-keep-empty)
+ keep_empty=
+ ;;
+ --rebase-merges)
+ rebase_merges=t
+ test -z "$interactive_rebase" && interactive_rebase=implied
+ ;;
+ --rebase-merges=*)
+ rebase_merges=t
+ case "${1#*=}" in
+ rebase-cousins) rebase_cousins=t;;
+ no-rebase-cousins) rebase_cousins=;;
+ *) die "Unknown mode: $1";;
+ esac
+ test -z "$interactive_rebase" && interactive_rebase=implied
+ ;;
+ --preserve-merges)
+ preserve_merges=t
+ test -z "$interactive_rebase" && interactive_rebase=implied
+ ;;
+ --autosquash)
+ autosquash=t
+ ;;
+ --no-autosquash)
+ autosquash=
+ ;;
+ --fork-point)
+ fork_point=t
+ ;;
+ --no-fork-point)
+ fork_point=
+ ;;
+ --merge)
+ do_merge=t
+ ;;
+ --strategy-option=*)
+ strategy_opts="$strategy_opts $(git rev-parse --sq-quote "--${1#--strategy-option=}" | sed -e s/^.//)"
+ do_merge=t
+ test -z "$strategy" && strategy=recursive
+ ;;
+ --strategy=*)
+ strategy="${1#--strategy=}"
+ do_merge=t
+ ;;
+ --no-stat)
+ diffstat=
+ ;;
+ --stat)
+ diffstat=t
+ ;;
+ --autostash)
+ autostash=true
+ ;;
+ --no-autostash)
+ autostash=false
+ ;;
+ --verbose)
+ verbose=t
+ diffstat=t
+ GIT_QUIET=
+ ;;
+ --quiet)
+ GIT_QUIET=t
+ git_am_opt="$git_am_opt -q"
+ verbose=
+ diffstat=
+ ;;
+ --whitespace=*)
+ git_am_opt="$git_am_opt --whitespace=${1#--whitespace=}"
+ case "${1#--whitespace=}" in
+ fix|strip)
+ force_rebase=t
+ ;;
+ esac
+ ;;
+ --ignore-whitespace)
+ git_am_opt="$git_am_opt $1"
+ ;;
+ --signoff)
+ signoff=--signoff
+ ;;
+ --no-signoff)
+ signoff=
+ ;;
+ --committer-date-is-author-date|--ignore-date)
+ git_am_opt="$git_am_opt $1"
+ force_rebase=t
+ ;;
+ -C*)
+ git_am_opt="$git_am_opt $1"
+ ;;
+ --root)
+ rebase_root=t
+ ;;
+ --force-rebase|--no-ff)
+ force_rebase=t
+ ;;
+ --rerere-autoupdate|--no-rerere-autoupdate)
+ allow_rerere_autoupdate="$1"
+ ;;
+ --gpg-sign)
+ gpg_sign_opt=-S
+ ;;
+ --gpg-sign=*)
+ gpg_sign_opt="-S${1#--gpg-sign=}"
+ ;;
+ --)
+ shift
+ break
+ ;;
+ *)
+ usage
+ ;;
+ esac
+ shift
+done
+test $# -gt 2 && usage
+
+if test -n "$action"
+then
+ test -z "$in_progress" && die "$(gettext "No rebase in progress?")"
+ # Only interactive rebase uses detailed reflog messages
+ if test -n "$interactive_rebase" && test "$GIT_REFLOG_ACTION" = rebase
+ then
+ GIT_REFLOG_ACTION="rebase -i ($action)"
+ export GIT_REFLOG_ACTION
+ fi
+fi
+
+if test "$action" = "edit-todo" && test -z "$interactive_rebase"
+then
+ die "$(gettext "The --edit-todo action can only be used during interactive rebase.")"
+fi
+
+case "$action" in
+continue)
+ # Sanity check
+ git rev-parse --verify HEAD >/dev/null ||
+ die "$(gettext "Cannot read HEAD")"
+ git update-index --ignore-submodules --refresh &&
+ git diff-files --quiet --ignore-submodules || {
+ echo "$(gettext "You must edit all merge conflicts and then
+mark them as resolved using git add")"
+ exit 1
+ }
+ read_basic_state
+ run_specific_rebase
+ ;;
+skip)
+ output git reset --hard HEAD || exit $?
+ read_basic_state
+ run_specific_rebase
+ ;;
+abort)
+ git rerere clear
+ read_basic_state
+ case "$head_name" in
+ refs/*)
+ git symbolic-ref -m "rebase: aborting" HEAD $head_name ||
+ die "$(eval_gettext "Could not move back to \$head_name")"
+ ;;
+ esac
+ output git reset --hard $orig_head
+ finish_rebase
+ exit
+ ;;
+quit)
+ exec rm -rf "$state_dir"
+ ;;
+edit-todo)
+ run_specific_rebase
+ ;;
+show-current-patch)
+ run_specific_rebase
+ die "BUG: run_specific_rebase is not supposed to return here"
+ ;;
+esac
+
+# Make sure no rebase is in progress
+if test -n "$in_progress"
+then
+ state_dir_base=${state_dir##*/}
+ cmd_live_rebase="git rebase (--continue | --abort | --skip)"
+ cmd_clear_stale_rebase="rm -fr \"$state_dir\""
+ die "
+$(eval_gettext 'It seems that there is already a $state_dir_base directory, and
+I wonder if you are in the middle of another rebase. If that is the
+case, please try
+ $cmd_live_rebase
+If that is not the case, please
+ $cmd_clear_stale_rebase
+and run me again. I am stopping in case you still have something
+valuable there.')"
+fi
+
+if test -n "$rebase_root" && test -z "$onto"
+then
+ test -z "$interactive_rebase" && interactive_rebase=implied
+fi
+
+if test -n "$keep_empty"
+then
+ test -z "$interactive_rebase" && interactive_rebase=implied
+fi
+
+if test -n "$interactive_rebase"
+then
+ if test -z "$preserve_merges"
+ then
+ type=interactive
+ else
+ type=preserve-merges
+ fi
+
+ state_dir="$merge_dir"
+elif test -n "$do_merge"
+then
+ type=merge
+ state_dir="$merge_dir"
+else
+ type=am
+ state_dir="$apply_dir"
+fi
+
+if test -t 2 && test -z "$GIT_QUIET"
+then
+ git_format_patch_opt="$git_format_patch_opt --progress"
+fi
+
+if test -n "$git_am_opt"; then
+ incompatible_opts=$(echo " $git_am_opt " | \
+ sed -e 's/ -q / /g' -e 's/^ \(.*\) $/\1/')
+ if test -n "$interactive_rebase"
+ then
+ if test -n "$incompatible_opts"
+ then
+ die "$(gettext "error: cannot combine interactive options (--interactive, --exec, --rebase-merges, --preserve-merges, --keep-empty, --root + --onto) with am options ($incompatible_opts)")"
+ fi
+ fi
+ if test -n "$do_merge"; then
+ if test -n "$incompatible_opts"
+ then
+ die "$(gettext "error: cannot combine merge options (--merge, --strategy, --strategy-option) with am options ($incompatible_opts)")"
+ fi
+ fi
+fi
+
+if test -n "$signoff"
+then
+ test -n "$preserve_merges" &&
+ die "$(gettext "error: cannot combine '--signoff' with '--preserve-merges'")"
+ git_am_opt="$git_am_opt $signoff"
+ force_rebase=t
+fi
+
+if test -n "$preserve_merges"
+then
+ # Note: incompatibility with --signoff handled in signoff block above
+ # Note: the incompatibility with --interactive is only a strong warning;
+ # git-rebase.txt merely cautions "unless you know what you are doing"
+ test -n "$rebase_merges" &&
+ die "$(gettext "error: cannot combine '--preserve-merges' with '--rebase-merges'")"
+fi
+
+if test -n "$rebase_merges"
+then
+ test -n "$strategy_opts" &&
+ die "$(gettext "error: cannot combine '--rebase-merges' with '--strategy-option'")"
+ test -n "$strategy" &&
+ die "$(gettext "error: cannot combine '--rebase-merges' with '--strategy'")"
+fi
+
+if test -z "$rebase_root"
+then
+ case "$#" in
+ 0)
+ if ! upstream_name=$(git rev-parse --symbolic-full-name \
+ --verify -q @{upstream} 2>/dev/null)
+ then
+ . git-parse-remote
+ error_on_missing_default_upstream "rebase" "rebase" \
+ "against" "git rebase $(gettext '<branch>')"
+ fi
+
+ test "$fork_point" = auto && fork_point=t
+ ;;
+ *) upstream_name="$1"
+ if test "$upstream_name" = "-"
+ then
+ upstream_name="@{-1}"
+ fi
+ shift
+ ;;
+ esac
+ upstream=$(peel_committish "${upstream_name}") ||
+ die "$(eval_gettext "invalid upstream '\$upstream_name'")"
+ upstream_arg="$upstream_name"
+else
+ if test -z "$onto"
+ then
+ empty_tree=$(git hash-object -t tree /dev/null)
+ onto=$(git commit-tree $empty_tree </dev/null)
+ squash_onto="$onto"
+ fi
+ unset upstream_name
+ unset upstream
+ test $# -gt 1 && usage
+ upstream_arg=--root
+fi
+
+# Make sure the branch to rebase onto is valid.
+onto_name=${onto-"$upstream_name"}
+case "$onto_name" in
+*...*)
+ if left=${onto_name%...*} right=${onto_name#*...} &&
+ onto=$(git merge-base --all ${left:-HEAD} ${right:-HEAD})
+ then
+ case "$onto" in
+ ?*"$LF"?*)
+ die "$(eval_gettext "\$onto_name: there is more than one merge base")"
+ ;;
+ '')
+ die "$(eval_gettext "\$onto_name: there is no merge base")"
+ ;;
+ esac
+ else
+ die "$(eval_gettext "\$onto_name: there is no merge base")"
+ fi
+ ;;
+*)
+ onto=$(peel_committish "$onto_name") ||
+ die "$(eval_gettext "Does not point to a valid commit: \$onto_name")"
+ ;;
+esac
+
+# If the branch to rebase is given, that is the branch we will rebase
+# $branch_name -- branch/commit being rebased, or HEAD (already detached)
+# $orig_head -- commit object name of tip of the branch before rebasing
+# $head_name -- refs/heads/<that-branch> or "detached HEAD"
+switch_to=
+case "$#" in
+1)
+ # Is it "rebase other $branchname" or "rebase other $commit"?
+ branch_name="$1"
+ switch_to="$1"
+
+ # Is it a local branch?
+ if git show-ref --verify --quiet -- "refs/heads/$branch_name" &&
+ orig_head=$(git rev-parse -q --verify "refs/heads/$branch_name")
+ then
+ head_name="refs/heads/$branch_name"
+ # If not, is it a valid ref (branch or commit)?
+ elif orig_head=$(git rev-parse -q --verify "$branch_name")
+ then
+ head_name="detached HEAD"
+
+ else
+ die "$(eval_gettext "fatal: no such branch/commit '\$branch_name'")"
+ fi
+ ;;
+0)
+ # Do not need to switch branches, we are already on it.
+ if branch_name=$(git symbolic-ref -q HEAD)
+ then
+ head_name=$branch_name
+ branch_name=$(expr "z$branch_name" : 'zrefs/heads/\(.*\)')
+ else
+ head_name="detached HEAD"
+ branch_name=HEAD
+ fi
+ orig_head=$(git rev-parse --verify HEAD) || exit
+ ;;
+*)
+ die "BUG: unexpected number of arguments left to parse"
+ ;;
+esac
+
+if test "$fork_point" = t
+then
+ new_upstream=$(git merge-base --fork-point "$upstream_name" \
+ "${switch_to:-HEAD}")
+ if test -n "$new_upstream"
+ then
+ restrict_revision=$new_upstream
+ fi
+fi
+
+if test "$autostash" = true && ! (require_clean_work_tree) 2>/dev/null
+then
+ stash_sha1=$(git stash create "autostash") ||
+ die "$(gettext 'Cannot autostash')"
+
+ mkdir -p "$state_dir" &&
+ echo $stash_sha1 >"$state_dir/autostash" &&
+ stash_abbrev=$(git rev-parse --short $stash_sha1) &&
+ echo "$(eval_gettext 'Created autostash: $stash_abbrev')" &&
+ git reset --hard
+fi
+
+require_clean_work_tree "rebase" "$(gettext "Please commit or stash them.")"
+
+# Now we are rebasing commits $upstream..$orig_head (or with --root,
+# everything leading up to $orig_head) on top of $onto
+
+# Check if we are already based on $onto with linear history,
+# but this should be done only when upstream and onto are the same
+# and if this is not an interactive rebase.
+mb=$(git merge-base "$onto" "$orig_head")
+if test -z "$interactive_rebase" && test "$upstream" = "$onto" &&
+ test "$mb" = "$onto" && test -z "$restrict_revision" &&
+ # linear history?
+ ! (git rev-list --parents "$onto".."$orig_head" | sane_grep " .* ") > /dev/null
+then
+ if test -z "$force_rebase"
+ then
+ # Lazily switch to the target branch if needed...
+ test -z "$switch_to" ||
+ GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $switch_to" \
+ git checkout -q "$switch_to" --
+ if test "$branch_name" = "HEAD" &&
+ ! git symbolic-ref -q HEAD
+ then
+ say "$(eval_gettext "HEAD is up to date.")"
+ else
+ say "$(eval_gettext "Current branch \$branch_name is up to date.")"
+ fi
+ finish_rebase
+ exit 0
+ else
+ if test "$branch_name" = "HEAD" &&
+ ! git symbolic-ref -q HEAD
+ then
+ say "$(eval_gettext "HEAD is up to date, rebase forced.")"
+ else
+ say "$(eval_gettext "Current branch \$branch_name is up to date, rebase forced.")"
+ fi
+ fi
+fi
+
+# If a hook exists, give it a chance to interrupt
+run_pre_rebase_hook "$upstream_arg" "$@"
+
+if test -n "$diffstat"
+then
+ if test -n "$verbose"
+ then
+ echo "$(eval_gettext "Changes from \$mb to \$onto:")"
+ fi
+ # We want color (if set), but no pager
+ GIT_PAGER='' git diff --stat --summary "$mb" "$onto"
+fi
+
+test -n "$interactive_rebase" && run_specific_rebase
+
+# Detach HEAD and reset the tree
+say "$(gettext "First, rewinding head to replay your work on top of it...")"
+
+GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $onto_name" \
+ git checkout -q "$onto^0" || die "could not detach HEAD"
+git update-ref ORIG_HEAD $orig_head
+
+# If the $onto is a proper descendant of the tip of the branch, then
+# we just fast-forwarded.
+if test "$mb" = "$orig_head"
+then
+ say "$(eval_gettext "Fast-forwarded \$branch_name to \$onto_name.")"
+ move_to_original_branch
+ finish_rebase
+ exit 0
+fi
+
+if test -n "$rebase_root"
+then
+ revisions="$onto..$orig_head"
+else
+ revisions="${restrict_revision-$upstream}..$orig_head"
+fi
+
+run_specific_rebase
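
A brief sketch of how the option parsing above is exercised (branch names are placeholders):

	# recreate merge commits instead of flattening the history
	$ git rebase --rebase-merges origin/master

	# combining it with --preserve-merges is rejected by the check above
	$ git rebase --rebase-merges --preserve-merges    # dies with an error
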
--- /dev/null
+
+resolvemsg="
+$(gettext 'Resolve all conflicts manually, mark them as resolved with
+"git add/rm <conflicted_files>", then run "git rebase --continue".
+You can instead skip this commit: run "git rebase --skip".
+To abort and get back to the state before "git rebase", run "git rebase --abort".')
+"
+
+write_basic_state () {
+ echo "$head_name" > "$state_dir"/head-name &&
+ echo "$onto" > "$state_dir"/onto &&
+ echo "$orig_head" > "$state_dir"/orig-head &&
+ echo "$GIT_QUIET" > "$state_dir"/quiet &&
+ test t = "$verbose" && : > "$state_dir"/verbose
+ test -n "$strategy" && echo "$strategy" > "$state_dir"/strategy
+ test -n "$strategy_opts" && echo "$strategy_opts" > \
+ "$state_dir"/strategy_opts
+ test -n "$allow_rerere_autoupdate" && echo "$allow_rerere_autoupdate" > \
+ "$state_dir"/allow_rerere_autoupdate
+ test -n "$gpg_sign_opt" && echo "$gpg_sign_opt" > "$state_dir"/gpg_sign_opt
+ test -n "$signoff" && echo "$signoff" >"$state_dir"/signoff
+}
+
+apply_autostash () {
+ if test -f "$state_dir/autostash"
+ then
+ stash_sha1=$(cat "$state_dir/autostash")
+ if git stash apply $stash_sha1 >/dev/null 2>&1
+ then
+ echo "$(gettext 'Applied autostash.')" >&2
+ else
+ git stash store -m "autostash" -q $stash_sha1 ||
+ die "$(eval_gettext "Cannot store \$stash_sha1")"
+ gettext 'Applying autostash resulted in conflicts.
+Your changes are safe in the stash.
+You can run "git stash pop" or "git stash drop" at any time.
+' >&2
+ fi
+ fi
+}
+
+move_to_original_branch () {
+ case "$head_name" in
+ refs/*)
+ message="rebase finished: $head_name onto $onto"
+ git update-ref -m "$message" \
+ $head_name $(git rev-parse HEAD) $orig_head &&
+ git symbolic-ref \
+ -m "rebase finished: returning to $head_name" \
+ HEAD $head_name ||
+ die "$(eval_gettext "Could not move back to \$head_name")"
+ ;;
+ esac
+}
+
+output () {
+ case "$verbose" in
+ '')
+ output=$("$@" 2>&1 )
+ status=$?
+ test $status != 0 && printf "%s\n" "$output"
+ return $status
+ ;;
+ *)
+ "$@"
+ ;;
+ esac
+}
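
The autostash helpers above are normally driven by configuration or a per-invocation flag; a minimal sketch (the branch name is a placeholder):

	# stash local changes before rebasing and reapply them afterwards
	$ git config rebase.autostash true
	# or only for a single run
	$ git rebase --autostash origin/master
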
EOF
append_todo_help
gettext "
- However, if you remove everything, the rebase will be aborted.
+However, if you remove everything, the rebase will be aborted.
- " | git stripspace --comment-lines >>"$todo"
+" | git stripspace --comment-lines >>"$todo"
if test -z "$keep_empty"
then
+++ /dev/null
-#!/bin/sh
-#
-# Copyright (c) 2005 Junio C Hamano.
-#
-
-SUBDIRECTORY_OK=Yes
-OPTIONS_KEEPDASHDASH=
-OPTIONS_STUCKLONG=t
-OPTIONS_SPEC="\
-git rebase [-i] [options] [--exec <cmd>] [--onto <newbase>] [<upstream>] [<branch>]
-git rebase [-i] [options] [--exec <cmd>] [--onto <newbase>] --root [<branch>]
-git rebase --continue | --abort | --skip | --edit-todo
---
- Available options are
-v,verbose! display a diffstat of what changed upstream
-q,quiet! be quiet. implies --no-stat
-autostash automatically stash/stash pop before and after
-fork-point use 'merge-base --fork-point' to refine upstream
-onto=! rebase onto given branch instead of upstream
-r,rebase-merges? try to rebase merges instead of skipping them
-p,preserve-merges! try to recreate merges instead of ignoring them
-s,strategy=! use the given merge strategy
-no-ff! cherry-pick all commits, even if unchanged
-m,merge! use merging strategies to rebase
-i,interactive! let the user edit the list of commits to rebase
-x,exec=! add exec lines after each commit of the editable list
-k,keep-empty preserve empty commits during rebase
-allow-empty-message allow rebasing commits with empty messages
-f,force-rebase! force rebase even if branch is up to date
-X,strategy-option=! pass the argument through to the merge strategy
-stat! display a diffstat of what changed upstream
-n,no-stat! do not show diffstat of what changed upstream
-verify allow pre-rebase hook to run
-rerere-autoupdate allow rerere to update index with resolved conflicts
-root! rebase all reachable commits up to the root(s)
-autosquash move commits that begin with squash!/fixup! under -i
-committer-date-is-author-date! passed to 'git am'
-ignore-date! passed to 'git am'
-signoff passed to 'git am'
-whitespace=! passed to 'git apply'
-ignore-whitespace! passed to 'git apply'
-C=! passed to 'git apply'
-S,gpg-sign? GPG-sign commits
- Actions:
-continue! continue
-abort! abort and check out the original branch
-skip! skip current patch and continue
-edit-todo! edit the todo list during an interactive rebase
-quit! abort but keep HEAD where it is
-show-current-patch! show the patch file being applied or merged
-"
-. git-sh-setup
-set_reflog_action rebase
-require_work_tree_exists
-cd_to_toplevel
-
-LF='
-'
-ok_to_skip_pre_rebase=
-resolvemsg="
-$(gettext 'Resolve all conflicts manually, mark them as resolved with
-"git add/rm <conflicted_files>", then run "git rebase --continue".
-You can instead skip this commit: run "git rebase --skip".
-To abort and get back to the state before "git rebase", run "git rebase --abort".')
-"
-squash_onto=
-unset onto
-unset restrict_revision
-cmd=
-strategy=
-strategy_opts=
-do_merge=
-merge_dir="$GIT_DIR"/rebase-merge
-apply_dir="$GIT_DIR"/rebase-apply
-verbose=
-diffstat=
-test "$(git config --bool rebase.stat)" = true && diffstat=t
-autostash="$(git config --bool rebase.autostash || echo false)"
-fork_point=auto
-git_am_opt=
-git_format_patch_opt=
-rebase_root=
-force_rebase=
-allow_rerere_autoupdate=
-# Non-empty if a rebase was in progress when 'git rebase' was invoked
-in_progress=
-# One of {am, merge, interactive}
-type=
-# One of {"$GIT_DIR"/rebase-apply, "$GIT_DIR"/rebase-merge}
-state_dir=
-# One of {'', continue, skip, abort}, as parsed from command line
-action=
-rebase_merges=
-rebase_cousins=
-preserve_merges=
-autosquash=
-keep_empty=
-allow_empty_message=
-signoff=
-test "$(git config --bool rebase.autosquash)" = "true" && autosquash=t
-case "$(git config --bool commit.gpgsign)" in
-true) gpg_sign_opt=-S ;;
-*) gpg_sign_opt= ;;
-esac
-
-read_basic_state () {
- test -f "$state_dir/head-name" &&
- test -f "$state_dir/onto" &&
- head_name=$(cat "$state_dir"/head-name) &&
- onto=$(cat "$state_dir"/onto) &&
- # We always write to orig-head, but interactive rebase used to write to
- # head. Fall back to reading from head to cover for the case that the
- # user upgraded git with an ongoing interactive rebase.
- if test -f "$state_dir"/orig-head
- then
- orig_head=$(cat "$state_dir"/orig-head)
- else
- orig_head=$(cat "$state_dir"/head)
- fi &&
- GIT_QUIET=$(cat "$state_dir"/quiet) &&
- test -f "$state_dir"/verbose && verbose=t
- test -f "$state_dir"/strategy && strategy="$(cat "$state_dir"/strategy)"
- test -f "$state_dir"/strategy_opts &&
- strategy_opts="$(cat "$state_dir"/strategy_opts)"
- test -f "$state_dir"/allow_rerere_autoupdate &&
- allow_rerere_autoupdate="$(cat "$state_dir"/allow_rerere_autoupdate)"
- test -f "$state_dir"/gpg_sign_opt &&
- gpg_sign_opt="$(cat "$state_dir"/gpg_sign_opt)"
- test -f "$state_dir"/signoff && {
- signoff="$(cat "$state_dir"/signoff)"
- force_rebase=t
- }
-}
-
-write_basic_state () {
- echo "$head_name" > "$state_dir"/head-name &&
- echo "$onto" > "$state_dir"/onto &&
- echo "$orig_head" > "$state_dir"/orig-head &&
- echo "$GIT_QUIET" > "$state_dir"/quiet &&
- test t = "$verbose" && : > "$state_dir"/verbose
- test -n "$strategy" && echo "$strategy" > "$state_dir"/strategy
- test -n "$strategy_opts" && echo "$strategy_opts" > \
- "$state_dir"/strategy_opts
- test -n "$allow_rerere_autoupdate" && echo "$allow_rerere_autoupdate" > \
- "$state_dir"/allow_rerere_autoupdate
- test -n "$gpg_sign_opt" && echo "$gpg_sign_opt" > "$state_dir"/gpg_sign_opt
- test -n "$signoff" && echo "$signoff" >"$state_dir"/signoff
-}
-
-output () {
- case "$verbose" in
- '')
- output=$("$@" 2>&1 )
- status=$?
- test $status != 0 && printf "%s\n" "$output"
- return $status
- ;;
- *)
- "$@"
- ;;
- esac
-}
-
-move_to_original_branch () {
- case "$head_name" in
- refs/*)
- message="rebase finished: $head_name onto $onto"
- git update-ref -m "$message" \
- $head_name $(git rev-parse HEAD) $orig_head &&
- git symbolic-ref \
- -m "rebase finished: returning to $head_name" \
- HEAD $head_name ||
- die "$(eval_gettext "Could not move back to \$head_name")"
- ;;
- esac
-}
-
-apply_autostash () {
- if test -f "$state_dir/autostash"
- then
- stash_sha1=$(cat "$state_dir/autostash")
- if git stash apply $stash_sha1 >/dev/null 2>&1
- then
- echo "$(gettext 'Applied autostash.')" >&2
- else
- git stash store -m "autostash" -q $stash_sha1 ||
- die "$(eval_gettext "Cannot store \$stash_sha1")"
- gettext 'Applying autostash resulted in conflicts.
-Your changes are safe in the stash.
-You can run "git stash pop" or "git stash drop" at any time.
-' >&2
- fi
- fi
-}
-
-finish_rebase () {
- rm -f "$(git rev-parse --git-path REBASE_HEAD)"
- apply_autostash &&
- { git gc --auto || true; } &&
- rm -rf "$state_dir"
-}
-
-run_interactive () {
- GIT_CHERRY_PICK_HELP="$resolvemsg"
- export GIT_CHERRY_PICK_HELP
-
- test -n "$keep_empty" && keep_empty="--keep-empty"
- test -n "$rebase_merges" && rebase_merges="--rebase-merges"
- test -n "$rebase_cousins" && rebase_cousins="--rebase-cousins"
- test -n "$autosquash" && autosquash="--autosquash"
- test -n "$verbose" && verbose="--verbose"
- test -n "$force_rebase" && force_rebase="--no-ff"
- test -n "$restrict_revision" && \
- restrict_revision="--restrict-revision=^$restrict_revision"
- test -n "$upstream" && upstream="--upstream=$upstream"
- test -n "$onto" && onto="--onto=$onto"
- test -n "$squash_onto" && squash_onto="--squash-onto=$squash_onto"
- test -n "$onto_name" && onto_name="--onto-name=$onto_name"
- test -n "$head_name" && head_name="--head-name=$head_name"
- test -n "$strategy" && strategy="--strategy=$strategy"
- test -n "$strategy_opts" && strategy_opts="--strategy-opts=$strategy_opts"
- test -n "$switch_to" && switch_to="--switch-to=$switch_to"
- test -n "$cmd" && cmd="--cmd=$cmd"
- test -n "$action" && action="--$action"
-
- exec git rebase--interactive "$action" "$keep_empty" "$rebase_merges" "$rebase_cousins" \
- "$upstream" "$onto" "$squash_onto" "$restrict_revision" \
- "$allow_empty_message" "$autosquash" "$verbose" \
- "$force_rebase" "$onto_name" "$head_name" "$strategy" \
- "$strategy_opts" "$cmd" "$switch_to" \
- "$allow_rerere_autoupdate" "$gpg_sign_opt" "$signoff"
-}
-
-run_specific_rebase () {
- if [ "$interactive_rebase" = implied ]; then
- GIT_EDITOR=:
- export GIT_EDITOR
- autosquash=
- fi
-
- if test -n "$interactive_rebase" -a -z "$preserve_merges"
- then
- run_interactive
- else
- . git-rebase--$type
-
- if test -z "$preserve_merges"
- then
- git_rebase__$type
- else
- git_rebase__preserve_merges
- fi
- fi
-
- ret=$?
- if test $ret -eq 0
- then
- finish_rebase
- elif test $ret -eq 2 # special exit status for rebase -p
- then
- apply_autostash &&
- rm -rf "$state_dir" &&
- die "Nothing to do"
- fi
- exit $ret
-}
-
-run_pre_rebase_hook () {
- if test -z "$ok_to_skip_pre_rebase" &&
- test -x "$(git rev-parse --git-path hooks/pre-rebase)"
- then
- "$(git rev-parse --git-path hooks/pre-rebase)" ${1+"$@"} ||
- die "$(gettext "The pre-rebase hook refused to rebase.")"
- fi
-}
-
-test -f "$apply_dir"/applying &&
- die "$(gettext "It looks like 'git am' is in progress. Cannot rebase.")"
-
-if test -d "$apply_dir"
-then
- type=am
- state_dir="$apply_dir"
-elif test -d "$merge_dir"
-then
- if test -d "$merge_dir"/rewritten
- then
- type=preserve-merges
- interactive_rebase=explicit
- preserve_merges=t
- elif test -f "$merge_dir"/interactive
- then
- type=interactive
- interactive_rebase=explicit
- else
- type=merge
- fi
- state_dir="$merge_dir"
-fi
-test -n "$type" && in_progress=t
-
-total_argc=$#
-while test $# != 0
-do
- case "$1" in
- --no-verify)
- ok_to_skip_pre_rebase=yes
- ;;
- --verify)
- ok_to_skip_pre_rebase=
- ;;
- --continue|--skip|--abort|--quit|--edit-todo|--show-current-patch)
- test $total_argc -eq 2 || usage
- action=${1##--}
- ;;
- --onto=*)
- onto="${1#--onto=}"
- ;;
- --exec=*)
- cmd="${cmd}exec ${1#--exec=}${LF}"
- test -z "$interactive_rebase" && interactive_rebase=implied
- ;;
- --interactive)
- interactive_rebase=explicit
- ;;
- --keep-empty)
- keep_empty=yes
- ;;
- --allow-empty-message)
- allow_empty_message=--allow-empty-message
- ;;
- --no-keep-empty)
- keep_empty=
- ;;
- --rebase-merges)
- rebase_merges=t
- test -z "$interactive_rebase" && interactive_rebase=implied
- ;;
- --rebase-merges=*)
- rebase_merges=t
- case "${1#*=}" in
- rebase-cousins) rebase_cousins=t;;
- no-rebase-cousins) rebase_cousins=;;
- *) die "Unknown mode: $1";;
- esac
- test -z "$interactive_rebase" && interactive_rebase=implied
- ;;
- --preserve-merges)
- preserve_merges=t
- test -z "$interactive_rebase" && interactive_rebase=implied
- ;;
- --autosquash)
- autosquash=t
- ;;
- --no-autosquash)
- autosquash=
- ;;
- --fork-point)
- fork_point=t
- ;;
- --no-fork-point)
- fork_point=
- ;;
- --merge)
- do_merge=t
- ;;
- --strategy-option=*)
- strategy_opts="$strategy_opts $(git rev-parse --sq-quote "--${1#--strategy-option=}" | sed -e s/^.//)"
- do_merge=t
- test -z "$strategy" && strategy=recursive
- ;;
- --strategy=*)
- strategy="${1#--strategy=}"
- do_merge=t
- ;;
- --no-stat)
- diffstat=
- ;;
- --stat)
- diffstat=t
- ;;
- --autostash)
- autostash=true
- ;;
- --no-autostash)
- autostash=false
- ;;
- --verbose)
- verbose=t
- diffstat=t
- GIT_QUIET=
- ;;
- --quiet)
- GIT_QUIET=t
- git_am_opt="$git_am_opt -q"
- verbose=
- diffstat=
- ;;
- --whitespace=*)
- git_am_opt="$git_am_opt --whitespace=${1#--whitespace=}"
- case "${1#--whitespace=}" in
- fix|strip)
- force_rebase=t
- ;;
- esac
- ;;
- --ignore-whitespace)
- git_am_opt="$git_am_opt $1"
- ;;
- --signoff)
- signoff=--signoff
- ;;
- --no-signoff)
- signoff=
- ;;
- --committer-date-is-author-date|--ignore-date)
- git_am_opt="$git_am_opt $1"
- force_rebase=t
- ;;
- -C*)
- git_am_opt="$git_am_opt $1"
- ;;
- --root)
- rebase_root=t
- ;;
- --force-rebase|--no-ff)
- force_rebase=t
- ;;
- --rerere-autoupdate|--no-rerere-autoupdate)
- allow_rerere_autoupdate="$1"
- ;;
- --gpg-sign)
- gpg_sign_opt=-S
- ;;
- --gpg-sign=*)
- gpg_sign_opt="-S${1#--gpg-sign=}"
- ;;
- --)
- shift
- break
- ;;
- *)
- usage
- ;;
- esac
- shift
-done
-test $# -gt 2 && usage
-
-if test -n "$action"
-then
- test -z "$in_progress" && die "$(gettext "No rebase in progress?")"
- # Only interactive rebase uses detailed reflog messages
- if test -n "$interactive_rebase" && test "$GIT_REFLOG_ACTION" = rebase
- then
- GIT_REFLOG_ACTION="rebase -i ($action)"
- export GIT_REFLOG_ACTION
- fi
-fi
-
-if test "$action" = "edit-todo" && test -z "$interactive_rebase"
-then
- die "$(gettext "The --edit-todo action can only be used during interactive rebase.")"
-fi
-
-case "$action" in
-continue)
- # Sanity check
- git rev-parse --verify HEAD >/dev/null ||
- die "$(gettext "Cannot read HEAD")"
- git update-index --ignore-submodules --refresh &&
- git diff-files --quiet --ignore-submodules || {
- echo "$(gettext "You must edit all merge conflicts and then
-mark them as resolved using git add")"
- exit 1
- }
- read_basic_state
- run_specific_rebase
- ;;
-skip)
- output git reset --hard HEAD || exit $?
- read_basic_state
- run_specific_rebase
- ;;
-abort)
- git rerere clear
- read_basic_state
- case "$head_name" in
- refs/*)
- git symbolic-ref -m "rebase: aborting" HEAD $head_name ||
- die "$(eval_gettext "Could not move back to \$head_name")"
- ;;
- esac
- output git reset --hard $orig_head
- finish_rebase
- exit
- ;;
-quit)
- exec rm -rf "$state_dir"
- ;;
-edit-todo)
- run_specific_rebase
- ;;
-show-current-patch)
- run_specific_rebase
- die "BUG: run_specific_rebase is not supposed to return here"
- ;;
-esac
-
-# Make sure no rebase is in progress
-if test -n "$in_progress"
-then
- state_dir_base=${state_dir##*/}
- cmd_live_rebase="git rebase (--continue | --abort | --skip)"
- cmd_clear_stale_rebase="rm -fr \"$state_dir\""
- die "
-$(eval_gettext 'It seems that there is already a $state_dir_base directory, and
-I wonder if you are in the middle of another rebase. If that is the
-case, please try
- $cmd_live_rebase
-If that is not the case, please
- $cmd_clear_stale_rebase
-and run me again. I am stopping in case you still have something
-valuable there.')"
-fi
-
-if test -n "$rebase_root" && test -z "$onto"
-then
- test -z "$interactive_rebase" && interactive_rebase=implied
-fi
-
-if test -n "$keep_empty"
-then
- test -z "$interactive_rebase" && interactive_rebase=implied
-fi
-
-if test -n "$interactive_rebase"
-then
- if test -z "$preserve_merges"
- then
- type=interactive
- else
- type=preserve-merges
- fi
-
- state_dir="$merge_dir"
-elif test -n "$do_merge"
-then
- type=merge
- state_dir="$merge_dir"
-else
- type=am
- state_dir="$apply_dir"
-fi
-
-if test -t 2 && test -z "$GIT_QUIET"
-then
- git_format_patch_opt="$git_format_patch_opt --progress"
-fi
-
-if test -n "$signoff"
-then
- test -n "$preserve_merges" &&
- die "$(gettext "error: cannot combine '--signoff' with '--preserve-merges'")"
- git_am_opt="$git_am_opt $signoff"
- force_rebase=t
-fi
-
-if test -z "$rebase_root"
-then
- case "$#" in
- 0)
- if ! upstream_name=$(git rev-parse --symbolic-full-name \
- --verify -q @{upstream} 2>/dev/null)
- then
- . git-parse-remote
- error_on_missing_default_upstream "rebase" "rebase" \
- "against" "git rebase $(gettext '<branch>')"
- fi
-
- test "$fork_point" = auto && fork_point=t
- ;;
- *) upstream_name="$1"
- if test "$upstream_name" = "-"
- then
- upstream_name="@{-1}"
- fi
- shift
- ;;
- esac
- upstream=$(peel_committish "${upstream_name}") ||
- die "$(eval_gettext "invalid upstream '\$upstream_name'")"
- upstream_arg="$upstream_name"
-else
- if test -z "$onto"
- then
- empty_tree=$(git hash-object -t tree /dev/null)
- onto=$(git commit-tree $empty_tree </dev/null)
- squash_onto="$onto"
- fi
- unset upstream_name
- unset upstream
- test $# -gt 1 && usage
- upstream_arg=--root
-fi
-
-# Make sure the branch to rebase onto is valid.
-onto_name=${onto-"$upstream_name"}
-case "$onto_name" in
-*...*)
- if left=${onto_name%...*} right=${onto_name#*...} &&
- onto=$(git merge-base --all ${left:-HEAD} ${right:-HEAD})
- then
- case "$onto" in
- ?*"$LF"?*)
- die "$(eval_gettext "\$onto_name: there are more than one merge bases")"
- ;;
- '')
- die "$(eval_gettext "\$onto_name: there is no merge base")"
- ;;
- esac
- else
- die "$(eval_gettext "\$onto_name: there is no merge base")"
- fi
- ;;
-*)
- onto=$(peel_committish "$onto_name") ||
- die "$(eval_gettext "Does not point to a valid commit: \$onto_name")"
- ;;
-esac
-
-# If the branch to rebase is given, that is the branch we will rebase
-# $branch_name -- branch/commit being rebased, or HEAD (already detached)
-# $orig_head -- commit object name of tip of the branch before rebasing
-# $head_name -- refs/heads/<that-branch> or "detached HEAD"
-switch_to=
-case "$#" in
-1)
- # Is it "rebase other $branchname" or "rebase other $commit"?
- branch_name="$1"
- switch_to="$1"
-
- # Is it a local branch?
- if git show-ref --verify --quiet -- "refs/heads/$branch_name" &&
- orig_head=$(git rev-parse -q --verify "refs/heads/$branch_name")
- then
- head_name="refs/heads/$branch_name"
- # If not is it a valid ref (branch or commit)?
- elif orig_head=$(git rev-parse -q --verify "$branch_name")
- then
- head_name="detached HEAD"
-
- else
- die "$(eval_gettext "fatal: no such branch/commit '\$branch_name'")"
- fi
- ;;
-0)
- # Do not need to switch branches, we are already on it.
- if branch_name=$(git symbolic-ref -q HEAD)
- then
- head_name=$branch_name
- branch_name=$(expr "z$branch_name" : 'zrefs/heads/\(.*\)')
- else
- head_name="detached HEAD"
- branch_name=HEAD
- fi
- orig_head=$(git rev-parse --verify HEAD) || exit
- ;;
-*)
- die "BUG: unexpected number of arguments left to parse"
- ;;
-esac
-
-if test "$fork_point" = t
-then
- new_upstream=$(git merge-base --fork-point "$upstream_name" \
- "${switch_to:-HEAD}")
- if test -n "$new_upstream"
- then
- restrict_revision=$new_upstream
- fi
-fi
-
-if test "$autostash" = true && ! (require_clean_work_tree) 2>/dev/null
-then
- stash_sha1=$(git stash create "autostash") ||
- die "$(gettext 'Cannot autostash')"
-
- mkdir -p "$state_dir" &&
- echo $stash_sha1 >"$state_dir/autostash" &&
- stash_abbrev=$(git rev-parse --short $stash_sha1) &&
- echo "$(eval_gettext 'Created autostash: $stash_abbrev')" &&
- git reset --hard
-fi
-
-require_clean_work_tree "rebase" "$(gettext "Please commit or stash them.")"
-
-# Now we are rebasing commits $upstream..$orig_head (or with --root,
-# everything leading up to $orig_head) on top of $onto
-
-# Check if we are already based on $onto with linear history,
-# but this should be done only when upstream and onto are the same
-# and if this is not an interactive rebase.
-mb=$(git merge-base "$onto" "$orig_head")
-if test -z "$interactive_rebase" && test "$upstream" = "$onto" &&
- test "$mb" = "$onto" && test -z "$restrict_revision" &&
- # linear history?
- ! (git rev-list --parents "$onto".."$orig_head" | sane_grep " .* ") > /dev/null
-then
- if test -z "$force_rebase"
- then
- # Lazily switch to the target branch if needed...
- test -z "$switch_to" ||
- GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $switch_to" \
- git checkout -q "$switch_to" --
- if test "$branch_name" = "HEAD" &&
- ! git symbolic-ref -q HEAD
- then
- say "$(eval_gettext "HEAD is up to date.")"
- else
- say "$(eval_gettext "Current branch \$branch_name is up to date.")"
- fi
- finish_rebase
- exit 0
- else
- if test "$branch_name" = "HEAD" &&
- ! git symbolic-ref -q HEAD
- then
- say "$(eval_gettext "HEAD is up to date, rebase forced.")"
- else
- say "$(eval_gettext "Current branch \$branch_name is up to date, rebase forced.")"
- fi
- fi
-fi
-
-# If a hook exists, give it a chance to interrupt
-run_pre_rebase_hook "$upstream_arg" "$@"
-
-if test -n "$diffstat"
-then
- if test -n "$verbose"
- then
- echo "$(eval_gettext "Changes from \$mb to \$onto:")"
- fi
- # We want color (if set), but no pager
- GIT_PAGER='' git diff --stat --summary "$mb" "$onto"
-fi
-
-test -n "$interactive_rebase" && run_specific_rebase
-
-# Detach HEAD and reset the tree
-say "$(gettext "First, rewinding head to replay your work on top of it...")"
-
-GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION: checkout $onto_name" \
- git checkout -q "$onto^0" || die "could not detach HEAD"
-git update-ref ORIG_HEAD $orig_head
-
-# If the $onto is a proper descendant of the tip of the branch, then
-# we just fast-forwarded.
-if test "$mb" = "$orig_head"
-then
- say "$(eval_gettext "Fast-forwarded \$branch_name to \$onto_name.")"
- move_to_original_branch
- finish_rebase
- exit 0
-fi
-
-if test -n "$rebase_root"
-then
- revisions="$onto..$orig_head"
-else
- revisions="${restrict_revision-$upstream}..$orig_head"
-fi
-
-run_specific_rebase
my (@suppress_cc);
my ($auto_8bit_encoding);
my ($compose_encoding);
-my ($target_xfer_encoding);
+my $target_xfer_encoding = 'auto';
my ($debug_net_smtp) = 0; # Net::SMTP, see send_message()
if ($validate) {
foreach my $f (@files) {
unless (-p $f) {
- my $error = validate_patch($f);
+ my $error = validate_patch($f, $target_xfer_encoding);
$error and die sprintf(__("fatal: %s: %s\nwarning: no patches were sent\n"),
$f, $error);
}
SSL => 1);
}
}
- else {
+ elsif (!$smtp) {
$smtp_server_port ||= 25;
$smtp ||= Net::SMTP->new($smtp_server,
Hello => $smtp_domain,
$smtp->starttls(ssl_verify_params())
or die sprintf(__("STARTTLS failed! %s"), IO::Socket::SSL::errstr());
}
- $smtp_encryption = '';
# Send EHLO again to receive fresh
# supported commands
$smtp->hello($smtp_domain);
}
}
}
- if (defined $target_xfer_encoding) {
- $xfer_encoding = '8bit' if not defined $xfer_encoding;
- $message = apply_transfer_encoding(
- $message, $xfer_encoding, $target_xfer_encoding);
- $xfer_encoding = $target_xfer_encoding;
- }
- if (defined $xfer_encoding) {
- push @xh, "Content-Transfer-Encoding: $xfer_encoding";
- }
- if (defined $xfer_encoding or $has_content_type) {
- unshift @xh, 'MIME-Version: 1.0' unless $has_mime_version;
- }
+ $xfer_encoding = '8bit' if not defined $xfer_encoding;
+ ($message, $xfer_encoding) = apply_transfer_encoding(
+ $message, $xfer_encoding, $target_xfer_encoding);
+ push @xh, "Content-Transfer-Encoding: $xfer_encoding";
+ unshift @xh, 'MIME-Version: 1.0' unless $has_mime_version;
$needs_confirm = (
$confirm eq "always" or
$message = MIME::Base64::decode($message)
if ($from eq 'base64');
+ $to = ($message =~ /.{999,}/) ? 'quoted-printable' : '8bit'
+ if $to eq 'auto';
+
die __("cannot send message as 7bit")
if ($to eq '7bit' and $message =~ /[^[:ascii:]]/);
- return $message
+ return ($message, $to)
if ($to eq '7bit' or $to eq '8bit');
- return MIME::QuotedPrint::encode($message, "\n", 0)
+ return (MIME::QuotedPrint::encode($message, "\n", 0), $to)
if ($to eq 'quoted-printable');
- return MIME::Base64::encode($message, "\n")
+ return (MIME::Base64::encode($message, "\n"), $to)
if ($to eq 'base64');
die __("invalid transfer encoding");
}
}
sub validate_patch {
- my $fn = shift;
+ my ($fn, $xfer_encoding) = @_;
if ($repo) {
my $validate_hook = catfile(catdir($repo->repo_path(), 'hooks'),
return $hook_error if $hook_error;
}
- open(my $fh, '<', $fn)
- or die sprintf(__("unable to open %s: %s\n"), $fn, $!);
- while (my $line = <$fh>) {
- if (length($line) > 998) {
- return sprintf(__("%s: patch contains a line longer than 998 characters"), $.);
+ # Any long lines will be automatically fixed if we use a suitable transfer
+ # encoding.
+ unless ($xfer_encoding =~ /^(?:auto|quoted-printable|base64)$/) {
+ open(my $fh, '<', $fn)
+ or die sprintf(__("unable to open %s: %s\n"), $fn, $!);
+ while (my $line = <$fh>) {
+ if (length($line) > 998) {
+ return sprintf(__("%s: patch contains a line longer than 998 characters"), $.);
+ }
}
}
return;
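
A rough sketch of the resulting behaviour (the patch file names are placeholders): with the 'auto' default, a patch containing a line longer than 998 characters is no longer rejected up front but re-encoded as quoted-printable, while an explicit 8bit encoding keeps the strict check:

	$ git send-email --transfer-encoding=auto 0001-example.patch
	$ git send-email --transfer-encoding=8bit 0001-example.patch   # still validated
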
trace_argv_printf(argv, "trace: built-in: git");
+ validate_cache_entries(&the_index);
status = p->fn(argc, argv, prefix);
+ validate_cache_entries(&the_index);
+
if (status)
return status;
{ "pull", cmd_pull, RUN_SETUP | NEED_WORK_TREE },
{ "push", cmd_push, RUN_SETUP },
{ "read-tree", cmd_read_tree, RUN_SETUP | SUPPORT_SUPER_PREFIX},
+ /*
+ * NEEDSWORK: Until the builtin rebase is independent and no longer
+ * redirects to the rebase shell script, this is kept as is; it should
+ * then be changed to RUN_SETUP | NEED_WORK_TREE
+ */
+ { "rebase", cmd_rebase },
{ "rebase--interactive", cmd_rebase__interactive, RUN_SETUP | NEED_WORK_TREE },
{ "receive-pack", cmd_receive_pack },
{ "reflog", cmd_reflog, RUN_SETUP },
{ 'R', "\n[GNUPG:] REVKEYSIG "},
};
-void parse_gpg_output(struct signature_check *sigc)
+static void parse_gpg_output(struct signature_check *sigc)
{
const char *buf = sigc->gpg_status;
int i;
*/
size_t parse_signature(const char *buf, size_t size);
-void parse_gpg_output(struct signature_check *);
-
/*
* Create a detached signature for the contents of "buffer" and append
* it after "signature"; "buffer" and "signature" can be the same
color_set(opt->colors[GREP_COLOR_MATCH_SELECTED], GIT_COLOR_BOLD_RED);
color_set(opt->colors[GREP_COLOR_SELECTED], "");
color_set(opt->colors[GREP_COLOR_SEP], GIT_COLOR_CYAN);
+ opt->only_matching = 0;
opt->color = -1;
opt->output = std_output;
}
opt->pattern_tail = &opt->pattern_list;
opt->header_tail = &opt->header_list;
+ opt->only_matching = def->only_matching;
opt->color = def->color;
opt->extended_regexp_option = def->extended_regexp_option;
opt->pattern_type_option = def->pattern_type_option;
return hit;
}
-static void show_line(struct grep_opt *opt, char *bol, char *eol,
- const char *name, unsigned lno, ssize_t cno, char sign)
+static void show_line_header(struct grep_opt *opt, const char *name,
+ unsigned lno, ssize_t cno, char sign)
{
- int rest = eol - bol;
- const char *match_color, *line_color = NULL;
-
- if (opt->file_break && opt->last_shown == 0) {
- if (opt->show_hunk_mark)
- opt->output(opt, "\n", 1);
- } else if (opt->pre_context || opt->post_context || opt->funcbody) {
- if (opt->last_shown == 0) {
- if (opt->show_hunk_mark) {
- output_color(opt, "--", 2, opt->colors[GREP_COLOR_SEP]);
- opt->output(opt, "\n", 1);
- }
- } else if (lno > opt->last_shown + 1) {
- output_color(opt, "--", 2, opt->colors[GREP_COLOR_SEP]);
- opt->output(opt, "\n", 1);
- }
- }
if (opt->heading && opt->last_shown == 0) {
output_color(opt, name, strlen(name), opt->colors[GREP_COLOR_FILENAME]);
opt->output(opt, "\n", 1);
output_color(opt, buf, strlen(buf), opt->colors[GREP_COLOR_COLUMNNO]);
output_sep(opt, sign);
}
- if (opt->color) {
+}
+
+static void show_line(struct grep_opt *opt, char *bol, char *eol,
+ const char *name, unsigned lno, ssize_t cno, char sign)
+{
+ int rest = eol - bol;
+ const char *match_color = NULL;
+ const char *line_color = NULL;
+
+ if (opt->file_break && opt->last_shown == 0) {
+ if (opt->show_hunk_mark)
+ opt->output(opt, "\n", 1);
+ } else if (opt->pre_context || opt->post_context || opt->funcbody) {
+ if (opt->last_shown == 0) {
+ if (opt->show_hunk_mark) {
+ output_color(opt, "--", 2, opt->colors[GREP_COLOR_SEP]);
+ opt->output(opt, "\n", 1);
+ }
+ } else if (lno > opt->last_shown + 1) {
+ output_color(opt, "--", 2, opt->colors[GREP_COLOR_SEP]);
+ opt->output(opt, "\n", 1);
+ }
+ }
+ if (!opt->only_matching) {
+ /*
+ * In case the line we're being called with contains more than
+ * one match, leave printing each header to the loop below.
+ */
+ show_line_header(opt, name, lno, cno, sign);
+ }
+ if (opt->color || opt->only_matching) {
regmatch_t match;
enum grep_context ctx = GREP_CONTEXT_BODY;
int ch = *eol;
int eflags = 0;
- if (sign == ':')
- match_color = opt->colors[GREP_COLOR_MATCH_SELECTED];
- else
- match_color = opt->colors[GREP_COLOR_MATCH_CONTEXT];
- if (sign == ':')
- line_color = opt->colors[GREP_COLOR_SELECTED];
- else if (sign == '-')
- line_color = opt->colors[GREP_COLOR_CONTEXT];
- else if (sign == '=')
- line_color = opt->colors[GREP_COLOR_FUNCTION];
+ if (opt->color) {
+ if (sign == ':')
+ match_color = opt->colors[GREP_COLOR_MATCH_SELECTED];
+ else
+ match_color = opt->colors[GREP_COLOR_MATCH_CONTEXT];
+ if (sign == ':')
+ line_color = opt->colors[GREP_COLOR_SELECTED];
+ else if (sign == '-')
+ line_color = opt->colors[GREP_COLOR_CONTEXT];
+ else if (sign == '=')
+ line_color = opt->colors[GREP_COLOR_FUNCTION];
+ }
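+ /*
+ * With --only-matching, print a header plus just the matched text
+ * for every match on the line; otherwise print the text leading up
+ * to each match in the line color and the match in the match color.
+ */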
*eol = '\0';
while (next_match(opt, bol, eol, ctx, &match, eflags)) {
if (match.rm_so == match.rm_eo)
break;
- output_color(opt, bol, match.rm_so, line_color);
+ if (opt->only_matching)
+ show_line_header(opt, name, lno, cno, sign);
+ else
+ output_color(opt, bol, match.rm_so, line_color);
output_color(opt, bol + match.rm_so,
match.rm_eo - match.rm_so, match_color);
+ if (opt->only_matching)
+ opt->output(opt, "\n", 1);
bol += match.rm_eo;
+ cno += match.rm_eo;
rest -= match.rm_eo;
eflags = REG_NOTBOL;
}
*eol = ch;
}
- output_color(opt, bol, rest, line_color);
- opt->output(opt, "\n", 1);
+ if (!opt->only_matching) {
+ output_color(opt, bol, rest, line_color);
+ opt->output(opt, "\n", 1);
+ }
}
#ifndef NO_PTHREADS
int relative;
int pathname;
int null_following_name;
+ int only_matching;
int color;
int max_depth;
int funcname;
int get_sha1_hex(const char *hex, unsigned char *sha1)
{
int i;
- for (i = 0; i < GIT_SHA1_RAWSZ; i++) {
+ for (i = 0; i < the_hash_algo->rawsz; i++) {
int val = hex2chr(hex);
if (val < 0)
return -1;
{
int ret = get_oid_hex(hex, oid);
if (!ret)
- *end = hex + GIT_SHA1_HEXSZ;
+ *end = hex + the_hash_algo->hexsz;
return ret;
}
char *buf = buffer;
int i;
- for (i = 0; i < GIT_SHA1_RAWSZ; i++) {
+ for (i = 0; i < the_hash_algo->rawsz; i++) {
unsigned int val = *sha1++;
*buf++ = hex[val >> 4];
*buf++ = hex[val & 0xf];
{
const char *name_nons = strip_namespace(name);
struct strbuf *buf = cb_data;
- struct object *o = parse_object(oid);
+ struct object *o = parse_object(the_repository, oid);
if (!o)
return 0;
strbuf_addf(buf, "%s\t%s\n", oid_to_hex(oid), name_nons);
if (o->type == OBJ_TAG) {
- o = deref_tag(o, name, 0);
+ o = deref_tag(the_repository, o, name, 0);
if (!o)
return 0;
strbuf_addf(buf, "%s\t%s^{}\n", oid_to_hex(&o->oid),
#include "cache.h"
+#include "repository.h"
#include "commit.h"
#include "tag.h"
#include "blob.h"
#include "packfile.h"
#include "object-store.h"
+
#ifdef EXPAT_NEEDS_XMLPARSE_H
#include <xmlparse.h>
#else
{
struct object *obj;
- obj = lookup_object(oid->hash);
+ obj = lookup_object(the_repository, oid->hash);
if (!obj)
- obj = parse_object(oid);
+ obj = parse_object(the_repository, oid);
/* Ignore remote objects that don't exist locally */
if (!obj)
while (tree_entry(&desc, &entry))
switch (object_type(entry.mode)) {
case OBJ_TREE:
- p = process_tree(lookup_tree(entry.oid), p);
+ p = process_tree(lookup_tree(the_repository, entry.oid),
+ p);
break;
case OBJ_BLOB:
- p = process_blob(lookup_blob(entry.oid), p);
+ p = process_blob(lookup_blob(the_repository, entry.oid),
+ p);
break;
default:
/* Subproject commit - not in this repository */
return;
}
- o = parse_object(&ref->old_oid);
+ o = parse_object(the_repository, &ref->old_oid);
if (!o) {
fprintf(stderr,
"Unable to parse object %s for remote ref %s\n",
oid_to_hex(&ref->old_oid), ls->dentry_name);
if (o->type == OBJ_TAG) {
- o = deref_tag(o, ls->dentry_name, 0);
+ o = deref_tag(the_repository, o, ls->dentry_name, 0);
if (o)
strbuf_addf(buf, "%s\t%s^{}\n",
oid_to_hex(&o->oid), ls->dentry_name);
struct object *obj = revs->pending.objects[i].item;
if (obj->flags & UNINTERESTING)
continue;
- obj = deref_tag(obj, NULL, 0);
+ obj = deref_tag(the_repository, obj, NULL, 0);
if (obj->type != OBJ_COMMIT)
die("Non commit %s?", revs->pending.objects[i].name);
if (commit)
lines, anchor, &begin, &end,
full_name))
die("malformed -L argument '%s'", range_part);
- if (lines < end || ((lines || begin) && lines < begin))
+ if ((!lines && (begin || end)) || lines < begin)
die("file %s has only %lu lines", name_part, lines);
if (begin < 1)
begin = 1;
- if (end < 1)
+ if (end < 1 || lines < end)
end = lines;
begin--;
line_log_data_insert(&ranges, full_name, begin, end);
else if (!num)
*ret = begin;
else
- *ret = begin + num;
+ *ret = (begin + num > 0) ? begin + num : 1;
return term;
}
return spec;
pathlen = path->len;
strbuf_addstr(path, name);
- if (filter_fn)
+ if (!(obj->flags & USER_GIVEN) && filter_fn)
r = filter_fn(LOFS_BLOB, obj,
path->buf, &path->buf[pathlen],
filter_data);
}
strbuf_addstr(base, name);
- if (filter_fn)
+ if (!(obj->flags & USER_GIVEN) && filter_fn)
r = filter_fn(LOFS_BEGIN_TREE, obj,
base->buf, &base->buf[baselen],
filter_data);
if (S_ISDIR(entry.mode))
process_tree(revs,
- lookup_tree(entry.oid),
+ lookup_tree(the_repository, entry.oid),
show, base, entry.path,
cb_data, filter_fn, filter_data);
else if (S_ISGITLINK(entry.mode))
cb_data);
else
process_blob(revs,
- lookup_blob(entry.oid),
+ lookup_blob(the_repository, entry.oid),
show, base, entry.path,
cb_data, filter_fn, filter_data);
}
- if (filter_fn) {
+ if (!(obj->flags & USER_GIVEN) && filter_fn) {
r = filter_fn(LOFS_END_TREE, obj,
base->buf, &base->buf[baselen],
filter_data);
#include "config.h"
#include "diff.h"
#include "object-store.h"
+#include "repository.h"
#include "commit.h"
#include "tag.h"
#include "graph.h"
warning("invalid replace ref %s", refname);
return 0;
}
- obj = parse_object(&original_oid);
+ obj = parse_object(the_repository, &original_oid);
if (obj)
add_name_decoration(DECORATION_GRAFTED, "replaced", obj);
return 0;
}
- obj = parse_object(oid);
+ obj = parse_object(the_repository, oid);
if (!obj)
return 0;
if (!obj)
break;
if (!obj->parsed)
- parse_object(&obj->oid);
+ parse_object(the_repository, &obj->oid);
add_name_decoration(DECORATION_REF_TAG, refname, obj);
}
return 0;
static int add_graft_decoration(const struct commit_graft *graft, void *cb_data)
{
- struct commit *commit = lookup_commit(&graft->oid);
+ struct commit *commit = lookup_commit(the_repository, &graft->oid);
if (!commit)
return 0;
add_name_decoration(DECORATION_GRAFTED, "grafted", &commit->object);
size_t payload_size, gpg_message_offset;
hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &oid);
- tag = lookup_tag(&oid);
+ tag = lookup_tag(the_repository, &oid);
if (!tag)
return -1; /* error message already given */
strbuf_init(&verify_message, 256);
- if (parse_tag_buffer(tag, extra->value, extra->len))
+ if (parse_tag_buffer(the_repository, tag, extra->value, extra->len))
strbuf_addstr(&verify_message, "malformed mergetag\n");
else if (is_common_merge(commit) &&
!oidcmp(&tag->tagged->oid,
struct strbuf msgbuf = STRBUF_INIT;
struct log_info *log = opt->loginfo;
struct commit *commit = log->commit, *parent = log->parent;
- int abbrev_commit = opt->abbrev_commit ? opt->abbrev : GIT_SHA1_HEXSZ;
+ int abbrev_commit = opt->abbrev_commit ? opt->abbrev : the_hash_algo->hexsz;
const char *extra_headers = opt->extra_headers;
struct pretty_print_context ctx = {0};
#include "cache.h"
#include "mem-pool.h"
-static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool, size_t block_alloc)
+/* Grow the pool in roughly 1MB blocks (block header included). */
+#define BLOCK_GROWTH_SIZE (1024 * 1024 - sizeof(struct mp_block))
+
+/*
+ * Allocate a new mp_block and insert it after the block specified in
+ * `insert_after`. If `insert_after` is NULL, then insert block at the
+ * head of the linked list.
+ */
+static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool, size_t block_alloc, struct mp_block *insert_after)
{
struct mp_block *p;
mem_pool->pool_alloc += sizeof(struct mp_block) + block_alloc;
p = xmalloc(st_add(sizeof(struct mp_block), block_alloc));
- p->next_block = mem_pool->mp_block;
+
p->next_free = (char *)p->space;
p->end = p->next_free + block_alloc;
- mem_pool->mp_block = p;
+
+ if (insert_after) {
+ p->next_block = insert_after->next_block;
+ insert_after->next_block = p;
+ } else {
+ p->next_block = mem_pool->mp_block;
+ mem_pool->mp_block = p;
+ }
return p;
}
+void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size)
+{
+ struct mem_pool *pool;
+
+ if (*mem_pool)
+ return;
+
+ pool = xcalloc(1, sizeof(*pool));
+
+ pool->block_alloc = BLOCK_GROWTH_SIZE;
+
+ if (initial_size > 0)
+ mem_pool_alloc_block(pool, initial_size, NULL);
+
+ *mem_pool = pool;
+}
+
+void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory)
+{
+ struct mp_block *block, *block_to_free;
+
+ block = mem_pool->mp_block;
+ while (block) {
+ block_to_free = block;
+ block = block->next_block;
+
+ if (invalidate_memory)
+ memset(block_to_free->space, 0xDD, ((char *)block_to_free->end) - ((char *)block_to_free->space));
+
+ free(block_to_free);
+ }
+
+ free(mem_pool);
+}
+
void *mem_pool_alloc(struct mem_pool *mem_pool, size_t len)
{
- struct mp_block *p;
+ struct mp_block *p = NULL;
void *r;
/* round up to a 'uintmax_t' alignment */
if (len & (sizeof(uintmax_t) - 1))
len += sizeof(uintmax_t) - (len & (sizeof(uintmax_t) - 1));
- for (p = mem_pool->mp_block; p; p = p->next_block)
- if (p->end - p->next_free >= len)
- break;
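+ /*
+ * Only the most recently added block is checked for space; older
+ * blocks are treated as full and never revisited.
+ */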
+ if (mem_pool->mp_block &&
+ mem_pool->mp_block->end - mem_pool->mp_block->next_free >= len)
+ p = mem_pool->mp_block;
if (!p) {
- if (len >= (mem_pool->block_alloc / 2)) {
- mem_pool->pool_alloc += len;
- return xmalloc(len);
- }
+ if (len >= (mem_pool->block_alloc / 2)) {
+ /* dedicated, fully consumed block for an oversized request */
+ p = mem_pool_alloc_block(mem_pool, len, mem_pool->mp_block);
+ p->next_free = p->end;
+ return p->space;
+ }
- p = mem_pool_alloc_block(mem_pool, mem_pool->block_alloc);
+ p = mem_pool_alloc_block(mem_pool, mem_pool->block_alloc, NULL);
}
r = p->next_free;
memset(r, 0, len);
return r;
}
+
+int mem_pool_contains(struct mem_pool *mem_pool, void *mem)
+{
+ struct mp_block *p;
+
+ /* Check if memory is allocated in a block */
+ for (p = mem_pool->mp_block; p; p = p->next_block)
+ if ((mem >= ((void *)p->space)) &&
+ (mem < ((void *)p->end)))
+ return 1;
+
+ return 0;
+}
+
+void mem_pool_combine(struct mem_pool *dst, struct mem_pool *src)
+{
+ struct mp_block *p;
+
+ /* Append the blocks from src to dst */
+ if (dst->mp_block && src->mp_block) {
+ /*
+ * src and dst have blocks, append
+ * blocks from src to dst.
+ */
+ p = dst->mp_block;
+ while (p->next_block)
+ p = p->next_block;
+
+ p->next_block = src->mp_block;
+ } else if (src->mp_block) {
+ /*
+ * src has blocks, dst is empty.
+ */
+ dst->mp_block = src->mp_block;
+ } else {
+ /* src is empty, nothing to do. */
+ }
+
+ dst->pool_alloc += src->pool_alloc;
+ src->pool_alloc = 0;
+ src->mp_block = NULL;
+}
size_t pool_alloc;
};
+/*
+ * Initialize mem_pool with specified initial size.
+ */
+void mem_pool_init(struct mem_pool **mem_pool, size_t initial_size);
+
+/*
+ * Discard a memory pool and free all the memory it is responsible for.
+ */
+void mem_pool_discard(struct mem_pool *mem_pool, int invalidate_memory);
+
/*
* Alloc memory from the mem_pool.
*/
*/
void *mem_pool_calloc(struct mem_pool *pool, size_t count, size_t size);
+/*
+ * Move the memory associated with the 'src' pool to the 'dst' pool. The 'src'
+ * pool will be empty and not contain any memory. It still needs to be free'd
+ * with a call to `mem_pool_discard`.
+ */
+void mem_pool_combine(struct mem_pool *dst, struct mem_pool *src);
+
+/*
+ * Check if a memory pointed at by 'mem' is part of the range of
+ * memory managed by the specified mem_pool.
+ */
+int mem_pool_contains(struct mem_pool *mem_pool, void *mem);
+
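+/*
+ * Typical life cycle (sketch):
+ *
+ *   struct mem_pool *pool = NULL;
+ *   void *p;
+ *
+ *   mem_pool_init(&pool, 0);
+ *   p = mem_pool_alloc(pool, 512);
+ *   ... use p; do not free() it individually ...
+ *   mem_pool_discard(pool, 0);
+ */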
#endif
#include "lockfile.h"
#include "cache-tree.h"
#include "object-store.h"
+#include "repository.h"
#include "commit.h"
#include "blob.h"
#include "builtin.h"
}
if (!oidcmp(&two->object.oid, &shifted))
return two;
- return lookup_tree(&shifted);
+ return lookup_tree(the_repository, &shifted);
}
static struct commit *make_virtual_commit(struct tree *tree, const char *comment)
struct cache_entry *ce;
int ret;
- ce = make_cache_entry(mode, oid ? oid->hash : null_sha1, path, stage, 0);
+ ce = make_cache_entry(&the_index, mode, oid ? oid : &null_oid, path, stage, 0);
if (!ce)
return err(o, _("add_cacheinfo failed for path '%s'; merge aborting."), path);
if (refresh) {
struct cache_entry *nce;
- nce = refresh_cache_entry(ce, CE_MATCH_REFRESH | CE_MATCH_IGNORE_MISSING);
+ nce = refresh_cache_entry(&the_index, ce, CE_MATCH_REFRESH | CE_MATCH_IGNORE_MISSING);
if (!nce)
return err(o, _("add_cacheinfo failed to refresh for path '%s'; merge aborting."), path);
if (nce != ce)
return NULL;
}
- result = lookup_tree(&active_cache_tree->oid);
+ result = lookup_tree(the_repository, &active_cache_tree->oid);
return result;
}
return 0;
}
- if (!(commit_base = lookup_commit_reference(base)) ||
- !(commit_a = lookup_commit_reference(a)) ||
- !(commit_b = lookup_commit_reference(b))) {
+ if (!(commit_base = lookup_commit_reference(the_repository, base)) ||
+ !(commit_a = lookup_commit_reference(the_repository, a)) ||
+ !(commit_b = lookup_commit_reference(the_repository, b))) {
output(o, 1, _("Failed to merge submodule %s (commits not present)"), path);
return 0;
}
struct tree **result)
{
int code, clean;
+ struct strbuf sb = STRBUF_INIT;
+
+ if (!o->call_depth && index_has_changes(&the_index, head, &sb)) {
+ err(o, _("Your local changes to the following files would be overwritten by merge:\n %s"),
+ sb.buf);
+ return -1;
+ }
if (o->subtree_shift) {
merge = shift_tree_object(head, merge, o->subtree_shift);
}
if (oid_eq(&common->object.oid, &merge->object.oid)) {
- struct strbuf sb = STRBUF_INIT;
-
- if (!o->call_depth && index_has_changes(&sb)) {
- err(o, _("Dirty index: cannot merge (dirty: %s)"),
- sb.buf);
- return 0;
- }
output(o, 0, _("Already up to date!"));
*result = head;
return 1;
/* if there is no common ancestor, use an empty tree */
struct tree *tree;
- tree = lookup_tree(the_hash_algo->empty_tree);
+ tree = lookup_tree(the_repository, the_repository->hash_algo->empty_tree);
merged_common_ancestors = make_virtual_commit(tree, "ancestor");
}
{
struct object *object;
- object = deref_tag(parse_object(oid), name, strlen(name));
+ object = deref_tag(the_repository, parse_object(the_repository, oid),
+ name,
+ strlen(name));
if (!object)
return NULL;
if (object->type == OBJ_TREE)
return oid_to_hex(commit ? &commit->object.oid : the_hash_algo->empty_tree);
}
-int index_has_changes(struct strbuf *sb)
-{
- struct object_id head;
- int i;
-
- if (!get_oid_tree("HEAD", &head)) {
- struct diff_options opt;
-
- diff_setup(&opt);
- opt.flags.exit_with_status = 1;
- if (!sb)
- opt.flags.quick = 1;
- do_diff_cache(&head, &opt);
- diffcore_std(&opt);
- for (i = 0; sb && i < diff_queued_diff.nr; i++) {
- if (i)
- strbuf_addch(sb, ' ');
- strbuf_addstr(sb, diff_queued_diff.queue[i]->two->path);
- }
- diff_flush(&opt);
- return opt.flags.has_changes != 0;
- } else {
- for (i = 0; sb && i < active_nr; i++) {
- if (i)
- strbuf_addch(sb, ' ');
- strbuf_addstr(sb, active_cache[i]->name);
- }
- return !!active_nr;
- }
-}
-
int try_merge_command(const char *strategy, size_t xopts_nr,
const char **xopts, struct commit_list *common,
const char *head_arg, struct commit_list *remotes)
--- /dev/null
+#include "cache.h"
+#include "default.h"
+#include "../commit.h"
+#include "../fetch-negotiator.h"
+#include "../prio-queue.h"
+#include "../refs.h"
+#include "../tag.h"
+
+/* Remember to update object flag allocation in object.h */
+#define COMMON (1U << 2)
+#define COMMON_REF (1U << 3)
+#define SEEN (1U << 4)
+#define POPPED (1U << 5)
+
+static int marked;
+
+struct negotiation_state {
+ struct prio_queue rev_list;
+ int non_common_revs;
+};
+
+static void rev_list_push(struct negotiation_state *ns,
+ struct commit *commit, int mark)
+{
+ if (!(commit->object.flags & mark)) {
+ commit->object.flags |= mark;
+
+ if (parse_commit(commit))
+ return;
+
+ prio_queue_put(&ns->rev_list, commit);
+
+ if (!(commit->object.flags & COMMON))
+ ns->non_common_revs++;
+ }
+}
+
+static int clear_marks(const char *refname, const struct object_id *oid,
+ int flag, void *cb_data)
+{
+ struct object *o = deref_tag(the_repository, parse_object(the_repository, oid), refname, 0);
+
+ if (o && o->type == OBJ_COMMIT)
+ clear_commit_marks((struct commit *)o,
+ COMMON | COMMON_REF | SEEN | POPPED);
+ return 0;
+}
+
+/*
+ * This function marks a rev and its ancestors as common.
+ * In some cases, it is desirable to mark only the ancestors (for example
+ * when only the server does not yet know that they are common).
+ */
+static void mark_common(struct negotiation_state *ns, struct commit *commit,
+ int ancestors_only, int dont_parse)
+{
+ if (commit != NULL && !(commit->object.flags & COMMON)) {
+ struct object *o = (struct object *)commit;
+
+ if (!ancestors_only)
+ o->flags |= COMMON;
+
+ if (!(o->flags & SEEN))
+ rev_list_push(ns, commit, SEEN);
+ else {
+ struct commit_list *parents;
+
+ if (!ancestors_only && !(o->flags & POPPED))
+ ns->non_common_revs--;
+ if (!o->parsed && !dont_parse)
+ if (parse_commit(commit))
+ return;
+
+ for (parents = commit->parents;
+ parents;
+ parents = parents->next)
+ mark_common(ns, parents->item, 0,
+ dont_parse);
+ }
+ }
+}
+
+/*
+ * Get the next rev to send, ignoring the common.
+ */
+static const struct object_id *get_rev(struct negotiation_state *ns)
+{
+ struct commit *commit = NULL;
+
+ while (commit == NULL) {
+ unsigned int mark;
+ struct commit_list *parents;
+
+ if (ns->rev_list.nr == 0 || ns->non_common_revs == 0)
+ return NULL;
+
+ commit = prio_queue_get(&ns->rev_list);
+ parse_commit(commit);
+ parents = commit->parents;
+
+ commit->object.flags |= POPPED;
+ if (!(commit->object.flags & COMMON))
+ ns->non_common_revs--;
+
+ if (commit->object.flags & COMMON) {
+ /* do not send "have", and ignore ancestors */
+ commit = NULL;
+ mark = COMMON | SEEN;
+ } else if (commit->object.flags & COMMON_REF)
+ /* send "have", and ignore ancestors */
+ mark = COMMON | SEEN;
+ else
+ /* send "have", also for its ancestors */
+ mark = SEEN;
+
+ while (parents) {
+ if (!(parents->item->object.flags & SEEN))
+ rev_list_push(ns, parents->item, mark);
+ if (mark & COMMON)
+ mark_common(ns, parents->item, 1, 0);
+ parents = parents->next;
+ }
+ }
+
+ return &commit->object.oid;
+}
+
+static void known_common(struct fetch_negotiator *n, struct commit *c)
+{
+ if (!(c->object.flags & SEEN)) {
+ rev_list_push(n->data, c, COMMON_REF | SEEN);
+ mark_common(n->data, c, 1, 1);
+ }
+}
+
+static void add_tip(struct fetch_negotiator *n, struct commit *c)
+{
+ n->known_common = NULL;
+ rev_list_push(n->data, c, SEEN);
+}
+
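+/*
+ * Once the first "have" is requested, known_common() and add_tip() must
+ * not be called any more; clearing the callbacks makes a violation fail
+ * loudly.
+ */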
+static const struct object_id *next(struct fetch_negotiator *n)
+{
+ n->known_common = NULL;
+ n->add_tip = NULL;
+ return get_rev(n->data);
+}
+
+static int ack(struct fetch_negotiator *n, struct commit *c)
+{
+ int known_to_be_common = !!(c->object.flags & COMMON);
+ mark_common(n->data, c, 0, 1);
+ return known_to_be_common;
+}
+
+static void release(struct fetch_negotiator *n)
+{
+ clear_prio_queue(&((struct negotiation_state *)n->data)->rev_list);
+ FREE_AND_NULL(n->data);
+}
+
+void default_negotiator_init(struct fetch_negotiator *negotiator)
+{
+ struct negotiation_state *ns;
+ negotiator->known_common = known_common;
+ negotiator->add_tip = add_tip;
+ negotiator->next = next;
+ negotiator->ack = ack;
+ negotiator->release = release;
+ negotiator->data = ns = xcalloc(1, sizeof(*ns));
+ ns->rev_list.compare = compare_commits_by_commit_date;
+
+ if (marked)
+ for_each_ref(clear_marks, NULL);
+ marked = 1;
+}
--- /dev/null
+#ifndef NEGOTIATOR_DEFAULT_H
+#define NEGOTIATOR_DEFAULT_H
+
+struct fetch_negotiator;
+
+void default_negotiator_init(struct fetch_negotiator *negotiator);
+
+#endif
--- /dev/null
+#include "cache.h"
+#include "skipping.h"
+#include "../commit.h"
+#include "../fetch-negotiator.h"
+#include "../prio-queue.h"
+#include "../refs.h"
+#include "../tag.h"
+
+/* Remember to update object flag allocation in object.h */
+/*
+ * Both us and the server know that both parties have this object.
+ */
+#define COMMON (1U << 2)
+/*
+ * The server has told us that it has this object. We still need to tell the
+ * server that we have this object (or one of its descendants), but since we are
+ * going to do that, we do not need to tell the server about its ancestors.
+ */
+#define ADVERTISED (1U << 3)
+/*
+ * This commit has entered the priority queue.
+ */
+#define SEEN (1U << 4)
+/*
+ * This commit has left the priority queue.
+ */
+#define POPPED (1U << 5)
+
+static int marked;
+
+/*
+ * An entry in the priority queue.
+ */
+struct entry {
+ struct commit *commit;
+
+ /*
+ * Used only if commit is not COMMON.
+ */
+ uint16_t original_ttl;
+ uint16_t ttl;
+};
+
+struct data {
+ struct prio_queue rev_list;
+
+ /*
+ * The number of non-COMMON commits in rev_list.
+ */
+ int non_common_revs;
+};
+
+static int compare(const void *a_, const void *b_, void *unused)
+{
+ const struct entry *a = a_;
+ const struct entry *b = b_;
+ return compare_commits_by_commit_date(a->commit, b->commit, NULL);
+}
+
+static struct entry *rev_list_push(struct data *data, struct commit *commit, int mark)
+{
+ struct entry *entry;
+ commit->object.flags |= mark | SEEN;
+
+ entry = xcalloc(1, sizeof(*entry));
+ entry->commit = commit;
+ prio_queue_put(&data->rev_list, entry);
+
+ if (!(mark & COMMON))
+ data->non_common_revs++;
+ return entry;
+}
+
+static int clear_marks(const char *refname, const struct object_id *oid,
+ int flag, void *cb_data)
+{
+ struct object *o = deref_tag(the_repository, parse_object(the_repository, oid), refname, 0);
+
+ if (o && o->type == OBJ_COMMIT)
+ clear_commit_marks((struct commit *)o,
+ COMMON | ADVERTISED | SEEN | POPPED);
+ return 0;
+}
+
+/*
+ * Mark this SEEN commit and all its SEEN ancestors as COMMON.
+ */
+static void mark_common(struct data *data, struct commit *c)
+{
+ struct commit_list *p;
+
+ if (c->object.flags & COMMON)
+ return;
+ c->object.flags |= COMMON;
+ if (!(c->object.flags & POPPED))
+ data->non_common_revs--;
+
+ if (!c->object.parsed)
+ return;
+ for (p = c->parents; p; p = p->next) {
+ if (p->item->object.flags & SEEN)
+ mark_common(data, p->item);
+ }
+}
+
+/*
+ * Ensure that the priority queue has an entry for to_push, and ensure that the
+ * entry has the correct flags and ttl.
+ *
+ * This function returns 1 if an entry was found or created, and 0 otherwise
+ * (because the entry for this commit had already been popped).
+ */
+static int push_parent(struct data *data, struct entry *entry,
+ struct commit *to_push)
+{
+ struct entry *parent_entry;
+
+ if (to_push->object.flags & SEEN) {
+ int i;
+ if (to_push->object.flags & POPPED)
+ /*
+ * The entry for this commit has already been popped,
+ * due to clock skew. Pretend that this parent does not
+ * exist.
+ */
+ return 0;
+ /*
+ * Find the existing entry and use it.
+ */
+ for (i = 0; i < data->rev_list.nr; i++) {
+ parent_entry = data->rev_list.array[i].data;
+ if (parent_entry->commit == to_push)
+ goto parent_found;
+ }
+ BUG("missing parent in priority queue");
+parent_found:
+ ;
+ } else {
+ parent_entry = rev_list_push(data, to_push, 0);
+ }
+
+ if (entry->commit->object.flags & (COMMON | ADVERTISED)) {
+ mark_common(data, to_push);
+ } else {
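+ /*
+ * Decide how far to skip before the next "have": while the
+ * current entry still has ttl, the parent inherits the same
+ * stride with ttl - 1; once ttl reaches zero the commit is
+ * sent and the next stride grows by roughly 3/2, so the skips
+ * become exponentially longer the further back we walk.
+ */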
+ uint16_t new_original_ttl = entry->ttl
+ ? entry->original_ttl : entry->original_ttl * 3 / 2 + 1;
+ uint16_t new_ttl = entry->ttl
+ ? entry->ttl - 1 : new_original_ttl;
+ if (parent_entry->original_ttl < new_original_ttl) {
+ parent_entry->original_ttl = new_original_ttl;
+ parent_entry->ttl = new_ttl;
+ }
+ }
+
+ return 1;
+}
+
+static const struct object_id *get_rev(struct data *data)
+{
+ struct commit *to_send = NULL;
+
+ while (to_send == NULL) {
+ struct entry *entry;
+ struct commit *commit;
+ struct commit_list *p;
+ int parent_pushed = 0;
+
+ if (data->rev_list.nr == 0 || data->non_common_revs == 0)
+ return NULL;
+
+ entry = prio_queue_get(&data->rev_list);
+ commit = entry->commit;
+ commit->object.flags |= POPPED;
+ if (!(commit->object.flags & COMMON))
+ data->non_common_revs--;
+
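+ /*
+ * A commit is sent as a "have" only once its skip counter has
+ * run out; commits with a positive ttl are skipped over.
+ */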
+ if (!(commit->object.flags & COMMON) && !entry->ttl)
+ to_send = commit;
+
+ parse_commit(commit);
+ for (p = commit->parents; p; p = p->next)
+ parent_pushed |= push_parent(data, entry, p->item);
+
+ if (!(commit->object.flags & COMMON) && !parent_pushed)
+ /*
+ * This commit has no parents, or all of its parents
+ * have already been popped (due to clock skew), so send
+ * it anyway.
+ */
+ to_send = commit;
+
+ free(entry);
+ }
+
+ return &to_send->object.oid;
+}
+
+static void known_common(struct fetch_negotiator *n, struct commit *c)
+{
+ if (c->object.flags & SEEN)
+ return;
+ rev_list_push(n->data, c, ADVERTISED);
+}
+
+static void add_tip(struct fetch_negotiator *n, struct commit *c)
+{
+ n->known_common = NULL;
+ if (c->object.flags & SEEN)
+ return;
+ rev_list_push(n->data, c, 0);
+}
+
+static const struct object_id *next(struct fetch_negotiator *n)
+{
+ n->known_common = NULL;
+ n->add_tip = NULL;
+ return get_rev(n->data);
+}
+
+static int ack(struct fetch_negotiator *n, struct commit *c)
+{
+ int known_to_be_common = !!(c->object.flags & COMMON);
+ if (!(c->object.flags & SEEN))
+ die("received ack for commit %s not sent as 'have'\n",
+ oid_to_hex(&c->object.oid));
+ mark_common(n->data, c);
+ return known_to_be_common;
+}
+
+static void release(struct fetch_negotiator *n)
+{
+ clear_prio_queue(&((struct data *)n->data)->rev_list);
+ FREE_AND_NULL(n->data);
+}
+
+void skipping_negotiator_init(struct fetch_negotiator *negotiator)
+{
+ struct data *data;
+ negotiator->known_common = known_common;
+ negotiator->add_tip = add_tip;
+ negotiator->next = next;
+ negotiator->ack = ack;
+ negotiator->release = release;
+ negotiator->data = data = xcalloc(1, sizeof(*data));
+ data->rev_list.compare = compare;
+
+ if (marked)
+ for_each_ref(clear_marks, NULL);
+ marked = 1;
+}
--- /dev/null
+#ifndef NEGOTIATOR_SKIPPING_H
+#define NEGOTIATOR_SKIPPING_H
+
+struct fetch_negotiator;
+
+void skipping_negotiator_init(struct fetch_negotiator *negotiator);
+
+#endif
#include "cache.h"
#include "notes-cache.h"
#include "object-store.h"
+#include "repository.h"
#include "commit.h"
#include "refs.h"
if (read_ref(ref, &oid) < 0)
return 0;
- commit = lookup_commit_reference_gently(&oid, 1);
+ commit = lookup_commit_reference_gently(the_repository, &oid, 1);
if (!commit)
return 0;
#include "commit.h"
#include "refs.h"
#include "object-store.h"
+#include "repository.h"
#include "diff.h"
#include "diffcore.h"
#include "xdiff-interface.h"
else if (!check_refname_format(o->local_ref, 0) &&
is_null_oid(&local_oid))
local = NULL; /* local_oid == null_oid indicates unborn ref */
- else if (!(local = lookup_commit_reference(&local_oid)))
+ else if (!(local = lookup_commit_reference(the_repository, &local_oid)))
die("Could not parse local commit %s (%s)",
oid_to_hex(&local_oid), o->local_ref);
trace_printf("\tlocal commit: %.7s\n", oid_to_hex(&local_oid));
die("Failed to resolve remote notes ref '%s'",
o->remote_ref);
}
- } else if (!(remote = lookup_commit_reference(&remote_oid))) {
+ } else if (!(remote = lookup_commit_reference(the_repository, &remote_oid))) {
die("Could not parse remote commit %s (%s)",
oid_to_hex(&remote_oid), o->remote_ref);
}
#include "commit.h"
#include "refs.h"
#include "notes-utils.h"
+#include "repository.h"
void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
const char *msg, size_t msg_len,
/* Deduce parent commit from t->ref */
struct object_id parent_oid;
if (!read_ref(t->ref, &parent_oid)) {
- struct commit *parent = lookup_commit(&parent_oid);
+ struct commit *parent = lookup_commit(the_repository,
+ &parent_oid);
if (parse_commit(parent))
die("Failed to find/parse commit %s", t->ref);
commit_list_insert(parent, &parents);
#define OBJECT_STORE_H
#include "oidmap.h"
+#include "list.h"
+#include "sha1-array.h"
+#include "strbuf.h"
struct alternate_object_database {
struct alternate_object_database *next;
*/
struct oidmap *replace_map;
+ struct commit_graph *commit_graph;
+ unsigned commit_graph_attempted : 1; /* if loading has been attempted */
+
/*
* private data
*
#include "alloc.h"
#include "object-store.h"
#include "packfile.h"
+#include "commit-graph.h"
unsigned int get_max_object_index(void)
{
* Look up the record for the given sha1 in the hash map stored in
* obj_hash. Return NULL if it was not found.
*/
-struct object *lookup_object(const unsigned char *sha1)
+struct object *lookup_object(struct repository *r, const unsigned char *sha1)
{
unsigned int i, first;
struct object *obj;
- if (!the_repository->parsed_objects->obj_hash)
+ if (!r->parsed_objects->obj_hash)
return NULL;
- first = i = hash_obj(sha1,
- the_repository->parsed_objects->obj_hash_size);
- while ((obj = the_repository->parsed_objects->obj_hash[i]) != NULL) {
+ first = i = hash_obj(sha1, r->parsed_objects->obj_hash_size);
+ while ((obj = r->parsed_objects->obj_hash[i]) != NULL) {
if (!hashcmp(sha1, obj->oid.hash))
break;
i++;
- if (i == the_repository->parsed_objects->obj_hash_size)
+ if (i == r->parsed_objects->obj_hash_size)
i = 0;
}
if (obj && i != first) {
* that we do not need to walk the hash table the next
* time we look for it.
*/
- SWAP(the_repository->parsed_objects->obj_hash[i],
- the_repository->parsed_objects->obj_hash[first]);
+ SWAP(r->parsed_objects->obj_hash[i],
+ r->parsed_objects->obj_hash[first]);
}
return obj;
}
return obj;
}
-void *object_as_type(struct object *obj, enum object_type type, int quiet)
+void *object_as_type(struct repository *r, struct object *obj, enum object_type type, int quiet)
{
if (obj->type == type)
return obj;
else if (obj->type == OBJ_NONE) {
if (type == OBJ_COMMIT)
- ((struct commit *)obj)->index = alloc_commit_index(the_repository);
+ ((struct commit *)obj)->index = alloc_commit_index(r);
obj->type = type;
return obj;
}
struct object *lookup_unknown_object(const unsigned char *sha1)
{
- struct object *obj = lookup_object(sha1);
+ struct object *obj = lookup_object(the_repository, sha1);
if (!obj)
obj = create_object(the_repository, sha1,
alloc_object_node(the_repository));
return obj;
}
-struct object *parse_object_buffer(const struct object_id *oid, enum object_type type, unsigned long size, void *buffer, int *eaten_p)
+struct object *parse_object_buffer(struct repository *r, const struct object_id *oid, enum object_type type, unsigned long size, void *buffer, int *eaten_p)
{
struct object *obj;
*eaten_p = 0;
obj = NULL;
if (type == OBJ_BLOB) {
- struct blob *blob = lookup_blob(oid);
+ struct blob *blob = lookup_blob(r, oid);
if (blob) {
if (parse_blob_buffer(blob, buffer, size))
return NULL;
obj = &blob->object;
}
} else if (type == OBJ_TREE) {
- struct tree *tree = lookup_tree(oid);
+ struct tree *tree = lookup_tree(r, oid);
if (tree) {
obj = &tree->object;
if (!tree->buffer)
}
}
} else if (type == OBJ_COMMIT) {
- struct commit *commit = lookup_commit(oid);
+ struct commit *commit = lookup_commit(r, oid);
if (commit) {
- if (parse_commit_buffer(commit, buffer, size, 1))
+ if (parse_commit_buffer(r, commit, buffer, size, 1))
return NULL;
- if (!get_cached_commit_buffer(commit, NULL)) {
- set_commit_buffer(commit, buffer, size);
+ if (!get_cached_commit_buffer(r, commit, NULL)) {
+ set_commit_buffer(r, commit, buffer, size);
*eaten_p = 1;
}
obj = &commit->object;
}
} else if (type == OBJ_TAG) {
- struct tag *tag = lookup_tag(oid);
+ struct tag *tag = lookup_tag(r, oid);
if (tag) {
- if (parse_tag_buffer(tag, buffer, size))
+ if (parse_tag_buffer(r, tag, buffer, size))
return NULL;
obj = &tag->object;
}
struct object *parse_object_or_die(const struct object_id *oid,
const char *name)
{
- struct object *o = parse_object(oid);
+ struct object *o = parse_object(the_repository, oid);
if (o)
return o;
die(_("unable to parse object: %s"), name ? name : oid_to_hex(oid));
}
-struct object *parse_object(const struct object_id *oid)
+struct object *parse_object(struct repository *r, const struct object_id *oid)
{
unsigned long size;
enum object_type type;
int eaten;
- const struct object_id *repl = lookup_replace_object(the_repository, oid);
+ const struct object_id *repl = lookup_replace_object(r, oid);
void *buffer;
struct object *obj;
- obj = lookup_object(oid->hash);
+ obj = lookup_object(r, oid->hash);
if (obj && obj->parsed)
return obj;
if ((obj && obj->type == OBJ_BLOB && has_object_file(oid)) ||
(!obj && has_object_file(oid) &&
- oid_object_info(the_repository, oid, NULL) == OBJ_BLOB)) {
+ oid_object_info(r, oid, NULL) == OBJ_BLOB)) {
if (check_object_signature(repl, NULL, 0, NULL) < 0) {
error("sha1 mismatch %s", oid_to_hex(oid));
return NULL;
}
- parse_blob_buffer(lookup_blob(oid), NULL, 0);
- return lookup_object(oid->hash);
+ parse_blob_buffer(lookup_blob(r, oid), NULL, 0);
+ return lookup_object(r, oid->hash);
}
buffer = read_object_file(oid, &type, &size);
return NULL;
}
- obj = parse_object_buffer(oid, type, size, buffer, &eaten);
+ obj = parse_object_buffer(r, oid, type, size,
+ buffer, &eaten);
if (!eaten)
free(buffer);
return obj;
o->is_shallow = -1;
o->shallow_stat = xcalloc(1, sizeof(*o->shallow_stat));
+ o->buffer_slab = allocate_commit_buffer_slab();
+
return o;
}
oidmap_free(o->replace_map, 1);
FREE_AND_NULL(o->replace_map);
+ free_commit_graph(o->commit_graph);
+ o->commit_graph = NULL;
+ o->commit_graph_attempted = 0;
+
free_alt_odbs(o);
o->alt_odb_tail = NULL;
FREE_AND_NULL(o->obj_hash);
o->obj_hash_size = 0;
+ free_commit_buffer_slab(o->buffer_slab);
+ o->buffer_slab = NULL;
+
clear_alloc_state(o->blob_state);
clear_alloc_state(o->tree_state);
clear_alloc_state(o->commit_state);
#ifndef OBJECT_H
#define OBJECT_H
+struct buffer_slab;
+
struct parsed_object_pool {
struct object **obj_hash;
int nr_objs, obj_hash_size;
char *alternate_shallow_file;
int commit_graft_prepared;
+
+ struct buffer_slab *buffer_slab;
};
struct parsed_object_pool *parsed_object_pool_new(void);
/*
* object flag allocation:
- * revision.h: 0---------10 26
- * fetch-pack.c: 0----5
+ * revision.h: 0---------10 2526
+ * fetch-pack.c: 01
+ * negotiator/default.c: 2--5
* walker.c: 0-2
* upload-pack.c: 4 11----------------19
* builtin/blame.c: 12-13
* half-initialised objects, the caller is expected to initialize them
* by calling parse_object() on them.
*/
-struct object *lookup_object(const unsigned char *sha1);
+struct object *lookup_object(struct repository *r, const unsigned char *sha1);
extern void *create_object(struct repository *r, const unsigned char *sha1, void *obj);
-void *object_as_type(struct object *obj, enum object_type type, int quiet);
+void *object_as_type(struct repository *r, struct object *obj, enum object_type type, int quiet);
/*
* Returns the object, having parsed it to find out what it is.
*
* Returns NULL if the object is missing or corrupt.
*/
-struct object *parse_object(const struct object_id *oid);
+struct object *parse_object(struct repository *r, const struct object_id *oid);
/*
* Like parse_object, but will die() instead of returning NULL. If the
* parsing it. eaten_p indicates if the object has a borrowed copy
* of buffer and the caller should not free() it.
*/
-struct object *parse_object_buffer(const struct object_id *oid, enum object_type type, unsigned long size, void *buffer, int *eaten_p);
+struct object *parse_object_buffer(struct repository *r, const struct object_id *oid, enum object_type type, unsigned long size, void *buffer, int *eaten_p);
/** Returns the object, with potentially excess memory allocated. **/
struct object *lookup_unknown_object(const unsigned char *sha1);
void *set_)
{
struct oidset *set = set_;
- struct object *obj = parse_object(oid);
+ struct object *obj = parse_object(the_repository, oid);
if (!obj)
return 1;
return -1;
if (get_oid(arg, &oid))
return error("malformed object name %s", arg);
- commit = lookup_commit_reference(&oid);
+ commit = lookup_commit_reference(the_repository, &oid);
if (!commit)
return error("no such commit %s", arg);
commit_list_insert(commit, opt->value);
/*
* You can define a static memoized git path like:
*
- * static GIT_PATH_FUNC(git_path_foo, "FOO");
+ * static GIT_PATH_FUNC(git_path_foo, "FOO")
*
* or use one of the global ones below.
*/
* the cached copy from get_commit_buffer, we need to duplicate it
* to avoid munging the cached copy.
*/
- if (msg == get_cached_commit_buffer(commit, NULL))
+ if (msg == get_cached_commit_buffer(the_repository, commit, NULL))
out = xstrdup(msg);
else
out = (char *)msg;
/* these depend on the commit */
if (!commit->object.parsed)
- parse_object(&commit->object.oid);
+ parse_object(the_repository, &commit->object.oid);
switch (placeholder[0]) {
case 'H': /* commit hash */
}
if (starts_with(line, "parent ")) {
- if (linelen != 48)
+ if (linelen != the_hash_algo->hexsz + 8)
die("bad parent line in commit");
continue;
}
if (!parents_shown) {
unsigned num = commit_list_count(commit->parents);
/* with enough slop */
- strbuf_grow(sb, num * 50 + 20);
+ strbuf_grow(sb, num * (GIT_MAX_HEXSZ + 10) + 20);
add_merge_info(pp, sb, commit);
parents_shown = 1;
}
obj = parse_object_or_die(oid, NULL);
break;
case OBJ_TREE:
- obj = (struct object *)lookup_tree(oid);
+ obj = (struct object *)lookup_tree(the_repository, oid);
break;
case OBJ_BLOB:
- obj = (struct object *)lookup_blob(oid);
+ obj = (struct object *)lookup_blob(the_repository, oid);
break;
default:
die("unknown object type for %s: %s",
const char *path, void *data)
{
struct stat st;
- struct object *obj = lookup_object(oid->hash);
+ struct object *obj = lookup_object(the_repository, oid->hash);
if (obj && obj->flags & SEEN)
return 0;
struct packed_git *p, uint32_t pos,
void *data)
{
- struct object *obj = lookup_object(oid->hash);
+ struct object *obj = lookup_object(the_repository, oid->hash);
if (obj && obj->flags & SEEN)
return 0;
#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "config.h"
+#include "diff.h"
+#include "diffcore.h"
#include "tempfile.h"
#include "lockfile.h"
#include "cache-tree.h"
CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \
SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED | FSMONITOR_CHANGED)
+
+/*
+ * This is an estimate of the pathname length in the index. We use
+ * this for V4 index files to guess the un-deltafied size of the index
+ * in memory because of pathname deltafication. This is not required
+ * for V2/V3 index formats because their pathnames are not compressed.
+ * If the initial amount of memory set aside is not sufficient, the
+ * mem pool will allocate extra memory.
+ */
+#define CACHE_ENTRY_PATH_LENGTH 80
+
+static inline struct cache_entry *mem_pool__ce_alloc(struct mem_pool *mem_pool, size_t len)
+{
+ struct cache_entry *ce;
+ ce = mem_pool_alloc(mem_pool, cache_entry_size(len));
+ ce->mem_pool_allocated = 1;
+ return ce;
+}
+
+static inline struct cache_entry *mem_pool__ce_calloc(struct mem_pool *mem_pool, size_t len)
+{
+ struct cache_entry *ce;
+ ce = mem_pool_calloc(mem_pool, 1, cache_entry_size(len));
+ ce->mem_pool_allocated = 1;
+ return ce;
+}
+
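+/*
+ * Pick the memory pool that cache entries for this index should come
+ * from: when a split index has a shared base, use the base's pool so
+ * that entries shared with the base stay valid for its lifetime.
+ */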
+static struct mem_pool *find_mem_pool(struct index_state *istate)
+{
+ struct mem_pool **pool_ptr;
+
+ if (istate->split_index && istate->split_index->base)
+ pool_ptr = &istate->split_index->base->ce_mem_pool;
+ else
+ pool_ptr = &istate->ce_mem_pool;
+
+ if (!*pool_ptr)
+ mem_pool_init(pool_ptr, 0);
+
+ return *pool_ptr;
+}
+
struct index_state the_index;
static const char *alternate_index_output;
replace_index_entry_in_base(istate, old, ce);
remove_name_hash(istate, old);
- free(old);
+ discard_cache_entry(old);
ce->ce_flags &= ~CE_HASHED;
set_index_entry(istate, nr, ce);
ce->ce_flags |= CE_UPDATE_IN_BASE;
struct cache_entry *old_entry = istate->cache[nr], *new_entry;
int namelen = strlen(new_name);
- new_entry = xmalloc(cache_entry_size(namelen));
+ new_entry = make_empty_cache_entry(istate, namelen);
copy_cache_entry(new_entry, old_entry);
new_entry->ce_flags &= ~CE_HASHED;
new_entry->ce_namelen = namelen;
/* Ok, create the new entry using the name of the existing alias */
len = ce_namelen(alias);
- new_entry = xcalloc(1, cache_entry_size(len));
+ new_entry = make_empty_cache_entry(istate, len);
memcpy(new_entry->name, alias->name, len);
copy_cache_entry(new_entry, ce);
save_or_free_index_entry(istate, ce);
int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
{
- int size, namelen, was_same;
+ int namelen, was_same;
mode_t st_mode = st->st_mode;
struct cache_entry *ce, *alias = NULL;
unsigned ce_option = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE|CE_MATCH_RACY_IS_DIRTY;
while (namelen && path[namelen-1] == '/')
namelen--;
}
- size = cache_entry_size(namelen);
- ce = xcalloc(1, size);
+ ce = make_empty_cache_entry(istate, namelen);
memcpy(ce->name, path, namelen);
ce->ce_namelen = namelen;
if (!intent_only)
ce_mark_uptodate(alias);
alias->ce_flags |= CE_ADDED;
- free(ce);
+ discard_cache_entry(ce);
return 0;
}
}
if (!intent_only) {
if (index_path(&ce->oid, path, st, newflags)) {
- free(ce);
+ discard_cache_entry(ce);
return error("unable to index file %s", path);
}
} else
ce->ce_mode == alias->ce_mode);
if (pretend)
- free(ce);
+ discard_cache_entry(ce);
else if (add_index_entry(istate, ce, add_option)) {
- free(ce);
+ discard_cache_entry(ce);
return error("unable to add %s to index", path);
}
if (verbose && !was_same)
return add_to_index(istate, path, &st, flags);
}
-struct cache_entry *make_cache_entry(unsigned int mode,
- const unsigned char *sha1, const char *path, int stage,
- unsigned int refresh_options)
+struct cache_entry *make_empty_cache_entry(struct index_state *istate, size_t len)
+{
+ return mem_pool__ce_calloc(find_mem_pool(istate), len);
+}
+
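+/*
+ * "Transient" cache entries are allocated on the heap rather than from
+ * an index's memory pool; they are for entries that never enter an
+ * index and must be released with discard_cache_entry().
+ */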
+struct cache_entry *make_empty_transient_cache_entry(size_t len)
+{
+ return xcalloc(1, cache_entry_size(len));
+}
+
+struct cache_entry *make_cache_entry(struct index_state *istate,
+ unsigned int mode,
+ const struct object_id *oid,
+ const char *path,
+ int stage,
+ unsigned int refresh_options)
{
- int size, len;
struct cache_entry *ce, *ret;
+ int len;
if (!verify_path(path, mode)) {
error("Invalid path '%s'", path);
}
len = strlen(path);
- size = cache_entry_size(len);
- ce = xcalloc(1, size);
+ ce = make_empty_cache_entry(istate, len);
- hashcpy(ce->oid.hash, sha1);
+ oidcpy(&ce->oid, oid);
memcpy(ce->name, path, len);
ce->ce_flags = create_ce_flags(stage);
ce->ce_namelen = len;
ce->ce_mode = create_ce_mode(mode);
- ret = refresh_cache_entry(ce, refresh_options);
+ ret = refresh_cache_entry(&the_index, ce, refresh_options);
if (ret != ce)
- free(ce);
+ discard_cache_entry(ce);
return ret;
}
+struct cache_entry *make_transient_cache_entry(unsigned int mode, const struct object_id *oid,
+ const char *path, int stage)
+{
+ struct cache_entry *ce;
+ int len;
+
+ if (!verify_path(path, mode)) {
+ error("Invalid path '%s'", path);
+ return NULL;
+ }
+
+ len = strlen(path);
+ ce = make_empty_transient_cache_entry(len);
+
+ oidcpy(&ce->oid, oid);
+ memcpy(ce->name, path, len);
+ ce->ce_flags = create_ce_flags(stage);
+ ce->ce_namelen = len;
+ ce->ce_mode = create_ce_mode(mode);
+
+ return ce;
+}
+
/*
* Chmod an index entry with either +x or -x.
*
{
struct stat st;
struct cache_entry *updated;
- int changed, size;
+ int changed;
int refresh = options & CE_MATCH_REFRESH;
int ignore_valid = options & CE_MATCH_IGNORE_VALID;
int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
return NULL;
}
- size = ce_size(ce);
- updated = xmalloc(size);
+ updated = make_empty_cache_entry(istate, ce_namelen(ce));
copy_cache_entry(updated, ce);
memcpy(updated->name, ce->name, ce->ce_namelen + 1);
fill_stat_cache_info(updated, &st);
return has_errors;
}
-struct cache_entry *refresh_cache_entry(struct cache_entry *ce,
- unsigned int options)
+struct cache_entry *refresh_cache_entry(struct index_state *istate,
+ struct cache_entry *ce,
+ unsigned int options)
{
- return refresh_cache_ent(&the_index, ce, options, NULL, NULL);
+ return refresh_cache_ent(istate, ce, options, NULL, NULL);
}
return read_index_from(istate, get_index_file(), get_git_dir());
}
-static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *ondisk,
+static struct cache_entry *cache_entry_from_ondisk(struct mem_pool *mem_pool,
+ struct ondisk_cache_entry *ondisk,
unsigned int flags,
const char *name,
size_t len)
{
- struct cache_entry *ce = xmalloc(cache_entry_size(len));
+ struct cache_entry *ce = mem_pool__ce_alloc(mem_pool, len);
ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec);
ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec);
return (const char *)ep + 1 - cp_;
}
-static struct cache_entry *create_from_disk(struct ondisk_cache_entry *ondisk,
+static struct cache_entry *create_from_disk(struct mem_pool *mem_pool,
+ struct ondisk_cache_entry *ondisk,
unsigned long *ent_size,
struct strbuf *previous_name)
{
/* v3 and earlier */
if (len == CE_NAMEMASK)
len = strlen(name);
- ce = cache_entry_from_ondisk(ondisk, flags, name, len);
+ ce = cache_entry_from_ondisk(mem_pool, ondisk, flags, name, len);
*ent_size = ondisk_ce_size(ce);
} else {
unsigned long consumed;
consumed = expand_name_field(previous_name, name);
- ce = cache_entry_from_ondisk(ondisk, flags,
+ ce = cache_entry_from_ondisk(mem_pool, ondisk, flags,
previous_name->buf,
previous_name->len);
tweak_fsmonitor(istate);
}
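+/*
+ * With index v4 the on-disk pathnames are prefix-compressed, so the
+ * on-disk size understates the memory needed; budget a fixed
+ * CACHE_ENTRY_PATH_LENGTH bytes of pathname per entry instead.
+ */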
+static size_t estimate_cache_size_from_compressed(unsigned int entries)
+{
+ return entries * (sizeof(struct cache_entry) + CACHE_ENTRY_PATH_LENGTH);
+}
+
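+/*
+ * For index v2/v3 the on-disk entries carry their full pathnames, so
+ * start from the on-disk size and add the per-entry difference between
+ * the in-memory and on-disk structures.
+ */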
+static size_t estimate_cache_size(size_t ondisk_size, unsigned int entries)
+{
+ long per_entry = sizeof(struct cache_entry) - sizeof(struct ondisk_cache_entry);
+
+ /*
+ * Account for potential alignment differences.
+ */
+ per_entry += align_padding_size(sizeof(struct cache_entry), -sizeof(struct ondisk_cache_entry));
+ return ondisk_size + entries * per_entry;
+}
+
/* remember to discard_cache() before reading a different cache! */
int do_read_index(struct index_state *istate, const char *path, int must_exist)
{
istate->cache = xcalloc(istate->cache_alloc, sizeof(*istate->cache));
istate->initialized = 1;
- if (istate->version == 4)
+ if (istate->version == 4) {
previous_name = &previous_name_buf;
- else
+ mem_pool_init(&istate->ce_mem_pool,
+ estimate_cache_size_from_compressed(istate->cache_nr));
+ } else {
previous_name = NULL;
+ mem_pool_init(&istate->ce_mem_pool,
+ estimate_cache_size(mmap_size, istate->cache_nr));
+ }
src_offset = sizeof(*hdr);
for (i = 0; i < istate->cache_nr; i++) {
unsigned long consumed;
disk_ce = (struct ondisk_cache_entry *)((char *)mmap + src_offset);
- ce = create_from_disk(disk_ce, &consumed, previous_name);
+ ce = create_from_disk(istate->ce_mem_pool, disk_ce, &consumed, previous_name);
set_index_entry(istate, i, ce);
src_offset += consumed;
int discard_index(struct index_state *istate)
{
- int i;
+ /*
+ * Cache entries in istate->cache[] should have been allocated
+ * from the memory pool associated with this index, or from an
+ * associated split_index. There is no need to free individual
+ * cache entries. validate_cache_entries can detect when this
+ * assertion does not hold.
+ */
+ validate_cache_entries(istate);
- for (i = 0; i < istate->cache_nr; i++) {
- if (istate->cache[i]->index &&
- istate->split_index &&
- istate->split_index->base &&
- istate->cache[i]->index <= istate->split_index->base->cache_nr &&
- istate->cache[i] == istate->split_index->base->cache[istate->cache[i]->index - 1])
- continue;
- free(istate->cache[i]);
- }
resolve_undo_clear_index(istate);
istate->cache_nr = 0;
istate->cache_changed = 0;
discard_split_index(istate);
free_untracked_cache(istate->untracked);
istate->untracked = NULL;
+
+ if (istate->ce_mem_pool) {
+ mem_pool_discard(istate->ce_mem_pool, should_validate_cache_entries());
+ istate->ce_mem_pool = NULL;
+ }
+
return 0;
}
+/*
+ * Validate the cache entries of this index.
+ * All cache entries associated with this index
+ * should have been allocated by the memory pool
+ * associated with this index, or by a referenced
+ * split index.
+ */
+void validate_cache_entries(const struct index_state *istate)
+{
+ int i;
+
+ if (!should_validate_cache_entries() || !istate || !istate->initialized)
+ return;
+
+ for (i = 0; i < istate->cache_nr; i++) {
+ if (!istate) {
+ die("internal error: cache entry is not allocated from expected memory pool");
+ } else if (!istate->ce_mem_pool ||
+ !mem_pool_contains(istate->ce_mem_pool, istate->cache[i])) {
+ if (!istate->split_index ||
+ !istate->split_index->base ||
+ !istate->split_index->base->ce_mem_pool ||
+ !mem_pool_contains(istate->split_index->base->ce_mem_pool, istate->cache[i])) {
+ die("internal error: cache entry is not allocated from expected memory pool");
+ }
+ }
+ }
+
+ if (istate->split_index)
+ validate_cache_entries(istate->split_index->base);
+}
+
int unmerged_index(const struct index_state *istate)
{
int i;
return 0;
}
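+/*
+ * Report whether the index differs from the given tree (or from HEAD
+ * when tree is NULL; if there is no HEAD, any cache entry counts as a
+ * change). If sb is non-NULL, append the names of the changed paths to
+ * it, separated by spaces.
+ */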
+int index_has_changes(const struct index_state *istate,
+ struct tree *tree,
+ struct strbuf *sb)
+{
+ struct object_id cmp;
+ int i;
+
+ if (istate != &the_index) {
+ BUG("index_has_changes cannot yet accept istate != &the_index; do_diff_cache needs updating first.");
+ }
+ if (tree)
+ cmp = tree->object.oid;
+ if (tree || !get_oid_tree("HEAD", &cmp)) {
+ struct diff_options opt;
+
+ diff_setup(&opt);
+ opt.flags.exit_with_status = 1;
+ if (!sb)
+ opt.flags.quick = 1;
+ do_diff_cache(&cmp, &opt);
+ diffcore_std(&opt);
+ for (i = 0; sb && i < diff_queued_diff.nr; i++) {
+ if (i)
+ strbuf_addch(sb, ' ');
+ strbuf_addstr(sb, diff_queued_diff.queue[i]->two->path);
+ }
+ diff_flush(&opt);
+ return opt.flags.has_changes != 0;
+ } else {
+ for (i = 0; sb && i < istate->cache_nr; i++) {
+ if (i)
+ strbuf_addch(sb, ' ');
+ strbuf_addstr(sb, istate->cache[i]->name);
+ }
+ return !!istate->cache_nr;
+ }
+}
+
#define WRITE_BUFFER_SIZE 8192
static unsigned char write_buffer[WRITE_BUFFER_SIZE];
static unsigned long write_buffer_len;
for (i = 0; i < istate->cache_nr; i++) {
struct cache_entry *ce = istate->cache[i];
struct cache_entry *new_ce;
- int size, len;
+ int len;
if (!ce_stage(ce))
continue;
unmerged = 1;
len = ce_namelen(ce);
- size = cache_entry_size(len);
- new_ce = xcalloc(1, size);
+ new_ce = make_empty_cache_entry(istate, len);
memcpy(new_ce->name, ce->name, len);
new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED;
new_ce->ce_namelen = len;
dst->untracked = src->untracked;
src->untracked = NULL;
}
+
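+/*
+ * Duplicate a cache entry into the given index. The copy has its own
+ * allocation, so its mem_pool_allocated flag is preserved across the
+ * memcpy() rather than inherited from the source entry.
+ */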
+struct cache_entry *dup_cache_entry(const struct cache_entry *ce,
+ struct index_state *istate)
+{
+ unsigned int size = ce_size(ce);
+ int mem_pool_allocated;
+ struct cache_entry *new_entry = make_empty_cache_entry(istate, ce_namelen(ce));
+ mem_pool_allocated = new_entry->mem_pool_allocated;
+
+ memcpy(new_entry, ce, size);
+ new_entry->mem_pool_allocated = mem_pool_allocated;
+ return new_entry;
+}
+
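+/*
+ * Entries that came from a memory pool are owned by the pool and are
+ * not freed individually; only transient (heap) entries are free()d.
+ * With GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES set, the memory is
+ * poisoned first so that stale uses are easier to spot.
+ */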
+void discard_cache_entry(struct cache_entry *ce)
+{
+ if (ce && should_validate_cache_entries())
+ memset(ce, 0xCD, cache_entry_size(ce->ce_namelen));
+
+ if (ce && ce->mem_pool_allocated)
+ return;
+
+ free(ce);
+}
+
+int should_validate_cache_entries(void)
+{
+ static int validate_index_cache_entries = -1;
+
+ if (validate_index_cache_entries < 0) {
+ if (getenv("GIT_TEST_VALIDATE_INDEX_CACHE_ENTRIES"))
+ validate_index_cache_entries = 1;
+ else
+ validate_index_cache_entries = 0;
+ }
+
+ return validate_index_cache_entries;
+}
#include "refs.h"
#include "wildmatch.h"
#include "object-store.h"
+#include "repository.h"
#include "commit.h"
#include "remote.h"
#include "color.h"
void *buf = read_object_file(oid, &type, sz);
if (buf)
- *obj = parse_object_buffer(oid, type, *sz, buf, eaten);
+ *obj = parse_object_buffer(the_repository, oid, type, *sz,
+ buf, eaten);
else
*obj = NULL;
return buf;
for (p = want; p; p = p->next) {
struct commit *c = p->item;
- load_commit_graph_info(c);
+ load_commit_graph_info(the_repository, c);
if (c->generation < cutoff)
cutoff = c->generation;
}
refname[plen] == '/' ||
p[plen-1] == '/'))
return 1;
- if (!wildmatch(p, refname, WM_PATHNAME))
+ if (!wildmatch(p, refname, flags))
return 1;
}
return 0;
return for_each_fullref_in("", cb, cb_data, broken);
}
+ if (filter->ignore_case) {
+ /*
+ * we can't handle case-insensitive comparisons,
+ * so just return everything and let the caller
+ * sort it out.
+ */
+ return for_each_fullref_in("", cb, cb_data, broken);
+ }
+
if (!filter->name_patterns[0]) {
/* no patterns; we have to look at everything */
return for_each_fullref_in("", cb, cb_data, broken);
if (oid_array_lookup(points_at, oid) >= 0)
return oid;
- obj = parse_object(oid);
+ obj = parse_object(the_repository, oid);
if (!obj)
die(_("malformed object at '%s'"), refname);
if (obj->type == OBJ_TAG)
* non-commits early. The actual filtering is done later.
*/
if (filter->merge_commit || filter->with_commit || filter->no_commit || filter->verbose) {
- commit = lookup_commit_reference_gently(oid, 1);
+ commit = lookup_commit_reference_gently(the_repository, oid,
+ 1);
if (!commit)
return 0;
/* We perform the filtering for the '--contains' option... */
if (get_oid(arg, &oid))
die(_("malformed object name %s"), arg);
- rf->merge_commit = lookup_commit_reference_gently(&oid, 0);
+ rf->merge_commit = lookup_commit_reference_gently(the_repository,
+ &oid, 0);
if (!rf->merge_commit)
return opterror(opt, "must point to a commit", 0);
{
for (; log->recno >= 0; log->recno--) {
struct reflog_info *entry = &log->reflogs->items[log->recno];
- struct object *obj = parse_object(&entry->noid);
+ struct object *obj = parse_object(the_repository,
+ &entry->noid);
if (obj && obj->type == OBJ_COMMIT)
return (struct commit *)obj;
if (o->type == OBJ_NONE) {
int type = oid_object_info(the_repository, name, NULL);
- if (type < 0 || !object_as_type(o, type, 0))
+ if (type < 0 || !object_as_type(the_repository, o, type, 0))
return PEEL_INVALID;
}
old_oid, flags);
}
-int copy_reflog_msg(char *buf, const char *msg)
+void copy_reflog_msg(struct strbuf *sb, const char *msg)
{
- char *cp = buf;
char c;
int wasspace = 1;
- *cp++ = '\t';
+ strbuf_addch(sb, '\t');
while ((c = *msg++)) {
if (wasspace && isspace(c))
continue;
wasspace = isspace(c);
if (wasspace)
c = ' ';
- *cp++ = c;
+ strbuf_addch(sb, c);
}
- while (buf < cp && isspace(cp[-1]))
- cp--;
- *cp++ = '\n';
- return cp - buf;
+ strbuf_rtrim(sb);
}
int should_autocreate_reflog(const char *refname)
const struct object_id *new_oid,
const char *committer, const char *msg)
{
- int msglen, written;
- unsigned maxlen, len;
- char *logrec;
-
- msglen = msg ? strlen(msg) : 0;
- maxlen = strlen(committer) + msglen + 100;
- logrec = xmalloc(maxlen);
- len = xsnprintf(logrec, maxlen, "%s %s %s\n",
- oid_to_hex(old_oid),
- oid_to_hex(new_oid),
- committer);
- if (msglen)
- len += copy_reflog_msg(logrec + len - 1, msg) - 1;
-
- written = len <= maxlen ? write_in_full(fd, logrec, len) : -1;
- free(logrec);
- if (written < 0)
- return -1;
+ struct strbuf sb = STRBUF_INIT;
+ int ret = 0;
- return 0;
+ strbuf_addf(&sb, "%s %s %s", oid_to_hex(old_oid), oid_to_hex(new_oid), committer);
+ if (msg && *msg)
+ copy_reflog_msg(&sb, msg);
+ strbuf_addch(&sb, '\n');
+ if (write_in_full(fd, sb.buf, sb.len) < 0)
+ ret = -1;
+ strbuf_release(&sb);
+ return ret;
}
static int files_log_ref_write(struct files_ref_store *refs,
struct object *o;
int fd;
- o = parse_object(oid);
+ o = parse_object(the_repository, oid);
if (!o) {
strbuf_addf(err,
"trying to write ref '%s' with nonexistent object %s",
return -1;
}
fd = get_lock_file_fd(&lock->lk);
- if (write_in_full(fd, oid_to_hex(oid), GIT_SHA1_HEXSZ) < 0 ||
+ if (write_in_full(fd, oid_to_hex(oid), the_hash_algo->hexsz) < 0 ||
write_in_full(fd, &term, 1) < 0 ||
close_ref_gently(lock) < 0) {
strbuf_addf(err,
rollback_lock_file(&reflog_lock);
} else if (update &&
(write_in_full(get_lock_file_fd(&lock->lk),
- oid_to_hex(&cb.last_kept_oid), GIT_SHA1_HEXSZ) < 0 ||
+ oid_to_hex(&cb.last_kept_oid), the_hash_algo->hexsz) < 0 ||
write_str_in_full(get_lock_file_fd(&lock->lk), "\n") < 0 ||
close_ref_gently(lock) < 0)) {
status |= error("couldn't write %s",
#ifndef REFS_REFS_INTERNAL_H
#define REFS_REFS_INTERNAL_H
+#include "iterator.h"
+
/*
* Data structures and functions for the internal use of the refs
* module. Code outside of the refs module should use only the public
enum peel_status peel_object(const struct object_id *name, struct object_id *oid);
/*
- * Copy the reflog message msg to buf, which has been allocated sufficiently
- * large, while cleaning up the whitespaces. Especially, convert LF to space,
- * because reflog file is one line per entry.
+ * Copy the reflog message msg to sb while cleaning up the whitespace.
+ * In particular, convert LF to space, because the reflog file stores
+ * one entry per line.
*/
-int copy_reflog_msg(char *buf, const char *msg);
+void copy_reflog_msg(struct strbuf *sb, const char *msg);
/**
* Information needed for a single ref update. Set new_oid to the new
if (is_null_oid(oid))
return;
- commit = lookup_commit_reference_gently(oid, 1);
+ commit = lookup_commit_reference_gently(the_repository, oid, 1);
if (!commit || (commit->object.flags & TMP_MARK))
return;
commit->object.flags |= TMP_MARK;
if (is_null_oid(&ref->new_oid))
continue;
- commit = lookup_commit_reference_gently(&ref->new_oid,
+ commit = lookup_commit_reference_gently(the_repository,
+ &ref->new_oid,
1);
if (!commit)
/* not pushing a commit, which is not an error */
reject_reason = REF_STATUS_REJECT_ALREADY_EXISTS;
else if (!has_object_file(&ref->old_oid))
reject_reason = REF_STATUS_REJECT_FETCH_FIRST;
- else if (!lookup_commit_reference_gently(&ref->old_oid, 1) ||
- !lookup_commit_reference_gently(&ref->new_oid, 1))
+ else if (!lookup_commit_reference_gently(the_repository, &ref->old_oid, 1) ||
+ !lookup_commit_reference_gently(the_repository, &ref->new_oid, 1))
reject_reason = REF_STATUS_REJECT_NEEDS_FORCE;
else if (!ref_newer(&ref->new_oid, &ref->old_oid))
reject_reason = REF_STATUS_REJECT_NONFASTFORWARD;
if (refspec->exact_sha1) {
ref_map = alloc_ref(name);
get_oid_hex(name, &ref_map->old_oid);
+ ref_map->exact_oid = 1;
} else {
ref_map = get_remote_ref(remote_refs, name);
}
* Both new_commit and old_commit must be commit-ish and new_commit is descendant of
* old_commit. Otherwise we require --force.
*/
- o = deref_tag(parse_object(old_oid), NULL, 0);
+ o = deref_tag(the_repository, parse_object(the_repository, old_oid),
+ NULL, 0);
if (!o || o->type != OBJ_COMMIT)
return 0;
old_commit = (struct commit *) o;
- o = deref_tag(parse_object(new_oid), NULL, 0);
+ o = deref_tag(the_repository, parse_object(the_repository, new_oid),
+ NULL, 0);
if (!o || o->type != OBJ_COMMIT)
return 0;
new_commit = (struct commit *) o;
/* Cannot stat if what we used to build on no longer exists */
if (read_ref(base, &oid))
return -1;
- theirs = lookup_commit_reference(&oid);
+ theirs = lookup_commit_reference(the_repository, &oid);
if (!theirs)
return -1;
if (read_ref(branch->refname, &oid))
return -1;
- ours = lookup_commit_reference(&oid);
+ ours = lookup_commit_reference(the_repository, &oid);
if (!ours)
return -1;
force:1,
forced_update:1,
expect_old_sha1:1,
+ exact_oid:1,
deletion:1;
enum {
struct cache_entry *nce;
if (!ru->mode[i])
continue;
- nce = make_cache_entry(ru->mode[i], ru->oid[i].hash,
+ nce = make_cache_entry(istate,
+ ru->mode[i],
+ &ru->oid[i],
name, i + 1, 0);
if (matched)
nce->ce_flags |= CE_MATCHED;
while (tree_entry(&desc, &entry)) {
switch (object_type(entry.mode)) {
case OBJ_TREE:
- mark_tree_uninteresting(lookup_tree(entry.oid));
+ mark_tree_uninteresting(lookup_tree(the_repository, entry.oid));
break;
case OBJ_BLOB:
- mark_blob_uninteresting(lookup_blob(entry.oid));
+ mark_blob_uninteresting(lookup_blob(the_repository, entry.oid));
break;
default:
/* Subproject commit - not in this repository */
strbuf_release(&buf);
return; /* do not add the commit itself */
}
+ obj->flags |= USER_GIVEN;
add_object_array_with_path(obj, name, &revs->pending, mode, path);
}
struct object *obj;
if (get_oid("HEAD", &oid))
return;
- obj = parse_object(&oid);
+ obj = parse_object(the_repository, &oid);
if (!obj)
return;
add_pending_object(revs, obj, "HEAD");
{
struct object *object;
- object = parse_object(oid);
+ object = parse_object(the_repository, oid);
if (!object) {
if (revs->ignore_missing)
return object;
add_pending_object(revs, object, tag->tag);
if (!tag->tagged)
die("bad tag");
- object = parse_object(&tag->tagged->oid);
+ object = parse_object(the_repository, &tag->tagged->oid);
if (!object) {
if (revs->ignore_missing_links || (flags & UNINTERESTING))
return NULL;
+ if (revs->exclude_promisor_objects &&
+ is_promisor_object(&tag->tagged->oid))
+ return NULL;
die("bad object %s", oid_to_hex(&tag->tagged->oid));
}
object->flags |= flags;
{
struct all_refs_cb *cb = cb_data;
if (!is_null_oid(oid)) {
- struct object *o = parse_object(oid);
+ struct object *o = parse_object(the_repository, oid);
if (o) {
o->flags |= cb->all_flags;
/* ??? CMDLINEFLAGS ??? */
int i;
if (it->entry_count >= 0) {
- struct tree *tree = lookup_tree(&it->oid);
+ struct tree *tree = lookup_tree(the_repository, &it->oid);
add_pending_object_with_path(revs, &tree->object, "",
040000, path->buf);
}
if (S_ISGITLINK(ce->ce_mode))
continue;
- blob = lookup_blob(&ce->oid);
+ blob = lookup_blob(the_repository, &ce->oid);
if (!blob)
die("unable to add index blob to traversal");
add_pending_object_with_path(revs, &blob->object, "",
*dotdot = '\0';
}
- a_obj = parse_object(&a_oid);
- b_obj = parse_object(&b_oid);
+ a_obj = parse_object(the_repository, &a_oid);
+ b_obj = parse_object(the_repository, &b_oid);
if (!a_obj || !b_obj)
return dotdot_missing(arg, dotdot, revs, symmetric);
struct commit *a, *b;
struct commit_list *exclude;
- a = lookup_commit_reference(&a_obj->oid);
- b = lookup_commit_reference(&b_obj->oid);
+ a = lookup_commit_reference(the_repository, &a_obj->oid);
+ b = lookup_commit_reference(the_repository, &b_obj->oid);
if (!a || !b)
return dotdot_missing(arg, dotdot, revs, symmetric);
uint32_t pos,
void *unused)
{
- struct object *o = parse_object(oid);
+ struct object *o = parse_object(the_repository, oid);
o->flags |= UNINTERESTING | SEEN;
return 0;
}
#define SYMMETRIC_LEFT (1u<<8)
#define PATCHSAME (1u<<9)
#define BOTTOM (1u<<10)
+#define USER_GIVEN (1u<<25) /* given directly by the user */
#define TRACK_LINEAR (1u<<26)
-#define ALL_REV_FLAGS (((1u<<11)-1) | TRACK_LINEAR)
+#define ALL_REV_FLAGS (((1u<<11)-1) | USER_GIVEN | TRACK_LINEAR)
#define DECORATE_SHORT_REFS 1
#define DECORATE_FULL_REFS 2
* The file to keep track of how many commands were already processed (e.g.
* for the prompt).
*/
-static GIT_PATH_FUNC(rebase_path_msgnum, "rebase-merge/msgnum");
+static GIT_PATH_FUNC(rebase_path_msgnum, "rebase-merge/msgnum")
/*
* The file to keep track of how many commands are to be processed in total
* (e.g. for the prompt).
*/
-static GIT_PATH_FUNC(rebase_path_msgtotal, "rebase-merge/end");
+static GIT_PATH_FUNC(rebase_path_msgtotal, "rebase-merge/end")
/*
* The commit message that is planned to be used for any changes that
* need to be committed following a user interaction.
static struct tree *empty_tree(void)
{
- return lookup_tree(the_hash_algo->empty_tree);
+ return lookup_tree(the_repository, the_repository->hash_algo->empty_tree);
}
static int error_dirty_index(struct replay_opts *opts)
if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL))
return error(_("could not resolve HEAD commit"));
- head_commit = lookup_commit(&head_oid);
+ head_commit = lookup_commit(the_repository, &head_oid);
/*
* If head_commit is NULL, check_commit, called from
struct strbuf author_ident = STRBUF_INIT;
struct strbuf committer_ident = STRBUF_INIT;
- commit = lookup_commit(oid);
+ commit = lookup_commit(the_repository, oid);
if (!commit)
die(_("couldn't look up newly created commit"));
if (parse_commit(commit))
if (get_oid("HEAD", &oid)) {
current_head = NULL;
} else {
- current_head = lookup_commit_reference(&oid);
+ current_head = lookup_commit_reference(the_repository, &oid);
if (!current_head)
return error(_("could not parse HEAD"));
if (oidcmp(&oid, &current_head->object.oid)) {
if (get_oid("HEAD", &head))
return error(_("need a HEAD to fixup"));
- if (!(head_commit = lookup_commit_reference(&head)))
+ if (!(head_commit = lookup_commit_reference(the_repository, &head)))
return error(_("could not read HEAD"));
if (!(head_message = get_commit_buffer(head_commit, NULL)))
return error(_("could not read HEAD's commit message"));
if (prepare_revision_walk(opts->revs))
return error(_("revision walk setup failed"));
- if (!opts->revs->commits)
- return error(_("empty commit set passed"));
return 0;
}
if (status < 0)
return -1;
- item->commit = lookup_commit_reference(&commit_oid);
+ item->commit = lookup_commit_reference(the_repository, &commit_oid);
return !item->commit;
}
short_commit_name(commit), subject_len, subject);
unuse_commit_buffer(commit, commit_buffer);
}
+
+ if (!todo_list->nr)
+ return error(_("empty commit set passed"));
+
return 0;
}
fprintf(stderr, "Executing: %s\n", command_line);
child_argv[0] = command_line;
argv_array_pushf(&child_env, "GIT_DIR=%s", absolute_path(get_git_dir()));
+ argv_array_pushf(&child_env, "GIT_WORK_TREE=%s",
+ absolute_path(get_git_work_tree()));
status = run_command_v_opt_cd_env(child_argv, RUN_USING_SHELL, NULL,
child_env.argv);
return ret;
}
+static struct commit *lookup_label(const char *label, int len,
+ struct strbuf *buf)
+{
+ struct commit *commit;
+
+ strbuf_reset(buf);
+ strbuf_addf(buf, "refs/rewritten/%.*s", len, label);
+ commit = lookup_commit_reference_by_name(buf->buf);
+ if (!commit) {
+ /* fall back to non-rewritten ref or commit */
+ strbuf_splice(buf, 0, strlen("refs/rewritten/"), "", 0);
+ commit = lookup_commit_reference_by_name(buf->buf);
+ }
+
+ if (!commit)
+ error(_("could not resolve '%s'"), buf->buf);
+
+ return commit;
+}
+
static int do_merge(struct commit *commit, const char *arg, int arg_len,
int flags, struct replay_opts *opts)
{
struct strbuf ref_name = STRBUF_INIT;
struct commit *head_commit, *merge_commit, *i;
struct commit_list *bases, *j, *reversed = NULL;
+ struct commit_list *to_merge = NULL, **tail = &to_merge;
struct merge_options o;
- int merge_arg_len, oneline_offset, can_fast_forward, ret;
+ int merge_arg_len, oneline_offset, can_fast_forward, ret, k;
static struct lock_file lock;
const char *p;
goto leave_merge;
}
- oneline_offset = arg_len;
- merge_arg_len = strcspn(arg, " \t\n");
- p = arg + merge_arg_len;
- p += strspn(p, " \t\n");
- if (*p == '#' && (!p[1] || isspace(p[1]))) {
- p += 1 + strspn(p + 1, " \t\n");
- oneline_offset = p - arg;
- } else if (p - arg < arg_len)
- BUG("octopus merges are not supported yet: '%s'", p);
-
- strbuf_addf(&ref_name, "refs/rewritten/%.*s", merge_arg_len, arg);
- merge_commit = lookup_commit_reference_by_name(ref_name.buf);
- if (!merge_commit) {
- /* fall back to non-rewritten ref or commit */
- strbuf_splice(&ref_name, 0, strlen("refs/rewritten/"), "", 0);
- merge_commit = lookup_commit_reference_by_name(ref_name.buf);
+ /*
+ * For octopus merges, the arg starts with the list of revisions to be
+ * merged. The list is optionally followed by '#' and the oneline.
+ */
+ merge_arg_len = oneline_offset = arg_len;
+ for (p = arg; p - arg < arg_len; p += strspn(p, " \t\n")) {
+ if (!*p)
+ break;
+ if (*p == '#' && (!p[1] || isspace(p[1]))) {
+ p += 1 + strspn(p + 1, " \t\n");
+ oneline_offset = p - arg;
+ break;
+ }
+ k = strcspn(p, " \t\n");
+ if (!k)
+ continue;
+ merge_commit = lookup_label(p, k, &ref_name);
+ if (!merge_commit) {
+ ret = error(_("unable to parse '%.*s'"), k, p);
+ goto leave_merge;
+ }
+ tail = &commit_list_insert(merge_commit, tail)->next;
+ p += k;
+ merge_arg_len = p - arg;
}
- if (!merge_commit) {
- ret = error(_("could not resolve '%s'"), ref_name.buf);
+ if (!to_merge) {
+ ret = error(_("nothing to merge: '%.*s'"), arg_len, arg);
goto leave_merge;
}
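The parsing loop above accepts any number of whitespace-separated labels before the optional "# oneline", which is what makes an octopus merge representable on a single todo-list "merge" line. Below is a standalone sketch of the same strspn()/strcspn() walk, with a hypothetical split_merge_arg() that merely prints the pieces instead of resolving them through lookup_label():

/*
 * Standalone illustration (hypothetical helper, not code from this tree)
 * of tokenizing a "merge" argument: a list of labels, optionally
 * followed by "# oneline".
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void split_merge_arg(const char *arg)
{
	size_t arg_len = strlen(arg);
	const char *p;

	for (p = arg; (size_t)(p - arg) < arg_len; p += strspn(p, " \t\n")) {
		size_t k;

		if (!*p)
			break;
		if (*p == '#' && (!p[1] || isspace((unsigned char)p[1]))) {
			p += 1 + strspn(p + 1, " \t\n");
			printf("oneline: '%s'\n", p);
			break;
		}
		k = strcspn(p, " \t\n");
		if (!k)
			continue;
		printf("label: '%.*s'\n", (int)k, p);
		p += k;
	}
}

int main(void)
{
	split_merge_arg("first-branch second-branch # Merge the topics");
	return 0;
}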
* "[new root]", let's simply fast-forward to the merge head.
*/
rollback_lock_file(&lock);
- ret = fast_forward_to(&merge_commit->object.oid,
- &head_commit->object.oid, 0, opts);
+ if (to_merge->next)
+ ret = error(_("octopus merge cannot be executed on "
+ "top of a [new root]"));
+ else
+ ret = fast_forward_to(&to_merge->item->object.oid,
+ &head_commit->object.oid, 0,
+ opts);
goto leave_merge;
}
p = arg + oneline_offset;
len = arg_len - oneline_offset;
} else {
- strbuf_addf(&buf, "Merge branch '%.*s'",
+ strbuf_addf(&buf, "Merge %s '%.*s'",
+ to_merge->next ? "branches" : "branch",
merge_arg_len, arg);
p = buf.buf;
len = buf.len;
&head_commit->object.oid);
/*
- * If the merge head is different from the original one, we cannot
+ * If any merge head is different from the original one, we cannot
* fast-forward.
*/
if (can_fast_forward) {
- struct commit_list *second_parent = commit->parents->next;
+ struct commit_list *p = commit->parents->next;
- if (second_parent && !second_parent->next &&
- oidcmp(&merge_commit->object.oid,
- &second_parent->item->object.oid))
+ for (j = to_merge; j && p; j = j->next, p = p->next)
+ if (oidcmp(&j->item->object.oid,
+ &p->item->object.oid)) {
+ can_fast_forward = 0;
+ break;
+ }
+ /*
+ * If the number of merge heads differs from the original merge
+ * commit, we cannot fast-forward.
+ */
+ if (j || p)
can_fast_forward = 0;
}
- if (can_fast_forward && commit->parents->next &&
- !commit->parents->next->next &&
- !oidcmp(&commit->parents->next->item->object.oid,
- &merge_commit->object.oid)) {
+ if (can_fast_forward) {
rollback_lock_file(&lock);
ret = fast_forward_to(&commit->object.oid,
&head_commit->object.oid, 0, opts);
goto leave_merge;
}
+ if (to_merge->next) {
+ /* Octopus merge */
+ struct child_process cmd = CHILD_PROCESS_INIT;
+
+ if (read_env_script(&cmd.env_array)) {
+ const char *gpg_opt = gpg_sign_opt_quoted(opts);
+
+ ret = error(_(staged_changes_advice), gpg_opt, gpg_opt);
+ goto leave_merge;
+ }
+
+ cmd.git_cmd = 1;
+ argv_array_push(&cmd.args, "merge");
+ argv_array_push(&cmd.args, "-s");
+ argv_array_push(&cmd.args, "octopus");
+ argv_array_push(&cmd.args, "--no-edit");
+ argv_array_push(&cmd.args, "--no-ff");
+ argv_array_push(&cmd.args, "--no-log");
+ argv_array_push(&cmd.args, "--no-stat");
+ argv_array_push(&cmd.args, "-F");
+ argv_array_push(&cmd.args, git_path_merge_msg(the_repository));
+ if (opts->gpg_sign)
+ argv_array_push(&cmd.args, opts->gpg_sign);
+
+ /* Add the tips to be merged */
+ for (j = to_merge; j; j = j->next)
+ argv_array_push(&cmd.args,
+ oid_to_hex(&j->item->object.oid));
+
+ strbuf_release(&ref_name);
+ unlink(git_path_cherry_pick_head(the_repository));
+ rollback_lock_file(&lock);
+
+ ret = run_command(&cmd);
+
+ /* force re-reading of the cache */
+ if (!ret && (discard_cache() < 0 || read_cache() < 0))
+ ret = error(_("could not read index"));
+ goto leave_merge;
+ }
+
+ merge_commit = to_merge->item;
write_message(oid_to_hex(&merge_commit->object.oid), GIT_SHA1_HEXSZ,
git_path_merge_head(the_repository), 0);
write_message("no-ff", 5, git_path_merge_mode(the_repository), 0);
leave_merge:
strbuf_release(&ref_name);
rollback_lock_file(&lock);
+ free_commit_list(to_merge);
return ret;
}
continue;
if (!get_oid(name, &oid)) {
- if (!lookup_commit_reference_gently(&oid, 1)) {
+ if (!lookup_commit_reference_gently(the_repository, &oid, 1)) {
enum object_type type = oid_object_info(the_repository,
&oid,
NULL);
if (prepare_revision_walk(opts->revs))
return error(_("revision walk setup failed"));
cmit = get_revision(opts->revs);
- if (!cmit || get_revision(opts->revs))
- return error("BUG: expected exactly one commit from walk");
+ if (!cmit)
+ return error(_("empty commit set passed"));
+ if (get_revision(opts->revs))
+ BUG("unexpected extra commit from walk");
return single_pick(cmit, opts);
}
*/
while ((commit = get_revision(revs))) {
struct commit_list *to_merge;
- int is_octopus;
const char *p1, *p2;
struct object_id *oid;
int is_empty;
continue;
}
- is_octopus = to_merge && to_merge->next;
-
- if (is_octopus)
- BUG("Octopus merges not yet supported");
-
/* Create a label */
strbuf_reset(&label);
if (skip_prefix(oneline.buf, "Merge ", &p1) &&
strbuf_addf(&buf, "%s -C %s",
cmd_merge, oid_to_hex(&commit->object.oid));
- /* label the tip of merged branch */
- oid = &to_merge->item->object.oid;
- strbuf_addch(&buf, ' ');
+ /* label the tips of merged branches */
+ for (; to_merge; to_merge = to_merge->next) {
+ oid = &to_merge->item->object.oid;
+ strbuf_addch(&buf, ' ');
+
+ if (!oidset_contains(&interesting, oid)) {
+ strbuf_addstr(&buf, label_oid(oid, NULL,
+ &state));
+ continue;
+ }
- if (!oidset_contains(&interesting, oid))
- strbuf_addstr(&buf, label_oid(oid, NULL, &state));
- else {
tips_tail = &commit_list_insert(to_merge->item,
tips_tail)->next;
entry = oidmap_get(&state.commit2label, &commit->object.oid);
if (entry)
- fprintf(out, "\n# Branch %s\n", entry->string);
+ fprintf(out, "\n%c Branch %s\n", comment_line_char, entry->string);
else
fprintf(out, "\n");
int flag, void *cb_data)
{
FILE *fp = cb_data;
- struct object *o = parse_object(oid);
+ struct object *o = parse_object(the_repository, oid);
if (!o)
return -1;
return -1;
if (o->type == OBJ_TAG) {
- o = deref_tag(o, path, 0);
+ o = deref_tag(the_repository, o, path, 0);
if (o)
if (fprintf(fp, "%s %s^{}\n",
oid_to_hex(&o->oid), path) < 0)
static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1)
{
int i;
- for (i = 0; i < 20; i++) {
+ for (i = 0; i < the_hash_algo->rawsz; i++) {
static char hex[] = "0123456789abcdef";
unsigned int val = sha1[i];
strbuf_addch(buf, hex[val >> 4]);
}
ref_length = strlen(ref_type);
- if (ref_length + GIT_SHA1_HEXSZ > isize ||
+ if (ref_length + the_hash_algo->hexsz > isize ||
memcmp(buffer, ref_type, ref_length) ||
get_oid_hex((char *) buffer + ref_length, &actual_oid)) {
free(buffer);
{
struct commit c;
memset(&c, 0, sizeof(c));
- if (parse_commit_buffer(&c, buf, size, 0))
+ if (parse_commit_buffer(the_repository, &c, buf, size, 0))
die("corrupt commit");
}
{
struct tag t;
memset(&t, 0, sizeof(t));
- if (parse_tag_buffer(&t, buf, size))
+ if (parse_tag_buffer(the_repository, &t, buf, size))
die("corrupt tag");
}
namelen = strlen(de->d_name);
strbuf_setlen(path, baselen);
strbuf_add(path, de->d_name, namelen);
- if (namelen == GIT_SHA1_HEXSZ - 2 &&
+ if (namelen == the_hash_algo->hexsz - 2 &&
!hex_to_bytes(oid.hash + 1, de->d_name,
- GIT_SHA1_RAWSZ - 1)) {
+ the_hash_algo->rawsz - 1)) {
if (obj_cb) {
r = obj_cb(&oid, path->buf, data);
if (r)
return 0;
/* We need to do this the hard way... */
- obj = deref_tag(parse_object(oid), NULL, 0);
+ obj = deref_tag(the_repository, parse_object(the_repository, oid),
+ NULL, 0);
if (obj && obj->type == OBJ_COMMIT)
return 1;
return 0;
return 0;
/* We need to do this the hard way... */
- obj = deref_tag(parse_object(oid), NULL, 0);
+ obj = deref_tag(the_repository, parse_object(the_repository, oid),
+ NULL, 0);
if (obj && (obj->type == OBJ_TREE || obj->type == OBJ_COMMIT))
return 1;
return 0;
{
int i;
- if (len < MINIMUM_ABBREV || len > GIT_SHA1_HEXSZ)
+ if (len < MINIMUM_ABBREV || len > the_hash_algo->hexsz)
return -1;
memset(ds, 0, sizeof(*ds));
type = oid_object_info(the_repository, oid, NULL);
if (type == OBJ_COMMIT) {
- struct commit *commit = lookup_commit(oid);
+ struct commit *commit = lookup_commit(the_repository, oid);
if (commit) {
struct pretty_print_context pp = {0};
pp.date_mode.type = DATE_SHORT;
format_commit_message(commit, " %ad - %s", &desc, &pp);
}
} else if (type == OBJ_TAG) {
- struct tag *tag = lookup_tag(oid);
+ struct tag *tag = lookup_tag(the_repository, oid);
if (!parse_tag(tag) && tag->tag)
strbuf_addf(&desc, " %s", tag->tag);
}
struct disambiguate_state ds;
struct min_abbrev_data mad;
struct object_id oid_ret;
+ const unsigned hexsz = the_hash_algo->hexsz;
+
if (len < 0) {
unsigned long count = approximate_object_count();
/*
}
oid_to_hex_r(hex, oid);
- if (len == GIT_SHA1_HEXSZ || !len)
- return GIT_SHA1_HEXSZ;
+ if (len == hexsz || !len)
+ return hexsz;
mad.init_len = len;
mad.cur_len = len;
int refs_found = 0;
int at, reflog_len, nth_prior = 0;
- if (len == GIT_SHA1_HEXSZ && !get_oid_hex(str, oid)) {
+ if (len == the_hash_algo->hexsz && !get_oid_hex(str, oid)) {
if (warn_ambiguous_refs && warn_on_object_refname_ambiguity) {
refs_found = dwim_ref(str, len, &tmp_oid, &real_ref);
if (refs_found > 0) {
int detached;
if (interpret_nth_prior_checkout(str, len, &buf) > 0) {
- detached = (buf.len == GIT_SHA1_HEXSZ && !get_oid_hex(buf.buf, oid));
+ detached = (buf.len == the_hash_algo->hexsz && !get_oid_hex(buf.buf, oid));
strbuf_release(&buf);
if (detached)
return 0;
if (ret)
return ret;
- commit = lookup_commit_reference(&oid);
+ commit = lookup_commit_reference(the_repository, &oid);
if (parse_commit(commit))
return -1;
if (!idx) {
ret = get_oid_1(name, len, &oid, GET_OID_COMMITTISH);
if (ret)
return ret;
- commit = lookup_commit_reference(&oid);
+ commit = lookup_commit_reference(the_repository, &oid);
if (!commit)
return -1;
if (name && !namelen)
namelen = strlen(name);
while (1) {
- if (!o || (!o->parsed && !parse_object(&o->oid)))
+ if (!o || (!o->parsed && !parse_object(the_repository, &o->oid)))
return NULL;
if (expected_type == OBJ_ANY || o->type == expected_type)
return o;
if (get_oid_1(name, sp - name - 2, &outer, lookup_flags))
return -1;
- o = parse_object(&outer);
+ o = parse_object(the_repository, &outer);
if (!o)
return -1;
if (!expected_type) {
- o = deref_tag(o, name, sp - name - 2);
- if (!o || (!o->parsed && !parse_object(&o->oid)))
+ o = deref_tag(the_repository, o, name, sp - name - 2);
+ if (!o || (!o->parsed && !parse_object(the_repository, &o->oid)))
return -1;
oidcpy(oid, &o->oid);
return 0;
int flag, void *cb_data)
{
struct commit_list **list = cb_data;
- struct object *object = parse_object(oid);
+ struct object *object = parse_object(the_repository, oid);
if (!object)
return 0;
if (object->type == OBJ_TAG) {
- object = deref_tag(object, path, strlen(path));
+ object = deref_tag(the_repository, object, path,
+ strlen(path));
if (!object)
return 0;
}
int matches;
commit = pop_most_recent_commit(&list, ONELINE_SEEN);
- if (!parse_object(&commit->object.oid))
+ if (!parse_object(the_repository, &commit->object.oid))
continue;
buf = get_commit_buffer(commit, NULL);
p = strstr(buf, "\n\n");
}
if (st)
return st;
- one = lookup_commit_reference_gently(&oid_tmp, 0);
+ one = lookup_commit_reference_gently(the_repository, &oid_tmp, 0);
if (!one)
return -1;
if (get_oid_committish(dots[3] ? (dots + 3) : "HEAD", &oid_tmp))
return -1;
- two = lookup_commit_reference_gently(&oid_tmp, 0);
+ two = lookup_commit_reference_gently(the_repository, &oid_tmp, 0);
if (!two)
return -1;
mbs = get_merge_bases(one, two);
struct commit_list *list = NULL;
for_each_ref(handle_one_ref, &list);
+ head_ref(handle_one_ref, &list);
commit_list_sort_by_date(&list);
return get_oid_oneline(name + 2, oid, list);
}
{
struct commit_graft *graft =
xmalloc(sizeof(struct commit_graft));
- struct commit *commit = lookup_commit(oid);
+ struct commit *commit = lookup_commit(the_repository, oid);
oidcpy(&graft->oid, oid);
graft->nr_parent = -1;
if (i < heads->nr) {
int **depth_slot;
commit = (struct commit *)
- deref_tag(heads->objects[i++].item, NULL, 0);
+ deref_tag(the_repository,
+ heads->objects[i++].item,
+ NULL, 0);
if (!commit || commit->object.type != OBJ_COMMIT) {
commit = NULL;
continue;
if (graft->nr_parent != -1)
return 0;
if (data->flags & SEEN_ONLY) {
- struct commit *c = lookup_commit(&graft->oid);
+ struct commit *c = lookup_commit(the_repository, &graft->oid);
if (!c || !(c->object.flags & SEEN)) {
if (data->flags & VERBOSE)
printf("Removing %s from .git/shallow\n",
struct commit_list *head = NULL;
int bitmap_nr = DIV_ROUND_UP(info->nr_bits, 32);
size_t bitmap_size = st_mult(sizeof(uint32_t), bitmap_nr);
- struct commit *c = lookup_commit_reference_gently(oid, 1);
+ struct commit *c = lookup_commit_reference_gently(the_repository, oid,
+ 1);
uint32_t *tmp; /* to be freed before return */
uint32_t *bitmap;
static int mark_uninteresting(const char *refname, const struct object_id *oid,
int flags, void *cb_data)
{
- struct commit *commit = lookup_commit_reference_gently(oid, 1);
+ struct commit *commit = lookup_commit_reference_gently(the_repository,
+ oid, 1);
if (!commit)
return 0;
commit->object.flags |= UNINTERESTING;
/* Mark potential bottoms so we won't go out of bound */
for (i = 0; i < nr_shallow; i++) {
- struct commit *c = lookup_commit(&oid[shallow[i]]);
+ struct commit *c = lookup_commit(the_repository,
+ &oid[shallow[i]]);
c->object.flags |= BOTTOM;
}
int bitmap_size = DIV_ROUND_UP(pi.nr_bits, 32) * sizeof(uint32_t);
memset(used, 0, sizeof(*used) * info->shallow->nr);
for (i = 0; i < nr_shallow; i++) {
- const struct commit *c = lookup_commit(&oid[shallow[i]]);
+ const struct commit *c = lookup_commit(the_repository,
+ &oid[shallow[i]]);
uint32_t **map = ref_bitmap_at(&pi.ref_bitmap, c);
if (*map)
used[shallow[i]] = xmemdupz(*map, bitmap_size);
{
struct commit_array *ca = cb_data;
ALLOC_GROW(ca->commits, ca->nr + 1, ca->alloc);
- ca->commits[ca->nr] = lookup_commit_reference_gently(oid, 1);
+ ca->commits[ca->nr] = lookup_commit_reference_gently(the_repository,
+ oid, 1);
if (ca->commits[ca->nr])
ca->nr++;
return 0;
for (i = dst = 0; i < info->nr_theirs; i++) {
if (i != dst)
info->theirs[dst] = info->theirs[i];
- c = lookup_commit(&oid[info->theirs[i]]);
+ c = lookup_commit(the_repository, &oid[info->theirs[i]]);
bitmap = ref_bitmap_at(ref_bitmap, c);
if (!*bitmap)
continue;
for (i = dst = 0; i < info->nr_ours; i++) {
if (i != dst)
info->ours[dst] = info->ours[i];
- c = lookup_commit(&oid[info->ours[i]]);
+ c = lookup_commit(the_repository, &oid[info->ours[i]]);
bitmap = ref_bitmap_at(ref_bitmap, c);
if (!*bitmap)
continue;
int delayed_reachability_test(struct shallow_info *si, int c)
{
if (si->need_reachability_test[c]) {
- struct commit *commit = lookup_commit(&si->shallow->oid[c]);
+ struct commit *commit = lookup_commit(the_repository,
+ &si->shallow->oid[c]);
if (!si->commits) {
struct commit_array ca;
int i;
/*
- * do not delete old si->base, its index entries may be shared
- * with istate->cache[]. Accept a bit of leaking here because
- * this code is only used by short-lived update-index.
+ * If there was a previous base index, then transfer ownership of allocated
+ * entries to the parent index.
*/
+ if (si->base &&
+ si->base->ce_mem_pool) {
+
+ if (!istate->ce_mem_pool)
+ mem_pool_init(&istate->ce_mem_pool, 0);
+
+ mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
+ }
+
si->base = xcalloc(1, sizeof(*si->base));
si->base->version = istate->version;
/* zero timestamp disables racy test in ce_write_index() */
si->base->timestamp = istate->timestamp;
ALLOC_GROW(si->base->cache, istate->cache_nr, si->base->cache_alloc);
si->base->cache_nr = istate->cache_nr;
+
+ /*
+ * The mem_pool needs to move with the allocated entries.
+ */
+ si->base->ce_mem_pool = istate->ce_mem_pool;
+ istate->ce_mem_pool = NULL;
+
COPY_ARRAY(si->base->cache, istate->cache, istate->cache_nr);
mark_base_index_entries(si->base);
for (i = 0; i < si->base->cache_nr; i++)
src->ce_flags |= CE_UPDATE_IN_BASE;
src->ce_namelen = dst->ce_namelen;
copy_cache_entry(dst, src);
- free(src);
+ discard_cache_entry(src);
si->nr_replacements++;
}
base->ce_flags = base_flags;
if (ret)
ce->ce_flags |= CE_UPDATE_IN_BASE;
- free(base);
+ discard_cache_entry(base);
si->base->cache[ce->index - 1] = ce;
}
for (i = 0; i < si->base->cache_nr; i++) {
ce == istate->split_index->base->cache[ce->index - 1])
ce->ce_flags |= CE_REMOVE;
else
- free(ce);
+ discard_cache_entry(ce);
}
void replace_index_entry_in_base(struct index_state *istate,
old_entry->index <= istate->split_index->base->cache_nr) {
new_entry->index = old_entry->index;
if (old_entry != istate->split_index->base->cache[new_entry->index - 1])
- free(istate->split_index->base->cache[new_entry->index - 1]);
+ discard_cache_entry(istate->split_index->base->cache[new_entry->index - 1]);
istate->split_index->base->cache[new_entry->index - 1] = new_entry;
}
}
{
if (istate->split_index) {
/*
- * can't discard_split_index(&the_index); because that
- * will destroy split_index->base->cache[], which may
- * be shared with the_index.cache[]. So yeah we're
- * leaking a bit here.
+ * When removing the split index, we need to move
+ * ownership of the mem_pool associated with the
+ * base index to the main index. There may be cache entries
+ * allocated from the base's memory pool that are shared with
+ * the_index.cache[].
*/
- istate->split_index = NULL;
+ mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
+
+ /*
+ * The split index no longer owns the mem_pool backing
+ * its cache array. As we are discarding this index,
+ * mark the index as having no cache entries, so it
+ * will not attempt to clean up the cache entries or
+ * validate them.
+ */
+ if (istate->split_index->base)
+ istate->split_index->base->cache_nr = 0;
+
+ /*
+ * We can discard the split index because its
+ * memory pool has been incorporated into the
+ * memory pool associated with the_index.
+ */
+ discard_split_index(istate);
+
istate->cache_changed |= SOMETHING_CHANGED;
}
}
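Both split-index hunks above rely on the same property of mem_pool_combine(): the destination pool only adopts the source pool's blocks, so cache entries previously handed out from the base index's pool stay valid while ownership moves to the main index. A toy sketch of that idea (hypothetical pool_alloc()/pool_combine(), not git's mem-pool API):

/*
 * Toy illustration of combining memory pools: the destination adopts the
 * source's blocks; nothing is copied or freed, so pointers previously
 * returned from the source pool remain valid.  (Blocks are intentionally
 * leaked at exit for brevity.)
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct block {
	struct block *next;
	char data[256];
	size_t used;
};

struct pool {
	struct block *blocks;
};

static void *pool_alloc(struct pool *p, size_t size)
{
	struct block *b = p->blocks;

	if (!b || b->used + size > sizeof(b->data)) {
		b = calloc(1, sizeof(*b));
		b->next = p->blocks;
		p->blocks = b;
	}
	b->used += size;
	return b->data + b->used - size;
}

/* move every block of src into dst; src ends up empty */
static void pool_combine(struct pool *dst, struct pool *src)
{
	while (src->blocks) {
		struct block *b = src->blocks;
		src->blocks = b->next;
		b->next = dst->blocks;
		dst->blocks = b;
	}
}

int main(void)
{
	struct pool base = { NULL }, main_index = { NULL };
	char *entry = pool_alloc(&base, 6);

	memcpy(entry, "hello", 6);
	pool_combine(&main_index, &base);	/* ownership moves */
	printf("%s\n", entry);			/* still valid: prints "hello" */
	return 0;
}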
sb->buf[sb->len] = '\0';
}
+void strbuf_trim_trailing_newline(struct strbuf *sb)
+{
+ if (sb->len > 0 && sb->buf[sb->len - 1] == '\n') {
+ if (--sb->len > 0 && sb->buf[sb->len - 1] == '\r')
+ --sb->len;
+ sb->buf[sb->len] = '\0';
+ }
+}
+
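A small usage sketch for the new helper, assuming this tree's strbuf.h; in contrast to strbuf_rtrim(), it removes at most one trailing LF or CRLF and leaves other trailing whitespace intact:

/* Usage sketch only; relies on the strbuf API from this tree. */
#include "strbuf.h"

static void demo_trim_trailing_newline(void)
{
	struct strbuf sb = STRBUF_INIT;

	strbuf_addstr(&sb, "subject \r\n");
	strbuf_trim_trailing_newline(&sb);	/* sb.buf is now "subject " */

	strbuf_reset(&sb);
	strbuf_addstr(&sb, "no newline");
	strbuf_trim_trailing_newline(&sb);	/* unchanged */

	strbuf_release(&sb);
}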
void strbuf_ltrim(struct strbuf *sb)
{
char *b = sb->buf;
int abbrev_len)
{
int r;
- strbuf_grow(sb, GIT_SHA1_HEXSZ + 1);
+ strbuf_grow(sb, GIT_MAX_HEXSZ + 1);
r = find_unique_abbrev_r(sb->buf + sb->len, oid, abbrev_len);
strbuf_setlen(sb, sb->len + r);
}
/* Strip trailing directory separators */
extern void strbuf_trim_trailing_dir_sep(struct strbuf *);
+/* Strip trailing LF or CR/LF */
+extern void strbuf_trim_trailing_newline(struct strbuf *sb);
+
/**
* Replace the contents of the strbuf with a reencoded form. Returns -1
* on error, 0 on success.
list->strdup_strings ? xstrdup(string) : (char *)string);
}
+/*
+ * Encapsulate the compare function pointer because ISO C99 forbids
+ * casting from void * to a function pointer and vice versa.
+ */
+struct string_list_sort_ctx
+{
+ compare_strings_fn cmp;
+};
+
static int cmp_items(const void *a, const void *b, void *ctx)
{
- compare_strings_fn cmp = ctx;
+ struct string_list_sort_ctx *sort_ctx = ctx;
const struct string_list_item *one = a;
const struct string_list_item *two = b;
- return cmp(one->string, two->string);
+ return sort_ctx->cmp(one->string, two->string);
}
void string_list_sort(struct string_list *list)
{
- QSORT_S(list->items, list->nr, cmp_items,
- list->cmp ? list->cmp : strcmp);
+ struct string_list_sort_ctx sort_ctx = {list->cmp ? list->cmp : strcmp};
+
+ QSORT_S(list->items, list->nr, cmp_items, &sort_ctx);
}
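The wrapper struct exists because QSORT_S() hands the callback an opaque void * context, and (as the comment above notes) ISO C99 forbids converting between void * and a function pointer. A standalone sketch of the same plumbing, with a hypothetical sort_with_ctx() standing in for QSORT_S() and a cmp_items() mirroring the one above:

/*
 * Standalone illustration: the user-supplied comparison function travels
 * to the qsort_s()-style callback inside a small context struct, so no
 * function pointer is ever squeezed through a void *.
 */
#include <stdio.h>
#include <string.h>

typedef int (*compare_strings_fn)(const char *, const char *);

struct sort_ctx {
	compare_strings_fn cmp;
};

static int cmp_items(const void *a, const void *b, void *ctx)
{
	struct sort_ctx *sctx = ctx;
	return sctx->cmp(*(const char *const *)a, *(const char *const *)b);
}

/* minimal insertion sort taking a context pointer, standing in for QSORT_S */
static void sort_with_ctx(const char **items, size_t nr,
			  int (*cmp)(const void *, const void *, void *),
			  void *ctx)
{
	size_t i, j;

	for (i = 1; i < nr; i++)
		for (j = i; j && cmp(&items[j - 1], &items[j], ctx) > 0; j--) {
			const char *tmp = items[j];
			items[j] = items[j - 1];
			items[j - 1] = tmp;
		}
}

int main(void)
{
	const char *items[] = { "banana", "Apple", "cherry" };
	struct sort_ctx sctx = { strcmp };
	size_t i;

	sort_with_ctx(items, 3, cmp_items, &sctx);
	for (i = 0; i < 3; i++)
		printf("%s\n", items[i]);	/* Apple, banana, cherry */
	return 0;
}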
struct string_list_item *unsorted_string_list_lookup(struct string_list *list,
parameter.gitmodules_oid = &oid;
parameter.overwrite = 0;
git_config_from_mem(parse_config, CONFIG_ORIGIN_SUBMODULE_BLOB, rev.buf,
- config, config_size, &parameter);
+ config, config_size, &parameter, NULL);
strbuf_release(&rev);
free(config);
* Attempt to lookup the commit references, and determine if this is
* a fast forward or fast backwards update.
*/
- *left = lookup_commit_reference(one);
- *right = lookup_commit_reference(two);
+ *left = lookup_commit_reference(the_repository, one);
+ *right = lookup_commit_reference(the_repository, two);
/*
* Warn about missing commits in the submodule project, but only if
argv_array_push(&cp.args, new_head ? new_head : empty_tree_oid_hex());
if (run_command(&cp)) {
- ret = -1;
+ ret = error(_("Submodule '%s' could not be updated."), path);
goto out;
}
/trash directory*
/test-results
/.prove
+/chainlinttmp
ifdef TEST_OUTPUT_DIRECTORY
TEST_RESULTS_DIRECTORY = $(TEST_OUTPUT_DIRECTORY)/test-results
+CHAINLINTTMP = $(TEST_OUTPUT_DIRECTORY)/chainlinttmp
else
TEST_RESULTS_DIRECTORY = test-results
+CHAINLINTTMP = chainlinttmp
endif
# Shell quote;
TEST_SHELL_PATH_SQ = $(subst ','\'',$(TEST_SHELL_PATH))
PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH))
TEST_RESULTS_DIRECTORY_SQ = $(subst ','\'',$(TEST_RESULTS_DIRECTORY))
+CHAINLINTTMP_SQ = $(subst ','\'',$(CHAINLINTTMP))
T = $(sort $(wildcard t[0-9][0-9][0-9][0-9]-*.sh))
TGITWEB = $(sort $(wildcard t95[0-9][0-9]-*.sh))
THELPERS = $(sort $(filter-out $(T),$(wildcard *.sh)))
+CHAINLINTTESTS = $(sort $(patsubst chainlint/%.test,%,$(wildcard chainlint/*.test)))
+CHAINLINT = sed -f chainlint.sed
all: $(DEFAULT_TEST_TARGET)
-test: pre-clean $(TEST_LINT)
+test: pre-clean check-chainlint $(TEST_LINT)
$(MAKE) aggregate-results-and-cleanup
failed:
sed -n 's/\.counts$$/.sh/p') && \
test -z "$$failed" || $(MAKE) $$failed
-prove: pre-clean $(TEST_LINT)
+prove: pre-clean check-chainlint $(TEST_LINT)
@echo "*** prove ***"; $(PROVE) --exec '$(TEST_SHELL_PATH_SQ)' $(GIT_PROVE_OPTS) $(T) :: $(GIT_TEST_OPTS)
$(MAKE) clean-except-prove-cache
pre-clean:
$(RM) -r '$(TEST_RESULTS_DIRECTORY_SQ)'
-clean-except-prove-cache:
+clean-except-prove-cache: clean-chainlint
$(RM) -r 'trash directory'.* '$(TEST_RESULTS_DIRECTORY_SQ)'
$(RM) -r valgrind/bin
clean: clean-except-prove-cache
$(RM) .prove
+clean-chainlint:
+ $(RM) -r '$(CHAINLINTTMP_SQ)'
+
+check-chainlint:
+ @mkdir -p '$(CHAINLINTTMP_SQ)' && \
+ err=0 && \
+ for i in $(CHAINLINTTESTS); do \
+ $(CHAINLINT) <chainlint/$$i.test | \
+ sed -e '/^# LINT: /d' >'$(CHAINLINTTMP_SQ)'/$$i.actual && \
+ diff -u chainlint/$$i.expect '$(CHAINLINTTMP_SQ)'/$$i.actual || err=1; \
+ done && exit $$err
+
test-lint: test-lint-duplicates test-lint-executable test-lint-shell-syntax \
test-lint-filenames
perf:
$(MAKE) -C perf/ all
-.PHONY: pre-clean $(T) aggregate-results clean valgrind perf
+.PHONY: pre-clean $(T) aggregate-results clean valgrind perf check-chainlint clean-chainlint
test_expect_success 'blame -L ,Y (Y == nlines + 1)' '
n=$(expr $(wc -l <file) + 2) &&
- test_must_fail $PROG -L,$n file
+ check_count -L,$n A 1 B 1 B1 1 B2 1 "A U Thor" 1 C 1 D 1 E 1
'
test_expect_success 'blame -L ,Y (Y > nlines)' '
- test_must_fail $PROG -L,12345 file
+ check_count -L,12345 A 1 B 1 B1 1 B2 1 "A U Thor" 1 C 1 D 1 E 1
'
test_expect_success 'blame -L multiple (disjoint)' '
--- /dev/null
+#------------------------------------------------------------------------------
+# Detect broken &&-chains in tests.
+#
+# At present, only &&-chains in subshells are examined by this linter;
+# top-level &&-chains are instead checked directly by the test framework. Like
+# the top-level &&-chain linter, the subshell linter (intentionally) does not
+# check &&-chains within {...} blocks.
+#
+# Checking for &&-chain breakage is done line-by-line by pure textual
+# inspection.
+#
+# Incomplete lines (those ending with "\") are stitched together with following
+# lines to simplify processing, particularly of "one-liner" statements.
+# Top-level here-docs are swallowed to avoid false positives within the
+# here-doc body, although the statement to which the here-doc is attached is
+# retained.
+#
+# Heuristics are used to detect end-of-subshell when the closing ")" is cuddled
+# with the final subshell statement on the same line:
+#
+# (cd foo &&
+# bar)
+#
+# in order to avoid misinterpreting the ")" in constructs such as "x=$(...)"
+# and "case $x in *)" as ending the subshell.
+#
+# Lines missing a final "&&" are flagged with "?!AMP?!", and lines which chain
+# commands with ";" internally rather than "&&" are flagged "?!SEMI?!". A line
+# may be flagged for both violations.
+#
+# Detection of a missing &&-link in a multi-line subshell is complicated by the
+# fact that the last statement before the closing ")" must not end with "&&".
+# Since processing is line-by-line, it is not known whether a missing "&&" is
+# legitimate or not until the _next_ line is seen. To accommodate this, within
+# multi-line subshells, each line is stored in sed's "hold" area until after
+# the next line is seen and processed. If the next line is a stand-alone ")",
+# then a missing "&&" on the previous line is legitimate; otherwise a missing
+# "&&" is a break in the &&-chain.
+#
+# (
+# cd foo &&
+# bar
+# )
+#
+# In practical terms, when "bar" is encountered, it is flagged with "?!AMP?!",
+# but when the stand-alone ")" line is seen which closes the subshell, the
+# "?!AMP?!" violation is removed from the "bar" line (retrieved from the "hold"
+# area) since the final statement of a subshell must not end with "&&". The
+# final line of a subshell may still break the &&-chain by using ";" internally
+# to chain commands together rather than "&&", so "?!SEMI?!" is never removed
+# from a line (even though "?!AMP?!" might be).
+#
+# Care is taken to recognize the last _statement_ of a multi-line subshell, not
+# necessarily the last textual _line_ within the subshell, since &&-chaining
+# applies to statements, not to lines. Consequently, blank lines, comment
+# lines, and here-docs are swallowed (but not the command to which the here-doc
+# is attached), leaving the last statement in the "hold" area, not the last
+# line, thus simplifying &&-link checking.
+#
+# The final statement before "done" in for- and while-loops, and before "elif",
+# "else", and "fi" in if-then-else likewise must not end with "&&", thus
+# receives similar treatment.
+#
+# To facilitate regression testing (and manual debugging), a ">" annotation is
+# applied to the line containing ")" which closes a subshell, ">>" to a line
+# closing a nested subshell, and ">>>" to a line closing both at once. This
+# makes it easy to detect whether the heuristics correctly identify
+# end-of-subshell.
+#------------------------------------------------------------------------------
+
+# incomplete line -- slurp up next line
+:squash
+/\\$/ {
+ N
+ s/\\\n//
+ bsquash
+}
+
+# here-doc -- swallow it to avoid false hits within its body (but keep the
+# command to which it was attached)
+/<<[ ]*[-\\]*EOF[ ]*/ {
+ s/[ ]*<<[ ]*[-\\]*EOF//
+ h
+ :hereslurp
+ N
+ s/.*\n//
+ /^[ ]*EOF[ ]*$/!bhereslurp
+ x
+}
+
+# one-liner "(...) &&"
+/^[ ]*!*[ ]*(..*)[ ]*&&[ ]*$/boneline
+
+# same as above but without trailing "&&"
+/^[ ]*!*[ ]*(..*)[ ]*$/boneline
+
+# one-liner "(...) >x" (or "2>x" or "<x" or "|x" or "&"
+/^[ ]*!*[ ]*(..*)[ ]*[0-9]*[<>|&]/boneline
+
+# multi-line "(...\n...)"
+/^[ ]*(/bsubshell
+
+# innocuous line -- print it and advance to next line
+b
+
+# found one-liner "(...)" -- mark suspect if it uses ";" internally rather than
+# "&&" (but not ";" in a string)
+:oneline
+/;/{
+ /"[^"]*;[^"]*"/!s/^/?!SEMI?!/
+}
+b
+
+:subshell
+# bare "(" line?
+/^[ ]*([ ]*$/ {
+ # stash for later printing
+ h
+ bnextline
+}
+# "(..." line -- split off and stash "(", then process "..." as its own line
+x
+s/.*/(/
+x
+s/(//
+bslurp
+
+:nextline
+N
+s/.*\n//
+
+:slurp
+# incomplete line "...\"
+/\\$/bincomplete
+# multi-line quoted string "...\n..."
+/^[^"]*"[^"]*$/bdqstring
+# multi-line quoted string '...\n...' (but not contraction in string "it's so")
+/^[^']*'[^']*$/{
+ /"[^'"]*'[^'"]*"/!bsqstring
+}
+# here-doc -- swallow it
+/<<[ ]*[-\\]*EOF/bheredoc
+/<<[ ]*[-\\]*EOT/bheredoc
+/<<[ ]*[-\\]*INPUT_END/bheredoc
+# comment or empty line -- discard since final non-comment, non-empty line
+# before closing ")", "done", "elsif", "else", or "fi" will need to be
+# re-visited to drop "suspect" marking since final line of those constructs
+# legitimately lacks "&&", so "suspect" mark must be removed
+/^[ ]*#/bnextline
+/^[ ]*$/bnextline
+# in-line comment -- strip it (but not "#" in a string, Bash ${#...} array
+# length, or Perforce "//depot/path#42" revision in filespec)
+/[ ]#/{
+ /"[^"]*#[^"]*"/!s/[ ]#.*$//
+}
+# one-liner "case ... esac"
+/^[ ]*case[ ]*..*esac/bcheckchain
+# multi-line "case ... esac"
+/^[ ]*case[ ]..*[ ]in/bcase
+# multi-line "for ... done" or "while ... done"
+/^[ ]*for[ ]..*[ ]in/bcontinue
+/^[ ]*while[ ]/bcontinue
+/^[ ]*do[ ]/bcontinue
+/^[ ]*do[ ]*$/bcontinue
+/;[ ]*do/bcontinue
+/^[ ]*done[ ]*&&[ ]*$/bdone
+/^[ ]*done[ ]*$/bdone
+/^[ ]*done[ ]*[<>|]/bdone
+/^[ ]*done[ ]*)/bdone
+/||[ ]*exit[ ]/bcontinue
+/||[ ]*exit[ ]*$/bcontinue
+# multi-line "if...elsif...else...fi"
+/^[ ]*if[ ]/bcontinue
+/^[ ]*then[ ]/bcontinue
+/^[ ]*then[ ]*$/bcontinue
+/;[ ]*then/bcontinue
+/^[ ]*elif[ ]/belse
+/^[ ]*elif[ ]*$/belse
+/^[ ]*else[ ]/belse
+/^[ ]*else[ ]*$/belse
+/^[ ]*fi[ ]*&&[ ]*$/bdone
+/^[ ]*fi[ ]*$/bdone
+/^[ ]*fi[ ]*[<>|]/bdone
+/^[ ]*fi[ ]*)/bdone
+# nested one-liner "(...) &&"
+/^[ ]*(.*)[ ]*&&[ ]*$/bcheckchain
+# nested one-liner "(...)"
+/^[ ]*(.*)[ ]*$/bcheckchain
+# nested one-liner "(...) >x" (or "2>x" or "<x" or "|x")
+/^[ ]*(.*)[ ]*[0-9]*[<>|]/bcheckchain
+# nested multi-line "(...\n...)"
+/^[ ]*(/bnest
+# multi-line "{...\n...}"
+/^[ ]*{/bblock
+# closing ")" on own line -- exit subshell
+/^[ ]*)/bclosesolo
+# "$((...))" -- arithmetic expansion; not closing ")"
+/\$(([^)][^)]*))[^)]*$/bcheckchain
+# "$(...)" -- command substitution; not closing ")"
+/\$([^)][^)]*)[^)]*$/bcheckchain
+# multi-line "$(...\n...)" -- command substitution; treat as nested subshell
+/\$([ ]*$/bnest
+# "=(...)" -- Bash array assignment; not closing ")"
+/=(/bcheckchain
+# closing "...) &&"
+/)[ ]*&&[ ]*$/bclose
+# closing "...)"
+/)[ ]*$/bclose
+# closing "...) >x" (or "2>x" or "<x" or "|x")
+/)[ ]*[<>|]/bclose
+:checkchain
+# mark suspect if line uses ";" internally rather than "&&" (but not ";" in a
+# string and not ";;" in one-liner "case...esac")
+/;/{
+ /;;/!{
+ /"[^"]*;[^"]*"/!s/^/?!SEMI?!/
+ }
+}
+# line ends with pipe "...|" -- valid; not missing "&&"
+/|[ ]*$/bcontinue
+# missing end-of-line "&&" -- mark suspect
+/&&[ ]*$/!s/^/?!AMP?!/
+:continue
+# retrieve and print previous line
+x
+n
+bslurp
+
+# found incomplete line "...\" -- slurp up next line
+:incomplete
+N
+s/\\\n//
+bslurp
+
+# found multi-line double-quoted string "...\n..." -- slurp until end of string
+:dqstring
+s/"//g
+N
+s/\n//
+/"/!bdqstring
+bcheckchain
+
+# found multi-line single-quoted string '...\n...' -- slurp until end of string
+:sqstring
+s/'//g
+N
+s/\n//
+/'/!bsqstring
+bcheckchain
+
+# found here-doc -- swallow it to avoid false hits within its body (but keep
+# the command to which it was attached); take care to handle here-docs nested
+# within here-docs by only recognizing closing tag matching outer here-doc
+# opening tag
+:heredoc
+/EOF/{ s/[ ]*<<[ ]*[-\\]*EOF//; s/^/EOF/; }
+/EOT/{ s/[ ]*<<[ ]*[-\\]*EOT//; s/^/EOT/; }
+/INPUT_END/{ s/[ ]*<<[ ]*[-\\]*INPUT_END//; s/^/INPUT_END/; }
+:hereslurpsub
+N
+/^EOF.*\n[ ]*EOF[ ]*$/bhereclose
+/^EOT.*\n[ ]*EOT[ ]*$/bhereclose
+/^INPUT_END.*\n[ ]*INPUT_END[ ]*$/bhereclose
+bhereslurpsub
+:hereclose
+s/^EOF//
+s/^EOT//
+s/^INPUT_END//
+s/\n.*$//
+bcheckchain
+
+# found "case ... in" -- pass through untouched
+:case
+x
+n
+/^[ ]*esac/bslurp
+bcase
+
+# found "else" or "elif" -- drop "suspect" from final line before "else" since
+# that line legitimately lacks "&&"
+:else
+x
+s/?!AMP?!//
+x
+bcontinue
+
+# found "done" closing for-loop or while-loop, or "fi" closing if-then -- drop
+# "suspect" from final contained line since that line legitimately lacks "&&"
+:done
+x
+s/?!AMP?!//
+x
+# is 'done' or 'fi' cuddled with ")" to close subshell?
+/done.*)/bclose
+/fi.*)/bclose
+bcheckchain
+
+# found nested multi-line "(...\n...)" -- pass through untouched
+:nest
+x
+:nestslurp
+n
+# closing ")" on own line -- stop nested slurp
+/^[ ]*)/bnestclose
+# comment -- not closing ")" if in comment
+/^[ ]*#/bnestcontinue
+# "$((...))" -- arithmetic expansion; not closing ")"
+/\$(([^)][^)]*))[^)]*$/bnestcontinue
+# "$(...)" -- command substitution; not closing ")"
+/\$([^)][^)]*)[^)]*$/bnestcontinue
+# closing "...)" -- stop nested slurp
+/)/bnestclose
+:nestcontinue
+x
+bnestslurp
+:nestclose
+s/^/>>/
+# is it "))" which closes nested and parent subshells?
+/)[ ]*)/bslurp
+bcheckchain
+
+# found multi-line "{...\n...}" block -- pass through untouched
+:block
+x
+n
+# closing "}" -- stop block slurp
+/}/bcheckchain
+bblock
+
+# found closing ")" on own line -- drop "suspect" from final line of subshell
+# since that line legitimately lacks "&&" and exit subshell loop
+:closesolo
+x
+s/?!AMP?!//
+p
+x
+s/^/>/
+b
+
+# found closing "...)" -- exit subshell loop
+:close
+x
+p
+x
+s/^/>/
+b
--- /dev/null
+(
+ foo &&
+ bar=$((42 + 1)) &&
+ baz
+>) &&
+(
+?!AMP?! bar=$((42 + 1))
+ baz
+>)
--- /dev/null
+(
+ foo &&
+# LINT: closing ")" of $((...)) not misinterpreted as subshell-closing ")"
+ bar=$((42 + 1)) &&
+ baz
+) &&
+(
+# LINT: missing "&&" on $((...))
+ bar=$((42 + 1))
+ baz
+)
--- /dev/null
+(
+ foo &&
+ bar=(gumbo stumbo wumbo) &&
+ baz
+>) &&
+(
+ foo &&
+ bar=${#bar[@]} &&
+ baz
+>)
--- /dev/null
+(
+ foo &&
+# LINT: ")" in Bash array assignment not misinterpreted as subshell-closing ")"
+ bar=(gumbo stumbo wumbo) &&
+ baz
+) &&
+(
+ foo &&
+# LINT: Bash array length operator not misinterpreted as comment
+ bar=${#bar[@]} &&
+ baz
+)
--- /dev/null
+(
+ nothing &&
+ something
+>)
--- /dev/null
+(
+
+ nothing &&
+
+ something
+# LINT: swallow blank lines since final _statement_ before subshell end is
+# LINT: significant to "&&"-check, not final _line_ (which might be blank)
+
+
+)
--- /dev/null
+(
+ foo &&
+ {
+ echo a
+ echo b
+ } &&
+ bar &&
+ {
+ echo c
+?!AMP?! }
+ baz
+>)
--- /dev/null
+(
+# LINT: missing "&&" in block not currently detected (for consistency with
+# LINT: --chain-lint at top level and to provide escape hatch if needed)
+ foo &&
+ {
+ echo a
+ echo b
+ } &&
+ bar &&
+# LINT: missing "&&" at closing "}"
+ {
+ echo c
+ }
+ baz
+)
--- /dev/null
+(
+ foo &&
+?!AMP?! bar
+ baz &&
+ wop
+>)
--- /dev/null
+(
+ foo &&
+# LINT: missing "&&" from 'bar'
+ bar
+ baz &&
+# LINT: final statement before closing ")" legitimately lacks "&&"
+ wop
+)
--- /dev/null
+(
+ case "$x" in
+ x) foo ;;
+ *) bar ;;
+ esac &&
+ foobar
+>) &&
+(
+ case "$x" in
+ x) foo ;;
+ *) bar ;;
+?!AMP?! esac
+ foobar
+>) &&
+(
+ case "$x" in 1) true;; esac &&
+?!AMP?! case "$y" in 2) false;; esac
+ foobar
+>)
--- /dev/null
+(
+# LINT: "...)" arms in 'case' not misinterpreted as subshell-closing ")"
+ case "$x" in
+ x) foo ;;
+ *) bar ;;
+ esac &&
+ foobar
+) &&
+(
+# LINT: missing "&&" on 'esac'
+ case "$x" in
+ x) foo ;;
+ *) bar ;;
+ esac
+ foobar
+) &&
+(
+# LINT: "...)" arm in one-liner 'case' not misinterpreted as closing ")"
+ case "$x" in 1) true;; esac &&
+# LINT: same but missing "&&"
+ case "$y" in 2) false;; esac
+ foobar
+)
--- /dev/null
+(
+cd foo &&
+ (bar &&
+>>> baz))
--- /dev/null
+(cd foo &&
+ (bar &&
+ baz))
--- /dev/null
+(
+ foo
+>) &&
+(
+ bar
+>) >out &&
+(
+ baz
+>) 2>err &&
+(
+ boo
+>) <input &&
+(
+ bip
+>) | wuzzle &&
+(
+ bop
+>) | fazz fozz &&
+(
+ bup
+>) |
+fuzzle &&
+(
+ yop
+>)
--- /dev/null
+# LINT: closing ")" with various decorations ("&&", ">", "|", etc.)
+(
+ foo
+) &&
+(
+ bar
+) >out &&
+(
+ baz
+) 2>err &&
+(
+ boo
+) <input &&
+(
+ bip
+) | wuzzle &&
+(
+ bop
+) | fazz \
+ fozz &&
+(
+ bup
+) |
+fuzzle &&
+(
+ yop
+)
--- /dev/null
+(
+ foo &&
+ bar=$(gobble) &&
+ baz
+>) &&
+(
+?!AMP?! bar=$(gobble blocks)
+ baz
+>)
--- /dev/null
+(
+ foo &&
+# LINT: closing ")" of $(...) not misinterpreted as subshell-closing ")"
+ bar=$(gobble) &&
+ baz
+) &&
+(
+# LINT: missing "&&" on $(...)
+ bar=$(gobble blocks)
+ baz
+)
--- /dev/null
+(
+ nothing &&
+ something
+>)
--- /dev/null
+(
+# LINT: swallow comment lines
+ # comment 1
+ nothing &&
+ # comment 2
+ something
+# LINT: swallow comment lines since final _statement_ before subshell end is
+# LINT: significant to "&&"-check, not final _line_ (which might be comment)
+ # comment 3
+ # comment 4
+)
--- /dev/null
+(
+for i in a b c; do
+ if test "$(echo $(waffle bat))" = "eleventeen" &&
+ test "$x" = "$y"; then
+ :
+ else
+ echo >file
+ fi
+> done) &&
+test ! -f file
--- /dev/null
+# LINT: 'for' loop cuddled with "(" and ")" and nested 'if' with complex
+# LINT: multi-line condition; indented with spaces, not tabs
+(for i in a b c; do
+ if test "$(echo $(waffle bat))" = "eleventeen" &&
+ test "$x" = "$y"; then
+ :
+ else
+ echo >file
+ fi
+ done) &&
+test ! -f file
--- /dev/null
+(
+if test -z ""; then
+ echo empty
+ else
+ echo bizzy
+> fi) &&
+echo foobar
--- /dev/null
+# LINT: 'if' cuddled with "(" and ")"; indented with spaces, not tabs
+(if test -z ""; then
+ echo empty
+ else
+ echo bizzy
+ fi) &&
+echo foobar
--- /dev/null
+(
+ while read x
+ do foobar bop || exit 1
+> done <file ) &&
+outside subshell
--- /dev/null
+# LINT: 'while' loop cuddled with "(" and ")", with embedded (allowed)
+# LINT: "|| exit {n}" to exit loop early, and using redirection "<" to feed
+# LINT: loop; indented with spaces, not tabs
+( while read x
+ do foobar bop || exit 1
+ done <file ) &&
+outside subshell
--- /dev/null
+(
+cd foo &&
+ bar
+>) &&
+
+(
+?!AMP?!cd foo
+ bar
+>) &&
+
+(
+ cd foo &&
+> bar) &&
+
+(
+cd foo &&
+> bar) &&
+
+(
+?!AMP?!cd foo
+> bar)
--- /dev/null
+# LINT: first subshell statement cuddled with opening "("; for implementation
+# LINT: simplicity, "(..." is split into two lines, "(" and "..."
+(cd foo &&
+ bar
+) &&
+
+# LINT: same with missing "&&"
+(cd foo
+ bar
+) &&
+
+# LINT: closing ")" cuddled with final subshell statement
+(
+ cd foo &&
+ bar) &&
+
+# LINT: "(" and ")" cuddled with first and final subshell statements
+(cd foo &&
+ bar) &&
+
+# LINT: same with missing "&&"
+(cd foo
+ bar)
--- /dev/null
+(
+ for i in a b c
+ do
+ foo || exit 1
+ bar &&
+ baz
+ done
+>) &&
+(
+ while true
+ do
+ foo || exit 1
+ bar &&
+ baz
+ done
+>) &&
+(
+ i=0 &&
+ while test $i -lt 10
+ do
+ echo $i || exit
+ i=$(($i + 1))
+ done
+>)
--- /dev/null
+(
+ for i in a b c
+ do
+# LINT: "|| exit {n}" valid for-loop escape in subshell; no "&&" needed
+ foo || exit 1
+ bar &&
+ baz
+ done
+) &&
+(
+ while true
+ do
+# LINT: "|| exit {n}" valid while-loop escape in subshell; no "&&" needed
+ foo || exit 1
+ bar &&
+ baz
+ done
+) &&
+(
+ i=0 &&
+ while test $i -lt 10
+ do
+# LINT: "|| exit" (sans exit code) valid escape in subshell; no "&&" needed
+ echo $i || exit
+ i=$(($i + 1))
+ done
+)
--- /dev/null
+(
+ foo || exit 1
+ bar &&
+ baz
+>)
--- /dev/null
+(
+# LINT: "|| exit {n}" valid subshell escape without hurting &&-chain
+ foo || exit 1
+ bar &&
+ baz
+)
--- /dev/null
+(
+ for i in a b c
+ do
+?!AMP?! echo $i
+ cat
+?!AMP?! done
+ for i in a b c; do
+ echo $i &&
+ cat $i
+ done
+>)
--- /dev/null
+(
+# LINT: 'for', 'do', 'done' do not need "&&"
+ for i in a b c
+ do
+# LINT: missing "&&" on 'echo'
+ echo $i
+# LINT: last statement of for loop does not need "&&"
+ cat <<-\EOF
+ bar
+ EOF
+# LINT: missing "&&" on 'done'
+ done
+
+# LINT: 'do' on same line as 'for'
+ for i in a b c; do
+ echo $i &&
+ cat $i
+ done
+)
--- /dev/null
+boodle wobba gorgo snoot wafta snurb &&
+
+horticulture
--- /dev/null
+# LINT: stitch together incomplete \-ending lines
+# LINT: swallow here-doc to avoid false positives in content
+boodle wobba \
+ gorgo snoot \
+ wafta snurb <<EOF &&
+quoth the raven,
+nevermore...
+EOF
+
+# LINT: swallow here-doc (EOF is last line of test)
+horticulture <<\EOF
+gomez
+morticia
+wednesday
+pugsly
+EOF
--- /dev/null
+(
+ for i in a b c
+ do
+ if false
+ then
+?!AMP?! echo "err"
+ exit 1
+?!AMP?! fi
+ foo
+?!AMP?! done
+ bar
+>)
--- /dev/null
+(
+ for i in a b c
+ do
+ if false
+ then
+# LINT: missing "&&" on 'echo'
+ echo "err"
+ exit 1
+# LINT: missing "&&" on 'fi'
+ fi
+ foo
+# LINT: missing "&&" on 'done'
+ done
+ bar
+)
--- /dev/null
+(
+ if test -n ""
+ then
+?!AMP?! echo very
+ echo empty
+ elif test -z ""
+ echo foo
+ else
+ echo foo &&
+ cat
+?!AMP?! fi
+ echo poodle
+>) &&
+(
+ if test -n ""; then
+ echo very &&
+?!AMP?! echo empty
+ if
+>)
--- /dev/null
+(
+# LINT: 'if', 'then', 'elif', 'else', 'fi' do not need "&&"
+ if test -n ""
+ then
+# LINT: missing "&&" on 'echo'
+ echo very
+# LINT: last statement before 'elif' does not need "&&"
+ echo empty
+ elif test -z ""
+# LINT: last statement before 'else' does not need "&&"
+ echo foo
+ else
+ echo foo &&
+# LINT: last statement before 'fi' does not need "&&"
+ cat <<-\EOF
+ bar
+ EOF
+# LINT: missing "&&" on 'fi'
+ fi
+ echo poodle
+) &&
+(
+# LINT: 'then' on same line as 'if'
+ if test -n ""; then
+ echo very &&
+ echo empty
+ if
+)
--- /dev/null
+line 1 line 2 line 3 line 4 &&
+(
+ line 5 line 6 line 7 line 8
+>)
--- /dev/null
+# LINT: stitch together all incomplete \-ending lines
+line 1 \
+line 2 \
+line 3 \
+line 4 &&
+(
+# LINT: stitch together all incomplete \-ending lines (subshell)
+ line 5 \
+ line 6 \
+ line 7 \
+ line 8
+)
--- /dev/null
+(
+ foobar &&
+?!AMP?! barfoo
+ flibble "not a # comment"
+>) &&
+
+(
+cd foo &&
+> flibble "not a # comment")
--- /dev/null
+(
+# LINT: swallow inline comment (leaving command intact)
+ foobar && # comment 1
+# LINT: mispositioned "&&" (correctly) swallowed with comment
+ barfoo # wrong position for &&
+# LINT: "#" in string not misinterpreted as comment
+ flibble "not a # comment"
+) &&
+
+# LINT: "#" in string in cuddled subshell not misinterpreted as comment
+(cd foo &&
+ flibble "not a # comment")
--- /dev/null
+(
+ if true
+ then
+ while true
+ do
+?!AMP?! echo "pop"
+ echo "glup"
+?!AMP?! done
+ foo
+?!AMP?! fi
+ bar
+>)
--- /dev/null
+(
+ if true
+ then
+ while true
+ do
+# LINT: missing "&&" on 'echo'
+ echo "pop"
+ echo "glup"
+# LINT: missing "&&" on 'done'
+ done
+ foo
+# LINT: missing "&&" on 'fi'
+ fi
+ bar
+)
--- /dev/null
+(
+ foo &&
+ x=$(
+ echo bar |
+ cat
+>> ) &&
+ echo ok
+>) |
+sort
--- /dev/null
+(
+ foo &&
+ x=$(
+ echo bar |
+ cat
+ ) &&
+ echo ok
+) |
+sort
--- /dev/null
+(
+ x=line 1 line 2 line 3" &&
+?!AMP?! y=line 1 line2'
+ foobar
+>) &&
+(
+ echo "there's nothing to see here" &&
+ exit
+>)
--- /dev/null
+(
+ x="line 1
+ line 2
+ line 3" &&
+# LINT: missing "&&" on assignment
+ y='line 1
+ line2'
+ foobar
+) &&
+(
+# LINT: apostrophe (in a contraction) within string not misinterpreted as
+# LINT: starting multi-line single-quoted string
+ echo "there's nothing to see here" &&
+ exit
+)
--- /dev/null
+! (foo && bar) &&
+! (foo && bar) >baz &&
+
+?!SEMI?!! (foo; bar) &&
+?!SEMI?!! (foo; bar) >baz
--- /dev/null
+# LINT: top-level one-liner subshell
+! (foo && bar) &&
+! (foo && bar) >baz &&
+
+# LINT: top-level one-liner subshell missing internal "&&"
+! (foo; bar) &&
+! (foo; bar) >baz
--- /dev/null
+(
+ (cd foo &&
+ bar
+>> ) &&
+ (cd foo &&
+ bar
+?!AMP?!>> )
+ (
+ cd foo &&
+>> bar) &&
+ (
+ cd foo &&
+?!AMP?!>> bar)
+ (cd foo &&
+>> bar) &&
+ (cd foo &&
+?!AMP?!>> bar)
+ foobar
+>)
--- /dev/null
+(
+# LINT: opening "(" cuddled with first nested subshell statement
+ (cd foo &&
+ bar
+ ) &&
+
+# LINT: same but "&&" missing
+ (cd foo &&
+ bar
+ )
+
+# LINT: closing ")" cuddled with final nested subshell statement
+ (
+ cd foo &&
+ bar) &&
+
+# LINT: same but "&&" missing
+ (
+ cd foo &&
+ bar)
+
+# LINT: "(" and ")" cuddled with first and final subshell statements
+ (cd foo &&
+ bar) &&
+
+# LINT: same but "&&" missing
+ (cd foo &&
+ bar)
+
+ foobar
+)
--- /dev/null
+(
+ cat &&
+?!AMP?! cat
+ foobar
+>)
--- /dev/null
+(
+# LINT: inner "EOF" not misintrepreted as closing INPUT_END here-doc
+ cat <<-\INPUT_END &&
+ fish are mice
+ but geese go slow
+ data <<EOF
+ perl is lerp
+ and nothing else
+ EOF
+ toink
+ INPUT_END
+
+# LINT: same but missing "&&"
+ cat <<-\EOT
+ text goes here
+ data <<EOF
+ data goes here
+ EOF
+ more test here
+ EOT
+
+ foobar
+)
--- /dev/null
+(
+ foo &&
+ (
+ bar &&
+ # bottles wobble while fiddles gobble
+ # minor numbers of cows (or do they?)
+ baz &&
+ snaff
+?!AMP?!>> )
+ fuzzy
+>)
--- /dev/null
+(
+ foo &&
+ (
+ bar &&
+# LINT: ")" in comment in nested subshell not misinterpreted as closing ")"
+ # bottles wobble while fiddles gobble
+ # minor numbers of cows (or do they?)
+ baz &&
+ snaff
+# LINT: missing "&&" on ')'
+ )
+ fuzzy
+)
--- /dev/null
+(
+ cd foo &&
+ (
+ echo a &&
+ echo b
+>> ) >file &&
+ cd foo &&
+ (
+ echo a
+ echo b
+>> ) >file
+>)
--- /dev/null
+(
+ cd foo &&
+ (
+ echo a &&
+ echo b
+ ) >file &&
+
+ cd foo &&
+ (
+# LINT: nested multi-line subshell not presently checked for missing "&&"
+ echo a
+ echo b
+ ) >file
+)
--- /dev/null
+(foo && bar) &&
+(foo && bar) |
+(foo && bar) >baz &&
+
+?!SEMI?!(foo; bar) &&
+?!SEMI?!(foo; bar) |
+?!SEMI?!(foo; bar) >baz
+
+(foo "bar; baz")
--- /dev/null
+# LINT: top-level one-liner subshell
+(foo && bar) &&
+(foo && bar) |
+(foo && bar) >baz &&
+
+# LINT: top-level one-liner subshell missing internal "&&"
+(foo; bar) &&
+(foo; bar) |
+(foo; bar) >baz
+
+# LINT: ";" in string not misinterpreted as broken &&-chain
+(foo "bar; baz")
--- /dev/null
+(
+ p4 print -1 //depot/fiddle#42 >file &&
+ foobar
+>)
--- /dev/null
+(
+# LINT: Perforce revspec in filespec not misinterpreted as in-line comment
+ p4 print -1 //depot/fiddle#42 >file &&
+ foobar
+)
--- /dev/null
+(
+ foo |
+ bar |
+ baz &&
+ fish |
+?!AMP?! cow
+ sunder
+>)
--- /dev/null
+(
+# LINT: no "&&" needed on line ending with "|"
+ foo |
+ bar |
+ baz &&
+
+# LINT: final line of pipe sequence ('cow') lacking "&&"
+ fish |
+ cow
+
+ sunder
+)
--- /dev/null
+(
+?!AMP?!?!SEMI?! cat foo ; echo bar
+?!SEMI?! cat foo ; echo bar
+>) &&
+(
+?!SEMI?! cat foo ; echo bar &&
+?!SEMI?! cat foo ; echo bar
+>) &&
+(
+ echo "foo; bar" &&
+?!SEMI?! cat foo; echo bar
+>) &&
+(
+?!SEMI?! foo;
+>) &&
+(
+cd foo &&
+ for i in a b c; do
+?!SEMI?! echo;
+> done)
--- /dev/null
+(
+# LINT: missing internal "&&" and ending "&&"
+ cat foo ; echo bar
+# LINT: final statement before ")" only missing internal "&&"
+ cat foo ; echo bar
+) &&
+(
+# LINT: missing internal "&&"
+ cat foo ; echo bar &&
+ cat foo ; echo bar
+) &&
+(
+# LINT: not fooled by semicolon in string
+ echo "foo; bar" &&
+ cat foo; echo bar
+) &&
+(
+# LINT: unnecessary terminating semicolon
+ foo;
+) &&
+(cd foo &&
+ for i in a b c; do
+# LINT: unnecessary terminating semicolon
+ echo;
+ done)
--- /dev/null
+(
+ echo wobba gorgo snoot wafta snurb &&
+?!AMP?! cat >bip
+ echo >bop
+>)
--- /dev/null
+(
+# LINT: stitch together incomplete \-ending lines
+# LINT: swallow here-doc to avoid false positives in content
+ echo wobba \
+ gorgo snoot \
+ wafta snurb <<-EOF &&
+ quoth the raven,
+ nevermore...
+ EOF
+
+# LINT: missing "&&" on 'cat'
+ cat <<EOF >bip
+ fish fly high
+ EOF
+
+# LINT: swallow here-doc (EOF is last line of subshell)
+ echo <<-\EOF >bop
+ gomez
+ morticia
+ wednesday
+ pugsly
+ EOF
+)
--- /dev/null
+(
+ (foo && bar) &&
+ (foo && bar) |
+ (foo && bar) >baz &&
+?!SEMI?! (foo; bar) &&
+?!SEMI?! (foo; bar) |
+?!SEMI?! (foo; bar) >baz &&
+ (foo || exit 1) &&
+ (foo || exit 1) |
+ (foo || exit 1) >baz &&
+?!AMP?! (foo && bar)
+?!AMP?!?!SEMI?! (foo && bar; baz)
+ foobar
+>)
--- /dev/null
+(
+# LINT: nested one-liner subshell
+ (foo && bar) &&
+ (foo && bar) |
+ (foo && bar) >baz &&
+
+# LINT: nested one-liner subshell missing internal "&&"
+ (foo; bar) &&
+ (foo; bar) |
+ (foo; bar) >baz &&
+
+# LINT: nested one-liner subshell with "|| exit"
+ (foo || exit 1) &&
+ (foo || exit 1) |
+ (foo || exit 1) >baz &&
+
+# LINT: nested one-liner subshell lacking ending "&&"
+ (foo && bar)
+
+# LINT: nested one-liner subshell missing internal "&&" and lacking ending "&&"
+ (foo && bar; baz)
+
+ foobar
+)
--- /dev/null
+(
+ while true
+ do
+?!AMP?! echo foo
+ cat
+?!AMP?! done
+ while true; do
+ echo foo &&
+ cat bar
+ done
+>)
--- /dev/null
+(
+# LINT: 'while', 'do', 'done' do not need "&&"
+ while true
+ do
+# LINT: missing "&&" on 'echo'
+ echo foo
+# LINT: last statement of while does not need "&&"
+ cat <<-\EOF
+ bar
+ EOF
+# LINT: missing "&&" on 'done'
+ done
+
+# LINT: 'do' on same line as 'while'
+ while true; do
+ echo foo &&
+ cat bar
+ done
+)
use warnings;
my $exit_code=0;
+my %func;
sub err {
my $msg = shift;
+ s/^\s+//;
+ s/\s+$//;
+ s/\s+/ /g;
print "$ARGV:$.: error: $msg: $_\n";
$exit_code = 1;
}
+# glean names of shell functions
+for my $i (@ARGV) {
+ open(my $f, '<', $i) or die "$0: $i: $!\n";
+ while (<$f>) {
+ $func{$1} = 1 if /^\s*(\w+)\s*\(\)\s*{\s*$/;
+ }
+ close $f;
+}
+
while (<>) {
chomp;
+ # stitch together incomplete lines (those ending with "\")
+ while (s/\\$//) {
+ $_ .= readline;
+ chomp;
+ }
+
/\bsed\s+-i/ and err 'sed -i is not portable';
- /\becho\s+-[neE]/ and err 'echo with option is not portable (please use printf)';
+ /\becho\s+-[neE]/ and err 'echo with option is not portable (use printf)';
/^\s*declare\s+/ and err 'arrays/declare not portable';
- /^\s*[^#]\s*which\s/ and err 'which is not portable (please use type)';
- /\btest\s+[^=]*==/ and err '"test a == b" is not portable (please use =)';
- /\bwc -l.*"\s*=/ and err '`"$(wc -l)"` is not portable (please use test_line_count)';
- /\bexport\s+[A-Za-z0-9_]*=/ and err '"export FOO=bar" is not portable (please use FOO=bar && export FOO)';
+ /^\s*[^#]\s*which\s/ and err 'which is not portable (use type)';
+ /\btest\s+[^=]*==/ and err '"test a == b" is not portable (use =)';
+ /\bwc -l.*"\s*=/ and err '`"$(wc -l)"` is not portable (use test_line_count)';
+ /\bexport\s+[A-Za-z0-9_]*=/ and err '"export FOO=bar" is not portable (use FOO=bar && export FOO)';
+ /^\s*([A-Z0-9_]+=(\w+|(["']).*?\3)\s+)+(\w+)/ and exists($func{$4}) and
+ err '"FOO=bar shell_func" assignment extends beyond "shell_func"';
# this resets our $. for each file
close ARGV if eof;
}
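
The check added above flags one-shot environment assignments placed in front of a shell function defined in the test suite, since POSIX leaves it unspecified whether such an assignment outlives the function call and shells disagree in practice. A minimal sketch of what gets reported and one portable rewrite; "do_checks" is a made-up function name here, and the construct is only flagged when the name matches a function gleaned from the scanned scripts:

    # reported: '"FOO=bar shell_func" assignment extends beyond "shell_func"'
    VERBOSE=yes do_checks

    # portable: confine the assignment (and the call) to a subshell
    (
        VERBOSE=yes &&
        export VERBOSE &&
        do_checks
    )
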
if ((0 == dwRet) || (dwRet > MAX_PATH))
return error("Error getting current directory");
- if ((Buffer[0] < 'A') || (Buffer[0] > 'Z'))
- return error("Invalid drive letter '%c'", Buffer[0]);
+ if (!has_dos_drive_prefix(Buffer))
+ return error("'%s': invalid drive letter", Buffer);
szVolumeAccessPath[4] = Buffer[0];
hVolWrite = CreateFile(szVolumeAccessPath, GENERIC_READ | GENERIC_WRITE,
+#include "cache.h"
#include "pkt-line.h"
static void pack_line(const char *line)
}
}
+static void unpack_sideband(void)
+{
+ struct packet_reader reader;
+ packet_reader_init(&reader, 0, NULL, 0,
+ PACKET_READ_GENTLE_ON_EOF |
+ PACKET_READ_CHOMP_NEWLINE);
+
+ while (packet_reader_read(&reader) != PACKET_READ_EOF) {
+ int band;
+ int fd;
+
+ switch (reader.status) {
+ case PACKET_READ_EOF:
+ break;
+ case PACKET_READ_NORMAL:
+ band = reader.line[0] & 0xff;
+ if (band < 1 || band > 2)
+ die("unexpected side band %d", band);
+ fd = band;
+
+ write_or_die(fd, reader.line + 1, reader.pktlen - 1);
+ break;
+ case PACKET_READ_FLUSH:
+ return;
+ case PACKET_READ_DELIM:
+ break;
+ }
+ }
+}
+
int cmd_main(int argc, const char **argv)
{
if (argc < 2)
pack(argc - 2, argv + 2);
else if (!strcmp(argv[1], "unpack"))
unpack();
+ else if (!strcmp(argv[1], "unpack-sideband"))
+ unpack_sideband();
else
die("invalid argument '%s'", argv[1]);
--- /dev/null
+#include "test-tool.h"
+#include "cache.h"
+#include "commit-graph.h"
+#include "commit.h"
+#include "config.h"
+#include "object-store.h"
+#include "object.h"
+#include "repository.h"
+#include "tree.h"
+
+static void test_parse_commit_in_graph(const char *gitdir, const char *worktree,
+ const struct object_id *commit_oid)
+{
+ struct repository r;
+ struct commit *c;
+ struct commit_list *parent;
+
+ repo_init(&r, gitdir, worktree);
+
+ c = lookup_commit(&r, commit_oid);
+
+ if (!parse_commit_in_graph(&r, c))
+ die("Couldn't parse commit");
+
+ printf("%"PRItime, c->date);
+ for (parent = c->parents; parent; parent = parent->next)
+ printf(" %s", oid_to_hex(&parent->item->object.oid));
+ printf("\n");
+
+ repo_clear(&r);
+}
+
+static void test_get_commit_tree_in_graph(const char *gitdir,
+ const char *worktree,
+ const struct object_id *commit_oid)
+{
+ struct repository r;
+ struct commit *c;
+ struct tree *tree;
+
+ repo_init(&r, gitdir, worktree);
+
+ c = lookup_commit(&r, commit_oid);
+
+ /*
+ * get_commit_tree_in_graph does not automatically parse the commit, so
+ * parse it first.
+ */
+ if (!parse_commit_in_graph(&r, c))
+ die("Couldn't parse commit");
+ tree = get_commit_tree_in_graph(&r, c);
+ if (!tree)
+ die("Couldn't get commit tree");
+
+ printf("%s\n", oid_to_hex(&tree->object.oid));
+
+ repo_clear(&r);
+}
+
+int cmd__repository(int argc, const char **argv)
+{
+ if (argc < 2)
+ die("must have at least 2 arguments");
+ if (!strcmp(argv[1], "parse_commit_in_graph")) {
+ struct object_id oid;
+ if (argc < 5)
+ die("not enough arguments");
+ if (parse_oid_hex(argv[4], &oid, &argv[4]))
+ die("cannot parse oid '%s'", argv[4]);
+ test_parse_commit_in_graph(argv[2], argv[3], &oid);
+ } else if (!strcmp(argv[1], "get_commit_tree_in_graph")) {
+ struct object_id oid;
+ if (argc < 5)
+ die("not enough arguments");
+ if (parse_oid_hex(argv[4], &oid, &argv[4]))
+ die("cannot parse oid '%s'", argv[4]);
+ test_get_commit_tree_in_graph(argv[2], argv[3], &oid);
+ } else {
+ die("unrecognized '%s'", argv[1]);
+ }
+ return 0;
+}
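
cmd__repository() expects a subcommand followed by a gitdir, a worktree path, and a commit object id, in that order. A hypothetical invocation from a test, with illustrative paths:

    test-tool repository parse_commit_in_graph .git . $(git rev-parse HEAD) &&
    test-tool repository get_commit_tree_in_graph .git . $(git rev-parse HEAD)
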
{ "read-cache", cmd__read_cache },
{ "ref-store", cmd__ref_store },
{ "regex", cmd__regex },
+ { "repository", cmd__repository },
{ "revision-walking", cmd__revision_walking },
{ "run-command", cmd__run_command },
{ "scrap-cache-tree", cmd__scrap_cache_tree },
int cmd__read_cache(int argc, const char **argv);
int cmd__ref_store(int argc, const char **argv);
int cmd__regex(int argc, const char **argv);
+int cmd__repository(int argc, const char **argv);
int cmd__revision_walking(int argc, const char **argv);
int cmd__run_command(int argc, const char **argv);
int cmd__scrap_cache_tree(int argc, const char **argv);
cp "$TEST_PATH"/passwd "$HTTPD_ROOT_PATH"
install_script broken-smart-http.sh
install_script error.sh
+ install_script apply-one-time-sed.sh
ln -s "$LIB_HTTPD_MODULE_PATH" "$HTTPD_ROOT_PATH/modules"
test_cmp "$TRASH_DIRECTORY/askpass-expect" \
"$TRASH_DIRECTORY/askpass-query"
}
+
+strip_access_log() {
+ sed -e "
+ s/^.* \"//
+ s/\"//
+ s/ [1-9][0-9]*\$//
+ s/^GET /GET  /
+ " "$HTTPD_ROOT_PATH"/access.log
+}
+
+# Requires one argument: the name of a file containing the expected stripped
+# access log entries.
+check_access_log() {
+ sort "$1" >"$1".sorted &&
+ strip_access_log >access.log.stripped &&
+ sort access.log.stripped >access.log.sorted &&
+ if ! test_cmp "$1".sorted access.log.sorted
+ then
+ test_cmp "$1" access.log.stripped
+ fi
+}
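
A hedged usage sketch for check_access_log: the caller prepares a file holding the expected, already-stripped entries and passes its name in; the repository path and request lines below are purely illustrative:

    test_write_lines \
        "GET  /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1 200" \
        "POST /smart/repo.git/git-upload-pack HTTP/1.1 200" >expect &&
    check_access_log expect
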
SetEnv GIT_EXEC_PATH ${GIT_EXEC_PATH}
SetEnv GIT_HTTP_EXPORT_ALL
</LocationMatch>
+<LocationMatch /one_time_sed/>
+ SetEnv GIT_EXEC_PATH ${GIT_EXEC_PATH}
+ SetEnv GIT_HTTP_EXPORT_ALL
+</LocationMatch>
ScriptAliasMatch /smart_*[^/]*/(.*) ${GIT_EXEC_PATH}/git-http-backend/$1
ScriptAlias /broken_smart/ broken-smart-http.sh/
ScriptAlias /error/ error.sh/
+ScriptAliasMatch /one_time_sed/(.*) apply-one-time-sed.sh/$1
<Directory ${GIT_EXEC_PATH}>
Options FollowSymlinks
</Directory>
<Files error.sh>
Options ExecCGI
</Files>
+<Files apply-one-time-sed.sh>
+ Options ExecCGI
+</Files>
<Files ${GIT_EXEC_PATH}/git-http-backend>
Options ExecCGI
</Files>
--- /dev/null
+#!/bin/sh
+
+# If "one-time-sed" exists in $HTTPD_ROOT_PATH, run sed on the HTTP response,
+# using the contents of "one-time-sed" as the sed command to be run. If the
+# response was modified as a result, delete "one-time-sed" so that subsequent
+# HTTP responses are no longer modified.
+#
+# This can be used to simulate the effects of the repository changing in
+# between HTTP request-response pairs.
+if [ -e one-time-sed ]; then
+ "$GIT_EXEC_PATH/git-http-backend" >out
+ sed "$(cat one-time-sed)" <out >out_modified
+
+ if diff out out_modified >/dev/null; then
+ cat out
+ else
+ cat out_modified
+ rm one-time-sed
+ fi
+else
+ "$GIT_EXEC_PATH/git-http-backend"
+fi
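
A minimal sketch of how a test could drive this CGI, assuming the usual lib-httpd variables ($HTTPD_ROOT_PATH, $HTTPD_URL) and an illustrative repository name:

    echo "s/master/other/" >"$HTTPD_ROOT_PATH/one-time-sed" &&
    git -C client fetch "$HTTPD_URL/one_time_sed/server.git" &&
    # if the sed expression changed that first response, one-time-sed was
    # deleted, so later requests are served unmodified
    git -C client fetch "$HTTPD_URL/one_time_sed/server.git"
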
: >sub1/untrackedfile &&
test_must_fail $command replace_sub1_with_file &&
test_superproject_content origin/add_sub1 &&
- test_submodule_content sub1 origin/add_sub1
+ test_submodule_content sub1 origin/add_sub1 &&
test -f sub1/untracked_file
)
'
(
cd submodule_update &&
git branch -t invalid_sub1 origin/invalid_sub1 &&
- test_must_fail $command invalid_sub1 &&
+ test_must_fail $command invalid_sub1 2>err &&
+ test_i18ngrep sub1 err &&
test_superproject_content origin/add_sub1 &&
test_submodule_content sub1 origin/add_sub1
)
cd submodule_update &&
git branch -t add_sub1 origin/add_sub1 &&
: >sub1 &&
- echo sub1 >.git/info/exclude
+ echo sub1 >.git/info/exclude &&
$command add_sub1 &&
test_superproject_content origin/add_sub1 &&
test_submodule_content sub1 origin/add_sub1
rm -rf .git/modules/sub1 &&
$command replace_sub1_with_directory &&
test_superproject_content origin/replace_sub1_with_directory &&
- test_submodule_content sub1 origin/modify_sub1
test_git_directory_exists sub1
)
'
(
git ls-files -s path4 |
sed -e "s/ .*/ /" |
- tr -d "\012"
+ tr -d "\012" &&
echo "$a"
) | git update-index --index-info &&
len=$(git ls-files "a*" | wc -c) &&
test_expect_success MINGW '.git hidden' '
rm -rf newdir &&
(
- unset GIT_DIR GIT_WORK_TREE
+ sane_unset GIT_DIR GIT_WORK_TREE &&
mkdir newdir &&
cd newdir &&
git init &&
test_expect_success MINGW 'bare git dir not hidden' '
rm -rf newdir &&
(
- unset GIT_DIR GIT_WORK_TREE GIT_CONFIG
+ sane_unset GIT_DIR GIT_WORK_TREE GIT_CONFIG &&
mkdir newdir &&
cd newdir &&
git --bare init
test_expect_success 'setup' '
mkdir -p a/b/d a/c b &&
(
- echo "[attr]notest !test"
- echo "\" d \" test=d"
- echo " e test=e"
- echo " e\" test=e"
- echo "f test=f"
- echo "a/i test=a/i"
- echo "onoff test -test"
- echo "offon -test test"
- echo "no notest"
+ echo "[attr]notest !test" &&
+ echo "\" d \" test=d" &&
+ echo " e test=e" &&
+ echo " e\" test=e" &&
+ echo "f test=f" &&
+ echo "a/i test=a/i" &&
+ echo "onoff test -test" &&
+ echo "offon -test test" &&
+ echo "no notest" &&
echo "A/e/F test=A/e/F"
) >.gitattributes &&
(
) >a/.gitattributes &&
(
echo "h test=a/b/h" &&
- echo "d/* test=a/b/d/*"
+ echo "d/* test=a/b/d/*" &&
echo "d/yes notest"
) >a/b/.gitattributes &&
(
(
cd bare.git &&
(
- echo "f test=f"
+ echo "f test=f" &&
echo "a/i test=a/i"
) >.gitattributes &&
attr_check f unspecified &&
(
cd bare.git &&
(
- echo "f test=f"
+ echo "f test=f" &&
echo "a/i test=a/i"
) >info/attributes &&
attr_check f f &&
cd repo &&
git init &&
echo "*.a filter=bug" >.gitattributes &&
- cp "$TEST_ROOT/test.o" missing-delay.a
+ cp "$TEST_ROOT/test.o" missing-delay.a &&
git add . &&
git commit -m "test commit"
) &&
git init &&
echo "*.a filter=bug" >.gitattributes &&
cp "$TEST_ROOT/test.o" invalid-delay.a &&
- cp "$TEST_ROOT/test.o" unfiltered
+ cp "$TEST_ROOT/test.o" unfiltered &&
git add . &&
git commit -m "test commit"
) &&
return 44;
}
EOT
- (echo p; echo 1; echo; echo s; echo n; echo y; echo q) |
+ test_write_lines p 1 "" s n y q |
git commit --interactive -m foo &&
test_cache_tree
'
read_tree_u_must_succeed -m -u branch-point side-b side-a &&
git ls-files -u >actual &&
(
- a=$(git rev-parse branch-point:subdir/file2)
- b=$(git rev-parse side-a:subdir/file2/another)
- echo "100644 $a 1 subdir/file2"
- echo "100644 $a 2 subdir/file2"
+ a=$(git rev-parse branch-point:subdir/file2) &&
+ b=$(git rev-parse side-a:subdir/file2/another) &&
+ echo "100644 $a 1 subdir/file2" &&
+ echo "100644 $a 2 subdir/file2" &&
echo "100644 $b 3 subdir/file2/another"
) >expect &&
test_cmp expect actual
git ls-files -s >expect &&
sha1=$(git rev-parse :new) &&
(
- echo "100644 $sha1 1 old"
+ echo "100644 $sha1 1 old" &&
echo "100644 $sha1 3 old"
) | git update-index --index-info &&
>old &&
git ls-files -s >expect &&
sha1=$(git rev-parse :new) &&
(
- echo "100644 $sha1 1 old"
+ echo "100644 $sha1 1 old" &&
echo "100644 $sha1 3 old"
) | git update-index --index-info &&
>old &&
git ls-files -s >expect &&
sha1=$(git rev-parse :new) &&
(
- echo "100644 $sha1 1 old"
+ echo "100644 $sha1 1 old" &&
echo "100644 $sha1 3 old"
) | git update-index --index-info &&
>old &&
git ls-files -s >expect &&
sha1=$(git rev-parse :new) &&
(
- echo "100644 $sha1 1 old"
+ echo "100644 $sha1 1 old" &&
echo "100644 $sha1 3 old"
) | git update-index --index-info &&
>old &&
git ls-files -s >expect &&
sha1=$(git rev-parse :new) &&
(
- echo "100644 $sha1 1 old"
+ echo "100644 $sha1 1 old" &&
echo "100644 $sha1 3 old"
) | git update-index --index-info &&
>old &&
test_expect_success 'multi-read' '
read_tree_must_succeed initial master side &&
- (echo a; echo b/c) >expect &&
+ test_write_lines a b/c >expect &&
git ls-files >actual &&
test_cmp expect actual
'
(
cd dir &&
echo "change" >two &&
- GIT_EXTERNAL_DIFF=./diff git diff >../actual
+ GIT_EXTERNAL_DIFF=./diff git diff >../actual &&
git checkout -- two
) &&
test_cmp expect actual
test-tool genrandom "c" $(( 128 * 1024 )) >mid3 &&
git add mid1 mid2 mid3 &&
- count=0
+ count=0 &&
for pi in .git/objects/pack/pack-*.idx
do
test -f "$pi" && count=$(( $count + 1 ))
test $count = 2 &&
(
- git hash-object --stdin <mid1
- git hash-object --stdin <mid2
+ git hash-object --stdin <mid1 &&
+ git hash-object --stdin <mid2 &&
git hash-object --stdin <mid3
) |
sort >expect &&
test_expect_success !MINGW 'get --path copes with unset $HOME' '
(
- unset HOME;
+ sane_unset HOME &&
test_must_fail git config --get --path path.home \
>result 2>msg &&
git config --get --path path.normal >>result &&
git log -1 -p HEAD^ >log.one &&
git log -1 -p HEAD >log.two &&
(
- cat log.one; echo
- cat log.two; echo
- cat log.one; echo
+ cat log.one && echo &&
+ cat log.two && echo &&
+ cat log.one && echo &&
cat log.two
) >expect &&
test_cmp expect actual
test_expect_success 'merge my-side@{u} records the correct name' '
(
- cd clone || exit
- git checkout master || exit
- git branch -D new ;# can fail but is ok
+ cd clone &&
+ git checkout master &&
+ test_might_fail git branch -D new &&
git branch -t new my-side@{u} &&
git merge -s ours new@{u} &&
git show -s --pretty=tformat:%s >actual &&
for i in 0 1 2 3 4 5 6 7 8 9
do
echo $i
- done
- echo
+ done &&
+ echo &&
echo b1rwzyc3
) >a0blgqsjc &&
test_might_fail git rm -f a0blgqsjc &&
(
- git cat-file blob $side:f5518nwu
+ git cat-file blob $side:f5518nwu &&
echo j3l0i9s6
) >ab2gs879 &&
git add ab2gs879 &&
commit=$(git commit-tree $tree -p HEAD <msg) &&
git update-ref HEAD "$commit" &&
GIT_ALLOW_NULL_SHA1=1 git reset --hard &&
- (test-tool dump-cache-tree >cache-tree.out || true) &&
+ test_might_fail test-tool dump-cache-tree >cache-tree.out &&
test_line_count = 0 cache-tree.out
'
test_expect_success PERL 'saying "n" does nothing' '
set_and_save_state dir/foo work head &&
- (echo n; echo n) | git checkout -p &&
+ test_write_lines n n | git checkout -p &&
verify_saved_state bar &&
verify_saved_state dir/foo
'
test_expect_success PERL 'git checkout -p' '
- (echo n; echo y) | git checkout -p &&
+ test_write_lines n y | git checkout -p &&
verify_saved_state bar &&
verify_state dir/foo head head
'
test_expect_success PERL 'git checkout -p with staged changes' '
set_state dir/foo work index &&
- (echo n; echo y) | git checkout -p &&
+ test_write_lines n y | git checkout -p &&
verify_saved_state bar &&
verify_state dir/foo index index
'
test_expect_success PERL 'git checkout -p HEAD with NO staged changes: abort' '
set_and_save_state dir/foo work head &&
- (echo n; echo y; echo n) | git checkout -p HEAD &&
+ test_write_lines n y n | git checkout -p HEAD &&
verify_saved_state bar &&
verify_saved_state dir/foo
'
test_expect_success PERL 'git checkout -p HEAD with NO staged changes: apply' '
- (echo n; echo y; echo y) | git checkout -p HEAD &&
+ test_write_lines n y y | git checkout -p HEAD &&
verify_saved_state bar &&
verify_state dir/foo head head
'
test_expect_success PERL 'git checkout -p HEAD with change already staged' '
set_state dir/foo index index &&
# the third n is to get out in case it mistakenly does not apply
- (echo n; echo y; echo n) | git checkout -p HEAD &&
+ test_write_lines n y n | git checkout -p HEAD &&
verify_saved_state bar &&
verify_state dir/foo head head
'
test_expect_success PERL 'git checkout -p HEAD^' '
# the third n is to get out in case it mistakenly does not apply
- (echo n; echo y; echo n) | git checkout -p HEAD^ &&
+ test_write_lines n y n | git checkout -p HEAD^ &&
verify_saved_state bar &&
verify_state dir/foo parent parent
'
test_expect_success PERL 'git checkout -p handles deletion' '
set_state dir/foo work index &&
rm dir/foo &&
- (echo n; echo y) | git checkout -p &&
+ test_write_lines n y | git checkout -p &&
verify_saved_state bar &&
verify_state dir/foo index index
'
test_expect_success PERL 'path limiting works: dir' '
set_state dir/foo work head &&
- (echo y; echo n) | git checkout -p dir &&
+ test_write_lines y n | git checkout -p dir &&
verify_saved_state bar &&
verify_state dir/foo head head
'
test_expect_success PERL 'path limiting works: -- dir' '
set_state dir/foo work head &&
- (echo y; echo n) | git checkout -p -- dir &&
+ test_write_lines y n | git checkout -p -- dir &&
verify_saved_state bar &&
verify_state dir/foo head head
'
test_expect_success PERL 'path limiting works: HEAD^ -- dir' '
# the third n is to get out in case it mistakenly does not apply
- (echo y; echo n; echo n) | git checkout -p HEAD^ -- dir &&
+ test_write_lines y n n | git checkout -p HEAD^ -- dir &&
verify_saved_state bar &&
verify_state dir/foo parent parent
'
test_expect_success PERL 'path limiting works: foo inside dir' '
set_state dir/foo work head &&
# the third n is to get out in case it mistakenly does not apply
- (echo y; echo n; echo n) | (cd dir && git checkout -p foo) &&
+ test_write_lines y n n | (cd dir && git checkout -p foo) &&
verify_saved_state bar &&
verify_state dir/foo head head
'
test_cmp expect.upstream actual.upstream
}
+status_uno_is_clean () {
+ >status.expect &&
+ git status -uno --porcelain >status.actual &&
+ test_cmp status.expect status.actual
+}
+
test_expect_success 'setup' '
test_commit my_master &&
git init repo_a &&
test_might_fail git branch -D xyzzy &&
test_must_fail git checkout xyzzy &&
+ status_uno_is_clean &&
test_must_fail git rev-parse --verify refs/heads/xyzzy &&
test_branch master
'
test_might_fail git branch -D foo &&
test_must_fail git checkout foo &&
+ status_uno_is_clean &&
test_must_fail git rev-parse --verify refs/heads/foo &&
test_branch master
'
+test_expect_success 'checkout of branch from multiple remotes fails with advice' '
+ git checkout -B master &&
+ test_might_fail git branch -D foo &&
+ test_must_fail git checkout foo 2>stderr &&
+ test_branch master &&
+ status_uno_is_clean &&
+ test_i18ngrep "^hint: " stderr &&
+ test_must_fail git -c advice.checkoutAmbiguousRemoteBranchName=false \
+ checkout foo 2>stderr &&
+ test_branch master &&
+ status_uno_is_clean &&
+ test_i18ngrep ! "^hint: " stderr &&
+ # Make sure the likes of checkout -p do not print this hint
+ git checkout -p foo 2>stderr &&
+ test_i18ngrep ! "^hint: " stderr &&
+ status_uno_is_clean
+'
+
+test_expect_success 'checkout of branch from multiple remotes succeeds with checkout.defaultRemote #1' '
+ git checkout -B master &&
+ status_uno_is_clean &&
+ test_might_fail git branch -D foo &&
+
+ git -c checkout.defaultRemote=repo_a checkout foo &&
+ status_uno_is_clean &&
+ test_branch foo &&
+ test_cmp_rev remotes/repo_a/foo HEAD &&
+ test_branch_upstream foo repo_a foo
+'
+
test_expect_success 'checkout of branch from a single remote succeeds #1' '
git checkout -B master &&
test_might_fail git branch -D bar &&
git checkout bar &&
+ status_uno_is_clean &&
test_branch bar &&
test_cmp_rev remotes/repo_a/bar HEAD &&
test_branch_upstream bar repo_a bar
test_might_fail git branch -D baz &&
git checkout baz &&
+ status_uno_is_clean &&
test_branch baz &&
test_cmp_rev remotes/other_b/baz HEAD &&
test_branch_upstream baz repo_b baz
test_expect_success '--no-guess suppresses branch auto-vivification' '
git checkout -B master &&
+ status_uno_is_clean &&
test_might_fail git branch -D bar &&
test_must_fail git checkout --no-guess bar &&
test_expect_success 'setup more remotes with unconventional refspecs' '
git checkout -B master &&
+ status_uno_is_clean &&
git init repo_c &&
(
cd repo_c &&
test_expect_success 'checkout of branch from multiple remotes fails #2' '
git checkout -B master &&
+ status_uno_is_clean &&
test_might_fail git branch -D bar &&
test_must_fail git checkout bar &&
+ status_uno_is_clean &&
test_must_fail git rev-parse --verify refs/heads/bar &&
test_branch master
'
test_expect_success 'checkout of branch from multiple remotes fails #3' '
git checkout -B master &&
+ status_uno_is_clean &&
test_might_fail git branch -D baz &&
test_must_fail git checkout baz &&
+ status_uno_is_clean &&
test_must_fail git rev-parse --verify refs/heads/baz &&
test_branch master
'
test_expect_success 'checkout of branch from a single remote succeeds #3' '
git checkout -B master &&
+ status_uno_is_clean &&
test_might_fail git branch -D spam &&
git checkout spam &&
+ status_uno_is_clean &&
test_branch spam &&
test_cmp_rev refs/remotes/extra_dir/repo_c/extra_dir/spam HEAD &&
test_branch_upstream spam repo_c spam
test_expect_success 'checkout of branch from a single remote succeeds #4' '
git checkout -B master &&
+ status_uno_is_clean &&
test_might_fail git branch -D eggs &&
git checkout eggs &&
+ status_uno_is_clean &&
test_branch eggs &&
test_cmp_rev refs/repo_d/eggs HEAD &&
test_branch_upstream eggs repo_d eggs
test_expect_success 'checkout of branch with a file having the same name fails' '
git checkout -B master &&
+ status_uno_is_clean &&
test_might_fail git branch -D spam &&
>spam &&
test_must_fail git checkout spam &&
+ status_uno_is_clean &&
test_must_fail git rev-parse --verify refs/heads/spam &&
test_branch master
'
test_expect_success 'checkout of branch with a file in subdir having the same name fails' '
git checkout -B master &&
+ status_uno_is_clean &&
test_might_fail git branch -D spam &&
>spam &&
mkdir sub &&
mv spam sub/spam &&
test_must_fail git -C sub checkout spam &&
+ status_uno_is_clean &&
test_must_fail git rev-parse --verify refs/heads/spam &&
test_branch master
'
test_expect_success 'checkout <branch> -- succeeds, even if a file with the same name exists' '
git checkout -B master &&
+ status_uno_is_clean &&
test_might_fail git branch -D spam &&
>spam &&
git checkout spam -- &&
+ status_uno_is_clean &&
test_branch spam &&
test_cmp_rev refs/remotes/extra_dir/repo_c/extra_dir/spam HEAD &&
test_branch_upstream spam repo_c spam
test_expect_success 'loosely defined local base branch is reported correctly' '
git checkout master &&
+ status_uno_is_clean &&
git branch strict &&
git branch loose &&
git commit --allow-empty -m "a bit more" &&
test_config branch.loose.merge master &&
git checkout strict | sed -e "s/strict/BRANCHNAME/g" >expect &&
+ status_uno_is_clean &&
git checkout loose | sed -e "s/loose/BRANCHNAME/g" >actual &&
+ status_uno_is_clean &&
test_cmp expect actual
'
)
'
+test_expect_success '"add" <path> <branch> dwims with checkout.defaultRemote' '
+ test_when_finished rm -rf repo_upstream repo_dwim foo &&
+ setup_remote_repo repo_upstream repo_dwim &&
+ git init repo_dwim &&
+ (
+ cd repo_dwim &&
+ git remote add repo_upstream2 ../repo_upstream &&
+ git fetch repo_upstream2 &&
+ test_must_fail git worktree add ../foo foo &&
+ git -c checkout.defaultRemote=repo_upstream worktree add ../foo foo &&
+ >status.expect &&
+ git status -uno --porcelain >status.actual &&
+ test_cmp status.expect status.actual
+ ) &&
+ (
+ cd foo &&
+ test_branch_upstream foo repo_upstream foo &&
+ test_cmp_rev refs/remotes/repo_upstream/foo refs/heads/foo
+ )
+'
+
test_expect_success 'git worktree add does not match remote' '
test_when_finished rm -rf repo_a repo_b foo &&
setup_remote_repo repo_a repo_b &&
test_create_repo xyzzy &&
cd xyzzy &&
>file &&
- git add file
+ git add file &&
git commit -m "sub initial"
) &&
git add xyzzy &&
test_expect_success setup '
(
- echo .gitignore
+ echo .gitignore &&
echo will-remove
) >expect &&
(
- echo actual
- echo expect
+ echo actual &&
+ echo expect &&
echo ignored
) >.gitignore &&
git --literal-pathspecs add --all &&
test_expect_success 'git add --all' '
(
- echo .gitignore
- echo not-ignored
- echo "M .gitignore"
- echo "A not-ignored"
+ echo .gitignore &&
+ echo not-ignored &&
+ echo "M .gitignore" &&
+ echo "A not-ignored" &&
echo "D will-remove"
) >expect &&
>ignored &&
) &&
(
cd super &&
- "$SHELL_PATH" "$TEST_DIRECTORY/../contrib/workdir/git-new-workdir" ../sub sub
+ "$SHELL_PATH" "$TEST_DIRECTORY/../contrib/workdir/git-new-workdir" ../sub sub &&
git ls-files --others --exclude-standard >../actual
) &&
echo sub/ >expect &&
printf "$pat" "$blob_a" "$path_a" "$blob_z" "$path_z" |
git update-index --add --index-info &&
(
- echo "$path_a"
+ echo "$path_a" &&
echo "$path_z"
) >expect &&
git ls-files >actual &&
test_expect_success 'no buffer overflow in lazy_init_name_hash' '
(
- test_seq $LAZY_THREAD_COST | sed "s/^/a_/"
- echo b/b/b
- test_seq $LAZY_THREAD_COST | sed "s/^/c_/"
- test_seq 50 | sed "s/^/d_/" | tr "\n" "/"; echo d
+ test_seq $LAZY_THREAD_COST | sed "s/^/a_/" &&
+ echo b/b/b &&
+ test_seq $LAZY_THREAD_COST | sed "s/^/c_/" &&
+ test_seq 50 | sed "s/^/d_/" | tr "\n" "/" && echo d
) |
sed "s/^/100644 $EMPTY_BLOB /" |
git update-index --index-info &&
test_tick &&
git commit -m "master modifies a and d/e" &&
c1=$(git rev-parse --verify HEAD) &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o1 a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o1 d/e"
- echo "100644 $o1 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o1 a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o1 d/e" &&
+ echo "100644 $o1 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o1 0 d/e"
) >expected &&
test_cmp expected actual
rm -rf [abcd] &&
git checkout side &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o0 a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o0 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o0 a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o0 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual &&
test_tick &&
git commit -m "side modifies a" &&
c2=$(git rev-parse --verify HEAD) &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o2 a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o2 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o2 a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o2 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual
rm -rf [abcd] &&
git checkout df-1 &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o0 a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o0 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o0 a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o0 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual &&
test_tick &&
git commit -m "df-1 makes b/c" &&
c3=$(git rev-parse --verify HEAD) &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o0 a"
- echo "100644 blob $o3 b/c"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o0 0 a"
- echo "100644 $o3 0 b/c"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o0 a" &&
+ echo "100644 blob $o3 b/c" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o0 0 a" &&
+ echo "100644 $o3 0 b/c" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual
rm -rf [abcd] &&
git checkout df-2 &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o0 a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o0 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o0 a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o0 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual &&
test_tick &&
git commit -m "df-2 makes a/c" &&
c4=$(git rev-parse --verify HEAD) &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o4 a/c"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o4 0 a/c"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o4 a/c" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o4 0 a/c" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual
rm -rf [abcd] &&
git checkout remove &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o0 a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o0 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o0 a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o0 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual &&
test_tick &&
git commit -m "remove removes b and modifies a" &&
c5=$(git rev-parse --verify HEAD) &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o5 a"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o5 0 a"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o5 a" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o5 0 a" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual
rm -rf [abcd] &&
git checkout df-3 &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o0 a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 $o0 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o0 a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 $o0 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o0 0 d/e"
) >expected &&
test_cmp expected actual &&
test_tick &&
git commit -m "df-3 makes d" &&
c6=$(git rev-parse --verify HEAD) &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o0 a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o6 d"
- echo "100644 $o0 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 blob $o0 a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o6 d" &&
+ echo "100644 $o0 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o6 0 d"
) >expected &&
test_cmp expected actual
git ls-files -s >actual &&
(
- echo "100644 $o0 1 a"
- echo "100644 $o2 2 a"
- echo "100644 $o1 3 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 $o0 1 a" &&
+ echo "100644 $o2 2 a" &&
+ echo "100644 $o1 3 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o1 0 d/e"
) >expected &&
test_cmp expected actual
git ls-files -s >actual &&
(
- echo "100644 $o0 1 a"
- echo "100644 $o1 2 a"
- echo "100644 $o5 3 a"
- echo "100644 $o0 0 c"
+ echo "100644 $o0 1 a" &&
+ echo "100644 $o1 2 a" &&
+ echo "100644 $o5 3 a" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o1 0 d/e"
) >expected &&
test_cmp expected actual
git ls-files -s >actual &&
(
- echo "100644 $o1 0 a"
- echo "100644 $o3 0 b/c"
- echo "100644 $o0 0 c"
+ echo "100644 $o1 0 a" &&
+ echo "100644 $o3 0 b/c" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o1 0 d/e"
) >expected &&
test_cmp expected actual
git ls-files -s >actual &&
(
- echo "100644 $o0 1 a"
- echo "100644 $o1 2 a"
- echo "100644 $o4 0 a/c"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 $o0 1 a" &&
+ echo "100644 $o1 2 a" &&
+ echo "100644 $o4 0 a/c" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o1 0 d/e"
) >expected &&
test_cmp expected actual
git ls-files -s >actual &&
(
- echo "100644 $o0 1 a"
- echo "100644 $o1 3 a"
- echo "100644 $o4 0 a/c"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 $o0 1 a" &&
+ echo "100644 $o1 3 a" &&
+ echo "100644 $o4 0 a/c" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o1 0 d/e"
) >expected &&
test_cmp expected actual
git ls-files -s >actual &&
(
- echo "100644 $o1 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
- echo "100644 $o6 3 d"
- echo "100644 $o0 1 d/e"
+ echo "100644 $o1 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
+ echo "100644 $o6 3 d" &&
+ echo "100644 $o0 1 d/e" &&
echo "100644 $o1 2 d/e"
) >expected &&
test_cmp expected actual
git ls-files -s >actual &&
(
- echo "100644 $o1 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
- echo "100644 $o6 2 d"
- echo "100644 $o0 1 d/e"
+ echo "100644 $o1 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
+ echo "100644 $o6 2 d" &&
+ echo "100644 $o0 1 d/e" &&
echo "100644 $o1 3 d/e"
) >expected &&
test_cmp expected actual
git read-tree --prefix=M/ master &&
git ls-files -s >actual &&
(
- echo "100644 $o1 0 M/a"
- echo "100644 $o0 0 M/b"
- echo "100644 $o0 0 M/c"
- echo "100644 $o1 0 M/d/e"
- echo "100644 $o1 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 $o1 0 M/a" &&
+ echo "100644 $o0 0 M/b" &&
+ echo "100644 $o0 0 M/c" &&
+ echo "100644 $o1 0 M/d/e" &&
+ echo "100644 $o1 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o1 0 d/e"
) >expected &&
test_cmp expected actual &&
git read-tree --prefix=a1/ master &&
git ls-files -s >actual &&
(
- echo "100644 $o1 0 M/a"
- echo "100644 $o0 0 M/b"
- echo "100644 $o0 0 M/c"
- echo "100644 $o1 0 M/d/e"
- echo "100644 $o1 0 a"
- echo "100644 $o1 0 a1/a"
- echo "100644 $o0 0 a1/b"
- echo "100644 $o0 0 a1/c"
- echo "100644 $o1 0 a1/d/e"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
+ echo "100644 $o1 0 M/a" &&
+ echo "100644 $o0 0 M/b" &&
+ echo "100644 $o0 0 M/c" &&
+ echo "100644 $o1 0 M/d/e" &&
+ echo "100644 $o1 0 a" &&
+ echo "100644 $o1 0 a1/a" &&
+ echo "100644 $o0 0 a1/b" &&
+ echo "100644 $o0 0 a1/c" &&
+ echo "100644 $o1 0 a1/d/e" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
echo "100644 $o1 0 d/e"
) >expected &&
test_cmp expected actual &&
git read-tree --prefix=z/ master &&
git ls-files -s >actual &&
(
- echo "100644 $o1 0 M/a"
- echo "100644 $o0 0 M/b"
- echo "100644 $o0 0 M/c"
- echo "100644 $o1 0 M/d/e"
- echo "100644 $o1 0 a"
- echo "100644 $o1 0 a1/a"
- echo "100644 $o0 0 a1/b"
- echo "100644 $o0 0 a1/c"
- echo "100644 $o1 0 a1/d/e"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
- echo "100644 $o1 0 d/e"
- echo "100644 $o1 0 z/a"
- echo "100644 $o0 0 z/b"
- echo "100644 $o0 0 z/c"
+ echo "100644 $o1 0 M/a" &&
+ echo "100644 $o0 0 M/b" &&
+ echo "100644 $o0 0 M/c" &&
+ echo "100644 $o1 0 M/d/e" &&
+ echo "100644 $o1 0 a" &&
+ echo "100644 $o1 0 a1/a" &&
+ echo "100644 $o0 0 a1/b" &&
+ echo "100644 $o0 0 a1/c" &&
+ echo "100644 $o1 0 a1/d/e" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
+ echo "100644 $o1 0 d/e" &&
+ echo "100644 $o1 0 z/a" &&
+ echo "100644 $o0 0 z/b" &&
+ echo "100644 $o0 0 z/c" &&
echo "100644 $o1 0 z/d/e"
) >expected &&
test_cmp expected actual
git ls-files -s >actual &&
(
- echo "100644 $o5 0 a"
- echo "100644 $o0 0 c"
+ echo "100644 $o5 0 a" &&
+ echo "100644 $o0 0 c" &&
echo "160000 $c1 0 d"
) >expected &&
test_cmp expected actual
git merge rename &&
( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 blob $o0 e"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
- echo "100644 $o0 0 d/e"
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 blob $o0 e" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
+ echo "100644 $o0 0 d/e" &&
echo "100644 $o0 0 e"
) >expected &&
test_cmp expected actual
git checkout -f rename &&
git merge rename-ln &&
- ( git ls-tree -r HEAD ; git ls-files -s ) >actual &&
+ ( git ls-tree -r HEAD && git ls-files -s ) >actual &&
(
- echo "120000 blob $oln a"
- echo "100644 blob $o0 b"
- echo "100644 blob $o0 c"
- echo "100644 blob $o0 d/e"
- echo "100644 blob $o0 e"
- echo "120000 $oln 0 a"
- echo "100644 $o0 0 b"
- echo "100644 $o0 0 c"
- echo "100644 $o0 0 d/e"
+ echo "120000 blob $oln a" &&
+ echo "100644 blob $o0 b" &&
+ echo "100644 blob $o0 c" &&
+ echo "100644 blob $o0 d/e" &&
+ echo "100644 blob $o0 e" &&
+ echo "120000 $oln 0 a" &&
+ echo "100644 $o0 0 b" &&
+ echo "100644 $o0 0 c" &&
+ echo "100644 $o0 0 d/e" &&
echo "100644 $o0 0 e"
) >expected &&
test_cmp expected actual
--- /dev/null
+#!/bin/sh
+
+test_description='merge with sparse files'
+
+. ./test-lib.sh
+
+# test_file $filename $content
+test_file () {
+ echo "$2" > "$1" &&
+ git add "$1"
+}
+
+# test_commit_this $message_and_tag
+test_commit_this () {
+ git commit -m "$1" &&
+ git tag "$1"
+}
+
+test_expect_success 'setup' '
+ : >empty &&
+ test_file checked-out init &&
+ test_file modify_delete modify_delete_init &&
+ test_commit_this init &&
+ test_file modify_delete modify_delete_theirs &&
+ test_commit_this theirs &&
+ git reset --hard init &&
+ git rm modify_delete &&
+ test_commit_this ours &&
+ git config core.sparseCheckout true &&
+ echo "/checked-out" >.git/info/sparse-checkout &&
+ git reset --hard &&
+ ! git merge theirs
+'
+
+test_expect_success 'reset --hard works after the conflict' '
+ git reset --hard
+'
+
+test_expect_success 'is reset properly' '
+ git status --porcelain -- modify_delete >out &&
+ test_cmp empty out &&
+ test_path_is_missing modify_delete
+'
+
+test_expect_success 'setup: conflict back' '
+ ! git merge theirs
+'
+
+test_expect_success 'Merge abort works after the conflict' '
+ git merge --abort
+'
+
+test_expect_success 'is aborted properly' '
+ git status --porcelain -- modify_delete >out &&
+ test_cmp empty out &&
+ test_path_is_missing modify_delete
+'
+
+test_done
test_expect_success clone '
git clone "file://$(pwd)/.git" cloned &&
- (git rev-parse HEAD; git ls-files -s) >expected &&
+ (git rev-parse HEAD && git ls-files -s) >expected &&
(
cd cloned &&
- (git rev-parse HEAD; git ls-files -s) >../actual
+ (git rev-parse HEAD && git ls-files -s) >../actual
) &&
test_cmp expected actual
'
'
test_expect_success fetch '
- (git rev-parse HEAD; git ls-files -s) >expected &&
+ (git rev-parse HEAD && git ls-files -s) >expected &&
(
cd cloned &&
git pull &&
- (git rev-parse HEAD; git ls-files -s) >../actual
+ (git rev-parse HEAD && git ls-files -s) >../actual
) &&
test_cmp expected actual
'
cat >expect <<-EOF &&
100644 blob $EMPTY_BLOB ../a[a]/three
EOF
- ( cd aa && git ls-tree -r HEAD "../a[a]"; ) >actual &&
+ ( cd aa && git ls-tree -r HEAD "../a[a]" ) >actual &&
test_cmp expect actual
'
test_expect_success 'retry acquiring packed-refs.lock' '
LOCK=.git/packed-refs.lock &&
>"$LOCK" &&
- test_when_finished "wait; rm -f $LOCK" &&
+ test_when_finished "wait && rm -f $LOCK" &&
{
- ( sleep 1 ; rm -f $LOCK ) &
+ ( sleep 1 && rm -f $LOCK ) &
} &&
git -c core.packedrefstimeout=3000 pack-refs --all --prune
'
${indent}
${indent}yet another note
EOF
- (echo $(git rev-parse HEAD~3) $(git rev-parse HEAD^); \
+ (echo $(git rev-parse HEAD~3) $(git rev-parse HEAD^) &&
echo $(git rev-parse HEAD~2) $(git rev-parse HEAD)) |
git notes copy --stdin &&
git log -2 >actual &&
EOF
test_commit 14th &&
test_commit 15th &&
- (echo $(git rev-parse HEAD~3) $(git rev-parse HEAD^); \
+ (echo $(git rev-parse HEAD~3) $(git rev-parse HEAD^) &&
echo $(git rev-parse HEAD~2) $(git rev-parse HEAD)) |
git notes copy --for-rewrite=foo &&
git log -2 >actual &&
EOF
test_config notes.rewriteMode overwrite &&
test_config notes.rewriteRef "refs/notes/*" &&
- (echo $(git rev-parse HEAD~3) $(git rev-parse HEAD^); \
+ (echo $(git rev-parse HEAD~3) $(git rev-parse HEAD^) &&
echo $(git rev-parse HEAD~2) $(git rev-parse HEAD)) |
git notes copy --for-rewrite=foo &&
git log -2 >actual &&
git notes add -f -m"append 2" HEAD^^ &&
test_config notes.rewriteMode concatenate &&
test_config notes.rewriteRef "refs/notes/*" &&
- (echo $(git rev-parse HEAD^) $(git rev-parse HEAD);
+ (echo $(git rev-parse HEAD^) $(git rev-parse HEAD) &&
echo $(git rev-parse HEAD^^) $(git rev-parse HEAD)) |
git notes copy --for-rewrite=foo &&
git log -1 >actual &&
test_expect_success 'Rebase a commit that sprinkles CRs in' '
(
- echo "One"
- echo "TwoQ"
- echo "Three"
- echo "FQur"
+ echo "One" &&
+ echo "TwoQ" &&
+ echo "Three" &&
+ echo "FQur" &&
echo "Five"
) | q_to_cr >CR &&
git add CR &&
--- /dev/null
+#!/bin/sh
+
+test_description='git rebase + directory rename tests'
+
+. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-rebase.sh
+
+test_expect_success 'setup testcase' '
+ test_create_repo dir-rename &&
+ (
+ cd dir-rename &&
+
+ mkdir x &&
+ test_seq 1 10 >x/a &&
+ test_seq 11 20 >x/b &&
+ test_seq 21 30 >x/c &&
+ test_write_lines a b c d e f g h i >l &&
+ git add x l &&
+ git commit -m "Initial" &&
+
+ git branch O &&
+ git branch A &&
+ git branch B &&
+
+ git checkout A &&
+ git mv x y &&
+ git mv l letters &&
+ git commit -m "Rename x to y, l to letters" &&
+
+ git checkout B &&
+ echo j >>l &&
+ test_seq 31 40 >x/d &&
+ git add l x/d &&
+ git commit -m "Modify l, add x/d"
+ )
+'
+
+test_expect_success 'rebase --interactive: directory rename detected' '
+ (
+ cd dir-rename &&
+
+ git checkout B^0 &&
+
+ set_fake_editor &&
+ FAKE_LINES="1" git rebase --interactive A &&
+
+ git ls-files -s >out &&
+ test_line_count = 5 out &&
+
+ test_path_is_file y/d &&
+ test_path_is_missing x/d
+ )
+'
+
+test_expect_failure 'rebase (am): directory rename detected' '
+ (
+ cd dir-rename &&
+
+ git checkout B^0 &&
+
+ git rebase A &&
+
+ git ls-files -s >out &&
+ test_line_count = 5 out &&
+
+ test_path_is_file y/d &&
+ test_path_is_missing x/d
+ )
+'
+
+test_expect_success 'rebase --merge: directory rename detected' '
+ (
+ cd dir-rename &&
+
+ git checkout B^0 &&
+
+ git rebase --merge A &&
+
+ git ls-files -s >out &&
+ test_line_count = 5 out &&
+
+ test_path_is_file y/d &&
+ test_path_is_missing x/d
+ )
+'
+
+test_expect_failure 'am: directory rename detected' '
+ (
+ cd dir-rename &&
+
+ git checkout A^0 &&
+
+ git format-patch -1 B &&
+
+ git am --3way 0001*.patch &&
+
+ git ls-files -s >out &&
+ test_line_count = 5 out &&
+
+ test_path_is_file y/d &&
+ test_path_is_missing x/d
+ )
+'
+
+test_done
git commit -a -m"master updates a bit more." &&
git checkout side &&
- (echo "0 $T" ; cat original) >renamed &&
+ (echo "0 $T" && cat original) >renamed &&
git add renamed &&
git update-index --force-remove original &&
git commit -a -m"side renames and edits." &&
git checkout -b test-funny master^ &&
test_commit funny &&
(
- PATH=./test-bin:$PATH
+ PATH=./test-bin:$PATH &&
git rebase -s funny -Xopt master
) &&
test -f funny.was.run
)
'
+test_expect_success 'rebase -i sets work tree properly' '
+ test_when_finished "rm -rf subdir" &&
+ test_when_finished "test_might_fail git rebase --abort" &&
+ mkdir subdir &&
+ git rebase -x "(cd subdir && git rev-parse --show-toplevel)" HEAD^ \
+ >actual &&
+ ! grep "/subdir$" actual
+'
+
test_expect_success 'rebase -i with the exec command checks tree cleanness' '
git checkout master &&
set_fake_editor &&
'
test_expect_success 'retain authorship w/ conflicts' '
+ oGIT_AUTHOR_NAME=$GIT_AUTHOR_NAME &&
+ test_when_finished "GIT_AUTHOR_NAME=\$oGIT_AUTHOR_NAME" &&
+
git reset --hard twerp &&
test_commit a conflict a conflict-a &&
git reset --hard twerp &&
- GIT_AUTHOR_NAME=AttributeMe \
+
+ GIT_AUTHOR_NAME=AttributeMe &&
+ export GIT_AUTHOR_NAME &&
test_commit b conflict b conflict-b &&
+ GIT_AUTHOR_NAME=$oGIT_AUTHOR_NAME &&
+
set_fake_editor &&
test_must_fail git rebase -i conflict-a &&
echo resolved >conflict &&
one=$(git rev-parse HEAD~3) &&
set_fake_editor &&
test_must_fail env FAKE_LINES="1 squash 3 2" git rebase -i HEAD~3 &&
- (echo one; echo two; echo four) > conflict &&
+ test_write_lines one two four > conflict &&
git add conflict &&
test_must_fail git rebase --continue &&
echo resolved > conflict &&
one=$(git rev-parse HEAD~3) &&
set_fake_editor &&
test_must_fail env FAKE_LINES="3 squash 1 2" git rebase -i HEAD~3 &&
- (echo one; echo four) > conflict &&
+ test_write_lines one four > conflict &&
git add conflict &&
test_must_fail git rebase --continue &&
- (echo one; echo two; echo four) > conflict &&
+ test_write_lines one two four > conflict &&
git add conflict &&
test_must_fail git rebase --continue &&
echo resolved > conflict &&
'
test_expect_success 'aborted --continue does not squash commits after "edit"' '
+ test_when_finished "git rebase --abort" &&
old=$(git rev-parse HEAD) &&
test_tick &&
set_fake_editor &&
FAKE_LINES="edit 1" git rebase -i HEAD^ &&
echo "edited again" > file7 &&
git add file7 &&
- test_must_fail env FAKE_COMMIT_MESSAGE=" " git rebase --continue &&
- test $old = $(git rev-parse HEAD) &&
- git rebase --abort
+ echo all the things >>conflict &&
+ test_must_fail git rebase --continue &&
+ test $old = $(git rev-parse HEAD)
'
test_expect_success 'auto-amend only edited commits after "edit"' '
'
test_expect_success 'rebase -m commit with empty message' '
- test_must_fail git rebase -m master empty-message-merge &&
- git rebase --abort &&
- git rebase -m --allow-empty-message master empty-message-merge
+ git rebase -m master empty-message-merge
'
test_expect_success 'rebase -i commit with empty message' '
git checkout diff-in-message &&
set_fake_editor &&
- test_must_fail env FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \
- git rebase -i HEAD^ &&
- git rebase --abort &&
- FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \
- git rebase -i --allow-empty-message HEAD^
+ env FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \
+ git rebase -i HEAD^
'
test_done
EOF
chmod +x test-bin/git-merge-funny &&
(
- PATH=./test-bin:$PATH
+ PATH=./test-bin:$PATH &&
test_must_fail git rebase -s funny -Xopt master topic
) &&
test -f funny.was.run &&
echo "Resolved" >F2 &&
git add F2 &&
(
- PATH=./test-bin:$PATH
+ PATH=./test-bin:$PATH &&
git rebase --continue
) &&
test -f funny.was.run
--- /dev/null
+#!/bin/sh
+
+test_description='test if rebase detects and aborts on incompatible options'
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ test_seq 2 9 >foo &&
+ git add foo &&
+ git commit -m orig &&
+
+ git branch A &&
+ git branch B &&
+
+ git checkout A &&
+ test_seq 1 9 >foo &&
+ git add foo &&
+ git commit -m A &&
+
+ git checkout B &&
+ echo "q qfoo();" | q_to_tab >>foo &&
+ git add foo &&
+ git commit -m B
+'
+
+#
+# Rebase has lots of useful options like --whitespace=fix, which are
+# actually all built in terms of flags to git-am. Since neither
+# --merge nor --interactive (nor any options that imply those two) use
+# git-am, using them together will result in flags like --whitespace=fix
+# being ignored. Make sure rebase warns the user and aborts instead.
+#
+
+test_rebase_am_only () {
+ opt=$1
+ shift
+ test_expect_success "$opt incompatible with --merge" "
+ git checkout B^0 &&
+ test_must_fail git rebase $opt --merge A
+ "
+
+ test_expect_success "$opt incompatible with --strategy=ours" "
+ git checkout B^0 &&
+ test_must_fail git rebase $opt --strategy=ours A
+ "
+
+ test_expect_success "$opt incompatible with --strategy-option=ours" "
+ git checkout B^0 &&
+ test_must_fail git rebase $opt --strategy-option=ours A
+ "
+
+ test_expect_success "$opt incompatible with --interactive" "
+ git checkout B^0 &&
+ test_must_fail git rebase $opt --interactive A
+ "
+
+ test_expect_success "$opt incompatible with --exec" "
+ git checkout B^0 &&
+ test_must_fail git rebase $opt --exec 'true' A
+ "
+
+}
+
+test_rebase_am_only --whitespace=fix
+test_rebase_am_only --ignore-whitespace
+test_rebase_am_only --committer-date-is-author-date
+test_rebase_am_only -C4
+
+test_expect_success '--preserve-merges incompatible with --signoff' '
+ git checkout B^0 &&
+ test_must_fail git rebase --preserve-merges --signoff A
+'
+
+test_expect_success '--preserve-merges incompatible with --rebase-merges' '
+ git checkout B^0 &&
+ test_must_fail git rebase --preserve-merges --rebase-merges A
+'
+
+test_expect_success '--rebase-merges incompatible with --strategy' '
+ git checkout B^0 &&
+ test_must_fail git rebase --rebase-merges -s resolve A
+'
+
+test_expect_success '--rebase-merges incompatible with --strategy-option' '
+ git checkout B^0 &&
+ test_must_fail git rebase --rebase-merges -Xignore-space-change A
+'
+
+test_done
! grep "^label $third$" .git/ORIGINAL-TODO
'
+test_expect_success 'octopus merges' '
+ git checkout -b three &&
+ test_commit before-octopus &&
+ test_commit three &&
+ git checkout -b two HEAD^ &&
+ test_commit two &&
+ git checkout -b one HEAD^ &&
+ test_commit one &&
+ test_tick &&
+ (GIT_AUTHOR_NAME="Hank" GIT_AUTHOR_EMAIL="hank@sea.world" \
+ git merge -m "Tüntenfüsch" two three) &&
+
+ : fast forward if possible &&
+ before="$(git rev-parse --verify HEAD)" &&
+ test_tick &&
+ git rebase -i -r HEAD^^ &&
+ test_cmp_rev HEAD $before &&
+
+ test_tick &&
+ git rebase -i --force -r HEAD^^ &&
+ test "Hank" = "$(git show -s --format=%an HEAD)" &&
+ test "$before" != $(git rev-parse HEAD) &&
+ test_cmp_graph HEAD^^.. <<-\EOF
+ *-. Tüntenfüsch
+ |\ \
+ | | * three
+ | * | two
+ | |/
+ * | one
+ |/
+ o before-octopus
+ EOF
+'
+
test_done
test_expect_code 128 git cherry-pick --continue
'
-test_expect_success 'empty commit set' '
+test_expect_success 'empty commit set (no commits to walk)' '
pristine_detach initial &&
test_expect_code 128 git cherry-pick base..base
'
+test_expect_success 'empty commit set (culled during walk)' '
+ pristine_detach initial &&
+ test_expect_code 128 git cherry-pick -2 --author=no.such.author base
+'
+
test_expect_success 'malformed instruction sheet 3' '
pristine_detach initial &&
test_expect_code 1 git cherry-pick base..anotherpick &&
test_expect_success 'git add with filemode=0, symlinks=0 prefers stage 2 over stage 1' '
git rm --cached -f file symlink &&
(
- echo "100644 $(git hash-object -w stage1) 1 file"
- echo "100755 $(git hash-object -w stage2) 2 file"
- echo "100644 $(printf 1 | git hash-object -w -t blob --stdin) 1 symlink"
+ echo "100644 $(git hash-object -w stage1) 1 file" &&
+ echo "100755 $(git hash-object -w stage2) 2 file" &&
+ echo "100644 $(printf 1 | git hash-object -w -t blob --stdin) 1 symlink" &&
echo "120000 $(printf 2 | git hash-object -w -t blob --stdin) 2 symlink"
) | git update-index --index-info &&
git config core.filemode 0 &&
git reset --hard &&
H=$(git rev-parse :1/2/a) &&
(
- echo "100644 $H 1 track-this"
+ echo "100644 $H 1 track-this" &&
echo "100644 $H 3 track-this"
) | git update-index --index-info &&
echo track-this >>.gitignore &&
'
test_expect_success 'diff works (initial)' '
- (echo d; echo 1) | git add -i >output &&
+ test_write_lines d 1 | git add -i >output &&
sed -ne "/new file/,/content/p" <output >diff &&
diff_cmp expected diff
'
test_expect_success 'revert works (initial)' '
git add file &&
- (echo r; echo 1) | git add -i &&
+ test_write_lines r 1 | git add -i &&
git ls-files >output &&
! grep . output
'
'
test_expect_success 'diff works (commit)' '
- (echo d; echo 1) | git add -i >output &&
+ test_write_lines d 1 | git add -i >output &&
sed -ne "/^index/,/content/p" <output >diff &&
diff_cmp expected diff
'
test_expect_success 'revert works (commit)' '
git add file &&
- (echo r; echo 1) | git add -i &&
+ test_write_lines r 1 | git add -i &&
git add -i </dev/null >output &&
grep "unchanged *+3/-0 file" output
'
test_expect_success 'dummy edit works' '
test_set_editor : &&
- (echo e; echo a) | git add -p &&
+ test_write_lines e a | git add -p &&
git diff > diff &&
diff_cmp expected diff
'
test_expect_success 'bad edit rejected' '
git reset &&
- (echo e; echo n; echo d) | git add -p >output &&
+ test_write_lines e n d | git add -p >output &&
grep "hunk does not apply" output
'
test_expect_success 'garbage edit rejected' '
git reset &&
- (echo e; echo n; echo d) | git add -p >output &&
+ test_write_lines e n d | git add -p >output &&
grep "hunk does not apply" output
'
'
test_expect_success 'real edit works' '
- (echo e; echo n; echo d) | git add -p &&
+ test_write_lines e n d | git add -p &&
git diff >output &&
diff_cmp expected output
'
test_expect_success 'saying "n" does nothing' '
set_state HEAD HEADfile_work HEADfile_index &&
set_state dir/foo work index &&
- (echo n; echo n; echo n) | test_must_fail git stash save -p &&
+ test_write_lines n n n | test_must_fail git stash save -p &&
verify_state HEAD HEADfile_work HEADfile_index &&
verify_saved_state bar &&
verify_state dir/foo work index
'
test_expect_success 'git stash -p' '
- (echo y; echo n; echo y) | git stash save -p &&
+ test_write_lines y n y | git stash save -p &&
verify_state HEAD committed HEADfile_index &&
verify_saved_state bar &&
verify_state dir/foo head index &&
set_state HEAD HEADfile_work HEADfile_index &&
set_state bar bar_work bar_index &&
set_state dir/foo work index &&
- (echo y; echo n; echo y) | git stash save -p --no-keep-index &&
+ test_write_lines y n y | git stash save -p --no-keep-index &&
verify_state HEAD committed committed &&
verify_state bar bar_work dummy &&
verify_state dir/foo head head &&
set_state HEAD HEADfile_work HEADfile_index &&
set_state bar bar_work bar_index &&
set_state dir/foo work index &&
- (echo y; echo n; echo y) | git stash save --no-keep-index -p &&
+ test_write_lines y n y | git stash save --no-keep-index -p &&
verify_state HEAD committed committed &&
verify_state dir/foo head head &&
verify_state bar bar_work dummy &&
git add "path??" &&
test_tick &&
git commit -m "hundred" &&
- (cat path1; echo new) >new-path &&
+ (cat path1 && echo new) >new-path &&
echo old >>path1 &&
git add new-path path1 &&
git diff -l 4 -C -C --cached --name-status >actual 2>actual.err &&
test_expect_success 'setup submodules' '
test_tick &&
git init submod &&
- ( cd submod && test_commit first; ) &&
+ ( cd submod && test_commit first ) &&
git add submod &&
git commit -m first &&
- ( cd submod && test_commit second; ) &&
+ ( cd submod && test_commit second ) &&
git add submod &&
git commit -m second
'
test_expect_success 'diff --no-index with binary creation' '
echo Q | q_to_nul >binary &&
- (: hide error code from diff, which just indicates differences
- git diff --binary --no-index /dev/null binary >current ||
- true
- ) &&
+ # hide error code from diff, which just indicates differences
+ test_might_fail git diff --binary --no-index /dev/null binary >current &&
rm binary &&
git apply --binary <current &&
echo Q >expected &&
test_cmp expected actual
'
-test_expect_success 'detect permutations inside moved code -- dimmed_zebra' '
+test_expect_success 'detect blocks of moved code' '
git reset --hard &&
cat <<-\EOF >lines.txt &&
long line 1
test_config color.diff.newMovedDimmed "normal cyan" &&
test_config color.diff.oldMovedAlternativeDimmed "normal blue" &&
test_config color.diff.newMovedAlternativeDimmed "normal yellow" &&
- git diff HEAD --no-renames --color-moved=dimmed_zebra --color |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --no-renames --color-moved=blocks --color >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
+ cat <<-\EOF >expected &&
+ <BOLD>diff --git a/lines.txt b/lines.txt<RESET>
+ <BOLD>--- a/lines.txt<RESET>
+ <BOLD>+++ b/lines.txt<RESET>
+ <CYAN>@@ -1,16 +1,16 @@<RESET>
+ <MAGENTA>-long line 1<RESET>
+ <MAGENTA>-long line 2<RESET>
+ <MAGENTA>-long line 3<RESET>
+ line 4<RESET>
+ line 5<RESET>
+ line 6<RESET>
+ line 7<RESET>
+ line 8<RESET>
+ line 9<RESET>
+ <CYAN>+<RESET><CYAN>long line 1<RESET>
+ <CYAN>+<RESET><CYAN>long line 2<RESET>
+ <CYAN>+<RESET><CYAN>long line 3<RESET>
+ <CYAN>+<RESET><CYAN>long line 14<RESET>
+ <CYAN>+<RESET><CYAN>long line 15<RESET>
+ <CYAN>+<RESET><CYAN>long line 16<RESET>
+ line 10<RESET>
+ line 11<RESET>
+ line 12<RESET>
+ line 13<RESET>
+ <MAGENTA>-long line 14<RESET>
+ <MAGENTA>-long line 15<RESET>
+ <MAGENTA>-long line 16<RESET>
+ EOF
+ test_cmp expected actual
+
+'
+
+test_expect_success 'detect permutations inside moved code -- dimmed_zebra' '
+ # reuse setup from test before!
+ test_config color.diff.oldMoved "magenta" &&
+ test_config color.diff.newMoved "cyan" &&
+ test_config color.diff.oldMovedAlternative "blue" &&
+ test_config color.diff.newMovedAlternative "yellow" &&
+ test_config color.diff.oldMovedDimmed "normal magenta" &&
+ test_config color.diff.newMovedDimmed "normal cyan" &&
+ test_config color.diff.oldMovedAlternativeDimmed "normal blue" &&
+ test_config color.diff.newMovedAlternativeDimmed "normal yellow" &&
+ git diff HEAD --no-renames --color-moved=dimmed_zebra --color >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat <<-\EOF >expected &&
<BOLD>diff --git a/lines.txt b/lines.txt<RESET>
<BOLD>--- a/lines.txt<RESET>
test_config color.diff.oldMovedAlternativeDimmed "normal blue" &&
test_config color.diff.newMovedAlternativeDimmed "normal yellow" &&
test_config diff.colorMoved zebra &&
- git diff HEAD --no-renames --color-moved --color |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --no-renames --color-moved --color >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat <<-\EOF >expected &&
<BOLD>diff --git a/lines.txt b/lines.txt<RESET>
<BOLD>--- a/lines.txt<RESET>
line 4
line 5
EOF
- git diff HEAD --no-renames --color-moved --color |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --no-renames --color-moved --color >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat <<-\EOF >expected &&
<BOLD>diff --git a/lines.txt b/lines.txt<RESET>
<BOLD>--- a/lines.txt<RESET>
EOF
test_cmp expected actual &&
- git diff HEAD --no-renames -w --color-moved --color |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --no-renames --color-moved --color \
+ --color-moved-ws=ignore-all-space >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat <<-\EOF >expected &&
<BOLD>diff --git a/lines.txt b/lines.txt<RESET>
<BOLD>--- a/lines.txt<RESET>
line 5
EOF
- git diff HEAD --no-renames --color-moved --color |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --no-renames --color-moved --color >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat <<-\EOF >expected &&
<BOLD>diff --git a/lines.txt b/lines.txt<RESET>
<BOLD>--- a/lines.txt<RESET>
EOF
test_cmp expected actual &&
- git diff HEAD --no-renames -b --color-moved --color |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --no-renames --color-moved --color \
+ --color-moved-ws=ignore-space-change >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat <<-\EOF >expected &&
<BOLD>diff --git a/lines.txt b/lines.txt<RESET>
<BOLD>--- a/lines.txt<RESET>
# avoid cluttering the output with complaints about our eol whitespace
test_config core.whitespace -blank-at-eol &&
- git diff HEAD --no-renames --color-moved --color |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --no-renames --color-moved --color >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat <<-\EOF >expected &&
<BOLD>diff --git a/lines.txt b/lines.txt<RESET>
<BOLD>--- a/lines.txt<RESET>
EOF
test_cmp expected actual &&
- git diff HEAD --no-renames --ignore-space-at-eol --color-moved --color |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --no-renames --color-moved --color \
+ --color-moved-ws=ignore-space-at-eol >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat <<-\EOF >expected &&
<BOLD>diff --git a/lines.txt b/lines.txt<RESET>
<BOLD>--- a/lines.txt<RESET>
irrelevant_line
EOF
- git diff HEAD --color-moved=zebra --color --no-renames |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --color-moved=zebra --color --no-renames >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat >expected <<-\EOF &&
<BOLD>diff --git a/bar b/bar<RESET>
<BOLD>--- a/bar<RESET>
nineteen chars 456789
EOF
- git diff HEAD --color-moved=zebra --color --no-renames |
- grep -v "index" |
- test_decode_color >actual &&
+ git diff HEAD --color-moved=zebra --color --no-renames >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat >expected <<-\EOF &&
<BOLD>diff --git a/bar b/bar<RESET>
<BOLD>--- a/bar<RESET>
7charsA
EOF
- git diff HEAD --color-moved=zebra --color --no-renames | grep -v "index" | test_decode_color >actual &&
+ git diff HEAD --color-moved=zebra --color --no-renames >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
cat >expected <<-\EOF &&
<BOLD>diff --git a/bar b/bar<RESET>
<BOLD>--- a/bar<RESET>
# nor did we mess with it another way
git diff --submodule=diff --color | test_decode_color >expect &&
- test_cmp expect decoded_actual
+ test_cmp expect decoded_actual &&
+ rm -rf bananas &&
+ git submodule deinit bananas
+'
+
+test_expect_success 'only move detection ignores white spaces' '
+ git reset --hard &&
+ q_to_tab <<-\EOF >text.txt &&
+ a long line to exceed per-line minimum
+ another long line to exceed per-line minimum
+ original file
+ EOF
+ git add text.txt &&
+ git commit -m "add text" &&
+ q_to_tab <<-\EOF >text.txt &&
+ Qa long line to exceed per-line minimum
+ Qanother long line to exceed per-line minimum
+ new file
+ EOF
+
+ # Make sure we get a different diff using -w
+ git diff --color --color-moved -w >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
+ q_to_tab <<-\EOF >expected &&
+ <BOLD>diff --git a/text.txt b/text.txt<RESET>
+ <BOLD>--- a/text.txt<RESET>
+ <BOLD>+++ b/text.txt<RESET>
+ <CYAN>@@ -1,3 +1,3 @@<RESET>
+ Qa long line to exceed per-line minimum<RESET>
+ Qanother long line to exceed per-line minimum<RESET>
+ <RED>-original file<RESET>
+ <GREEN>+<RESET><GREEN>new file<RESET>
+ EOF
+ test_cmp expected actual &&
+
+ # And now ignoring white space only in the move detection
+ git diff --color --color-moved \
+ --color-moved-ws=ignore-all-space,ignore-space-change,ignore-space-at-eol >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
+ q_to_tab <<-\EOF >expected &&
+ <BOLD>diff --git a/text.txt b/text.txt<RESET>
+ <BOLD>--- a/text.txt<RESET>
+ <BOLD>+++ b/text.txt<RESET>
+ <CYAN>@@ -1,3 +1,3 @@<RESET>
+ <BOLD;MAGENTA>-a long line to exceed per-line minimum<RESET>
+ <BOLD;MAGENTA>-another long line to exceed per-line minimum<RESET>
+ <RED>-original file<RESET>
+ <BOLD;YELLOW>+<RESET>Q<BOLD;YELLOW>a long line to exceed per-line minimum<RESET>
+ <BOLD;YELLOW>+<RESET>Q<BOLD;YELLOW>another long line to exceed per-line minimum<RESET>
+ <GREEN>+<RESET><GREEN>new file<RESET>
+ EOF
+ test_cmp expected actual
+'
+
+test_expect_success 'compare whitespace delta across moved blocks' '
+
+ git reset --hard &&
+ q_to_tab <<-\EOF >text.txt &&
+ QIndented
+ QText across
+ Qsome lines
+ QBut! <- this stands out
+ QAdjusting with
+ QQdifferent starting
+ Qwhite spaces
+ QAnother outlier
+ QQQIndented
+ QQQText across
+ QQQfive lines
+ QQQthat has similar lines
+ QQQto previous blocks, but with different indent
+ QQQYetQAnotherQoutlierQ
+ EOF
+
+ git add text.txt &&
+ git commit -m "add text.txt" &&
+
+ q_to_tab <<-\EOF >text.txt &&
+ QQIndented
+ QQText across
+ QQsome lines
+ QQQBut! <- this stands out
+ Adjusting with
+ Qdifferent starting
+ white spaces
+ AnotherQoutlier
+ QQIndented
+ QQText across
+ QQfive lines
+ QQthat has similar lines
+ QQto previous blocks, but with different indent
+ QQYetQAnotherQoutlier
+ EOF
+
+ git diff --color --color-moved --color-moved-ws=allow-indentation-change >actual.raw &&
+ grep -v "index" actual.raw | test_decode_color >actual &&
+
+ q_to_tab <<-\EOF >expected &&
+ <BOLD>diff --git a/text.txt b/text.txt<RESET>
+ <BOLD>--- a/text.txt<RESET>
+ <BOLD>+++ b/text.txt<RESET>
+ <CYAN>@@ -1,14 +1,14 @@<RESET>
+ <BOLD;MAGENTA>-QIndented<RESET>
+ <BOLD;MAGENTA>-QText across<RESET>
+ <BOLD;MAGENTA>-Qsome lines<RESET>
+ <RED>-QBut! <- this stands out<RESET>
+ <BOLD;MAGENTA>-QAdjusting with<RESET>
+ <BOLD;MAGENTA>-QQdifferent starting<RESET>
+ <BOLD;MAGENTA>-Qwhite spaces<RESET>
+ <RED>-QAnother outlier<RESET>
+ <BOLD;MAGENTA>-QQQIndented<RESET>
+ <BOLD;MAGENTA>-QQQText across<RESET>
+ <BOLD;MAGENTA>-QQQfive lines<RESET>
+ <BOLD;MAGENTA>-QQQthat has similar lines<RESET>
+ <BOLD;MAGENTA>-QQQto previous blocks, but with different indent<RESET>
+ <RED>-QQQYetQAnotherQoutlierQ<RESET>
+ <BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>Indented<RESET>
+ <BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>Text across<RESET>
+ <BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>some lines<RESET>
+ <GREEN>+<RESET>QQQ<GREEN>But! <- this stands out<RESET>
+ <BOLD;CYAN>+<RESET><BOLD;CYAN>Adjusting with<RESET>
+ <BOLD;CYAN>+<RESET>Q<BOLD;CYAN>different starting<RESET>
+ <BOLD;CYAN>+<RESET><BOLD;CYAN>white spaces<RESET>
+ <GREEN>+<RESET><GREEN>AnotherQoutlier<RESET>
+ <BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>Indented<RESET>
+ <BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>Text across<RESET>
+ <BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>five lines<RESET>
+ <BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>that has similar lines<RESET>
+ <BOLD;CYAN>+<RESET>QQ<BOLD;CYAN>to previous blocks, but with different indent<RESET>
+ <GREEN>+<RESET>QQ<GREEN>YetQAnotherQoutlier<RESET>
+ EOF
+
+ test_cmp expected actual
+'
+
+test_expect_success 'compare whitespace delta incompatible with other space options' '
+ test_must_fail git diff \
+ --color-moved-ws=allow-indentation-change,ignore-all-space \
+ 2>err &&
+ test_i18ngrep allow-indentation-change err
'
test_done
--- /dev/null
+abstract class RIGHT
+{
+ const FOO = 'ChangeMe';
+}
--- /dev/null
+class RIGHT
+{
+ const FOO = 'ChangeMe';
+}
--- /dev/null
+final class RIGHT
+{
+ const FOO = 'ChangeMe';
+}
--- /dev/null
+function RIGHT()
+{
+ return 'ChangeMe';
+}
--- /dev/null
+interface RIGHT
+{
+ public function foo($ChangeMe);
+}
--- /dev/null
+class Klass
+{
+ public static function RIGHT()
+ {
+ return 'ChangeMe';
+ }
+}
--- /dev/null
+trait RIGHT
+{
+ public function foo($ChangeMe)
+ {
+ return 'foo';
+ }
+}
for n in $sample
do
- ( zs $n ; echo a ) >file-a$n &&
- ( echo b; zs $n; echo ) >file-b$n &&
- ( printf c; zs $n ) >file-c$n &&
- ( echo d; zs $n ) >file-d$n &&
+ ( zs $n && echo a ) >file-a$n &&
+ ( echo b && zs $n && echo ) >file-b$n &&
+ ( printf c && zs $n ) >file-c$n &&
+ ( echo d && zs $n ) >file-d$n &&
git add file-a$n file-b$n file-c$n file-d$n &&
- ( zs $n ; echo A ) >file-a$n &&
- ( echo B; zs $n; echo ) >file-b$n &&
- ( printf C; zs $n ) >file-c$n &&
- ( echo D; zs $n ) >file-d$n &&
+ ( zs $n && echo A ) >file-a$n &&
+ ( echo B && zs $n && echo ) >file-b$n &&
+ ( printf C && zs $n ) >file-c$n &&
+ ( echo D && zs $n ) >file-d$n &&
expect_pattern $n || return 1
test_expect_success setup '
(
- echo "A $NS"
+ echo "A $NS" &&
for c in B C D E F G H I J K
do
echo " $c"
- done
- echo "L $NS"
+ done &&
+ echo "L $NS" &&
for c in M N O P Q R S T U V
do
echo " $c"
git diff | sed -n -e "s/^.*@@//p" >actual &&
(
- echo " A $N$N$N$N$N$N$N$N$N2"
+ echo " A $N$N$N$N$N$N$N$N$N2" &&
echo " L $N$N$N$N$N$N$N$N$N1"
) >expected &&
test_cmp actual expected
test_expect_success 'setup .git file for sm2' '
(cd sm2 &&
REAL="$(pwd)/../.real" &&
- mv .git "$REAL"
+ mv .git "$REAL" &&
echo "gitdir: $REAL" >.git)
'
git commit -m "sub a"
) &&
(cd sub_alt &&
- sha1_before=$(git rev-parse --short HEAD)
+ sha1_before=$(git rev-parse --short HEAD) &&
echo b >b &&
git add b &&
git commit -m b &&
test_expect_success 'setup .git file for sm2' '
(cd sm2 &&
REAL="$(pwd)/../.real" &&
- mv .git "$REAL"
+ mv .git "$REAL" &&
echo "gitdir: $REAL" >.git)
'
test_expect_success \
'check if contextually independent diffs for the same file apply' \
- '( git diff test~2 test~1; git diff test~1 test~0 )| git apply'
+ '( git diff test~2 test~1 && git diff test~1 test~0 )| git apply'
test_done
git log :/a --
'
+test_expect_success '"git log :/detached -- " should find a commit only in HEAD' '
+ test_when_finished "git checkout master" &&
+ git checkout --detach &&
+ # Must manually call `test_tick` instead of using `test_commit`,
+ # because the latter additionally creates a tag, which would make
+ # the commit reachable not only via HEAD.
+ test_tick &&
+ git commit --allow-empty -m detached &&
+ test_tick &&
+ git commit --allow-empty -m something-else &&
+ git log :/detached --
+'
+
+test_expect_success '"git log :/detached -- " should not find an orphaned commit' '
+ test_must_fail git log :/detached --
+'
+
+test_expect_success '"git log :/detached -- " should find HEAD only of own worktree' '
+ git worktree add other-tree HEAD &&
+ git -C other-tree checkout --detach &&
+ test_tick &&
+ git -C other-tree commit --allow-empty -m other-detached &&
+ git -C other-tree log :/other-detached -- &&
+ test_must_fail git log :/other-detached --
+'
+
test_expect_success '"git log -- :/a" should not be ambiguous' '
git log -- :/a
'
test_bad_opts "-L 1:simple" "There is no path"
test_bad_opts "-L '/foo:b.c'" "argument not .start,end:file"
test_bad_opts "-L 1000:b.c" "has only.*lines"
-test_bad_opts "-L 1,1000:b.c" "has only.*lines"
test_bad_opts "-L :b.c" "argument not .start,end:file"
test_bad_opts "-L :foo:b.c" "no match"
test_expect_success '-L ,Y (Y == nlines + 1)' '
n=$(expr $(wc -l <b.c) + 1) &&
- test_must_fail git log -L ,$n:b.c
+ git log -L ,$n:b.c
'
test_expect_success '-L ,Y (Y == nlines + 2)' '
n=$(expr $(wc -l <b.c) + 2) &&
- test_must_fail git log -L ,$n:b.c
+ git log -L ,$n:b.c
'
test_expect_success '-L with --first-parent and a merge' '
mkdir missing-pack &&
cd missing-pack &&
git init &&
- GOP=.git/objects/pack
+ GOP=.git/objects/pack &&
rm -fr $GOP &&
git index-pack --stdin --keep=test <../test-3-${packname_3}.pack &&
test -f $GOP/pack-${packname_3}.pack &&
rm -f .git/objects/pack/* &&
cp test-1-${pack1}.pack .git/objects/pack/pack-${pack1}.pack &&
(
- cd .git/objects/pack
+ cd .git/objects/pack &&
git index-pack pack-${pack1}.pack
) &&
test -f .git/objects/pack/pack-${pack1}.idx
test_cmp observed expected
'
+test_expect_success 'verify explicitly specifying oversized blob in input' '
+ git -C r2 ls-files -s large.1000 large.10000 \
+ | awk -f print_2.awk \
+ | sort >expected &&
+ git -C r2 pack-objects --rev --stdout --filter=blob:limit=1k >filter.pack <<-EOF &&
+ HEAD
+ $(git -C r2 rev-parse HEAD:large.10000)
+ EOF
+ git -C r2 index-pack ../filter.pack &&
+ git -C r2 verify-pack -v ../filter.pack \
+ | grep blob \
+ | awk -f print_1.awk \
+ | sort >observed &&
+ test_cmp observed expected
+'
+
test_expect_success 'verify blob:limit=1m' '
git -C r2 ls-files -s large.1000 large.10000 \
| awk -f print_2.awk \
objdir=".git/objects"
'
+test_expect_success 'verify graph with no graph file' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git commit-graph verify
+'
+
test_expect_success 'write graph with no packs' '
cd "$TRASH_DIRECTORY/full" &&
git commit-graph write --object-dir . &&
'
graph_git_two_modes() {
- git -c core.graph=true $1 >output
- git -c core.graph=false $1 >expect
+ git -c core.commitGraph=true $1 >output &&
+ git -c core.commitGraph=false $1 >expect &&
test_cmp output expect
}
graph_git_behavior 'append graph, commit 8 vs merge 1' full commits/8 merge/1
graph_git_behavior 'append graph, commit 8 vs merge 2' full commits/8 merge/2
+test_expect_success 'build graph using --reachable' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git commit-graph write --reachable &&
+ test_path_is_file $objdir/info/commit-graph &&
+ graph_read_expect "11" "large_edges"
+'
+
+graph_git_behavior 'append graph, commit 8 vs merge 1' full commits/8 merge/1
+graph_git_behavior 'append graph, commit 8 vs merge 2' full commits/8 merge/2
+
test_expect_success 'setup bare repo' '
cd "$TRASH_DIRECTORY" &&
git clone --bare --no-local full bare &&
test_cmp expect output
'
+test_expect_success 'check that gc computes commit-graph' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git commit --allow-empty -m "blank" &&
+ git commit-graph write --reachable &&
+ cp $objdir/info/commit-graph commit-graph-before-gc &&
+ git reset --hard HEAD~1 &&
+ git config gc.writeCommitGraph true &&
+ git gc &&
+ cp $objdir/info/commit-graph commit-graph-after-gc &&
+ ! test_cmp commit-graph-before-gc commit-graph-after-gc &&
+ git commit-graph write --reachable &&
+ test_cmp commit-graph-after-gc $objdir/info/commit-graph
+'
+
+# The verify tests below expect the commit-graph to contain
+# exactly the commits reachable from the commits/8 branch.
+# If the set of commits in the file changes, then the offsets
+# into the binary file will correspond to different data and
+# the tests will likely break.
+
+test_expect_success 'git commit-graph verify' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git rev-parse commits/8 | git commit-graph write --stdin-commits &&
+ git commit-graph verify >output
+'
+
+NUM_COMMITS=9
+NUM_OCTOPUS_EDGES=2
+HASH_LEN=20
+GRAPH_BYTE_VERSION=4
+GRAPH_BYTE_HASH=5
+GRAPH_BYTE_CHUNK_COUNT=6
+GRAPH_CHUNK_LOOKUP_OFFSET=8
+GRAPH_CHUNK_LOOKUP_WIDTH=12
+GRAPH_CHUNK_LOOKUP_ROWS=5
+GRAPH_BYTE_OID_FANOUT_ID=$GRAPH_CHUNK_LOOKUP_OFFSET
+GRAPH_BYTE_OID_LOOKUP_ID=$(($GRAPH_CHUNK_LOOKUP_OFFSET + \
+ 1 * $GRAPH_CHUNK_LOOKUP_WIDTH))
+GRAPH_BYTE_COMMIT_DATA_ID=$(($GRAPH_CHUNK_LOOKUP_OFFSET + \
+ 2 * $GRAPH_CHUNK_LOOKUP_WIDTH))
+GRAPH_FANOUT_OFFSET=$(($GRAPH_CHUNK_LOOKUP_OFFSET + \
+ $GRAPH_CHUNK_LOOKUP_WIDTH * $GRAPH_CHUNK_LOOKUP_ROWS))
+GRAPH_BYTE_FANOUT1=$(($GRAPH_FANOUT_OFFSET + 4 * 4))
+GRAPH_BYTE_FANOUT2=$(($GRAPH_FANOUT_OFFSET + 4 * 255))
+GRAPH_OID_LOOKUP_OFFSET=$(($GRAPH_FANOUT_OFFSET + 4 * 256))
+GRAPH_BYTE_OID_LOOKUP_ORDER=$(($GRAPH_OID_LOOKUP_OFFSET + $HASH_LEN * 8))
+GRAPH_BYTE_OID_LOOKUP_MISSING=$(($GRAPH_OID_LOOKUP_OFFSET + $HASH_LEN * 4 + 10))
+GRAPH_COMMIT_DATA_OFFSET=$(($GRAPH_OID_LOOKUP_OFFSET + $HASH_LEN * $NUM_COMMITS))
+GRAPH_BYTE_COMMIT_TREE=$GRAPH_COMMIT_DATA_OFFSET
+GRAPH_BYTE_COMMIT_PARENT=$(($GRAPH_COMMIT_DATA_OFFSET + $HASH_LEN))
+GRAPH_BYTE_COMMIT_EXTRA_PARENT=$(($GRAPH_COMMIT_DATA_OFFSET + $HASH_LEN + 4))
+GRAPH_BYTE_COMMIT_WRONG_PARENT=$(($GRAPH_COMMIT_DATA_OFFSET + $HASH_LEN + 3))
+GRAPH_BYTE_COMMIT_GENERATION=$(($GRAPH_COMMIT_DATA_OFFSET + $HASH_LEN + 11))
+GRAPH_BYTE_COMMIT_DATE=$(($GRAPH_COMMIT_DATA_OFFSET + $HASH_LEN + 12))
+GRAPH_COMMIT_DATA_WIDTH=$(($HASH_LEN + 16))
+GRAPH_OCTOPUS_DATA_OFFSET=$(($GRAPH_COMMIT_DATA_OFFSET + \
+ $GRAPH_COMMIT_DATA_WIDTH * $NUM_COMMITS))
+GRAPH_BYTE_OCTOPUS=$(($GRAPH_OCTOPUS_DATA_OFFSET + 4))
+GRAPH_BYTE_FOOTER=$(($GRAPH_OCTOPUS_DATA_OFFSET + 4 * $NUM_OCTOPUS_EDGES))
+
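(Editorial aside, not part of the patch: with the SHA-1 layout these tests assume, i.e. HASH_LEN=20, NUM_COMMITS=9 and NUM_OCTOPUS_EDGES=2, the chunk offsets defined above work out as sketched below; each per-commit record is HASH_LEN + 16 = 36 bytes, which is where GRAPH_COMMIT_DATA_WIDTH comes from.)

#   GRAPH_FANOUT_OFFSET       =    8 + 12*5  =   68
#   GRAPH_OID_LOOKUP_OFFSET   =   68 + 4*256 = 1092
#   GRAPH_COMMIT_DATA_OFFSET  = 1092 + 20*9  = 1272
#   GRAPH_OCTOPUS_DATA_OFFSET = 1272 + 36*9  = 1596
#   GRAPH_BYTE_FOOTER         = 1596 + 4*2   = 1604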
+# usage: corrupt_graph_and_verify <position> <data> <string>
+# Overwrites the commit-graph file at the given position with
+# the given data, then runs 'git commit-graph verify' and
+# places its output in the file 'err'. Finally, checks that
+# 'err' contains the given string.
+corrupt_graph_and_verify() {
+ pos=$1
+ data="${2:-\0}"
+ grepstr=$3
+ cd "$TRASH_DIRECTORY/full" &&
+ test_when_finished mv commit-graph-backup $objdir/info/commit-graph &&
+ cp $objdir/info/commit-graph commit-graph-backup &&
+ printf "$data" | dd of="$objdir/info/commit-graph" bs=1 seek="$pos" conv=notrunc &&
+ test_must_fail git commit-graph verify 2>test_err &&
+ grep -v "^+" test_err >err &&
+ test_i18ngrep "$grepstr" err
+}
+
+test_expect_success 'detect bad signature' '
+ corrupt_graph_and_verify 0 "\0" \
+ "graph signature"
+'
+
+test_expect_success 'detect bad version' '
+ corrupt_graph_and_verify $GRAPH_BYTE_VERSION "\02" \
+ "graph version"
+'
+
+test_expect_success 'detect bad hash version' '
+ corrupt_graph_and_verify $GRAPH_BYTE_HASH "\02" \
+ "hash version"
+'
+
+test_expect_success 'detect low chunk count' '
+ corrupt_graph_and_verify $GRAPH_BYTE_CHUNK_COUNT "\02" \
+ "missing the .* chunk"
+'
+
+test_expect_success 'detect missing OID fanout chunk' '
+ corrupt_graph_and_verify $GRAPH_BYTE_OID_FANOUT_ID "\0" \
+ "missing the OID Fanout chunk"
+'
+
+test_expect_success 'detect missing OID lookup chunk' '
+ corrupt_graph_and_verify $GRAPH_BYTE_OID_LOOKUP_ID "\0" \
+ "missing the OID Lookup chunk"
+'
+
+test_expect_success 'detect missing commit data chunk' '
+ corrupt_graph_and_verify $GRAPH_BYTE_COMMIT_DATA_ID "\0" \
+ "missing the Commit Data chunk"
+'
+
+test_expect_success 'detect incorrect fanout' '
+ corrupt_graph_and_verify $GRAPH_BYTE_FANOUT1 "\01" \
+ "fanout value"
+'
+
+test_expect_success 'detect incorrect fanout final value' '
+ corrupt_graph_and_verify $GRAPH_BYTE_FANOUT2 "\01" \
+ "fanout value"
+'
+
+test_expect_success 'detect incorrect OID order' '
+ corrupt_graph_and_verify $GRAPH_BYTE_OID_LOOKUP_ORDER "\01" \
+ "incorrect OID order"
+'
+
+test_expect_success 'detect OID not in object database' '
+ corrupt_graph_and_verify $GRAPH_BYTE_OID_LOOKUP_MISSING "\01" \
+ "from object database"
+'
+
+test_expect_success 'detect incorrect tree OID' '
+ corrupt_graph_and_verify $GRAPH_BYTE_COMMIT_TREE "\01" \
+ "root tree OID for commit"
+'
+
+test_expect_success 'detect incorrect parent int-id' '
+ corrupt_graph_and_verify $GRAPH_BYTE_COMMIT_PARENT "\01" \
+ "invalid parent"
+'
+
+test_expect_success 'detect extra parent int-id' '
+ corrupt_graph_and_verify $GRAPH_BYTE_COMMIT_EXTRA_PARENT "\00" \
+ "is too long"
+'
+
+test_expect_success 'detect wrong parent' '
+ corrupt_graph_and_verify $GRAPH_BYTE_COMMIT_WRONG_PARENT "\01" \
+ "commit-graph parent for"
+'
+
+test_expect_success 'detect incorrect generation number' '
+ corrupt_graph_and_verify $GRAPH_BYTE_COMMIT_GENERATION "\070" \
+ "generation for commit"
+'
+
+test_expect_success 'detect incorrect generation number' '
+ corrupt_graph_and_verify $GRAPH_BYTE_COMMIT_GENERATION "\01" \
+ "non-zero generation number"
+'
+
+test_expect_success 'detect incorrect commit date' '
+ corrupt_graph_and_verify $GRAPH_BYTE_COMMIT_DATE "\01" \
+ "commit date"
+'
+
+test_expect_success 'detect incorrect parent for octopus merge' '
+ corrupt_graph_and_verify $GRAPH_BYTE_OCTOPUS "\01" \
+ "invalid parent"
+'
+
+test_expect_success 'detect invalid checksum hash' '
+ corrupt_graph_and_verify $GRAPH_BYTE_FOOTER "\00" \
+ "incorrect checksum"
+'
+
+test_expect_success 'git fsck (checks commit-graph)' '
+ cd "$TRASH_DIRECTORY/full" &&
+ git fsck &&
+ corrupt_graph_and_verify $GRAPH_BYTE_FOOTER "\00" \
+ "incorrect checksum" &&
+ test_must_fail git fsck
+'
+
+test_expect_success 'setup non-the_repository tests' '
+ rm -rf repo &&
+ git init repo &&
+ test_commit -C repo one &&
+ test_commit -C repo two &&
+ git -C repo config core.commitGraph true &&
+ git -C repo rev-parse two | \
+ git -C repo commit-graph write --stdin-commits
+'
+
+test_expect_success 'parse_commit_in_graph works for non-the_repository' '
+ test-tool repository parse_commit_in_graph \
+ repo/.git repo "$(git -C repo rev-parse two)" >actual &&
+ echo $(git -C repo log --pretty="%ct" -1) \
+ $(git -C repo rev-parse one) >expect &&
+ test_cmp expect actual &&
+
+ test-tool repository parse_commit_in_graph \
+ repo/.git repo "$(git -C repo rev-parse one)" >actual &&
+ echo $(git -C repo log --pretty="%ct" -1 one) >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'get_commit_tree_in_graph works for non-the_repository' '
+ test-tool repository get_commit_tree_in_graph \
+ repo/.git repo "$(git -C repo rev-parse two)" >actual &&
+ echo $(git -C repo rev-parse two^{tree}) >expect &&
+ test_cmp expect actual &&
+
+ test-tool repository get_commit_tree_in_graph \
+ repo/.git repo "$(git -C repo rev-parse one)" >actual &&
+ echo $(git -C repo rev-parse one^{tree}) >expect &&
+ test_cmp expect actual
+'
+
test_done
test_expect_success 'refuse deleting push with denyDeletes' '
(
cd victim &&
- ( git branch -D extra || : ) &&
+ test_might_fail git branch -D extra &&
git config receive.denyDeletes true &&
git branch extra master
) &&
test_expect_success 'denyNonFastforwards trumps --force' '
(
cd victim &&
- ( git branch -D extra || : ) &&
+ test_might_fail git branch -D extra &&
git config receive.denyNonFastforwards true
) &&
victim_orig=$(cd victim && git rev-parse --verify master) &&
'
test_expect_success 'pre-receive hook input' '
- (echo $commit0 $commit1 refs/heads/master;
+ (echo $commit0 $commit1 refs/heads/master &&
echo $commit1 $commit0 refs/heads/tofail
) | test_cmp - victim.git/pre-receive.stdin
'
test_expect_success 'update hook arguments' '
- (echo refs/heads/master $commit0 $commit1;
+ (echo refs/heads/master $commit0 $commit1 &&
echo refs/heads/tofail $commit1 $commit0
) | test_cmp - victim.git/update.args
'
(
cd another &&
- git push .. master:master
- test $? = 1
+ test_must_fail git push .. master:master
)
'
test_expect_success 'setup' '
mkdir .git/hooks &&
- (echo "#!/bin/sh" ; echo "exit 1") >.git/hooks/update &&
- chmod +x .git/hooks/update &&
+ write_script .git/hooks/update <<-\EOF &&
+ exit 1
+ EOF
echo 1 >file &&
git add file &&
git commit -m 1 &&
test_expect_success 'git rebase -m --skip' '
git reset --hard D &&
clear_hook_input &&
- test_must_fail git rebase --onto A B &&
+ test_must_fail git rebase -m --onto A B &&
test_must_fail git rebase --skip &&
echo D > foo &&
git add foo &&
test_expect_success 'pull in shallow repo with missing merge base' '
(
cd shallow &&
- git fetch --depth 4 .. A
+ git fetch --depth 4 .. A &&
test_must_fail git merge --allow-unrelated-histories FETCH_HEAD
)
'
# are reachable only via created tag references.
blob=$(echo "hello blob" | git hash-object -t blob -w --stdin) &&
git tag -a -m "tag -> blob" tag-to-blob $blob &&
- \
+
tree=$(printf "100644 blob $blob\tfile" | git mktree) &&
git tag -a -m "tag -> tree" tag-to-tree $tree &&
- \
+
tree2=$(printf "100644 blob $blob\tfile2" | git mktree) &&
commit=$(git commit-tree -m "hello commit" $tree) &&
git tag -a -m "tag -> commit" tag-to-commit $commit &&
- \
+
blob2=$(echo "hello blob2" | git hash-object -t blob -w --stdin) &&
- tag=$(printf "object $blob2\ntype blob\ntag tag-to-blob2\n\
-tagger author A U Thor <author@example.com> 0 +0000\n\nhello tag" | git mktag) &&
+ tag=$(git mktag <<-EOF
+ object $blob2
+ type blob
+ tag tag-to-blob2
+ tagger author A U Thor <author@example.com> 0 +0000
+
+ hello tag
+ EOF
+ ) &&
git tag -a -m "tag -> tag" tag-to-tag $tag &&
- \
+
# `fetch-pack --all` should succeed fetching all those objects.
mkdir fetchall &&
(
)
'
+test_expect_success 'use ref advertisement to prune "have" lines sent' '
+ rm -rf server client &&
+ git init server &&
+ test_commit -C server both_have_1 &&
+ git -C server tag -d both_have_1 &&
+ test_commit -C server both_have_2 &&
+
+ git clone server client &&
+ test_commit -C server server_has &&
+ test_commit -C client client_has &&
+
+ # In both protocol v0 and v2, ensure that the parent of both_have_2 is
+ # not sent as a "have" line. The client should know that the server has
+ # both_have_2, so it only needs to inform the server that it has
+ # both_have_2, and the server can infer the rest.
+
+ rm -f trace &&
+ cp -r client clientv0 &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C clientv0 \
+ fetch origin server_has both_have_2 &&
+ grep "have $(git -C client rev-parse client_has)" trace &&
+ grep "have $(git -C client rev-parse both_have_2)" trace &&
+ ! grep "have $(git -C client rev-parse both_have_2^)" trace &&
+
+ rm -f trace &&
+ cp -r client clientv2 &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C clientv2 -c protocol.version=2 \
+ fetch origin server_has both_have_2 &&
+ grep "have $(git -C client rev-parse client_has)" trace &&
+ grep "have $(git -C client rev-parse both_have_2)" trace &&
+ ! grep "have $(git -C client rev-parse both_have_2^)" trace
+'
+
test_expect_success 'filtering by size' '
rm -rf server client &&
test_create_repo server &&
EOF
test_expect_success 'prune --dry-run' '
- (
- cd one &&
- git branch -m side2 side) &&
+ git -C one branch -m side2 side &&
+ test_when_finished "git -C one branch -m side side2" &&
(
cd test &&
git remote prune --dry-run origin >output &&
git rev-parse refs/remotes/origin/side2 &&
test_must_fail git rev-parse refs/remotes/origin/side &&
- (
- cd ../one &&
- git branch -m side side2) &&
test_i18ncmp expect output
)
'
git remote rename origin origin &&
test_path_is_missing .git/branches/origin &&
test "$(git config remote.origin.url)" = "quux" &&
- test "$(git config remote.origin.fetch)" = "refs/heads/foom:refs/heads/origin"
+ test "$(git config remote.origin.fetch)" = "refs/heads/foom:refs/heads/origin" &&
test "$(git config remote.origin.push)" = "HEAD:refs/heads/foom"
)
'
test_commit test2 &&
(
cd auto-gc &&
+ git config fetch.unpackLimit 1 &&
git config gc.autoPackLimit 1 &&
git config gc.autoDetach false &&
GIT_ASK_YESNO="$D/askyesno" git fetch >fetch.out 2>&1 &&
+ test_i18ngrep "Auto packing the repository" fetch.out &&
! grep "Should I try again" fetch.out
)
'
test_cmp expect actual
'
+setup_negotiation_tip () {
+ SERVER="$1"
+ URL="$2"
+ USE_PROTOCOL_V2="$3"
+
+ rm -rf "$SERVER" client trace &&
+ git init "$SERVER" &&
+ test_commit -C "$SERVER" alpha_1 &&
+ test_commit -C "$SERVER" alpha_2 &&
+ git -C "$SERVER" checkout --orphan beta &&
+ test_commit -C "$SERVER" beta_1 &&
+ test_commit -C "$SERVER" beta_2 &&
+
+ git clone "$URL" client &&
+
+ if test "$USE_PROTOCOL_V2" -eq 1
+ then
+ git -C "$SERVER" config protocol.version 2 &&
+ git -C client config protocol.version 2
+ fi &&
+
+ test_commit -C "$SERVER" beta_s &&
+ git -C "$SERVER" checkout master &&
+ test_commit -C "$SERVER" alpha_s &&
+ git -C "$SERVER" tag -d alpha_1 alpha_2 beta_1 beta_2
+}
+
+check_negotiation_tip () {
+ # Ensure that {alpha,beta}_1 are sent as "have", but not {alpha,beta}_2
+ ALPHA_1=$(git -C client rev-parse alpha_1) &&
+ grep "fetch> have $ALPHA_1" trace &&
+ BETA_1=$(git -C client rev-parse beta_1) &&
+ grep "fetch> have $BETA_1" trace &&
+ ALPHA_2=$(git -C client rev-parse alpha_2) &&
+ ! grep "fetch> have $ALPHA_2" trace &&
+ BETA_2=$(git -C client rev-parse beta_2) &&
+ ! grep "fetch> have $BETA_2" trace
+}
+
+test_expect_success '--negotiation-tip limits "have" lines sent' '
+ setup_negotiation_tip server server 0 &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch \
+ --negotiation-tip=alpha_1 --negotiation-tip=beta_1 \
+ origin alpha_s beta_s &&
+ check_negotiation_tip
+'
+
+test_expect_success '--negotiation-tip understands globs' '
+ setup_negotiation_tip server server 0 &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch \
+ --negotiation-tip=*_1 \
+ origin alpha_s beta_s &&
+ check_negotiation_tip
+'
+
+test_expect_success '--negotiation-tip understands abbreviated SHA-1' '
+ setup_negotiation_tip server server 0 &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch \
+ --negotiation-tip=$(git -C client rev-parse --short alpha_1) \
+ --negotiation-tip=$(git -C client rev-parse --short beta_1) \
+ origin alpha_s beta_s &&
+ check_negotiation_tip
+'
+
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success '--negotiation-tip limits "have" lines sent with HTTP protocol v2' '
+ setup_negotiation_tip "$HTTPD_DOCUMENT_ROOT_PATH/server" \
+ "$HTTPD_URL/smart/server" 1 &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch \
+ --negotiation-tip=alpha_1 --negotiation-tip=beta_1 \
+ origin alpha_s beta_s &&
+ check_negotiation_tip
+'
+
+stop_httpd
+
test_done
git tag mark1.10 &&
git show-ref --tags -d | sed -e "s/ / /" >expected.tag &&
(
- echo "$(git rev-parse HEAD) HEAD"
+ echo "$(git rev-parse HEAD) HEAD" &&
git show-ref -d | sed -e "s/ / /"
) >expected.all &&
git clone . other.git &&
(
cd other.git &&
- echo "$(git rev-parse HEAD) HEAD"
+ echo "$(git rev-parse HEAD) HEAD" &&
git show-ref | sed -e "s/ / /"
) >exp &&
(
cd child1 &&
git branch foo &&
- git symbolic-ref refs/heads/bar refs/heads/foo
+ git symbolic-ref refs/heads/bar refs/heads/foo &&
git config receive.denyCurrentBranch false
) &&
(
(
cd child1 &&
git branch foo &&
- git symbolic-ref refs/heads/bar refs/heads/foo
+ git symbolic-ref refs/heads/bar refs/heads/foo &&
git config receive.denyCurrentBranch false
) &&
(
mk_empty testrepo &&
git push testrepo refs/heads/master:refs/remotes/origin/master &&
(cd testrepo &&
- git reset --hard origin/master^
+ git reset --hard origin/master^ &&
git config receive.denyCurrentBranch true) &&
echo >.git/foo "To testrepo" &&
mk_empty testrepo &&
git push testrepo refs/heads/master:refs/remotes/origin/master &&
(cd testrepo &&
- git reset --hard origin/master
+ git reset --hard origin/master &&
git config receive.denyCurrentBranch true) &&
echo >.git/foo "To testrepo" &&
git commit --allow-empty -m "future commit" &&
git tag -m "future" future &&
git checkout master &&
- git for-each-ref refs/heads/master refs/tags/tag >../expect
+ git for-each-ref refs/heads/master refs/tags/tag >../expect &&
git push --follow-tag ../dst master
) &&
(
git push --mirror up &&
echo two >foo && git add foo && git commit -m two &&
git push --mirror up &&
- git reset --hard HEAD^
+ git reset --hard HEAD^ &&
git push --mirror up
) &&
master_master=$(cd master && git show-ref -s --verify refs/heads/master) &&
echo one >foo && git add foo && git commit -m one &&
git branch remove master &&
git push --mirror up &&
- git branch -D remove
+ git branch -D remove &&
git push --mirror up
) &&
(
echo two >foo && git add foo && git commit -m two &&
git tag -f tmaster master &&
git push --mirror up &&
- git reset --hard HEAD^
+ git reset --hard HEAD^ &&
git tag -f tmaster master &&
git push --mirror up
) &&
echo one >foo && git add foo && git commit -m one &&
git tag -f tremove master &&
git push --mirror up &&
- git tag -d tremove
+ git tag -d tremove &&
git push --mirror up
) &&
(
git branch keep master &&
git branch remove master &&
git push up &&
- git branch -D remove
+ git branch -D remove &&
git push up
) &&
(
)
'
+test_expect_success 'pull --rebase fails on corrupt HEAD' '
+ test_when_finished "rm -rf corrupt" &&
+ git init corrupt &&
+ (
+ cd corrupt &&
+ test_commit one &&
+ obj=$(git rev-parse --verify HEAD | sed "s#^..#&/#") &&
+ rm -f .git/objects/$obj &&
+ test_must_fail git pull --rebase
+ )
+'
+
test_expect_success 'setup for detecting upstreamed changes' '
mkdir src &&
(cd src &&
git config -f .gitmodules submodule.subdir/deepsubmodule.fetchRecursive false
) &&
git fetch --recurse-submodules=on-demand >../actual.out 2>../actual.err &&
- git config --unset fetch.recurseSubmodules
+ git config --unset fetch.recurseSubmodules &&
(
cd submodule &&
git config --unset -f .gitmodules submodule.subdir/deepsubmodule.fetchRecursive
git clone a a1 &&
(
cd a1 &&
- git init b
+ git init b &&
(
cd b &&
>junk &&
test_cmp expect actual
'
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+REPO="$HTTPD_DOCUMENT_ROOT_PATH/repo"
+
+test_expect_success 'shallow fetches check connectivity before writing shallow file' '
+ rm -rf "$REPO" client &&
+
+ git init "$REPO" &&
+ test_commit -C "$REPO" one &&
+ test_commit -C "$REPO" two &&
+ test_commit -C "$REPO" three &&
+
+ git init client &&
+
+ # Use protocol v2 to ensure that shallow information is sent exactly
+ # once by the server, since we are planning to manipulate it.
+ git -C "$REPO" config protocol.version 2 &&
+ git -C client config protocol.version 2 &&
+
+ git -C client fetch --depth=2 "$HTTPD_URL/one_time_sed/repo" master:a_branch &&
+
+ # Craft a situation in which the server sends back an unshallow request
+ # with an empty packfile. This is done by refetching with a shorter
+ # depth (to ensure that the packfile is empty), and overwriting the
+ # shallow line in the response with the unshallow line we want.
+ printf "s/0034shallow %s/0036unshallow %s/" \
+ "$(git -C "$REPO" rev-parse HEAD)" \
+ "$(git -C "$REPO" rev-parse HEAD^)" \
+ >"$HTTPD_ROOT_PATH/one-time-sed" &&
+ test_must_fail git -C client fetch --depth=1 "$HTTPD_URL/one_time_sed/repo" \
+ master:a_branch &&
+
+ # Ensure that the one-time-sed script was used.
+ ! test -e "$HTTPD_ROOT_PATH/one-time-sed" &&
+
+ # Ensure that the resulting repo is consistent, despite our failure to
+ # fetch.
+ git -C client fsck
+'
+
+stop_httpd
+
test_done
POST /smart/test_repo.git/git-upload-pack HTTP/1.1 200
EOF
test_expect_success 'no empty path components' '
+ # Clear the log, so that it does not affect the "used receive-pack
+ # service" test which reads the log too.
+ test_when_finished ">\"\$HTTPD_ROOT_PATH\"/access.log" &&
+
# In the URL, add a trailing slash, and see if git appends yet another
# slash.
cd "$ROOT_PATH" &&
git clone $HTTPD_URL/smart/test_repo.git/ test_repo_clone &&
- sed -e "
- s/^.* \"//
- s/\"//
- s/ [1-9][0-9]*\$//
- s/^GET /GET /
- " >act <"$HTTPD_ROOT_PATH"/access.log &&
-
- # Clear the log, so that it does not affect the "used receive-pack
- # service" test which reads the log too.
- #
- # We do this before the actual comparison to ensure the log is cleared.
- echo > "$HTTPD_ROOT_PATH"/access.log &&
-
- test_cmp exp act
+ check_access_log exp
'
test_expect_success 'clone remote repository' '
rm -f "$HTTPD_DOCUMENT_ROOT_PATH/test_repo.git/hooks/update"
cat >exp <<EOF
-
GET /smart/test_repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
POST /smart/test_repo.git/git-upload-pack HTTP/1.1 200
GET /smart/test_repo.git/info/refs?service=git-receive-pack HTTP/1.1 200
POST /smart/test_repo.git/git-receive-pack HTTP/1.1 200
EOF
test_expect_success 'used receive-pack service' '
- sed -e "
- s/^.* \"//
- s/\"//
- s/ [1-9][0-9]*\$//
- s/^GET /GET /
- " >act <"$HTTPD_ROOT_PATH"/access.log &&
- test_cmp exp act
+ check_access_log exp
'
test_http_push_nonff "$HTTPD_DOCUMENT_ROOT_PATH"/test_repo.git \
test_expect_success 'atomic push is not advertised if configured' '
mk_repo_pair &&
(
- cd upstream
+ cd upstream &&
git config receive.advertiseatomic 0
) &&
(
POST /smart/repo.git/git-upload-pack HTTP/1.1 200
EOF
test_expect_success 'used upload-pack service' '
- sed -e "
- s/^.* \"//
- s/\"//
- s/ [1-9][0-9]*\$//
- s/^GET /GET /
- " >act <"$HTTPD_ROOT_PATH"/access.log &&
- test_cmp exp act
+ check_access_log exp
'
test_expect_success 'follow redirects (301)' '
--- /dev/null
+#!/bin/sh
+
+test_description='test skipping fetch negotiator'
+. ./test-lib.sh
+
+have_sent () {
+ while test "$#" -ne 0
+ do
+ grep "fetch> have $(git -C client rev-parse $1)" trace
+ if test $? -ne 0
+ then
+ echo "No have $(git -C client rev-parse $1) ($1)"
+ return 1
+ fi
+ shift
+ done
+}
+
+have_not_sent () {
+ while test "$#" -ne 0
+ do
+ grep "fetch> have $(git -C client rev-parse $1)" trace
+ if test $? -eq 0
+ then
+ return 1
+ fi
+ shift
+ done
+}
+
+test_expect_success 'commits with no parents are sent regardless of skip distance' '
+ git init server &&
+ test_commit -C server to_fetch &&
+
+ git init client &&
+ for i in $(seq 7)
+ do
+ test_commit -C client c$i
+ done &&
+
+ # We send: "c7" (skip 1) "c5" (skip 2) "c2" (skip 4). After that, since
+ # "c1" has no parent, it is still sent as "have" even though it would
+ # normally be skipped.
+ test_config -C client fetch.negotiationalgorithm skipping &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch "$(pwd)/server" &&
+ have_sent c7 c5 c2 c1 &&
+ have_not_sent c6 c4 c3
+'
+
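(Editorial aside, not part of the patch: the doubling skip described in the comment inside the test above can be sketched with a few lines of shell. This only models a linear history and glosses over details such as commit timestamps; the always-send rule for parentless commits is noted in the trailing comment.)

# Illustrative sketch: walk c7..c1, doubling the skip after each "have".
i=7
skip=1
while test $i -ge 1
do
	echo "have c$i"
	i=$(($i - $skip - 1))
	skip=$(($skip * 2))
done
# prints "have c7", "have c5", "have c2"; c1 is then sent anyway
# because it has no parent.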
+test_expect_success 'when two skips collide, favor the larger one' '
+ rm -rf server client trace &&
+ git init server &&
+ test_commit -C server to_fetch &&
+
+ git init client &&
+ for i in $(seq 11)
+ do
+ test_commit -C client c$i
+ done &&
+ git -C client checkout c5 &&
+ test_commit -C client c5side &&
+
+ # Before reaching c5, we send "c5side" (skip 1) and "c11" (skip 1) "c9"
+ # (skip 2) "c6" (skip 4). The larger skip (skip 4) takes precedence, so
+ # the next "have" sent will be "c1" (from "c6" skip 4) and not "c4"
+ # (from "c5side" skip 1).
+ test_config -C client fetch.negotiationalgorithm skipping &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch "$(pwd)/server" &&
+ have_sent c5side c11 c9 c6 c1 &&
+ have_not_sent c10 c8 c7 c5 c4 c3 c2
+'
+
+test_expect_success 'use ref advertisement to filter out commits' '
+ rm -rf server client trace &&
+ git init server &&
+ test_commit -C server c1 &&
+ test_commit -C server c2 &&
+ test_commit -C server c3 &&
+ git -C server tag -d c1 c2 c3 &&
+
+ git clone server client &&
+ test_commit -C client c4 &&
+ test_commit -C client c5 &&
+ git -C client checkout c4^^ &&
+ test_commit -C client c2side &&
+
+ git -C server checkout --orphan anotherbranch &&
+ test_commit -C server to_fetch &&
+
+ # The server advertising "c3" (as "refs/heads/master") means that we do
+ # not need to send any ancestors of "c3", but we still need to send "c3"
+ # itself.
+ test_config -C client fetch.negotiationalgorithm skipping &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch origin to_fetch &&
+ have_sent c5 c4^ c2side &&
+ have_not_sent c4 c4^^ c4^^^
+'
+
+test_expect_success 'handle clock skew' '
+ rm -rf server client trace &&
+ git init server &&
+ test_commit -C server to_fetch &&
+
+ git init client &&
+
+ # 2 regular commits
+ test_tick=2000000000 &&
+ test_commit -C client c1 &&
+ test_commit -C client c2 &&
+
+ # 4 old commits
+ test_tick=1000000000 &&
+ git -C client checkout c1 &&
+ test_commit -C client old1 &&
+ test_commit -C client old2 &&
+ test_commit -C client old3 &&
+ test_commit -C client old4 &&
+
+ # "c2" and "c1" are popped first, then "old4" to "old1". "old1" would
+ # normally be skipped, but is treated as a commit without a parent here
+ # and sent, because (due to clock skew) its only parent has already been
+ # popped off the priority queue.
+ test_config -C client fetch.negotiationalgorithm skipping &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch "$(pwd)/server" &&
+ have_sent c2 c1 old4 old2 old1 &&
+ have_not_sent old3
+'
+
+test_expect_success 'do not send "have" with ancestors of commits that server ACKed' '
+ rm -rf server client trace &&
+ git init server &&
+ test_commit -C server to_fetch &&
+
+ git init client &&
+ for i in $(seq 8)
+ do
+ git -C client checkout --orphan b$i &&
+ test_commit -C client b$i.c0
+ done &&
+ for j in $(seq 19)
+ do
+ for i in $(seq 8)
+ do
+ git -C client checkout b$i &&
+ test_commit -C client b$i.c$j
+ done
+ done &&
+
+ # Copy this branch over to the server and add a commit on it so that it
+ # is reachable but not advertised.
+ git -C server fetch --no-tags "$(pwd)/client" b1:refs/heads/b1 &&
+ git -C server checkout b1 &&
+ test_commit -C server commit-on-b1 &&
+
+ test_config -C client fetch.negotiationalgorithm skipping &&
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client fetch "$(pwd)/server" to_fetch &&
+ grep " fetch" trace &&
+
+ # fetch-pack sends 2 requests each containing 16 "have" lines before
+ # processing the first response. In these 2 requests, 4 commits from
+ # each branch are sent. Just check the first branch.
+ have_sent b1.c19 b1.c17 b1.c14 b1.c9 &&
+ have_not_sent b1.c18 b1.c16 b1.c15 b1.c13 b1.c12 b1.c11 b1.c10 &&
+
+ # While fetch-pack is processing the first response, it should read that
+ # the server ACKs b1.c19 and b1.c17.
+ grep "fetch< ACK $(git -C client rev-parse b1.c19) common" trace &&
+ grep "fetch< ACK $(git -C client rev-parse b1.c17) common" trace &&
+
+ # fetch-pack should thus not send any more commits in the b1 branch, but
+ # should still send the others (in this test, just check b2).
+ for i in $(seq 0 8)
+ do
+ have_not_sent b1.c$i
+ done &&
+ have_sent b2.c1 b2.c0
+'
+
+test_done
POST /smart/repo.git/git-receive-pack HTTP/1.1 403 -
EOF
test_expect_success 'server request log matches test results' '
- sed -e "
- s/^.* \"//
- s/\"//
- s/ [1-9][0-9]*\$//
- s/^GET /GET /
- " >act <"$HTTPD_ROOT_PATH"/access.log &&
- test_cmp exp act
+ check_access_log exp
'
stop_httpd
test_expect_success 'clone on case-insensitive fs' '
git init icasefs &&
(
- cd icasefs
+ cd icasefs &&
o=$(git hash-object -w --stdin </dev/null | hex2oct) &&
t=$(printf "100644 X\0${o}100644 x\0${o}" |
git hash-object -w -t tree --stdin) &&
git config receive.denyCurrentBranch warn) &&
git clone empty empty-clone &&
test_tick &&
- (cd empty-clone
+ (cd empty-clone &&
echo "content" >> foo &&
git add foo &&
git commit -m "Initial commit" &&
printf "blob\nmark :$i\ndata $blobsize\n" &&
#test-tool genrandom $i $blobsize &&
printf "%-${blobsize}s" $i &&
- echo "M 100644 :$i $i" >> commit
+ echo "M 100644 :$i $i" >> commit &&
i=$(($i+1)) ||
echo $? > exit-status
done &&
grep "git index-pack.*--fsck-objects" trace
'
+test_expect_success 'partial clone fetches blobs pointed to by refs even if normally filtered out' '
+ rm -rf src dst &&
+ git init src &&
+ test_commit -C src x &&
+ test_config -C src uploadpack.allowfilter 1 &&
+ test_config -C src uploadpack.allowanysha1inwant 1 &&
+
+ # Create a tag pointing to a blob.
+ BLOB=$(echo blob-contents | git -C src hash-object --stdin -w) &&
+ git -C src tag myblob "$BLOB" &&
+
+ git clone --filter="blob:none" "file://$(pwd)/src" dst 2>err &&
+ ! grep "does not point to a valid object" err &&
+ git -C dst fsck
+'
+
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+# Converts bytes into a form suitable for inclusion in a sed command. For
+# example, "printf 'ab\r\n' | sed_escape" results in '\x61\x62\x0d\x0a'.
+sed_escape () {
+ perl -e '$/ = undef; $input = <>; print unpack("H2" x length($input), $input)' |
+ sed 's/\(..\)/\\x\1/g'
+}
+
+test_expect_success 'upon cloning, check that all refs point to objects' '
+ SERVER="$HTTPD_DOCUMENT_ROOT_PATH/server" &&
+ rm -rf "$SERVER" repo &&
+ test_create_repo "$SERVER" &&
+ test_commit -C "$SERVER" foo &&
+ test_config -C "$SERVER" uploadpack.allowfilter 1 &&
+ test_config -C "$SERVER" uploadpack.allowanysha1inwant 1 &&
+
+ # Create a tag pointing to a blob.
+ BLOB=$(echo blob-contents | git -C "$SERVER" hash-object --stdin -w) &&
+ git -C "$SERVER" tag myblob "$BLOB" &&
+
+ # Craft a packfile not including that blob.
+ git -C "$SERVER" rev-parse HEAD |
+ git -C "$SERVER" pack-objects --stdout >incomplete.pack &&
+
+ # Replace the existing packfile with the crafted one. The protocol
+ # requires that the packfile be sent in sideband 1, hence the extra
+ # \x01 byte at the beginning.
+ printf "1,/packfile/!c %04x\\\\x01%s0000" \
+ "$(($(wc -c <incomplete.pack) + 5))" \
+ "$(sed_escape <incomplete.pack)" \
+ >"$HTTPD_ROOT_PATH/one-time-sed" &&
+
+ # Use protocol v2 because the sed command looks for the "packfile"
+ # section header.
+ test_config -C "$SERVER" protocol.version 2 &&
+ test_must_fail git -c protocol.version=2 clone \
+ --filter=blob:none $HTTPD_URL/one_time_sed/server repo 2>err &&
+
+ grep "did not send all necessary objects" err &&
+
+ # Ensure that the one-time-sed script was used.
+ ! test -e "$HTTPD_ROOT_PATH/one-time-sed"
+'
+
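(Editorial aside, not part of the patch: the "+ 5" in the printf above accounts for the pkt-line framing. A pkt-line's 4-byte hex length includes the length field itself, and the crafted line also carries the one-byte sideband designator, so the length is payload + 1 + 4. The 100-byte figure below is hypothetical, chosen only to make the arithmetic concrete.)

#   100 (pack bytes) + 1 (sideband \x01) + 4 (pkt-len header) = 105 = 0x0069
#   so a 100-byte incomplete.pack would produce a line starting "0069\x01".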
+test_expect_success 'when partial cloning, tolerate server not sending target of tag' '
+ SERVER="$HTTPD_DOCUMENT_ROOT_PATH/server" &&
+ rm -rf "$SERVER" repo &&
+ test_create_repo "$SERVER" &&
+ test_commit -C "$SERVER" foo &&
+ test_config -C "$SERVER" uploadpack.allowfilter 1 &&
+ test_config -C "$SERVER" uploadpack.allowanysha1inwant 1 &&
+
+ # Create an annotated tag pointing to a blob.
+ BLOB=$(echo blob-contents | git -C "$SERVER" hash-object --stdin -w) &&
+ git -C "$SERVER" tag -m message -a myblob "$BLOB" &&
+
+ # Craft a packfile including the tag, but not the blob it points to.
+ # Also, omit objects referenced from HEAD in order to force a second
+ # fetch (to fetch missing objects) upon the automatic checkout that
+ # happens after a clone.
+ printf "%s\n%s\n--not\n%s\n%s\n" \
+ $(git -C "$SERVER" rev-parse HEAD) \
+ $(git -C "$SERVER" rev-parse myblob) \
+ $(git -C "$SERVER" rev-parse HEAD^{tree}) \
+ $(git -C "$SERVER" rev-parse myblob^{blob}) |
+ git -C "$SERVER" pack-objects --thin --stdout >incomplete.pack &&
+
+ # Replace the existing packfile with the crafted one. The protocol
+ # requires that the packfile be sent in sideband 1, hence the extra
+ # \x01 byte at the beginning.
+ printf "1,/packfile/!c %04x\\\\x01%s0000" \
+ "$(($(wc -c <incomplete.pack) + 5))" \
+ "$(sed_escape <incomplete.pack)" \
+ >"$HTTPD_ROOT_PATH/one-time-sed" &&
+
+ # Use protocol v2 because the sed command looks for the "packfile"
+ # section header.
+ test_config -C "$SERVER" protocol.version 2 &&
+
+ # Exercise to make sure it works.
+ git -c protocol.version=2 clone \
+ --filter=blob:none $HTTPD_URL/one_time_sed/server repo 2> err &&
+ ! grep "missing object referenced by" err &&
+
+ # Ensure that the one-time-sed script was used.
+ ! test -e "$HTTPD_ROOT_PATH/one-time-sed"
+'
+
+stop_httpd
+
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='upload-pack ref-in-want'
+
+. ./test-lib.sh
+
+get_actual_refs () {
+ sed -n -e '/wanted-refs/,/0001/{
+ /wanted-refs/d
+ /0001/d
+ p
+ }' <out | test-pkt-line unpack >actual_refs
+}
+
+get_actual_commits () {
+ sed -n -e '/packfile/,/0000/{
+ /packfile/d
+ p
+ }' <out | test-pkt-line unpack-sideband >o.pack &&
+ git index-pack o.pack &&
+ git verify-pack -v o.idx | grep commit | cut -c-40 | sort >actual_commits
+}
+
+check_output () {
+ get_actual_refs &&
+ test_cmp expected_refs actual_refs &&
+ get_actual_commits &&
+ test_cmp expected_commits actual_commits
+}
+
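(Editorial aside, not part of the patch: ignoring the pkt-line length prefixes that test-pkt-line strips, the server response these helpers slice up is roughly shaped like the sketch below. Only the section names and the 0001/0000 delimiters are taken from the sed expressions above; the rest is an assumption for illustration.)

# wanted-refs                  <- section header
# <oid> refs/heads/master      <- lines kept by get_actual_refs
# 0001                         <- delim-pkt ends the section
# packfile                     <- section header
# <sideband-framed pack data>  <- decoded by get_actual_commits
# 0000                         <- flush-pkt ends the response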
+# c(o/foo) d(o/bar)
+# \ /
+# b e(baz) f(master)
+# \__ | __/
+# \ | /
+# a
+test_expect_success 'setup repository' '
+ test_commit a &&
+ git checkout -b o/foo &&
+ test_commit b &&
+ test_commit c &&
+ git checkout -b o/bar b &&
+ test_commit d &&
+ git checkout -b baz a &&
+ test_commit e &&
+ git checkout master &&
+ test_commit f
+'
+
+test_expect_success 'config controls ref-in-want advertisement' '
+ git serve --advertise-capabilities >out &&
+ ! grep -a ref-in-want out &&
+
+ git config uploadpack.allowRefInWant false &&
+ git serve --advertise-capabilities >out &&
+ ! grep -a ref-in-want out &&
+
+ git config uploadpack.allowRefInWant true &&
+ git serve --advertise-capabilities >out &&
+ grep -a ref-in-want out
+'
+
+test_expect_success 'invalid want-ref line' '
+ test-pkt-line pack >in <<-EOF &&
+ command=fetch
+ 0001
+ no-progress
+ want-ref refs/heads/non-existent
+ done
+ 0000
+ EOF
+
+ test_must_fail git serve --stateless-rpc 2>out <in &&
+ grep "unknown ref" out
+'
+
+test_expect_success 'basic want-ref' '
+ cat >expected_refs <<-EOF &&
+ $(git rev-parse f) refs/heads/master
+ EOF
+ git rev-parse f | sort >expected_commits &&
+
+ test-pkt-line pack >in <<-EOF &&
+ command=fetch
+ 0001
+ no-progress
+ want-ref refs/heads/master
+ have $(git rev-parse a)
+ done
+ 0000
+ EOF
+
+ git serve --stateless-rpc >out <in &&
+ check_output
+'
+
+test_expect_success 'multiple want-ref lines' '
+ cat >expected_refs <<-EOF &&
+ $(git rev-parse c) refs/heads/o/foo
+ $(git rev-parse d) refs/heads/o/bar
+ EOF
+ git rev-parse c d | sort >expected_commits &&
+
+ test-pkt-line pack >in <<-EOF &&
+ command=fetch
+ 0001
+ no-progress
+ want-ref refs/heads/o/foo
+ want-ref refs/heads/o/bar
+ have $(git rev-parse b)
+ done
+ 0000
+ EOF
+
+ git serve --stateless-rpc >out <in &&
+ check_output
+'
+
+test_expect_success 'mix want and want-ref' '
+ cat >expected_refs <<-EOF &&
+ $(git rev-parse f) refs/heads/master
+ EOF
+ git rev-parse e f | sort >expected_commits &&
+
+ test-pkt-line pack >in <<-EOF &&
+ command=fetch
+ 0001
+ no-progress
+ want-ref refs/heads/master
+ want $(git rev-parse e)
+ have $(git rev-parse a)
+ done
+ 0000
+ EOF
+
+ git serve --stateless-rpc >out <in &&
+ check_output
+'
+
+test_expect_success 'want-ref with ref we already have commit for' '
+ cat >expected_refs <<-EOF &&
+ $(git rev-parse c) refs/heads/o/foo
+ EOF
+ >expected_commits &&
+
+ test-pkt-line pack >in <<-EOF &&
+ command=fetch
+ 0001
+ no-progress
+ want-ref refs/heads/o/foo
+ have $(git rev-parse c)
+ done
+ 0000
+ EOF
+
+ git serve --stateless-rpc >out <in &&
+ check_output
+'
+
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+REPO="$HTTPD_DOCUMENT_ROOT_PATH/repo"
+LOCAL_PRISTINE="$(pwd)/local_pristine"
+
+test_expect_success 'setup repos for change-while-negotiating test' '
+ (
+ git init "$REPO" &&
+ cd "$REPO" &&
+ >.git/git-daemon-export-ok &&
+ test_commit m1 &&
+ git tag -d m1 &&
+
+ # Local repo with many commits (so that negotiation will take
+ # more than 1 request/response pair)
+ git clone "http://127.0.0.1:$LIB_HTTPD_PORT/smart/repo" "$LOCAL_PRISTINE" &&
+ cd "$LOCAL_PRISTINE" &&
+ git checkout -b side &&
+ for i in $(seq 1 33); do test_commit s$i; done &&
+
+ # Add novel commits to upstream
+ git checkout master &&
+ cd "$REPO" &&
+ test_commit m2 &&
+ test_commit m3 &&
+ git tag -d m2 m3
+ ) &&
+ git -C "$LOCAL_PRISTINE" remote set-url origin "http://127.0.0.1:$LIB_HTTPD_PORT/one_time_sed/repo" &&
+ git -C "$LOCAL_PRISTINE" config protocol.version 2
+'
+
+inconsistency () {
+ # Simulate that the server initially reports $2 as the ref
+ # corresponding to $1, and on subsequent requests reports the real
+ # value of $1. This corresponds to the real-life situation where the server's
+ # repository appears to change during negotiation, for example, when
+ # different servers in a load-balancing arrangement serve (stateless)
+ # RPCs during a single negotiation.
+ printf "s/%s/%s/" \
+ $(git -C "$REPO" rev-parse $1 | tr -d "\n") \
+ $(git -C "$REPO" rev-parse $2 | tr -d "\n") \
+ >"$HTTPD_ROOT_PATH/one-time-sed"
+}
+
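(Editorial aside, not part of the patch: as a concrete illustration, "inconsistency master master^" writes a single substitution of the form below into "$HTTPD_ROOT_PATH/one-time-sed"; the lib-httpd one_time_sed endpoint applies it to one response only, so just the first round of negotiation sees the doctored advertisement. The placeholder OIDs are hypothetical.)

#   s/<oid of master>/<oid of master^>/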
+test_expect_success 'server is initially ahead - no ref in want' '
+ git -C "$REPO" config uploadpack.allowRefInWant false &&
+ rm -rf local &&
+ cp -r "$LOCAL_PRISTINE" local &&
+ inconsistency master 1234567890123456789012345678901234567890 &&
+ test_must_fail git -C local fetch 2>err &&
+ grep "ERR upload-pack: not our ref" err
+'
+
+test_expect_success 'server is initially ahead - ref in want' '
+ git -C "$REPO" config uploadpack.allowRefInWant true &&
+ rm -rf local &&
+ cp -r "$LOCAL_PRISTINE" local &&
+ inconsistency master 1234567890123456789012345678901234567890 &&
+ git -C local fetch &&
+
+ git -C "$REPO" rev-parse --verify master >expected &&
+ git -C local rev-parse --verify refs/remotes/origin/master >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'server is initially behind - no ref in want' '
+ git -C "$REPO" config uploadpack.allowRefInWant false &&
+ rm -rf local &&
+ cp -r "$LOCAL_PRISTINE" local &&
+ inconsistency master "master^" &&
+ git -C local fetch &&
+
+ git -C "$REPO" rev-parse --verify "master^" >expected &&
+ git -C local rev-parse --verify refs/remotes/origin/master >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'server is initially behind - ref in want' '
+ git -C "$REPO" config uploadpack.allowRefInWant true &&
+ rm -rf local &&
+ cp -r "$LOCAL_PRISTINE" local &&
+ inconsistency master "master^" &&
+ git -C local fetch &&
+
+ git -C "$REPO" rev-parse --verify "master" >expected &&
+ git -C local rev-parse --verify refs/remotes/origin/master >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'server loses a ref - ref in want' '
+ git -C "$REPO" config uploadpack.allowRefInWant true &&
+ rm -rf local &&
+ cp -r "$LOCAL_PRISTINE" local &&
+ echo "s/master/raster/" >"$HTTPD_ROOT_PATH/one-time-sed" &&
+ test_must_fail git -C local fetch 2>err &&
+
+ grep "ERR unknown ref refs/heads/raster" err
+'
+
+stop_httpd
+
+REPO="$(pwd)/repo"
+LOCAL_PRISTINE="$(pwd)/local_pristine"
+
+# $REPO
+# c(o/foo) d(o/bar)
+# \ /
+# b e(baz) f(master)
+# \__ | __/
+# \ | /
+# a
+#
+# $LOCAL_PRISTINE
+# s32(side)
+# |
+# .
+# .
+# |
+# a(master)
+test_expect_success 'setup repos for fetching with ref-in-want tests' '
+ (
+ git init "$REPO" &&
+ cd "$REPO" &&
+ test_commit a &&
+
+ # Local repo with many commits (so that negotiation will take
+ # more than 1 request/response pair)
+ rm -rf "$LOCAL_PRISTINE" &&
+ git clone "file://$REPO" "$LOCAL_PRISTINE" &&
+ cd "$LOCAL_PRISTINE" &&
+ git checkout -b side &&
+ for i in $(seq 1 33); do test_commit s$i; done &&
+
+ # Add novel commits to upstream
+ git checkout master &&
+ cd "$REPO" &&
+ git checkout -b o/foo &&
+ test_commit b &&
+ test_commit c &&
+ git checkout -b o/bar b &&
+ test_commit d &&
+ git checkout -b baz a &&
+ test_commit e &&
+ git checkout master &&
+ test_commit f
+ ) &&
+ git -C "$REPO" config uploadpack.allowRefInWant true &&
+ git -C "$LOCAL_PRISTINE" config protocol.version 2
+'
+
+test_expect_success 'fetching with exact OID' '
+ test_when_finished "rm -f log" &&
+
+ rm -rf local &&
+ cp -r "$LOCAL_PRISTINE" local &&
+ GIT_TRACE_PACKET="$(pwd)/log" git -C local fetch origin \
+ $(git -C "$REPO" rev-parse d):refs/heads/actual &&
+
+ git -C "$REPO" rev-parse "d" >expected &&
+ git -C local rev-parse refs/heads/actual >actual &&
+ test_cmp expected actual &&
+ grep "want $(git -C "$REPO" rev-parse d)" log
+'
+
+test_expect_success 'fetching multiple refs' '
+ test_when_finished "rm -f log" &&
+
+ rm -rf local &&
+ cp -r "$LOCAL_PRISTINE" local &&
+ GIT_TRACE_PACKET="$(pwd)/log" git -C local fetch origin master baz &&
+
+ git -C "$REPO" rev-parse "master" "baz" >expected &&
+ git -C local rev-parse refs/remotes/origin/master refs/remotes/origin/baz >actual &&
+ test_cmp expected actual &&
+ grep "want-ref refs/heads/master" log &&
+ grep "want-ref refs/heads/baz" log
+'
+
+test_expect_success 'fetching ref and exact OID' '
+ test_when_finished "rm -f log" &&
+
+ rm -rf local &&
+ cp -r "$LOCAL_PRISTINE" local &&
+ GIT_TRACE_PACKET="$(pwd)/log" git -C local fetch origin \
+ master $(git -C "$REPO" rev-parse b):refs/heads/actual &&
+
+ git -C "$REPO" rev-parse "master" "b" >expected &&
+ git -C local rev-parse refs/remotes/origin/master refs/heads/actual >actual &&
+ test_cmp expected actual &&
+ grep "want $(git -C "$REPO" rev-parse b)" log &&
+ grep "want-ref refs/heads/master" log
+'
+
+test_expect_success 'fetching with wildcard that does not match any refs' '
+ test_when_finished "rm -f log" &&
+
+ rm -rf local &&
+ cp -r "$LOCAL_PRISTINE" local &&
+ git -C local fetch origin refs/heads/none*:refs/heads/* >out &&
+ test_must_be_empty out
+'
+
+test_expect_success 'fetching with wildcard that matches multiple refs' '
+ test_when_finished "rm -f log" &&
+
+ rm -rf local &&
+ cp -r "$LOCAL_PRISTINE" local &&
+ GIT_TRACE_PACKET="$(pwd)/log" git -C local fetch origin refs/heads/o*:refs/heads/o* &&
+
+ git -C "$REPO" rev-parse "o/foo" "o/bar" >expected &&
+ git -C local rev-parse "o/foo" "o/bar" >actual &&
+ test_cmp expected actual &&
+ grep "want-ref refs/heads/o/foo" log &&
+ grep "want-ref refs/heads/o/bar" log
+'
+
+test_done
test_expect_success 'push new branch with HEAD:new refspec' '
(cd local &&
- git checkout new-name
+ git checkout new-name &&
git push origin HEAD:new-refspec-2
) &&
compare_refs local HEAD server refs/heads/new-refspec-2
git commit --allow-empty -m "Derived #$count" &&
git rev-parse HEAD >derived$count &&
git checkout -B base $E || exit 1
- done
+ done &&
for count in 1 2 3
do
git checkout -b work &&
git ls-files -s >actual &&
(
- echo "100644 $o1 0 git-gui/git-gui.sh"
+ echo "100644 $o1 0 git-gui/git-gui.sh" &&
echo "100644 $o2 0 git.c"
) >expected &&
test_cmp expected actual
git pull -s subtree gui master2 &&
git ls-files -s >actual &&
(
- echo "100644 $o3 0 git-gui/git-gui.sh"
+ echo "100644 $o3 0 git-gui/git-gui.sh" &&
echo "100644 $o2 0 git.c"
) >expected &&
test_cmp expected actual
git checkout -b work2 &&
git ls-files -s >actual &&
(
- echo "100644 $o1 0 git-gui/git-gui.sh"
- echo "100644 $o1 0 git-gui2/git-gui.sh"
+ echo "100644 $o1 0 git-gui/git-gui.sh" &&
+ echo "100644 $o1 0 git-gui2/git-gui.sh" &&
echo "100644 $o2 0 git.c"
) >expected &&
test_cmp expected actual
git pull -Xsubtree=git-gui gui master2 &&
git ls-files -s >actual &&
(
- echo "100644 $o3 0 git-gui/git-gui.sh"
- echo "100644 $o1 0 git-gui2/git-gui.sh"
+ echo "100644 $o3 0 git-gui/git-gui.sh" &&
+ echo "100644 $o1 0 git-gui2/git-gui.sh" &&
echo "100644 $o2 0 git.c"
) >expected &&
test_cmp expected actual
git pull -Xsubtree=git-gui2 gui master2 &&
git ls-files -s >actual &&
(
- echo "100644 $o1 0 git-gui/git-gui.sh"
- echo "100644 $o3 0 git-gui2/git-gui.sh"
+ echo "100644 $o1 0 git-gui/git-gui.sh" &&
+ echo "100644 $o3 0 git-gui2/git-gui.sh" &&
echo "100644 $o2 0 git.c"
) >expected &&
test_cmp expected actual
git rev-parse >actual \
:2:three :3:three &&
git hash-object >>actual \
- three~HEAD three~R2^0
+ three~HEAD three~R2^0 &&
test_cmp expect actual
)
'
git rev-parse >actual \
:2:three :3:three &&
git hash-object >>actual \
- three~HEAD three~R2^0
+ three~HEAD three~R2^0 &&
test_cmp expect actual
)
'
D:new_a E:new_a &&
git rev-parse >actual \
:2:new_a :3:new_a &&
- test_cmp expect actual
+ test_cmp expect actual &&
git cat-file -p B:new_a >ours &&
git cat-file -p C:new_a >theirs &&
)
'
+# SORRY FOR THE SUPER LONG DESCRIPTION, BUT THIS NEXT ONE IS HAIRY
#
# criss-cross + d/f conflict via add/add:
# Commit A: Neither file 'a' nor directory 'a/' exists.
# Commit B: Introduce 'a'
# Commit C: Introduce 'a/file'
-# Commit D: Merge B & C, keeping 'a' and deleting 'a/'
-#
-# Two different later cases:
+# Commit D1: Merge B & C, keeping 'a' and deleting 'a/'
# Commit E1: Merge B & C, deleting 'a' but keeping 'a/file'
-# Commit E2: Merge B & C, deleting 'a' but keeping a slightly modified 'a/file'
#
-# B D
+# B D1 or D2
# o---o
# / \ / \
# A o X ? F
# \ / \ /
# o---o
-# C E1 or E2
+# C E1 or E2 or E3
+#
+# I'll describe D2, E2, & E3 (which are alternatives for D1 & E1) more below...
+#
+# Merging D1 & E1 requires that we first create a virtual merge base X from
+# merging B & C in memory. There are several possibilities for the merge-base:
+# 1: Keep both 'a' and 'a/file' (assuming crazy filesystem allowing a tree
+# with a directory and file at same path): results in merge of D1 & E1
+# being clean with both files deleted. Bad (no conflict detected).
+# 2: Keep 'a' but not 'a/file': Merging D1 & E1 is clean and matches E1. Bad.
+# 3: Keep 'a/file' but not 'a': Merging D1 & E1 is clean and matches D1. Bad.
+# 4: Keep neither file: Merging D1 & E1 reports the D/F add/add conflict.
+#
+# So 4 sounds good for this case, but if we were to merge D1 & E3, where E3
+# is defined as:
+# Commit E3: Merge B & C, keeping modified a, and deleting a/
+# then we'd get an add/add conflict for 'a', which seems suboptimal. A little
+# creativity leads us to an alternate choice:
+# 5: Keep 'a' as 'a~$UNIQUE' and a/file; results:
+# Merge D1 & E1: rename/delete conflict for 'a'; a/file silently deleted
+# Merge D1 & E3 is clean, as expected.
#
-# Merging D & E1 requires we first create a virtual merge base X from
-# merging A & B in memory. Now, if X could keep both 'a' and 'a/file' in
-# the index, then the merge of D & E1 could be resolved cleanly with both
-# 'a' and 'a/file' removed. Since git does not currently allow creating
-# such a tree, the best we can do is have X contain both 'a~<unique>' and
-# 'a/file' resulting in the merge of D and E1 having a rename/delete
-# conflict for 'a'. (Although this merge appears to be unsolvable with git
-# currently, git could do a lot better than it currently does with these
-# d/f conflicts, which is the purpose of this test.)
+# So choice 5 at least provides some kind of conflict for the original case,
+# and can merge cleanly as expected with D1 and E3. It also makes things just
+# slightly funny for merging D1 and E4, where E4 is defined as:
+# Commit E4: Merge B & C, modifying 'a' and renaming to 'a2', and deleting 'a/'
+# in this case, we'll get a rename/rename(1to2) conflict because a~$UNIQUE
+# gets renamed to 'a' in D1 and to 'a2' in E4. But that's better than having
+# two files (both 'a' and 'a2') sitting around without the user being notified
+# that we could detect they were related and need to be merged. Also, choice
+# 5 makes the handling of 'a/file' seem suboptimal. What if we were to merge
+# D2 and E4, where D2 is:
+# Commit D2: Merge B & C, renaming 'a'->'a2', keeping 'a/file'
+# This would result in a clean merge with 'a2' having three-way merged
+# contents (good), and deleting 'a/' (bad) -- it doesn't detect the
+# conflict in how the different sides treated a/file differently.
+# Continuing down the creative route:
+# 6: Keep 'a' as 'a~$UNIQUE1' and keep 'a/' as 'a~$UNIQUE2/'; results:
+# Merge D1 & E1: rename/delete conflict for 'a' and each path under 'a/'.
+# Merge D1 & E3: clean, as expected.
+# Merge D1 & E4: rename/rename(1to2) conflict on 'a' vs 'a2'.
+# Merge D2 & E4: clean for 'a2', rename/delete for a/file
#
-# Merge of D & E2 has similar issues for path 'a', but should always result
-# in a modify/delete conflict for path 'a/file'.
+# Choice 6 could cause rename detection to take longer (providing more targets
+# that need to be searched). Also, the conflict message for each path under
+# 'a/' might be annoying unless we can detect it at the directory level, print
+# it once, and then suppress it for individual filepaths underneath.
#
-# We run each merge in both directions, to check for directional issues
-# with D/F conflict handling.
+#
+# As of the time of writing, git uses choice 5. Directory rename detection and
+# rename detection performance improvements might make choice 6 a desirable
+# improvement. But we can at least document where we fall short for now...
+#
+#
+# Historically, this testcase also used:
+# Commit E2: Merge B & C, deleting 'a' but keeping slightly modified 'a/file'
+# The merge of D1 & E2 is very similar to D1 & E1 -- it has similar issues for
+# path 'a', but should always result in a modify/delete conflict for path
+# 'a/file'. These tests ran the two merges
+# D1 & E1
+# D1 & E2
+# in both directions, to check for directional issues with D/F conflict
+# handling. Later we added
+# D1 & E3
+# D1 & E4
+# D2 & E4
+# for good measure, though we only ran those one way because we had pretty
+# good confidence in merge-recursive's directional handling of D/F issues.
+#
+# Just to summarize all the intermediate merge commits:
+# Commit D1: Merge B & C, keeping a and deleting a/
+# Commit D2: Merge B & C, renaming a->a2, keeping a/file
+# Commit E1: Merge B & C, deleting a but keeping a/file
+# Commit E2: Merge B & C, deleting a but keeping slightly modified a/file
+# Commit E3: Merge B & C, keeping modified a, and deleting a/
+# Commit E4: Merge B & C, modifying 'a' and renaming to 'a2', and deleting 'a/'
#
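+#
+# To make the current behavior (choice 5) concrete: the "merge of D1 & E4"
+# test below does "git checkout D1^0" followed by "git merge -s recursive
+# E4^0", and then checks that the virtual merge base's version of 'a' shows
+# up in the index under a temporary name -- literally the stage-1 entry
+# "a~Temporary merge branch 2" -- next to stage-2 'a' (from D1) and
+# stage-3 'a2' (from E4).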
test_expect_success 'setup differently handled merges of directory/file conflict' '
git branch B &&
git checkout -b C &&
mkdir a &&
- echo 10 >a/file &&
+ test_write_lines a b c d e f g >a/file &&
git add a/file &&
test_tick &&
git commit -m C &&
git checkout B &&
- echo 5 >a &&
+ test_write_lines 1 2 3 4 5 6 7 >a &&
git add a &&
test_tick &&
git commit -m B &&
git checkout B^0 &&
- test_must_fail git merge C &&
- git clean -f &&
- rm -rf a/ &&
- echo 5 >a &&
- git add a &&
- test_tick &&
- git commit -m D &&
- git tag D &&
+ git merge -s ours -m D1 C^0 &&
+ git tag D1 &&
+
+ git checkout B^0 &&
+ test_must_fail git merge C^0 &&
+ git clean -fd &&
+ git rm -rf a/ &&
+ git rm a &&
+ git cat-file -p B:a >a2 &&
+ git add a2 &&
+ git commit -m D2 &&
+ git tag D2 &&
git checkout C^0 &&
- test_must_fail git merge B &&
- git clean -f &&
- git rm --cached a &&
- echo 10 >a/file &&
- git add a/file &&
- test_tick &&
- git commit -m E1 &&
+ git merge -s ours -m E1 B^0 &&
git tag E1 &&
git checkout C^0 &&
- test_must_fail git merge B &&
- git clean -f &&
- git rm --cached a &&
- printf "10\n11\n" >a/file &&
+ git merge -s ours -m E2 B^0 &&
+ test_write_lines a b c d e f g h >a/file &&
git add a/file &&
- test_tick &&
- git commit -m E2 &&
- git tag E2
+ git commit --amend -C HEAD &&
+ git tag E2 &&
+
+ git checkout C^0 &&
+ test_must_fail git merge B^0 &&
+ git clean -fd &&
+ git rm -rf a/ &&
+ test_write_lines 1 2 3 4 5 6 7 8 >a &&
+ git add a &&
+ git commit -m E3 &&
+ git tag E3 &&
+
+ git checkout C^0 &&
+ test_must_fail git merge B^0 &&
+ git clean -fd &&
+ git rm -rf a/ &&
+ git rm a &&
+ test_write_lines 1 2 3 4 5 6 7 8 >a2 &&
+ git add a2 &&
+ git commit -m E4 &&
+ git tag E4
)
'
-test_expect_success 'merge of D & E1 fails but has appropriate contents' '
+test_expect_success 'merge of D1 & E1 fails but has appropriate contents' '
test_when_finished "git -C directory-file reset --hard" &&
test_when_finished "git -C directory-file clean -fdqx" &&
(
cd directory-file &&
- git checkout D^0 &&
+ git checkout D1^0 &&
test_must_fail git merge -s recursive E1^0 &&
)
'
-test_expect_success 'merge of E1 & D fails but has appropriate contents' '
+test_expect_success 'merge of E1 & D1 fails but has appropriate contents' '
test_when_finished "git -C directory-file reset --hard" &&
test_when_finished "git -C directory-file clean -fdqx" &&
(
git checkout E1^0 &&
- test_must_fail git merge -s recursive D^0 &&
+ test_must_fail git merge -s recursive D1^0 &&
git ls-files -s >out &&
test_line_count = 2 out &&
)
'
-test_expect_success 'merge of D & E2 fails but has appropriate contents' '
+test_expect_success 'merge of D1 & E2 fails but has appropriate contents' '
test_when_finished "git -C directory-file reset --hard" &&
test_when_finished "git -C directory-file clean -fdqx" &&
(
cd directory-file &&
- git checkout D^0 &&
+ git checkout D1^0 &&
test_must_fail git merge -s recursive E2^0 &&
test_line_count = 2 out &&
git rev-parse >expect \
- B:a E2:a/file c:a/file A:ignore-me &&
+ B:a E2:a/file C:a/file A:ignore-me &&
git rev-parse >actual \
:2:a :3:a/file :1:a/file :0:ignore-me &&
- test_cmp expect actual
+ test_cmp expect actual &&
test_path_is_file a~HEAD
)
'
-test_expect_success 'merge of E2 & D fails but has appropriate contents' '
+test_expect_success 'merge of E2 & D1 fails but has appropriate contents' '
test_when_finished "git -C directory-file reset --hard" &&
test_when_finished "git -C directory-file clean -fdqx" &&
(
git checkout E2^0 &&
- test_must_fail git merge -s recursive D^0 &&
+ test_must_fail git merge -s recursive D1^0 &&
git ls-files -s >out &&
test_line_count = 4 out &&
test_line_count = 2 out &&
git rev-parse >expect \
- B:a E2:a/file c:a/file A:ignore-me &&
+ B:a E2:a/file C:a/file A:ignore-me &&
git rev-parse >actual \
:3:a :2:a/file :1:a/file :0:ignore-me &&
+ test_cmp expect actual &&
+
+ test_path_is_file a~D1^0
+ )
+'
+
+test_expect_success 'merge of D1 & E3 succeeds' '
+ test_when_finished "git -C directory-file reset --hard" &&
+ test_when_finished "git -C directory-file clean -fdqx" &&
+ (
+ cd directory-file &&
+
+ git checkout D1^0 &&
+
+ git merge -s recursive E3^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 2 out &&
+ git ls-files -u >out &&
+ test_line_count = 0 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out &&
+
+ git rev-parse >expect \
+ A:ignore-me E3:a &&
+ git rev-parse >actual \
+ :0:ignore-me :0:a &&
test_cmp expect actual
+ )
+'
- test_path_is_file a~D^0
+test_expect_success 'merge of D1 & E4 notifies user a and a2 are related' '
+ test_when_finished "git -C directory-file reset --hard" &&
+ test_when_finished "git -C directory-file clean -fdqx" &&
+ (
+ cd directory-file &&
+
+ git checkout D1^0 &&
+
+ test_must_fail git merge -s recursive E4^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 4 out &&
+ git ls-files -u >out &&
+ test_line_count = 3 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out &&
+
+ git rev-parse >expect \
+ A:ignore-me B:a D1:a E4:a2 &&
+ git rev-parse >actual \
+ :0:ignore-me :1:a~Temporary\ merge\ branch\ 2 :2:a :3:a2 &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_failure 'merge of D2 & E4 merges a2s & reports conflict for a/file' '
+ test_when_finished "git -C directory-file reset --hard" &&
+ test_when_finished "git -C directory-file clean -fdqx" &&
+ (
+ cd directory-file &&
+
+ git checkout D2^0 &&
+
+ test_must_fail git merge -s recursive E4^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 3 out &&
+ git ls-files -u >out &&
+ test_line_count = 1 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out &&
+
+ git rev-parse >expect \
+ A:ignore-me E4:a2 D2:a/file &&
+ git rev-parse >actual \
+ :0:ignore-me :0:a2 :2:a/file &&
+ test_cmp expect actual
)
'
)
'
+#
+# criss-cross with modify/modify on a symlink:
+#
+# B D
+# o---o
+# / \ / \
+# A o X ? F
+# \ / \ /
+# o---o
+# C E
+#
+# Commit A: simple symlink fickle->lagoon
+# Commit B: redirect fickle->disneyland
+# Commit C: redirect fickle->home
+# Commit D: merge B&C, resolving in favor of B
+# Commit E: merge B&C, resolving in favor of C
+#
+# This is an obvious modify/modify conflict for the symlink 'fickle'. Can
+# git detect it?
+
+test_expect_success 'setup symlink modify/modify' '
+ test_create_repo symlink-modify-modify &&
+ (
+ cd symlink-modify-modify &&
+
+ test_ln_s_add lagoon fickle &&
+ git commit -m A &&
+ git tag A &&
+
+ git checkout -b B A &&
+ git rm fickle &&
+ test_ln_s_add disneyland fickle &&
+ git commit -m B &&
+
+ git checkout -b C A &&
+ git rm fickle &&
+ test_ln_s_add home fickle &&
+ git add fickle &&
+ git commit -m C &&
+
+ git checkout -q B^0 &&
+ git merge -s ours -m D C^0 &&
+ git tag D &&
+
+ git checkout -q C^0 &&
+ git merge -s ours -m E B^0 &&
+ git tag E
+ )
+'
+
+test_expect_failure 'check symlink modify/modify' '
+ (
+ cd symlink-modify-modify &&
+
+ git checkout D^0 &&
+
+ test_must_fail git merge -s recursive E^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 3 out &&
+ git ls-files -u >out &&
+ test_line_count = 3 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out
+ )
+'
+
+#
+# criss-cross with add/add of a symlink:
+#
+# B D
+# o---o
+# / \ / \
+# A o X ? F
+# \ / \ /
+# o---o
+# C E
+#
+# Commit A: No symlink or path exists yet
+# Commit B: set up symlink: fickle->disneyland
+# Commit C: set up symlink: fickle->home
+# Commit D: merge B&C, resolving in favor of B
+# Commit E: merge B&C, resolving in favor of C
+#
+# This is an obvious add/add conflict for the symlink 'fickle'. Can
+# git detect it?
+
+test_expect_success 'setup symlink add/add' '
+ test_create_repo symlink-add-add &&
+ (
+ cd symlink-add-add &&
+
+ touch ignoreme &&
+ git add ignoreme &&
+ git commit -m A &&
+ git tag A &&
+
+ git checkout -b B A &&
+ test_ln_s_add disneyland fickle &&
+ git commit -m B &&
+
+ git checkout -b C A &&
+ test_ln_s_add home fickle &&
+ git add fickle &&
+ git commit -m C &&
+
+ git checkout -q B^0 &&
+ git merge -s ours -m D C^0 &&
+ git tag D &&
+
+ git checkout -q C^0 &&
+ git merge -s ours -m E B^0 &&
+ git tag E
+ )
+'
+
+test_expect_failure 'check symlink add/add' '
+ (
+ cd symlink-add-add &&
+
+ git checkout D^0 &&
+
+ test_must_fail git merge -s recursive E^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 2 out &&
+ git ls-files -u >out &&
+ test_line_count = 2 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out
+ )
+'
+
+#
+# criss-cross with modify/modify on a submodule:
+#
+# B D
+# o---o
+# / \ / \
+# A o X ? F
+# \ / \ /
+# o---o
+# C E
+#
+# Commit A: simple submodule repo
+# Commit B: update repo
+# Commit C: update repo differently
+# Commit D: merge B&C, resolving in favor of B
+# Commit E: merge B&C, resolving in favor of C
+#
+# This is an obvious modify/modify conflict for the submodule 'repo'. Can
+# git detect it?
+
+test_expect_success 'setup submodule modify/modify' '
+ test_create_repo submodule-modify-modify &&
+ (
+ cd submodule-modify-modify &&
+
+ test_create_repo submod &&
+ (
+ cd submod &&
+ touch file-A &&
+ git add file-A &&
+ git commit -m A &&
+ git tag A &&
+
+ git checkout -b B A &&
+ touch file-B &&
+ git add file-B &&
+ git commit -m B &&
+ git tag B &&
+
+ git checkout -b C A &&
+ touch file-C &&
+ git add file-C &&
+ git commit -m C &&
+ git tag C
+ ) &&
+
+ git -C submod reset --hard A &&
+ git add submod &&
+ git commit -m A &&
+ git tag A &&
+
+ git checkout -b B A &&
+ git -C submod reset --hard B &&
+ git add submod &&
+ git commit -m B &&
+
+ git checkout -b C A &&
+ git -C submod reset --hard C &&
+ git add submod &&
+ git commit -m C &&
+
+ git checkout -q B^0 &&
+ git merge -s ours -m D C^0 &&
+ git tag D &&
+
+ git checkout -q C^0 &&
+ git merge -s ours -m E B^0 &&
+ git tag E
+ )
+'
+
+test_expect_failure 'check submodule modify/modify' '
+ (
+ cd submodule-modify-modify &&
+
+ git checkout D^0 &&
+
+ test_must_fail git merge -s recursive E^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 3 out &&
+ git ls-files -u >out &&
+ test_line_count = 3 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out
+ )
+'
+
+#
+# criss-cross with add/add on a submodule:
+#
+# B D
+# o---o
+# / \ / \
+# A o X ? F
+# \ / \ /
+# o---o
+# C E
+#
+# Commit A: nothing of note
+# Commit B: introduce submodule repo
+# Commit C: introduce submodule repo at different commit
+# Commit D: merge B&C, resolving in favor of B
+# Commit E: merge B&C, resolving in favor of C
+#
+# This is an obvious add/add conflict for the submodule 'repo'. Can
+# git detect it?
+
+test_expect_success 'setup submodule add/add' '
+ test_create_repo submodule-add-add &&
+ (
+ cd submodule-add-add &&
+
+ test_create_repo submod &&
+ (
+ cd submod &&
+ touch file-A &&
+ git add file-A &&
+ git commit -m A &&
+ git tag A &&
+
+ git checkout -b B A &&
+ touch file-B &&
+ git add file-B &&
+ git commit -m B &&
+ git tag B &&
+
+ git checkout -b C A &&
+ touch file-C &&
+ git add file-C &&
+ git commit -m C &&
+ git tag C
+ ) &&
+
+ touch irrelevant-file &&
+ git add irrelevant-file &&
+ git commit -m A &&
+ git tag A &&
+
+ git checkout -b B A &&
+ git -C submod reset --hard B &&
+ git add submod &&
+ git commit -m B &&
+
+ git checkout -b C A &&
+ git -C submod reset --hard C &&
+ git add submod &&
+ git commit -m C &&
+
+ git checkout -q B^0 &&
+ git merge -s ours -m D C^0 &&
+ git tag D &&
+
+ git checkout -q C^0 &&
+ git merge -s ours -m E B^0 &&
+ git tag E
+ )
+'
+
+test_expect_failure 'check submodule add/add' '
+ (
+ cd submodule-add-add &&
+
+ git checkout D^0 &&
+
+ test_must_fail git merge -s recursive E^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 3 out &&
+ git ls-files -u >out &&
+ test_line_count = 2 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out
+ )
+'
+
+#
+# criss-cross with conflicting entry types:
+#
+# B D
+# o---o
+# / \ / \
+# A o X ? F
+# \ / \ /
+# o---o
+# C E
+#
+# Commit A: nothing of note
+# Commit B: introduce submodule 'path'
+# Commit C: introduce symlink 'path'
+# Commit D: merge B&C, resolving in favor of B
+# Commit E: merge B&C, resolving in favor of C
+#
+# This is an obvious add/add conflict for 'path'. Can git detect it?
+
+test_expect_success 'setup conflicting entry types (submodule vs symlink)' '
+ test_create_repo submodule-symlink-add-add &&
+ (
+ cd submodule-symlink-add-add &&
+
+ test_create_repo path &&
+ (
+ cd path &&
+ touch file-B &&
+ git add file-B &&
+ git commit -m B &&
+ git tag B
+ ) &&
+
+ touch irrelevant-file &&
+ git add irrelevant-file &&
+ git commit -m A &&
+ git tag A &&
+
+ git checkout -b B A &&
+ git -C path reset --hard B &&
+ git add path &&
+ git commit -m B &&
+
+ git checkout -b C A &&
+ rm -rf path/ &&
+ test_ln_s_add irrelevant-file path &&
+ git commit -m C &&
+
+ git checkout -q B^0 &&
+ git merge -s ours -m D C^0 &&
+ git tag D &&
+
+ git checkout -q C^0 &&
+ git merge -s ours -m E B^0 &&
+ git tag E
+ )
+'
+
+test_expect_failure 'check conflicting entry types (submodule vs symlink)' '
+ (
+ cd submodule-symlink-add-add &&
+
+ git checkout D^0 &&
+
+ test_must_fail git merge -s recursive E^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 3 out &&
+ git ls-files -u >out &&
+ test_line_count = 2 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out
+ )
+'
+
+#
+# criss-cross with regular files that have conflicting modes:
+#
+# B D
+# o---o
+# / \ / \
+# A o X ? F
+# \ / \ /
+# o---o
+# C E
+#
+# Commit A: nothing of note
+# Commit B: introduce file source_me.bash, not executable
+# Commit C: introduce file source_me.bash, executable
+# Commit D: merge B&C, resolving in favor of B
+# Commit E: merge B&C, resolving in favor of C
+#
+# This is an obvious add/add mode conflict. Can git detect it?
+
+test_expect_success 'setup conflicting modes for regular file' '
+ test_create_repo regular-file-mode-conflict &&
+ (
+ cd regular-file-mode-conflict &&
+
+ touch irrelevant-file &&
+ git add irrelevant-file &&
+ git commit -m A &&
+ git tag A &&
+
+ git checkout -b B A &&
+ echo "command_to_run" >source_me.bash &&
+ git add source_me.bash &&
+ git commit -m B &&
+
+ git checkout -b C A &&
+ echo "command_to_run" >source_me.bash &&
+ git add source_me.bash &&
+ test_chmod +x source_me.bash &&
+ git commit -m C &&
+
+ git checkout -q B^0 &&
+ git merge -s ours -m D C^0 &&
+ git tag D &&
+
+ git checkout -q C^0 &&
+ git merge -s ours -m E B^0 &&
+ git tag E
+ )
+'
+
+test_expect_failure 'check conflicting modes for regular file' '
+ (
+ cd regular-file-mode-conflict &&
+
+ git checkout D^0 &&
+
+ test_must_fail git merge -s recursive E^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 3 out &&
+ git ls-files -u >out &&
+ test_line_count = 2 out &&
+ git ls-files -o >out &&
+ test_line_count = 1 out
+ )
+'
+
test_done
base:file left-conflict:newfile right:file &&
git rev-parse >actual \
:1:newfile :2:newfile :3:newfile &&
- test_cmp expect actual
+ test_cmp expect actual &&
test_path_is_file newfile/realfile &&
test_path_is_file newfile~HEAD
			C:a A:a B:b C:c &&
git rev-parse >actual \
:3:a :1:a :2:b :3:c &&
- test_cmp expect actual
+ test_cmp expect actual &&
test_path_is_file a &&
test_path_is_file b &&
A:a C:b B:b C:c B:c &&
git rev-parse >actual \
:1:a :2:b :3:b :2:c :3:c &&
- test_cmp expect actual
+ test_cmp expect actual &&
git rev-parse >expect \
C:c B:c C:b B:b &&
git hash-object >actual \
c~HEAD c~B\^0 b~HEAD b~B\^0 &&
- test_cmp expect actual
+ test_cmp expect actual &&
test_path_is_missing b &&
test_path_is_missing c
)
'
+# Testcase rad, rename/add/delete
+# Commit O: foo
+# Commit A: rm foo, add different bar
+# Commit B: rename foo->bar
+# Expected: CONFLICT (rename/add/delete), two-way merged bar
+
+test_expect_success 'rad-setup: rename/add/delete conflict' '
+ test_create_repo rad &&
+ (
+ cd rad &&
+ echo "original file" >foo &&
+ git add foo &&
+ git commit -m "original" &&
+
+ git branch O &&
+ git branch A &&
+ git branch B &&
+
+ git checkout A &&
+ git rm foo &&
+ echo "different file" >bar &&
+ git add bar &&
+ git commit -m "Remove foo, add bar" &&
+
+ git checkout B &&
+ git mv foo bar &&
+ git commit -m "rename foo to bar"
+ )
+'
+
+test_expect_failure 'rad-check: rename/add/delete conflict' '
+ (
+ cd rad &&
+
+ git checkout B^0 &&
+ test_must_fail git merge -s recursive A^0 >out 2>err &&
+
+ # Not sure whether the output should contain just one
+ # "CONFLICT (rename/add/delete)" line, or if it should break
+ # it into a pair of "CONFLICT (rename/delete)" and
+ # "CONFLICT (rename/add)"; allow for either.
+ test_i18ngrep "CONFLICT (rename.*add)" out &&
+ test_i18ngrep "CONFLICT (rename.*delete)" out &&
+ test_must_be_empty err &&
+
+ git ls-files -s >file_count &&
+ test_line_count = 2 file_count &&
+ git ls-files -u >file_count &&
+ test_line_count = 2 file_count &&
+ git ls-files -o >file_count &&
+ test_line_count = 2 file_count &&
+
+		git rev-parse >actual \
+			:2:bar :3:bar &&
+		git rev-parse >expect \
+			B:bar A:bar &&
+		test_cmp expect actual &&
+
+		test_path_is_missing foo &&
+ # bar should have two-way merged contents of the different
+ # versions of bar; check that content from both sides is
+ # present.
+ grep original bar &&
+ grep different bar
+ )
+'
+
+# Testcase rrdd, rename/rename(2to1)/delete/delete
+# Commit O: foo, bar
+# Commit A: rename foo->baz, rm bar
+# Commit B: rename bar->baz, rm foo
+# Expected: CONFLICT (rename/rename/delete/delete), two-way merged baz
+
+test_expect_success 'rrdd-setup: rename/rename(2to1)/delete/delete conflict' '
+ test_create_repo rrdd &&
+ (
+ cd rrdd &&
+ echo foo >foo &&
+ echo bar >bar &&
+ git add foo bar &&
+ git commit -m O &&
+
+ git branch O &&
+ git branch A &&
+ git branch B &&
+
+ git checkout A &&
+ git mv foo baz &&
+ git rm bar &&
+ git commit -m "Rename foo, remove bar" &&
+
+ git checkout B &&
+ git mv bar baz &&
+ git rm foo &&
+ git commit -m "Rename bar, remove foo"
+ )
+'
+
+test_expect_failure 'rrdd-check: rename/rename(2to1)/delete/delete conflict' '
+ (
+ cd rrdd &&
+
+ git checkout A^0 &&
+ test_must_fail git merge -s recursive B^0 >out 2>err &&
+
+ # Not sure whether the output should contain just one
+ # "CONFLICT (rename/rename/delete/delete)" line, or if it
+ # should break it into three: "CONFLICT (rename/rename)" and
+ # two "CONFLICT (rename/delete)" lines; allow for either.
+ test_i18ngrep "CONFLICT (rename/rename)" out &&
+ test_i18ngrep "CONFLICT (rename.*delete)" out &&
+ test_must_be_empty err &&
+
+ git ls-files -s >file_count &&
+ test_line_count = 2 file_count &&
+ git ls-files -u >file_count &&
+ test_line_count = 2 file_count &&
+ git ls-files -o >file_count &&
+ test_line_count = 2 file_count &&
+
+		git rev-parse >actual \
+			:2:baz :3:baz &&
+		git rev-parse >expect \
+			O:foo O:bar &&
+		test_cmp expect actual &&
+
+		test_path_is_missing foo &&
+		test_path_is_missing bar &&
+ # baz should have two-way merged contents of the original
+ # contents of foo and bar; check that content from both sides
+ # is present.
+ grep foo baz &&
+ grep bar baz
+ )
+'
+
+# Testcase mod6, chains of rename/rename(1to2) and rename/rename(2to1)
+# Commit O: one, three, five
+# Commit A: one->two, three->four, five->six
+# Commit B: one->six, three->two, five->four
+# Expected: six CONFLICT(rename/rename) messages, with each original path's
+#           content ending up in two of the multi-way merged results found
+#           in two, four, and six
+
+test_expect_success 'mod6-setup: chains of rename/rename(1to2) and rename/rename(2to1)' '
+ test_create_repo mod6 &&
+ (
+ cd mod6 &&
+ test_seq 11 19 >one &&
+ test_seq 31 39 >three &&
+ test_seq 51 59 >five &&
+ git add . &&
+ test_tick &&
+ git commit -m "O" &&
+
+ git branch O &&
+ git branch A &&
+ git branch B &&
+
+ git checkout A &&
+ test_seq 10 19 >one &&
+ echo 40 >>three &&
+ git add one three &&
+ git mv one two &&
+ git mv three four &&
+ git mv five six &&
+ test_tick &&
+ git commit -m "A" &&
+
+ git checkout B &&
+ echo 20 >>one &&
+ echo forty >>three &&
+ echo 60 >>five &&
+ git add one three five &&
+ git mv one six &&
+ git mv three two &&
+ git mv five four &&
+ test_tick &&
+ git commit -m "B"
+ )
+'
+
+test_expect_failure 'mod6-check: chains of rename/rename(1to2) and rename/rename(2to1)' '
+ (
+ cd mod6 &&
+
+ git checkout A^0 &&
+
+ test_must_fail git merge -s recursive B^0 >out 2>err &&
+
+ test_i18ngrep "CONFLICT (rename/rename)" out &&
+ test_must_be_empty err &&
+
+ git ls-files -s >file_count &&
+ test_line_count = 6 file_count &&
+ git ls-files -u >file_count &&
+ test_line_count = 6 file_count &&
+ git ls-files -o >file_count &&
+ test_line_count = 3 file_count &&
+
+ test_seq 10 20 >merged-one &&
+ test_seq 51 60 >merged-five &&
+ # Determine what the merge of three would give us.
+		test_seq 31 40 >three-side-A &&
+ test_seq 31 39 >three-side-B &&
+		echo forty >>three-side-B &&
+ >empty &&
+ test_must_fail git merge-file \
+ -L "HEAD" \
+ -L "" \
+ -L "B^0" \
+ three-side-A empty three-side-B &&
+ sed -e "s/^\([<=>]\)/\1\1\1/" three-side-A >merged-three &&
+
+ # Verify the index is as expected
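+		# ('two' collides between A's rename of 'one' and B's rename
+		# of 'three', 'four' between A's 'three' and B's 'five', and
+		# 'six' between A's 'five' and B's 'one', hence the pairing
+		# of merged contents below.)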
+ git rev-parse >actual \
+ :2:two :3:two \
+ :2:four :3:four \
+ :2:six :3:six &&
+ git hash-object >expect \
+ merged-one merged-three \
+ merged-three merged-five \
+ merged-five merged-one &&
+ test_cmp expect actual &&
+
+ git cat-file -p :2:two >expect &&
+ git cat-file -p :3:two >other &&
+ test_must_fail git merge-file \
+ -L "HEAD" -L "" -L "B^0" \
+ expect empty other &&
+ test_cmp expect two &&
+
+ git cat-file -p :2:four >expect &&
+ git cat-file -p :3:four >other &&
+ test_must_fail git merge-file \
+ -L "HEAD" -L "" -L "B^0" \
+ expect empty other &&
+ test_cmp expect four &&
+
+ git cat-file -p :2:six >expect &&
+ git cat-file -p :3:six >other &&
+ test_must_fail git merge-file \
+ -L "HEAD" -L "" -L "B^0" \
+ expect empty other &&
+ test_cmp expect six
+ )
+'
+
test_done
grep -q stuff z/c &&
test_seq 1 10 >expected &&
echo stuff >>expected &&
- test_cmp expected z/c
+ test_cmp expected z/c &&
git ls-files -s >out &&
test_line_count = 4 out &&
touch subdir/e &&
git add subdir/e &&
- test_must_fail git merge E^0
+ test_must_fail git merge E^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_expect_success 'resolve, trivial' '
touch random_file && git add random_file &&
- test_must_fail git merge -s resolve C^0
+ test_must_fail git merge -s resolve C^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_expect_success 'resolve, non-trivial' '
touch random_file && git add random_file &&
- test_must_fail git merge -s resolve D^0
+ test_must_fail git merge -s resolve D^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_expect_success 'recursive' '
touch random_file && git add random_file &&
- test_must_fail git merge -s recursive C^0
+ test_must_fail git merge -s recursive C^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_expect_success 'recursive, when merge branch matches merge base' '
touch random_file && git add random_file &&
- test_must_fail git merge -s recursive F^0
+ test_must_fail git merge -s recursive F^0 &&
+ test_path_is_missing .git/MERGE_HEAD
+'
+
+test_expect_success 'merge-recursive, when index==head but head!=HEAD' '
+ git reset --hard &&
+ git checkout C^0 &&
+
+ # Make index match B
+ git diff C B -- | git apply --cached &&
+ # Merge B & F, with B as "head"
+ git merge-recursive A -- B F > out &&
+ test_i18ngrep "Already up to date" out
+'
+
+test_expect_success 'recursive, when file has staged changes not matching HEAD nor what a merge would give' '
+ git reset --hard &&
+ git checkout B^0 &&
+
+ mkdir subdir &&
+ test_seq 1 10 >subdir/a &&
+ git add subdir/a &&
+
+ # We have staged changes; merge should error out
+ test_must_fail git merge -s recursive E^0 2>err &&
+ test_i18ngrep "changes to the following files would be overwritten" err
+'
+
+test_expect_success 'recursive, when file has staged changes matching what a merge would give' '
+ git reset --hard &&
+ git checkout B^0 &&
+
+ mkdir subdir &&
+ test_seq 1 11 >subdir/a &&
+ git add subdir/a &&
+
+	# We have staged changes; even though they happen to match what the
+	# merge would write to the file, merge should still error out
+ test_must_fail git merge -s recursive E^0 2>err &&
+ test_i18ngrep "changes to the following files would be overwritten" err
'
test_expect_success 'octopus, unrelated file touched' '
touch random_file && git add random_file &&
- test_must_fail git merge C^0 D^0
+ test_must_fail git merge C^0 D^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_expect_success 'octopus, related file removed' '
git rm b &&
- test_must_fail git merge C^0 D^0
+ test_must_fail git merge C^0 D^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_expect_success 'octopus, related file modified' '
echo 12 >>a && git add a &&
- test_must_fail git merge C^0 D^0
+ test_must_fail git merge C^0 D^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_expect_success 'ours' '
touch random_file && git add random_file &&
- test_must_fail git merge -s ours C^0
+ test_must_fail git merge -s ours C^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_expect_success 'subtree' '
touch random_file && git add random_file &&
- test_must_fail git merge -s subtree E^0
+ test_must_fail git merge -s subtree E^0 &&
+ test_path_is_missing .git/MERGE_HEAD
'
test_done
git checkout A^0 &&
- GIT_MERGE_VERBOSITY=3 test_must_fail git merge -s recursive B^0 >out 2>err &&
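+	# A one-shot "VAR=VAL shell_function" assignment (here, in front of
+	# test_must_fail) has unspecified effects in POSIX shells, so export
+	# the variable instead.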
+ GIT_MERGE_VERBOSITY=3 &&
+ export GIT_MERGE_VERBOSITY &&
+ test_must_fail git merge -s recursive B^0 >out 2>err &&
test_i18ngrep "CONFLICT (rename/add): Rename b->c" out &&
test_i18ngrep ! "Skipped c" out &&
)
'
+test_expect_success 'for-each-ref --ignore-case ignores case' '
+ >expect &&
+ git for-each-ref --format="%(refname)" refs/heads/MASTER >actual &&
+ test_cmp expect actual &&
+
+ echo refs/heads/master >expect &&
+ git for-each-ref --format="%(refname)" --ignore-case \
+ refs/heads/MASTER >actual &&
+ test_cmp expect actual
+'
+
test_done
touch nested_level1 &&
git init &&
git add . &&
- git commit -m "nested level 1"
+ git commit -m "nested level 1" &&
git submodule add ../sub_nested_nested &&
git commit -m "add nested level 2"
) &&
test_expect_success PERL 'saying "n" does nothing' '
set_and_save_state dir/foo work work &&
- (echo n; echo n) | git reset -p &&
+ test_write_lines n n | git reset -p &&
verify_saved_state dir/foo &&
verify_saved_state bar
'
test_expect_success PERL 'git reset -p' '
- (echo n; echo y) | git reset -p >output &&
+ test_write_lines n y | git reset -p >output &&
verify_state dir/foo work head &&
verify_saved_state bar &&
test_i18ngrep "Unstage" output
'
test_expect_success PERL 'git reset -p HEAD^' '
- (echo n; echo y) | git reset -p HEAD^ >output &&
+ test_write_lines n y | git reset -p HEAD^ >output &&
verify_state dir/foo work parent &&
verify_saved_state bar &&
test_i18ngrep "Apply" output
test_expect_success PERL 'git reset -p dir' '
set_state dir/foo work work &&
- (echo y; echo n) | git reset -p dir &&
+ test_write_lines y n | git reset -p dir &&
verify_state dir/foo work head &&
verify_saved_state bar
'
test_expect_success PERL 'git reset -p -- foo (inside dir)' '
set_state dir/foo work work &&
- (echo y; echo n) | (cd dir && git reset -p -- foo) &&
+ test_write_lines y n | (cd dir && git reset -p -- foo) &&
verify_state dir/foo work head &&
verify_saved_state bar
'
test_expect_success PERL 'git reset -p HEAD^ -- dir' '
- (echo y; echo n) | git reset -p HEAD^ -- dir &&
+ test_write_lines y n | git reset -p HEAD^ -- dir &&
verify_state dir/foo work parent &&
verify_saved_state bar
'
cat sample >filf &&
git checkout -m -- fild file filf &&
(
- echo "<<<<<<< ours"
- echo ourside
- echo "======="
- echo theirside
+ echo "<<<<<<< ours" &&
+ echo ourside &&
+ echo "=======" &&
+ echo theirside &&
echo ">>>>>>> theirs"
) >merged &&
test_cmp expect fild &&
cat sample >filf &&
git checkout -m -- fild file filf &&
(
- echo "<<<<<<< ours"
- echo ourside
- echo "||||||| base"
- echo original
- echo "======="
- echo theirside
+ echo "<<<<<<< ours" &&
+ echo ourside &&
+ echo "||||||| base" &&
+ echo original &&
+ echo "=======" &&
+ echo theirside &&
echo ">>>>>>> theirs"
) >merged &&
test_cmp expect fild &&
cat sample >filf &&
git checkout --conflict=merge -- fild file filf &&
(
- echo "<<<<<<< ours"
- echo ourside
- echo "======="
- echo theirside
+ echo "<<<<<<< ours" &&
+ echo ourside &&
+ echo "=======" &&
+ echo theirside &&
echo ">>>>>>> theirs"
) >merged &&
test_cmp expect fild &&
cat sample >filf &&
git checkout --conflict=diff3 -- fild file filf &&
(
- echo "<<<<<<< ours"
- echo ourside
- echo "||||||| base"
- echo original
- echo "======="
- echo theirside
+ echo "<<<<<<< ours" &&
+ echo ourside &&
+ echo "||||||| base" &&
+ echo original &&
+ echo "=======" &&
+ echo theirside &&
echo ">>>>>>> theirs"
) >merged &&
test_cmp expect fild &&
do
grep $t arm || exit 1
done
- exit 0
) &&
mv arm expect &&
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo f; echo "*"; echo; echo c) | \
+ test_write_lines f "*" "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo f; echo "part3.* *.out"; echo; echo c) | \
+ test_write_lines f "part3.* *.out" "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo f; echo "* !*.out"; echo; echo c) | \
+ test_write_lines f "* !*.out" "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo s; echo "*"; echo; echo c) | \
+ test_write_lines s "*" "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo s; echo; echo c) | \
+ test_write_lines s "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo s; echo 3; echo; echo c) | \
+ test_write_lines s 3 "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo s; echo 2 3; echo 5; echo; echo c) | \
+ test_write_lines s "2 3" 5 "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo s; echo 3,4 5; echo; echo c) | \
+ test_write_lines s "3,4 5" "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
mkdir -p build docs &&
touch a.out foo.txt bar.txt baz.txt &&
- (echo s; echo a.out fo ba bar; echo; echo c) | \
+ test_write_lines s "a.out fo ba bar" "" c |
git clean -id &&
test -f Makefile &&
test ! -f a.out &&
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo s; echo 1,3-4; echo 2; echo; echo c) | \
+ test_write_lines s "1,3-4" 2 "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo s; echo 4- 1; echo; echo c) | \
+ test_write_lines s "4- 1" "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo s; echo "*"; echo -5- 1 -2; echo; echo c) | \
+ test_write_lines s "*" "-5- 1 -2" "" c |
git clean -id &&
test -f Makefile &&
test -f README &&
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo a; echo Y; echo y; echo no; echo yes; echo bad; echo) | \
+ test_write_lines a Y y no yes bad "" |
git clean -id &&
test -f Makefile &&
test -f README &&
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (echo a; echo Y; echo no; echo yes; echo "\04") | \
+ test_write_lines a Y no yes "\04" |
git clean -id &&
test -f Makefile &&
test -f README &&
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (cd build/ && \
- (echo f; echo "docs"; echo "*.h"; echo ; echo c) | \
+ (cd build/ &&
+ test_write_lines f docs "*.h" "" c |
git clean -id ..) &&
test -f Makefile &&
test -f README &&
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (cd build/ && \
- (echo s; echo "../docs/"; echo "../src/part3.c"; \
- echo "../src/part4.c"; echo; echo c) | \
+ (cd build/ &&
+ test_write_lines s ../docs/ ../src/part3.c ../src/part4.c "" c |
git clean -id ..) &&
test -f Makefile &&
test -f README &&
mkdir -p build docs &&
touch a.out src/part3.c src/part3.h src/part4.c src/part4.h \
docs/manual.txt obj.o build/lib.so &&
- (cd build/ && \
- (echo a; echo Y; echo y; echo no; echo yes; echo bad; echo) | \
+ (cd build/ &&
+ test_write_lines a Y y no yes bad "" |
git clean -id ..) &&
test -f Makefile &&
test -f README &&
test_expect_success 'submodule add to reconfigure existing submodule with --force' '
(
cd addtest-ignore &&
- git submodule add --force bogus-url submod &&
- git submodule add -b initial "$submodurl" submod-branch &&
- test "bogus-url" = "$(git config -f .gitmodules submodule.submod.url)" &&
- test "bogus-url" = "$(git config submodule.submod.url)" &&
+ bogus_url="$(pwd)/bogus-url" &&
+ git submodule add --force "$bogus_url" submod &&
+ git submodule add --force -b initial "$submodurl" submod-branch &&
+ test "$bogus_url" = "$(git config -f .gitmodules submodule.submod.url)" &&
+ test "$bogus_url" = "$(git config submodule.submod.url)" &&
# Restore the url
- git submodule add --force "$submodurl" submod
+ git submodule add --force "$submodurl" submod &&
test "$submodurl" = "$(git config -f .gitmodules submodule.submod.url)" &&
test "$submodurl" = "$(git config submodule.submod.url)"
)
cp pristine-.git-config .git/config &&
cp pristine-.gitmodules .gitmodules &&
mkdir -p a/b/c &&
- (cd a/b/c; git init) &&
+ (cd a/b/c && git init) &&
git config remote.origin.url ../foo/bar.git &&
git submodule add ../bar/a/b/c ./a/b/c &&
git submodule init &&
grep "$(cat expect3)" actual > /dev/null)
'
+# File/submodule conflict
+# Commit O: <empty>
+# Commit A: path (submodule)
+# Commit B: path
+# Expected: path/ is submodule and file contents for B's path are somewhere
+
+test_expect_success 'setup file/submodule conflict' '
+ test_create_repo file-submodule &&
+ (
+ cd file-submodule &&
+
+ git commit --allow-empty -m O &&
+
+ git branch A &&
+ git branch B &&
+
+ git checkout B &&
+ echo content >path &&
+ git add path &&
+ git commit -m B &&
+
+ git checkout A &&
+ test_create_repo path &&
+ test_commit -C path world &&
+ git submodule add ./path &&
+ git commit -m A
+ )
+'
+
+test_expect_failure 'file/submodule conflict' '
+ test_when_finished "git -C file-submodule reset --hard" &&
+ (
+ cd file-submodule &&
+
+ git checkout A^0 &&
+ test_must_fail git merge B^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 3 out &&
+ git ls-files -u >out &&
+ test_line_count = 2 out &&
+
+ # path/ is still a submodule
+ test_path_is_dir path/.git &&
+
+ # There is a submodule at "path", so B:path cannot be written
+ # there. We expect it to be written somewhere in the same
+ # directory, though, so just grep for its content in all
+ # files, and ignore "grep: path: Is a directory" message
+ echo Checking if contents from B:path showed up anywhere &&
+ grep -q content * 2>/dev/null
+ )
+'
+
+test_expect_success 'file/submodule conflict; merge --abort works afterward' '
+ test_when_finished "git -C file-submodule reset --hard" &&
+ (
+ cd file-submodule &&
+
+ git checkout A^0 &&
+ test_must_fail git merge B^0 >out 2>err &&
+
+ test_path_is_file .git/MERGE_HEAD &&
+ git merge --abort
+ )
+'
+
+# Directory/submodule conflict
+# Commit O: <empty>
+# Commit A: path (submodule), with sole tracked file named 'world'
+# Commit B1: path/file
+# Commit B2: path/world
+#
+# Expected from merge of A & B1:
+# Contents under path/ from commit B1 are renamed elsewhere; we do not
+# want to write files from one of our tracked directories into a submodule
+#
+# Expected from merge of A & B2:
+# Similar to last merge, but with a slight twist: we don't want paths
+# under the submodule to be treated as untracked or in the way.
+
+test_expect_success 'setup directory/submodule conflict' '
+ test_create_repo directory-submodule &&
+ (
+ cd directory-submodule &&
+
+ git commit --allow-empty -m O &&
+
+ git branch A &&
+ git branch B1 &&
+ git branch B2 &&
+
+ git checkout B1 &&
+ mkdir path &&
+ echo contents >path/file &&
+ git add path/file &&
+ git commit -m B1 &&
+
+ git checkout B2 &&
+ mkdir path &&
+ echo contents >path/world &&
+ git add path/world &&
+ git commit -m B2 &&
+
+ git checkout A &&
+ test_create_repo path &&
+ test_commit -C path hello world &&
+ git submodule add ./path &&
+ git commit -m A
+ )
+'
+
+test_expect_failure 'directory/submodule conflict; keep submodule clean' '
+ test_when_finished "git -C directory-submodule reset --hard" &&
+ (
+ cd directory-submodule &&
+
+ git checkout A^0 &&
+ test_must_fail git merge B1^0 &&
+
+ git ls-files -s >out &&
+ test_line_count = 3 out &&
+ git ls-files -u >out &&
+ test_line_count = 1 out &&
+
+ # path/ is still a submodule
+ test_path_is_dir path/.git &&
+
+ echo Checking if contents from B1:path/file showed up &&
+ # Would rather use grep -r, but that is GNU extension...
+ git ls-files -co | xargs grep -q contents 2>/dev/null &&
+
+ # However, B1:path/file should NOT have shown up at path/file,
+ # because we should not write into the submodule
+ test_path_is_missing path/file
+ )
+'
+
+test_expect_failure 'directory/submodule conflict; should not treat submodule files as untracked or in the way' '
+ test_when_finished "git -C directory-submodule/path reset --hard" &&
+ test_when_finished "git -C directory-submodule reset --hard" &&
+ (
+ cd directory-submodule &&
+
+ git checkout A^0 &&
+ test_must_fail git merge B2^0 >out 2>err &&
+
+ # We do not want files within the submodule to prevent the
+ # merge from starting; we should not be writing to such paths
+ # anyway.
+ test_i18ngrep ! "refusing to lose untracked file at" err
+ )
+'
+
+test_expect_failure 'directory/submodule conflict; merge --abort works afterward' '
+ test_when_finished "git -C directory-submodule/path reset --hard" &&
+ test_when_finished "git -C directory-submodule reset --hard" &&
+ (
+ cd directory-submodule &&
+
+ git checkout A^0 &&
+ test_must_fail git merge B2^0 &&
+ test_path_is_file .git/MERGE_HEAD &&
+
+ # merge --abort should succeed, should clear .git/MERGE_HEAD,
+ # and should not leave behind any conflicted files
+ git merge --abort &&
+ test_path_is_missing .git/MERGE_HEAD &&
+ git ls-files -u >conflicts &&
+ test_must_be_empty conflicts
+ )
+'
+
test_done
(cd submodule/subsubmodule &&
git log > ../../expected
) &&
- (cd .git/modules/submodule/modules/subsubmodule
+ (cd .git/modules/submodule/modules/subsubmodule &&
git log > ../../../../../actual
- )
+ ) &&
test_cmp actual expected
)
'
git commit -am "pre move" &&
H2=$(git rev-parse --short HEAD) &&
git status | sed "s/$H/XXX/" >expect &&
- H=$(cd submodule2; git rev-parse HEAD) &&
+ H=$(cd submodule2 && git rev-parse HEAD) &&
git rm --cached submodule2 &&
rm -rf submodule2 &&
mkdir -p "moved/sub module" &&
cd supersuper &&
echo "I am super super." >file &&
git add file &&
- git commit -m B-super-super-initial
+ git commit -m B-super-super-initial &&
git submodule add "file://$base_dir/super" subwithsub &&
git commit -m B-super-super-added &&
git submodule update --init --recursive &&
)
'
+test_expect_success 'fsck detects corrupt .gitmodules' '
+ git init corrupt &&
+ (
+ cd corrupt &&
+
+ echo "[broken" >.gitmodules &&
+ git add .gitmodules &&
+ git commit -m "broken gitmodules" &&
+
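+		# fsck should flag the unparseable .gitmodules blob with a
+		# gitmodulesParse warning rather than failing or emitting raw
+		# config-parser errors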
+ git fsck 2>output &&
+ grep gitmodulesParse output &&
+ test_i18ngrep ! "bad config" output
+ )
+'
+
test_done
test_expect_success PERL 'can use paths with --interactive' '
echo bong-o-bong >file &&
# 2: update, 1:st path, that is all, 7: quit
- ( echo 2; echo 1; echo; echo 7 ) |
+ test_write_lines 2 1 "" 7 |
git commit -m foo --interactive file &&
git reset --hard HEAD^
'
test_expect_success PERL "commit --interactive doesn't change index if editor aborts" '
echo zoo >file &&
test_must_fail git diff --exit-code >diff1 &&
- (echo u ; echo "*" ; echo q) |
+ test_write_lines u "*" q |
(
EDITOR=: &&
export EDITOR &&
git commit -s -m "thank you" &&
git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
(
- echo thank you
- echo
+ echo thank you &&
+ echo &&
git var GIT_COMMITTER_IDENT |
sed -e "s/>.*/>/" -e "s/^/Signed-off-by: /"
) >expected &&
$existing" &&
git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
(
- echo thank you
- echo
- echo $existing
+ echo thank you &&
+ echo &&
+ echo $existing &&
git var GIT_COMMITTER_IDENT |
sed -e "s/>.*/>/" -e "s/^/Signed-off-by: /"
) >expected &&
$alt" &&
git cat-file commit HEAD | sed -e "1,/^\$/d" > actual &&
(
- echo welcome
- echo
- echo $alt
+ echo welcome &&
+ echo &&
+ echo $alt &&
git var GIT_COMMITTER_IDENT |
sed -e "s/>.*/>/" -e "s/^/Signed-off-by: /"
) >expected &&
$alt" &&
git cat-file commit HEAD | sed -e "1,/^\$/d" > actual &&
(
- echo welcome
- echo
- echo We have now
- echo $alt
- echo
+ echo welcome &&
+ echo &&
+ echo We have now &&
+ echo $alt &&
+ echo &&
git var GIT_COMMITTER_IDENT |
sed -e "s/>.*/>/" -e "s/^/Signed-off-by: /"
) >expected &&
Myfooter: x" &&
git cat-file commit HEAD | sed -e "1,/^\$/d" > actual &&
(
- echo subject
- echo
- echo non-trailer line
- echo Myfooter: x
- echo
+ echo subject &&
+ echo &&
+ echo non-trailer line &&
+ echo Myfooter: x &&
+ echo &&
echo "Signed-off-by: $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL>"
) >expected &&
test_cmp expected actual &&
Myfooter: x" &&
git cat-file commit HEAD | sed -e "1,/^\$/d" > actual &&
(
- echo subject
- echo
- echo non-trailer line
- echo Myfooter: x
+ echo subject &&
+ echo &&
+ echo non-trailer line &&
+ echo Myfooter: x &&
echo "Signed-off-by: $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL>"
) >expected &&
test_cmp expected actual
git commit -m "one" -m "two" -m "three" &&
git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
(
- echo one
- echo
- echo two
- echo
+ echo one &&
+ echo &&
+ echo two &&
+ echo &&
echo three
) >expected &&
test_cmp expected actual
test_when_finished "git branch -D newbranch" &&
test_when_finished "git checkout -f master" &&
git checkout --orphan newbranch &&
+ git rm -f file &&
: >file2 &&
git add file2 &&
git commit --no-verify file2 -m in-side-branch &&
chmod -x "$HOOK"
test_expect_success POSIXPERM 'with non-executable hook' '
- echo "content" >> file &&
+ echo "content" >file &&
git add file &&
git commit -m "content"
test_when_finished "git branch -D newbranch" &&
test_when_finished "git checkout -f master" &&
git checkout --orphan newbranch &&
+ git rm -f file &&
: >file2 &&
git add file2 &&
git commit --no-verify file2 -m in-side-branch &&
test_expect_success 'setup .git file for sub' '
(cd sub &&
- rm -f new-file
+ rm -f new-file &&
REAL="$(pwd)/../.real" &&
- mv .git "$REAL"
+ mv .git "$REAL" &&
echo "gitdir: $REAL" >.git) &&
echo .real >>.gitignore &&
git commit -m "added .real to .gitignore" .gitignore
test_expect_success 'status with a lot of untracked files in the submodule' '
(
- cd sub
+ cd sub &&
i=0 &&
while test $i -lt 1024
do
- >some-file-$i
- i=$(( $i + 1 ))
+ >some-file-$i &&
+ i=$(( $i + 1 )) || exit 1
done
) &&
git status --porcelain sub 2>err.actual &&
git checkout -b delete-base branch1 &&
mkdir -p a/a &&
- (echo one; echo two; echo 3; echo 4) >a/a/file.txt &&
+ test_write_lines one two 3 4 >a/a/file.txt &&
git add a/a/file.txt &&
git commit -m"base file" &&
git checkout -b move-to-b delete-base &&
mkdir -p b/b &&
git mv a/a/file.txt b/b/file.txt &&
- (echo one; echo two; echo 4) >b/b/file.txt &&
+ test_write_lines one two 4 >b/b/file.txt &&
git commit -a -m"move to b" &&
git checkout -b move-to-c delete-base &&
mkdir -p c/c &&
git mv a/a/file.txt c/c/file.txt &&
- (echo one; echo two; echo 3) >c/c/file.txt &&
+ test_write_lines one two 3 >c/c/file.txt &&
git commit -a -m"move to c" &&
git checkout -b stash1 master &&
git checkout -b test$test_count move-to-c &&
test_config mergetool.keepTemporaries true &&
test_must_fail git merge move-to-b &&
- ! (echo a; echo n) | git mergetool a/a/file.txt &&
+ ! test_write_lines a n | git mergetool a/a/file.txt &&
test -d a/a &&
cat >expect <<-\EOF &&
file_BASE_.txt
test_cmp expect actual
'
-test_expect_success 'Abort clean merge with matching dirty index' '
- git add bar &&
- git diff --staged > expect &&
- git merge --no-commit clean_branch &&
- test -f .git/MERGE_HEAD &&
- ### When aborting the merge, git will discard all staged changes,
- ### including those that were staged pre-merge. In other words,
- ### --abort will LOSE any staged changes (the staged changes that
- ### are lost must match the merge result, or the merge would not
- ### have been allowed to start). Change expectations accordingly:
- rm expect &&
- touch expect &&
- # Abort merge
- git merge --abort &&
- test ! -f .git/MERGE_HEAD &&
- test "$pre_merge_head" = "$(git rev-parse HEAD)" &&
- git diff --staged > actual &&
- test_cmp expect actual &&
- test -z "$(git diff)"
-'
-
-test_expect_success 'Reset worktree changes' '
- git reset --hard "$pre_merge_head"
-'
-
test_expect_success 'Fail conflicting merge with matching dirty worktree' '
echo barf > bar &&
git diff > expect &&
test_cmp expect actual
'
-test_expect_success 'Abort conflicting merge with matching dirty index' '
- git add bar &&
- git diff --staged > expect &&
- test_must_fail git merge conflict_branch &&
- test -f .git/MERGE_HEAD &&
- ### When aborting the merge, git will discard all staged changes,
- ### including those that were staged pre-merge. In other words,
- ### --abort will LOSE any staged changes (the staged changes that
- ### are lost must match the merge result, or the merge would not
- ### have been allowed to start). Change expectations accordingly:
- rm expect &&
- touch expect &&
- # Abort merge
- git merge --abort &&
- test ! -f .git/MERGE_HEAD &&
- test "$pre_merge_head" = "$(git rev-parse HEAD)" &&
- git diff --staged > actual &&
- test_cmp expect actual &&
- test -z "$(git diff)"
-'
-
-test_expect_success 'Reset worktree changes' '
- git reset --hard "$pre_merge_head"
-'
-
-test_expect_success 'Abort merge with pre- and post-merge worktree changes' '
- # Pre-merge worktree changes
- echo xyzzy > foo &&
- echo barf > bar &&
- git add bar &&
- git diff > expect &&
- git diff --staged > expect-staged &&
- # Perform merge
- test_must_fail git merge conflict_branch &&
- test -f .git/MERGE_HEAD &&
- # Post-merge worktree changes
- echo yzxxz > foo &&
- echo blech > baz &&
- ### When aborting the merge, git will discard staged changes (bar)
- ### and unmerged changes (baz). Other changes that are neither
- ### staged nor marked as unmerged (foo), will be preserved. For
- ### these changed, git cannot tell pre-merge changes apart from
- ### post-merge changes, so the post-merge changes will be
- ### preserved. Change expectations accordingly:
- git diff -- foo > expect &&
- rm expect-staged &&
- touch expect-staged &&
- # Abort merge
- git merge --abort &&
- test ! -f .git/MERGE_HEAD &&
- test "$pre_merge_head" = "$(git rev-parse HEAD)" &&
- git diff > actual &&
- test_cmp expect actual &&
- git diff --staged > actual-staged &&
- test_cmp expect-staged actual-staged
-'
-
-test_expect_success 'Reset worktree changes' '
- git reset --hard "$pre_merge_head"
-'
-
-test_expect_success 'Abort merge with pre- and post-merge index changes' '
- # Pre-merge worktree changes
- echo xyzzy > foo &&
- echo barf > bar &&
- git add bar &&
- git diff > expect &&
- git diff --staged > expect-staged &&
- # Perform merge
- test_must_fail git merge conflict_branch &&
- test -f .git/MERGE_HEAD &&
- # Post-merge worktree changes
- echo yzxxz > foo &&
- echo blech > baz &&
- git add foo bar &&
- ### When aborting the merge, git will discard all staged changes
- ### (foo, bar and baz), and no changes will be preserved. Whether
- ### the changes were staged pre- or post-merge does not matter
- ### (except for not preventing starting the merge).
- ### Change expectations accordingly:
- rm expect expect-staged &&
- touch expect &&
- touch expect-staged &&
- # Abort merge
- git merge --abort &&
- test ! -f .git/MERGE_HEAD &&
- test "$pre_merge_head" = "$(git rev-parse HEAD)" &&
- git diff > actual &&
- test_cmp expect actual &&
- git diff --staged > actual-staged &&
- test_cmp expect-staged actual-staged
-'
-
test_done
fi
'
+ test_expect_success "grep $L (with --column, --only-matching)" '
+ {
+ echo ${HC}file:1:5:mmap
+ echo ${HC}file:2:5:mmap
+ echo ${HC}file:3:5:mmap
+ echo ${HC}file:3:13:mmap
+ echo ${HC}file:4:5:mmap
+ echo ${HC}file:4:13:mmap
+ echo ${HC}file:5:5:mmap
+ echo ${HC}file:5:13:mmap
+ } >expected &&
+ git grep --column -n -o -e mmap $H >actual &&
+ test_cmp expected actual
+ '
+
test_expect_success "grep $L (t-1)" '
echo "${HC}t/t:1:test" >expected &&
git grep -n -e test $H >actual &&
test_expect_success 'grep from a subdirectory to search wider area (2)' '
mkdir -p s &&
(
- cd s || exit 1
- ( git grep xxyyzz .. >out ; echo $? >status )
- ! test -s out &&
- test 1 = $(cat status)
+ cd s &&
+ test_expect_code 1 git grep xxyyzz .. >out &&
+ ! test -s out
)
'
'
test_expect_success 'blame -L with invalid end' '
- test_must_fail git blame -L1,5 tres 2>errors &&
- test_i18ngrep "has only 2 lines" errors
+ git blame -L1,5 tres >out &&
+ test_line_count = 2 out
'
test_expect_success 'blame parses <end> part of -L' '
git blame -L1,1 tres >out &&
- cat out &&
- test $(wc -l < out) -eq 1
+ test_line_count = 1 out
+'
+
+test_expect_success 'blame -Ln,-(n+1)' '
+ git blame -L3,-4 nine_lines >out &&
+ test_line_count = 3 out
'
test_expect_success 'indent of line numbers, nine lines' '
In-Reply-To: <unique-message-id@example.com>
References: <unique-message-id@example.com>
Reply-To: Reply <reply@example.com>
+MIME-Version: 1.0
+Content-Transfer-Encoding: 8bit
Result: OK
EOF
test_expect_success $PREREQ 'Prompting works' '
clean_fake_sendmail &&
- (echo "to@example.com"
+ (echo "to@example.com" &&
echo ""
) | GIT_SEND_EMAIL_NOTTY=1 git send-email \
--smtp-server="$(pwd)/fake.sendmail" \
--from="Example <nobody@example.com>" \
--to=nobody@example.com \
--smtp-server="$(pwd)/fake.sendmail" \
+ --transfer-encoding=8bit \
$patches longline.patch \
2>errors &&
grep longline.patch errors
2>errors
'
+test_expect_success $PREREQ 'short lines with auto encoding are 8bit' '
+ clean_fake_sendmail &&
+ git send-email \
+ --from="A <author@example.com>" \
+ --to=nobody@example.com \
+ --smtp-server="$(pwd)/fake.sendmail" \
+ --transfer-encoding=auto \
+ $patches &&
+ grep "Content-Transfer-Encoding: 8bit" msgtxt1
+'
+
+test_expect_success $PREREQ 'long lines with auto encoding are quoted-printable' '
+ clean_fake_sendmail &&
+ git send-email \
+ --from="Example <nobody@example.com>" \
+ --to=nobody@example.com \
+ --smtp-server="$(pwd)/fake.sendmail" \
+ --transfer-encoding=auto \
+ --no-validate \
+ longline.patch &&
+ grep "Content-Transfer-Encoding: quoted-printable" msgtxt1
+'
+
+for enc in auto quoted-printable base64
+do
+ test_expect_success $PREREQ "--validate passes with encoding $enc" '
+ git send-email \
+ --from="Example <nobody@example.com>" \
+ --to=nobody@example.com \
+ --smtp-server="$(pwd)/fake.sendmail" \
+ --transfer-encoding=$enc \
+ --validate \
+ $patches longline.patch
+ '
+done
+
test_expect_success $PREREQ 'Invalid In-Reply-To' '
clean_fake_sendmail &&
git send-email \
test_expect_success $PREREQ 'Valid In-Reply-To when prompting' '
clean_fake_sendmail &&
- (echo "From Example <from@example.com>"
- echo "To Example <to@example.com>"
+ (echo "From Example <from@example.com>" &&
+ echo "To Example <to@example.com>" &&
echo ""
) | GIT_SEND_EMAIL_NOTTY=1 git send-email \
--smtp-server="$(pwd)/fake.sendmail" \
Date: DATE-STRING
Message-Id: MESSAGE-ID-STRING
X-Mailer: X-MAILER-STRING
+MIME-Version: 1.0
+Content-Transfer-Encoding: 8bit
Result: OK
EOF
Date: DATE-STRING
Message-Id: MESSAGE-ID-STRING
X-Mailer: X-MAILER-STRING
+MIME-Version: 1.0
+Content-Transfer-Encoding: 8bit
Result: OK
EOF
Date: DATE-STRING
Message-Id: MESSAGE-ID-STRING
X-Mailer: X-MAILER-STRING
+MIME-Version: 1.0
+Content-Transfer-Encoding: 8bit
Result: OK
EOF
Date: DATE-STRING
Message-Id: MESSAGE-ID-STRING
X-Mailer: X-MAILER-STRING
+MIME-Version: 1.0
+Content-Transfer-Encoding: 8bit
Result: OK
EOF
Date: DATE-STRING
Message-Id: MESSAGE-ID-STRING
X-Mailer: X-MAILER-STRING
+MIME-Version: 1.0
+Content-Transfer-Encoding: 8bit
Result: OK
EOF
Date: DATE-STRING
Message-Id: MESSAGE-ID-STRING
X-Mailer: X-MAILER-STRING
+MIME-Version: 1.0
+Content-Transfer-Encoding: 8bit
Result: OK
EOF
Date: DATE-STRING
Message-Id: MESSAGE-ID-STRING
X-Mailer: X-MAILER-STRING
+MIME-Version: 1.0
+Content-Transfer-Encoding: 8bit
Result: OK
EOF
Date: DATE-STRING
Message-Id: MESSAGE-ID-STRING
X-Mailer: X-MAILER-STRING
+MIME-Version: 1.0
+Content-Transfer-Encoding: 8bit
Result: OK
EOF
Date: DATE-STRING
Message-Id: MESSAGE-ID-STRING
X-Mailer: X-MAILER-STRING
+MIME-Version: 1.0
+Content-Transfer-Encoding: 8bit
Result: OK
EOF
# Verify error message when a patch is rejected by the hook
sed -e "s/add master/x/" ../0001-add-master.patch >../another.patch &&
- git send-email \
+ test_must_fail git send-email \
--from="Example <nobody@example.com>" \
--to=nobody@example.com \
--smtp-server="$(pwd)/../fake.sendmail" \
- ../another.patch 2>err
+ ../another.patch 2>err &&
test_i18ngrep "rejected by sendemail-validate hook" err
)
'
(
cd import &&
echo foo >foo &&
- ln -s foo foo.link
+ ln -s foo foo.link &&
mkdir -p dir/a/b/c/d/e &&
echo "deep dir" >dir/a/b/c/d/e/file &&
mkdir bar &&
svn_cmd up &&
svn_cmd propset -R svn:ignore '
no-such-file*
-' .
+' . &&
svn_cmd commit -m 'propset svn:ignore'
) &&
git svn show-ignore > show-ignore.got &&
# same value as "svn info" (i.e. the commit timestamp that touched the
# path most recently); do not expect that field to match.
test_cmp_info () {
- sed -e '/^Text Last Updated:/d' "$1" >tmp.expect
- sed -e '/^Text Last Updated:/d' "$2" >tmp.actual
+ sed -e '/^Text Last Updated:/d' "$1" >tmp.expect &&
+ sed -e '/^Text Last Updated:/d' "$2" >tmp.actual &&
test_cmp tmp.expect tmp.actual &&
rm -f tmp.expect tmp.actual
}
'
test_expect_success 'info' "
- (cd svnwc; svn info) > expected.info &&
- (cd gitwc; git svn info) > actual.info &&
+ (cd svnwc && svn info) > expected.info &&
+ (cd gitwc && git svn info) > actual.info &&
test_cmp_info expected.info actual.info
"
test_expect_success 'info --url' '
- test "$(cd gitwc; git svn info --url)" = "$quoted_svnrepo"
+ test "$(cd gitwc && git svn info --url)" = "$quoted_svnrepo"
'
test_expect_success 'info .' "
- (cd svnwc; svn info .) > expected.info-dot &&
- (cd gitwc; git svn info .) > actual.info-dot &&
+ (cd svnwc && svn info .) > expected.info-dot &&
+ (cd gitwc && git svn info .) > actual.info-dot &&
test_cmp_info expected.info-dot actual.info-dot
"
test_expect_success 'info $(pwd)' '
- (cd svnwc; svn info "$(pwd)") >expected.info-pwd &&
- (cd gitwc; git svn info "$(pwd)") >actual.info-pwd &&
+ (cd svnwc && svn info "$(pwd)") >expected.info-pwd &&
+ (cd gitwc && git svn info "$(pwd)") >actual.info-pwd &&
grep -v ^Path: <expected.info-pwd >expected.info-np &&
grep -v ^Path: <actual.info-pwd >actual.info-np &&
test_cmp_info expected.info-np actual.info-np &&
'
test_expect_success 'info $(pwd)/../___wc' '
- (cd svnwc; svn info "$(pwd)/../svnwc") >expected.info-pwd &&
- (cd gitwc; git svn info "$(pwd)/../gitwc") >actual.info-pwd &&
+ (cd svnwc && svn info "$(pwd)/../svnwc") >expected.info-pwd &&
+ (cd gitwc && git svn info "$(pwd)/../gitwc") >actual.info-pwd &&
grep -v ^Path: <expected.info-pwd >expected.info-np &&
grep -v ^Path: <actual.info-pwd >actual.info-np &&
test_cmp_info expected.info-np actual.info-np &&
'
test_expect_success 'info $(pwd)/../___wc//file' '
- (cd svnwc; svn info "$(pwd)/../svnwc//file") >expected.info-pwd &&
- (cd gitwc; git svn info "$(pwd)/../gitwc//file") >actual.info-pwd &&
+ (cd svnwc && svn info "$(pwd)/../svnwc//file") >expected.info-pwd &&
+ (cd gitwc && git svn info "$(pwd)/../gitwc//file") >actual.info-pwd &&
grep -v ^Path: <expected.info-pwd >expected.info-np &&
grep -v ^Path: <actual.info-pwd >actual.info-np &&
test_cmp_info expected.info-np actual.info-np &&
'
test_expect_success 'info --url .' '
- test "$(cd gitwc; git svn info --url .)" = "$quoted_svnrepo"
+ test "$(cd gitwc && git svn info --url .)" = "$quoted_svnrepo"
'
test_expect_success 'info file' "
- (cd svnwc; svn info file) > expected.info-file &&
- (cd gitwc; git svn info file) > actual.info-file &&
+ (cd svnwc && svn info file) > expected.info-file &&
+ (cd gitwc && git svn info file) > actual.info-file &&
test_cmp_info expected.info-file actual.info-file
"
test_expect_success 'info --url file' '
- test "$(cd gitwc; git svn info --url file)" = "$quoted_svnrepo/file"
+ test "$(cd gitwc && git svn info --url file)" = "$quoted_svnrepo/file"
'
test_expect_success 'info directory' "
- (cd svnwc; svn info directory) > expected.info-directory &&
- (cd gitwc; git svn info directory) > actual.info-directory &&
+ (cd svnwc && svn info directory) > expected.info-directory &&
+ (cd gitwc && git svn info directory) > actual.info-directory &&
test_cmp_info expected.info-directory actual.info-directory
"
test_expect_success 'info inside directory' "
- (cd svnwc/directory; svn info) > expected.info-inside-directory &&
- (cd gitwc/directory; git svn info) > actual.info-inside-directory &&
+ (cd svnwc/directory && svn info) > expected.info-inside-directory &&
+ (cd gitwc/directory && git svn info) > actual.info-inside-directory &&
test_cmp_info expected.info-inside-directory actual.info-inside-directory
"
test_expect_success 'info --url directory' '
- test "$(cd gitwc; git svn info --url directory)" = "$quoted_svnrepo/directory"
+ test "$(cd gitwc && git svn info --url directory)" = "$quoted_svnrepo/directory"
'
test_expect_success 'info symlink-file' "
- (cd svnwc; svn info symlink-file) > expected.info-symlink-file &&
- (cd gitwc; git svn info symlink-file) > actual.info-symlink-file &&
+ (cd svnwc && svn info symlink-file) > expected.info-symlink-file &&
+ (cd gitwc && git svn info symlink-file) > actual.info-symlink-file &&
test_cmp_info expected.info-symlink-file actual.info-symlink-file
"
test_expect_success 'info --url symlink-file' '
- test "$(cd gitwc; git svn info --url symlink-file)" \
+ test "$(cd gitwc && git svn info --url symlink-file)" \
= "$quoted_svnrepo/symlink-file"
'
test_expect_success 'info symlink-directory' "
- (cd svnwc; svn info symlink-directory) \
+ (cd svnwc && svn info symlink-directory) \
> expected.info-symlink-directory &&
- (cd gitwc; git svn info symlink-directory) \
+ (cd gitwc && git svn info symlink-directory) \
> actual.info-symlink-directory &&
test_cmp_info expected.info-symlink-directory actual.info-symlink-directory
"
test_expect_success 'info --url symlink-directory' '
- test "$(cd gitwc; git svn info --url symlink-directory)" \
+ test "$(cd gitwc && git svn info --url symlink-directory)" \
= "$quoted_svnrepo/symlink-directory"
'
cd svnwc &&
svn_cmd add added-file > /dev/null
) &&
- (cd svnwc; svn info added-file) > expected.info-added-file &&
- (cd gitwc; git svn info added-file) > actual.info-added-file &&
+ (cd svnwc && svn info added-file) > expected.info-added-file &&
+ (cd gitwc && git svn info added-file) > actual.info-added-file &&
test_cmp_info expected.info-added-file actual.info-added-file
"
test_expect_success 'info --url added-file' '
- test "$(cd gitwc; git svn info --url added-file)" \
+ test "$(cd gitwc && git svn info --url added-file)" \
= "$quoted_svnrepo/added-file"
'
cd gitwc &&
git add added-directory
) &&
- (cd svnwc; svn info added-directory) \
+ (cd svnwc && svn info added-directory) \
> expected.info-added-directory &&
- (cd gitwc; git svn info added-directory) \
+ (cd gitwc && git svn info added-directory) \
> actual.info-added-directory &&
test_cmp_info expected.info-added-directory actual.info-added-directory
"
test_expect_success 'info --url added-directory' '
- test "$(cd gitwc; git svn info --url added-directory)" \
+ test "$(cd gitwc && git svn info --url added-directory)" \
= "$quoted_svnrepo/added-directory"
'
ln -s added-file added-symlink-file &&
svn_cmd add added-symlink-file > /dev/null
) &&
- (cd svnwc; svn info added-symlink-file) \
+ (cd svnwc && svn info added-symlink-file) \
> expected.info-added-symlink-file &&
- (cd gitwc; git svn info added-symlink-file) \
+ (cd gitwc && git svn info added-symlink-file) \
> actual.info-added-symlink-file &&
test_cmp_info expected.info-added-symlink-file \
actual.info-added-symlink-file
"
test_expect_success 'info --url added-symlink-file' '
- test "$(cd gitwc; git svn info --url added-symlink-file)" \
+ test "$(cd gitwc && git svn info --url added-symlink-file)" \
= "$quoted_svnrepo/added-symlink-file"
'
ln -s added-directory added-symlink-directory &&
svn_cmd add added-symlink-directory > /dev/null
) &&
- (cd svnwc; svn info added-symlink-directory) \
+ (cd svnwc && svn info added-symlink-directory) \
> expected.info-added-symlink-directory &&
- (cd gitwc; git svn info added-symlink-directory) \
+ (cd gitwc && git svn info added-symlink-directory) \
> actual.info-added-symlink-directory &&
test_cmp_info expected.info-added-symlink-directory \
actual.info-added-symlink-directory
"
test_expect_success 'info --url added-symlink-directory' '
- test "$(cd gitwc; git svn info --url added-symlink-directory)" \
+ test "$(cd gitwc && git svn info --url added-symlink-directory)" \
= "$quoted_svnrepo/added-symlink-directory"
'
cd svnwc &&
svn_cmd rm --force file > /dev/null
) &&
- (cd svnwc; svn info file) >expected.info-deleted-file &&
- (cd gitwc; git svn info file) >actual.info-deleted-file &&
+ (cd svnwc && svn info file) >expected.info-deleted-file &&
+ (cd gitwc && git svn info file) >actual.info-deleted-file &&
test_cmp_info expected.info-deleted-file actual.info-deleted-file
"
test_expect_success 'info --url file (deleted)' '
- test "$(cd gitwc; git svn info --url file)" \
+ test "$(cd gitwc && git svn info --url file)" \
= "$quoted_svnrepo/file"
'
cd svnwc &&
svn_cmd rm --force directory > /dev/null
) &&
- (cd svnwc; svn info directory) >expected.info-deleted-directory &&
- (cd gitwc; git svn info directory) >actual.info-deleted-directory &&
+ (cd svnwc && svn info directory) >expected.info-deleted-directory &&
+ (cd gitwc && git svn info directory) >actual.info-deleted-directory &&
test_cmp_info expected.info-deleted-directory actual.info-deleted-directory
"
test_expect_success 'info --url directory (deleted)' '
- test "$(cd gitwc; git svn info --url directory)" \
+ test "$(cd gitwc && git svn info --url directory)" \
= "$quoted_svnrepo/directory"
'
cd svnwc &&
svn_cmd rm --force symlink-file > /dev/null
) &&
- (cd svnwc; svn info symlink-file) >expected.info-deleted-symlink-file &&
- (cd gitwc; git svn info symlink-file) >actual.info-deleted-symlink-file &&
+ (cd svnwc && svn info symlink-file) >expected.info-deleted-symlink-file &&
+ (cd gitwc && git svn info symlink-file) >actual.info-deleted-symlink-file &&
test_cmp_info expected.info-deleted-symlink-file actual.info-deleted-symlink-file
"
test_expect_success 'info --url symlink-file (deleted)' '
- test "$(cd gitwc; git svn info --url symlink-file)" \
+ test "$(cd gitwc && git svn info --url symlink-file)" \
= "$quoted_svnrepo/symlink-file"
'
cd svnwc &&
svn_cmd rm --force symlink-directory > /dev/null
) &&
- (cd svnwc; svn info symlink-directory) >expected.info-deleted-symlink-directory &&
- (cd gitwc; git svn info symlink-directory) >actual.info-deleted-symlink-directory &&
+ (cd svnwc && svn info symlink-directory) >expected.info-deleted-symlink-directory &&
+ (cd gitwc && git svn info symlink-directory) >actual.info-deleted-symlink-directory &&
test_cmp_info expected.info-deleted-symlink-directory actual.info-deleted-symlink-directory
"
test_expect_success 'info --url symlink-directory (deleted)' '
- test "$(cd gitwc; git svn info --url symlink-directory)" \
+ test "$(cd gitwc && git svn info --url symlink-directory)" \
= "$quoted_svnrepo/symlink-directory"
'
test_expect_success 'info unknown-file' "
echo two > gitwc/unknown-file &&
- (cd gitwc; test_must_fail git svn info unknown-file) \
+ (cd gitwc && test_must_fail git svn info unknown-file) \
2> actual.info-unknown-file &&
grep unknown-file actual.info-unknown-file
"
test_expect_success 'info --url unknown-file' '
echo two > gitwc/unknown-file &&
- (cd gitwc; test_must_fail git svn info --url unknown-file) \
+ (cd gitwc && test_must_fail git svn info --url unknown-file) \
2> actual.info-url-unknown-file &&
grep unknown-file actual.info-url-unknown-file
'
test_expect_success 'info unknown-directory' "
mkdir gitwc/unknown-directory svnwc/unknown-directory &&
- (cd gitwc; test_must_fail git svn info unknown-directory) \
+ (cd gitwc && test_must_fail git svn info unknown-directory) \
2> actual.info-unknown-directory &&
grep unknown-directory actual.info-unknown-directory
"
test_expect_success 'info --url unknown-directory' '
- (cd gitwc; test_must_fail git svn info --url unknown-directory) \
+ (cd gitwc && test_must_fail git svn info --url unknown-directory) \
2> actual.info-url-unknown-directory &&
grep unknown-directory actual.info-url-unknown-directory
'
cd gitwc &&
ln -s unknown-file unknown-symlink-file
) &&
- (cd gitwc; test_must_fail git svn info unknown-symlink-file) \
+ (cd gitwc && test_must_fail git svn info unknown-symlink-file) \
2> actual.info-unknown-symlink-file &&
grep unknown-symlink-file actual.info-unknown-symlink-file
"
test_expect_success 'info --url unknown-symlink-file' '
- (cd gitwc; test_must_fail git svn info --url unknown-symlink-file) \
+ (cd gitwc && test_must_fail git svn info --url unknown-symlink-file) \
2> actual.info-url-unknown-symlink-file &&
grep unknown-symlink-file actual.info-url-unknown-symlink-file
'
cd gitwc &&
ln -s unknown-directory unknown-symlink-directory
) &&
- (cd gitwc; test_must_fail git svn info unknown-symlink-directory) \
+ (cd gitwc && test_must_fail git svn info unknown-symlink-directory) \
2> actual.info-unknown-symlink-directory &&
grep unknown-symlink-directory actual.info-unknown-symlink-directory
"
test_expect_success 'info --url unknown-symlink-directory' '
- (cd gitwc; test_must_fail git svn info --url unknown-symlink-directory) \
+ (cd gitwc && test_must_fail git svn info --url unknown-symlink-directory) \
2> actual.info-url-unknown-symlink-directory &&
grep unknown-symlink-directory actual.info-url-unknown-symlink-directory
'
svn_cmd checkout "$svnrepo" work.svn &&
(
cd work.svn &&
- echo >file
- svn_cmd add file
+ echo >file &&
+ svn_cmd add file &&
svn_cmd commit -m "first commit" file
)
'
mkdir work.git &&
(
cd work.git &&
- git svn init "$svnrepo"
+ git svn init "$svnrepo" &&
git svn fetch &&
echo modification >file &&
git add F &&
git commit -a -F "$TEST_DIRECTORY"/t3900/$H.txt &&
E=$(git cat-file commit HEAD | sed -ne "s/^encoding //p") &&
- test "z$E" = "z$H"
+ test "z$E" = "z$H" &&
compare_git_head_with "$TEST_DIRECTORY"/t3900/$H.txt
)
'
test_expect_success 'imported 2 revisions successfully' '
(
- cd x
+ cd x &&
git rev-list refs/remotes/git-svn >actual &&
test_line_count = 2 actual &&
git rev-list -1 --pretty=raw refs/remotes/git-svn >actual &&
test_expect_success 'continues to import once authors have been added' '
(
- cd x
+ cd x &&
git svn fetch --authors-file=../svn-authors &&
git rev-list refs/remotes/git-svn >actual &&
test_line_count = 4 actual &&
test_expect_success 'SVN-side change inside of ignored www' '
(
cd s &&
- echo zaq >> www/test_www.txt
+ echo zaq >> www/test_www.txt &&
svn_cmd commit -m "SVN-side change inside of www/test_www.txt" &&
svn_cmd up &&
svn_cmd log -v | fgrep "SVN-side change inside of www/test_www.txt"
test_expect_success 'SVN-side change in and out of ignored www' '
(
cd s &&
- echo cvf >> www/test_www.txt
- echo ygg >> qqq/test_qqq.txt
+ echo cvf >> www/test_www.txt &&
+ echo ygg >> qqq/test_qqq.txt &&
svn_cmd commit -m "SVN-side change in and out of ignored www" &&
svn_cmd up &&
svn_cmd log -v | fgrep "SVN-side change in and out of ignored www"
test_expect_success 'initialize repo' '
mkdir import &&
(cd import &&
- awk "BEGIN { for (i = 1; i < 64; i++) { print i } }" > file
+ awk "BEGIN { for (i = 1; i < 64; i++) { print i } }" > file &&
svn_cmd import -m "initial" . "$svnrepo"
) &&
git svn init "$svnrepo" &&
test_expect_success 'imported 6 revisions successfully' '
(
- cd x
+ cd x &&
git rev-list refs/remotes/git-svn >actual &&
test_line_count = 6 actual
)
test_expect_success 'authors-prog ran correctly' '
(
- cd x
+ cd x &&
git rev-list -1 --pretty=raw refs/remotes/git-svn~1 >actual &&
grep "^author ee-foo <ee-foo@example\.com> " actual &&
git rev-list -1 --pretty=raw refs/remotes/git-svn~2 >actual &&
test_expect_success 'authors-file overrode authors-prog' '
(
- cd x
+ cd x &&
git rev-list -1 --pretty=raw refs/remotes/git-svn >actual &&
grep "^author FFFFFFF FFFFFFF <fFf@other\.example\.com> " actual
)
do
if ! test -d "$i"
then
- echo >&2 "$i does not exist"
+ echo >&2 "$i does not exist" &&
exit 1
fi
done
do
if test -d "$i"
then
- echo >&2 "$i exists"
+ echo >&2 "$i exists" &&
exit 1
fi
done
do
if ! test -d "$i"
then
- echo >&2 "$i does not exist"
+ echo >&2 "$i does not exist" &&
exit 1
fi
done
do
if ! test -d "$i"
then
- echo >&2 "$i does not exist"
+ echo >&2 "$i does not exist" &&
exit 1
fi
- done
+ done &&
if test -d "! !"
then
- echo >&2 "$i should not exist"
+ echo >&2 "$i should not exist" &&
exit 1
- fi
+ fi &&
git svn mkdirs -r8 &&
if ! test -d "! !"
then
- echo >&2 "$i not exist"
+ echo >&2 "$i not exist" &&
exit 1
fi
)
do
if ! test -d "$i"
then
- echo >&2 "$i does not exist"
+ echo >&2 "$i does not exist" &&
exit 1
fi
done
do
if ! test -d "$i"
then
- echo >&2 "$i does not exist"
+ echo >&2 "$i does not exist" &&
exit 1
fi
done
test_expect_success 'SVN-side change inside of ignored www' '
(
cd s &&
- echo zaq >> www/test_www.txt
+ echo zaq >> www/test_www.txt &&
svn_cmd commit -m "SVN-side change inside of www/test_www.txt" &&
svn_cmd up &&
svn_cmd log -v | fgrep "SVN-side change inside of www/test_www.txt"
test_expect_success 'SVN-side change in and out of included qqq' '
(
cd s &&
- echo cvf >> www/test_www.txt
- echo ygg >> qqq/test_qqq.txt
+ echo cvf >> www/test_www.txt &&
+ echo ygg >> qqq/test_qqq.txt &&
svn_cmd commit -m "SVN-side change in and out of ignored www" &&
svn_cmd up &&
svn_cmd log -v | fgrep "SVN-side change in and out of ignored www"
do
if ! test -d "$i"
then
- echo >&2 "$i does not exist"
+ echo >&2 "$i does not exist" &&
exit 1
fi
done
svn_cmd checkout "$svnrepo" work.svn &&
(
cd work.svn &&
- echo >file && echo > auto_updated_file
+ echo >file && echo > auto_updated_file &&
svn_cmd add file auto_updated_file &&
svn_cmd commit -m "initial commit"
) &&
svn_cmd commit -m trunk &&
svn_cmd switch "$svnrepo"/branches/branch2 &&
svn_cmd merge "$svnrepo"/trunk &&
- svn_cmd commit -m "merge trunk"
+ svn_cmd commit -m "merge trunk" &&
svn_cmd switch "$svnrepo"/trunk &&
svn_cmd merge --reintegrate "$svnrepo"/branches/branch2 &&
svn_cmd commit -m "merge branch2"
git commit -a -m "Update with spaces" &&
id=$(git rev-list --max-count=1 HEAD) &&
(cd "$CVSWORK" &&
- git cvsexportcommit -c $id
+ git cvsexportcommit -c $id &&
check_entries "G g" "with spaces.png/1.2/-kb|with spaces.txt/1.2/"
)'
git add G/off &&
git commit -a -m "Execute test" &&
(cd "$CVSWORK" &&
- git cvsexportcommit -c HEAD
+ git cvsexportcommit -c HEAD &&
test -x G/on &&
! test -x G/off
)'
git add attic_gremlin &&
git commit -m "Added attic_gremlin" &&
git cvsexportcommit -w "$CVSWORK" -c HEAD &&
- (cd "$CVSWORK"; cvs -Q update -d) &&
+ (cd "$CVSWORK" && cvs -Q update -d) &&
test -f "$CVSWORK/attic_gremlin"
'
do
if test $n -gt 30
then
- echo >&2 "checkpoint did not update branch"
+ echo >&2 "checkpoint did not update branch" &&
exit 1
else
n=$(($n + 1))
'(for dir in A A/B A/B/C A/D E; do
mkdir $dir &&
echo "test file in $dir" >"$dir/file_in_$(echo $dir|sed -e "s#/# #g")" &&
- git add $dir;
+ git add $dir
done) &&
git commit -q -m "deep sub directory structure" &&
git push gitcvs.git >/dev/null &&
'echo Line 0 >expected &&
for i in 1 2 3 4 5 6 7
do
- echo Line $i >>merge
+ echo Line $i >>merge &&
echo Line $i >>expected
done &&
echo Line 8 >>expected &&
GIT_CONFIG="$git_config" cvs -Q update &&
test "$(echo $(grep merge CVS/Entries|cut -d/ -f2,3,5))" = "merge/1.1/" &&
test_cmp merge ../merge &&
- ( echo Line 0; cat merge ) >merge.tmp &&
+ ( echo Line 0 && cat merge ) >merge.tmp &&
mv merge.tmp merge &&
cd "$WORKDIR" &&
echo Line 8 >>merge &&
done
test_expect_success 'cvs update (conflict merge)' \
- '( echo LINE 0; cat merge ) >merge.tmp &&
+ '( echo LINE 0 && cat merge ) >merge.tmp &&
mv merge.tmp merge &&
git add merge &&
git commit -q -m "Merge test (conflict)" &&
(cd module-git &&
git log --format="o_fortuna 1.1 %H" -1 HEAD^^ &&
- git log --format="o_fortuna 1.2 %H" -1 HEAD^
+ git log --format="o_fortuna 1.2 %H" -1 HEAD^ &&
git log --format="tick 1.1 %H" -1 HEAD) > expected &&
test_cmp expected module-git/.git/cvs-revisions
'
(
cd "$git" &&
git log --oneline p4/master >lines &&
- test_line_count = 2 lines
+ test_line_count = 2 lines &&
test_path_is_file file1 &&
test_path_is_missing file2 &&
test_path_is_file file3
test_expect_success 'ktext expansion should not expand multi-line $File::' '
(
cd "$cli" &&
- cat >lv.pm <<-\EOF
+ cat >lv.pm <<-\EOF &&
my $wanted = sub { my $f = $File::Find::name;
if ( -f && $f =~ /foo/ ) {
EOF
p4 labels ... | grep LIGHTWEIGHT_TAG &&
p4 label -o GIT_TAG_1 | grep "tag created in git:xyzzy" &&
p4 sync ...@GIT_TAG_1 &&
- ! test -f main/f10
+ ! test -f main/f10 &&
p4 sync ...@GIT_TAG_2 &&
test -f main/f10
)
'
# We rely on this behavior to detect for p4 move availability.
-test_expect_success 'p4 help unknown returns 1' '
+test_expect_success '"p4 help unknown" errors out' '
(
cd "$cli" &&
- (
- p4 help client >errs 2>&1
- echo $? >retval
- )
- echo 0 >expected &&
- test_cmp expected retval &&
- rm retval &&
- (
- p4 help nosuchcommand >errs 2>&1
- echo $? >retval
- )
- echo 1 >expected &&
- test_cmp expected retval &&
- rm retval
+ p4 help client &&
+ ! p4 help nosuchcommand
)
'
(
cd "$cli" &&
test_path_is_missing text2 &&
- p4 fstat -T action text2 2>&1 | grep "no such file"
+ p4 fstat -T action text2 2>&1 | grep "no such file" &&
test_path_is_file text &&
! p4 fstat -T action text
)
(
cd "$cli" &&
p4 sync &&
- test -L some/sub/directory/subdir2
+ test -L some/sub/directory/subdir2 &&
test_path_is_file some/sub/directory/subdir2/file.t
)
cd "$cli" &&
echo file1 >file1 &&
p4 add file1 &&
- p4 submit -d "change 1"
+ p4 submit -d "change 1" &&
echo file2 >file2 &&
p4 add file2 &&
p4 submit -d "change 2"
) &&
p4 passwd -P newpassword &&
(
- P4PASSWD=badpassword test_must_fail git p4 clone //depot/foo 2>errmsg &&
+ P4PASSWD=badpassword &&
+ export P4PASSWD &&
+ test_must_fail git p4 clone //depot/foo 2>errmsg &&
grep -q "failure accessing depot.*P4PASSWD" errmsg
)
'
master-in-other Z
EOF
(
- cur=
+ cur= &&
__git_complete_refs --remote=other &&
print_comp
) &&
master-in-other Z
EOF
(
- cur=
+ cur= &&
__git_complete_refs --track &&
print_comp
) &&
printf "BEFORE: (${c_green}\${__git_ps1_branch_name}${c_clear}):AFTER\\nmaster" >expected &&
(
GIT_PS1_SHOWCOLORHINTS=y &&
- __git_ps1 "BEFORE:" ":AFTER" >"$actual"
+ __git_ps1 "BEFORE:" ":AFTER" >"$actual" &&
printf "%s\\n%s" "$PS1" "${__git_ps1_branch_name}" >"$actual"
) &&
test_cmp expected "$actual"
trace=
# 117 is magic because it is unlikely to match the exit
# code of other programs
- if test "OK-117" != "$(test_eval_ "(exit 117) && $1${LF}${LF}echo OK-\$?" 3>&1)"
+ if $(printf '%s\n' "$1" | sed -f "$GIT_BUILD_DIR/t/chainlint.sed" | grep -q '?![A-Z][A-Z]*?!') ||
+ test "OK-117" != "$(test_eval_ "(exit 117) && $1${LF}${LF}echo OK-\$?" 3>&1)"
then
error "bug in the test script: broken &&-chain or run-away HERE-DOC: $1"
fi
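
The sed-based chainlint filter added above is what flags the broken
&&-chains inside subshells that the bulk of the test-script fixes in this
patch address. A minimal sketch of the kind of construct it now rejects
(the test name and directory are illustrative, not taken from this patch):

	test_expect_success 'hypothetical broken &&-chain' '
		(
			cd sub
			git status
		)
	'

Joining the subshell commands with "&&", as in the fixes above, satisfies
the check.
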
#include "blob.h"
#include "alloc.h"
#include "gpg-interface.h"
+#include "packfile.h"
const char *tag_type = "tag";
return ret;
}
-struct object *deref_tag(struct object *o, const char *warn, int warnlen)
+struct object *deref_tag(struct repository *r, struct object *o, const char *warn, int warnlen)
{
+ struct object_id *last_oid = NULL;
while (o && o->type == OBJ_TAG)
- if (((struct tag *)o)->tagged)
- o = parse_object(&((struct tag *)o)->tagged->oid);
- else
+ if (((struct tag *)o)->tagged) {
+ last_oid = &((struct tag *)o)->tagged->oid;
+ o = parse_object(r, last_oid);
+ } else {
+ last_oid = NULL;
o = NULL;
+ }
if (!o && warn) {
+ if (last_oid && is_promisor_object(last_oid))
+ return NULL;
if (!warnlen)
warnlen = strlen(warn);
error("missing object referenced by '%.*s'", warnlen, warn);
struct object *deref_tag_noverify(struct object *o)
{
while (o && o->type == OBJ_TAG) {
- o = parse_object(&o->oid);
+ o = parse_object(the_repository, &o->oid);
if (o && o->type == OBJ_TAG && ((struct tag *)o)->tagged)
o = ((struct tag *)o)->tagged;
else
return o;
}
-struct tag *lookup_tag(const struct object_id *oid)
+struct tag *lookup_tag(struct repository *r, const struct object_id *oid)
{
- struct object *obj = lookup_object(oid->hash);
+ struct object *obj = lookup_object(r, oid->hash);
if (!obj)
- return create_object(the_repository, oid->hash,
- alloc_tag_node(the_repository));
- return object_as_type(obj, OBJ_TAG, 0);
+ return create_object(r, oid->hash,
+ alloc_tag_node(r));
+ return object_as_type(r, obj, OBJ_TAG, 0);
}
static timestamp_t parse_tag_date(const char *buf, const char *tail)
t->date = 0;
}
-int parse_tag_buffer(struct tag *item, const void *data, unsigned long size)
+int parse_tag_buffer(struct repository *r, struct tag *item, const void *data, unsigned long size)
{
struct object_id oid;
char type[20];
bufptr = nl + 1;
if (!strcmp(type, blob_type)) {
- item->tagged = (struct object *)lookup_blob(&oid);
+ item->tagged = (struct object *)lookup_blob(r, &oid);
} else if (!strcmp(type, tree_type)) {
- item->tagged = (struct object *)lookup_tree(&oid);
+ item->tagged = (struct object *)lookup_tree(r, &oid);
} else if (!strcmp(type, commit_type)) {
- item->tagged = (struct object *)lookup_commit(&oid);
+ item->tagged = (struct object *)lookup_commit(r, &oid);
} else if (!strcmp(type, tag_type)) {
- item->tagged = (struct object *)lookup_tag(&oid);
+ item->tagged = (struct object *)lookup_tag(r, &oid);
} else {
error("Unknown type %s", type);
item->tagged = NULL;
return error("Object %s not a tag",
oid_to_hex(&item->object.oid));
}
- ret = parse_tag_buffer(item, data, size);
+ ret = parse_tag_buffer(the_repository, item, data, size);
free(data);
return ret;
}
char *tag;
timestamp_t date;
};
-
-extern struct tag *lookup_tag(const struct object_id *oid);
-extern int parse_tag_buffer(struct tag *item, const void *data, unsigned long size);
+extern struct tag *lookup_tag(struct repository *r, const struct object_id *oid);
+extern int parse_tag_buffer(struct repository *r, struct tag *item, const void *data, unsigned long size);
extern int parse_tag(struct tag *item);
extern void release_tag_memory(struct tag *t);
-extern struct object *deref_tag(struct object *, const char *, int);
+extern struct object *deref_tag(struct repository *r, struct object *, const char *, int);
extern struct object *deref_tag_noverify(struct object *);
extern int gpg_verify_tag(const struct object_id *oid,
const char *name_to_report, unsigned flags);
}
static int fetch(struct transport *transport,
- int nr_heads, struct ref **to_fetch)
+ int nr_heads, struct ref **to_fetch,
+ struct ref **fetched_refs)
{
struct helper_data *data = transport->data;
int i, count;
if (process_connect(transport, 0)) {
do_take_over(transport);
- return transport->vtable->fetch(transport, nr_heads, to_fetch);
+ return transport->vtable->fetch(transport, nr_heads, to_fetch,
+ fetched_refs);
}
count = 0;
transport, "filter",
data->transport_options.filter_options.filter_spec);
+ if (data->transport_options.negotiation_tips)
+ warning("Ignoring --negotiation-tip because the protocol does not support it.");
+
if (data->fetch)
return fetch_with_fetch(transport, nr_heads, to_fetch);
* Fetch the objects for the given refs. Note that this gets
* an array, and should ignore the list structure.
*
+ * The transport *may* provide, in fetched_refs, the list of refs that
+ * it fetched. If the transport knows anything about the fetched refs
+ * that the caller does not know (for example, shallow status), it
+ * should provide that list of refs and include that information in the
+ * list.
+ *
* If the transport did not get hashes for refs in
* get_refs_list(), it should set the old_sha1 fields in the
* provided refs now.
**/
- int (*fetch)(struct transport *transport, int refs_nr, struct ref **refs);
+ int (*fetch)(struct transport *transport, int refs_nr, struct ref **refs,
+ struct ref **fetched_refs);
/**
* Push the objects and refs. Send the necessary objects, and
}
static int fetch_refs_from_bundle(struct transport *transport,
- int nr_heads, struct ref **to_fetch)
+ int nr_heads, struct ref **to_fetch,
+ struct ref **fetched_refs)
{
struct bundle_transport_data *data = transport->data;
return unbundle(&data->header, data->fd,
}
static int fetch_refs_via_pack(struct transport *transport,
- int nr_heads, struct ref **to_fetch)
+ int nr_heads, struct ref **to_fetch,
+ struct ref **fetched_refs)
{
int ret = 0;
struct git_transport_data *data = transport->data;
args.filter_options = data->options.filter_options;
args.stateless_rpc = transport->stateless_rpc;
args.server_options = transport->server_options;
+ args.negotiation_tips = data->options.negotiation_tips;
if (!data->got_remote_heads)
refs_tmp = get_refs_via_connect(transport, 0, NULL);
data->got_remote_heads = 0;
data->options.self_contained_and_connected =
args.self_contained_and_connected;
+ data->options.connectivity_checked = args.connectivity_checked;
if (refs == NULL)
ret = -1;
if (report_unmatched_refs(to_fetch, nr_heads))
ret = -1;
+ if (fetched_refs)
+ *fetched_refs = refs;
+ else
+ free_refs(refs);
+
free_refs(refs_tmp);
- free_refs(refs);
free(dest);
return ret;
}
return transport->remote_refs;
}
-int transport_fetch_refs(struct transport *transport, struct ref *refs)
+int transport_fetch_refs(struct transport *transport, struct ref *refs,
+ struct ref **fetched_refs)
{
int rc;
int nr_heads = 0, nr_alloc = 0, nr_refs = 0;
struct ref **heads = NULL;
+ struct ref *nop_head = NULL, **nop_tail = &nop_head;
struct ref *rm;
for (rm = refs; rm; rm = rm->next) {
nr_refs++;
if (rm->peer_ref &&
!is_null_oid(&rm->old_oid) &&
- !oidcmp(&rm->peer_ref->old_oid, &rm->old_oid))
+ !oidcmp(&rm->peer_ref->old_oid, &rm->old_oid)) {
+ /*
+ * These need to be reported as fetched, but we don't
+ * actually need to fetch them.
+ */
+ if (fetched_refs) {
+ struct ref *nop_ref = copy_ref(rm);
+ *nop_tail = nop_ref;
+ nop_tail = &nop_ref->next;
+ }
continue;
+ }
ALLOC_GROW(heads, nr_heads + 1, nr_alloc);
heads[nr_heads++] = rm;
}
heads[nr_heads++] = rm;
}
- rc = transport->vtable->fetch(transport, nr_heads, heads);
+ rc = transport->vtable->fetch(transport, nr_heads, heads, fetched_refs);
+ if (fetched_refs && nop_head) {
+ *nop_tail = *fetched_refs;
+ *fetched_refs = nop_head;
+ }
free(heads);
return rc;
unsigned deepen_relative : 1;
unsigned from_promisor : 1;
unsigned no_dependents : 1;
+
+ /*
+ * If this transport supports connect or stateless-connect,
+ * the corresponding field in struct fetch_pack_args is copied
+ * here after fetching.
+ *
+ * See the definition of connectivity_checked in struct
+ * fetch_pack_args for more information.
+ */
+ unsigned connectivity_checked:1;
+
int depth;
const char *deepen_since;
const struct string_list *deepen_not;
const char *receivepack;
struct push_cas_option *cas;
struct list_objects_filter_options filter_options;
+
+ /*
+ * This is only used during fetch. See the documentation of
+ * negotiation_tips in struct fetch_pack_args.
+ *
+ * This field is only supported by transports that support connect or
+ * stateless_connect. Set this field directly instead of using
+ * transport_set_option().
+ */
+ struct oid_array *negotiation_tips;
};
enum transport_family {
const struct ref *transport_get_remote_refs(struct transport *transport,
const struct argv_array *ref_prefixes);
-int transport_fetch_refs(struct transport *transport, struct ref *refs);
+int transport_fetch_refs(struct transport *transport, struct ref *refs,
+ struct ref **fetched_refs);
void transport_unlock_pack(struct transport *transport);
int transport_disconnect(struct transport *transport);
char *transport_anonymize_url(const char *url);
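
The negotiation_tips field documented above carries the same information
as the "--negotiation-tip" option referenced in the transport-helper
warning earlier; per the comment, only connect/stateless-connect
transports honor it, and callers set the field directly rather than going
through transport_set_option(). A rough command-line sketch, assuming a
remote named "origin" and a branch "master" (both illustrative):

	# Offer only the named ref's history as "have" tips while
	# negotiating this fetch; the option may be given more than once.
	git fetch --negotiation-tip=refs/heads/master origin
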
{
const char *path;
unsigned int mode, len;
+ const unsigned hashsz = the_hash_algo->rawsz;
- if (size < 23 || buf[size - 21]) {
+ if (size < hashsz + 3 || buf[size - (hashsz + 1)]) {
strbuf_addstr(err, _("too-short tree object"));
return -1;
}
#include "tag.h"
#include "alloc.h"
#include "tree-walk.h"
+#include "repository.h"
const char *tree_type = "tree";
unsigned mode, int stage, int opt)
{
int len;
- unsigned int size;
struct cache_entry *ce;
if (S_ISDIR(mode))
return READ_TREE_RECURSIVE;
len = strlen(pathname);
- size = cache_entry_size(baselen + len);
- ce = xcalloc(1, size);
+ ce = make_empty_cache_entry(istate, baselen + len);
ce->ce_mode = create_ce_mode(mode);
ce->ce_flags = create_ce_flags(stage);
else if (S_ISGITLINK(entry.mode)) {
struct commit *commit;
- commit = lookup_commit(entry.oid);
+ commit = lookup_commit(the_repository, entry.oid);
if (!commit)
die("Commit %s in submodule path %s%s not found",
oid_to_hex(entry.oid),
len = tree_entry_len(&entry);
strbuf_add(base, entry.path, len);
strbuf_addch(base, '/');
- retval = read_tree_1(lookup_tree(&oid),
+ retval = read_tree_1(lookup_tree(the_repository, &oid),
base, stage, pathspec,
fn, context);
strbuf_setlen(base, oldlen);
return 0;
}
-struct tree *lookup_tree(const struct object_id *oid)
+struct tree *lookup_tree(struct repository *r, const struct object_id *oid)
{
- struct object *obj = lookup_object(oid->hash);
+ struct object *obj = lookup_object(r, oid->hash);
if (!obj)
- return create_object(the_repository, oid->hash,
- alloc_tree_node(the_repository));
- return object_as_type(obj, OBJ_TREE, 0);
+ return create_object(r, oid->hash,
+ alloc_tree_node(r));
+ return object_as_type(r, obj, OBJ_TREE, 0);
}
int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
struct tree *parse_tree_indirect(const struct object_id *oid)
{
- struct object *obj = parse_object(oid);
+ struct object *obj = parse_object(the_repository, oid);
do {
if (!obj)
return NULL;
else
return NULL;
if (!obj->parsed)
- parse_object(&obj->oid);
+ parse_object(the_repository, &obj->oid);
} while (1);
}
unsigned long size;
};
-struct tree *lookup_tree(const struct object_id *oid);
+struct tree *lookup_tree(struct repository *r, const struct object_id *oid);
int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size);
{ 0x0730, 0x074A },
{ 0x07A6, 0x07B0 },
{ 0x07EB, 0x07F3 },
+{ 0x07FD, 0x07FD },
{ 0x0816, 0x0819 },
{ 0x081B, 0x0823 },
{ 0x0825, 0x0827 },
{ 0x0829, 0x082D },
{ 0x0859, 0x085B },
-{ 0x08D4, 0x0902 },
+{ 0x08D3, 0x0902 },
{ 0x093A, 0x093A },
{ 0x093C, 0x093C },
{ 0x0941, 0x0948 },
{ 0x09C1, 0x09C4 },
{ 0x09CD, 0x09CD },
{ 0x09E2, 0x09E3 },
+{ 0x09FE, 0x09FE },
{ 0x0A01, 0x0A02 },
{ 0x0A3C, 0x0A3C },
{ 0x0A41, 0x0A42 },
{ 0x0BC0, 0x0BC0 },
{ 0x0BCD, 0x0BCD },
{ 0x0C00, 0x0C00 },
+{ 0x0C04, 0x0C04 },
{ 0x0C3E, 0x0C40 },
{ 0x0C46, 0x0C48 },
{ 0x0C4A, 0x0C4D },
{ 0xA825, 0xA826 },
{ 0xA8C4, 0xA8C5 },
{ 0xA8E0, 0xA8F1 },
+{ 0xA8FF, 0xA8FF },
{ 0xA926, 0xA92D },
{ 0xA947, 0xA951 },
{ 0xA980, 0xA982 },
{ 0x10A38, 0x10A3A },
{ 0x10A3F, 0x10A3F },
{ 0x10AE5, 0x10AE6 },
+{ 0x10D24, 0x10D27 },
+{ 0x10F46, 0x10F50 },
{ 0x11001, 0x11001 },
{ 0x11038, 0x11046 },
{ 0x1107F, 0x11081 },
{ 0x110B3, 0x110B6 },
{ 0x110B9, 0x110BA },
{ 0x110BD, 0x110BD },
+{ 0x110CD, 0x110CD },
{ 0x11100, 0x11102 },
{ 0x11127, 0x1112B },
{ 0x1112D, 0x11134 },
{ 0x11173, 0x11173 },
{ 0x11180, 0x11181 },
{ 0x111B6, 0x111BE },
-{ 0x111CA, 0x111CC },
+{ 0x111C9, 0x111CC },
{ 0x1122F, 0x11231 },
{ 0x11234, 0x11234 },
{ 0x11236, 0x11237 },
{ 0x112DF, 0x112DF },
{ 0x112E3, 0x112EA },
{ 0x11300, 0x11301 },
-{ 0x1133C, 0x1133C },
+{ 0x1133B, 0x1133C },
{ 0x11340, 0x11340 },
{ 0x11366, 0x1136C },
{ 0x11370, 0x11374 },
{ 0x11438, 0x1143F },
{ 0x11442, 0x11444 },
{ 0x11446, 0x11446 },
+{ 0x1145E, 0x1145E },
{ 0x114B3, 0x114B8 },
{ 0x114BA, 0x114BA },
{ 0x114BF, 0x114C0 },
{ 0x1171D, 0x1171F },
{ 0x11722, 0x11725 },
{ 0x11727, 0x1172B },
-{ 0x11A01, 0x11A06 },
-{ 0x11A09, 0x11A0A },
+{ 0x1182F, 0x11837 },
+{ 0x11839, 0x1183A },
+{ 0x11A01, 0x11A0A },
{ 0x11A33, 0x11A38 },
{ 0x11A3B, 0x11A3E },
{ 0x11A47, 0x11A47 },
{ 0x11D3C, 0x11D3D },
{ 0x11D3F, 0x11D45 },
{ 0x11D47, 0x11D47 },
+{ 0x11D90, 0x11D91 },
+{ 0x11D95, 0x11D95 },
+{ 0x11D97, 0x11D97 },
+{ 0x11EF3, 0x11EF4 },
{ 0x16AF0, 0x16AF4 },
{ 0x16B30, 0x16B36 },
{ 0x16F8F, 0x16F92 },
{ 0x3000, 0x303E },
{ 0x3041, 0x3096 },
{ 0x3099, 0x30FF },
-{ 0x3105, 0x312E },
+{ 0x3105, 0x312F },
{ 0x3131, 0x318E },
{ 0x3190, 0x31BA },
{ 0x31C0, 0x31E3 },
{ 0xFF01, 0xFF60 },
{ 0xFFE0, 0xFFE6 },
{ 0x16FE0, 0x16FE1 },
-{ 0x17000, 0x187EC },
+{ 0x17000, 0x187F1 },
{ 0x18800, 0x18AF2 },
{ 0x1B000, 0x1B11E },
{ 0x1B170, 0x1B2FB },
{ 0x1F6CC, 0x1F6CC },
{ 0x1F6D0, 0x1F6D2 },
{ 0x1F6EB, 0x1F6EC },
-{ 0x1F6F4, 0x1F6F8 },
+{ 0x1F6F4, 0x1F6F9 },
{ 0x1F910, 0x1F93E },
-{ 0x1F940, 0x1F94C },
-{ 0x1F950, 0x1F96B },
-{ 0x1F980, 0x1F997 },
-{ 0x1F9C0, 0x1F9C0 },
-{ 0x1F9D0, 0x1F9E6 },
+{ 0x1F940, 0x1F970 },
+{ 0x1F973, 0x1F976 },
+{ 0x1F97A, 0x1F97A },
+{ 0x1F97C, 0x1F9A2 },
+{ 0x1F9B0, 0x1F9B9 },
+{ 0x1F9C0, 0x1F9C2 },
+{ 0x1F9D0, 0x1F9FF },
{ 0x20000, 0x2FFFD },
{ 0x30000, 0x3FFFD }
};
ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
}
-static struct cache_entry *dup_entry(const struct cache_entry *ce)
-{
- unsigned int size = ce_size(ce);
- struct cache_entry *new_entry = xmalloc(size);
-
- memcpy(new_entry, ce, size);
- return new_entry;
-}
-
static void add_entry(struct unpack_trees_options *o,
const struct cache_entry *ce,
unsigned int set, unsigned int clear)
{
- do_add_entry(o, dup_entry(ce), set, clear);
+ do_add_entry(o, dup_cache_entry(ce, &o->result), set, clear);
}
/*
return (info->pathlen < ce_namelen(ce));
}
-static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage)
+static struct cache_entry *create_ce_entry(const struct traverse_info *info,
+ const struct name_entry *n,
+ int stage,
+ struct index_state *istate,
+ int is_transient)
{
int len = traverse_path_len(info, n);
- struct cache_entry *ce = xcalloc(1, cache_entry_size(len));
+ struct cache_entry *ce =
+ is_transient ?
+ make_empty_transient_cache_entry(len) :
+ make_empty_cache_entry(istate, len);
ce->ce_mode = create_ce_mode(n->mode);
ce->ce_flags = create_ce_flags(stage);
stage = 3;
else
stage = 2;
- src[i + o->merge] = create_ce_entry(info, names + i, stage);
+
+ /*
+ * If the merge bit is set, then the cache entries are
+ * discarded in the following block. In this case,
+ * construct "transient" cache_entries, as they are
+ * not stored in the index. Otherwise construct the
+ * cache entry from the index-aware logic.
+ */
+ src[i + o->merge] = create_ce_entry(info, names + i, stage, &o->result, o->merge);
}
if (o->merge) {
for (i = 0; i < n; i++) {
struct cache_entry *ce = src[i + o->merge];
if (ce != o->df_conflict_entry)
- free(ce);
+ discard_cache_entry(ce);
}
return rc;
}
if (select_flag && !(ce->ce_flags & select_flag))
continue;
- if (!ce_stage(ce))
+ if (!ce_stage(ce) && !(ce->ce_flags & CE_CONFLICTED))
ce->ce_flags |= skip_wt_flag;
else
ce->ce_flags &= ~skip_wt_flag;
struct unpack_trees_options *o)
{
int update = CE_UPDATE;
- struct cache_entry *merge = dup_entry(ce);
+ struct cache_entry *merge = dup_cache_entry(ce, &o->result);
if (!old) {
/*
if (verify_absent(merge,
ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
- free(merge);
+ discard_cache_entry(merge);
return -1;
}
invalidate_ce_path(merge, o);
update = 0;
} else {
if (verify_uptodate(old, o)) {
- free(merge);
+ discard_cache_entry(merge);
return -1;
}
/* Migrate old flags over */
#include "refs.h"
#include "pkt-line.h"
#include "sideband.h"
+#include "repository.h"
#include "object-store.h"
#include "tag.h"
#include "object.h"
static int filter_capability_requested;
static int allow_filter;
+static int allow_ref_in_want;
static struct list_objects_filter_options filter_options;
static void reset_timeout(void)
if (!has_object_file(oid))
return -1;
- o = parse_object(oid);
+ o = parse_object(the_repository, oid);
if (!o)
die("oops (%s)", oid_to_hex(oid));
if (o->type == OBJ_COMMIT) {
break;
}
if (!commit->object.parsed)
- parse_object(&commit->object.oid);
+ parse_object(the_repository, &commit->object.oid);
if (commit->object.flags & REACHABLE)
continue;
commit->object.flags |= REACHABLE;
if (want->flags & COMMON_KNOWN)
continue;
- want = deref_tag(want, "a want line", 0);
+ want = deref_tag(the_repository, want, "a want line", 0);
if (!want || want->type != OBJ_COMMIT) {
/* no way to tell if this is reachable by
* looking at the ancestry chain alone, so
if (parse_oid_hex(namebuf, &sha1, &p) || *p != '\n')
break;
- o = lookup_object(sha1.hash);
+ o = lookup_object(the_repository, sha1.hash);
if (o && o->type == OBJ_COMMIT) {
o->flags &= ~TMP_MARK;
}
struct object *object;
if (get_oid_hex(arg, &oid))
die("invalid shallow line: %s", line);
- object = parse_object(&oid);
+ object = parse_object(the_repository, &oid);
if (!object)
return 1;
if (object->type != OBJ_COMMIT)
if (allow_filter && parse_feature_request(features, "filter"))
filter_capability_requested = 1;
- o = parse_object(&oid_buf);
+ o = parse_object(the_repository, &oid_buf);
if (!o) {
packet_write_fmt(1,
"ERR upload-pack: not our ref %s",
return git_config_string(&pack_objects_hook, var, value);
} else if (!strcmp("uploadpack.allowfilter", var)) {
allow_filter = git_config_bool(var, value);
+ } else if (!strcmp("uploadpack.allowrefinwant", var)) {
+ allow_ref_in_want = git_config_bool(var, value);
}
return parse_hide_refs_config(var, value, "uploadpack");
}
struct upload_pack_data {
struct object_array wants;
+ struct string_list wanted_refs;
struct oid_array haves;
struct object_array shallows;
static void upload_pack_data_init(struct upload_pack_data *data)
{
struct object_array wants = OBJECT_ARRAY_INIT;
+ struct string_list wanted_refs = STRING_LIST_INIT_DUP;
struct oid_array haves = OID_ARRAY_INIT;
struct object_array shallows = OBJECT_ARRAY_INIT;
struct string_list deepen_not = STRING_LIST_INIT_DUP;
memset(data, 0, sizeof(*data));
data->wants = wants;
+ data->wanted_refs = wanted_refs;
data->haves = haves;
data->shallows = shallows;
data->deepen_not = deepen_not;
static void upload_pack_data_clear(struct upload_pack_data *data)
{
object_array_clear(&data->wants);
+ string_list_clear(&data->wanted_refs, 1);
oid_array_clear(&data->haves);
object_array_clear(&data->shallows);
string_list_clear(&data->deepen_not, 0);
die("git upload-pack: protocol error, "
"expected to get oid, not '%s'", line);
- o = parse_object(&oid);
+ o = parse_object(the_repository, &oid);
if (!o) {
packet_write_fmt(1,
"ERR upload-pack: not our ref %s",
return 0;
}
+static int parse_want_ref(const char *line, struct string_list *wanted_refs)
+{
+ const char *arg;
+ if (skip_prefix(line, "want-ref ", &arg)) {
+ struct object_id oid;
+ struct string_list_item *item;
+ struct object *o;
+
+ if (read_ref(arg, &oid)) {
+ packet_write_fmt(1, "ERR unknown ref %s", arg);
+ die("unknown ref %s", arg);
+ }
+
+ item = string_list_append(wanted_refs, arg);
+ item->util = oiddup(&oid);
+
+ o = parse_object_or_die(&oid, arg);
+ if (!(o->flags & WANTED)) {
+ o->flags |= WANTED;
+ add_object_array(o, NULL, &want_obj);
+ }
+
+ return 1;
+ }
+
+ return 0;
+}
+
static int parse_have(const char *line, struct oid_array *haves)
{
const char *arg;
/* process want */
if (parse_want(arg))
continue;
+ if (allow_ref_in_want && parse_want_ref(arg, &data->wanted_refs))
+ continue;
/* process have line */
if (parse_have(arg, &data->haves))
continue;
oid_array_append(common, oid);
- o = parse_object(oid);
+ o = parse_object(the_repository, oid);
if (!o)
die("oops (%s)", oid_to_hex(oid));
if (o->type == OBJ_COMMIT) {
return ret;
}
+static void send_wanted_ref_info(struct upload_pack_data *data)
+{
+ const struct string_list_item *item;
+
+ if (!data->wanted_refs.nr)
+ return;
+
+ packet_write_fmt(1, "wanted-refs\n");
+
+ for_each_string_list_item(item, &data->wanted_refs) {
+ packet_write_fmt(1, "%s %s\n",
+ oid_to_hex(item->util),
+ item->string);
+ }
+
+ packet_delim(1);
+}
+
static void send_shallow_info(struct upload_pack_data *data)
{
/* No shallow info needs to be sent */
state = FETCH_DONE;
break;
case FETCH_SEND_PACK:
+ send_wanted_ref_info(&data);
send_shallow_info(&data);
packet_write_fmt(1, "packfile\n");
{
if (value) {
int allow_filter_value;
+ int allow_ref_in_want;
+
strbuf_addstr(value, "shallow");
+
if (!repo_config_get_bool(the_repository,
"uploadpack.allowfilter",
&allow_filter_value) &&
allow_filter_value)
strbuf_addstr(value, " filter");
+
+ if (!repo_config_get_bool(the_repository,
+ "uploadpack.allowrefinwant",
+ &allow_ref_in_want) &&
+ allow_ref_in_want)
+ strbuf_addstr(value, " ref-in-want");
}
+
return 1;
}
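
With the uploadpack.allowrefinwant knob wired up above, the protocol v2
"fetch" capability advertisement gains a "ref-in-want" token. A quick way
to observe it against a local repository (the server.git path and the
packets trace file are illustrative, not part of this patch):

	git -C server.git config uploadpack.allowrefinwant true &&
	GIT_TRACE_PACKET="$PWD/packets" \
		git -c protocol.version=2 ls-remote server.git >/dev/null &&
	grep ref-in-want packets
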
"|<<|<>|<=>|>>"),
PATTERNS("php",
"^[\t ]*(((public|protected|private|static)[\t ]+)*function.*)$\n"
- "^[\t ]*(class.*)$",
+ "^[\t ]*((((final|abstract)[\t ]+)?class|interface|trait).*)$",
/* -- */
"[a-zA-Z_][a-zA-Z0-9_]*"
"|[-+0-9.e]+|0[xXbB]?[0-9a-fA-F]+"
return data && bom && (len >= bom_len) && !memcmp(data, bom, bom_len);
}
-static const char utf16_be_bom[] = {0xFE, 0xFF};
-static const char utf16_le_bom[] = {0xFF, 0xFE};
-static const char utf32_be_bom[] = {0x00, 0x00, 0xFE, 0xFF};
-static const char utf32_le_bom[] = {0xFF, 0xFE, 0x00, 0x00};
+static const char utf16_be_bom[] = {'\xFE', '\xFF'};
+static const char utf16_le_bom[] = {'\xFF', '\xFE'};
+static const char utf32_be_bom[] = {'\0', '\0', '\xFE', '\xFF'};
+static const char utf32_le_bom[] = {'\xFF', '\xFE', '\0', '\0'};
int has_prohibited_utf_bom(const char *enc, const char *data, size_t len)
{
#include "cache.h"
#include "walker.h"
+#include "repository.h"
#include "object-store.h"
#include "commit.h"
#include "tree.h"
if (S_ISGITLINK(entry.mode))
continue;
if (S_ISDIR(entry.mode)) {
- struct tree *tree = lookup_tree(entry.oid);
+ struct tree *tree = lookup_tree(the_repository,
+ entry.oid);
if (tree)
obj = &tree->object;
}
else {
- struct blob *blob = lookup_blob(entry.oid);
+ struct blob *blob = lookup_blob(the_repository,
+ entry.oid);
if (blob)
obj = &blob->object;
}
}
}
if (!obj->type)
- parse_object(&obj->oid);
+ parse_object(the_repository, &obj->oid);
if (process_object(walker, obj))
return -1;
}
static int mark_complete(const char *path, const struct object_id *oid,
int flag, void *cb_data)
{
- struct commit *commit = lookup_commit_reference_gently(oid, 1);
+ struct commit *commit = lookup_commit_reference_gently(the_repository,
+ oid, 1);
if (commit) {
commit->object.flags |= COMPLETE;
/* sha1 is a commit? match without further lookup */
(!oidcmp(&cb.noid, &oid) ||
/* perhaps sha1 is a tag, try to dereference to a commit */
- ((commit = lookup_commit_reference_gently(&oid, 1)) != NULL &&
+ ((commit = lookup_commit_reference_gently(the_repository, &oid, 1)) != NULL &&
!oidcmp(&cb.noid, &commit->object.oid)))) {
const char *from = ref;
if (!skip_prefix(from, "refs/tags/", &from))
if (ignore_submodules)
rev_info.diffopt.flags.ignore_submodules = 1;
rev_info.diffopt.flags.quick = 1;
+
add_head_to_pending(&rev_info);
+ if (!rev_info.pending.nr) {
+ /*
+ * We have no head (or it's corrupt); use the empty tree,
+ * which will complain if the index is non-empty.
+ */
+ struct tree *tree = lookup_tree(the_repository, the_hash_algo->empty_tree);
+ add_pending_object(&rev_info, &tree->object, "");
+ }
+
diff_setup_done(&rev_info.diffopt);
result = run_diff_index(&rev_info, 1);
return diff_result_code(&rev_info.diffopt, result);
#define XDL_EMIT_FUNCNAMES (1 << 0)
#define XDL_EMIT_FUNCCONTEXT (1 << 2)
-#define XDL_MMB_READONLY (1 << 0)
-
-#define XDL_MMF_ATOMIC (1 << 0)
-
-#define XDL_BDOP_INS 1
-#define XDL_BDOP_CPY 2
-#define XDL_BDOP_INSB 3
-
/* merge simplification levels */
#define XDL_MERGE_MINIMAL 0
#define XDL_MERGE_EAGER 1
#include "xinclude.h"
-
-
#define XDL_MAX_COST_MIN 256
#define XDL_HEUR_MIN_COST 256
#define XDL_LINE_MAX (long)((1UL << (CHAR_BIT * sizeof(long) - 1)) - 1)
#define XDL_SNAKE_CNT 20
#define XDL_K_HEUR 4
-
-
typedef struct s_xdpsplit {
long i1, i2;
int min_lo, min_hi;
} xdpsplit_t;
-
-
-
-static long xdl_split(unsigned long const *ha1, long off1, long lim1,
- unsigned long const *ha2, long off2, long lim2,
- long *kvdf, long *kvdb, int need_min, xdpsplit_t *spl,
- xdalgoenv_t *xenv);
-static xdchange_t *xdl_add_change(xdchange_t *xscr, long i1, long i2, long chg1, long chg2);
-
-
-
-
-
/*
* See "An O(ND) Difference Algorithm and its Variations", by Eugene Myers.
* Basically considers a "box" (off1, off2, lim1, lim2) and scan from both