ARTICLES += git-tools
ARTICLES += git-bisect-lk2009
# with their own formatting rules.
-SP_ARTICLES = howto/revert-branch-rebase howto/using-merge-subtree user-manual
+SP_ARTICLES = user-manual
+SP_ARTICLES += howto/revert-branch-rebase
+SP_ARTICLES += howto/using-merge-subtree
+SP_ARTICLES += howto/using-signed-tag-in-pull-request
API_DOCS = $(patsubst %.txt,%,$(filter-out technical/api-index-skel.txt technical/api-index.txt, $(wildcard technical/api-*.txt)))
SP_ARTICLES += $(API_DOCS)
SP_ARTICLES += technical/api-index
--- /dev/null
+Git v1.7.10 Release Notes
+=========================
+
+Updates since v1.7.9
+--------------------
+
+UI, Workflows & Features
+
+ * Improved handling of views, labels and branches in git-p4 (in contrib).
+
+ * "git am" learned to pass "-b" option to underlying "git mailinfo", so
+ that bracketed string other than "PATCH" at the beginning can be kept.
+
+ * "git clone" learned "--single-branch" option to limit cloning to a
+ single branch (surprise!).
+
+ * When showing a patch while ignoring whitespace changes, the context
+ lines are taken from the postimage, in order to make it easier to
+ view the output.
+
+Performance
+
+ * During "git upload-pack" in respose to "git fetch", unnecessary calls
+ to parse_object() have been eliminated, to help performance in
+ repositories with excessive number of refs.
+
+Internal Implementation
+
+ * Recursive call chains in "git index-pack" to deal with long delta
+ chains have been flattened, to reduce the stack footprint.
+
+ * Use of add_extra_ref() API is slowly getting removed, to make it
+ possible to cleanly restructure the overall refs API.
+
+ * The test suite supports the new "test_pause" helper function.
+
+Also contains minor documentation updates and code clean-ups.
+
+
+Fixes since v1.7.9
+------------------
+
+Unless otherwise noted, all the fixes since v1.7.9 in the maintenance
+releases are contained in this release (see release notes to them for
+details).
+
+ * When "git push" fails to update any refs, the client side did not
+ report an error correctly to the end user.
+ (merge 5238cbf sp/smart-http-failure-to-push later to maint).
+
+ * "git push -q" was not sufficiently quiet.
+ (merge d336572 cb/push-quiet later to maint).
+
+ * "git log --first-parent $pathspec" did not stay on the first parent
+ chain and veered into side branch from which the whole change to the
+ specified paths came.
+ (merge 36ed191 jc/maint-log-first-parent-pathspec later to maint).
+
+ * Subprocesses spawned from various git programs were often left running
+ to completion even when the top-level process was killed.
+ (merge 10c6cdd cb/maint-kill-subprocess-upon-signal later to maint).
+
+---
+exec >/var/tmp/1
+O=v1.7.9
+echo O=$(git describe)
+git log --first-parent --oneline ^maint $O..
+echo
+git shortlog --no-merges ^maint $O..
--- /dev/null
+Git v1.7.6.6 Release Notes
+==========================
+
+Fixes since v1.7.6.5
+--------------------
+
+ * The code to look up attributes for paths reused entries from the wrong
+   directory when the two paths in question were in adjacent directories and
+   the name of one directory was a prefix of the other.
+
+ * When producing a "thin pack" (primarily used in bundles and smart
+ HTTP transfers) out of a fully packed repository, we unnecessarily
+ avoided sending recent objects as a delta against objects we know
+ the other side has.
+
+Also contains minor fixes and documentation updates.
--- /dev/null
+Git v1.7.7.6 Release Notes
+==========================
+
+Fixes since v1.7.7.5
+--------------------
+
+ * The code to look up attributes for paths reused entries from the wrong
+   directory when the two paths in question were in adjacent directories and
+   the name of one directory was a prefix of the other.
+
+ * A wildcard that matches a deeper hierarchy given to the "diff-index"
+   command, e.g. "git diff-index HEAD -- '*.txt'", incorrectly reported
+   additions of matching files even when there was no change.
+
+ * When producing a "thin pack" (primarily used in bundles and smart
+ HTTP transfers) out of a fully packed repository, we unnecessarily
+ avoided sending recent objects as a delta against objects we know
+ the other side has.
+
+Also contains minor fixes and documentation updates.
Git v1.7.8.1 Release Notes
==========================
-Fixes since v1.7.8.1
---------------------
+Fixes since v1.7.8
+------------------
* In some codepaths (notably, checkout and merge), the ignore patterns
recorded in $GIT_DIR/info/exclude were not honored. They now are.
- * After fetching from a remote that has very long refname, the reporting
- output could have corrupted by overrunning a static buffer.
+ * "git apply --check" did not error out when given an empty input
+ without any patch.
+
+ * "git archive" mistakenly allowed remote clients to ask for commits
+ that are not at the tip of any ref.
* "git checkout" and "git merge" treated in-tree .gitignore and exclude
file in $GIT_DIR/info/ directory inconsistently when deciding which
untracked files are ignored and expendable.
+ * LF-to-CRLF streaming filter used when checking out a large-ish blob
+ fell into an infinite loop with a rare input.
+
+ * The function header pattern for files with "diff=cpp" attribute did
+ not consider "type *funcname(type param1,..." as the beginning of a
+ function.
+
+ * The error message from "git diff" and "git status" when they fail
+ to inspect changes in submodules did not report which submodule they
+ had trouble with.
+
+ * After fetching from a remote that has a very long refname, the reporting
+   output could have been corrupted by overrunning a static buffer.
+
+ * "git pack-objects" avoids creating cyclic dependencies among deltas
+ when seeing a broken packfile that records the same object in both
+ the deflated form and as a delta.
+
Also contains minor fixes and documentation updates.
--- /dev/null
+Git v1.7.8.2 Release Notes
+==========================
+
+Fixes since v1.7.8.1
+--------------------
+
+ * Porcelain commands like "git reset" did not distinguish deletions
+ and type-changes from ordinary modification, and reported them with
+ the same 'M' moniker. They now use 'D' (for deletion) and 'T' (for
+ type-change) to match "git status -s" and "git diff --name-status".
+
+ * The configuration file parser used for sizes (e.g. bigFileThreshold)
+   did not correctly interpret the 'g' suffix.
+
+ * The replacement implementation of snprintf, used on platforms whose
+   native snprintf is broken, did not use va_copy correctly.
+
+ * LF-to-CRLF streaming filter replaced all LF with CRLF, which might
+   be technically correct but not friendly to people who are trying
+   to recover from earlier mistakes of using CRLF in the repository
+   data in the first place. It now refrains from doing so for LF that
+   follows a CR.
+
+ * The native git connection going over TCP (not over SSH) did not set
+   the SO_KEEPALIVE option, and consequently failed to detect link-layer
+   errors.
+
+ * "git branch -m <current branch> HEAD" is an obvious no-op but was not
+ allowed.
+
+ * "git checkout -m" did not recreate the conflicted state in a "both
+ sides added, without any common ancestor version" conflict
+ situation.
+
+ * "git cherry-pick $commit" (not a range) created an unnecessary
+ sequencer state and interfered with valid workflow to use the
+ command during a session to cherry-pick multiple commits.
+
+ * You could make "git commit" segfault by giving the "--no-message"
+ option.
+
+ * "fast-import" did not correctly update an existing notes tree,
+ possibly corrupting the fan-out.
+
+ * "git fetch-pack" accepted unqualified refs that do not begin with
+ refs/ by mistake and compensated it by matching the refspec with
+ tail-match, which was doubly wrong. This broke fetching from a
+ repository with a funny named ref "refs/foo/refs/heads/master" and a
+ 'master' branch with "git fetch-pack refs/heads/master", as the
+ command incorrectly considered the former a "match".
+
+ * "git log --follow" did not honor the rename threshold score given
+ with the -M option (e.g. "-M50%").
+
+ * "git mv" gave suboptimal error/warning messages when it overwrites
+ target files. It also did not pay attention to "-v" option.
+
+ * Authenticated "git push" over dumb HTTP were broken with a recent
+ change and failed without asking for password when username is
+ given.
+
+ * "git push" to an empty repository over HTTP were broken with a
+ recent change to the ref handling.
+
+ * "git push -v" forgot how to be verbose by mistake. It now properly
+ becomes verbose when asked to.
+
+ * When a "reword" action in "git rebase -i" failed to run "commit --amend",
+ we did not give the control back to the user to resolve the situation, and
+ instead kept the original commit log message.
+
+Also contains minor fixes and documentation updates.
--- /dev/null
+Git v1.7.8.3 Release Notes
+==========================
+
+Fixes since v1.7.8.2
+--------------------
+
+ * Attempting to fetch from an empty file as if it were a bundle did not
+   error out correctly.
+
+ * gitweb did not correctly fall back to configured $fallback_encoding
+ that is not 'latin1'.
+
+ * "git clone --depth $n" did not catch a non-number given as $n as an
+ error.
+
+Also contains minor fixes and documentation updates.
--- /dev/null
+Git v1.7.8.4 Release Notes
+==========================
+
+Fixes since v1.7.8.3
+--------------------
+
+ * The code to look up attributes for paths reused entries from the wrong
+   directory when the two paths in question were in adjacent directories and
+   the name of one directory was a prefix of the other.
+
+ * A wildcard that matches a deeper hierarchy given to the "diff-index"
+   command, e.g. "git diff-index HEAD -- '*.txt'", incorrectly reported
+   additions of matching files even when there was no change.
+
+ * When producing a "thin pack" (primarily used in bundles and smart
+ HTTP transfers) out of a fully packed repository, we unnecessarily
+ avoided sending recent objects as a delta against objects we know
+ the other side has.
+
+ * "git send-email" did not properly treat sendemail.multiedit as a
+ boolean (e.g. setting it to "false" did not turn it off).
+
+Also contains minor fixes and documentation updates.
-Git v1.7.9 Release Notes (draft)
+Git v1.7.9 Release Notes
========================
Updates since v1.7.8
* git-p4 (in contrib/) updates.
- * Porcelain commands like "git reset" did not distinguish deletions
- and type-changes from ordinary modification, and reported them with
- the same 'M' moniker. They now use 'D' (for deletion) and 'T' (for
- type-change) to match "git status -s" and "git diff --name-status".
+ * Git uses gettext to translate its most common interface messages
+ into the user's language if translations are available and the
+ locale is appropriately set. Distributors can drop new PO files
+ in po/ to add new translations.
+
+ * The code to handle username/password for HTTP transactions used in
+ "git push" & "git fetch" learned to talk "credential API" to
+ external programs to cache or store them, to allow integration with
+ platform native keychain mechanisms.
+
+ * The input prompts in the terminal use our own getpass() replacement
+ when possible. HTTP transactions used to ask for the username without
+ echoing back what was typed, but with this change you will see it as
+ you type.
+
+ * The internals of "revert/cherry-pick" have been tweaked to prepare for
+   building a more generic "sequencer" on top of the implementation that
+   drives them.
+
+ * "git rev-parse FETCH_HEAD" after "git fetch" without specifying
+ what to fetch from the command line will now show the commit that
+ would be merged if the command were "git pull".
* "git add" learned to stream large files directly into a packfile
instead of writing them into individual loose object files.
- * "git branch -m <current branch> HEAD" is an obvious no-op and is
- now allowed.
-
* "git checkout -B <current branch> <elsewhere>" is a more intuitive
way to spell "git reset --keep <elsewhere>".
user is amending the tree being recorded, without updating the
commit log message.
+ * "git commit" and "git reset" re-learned the optimization to prime
+ the cache-tree information in the index, which makes it faster to
+ write a tree object out after the index entries are updated.
+
+ * "git commit" detects and rejects an attempt to stuff NUL byte in
+ the commit log message.
+
+ * "git commit" learned "-S" to GPG-sign the commit; this can be shown
+ with the "--show-signature" option to "git log".
+
* fsck and prune are relatively lengthy operations that still go
silent while making the end-user wait. They learned to give progress
output like other slow operations.
* The set of built-in function-header patterns for various languages
knows MATLAB.
+ * "git log --format='<format>'" learned new %g[nNeE] specifiers to
+ show information from the reflog entries when walking the reflog
+ (i.e. with "-g").
+
* "git pull" can be used to fetch and merge an annotated/signed tag,
instead of the tip of a topic branch. The GPG signature from the
signed tag is recorded in the resulting merge commit for later
auditing.
+ * "git log" learned "--show-signature" option to show the signed tag
+ that was merged that is embedded in the merge commit. It also can
+ show the signature made on the commit with "git commit -S".
+
* "git branch --edit-description" can be used to add descriptive text
to explain what a topic branch is about.
Fixes since v1.7.8
------------------
- * The function header pattern for files with "diff=cpp" attribute did
- not consider "type *funcname(type param1,..." as the beginning of a
- function.
- (merge 37e7793 tr/userdiff-c-returns-pointer later to maint).
-
- * LF-to-CRLF streaming filter used when checking out a large-ish blob
- fell into an infinite loop with a rare input.
- (merge 284e3d2 cn/maint-lf-to-crlf-filter later to maint).
-
- * "git archive" mistakenly allowed remote clients to ask for commits
- that are not at the tip of any ref.
- (merge 7b51c33 jk/maint-upload-archive later to maint).
-
- * "git apply --check" did not error out when given an empty input
- without any patch.
- (merge cc64b31 bc/maint-apply-check-no-patch later to maint).
-
- * The error message from "git diff" and "git status" when they fail
- to inspect changes in submodules did not report which submodule they
- had trouble with.
- (merge 6a5ceda jl/submodule-status-failure-report later to maint).
-
- * "fast-import" did not correctly update an existing notes tree,
- possibly corrupting the fan-out.
-
- * When a "reword" action in "git rebase -i" failed to run "commit --amend",
- we did not give the control back to the user to resolve the situation, and
- instead kept the original commit log message.
- (merge 0becb3e aw/rebase-i-stop-on-failure-to-amend later to maint).
-
---
-exec >/var/tmp/1
-O=v1.7.8-282-ga2add85
-echo O=$(git describe master)
-git log --first-parent --oneline --reverse ^$O master
-echo
-git shortlog --no-merges ^$O ^maint master
+Unless otherwise noted, all the fixes since v1.7.8 in the maintenance
+releases are contained in this release (see release notes to them for
+details).
porcelain configuration variables in the respective porcelain documentation.
advice.*::
- When set to 'true', display the given optional help message.
- When set to 'false', do not display. The configuration variables
- are:
+ These variables control various optional help messages designed to
+ aid new users. All 'advice.*' variables default to 'true', and you
+ can tell Git that you do not need help by setting these to 'false':
+
--
pushNonFastForward::
Advice shown when linkgit:git-push[1] refuses
- non-fast-forward refs. Default: true.
+ non-fast-forward refs.
statusHints::
Directions on how to stage/unstage/add shown in the
output of linkgit:git-status[1] and the template shown
- when writing commit messages. Default: true.
+ when writing commit messages.
commitBeforeMerge::
Advice shown when linkgit:git-merge[1] refuses to
merge to avoid overwriting local changes.
- Default: true.
resolveConflict::
Advices shown by various commands when conflicts
prevent the operation from being performed.
- Default: true.
implicitIdentity::
Advice on how to set your identity configuration when
your information is guessed from the system username and
- domain name. Default: true.
-
+ domain name.
detachedHead::
- Advice shown when you used linkgit::git-checkout[1] to
+ Advice shown when you used linkgit:git-checkout[1] to
move to the detach HEAD state, to instruct how to create
- a local branch after the fact. Default: true.
+ a local branch after the fact.
--
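+
+For example, a user who no longer wants the detached HEAD guidance can turn
+that particular message off with:
+
+-------------
+git config --global advice.detachedHead false
+-------------
+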
core.fileMode::
grep.extendedRegexp::
If set to true, enable '--extended-regexp' option by default.
+gpg.program::
+	Use this custom program instead of "gpg" found on $PATH when
+	making or verifying a PGP signature. The program must support the
+	same command line interface as GPG: to verify a detached
+	signature, "gpg --verify $file - <$signature" is run, and the
+	program is expected to signal a good signature by exiting with
+	code 0; to generate an ascii-armored detached signature, the
+	standard input of "gpg -bsau $key" is fed with the contents to be
+	signed, and the program is expected to send the result to its
+	standard output.
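++
+A minimal sketch of such a replacement (a hypothetical wrapper that simply
+logs each invocation and delegates to `gpg2`, which already follows this
+calling convention):
++
+-------------
+#!/bin/sh
+# record how we were called, then hand everything through unchanged
+echo "gpg invoked with: $*" >>"$HOME/gpg-calls.log"
+exec gpg2 "$@"
+-------------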
+
gui.commitmsgwidth::
Defines how wide the commit message window is in the
linkgit:git-gui[1]. "75" is the default.
rerere.enabled::
Activate recording of resolved conflicts, so that identical
- conflict hunks can be resolved automatically, should they
- be encountered again. linkgit:git-rerere[1] command is by
- default enabled if you create `rr-cache` directory under
- `$GIT_DIR`, but can be disabled by setting this option to false.
+ conflict hunks can be resolved automatically, should they be
+ encountered again. By default, linkgit:git-rerere[1] is
+ enabled if there is an `rr-cache` directory under the
+ `$GIT_DIR`, e.g. if "rerere" was previously used in the
+ repository.
sendemail.identity::
A configuration identity. When given, causes values in the
--keep::
Pass `-k` flag to 'git mailinfo' (see linkgit:git-mailinfo[1]).
+--keep-non-patch::
+ Pass `-b` flag to 'git mailinfo' (see linkgit:git-mailinfo[1]).
+
--keep-cr::
--no-keep-cr::
With `--keep-cr`, call 'git mailsplit' (see linkgit:git-mailsplit[1])
[-l] [-s] [--no-hardlinks] [-q] [-n] [--bare] [--mirror]
[-o <name>] [-b <name>] [-u <upload-pack>] [--reference <repository>]
[--separate-git-dir <git dir>]
- [--depth <depth>] [--recursive|--recurse-submodules] [--] <repository>
+ [--depth <depth>] [--[no-]single-branch]
+ [--recursive|--recurse-submodules] [--] <repository>
[<directory>]
DESCRIPTION
-b <name>::
Instead of pointing the newly created HEAD to the branch pointed
to by the cloned repository's HEAD, point to `<name>` branch
- instead. In a non-bare repository, this is the branch that will
- be checked out.
+	instead. `--branch` can also take tags, treating them like a
+	detached HEAD. In a non-bare repository, this is the branch
+	that will be checked out.
--upload-pack <upload-pack>::
-u <upload-pack>::
with a long history, and would want to send in fixes
as patches.
+--single-branch::
+	Clone only the history leading to the tip of a single branch,
+	either specified by the `--branch` option or the primary
+	branch that the remote's `HEAD` points at. When creating a shallow
+ clone with the `--depth` option, this is the default, unless
+ `--no-single-branch` is given to fetch the histories near the
+ tips of all branches.
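++
+A minimal sketch (hypothetical URL) that fetches only the history leading
+to the `maint` branch:
++
+------------
+$ git clone --single-branch --branch maint git://example.com/project.git
+------------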
+
--recursive::
--recurse-submodules::
After the clone is created, initialize all submodules within,
progress status even if the standard error stream is not
directed to a terminal.
-It tests SHA1 and general object sanity, and it does full tracking of
-the resulting reachability and everything else. It prints out any
-corruption it finds (missing or bad objects), and if you use the
-'--unreachable' flag it will also print out objects that exist but
-that aren't reachable from any of the specified head nodes.
-
-So for example
-
- git fsck --unreachable HEAD \
- $(git for-each-ref --format="%(objectname)" refs/heads)
+DISCUSSION
+----------
-will do quite a _lot_ of verification on the tree. There are a few
-extra validity tests to be added (make sure that tree objects are
-sorted properly etc), but on the whole if 'git fsck' is happy, you
-do have a valid tree.
+git-fsck tests SHA1 and general object sanity, and it does full tracking
+of the resulting reachability and everything else. It prints out any
+corruption it finds (missing or bad objects), and if you use the
+'--unreachable' flag it will also print out objects that exist but that
+aren't reachable from any of the specified head nodes (or the default
+set, as mentioned above).
Any corrupt objects you will have to find in backups or other archives
(i.e., you can just remove them and do an 'rsync' with some other site in
the hopes that somebody else has the object you have corrupted).
-Of course, "valid tree" doesn't mean that it wasn't generated by some
-evil person, and the end result might be crap. git is a revision
-tracking system, not a quality assurance system ;)
-
Extracted Diagnostics
---------------------
--max-depth <depth>::
For each <pathspec> given on command line, descend at most <depth>
levels of directories. A negative value means no limit.
+	This option is ignored if <pathspec> contains active wildcards.
+	In other words, if "a*" is given and it matches a directory that is
+	literally named "a*" (so the "*" is not acting as a wildcard),
+	--max-depth is still effective.
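++
+For instance (hypothetical path and pattern), to look for "TODO" while
+descending at most one directory level below `lib`:
++
+------------
+$ git grep --max-depth 1 -e TODO -- lib
+------------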
-w::
--word-regexp::
OPTIONS
-------
-k::
- Usually the program 'cleans up' the Subject: header line
- to extract the title line for the commit log message,
- among which (1) remove 'Re:' or 're:', (2) leading
- whitespaces, (3) '[' up to ']', typically '[PATCH]', and
- then prepends "[PATCH] ". This flag forbids this
- munging, and is most useful when used to read back
- 'git format-patch -k' output.
+ Usually the program removes email cruft from the Subject:
+ header line to extract the title line for the commit log
+ message. This option prevents this munging, and is most
+ useful when used to read back 'git format-patch -k' output.
++
+Specifically, the following are removed until none of them remain:
++
+--
+* Leading and trailing whitespace.
+
+* Leading `Re:`, `re:`, and `:`.
+
+* Leading bracketed strings (between `[` and `]`, usually
+ `[PATCH]`).
+--
++
+Finally, runs of whitespace are normalized to a single ASCII space
+character.
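++
+For illustration (hypothetical subject line), the munging that `-k`
+suppresses proceeds like this:
++
+------------
+[PATCH v2] Re: [RFC] frotz: fix nitfol    (original Subject:)
+Re: [RFC] frotz: fix nitfol               (leading bracketed string removed)
+[RFC] frotz: fix nitfol                   (leading "Re:" removed)
+frotz: fix nitfol                         (leading bracketed string removed)
+------------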
-b::
When -k is not in effect, all leading strings bracketed with '['
--- /dev/null
+git-p4(1)
+=========
+
+NAME
+----
+git-p4 - Import from and submit to Perforce repositories
+
+
+SYNOPSIS
+--------
+[verse]
+'git p4 clone' [<sync options>] [<clone options>] <p4 depot path>...
+'git p4 sync' [<sync options>] [<p4 depot path>...]
+'git p4 rebase'
+'git p4 submit' [<submit options>] [<master branch name>]
+
+
+DESCRIPTION
+-----------
+This command provides a way to interact with p4 repositories
+using git.
+
+Create a new git repository from an existing p4 repository using
+'git p4 clone', giving it one or more p4 depot paths. Incorporate
+new commits from p4 changes with 'git p4 sync'. The 'sync' command
+is also used to include new branches from other p4 depot paths.
+Submit git changes back to p4 using 'git p4 submit'. The command
+'git p4 rebase' does a sync plus rebases the current branch onto
+the updated p4 remote branch.
+
+
+EXAMPLE
+-------
+* Create an alias for 'git p4', using the full path to the 'git-p4'
+ script if needed:
++
+------------
+$ git config --global alias.p4 '!git-p4'
+------------
+
+* Clone a repository:
++
+------------
+$ git p4 clone //depot/path/project
+------------
+
+* Do some work in the newly created git repository:
++
+------------
+$ cd project
+$ vi foo.h
+$ git commit -a -m "edited foo.h"
+------------
+
+* Update the git repository with recent changes from p4, rebasing your
+ work on top:
++
+------------
+$ git p4 rebase
+------------
+
+* Submit your commits back to p4:
++
+------------
+$ git p4 submit
+------------
+
+
+COMMANDS
+--------
+
+Clone
+~~~~~
+Generally, 'git p4 clone' is used to create a new git directory
+from an existing p4 repository:
+------------
+$ git p4 clone //depot/path/project
+------------
+This:
+
+1. Creates an empty git repository in a subdirectory called 'project'.
++
+2. Imports the full contents of the head revision from the given p4
+depot path into a single commit in the git branch 'refs/remotes/p4/master'.
++
+3. Creates a local branch, 'master' from this remote and checks it out.
+
+To reproduce the entire p4 history in git, use the '@all' modifier on
+the depot path:
+------------
+$ git p4 clone //depot/path/project@all
+------------
+
+
+Sync
+~~~~
+As development continues in the p4 repository, those changes can
+be included in the git repository using:
+------------
+$ git p4 sync
+------------
+This command finds new changes in p4 and imports them as git commits.
+
+P4 repositories can be added to an existing git repository using
+'git p4 sync' too:
+------------
+$ mkdir repo-git
+$ cd repo-git
+$ git init
+$ git p4 sync //path/in/your/perforce/depot
+------------
+This imports the specified depot into
+'refs/remotes/p4/master' in an existing git repository. The
+'--branch' option can be used to specify a different branch to
+be used for the p4 content.
+
+If a git repository includes branches under 'refs/remotes/origin/p4', these
+will be fetched and consulted first during a 'git p4 sync'. Since
+importing directly from p4 is considerably slower than pulling changes
+from a git remote, this can be useful in a multi-developer environment.
+
+
+Rebase
+~~~~~~
+A common working pattern is to fetch the latest changes from the p4 depot
+and merge them with local uncommitted changes. Often, the p4 repository
+is the ultimate location for all code, thus a rebase workflow makes
+sense. This command does 'git p4 sync' followed by 'git rebase' to move
+local commits on top of updated p4 changes.
+------------
+$ git p4 rebase
+------------
+
+
+Submit
+~~~~~~
+Submitting changes from a git repository back to the p4 repository
+requires a separate p4 client workspace. This should be specified
+using the 'P4CLIENT' environment variable or the git configuration
+variable 'git-p4.client'. The p4 client must exist, but the client root
+will be created and populated if it does not already exist.
+
+To submit all changes that are in the current git branch but not in
+the 'p4/master' branch, use:
+------------
+$ git p4 submit
+------------
+
+To specify a branch other than the current one, use:
+------------
+$ git p4 submit topicbranch
+------------
+
+The upstream reference is generally 'refs/remotes/p4/master', but can
+be overridden using the '--origin=' command-line option.
+
+The p4 changes will be created as the user invoking 'git p4 submit'. The
+'--preserve-user' option will cause ownership to be modified
+according to the author of the git commit. This option requires admin
+privileges in p4, which can be granted using 'p4 protect'.
+
+
+OPTIONS
+-------
+
+General options
+~~~~~~~~~~~~~~~
+All commands except clone accept this option.
+
+--git-dir <dir>::
+ Set the 'GIT_DIR' environment variable. See linkgit:git[1].
+
+Sync options
+~~~~~~~~~~~~
+These options can be used in the initial 'clone' as well as in
+subsequent 'sync' operations.
+
+--branch <branch>::
+ Import changes into given branch. If the branch starts with
+ 'refs/', it will be used as is, otherwise the path 'refs/heads/'
+ will be prepended. The default branch is 'master'. If used
+ with an initial clone, no HEAD will be checked out.
++
+This example imports a new remote "p4/proj2" into an existing
+git repository:
+----
+ $ git init
+ $ git p4 sync --branch=refs/remotes/p4/proj2 //depot/proj2
+----
+
+--detect-branches::
+ Use the branch detection algorithm to find new paths in p4. It is
+ documented below in "BRANCH DETECTION".
+
+--changesfile <file>::
+ Import exactly the p4 change numbers listed in 'file', one per
+ line. Normally, 'git p4' inspects the current p4 repository
+ state and detects the changes it should import.
+
+--silent::
+ Do not print any progress information.
+
+--verbose::
+ Provide more progress information.
+
+--detect-labels::
+ Query p4 for labels associated with the depot paths, and add
+ them as tags in git.
+
+--import-local::
+ By default, p4 branches are stored in 'refs/remotes/p4/',
+ where they will be treated as remote-tracking branches by
+ linkgit:git-branch[1] and other commands. This option instead
+ puts p4 branches in 'refs/heads/p4/'. Note that future
+ sync operations must specify '--import-local' as well so that
+ they can find the p4 branches in refs/heads.
+
+--max-changes <n>::
+ Limit the number of imported changes to 'n'. Useful to
+ limit the amount of history when using the '@all' p4 revision
+ specifier.
+
+--keep-path::
+ The mapping of file names from the p4 depot path to git, by
+ default, involves removing the entire depot path. With this
+ option, the full p4 depot path is retained in git. For example,
+ path '//depot/main/foo/bar.c', when imported from
+ '//depot/main/', becomes 'foo/bar.c'. With '--keep-path', the
+ git path is instead 'depot/main/foo/bar.c'.
+
+--use-client-spec::
+ Use a client spec to find the list of interesting files in p4.
+ See the "CLIENT SPEC" section below.
+
+Clone options
+~~~~~~~~~~~~~
+These options can be used in an initial 'clone', along with the 'sync'
+options described above.
+
+--destination <directory>::
+ Where to create the git repository. If not provided, the last
+ component in the p4 depot path is used to create a new
+ directory.
+
+--bare::
+ Perform a bare clone. See linkgit:git-clone[1].
+
+-/ <path>::
+ Exclude selected depot paths when cloning.
+
+Submit options
+~~~~~~~~~~~~~~
+These options can be used to modify 'git p4 submit' behavior.
+
+--verbose::
+ Provide more progress information.
+
+--origin <commit>::
+ Upstream location from which commits are identified to submit to
+ p4. By default, this is the most recent p4 commit reachable
+ from 'HEAD'.
+
+-M[<n>]::
+ Detect renames. See linkgit:git-diff[1]. Renames will be
+ represented in p4 using explicit 'move' operations. There
+ is no corresponding option to detect copies, but there are
+ variables for both moves and copies.
+
+--preserve-user::
+ Re-author p4 changes before submitting to p4. This option
+ requires p4 admin privileges.
+
+
+DEPOT PATH SYNTAX
+-----------------
+The p4 depot path argument to 'git p4 sync' and 'git p4 clone' can
+be one or more space-separated p4 depot paths, with an optional
+p4 revision specifier on the end:
+
+"//depot/my/project"::
+ Import one commit with all files in the '#head' change under that tree.
+
+"//depot/my/project@all"::
+ Import one commit for each change in the history of that depot path.
+
+"//depot/my/project@1,6"::
+ Import only changes 1 through 6.
+
+"//depot/proj1@all //depot/proj2@all"::
+ Import all changes from both named depot paths into a single
+ repository. Only files below these directories are included.
+ There is not a subdirectory in git for each "proj1" and "proj2".
+ You must use the '--destination' option when specifying more
+ than one depot path. The revision specifier must be specified
+ identically on each depot path. If there are files in the
+ depot paths with the same name, the path with the most recently
+ updated version of the file is the one that appears in git.
+
+See 'p4 help revisions' for the full syntax of p4 revision specifiers.
+
+
+CLIENT SPEC
+-----------
+The p4 client specification is maintained with the 'p4 client' command
+and contains among other fields, a View that specifies how the depot
+is mapped into the client repository. Git-p4 can consult the client
+spec when given the '--use-client-spec' option or useClientSpec
+variable.
+
+The full syntax for a p4 view is documented in 'p4 help views'. Git-p4
+knows only a subset of the view syntax. It understands multi-line
+mappings, overlays with '+', exclusions with '-' and double-quotes
+around whitespace. Of the possible wildcards, git-p4 only handles
+'...', and only when it is at the end of the path. Git-p4 will complain
+if it encounters an unhandled wildcard.
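+
+For illustration (hypothetical depot and client names), a view that git-p4
+can fully understand might look like:
+----
+//depot/proj1/...               //myclient/proj1/...
+-//depot/proj1/old/...          //myclient/proj1/old/...
+"//depot/dir with spaces/..."   "//myclient/dir with spaces/..."
+----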
+
+Bugs in the implementation of overlap mappings exist. If multiple depot
+paths map through overlays to the same location in the repository,
+git-p4 can choose the wrong one. This is hard to solve without
+dedicating a client spec just for git-p4.
+
+The name of the client can be given to git-p4 in multiple ways. The
+variable 'git-p4.client' takes precedence if it exists. Otherwise,
+normal p4 mechanisms of determining the client are used: environment
+variable P4CLIENT, a file referenced by P4CONFIG, or the local host name.
+
+
+BRANCH DETECTION
+----------------
+P4 does not have the same concept of a branch as git. Instead,
+p4 organizes its content as a directory tree, where by convention
+different logical branches are in different locations in the tree.
+The 'p4 branch' command is used to maintain mappings between
+different areas in the tree, and indicate related content. 'git p4'
+can use these mappings to determine branch relationships.
+
+If you have a repository where all the branches of interest exist as
+subdirectories of a single depot path, you can use '--detect-branches'
+when cloning or syncing to have 'git p4' automatically find
+subdirectories in p4, and to generate these as branches in git.
+
+For example, if the P4 repository structure is:
+----
+//depot/main/...
+//depot/branch1/...
+----
+
+And "p4 branch -o branch1" shows a View line that looks like:
+----
+//depot/main/... //depot/branch1/...
+----
+
+Then this 'git p4 clone' command:
+----
+git p4 clone --detect-branches //depot@all
+----
+produces a separate branch in 'refs/remotes/p4/' for //depot/main,
+called 'master', and one for //depot/branch1 called 'depot/branch1'.
+
+However, it is not necessary to create branches in p4 to be able to use
+them like branches. Because it is difficult to infer branch
+relationships automatically, a git configuration setting
+'git-p4.branchList' can be used to explicitly identify branch
+relationships. It is a list of "source:destination" pairs, like a
+simple p4 branch specification, where the "source" and "destination" are
+the path elements in the p4 repository. The example above relied on the
+presence of the p4 branch. Without p4 branches, the same result will
+occur with:
+----
+git config git-p4.branchList main:branch1
+git p4 clone --detect-branches //depot@all
+----
+
+
+PERFORMANCE
+-----------
+The fast-import mechanism used by 'git p4' creates one pack file for
+each invocation of 'git p4 sync'. Normally, git garbage collection
+(linkgit:git-gc[1]) automatically compresses these to fewer pack files,
+but explicit invocation of 'git repack -adf' may improve performance.
+
+
+CONFIGURATION VARIABLES
+-----------------------
+The following config settings can be used to modify 'git p4' behavior.
+They all are in the 'git-p4' section.
+
+General variables
+~~~~~~~~~~~~~~~~~
+git-p4.user::
+ User specified as an option to all p4 commands, with '-u <user>'.
+ The environment variable 'P4USER' can be used instead.
+
+git-p4.password::
+ Password specified as an option to all p4 commands, with
+ '-P <password>'.
+ The environment variable 'P4PASS' can be used instead.
+
+git-p4.port::
+ Port specified as an option to all p4 commands, with
+ '-p <port>'.
+ The environment variable 'P4PORT' can be used instead.
+
+git-p4.host::
+ Host specified as an option to all p4 commands, with
+ '-h <host>'.
+ The environment variable 'P4HOST' can be used instead.
+
+git-p4.client::
+ Client specified as an option to all p4 commands, with
+ '-c <client>', including the client spec.
+
+Clone and sync variables
+~~~~~~~~~~~~~~~~~~~~~~~~
+git-p4.syncFromOrigin::
+ Because importing commits from other git repositories is much faster
+ than importing them from p4, a mechanism exists to find p4 changes
+	first in git remotes. If branches exist under 'refs/remotes/origin/p4',
+ those will be fetched and used when syncing from p4. This
+ variable can be set to 'false' to disable this behavior.
+
+git-p4.branchUser::
+ One phase in branch detection involves looking at p4 branches
+ to find new ones to import. By default, all branches are
+ inspected. This option limits the search to just those owned
+ by the single user named in the variable.
+
+git-p4.branchList::
+ List of branches to be imported when branch detection is
+ enabled. Each entry should be a pair of branch names separated
+ by a colon (:). This example declares that both branchA and
+ branchB were created from main:
+-------------
+git config git-p4.branchList main:branchA
+git config --add git-p4.branchList main:branchB
+-------------
+
+git-p4.useClientSpec::
+ Specify that the p4 client spec should be used to identify p4
+ depot paths of interest. This is equivalent to specifying the
+ option '--use-client-spec'. See the "CLIENT SPEC" section above.
+ This variable is a boolean, not the name of a p4 client.
+
+Submit variables
+~~~~~~~~~~~~~~~~
+git-p4.detectRenames::
+ Detect renames. See linkgit:git-diff[1].
+
+git-p4.detectCopies::
+ Detect copies. See linkgit:git-diff[1].
+
+git-p4.detectCopiesHarder::
+ Detect copies harder. See linkgit:git-diff[1].
+
+git-p4.preserveUser::
+ On submit, re-author changes to reflect the git author,
+ regardless of who invokes 'git p4 submit'.
+
+git-p4.allowMissingP4Users::
+ When 'preserveUser' is true, 'git p4' normally dies if it
+ cannot find an author in the p4 user map. This setting
+ submits the change regardless.
+
+git-p4.skipSubmitEdit::
+ The submit process invokes the editor before each p4 change
+ is submitted. If this setting is true, though, the editing
+ step is skipped.
+
+git-p4.skipSubmitEditCheck::
+ After editing the p4 change message, 'git p4' makes sure that
+ the description really was changed by looking at the file
+ modification time. This option disables that test.
+
+git-p4.allowSubmit::
+ By default, any branch can be used as the source for a 'git p4
+ submit' operation. This configuration variable, if set, permits only
+ the named branches to be used as submit sources. Branch names
+ must be the short names (no "refs/heads/"), and should be
+ separated by commas (","), with no spaces.
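+	For example (hypothetical branch names), to allow submitting only
+	from "master" and "release-1.0":
+-------------
+git config git-p4.allowSubmit "master,release-1.0"
+-------------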
+
+git-p4.skipUserNameCheck::
+ If the user running 'git p4 submit' does not exist in the p4
+ user map, 'git p4' exits. This option can be used to force
+ submission regardless.
+
+
+IMPLEMENTATION DETAILS
+----------------------
+* Changesets from p4 are imported using git fast-import.
+* Cloning or syncing does not require a p4 client; file contents are
+ collected using 'p4 print'.
+* Submitting requires a p4 client, which is not in the same location
+ as the git repository. Patches are applied, one at a time, to
+ this p4 client and submitted from there.
+* Each commit imported by 'git p4' has a line at the end of the log
+ message indicating the p4 depot location and change number. This
+ line is used by later 'git p4 sync' operations to know which p4
+ changes are new.
--prefix=<prefix>/::
Keep the current index contents, and read the contents
- of the named tree-ish under the directory at `<prefix>`. The
- original index file cannot have anything at the path
- `<prefix>` itself, nor anything in the `<prefix>/`
- directory. Note that the `<prefix>/` value must end
- with a slash.
+ of the named tree-ish under the directory at `<prefix>`.
+ The command will refuse to overwrite entries that already
+ existed in the original index file. Note that the `<prefix>/`
+ value must end with a slash.
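++
+For instance (hypothetical tag and directory names), to graft the contents
+of a tag under a new subdirectory of the current index:
++
+------------
+$ git read-tree --prefix=vendor/plugin/ plugin-v1.0
+------------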
--exclude-per-directory=<gitignore>::
When running the command with `-u` and `-m` options, the
----------------
$ git fetch git://.... linus
-$ LT=`cat .git/FETCH_HEAD`
+$ LT=`git rev-parse FETCH_HEAD`
----------------
Your work tree is still based on your HEAD ($JC), but you have
cd_to_toplevel, which is impossible to do if there is no
working tree.
+require_clean_work_tree <action> [<hint>]::
+ checks that the working tree and index associated with the
+ repository have no uncommitted changes to tracked files.
+ Otherwise it emits an error message of the form `Cannot
+ <action>: <reason>. <hint>`, and dies. Example:
++
+----------------
+require_clean_work_tree rebase "Please commit or stash them."
+----------------
+
get_author_ident_from_commit::
outputs code for use with eval to set the GIT_AUTHOR_NAME,
GIT_AUTHOR_EMAIL and GIT_AUTHOR_DATE variables for a given commit.
-d::
--dereference::
- Dereference tags into object IDs as well. They will be shown with "^{}"
+ Dereference tags into object IDs as well. They will be shown with "{caret}{}"
appended.
-s::
--exclude-existing[=<pattern>]::
Make 'git show-ref' act as a filter that reads refs from stdin of the
- form "^(?:<anything>\s)?<refname>(?:{backslash}{caret}\{\})?$"
+ form "`{caret}(?:<anything>\s)?<refname>(?:{backslash}{caret}{})?$`"
and performs the following actions on each:
- (1) strip "^{}" at the end of line if any;
+ (1) strip "{caret}{}" at the end of line if any;
(2) ignore if pattern is provided and does not head-match refname;
(3) warn if refname is not a well-formed refname and skip;
(4) ignore if refname is a ref that exists in the local repository;
<repository> is the URL of the new submodule's origin repository.
This may be either an absolute URL, or (if it begins with ./
or ../), the location relative to the superproject's origin
-repository. If the superproject doesn't have an origin configured
+repository (Please note that to specify a repository 'foo.git'
+which is located right next to a superproject 'bar.git', you'll
+have to use '../foo.git' instead of './foo.git' - as one might expect
+when following the rules for relative URLs - because the evaluation
+of relative URLs in Git is identical to that of relative directories).
+If the superproject doesn't have an origin configured
the superproject is its own authoritative upstream and the current
working directory is used instead.
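++
+For example (hypothetical URLs), relative submodule URLs resolve against
+the superproject's origin the same way relative directories would:
++
+------------
+origin:      https://example.com/git/bar.git
+../foo.git   ->  https://example.com/git/foo.git
+./foo.git    ->  https://example.com/git/bar.git/foo.git
+------------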
+
A GnuPG signed tag object will be created when `-s` or `-u
<key-id>` is used. When `-u <key-id>` is not used, the
committer identity for the current user is used to find the
-GnuPG key for signing.
+GnuPG key for signing. The configuration variable `gpg.program`
+is used to specify a custom GnuPG binary.
+
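+For example, one might point it at `gpg2` (assuming that is the GnuPG
+binary installed on the system):
+
+------------
+$ git config --global gpg.program gpg2
+------------
+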
OPTIONS
-------
-s::
--sign::
- Make a GPG-signed tag, using the default e-mail address's key
+ Make a GPG-signed tag, using the default e-mail address's key.
-u <key-id>::
--local-user=<key-id>::
- Make a GPG-signed tag, using the given key
+ Make a GPG-signed tag, using the given key.
-f::
--force::
branch of the `git.git` repository.
Documentation for older releases are available here:
-* link:v1.7.8/git.html[documentation for release 1.7.8]
+* link:v1.7.9/git.html[documentation for release 1.7.9]
* release notes for
+ link:RelNotes/1.7.9.txt[1.7.9].
+
+* link:v1.7.8.4/git.html[documentation for release 1.7.8.4]
+
+* release notes for
+ link:RelNotes/1.7.8.4.txt[1.7.8.4],
+ link:RelNotes/1.7.8.3.txt[1.7.8.3],
+ link:RelNotes/1.7.8.2.txt[1.7.8.2],
+ link:RelNotes/1.7.8.1.txt[1.7.8.1],
link:RelNotes/1.7.8.txt[1.7.8].
-* link:v1.7.7.5/git.html[documentation for release 1.7.7.5]
+* link:v1.7.7.6/git.html[documentation for release 1.7.7.6]
* release notes for
+ link:RelNotes/1.7.7.6.txt[1.7.7.6],
link:RelNotes/1.7.7.5.txt[1.7.7.5],
link:RelNotes/1.7.7.4.txt[1.7.7.4],
link:RelNotes/1.7.7.3.txt[1.7.7.3],
--- /dev/null
+From: Junio C Hamano <gitster@pobox.com>
+Date: Tue, 17 Jan 2012 13:00:00 -0800
+Subject: Using signed tag in pull requests
+Abstract: Beginning v1.7.9, a contributor can push a signed tag to her
+ publishing repository and ask her integrator to pull it. This assures the
+ integrator that the pulled history is authentic and allows others to
+ later validate it.
+Content-type: text/asciidoc
+
+Using signed tag in pull requests
+=================================
+
+A typical distributed workflow using Git is for a contributor to fork a
+project, build on it, publish the result to her public repository, and ask
+the "upstream" person (often the owner of the project where she forked
+from) to pull from her public repository. Requesting such a "pull" is made
+easy by the `git request-pull` command.
+
+Earlier, a typical pull request may have started like this:
+
+------------
+ The following changes since commit 406da78032179...:
+
+ Froboz 3.2 (2011-09-30 14:20:57 -0700)
+
+ are available in the git repository at:
+
+ example.com:/git/froboz.git for-xyzzy
+------------
+
+followed by a shortlog of the changes and a diffstat.
+
+The request was for a branch name (e.g. `for-xyzzy`) in the public
+repository of the contributor, and even though it stated where the
+contributor forked her work from, the message did not say anything about
+the commit to expect at the tip of the for-xyzzy branch. If the site that
+hosts the public repository of the contributor cannot be fully trusted, it
+was unnecessarily hard to make sure what was pulled by the integrator was
+genuinely what the contributor had produced for the project. Also there
+was no easy way for third-party auditors to later verify the resulting
+history.
+
+Starting from Git release v1.7.9, a contributor can add a signed tag to
+the commit at the tip of the history and ask the integrator to pull that
+signed tag. When the integrator runs `git pull`, the signed tag is
+automatically verified to assure that the history is not tampered with.
+In addition, the resulting merge commit records the content of the signed
+tag, so that other people can verify that the branch merged by the
+integrator was signed by the contributor, without fetching the signed tag
+used to validate the pull request separately and keeping it in the refs
+namespace.
+
+This document describes the workflow between the contributor and the
+integrator, using Git v1.7.9 or later.
+
+
+A contributor or a lieutenant
+-----------------------------
+
+After preparing her work to be pulled, the contributor uses `git tag -s`
+to create a signed tag:
+
+------------
+ $ git checkout work
+ $ ... "git pull" from sublieutenants, "git commit" your own work ...
+ $ git tag -s -m "Completed frotz feature" frotz-for-xyzzy work
+------------
+
+Note that this example uses the `-m` option to create a signed tag with
+just a one-liner message, but this is for illustration purposes only. It
+is advisable to compose a well-written explanation of what the topic does
+to justify why it is worthwhile for the integrator to pull it, as this
+message will eventually become part of the final history after the
+integrator responds to the pull request (as we will see later).
+
+Then she pushes the tag out to her public repository:
+
+------------
+ $ git push example.com:/git/froboz.git/ +frotz-for-xyzzy
+------------
+
+There is no need to push the `work` branch or anything else.
+
+Note that the above command line used a plus sign at the beginning of
+`+frotz-for-xyzzy` to allow forcing the update of a tag, as the same
+contributor may want to reuse a signed tag with the same name after the
+previous pull request has already been responded to.
+
+The contributor then prepares a message to request a "pull":
+
+------------
+ $ git request-pull v3.2 example.com:/git/froboz.git/ frotz-for-xyzzy >msg.txt
+------------
+
+The arguments are:
+
+. the version of the integrator's commit the contributor based her work on;
+. the URL of the repository, to which the contributor has pushed what she
+ wants to get pulled; and
+. the name of the tag the contributor wants to get pulled (earlier, she could
+ write only a branch name here).
+
+The resulting msg.txt file begins like so:
+
+------------
+ The following changes since commit 406da78032179...:
+
+ Froboz 3.2 (2011-09-30 14:20:57 -0700)
+
+ are available in the git repository at:
+
+ example.com:/git/froboz.git frotz-for-xyzzy
+
+ for you to fetch changes up to 703f05ad5835c...:
+
+ Add tests and documentation for frotz (2011-12-02 10:02:52 -0800)
+
+ -----------------------------------------------
+ Completed frotz feature
+ -----------------------------------------------
+------------
+
+followed by a shortlog of the changes and a diffstat. Comparing this with
+the earlier illustration of the output from the traditional `git request-pull`
+command, the reader should notice that:
+
+. The tip commit to expect is shown to the integrator; and
+. The signed tag message is shown prominently between the dashed lines
+ before the shortlog.
+
+The latter is why the contributor would want to justify why pulling her
+work is worthwhile when creating the signed tag. The contributor then
+opens her favorite MUA, reads msg.txt, edits and sends it to her upstream
+integrator.
+
+
+Integrator
+----------
+
+After receiving such a pull request message, the integrator fetches and
+integrates the tag named in the request, with:
+
+------------
+ $ git pull example.com:/git/froboz.git/ frotz-for-xyzzy
+------------
+
+This operation will always open an editor to allow the integrator to fine
+tune the commit log message when merging a signed tag. Also, pulling a
+signed tag will always create a merge commit even when the integrator does
+not have any new commit since the contributor's work forked (i.e. 'fast
+forward'), so that the integrator can properly explain what the merge is
+about and why it was made.
+
+In the editor, the integrator will see something like this:
+
+------------
+ Merge tag 'frotz-for-xyzzy' of example.com:/git/froboz.git/
+
+ Completed frotz feature
+ # gpg: Signature made Fri 02 Dec 2011 10:03:01 AM PST using RSA key ID 96AFE6CB
+ # gpg: Good signature from "Con Tributor <nitfol@example.com>"
+------------
+
+Notice that the message recorded in the signed tag "Completed frotz
+feature" appears here, and again that is why it is important for the
+contributor to explain her work well when creating the signed tag.
+
+As usual, the lines commented with `#` are stripped out. The resulting
+commit records the signed tag used for this validation in a hidden field
+so that it can later be used by others to audit the history. There is no
+need for the integrator to keep a separate copy of the tag in his
+repository (i.e. `git tag -l` won't list the `frotz-for-xyzzy` tag in the
+above example), and there is no need to publish the tag to his public
+repository, either.
+
+After the integrator responds to the pull request and her work becomes
+part of the permanent history, the contributor can remove the tag from
+her public repository, if she chooses, in order to keep the tag namespace
+of her public repository clean, with:
+
+------------
+ $ git push example.com:/git/froboz.git :frotz-for-xyzzy
+------------
+
+
+Auditors
+--------
+
+The `--show-signature` option can be given to `git log` or `git show` and
+shows the verification status of the embedded signed tag in merge commits
+created when the integrator responded to a pull request of a signed tag.
+
+A typical output from `git show --show-signature` may look like this:
+
+------------
+ $ git show --show-signature
+ commit 02306ef6a3498a39118aef9df7975bdb50091585
+ merged tag 'frotz-for-xyzzy'
+ gpg: Signature made Fri 06 Jan 2012 12:41:49 PM PST using RSA key ID 96AFE6CB
+ gpg: Good signature from "Con Tributor <nitfol@example.com>"
+ Merge: 406da78 703f05a
+ Author: Inte Grator <xyzzy@example.com>
+ Date: Tue Jan 17 13:49:41 2012 -0800
+
+ Merge tag 'frotz-for-xyzzy' of example.com:/git/froboz.git/
+
+ Completed frotz feature
+
+ * tag 'frotz-for-xyzzy' (100 commits)
+ Add tests and documentation for frotz
+ ...
+------------
+
+There is no need for the auditor to explicitly fetch the contributor's
+signature, or to even be aware of what tag(s) the contributor and integrator
+used to communicate the signature. All the required information is recorded
+as part of the merge commit.
- '%N': commit notes
- '%gD': reflog selector, e.g., `refs/stash@\{1\}`
- '%gd': shortened reflog selector, e.g., `stash@\{1\}`
+- '%gn': reflog identity name
+- '%gN': reflog identity name (respecting .mailmap, see linkgit:git-shortlog[1] or linkgit:git-blame[1])
+- '%ge': reflog identity email
+- '%gE': reflog identity email (respecting .mailmap, see linkgit:git-shortlog[1] or linkgit:git-blame[1])
- '%gs': reflog subject
- '%Cred': switch color to red
- '%Cgreen': switch color to green
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v1.7.8.GIT
+DEF_VER=v1.7.9.GIT
LF='
'
- "Perl" version 5.8 or later is needed to use some of the
features (e.g. preparing a partial commit using "git add -i/-p",
interacting with svn repositories with "git svn"). If you can
- live without these, use NO_PERL.
+ live without these, use NO_PERL. Note that recent releases of
+	Redhat/Fedora are reported to ship their Perl binary package with some
+ core modules stripped away (see http://lwn.net/Articles/477234/),
+ so you might need to install additional packages other than Perl
+ itself, e.g. Time::HiRes.
- "openssl" library is used by git-imap-send to use IMAP over SSL.
If you don't need it, use NO_OPENSSL.
#
# Define NO_REGEX if you have no or inferior regex support in your C library.
#
+# Define HAVE_DEV_TTY if your system can open /dev/tty to interact with the
+# user.
+#
# Define GETTEXT_POISON if you are debugging the choice of strings marked
# for translation. In a GETTEXT_POISON build, you can turn all strings marked
# for translation into gibberish by setting the GIT_GETTEXT_POISON variable
LIB_H += compat/cygwin.h
LIB_H += compat/mingw.h
LIB_H += compat/obstack.h
+LIB_H += compat/terminal.h
LIB_H += compat/win32/pthread.h
LIB_H += compat/win32/syslog.h
LIB_H += compat/win32/poll.h
LIB_H += patch-ids.h
LIB_H += pkt-line.h
LIB_H += progress.h
+LIB_H += prompt.h
LIB_H += quote.h
LIB_H += reflog-walk.h
LIB_H += refs.h
LIB_OBJS += combine-diff.o
LIB_OBJS += commit.o
LIB_OBJS += compat/obstack.o
+LIB_OBJS += compat/terminal.o
LIB_OBJS += config.o
LIB_OBJS += connect.o
LIB_OBJS += connected.o
LIB_OBJS += preload-index.o
LIB_OBJS += pretty.o
LIB_OBJS += progress.o
+LIB_OBJS += prompt.o
LIB_OBJS += quote.o
LIB_OBJS += reachable.o
LIB_OBJS += read-cache.o
NO_MKSTEMPS = YesPlease
HAVE_PATHS_H = YesPlease
LIBC_CONTAINS_LIBINTL = YesPlease
+ HAVE_DEV_TTY = YesPlease
endif
ifeq ($(uname_S),GNU/kFreeBSD)
NO_STRLCPY = YesPlease
endif
NO_MEMMEM = YesPlease
USE_ST_TIMESPEC = YesPlease
+ HAVE_DEV_TTY = YesPlease
endif
ifeq ($(uname_S),SunOS)
NEEDS_SOCKET = YesPlease
BASIC_CFLAGS += -DHAVE_LIBCHARSET_H
endif
+ifdef HAVE_DEV_TTY
+ BASIC_CFLAGS += -DHAVE_DEV_TTY
+endif
+
ifdef DIR_HAS_BSD_GROUP_SEMANTICS
COMPAT_CFLAGS += -DDIR_HAS_BSD_GROUP_SEMANTICS
endif
-Documentation/RelNotes/1.7.9.txt
\ No newline at end of file
+Documentation/RelNotes/1.7.10.txt
\ No newline at end of file
void advise(const char *advice, ...)
{
+ struct strbuf buf = STRBUF_INIT;
va_list params;
+ const char *cp, *np;
va_start(params, advice);
- vreportf("hint: ", advice, params);
+	strbuf_vaddf(&buf, advice, params);
va_end(params);
+
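+	/* emit the formatted message one line at a time, each prefixed with "hint:" */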
+ for (cp = buf.buf; *cp; cp = np) {
+ np = strchrnul(cp, '\n');
+ fprintf(stderr, _("hint: %.*s\n"), (int)(np - cp), cp);
+ if (*np)
+ np++;
+ }
+ strbuf_release(&buf);
}
int git_default_advice_config(const char *var, const char *value)
int error_resolve_conflict(const char *me)
{
error("'%s' is not possible because you have unmerged files.", me);
- if (advice_resolve_conflict) {
+ if (advice_resolve_conflict)
/*
* Message used both when 'git commit' fails and when
* other commands doing a merge do.
*/
- advise("Fix them up in the work tree,");
- advise("and then use 'git add/rm <file>' as");
- advise("appropriate to mark resolution and make a commit,");
- advise("or use 'git commit -a'.");
- }
+ advise(_("Fix them up in the work tree,\n"
+ "and then use 'git add/rm <file>' as\n"
+ "appropriate to mark resolution and make a commit,\n"
+ "or use 'git commit -a'."));
return -1;
}
error_resolve_conflict(me);
die("Exiting because of an unresolved conflict.");
}
+
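+/*
+ * Print guidance for a user who has just checked out
+ * a detached HEAD at new_name.
+ */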
+void detach_advice(const char *new_name)
+{
+ const char fmt[] =
+ "Note: checking out '%s'.\n\n"
+ "You are in 'detached HEAD' state. You can look around, make experimental\n"
+ "changes and commit them, and you can discard any commits you make in this\n"
+ "state without impacting any branches by performing another checkout.\n\n"
+ "If you want to create a new branch to retain commits you create, you may\n"
+ "do so (now or later) by using -b with the checkout command again. Example:\n\n"
+ " git checkout -b new_branch_name\n\n";
+
+ fprintf(stderr, fmt, new_name);
+}
void advise(const char *advice, ...);
int error_resolve_conflict(const char *me);
extern void NORETURN die_resolve_conflict(const char *me);
+void detach_advice(const char *new_name);
#endif /* ADVICE_H */
/* Remotes are only allowed to fetch actual refs */
if (remote) {
char *ref = NULL;
- if (!dwim_ref(name, strlen(name), sha1, &ref))
- die("no such ref: %s", name);
+ const char *refname, *colon = NULL;
+
+ colon = strchr(name, ':');
+ if (colon)
+ refname = xstrndup(name, colon - name);
+ else
+ refname = name;
+
+ if (!dwim_ref(refname, strlen(refname), sha1, &ref))
+ die("no such ref: %s", refname);
+ if (refname != name)
+ free((void *)refname);
free(ref);
}
- else {
- if (get_sha1(name, sha1))
- die("Not a valid object name");
- }
+
+ if (get_sha1(name, sha1))
+ die("Not a valid object name");
commit = lookup_commit_reference_gently(sha1, 1);
if (commit) {
}
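
When the request comes from a remote client, the hunk above resolves only the part of the argument before the first ':' as a ref; the remainder is the path inside the tree. A standalone sketch of that split (ref_part() is illustrative, not git's API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a newly allocated copy of everything before the first ':'. */
static char *ref_part(const char *name)
{
	const char *colon = strchr(name, ':');
	size_t n = colon ? (size_t)(colon - name) : strlen(name);
	char *ref = malloc(n + 1);

	if (!ref)
		abort();
	memcpy(ref, name, n);
	ref[n] = '\0';
	return ref;
}

int main(void)
{
	char *ref = ref_part("v1.7.9:Documentation");

	printf("%s\n", ref);	/* "v1.7.9" - only this part goes to the ref lookup */
	free(ref);
	return 0;
}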
free(a);
}
+ free(e->attrs);
free(e);
}
static void bootstrap_attr_stack(void)
{
- if (!attr_stack) {
- struct attr_stack *elem;
+ struct attr_stack *elem;
- elem = read_attr_from_array(builtin_attr);
- elem->origin = NULL;
- elem->prev = attr_stack;
- attr_stack = elem;
+ if (attr_stack)
+ return;
- if (git_attr_system()) {
- elem = read_attr_from_file(git_etc_gitattributes(), 1);
- if (elem) {
- elem->origin = NULL;
- elem->prev = attr_stack;
- attr_stack = elem;
- }
- }
+ elem = read_attr_from_array(builtin_attr);
+ elem->origin = NULL;
+ elem->prev = attr_stack;
+ attr_stack = elem;
- if (git_attributes_file) {
- elem = read_attr_from_file(git_attributes_file, 1);
- if (elem) {
- elem->origin = NULL;
- elem->prev = attr_stack;
- attr_stack = elem;
- }
+ if (git_attr_system()) {
+ elem = read_attr_from_file(git_etc_gitattributes(), 1);
+ if (elem) {
+ elem->origin = NULL;
+ elem->prev = attr_stack;
+ attr_stack = elem;
}
+ }
- if (!is_bare_repository() || direction == GIT_ATTR_INDEX) {
- elem = read_attr(GITATTRIBUTES_FILE, 1);
- elem->origin = xstrdup("");
+ if (git_attributes_file) {
+ elem = read_attr_from_file(git_attributes_file, 1);
+ if (elem) {
+ elem->origin = NULL;
elem->prev = attr_stack;
attr_stack = elem;
- debug_push(elem);
}
+ }
- elem = read_attr_from_file(git_path(INFOATTRIBUTES_FILE), 1);
- if (!elem)
- elem = xcalloc(1, sizeof(*elem));
- elem->origin = NULL;
+ if (!is_bare_repository() || direction == GIT_ATTR_INDEX) {
+ elem = read_attr(GITATTRIBUTES_FILE, 1);
+ elem->origin = xstrdup("");
elem->prev = attr_stack;
attr_stack = elem;
+ debug_push(elem);
}
+
+ elem = read_attr_from_file(git_path(INFOATTRIBUTES_FILE), 1);
+ if (!elem)
+ elem = xcalloc(1, sizeof(*elem));
+ elem->origin = NULL;
+ elem->prev = attr_stack;
+ attr_stack = elem;
}
static void prepare_attr_stack(const char *path)
/*
* Pop the ones from directories that are not the prefix of
- * the path we are checking.
+ * the path we are checking. Break out of the loop when we see
+ * the root one (whose origin is an empty string "") or the builtin
+ * one (whose origin is NULL) without popping it.
*/
- while (attr_stack && attr_stack->origin) {
+ while (attr_stack->origin) {
int namelen = strlen(attr_stack->origin);
elem = attr_stack;
if (namelen <= dirlen &&
- !strncmp(elem->origin, path, namelen))
+ !strncmp(elem->origin, path, namelen) &&
+ (!namelen || path[namelen] == '/'))
break;
debug_pop(elem);
* Read from parent directories and push them down
*/
if (!is_bare_repository() || direction == GIT_ATTR_INDEX) {
+ /*
+ * bootstrap_attr_stack() should have added, and the
+ * above loop should have stopped before popping, the
+ * root element whose attr_stack->origin is set to an
+ * empty string.
+ */
struct strbuf pathbuf = STRBUF_INIT;
+ assert(attr_stack->origin);
while (1) {
len = strlen(attr_stack->origin);
if (dirlen <= len)
strbuf_release(&sb);
}
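
The boundary test added to the popping loop above is what keeps attributes read for directory "a/b" from being reused for paths under "a/bc" (the bug called out in the v1.7.6.6 notes). A standalone sketch of just that test, simplified to compare against the full path rather than its directory part (dir_contains() is illustrative, not git's API):

#include <stdio.h>
#include <string.h>

/* Does "dir" contain "path"?  The empty dir (the root) contains everything;
 * otherwise the match must end exactly at a '/' boundary in path. */
static int dir_contains(const char *dir, const char *path)
{
	size_t namelen = strlen(dir);

	return namelen <= strlen(path) &&
	       !strncmp(dir, path, namelen) &&
	       (!namelen || path[namelen] == '/');
}

int main(void)
{
	printf("%d\n", dir_contains("a/b", "a/b/file"));  /* 1 */
	printf("%d\n", dir_contains("a/b", "a/bc/file")); /* 0 - the fixed case */
	printf("%d\n", dir_contains("", "anything"));     /* 1 - root matches all */
	return 0;
}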
-static void detach_advice(const char *old_path, const char *new_name)
-{
- const char fmt[] =
- "Note: checking out '%s'.\n\n"
- "You are in 'detached HEAD' state. You can look around, make experimental\n"
- "changes and commit them, and you can discard any commits you make in this\n"
- "state without impacting any branches by performing another checkout.\n\n"
- "If you want to create a new branch to retain commits you create, you may\n"
- "do so (now or later) by using -b with the checkout command again. Example:\n\n"
- " git checkout -b new_branch_name\n\n";
-
- fprintf(stderr, fmt, new_name);
-}
-
static void update_refs_for_switch(struct checkout_opts *opts,
struct branch_info *old,
struct branch_info *new)
REF_NODEREF, DIE_ON_ERR);
if (!opts->quiet) {
if (old->path && advice_detached_head)
- detach_advice(old->path, new->name);
+ detach_advice(new->name);
describe_detached_head(_("HEAD is now at"), new->commit);
}
} else if (new->path) { /* Switch branches. */
NULL
};
-static int option_no_checkout, option_bare, option_mirror;
+static int option_no_checkout, option_bare, option_mirror, option_single_branch = -1;
static int option_local, option_no_hardlinks, option_shared, option_recursive;
static char *option_template, *option_depth;
static char *option_origin = NULL;
"directory from which templates will be used"),
OPT_CALLBACK(0 , "reference", &option_reference, "repo",
"reference repository", &opt_parse_reference),
- OPT_STRING('o', "origin", &option_origin, "branch",
- "use <branch> instead of 'origin' to track upstream"),
+ OPT_STRING('o', "origin", &option_origin, "name",
+ "use <name> instead of 'origin' to track upstream"),
OPT_STRING('b', "branch", &option_branch, "branch",
"checkout <branch> instead of the remote's HEAD"),
OPT_STRING('u', "upload-pack", &option_upload_pack, "path",
"path to git-upload-pack on the remote"),
OPT_STRING(0, "depth", &option_depth, "depth",
"create a shallow clone of that depth"),
+ OPT_BOOL(0, "single-branch", &option_single_branch,
+ "clone only one branch, HEAD or --branch"),
OPT_STRING(0, "separate-git-dir", &real_git_dir, "gitdir",
"separate git dir from working tree"),
OPT_STRING_LIST('c', "config", &option_config, "key=value",
closedir(dir);
}
-static const struct ref *clone_local(const char *src_repo,
- const char *dest_repo)
+static void clone_local(const char *src_repo, const char *dest_repo)
{
- const struct ref *ret;
- struct remote *remote;
- struct transport *transport;
-
if (option_shared) {
struct strbuf alt = STRBUF_INIT;
strbuf_addf(&alt, "%s/objects", src_repo);
strbuf_release(&dest);
}
- remote = remote_get(src_repo);
- transport = transport_get(remote, src_repo);
- ret = transport_get_remote_refs(transport);
- transport_disconnect(transport);
if (0 <= option_verbosity)
printf(_("done.\n"));
- return ret;
}
static const char *junk_work_tree;
raise(signo);
}
+static struct ref *find_remote_branch(const struct ref *refs, const char *branch)
+{
+ struct ref *ref;
+ struct strbuf head = STRBUF_INIT;
+ strbuf_addstr(&head, "refs/heads/");
+ strbuf_addstr(&head, branch);
+ ref = find_ref_by_name(refs, head.buf);
+ strbuf_release(&head);
+
+ if (ref)
+ return ref;
+
+ strbuf_addstr(&head, "refs/tags/");
+ strbuf_addstr(&head, branch);
+ ref = find_ref_by_name(refs, head.buf);
+ strbuf_release(&head);
+
+ return ref;
+}
+
static struct ref *wanted_peer_refs(const struct ref *refs,
struct refspec *refspec)
{
struct ref *local_refs = head;
struct ref **tail = head ? &head->next : &local_refs;
- get_fetch_map(refs, refspec, &tail, 0);
- if (!option_mirror)
+ if (option_single_branch) {
+ struct ref *remote_head = NULL;
+
+ if (!option_branch)
+ remote_head = guess_remote_head(head, refs, 0);
+ else
+ remote_head = find_remote_branch(refs, option_branch);
+
+ if (!remote_head && option_branch)
+ warning(_("Could not find remote branch %s to clone."),
+ option_branch);
+ else {
+ get_fetch_map(remote_head, refspec, &tail, 0);
+
+ /* if --branch=tag, pull the requested tag explicitly */
+ get_fetch_map(remote_head, tag_refspec, &tail, 0);
+ }
+ } else
+ get_fetch_map(refs, refspec, &tail, 0);
+
+ if (!option_mirror && !option_single_branch)
get_fetch_map(refs, tag_refspec, &tail, 0);
return local_refs;
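
With --single-branch, wanted_peer_refs() above maps only one remote ref instead of everything under refs/heads/, and find_remote_branch() tries refs/heads/<name> before falling back to refs/tags/<name>. A standalone sketch of that selection order (pick_single() and the string array stand in for the real ref list; they are not git's API):

#include <stdio.h>
#include <string.h>

/* Try refs/heads/<branch> first, then refs/tags/<branch>. */
static const char *pick_single(const char **refs, int n, const char *branch)
{
	static const char *prefixes[] = { "refs/heads/", "refs/tags/" };
	char want[256];
	int p, i;

	for (p = 0; p < 2; p++) {
		snprintf(want, sizeof(want), "%s%s", prefixes[p], branch);
		for (i = 0; i < n; i++)
			if (!strcmp(refs[i], want))
				return refs[i];
	}
	return NULL;	/* caller warns that the branch was not found */
}

int main(void)
{
	const char *refs[] = { "refs/heads/master", "refs/tags/v1.0", "refs/heads/next" };

	printf("%s\n", pick_single(refs, 3, "next"));	/* refs/heads/next */
	printf("%s\n", pick_single(refs, 3, "v1.0"));	/* refs/tags/v1.0 */
	return 0;
}

When the chosen ref turns out to be a tag, update_head() later in this patch detaches HEAD instead of creating a local branch.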
for (r = local_refs; r; r = r->next) {
if (!r->peer_ref)
continue;
- add_extra_ref(r->peer_ref->name, r->old_sha1, 0);
+ add_packed_ref(r->peer_ref->name, r->old_sha1);
}
pack_refs(PACK_REFS_ALL);
- clear_extra_refs();
+}
+
+static void write_followtags(const struct ref *refs, const char *msg)
+{
+ const struct ref *ref;
+ for (ref = refs; ref; ref = ref->next) {
+ if (prefixcmp(ref->name, "refs/tags/"))
+ continue;
+ if (!suffixcmp(ref->name, "^{}"))
+ continue;
+ if (!has_sha1_file(ref->old_sha1))
+ continue;
+ update_ref(msg, ref->name, ref->old_sha1,
+ NULL, 0, DIE_ON_ERR);
+ }
+}
+
+static void update_remote_refs(const struct ref *refs,
+ const struct ref *mapped_refs,
+ const struct ref *remote_head_points_at,
+ const char *branch_top,
+ const char *msg)
+{
+ if (refs) {
+ clear_extra_refs();
+ write_remote_refs(mapped_refs);
+ if (option_single_branch)
+ write_followtags(refs, msg);
+ }
+
+ if (remote_head_points_at && !option_bare) {
+ struct strbuf head_ref = STRBUF_INIT;
+ strbuf_addstr(&head_ref, branch_top);
+ strbuf_addstr(&head_ref, "HEAD");
+ create_symref(head_ref.buf,
+ remote_head_points_at->peer_ref->name,
+ msg);
+ }
+}
+
+static void update_head(const struct ref *our, const struct ref *remote,
+ const char *msg)
+{
+ if (our && !prefixcmp(our->name, "refs/heads/")) {
+ /* Local default branch link */
+ create_symref("HEAD", our->name, NULL);
+ if (!option_bare) {
+ const char *head = skip_prefix(our->name, "refs/heads/");
+ update_ref(msg, "HEAD", our->old_sha1, NULL, 0, DIE_ON_ERR);
+ install_branch_config(0, head, option_origin, our->name);
+ }
+ } else if (our) {
+ struct commit *c = lookup_commit_reference(our->old_sha1);
+ /* --branch specifies a non-branch (i.e. tags), detach HEAD */
+ update_ref(msg, "HEAD", c->object.sha1,
+ NULL, REF_NODEREF, DIE_ON_ERR);
+ } else if (remote) {
+ /*
+ * We know remote HEAD points to a non-branch, or
+ * HEAD points to a branch but we don't know which one.
+ * Detach HEAD in all these cases.
+ */
+ update_ref(msg, "HEAD", remote->old_sha1,
+ NULL, REF_NODEREF, DIE_ON_ERR);
+ }
+}
+
+static int checkout(void)
+{
+ unsigned char sha1[20];
+ char *head;
+ struct lock_file *lock_file;
+ struct unpack_trees_options opts;
+ struct tree *tree;
+ struct tree_desc t;
+ int err = 0, fd;
+
+ if (option_no_checkout)
+ return 0;
+
+ head = resolve_refdup("HEAD", sha1, 1, NULL);
+ if (!head) {
+ warning(_("remote HEAD refers to nonexistent ref, "
+ "unable to checkout.\n"));
+ return 0;
+ }
+ if (!strcmp(head, "HEAD")) {
+ if (advice_detached_head)
+ detach_advice(sha1_to_hex(sha1));
+ } else {
+ if (prefixcmp(head, "refs/heads/"))
+ die(_("HEAD not found below refs/heads!"));
+ }
+ free(head);
+
+ /* We need to be in the new work tree for the checkout */
+ setup_work_tree();
+
+ lock_file = xcalloc(1, sizeof(struct lock_file));
+ fd = hold_locked_index(lock_file, 1);
+
+ memset(&opts, 0, sizeof opts);
+ opts.update = 1;
+ opts.merge = 1;
+ opts.fn = oneway_merge;
+ opts.verbose_update = (option_verbosity > 0);
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
+
+ tree = parse_tree_indirect(sha1);
+ parse_tree(tree);
+ init_tree_desc(&t, tree->buffer, tree->size);
+ unpack_trees(1, &t, &opts);
+
+ if (write_cache(fd, active_cache, active_nr) ||
+ commit_locked_index(lock_file))
+ die(_("unable to write new index file"));
+
+ err |= run_hook(NULL, "post-checkout", sha1_to_hex(null_sha1),
+ sha1_to_hex(sha1), "1", NULL);
+
+ if (!err && option_recursive)
+ err = run_command_v_opt(argv_submodule, RUN_GIT_CMD);
+
+ return err;
}
static int write_one_config(const char *key, const char *value, void *data)
const struct ref *remote_head_points_at;
const struct ref *our_head_points_at;
struct ref *mapped_refs;
+ const struct ref *ref;
struct strbuf key = STRBUF_INIT, value = STRBUF_INIT;
struct strbuf branch_top = STRBUF_INIT, reflog_msg = STRBUF_INIT;
struct transport *transport = NULL;
- char *src_ref_prefix = "refs/heads/";
- int err = 0;
+ const char *src_ref_prefix = "refs/heads/";
+ struct remote *remote;
+ int err = 0, complete_refs_before_fetch = 1;
struct refspec *refspec;
const char *fetch_pattern;
usage_msg_opt(_("You must specify a repository to clone."),
builtin_clone_usage, builtin_clone_options);
+ if (option_single_branch == -1)
+ option_single_branch = option_depth ? 1 : 0;
+
if (option_mirror)
option_bare = 1;
strbuf_reset(&value);
- if (is_local) {
- refs = clone_local(path, git_dir);
- mapped_refs = wanted_peer_refs(refs, refspec);
- } else {
- struct remote *remote = remote_get(option_origin);
- transport = transport_get(remote, remote->url[0]);
+ remote = remote_get(option_origin);
+ transport = transport_get(remote, remote->url[0]);
+ if (!is_local) {
if (!transport->get_refs_list || !transport->fetch)
die(_("Don't know how to clone %s"), transport->url);
if (option_depth)
transport_set_option(transport, TRANS_OPT_DEPTH,
option_depth);
+ if (option_single_branch)
+ transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, "1");
transport_set_verbosity(transport, option_verbosity, option_progress);
if (option_upload_pack)
transport_set_option(transport, TRANS_OPT_UPLOADPACK,
option_upload_pack);
-
- refs = transport_get_remote_refs(transport);
- if (refs) {
- mapped_refs = wanted_peer_refs(refs, refspec);
- transport_fetch_refs(transport, mapped_refs);
- }
}
- if (refs) {
- clear_extra_refs();
+ refs = transport_get_remote_refs(transport);
+ mapped_refs = refs ? wanted_peer_refs(refs, refspec) : NULL;
- write_remote_refs(mapped_refs);
+ /*
+ * transport_get_remote_refs() may return refs with null sha-1
+ * in mapped_refs (see struct transport->get_refs_list
+	 * comment). In that case we need to fetch it early because
+	 * the remote_head code below relies on it.
+	 *
+	 * For normal clones, transport_get_remote_refs() should
+	 * return a reliable ref set, so we can delay cloning until
+	 * after the remote HEAD check.
+ */
+ for (ref = refs; ref; ref = ref->next)
+ if (is_null_sha1(ref->old_sha1)) {
+ complete_refs_before_fetch = 0;
+ break;
+ }
+ if (!is_local && !complete_refs_before_fetch && refs)
+ transport_fetch_refs(transport, mapped_refs);
+
+ if (refs) {
remote_head = find_ref_by_name(refs, "HEAD");
remote_head_points_at =
guess_remote_head(remote_head, mapped_refs, 0);
if (option_branch) {
- struct strbuf head = STRBUF_INIT;
- strbuf_addstr(&head, src_ref_prefix);
- strbuf_addstr(&head, option_branch);
our_head_points_at =
- find_ref_by_name(mapped_refs, head.buf);
- strbuf_release(&head);
-
- if (!our_head_points_at) {
- warning(_("Remote branch %s not found in "
- "upstream %s, using HEAD instead"),
- option_branch, option_origin);
- our_head_points_at = remote_head_points_at;
- }
+ find_remote_branch(mapped_refs, option_branch);
+
+ if (!our_head_points_at)
+ die(_("Remote branch %s not found in upstream %s"),
+ option_branch, option_origin);
}
else
our_head_points_at = remote_head_points_at;
"refs/heads/master");
}
- if (remote_head_points_at && !option_bare) {
- struct strbuf head_ref = STRBUF_INIT;
- strbuf_addstr(&head_ref, branch_top.buf);
- strbuf_addstr(&head_ref, "HEAD");
- create_symref(head_ref.buf,
- remote_head_points_at->peer_ref->name,
- reflog_msg.buf);
- }
+ if (is_local)
+ clone_local(path, git_dir);
+ else if (refs && complete_refs_before_fetch)
+ transport_fetch_refs(transport, mapped_refs);
- if (our_head_points_at) {
- /* Local default branch link */
- create_symref("HEAD", our_head_points_at->name, NULL);
- if (!option_bare) {
- const char *head = skip_prefix(our_head_points_at->name,
- "refs/heads/");
- update_ref(reflog_msg.buf, "HEAD",
- our_head_points_at->old_sha1,
- NULL, 0, DIE_ON_ERR);
- install_branch_config(0, head, option_origin,
- our_head_points_at->name);
- }
- } else if (remote_head) {
- /* Source had detached HEAD pointing somewhere. */
- if (!option_bare) {
- update_ref(reflog_msg.buf, "HEAD",
- remote_head->old_sha1,
- NULL, REF_NODEREF, DIE_ON_ERR);
- our_head_points_at = remote_head;
- }
- } else {
- /* Nothing to checkout out */
- if (!option_no_checkout)
- warning(_("remote HEAD refers to nonexistent ref, "
- "unable to checkout.\n"));
- option_no_checkout = 1;
- }
+ update_remote_refs(refs, mapped_refs, remote_head_points_at,
+ branch_top.buf, reflog_msg.buf);
- if (transport) {
- transport_unlock_pack(transport);
- transport_disconnect(transport);
- }
+ update_head(our_head_points_at, remote_head, reflog_msg.buf);
- if (!option_no_checkout) {
- struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
- struct unpack_trees_options opts;
- struct tree *tree;
- struct tree_desc t;
- int fd;
-
- /* We need to be in the new work tree for the checkout */
- setup_work_tree();
-
- fd = hold_locked_index(lock_file, 1);
-
- memset(&opts, 0, sizeof opts);
- opts.update = 1;
- opts.merge = 1;
- opts.fn = oneway_merge;
- opts.verbose_update = (option_verbosity > 0);
- opts.src_index = &the_index;
- opts.dst_index = &the_index;
-
- tree = parse_tree_indirect(our_head_points_at->old_sha1);
- parse_tree(tree);
- init_tree_desc(&t, tree->buffer, tree->size);
- unpack_trees(1, &t, &opts);
-
- if (write_cache(fd, active_cache, active_nr) ||
- commit_locked_index(lock_file))
- die(_("unable to write new index file"));
-
- err |= run_hook(NULL, "post-checkout", sha1_to_hex(null_sha1),
- sha1_to_hex(our_head_points_at->old_sha1), "1",
- NULL);
-
- if (!err && option_recursive)
- err = run_command_v_opt(argv_submodule, RUN_GIT_CMD);
- }
+ transport_unlock_pack(transport);
+ transport_disconnect(transport);
+
+ err = checkout();
strbuf_release(&reflog_msg);
strbuf_release(&branch_top);
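
The loop over the advertised refs near the top of this clone hunk decides whether the transfer has to happen before the remote-HEAD logic: if any ref came back with a null object id, the names are known but the ids are not, so the fetch cannot be delayed. A standalone sketch of that decision (struct fake_ref and all_ids_known() are illustrative, not git's types):

#include <stdio.h>
#include <string.h>

struct fake_ref { unsigned char old_sha1[20]; };

/* Return 1 when every advertised ref has a real object id, i.e. the
 * fetch can safely be delayed until after the remote HEAD checks. */
static int all_ids_known(const struct fake_ref *refs, int n)
{
	static const unsigned char null_sha1[20];	/* all zeros */
	int i;

	for (i = 0; i < n; i++)
		if (!memcmp(refs[i].old_sha1, null_sha1, 20))
			return 0;	/* must fetch early */
	return 1;
}

int main(void)
{
	struct fake_ref refs[2] = { { { 0x1f } }, { { 0x2a } } };

	printf("complete_refs_before_fetch = %d\n", all_ids_known(refs, 2));	/* 1 */
	memset(refs[1].old_sha1, 0, 20);
	printf("complete_refs_before_fetch = %d\n", all_ids_known(refs, 2));	/* 0 */
	return 0;
}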
#include "tree.h"
#include "builtin.h"
#include "utf8.h"
+#include "gpg-interface.h"
-static const char commit_tree_usage[] = "git commit-tree [(-p <sha1>)...] [-m <message>] [-F <file>] <sha1> <changelog";
+static const char commit_tree_usage[] = "git commit-tree [(-p <sha1>)...] [-S<signer>] [-m <message>] [-F <file>] <sha1> <changelog";
static void new_parent(struct commit *parent, struct commit_list **parents_p)
{
commit_list_insert(parent, parents_p);
}
+static int commit_tree_config(const char *var, const char *value, void *cb)
+{
+ int status = git_gpg_config(var, value, NULL);
+ if (status)
+ return status;
+ return git_default_config(var, value, cb);
+}
+
int cmd_commit_tree(int argc, const char **argv, const char *prefix)
{
int i, got_tree = 0;
unsigned char tree_sha1[20];
unsigned char commit_sha1[20];
struct strbuf buffer = STRBUF_INIT;
+ const char *sign_commit = NULL;
- git_config(git_default_config, NULL);
+ git_config(commit_tree_config, NULL);
if (argc < 2 || !strcmp(argv[1], "-h"))
usage(commit_tree_usage);
+ if (get_sha1(argv[1], tree_sha1))
+ die("Not a valid object name %s", argv[1]);
+
for (i = 1; i < argc; i++) {
const char *arg = argv[i];
if (!strcmp(arg, "-p")) {
continue;
}
+ if (!memcmp(arg, "-S", 2)) {
+ sign_commit = arg + 2;
+ continue;
+ }
+
if (!strcmp(arg, "-m")) {
if (argc <= ++i)
usage(commit_tree_usage);
die_errno("git commit-tree: failed to read");
}
- if (commit_tree(buffer.buf, tree_sha1, parents, commit_sha1, NULL)) {
+ if (commit_tree(&buffer, tree_sha1, parents, commit_sha1,
+ NULL, sign_commit)) {
strbuf_release(&buffer);
return 1;
}
#include "unpack-trees.h"
#include "quote.h"
#include "submodule.h"
+#include "gpg-interface.h"
static const char * const builtin_commit_usage[] = {
"git commit [options] [--] <filepattern>...",
static int quiet, verbose, no_verify, allow_empty, dry_run, renew_authorship;
static int no_post_rewrite, allow_empty_message;
static char *untracked_files_arg, *force_date, *ignore_submodule_arg;
+static char *sign_commit;
+
/*
* The default commit message cleanup mode will remove the lines
* beginning with # (shell comments) and leading and trailing
static int use_editor = 1, include_status = 1;
static int show_ignored_in_status;
static const char *only_include_assumed;
-static struct strbuf message;
+static struct strbuf message = STRBUF_INIT;
static int null_termination;
static enum {
OPT_STRING('C', "reuse-message", &use_message, "commit", "reuse message from specified commit"),
OPT_STRING(0, "fixup", &fixup_message, "commit", "use autosquash formatted message to fixup specified commit"),
OPT_STRING(0, "squash", &squash_message, "commit", "use autosquash formatted message to squash specified commit"),
- OPT_BOOLEAN(0, "reset-author", &renew_authorship, "the commit is authored by me now (used with -C-c/--amend)"),
+ OPT_BOOLEAN(0, "reset-author", &renew_authorship, "the commit is authored by me now (used with -C/-c/--amend)"),
OPT_BOOLEAN('s', "signoff", &signoff, "add Signed-off-by:"),
OPT_FILENAME('t', "template", &template_file, "use specified template file"),
OPT_BOOL('e', "edit", &edit_flag, "force edit of commit"),
OPT_STRING(0, "cleanup", &cleanup_arg, "default", "how to strip spaces and #comments from message"),
OPT_BOOLEAN(0, "status", &include_status, "include status in commit message template"),
+ { OPTION_STRING, 'S', "gpg-sign", &sign_commit, "key id",
+ "GPG sign commit", PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
/* end commit message options */
OPT_GROUP("Commit contents options"),
static int git_commit_config(const char *k, const char *v, void *cb)
{
struct wt_status *s = cb;
+ int status;
if (!strcmp(k, "commit.template"))
return git_config_pathname(&template_file, k, v);
return 0;
}
+ status = git_gpg_config(k, v, NULL);
+ if (status)
+ return status;
return git_status_config(k, v, s);
}
exit(1);
}
- if (amend)
- extra = read_commit_extra_headers(current_head);
+ if (amend) {
+ const char *exclude_gpgsig[2] = { "gpgsig", NULL };
+ extra = read_commit_extra_headers(current_head, exclude_gpgsig);
+ } else {
+ struct commit_extra_header **tail = &extra;
+ append_merge_tag_headers(parents, &tail);
+ }
- if (commit_tree_extended(sb.buf, active_cache_tree->sha1, parents, sha1,
- author_ident.buf, extra)) {
+ if (commit_tree_extended(&sb, active_cache_tree->sha1, parents, sha1,
+ author_ident.buf, sign_commit, extra)) {
rollback_index_files();
die(_("failed to write commit object"));
}
ret = git_config_set(argv[0], value);
if (ret == CONFIG_NOTHING_SET)
error("cannot overwrite multiple values with a single value\n"
- " Use a regexp, --add or --set-all to change %s.", argv[0]);
+ " Use a regexp, --add or --replace-all to change %s.", argv[0]);
return ret;
}
else if (actions == ACTION_SET_ALL) {
#include "log-tree.h"
#include "builtin.h"
#include "submodule.h"
+#include "sha1-array.h"
struct blobinfo {
unsigned char sha1[20];
struct object_array_entry *ent,
int ents)
{
- const unsigned char (*parent)[20];
+ struct sha1_array parents = SHA1_ARRAY_INIT;
int i;
if (argc > 1)
if (!revs->dense_combined_merges && !revs->combine_merges)
revs->dense_combined_merges = revs->combine_merges = 1;
- parent = xmalloc(ents * sizeof(*parent));
- for (i = 0; i < ents; i++)
- hashcpy((unsigned char *)(parent + i), ent[i].item->sha1);
- diff_tree_combined(parent[0], parent + 1, ents - 1,
+ for (i = 1; i < ents; i++)
+ sha1_array_append(&parents, ent[i].item->sha1);
+ diff_tree_combined(ent[0].item->sha1, &parents,
revs->dense_combined_merges, revs);
- free((void *)parent);
+ sha1_array_clear(&parents);
return 0;
}
static int progress;
static enum { ABORT, VERBATIM, WARN, STRIP } signed_tag_mode = ABORT;
-static enum { ERROR, DROP, REWRITE } tag_of_filtered_mode = ABORT;
+static enum { ERROR, DROP, REWRITE } tag_of_filtered_mode = ERROR;
static int fake_missing_tagger;
static int use_done_feature;
static int no_data;
const char *arg, int unset)
{
if (unset || !strcmp(arg, "abort"))
- tag_of_filtered_mode = ABORT;
+ tag_of_filtered_mode = ERROR;
else if (!strcmp(arg, "drop"))
tag_of_filtered_mode = DROP;
else if (!strcmp(arg, "rewrite"))
const char *what, *kind;
struct ref *rm;
char *url, *filename = dry_run ? "/dev/null" : git_path("FETCH_HEAD");
+ int want_merge;
fp = fopen(filename, "a");
if (!fp)
goto abort;
}
- for (rm = ref_map; rm; rm = rm->next) {
- struct ref *ref = NULL;
-
- if (rm->peer_ref) {
- ref = xcalloc(1, sizeof(*ref) + strlen(rm->peer_ref->name) + 1);
- strcpy(ref->name, rm->peer_ref->name);
- hashcpy(ref->old_sha1, rm->peer_ref->old_sha1);
- hashcpy(ref->new_sha1, rm->old_sha1);
- ref->force = rm->peer_ref->force;
- }
+ /*
+	 * The first pass writes the refs to be merged and the second
+	 * pass writes the rest, in order to allow using FETCH_HEAD as
+	 * a refname to refer to the ref to be merged.
+ */
+ for (want_merge = 1; 0 <= want_merge; want_merge--) {
+ for (rm = ref_map; rm; rm = rm->next) {
+ struct ref *ref = NULL;
+
+ commit = lookup_commit_reference_gently(rm->old_sha1, 1);
+ if (!commit)
+ rm->merge = 0;
+
+ if (rm->merge != want_merge)
+ continue;
+
+ if (rm->peer_ref) {
+ ref = xcalloc(1, sizeof(*ref) + strlen(rm->peer_ref->name) + 1);
+ strcpy(ref->name, rm->peer_ref->name);
+ hashcpy(ref->old_sha1, rm->peer_ref->old_sha1);
+ hashcpy(ref->new_sha1, rm->old_sha1);
+ ref->force = rm->peer_ref->force;
+ }
- commit = lookup_commit_reference_gently(rm->old_sha1, 1);
- if (!commit)
- rm->merge = 0;
- if (!strcmp(rm->name, "HEAD")) {
- kind = "";
- what = "";
- }
- else if (!prefixcmp(rm->name, "refs/heads/")) {
- kind = "branch";
- what = rm->name + 11;
- }
- else if (!prefixcmp(rm->name, "refs/tags/")) {
- kind = "tag";
- what = rm->name + 10;
- }
- else if (!prefixcmp(rm->name, "refs/remotes/")) {
- kind = "remote-tracking branch";
- what = rm->name + 13;
- }
- else {
- kind = "";
- what = rm->name;
- }
+ if (!strcmp(rm->name, "HEAD")) {
+ kind = "";
+ what = "";
+ }
+ else if (!prefixcmp(rm->name, "refs/heads/")) {
+ kind = "branch";
+ what = rm->name + 11;
+ }
+ else if (!prefixcmp(rm->name, "refs/tags/")) {
+ kind = "tag";
+ what = rm->name + 10;
+ }
+ else if (!prefixcmp(rm->name, "refs/remotes/")) {
+ kind = "remote-tracking branch";
+ what = rm->name + 13;
+ }
+ else {
+ kind = "";
+ what = rm->name;
+ }
- url_len = strlen(url);
- for (i = url_len - 1; url[i] == '/' && 0 <= i; i--)
- ;
- url_len = i + 1;
- if (4 < i && !strncmp(".git", url + i - 3, 4))
- url_len = i - 3;
-
- strbuf_reset(¬e);
- if (*what) {
- if (*kind)
- strbuf_addf(¬e, "%s ", kind);
- strbuf_addf(¬e, "'%s' of ", what);
- }
- fprintf(fp, "%s\t%s\t%s",
- sha1_to_hex(rm->old_sha1),
- rm->merge ? "" : "not-for-merge",
- note.buf);
- for (i = 0; i < url_len; ++i)
- if ('\n' == url[i])
- fputs("\\n", fp);
- else
- fputc(url[i], fp);
- fputc('\n', fp);
-
- strbuf_reset(¬e);
- if (ref) {
- rc |= update_local_ref(ref, what, ¬e);
- free(ref);
- } else
- strbuf_addf(¬e, "* %-*s %-*s -> FETCH_HEAD",
- TRANSPORT_SUMMARY_WIDTH,
- *kind ? kind : "branch",
- REFCOL_WIDTH,
- *what ? what : "HEAD");
- if (note.len) {
- if (verbosity >= 0 && !shown_url) {
- fprintf(stderr, _("From %.*s\n"),
- url_len, url);
- shown_url = 1;
+ url_len = strlen(url);
+ for (i = url_len - 1; url[i] == '/' && 0 <= i; i--)
+ ;
+ url_len = i + 1;
+ if (4 < i && !strncmp(".git", url + i - 3, 4))
+ url_len = i - 3;
+
+ strbuf_reset(¬e);
+ if (*what) {
+ if (*kind)
+ strbuf_addf(¬e, "%s ", kind);
+ strbuf_addf(¬e, "'%s' of ", what);
+ }
+ fprintf(fp, "%s\t%s\t%s",
+ sha1_to_hex(rm->old_sha1),
+ rm->merge ? "" : "not-for-merge",
+ note.buf);
+ for (i = 0; i < url_len; ++i)
+ if ('\n' == url[i])
+ fputs("\\n", fp);
+ else
+ fputc(url[i], fp);
+ fputc('\n', fp);
+
+ strbuf_reset(¬e);
+ if (ref) {
+ rc |= update_local_ref(ref, what, ¬e);
+ free(ref);
+ } else
+ strbuf_addf(¬e, "* %-*s %-*s -> FETCH_HEAD",
+ TRANSPORT_SUMMARY_WIDTH,
+ *kind ? kind : "branch",
+ REFCOL_WIDTH,
+ *what ? what : "HEAD");
+ if (note.len) {
+ if (verbosity >= 0 && !shown_url) {
+ fprintf(stderr, _("From %.*s\n"),
+ url_len, url);
+ shown_url = 1;
+ }
+ if (verbosity >= 0)
+ fprintf(stderr, " %s\n", note.buf);
}
- if (verbosity >= 0)
- fprintf(stderr, " %s\n", note.buf);
}
}
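
The restructured loop above makes two passes over the ref map so that FETCH_HEAD lists the refs to be merged before the not-for-merge ones. A standalone sketch of the ordering trick (struct entry and write_fetch_head() are illustrative, not git's types or format):

#include <stdio.h>

struct entry { const char *name; int merge; };

/* Write the "to be merged" entries on the first pass, everything else
 * on the second, so the first line is always the ref to merge. */
static void write_fetch_head(FILE *fp, const struct entry *e, int n)
{
	int want_merge, i;

	for (want_merge = 1; want_merge >= 0; want_merge--)
		for (i = 0; i < n; i++) {
			if (e[i].merge != want_merge)
				continue;
			fprintf(fp, "%s\t%s\n",
				e[i].merge ? "" : "not-for-merge", e[i].name);
		}
}

int main(void)
{
	const struct entry entries[] = {
		{ "refs/heads/topic", 0 },
		{ "refs/heads/master", 1 },
	};

	write_fetch_head(stdout, entries, 2);	/* the master line comes out first */
	return 0;
}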
for_each_ref(add_existing, &existing_refs);
for (ref = transport_get_remote_refs(transport); ref; ref = ref->next) {
- if (prefixcmp(ref->name, "refs/tags"))
+ if (prefixcmp(ref->name, "refs/tags/"))
continue;
/*
#include "grep.h"
#include "quote.h"
#include "dir.h"
-#include "thread-utils.h"
static char const * const grep_usage[] = {
"git grep [options] [-e] <pattern> [<rev>...] [[--] <path>...]",
pthread_mutex_init(&grep_mutex, NULL);
pthread_mutex_init(&read_sha1_mutex, NULL);
+ pthread_mutex_init(&grep_attr_mutex, NULL);
pthread_cond_init(&cond_add, NULL);
pthread_cond_init(&cond_write, NULL);
pthread_cond_init(&cond_result, NULL);
pthread_mutex_destroy(&grep_mutex);
pthread_mutex_destroy(&read_sha1_mutex);
+ pthread_mutex_destroy(&grep_attr_mutex);
pthread_cond_destroy(&cond_add);
pthread_cond_destroy(&cond_write);
pthread_cond_destroy(&cond_result);
if (!opt.fixed && opt.ignore_case)
opt.regflags |= REG_ICASE;
-#ifndef NO_PTHREADS
- if (online_cpus() == 1 || !grep_threads_ok(&opt))
- use_threads = 0;
-
- if (use_threads) {
- if (opt.pre_context || opt.post_context || opt.file_break ||
- opt.funcbody)
- skip_first_line = 1;
- start_threads(&opt);
- }
-#else
- use_threads = 0;
-#endif
-
compile_grep_patterns(&opt);
/* Check revs and then paths */
break;
}
+#ifndef NO_PTHREADS
+ if (list.nr || cached || online_cpus() == 1)
+ use_threads = 0;
+#else
+ use_threads = 0;
+#endif
+
+ opt.use_threads = use_threads;
+
+#ifndef NO_PTHREADS
+ if (use_threads) {
+ if (!(opt.name_only || opt.unmatch_name_only || opt.count)
+ && (opt.pre_context || opt.post_context ||
+ opt.file_break || opt.funcbody))
+ skip_first_line = 1;
+ start_threads(&opt);
+ }
+#endif
+
/* The rest are paths */
if (!seen_dashdash) {
int j;
struct object_entry *obj;
void *data;
unsigned long size;
+ int ref_first, ref_last;
+ int ofs_first, ofs_last;
};
/*
if (from_stdin) {
input_fd = 0;
if (!pack_name) {
- static char tmpfile[PATH_MAX];
- output_fd = odb_mkstemp(tmpfile, sizeof(tmpfile),
+ static char tmp_file[PATH_MAX];
+ output_fd = odb_mkstemp(tmp_file, sizeof(tmp_file),
"pack/tmp_pack_XXXXXX");
- pack_name = xstrdup(tmpfile);
+ pack_name = xstrdup(tmp_file);
} else
output_fd = open(pack_name, O_CREAT|O_EXCL|O_RDWR, 0600);
if (output_fd < 0)
die("pack has bad object at offset %lu: %s", offset, buf);
}
+static struct base_data *alloc_base_data(void)
+{
+ struct base_data *base = xmalloc(sizeof(struct base_data));
+ memset(base, 0, sizeof(*base));
+ base->ref_last = -1;
+ base->ofs_last = -1;
+ return base;
+}
+
static void free_base_data(struct base_data *c)
{
if (c->data) {
return (type == OBJ_REF_DELTA || type == OBJ_OFS_DELTA);
}
+/*
+ * This function is part of find_unresolved_deltas(). There are two
+ * walkers going in opposite directions.
+ *
+ * The first one in find_unresolved_deltas() traverses down from the
+ * parent node to its children, deflating nodes along the way. However,
+ * memory for deflated nodes is limited by delta_base_cache_limit, so
+ * at some point a parent node's deflated content may be freed.
+ *
+ * The second walker is this function, which goes from the current node
+ * up to the top parent if necessary to deflate the node. In the normal
+ * case, its parent node is already deflated, so it just needs to apply
+ * the delta.
+ *
+ * In the worst-case scenario, the parent node is no longer deflated
+ * because we are running out of delta_base_cache_limit; we need to
+ * re-deflate the parents, possibly up to the top base.
+ *
+ * All deflated objects here are subject to being freed if we exceed
+ * delta_base_cache_limit, just as in find_unresolved_deltas(); we only
+ * need to make sure the last node is not freed.
+ */
static void *get_base_data(struct base_data *c)
{
if (!c->data) {
struct object_entry *obj = c->obj;
+ struct base_data **delta = NULL;
+ int delta_nr = 0, delta_alloc = 0;
- if (is_delta_type(obj->type)) {
- void *base = get_base_data(c->base);
- void *raw = get_data_from_pack(obj);
+ while (is_delta_type(c->obj->type) && !c->data) {
+ ALLOC_GROW(delta, delta_nr + 1, delta_alloc);
+ delta[delta_nr++] = c;
+ c = c->base;
+ }
+ if (!delta_nr) {
+ c->data = get_data_from_pack(obj);
+ c->size = obj->size;
+ base_cache_used += c->size;
+ prune_base_data(c);
+ }
+ for (; delta_nr > 0; delta_nr--) {
+ void *base, *raw;
+ c = delta[delta_nr - 1];
+ obj = c->obj;
+ base = get_base_data(c->base);
+ raw = get_data_from_pack(obj);
c->data = patch_delta(
base, c->base->size,
raw, obj->size,
free(raw);
if (!c->data)
bad_object(obj->idx.offset, "failed to apply delta");
- } else {
- c->data = get_data_from_pack(obj);
- c->size = obj->size;
+ base_cache_used += c->size;
+ prune_base_data(c);
}
-
- base_cache_used += c->size;
- prune_base_data(c);
+ free(delta);
}
return c->data;
}
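
get_base_data() above now collects the chain of bases whose data is missing into a growable array and materializes them top-down instead of recursing, and find_unresolved_deltas() below is flattened along the same lines. A standalone sketch of the collect-then-apply idea (struct node and the integer "data" stand in for real object buffers and patch_delta(); they are not git's types):

#include <stdio.h>

struct node {
	struct node *base;	/* NULL for a full (non-delta) object */
	int data;		/* 0 = not materialized yet */
};

static int get_data(struct node *c)
{
	struct node *chain[64];	/* plenty for this illustration */
	int nr = 0;

	/* walk up, remembering every delta whose data is missing */
	while (c->base && !c->data) {
		chain[nr++] = c;
		c = c->base;
	}

	if (!c->data)
		c->data = 1;	/* "inflate" the base object */

	/* re-apply the deltas from the top of the chain back down */
	while (nr > 0) {
		c = chain[--nr];
		c->data = c->base->data + 1;	/* stand-in for patch_delta() */
	}
	return c->data;
}

int main(void)
{
	struct node base = { NULL, 0 }, d1 = { &base, 0 }, d2 = { &d1, 0 };

	printf("%d\n", get_data(&d2));	/* 3: the base plus two applied deltas */
	return 0;
}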
nr_resolved_deltas++;
}
-static void find_unresolved_deltas(struct base_data *base,
- struct base_data *prev_base)
+static struct base_data *find_unresolved_deltas_1(struct base_data *base,
+ struct base_data *prev_base)
{
- int i, ref_first, ref_last, ofs_first, ofs_last;
-
- /*
- * This is a recursive function. Those brackets should help reducing
- * stack usage by limiting the scope of the delta_base union.
- */
- {
+ if (base->ref_last == -1 && base->ofs_last == -1) {
union delta_base base_spec;
hashcpy(base_spec.sha1, base->obj->idx.sha1);
find_delta_children(&base_spec,
- &ref_first, &ref_last, OBJ_REF_DELTA);
+ &base->ref_first, &base->ref_last, OBJ_REF_DELTA);
memset(&base_spec, 0, sizeof(base_spec));
base_spec.offset = base->obj->idx.offset;
find_delta_children(&base_spec,
- &ofs_first, &ofs_last, OBJ_OFS_DELTA);
- }
+ &base->ofs_first, &base->ofs_last, OBJ_OFS_DELTA);
- if (ref_last == -1 && ofs_last == -1) {
- free(base->data);
- return;
- }
+ if (base->ref_last == -1 && base->ofs_last == -1) {
+ free(base->data);
+ return NULL;
+ }
- link_base_data(prev_base, base);
+ link_base_data(prev_base, base);
+ }
- for (i = ref_first; i <= ref_last; i++) {
- struct object_entry *child = objects + deltas[i].obj_no;
- struct base_data result;
+ if (base->ref_first <= base->ref_last) {
+ struct object_entry *child = objects + deltas[base->ref_first].obj_no;
+ struct base_data *result = alloc_base_data();
assert(child->real_type == OBJ_REF_DELTA);
- resolve_delta(child, base, &result);
- if (i == ref_last && ofs_last == -1)
+ resolve_delta(child, base, result);
+ if (base->ref_first == base->ref_last && base->ofs_last == -1)
free_base_data(base);
- find_unresolved_deltas(&result, base);
+
+ base->ref_first++;
+ return result;
}
- for (i = ofs_first; i <= ofs_last; i++) {
- struct object_entry *child = objects + deltas[i].obj_no;
- struct base_data result;
+ if (base->ofs_first <= base->ofs_last) {
+ struct object_entry *child = objects + deltas[base->ofs_first].obj_no;
+ struct base_data *result = alloc_base_data();
assert(child->real_type == OBJ_OFS_DELTA);
- resolve_delta(child, base, &result);
- if (i == ofs_last)
+ resolve_delta(child, base, result);
+ if (base->ofs_first == base->ofs_last)
free_base_data(base);
- find_unresolved_deltas(&result, base);
+
+ base->ofs_first++;
+ return result;
}
unlink_base_data(base);
+ return NULL;
+}
+
+static void find_unresolved_deltas(struct base_data *base)
+{
+ struct base_data *new_base, *prev_base = NULL;
+ for (;;) {
+ new_base = find_unresolved_deltas_1(base, prev_base);
+
+ if (new_base) {
+ prev_base = base;
+ base = new_base;
+ } else {
+ free(base);
+ base = prev_base;
+ if (!base)
+ return;
+ prev_base = base->base;
+ }
+ }
}
static int compare_delta_entry(const void *a, const void *b)
progress = start_progress("Resolving deltas", nr_deltas);
for (i = 0; i < nr_objects; i++) {
struct object_entry *obj = &objects[i];
- struct base_data base_obj;
+ struct base_data *base_obj = alloc_base_data();
if (is_delta_type(obj->type))
continue;
- base_obj.obj = obj;
- base_obj.data = NULL;
- find_unresolved_deltas(&base_obj, NULL);
+ base_obj->obj = obj;
+ base_obj->data = NULL;
+ find_unresolved_deltas(base_obj);
display_progress(progress, nr_resolved_deltas);
}
}
for (i = 0; i < n; i++) {
struct delta_entry *d = sorted_by_pos[i];
enum object_type type;
- struct base_data base_obj;
+ struct base_data *base_obj = alloc_base_data();
if (objects[d->obj_no].real_type != OBJ_REF_DELTA)
continue;
- base_obj.data = read_sha1_file(d->base.sha1, &type, &base_obj.size);
- if (!base_obj.data)
+ base_obj->data = read_sha1_file(d->base.sha1, &type, &base_obj->size);
+ if (!base_obj->data)
continue;
- if (check_sha1_signature(d->base.sha1, base_obj.data,
- base_obj.size, typename(type)))
+ if (check_sha1_signature(d->base.sha1, base_obj->data,
+ base_obj->size, typename(type)))
die("local object %s is corrupt", sha1_to_hex(d->base.sha1));
- base_obj.obj = append_obj_to_pack(f, d->base.sha1,
- base_obj.data, base_obj.size, type);
- find_unresolved_deltas(&base_obj, NULL);
+ base_obj->obj = append_obj_to_pack(f, d->base.sha1,
+ base_obj->data, base_obj->size, type);
+ find_unresolved_deltas(base_obj);
display_progress(progress, nr_resolved_deltas);
}
free(sorted_by_pos);
else if (S_ISDIR(st.st_mode))
src = git_link;
else
- die(_("unable to handle file type %d"), st.st_mode);
+ die(_("unable to handle file type %d"), (int)st.st_mode);
if (rename(src, git_dir))
die_errno(_("unable to move %s to %s"), src, git_dir);
static void cmd_log_init_defaults(struct rev_info *rev)
{
- rev->abbrev = DEFAULT_ABBREV;
- rev->commit_format = CMIT_FMT_DEFAULT;
if (fmt_pretty)
get_commit_format(fmt_pretty, rev);
rev->verbose_header = 1;
(7 <= remove &&
memmem(subject->buf + at, remove, "PATCH", 5)))
strbuf_remove(subject, at, remove);
- else
+ else {
at += remove;
+ /*
+ * If the input had a space after the ], keep
+ * it. We don't bother with finding the end of
+ * the space, since we later normalize it
+ * anyway.
+ */
+ if (isspace(subject->buf[at]))
+ at += 1;
+ }
continue;
}
break;
#include "resolve-undo.h"
#include "remote.h"
#include "fmt-merge-msg.h"
+#include "gpg-interface.h"
#define DEFAULT_TWOHEAD (1<<0)
#define DEFAULT_OCTOPUS (1<<1)
static int fast_forward_only, option_edit;
static int allow_trivial = 1, have_message;
static int overwrite_ignore = 1;
-static struct strbuf merge_msg;
+static struct strbuf merge_msg = STRBUF_INIT;
static struct commit_list *remoteheads;
static struct strategy **use_strategies;
static size_t use_strategies_nr, use_strategies_alloc;
static int abort_current_merge;
static int show_progress = -1;
static int default_to_upstream;
+static const char *sign_commit;
static struct strategy all_strategy[] = {
{ "recursive", DEFAULT_TWOHEAD | NO_TRIVIAL },
OPT_BOOLEAN(0, "abort", &abort_current_merge,
"abort the current in-progress merge"),
OPT_SET_INT(0, "progress", &show_progress, "force progress reporting", 1),
+ { OPTION_STRING, 'S', "gpg-sign", &sign_commit, "key id",
+ "GPG sign commit", PARSE_OPT_OPTARG, NULL, (intptr_t) "" },
OPT_BOOLEAN(0, "overwrite-ignore", &overwrite_ignore, "update ignored files (default)"),
OPT_END()
};
default_to_upstream = git_config_bool(k, v);
return 0;
}
+
status = fmt_merge_msg_config(k, v, cb);
+ if (status)
+ return status;
+ status = git_gpg_config(k, v, NULL);
if (status)
return status;
return git_diff_ui_config(k, v, cb);
parent->next->item = remoteheads->item;
parent->next->next = NULL;
prepare_to_commit();
- commit_tree(merge_msg.buf, result_tree, parent, result_commit, NULL);
+ if (commit_tree(&merge_msg, result_tree, parent, result_commit, NULL,
+ sign_commit))
+ die(_("failed to write commit object"));
finish(head, result_commit, "In-index merge");
drop_save();
return 0;
strbuf_addch(&merge_msg, '\n');
prepare_to_commit();
free_commit_list(remoteheads);
- commit_tree(merge_msg.buf, result_tree, parents, result_commit, NULL);
+ if (commit_tree(&merge_msg, result_tree, parents, result_commit,
+ NULL, sign_commit))
+ die(_("failed to write commit object"));
strbuf_addf(&buf, "Merge made by the '%s' strategy.", wt_strategy);
finish(head, result_commit, buf.buf);
strbuf_release(&buf);
return; /* don't have to commit an unchanged tree */
/* Prepare commit message and reflog message */
- strbuf_addstr(&buf, "notes: "); /* commit message starts at index 7 */
strbuf_addstr(&buf, msg);
if (buf.buf[buf.len - 1] != '\n')
strbuf_addch(&buf, '\n'); /* Make sure msg ends with newline */
- create_notes_commit(t, NULL, buf.buf + 7, commit_sha1);
+ create_notes_commit(t, NULL, &buf, commit_sha1);
+ strbuf_insert(&buf, 0, "notes: ", 7); /* commit message starts at index 7 */
update_ref(buf.buf, t->ref, commit_sha1, NULL, 0, DIE_ON_ERR);
strbuf_release(&buf);
return -1;
/*
- * We do not bother to try a delta that we discarded
- * on an earlier try, but only when reusing delta data.
+ * We do not bother to try a delta that we discarded on an
+ * earlier try, but only when reusing delta data. Note that
+ * src_entry that is marked as the preferred_base should always
+ * be considered, as even if we produce a suboptimal delta against
+ * it, we will still save the transfer cost, as we already know
+ * the other side has it and we won't send src_entry at all.
*/
if (reuse_delta && trg_entry->in_pack &&
trg_entry->in_pack == src_entry->in_pack &&
+ !src_entry->preferred_base &&
trg_entry->in_pack_type != OBJ_REF_DELTA &&
trg_entry->in_pack_type != OBJ_OFS_DELTA)
return 0;
static int unpack_limit = 100;
static int report_status;
static int use_sideband;
+static int quiet;
static int prefer_ofs_delta = 1;
static int auto_update_server_info;
static int auto_gc = 1;
return git_default_config(var, value, cb);
}
-static int show_ref(const char *path, const unsigned char *sha1, int flag, void *cb_data)
+static void show_ref(const char *path, const unsigned char *sha1)
{
if (sent_capabilities)
packet_write(1, "%s %s\n", sha1_to_hex(sha1), path);
else
packet_write(1, "%s %s%c%s%s\n",
sha1_to_hex(sha1), path, 0,
- " report-status delete-refs side-band-64k",
+ " report-status delete-refs side-band-64k quiet",
prefer_ofs_delta ? " ofs-delta" : "");
sent_capabilities = 1;
- return 0;
}
-static int show_ref_cb(const char *path, const unsigned char *sha1, int flag, void *cb_data)
+static int show_ref_cb(const char *path, const unsigned char *sha1, int flag, void *unused)
{
path = strip_namespace(path);
/*
*/
if (!path)
path = ".have";
- return show_ref(path, sha1, flag, cb_data);
+ show_ref(path, sha1);
+ return 0;
+}
+
+static void show_one_alternate_sha1(const unsigned char sha1[20], void *unused)
+{
+ show_ref(".have", sha1);
+}
+
+static void collect_one_alternate_ref(const struct ref *ref, void *data)
+{
+ struct sha1_array *sa = data;
+ sha1_array_append(sa, ref->old_sha1);
}
static void write_head_info(void)
{
+ struct sha1_array sa = SHA1_ARRAY_INIT;
+ for_each_alternate_ref(collect_one_alternate_ref, &sa);
+ sha1_array_for_each_unique(&sa, show_one_alternate_sha1, NULL);
+ sha1_array_clear(&sa);
for_each_ref(show_ref_cb, NULL);
if (!sent_capabilities)
- show_ref("capabilities^{}", null_sha1, 0, NULL);
+ show_ref("capabilities^{}", null_sha1);
+ /* EOF */
+ packet_flush(1);
}
struct command {
refname = line + 82;
reflen = strlen(refname);
if (reflen + 82 < len) {
- if (strstr(refname + reflen + 1, "report-status"))
+ const char *feature_list = refname + reflen + 1;
+ if (parse_feature_request(feature_list, "report-status"))
report_status = 1;
- if (strstr(refname + reflen + 1, "side-band-64k"))
+ if (parse_feature_request(feature_list, "side-band-64k"))
use_sideband = LARGE_PACKET_MAX;
+ if (parse_feature_request(feature_list, "quiet"))
+ quiet = 1;
}
cmd = xcalloc(1, sizeof(struct command) + len - 80);
hashcpy(cmd->old_sha1, old_sha1);
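
The parse_feature_request() calls above replace plain strstr() so that the new "quiet" capability is only recognized as a whole, space-delimited token. A standalone sketch of that kind of check (has_feature() here is illustrative, not git's parse_feature_request() implementation):

#include <stdio.h>
#include <string.h>

/* Match "feature" only as a whole word inside a space-separated list. */
static int has_feature(const char *list, const char *feature)
{
	size_t len = strlen(feature);
	const char *p = list;

	while ((p = strstr(p, feature)) != NULL) {
		if ((p == list || p[-1] == ' ') &&
		    (p[len] == '\0' || p[len] == ' '))
			return 1;
		p += len;
	}
	return 0;
}

int main(void)
{
	const char *caps = "report-status delete-refs side-band-64k quiet";

	printf("%d %d\n", has_feature(caps, "quiet"),
	       has_feature(caps, "side-band"));	/* prints "1 0" */
	return 0;
}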
if (ntohl(hdr.hdr_entries) < unpack_limit) {
int code, i = 0;
- const char *unpacker[4];
+ const char *unpacker[5];
unpacker[i++] = "unpack-objects";
+ if (quiet)
+ unpacker[i++] = "-q";
if (fsck_objects)
unpacker[i++] = "--strict";
unpacker[i++] = hdr_arg;
return 1;
}
-static void add_one_alternate_sha1(const unsigned char sha1[20], void *unused)
-{
- add_extra_ref(".have", sha1, 0);
-}
-
-static void collect_one_alternate_ref(const struct ref *ref, void *data)
-{
- struct sha1_array *sa = data;
- sha1_array_append(sa, ref->old_sha1);
-}
-
-static void add_alternate_refs(void)
-{
- struct sha1_array sa = SHA1_ARRAY_INIT;
- for_each_alternate_ref(collect_one_alternate_ref, &sa);
- sha1_array_for_each_unique(&sa, add_one_alternate_sha1, NULL);
- sha1_array_clear(&sa);
-}
-
int cmd_receive_pack(int argc, const char **argv, const char *prefix)
{
int advertise_refs = 0;
const char *arg = *argv++;
if (*arg == '-') {
+ if (!strcmp(arg, "--quiet")) {
+ quiet = 1;
+ continue;
+ }
+
if (!strcmp(arg, "--advertise-refs")) {
advertise_refs = 1;
continue;
unpack_limit = receive_unpack_limit;
if (advertise_refs || !stateless_rpc) {
- add_alternate_refs();
write_head_info();
- clear_extra_refs();
-
- /* EOF */
- packet_flush(1);
}
if (advertise_refs)
return 0;
}
/* don't delete non-remote-tracking refs */
- if (prefixcmp(refname, "refs/remotes")) {
+ if (prefixcmp(refname, "refs/remotes/")) {
/* advise user how to delete local branches */
if (!prefixcmp(refname, "refs/heads/"))
string_list_append(branches->skipped,
#include "cache.h"
#include "builtin.h"
-#include "object.h"
-#include "commit.h"
-#include "tag.h"
-#include "run-command.h"
-#include "exec_cmd.h"
-#include "utf8.h"
#include "parse-options.h"
-#include "cache-tree.h"
#include "diff.h"
#include "revision.h"
#include "rerere.h"
-#include "merge-recursive.h"
-#include "refs.h"
#include "dir.h"
#include "sequencer.h"
NULL
};
-enum replay_action { REVERT, CHERRY_PICK };
-enum replay_subcommand {
- REPLAY_NONE,
- REPLAY_REMOVE_STATE,
- REPLAY_CONTINUE,
- REPLAY_ROLLBACK
-};
-
-struct replay_opts {
- enum replay_action action;
- enum replay_subcommand subcommand;
-
- /* Boolean options */
- int edit;
- int record_origin;
- int no_commit;
- int signoff;
- int allow_ff;
- int allow_rerere_auto;
-
- int mainline;
-
- /* Merge strategy */
- const char *strategy;
- const char **xopts;
- size_t xopts_nr, xopts_alloc;
-
- /* Only used by REPLAY_NONE */
- struct rev_info *revs;
-};
-
-#define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
-
static const char *action_name(const struct replay_opts *opts)
{
- return opts->action == REVERT ? "revert" : "cherry-pick";
+ return opts->action == REPLAY_REVERT ? "revert" : "cherry-pick";
}
-static char *get_encoding(const char *message);
-
static const char * const *revert_or_cherry_pick_usage(struct replay_opts *opts)
{
- return opts->action == REVERT ? revert_usage : cherry_pick_usage;
+ return opts->action == REPLAY_REVERT ? revert_usage : cherry_pick_usage;
}
static int option_parse_x(const struct option *opt,
OPT_END(),
};
- if (opts->action == CHERRY_PICK) {
+ if (opts->action == REPLAY_PICK) {
struct option cp_extra[] = {
OPT_BOOLEAN('x', NULL, &opts->record_origin, "append commit name"),
OPT_BOOLEAN(0, "ff", &opts->allow_ff, "allow fast-forward"),
usage_with_options(usage_str, options);
}
-struct commit_message {
- char *parent_label;
- const char *label;
- const char *subject;
- char *reencoded_message;
- const char *message;
-};
-
-static int get_message(struct commit *commit, struct commit_message *out)
-{
- const char *encoding;
- const char *abbrev, *subject;
- int abbrev_len, subject_len;
- char *q;
-
- if (!commit->buffer)
- return -1;
- encoding = get_encoding(commit->buffer);
- if (!encoding)
- encoding = "UTF-8";
- if (!git_commit_encoding)
- git_commit_encoding = "UTF-8";
-
- out->reencoded_message = NULL;
- out->message = commit->buffer;
- if (strcmp(encoding, git_commit_encoding))
- out->reencoded_message = reencode_string(commit->buffer,
- git_commit_encoding, encoding);
- if (out->reencoded_message)
- out->message = out->reencoded_message;
-
- abbrev = find_unique_abbrev(commit->object.sha1, DEFAULT_ABBREV);
- abbrev_len = strlen(abbrev);
-
- subject_len = find_commit_subject(out->message, &subject);
-
- out->parent_label = xmalloc(strlen("parent of ") + abbrev_len +
- strlen("... ") + subject_len + 1);
- q = out->parent_label;
- q = mempcpy(q, "parent of ", strlen("parent of "));
- out->label = q;
- q = mempcpy(q, abbrev, abbrev_len);
- q = mempcpy(q, "... ", strlen("... "));
- out->subject = q;
- q = mempcpy(q, subject, subject_len);
- *q = '\0';
- return 0;
-}
-
-static void free_message(struct commit_message *msg)
-{
- free(msg->parent_label);
- free(msg->reencoded_message);
-}
-
-static char *get_encoding(const char *message)
-{
- const char *p = message, *eol;
-
- while (*p && *p != '\n') {
- for (eol = p + 1; *eol && *eol != '\n'; eol++)
- ; /* do nothing */
- if (!prefixcmp(p, "encoding ")) {
- char *result = xmalloc(eol - 8 - p);
- strlcpy(result, p + 9, eol - 8 - p);
- return result;
- }
- p = eol;
- if (*p == '\n')
- p++;
- }
- return NULL;
-}
-
-static void write_cherry_pick_head(struct commit *commit, const char *pseudoref)
-{
- const char *filename;
- int fd;
- struct strbuf buf = STRBUF_INIT;
-
- strbuf_addf(&buf, "%s\n", sha1_to_hex(commit->object.sha1));
-
- filename = git_path("%s", pseudoref);
- fd = open(filename, O_WRONLY | O_CREAT, 0666);
- if (fd < 0)
- die_errno(_("Could not open '%s' for writing"), filename);
- if (write_in_full(fd, buf.buf, buf.len) != buf.len || close(fd))
- die_errno(_("Could not write to '%s'"), filename);
- strbuf_release(&buf);
-}
-
-static void print_advice(int show_hint)
-{
- char *msg = getenv("GIT_CHERRY_PICK_HELP");
-
- if (msg) {
- fprintf(stderr, "%s\n", msg);
- /*
- * A conflict has occured but the porcelain
- * (typically rebase --interactive) wants to take care
- * of the commit itself so remove CHERRY_PICK_HEAD
- */
- unlink(git_path("CHERRY_PICK_HEAD"));
- return;
- }
-
- if (show_hint) {
- advise("after resolving the conflicts, mark the corrected paths");
- advise("with 'git add <paths>' or 'git rm <paths>'");
- advise("and commit the result with 'git commit'");
- }
-}
-
-static void write_message(struct strbuf *msgbuf, const char *filename)
-{
- static struct lock_file msg_file;
-
- int msg_fd = hold_lock_file_for_update(&msg_file, filename,
- LOCK_DIE_ON_ERROR);
- if (write_in_full(msg_fd, msgbuf->buf, msgbuf->len) < 0)
- die_errno(_("Could not write to %s"), filename);
- strbuf_release(msgbuf);
- if (commit_lock_file(&msg_file) < 0)
- die(_("Error wrapping up %s"), filename);
-}
-
-static struct tree *empty_tree(void)
-{
- return lookup_tree((const unsigned char *)EMPTY_TREE_SHA1_BIN);
-}
-
-static int error_dirty_index(struct replay_opts *opts)
-{
- if (read_cache_unmerged())
- return error_resolve_conflict(action_name(opts));
-
- /* Different translation strings for cherry-pick and revert */
- if (opts->action == CHERRY_PICK)
- error(_("Your local changes would be overwritten by cherry-pick."));
- else
- error(_("Your local changes would be overwritten by revert."));
-
- if (advice_commit_before_merge)
- advise(_("Commit your changes or stash them to proceed."));
- return -1;
-}
-
-static int fast_forward_to(const unsigned char *to, const unsigned char *from)
-{
- struct ref_lock *ref_lock;
-
- read_cache();
- if (checkout_fast_forward(from, to))
- exit(1); /* the callee should have complained already */
- ref_lock = lock_any_ref_for_update("HEAD", from, 0);
- return write_ref_sha1(ref_lock, to, "cherry-pick");
-}
-
-static int do_recursive_merge(struct commit *base, struct commit *next,
- const char *base_label, const char *next_label,
- unsigned char *head, struct strbuf *msgbuf,
- struct replay_opts *opts)
-{
- struct merge_options o;
- struct tree *result, *next_tree, *base_tree, *head_tree;
- int clean, index_fd;
- const char **xopt;
- static struct lock_file index_lock;
-
- index_fd = hold_locked_index(&index_lock, 1);
-
- read_cache();
-
- init_merge_options(&o);
- o.ancestor = base ? base_label : "(empty tree)";
- o.branch1 = "HEAD";
- o.branch2 = next ? next_label : "(empty tree)";
-
- head_tree = parse_tree_indirect(head);
- next_tree = next ? next->tree : empty_tree();
- base_tree = base ? base->tree : empty_tree();
-
- for (xopt = opts->xopts; xopt != opts->xopts + opts->xopts_nr; xopt++)
- parse_merge_opt(&o, *xopt);
-
- clean = merge_trees(&o,
- head_tree,
- next_tree, base_tree, &result);
-
- if (active_cache_changed &&
- (write_cache(index_fd, active_cache, active_nr) ||
- commit_locked_index(&index_lock)))
- /* TRANSLATORS: %s will be "revert" or "cherry-pick" */
- die(_("%s: Unable to write new index file"), action_name(opts));
- rollback_lock_file(&index_lock);
-
- if (!clean) {
- int i;
- strbuf_addstr(msgbuf, "\nConflicts:\n\n");
- for (i = 0; i < active_nr;) {
- struct cache_entry *ce = active_cache[i++];
- if (ce_stage(ce)) {
- strbuf_addch(msgbuf, '\t');
- strbuf_addstr(msgbuf, ce->name);
- strbuf_addch(msgbuf, '\n');
- while (i < active_nr && !strcmp(ce->name,
- active_cache[i]->name))
- i++;
- }
- }
- }
-
- return !clean;
-}
-
-/*
- * If we are cherry-pick, and if the merge did not result in
- * hand-editing, we will hit this commit and inherit the original
- * author date and name.
- * If we are revert, or if our cherry-pick results in a hand merge,
- * we had better say that the current user is responsible for that.
- */
-static int run_git_commit(const char *defmsg, struct replay_opts *opts)
-{
- /* 6 is max possible length of our args array including NULL */
- const char *args[6];
- int i = 0;
-
- args[i++] = "commit";
- args[i++] = "-n";
- if (opts->signoff)
- args[i++] = "-s";
- if (!opts->edit) {
- args[i++] = "-F";
- args[i++] = defmsg;
- }
- args[i] = NULL;
-
- return run_command_v_opt(args, RUN_GIT_CMD);
-}
-
-static int do_pick_commit(struct commit *commit, struct replay_opts *opts)
-{
- unsigned char head[20];
- struct commit *base, *next, *parent;
- const char *base_label, *next_label;
- struct commit_message msg = { NULL, NULL, NULL, NULL, NULL };
- char *defmsg = NULL;
- struct strbuf msgbuf = STRBUF_INIT;
- int res;
-
- if (opts->no_commit) {
- /*
- * We do not intend to commit immediately. We just want to
- * merge the differences in, so let's compute the tree
- * that represents the "current" state for merge-recursive
- * to work on.
- */
- if (write_cache_as_tree(head, 0, NULL))
- die (_("Your index file is unmerged."));
- } else {
- if (get_sha1("HEAD", head))
- return error(_("You do not have a valid HEAD"));
- if (index_differs_from("HEAD", 0))
- return error_dirty_index(opts);
- }
- discard_cache();
-
- if (!commit->parents) {
- parent = NULL;
- }
- else if (commit->parents->next) {
- /* Reverting or cherry-picking a merge commit */
- int cnt;
- struct commit_list *p;
-
- if (!opts->mainline)
- return error(_("Commit %s is a merge but no -m option was given."),
- sha1_to_hex(commit->object.sha1));
-
- for (cnt = 1, p = commit->parents;
- cnt != opts->mainline && p;
- cnt++)
- p = p->next;
- if (cnt != opts->mainline || !p)
- return error(_("Commit %s does not have parent %d"),
- sha1_to_hex(commit->object.sha1), opts->mainline);
- parent = p->item;
- } else if (0 < opts->mainline)
- return error(_("Mainline was specified but commit %s is not a merge."),
- sha1_to_hex(commit->object.sha1));
- else
- parent = commit->parents->item;
-
- if (opts->allow_ff && parent && !hashcmp(parent->object.sha1, head))
- return fast_forward_to(commit->object.sha1, head);
-
- if (parent && parse_commit(parent) < 0)
- /* TRANSLATORS: The first %s will be "revert" or
- "cherry-pick", the second %s a SHA1 */
- return error(_("%s: cannot parse parent commit %s"),
- action_name(opts), sha1_to_hex(parent->object.sha1));
-
- if (get_message(commit, &msg) != 0)
- return error(_("Cannot get commit message for %s"),
- sha1_to_hex(commit->object.sha1));
-
- /*
- * "commit" is an existing commit. We would want to apply
- * the difference it introduces since its first parent "prev"
- * on top of the current HEAD if we are cherry-pick. Or the
- * reverse of it if we are revert.
- */
-
- defmsg = git_pathdup("MERGE_MSG");
-
- if (opts->action == REVERT) {
- base = commit;
- base_label = msg.label;
- next = parent;
- next_label = msg.parent_label;
- strbuf_addstr(&msgbuf, "Revert \"");
- strbuf_addstr(&msgbuf, msg.subject);
- strbuf_addstr(&msgbuf, "\"\n\nThis reverts commit ");
- strbuf_addstr(&msgbuf, sha1_to_hex(commit->object.sha1));
-
- if (commit->parents && commit->parents->next) {
- strbuf_addstr(&msgbuf, ", reversing\nchanges made to ");
- strbuf_addstr(&msgbuf, sha1_to_hex(parent->object.sha1));
- }
- strbuf_addstr(&msgbuf, ".\n");
- } else {
- const char *p;
-
- base = parent;
- base_label = msg.parent_label;
- next = commit;
- next_label = msg.label;
-
- /*
- * Append the commit log message to msgbuf; it starts
- * after the tree, parent, author, committer
- * information followed by "\n\n".
- */
- p = strstr(msg.message, "\n\n");
- if (p) {
- p += 2;
- strbuf_addstr(&msgbuf, p);
- }
-
- if (opts->record_origin) {
- strbuf_addstr(&msgbuf, "(cherry picked from commit ");
- strbuf_addstr(&msgbuf, sha1_to_hex(commit->object.sha1));
- strbuf_addstr(&msgbuf, ")\n");
- }
- }
-
- if (!opts->strategy || !strcmp(opts->strategy, "recursive") || opts->action == REVERT) {
- res = do_recursive_merge(base, next, base_label, next_label,
- head, &msgbuf, opts);
- write_message(&msgbuf, defmsg);
- } else {
- struct commit_list *common = NULL;
- struct commit_list *remotes = NULL;
-
- write_message(&msgbuf, defmsg);
-
- commit_list_insert(base, &common);
- commit_list_insert(next, &remotes);
- res = try_merge_command(opts->strategy, opts->xopts_nr, opts->xopts,
- common, sha1_to_hex(head), remotes);
- free_commit_list(common);
- free_commit_list(remotes);
- }
-
- /*
- * If the merge was clean or if it failed due to conflict, we write
- * CHERRY_PICK_HEAD for the subsequent invocation of commit to use.
- * However, if the merge did not even start, then we don't want to
- * write it at all.
- */
- if (opts->action == CHERRY_PICK && !opts->no_commit && (res == 0 || res == 1))
- write_cherry_pick_head(commit, "CHERRY_PICK_HEAD");
- if (opts->action == REVERT && ((opts->no_commit && res == 0) || res == 1))
- write_cherry_pick_head(commit, "REVERT_HEAD");
-
- if (res) {
- error(opts->action == REVERT
- ? _("could not revert %s... %s")
- : _("could not apply %s... %s"),
- find_unique_abbrev(commit->object.sha1, DEFAULT_ABBREV),
- msg.subject);
- print_advice(res == 1);
- rerere(opts->allow_rerere_auto);
- } else {
- if (!opts->no_commit)
- res = run_git_commit(defmsg, opts);
- }
-
- free_message(&msg);
- free(defmsg);
-
- return res;
-}
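
For reference, the msgbuf construction above produces the familiar messages these commands write; an illustrative revert message (the object name is made up) would read:

    Revert "teach frotz about xyzzy"

    This reverts commit 0123456789abcdef0123456789abcdef01234567.

and a cherry-pick run with -x (record_origin) appends a trailer of the form:

    (cherry picked from commit 0123456789abcdef0123456789abcdef01234567)
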
-
-static void prepare_revs(struct replay_opts *opts)
-{
- if (opts->action != REVERT)
- opts->revs->reverse ^= 1;
-
- if (prepare_revision_walk(opts->revs))
- die(_("revision walk setup failed"));
-
- if (!opts->revs->commits)
- die(_("empty commit set passed"));
-}
-
-static void read_and_refresh_cache(struct replay_opts *opts)
-{
- static struct lock_file index_lock;
- int index_fd = hold_locked_index(&index_lock, 0);
- if (read_index_preload(&the_index, NULL) < 0)
- die(_("git %s: failed to read the index"), action_name(opts));
- refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL);
- if (the_index.cache_changed) {
- if (write_index(&the_index, index_fd) ||
- commit_locked_index(&index_lock))
- die(_("git %s: failed to refresh the index"), action_name(opts));
- }
- rollback_lock_file(&index_lock);
-}
-
-/*
- * Append a commit to the end of the commit_list.
- *
- * next starts by pointing to the variable that holds the head of an
- * empty commit_list, and is updated to point to the "next" field of
- * the last item on the list as new commits are appended.
- *
- * Usage example:
- *
- * struct commit_list *list;
- * struct commit_list **next = &list;
- *
- * next = commit_list_append(c1, next);
- * next = commit_list_append(c2, next);
- * assert(commit_list_count(list) == 2);
- * return list;
- */
-static struct commit_list **commit_list_append(struct commit *commit,
- struct commit_list **next)
-{
- struct commit_list *new = xmalloc(sizeof(struct commit_list));
- new->item = commit;
- *next = new;
- new->next = NULL;
- return &new->next;
-}
-
-static int format_todo(struct strbuf *buf, struct commit_list *todo_list,
- struct replay_opts *opts)
-{
- struct commit_list *cur = NULL;
- struct commit_message msg = { NULL, NULL, NULL, NULL, NULL };
- const char *sha1_abbrev = NULL;
- const char *action_str = opts->action == REVERT ? "revert" : "pick";
-
- for (cur = todo_list; cur; cur = cur->next) {
- sha1_abbrev = find_unique_abbrev(cur->item->object.sha1, DEFAULT_ABBREV);
- if (get_message(cur->item, &msg))
- return error(_("Cannot get commit message for %s"), sha1_abbrev);
- strbuf_addf(buf, "%s %s %s\n", action_str, sha1_abbrev, msg.subject);
- }
- return 0;
-}
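
format_todo() above writes, and parse_insn_line() below reads back, one instruction per line in the sheet the sequencer keeps between invocations; an illustrative sheet for a three-commit cherry-pick (abbreviated ids and subjects are made up) would look like:

    pick fe7f2a3 first change
    pick 8c0dd5f second change
    pick 21b9b32 third change

with "revert" in place of "pick" when running git revert.
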
-
-static struct commit *parse_insn_line(char *start, struct replay_opts *opts)
-{
- unsigned char commit_sha1[20];
- char sha1_abbrev[40];
- enum replay_action action;
- int insn_len = 0;
- char *p, *q;
-
- if (!prefixcmp(start, "pick ")) {
- action = CHERRY_PICK;
- insn_len = strlen("pick");
- p = start + insn_len + 1;
- } else if (!prefixcmp(start, "revert ")) {
- action = REVERT;
- insn_len = strlen("revert");
- p = start + insn_len + 1;
- } else
- return NULL;
-
- q = strchr(p, ' ');
- if (!q)
- return NULL;
- q++;
-
- strlcpy(sha1_abbrev, p, q - p);
-
- /*
- * Verify that the action matches up with the one in
- * opts; we don't support arbitrary instructions
- */
- if (action != opts->action) {
- const char *action_str;
- action_str = action == REVERT ? "revert" : "cherry-pick";
- error(_("Cannot %s during a %s"), action_str, action_name(opts));
- return NULL;
- }
-
- if (get_sha1(sha1_abbrev, commit_sha1) < 0)
- return NULL;
-
- return lookup_commit_reference(commit_sha1);
-}
-
-static int parse_insn_buffer(char *buf, struct commit_list **todo_list,
- struct replay_opts *opts)
-{
- struct commit_list **next = todo_list;
- struct commit *commit;
- char *p = buf;
- int i;
-
- for (i = 1; *p; i++) {
- commit = parse_insn_line(p, opts);
- if (!commit)
- return error(_("Could not parse line %d."), i);
- next = commit_list_append(commit, next);
- p = strchrnul(p, '\n');
- if (*p)
- p++;
- }
- if (!*todo_list)
- return error(_("No commits parsed."));
- return 0;
-}
-
-static void read_populate_todo(struct commit_list **todo_list,
- struct replay_opts *opts)
-{
- const char *todo_file = git_path(SEQ_TODO_FILE);
- struct strbuf buf = STRBUF_INIT;
- int fd, res;
-
- fd = open(todo_file, O_RDONLY);
- if (fd < 0)
- die_errno(_("Could not open %s"), todo_file);
- if (strbuf_read(&buf, fd, 0) < 0) {
- close(fd);
- strbuf_release(&buf);
- die(_("Could not read %s."), todo_file);
- }
- close(fd);
-
- res = parse_insn_buffer(buf.buf, todo_list, opts);
- strbuf_release(&buf);
- if (res)
- die(_("Unusable instruction sheet: %s"), todo_file);
-}
-
-static int populate_opts_cb(const char *key, const char *value, void *data)
-{
- struct replay_opts *opts = data;
- int error_flag = 1;
-
- if (!value)
- error_flag = 0;
- else if (!strcmp(key, "options.no-commit"))
- opts->no_commit = git_config_bool_or_int(key, value, &error_flag);
- else if (!strcmp(key, "options.edit"))
- opts->edit = git_config_bool_or_int(key, value, &error_flag);
- else if (!strcmp(key, "options.signoff"))
- opts->signoff = git_config_bool_or_int(key, value, &error_flag);
- else if (!strcmp(key, "options.record-origin"))
- opts->record_origin = git_config_bool_or_int(key, value, &error_flag);
- else if (!strcmp(key, "options.allow-ff"))
- opts->allow_ff = git_config_bool_or_int(key, value, &error_flag);
- else if (!strcmp(key, "options.mainline"))
- opts->mainline = git_config_int(key, value);
- else if (!strcmp(key, "options.strategy"))
- git_config_string(&opts->strategy, key, value);
- else if (!strcmp(key, "options.strategy-option")) {
- ALLOC_GROW(opts->xopts, opts->xopts_nr + 1, opts->xopts_alloc);
- opts->xopts[opts->xopts_nr++] = xstrdup(value);
- } else
- return error(_("Invalid key: %s"), key);
-
- if (!error_flag)
- return error(_("Invalid value for %s: %s"), key, value);
-
- return 0;
-}
-
-static void read_populate_opts(struct replay_opts **opts_ptr)
-{
- const char *opts_file = git_path(SEQ_OPTS_FILE);
-
- if (!file_exists(opts_file))
- return;
- if (git_config_from_file(populate_opts_cb, opts_file, *opts_ptr) < 0)
- die(_("Malformed options sheet: %s"), opts_file);
-}
-
-static void walk_revs_populate_todo(struct commit_list **todo_list,
- struct replay_opts *opts)
-{
- struct commit *commit;
- struct commit_list **next;
-
- prepare_revs(opts);
-
- next = todo_list;
- while ((commit = get_revision(opts->revs)))
- next = commit_list_append(commit, next);
-}
-
-static int create_seq_dir(void)
-{
- const char *seq_dir = git_path(SEQ_DIR);
-
- if (file_exists(seq_dir)) {
- error(_("a cherry-pick or revert is already in progress"));
- advise(_("try \"git cherry-pick (--continue | --quit | --abort)\""));
- return -1;
- }
- else if (mkdir(seq_dir, 0777) < 0)
- die_errno(_("Could not create sequencer directory %s"), seq_dir);
- return 0;
-}
-
-static void save_head(const char *head)
-{
- const char *head_file = git_path(SEQ_HEAD_FILE);
- static struct lock_file head_lock;
- struct strbuf buf = STRBUF_INIT;
- int fd;
-
- fd = hold_lock_file_for_update(&head_lock, head_file, LOCK_DIE_ON_ERROR);
- strbuf_addf(&buf, "%s\n", head);
- if (write_in_full(fd, buf.buf, buf.len) < 0)
- die_errno(_("Could not write to %s"), head_file);
- if (commit_lock_file(&head_lock) < 0)
- die(_("Error wrapping up %s."), head_file);
-}
-
-static int reset_for_rollback(const unsigned char *sha1)
-{
- const char *argv[4]; /* reset --merge <arg> + NULL */
- argv[0] = "reset";
- argv[1] = "--merge";
- argv[2] = sha1_to_hex(sha1);
- argv[3] = NULL;
- return run_command_v_opt(argv, RUN_GIT_CMD);
-}
-
-static int rollback_single_pick(void)
-{
- unsigned char head_sha1[20];
-
- if (!file_exists(git_path("CHERRY_PICK_HEAD")) &&
- !file_exists(git_path("REVERT_HEAD")))
- return error(_("no cherry-pick or revert in progress"));
- if (read_ref_full("HEAD", head_sha1, 0, NULL))
- return error(_("cannot resolve HEAD"));
- if (is_null_sha1(head_sha1))
- return error(_("cannot abort from a branch yet to be born"));
- return reset_for_rollback(head_sha1);
-}
-
-static int sequencer_rollback(struct replay_opts *opts)
-{
- const char *filename;
- FILE *f;
- unsigned char sha1[20];
- struct strbuf buf = STRBUF_INIT;
-
- filename = git_path(SEQ_HEAD_FILE);
- f = fopen(filename, "r");
- if (!f && errno == ENOENT) {
- /*
- * There is no multiple-cherry-pick in progress.
- * If CHERRY_PICK_HEAD or REVERT_HEAD indicates
- * a single-cherry-pick in progress, abort that.
- */
- return rollback_single_pick();
- }
- if (!f)
- return error(_("cannot open %s: %s"), filename,
- strerror(errno));
- if (strbuf_getline(&buf, f, '\n')) {
- error(_("cannot read %s: %s"), filename, ferror(f) ?
- strerror(errno) : _("unexpected end of file"));
- fclose(f);
- goto fail;
- }
- fclose(f);
- if (get_sha1_hex(buf.buf, sha1) || buf.buf[40] != '\0') {
- error(_("stored pre-cherry-pick HEAD file '%s' is corrupt"),
- filename);
- goto fail;
- }
- if (reset_for_rollback(sha1))
- goto fail;
- remove_sequencer_state();
- strbuf_release(&buf);
- return 0;
-fail:
- strbuf_release(&buf);
- return -1;
-}
-
-static void save_todo(struct commit_list *todo_list, struct replay_opts *opts)
-{
- const char *todo_file = git_path(SEQ_TODO_FILE);
- static struct lock_file todo_lock;
- struct strbuf buf = STRBUF_INIT;
- int fd;
-
- fd = hold_lock_file_for_update(&todo_lock, todo_file, LOCK_DIE_ON_ERROR);
- if (format_todo(&buf, todo_list, opts) < 0)
- die(_("Could not format %s."), todo_file);
- if (write_in_full(fd, buf.buf, buf.len) < 0) {
- strbuf_release(&buf);
- die_errno(_("Could not write to %s"), todo_file);
- }
- if (commit_lock_file(&todo_lock) < 0) {
- strbuf_release(&buf);
- die(_("Error wrapping up %s."), todo_file);
- }
- strbuf_release(&buf);
-}
-
-static void save_opts(struct replay_opts *opts)
-{
- const char *opts_file = git_path(SEQ_OPTS_FILE);
-
- if (opts->no_commit)
- git_config_set_in_file(opts_file, "options.no-commit", "true");
- if (opts->edit)
- git_config_set_in_file(opts_file, "options.edit", "true");
- if (opts->signoff)
- git_config_set_in_file(opts_file, "options.signoff", "true");
- if (opts->record_origin)
- git_config_set_in_file(opts_file, "options.record-origin", "true");
- if (opts->allow_ff)
- git_config_set_in_file(opts_file, "options.allow-ff", "true");
- if (opts->mainline) {
- struct strbuf buf = STRBUF_INIT;
- strbuf_addf(&buf, "%d", opts->mainline);
- git_config_set_in_file(opts_file, "options.mainline", buf.buf);
- strbuf_release(&buf);
- }
- if (opts->strategy)
- git_config_set_in_file(opts_file, "options.strategy", opts->strategy);
- if (opts->xopts) {
- int i;
- for (i = 0; i < opts->xopts_nr; i++)
- git_config_set_multivar_in_file(opts_file,
- "options.strategy-option",
- opts->xopts[i], "^$", 0);
- }
-}
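
Because save_opts() goes through the config-file writer, the resulting opts sheet is an ordinary configuration file; a sketch of what it might contain (which keys appear depends on the options given on the command line):

    [options]
    	no-commit = true
    	signoff = true
    	strategy = recursive
    	strategy-option = ours
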
-
-static int pick_commits(struct commit_list *todo_list, struct replay_opts *opts)
-{
- struct commit_list *cur;
- int res;
-
- setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
- if (opts->allow_ff)
- assert(!(opts->signoff || opts->no_commit ||
- opts->record_origin || opts->edit));
- read_and_refresh_cache(opts);
-
- for (cur = todo_list; cur; cur = cur->next) {
- save_todo(cur, opts);
- res = do_pick_commit(cur->item, opts);
- if (res)
- return res;
- }
-
- /*
- * Sequence of picks finished successfully; cleanup by
- * removing the .git/sequencer directory
- */
- remove_sequencer_state();
- return 0;
-}
-
-static int continue_single_pick(void)
-{
- const char *argv[] = { "commit", NULL };
-
- if (!file_exists(git_path("CHERRY_PICK_HEAD")) &&
- !file_exists(git_path("REVERT_HEAD")))
- return error(_("no cherry-pick or revert in progress"));
- return run_command_v_opt(argv, RUN_GIT_CMD);
-}
-
-static int sequencer_continue(struct replay_opts *opts)
-{
- struct commit_list *todo_list = NULL;
-
- if (!file_exists(git_path(SEQ_TODO_FILE)))
- return continue_single_pick();
- read_populate_opts(&opts);
- read_populate_todo(&todo_list, opts);
-
- /* Verify that the conflict has been resolved */
- if (file_exists(git_path("CHERRY_PICK_HEAD")) ||
- file_exists(git_path("REVERT_HEAD"))) {
- int ret = continue_single_pick();
- if (ret)
- return ret;
- }
- if (index_differs_from("HEAD", 0))
- return error_dirty_index(opts);
- todo_list = todo_list->next;
- return pick_commits(todo_list, opts);
-}
-
-static int single_pick(struct commit *cmit, struct replay_opts *opts)
-{
- setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
- return do_pick_commit(cmit, opts);
-}
-
-static int pick_revisions(struct replay_opts *opts)
-{
- struct commit_list *todo_list = NULL;
- unsigned char sha1[20];
-
- if (opts->subcommand == REPLAY_NONE)
- assert(opts->revs);
-
- read_and_refresh_cache(opts);
-
- /*
- * Decide what to do depending on the arguments; a fresh
- * cherry-pick should be handled differently from an existing
- * one that is being continued
- */
- if (opts->subcommand == REPLAY_REMOVE_STATE) {
- remove_sequencer_state();
- return 0;
- }
- if (opts->subcommand == REPLAY_ROLLBACK)
- return sequencer_rollback(opts);
- if (opts->subcommand == REPLAY_CONTINUE)
- return sequencer_continue(opts);
-
- /*
- * If we were called as "git cherry-pick <commit>", just
- * cherry-pick/revert it, set CHERRY_PICK_HEAD /
- * REVERT_HEAD, and don't touch the sequencer state.
- * This means it is possible to cherry-pick in the middle
- * of a cherry-pick sequence.
- */
- if (opts->revs->cmdline.nr == 1 &&
- opts->revs->cmdline.rev->whence == REV_CMD_REV &&
- opts->revs->no_walk &&
- !opts->revs->cmdline.rev->flags) {
- struct commit *cmit;
- if (prepare_revision_walk(opts->revs))
- die(_("revision walk setup failed"));
- cmit = get_revision(opts->revs);
- if (!cmit || get_revision(opts->revs))
- die("BUG: expected exactly one commit from walk");
- return single_pick(cmit, opts);
- }
-
- /*
- * Start a new cherry-pick/ revert sequence; but
- * first, make sure that an existing one isn't in
- * progress
- */
-
- walk_revs_populate_todo(&todo_list, opts);
- if (create_seq_dir() < 0)
- return -1;
- if (get_sha1("HEAD", sha1)) {
- if (opts->action == REVERT)
- return error(_("Can't revert as initial commit"));
- return error(_("Can't cherry-pick into empty head"));
- }
- save_head(sha1_to_hex(sha1));
- save_opts(opts);
- return pick_commits(todo_list, opts);
-}
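
Taken together, pick_revisions() and the continue/rollback helpers above back the user-visible flow; the command-line side (not shown in this hunk) maps --continue, --abort and --quit to sequencer_continue(), sequencer_rollback() and remove_sequencer_state() respectively. For example (branch names invented, output omitted):

    $ git cherry-pick maint..topic     # start a multi-commit pick
    ... a conflict stops one of the picks ...
    $ git cherry-pick --continue       # resume after resolving
    $ git cherry-pick --abort          # or roll back to the saved HEAD
    $ git cherry-pick --quit           # or just drop the sequencer state
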
-
int cmd_revert(int argc, const char **argv, const char *prefix)
{
struct replay_opts opts;
memset(&opts, 0, sizeof(opts));
if (isatty(0))
opts.edit = 1;
- opts.action = REVERT;
+ opts.action = REPLAY_REVERT;
git_config(git_default_config, NULL);
parse_args(argc, argv, &opts);
- res = pick_revisions(&opts);
+ res = sequencer_pick_revisions(&opts);
if (res < 0)
die(_("revert failed"));
return res;
int res;
memset(&opts, 0, sizeof(opts));
- opts.action = CHERRY_PICK;
+ opts.action = REPLAY_PICK;
git_config(git_default_config, NULL);
parse_args(argc, argv, &opts);
- res = pick_revisions(&opts);
+ res = sequencer_pick_revisions(&opts);
if (res < 0)
die(_("cherry-pick failed"));
return res;
args->use_ofs_delta = 1;
if (server_supports("side-band-64k"))
use_sideband = 1;
+ if (!server_supports("quiet"))
+ args->quiet = 0;
if (!remote_refs) {
fprintf(stderr, "No refs in common and none specified; doing nothing.\n"
char *old_hex = sha1_to_hex(ref->old_sha1);
char *new_hex = sha1_to_hex(ref->new_sha1);
- if (!cmds_sent && (status_report || use_sideband)) {
- packet_buf_write(&req_buf, "%s %s %s%c%s%s",
+ if (!cmds_sent && (status_report || use_sideband || args->quiet)) {
+ packet_buf_write(&req_buf, "%s %s %s%c%s%s%s",
old_hex, new_hex, ref->name, 0,
status_report ? " report-status" : "",
- use_sideband ? " side-band-64k" : "");
+ use_sideband ? " side-band-64k" : "",
+ args->quiet ? " quiet" : "");
}
else
packet_buf_write(&req_buf, "%s %s %s",
args.force_update = 1;
continue;
}
+ if (!strcmp(arg, "--quiet")) {
+ args.quiet = 1;
+ continue;
+ }
if (!strcmp(arg, "--verbose")) {
args.verbose = 1;
continue;
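
The hunks above let send-pack pass "quiet" along only when the server advertised it and the user asked for --quiet; with all three capabilities active, the payload of the first command pkt-line takes the form (object names shortened for illustration):

    <old-sha1> <new-sha1> refs/heads/master\0 report-status side-band-64k quiet
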
while (1) {
char ch;
ssize_t len = xread(fd, &ch, 1);
- if (len < 0)
- return -1;
+ if (len <= 0)
+ return len;
strbuf_addch(sb, ch);
if (ch == '\n')
break;
extern int get_sha1_hex(const char *hex, unsigned char *sha1);
extern char *sha1_to_hex(const unsigned char *sha1); /* static buffer result! */
-extern int read_ref_full(const char *filename, unsigned char *sha1,
+extern int read_ref_full(const char *refname, unsigned char *sha1,
int reading, int *flags);
-extern int read_ref(const char *filename, unsigned char *sha1);
+extern int read_ref(const char *refname, unsigned char *sha1);
/*
 * Resolve a reference, recursively following symbolic references.
extern struct ref *find_ref_by_name(const struct ref *list, const char *name);
#define CONNECT_VERBOSE (1u << 0)
-extern char *git_getpass(const char *prompt);
extern struct child_process *git_connect(int fd[2], const char *url, const char *prog, int flags);
extern int finish_connect(struct child_process *conn);
extern int git_connection_is_socket(struct child_process *conn);
};
extern struct ref **get_remote_heads(int in, struct ref **list, unsigned int flags, struct extra_have_objects *);
extern int server_supports(const char *feature);
+extern const char *parse_feature_request(const char *features, const char *feature);
extern struct packed_git *parse_pack_index(unsigned char *sha1, const char *idx_path);
#include "log-tree.h"
#include "refs.h"
#include "userdiff.h"
+#include "sha1-array.h"
static struct combine_diff_path *intersect_paths(struct combine_diff_path *curr, int n, int num_parent)
{
}
void diff_tree_combined(const unsigned char *sha1,
- const unsigned char parent[][20],
- int num_parent,
+ const struct sha1_array *parents,
int dense,
struct rev_info *rev)
{
struct diff_options *opt = &rev->diffopt;
struct diff_options diffopts;
struct combine_diff_path *p, *paths = NULL;
- int i, num_paths, needsep, show_log_first;
+ int i, num_paths, needsep, show_log_first, num_parent = parents->nr;
diffopts = *opt;
diffopts.output_format = DIFF_FORMAT_NO_OUTPUT;
diffopts.output_format = stat_opt;
else
diffopts.output_format = DIFF_FORMAT_NO_OUTPUT;
- diff_tree_sha1(parent[i], sha1, "", &diffopts);
+ diff_tree_sha1(parents->sha1[i], sha1, "", &diffopts);
diffcore_std(&diffopts);
paths = intersect_paths(paths, i, num_parent);
}
}
-void diff_tree_combined_merge(const unsigned char *sha1,
- int dense, struct rev_info *rev)
+void diff_tree_combined_merge(const struct commit *commit, int dense,
+ struct rev_info *rev)
{
- int num_parent;
- const unsigned char (*parent)[20];
- struct commit *commit = lookup_commit(sha1);
- struct commit_list *parents;
-
- /* count parents */
- for (parents = commit->parents, num_parent = 0;
- parents;
- parents = parents->next, num_parent++)
- ; /* nothing */
-
- parent = xmalloc(num_parent * sizeof(*parent));
- for (parents = commit->parents, num_parent = 0;
- parents;
- parents = parents->next, num_parent++)
- hashcpy((unsigned char *)(parent + num_parent),
- parents->item->object.sha1);
- diff_tree_combined(sha1, parent, num_parent, dense, rev);
+ struct commit_list *parent = commit->parents;
+ struct sha1_array parents = SHA1_ARRAY_INIT;
+
+ while (parent) {
+ sha1_array_append(&parents, parent->item->object.sha1);
+ parent = parent->next;
+ }
+ diff_tree_combined(commit->object.sha1, &parents, dense, rev);
+ sha1_array_clear(&parents);
}
#include "diff.h"
#include "revision.h"
#include "notes.h"
+#include "gpg-interface.h"
int save_commit_buffer = 1;
return ret;
}
-void clear_commit_marks(struct commit *commit, unsigned int mark)
+static void clear_commit_marks_1(struct commit_list **plist,
+ struct commit *commit, unsigned int mark)
{
while (commit) {
struct commit_list *parents;
return;
while ((parents = parents->next))
- clear_commit_marks(parents->item, mark);
+ commit_list_insert(parents->item, plist);
commit = commit->parents->item;
}
}
+void clear_commit_marks(struct commit *commit, unsigned int mark)
+{
+ struct commit_list *list = NULL;
+ commit_list_insert(commit, &list);
+ while (list)
+ clear_commit_marks_1(&list, pop_commit(&list), mark);
+}
+
void clear_commit_marks_for_object_array(struct object_array *a, unsigned mark)
{
struct object *object;
return result;
}
+static const char gpg_sig_header[] = "gpgsig";
+static const int gpg_sig_header_len = sizeof(gpg_sig_header) - 1;
+
+static int do_sign_commit(struct strbuf *buf, const char *keyid)
+{
+ struct strbuf sig = STRBUF_INIT;
+ int inspos, copypos;
+
+ /* find the end of the header */
+ inspos = strstr(buf->buf, "\n\n") - buf->buf + 1;
+
+ if (!keyid || !*keyid)
+ keyid = get_signing_key();
+ if (sign_buffer(buf, &sig, keyid)) {
+ strbuf_release(&sig);
+ return -1;
+ }
+
+ for (copypos = 0; sig.buf[copypos]; ) {
+ const char *bol = sig.buf + copypos;
+ const char *eol = strchrnul(bol, '\n');
+ int len = (eol - bol) + !!*eol;
+
+ if (!copypos) {
+ strbuf_insert(buf, inspos, gpg_sig_header, gpg_sig_header_len);
+ inspos += gpg_sig_header_len;
+ }
+ strbuf_insert(buf, inspos++, " ", 1);
+ strbuf_insert(buf, inspos, bol, len);
+ inspos += len;
+ copypos += len;
+ }
+ strbuf_release(&sig);
+ return 0;
+}
+
+int parse_signed_commit(const unsigned char *sha1,
+ struct strbuf *payload, struct strbuf *signature)
+{
+ unsigned long size;
+ enum object_type type;
+ char *buffer = read_sha1_file(sha1, &type, &size);
+ int in_signature, saw_signature = -1;
+ char *line, *tail;
+
+ if (!buffer || type != OBJ_COMMIT)
+ goto cleanup;
+
+ line = buffer;
+ tail = buffer + size;
+ in_signature = 0;
+ saw_signature = 0;
+ while (line < tail) {
+ const char *sig = NULL;
+ char *next = memchr(line, '\n', tail - line);
+
+ next = next ? next + 1 : tail;
+ if (in_signature && line[0] == ' ')
+ sig = line + 1;
+ else if (!prefixcmp(line, gpg_sig_header) &&
+ line[gpg_sig_header_len] == ' ')
+ sig = line + gpg_sig_header_len + 1;
+ if (sig) {
+ strbuf_add(signature, sig, next - sig);
+ saw_signature = 1;
+ in_signature = 1;
+ } else {
+ if (*line == '\n')
+ /* dump the whole remainder of the buffer */
+ next = tail;
+ strbuf_add(payload, line, next - line);
+ in_signature = 0;
+ }
+ line = next;
+ }
+ cleanup:
+ free(buffer);
+ return saw_signature;
+}
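
With do_sign_commit() inserting the signature and parse_signed_commit() splitting it back out, a signed commit object carries the detached signature in a "gpgsig" header whose continuation lines are indented by one space. Schematically (identities, timestamps and object names are placeholders):

    tree <tree-sha1>
    parent <parent-sha1>
    author A U Thor <author@example.com> 1331798400 +0000
    committer C O Mitter <committer@example.com> 1331798400 +0000
    gpgsig -----BEGIN PGP SIGNATURE-----
     
     iQEcBAABAgAGBQJ...
     -----END PGP SIGNATURE-----

    <commit message>
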
+
static void handle_signed_tag(struct commit *parent, struct commit_extra_header ***tail)
{
struct merge_remote_desc *desc;
strbuf_addch(buffer, '\n');
}
-struct commit_extra_header *read_commit_extra_headers(struct commit *commit)
+struct commit_extra_header *read_commit_extra_headers(struct commit *commit,
+ const char **exclude)
{
struct commit_extra_header *extra = NULL;
unsigned long size;
enum object_type type;
char *buffer = read_sha1_file(commit->object.sha1, &type, &size);
if (buffer && type == OBJ_COMMIT)
- extra = read_commit_extra_header_lines(buffer, size);
+ extra = read_commit_extra_header_lines(buffer, size, exclude);
free(buffer);
return extra;
}
(len == 8 && !memcmp(field, "encoding ", 9)));
}
-struct commit_extra_header *read_commit_extra_header_lines(const char *buffer, size_t size)
+static int excluded_header_field(const char *field, size_t len, const char **exclude)
+{
+ if (!exclude)
+ return 0;
+
+ while (*exclude) {
+ size_t xlen = strlen(*exclude);
+ if (len == xlen &&
+ !memcmp(field, *exclude, xlen) && field[xlen] == ' ')
+ return 1;
+ exclude++;
+ }
+ return 0;
+}
+
+struct commit_extra_header *read_commit_extra_header_lines(const char *buffer, size_t size,
+ const char **exclude)
{
struct commit_extra_header *extra = NULL, **tail = &extra, *it = NULL;
const char *line, *next, *eof, *eob;
if (next <= eof)
eof = next;
- if (standard_header_field(line, eof - line))
+ if (standard_header_field(line, eof - line) ||
+ excluded_header_field(line, eof - line, exclude))
continue;
it = xcalloc(1, sizeof(*it));
}
}
-int commit_tree(const char *msg, unsigned char *tree,
+int commit_tree(const struct strbuf *msg, unsigned char *tree,
struct commit_list *parents, unsigned char *ret,
- const char *author)
+ const char *author, const char *sign_commit)
{
struct commit_extra_header *extra = NULL, **tail = &extra;
int result;
append_merge_tag_headers(parents, &tail);
- result = commit_tree_extended(msg, tree, parents, ret, author, extra);
+ result = commit_tree_extended(msg, tree, parents, ret,
+ author, sign_commit, extra);
free_commit_extra_headers(extra);
return result;
}
"You may want to amend it after fixing the message, or set the config\n"
"variable i18n.commitencoding to the encoding your project uses.\n";
-int commit_tree_extended(const char *msg, unsigned char *tree,
+int commit_tree_extended(const struct strbuf *msg, unsigned char *tree,
struct commit_list *parents, unsigned char *ret,
- const char *author, struct commit_extra_header *extra)
+ const char *author, const char *sign_commit,
+ struct commit_extra_header *extra)
{
int result;
int encoding_is_utf8;
assert_sha1_type(tree, OBJ_TREE);
+ if (memchr(msg->buf, '\0', msg->len))
+ return error("a NUL byte in commit log message not allowed.");
+
/* Not having i18n.commitencoding is the same as having utf-8 */
encoding_is_utf8 = is_encoding_utf8(git_commit_encoding);
strbuf_addch(&buffer, '\n');
/* And add the comment */
- strbuf_addstr(&buffer, msg);
+ strbuf_addbuf(&buffer, msg);
/* And check the encoding */
if (encoding_is_utf8 && !is_utf8(buffer.buf))
fprintf(stderr, commit_utf8_warn);
+ if (sign_commit && do_sign_commit(&buffer, sign_commit))
+ return -1;
+
result = write_sha1_file(buffer.buf, buffer.len, commit_type, ret);
strbuf_release(&buffer);
return result;
extern void append_merge_tag_headers(struct commit_list *parents,
struct commit_extra_header ***tail);
-extern int commit_tree(const char *msg, unsigned char *tree,
+extern int commit_tree(const struct strbuf *msg, unsigned char *tree,
struct commit_list *parents, unsigned char *ret,
- const char *author);
+ const char *author, const char *sign_commit);
-extern int commit_tree_extended(const char *msg, unsigned char *tree,
+extern int commit_tree_extended(const struct strbuf *msg, unsigned char *tree,
struct commit_list *parents, unsigned char *ret,
- const char *author,
+ const char *author, const char *sign_commit,
struct commit_extra_header *);
-extern struct commit_extra_header *read_commit_extra_headers(struct commit *);
-extern struct commit_extra_header *read_commit_extra_header_lines(const char *buf, size_t len);
+extern struct commit_extra_header *read_commit_extra_headers(struct commit *, const char **);
+extern struct commit_extra_header *read_commit_extra_header_lines(const char *buf, size_t len, const char **);
extern void free_commit_extra_headers(struct commit_extra_header *extra);
*/
struct commit *get_merge_parent(const char *name);
+extern int parse_signed_commit(const unsigned char *sha1,
+ struct strbuf *message, struct strbuf *signature);
#endif /* COMMIT_H */
size_t namelen, valuelen;
char *envstr;
- if (!name || !value) return -1;
+ if (!name || strchr(name, '=') || !value) {
+ errno = EINVAL;
+ return -1;
+ }
if (!replace) {
char *oldval = NULL;
oldval = getenv(name);
namelen = strlen(name);
valuelen = strlen(value);
envstr = malloc((namelen + valuelen + 2));
- if (!envstr) return -1;
+ if (!envstr) {
+ errno = ENOMEM;
+ return -1;
+ }
memcpy(envstr, name, namelen);
envstr[namelen] = '=';
--- /dev/null
+#include "git-compat-util.h"
+#include "compat/terminal.h"
+#include "sigchain.h"
+#include "strbuf.h"
+
+#ifdef HAVE_DEV_TTY
+
+static int term_fd = -1;
+static struct termios old_term;
+
+static void restore_term(void)
+{
+ if (term_fd < 0)
+ return;
+
+ tcsetattr(term_fd, TCSAFLUSH, &old_term);
+ term_fd = -1;
+}
+
+static void restore_term_on_signal(int sig)
+{
+ restore_term();
+ sigchain_pop(sig);
+ raise(sig);
+}
+
+char *git_terminal_prompt(const char *prompt, int echo)
+{
+ static struct strbuf buf = STRBUF_INIT;
+ int r;
+ FILE *fh;
+
+ fh = fopen("/dev/tty", "w+");
+ if (!fh)
+ return NULL;
+
+ if (!echo) {
+ struct termios t;
+
+ if (tcgetattr(fileno(fh), &t) < 0) {
+ fclose(fh);
+ return NULL;
+ }
+
+ old_term = t;
+ term_fd = fileno(fh);
+ sigchain_push_common(restore_term_on_signal);
+
+ t.c_lflag &= ~ECHO;
+ if (tcsetattr(fileno(fh), TCSAFLUSH, &t) < 0) {
+ term_fd = -1;
+ fclose(fh);
+ return NULL;
+ }
+ }
+
+ fputs(prompt, fh);
+ fflush(fh);
+
+ r = strbuf_getline(&buf, fh, '\n');
+ if (!echo) {
+ putc('\n', fh);
+ fflush(fh);
+ }
+
+ restore_term();
+ fclose(fh);
+
+ if (r == EOF)
+ return NULL;
+ return buf.buf;
+}
+
+#else
+
+char *git_terminal_prompt(const char *prompt, int echo)
+{
+ return getpass(prompt);
+}
+
+#endif
--- /dev/null
+#ifndef COMPAT_TERMINAL_H
+#define COMPAT_TERMINAL_H
+
+char *git_terminal_prompt(const char *prompt, int echo);
+
+#endif /* COMPAT_TERMINAL_H */
int server_supports(const char *feature)
{
- return server_capabilities &&
- strstr(server_capabilities, feature) != NULL;
+ return !!parse_feature_request(server_capabilities, feature);
+}
+
+const char *parse_feature_request(const char *feature_list, const char *feature)
+{
+ int len;
+
+ if (!feature_list)
+ return NULL;
+
+ len = strlen(feature);
+ while (*feature_list) {
+ const char *found = strstr(feature_list, feature);
+ if (!found)
+ return NULL;
+ if ((feature_list == found || isspace(found[-1])) &&
+ (!found[len] || isspace(found[len]) || found[len] == '='))
+ return found;
+ feature_list = found + 1;
+ }
+ return NULL;
}
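
parse_feature_request() only accepts a feature name delimited by whitespace (or followed by '='), so asking for "multi_ack" no longer matches a "multi_ack_detailed" advertisement by substring accident. A minimal standalone sketch of the same matching rule, using only libc (find_feature and the capability string are made up for illustration and are not part of the patch):

	#include <assert.h>
	#include <ctype.h>
	#include <string.h>

	/* Same word-boundary rule as parse_feature_request() above. */
	static const char *find_feature(const char *feature_list, const char *feature)
	{
		size_t len = strlen(feature);

		if (!feature_list)
			return NULL;
		while (*feature_list) {
			const char *found = strstr(feature_list, feature);
			if (!found)
				return NULL;
			if ((feature_list == found ||
			     isspace((unsigned char)found[-1])) &&
			    (!found[len] ||
			     isspace((unsigned char)found[len]) ||
			     found[len] == '='))
				return found;
			feature_list = found + 1;
		}
		return NULL;
	}

	int main(void)
	{
		const char *caps = "multi_ack_detailed side-band-64k quiet";

		assert(!find_feature(caps, "multi_ack"));    /* not advertised bare */
		assert(find_feature(caps, "side-band-64k")); /* advertised */
		assert(find_feature(caps, "quiet"));         /* advertised, at end */
		return 0;
	}
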
enum protocol {
free(conn);
return code;
}
-
-char *git_getpass(const char *prompt)
-{
- const char *askpass;
- struct child_process pass;
- const char *args[3];
- static struct strbuf buffer = STRBUF_INIT;
-
- askpass = getenv("GIT_ASKPASS");
- if (!askpass)
- askpass = askpass_program;
- if (!askpass)
- askpass = getenv("SSH_ASKPASS");
- if (!askpass || !(*askpass)) {
- char *result = getpass(prompt);
- if (!result)
- die_errno("Could not read password");
- return result;
- }
-
- args[0] = askpass;
- args[1] = prompt;
- args[2] = NULL;
-
- memset(&pass, 0, sizeof(pass));
- pass.argv = args;
- pass.out = -1;
-
- if (start_command(&pass))
- exit(1);
-
- strbuf_reset(&buffer);
- if (strbuf_read(&buffer, pass.out, 20) < 0)
- die("failed to read password from %s\n", askpass);
-
- close(pass.out);
-
- if (finish_command(&pass))
- exit(1);
-
- strbuf_setlen(&buffer, strcspn(buffer.buf, "\r\n"));
-
- return buffer.buf;
-}
# get some config options from git-config
local output="$(git config -z --get-regexp '^(svn-remote\..*\.url|bash\.showupstream)$' 2>/dev/null | tr '\0\n' '\n ')"
- while read key value; do
+ while read -r key value; do
case "$key" in
bash.showupstream)
GIT_PS1_SHOWUPSTREAM="$value"
local ref entry
git --git-dir="$dir" for-each-ref --shell --format="ref=%(refname:short)" \
"refs/remotes/" | \
- while read entry; do
+ while read -r entry; do
eval "$entry"
ref="${ref#*/}"
if [[ "$ref" == "$cur"* ]]; then
case "$cur" in
refs|refs/*)
git ls-remote "$dir" "$cur*" 2>/dev/null | \
- while read hash i; do
+ while read -r hash i; do
case "$i" in
*^{}) ;;
*) echo "$i" ;;
;;
*)
git ls-remote "$dir" HEAD ORIG_HEAD 'refs/tags/*' 'refs/heads/*' 'refs/remotes/*' 2>/dev/null | \
- while read hash i; do
+ while read -r hash i; do
case "$i" in
*^{}) ;;
refs/*) echo "${i#refs/*/}" ;;
{
local i hash
git ls-remote "$1" 'refs/heads/*' 2>/dev/null | \
- while read hash i; do
+ while read -r hash i; do
echo "$i:refs/remotes/$1/${i#refs/heads/}"
done
}
__git_merge_options="
--no-commit --no-stat --log --no-log --squash --strategy
- --commit --stat --no-squash --ff --no-ff --ff-only
+ --commit --stat --no-squash --ff --no-ff --ff-only --edit --no-edit
"
_git_merge ()
done
git --git-dir="$(__gitdir)" config $config_file --list 2>/dev/null |
- while read line
+ while read -r line
do
case "$line" in
*.*=*)
# workaround zsh's bug that leaves 'words' as a special
# variable in versions < 4.3.12
typeset -h words
+
+ # workaround zsh's bug that quotes spaces in the COMPREPLY
+ # array if IFS doesn't contain spaces.
+ typeset -h IFS
fi
local cur words cword prev
# workaround zsh's bug that leaves 'words' as a special
# variable in versions < 4.3.12
typeset -h words
+
+ # workaround zsh's bug that quotes spaces in the COMPREPLY
+ # array if IFS doesn't contain spaces.
+ typeset -h IFS
fi
local cur words cword prev
--- /dev/null
+git-credential-osxkeychain
--- /dev/null
+all:: git-credential-osxkeychain
+
+CC = gcc
+RM = rm -f
+CFLAGS = -g -Wall
+
+git-credential-osxkeychain: git-credential-osxkeychain.o
+ $(CC) -o $@ $< -Wl,-framework -Wl,Security
+
+git-credential-osxkeychain.o: git-credential-osxkeychain.c
+ $(CC) -c $(CFLAGS) $<
+
+clean:
+ $(RM) git-credential-osxkeychain git-credential-osxkeychain.o
--- /dev/null
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <Security/Security.h>
+
+static SecProtocolType protocol;
+static char *host;
+static char *path;
+static char *username;
+static char *password;
+static UInt16 port;
+
+static void die(const char *err, ...)
+{
+ char msg[4096];
+ va_list params;
+ va_start(params, err);
+ vsnprintf(msg, sizeof(msg), err, params);
+ fprintf(stderr, "%s\n", msg);
+ va_end(params);
+ exit(1);
+}
+
+static void *xstrdup(const char *s1)
+{
+ void *ret = strdup(s1);
+ if (!ret)
+ die("Out of memory");
+ return ret;
+}
+
+#define KEYCHAIN_ITEM(x) (x ? strlen(x) : 0), x
+#define KEYCHAIN_ARGS \
+ NULL, /* default keychain */ \
+ KEYCHAIN_ITEM(host), \
+ 0, NULL, /* account domain */ \
+ KEYCHAIN_ITEM(username), \
+ KEYCHAIN_ITEM(path), \
+ port, \
+ protocol, \
+ kSecAuthenticationTypeDefault
+
+static void write_item(const char *what, const char *buf, int len)
+{
+ printf("%s=", what);
+ fwrite(buf, 1, len, stdout);
+ putchar('\n');
+}
+
+static void find_username_in_item(SecKeychainItemRef item)
+{
+ SecKeychainAttributeList list;
+ SecKeychainAttribute attr;
+
+ list.count = 1;
+ list.attr = &attr;
+ attr.tag = kSecAccountItemAttr;
+
+ if (SecKeychainItemCopyContent(item, NULL, &list, NULL, NULL))
+ return;
+
+ write_item("username", attr.data, attr.length);
+ SecKeychainItemFreeContent(&list, NULL);
+}
+
+static void find_internet_password(void)
+{
+ void *buf;
+ UInt32 len;
+ SecKeychainItemRef item;
+
+ if (SecKeychainFindInternetPassword(KEYCHAIN_ARGS, &len, &buf, &item))
+ return;
+
+ write_item("password", buf, len);
+ if (!username)
+ find_username_in_item(item);
+
+ SecKeychainItemFreeContent(NULL, buf);
+}
+
+static void delete_internet_password(void)
+{
+ SecKeychainItemRef item;
+
+ /*
+ * Require at least a protocol and host for removal, which is what git
+ * will give us; if you want to do something more fancy, use the
+ * Keychain manager.
+ */
+ if (!protocol || !host)
+ return;
+
+ if (SecKeychainFindInternetPassword(KEYCHAIN_ARGS, 0, NULL, &item))
+ return;
+
+ SecKeychainItemDelete(item);
+}
+
+static void add_internet_password(void)
+{
+ /* Only store complete credentials */
+ if (!protocol || !host || !username || !password)
+ return;
+
+ if (SecKeychainAddInternetPassword(
+ KEYCHAIN_ARGS,
+ KEYCHAIN_ITEM(password),
+ NULL))
+ return;
+}
+
+static void read_credential(void)
+{
+ char buf[1024];
+
+ while (fgets(buf, sizeof(buf), stdin)) {
+ char *v;
+
+ if (!strcmp(buf, "\n"))
+ break;
+ buf[strlen(buf)-1] = '\0';
+
+ v = strchr(buf, '=');
+ if (!v)
+ die("bad input: %s", buf);
+ *v++ = '\0';
+
+ if (!strcmp(buf, "protocol")) {
+ if (!strcmp(v, "https"))
+ protocol = kSecProtocolTypeHTTPS;
+ else if (!strcmp(v, "http"))
+ protocol = kSecProtocolTypeHTTP;
+ else /* we don't yet handle other protocols */
+ exit(0);
+ }
+ else if (!strcmp(buf, "host")) {
+ char *colon = strchr(v, ':');
+ if (colon) {
+ *colon++ = '\0';
+ port = atoi(colon);
+ }
+ host = xstrdup(v);
+ }
+ else if (!strcmp(buf, "path"))
+ path = xstrdup(v);
+ else if (!strcmp(buf, "username"))
+ username = xstrdup(v);
+ else if (!strcmp(buf, "password"))
+ password = xstrdup(v);
+ }
+}
+
+int main(int argc, const char **argv)
+{
+ const char *usage =
+ "Usage: git credential-osxkeychain <get|store|erase>";
+
+ if (!argv[1])
+ die(usage);
+
+ read_credential();
+
+ if (!strcmp(argv[1], "get"))
+ find_internet_password();
+ else if (!strcmp(argv[1], "store"))
+ add_internet_password();
+ else if (!strcmp(argv[1], "erase"))
+ delete_internet_password();
+ /* otherwise, ignore unknown action */
+
+ return 0;
+}
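
The helper speaks git's credential protocol on stdin/stdout: attributes arrive as key=value lines terminated by a blank line, and any match is printed back the same way. An illustrative session (host and secrets invented, and assuming a matching keychain item exists):

    $ printf 'protocol=https\nhost=example.com\n\n' |
          git credential-osxkeychain get
    password=s3cret
    username=me
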
def parseRevision(ref):
return read_pipe("git rev-parse %s" % ref).strip()
+def branchExists(ref):
+ rev = read_pipe(["git", "rev-parse", "-q", "--verify", ref],
+ ignore_error=True)
+ return len(rev) > 0
+
def extractLogMessageFromGitCommit(commit):
logMessage = ""
class P4UserMap:
def __init__(self):
self.userMapFromPerforceServer = False
+ self.myP4UserId = None
+
+ def p4UserId(self):
+ if self.myP4UserId:
+ return self.myP4UserId
+
+ results = p4CmdList("user -o")
+ for r in results:
+ if r.has_key('User'):
+ self.myP4UserId = r['User']
+ return r['User']
+ die("Could not find your p4 user id")
+
+ def p4UserIsMe(self, p4User):
+ # return True if the given p4 user is actually me
+ me = self.p4UserId()
+ if not p4User or p4User != me:
+ return False
+ else:
+ return True
def getUserCacheFilename(self):
home = os.environ.get("HOME", os.environ.get("USERPROFILE"))
self.verbose = False
self.preserveUser = gitConfig("git-p4.preserveUser").lower() == "true"
self.isWindows = (platform.system() == "Windows")
- self.myP4UserId = None
def check(self):
if len(p4CmdList("opened ...")) > 0:
def canChangeChangelists(self):
# check to see if we have p4 admin or super-user permissions, either of
# which are required to modify changelists.
- results = p4CmdList("protects %s" % self.depotPath)
+ results = p4CmdList(["protects", self.depotPath])
for r in results:
if r.has_key('perm'):
if r['perm'] == 'admin':
return 1
return 0
- def p4UserId(self):
- if self.myP4UserId:
- return self.myP4UserId
-
- results = p4CmdList("user -o")
- for r in results:
- if r.has_key('User'):
- self.myP4UserId = r['User']
- return r['User']
- die("Could not find your p4 user id")
-
- def p4UserIsMe(self, p4User):
- # return True if the given p4 user is actually me
- me = self.p4UserId()
- if not p4User or p4User != me:
- return False
- else:
- return True
-
def prepareSubmitTemplate(self):
# remove lines in the Files section that show changes to files outside the depot path we're committing into
template = ""
die("Detecting current git branch failed!")
elif len(args) == 1:
self.master = args[0]
+ if not branchExists(self.master):
+ die("Branch %s does not exist" % self.master)
else:
return False
return True
+class View(object):
+ """Represent a p4 view ("p4 help views"), and map files in a
+ repo according to the view."""
+
+ class Path(object):
+ """A depot or client path, possibly containing wildcards.
+ The only one supported is ... at the end, currently.
+ Initialize with the full path, with //depot or //client."""
+
+ def __init__(self, path, is_depot):
+ self.path = path
+ self.is_depot = is_depot
+ self.find_wildcards()
+ # remember the prefix bit, useful for relative mappings
+ m = re.match("(//[^/]+/)", self.path)
+ if not m:
+ die("Path %s does not start with //prefix/" % self.path)
+ prefix = m.group(1)
+ if not self.is_depot:
+ # strip //client/ on client paths
+ self.path = self.path[len(prefix):]
+
+ def find_wildcards(self):
+ """Make sure wildcards are valid, and set up internal
+ variables."""
+
+ self.ends_triple_dot = False
+ # There are three wildcards allowed in p4 views
+ # (see "p4 help views"). This code knows how to
+ # handle "..." (only at the end), but cannot deal with
+ # "%%n" or "*". Only check the depot_side, as p4 should
+ # validate that the client_side matches too.
+ if re.search(r'%%[1-9]', self.path):
+ die("Can't handle %%n wildcards in view: %s" % self.path)
+ if self.path.find("*") >= 0:
+ die("Can't handle * wildcards in view: %s" % self.path)
+ triple_dot_index = self.path.find("...")
+ if triple_dot_index >= 0:
+ if triple_dot_index != len(self.path) - 3:
+ die("Can handle only single ... wildcard, at end: %s" %
+ self.path)
+ self.ends_triple_dot = True
+
+ def ensure_compatible(self, other_path):
+ """Make sure the wildcards agree."""
+ if self.ends_triple_dot != other_path.ends_triple_dot:
+ die("Both paths must end with ... if either does;\n" +
+ "paths: %s %s" % (self.path, other_path.path))
+
+ def match_wildcards(self, test_path):
+ """See if this test_path matches us, and fill in the value
+ of the wildcards if so. Returns a tuple of
+ (True|False, wildcards[]). For now, only the ... at end
+ is supported, so at most one wildcard."""
+ if self.ends_triple_dot:
+ dotless = self.path[:-3]
+ if test_path.startswith(dotless):
+ wildcard = test_path[len(dotless):]
+ return (True, [ wildcard ])
+ else:
+ if test_path == self.path:
+ return (True, [])
+ return (False, [])
+
+ def match(self, test_path):
+ """Just return if it matches; don't bother with the wildcards."""
+ b, _ = self.match_wildcards(test_path)
+ return b
+
+ def fill_in_wildcards(self, wildcards):
+ """Return the relative path, with the wildcards filled in
+ if there are any."""
+ if self.ends_triple_dot:
+ return self.path[:-3] + wildcards[0]
+ else:
+ return self.path
+
+ class Mapping(object):
+ def __init__(self, depot_side, client_side, overlay, exclude):
+ # depot_side is without the trailing /... if it had one
+ self.depot_side = View.Path(depot_side, is_depot=True)
+ self.client_side = View.Path(client_side, is_depot=False)
+ self.overlay = overlay # started with "+"
+ self.exclude = exclude # started with "-"
+ assert not (self.overlay and self.exclude)
+ self.depot_side.ensure_compatible(self.client_side)
+
+ def __str__(self):
+ c = " "
+ if self.overlay:
+ c = "+"
+ if self.exclude:
+ c = "-"
+ return "View.Mapping: %s%s -> %s" % \
+ (c, self.depot_side.path, self.client_side.path)
+
+ def map_depot_to_client(self, depot_path):
+ """Calculate the client path if using this mapping on the
+ given depot path; does not consider the effect of other
+ mappings in a view. Even excluded mappings are returned."""
+ matches, wildcards = self.depot_side.match_wildcards(depot_path)
+ if not matches:
+ return ""
+ client_path = self.client_side.fill_in_wildcards(wildcards)
+ return client_path
+
+ #
+ # View methods
+ #
+ def __init__(self):
+ self.mappings = []
+
+ def append(self, view_line):
+ """Parse a view line, splitting it into depot and client
+ sides. Append to self.mappings, preserving order."""
+
+ # Split the view line into exactly two words. P4 enforces
+ # structure on these lines that simplifies this quite a bit.
+ #
+ # Either or both words may be double-quoted.
+ # Single quotes do not matter.
+ # Double-quote marks cannot occur inside the words.
+ # A + or - prefix is also inside the quotes.
+ # There are no quotes unless they contain a space.
+ # The line is already white-space stripped.
+ # The two words are separated by a single space.
+ #
+ if view_line[0] == '"':
+ # First word is double quoted. Find its end.
+ close_quote_index = view_line.find('"', 1)
+ if close_quote_index <= 0:
+ die("No first-word closing quote found: %s" % view_line)
+ depot_side = view_line[1:close_quote_index]
+ # skip closing quote and space
+ rhs_index = close_quote_index + 1 + 1
+ else:
+ space_index = view_line.find(" ")
+ if space_index <= 0:
+ die("No word-splitting space found: %s" % view_line)
+ depot_side = view_line[0:space_index]
+ rhs_index = space_index + 1
+
+ if view_line[rhs_index] == '"':
+ # Second word is double quoted. Make sure there is a
+ # double quote at the end too.
+ if not view_line.endswith('"'):
+ die("View line with rhs quote should end with one: %s" %
+ view_line)
+ # skip the quotes
+ client_side = view_line[rhs_index+1:-1]
+ else:
+ client_side = view_line[rhs_index:]
+
+ # prefix + means overlay on previous mapping
+ overlay = False
+ if depot_side.startswith("+"):
+ overlay = True
+ depot_side = depot_side[1:]
+
+ # prefix - means exclude this path
+ exclude = False
+ if depot_side.startswith("-"):
+ exclude = True
+ depot_side = depot_side[1:]
+
+ m = View.Mapping(depot_side, client_side, overlay, exclude)
+ self.mappings.append(m)
+
+ def map_in_client(self, depot_path):
+ """Return the relative location in the client where this
+ depot file should live. Returns "" if the file should
+ not be mapped in the client."""
+
+ paths_filled = []
+ client_path = ""
+
+ # look at later entries first
+ for m in self.mappings[::-1]:
+
+ # see where will this path end up in the client
+ p = m.map_depot_to_client(depot_path)
+
+ if p == "":
+ # Depot path does not belong in client. Must remember
+ # this, as previous items should not cause files to
+ # exist in this path either. Remember that the list is
+ # being walked from the end, which has higher precedence.
+ # Overlap mappings do not exclude previous mappings.
+ if not m.overlay:
+ paths_filled.append(m.client_side)
+
+ else:
+ # This mapping matched; no need to search any further.
+ # But, the mapping could be rejected if the client path
+ # has already been claimed by an earlier mapping (i.e.
+ # one later in the list, which we are walking backwards).
+ already_mapped_in_client = False
+ for f in paths_filled:
+ # this is View.Path.match
+ if f.match(p):
+ already_mapped_in_client = True
+ break
+ if not already_mapped_in_client:
+ # Include this file, unless it is from a line that
+ # explicitly said to exclude it.
+ if not m.exclude:
+ client_path = p
+
+ # a match, even if rejected, always stops the search
+ break
+
+ return client_path
+
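
As a sketch of what the View class handles (depot and client names invented), a client view such as

    //depot/proj/...         //client/work/...
    -//depot/proj/secret/... //client/work/secret/...

maps //depot/proj/src/foo.c to work/src/foo.c, while anything under //depot/proj/secret/ hits the exclusion line first (mappings are walked from the end) and map_in_client() returns the empty string for it.
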
class P4Sync(Command, P4UserMap):
delete_actions = ( "delete", "move/delete", "purge" )
self.p4BranchesInGit = []
self.cloneExclude = []
self.useClientSpec = False
- self.clientSpecDirs = []
+ self.clientSpecDirs = None
+ self.tempBranches = []
+ self.tempBranchLocation = "git-p4-tmp"
if gitConfig("git-p4.syncFromOrigin") == "false":
self.syncWithOrigin = False
.replace("%25", "%")
return path
+ # Force a checkpoint in fast-import and wait for it to finish
+ def checkpoint(self):
+ self.gitStream.write("checkpoint\n\n")
+ self.gitStream.write("progress checkpoint\n\n")
+ out = self.gitOutput.readline()
+ if self.verbose:
+ print "checkpoint finished: " + out
+
def extractFilesFromCommit(self, commit):
self.cloneExclude = [re.sub(r"\.\.\.$", "", path)
for path in self.cloneExclude]
def stripRepoPath(self, path, prefixes):
if self.useClientSpec:
-
- # if using the client spec, we use the output directory
- # specified in the client. For example, a view
- # //depot/foo/branch/... //client/branch/foo/...
- # will end up putting all foo/branch files into
- # branch/foo/
- for val in self.clientSpecDirs:
- if path.startswith(val[0]):
- # replace the depot path with the client path
- path = path.replace(val[0], val[1][1])
- # now strip out the client (//client/...)
- path = re.sub("^(//[^/]+/)", '', path)
- # the rest is all path
- return path
+ return self.clientSpecDirs.map_in_client(path)
if self.keepRepoPath:
prefixes = [re.sub("^(//[^/]+/).*", r'\1', prefixes[0])]
filesToDelete = []
for f in files:
- includeFile = True
- for val in self.clientSpecDirs:
- if f['path'].startswith(val[0]):
- if val[1][0] <= 0:
- includeFile = False
- break
+ # if using a client spec, only add the files that have
+ # a path in the client
+ if self.clientSpecDirs:
+ if self.clientSpecDirs.map_in_client(f['path']) == "":
+ continue
- if includeFile:
- filesForCommit.append(f)
- if f['action'] in self.delete_actions:
- filesToDelete.append(f)
- else:
- filesToRead.append(f)
+ filesForCommit.append(f)
+ if f['action'] in self.delete_actions:
+ filesToDelete.append(f)
+ else:
+ filesToRead.append(f)
# deleted files...
for f in filesToDelete:
if self.stream_file.has_key('depotFile'):
self.streamOneP4File(self.stream_file, self.stream_contents)
+ def make_email(self, userid):
+ if userid in self.users:
+ return self.users[userid]
+ else:
+ return "%s <a@b>" % userid
+
def commit(self, details, files, branch, branchPrefixes, parent = ""):
epoch = details["time"]
author = details["user"]
committer = ""
if author not in self.users:
self.getUserMapFromPerforceServer()
- if author in self.users:
- committer = "%s %s %s" % (self.users[author], epoch, self.tz)
- else:
- committer = "%s <a@b> %s %s" % (author, epoch, self.tz)
+ committer = "%s %s %s" % (self.make_email(author), epoch, self.tz)
self.gitStream.write("committer %s\n" % committer)
self.gitStream.write("from %s\n" % branch)
owner = labelDetails["Owner"]
- tagger = ""
- if author in self.users:
- tagger = "%s %s %s" % (self.users[owner], epoch, self.tz)
+
+ # Try to use the owner of the p4 label, or failing that,
+ # the current p4 user id.
+ if owner:
+ email = self.make_email(owner)
else:
- tagger = "%s <a@b> %s %s" % (owner, epoch, self.tz)
+ email = self.make_email(self.p4UserId())
+ tagger = "%s %s %s" % (email, epoch, self.tz)
+
self.gitStream.write("tagger %s\n" % tagger)
- self.gitStream.write("data <<EOT\n")
- self.gitStream.write(labelDetails["Description"])
- self.gitStream.write("EOT\n\n")
+
+ description = labelDetails["Description"]
+ self.gitStream.write("data %d\n" % len(description))
+ self.gitStream.write(description)
+ self.gitStream.write("\n")
else:
if not self.silent:
def getLabels(self):
self.labels = {}
- l = p4CmdList("labels %s..." % ' '.join (self.depotPaths))
+ l = p4CmdList(["labels"] + ["%s..." % p for p in self.depotPaths])
if len(l) > 0 and not self.silent:
print "Finding files belonging to labels in %s" % `self.depotPaths`
command = "branches"
for info in p4CmdList(command):
- details = p4Cmd("branch -o %s" % info["branch"])
+ details = p4Cmd(["branch", "-o", info["branch"]])
viewIdx = 0
while details.has_key("View%s" % viewIdx):
paths = details["View%s" % viewIdx].split(" ")
sourceRef = self.gitRefForBranch(sourceBranch)
#print "source " + sourceBranch
- branchParentChange = int(p4Cmd("changes -m 1 %s...@1,%s" % (sourceDepotPath, firstChange))["change"])
+ branchParentChange = int(p4Cmd(["changes", "-m", "1", "%s...@1,%s" % (sourceDepotPath, firstChange)])["change"])
#print "branch parent: %s" % branchParentChange
gitParent = self.gitCommitByP4Change(sourceRef, branchParentChange)
if len(gitParent) > 0:
self.importChanges(changes)
return True
+ def searchParent(self, parent, branch, target):
+ parentFound = False
+ for blob in read_pipe_lines(["git", "rev-list", "--reverse", "--no-merges", parent]):
+ blob = blob.strip()
+ if len(read_pipe(["git", "diff-tree", blob, target])) == 0:
+ parentFound = True
+ if self.verbose:
+ print "Found parent of %s in commit %s" % (branch, blob)
+ break
+ if parentFound:
+ return blob
+ else:
+ return None
+
def importChanges(self, changes):
cnt = 1
for change in changes:
- description = p4Cmd("describe %s" % change)
+ description = p4Cmd(["describe", str(change)])
self.updateOptionDict(description)
if not self.silent:
parent = self.initialParents[branch]
del self.initialParents[branch]
- self.commit(description, filesForCommit, branch, [branchPrefix], parent)
+ blob = None
+ if len(parent) > 0:
+ tempBranch = os.path.join(self.tempBranchLocation, "%d" % (change))
+ if self.verbose:
+ print "Creating temporary branch: " + tempBranch
+ self.commit(description, filesForCommit, tempBranch, [branchPrefix])
+ self.tempBranches.append(tempBranch)
+ self.checkpoint()
+ blob = self.searchParent(parent, branch, tempBranch)
+ if blob:
+ self.commit(description, filesForCommit, branch, [branchPrefix], blob)
+ else:
+ if self.verbose:
+ print "Parent of %s not found. Committing into head of %s" % (branch, parent)
+ self.commit(description, filesForCommit, branch, [branchPrefix], parent)
else:
files = self.extractFilesFromCommit(description)
self.commit(description, files, self.branch, self.depotPaths,
def getClientSpec(self):
- specList = p4CmdList( "client -o" )
- temp = {}
- for entry in specList:
- for k,v in entry.iteritems():
- if k.startswith("View"):
-
- # p4 has these %%1 to %%9 arguments in specs to
- # reorder paths; which we can't handle (yet :)
- if re.match('%%\d', v) != None:
- print "Sorry, can't handle %%n arguments in client specs"
- sys.exit(1)
-
- if v.startswith('"'):
- start = 1
- else:
- start = 0
- index = v.find("...")
-
- # save the "client view"; i.e the RHS of the view
- # line that tells the client where to put the
- # files for this view.
- cv = v[index+3:].strip() # +3 to remove previous '...'
-
- # if the client view doesn't end with a
- # ... wildcard, then we're going to mess up the
- # output directory, so fail gracefully.
- if not cv.endswith('...'):
- print 'Sorry, client view in "%s" needs to end with wildcard' % (k)
- sys.exit(1)
- cv=cv[:-3]
-
- # now save the view; +index means included, -index
- # means it should be filtered out.
- v = v[start:index]
- if v.startswith("-"):
- v = v[1:]
- include = -len(v)
- else:
- include = len(v)
+ specList = p4CmdList("client -o")
+ if len(specList) != 1:
+ die('Output from "client -o" is %d lines, expecting 1' %
+ len(specList))
+
+ # dictionary of all client parameters
+ entry = specList[0]
+
+ # just the keys that start with "View"
+ view_keys = [ k for k in entry.keys() if k.startswith("View") ]
- temp[v] = (include, cv)
+ # hold this new View
+ view = View()
- self.clientSpecDirs = temp.items()
- self.clientSpecDirs.sort( lambda x, y: abs( y[1][0] ) - abs( x[1][0] ) )
+ # append the lines, in order, to the view
+ for view_num in range(len(view_keys)):
+ k = "View%d" % view_num
+ if k not in view_keys:
+ die("Expected view key %s missing" % k)
+ view.append(entry[k])
+
+ self.clientSpecDirs = view
+ if self.verbose:
+ for i, m in enumerate(self.clientSpecDirs.mappings):
+ print "clientSpecDirs %d: %s" % (i, str(m))
def run(self, args):
self.depotPaths = []
if not gitBranchExists(self.refPrefix + "HEAD") and self.importIntoRemotes and gitBranchExists(self.branch):
system("git symbolic-ref %sHEAD %s" % (self.refPrefix, self.branch))
- if self.useClientSpec or gitConfig("git-p4.useclientspec") == "true":
+ if not self.useClientSpec:
+ if gitConfig("git-p4.useclientspec", "--bool") == "true":
+ self.useClientSpec = True
+ if self.useClientSpec:
self.getClientSpec()
# TODO: should always look at previous commits,
revision = ""
self.users = {}
+ # Make sure no revision specifiers are used when --changesfile
+ # is specified.
+ bad_changesfile = False
+ if len(self.changesFile) > 0:
+ for p in self.depotPaths:
+ if p.find("@") >= 0 or p.find("#") >= 0:
+ bad_changesfile = True
+ break
+ if bad_changesfile:
+ die("Option --changesfile is incompatible with revision specifiers")
+
newPaths = []
for p in self.depotPaths:
if p.find("@") != -1:
revision = p[hashIdx:]
p = p[:hashIdx]
elif self.previousDepotPaths == []:
- revision = "#head"
+ # pay attention to changesfile, if given, else import
+ # the entire p4 tree at the head revision
+ if len(self.changesFile) == 0:
+ revision = "#head"
p = re.sub ("\.\.\.$", "", p)
if not p.endswith("/"):
self.gitOutput.close()
self.gitError.close()
+ # Cleanup temporary branches created during import
+ if self.tempBranches != []:
+ for branch in self.tempBranches:
+ read_pipe("git update-ref -d %s" % branch)
+ os.rmdir(os.path.join(os.environ.get("GIT_DIR", ".git"), self.tempBranchLocation))
+
return True
class P4Rebase(Command):
args = sys.argv[2:]
if len(options) > 0:
- options.append(optparse.make_option("--git-dir", dest="gitdir"))
+ if cmd.needsGit:
+ options.append(optparse.make_option("--git-dir", dest="gitdir"))
parser = optparse.OptionParser(cmd.usage.replace("%prog", "%prog " + cmdName),
options,
if not cmd.run(args):
parser.print_help()
+ sys.exit(2)
if __name__ == '__main__':
+++ /dev/null
-git-p4 - Perforce <-> Git converter using git-fast-import
-
-Usage
-=====
-
-git-p4 can be used in two different ways:
-
-1) To import changes from Perforce to a Git repository, using "git-p4 sync".
-
-2) To submit changes from Git back to Perforce, using "git-p4 submit".
-
-Importing
-=========
-
-Simply start with
-
- git-p4 clone //depot/path/project
-
-or
-
- git-p4 clone //depot/path/project myproject
-
-This will:
-
-1) Create an empty git repository in a subdirectory called "project" (or
-"myproject" with the second command)
-
-2) Import the head revision from the given Perforce path into a git branch
-called "p4" (remotes/p4 actually)
-
-3) Create a master branch based on it and check it out.
-
-If you want the entire history (not just the head revision) then you can simply
-append a "@all" to the depot path:
-
- git-p4 clone //depot/project/main@all myproject
-
-
-
-If you want more control you can also use the git-p4 sync command directly:
-
- mkdir repo-git
- cd repo-git
- git init
- git-p4 sync //path/in/your/perforce/depot
-
-This will import the current head revision of the specified depot path into a
-"remotes/p4/master" branch of your git repository. You can use the
---branch=mybranch option to import into a different branch.
-
-If you want to import the entire history of a given depot path simply use:
-
- git-p4 sync //path/in/depot@all
-
-
-Note:
-
-To achieve optimal compression you may want to run 'git repack -a -d -f' after
-a big import. This may take a while.
-
-Incremental Imports
-===================
-
-After an initial import you can continue to synchronize your git repository
-with newer changes from the Perforce depot by just calling
-
- git-p4 sync
-
-in your git repository. By default the "remotes/p4/master" branch is updated.
-
-Advanced Setup
-==============
-
-Suppose you have a periodically updated git repository somewhere, containing a
-complete import of a Perforce project. This repository can be cloned and used
-with git-p4. When updating the cloned repository with the "sync" command,
-git-p4 will try to fetch changes from the original repository first. The git
-protocol used with this is usually faster than importing from Perforce
-directly.
-
-This behaviour can be disabled by setting the "git-p4.syncFromOrigin" git
-configuration variable to "false".
-
-Updating
-========
-
-A common working pattern is to fetch the latest changes from the Perforce depot
-and merge them with local uncommitted changes. The recommended way is to use
-git's rebase mechanism to preserve linear history. git-p4 provides a convenient
-
- git-p4 rebase
-
-command that calls git-p4 sync followed by git rebase to rebase the current
-working branch.
-
-Submitting
-==========
-
-git-p4 has support for submitting changes from a git repository back to the
-Perforce depot. This requires a Perforce checkout separate from your git
-repository. To submit all changes that are in the current git branch but not in
-the "p4" branch (or "origin" if "p4" doesn't exist) simply call
-
- git-p4 submit
-
-in your git repository. If you want to submit changes in a specific branch that
-is not your current git branch you can also pass that as an argument:
-
- git-p4 submit mytopicbranch
-
-You can override the reference branch with the --origin=mysourcebranch option.
-
-The Perforce changelists will be created with the user who ran git-p4. If you
-use --preserve-user then git-p4 will attempt to create Perforce changelists
-with the Perforce user corresponding to the git commit author. You need to
-have sufficient permissions within Perforce, and the git users need to have
-Perforce accounts. Permissions can be granted using 'p4 protect'.
-
-If a submit fails you may have to "p4 resolve" and submit manually. You can
-continue importing the remaining changes with
-
- git-p4 submit --continue
-
-Example
-=======
-
-# Clone a repository
- git-p4 clone //depot/path/project
-# Enter the newly cloned directory
- cd project
-# Do some work...
- vi foo.h
-# ... and commit locally to git
- git commit foo.h
-# In the meantime somebody submitted changes to the Perforce depot. Rebase your latest
-# changes against the latest changes in Perforce:
- git-p4 rebase
-# Submit your locally committed changes back to Perforce
- git-p4 submit
-# ... and synchronize with Perforce
- git-p4 rebase
-
-
-Configuration parameters
-========================
-
-git-p4.user ($P4USER)
-
-Allows you to specify the username to use to connect to the Perforce repository.
-
- git config [--global] git-p4.user public
-
-git-p4.password ($P4PASS)
-
-Allows you to specify the password to use to connect to the Perforce repository.
-Warning: this password will be visible on the command-line invocation of the p4 binary.
-
- git config [--global] git-p4.password public1234
-
-git-p4.port ($P4PORT)
-
-Specify the port to be used to contact the Perforce server. As this will be passed
-directly to the p4 binary, it may be in the format host:port as well.
-
- git config [--global] git-p4.port codes.zimbra.com:2666
-
-git-p4.host ($P4HOST)
-
-Specify the host to contact for a Perforce repository.
-
- git config [--global] git-p4.host perforce.example.com
-
-git-p4.client ($P4CLIENT)
-
-Specify the client name to use
-
- git config [--global] git-p4.client public-view
-
-git-p4.allowSubmit
-
- git config [--global] git-p4.allowSubmit false
-
-git-p4.syncFromOrigin
-
-A useful setup may be that you have a periodically updated git repository
-somewhere that contains a complete import of a Perforce project. That git
-repository can be used to clone the working repository from, after which
-git-p4 imports further changes directly from Perforce. If the connection to
-the Perforce server is slow and the working repository hasn't been synced for a
-while, it may be desirable to fetch changes from the origin git repository using
-the efficient git protocol. git-p4 supports this setup by calling "git fetch origin"
-by default if there is an origin branch. You can disable this using:
-
- git config [--global] git-p4.syncFromOrigin false
-
-git-p4.useclientspec
-
- git config [--global] git-p4.useclientspec false
-
-The P4CLIENT environment variable should be correctly set for p4 to be
-able to find the relevant client. This client spec will be used to
-both filter the files cloned by git and set the directory layout as
-specified in the client (this implies --keep-path style semantics).
-
-git-p4.skipSubmitEdit
-
- git config [--global] git-p4.skipSubmitEdit false
-
-Normally, git-p4 invokes an editor after each commit is applied so
-that you can make changes to the submit message. Setting this
-variable to true will skip the editing step, submitting the change as is.
-
-git-p4.skipSubmitEditCheck
-
- git config [--global] git-p4.skipSubmitEditCheck false
-
-After the editor is invoked, git-p4 normally makes sure you saved the
-change description, as an indication that you did indeed read it over
-and edit it. You can quit without saving to abort the submit (or skip
-this change and continue). Setting this variable to true will cause
-git-p4 not to check if you saved the change description. This variable
-only matters if git-p4.skipSubmitEdit has not been set to true.
-
-git-p4.preserveUser
-
- git config [--global] git-p4.preserveUser false
-
-If true, attempt to preserve user names by modifying the p4 changelists. See
-the "--preserve-user" submit option.
-
-git-p4.allowMissingP4Users
-
- git config [--global] git-p4.allowMissingP4Users false
-
-If git-p4 is setting the Perforce user for a commit (--preserve-user) and
-there is no Perforce user corresponding to the git author, git-p4 will
-stop. With allowMissingP4Users set to true, git-p4 will use the
-current user (i.e. the behavior without --preserve-user) and carry on with
-the Perforce commit.
-
-git-p4.skipUserNameCheck
-
- git config [--global] git-p4.skipUserNameCheck false
-
-When submitting, git-p4 checks that the git commits are authored by the current
-p4 user, and warns if they are not. This disables the check.
-
-git-p4.detectRenames
-
-Detect renames when submitting changes to Perforce server. Will enable -M git
-argument. Can be optionally set to a number representing the threshold
-percentage value of the rename detection.
-
- git config [--global] git-p4.detectRenames true
- git config [--global] git-p4.detectRenames 50
-
-git-p4.detectCopies
-
-Detect copies when submitting changes to Perforce server. Will enable -C git
-argument. Can be optionally set to a number representing the threshold
-percentage value of the copy detection.
-
- git config [--global] git-p4.detectCopies true
- git config [--global] git-p4.detectCopies 80
-
-git-p4.detectCopiesHarder
-
-Detect copies even between files that did not change when submitting changes to
-Perforce server. Will enable --find-copies-harder git argument.
-
- git config [--global] git-p4.detectCopiesHarder true
-
-git-p4.branchUser
-
-Only use branch specifications defined by the selected username.
-
- git config [--global] git-p4.branchUser username
-
-git-p4.branchList
-
-List of branches to be imported when branch detection is enabled.
-
- git config [--global] git-p4.branchList main:branchA
- git config [--global] --add git-p4.branchList main:branchB
-
-Implementation Details...
-=========================
-
-* Changesets from Perforce are imported using git fast-import.
-* The import does not require anything from the Perforce client view as it just uses
- "p4 print //depot/path/file#revision" to get the actual file contents.
-* Every imported changeset has a special [git-p4...] line at the
- end of the log message that gives information about the corresponding
- Perforce change number and is also used by git-p4 itself to find out
- where to continue importing when doing incremental imports.
- Basically when syncing it extracts the perforce change number of the
- latest commit in the "p4" branch and uses "p4 changes //depot/path/...@changenum,#head"
- to find out which changes need to be imported.
-* git-p4 submit uses "git rev-list" to pick the commits between the "p4" branch
- and the current branch.
- The commits themselves are applied using git diff/format-patch ... | git apply
-
struct lf_to_crlf_filter {
struct stream_filter filter;
- int want_lf;
+ unsigned has_held:1;
+ char held;
};
static int lf_to_crlf_filter_fn(struct stream_filter *filter,
size_t count, o = 0;
struct lf_to_crlf_filter *lf_to_crlf = (struct lf_to_crlf_filter *)filter;
- /* Output a pending LF if we need to */
- if (lf_to_crlf->want_lf) {
- output[o++] = '\n';
- lf_to_crlf->want_lf = 0;
+ /*
+ * We may be holding onto the CR to see if it is followed by a
+ * LF, in which case we would need to go to the main loop.
+ * Otherwise, just emit it to the output stream.
+ */
+ if (lf_to_crlf->has_held && (lf_to_crlf->held != '\r' || !input)) {
+ output[o++] = lf_to_crlf->held;
+ lf_to_crlf->has_held = 0;
}
- if (!input)
- return 0; /* We've already dealt with the state */
+ /* We are told to drain */
+ if (!input) {
+ *osize_p -= o;
+ return 0;
+ }
count = *isize_p;
- if (count) {
+ if (count || lf_to_crlf->has_held) {
size_t i;
+ int was_cr = 0;
+
+ if (lf_to_crlf->has_held) {
+ was_cr = 1;
+ lf_to_crlf->has_held = 0;
+ }
+
for (i = 0; o < *osize_p && i < count; i++) {
char ch = input[i];
+
if (ch == '\n') {
output[o++] = '\r';
- if (o >= *osize_p) {
- lf_to_crlf->want_lf = 1;
- continue; /* We need to increase i */
- }
+ } else if (was_cr) {
+ /*
+ * Previous round saw CR and it is not followed
+ * by a LF; emit the CR before processing the
+ * current character.
+ */
+ output[o++] = '\r';
+ }
+
+ /*
+ * We may have consumed the last output slot,
+ * in which case we need to break out of this
+ * loop; hold the current character before
+ * returning.
+ */
+ if (*osize_p <= o) {
+ lf_to_crlf->has_held = 1;
+ lf_to_crlf->held = ch;
+ continue; /* break but increment i */
}
+
+ if (ch == '\r') {
+ was_cr = 1;
+ continue;
+ }
+
+ was_cr = 0;
output[o++] = ch;
}
*osize_p -= o;
*isize_p -= i;
+
+ if (!lf_to_crlf->has_held && was_cr) {
+ lf_to_crlf->has_held = 1;
+ lf_to_crlf->held = '\r';
+ }
}
return 0;
}
static struct stream_filter *lf_to_crlf_filter(void)
{
- struct lf_to_crlf_filter *lf_to_crlf = xmalloc(sizeof(*lf_to_crlf));
+ struct lf_to_crlf_filter *lf_to_crlf = xcalloc(1, sizeof(*lf_to_crlf));
lf_to_crlf->filter.vtbl = &lf_to_crlf_vtbl;
- lf_to_crlf->want_lf = 0;
return (struct stream_filter *)lf_to_crlf;
}
die_errno("unable to relay credential");
}
- if (!send_request(socket, &buf))
- return;
- if (flags & FLAG_SPAWN) {
- spawn_daemon(socket);
- send_request(socket, &buf);
+ if (send_request(socket, &buf) < 0) {
+ if (errno != ENOENT && errno != ECONNREFUSED)
+ die_errno("unable to connect to cache daemon");
+ if (flags & FLAG_SPAWN) {
+ spawn_daemon(socket);
+ if (send_request(socket, &buf) < 0)
+ die_errno("unable to connect to cache daemon");
+ }
}
strbuf_release(&buf);
}
#include "string-list.h"
#include "run-command.h"
#include "url.h"
+#include "prompt.h"
void credential_init(struct credential *c)
{
strbuf_addf(out, "/%s", c->path);
}
-static char *credential_ask_one(const char *what, struct credential *c)
+static char *credential_ask_one(const char *what, struct credential *c,
+ int flags)
{
struct strbuf desc = STRBUF_INIT;
struct strbuf prompt = STRBUF_INIT;
else
strbuf_addf(&prompt, "%s: ", what);
- /* FIXME: for usernames, we should do something less magical that
- * actually echoes the characters. However, we need to read from
- * /dev/tty and not stdio, which is not portable (but getpass will do
- * it for us). http.c uses the same workaround. */
- r = git_getpass(prompt.buf);
+ r = git_prompt(prompt.buf, flags);
strbuf_release(&desc);
strbuf_release(&prompt);
static void credential_getpass(struct credential *c)
{
if (!c->username)
- c->username = credential_ask_one("Username", c);
+ c->username = credential_ask_one("Username", c,
+ PROMPT_ASKPASS|PROMPT_ECHO);
if (!c->password)
- c->password = credential_ask_one("Password", c);
+ c->password = credential_ask_one("Password", c,
+ PROMPT_ASKPASS);
}
int credential_read(struct credential *c, FILE *fp)
drop_privileges(cred);
+ loginfo("Ready to rumble");
+
return service_loop(&socklist);
}
if (inetd_mode || serve_mode)
return execute();
- if (detach) {
+ if (detach)
daemonize();
- loginfo("Ready to rumble");
- }
else
sanitize_stdfds();
opts.src_index = &the_index;
opts.dst_index = NULL;
opts.pathspec = &revs->diffopt.pathspec;
+ opts.pathspec->recursive = 1;
+ opts.pathspec->max_depth = -1;
init_tree_desc(&t, tree->buffer, tree->size);
return unpack_trees(1, &t, &opts);
diff_words_append(line, len,
&ecbdata->diff_words->plus);
return;
+ } else if (!prefixcmp(line, "\\ ")) {
+ /*
+ * Eat the "no newline at eof" marker as if we
+ * saw a "+" or "-" line with nothing on it,
+ * and return without diff_words_flush() to
+ * defer processing. If this is the end of
+ * preimage, more "+" lines may come after it.
+ */
+ return;
}
diff_words_flush(ecbdata);
if (ecbdata->diff_words->type == DIFF_WORDS_PORCELAIN) {
struct strbuf;
struct diff_filespec;
struct userdiff_driver;
+struct sha1_array;
+struct commit;
typedef void (*change_fn_t)(struct diff_options *options,
unsigned old_mode, unsigned new_mode,
extern void show_combined_diff(struct combine_diff_path *elem, int num_parent,
int dense, struct rev_info *);
-extern void diff_tree_combined(const unsigned char *sha1, const unsigned char parent[][20], int num_parent, int dense, struct rev_info *rev);
+extern void diff_tree_combined(const unsigned char *sha1, const struct sha1_array *parents, int dense, struct rev_info *rev);
-extern void diff_tree_combined_merge(const unsigned char *sha1, int, struct rev_info *);
+extern void diff_tree_combined_merge(const struct commit *commit, int dense, struct rev_info *rev);
void diff_set_mnemonic_prefix(struct diff_options *options, const char *a, const char *b);
static void start_packfile(void)
{
- static char tmpfile[PATH_MAX];
+ static char tmp_file[PATH_MAX];
struct packed_git *p;
struct pack_header hdr;
int pack_fd;
- pack_fd = odb_mkstemp(tmpfile, sizeof(tmpfile),
+ pack_fd = odb_mkstemp(tmp_file, sizeof(tmp_file),
"pack/tmp_pack_XXXXXX");
- p = xcalloc(1, sizeof(*p) + strlen(tmpfile) + 2);
- strcpy(p->pack_name, tmpfile);
+ p = xcalloc(1, sizeof(*p) + strlen(tmp_file) + 2);
+ strcpy(p->pack_name, tmp_file);
p->pack_fd = pack_fd;
p->do_not_close = 1;
pack_file = sha1fd(pack_fd, p->pack_name);
s,signoff add a Signed-off-by line to the commit message
u,utf8 recode into utf8 (default)
k,keep pass -k flag to git-mailinfo
+keep-non-patch pass -b flag to git-mailinfo
keep-cr pass --keep-cr flag to git-mailsplit for mbox format
no-keep-cr do not pass --keep-cr flag to git-mailsplit independent of am.keepcr
c,scissors strip everything before a scissors line
utf8= ;;
-k|--keep)
keep=t ;;
+ --keep-non-patch)
+ keep=b ;;
-c|--scissors)
scissors=t ;;
--no-scissors)
fi
esac
+# Now, decide what command line options we will give to the git
+# commands we invoke, based on the result of parsing command line
+# options and previous invocation state stored in $dotest/ files.
+
if test "$(cat "$dotest/utf8")" = t
then
utf8=-u
else
utf8=-n
fi
-if test "$(cat "$dotest/keep")" = t
-then
- keep=-k
-fi
+keep=$(cat "$dotest/keep")
+case "$keep" in
+t)
+ keep=-k ;;
+b)
+ keep=-b ;;
+*)
+ keep= ;;
+esac
case "$(cat "$dotest/scissors")" in
t)
scissors=--scissors ;;
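A rough usage sketch for the new option plumbing above (the mailbox name is illustrative): --keep-non-patch records "b" in $dotest/keep, which is later translated into the -b flag for git mailinfo so that bracketed subject prefixes that are not a "[PATCH]" marker can be kept:

    git am --keep-non-patch series.mbox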
chomp($gd);
$ENV{GIT_DIR} = $gd;
}
+
+ # On MSYS, convert a Windows-style path to an MSYS-style path
+ # so that rel2abs() below works correctly.
+ if ($^O eq 'msys') {
+ $ENV{GIT_DIR} =~ s#^([[:alpha:]]):/#/$1/#;
+ }
+
# Make sure GIT_DIR is absolute
$ENV{GIT_DIR} = File::Spec->rel2abs($ENV{GIT_DIR});
}
}
checkout_staged_file () {
- tmpfile=$(expr "$(git checkout-index --temp --stage="$1" "$2")" : '\([^ ]*\) ')
+ tmpfile=$(expr \
+ "$(git checkout-index --temp --stage="$1" "$2" 2>/dev/null)" \
+ : '\([^ ]*\) ')
if test $? -eq 0 -a -n "$tmpfile" ; then
mv -- "$(git rev-parse --show-cdup)$tmpfile" "$3"
+ else
+ >"$3"
fi
}
mv -- "$MERGED" "$BACKUP"
cp -- "$BACKUP" "$MERGED"
- base_present && checkout_staged_file 1 "$MERGED" "$BASE"
- local_present && checkout_staged_file 2 "$MERGED" "$LOCAL"
- remote_present && checkout_staged_file 3 "$MERGED" "$REMOTE"
+ checkout_staged_file 1 "$MERGED" "$BASE"
+ checkout_staged_file 2 "$MERGED" "$LOCAL"
+ checkout_staged_file 3 "$MERGED" "$REMOTE"
if test -z "$local_mode" -o -z "$remote_mode"; then
echo "Deleted merge conflict for '$MERGED':"
merge_base=$(git merge-base $baserev $headrev) ||
die "fatal: No commits in common between $base and $head"
-find_matching_branch="/^$headrev "'refs\/heads\//{
- s/^.* refs\/heads\///
- p
- q
-}'
-branch=$(git ls-remote "$url" | sed -n -e "$find_matching_branch")
+# $head is the token given from the command line. If a ref with that
+# name exists at the remote and their values match, we should use it.
+# Otherwise find a ref that matches $headrev.
+find_matching_ref='
+ sub abbr {
+ my $ref = shift;
+ if ($ref =~ s|refs/heads/|| || $ref =~ s|refs/tags/||) {
+ return $ref;
+ } else {
+ return $ref;
+ }
+ }
+
+ my ($exact, $found);
+ while (<STDIN>) {
+ my ($sha1, $ref, $deref) = /^(\S+)\s+(\S+?)(\^\{\})?$/;
+ next unless ($sha1 eq $ARGV[1]);
+ $found = abbr($ref);
+ if ($ref =~ m|/\Q$ARGV[0]\E$|) {
+ $exact = $found;
+ last;
+ }
+ }
+ if ($exact) {
+ print "$exact\n";
+ } elsif ($found) {
+ print "$found\n";
+ }
+'
+
+ref=$(git ls-remote "$url" | perl -e "$find_matching_ref" "$head" "$headrev")
+
url=$(git ls-remote --get-url "$url")
git show -s --format='The following changes since commit %H:
%s (%ci)
are available in the git repository at:
-' $baserev &&
-echo " $url${branch+ $branch}" &&
+' $merge_base &&
+echo " $url${ref+ $ref}" &&
git show -s --format='
for you to fetch changes up to %H:
if test -n "$branch_name"
then
- echo "(from the branch description for $branch local branch)"
+ echo "(from the branch description for $branch_name local branch)"
echo
git config "branch.$branch_name.description"
fi &&
git shortlog ^$baserev $headrev &&
git diff -M --stat --summary $patch $merge_base..$headrev || status=1
-if test -z "$branch"
+if test -z "$ref"
then
echo "warn: No branch of $url is at:" >&2
git show -s --format='warn: %h: %s' $headrev >&2
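With the ref-matching rewrite above, the end point given on the command line may be a tag at the remote as well as a branch, and the generated message names whichever remote ref points at $headrev, preferring an exact name match. A sketch (URL and ref names are placeholders):

    git request-pull v1.7.9 git://example.com/repo.git tags/for-upstream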
"signedoffbycc" => [\$signed_off_by_cc, undef],
"signedoffcc" => [\$signed_off_by_cc, undef], # Deprecated
"validate" => [\$validate, 1],
+ "multiedit" => [\$multiedit, undef]
);
my %config_settings = (
"bcc" => \@bcclist,
"suppresscc" => \@suppress_cc,
"envelopesender" => \$envelope_sender,
- "multiedit" => \$multiedit,
"confirm" => \$confirm,
"from" => \$sender,
"assume8bitencoding" => \$auto_8bit_encoding,
git read-tree --index-output="$TMPindex" -m $i_tree &&
GIT_INDEX_FILE="$TMPindex" &&
export GIT_INDEX_FILE &&
- git diff --name-only -z HEAD | git update-index -z --add --remove --stdin &&
+ git diff --name-only -z HEAD -- >"$TMP-stagenames" &&
+ git update-index -z --add --remove --stdin <"$TMP-stagenames" &&
git write-tree &&
rm -f "$TMPindex"
) ) ||
w_tree=$(GIT_INDEX_FILE="$TMP-index" git write-tree) ||
die "$(gettext "Cannot save the current worktree state")"
- git diff-tree -p HEAD $w_tree > "$TMP-patch" &&
+ git diff-tree -p HEAD $w_tree -- >"$TMP-patch" &&
test -s "$TMP-patch" ||
die "$(gettext "No changes selected")"
die "$(eval_gettext "\${REV}: Could not drop stash entry")"
# clear_stash if we just dropped the last stash entry
- git rev-parse --verify "$ref_stash@{0}" > /dev/null 2>&1 || clear_stash
+ git rev-parse --verify "$ref_stash@{0}" >/dev/null 2>&1 || clear_stash
}
apply_to_branch () {
gitdir=
gitdir_base=
name=$(module_name "$path" 2>/dev/null)
+ test -n "$name" || name="$path"
base_path=$(dirname "$path")
gitdir=$(git rev-parse --git-dir)
* if we fail because the command is not found, it is
* OK to return. Otherwise, we just pass along the status code.
*/
- status = run_command_v_opt(argv, RUN_SILENT_EXEC_FAILURE);
+ status = run_command_v_opt(argv, RUN_SILENT_EXEC_FAILURE | RUN_CLEAN_ON_EXIT);
if (status >= 0 || errno != ENOENT)
exit(status);
if (!defined $action) {
if (defined $hash) {
$action = git_get_type($hash);
+ $action or die_error(404, "Object does not exist");
} elsif (defined $hash_base && defined $file_name) {
$action = git_get_type("$hash_base:$file_name");
+ $action or die_error(404, "File or directory does not exist");
} elsif (defined $project) {
$action = 'summary';
} else {
sub to_utf8 {
my $str = shift;
return undef unless defined $str;
- if (utf8::valid($str)) {
- utf8::decode($str);
+
+ if (utf8::is_utf8($str) || utf8::decode($str)) {
return $str;
} else {
return decode($fallback_encoding, $str, Encode::FB_DEFAULT);
my ($str) = @_;
my $chopped = chop_str(@_);
+ $str = to_utf8($str);
if ($chopped eq $str) {
return esc_html($chopped);
} else {
return unless (defined $project);
# some views should link to OPML, or to generic project feed,
# or don't have specific feed yet (so they should use generic)
- return if ($action =~ /^(?:tags|heads|forks|tag|search)$/x);
+ return if (!$action || $action =~ /^(?:tags|heads|forks|tag|search)$/x);
my $branch;
# branches refs uses 'refs/heads/' prefix (fullname) to differentiate
my $dir = $projects_list;
# remove the trailing "/"
$dir =~ s!/+$!!;
- my $pfxlen = length("$projects_list");
- my $pfxdepth = ($projects_list =~ tr!/!!);
+ my $pfxlen = length("$dir");
+ my $pfxdepth = ($dir =~ tr!/!!);
# when filtering, search only given subdirectory
if ($filter) {
$dir .= "/$filter";
my %co = @_;
local $/ = "\n";
- open my $fd, "-|", git_cmd(), 'grep', '-n',
+ open my $fd, "-|", git_cmd(), 'grep', '-n', '-z',
$search_use_regexp ? ('-E', '-i') : '-F',
$searchtext, $co{'tree'}
or die_error(500, "Open git-grep failed");
my $lastfile = '';
while (my $line = <$fd>) {
chomp $line;
- my ($file, $lno, $ltext, $binary);
+ my ($file, $file_href, $lno, $ltext, $binary);
last if ($matches++ > 1000);
if ($line =~ /^Binary file (.+) matches$/) {
$file = $1;
$binary = 1;
} else {
- (undef, $file, $lno, $ltext) = split(/:/, $line, 4);
+ ($file, $lno, $ltext) = split(/\0/, $line, 3);
+ $file =~ s/^$co{'tree'}://;
}
if ($file ne $lastfile) {
$lastfile and print "</td></tr>\n";
} else {
print "<tr class=\"light\">\n";
}
+ $file_href = href(action=>"blob", hash_base=>$co{'id'},
+ file_name=>$file);
print "<td class=\"list\">".
- $cgi->a({-href => href(action=>"blob", hash=>$co{'hash'},
- file_name=>"$file"),
- -class => "list"}, esc_path($file));
+ $cgi->a({-href => $file_href, -class => "list"}, esc_path($file));
print "</td><td>\n";
$lastfile = $file;
}
$ltext = esc_html($ltext, -nbsp=>1);
}
print "<div class=\"pre\">" .
- $cgi->a({-href => href(action=>"blob", hash=>$co{'hash'},
- file_name=>"$file").'#l'.$lno,
- -class => "linenr"}, sprintf('%4i', $lno))
- . ' ' . $ltext . "</div>\n";
+ $cgi->a({-href => $file_href.'#l'.$lno,
+ -class => "linenr"}, sprintf('%4i', $lno)) .
+ ' ' . $ltext . "</div>\n";
}
}
if ($lastfile) {
-type=>"text/plain", -charset => "utf-8",
-status=> "200 OK");
local $| = 1; # output autoflush
- print while <$fd>;
+ while (my $line = <$fd>) {
+ print to_utf8($line);
+ }
close $fd
or print "ERROR $!\n";
-charset => 'utf-8',
-content_disposition => 'inline; filename="opml.xml"');
+ my $title = esc_html($site_name);
print <<XML;
<?xml version="1.0" encoding="utf-8"?>
<opml version="1.0">
<head>
- <title>$site_name OPML Export</title>
+ <title>$title OPML Export</title>
</head>
<body>
<outline text="git RSS feeds">
#include "sigchain.h"
static char *configured_signing_key;
+static const char *gpg_program = "gpg";
void set_signing_key(const char *key)
{
int git_gpg_config(const char *var, const char *value, void *cb)
{
if (!strcmp(var, "user.signingkey")) {
+ set_signing_key(value);
+ }
+ if (!strcmp(var, "gpg.program")) {
if (!value)
return config_error_nonbool(var);
- set_signing_key(value);
+ gpg_program = xstrdup(value);
}
return 0;
}
gpg.argv = args;
gpg.in = -1;
gpg.out = -1;
- args[0] = "gpg";
+ args[0] = gpg_program;
args[1] = "-bsau";
args[2] = signing_key;
args[3] = NULL;
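The gpg.program value picked up in git_gpg_config() above replaces the hard-coded "gpg" both for the signing invocation shown here and for verify_signed_buffer(); for example, to use a differently named GnuPG binary (the path is an assumption):

    git config --global gpg.program /usr/bin/gpg2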
/*
* Run "gpg" to see if the payload matches the detached signature.
- * gpg_output_to tells where the output from "gpg" should go:
- * < 0: /dev/null
- * = 0: standard error of the calling process
- * > 0: the specified file descriptor
+ * gpg_output, when set, receives the diagnostic output from GPG.
*/
int verify_signed_buffer(const char *payload, size_t payload_size,
const char *signature, size_t signature_size,
struct strbuf *gpg_output)
{
struct child_process gpg;
- const char *args_gpg[] = {"gpg", "--verify", "FILE", "-", NULL};
+ const char *args_gpg[] = {NULL, "--verify", "FILE", "-", NULL};
char path[PATH_MAX];
int fd, ret;
+ args_gpg[0] = gpg_program;
fd = git_mkstemp(path, PATH_MAX, ".git_vtag_tmpXXXXXX");
if (fd < 0)
return error("could not create temporary file '%s': %s",
opt->output(opt, "\n", 1);
}
-static int match_funcname(struct grep_opt *opt, char *bol, char *eol)
+#ifndef NO_PTHREADS
+/*
+ * This lock protects access to the gitattributes machinery, which is
+ * not thread-safe.
+ */
+pthread_mutex_t grep_attr_mutex;
+
+static inline void grep_attr_lock(struct grep_opt *opt)
+{
+ if (opt->use_threads)
+ pthread_mutex_lock(&grep_attr_mutex);
+}
+
+static inline void grep_attr_unlock(struct grep_opt *opt)
+{
+ if (opt->use_threads)
+ pthread_mutex_unlock(&grep_attr_mutex);
+}
+#else
+#define grep_attr_lock(opt)
+#define grep_attr_unlock(opt)
+#endif
+
+static int match_funcname(struct grep_opt *opt, const char *name, char *bol, char *eol)
{
xdemitconf_t *xecfg = opt->priv;
- if (xecfg && xecfg->find_func) {
+ if (xecfg && !xecfg->find_func) {
+ struct userdiff_driver *drv;
+ grep_attr_lock(opt);
+ drv = userdiff_find_by_path(name);
+ grep_attr_unlock(opt);
+ if (drv && drv->funcname.pattern) {
+ const struct userdiff_funcname *pe = &drv->funcname;
+ xdiff_set_find_func(xecfg, pe->pattern, pe->cflags);
+ } else {
+ xecfg = opt->priv = NULL;
+ }
+ }
+
+ if (xecfg) {
char buf[1];
return xecfg->find_func(bol, eol - bol, buf, 1,
xecfg->find_func_priv) >= 0;
if (lno <= opt->last_shown)
break;
- if (match_funcname(opt, bol, eol)) {
+ if (match_funcname(opt, name, bol, eol)) {
show_line(opt, bol, eol, name, lno, '=');
break;
}
unsigned cur = lno, from = 1, funcname_lno = 0;
int funcname_needed = !!opt->funcname;
- if (opt->funcbody && !match_funcname(opt, bol, end))
+ if (opt->funcbody && !match_funcname(opt, name, bol, end))
funcname_needed = 2;
if (opt->pre_context < lno)
while (bol > buf && bol[-1] != '\n')
bol--;
cur--;
- if (funcname_needed && match_funcname(opt, bol, eol)) {
+ if (funcname_needed && match_funcname(opt, name, bol, eol)) {
funcname_lno = cur;
funcname_needed = 0;
}
return 0;
}
-int grep_threads_ok(const struct grep_opt *opt)
-{
- /* If this condition is true, then we may use the attribute
- * machinery in grep_buffer_1. The attribute code is not
- * thread safe, so we disable the use of threads.
- */
- if (opt->funcname && !opt->unmatch_name_only && !opt->status_only &&
- !opt->name_only)
- return 0;
-
- return 1;
-}
-
static void std_output(struct grep_opt *opt, const void *buf, size_t size)
{
fwrite(buf, size, 1, stdout);
}
memset(&xecfg, 0, sizeof(xecfg));
- if (opt->funcname && !opt->unmatch_name_only && !opt->status_only &&
- !opt->name_only && !binary_match_only && !collect_hits) {
- struct userdiff_driver *drv = userdiff_find_by_path(name);
- if (drv && drv->funcname.pattern) {
- const struct userdiff_funcname *pe = &drv->funcname;
- xdiff_set_find_func(&xecfg, pe->pattern, pe->cflags);
- opt->priv = &xecfg;
- }
- }
+ opt->priv = &xecfg;
+
try_lookahead = should_lookahead(opt);
while (left) {
show_function = 1;
goto next_line;
}
- if (show_function && match_funcname(opt, bol, eol))
+ if (show_function && match_funcname(opt, name, bol, eol))
show_function = 0;
if (show_function ||
(last_hit && lno <= last_hit + opt->post_context)) {
typedef int pcre_extra;
#endif
#include "kwset.h"
+#include "thread-utils.h"
enum grep_pat_token {
GREP_PATTERN,
int show_hunk_mark;
int file_break;
int heading;
+ int use_threads;
void *priv;
void (*output)(struct grep_opt *opt, const void *data, size_t size);
extern struct grep_opt *grep_opt_dup(const struct grep_opt *opt);
extern int grep_threads_ok(const struct grep_opt *opt);
+#ifndef NO_PTHREADS
+/*
+ * Mutex used around access to the attributes machinery if
+ * opt->use_threads. Must be initialized/destroyed by callers!
+ */
+extern pthread_mutex_t grep_attr_mutex;
+#endif
+
#endif
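Function-name lookups for function-context output now happen lazily per file inside match_funcname(), serialized by grep_attr_mutex when opt->use_threads is set, so threaded grep no longer needs the removed grep_threads_ok() opt-out. An invocation that exercises this path, assuming a funcname pattern applies to the files searched (the pattern is illustrative):

    git grep -p 'use_threads' -- '*.c'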
#include "cache.h"
#include "exec_cmd.h"
#include "run-command.h"
+#include "prompt.h"
#ifdef NO_OPENSSL
typedef void *SSL;
#else
goto bail;
}
if (!srvc->pass) {
- char prompt[80];
- sprintf(prompt, "Password (%s@%s): ", srvc->user, srvc->host);
- arg = git_getpass(prompt);
- if (!arg) {
- perror("getpass");
- exit(1);
- }
+ struct strbuf prompt = STRBUF_INIT;
+ strbuf_addf(&prompt, "Password (%s@%s): ", srvc->user, srvc->host);
+ arg = git_getpass(prompt.buf);
+ strbuf_release(&prompt);
if (!*arg) {
fprintf(stderr, "Skipping account %s@%s, no password\n", srvc->user, srvc->host);
goto bail;
#include "refs.h"
#include "string-list.h"
#include "color.h"
+#include "gpg-interface.h"
struct decoration name_decoration = { "object names" };
type = DECORATION_REF_REMOTE;
else if (!prefixcmp(refname, "refs/tags/"))
type = DECORATION_REF_TAG;
- else if (!prefixcmp(refname, "refs/stash"))
+ else if (!strcmp(refname, "refs/stash"))
type = DECORATION_REF_STASH;
- else if (!prefixcmp(refname, "HEAD"))
+ else if (!strcmp(refname, "HEAD"))
type = DECORATION_REF_HEAD;
if (!cb_data || *(int *)cb_data == DECORATE_SHORT_REFS)
*extra_headers_p = extra_headers;
}
+static void show_sig_lines(struct rev_info *opt, int status, const char *bol)
+{
+ const char *color, *reset, *eol;
+
+ color = diff_get_color_opt(&opt->diffopt,
+ status ? DIFF_WHITESPACE : DIFF_FRAGINFO);
+ reset = diff_get_color_opt(&opt->diffopt, DIFF_RESET);
+ while (*bol) {
+ eol = strchrnul(bol, '\n');
+ printf("%s%.*s%s%s", color, (int)(eol - bol), bol, reset,
+ *eol ? "\n" : "");
+ bol = (*eol) ? (eol + 1) : eol;
+ }
+}
+
+static void show_signature(struct rev_info *opt, struct commit *commit)
+{
+ struct strbuf payload = STRBUF_INIT;
+ struct strbuf signature = STRBUF_INIT;
+ struct strbuf gpg_output = STRBUF_INIT;
+ int status;
+
+ if (parse_signed_commit(commit->object.sha1, &payload, &signature) <= 0)
+ goto out;
+
+ status = verify_signed_buffer(payload.buf, payload.len,
+ signature.buf, signature.len,
+ &gpg_output);
+ if (status && !gpg_output.len)
+ strbuf_addstr(&gpg_output, "No signature\n");
+
+ show_sig_lines(opt, status, gpg_output.buf);
+
+ out:
+ strbuf_release(&gpg_output);
+ strbuf_release(&payload);
+ strbuf_release(&signature);
+}
+
+static int which_parent(const unsigned char *sha1, const struct commit *commit)
+{
+ int nth;
+ const struct commit_list *parent;
+
+ for (nth = 0, parent = commit->parents; parent; parent = parent->next) {
+ if (!hashcmp(parent->item->object.sha1, sha1))
+ return nth;
+ nth++;
+ }
+ return -1;
+}
+
+static int is_common_merge(const struct commit *commit)
+{
+ return (commit->parents
+ && commit->parents->next
+ && !commit->parents->next->next);
+}
+
+static void show_one_mergetag(struct rev_info *opt,
+ struct commit_extra_header *extra,
+ struct commit *commit)
+{
+ unsigned char sha1[20];
+ struct tag *tag;
+ struct strbuf verify_message;
+ int status, nth;
+ size_t payload_size, gpg_message_offset;
+
+ hash_sha1_file(extra->value, extra->len, typename(OBJ_TAG), sha1);
+ tag = lookup_tag(sha1);
+ if (!tag)
+ return; /* error message already given */
+
+ strbuf_init(&verify_message, 256);
+ if (parse_tag_buffer(tag, extra->value, extra->len))
+ strbuf_addstr(&verify_message, "malformed mergetag\n");
+ else if (is_common_merge(commit) &&
+ !hashcmp(tag->tagged->sha1,
+ commit->parents->next->item->object.sha1))
+ strbuf_addf(&verify_message,
+ "merged tag '%s'\n", tag->tag);
+ else if ((nth = which_parent(tag->tagged->sha1, commit)) < 0)
+ strbuf_addf(&verify_message, "tag %s names a non-parent %s\n",
+ tag->tag, sha1_to_hex(tag->tagged->sha1));
+ else
+ strbuf_addf(&verify_message,
+ "parent #%d, tagged '%s'\n", nth + 1, tag->tag);
+ gpg_message_offset = verify_message.len;
+
+ payload_size = parse_signature(extra->value, extra->len);
+ if ((extra->len <= payload_size) ||
+ (verify_signed_buffer(extra->value, payload_size,
+ extra->value + payload_size,
+ extra->len - payload_size,
+ &verify_message) &&
+ verify_message.len <= gpg_message_offset)) {
+ strbuf_addstr(&verify_message, "No signature\n");
+ status = -1;
+ }
+ else if (strstr(verify_message.buf + gpg_message_offset,
+ ": Good signature from "))
+ status = 0;
+ else
+ status = -1;
+
+ show_sig_lines(opt, status, verify_message.buf);
+ strbuf_release(&verify_message);
+}
+
+static void show_mergetag(struct rev_info *opt, struct commit *commit)
+{
+ struct commit_extra_header *extra, *to_free;
+
+ to_free = read_commit_extra_headers(commit, NULL);
+ for (extra = to_free; extra; extra = extra->next) {
+ if (strcmp(extra->key, "mergetag"))
+ continue; /* not a merge tag */
+ show_one_mergetag(opt, extra, commit);
+ }
+ free_commit_extra_headers(to_free);
+}
+
void show_log(struct rev_info *opt)
{
struct strbuf msgbuf = STRBUF_INIT;
}
}
+ if (opt->show_signature) {
+ show_signature(opt, commit);
+ show_mergetag(opt, commit);
+ }
+
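The output of show_signature() and show_mergetag() above is gated on opt->show_signature, which the --show-signature option of the log family sets; for instance:

    git log --show-signature -1

Per show_sig_lines(), GPG's messages are colored like hunk headers when verification succeeds and like whitespace errors when it does not.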
if (!commit->buffer)
return;
static int do_diff_combined(struct rev_info *opt, struct commit *commit)
{
- unsigned const char *sha1 = commit->object.sha1;
-
- diff_tree_combined_merge(sha1, opt->dense_combined_merges, opt);
+ diff_tree_combined_merge(commit, opt->dense_combined_merges, opt);
return !opt->loginfo;
}
{
unsigned char tree_sha1[20];
unsigned char commit_sha1[20];
+ struct strbuf msg = STRBUF_INIT;
if (!c || !c->tree.initialized || !c->tree.ref || !*c->tree.ref)
return -1;
if (write_notes_tree(&c->tree, tree_sha1))
return -1;
- if (commit_tree(c->validity, tree_sha1, NULL, commit_sha1, NULL) < 0)
+ strbuf_attach(&msg, c->validity,
+ strlen(c->validity), strlen(c->validity) + 1);
+ if (commit_tree(&msg, tree_sha1, NULL, commit_sha1, NULL, NULL) < 0)
return -1;
if (update_ref("update notes cache", c->tree.ref, commit_sha1, NULL,
0, QUIET_ON_ERR) < 0)
}
void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
- const char *msg, unsigned char *result_sha1)
+ const struct strbuf *msg, unsigned char *result_sha1)
{
unsigned char tree_sha1[20];
/* else: t->ref points to nothing, assume root/orphan commit */
}
- if (commit_tree(msg, tree_sha1, parents, result_sha1, NULL))
+ if (commit_tree(msg, tree_sha1, parents, result_sha1, NULL, NULL))
die("Failed to commit notes tree to database");
}
struct commit_list *parents = NULL;
commit_list_insert(remote, &parents); /* LIFO order */
commit_list_insert(local, &parents);
- create_notes_commit(local_tree, parents, o->commit_msg.buf,
+ create_notes_commit(local_tree, parents, &o->commit_msg,
result_sha1);
}
struct dir_struct dir;
char *path = xstrdup(git_path(NOTES_MERGE_WORKTREE "/"));
int path_len = strlen(path), i;
- const char *msg = strstr(partial_commit->buffer, "\n\n");
+ char *msg = strstr(partial_commit->buffer, "\n\n");
+ struct strbuf sb_msg = STRBUF_INIT;
if (o->verbosity >= 3)
printf("Committing notes in notes merge worktree at %.*s\n",
sha1_to_hex(obj_sha1), sha1_to_hex(blob_sha1));
}
- create_notes_commit(partial_tree, partial_commit->parents, msg,
+ strbuf_attach(&sb_msg, msg, strlen(msg), strlen(msg) + 1);
+ create_notes_commit(partial_tree, partial_commit->parents, &sb_msg,
result_sha1);
if (o->verbosity >= 4)
printf("Finalized notes merge commit: %s\n",
* The resulting commit SHA1 is stored in result_sha1.
*/
void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
- const char *msg, unsigned char *result_sha1);
+ const struct strbuf *msg, unsigned char *result_sha1);
/*
* Merge notes from o->remote_ref into o->local_ref
enum object_type type;
int eaten;
const unsigned char *repl = lookup_replace_object(sha1);
- void *buffer = read_sha1_file(sha1, &type, &size);
+ void *buffer;
+ struct object *obj;
+
+ obj = lookup_object(sha1);
+ if (obj && obj->parsed)
+ return obj;
+ buffer = read_sha1_file(sha1, &type, &size);
if (buffer) {
- struct object *obj;
if (check_sha1_signature(repl, buffer, size, typename(type)) < 0) {
free(buffer);
error("sha1 mismatch %s\n", sha1_to_hex(repl));
packed.fd = -1;
if (commit_lock_file(&packed) < 0)
die_errno("unable to overwrite old ref-pack file");
- if (cbdata.flags & PACK_REFS_PRUNE)
- prune_refs(cbdata.ref_to_prune);
+ prune_refs(cbdata.ref_to_prune);
return 0;
}
f = sha1fd_check(index_name);
} else {
if (!index_name) {
- static char tmpfile[PATH_MAX];
- fd = odb_mkstemp(tmpfile, sizeof(tmpfile), "pack/tmp_idx_XXXXXX");
- index_name = xstrdup(tmpfile);
+ static char tmp_file[PATH_MAX];
+ fd = odb_mkstemp(tmp_file, sizeof(tmp_file), "pack/tmp_idx_XXXXXX");
+ index_name = xstrdup(tmp_file);
} else {
unlink(index_name);
fd = open(index_name, O_CREAT|O_EXCL|O_WRONLY, 0600);
perl.mak
perl.mak.old
+MYMETA.json
MYMETA.yml
blib
blibdirs
#include "notes.h"
#include "color.h"
#include "reflog-walk.h"
+#include "gpg-interface.h"
static char *user_format;
static struct cmt_fmt_map {
const struct pretty_print_context *pretty_ctx;
unsigned commit_header_parsed:1;
unsigned commit_message_parsed:1;
+ unsigned commit_signature_parsed:1;
+ struct {
+ char *gpg_output;
+ char good_bad;
+ char *signer;
+ } signature;
char *message;
size_t width, indent1, indent2;
c->indent2 = new_indent2;
}
+static struct {
+ char result;
+ const char *check;
+} signature_check[] = {
+ { 'G', ": Good signature from " },
+ { 'B', ": BAD signature from " },
+};
+
+static void parse_signature_lines(struct format_commit_context *ctx)
+{
+ const char *buf = ctx->signature.gpg_output;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(signature_check); i++) {
+ const char *found = strstr(buf, signature_check[i].check);
+ const char *next;
+ if (!found)
+ continue;
+ ctx->signature.good_bad = signature_check[i].result;
+ found += strlen(signature_check[i].check);
+ next = strchrnul(found, '\n');
+ ctx->signature.signer = xmemdupz(found, next - found);
+ break;
+ }
+}
+
+static void parse_commit_signature(struct format_commit_context *ctx)
+{
+ struct strbuf payload = STRBUF_INIT;
+ struct strbuf signature = STRBUF_INIT;
+ struct strbuf gpg_output = STRBUF_INIT;
+ int status;
+
+ ctx->commit_signature_parsed = 1;
+
+ if (parse_signed_commit(ctx->commit->object.sha1,
+ &payload, &signature) <= 0)
+ goto out;
+ status = verify_signed_buffer(payload.buf, payload.len,
+ signature.buf, signature.len,
+ &gpg_output);
+ if (status && !gpg_output.len)
+ goto out;
+ ctx->signature.gpg_output = strbuf_detach(&gpg_output, NULL);
+ parse_signature_lines(ctx);
+
+ out:
+ strbuf_release(&gpg_output);
+ strbuf_release(&payload);
+ strbuf_release(&signature);
+}
+
+
+static int format_reflog_person(struct strbuf *sb,
+ char part,
+ struct reflog_walk_info *log,
+ enum date_mode dmode)
+{
+ const char *ident;
+
+ if (!log)
+ return 2;
+
+ ident = get_reflog_ident(log);
+ if (!ident)
+ return 2;
+
+ return format_person_part(sb, part, ident, strlen(ident), dmode);
+}
+
static size_t format_commit_one(struct strbuf *sb, const char *placeholder,
void *context)
{
if (c->pretty_ctx->reflog_info)
get_reflog_message(sb, c->pretty_ctx->reflog_info);
return 2;
+ case 'n':
+ case 'N':
+ case 'e':
+ case 'E':
+ return format_reflog_person(sb,
+ placeholder[1],
+ c->pretty_ctx->reflog_info,
+ c->pretty_ctx->date_mode);
}
return 0; /* unknown %g placeholder */
case 'N':
return 0;
}
+ if (placeholder[0] == 'G') {
+ if (!c->commit_signature_parsed)
+ parse_commit_signature(c);
+ switch (placeholder[1]) {
+ case 'G':
+ if (c->signature.gpg_output)
+ strbuf_addstr(sb, c->signature.gpg_output);
+ break;
+ case '?':
+ switch (c->signature.good_bad) {
+ case 'G':
+ case 'B':
+ strbuf_addch(sb, c->signature.good_bad);
+ }
+ break;
+ case 'S':
+ if (c->signature.signer)
+ strbuf_addstr(sb, c->signature.signer);
+ break;
+ }
+ return 2;
+ }
+
+
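The %G placeholders handled above can be used in a custom pretty format; for example (the format string is purely illustrative):

    git log --format='%h %G? %GS %s'

Here %G? expands to G or B depending on which signature_check[] string matched, %GS to the signer name captured after it, and %GG to the raw GPG output.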
/* For the rest we have to parse the commit header. */
if (!c->commit_header_parsed)
parse_commit_header(c);
if (context.message != commit->buffer)
free(context.message);
+ free(context.signature.gpg_output);
+ free(context.signature.signer);
}
static void pp_header(const struct pretty_print_context *pp,
--- /dev/null
+#include "cache.h"
+#include "run-command.h"
+#include "strbuf.h"
+#include "prompt.h"
+#include "compat/terminal.h"
+
+static char *do_askpass(const char *cmd, const char *prompt)
+{
+ struct child_process pass;
+ const char *args[3];
+ static struct strbuf buffer = STRBUF_INIT;
+
+ args[0] = cmd;
+ args[1] = prompt;
+ args[2] = NULL;
+
+ memset(&pass, 0, sizeof(pass));
+ pass.argv = args;
+ pass.out = -1;
+
+ if (start_command(&pass))
+ exit(1);
+
+ strbuf_reset(&buffer);
+ if (strbuf_read(&buffer, pass.out, 20) < 0)
+ die("failed to get '%s' from %s\n", prompt, cmd);
+
+ close(pass.out);
+
+ if (finish_command(&pass))
+ exit(1);
+
+ strbuf_setlen(&buffer, strcspn(buffer.buf, "\r\n"));
+
+ return buffer.buf;
+}
+
+char *git_prompt(const char *prompt, int flags)
+{
+ char *r;
+
+ if (flags & PROMPT_ASKPASS) {
+ const char *askpass;
+
+ askpass = getenv("GIT_ASKPASS");
+ if (!askpass)
+ askpass = askpass_program;
+ if (!askpass)
+ askpass = getenv("SSH_ASKPASS");
+ if (askpass && *askpass)
+ return do_askpass(askpass, prompt);
+ }
+
+ r = git_terminal_prompt(prompt, flags & PROMPT_ECHO);
+ if (!r)
+ die_errno("could not read '%s'", prompt);
+ return r;
+}
+
+char *git_getpass(const char *prompt)
+{
+ return git_prompt(prompt, PROMPT_ASKPASS);
+}
--- /dev/null
+#ifndef PROMPT_H
+#define PROMPT_H
+
+#define PROMPT_ASKPASS (1<<0)
+#define PROMPT_ECHO (1<<1)
+
+char *git_prompt(const char *prompt, int flags);
+char *git_getpass(const char *prompt);
+
+#endif /* PROMPT_H */
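git_prompt() consults an askpass helper (GIT_ASKPASS, then the configured askpass program, then SSH_ASKPASS) before falling back to the terminal, so credential prompts can be redirected away from the terminal entirely; the helper path below is only an example:

    GIT_ASKPASS=/usr/bin/ssh-askpass git push https://example.com/repo.git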
strbuf_add(sb, info->message, len);
}
+const char *get_reflog_ident(struct reflog_walk_info *reflog_info)
+{
+ struct commit_reflog *commit_reflog = reflog_info->last_commit_reflog;
+ struct reflog_info *info;
+
+ if (!commit_reflog)
+ return NULL;
+
+ info = &commit_reflog->reflogs->items[commit_reflog->recno+1];
+ return info->email;
+}
+
void show_reflog_message(struct reflog_walk_info *reflog_info, int oneline,
enum date_mode dmode)
{
enum date_mode);
extern void get_reflog_message(struct strbuf *sb,
struct reflog_walk_info *reflog_info);
+extern const char *get_reflog_ident(struct reflog_walk_info *reflog_info);
extern void get_reflog_selector(struct strbuf *sb,
struct reflog_walk_info *reflog_info,
enum date_mode dmode,
unsigned char flag; /* ISSYMREF? ISPACKED? */
unsigned char sha1[20];
unsigned char peeled[20];
+ /* The full name of the reference (e.g., "refs/heads/master"): */
char name[FLEX_ARRAY];
};
struct ref_array {
int nr, alloc;
+
+ /*
+ * Entries with index 0 <= i < sorted are sorted by name. New
+ * entries are appended to the list unsorted, and are sorted
+ * only when required; thus we avoid the need to sort the list
+ * after the addition of every reference.
+ */
+ int sorted;
+
struct ref_entry **refs;
};
+/*
+ * Parse one line from a packed-refs file. Write the SHA1 to sha1.
+ * Return a pointer to the refname within the line (null-terminated),
+ * or NULL if there was a problem.
+ */
static const char *parse_ref_line(char *line, unsigned char *sha1)
{
/*
return line;
}
-static void add_ref(const char *name, const unsigned char *sha1,
- int flag, int check_name, struct ref_array *refs,
- struct ref_entry **new_entry)
+static struct ref_entry *create_ref_entry(const char *refname,
+ const unsigned char *sha1, int flag,
+ int check_name)
{
int len;
- struct ref_entry *entry;
+ struct ref_entry *ref;
- /* Allocate it and add it in.. */
- len = strlen(name) + 1;
- entry = xmalloc(sizeof(struct ref_entry) + len);
- hashcpy(entry->sha1, sha1);
- hashclr(entry->peeled);
if (check_name &&
- check_refname_format(name, REFNAME_ALLOW_ONELEVEL|REFNAME_DOT_COMPONENT))
- die("Reference has invalid format: '%s'", name);
- memcpy(entry->name, name, len);
- entry->flag = flag;
- if (new_entry)
- *new_entry = entry;
+ check_refname_format(refname, REFNAME_ALLOW_ONELEVEL|REFNAME_DOT_COMPONENT))
+ die("Reference has invalid format: '%s'", refname);
+ len = strlen(refname) + 1;
+ ref = xmalloc(sizeof(struct ref_entry) + len);
+ hashcpy(ref->sha1, sha1);
+ hashclr(ref->peeled);
+ memcpy(ref->name, refname, len);
+ ref->flag = flag;
+ return ref;
+}
+
+/* Add a ref_entry to the end of the ref_array (unsorted). */
+static void add_ref(struct ref_array *refs, struct ref_entry *ref)
+{
ALLOC_GROW(refs->refs, refs->nr + 1, refs->alloc);
- refs->refs[refs->nr++] = entry;
+ refs->refs[refs->nr++] = ref;
}
static int ref_entry_cmp(const void *a, const void *b)
return strcmp(one->name, two->name);
}
+/*
+ * Emit a warning and return true iff ref1 and ref2 have the same name
+ * and the same sha1. Die if they have the same name but different
+ * sha1s.
+ */
+static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2)
+{
+ if (!strcmp(ref1->name, ref2->name)) {
+ /* Duplicate name; make sure that the SHA1s match: */
+ if (hashcmp(ref1->sha1, ref2->sha1))
+ die("Duplicated ref, and SHA1s don't match: %s",
+ ref1->name);
+ warning("Duplicated ref: %s", ref1->name);
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/*
+ * Sort the entries in array (if they are not already sorted).
+ */
static void sort_ref_array(struct ref_array *array)
{
- int i = 0, j = 1;
+ int i, j;
- /* Nothing to sort unless there are at least two entries */
- if (array->nr < 2)
+ /*
+ * This check also prevents passing a zero-length array to qsort(),
+ * which is a problem on some platforms.
+ */
+ if (array->sorted == array->nr)
return;
qsort(array->refs, array->nr, sizeof(*array->refs), ref_entry_cmp);
/* Remove any duplicates from the ref_array */
- for (; j < array->nr; j++) {
- struct ref_entry *a = array->refs[i];
- struct ref_entry *b = array->refs[j];
- if (!strcmp(a->name, b->name)) {
- if (hashcmp(a->sha1, b->sha1))
- die("Duplicated ref, and SHA1s don't match: %s",
- a->name);
- warning("Duplicated ref: %s", a->name);
- free(b);
+ i = 0;
+ for (j = 1; j < array->nr; j++) {
+ if (is_dup_ref(array->refs[i], array->refs[j])) {
+ free(array->refs[j]);
continue;
}
- i++;
- array->refs[i] = array->refs[j];
+ array->refs[++i] = array->refs[j];
}
- array->nr = i + 1;
+ array->sorted = array->nr = i + 1;
}
-static struct ref_entry *search_ref_array(struct ref_array *array, const char *name)
+static struct ref_entry *search_ref_array(struct ref_array *array, const char *refname)
{
struct ref_entry *e, **r;
int len;
- if (name == NULL)
+ if (refname == NULL)
return NULL;
if (!array->nr)
return NULL;
-
- len = strlen(name) + 1;
+ sort_ref_array(array);
+ len = strlen(refname) + 1;
e = xmalloc(sizeof(struct ref_entry) + len);
- memcpy(e->name, name, len);
+ memcpy(e->name, refname, len);
r = bsearch(&e, array->refs, array->nr, sizeof(*array->refs), ref_entry_cmp);
static struct ref_entry *current_ref;
+/*
+ * Never call sort_ref_array() on the extra_refs, because it is
+ * allowed to contain entries with duplicate names.
+ */
static struct ref_array extra_refs;
-static void free_ref_array(struct ref_array *array)
+static void clear_ref_array(struct ref_array *array)
{
int i;
for (i = 0; i < array->nr; i++)
free(array->refs[i]);
free(array->refs);
- array->nr = array->alloc = 0;
+ array->sorted = array->nr = array->alloc = 0;
array->refs = NULL;
}
static void clear_packed_ref_cache(struct ref_cache *refs)
{
if (refs->did_packed)
- free_ref_array(&refs->packed);
+ clear_ref_array(&refs->packed);
refs->did_packed = 0;
}
static void clear_loose_ref_cache(struct ref_cache *refs)
{
if (refs->did_loose)
- free_ref_array(&refs->loose);
+ clear_ref_array(&refs->loose);
refs->did_loose = 0;
}
while (fgets(refline, sizeof(refline), f)) {
unsigned char sha1[20];
- const char *name;
+ const char *refname;
static const char header[] = "# pack-refs with:";
if (!strncmp(refline, header, sizeof(header)-1)) {
continue;
}
- name = parse_ref_line(refline, sha1);
- if (name) {
- add_ref(name, sha1, flag, 1, array, &last);
+ refname = parse_ref_line(refline, sha1);
+ if (refname) {
+ last = create_ref_entry(refname, sha1, flag, 1);
+ add_ref(array, last);
continue;
}
if (last &&
!get_sha1_hex(refline + 1, sha1))
hashcpy(last->peeled, sha1);
}
- sort_ref_array(array);
}
-void add_extra_ref(const char *name, const unsigned char *sha1, int flag)
+void add_extra_ref(const char *refname, const unsigned char *sha1, int flag)
{
- add_ref(name, sha1, flag, 0, &extra_refs, NULL);
+ add_ref(&extra_refs, create_ref_entry(refname, sha1, flag, 0));
}
void clear_extra_refs(void)
{
- free_ref_array(&extra_refs);
+ clear_ref_array(&extra_refs);
}
-static struct ref_array *get_packed_refs(const char *submodule)
+static struct ref_array *get_packed_refs(struct ref_cache *refs)
{
- struct ref_cache *refs = get_ref_cache(submodule);
-
if (!refs->did_packed) {
const char *packed_refs_file;
FILE *f;
- if (submodule)
- packed_refs_file = git_path_submodule(submodule, "packed-refs");
+ if (*refs->name)
+ packed_refs_file = git_path_submodule(refs->name, "packed-refs");
else
packed_refs_file = git_path("packed-refs");
f = fopen(packed_refs_file, "r");
return &refs->packed;
}
-static void get_ref_dir(const char *submodule, const char *base,
+void add_packed_ref(const char *refname, const unsigned char *sha1)
+{
+ add_ref(get_packed_refs(get_ref_cache(NULL)),
+ create_ref_entry(refname, sha1, REF_ISPACKED, 1));
+}
+
+static void get_ref_dir(struct ref_cache *refs, const char *base,
struct ref_array *array)
{
DIR *dir;
const char *path;
- if (submodule)
- path = git_path_submodule(submodule, "%s", base);
+ if (*refs->name)
+ path = git_path_submodule(refs->name, "%s", base);
else
path = git_path("%s", base);
if (dir) {
struct dirent *de;
int baselen = strlen(base);
- char *ref = xmalloc(baselen + 257);
+ char *refname = xmalloc(baselen + 257);
- memcpy(ref, base, baselen);
+ memcpy(refname, base, baselen);
if (baselen && base[baselen-1] != '/')
- ref[baselen++] = '/';
+ refname[baselen++] = '/';
while ((de = readdir(dir)) != NULL) {
unsigned char sha1[20];
continue;
if (has_extension(de->d_name, ".lock"))
continue;
- memcpy(ref + baselen, de->d_name, namelen+1);
- refdir = submodule
- ? git_path_submodule(submodule, "%s", ref)
- : git_path("%s", ref);
+ memcpy(refname + baselen, de->d_name, namelen+1);
+ refdir = *refs->name
+ ? git_path_submodule(refs->name, "%s", refname)
+ : git_path("%s", refname);
if (stat(refdir, &st) < 0)
continue;
if (S_ISDIR(st.st_mode)) {
- get_ref_dir(submodule, ref, array);
+ get_ref_dir(refs, refname, array);
continue;
}
- if (submodule) {
+ if (*refs->name) {
hashclr(sha1);
flag = 0;
- if (resolve_gitlink_ref(submodule, ref, sha1) < 0) {
+ if (resolve_gitlink_ref(refs->name, refname, sha1) < 0) {
hashclr(sha1);
flag |= REF_ISBROKEN;
}
- } else if (read_ref_full(ref, sha1, 1, &flag)) {
+ } else if (read_ref_full(refname, sha1, 1, &flag)) {
hashclr(sha1);
flag |= REF_ISBROKEN;
}
- add_ref(ref, sha1, flag, 1, array, NULL);
+ add_ref(array, create_ref_entry(refname, sha1, flag, 1));
}
- free(ref);
+ free(refname);
closedir(dir);
}
}
for_each_rawref(warn_if_dangling_symref, &data);
}
-static struct ref_array *get_loose_refs(const char *submodule)
+static struct ref_array *get_loose_refs(struct ref_cache *refs)
{
- struct ref_cache *refs = get_ref_cache(submodule);
-
if (!refs->did_loose) {
- get_ref_dir(submodule, "refs", &refs->loose);
- sort_ref_array(&refs->loose);
+ get_ref_dir(refs, "refs", &refs->loose);
refs->did_loose = 1;
}
return &refs->loose;
/*
* Called by resolve_gitlink_ref_recursive() after it failed to read
- * from "name", which is "module/.git/<refname>". Find <refname> in
- * the packed-refs file for the submodule.
+ * from the loose refs in ref_cache refs. Find <refname> in the
+ * packed-refs file for the submodule.
*/
-static int resolve_gitlink_packed_ref(char *name, int pathlen, const char *refname, unsigned char *result)
+static int resolve_gitlink_packed_ref(struct ref_cache *refs,
+ const char *refname, unsigned char *sha1)
{
- int retval = -1;
struct ref_entry *ref;
- struct ref_array *array;
+ struct ref_array *array = get_packed_refs(refs);
- /* being defensive: resolve_gitlink_ref() did this for us */
- if (pathlen < 6 || memcmp(name + pathlen - 6, "/.git/", 6))
- die("Oops");
- name[pathlen - 6] = '\0'; /* make it path to the submodule */
- array = get_packed_refs(name);
ref = search_ref_array(array, refname);
- if (ref != NULL) {
- memcpy(result, ref->sha1, 20);
- retval = 0;
- }
- return retval;
+ if (ref == NULL)
+ return -1;
+
+ memcpy(sha1, ref->sha1, 20);
+ return 0;
}
-static int resolve_gitlink_ref_recursive(char *name, int pathlen, const char *refname, unsigned char *result, int recursion)
+static int resolve_gitlink_ref_recursive(struct ref_cache *refs,
+ const char *refname, unsigned char *sha1,
+ int recursion)
{
- int fd, len = strlen(refname);
+ int fd, len;
char buffer[128], *p;
+ char *path;
- if (recursion > MAXDEPTH || len > MAXREFLEN)
+ if (recursion > MAXDEPTH || strlen(refname) > MAXREFLEN)
return -1;
- memcpy(name + pathlen, refname, len+1);
- fd = open(name, O_RDONLY);
+ path = *refs->name
+ ? git_path_submodule(refs->name, "%s", refname)
+ : git_path("%s", refname);
+ fd = open(path, O_RDONLY);
if (fd < 0)
- return resolve_gitlink_packed_ref(name, pathlen, refname, result);
+ return resolve_gitlink_packed_ref(refs, refname, sha1);
len = read(fd, buffer, sizeof(buffer)-1);
close(fd);
buffer[len] = 0;
/* Was it a detached head or an old-fashioned symlink? */
- if (!get_sha1_hex(buffer, result))
+ if (!get_sha1_hex(buffer, sha1))
return 0;
/* Symref? */
while (isspace(*p))
p++;
- return resolve_gitlink_ref_recursive(name, pathlen, p, result, recursion+1);
+ return resolve_gitlink_ref_recursive(refs, p, sha1, recursion+1);
}
-int resolve_gitlink_ref(const char *path, const char *refname, unsigned char *result)
+int resolve_gitlink_ref(const char *path, const char *refname, unsigned char *sha1)
{
int len = strlen(path), retval;
- char *gitdir;
- const char *tmp;
+ char *submodule;
+ struct ref_cache *refs;
while (len && path[len-1] == '/')
len--;
if (!len)
return -1;
- gitdir = xmalloc(len + MAXREFLEN + 8);
- memcpy(gitdir, path, len);
- memcpy(gitdir + len, "/.git", 6);
- len += 5;
-
- tmp = read_gitfile(gitdir);
- if (tmp) {
- free(gitdir);
- len = strlen(tmp);
- gitdir = xmalloc(len + MAXREFLEN + 3);
- memcpy(gitdir, tmp, len);
- }
- gitdir[len] = '/';
- gitdir[++len] = '\0';
- retval = resolve_gitlink_ref_recursive(gitdir, len, refname, result, 0);
- free(gitdir);
+ submodule = xstrndup(path, len);
+ refs = get_ref_cache(submodule);
+ free(submodule);
+
+ retval = resolve_gitlink_ref_recursive(refs, refname, sha1, 0);
return retval;
}
* Try to read ref from the packed references. On success, set sha1
* and return 0; otherwise, return -1.
*/
-static int get_packed_ref(const char *ref, unsigned char *sha1)
+static int get_packed_ref(const char *refname, unsigned char *sha1)
{
- struct ref_array *packed = get_packed_refs(NULL);
- struct ref_entry *entry = search_ref_array(packed, ref);
+ struct ref_array *packed = get_packed_refs(get_ref_cache(NULL));
+ struct ref_entry *entry = search_ref_array(packed, refname);
if (entry) {
hashcpy(sha1, entry->sha1);
return 0;
return -1;
}
-const char *resolve_ref_unsafe(const char *ref, unsigned char *sha1, int reading, int *flag)
+const char *resolve_ref_unsafe(const char *refname, unsigned char *sha1, int reading, int *flag)
{
int depth = MAXDEPTH;
ssize_t len;
char buffer[256];
- static char ref_buffer[256];
+ static char refname_buffer[256];
if (flag)
*flag = 0;
- if (check_refname_format(ref, REFNAME_ALLOW_ONELEVEL))
+ if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
return NULL;
for (;;) {
if (--depth < 0)
return NULL;
- git_snpath(path, sizeof(path), "%s", ref);
+ git_snpath(path, sizeof(path), "%s", refname);
if (lstat(path, &st) < 0) {
if (errno != ENOENT)
* The loose reference file does not exist;
* check for a packed reference.
*/
- if (!get_packed_ref(ref, sha1)) {
+ if (!get_packed_ref(refname, sha1)) {
if (flag)
*flag |= REF_ISPACKED;
- return ref;
+ return refname;
}
/* The reference is not a packed reference, either. */
if (reading) {
return NULL;
} else {
hashclr(sha1);
- return ref;
+ return refname;
}
}
buffer[len] = 0;
if (!prefixcmp(buffer, "refs/") &&
!check_refname_format(buffer, 0)) {
- strcpy(ref_buffer, buffer);
- ref = ref_buffer;
+ strcpy(refname_buffer, buffer);
+ refname = refname_buffer;
if (flag)
*flag |= REF_ISSYMREF;
continue;
*flag |= REF_ISBROKEN;
return NULL;
}
- ref = strcpy(ref_buffer, buf);
+ refname = strcpy(refname_buffer, buf);
}
/* Please note that FETCH_HEAD has a second line containing other data. */
if (get_sha1_hex(buffer, sha1) || (buffer[40] != '\0' && !isspace(buffer[40]))) {
*flag |= REF_ISBROKEN;
return NULL;
}
- return ref;
+ return refname;
}
char *resolve_refdup(const char *ref, unsigned char *sha1, int reading, int *flag)
void *cb_data;
};
-int read_ref_full(const char *ref, unsigned char *sha1, int reading, int *flags)
+int read_ref_full(const char *refname, unsigned char *sha1, int reading, int *flags)
{
- if (resolve_ref_unsafe(ref, sha1, reading, flags))
+ if (resolve_ref_unsafe(refname, sha1, reading, flags))
return 0;
return -1;
}
-int read_ref(const char *ref, unsigned char *sha1)
+int read_ref(const char *refname, unsigned char *sha1)
{
- return read_ref_full(ref, sha1, 1, NULL);
+ return read_ref_full(refname, sha1, 1, NULL);
}
#define DO_FOR_EACH_INCLUDE_BROKEN 01
return fn(entry->name + trim, entry->sha1, entry->flag, cb_data);
}
-static int filter_refs(const char *ref, const unsigned char *sha, int flags,
- void *data)
+static int filter_refs(const char *refname, const unsigned char *sha1, int flags,
+ void *data)
{
struct ref_filter *filter = (struct ref_filter *)data;
- if (fnmatch(filter->pattern, ref, 0))
+ if (fnmatch(filter->pattern, refname, 0))
return 0;
- return filter->fn(ref, sha, flags, filter->cb_data);
+ return filter->fn(refname, sha1, flags, filter->cb_data);
}
-int peel_ref(const char *ref, unsigned char *sha1)
+int peel_ref(const char *refname, unsigned char *sha1)
{
int flag;
unsigned char base[20];
struct object *o;
- if (current_ref && (current_ref->name == ref
- || !strcmp(current_ref->name, ref))) {
+ if (current_ref && (current_ref->name == refname
+ || !strcmp(current_ref->name, refname))) {
if (current_ref->flag & REF_KNOWS_PEELED) {
hashcpy(sha1, current_ref->peeled);
return 0;
goto fallback;
}
- if (read_ref_full(ref, base, 1, &flag))
+ if (read_ref_full(refname, base, 1, &flag))
return -1;
if ((flag & REF_ISPACKED)) {
- struct ref_array *array = get_packed_refs(NULL);
- struct ref_entry *r = search_ref_array(array, ref);
+ struct ref_array *array = get_packed_refs(get_ref_cache(NULL));
+ struct ref_entry *r = search_ref_array(array, refname);
if (r != NULL && r->flag & REF_KNOWS_PEELED) {
hashcpy(sha1, r->peeled);
fallback:
o = parse_object(base);
if (o && o->type == OBJ_TAG) {
- o = deref_tag(o, ref, 0);
+ o = deref_tag(o, refname, 0);
if (o) {
hashcpy(sha1, o->sha1);
return 0;
int trim, int flags, void *cb_data)
{
int retval = 0, i, p = 0, l = 0;
- struct ref_array *packed = get_packed_refs(submodule);
- struct ref_array *loose = get_loose_refs(submodule);
+ struct ref_cache *refs = get_ref_cache(submodule);
+ struct ref_array *packed = get_packed_refs(refs);
+ struct ref_array *loose = get_loose_refs(refs);
struct ref_array *extra = &extra_refs;
for (i = 0; i < extra->nr; i++)
retval = do_one_ref(base, fn, trim, flags, cb_data, extra->refs[i]);
+ sort_ref_array(packed);
+ sort_ref_array(loose);
while (p < packed->nr && l < loose->nr) {
struct ref_entry *entry;
int cmp = strcmp(packed->refs[p]->name, loose->refs[l]->name);
}
/*
- * Try to read one refname component from the front of ref. Return
+ * Try to read one refname component from the front of refname. Return
* the length of the component found, or -1 if the component is not
* legal.
*/
-static int check_refname_component(const char *ref, int flags)
+static int check_refname_component(const char *refname, int flags)
{
const char *cp;
char last = '\0';
- for (cp = ref; ; cp++) {
+ for (cp = refname; ; cp++) {
char ch = *cp;
if (ch == '\0' || ch == '/')
break;
return -1; /* Refname contains "@{". */
last = ch;
}
- if (cp == ref)
+ if (cp == refname)
return -1; /* Component has zero length. */
- if (ref[0] == '.') {
+ if (refname[0] == '.') {
if (!(flags & REFNAME_DOT_COMPONENT))
return -1; /* Component starts with '.'. */
/*
* Even if leading dots are allowed, don't allow "."
* as a component (".." is prevented by a rule above).
*/
- if (ref[1] == '\0')
+ if (refname[1] == '\0')
return -1; /* Component equals ".". */
}
- if (cp - ref >= 5 && !memcmp(cp - 5, ".lock", 5))
+ if (cp - refname >= 5 && !memcmp(cp - 5, ".lock", 5))
return -1; /* Refname ends with ".lock". */
- return cp - ref;
+ return cp - refname;
}
-int check_refname_format(const char *ref, int flags)
+int check_refname_format(const char *refname, int flags)
{
int component_len, component_count = 0;
while (1) {
/* We are at the start of a path component. */
- component_len = check_refname_component(ref, flags);
+ component_len = check_refname_component(refname, flags);
if (component_len < 0) {
if ((flags & REFNAME_REFSPEC_PATTERN) &&
- ref[0] == '*' &&
- (ref[1] == '\0' || ref[1] == '/')) {
+ refname[0] == '*' &&
+ (refname[1] == '\0' || refname[1] == '/')) {
/* Accept one wildcard as a full refname component. */
flags &= ~REFNAME_REFSPEC_PATTERN;
component_len = 1;
}
}
component_count++;
- if (ref[component_len] == '\0')
+ if (refname[component_len] == '\0')
break;
/* Skip to next component. */
- ref += component_len + 1;
+ refname += component_len + 1;
}
- if (ref[component_len - 1] == '.')
+ if (refname[component_len - 1] == '.')
return -1; /* Refname ends with '.'. */
if (!(flags & REFNAME_ALLOW_ONELEVEL) && component_count < 2)
return -1; /* Refname has only one component. */
return result;
}
-static int is_refname_available(const char *ref, const char *oldref,
- struct ref_array *array, int quiet)
+/*
+ * Return true iff a reference named refname could be created without
+ * conflicting with the name of an existing reference. If oldrefname
+ * is non-NULL, ignore potential conflicts with oldrefname (e.g.,
+ * because oldrefname is scheduled for deletion in the same
+ * operation).
+ */
+static int is_refname_available(const char *refname, const char *oldrefname,
+ struct ref_array *array)
{
- int i, namlen = strlen(ref); /* e.g. 'foo/bar' */
+ int i, namlen = strlen(refname); /* e.g. 'foo/bar' */
for (i = 0; i < array->nr; i++ ) {
struct ref_entry *entry = array->refs[i];
/* entry->name could be 'foo' or 'foo/bar/baz' */
- if (!oldref || strcmp(oldref, entry->name)) {
+ if (!oldrefname || strcmp(oldrefname, entry->name)) {
int len = strlen(entry->name);
int cmplen = (namlen < len) ? namlen : len;
- const char *lead = (namlen < len) ? entry->name : ref;
- if (!strncmp(ref, entry->name, cmplen) &&
+ const char *lead = (namlen < len) ? entry->name : refname;
+ if (!strncmp(refname, entry->name, cmplen) &&
lead[cmplen] == '/') {
- if (!quiet)
- error("'%s' exists; cannot create '%s'",
- entry->name, ref);
+ error("'%s' exists; cannot create '%s'",
+ entry->name, refname);
return 0;
}
}
return logs_found;
}
-static struct ref_lock *lock_ref_sha1_basic(const char *ref, const unsigned char *old_sha1, int flags, int *type_p)
+static struct ref_lock *lock_ref_sha1_basic(const char *refname,
+ const unsigned char *old_sha1,
+ int flags, int *type_p)
{
char *ref_file;
- const char *orig_ref = ref;
+ const char *orig_refname = refname;
struct ref_lock *lock;
int last_errno = 0;
int type, lflags;
lock = xcalloc(1, sizeof(struct ref_lock));
lock->lock_fd = -1;
- ref = resolve_ref_unsafe(ref, lock->old_sha1, mustexist, &type);
- if (!ref && errno == EISDIR) {
+ refname = resolve_ref_unsafe(refname, lock->old_sha1, mustexist, &type);
+ if (!refname && errno == EISDIR) {
/* we are trying to lock foo but we used to
* have foo/bar which now does not exist;
* it is normal for the empty directory 'foo'
* to remain.
*/
- ref_file = git_path("%s", orig_ref);
+ ref_file = git_path("%s", orig_refname);
if (remove_empty_directories(ref_file)) {
last_errno = errno;
- error("there are still refs under '%s'", orig_ref);
+ error("there are still refs under '%s'", orig_refname);
goto error_return;
}
- ref = resolve_ref_unsafe(orig_ref, lock->old_sha1, mustexist, &type);
+ refname = resolve_ref_unsafe(orig_refname, lock->old_sha1, mustexist, &type);
}
if (type_p)
*type_p = type;
- if (!ref) {
+ if (!refname) {
last_errno = errno;
error("unable to resolve reference %s: %s",
- orig_ref, strerror(errno));
+ orig_refname, strerror(errno));
goto error_return;
}
missing = is_null_sha1(lock->old_sha1);
* name is a proper prefix of our refname.
*/
if (missing &&
- !is_refname_available(ref, NULL, get_packed_refs(NULL), 0)) {
+ !is_refname_available(refname, NULL, get_packed_refs(get_ref_cache(NULL)))) {
last_errno = ENOTDIR;
goto error_return;
}
lflags = LOCK_DIE_ON_ERROR;
if (flags & REF_NODEREF) {
- ref = orig_ref;
+ refname = orig_refname;
lflags |= LOCK_NODEREF;
}
- lock->ref_name = xstrdup(ref);
- lock->orig_ref_name = xstrdup(orig_ref);
- ref_file = git_path("%s", ref);
+ lock->ref_name = xstrdup(refname);
+ lock->orig_ref_name = xstrdup(orig_refname);
+ ref_file = git_path("%s", refname);
if (missing)
lock->force_write = 1;
if ((flags & REF_NODEREF) && (type & REF_ISSYMREF))
return NULL;
}
-struct ref_lock *lock_ref_sha1(const char *ref, const unsigned char *old_sha1)
+struct ref_lock *lock_ref_sha1(const char *refname, const unsigned char *old_sha1)
{
char refpath[PATH_MAX];
- if (check_refname_format(ref, 0))
+ if (check_refname_format(refname, 0))
return NULL;
- strcpy(refpath, mkpath("refs/%s", ref));
+ strcpy(refpath, mkpath("refs/%s", refname));
return lock_ref_sha1_basic(refpath, old_sha1, 0, NULL);
}
-struct ref_lock *lock_any_ref_for_update(const char *ref, const unsigned char *old_sha1, int flags)
+struct ref_lock *lock_any_ref_for_update(const char *refname,
+ const unsigned char *old_sha1, int flags)
{
- if (check_refname_format(ref, REFNAME_ALLOW_ONELEVEL))
+ if (check_refname_format(refname, REFNAME_ALLOW_ONELEVEL))
return NULL;
- return lock_ref_sha1_basic(ref, old_sha1, flags, NULL);
+ return lock_ref_sha1_basic(refname, old_sha1, flags, NULL);
}
static struct lock_file packlock;
static int repack_without_ref(const char *refname)
{
struct ref_array *packed;
- struct ref_entry *ref;
int fd, i;
- packed = get_packed_refs(NULL);
- ref = search_ref_array(packed, refname);
- if (ref == NULL)
+ packed = get_packed_refs(get_ref_cache(NULL));
+ if (search_ref_array(packed, refname) == NULL)
return 0;
fd = hold_lock_file_for_update(&packlock, git_path("packed-refs"), 0);
if (fd < 0) {
for (i = 0; i < packed->nr; i++) {
char line[PATH_MAX + 100];
int len;
-
- ref = packed->refs[i];
+ struct ref_entry *ref = packed->refs[i];
if (!strcmp(refname, ref->name))
continue;
*/
#define TMP_RENAMED_LOG "logs/refs/.tmp-renamed-log"
-int rename_ref(const char *oldref, const char *newref, const char *logmsg)
+int rename_ref(const char *oldrefname, const char *newrefname, const char *logmsg)
{
unsigned char sha1[20], orig_sha1[20];
int flag = 0, logmoved = 0;
struct ref_lock *lock;
struct stat loginfo;
- int log = !lstat(git_path("logs/%s", oldref), &loginfo);
+ int log = !lstat(git_path("logs/%s", oldrefname), &loginfo);
const char *symref = NULL;
+ struct ref_cache *refs = get_ref_cache(NULL);
if (log && S_ISLNK(loginfo.st_mode))
- return error("reflog for %s is a symlink", oldref);
+ return error("reflog for %s is a symlink", oldrefname);
- symref = resolve_ref_unsafe(oldref, orig_sha1, 1, &flag);
+ symref = resolve_ref_unsafe(oldrefname, orig_sha1, 1, &flag);
if (flag & REF_ISSYMREF)
return error("refname %s is a symbolic ref, renaming it is not supported",
- oldref);
+ oldrefname);
if (!symref)
- return error("refname %s not found", oldref);
+ return error("refname %s not found", oldrefname);
- if (!is_refname_available(newref, oldref, get_packed_refs(NULL), 0))
+ if (!is_refname_available(newrefname, oldrefname, get_packed_refs(refs)))
return 1;
- if (!is_refname_available(newref, oldref, get_loose_refs(NULL), 0))
+ if (!is_refname_available(newrefname, oldrefname, get_loose_refs(refs)))
return 1;
- if (log && rename(git_path("logs/%s", oldref), git_path(TMP_RENAMED_LOG)))
+ if (log && rename(git_path("logs/%s", oldrefname), git_path(TMP_RENAMED_LOG)))
return error("unable to move logfile logs/%s to "TMP_RENAMED_LOG": %s",
- oldref, strerror(errno));
+ oldrefname, strerror(errno));
- if (delete_ref(oldref, orig_sha1, REF_NODEREF)) {
- error("unable to delete old %s", oldref);
+ if (delete_ref(oldrefname, orig_sha1, REF_NODEREF)) {
+ error("unable to delete old %s", oldrefname);
goto rollback;
}
- if (!read_ref_full(newref, sha1, 1, &flag) &&
- delete_ref(newref, sha1, REF_NODEREF)) {
+ if (!read_ref_full(newrefname, sha1, 1, &flag) &&
+ delete_ref(newrefname, sha1, REF_NODEREF)) {
if (errno==EISDIR) {
- if (remove_empty_directories(git_path("%s", newref))) {
- error("Directory not empty: %s", newref);
+ if (remove_empty_directories(git_path("%s", newrefname))) {
+ error("Directory not empty: %s", newrefname);
goto rollback;
}
} else {
- error("unable to delete existing %s", newref);
+ error("unable to delete existing %s", newrefname);
goto rollback;
}
}
- if (log && safe_create_leading_directories(git_path("logs/%s", newref))) {
- error("unable to create directory for %s", newref);
+ if (log && safe_create_leading_directories(git_path("logs/%s", newrefname))) {
+ error("unable to create directory for %s", newrefname);
goto rollback;
}
retry:
- if (log && rename(git_path(TMP_RENAMED_LOG), git_path("logs/%s", newref))) {
+ if (log && rename(git_path(TMP_RENAMED_LOG), git_path("logs/%s", newrefname))) {
if (errno==EISDIR || errno==ENOTDIR) {
/*
* rename(a, b) when b is an existing
* directory ought to result in ISDIR, but
* Solaris 5.8 gives ENOTDIR. Sheesh.
*/
- if (remove_empty_directories(git_path("logs/%s", newref))) {
- error("Directory not empty: logs/%s", newref);
+ if (remove_empty_directories(git_path("logs/%s", newrefname))) {
+ error("Directory not empty: logs/%s", newrefname);
goto rollback;
}
goto retry;
} else {
error("unable to move logfile "TMP_RENAMED_LOG" to logs/%s: %s",
- newref, strerror(errno));
+ newrefname, strerror(errno));
goto rollback;
}
}
logmoved = log;
- lock = lock_ref_sha1_basic(newref, NULL, 0, NULL);
+ lock = lock_ref_sha1_basic(newrefname, NULL, 0, NULL);
if (!lock) {
- error("unable to lock %s for update", newref);
+ error("unable to lock %s for update", newrefname);
goto rollback;
}
lock->force_write = 1;
hashcpy(lock->old_sha1, orig_sha1);
if (write_ref_sha1(lock, orig_sha1, logmsg)) {
- error("unable to write current sha1 into %s", newref);
+ error("unable to write current sha1 into %s", newrefname);
goto rollback;
}
return 0;
rollback:
- lock = lock_ref_sha1_basic(oldref, NULL, 0, NULL);
+ lock = lock_ref_sha1_basic(oldrefname, NULL, 0, NULL);
if (!lock) {
- error("unable to lock %s for rollback", oldref);
+ error("unable to lock %s for rollback", oldrefname);
goto rollbacklog;
}
flag = log_all_ref_updates;
log_all_ref_updates = 0;
if (write_ref_sha1(lock, orig_sha1, NULL))
- error("unable to write current sha1 into %s", oldref);
+ error("unable to write current sha1 into %s", oldrefname);
log_all_ref_updates = flag;
rollbacklog:
- if (logmoved && rename(git_path("logs/%s", newref), git_path("logs/%s", oldref)))
+ if (logmoved && rename(git_path("logs/%s", newrefname), git_path("logs/%s", oldrefname)))
error("unable to restore logfile %s from %s: %s",
- oldref, newref, strerror(errno));
+ oldrefname, newrefname, strerror(errno));
if (!logmoved && log &&
- rename(git_path(TMP_RENAMED_LOG), git_path("logs/%s", oldref)))
+ rename(git_path(TMP_RENAMED_LOG), git_path("logs/%s", oldrefname)))
error("unable to restore logfile %s from "TMP_RENAMED_LOG": %s",
- oldref, strerror(errno));
+ oldrefname, strerror(errno));
return 1;
}
return cp - buf;
}
-int log_ref_setup(const char *ref_name, char *logfile, int bufsize)
+int log_ref_setup(const char *refname, char *logfile, int bufsize)
{
int logfd, oflags = O_APPEND | O_WRONLY;
- git_snpath(logfile, bufsize, "logs/%s", ref_name);
+ git_snpath(logfile, bufsize, "logs/%s", refname);
if (log_all_ref_updates &&
- (!prefixcmp(ref_name, "refs/heads/") ||
- !prefixcmp(ref_name, "refs/remotes/") ||
- !prefixcmp(ref_name, "refs/notes/") ||
- !strcmp(ref_name, "HEAD"))) {
+ (!prefixcmp(refname, "refs/heads/") ||
+ !prefixcmp(refname, "refs/remotes/") ||
+ !prefixcmp(refname, "refs/notes/") ||
+ !strcmp(refname, "HEAD"))) {
if (safe_create_leading_directories(logfile) < 0)
return error("unable to create directory for %s",
logfile);
return 0;
}
-static int log_ref_write(const char *ref_name, const unsigned char *old_sha1,
+static int log_ref_write(const char *refname, const unsigned char *old_sha1,
const unsigned char *new_sha1, const char *msg)
{
int logfd, result, written, oflags = O_APPEND | O_WRONLY;
if (log_all_ref_updates < 0)
log_all_ref_updates = !is_bare_repository();
- result = log_ref_setup(ref_name, log_file, sizeof(log_file));
+ result = log_ref_setup(refname, log_file, sizeof(log_file));
if (result)
return result;
return xmemdupz(line, ep - line);
}
-int read_ref_at(const char *ref, unsigned long at_time, int cnt, unsigned char *sha1, char **msg, unsigned long *cutoff_time, int *cutoff_tz, int *cutoff_cnt)
+int read_ref_at(const char *refname, unsigned long at_time, int cnt,
+ unsigned char *sha1, char **msg,
+ unsigned long *cutoff_time, int *cutoff_tz, int *cutoff_cnt)
{
const char *logfile, *logdata, *logend, *rec, *lastgt, *lastrec;
char *tz_c;
void *log_mapped;
size_t mapsz;
- logfile = git_path("logs/%s", ref);
+ logfile = git_path("logs/%s", refname);
logfd = open(logfile, O_RDONLY, 0);
if (logfd < 0)
die_errno("Unable to read log '%s'", logfile);
return 1;
}
-int for_each_recent_reflog_ent(const char *ref, each_reflog_ent_fn fn, long ofs, void *cb_data)
+int for_each_recent_reflog_ent(const char *refname, each_reflog_ent_fn fn, long ofs, void *cb_data)
{
const char *logfile;
FILE *logfp;
struct strbuf sb = STRBUF_INIT;
int ret = 0;
- logfile = git_path("logs/%s", ref);
+ logfile = git_path("logs/%s", refname);
logfp = fopen(logfile, "r");
if (!logfp)
return -1;
return ret;
}
-int for_each_reflog_ent(const char *ref, each_reflog_ent_fn fn, void *cb_data)
+int for_each_reflog_ent(const char *refname, each_reflog_ent_fn fn, void *cb_data)
{
- return for_each_recent_reflog_ent(ref, fn, 0, cb_data);
+ return for_each_recent_reflog_ent(refname, fn, 0, cb_data);
}
static int do_for_each_reflog(const char *base, each_ref_fn fn, void *cb_data)
return;
}
-char *shorten_unambiguous_ref(const char *ref, int strict)
+char *shorten_unambiguous_ref(const char *refname, int strict)
{
int i;
static char **scanf_fmts;
/* bail out if there are no rules */
if (!nr_rules)
- return xstrdup(ref);
+ return xstrdup(refname);
- /* buffer for scanf result, at most ref must fit */
- short_name = xstrdup(ref);
+ /* buffer for scanf result, at most refname must fit */
+ short_name = xstrdup(refname);
/* skip first rule, it will always match */
for (i = nr_rules - 1; i > 0 ; --i) {
int rules_to_fail = i;
int short_name_len;
- if (1 != sscanf(ref, scanf_fmts[i], short_name))
+ if (1 != sscanf(refname, scanf_fmts[i], short_name))
continue;
short_name_len = strlen(short_name);
}
free(short_name);
- return xstrdup(ref);
+ return xstrdup(refname);
}
extern void warn_dangling_symref(FILE *fp, const char *msg_fmt, const char *refname);
+/*
+ * Add a reference to the in-memory packed reference cache. To actually
+ * write the reference to the packed-refs file, call pack_refs().
+ */
+extern void add_packed_ref(const char *refname, const unsigned char *sha1);
+
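A minimal sketch of how a caller might exercise the new add_packed_ref() API (illustration only, not part of the patch; it assumes the usual cache.h/refs.h declarations and uses a placeholder object name):

    unsigned char sha1[20];

    /* Seed the in-memory packed-ref cache with a known object name;
     * the hex string here is a placeholder, not a real object. */
    if (!get_sha1_hex("0123456789abcdef0123456789abcdef01234567", sha1))
            add_packed_ref("refs/heads/example", sha1);
    /* Nothing is written to disk until pack_refs() rewrites packed-refs. */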
/*
* Extra refs will be listed by for_each_ref() before any actual refs
* for the duration of this process or until clear_extra_refs() is
extern void clear_extra_refs(void);
extern int ref_exists(const char *);
-extern int peel_ref(const char *, unsigned char *);
+extern int peel_ref(const char *refname, unsigned char *sha1);
/** Locks a "refs/" ref returning the lock on success and NULL on failure. **/
-extern struct ref_lock *lock_ref_sha1(const char *ref, const unsigned char *old_sha1);
+extern struct ref_lock *lock_ref_sha1(const char *refname, const unsigned char *old_sha1);
/** Locks any ref (for 'HEAD' type refs). */
#define REF_NODEREF 0x01
-extern struct ref_lock *lock_any_ref_for_update(const char *ref, const unsigned char *old_sha1, int flags);
+extern struct ref_lock *lock_any_ref_for_update(const char *refname,
+ const unsigned char *old_sha1,
+ int flags);
/** Close the file descriptor owned by a lock and return the status */
extern int close_ref(struct ref_lock *lock);
int log_ref_setup(const char *ref_name, char *logfile, int bufsize);
/** Reads log for the value of ref during at_time. **/
-extern int read_ref_at(const char *ref, unsigned long at_time, int cnt, unsigned char *sha1, char **msg, unsigned long *cutoff_time, int *cutoff_tz, int *cutoff_cnt);
+extern int read_ref_at(const char *refname, unsigned long at_time, int cnt,
+ unsigned char *sha1, char **msg,
+ unsigned long *cutoff_time, int *cutoff_tz, int *cutoff_cnt);
/* iterate over reflog entries */
typedef int each_reflog_ent_fn(unsigned char *osha1, unsigned char *nsha1, const char *, unsigned long, int, const char *, void *);
-int for_each_reflog_ent(const char *ref, each_reflog_ent_fn fn, void *cb_data);
-int for_each_recent_reflog_ent(const char *ref, each_reflog_ent_fn fn, long, void *cb_data);
+int for_each_reflog_ent(const char *refname, each_reflog_ent_fn fn, void *cb_data);
+int for_each_recent_reflog_ent(const char *refname, each_reflog_ent_fn fn, long, void *cb_data);
/*
* Calls the specified function for each reflog file until it returns nonzero,
#define REFNAME_DOT_COMPONENT 4
/*
- * Return 0 iff ref has the correct format for a refname according to
- * the rules described in Documentation/git-check-ref-format.txt. If
- * REFNAME_ALLOW_ONELEVEL is set in flags, then accept one-level
+ * Return 0 iff refname has the correct format for a refname according
+ * to the rules described in Documentation/git-check-ref-format.txt.
+ * If REFNAME_ALLOW_ONELEVEL is set in flags, then accept one-level
* reference names. If REFNAME_REFSPEC_PATTERN is set in flags, then
* allow a "*" wildcard character in place of one of the name
* components. No leading or repeated slashes are accepted. If
* components to start with "." (but not a whole component equal to
* "." or "..").
*/
-extern int check_refname_format(const char *ref, int flags);
+extern int check_refname_format(const char *refname, int flags);
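As a rough illustration of these rules (not part of the patch, and assuming <assert.h> plus the flag macros above), the following checks are expected to hold:

    assert(check_refname_format("refs/heads/topic", 0) == 0);
    assert(check_refname_format("HEAD", 0) != 0);                        /* one-level name rejected... */
    assert(check_refname_format("HEAD", REFNAME_ALLOW_ONELEVEL) == 0);   /* ...unless explicitly allowed */
    assert(check_refname_format("refs/heads/*", REFNAME_REFSPEC_PATTERN) == 0);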
extern const char *prettify_refname(const char *refname);
-extern char *shorten_unambiguous_ref(const char *ref, int strict);
+extern char *shorten_unambiguous_ref(const char *refname, int strict);
/** rename ref, return 0 on success **/
extern int rename_ref(const char *oldref, const char *newref, const char *logmsg);
-/** resolve ref in nested "gitlink" repository */
-extern int resolve_gitlink_ref(const char *name, const char *refname, unsigned char *result);
+/**
+ * Resolve refname in the nested "gitlink" repository that is located
+ * at path. If the resolution is successful, return 0 and set sha1 to
+ * the name of the object; otherwise, return a non-zero value.
+ */
+extern int resolve_gitlink_ref(const char *path, const char *refname, unsigned char *sha1);
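A hypothetical caller might use this to peek at a submodule's HEAD; the path "sub" below is made up for illustration:

    unsigned char sha1[20];

    /* Ask the repository nested at "sub" what its HEAD points at. */
    if (!resolve_gitlink_ref("sub", "HEAD", sha1))
            printf("submodule HEAD is %s\n", sha1_to_hex(sha1));
    else
            warning("could not resolve HEAD in submodule 'sub'");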
/** lock a ref and then write its file */
enum action_on_err { MSG_ON_ERR, DIE_ON_ERR, QUIET_ON_ERR };
return err;
}
-static struct ref *parse_git_refs(struct discovery *heads)
+static struct ref *parse_git_refs(struct discovery *heads, int for_push)
{
struct ref *list = NULL;
struct async async;
if (start_async(&async))
die("cannot start thread to parse advertised refs");
- get_remote_heads(async.out, &list, 0, NULL);
+ get_remote_heads(async.out, &list,
+ for_push ? REF_NORMAL : 0, NULL);
close(async.out);
if (finish_async(&async))
die("ref parsing thread failed");
heads = discover_refs("git-upload-pack");
if (heads->proto_git)
- return parse_git_refs(heads);
+ return parse_git_refs(heads, for_push);
return parse_info_refs(heads);
}
argv[argc++] = "--thin";
if (options.dry_run)
argv[argc++] = "--dry-run";
- if (options.verbosity > 1)
+ if (options.verbosity == 0)
+ argv[argc++] = "--quiet";
+ else if (options.verbosity > 1)
argv[argc++] = "--verbose";
argv[argc++] = url;
for (i = 0; i < nr_spec; i++)
static void parse_push(struct strbuf *buf)
{
char **specs = NULL;
- int alloc_spec = 0, nr_spec = 0, i;
+ int alloc_spec = 0, nr_spec = 0, i, ret;
do {
if (!prefixcmp(buf->buf, "push ")) {
break;
} while (1);
- if (push(nr_spec, specs))
- exit(128); /* error already reported */
-
+ ret = push(nr_spec, specs);
printf("\n");
fflush(stdout);
+ if (ret)
+ exit(128); /* error already reported */
+
free_specs:
for (i = 0; i < nr_spec; i++)
free(specs[i]);
void mark_parents_uninteresting(struct commit *commit)
{
- struct commit_list *parents = commit->parents;
+ struct commit_list *parents = NULL, *l;
+
+ for (l = commit->parents; l; l = l->next)
+ commit_list_insert(l->item, &parents);
while (parents) {
struct commit *commit = parents->item;
- if (!(commit->object.flags & UNINTERESTING)) {
+ l = parents;
+ parents = parents->next;
+ free(l);
+
+ while (commit) {
+ /*
+ * A missing commit is ok iff its parent is marked
+ * uninteresting.
+ *
+ * We just mark such a thing parsed, so that when
+ * it is popped next time around, we won't be trying
+ * to parse it and get an error.
+ */
+ if (!has_sha1_file(commit->object.sha1))
+ commit->object.parsed = 1;
+
+ if (commit->object.flags & UNINTERESTING)
+ break;
+
commit->object.flags |= UNINTERESTING;
/*
* wasn't uninteresting), in which case we need
* to mark its parents recursively too..
*/
- if (commit->parents)
- mark_parents_uninteresting(commit);
- }
+ if (!commit->parents)
+ break;
- /*
- * A missing commit is ok iff its parent is marked
- * uninteresting.
- *
- * We just mark such a thing parsed, so that when
- * it is popped next time around, we won't be trying
- * to parse it and get an error.
- */
- if (!has_sha1_file(commit->object.sha1))
- commit->object.parsed = 1;
- parents = parents->next;
+ for (l = commit->parents->next; l; l = l->next)
+ commit_list_insert(l->item, &parents);
+ commit = commit->parents->item;
+ }
}
}
static void try_to_simplify_commit(struct rev_info *revs, struct commit *commit)
{
struct commit_list **pp, *parent;
- int tree_changed = 0, tree_same = 0;
+ int tree_changed = 0, tree_same = 0, nth_parent = 0;
/*
* If we don't do pruning, everything is interesting
while ((parent = *pp) != NULL) {
struct commit *p = parent->item;
+ /*
+ * Do not compare with later parents when we care only about
+ * the first parent chain, in order to avoid derailing the
+ * traversal to follow a side branch that brought everything
+ * in the path we are limited to by the pathspec.
+ */
+ if (revs->first_parent_only && nth_parent++)
+ break;
if (parse_commit(p) < 0)
die("cannot simplify commit %s (because of %s)",
sha1_to_hex(commit->object.sha1),
revs->show_notes = 1;
revs->show_notes_given = 1;
revs->notes_opt.use_default_notes = 1;
+ } else if (!strcmp(arg, "--show-signature")) {
+ revs->show_signature = 1;
} else if (!prefixcmp(arg, "--show-notes=") ||
!prefixcmp(arg, "--notes=")) {
struct strbuf buf = STRBUF_INIT;
show_merge:1,
show_notes:1,
show_notes_given:1,
+ show_signature:1,
pretty_given:1,
abbrev_commit:1,
abbrev_commit_given:1,
#include "cache.h"
#include "run-command.h"
#include "exec_cmd.h"
+#include "sigchain.h"
#include "argv-array.h"
+struct child_to_clean {
+ pid_t pid;
+ struct child_to_clean *next;
+};
+static struct child_to_clean *children_to_clean;
+static int installed_child_cleanup_handler;
+
+static void cleanup_children(int sig)
+{
+ while (children_to_clean) {
+ struct child_to_clean *p = children_to_clean;
+ children_to_clean = p->next;
+ kill(p->pid, sig);
+ free(p);
+ }
+}
+
+static void cleanup_children_on_signal(int sig)
+{
+ cleanup_children(sig);
+ sigchain_pop(sig);
+ raise(sig);
+}
+
+static void cleanup_children_on_exit(void)
+{
+ cleanup_children(SIGTERM);
+}
+
+static void mark_child_for_cleanup(pid_t pid)
+{
+ struct child_to_clean *p = xmalloc(sizeof(*p));
+ p->pid = pid;
+ p->next = children_to_clean;
+ children_to_clean = p;
+
+ if (!installed_child_cleanup_handler) {
+ atexit(cleanup_children_on_exit);
+ sigchain_push_common(cleanup_children_on_signal);
+ installed_child_cleanup_handler = 1;
+ }
+}
+
+static void clear_child_for_cleanup(pid_t pid)
+{
+ struct child_to_clean **last, *p;
+
+ last = &children_to_clean;
+ for (p = children_to_clean; p; p = p->next) {
+ if (p->pid == pid) {
+ *last = p->next;
+ free(p);
+ return;
+ }
+ }
+}
+
static inline void close_pair(int fd[2])
{
close(fd[0]);
} else {
error("waitpid is confused (%s)", argv0);
}
+
+ clear_child_for_cleanup(pid);
+
errno = failed_errno;
return code;
}
if (cmd->pid < 0)
error("cannot fork() for %s: %s", cmd->argv[0],
strerror(failed_errno = errno));
+ else if (cmd->clean_on_exit)
+ mark_child_for_cleanup(cmd->pid);
/*
* Wait for child's execvp. If the execvp succeeds (or if fork()
cmd->pid = -1;
}
close(notify_pipe[0]);
+
}
#else
{
failed_errno = errno;
if (cmd->pid < 0 && (!cmd->silent_exec_failure || errno != ENOENT))
error("cannot spawn %s: %s", cmd->argv[0], strerror(errno));
+ if (cmd->clean_on_exit && cmd->pid >= 0)
+ mark_child_for_cleanup(cmd->pid);
if (cmd->env)
free_environ(env);
cmd->stdout_to_stderr = opt & RUN_COMMAND_STDOUT_TO_STDERR ? 1 : 0;
cmd->silent_exec_failure = opt & RUN_SILENT_EXEC_FAILURE ? 1 : 0;
cmd->use_shell = opt & RUN_USING_SHELL ? 1 : 0;
+ cmd->clean_on_exit = opt & RUN_CLEAN_ON_EXIT ? 1 : 0;
}
int run_command_v_opt(const char **argv, int opt)
exit(!!async->proc(proc_in, proc_out, async->data));
}
+ mark_child_for_cleanup(async->pid);
+
if (need_in)
close(fdin[0]);
else if (async->in)
unsigned silent_exec_failure:1;
unsigned stdout_to_stderr:1;
unsigned use_shell:1;
+ unsigned clean_on_exit:1;
void (*preexec_cb)(void);
};
#define RUN_COMMAND_STDOUT_TO_STDERR 4
#define RUN_SILENT_EXEC_FAILURE 8
#define RUN_USING_SHELL 16
+#define RUN_CLEAN_ON_EXIT 32
int run_command_v_opt(const char **argv, int opt);
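The new RUN_CLEAN_ON_EXIT flag (and the clean_on_exit field it sets) asks run-command to terminate the child if the parent exits or dies from a signal while the child is still running. A minimal sketch, with an arbitrary long-running helper standing in for a real command:

    const char *argv[] = { "sleep", "60", NULL };

    /*
     * If this process exits or receives a fatal signal while waiting,
     * the handlers installed by mark_child_for_cleanup() also send the
     * signal (or SIGTERM on plain exit) to the child.
     */
    run_command_v_opt(argv, RUN_CLEAN_ON_EXIT);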
/*
#include "cache.h"
#include "sequencer.h"
-#include "strbuf.h"
#include "dir.h"
+#include "object.h"
+#include "commit.h"
+#include "tag.h"
+#include "run-command.h"
+#include "exec_cmd.h"
+#include "utf8.h"
+#include "cache-tree.h"
+#include "diff.h"
+#include "revision.h"
+#include "rerere.h"
+#include "merge-recursive.h"
+#include "refs.h"
+
+#define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
void remove_sequencer_state(void)
{
remove_dir_recursively(&seq_dir, 0);
strbuf_release(&seq_dir);
}
+
+static const char *action_name(const struct replay_opts *opts)
+{
+ return opts->action == REPLAY_REVERT ? "revert" : "cherry-pick";
+}
+
+static char *get_encoding(const char *message);
+
+struct commit_message {
+ char *parent_label;
+ const char *label;
+ const char *subject;
+ char *reencoded_message;
+ const char *message;
+};
+
+static int get_message(struct commit *commit, struct commit_message *out)
+{
+ const char *encoding;
+ const char *abbrev, *subject;
+ int abbrev_len, subject_len;
+ char *q;
+
+ if (!commit->buffer)
+ return -1;
+ encoding = get_encoding(commit->buffer);
+ if (!encoding)
+ encoding = "UTF-8";
+ if (!git_commit_encoding)
+ git_commit_encoding = "UTF-8";
+
+ out->reencoded_message = NULL;
+ out->message = commit->buffer;
+ if (strcmp(encoding, git_commit_encoding))
+ out->reencoded_message = reencode_string(commit->buffer,
+ git_commit_encoding, encoding);
+ if (out->reencoded_message)
+ out->message = out->reencoded_message;
+
+ abbrev = find_unique_abbrev(commit->object.sha1, DEFAULT_ABBREV);
+ abbrev_len = strlen(abbrev);
+
+ subject_len = find_commit_subject(out->message, &subject);
+
+ out->parent_label = xmalloc(strlen("parent of ") + abbrev_len +
+ strlen("... ") + subject_len + 1);
+ q = out->parent_label;
+ q = mempcpy(q, "parent of ", strlen("parent of "));
+ out->label = q;
+ q = mempcpy(q, abbrev, abbrev_len);
+ q = mempcpy(q, "... ", strlen("... "));
+ out->subject = q;
+ q = mempcpy(q, subject, subject_len);
+ *q = '\0';
+ return 0;
+}
+
+static void free_message(struct commit_message *msg)
+{
+ free(msg->parent_label);
+ free(msg->reencoded_message);
+}
+
+static char *get_encoding(const char *message)
+{
+ const char *p = message, *eol;
+
+ while (*p && *p != '\n') {
+ for (eol = p + 1; *eol && *eol != '\n'; eol++)
+ ; /* do nothing */
+ if (!prefixcmp(p, "encoding ")) {
+ char *result = xmalloc(eol - 8 - p);
+ strlcpy(result, p + 9, eol - 8 - p);
+ return result;
+ }
+ p = eol;
+ if (*p == '\n')
+ p++;
+ }
+ return NULL;
+}
+
+static void write_cherry_pick_head(struct commit *commit, const char *pseudoref)
+{
+ const char *filename;
+ int fd;
+ struct strbuf buf = STRBUF_INIT;
+
+ strbuf_addf(&buf, "%s\n", sha1_to_hex(commit->object.sha1));
+
+ filename = git_path("%s", pseudoref);
+ fd = open(filename, O_WRONLY | O_CREAT, 0666);
+ if (fd < 0)
+ die_errno(_("Could not open '%s' for writing"), filename);
+ if (write_in_full(fd, buf.buf, buf.len) != buf.len || close(fd))
+ die_errno(_("Could not write to '%s'"), filename);
+ strbuf_release(&buf);
+}
+
+static void print_advice(int show_hint)
+{
+ char *msg = getenv("GIT_CHERRY_PICK_HELP");
+
+ if (msg) {
+ fprintf(stderr, "%s\n", msg);
+ /*
+ * A conflict has occurred but the porcelain
+ * (typically rebase --interactive) wants to take care
+ * of the commit itself so remove CHERRY_PICK_HEAD
+ */
+ unlink(git_path("CHERRY_PICK_HEAD"));
+ return;
+ }
+
+ if (show_hint)
+ advise(_("after resolving the conflicts, mark the corrected paths\n"
+ "with 'git add <paths>' or 'git rm <paths>'\n"
+ "and commit the result with 'git commit'"));
+}
+
+static void write_message(struct strbuf *msgbuf, const char *filename)
+{
+ static struct lock_file msg_file;
+
+ int msg_fd = hold_lock_file_for_update(&msg_file, filename,
+ LOCK_DIE_ON_ERROR);
+ if (write_in_full(msg_fd, msgbuf->buf, msgbuf->len) < 0)
+ die_errno(_("Could not write to %s"), filename);
+ strbuf_release(msgbuf);
+ if (commit_lock_file(&msg_file) < 0)
+ die(_("Error wrapping up %s"), filename);
+}
+
+static struct tree *empty_tree(void)
+{
+ return lookup_tree((const unsigned char *)EMPTY_TREE_SHA1_BIN);
+}
+
+static int error_dirty_index(struct replay_opts *opts)
+{
+ if (read_cache_unmerged())
+ return error_resolve_conflict(action_name(opts));
+
+ /* Different translation strings for cherry-pick and revert */
+ if (opts->action == REPLAY_PICK)
+ error(_("Your local changes would be overwritten by cherry-pick."));
+ else
+ error(_("Your local changes would be overwritten by revert."));
+
+ if (advice_commit_before_merge)
+ advise(_("Commit your changes or stash them to proceed."));
+ return -1;
+}
+
+static int fast_forward_to(const unsigned char *to, const unsigned char *from)
+{
+ struct ref_lock *ref_lock;
+
+ read_cache();
+ if (checkout_fast_forward(from, to))
+ exit(1); /* the callee should have complained already */
+ ref_lock = lock_any_ref_for_update("HEAD", from, 0);
+ return write_ref_sha1(ref_lock, to, "cherry-pick");
+}
+
+static int do_recursive_merge(struct commit *base, struct commit *next,
+ const char *base_label, const char *next_label,
+ unsigned char *head, struct strbuf *msgbuf,
+ struct replay_opts *opts)
+{
+ struct merge_options o;
+ struct tree *result, *next_tree, *base_tree, *head_tree;
+ int clean, index_fd;
+ const char **xopt;
+ static struct lock_file index_lock;
+
+ index_fd = hold_locked_index(&index_lock, 1);
+
+ read_cache();
+
+ init_merge_options(&o);
+ o.ancestor = base ? base_label : "(empty tree)";
+ o.branch1 = "HEAD";
+ o.branch2 = next ? next_label : "(empty tree)";
+
+ head_tree = parse_tree_indirect(head);
+ next_tree = next ? next->tree : empty_tree();
+ base_tree = base ? base->tree : empty_tree();
+
+ for (xopt = opts->xopts; xopt != opts->xopts + opts->xopts_nr; xopt++)
+ parse_merge_opt(&o, *xopt);
+
+ clean = merge_trees(&o,
+ head_tree,
+ next_tree, base_tree, &result);
+
+ if (active_cache_changed &&
+ (write_cache(index_fd, active_cache, active_nr) ||
+ commit_locked_index(&index_lock)))
+ /* TRANSLATORS: %s will be "revert" or "cherry-pick" */
+ die(_("%s: Unable to write new index file"), action_name(opts));
+ rollback_lock_file(&index_lock);
+
+ if (!clean) {
+ int i;
+ strbuf_addstr(msgbuf, "\nConflicts:\n\n");
+ for (i = 0; i < active_nr;) {
+ struct cache_entry *ce = active_cache[i++];
+ if (ce_stage(ce)) {
+ strbuf_addch(msgbuf, '\t');
+ strbuf_addstr(msgbuf, ce->name);
+ strbuf_addch(msgbuf, '\n');
+ while (i < active_nr && !strcmp(ce->name,
+ active_cache[i]->name))
+ i++;
+ }
+ }
+ }
+
+ return !clean;
+}
+
+/*
+ * If we are cherry-pick, and if the merge did not result in
+ * hand-editing, we will hit this commit and inherit the original
+ * author date and name.
+ * If we are revert, or if our cherry-pick results in a hand merge,
+ * we had better say that the current user is responsible for that.
+ */
+static int run_git_commit(const char *defmsg, struct replay_opts *opts)
+{
+ /* 6 is max possible length of our args array including NULL */
+ const char *args[6];
+ int i = 0;
+
+ args[i++] = "commit";
+ args[i++] = "-n";
+ if (opts->signoff)
+ args[i++] = "-s";
+ if (!opts->edit) {
+ args[i++] = "-F";
+ args[i++] = defmsg;
+ }
+ args[i] = NULL;
+
+ return run_command_v_opt(args, RUN_GIT_CMD);
+}
+
+static int do_pick_commit(struct commit *commit, struct replay_opts *opts)
+{
+ unsigned char head[20];
+ struct commit *base, *next, *parent;
+ const char *base_label, *next_label;
+ struct commit_message msg = { NULL, NULL, NULL, NULL, NULL };
+ char *defmsg = NULL;
+ struct strbuf msgbuf = STRBUF_INIT;
+ int res;
+
+ if (opts->no_commit) {
+ /*
+ * We do not intend to commit immediately. We just want to
+ * merge the differences in, so let's compute the tree
+ * that represents the "current" state for merge-recursive
+ * to work on.
+ */
+ if (write_cache_as_tree(head, 0, NULL))
+ die (_("Your index file is unmerged."));
+ } else {
+ if (get_sha1("HEAD", head))
+ return error(_("You do not have a valid HEAD"));
+ if (index_differs_from("HEAD", 0))
+ return error_dirty_index(opts);
+ }
+ discard_cache();
+
+ if (!commit->parents) {
+ parent = NULL;
+ }
+ else if (commit->parents->next) {
+ /* Reverting or cherry-picking a merge commit */
+ int cnt;
+ struct commit_list *p;
+
+ if (!opts->mainline)
+ return error(_("Commit %s is a merge but no -m option was given."),
+ sha1_to_hex(commit->object.sha1));
+
+ for (cnt = 1, p = commit->parents;
+ cnt != opts->mainline && p;
+ cnt++)
+ p = p->next;
+ if (cnt != opts->mainline || !p)
+ return error(_("Commit %s does not have parent %d"),
+ sha1_to_hex(commit->object.sha1), opts->mainline);
+ parent = p->item;
+ } else if (0 < opts->mainline)
+ return error(_("Mainline was specified but commit %s is not a merge."),
+ sha1_to_hex(commit->object.sha1));
+ else
+ parent = commit->parents->item;
+
+ if (opts->allow_ff && parent && !hashcmp(parent->object.sha1, head))
+ return fast_forward_to(commit->object.sha1, head);
+
+ if (parent && parse_commit(parent) < 0)
+ /* TRANSLATORS: The first %s will be "revert" or
+ "cherry-pick", the second %s a SHA1 */
+ return error(_("%s: cannot parse parent commit %s"),
+ action_name(opts), sha1_to_hex(parent->object.sha1));
+
+ if (get_message(commit, &msg) != 0)
+ return error(_("Cannot get commit message for %s"),
+ sha1_to_hex(commit->object.sha1));
+
+ /*
+ * "commit" is an existing commit. We would want to apply
+ * the difference it introduces since its first parent "prev"
+ * on top of the current HEAD if we are cherry-pick. Or the
+ * reverse of it if we are revert.
+ */
+
+ defmsg = git_pathdup("MERGE_MSG");
+
+ if (opts->action == REPLAY_REVERT) {
+ base = commit;
+ base_label = msg.label;
+ next = parent;
+ next_label = msg.parent_label;
+ strbuf_addstr(&msgbuf, "Revert \"");
+ strbuf_addstr(&msgbuf, msg.subject);
+ strbuf_addstr(&msgbuf, "\"\n\nThis reverts commit ");
+ strbuf_addstr(&msgbuf, sha1_to_hex(commit->object.sha1));
+
+ if (commit->parents && commit->parents->next) {
+ strbuf_addstr(&msgbuf, ", reversing\nchanges made to ");
+ strbuf_addstr(&msgbuf, sha1_to_hex(parent->object.sha1));
+ }
+ strbuf_addstr(&msgbuf, ".\n");
+ } else {
+ const char *p;
+
+ base = parent;
+ base_label = msg.parent_label;
+ next = commit;
+ next_label = msg.label;
+
+ /*
+ * Append the commit log message to msgbuf; it starts
+ * after the tree, parent, author, committer
+ * information followed by "\n\n".
+ */
+ p = strstr(msg.message, "\n\n");
+ if (p) {
+ p += 2;
+ strbuf_addstr(&msgbuf, p);
+ }
+
+ if (opts->record_origin) {
+ strbuf_addstr(&msgbuf, "(cherry picked from commit ");
+ strbuf_addstr(&msgbuf, sha1_to_hex(commit->object.sha1));
+ strbuf_addstr(&msgbuf, ")\n");
+ }
+ }
+
+ if (!opts->strategy || !strcmp(opts->strategy, "recursive") || opts->action == REPLAY_REVERT) {
+ res = do_recursive_merge(base, next, base_label, next_label,
+ head, &msgbuf, opts);
+ write_message(&msgbuf, defmsg);
+ } else {
+ struct commit_list *common = NULL;
+ struct commit_list *remotes = NULL;
+
+ write_message(&msgbuf, defmsg);
+
+ commit_list_insert(base, &common);
+ commit_list_insert(next, &remotes);
+ res = try_merge_command(opts->strategy, opts->xopts_nr, opts->xopts,
+ common, sha1_to_hex(head), remotes);
+ free_commit_list(common);
+ free_commit_list(remotes);
+ }
+
+ /*
+ * If the merge was clean or if it failed due to conflict, we write
+ * CHERRY_PICK_HEAD for the subsequent invocation of commit to use.
+ * However, if the merge did not even start, then we don't want to
+ * write it at all.
+ */
+ if (opts->action == REPLAY_PICK && !opts->no_commit && (res == 0 || res == 1))
+ write_cherry_pick_head(commit, "CHERRY_PICK_HEAD");
+ if (opts->action == REPLAY_REVERT && ((opts->no_commit && res == 0) || res == 1))
+ write_cherry_pick_head(commit, "REVERT_HEAD");
+
+ if (res) {
+ error(opts->action == REPLAY_REVERT
+ ? _("could not revert %s... %s")
+ : _("could not apply %s... %s"),
+ find_unique_abbrev(commit->object.sha1, DEFAULT_ABBREV),
+ msg.subject);
+ print_advice(res == 1);
+ rerere(opts->allow_rerere_auto);
+ } else {
+ if (!opts->no_commit)
+ res = run_git_commit(defmsg, opts);
+ }
+
+ free_message(&msg);
+ free(defmsg);
+
+ return res;
+}
+
+static void prepare_revs(struct replay_opts *opts)
+{
+ if (opts->action != REPLAY_REVERT)
+ opts->revs->reverse ^= 1;
+
+ if (prepare_revision_walk(opts->revs))
+ die(_("revision walk setup failed"));
+
+ if (!opts->revs->commits)
+ die(_("empty commit set passed"));
+}
+
+static void read_and_refresh_cache(struct replay_opts *opts)
+{
+ static struct lock_file index_lock;
+ int index_fd = hold_locked_index(&index_lock, 0);
+ if (read_index_preload(&the_index, NULL) < 0)
+ die(_("git %s: failed to read the index"), action_name(opts));
+ refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL);
+ if (the_index.cache_changed) {
+ if (write_index(&the_index, index_fd) ||
+ commit_locked_index(&index_lock))
+ die(_("git %s: failed to refresh the index"), action_name(opts));
+ }
+ rollback_lock_file(&index_lock);
+}
+
+/*
+ * Append a commit to the end of the commit_list.
+ *
+ * next starts by pointing to the variable that holds the head of an
+ * empty commit_list, and is updated to point to the "next" field of
+ * the last item on the list as new commits are appended.
+ *
+ * Usage example:
+ *
+ * struct commit_list *list;
+ * struct commit_list **next = &list;
+ *
+ * next = commit_list_append(c1, next);
+ * next = commit_list_append(c2, next);
+ * assert(commit_list_count(list) == 2);
+ * return list;
+ */
+static struct commit_list **commit_list_append(struct commit *commit,
+ struct commit_list **next)
+{
+ struct commit_list *new = xmalloc(sizeof(struct commit_list));
+ new->item = commit;
+ *next = new;
+ new->next = NULL;
+ return &new->next;
+}
+
+static int format_todo(struct strbuf *buf, struct commit_list *todo_list,
+ struct replay_opts *opts)
+{
+ struct commit_list *cur = NULL;
+ const char *sha1_abbrev = NULL;
+ const char *action_str = opts->action == REPLAY_REVERT ? "revert" : "pick";
+ const char *subject;
+ int subject_len;
+
+ for (cur = todo_list; cur; cur = cur->next) {
+ sha1_abbrev = find_unique_abbrev(cur->item->object.sha1, DEFAULT_ABBREV);
+ subject_len = find_commit_subject(cur->item->buffer, &subject);
+ strbuf_addf(buf, "%s %s %.*s\n", action_str, sha1_abbrev,
+ subject_len, subject);
+ }
+ return 0;
+}
+
+static struct commit *parse_insn_line(char *bol, char *eol, struct replay_opts *opts)
+{
+ unsigned char commit_sha1[20];
+ enum replay_action action;
+ char *end_of_object_name;
+ int saved, status, padding;
+
+ if (!prefixcmp(bol, "pick")) {
+ action = REPLAY_PICK;
+ bol += strlen("pick");
+ } else if (!prefixcmp(bol, "revert")) {
+ action = REPLAY_REVERT;
+ bol += strlen("revert");
+ } else
+ return NULL;
+
+ /* Eat up extra spaces/ tabs before object name */
+ padding = strspn(bol, " \t");
+ if (!padding)
+ return NULL;
+ bol += padding;
+
+ end_of_object_name = bol + strcspn(bol, " \t\n");
+ saved = *end_of_object_name;
+ *end_of_object_name = '\0';
+ status = get_sha1(bol, commit_sha1);
+ *end_of_object_name = saved;
+
+ /*
+ * Verify that the action matches up with the one in
+ * opts; we don't support arbitrary instructions
+ */
+ if (action != opts->action) {
+ const char *action_str;
+ action_str = action == REPLAY_REVERT ? "revert" : "cherry-pick";
+ error(_("Cannot %s during a %s"), action_str, action_name(opts));
+ return NULL;
+ }
+
+ if (status < 0)
+ return NULL;
+
+ return lookup_commit_reference(commit_sha1);
+}
+
+static int parse_insn_buffer(char *buf, struct commit_list **todo_list,
+ struct replay_opts *opts)
+{
+ struct commit_list **next = todo_list;
+ struct commit *commit;
+ char *p = buf;
+ int i;
+
+ for (i = 1; *p; i++) {
+ char *eol = strchrnul(p, '\n');
+ commit = parse_insn_line(p, eol, opts);
+ if (!commit)
+ return error(_("Could not parse line %d."), i);
+ next = commit_list_append(commit, next);
+ p = *eol ? eol + 1 : eol;
+ }
+ if (!*todo_list)
+ return error(_("No commits parsed."));
+ return 0;
+}
+
+static void read_populate_todo(struct commit_list **todo_list,
+ struct replay_opts *opts)
+{
+ const char *todo_file = git_path(SEQ_TODO_FILE);
+ struct strbuf buf = STRBUF_INIT;
+ int fd, res;
+
+ fd = open(todo_file, O_RDONLY);
+ if (fd < 0)
+ die_errno(_("Could not open %s"), todo_file);
+ if (strbuf_read(&buf, fd, 0) < 0) {
+ close(fd);
+ strbuf_release(&buf);
+ die(_("Could not read %s."), todo_file);
+ }
+ close(fd);
+
+ res = parse_insn_buffer(buf.buf, todo_list, opts);
+ strbuf_release(&buf);
+ if (res)
+ die(_("Unusable instruction sheet: %s"), todo_file);
+}
+
+static int populate_opts_cb(const char *key, const char *value, void *data)
+{
+ struct replay_opts *opts = data;
+ int error_flag = 1;
+
+ if (!value)
+ error_flag = 0;
+ else if (!strcmp(key, "options.no-commit"))
+ opts->no_commit = git_config_bool_or_int(key, value, &error_flag);
+ else if (!strcmp(key, "options.edit"))
+ opts->edit = git_config_bool_or_int(key, value, &error_flag);
+ else if (!strcmp(key, "options.signoff"))
+ opts->signoff = git_config_bool_or_int(key, value, &error_flag);
+ else if (!strcmp(key, "options.record-origin"))
+ opts->record_origin = git_config_bool_or_int(key, value, &error_flag);
+ else if (!strcmp(key, "options.allow-ff"))
+ opts->allow_ff = git_config_bool_or_int(key, value, &error_flag);
+ else if (!strcmp(key, "options.mainline"))
+ opts->mainline = git_config_int(key, value);
+ else if (!strcmp(key, "options.strategy"))
+ git_config_string(&opts->strategy, key, value);
+ else if (!strcmp(key, "options.strategy-option")) {
+ ALLOC_GROW(opts->xopts, opts->xopts_nr + 1, opts->xopts_alloc);
+ opts->xopts[opts->xopts_nr++] = xstrdup(value);
+ } else
+ return error(_("Invalid key: %s"), key);
+
+ if (!error_flag)
+ return error(_("Invalid value for %s: %s"), key, value);
+
+ return 0;
+}
+
+static void read_populate_opts(struct replay_opts **opts_ptr)
+{
+ const char *opts_file = git_path(SEQ_OPTS_FILE);
+
+ if (!file_exists(opts_file))
+ return;
+ if (git_config_from_file(populate_opts_cb, opts_file, *opts_ptr) < 0)
+ die(_("Malformed options sheet: %s"), opts_file);
+}
+
+static void walk_revs_populate_todo(struct commit_list **todo_list,
+ struct replay_opts *opts)
+{
+ struct commit *commit;
+ struct commit_list **next;
+
+ prepare_revs(opts);
+
+ next = todo_list;
+ while ((commit = get_revision(opts->revs)))
+ next = commit_list_append(commit, next);
+}
+
+static int create_seq_dir(void)
+{
+ const char *seq_dir = git_path(SEQ_DIR);
+
+ if (file_exists(seq_dir)) {
+ error(_("a cherry-pick or revert is already in progress"));
+ advise(_("try \"git cherry-pick (--continue | --quit | --abort)\""));
+ return -1;
+ }
+ else if (mkdir(seq_dir, 0777) < 0)
+ die_errno(_("Could not create sequencer directory %s"), seq_dir);
+ return 0;
+}
+
+static void save_head(const char *head)
+{
+ const char *head_file = git_path(SEQ_HEAD_FILE);
+ static struct lock_file head_lock;
+ struct strbuf buf = STRBUF_INIT;
+ int fd;
+
+ fd = hold_lock_file_for_update(&head_lock, head_file, LOCK_DIE_ON_ERROR);
+ strbuf_addf(&buf, "%s\n", head);
+ if (write_in_full(fd, buf.buf, buf.len) < 0)
+ die_errno(_("Could not write to %s"), head_file);
+ if (commit_lock_file(&head_lock) < 0)
+ die(_("Error wrapping up %s."), head_file);
+}
+
+static int reset_for_rollback(const unsigned char *sha1)
+{
+ const char *argv[4]; /* reset --merge <arg> + NULL */
+ argv[0] = "reset";
+ argv[1] = "--merge";
+ argv[2] = sha1_to_hex(sha1);
+ argv[3] = NULL;
+ return run_command_v_opt(argv, RUN_GIT_CMD);
+}
+
+static int rollback_single_pick(void)
+{
+ unsigned char head_sha1[20];
+
+ if (!file_exists(git_path("CHERRY_PICK_HEAD")) &&
+ !file_exists(git_path("REVERT_HEAD")))
+ return error(_("no cherry-pick or revert in progress"));
+ if (read_ref_full("HEAD", head_sha1, 0, NULL))
+ return error(_("cannot resolve HEAD"));
+ if (is_null_sha1(head_sha1))
+ return error(_("cannot abort from a branch yet to be born"));
+ return reset_for_rollback(head_sha1);
+}
+
+static int sequencer_rollback(struct replay_opts *opts)
+{
+ const char *filename;
+ FILE *f;
+ unsigned char sha1[20];
+ struct strbuf buf = STRBUF_INIT;
+
+ filename = git_path(SEQ_HEAD_FILE);
+ f = fopen(filename, "r");
+ if (!f && errno == ENOENT) {
+ /*
+ * There is no multiple-cherry-pick in progress.
+ * If CHERRY_PICK_HEAD or REVERT_HEAD indicates
+ * a single-cherry-pick in progress, abort that.
+ */
+ return rollback_single_pick();
+ }
+ if (!f)
+ return error(_("cannot open %s: %s"), filename,
+ strerror(errno));
+ if (strbuf_getline(&buf, f, '\n')) {
+ error(_("cannot read %s: %s"), filename, ferror(f) ?
+ strerror(errno) : _("unexpected end of file"));
+ fclose(f);
+ goto fail;
+ }
+ fclose(f);
+ if (get_sha1_hex(buf.buf, sha1) || buf.buf[40] != '\0') {
+ error(_("stored pre-cherry-pick HEAD file '%s' is corrupt"),
+ filename);
+ goto fail;
+ }
+ if (reset_for_rollback(sha1))
+ goto fail;
+ remove_sequencer_state();
+ strbuf_release(&buf);
+ return 0;
+fail:
+ strbuf_release(&buf);
+ return -1;
+}
+
+static void save_todo(struct commit_list *todo_list, struct replay_opts *opts)
+{
+ const char *todo_file = git_path(SEQ_TODO_FILE);
+ static struct lock_file todo_lock;
+ struct strbuf buf = STRBUF_INIT;
+ int fd;
+
+ fd = hold_lock_file_for_update(&todo_lock, todo_file, LOCK_DIE_ON_ERROR);
+ if (format_todo(&buf, todo_list, opts) < 0)
+ die(_("Could not format %s."), todo_file);
+ if (write_in_full(fd, buf.buf, buf.len) < 0) {
+ strbuf_release(&buf);
+ die_errno(_("Could not write to %s"), todo_file);
+ }
+ if (commit_lock_file(&todo_lock) < 0) {
+ strbuf_release(&buf);
+ die(_("Error wrapping up %s."), todo_file);
+ }
+ strbuf_release(&buf);
+}
+
+static void save_opts(struct replay_opts *opts)
+{
+ const char *opts_file = git_path(SEQ_OPTS_FILE);
+
+ if (opts->no_commit)
+ git_config_set_in_file(opts_file, "options.no-commit", "true");
+ if (opts->edit)
+ git_config_set_in_file(opts_file, "options.edit", "true");
+ if (opts->signoff)
+ git_config_set_in_file(opts_file, "options.signoff", "true");
+ if (opts->record_origin)
+ git_config_set_in_file(opts_file, "options.record-origin", "true");
+ if (opts->allow_ff)
+ git_config_set_in_file(opts_file, "options.allow-ff", "true");
+ if (opts->mainline) {
+ struct strbuf buf = STRBUF_INIT;
+ strbuf_addf(&buf, "%d", opts->mainline);
+ git_config_set_in_file(opts_file, "options.mainline", buf.buf);
+ strbuf_release(&buf);
+ }
+ if (opts->strategy)
+ git_config_set_in_file(opts_file, "options.strategy", opts->strategy);
+ if (opts->xopts) {
+ int i;
+ for (i = 0; i < opts->xopts_nr; i++)
+ git_config_set_multivar_in_file(opts_file,
+ "options.strategy-option",
+ opts->xopts[i], "^$", 0);
+ }
+}
+
+static int pick_commits(struct commit_list *todo_list, struct replay_opts *opts)
+{
+ struct commit_list *cur;
+ int res;
+
+ setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
+ if (opts->allow_ff)
+ assert(!(opts->signoff || opts->no_commit ||
+ opts->record_origin || opts->edit));
+ read_and_refresh_cache(opts);
+
+ for (cur = todo_list; cur; cur = cur->next) {
+ save_todo(cur, opts);
+ res = do_pick_commit(cur->item, opts);
+ if (res)
+ return res;
+ }
+
+ /*
+ * Sequence of picks finished successfully; cleanup by
+ * removing the .git/sequencer directory
+ */
+ remove_sequencer_state();
+ return 0;
+}
+
+static int continue_single_pick(void)
+{
+ const char *argv[] = { "commit", NULL };
+
+ if (!file_exists(git_path("CHERRY_PICK_HEAD")) &&
+ !file_exists(git_path("REVERT_HEAD")))
+ return error(_("no cherry-pick or revert in progress"));
+ return run_command_v_opt(argv, RUN_GIT_CMD);
+}
+
+static int sequencer_continue(struct replay_opts *opts)
+{
+ struct commit_list *todo_list = NULL;
+
+ if (!file_exists(git_path(SEQ_TODO_FILE)))
+ return continue_single_pick();
+ read_populate_opts(&opts);
+ read_populate_todo(&todo_list, opts);
+
+ /* Verify that the conflict has been resolved */
+ if (file_exists(git_path("CHERRY_PICK_HEAD")) ||
+ file_exists(git_path("REVERT_HEAD"))) {
+ int ret = continue_single_pick();
+ if (ret)
+ return ret;
+ }
+ if (index_differs_from("HEAD", 0))
+ return error_dirty_index(opts);
+ todo_list = todo_list->next;
+ return pick_commits(todo_list, opts);
+}
+
+static int single_pick(struct commit *cmit, struct replay_opts *opts)
+{
+ setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
+ return do_pick_commit(cmit, opts);
+}
+
+int sequencer_pick_revisions(struct replay_opts *opts)
+{
+ struct commit_list *todo_list = NULL;
+ unsigned char sha1[20];
+
+ if (opts->subcommand == REPLAY_NONE)
+ assert(opts->revs);
+
+ read_and_refresh_cache(opts);
+
+ /*
+ * Decide what to do depending on the arguments; a fresh
+ * cherry-pick should be handled differently from an existing
+ * one that is being continued
+ */
+ if (opts->subcommand == REPLAY_REMOVE_STATE) {
+ remove_sequencer_state();
+ return 0;
+ }
+ if (opts->subcommand == REPLAY_ROLLBACK)
+ return sequencer_rollback(opts);
+ if (opts->subcommand == REPLAY_CONTINUE)
+ return sequencer_continue(opts);
+
+ /*
+ * If we were called as "git cherry-pick <commit>", just
+ * cherry-pick/revert it, set CHERRY_PICK_HEAD /
+ * REVERT_HEAD, and don't touch the sequencer state.
+ * This means it is possible to cherry-pick in the middle
+ * of a cherry-pick sequence.
+ */
+ if (opts->revs->cmdline.nr == 1 &&
+ opts->revs->cmdline.rev->whence == REV_CMD_REV &&
+ opts->revs->no_walk &&
+ !opts->revs->cmdline.rev->flags) {
+ struct commit *cmit;
+ if (prepare_revision_walk(opts->revs))
+ die(_("revision walk setup failed"));
+ cmit = get_revision(opts->revs);
+ if (!cmit || get_revision(opts->revs))
+ die("BUG: expected exactly one commit from walk");
+ return single_pick(cmit, opts);
+ }
+
+ /*
+ * Start a new cherry-pick/ revert sequence; but
+ * first, make sure that an existing one isn't in
+ * progress
+ */
+
+ walk_revs_populate_todo(&todo_list, opts);
+ if (create_seq_dir() < 0)
+ return -1;
+ if (get_sha1("HEAD", sha1)) {
+ if (opts->action == REPLAY_REVERT)
+ return error(_("Can't revert as initial commit"));
+ return error(_("Can't cherry-pick into empty head"));
+ }
+ save_head(sha1_to_hex(sha1));
+ save_opts(opts);
+ return pick_commits(todo_list, opts);
+}
#define SEQ_TODO_FILE "sequencer/todo"
#define SEQ_OPTS_FILE "sequencer/opts"
+enum replay_action {
+ REPLAY_REVERT,
+ REPLAY_PICK
+};
+
+enum replay_subcommand {
+ REPLAY_NONE,
+ REPLAY_REMOVE_STATE,
+ REPLAY_CONTINUE,
+ REPLAY_ROLLBACK
+};
+
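+/*
+ * Options for a single cherry-pick or revert invocation, filled in
+ * from the command line and consumed by sequencer_pick_revisions().
+ */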
+struct replay_opts {
+ enum replay_action action;
+ enum replay_subcommand subcommand;
+
+ /* Boolean options */
+ int edit;
+ int record_origin;
+ int no_commit;
+ int signoff;
+ int allow_ff;
+ int allow_rerere_auto;
+
+ int mainline;
+
+ /* Merge strategy */
+ const char *strategy;
+ const char **xopts;
+ size_t xopts_nr, xopts_alloc;
+
+ /* Only used by REPLAY_NONE */
+ struct rev_info *revs;
+};
+
/* Removes SEQ_DIR. */
extern void remove_sequencer_state(void);
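+/*
+ * Carry out the action and subcommand described by opts: pick or
+ * revert the revisions in opts->revs for REPLAY_NONE, or continue,
+ * roll back, or discard the saved sequencer state for the other
+ * subcommands.
+ */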
+int sequencer_pick_revisions(struct replay_opts *opts);
+
#endif
git_SHA_CTX c;
unsigned char parano_sha1[20];
char *filename;
- static char tmpfile[PATH_MAX];
+ static char tmp_file[PATH_MAX];
filename = sha1_file_name(sha1);
- fd = create_tmpfile(tmpfile, sizeof(tmpfile), filename);
+ fd = create_tmpfile(tmp_file, sizeof(tmp_file), filename);
if (fd < 0) {
if (errno == EACCES)
return error("insufficient permission for adding an object to repository database %s\n", get_object_directory());
else
- return error("unable to create temporary sha1 filename %s: %s\n", tmpfile, strerror(errno));
+ return error("unable to create temporary sha1 filename %s: %s\n", tmp_file, strerror(errno));
}
/* Set it up */
struct utimbuf utb;
utb.actime = mtime;
utb.modtime = mtime;
- if (utime(tmpfile, &utb) < 0)
+ if (utime(tmp_file, &utb) < 0)
warning("failed utime() on %s: %s",
- tmpfile, strerror(errno));
+ tmp_file, strerror(errno));
}
- return move_temp_to_file(tmpfile, filename);
+ return move_temp_to_file(tmp_file, filename);
}
int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *returnsha1)
}
-static void commit_need_pushing(struct commit *commit, struct commit_list *parent, int *needs_pushing)
+static void commit_need_pushing(struct commit *commit, int *needs_pushing)
{
- const unsigned char (*parents)[20];
- unsigned int i, n;
struct rev_info rev;
- n = commit_list_count(parent);
- parents = xmalloc(n * sizeof(*parents));
-
- for (i = 0; i < n; i++) {
- hashcpy((unsigned char *)(parents + i), parent->item->object.sha1);
- parent = parent->next;
- }
-
init_revisions(&rev, NULL);
rev.diffopt.output_format |= DIFF_FORMAT_CALLBACK;
rev.diffopt.format_callback = collect_submodules_from_diff;
rev.diffopt.format_callback_data = needs_pushing;
- diff_tree_combined(commit->object.sha1, parents, n, 1, &rev);
-
- free((void *)parents);
+ diff_tree_combined_merge(commit, 1, &rev);
}
int check_submodule_needs_pushing(unsigned char new_sha1[20], const char *remotes_name)
die("revision walk setup failed");
while ((commit = get_revision(&rev)) && !needs_pushing)
- commit_need_pushing(commit, commit->parents, &needs_pushing);
+ commit_need_pushing(commit, &needs_pushing);
free(sha1_copy);
strbuf_release(&remotes_arg);
# Shell quote;
SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
-T = $(wildcard t[0-9][0-9][0-9][0-9]-*.sh)
-TSVN = $(wildcard t91[0-9][0-9]-*.sh)
-TGITWEB = $(wildcard t95[0-9][0-9]-*.sh)
+T = $(sort $(wildcard t[0-9][0-9][0-9][0-9]-*.sh))
+TSVN = $(sort $(wildcard t91[0-9][0-9]-*.sh))
+TGITWEB = $(sort $(wildcard t95[0-9][0-9]-*.sh))
all: $(DEFAULT_TEST_TARGET)
...
'
+ - test_pause
+
+ This command is useful for writing and debugging tests and must be
+ removed before submitting. It halts the execution of the test and
+ spawns a shell in the trash directory. Exit the shell to continue
+ the test. Example:
+
+ test_expect_success 'test' '
+ git do-something >actual &&
+ test_pause &&
+ test_cmp expected actual
+ '
+
Prerequisites
-------------
--- /dev/null
+#!/bin/sh
+
+if test -z "$GIT_TEST_GIT_DAEMON"
+then
+ skip_all="git-daemon testing disabled (define GIT_TEST_GIT_DAEMON to enable)"
+ test_done
+fi
+
+LIB_GIT_DAEMON_PORT=${LIB_GIT_DAEMON_PORT-'8121'}
+
+GIT_DAEMON_PID=
+GIT_DAEMON_DOCUMENT_ROOT_PATH="$PWD"/repo
+GIT_DAEMON_URL=git://127.0.0.1:$LIB_GIT_DAEMON_PORT
+
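+# Usage sketch (mirrors the new git-daemon protocol test added later
+# in this series; the tests in between are up to the caller):
+#
+#	. ./test-lib.sh
+#	LIB_GIT_DAEMON_PORT=${LIB_GIT_DAEMON_PORT-5570}
+#	. "$TEST_DIRECTORY"/lib-git-daemon.sh
+#	start_git_daemon
+#	... tests fetching from "$GIT_DAEMON_URL" ...
+#	stop_git_daemon
+#	test_done
+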
+start_git_daemon() {
+ if test -n "$GIT_DAEMON_PID"
+ then
+ error "start_git_daemon already called"
+ fi
+
+ mkdir -p "$GIT_DAEMON_DOCUMENT_ROOT_PATH"
+
+ trap 'code=$?; stop_git_daemon; (exit $code); die' EXIT
+
+ say >&3 "Starting git daemon ..."
+ mkfifo git_daemon_output
+ git daemon --listen=127.0.0.1 --port="$LIB_GIT_DAEMON_PORT" \
+ --reuseaddr --verbose \
+ --base-path="$GIT_DAEMON_DOCUMENT_ROOT_PATH" \
+ "$@" "$GIT_DAEMON_DOCUMENT_ROOT_PATH" \
+ >&3 2>git_daemon_output &
+ GIT_DAEMON_PID=$!
+ {
+ read line
+ echo >&4 "$line"
+ cat >&4 &
+
+ # The daemon announces readiness with "[<pid>] Ready to rumble"
+ if test x"$(expr "$line" : "\[[0-9]*\] \(.*\)")" != x"Ready to rumble"
+ then
+ kill "$GIT_DAEMON_PID"
+ wait "$GIT_DAEMON_PID"
+ trap 'die' EXIT
+ error "git daemon failed to start"
+ fi
+ } <git_daemon_output
+}
+
+stop_git_daemon() {
+ if test -z "$GIT_DAEMON_PID"
+ then
+ return
+ fi
+
+ trap 'die' EXIT
+
+ # kill git-daemon child of git
+ say >&3 "Stopping git daemon ..."
+ kill "$GIT_DAEMON_PID"
+ wait "$GIT_DAEMON_PID" >&3 2>&4
+ ret=$?
+ # expect exit with status 143 = 128+15 for signal TERM=15
+ if test $ret -ne 143
+ then
+ error "git daemon exited with status: $ret"
+ fi
+ GIT_DAEMON_PID=
+ rm -f git_daemon_output
+}
(cd b && attr_check ../a/b/g a/b/g)
'
+test_expect_success 'prefixes are not confused with leading directories' '
+ attr_check a_plus/g unspecified &&
+ cat >expect <<-\EOF &&
+ a/g: test: a/g
+ a_plus/g: test: unspecified
+ EOF
+ git check-attr test a/g a_plus/g >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'core.attributesfile' '
attr_check global unspecified &&
git config core.attributesfile "$HOME/global-gitattributes" &&
# test-dump-cache-tree already verifies that all existing data is
# correct.
test_shallow_cache_tree () {
- echo "SHA " \
- "($(git ls-files|wc -l) entries, 0 subtrees)" >expect &&
+ printf "SHA (%d entries, 0 subtrees)\n" $(git ls-files|wc -l) >expect &&
cmp_cache_tree expect
}
test_invalid_cache_tree () {
echo "invalid (0 subtrees)" >expect &&
- echo "SHA #(ref) " \
- "($(git ls-files|wc -l) entries, 0 subtrees)" >>expect &&
+ printf "SHA #(ref) (%d entries, 0 subtrees)\n" $(git ls-files|wc -l) >>expect &&
cmp_cache_tree expect
}
test_commit added_in_topic each.txt in_topic
'
-test_must_fail git merge master
+test_expect_success 'git merge master' '
+ test_must_fail git merge master
+'
clean_branchnames () {
# Remove branch names after conflict lines
echo frotz >nitfol &&
git add rezrov &&
git add -N nitfol &&
- test_must_fail git commit
+ test_must_fail git commit -m initial
'
test_expect_success 'can commit with an unrelated i-t-a entry in index' '
git reset --hard &&
git checkout a^0 &&
- test_must_fail git cherry-pick -m 1 b &&
+ test_expect_code 128 git cherry-pick -m 1 b &&
git diff --exit-code a --
'
. ./test-lib.sh
+# Repeat first match 10 times
+_r10='\1\1\1\1\1\1\1\1\1\1'
+
pristine_detach () {
git cherry-pick --quit &&
git checkout -f "$1^0" &&
test_expect_success 'cherry-pick persists data on failure' '
pristine_detach initial &&
- test_must_fail git cherry-pick -s base..anotherpick &&
+ test_expect_code 1 git cherry-pick -s base..anotherpick &&
test_path_is_dir .git/sequencer &&
test_path_is_file .git/sequencer/head &&
test_path_is_file .git/sequencer/todo &&
test_expect_success 'cherry-pick persists opts correctly' '
pristine_detach initial &&
- test_must_fail git cherry-pick -s -m 1 --strategy=recursive -X patience -X ours base..anotherpick &&
+ test_expect_code 128 git cherry-pick -s -m 1 --strategy=recursive -X patience -X ours initial..anotherpick &&
test_path_is_dir .git/sequencer &&
test_path_is_file .git/sequencer/head &&
test_path_is_file .git/sequencer/todo &&
test_expect_success '--quit cleans up sequencer state' '
pristine_detach initial &&
- test_must_fail git cherry-pick base..picked &&
+ test_expect_code 1 git cherry-pick base..picked &&
git cherry-pick --quit &&
test_path_is_missing .git/sequencer
'
:000000 100644 OBJID OBJID A foo
:000000 100644 OBJID OBJID A unrelated
EOF
- test_must_fail git cherry-pick base..picked &&
+ test_expect_code 1 git cherry-pick base..picked &&
git cherry-pick --quit &&
test_path_is_missing .git/sequencer &&
test_must_fail git update-index --refresh &&
test_expect_success '--abort to cancel multiple cherry-pick' '
pristine_detach initial &&
- test_must_fail git cherry-pick base..anotherpick &&
+ test_expect_code 1 git cherry-pick base..anotherpick &&
git cherry-pick --abort &&
test_path_is_missing .git/sequencer &&
test_cmp_rev initial HEAD &&
test_expect_success '--abort to cancel single cherry-pick' '
pristine_detach initial &&
- test_must_fail git cherry-pick picked &&
+ test_expect_code 1 git cherry-pick picked &&
git cherry-pick --abort &&
test_path_is_missing .git/sequencer &&
test_cmp_rev initial HEAD &&
test_expect_success 'cherry-pick --abort to cancel multiple revert' '
pristine_detach anotherpick &&
- test_must_fail git revert base..picked &&
+ test_expect_code 1 git revert base..picked &&
git cherry-pick --abort &&
test_path_is_missing .git/sequencer &&
test_cmp_rev anotherpick HEAD &&
test_expect_success 'revert --abort works, too' '
pristine_detach anotherpick &&
- test_must_fail git revert base..picked &&
+ test_expect_code 1 git revert base..picked &&
git revert --abort &&
test_path_is_missing .git/sequencer &&
test_cmp_rev anotherpick HEAD
test_expect_success '--abort to cancel single revert' '
pristine_detach anotherpick &&
- test_must_fail git revert picked &&
+ test_expect_code 1 git revert picked &&
git revert --abort &&
test_path_is_missing .git/sequencer &&
test_cmp_rev anotherpick HEAD &&
test_expect_success '--abort keeps unrelated change, easy case' '
pristine_detach unrelatedpick &&
echo changed >expect &&
- test_must_fail git cherry-pick picked..yetanotherpick &&
+ test_expect_code 1 git cherry-pick picked..yetanotherpick &&
echo changed >unrelated &&
git cherry-pick --abort &&
test_cmp expect unrelated
test_expect_success '--abort refuses to clobber unrelated change, harder case' '
pristine_detach initial &&
echo changed >expect &&
- test_must_fail git cherry-pick base..anotherpick &&
+ test_expect_code 1 git cherry-pick base..anotherpick &&
echo changed >unrelated &&
test_must_fail git cherry-pick --abort &&
test_cmp expect unrelated &&
test_expect_success 'cherry-pick still writes sequencer state when one commit is left' '
pristine_detach initial &&
- test_must_fail git cherry-pick base..picked &&
+ test_expect_code 1 git cherry-pick base..picked &&
test_path_is_dir .git/sequencer &&
echo "resolved" >foo &&
git add foo &&
test_expect_success '--abort after last commit in sequence' '
pristine_detach initial &&
- test_must_fail git cherry-pick base..picked &&
+ test_expect_code 1 git cherry-pick base..picked &&
git cherry-pick --abort &&
test_path_is_missing .git/sequencer &&
test_cmp_rev initial HEAD &&
test_expect_success 'cherry-pick does not implicitly stomp an existing operation' '
pristine_detach initial &&
- test_must_fail git cherry-pick base..anotherpick &&
+ test_expect_code 1 git cherry-pick base..anotherpick &&
test-chmtime -v +0 .git/sequencer >expect &&
- test_must_fail git cherry-pick unrelatedpick &&
+ test_expect_code 128 git cherry-pick unrelatedpick &&
test-chmtime -v +0 .git/sequencer >actual &&
test_cmp expect actual
'
test_expect_success '--continue complains when no cherry-pick is in progress' '
pristine_detach initial &&
- test_must_fail git cherry-pick --continue
+ test_expect_code 128 git cherry-pick --continue
'
test_expect_success '--continue complains when there are unresolved conflicts' '
pristine_detach initial &&
- test_must_fail git cherry-pick base..anotherpick &&
- test_must_fail git cherry-pick --continue
+ test_expect_code 1 git cherry-pick base..anotherpick &&
+ test_expect_code 128 git cherry-pick --continue
'
test_expect_success '--continue of single cherry-pick' '
test_expect_success '--continue after resolving conflicts and committing' '
pristine_detach initial &&
- test_must_fail git cherry-pick base..anotherpick &&
+ test_expect_code 1 git cherry-pick base..anotherpick &&
echo "c" >foo &&
git add foo &&
git commit &&
test_expect_success '--continue respects opts' '
pristine_detach initial &&
- test_must_fail git cherry-pick -x base..anotherpick &&
+ test_expect_code 1 git cherry-pick -x base..anotherpick &&
echo "c" >foo &&
git add foo &&
git commit &&
test_expect_success '--signoff is not automatically propagated to resolved conflict' '
pristine_detach initial &&
- test_must_fail git cherry-pick --signoff base..anotherpick &&
+ test_expect_code 1 git cherry-pick --signoff base..anotherpick &&
echo "c" >foo &&
git add foo &&
git commit &&
test_expect_success 'malformed instruction sheet 1' '
pristine_detach initial &&
- test_must_fail git cherry-pick base..anotherpick &&
+ test_expect_code 1 git cherry-pick base..anotherpick &&
echo "resolved" >foo &&
git add foo &&
git commit &&
sed "s/pick /pick/" .git/sequencer/todo >new_sheet &&
cp new_sheet .git/sequencer/todo &&
- test_must_fail git cherry-pick --continue
+ test_expect_code 128 git cherry-pick --continue
'
test_expect_success 'malformed instruction sheet 2' '
pristine_detach initial &&
- test_must_fail git cherry-pick base..anotherpick &&
+ test_expect_code 1 git cherry-pick base..anotherpick &&
echo "resolved" >foo &&
git add foo &&
git commit &&
sed "s/pick/revert/" .git/sequencer/todo >new_sheet &&
cp new_sheet .git/sequencer/todo &&
- test_must_fail git cherry-pick --continue
+ test_expect_code 128 git cherry-pick --continue
'
test_expect_success 'empty commit set' '
test_expect_code 128 git cherry-pick base..base
'
+test_expect_success 'malformed instruction sheet 3' '
+ pristine_detach initial &&
+ test_expect_code 1 git cherry-pick base..anotherpick &&
+ echo "resolved" >foo &&
+ git add foo &&
+ git commit &&
+ sed "s/pick \([0-9a-f]*\)/pick $_r10/" .git/sequencer/todo >new_sheet &&
+ cp new_sheet .git/sequencer/todo &&
+ test_expect_code 128 git cherry-pick --continue
+'
+
+test_expect_success 'instruction sheet, fat-fingers version' '
+ pristine_detach initial &&
+ test_expect_code 1 git cherry-pick base..anotherpick &&
+ echo "c" >foo &&
+ git add foo &&
+ git commit &&
+ sed "s/pick \([0-9a-f]*\)/pick \1 /" .git/sequencer/todo >new_sheet &&
+ cp new_sheet .git/sequencer/todo &&
+ git cherry-pick --continue
+'
+
+test_expect_success 'commit descriptions in insn sheet are optional' '
+ pristine_detach initial &&
+ test_expect_code 1 git cherry-pick base..anotherpick &&
+ echo "c" >foo &&
+ git add foo &&
+ git commit &&
+ cut -d" " -f1,2 .git/sequencer/todo >new_sheet &&
+ cp new_sheet .git/sequencer/todo &&
+ git cherry-pick --continue &&
+ test_path_is_missing .git/sequencer &&
+ git rev-list HEAD >commits &&
+ test_line_count = 4 commits
+'
+
test_done
test z = "z$E"
'
+test_expect_failure 'UTF-16 refused because of NULs' '
+ echo UTF-16 >F &&
+ git commit -a -F "$TEST_DIRECTORY"/t3900/UTF-16.txt
+'
+
+
for H in ISO8859-1 eucJP ISO-2022-JP
do
test_expect_success "$H setup" '
test_cmp expect actual
'
+cat > expect << EOF
+diff --git a/HEAD b/HEAD
+new file mode 100644
+index 0000000..fe0cbee
+--- /dev/null
++++ b/HEAD
+@@ -0,0 +1 @@
++file-not-a-ref
+EOF
+
+test_expect_success 'stash where working directory contains "HEAD" file' '
+ git stash clear &&
+ git reset --hard &&
+ echo file-not-a-ref > HEAD &&
+ git add HEAD &&
+ test_tick &&
+ git stash &&
+ git diff-files --quiet &&
+ git diff-index --cached --quiet HEAD &&
+ test "$(git rev-parse stash^)" = "$(git rev-parse HEAD)" &&
+ git diff stash^..stash > output &&
+ test_cmp output expect
+'
+
test_done
mkdir dir &&
echo parent > dir/foo &&
echo dummy > bar &&
- git add bar dir/foo &&
+ echo committed > HEAD &&
+ git add bar dir/foo HEAD &&
git commit -m initial &&
test_tick &&
test_commit second dir/foo head &&
save_head
'
-# note: bar sorts before dir, so the first 'n' is always to skip 'bar'
+# note: order of files with unstaged changes: HEAD bar dir/foo
test_expect_success PERL 'saying "n" does nothing' '
+ set_state HEAD HEADfile_work HEADfile_index &&
set_state dir/foo work index &&
- (echo n; echo n) | test_must_fail git stash save -p &&
- verify_state dir/foo work index &&
- verify_saved_state bar
+ (echo n; echo n; echo n) | test_must_fail git stash save -p &&
+ verify_state HEAD HEADfile_work HEADfile_index &&
+ verify_saved_state bar &&
+ verify_state dir/foo work index
'
test_expect_success PERL 'git stash -p' '
- (echo n; echo y) | git stash save -p &&
- verify_state dir/foo head index &&
+ (echo y; echo n; echo y) | git stash save -p &&
+ verify_state HEAD committed HEADfile_index &&
verify_saved_state bar &&
+ verify_state dir/foo head index &&
git reset --hard &&
git stash apply &&
- verify_state dir/foo work head &&
- verify_state bar dummy dummy
+ verify_state HEAD HEADfile_work committed &&
+ verify_state bar dummy dummy &&
+ verify_state dir/foo work head
'
test_expect_success PERL 'git stash -p --no-keep-index' '
- set_state dir/foo work index &&
+ set_state HEAD HEADfile_work HEADfile_index &&
set_state bar bar_work bar_index &&
- (echo n; echo y) | git stash save -p --no-keep-index &&
- verify_state dir/foo head head &&
+ set_state dir/foo work index &&
+ (echo y; echo n; echo y) | git stash save -p --no-keep-index &&
+ verify_state HEAD committed committed &&
verify_state bar bar_work dummy &&
+ verify_state dir/foo head head &&
git reset --hard &&
git stash apply --index &&
- verify_state dir/foo work index &&
- verify_state bar dummy bar_index
+ verify_state HEAD HEADfile_work HEADfile_index &&
+ verify_state bar dummy bar_index &&
+ verify_state dir/foo work index
'
test_expect_success PERL 'git stash --no-keep-index -p' '
- set_state dir/foo work index &&
+ set_state HEAD HEADfile_work HEADfile_index &&
set_state bar bar_work bar_index &&
- (echo n; echo y) | git stash save --no-keep-index -p &&
+ set_state dir/foo work index &&
+ (echo y; echo n; echo y) | git stash save --no-keep-index -p &&
+ verify_state HEAD committed committed &&
verify_state dir/foo head head &&
verify_state bar bar_work dummy &&
git reset --hard &&
git stash apply --index &&
- verify_state dir/foo work index &&
- verify_state bar dummy bar_index
+ verify_state HEAD HEADfile_work HEADfile_index &&
+ verify_state bar dummy bar_index &&
+ verify_state dir/foo work index
'
test_expect_success PERL 'none of this moved HEAD' '
echo 3 > file &&
test_tick &&
echo 1 > file2 &&
+ echo 1 > HEAD &&
mkdir untracked &&
echo untracked >untracked/untracked &&
git stash --include-untracked &&
'
cat > expect.diff <<EOF
+diff --git a/HEAD b/HEAD
+new file mode 100644
+index 0000000..d00491f
+--- /dev/null
++++ b/HEAD
+@@ -0,0 +1 @@
++1
diff --git a/file2 b/file2
new file mode 100644
index 0000000..d00491f
+untracked
EOF
cat > expect.lstree <<EOF
+HEAD
file2
untracked
EOF
test_expect_success 'stash save --include-untracked stashed the untracked files' '
- test "!" -f file2 &&
- test ! -e untracked &&
- git diff HEAD stash^3 -- file2 untracked >actual &&
+ test_path_is_missing file2 &&
+ test_path_is_missing untracked &&
+ test_path_is_missing HEAD &&
+ git diff HEAD stash^3 -- HEAD file2 untracked >actual &&
test_cmp expect.diff actual &&
git ls-tree --name-only stash^3: >actual &&
test_cmp expect.lstree actual
cat > expect <<EOF
M file
+?? HEAD
?? actual
?? expect
?? file2
git reset > /dev/null
+# Must direct output somewhere it will not be considered an untracked file
test_expect_success 'stash save --include-untracked -q is quiet' '
echo 1 > file5 &&
- git stash save --include-untracked --quiet > output.out 2>&1 &&
- test ! -s output.out
+ git stash save --include-untracked --quiet > .git/stash-output.out 2>&1 &&
+ test_line_count = 0 .git/stash-output.out &&
+ rm -f .git/stash-output.out
'
test_expect_success 'stash save --include-untracked removed files' '
test_expect_success 'stash save --include-untracked removed files got stashed' '
git stash pop &&
- test ! -f file
+ test_path_is_missing file
'
cat > .gitignore <<EOF
test_expect_success 'stash save -u can stash with only untracked files different' '
echo 4 > file4 &&
git stash -u &&
- test "!" -f file4
+ test_path_is_missing file4
'
test_expect_success 'stash save --all does not respect .gitignore' '
git stash -a &&
- test "!" -f ignored &&
- test "!" -e ignored.d &&
- test "!" -f .gitignore
+ test_path_is_missing ignored &&
+ test_path_is_missing ignored.d &&
+ test_path_is_missing .gitignore
'
test_expect_success 'stash save --all is stash poppable' '
'git diff-index --cached $tree -- path1/ >current &&
compare_diff_raw current expected'
+cat >expected <<\EOF
+:100644 100644 766498d93a4b06057a8e49d23f4068f1170ff38f 0a41e115ab61be0328a19b29f18cdcb49338d516 M path1/file1
+EOF
+test_expect_success \
+ '"*file1" should show path1/file1' \
+ 'git diff-index --cached $tree -- "*file1" >current &&
+ compare_diff_raw current expected'
+
cat >expected <<\EOF
:100644 100644 766498d93a4b06057a8e49d23f4068f1170ff38f 0a41e115ab61be0328a19b29f18cdcb49338d516 M file0
EOF
git diff -w -b --ignore-space-at-eol > out
test_expect_success 'another test, with -w -b --ignore-space-at-eol' 'test_cmp expect out'
-tr 'Q' '\015' << EOF > expect
+tr 'Q_' '\015 ' << EOF > expect
diff --git a/x b/x
index d99af23..8b32fb5 100644
--- a/x
@@ -1,6 +1,6 @@
-whitespace at beginning
+ whitespace at beginning
- whitespace change
+ whitespace change
-whitespace in the middle
+white space in the middle
- whitespace at end
+ whitespace at end__
unchanged line
- CR at endQ
+ CR at end
EOF
git diff -b > out
test_expect_success 'another test, with -b' 'test_cmp expect out'
git diff -b --ignore-space-at-eol > out
test_expect_success 'another test, with -b --ignore-space-at-eol' 'test_cmp expect out'
-tr 'Q' '\015' << EOF > expect
+tr 'Q_' '\015 ' << EOF > expect
diff --git a/x b/x
index d99af23..8b32fb5 100644
--- a/x
+ whitespace at beginning
+whitespace change
+white space in the middle
- whitespace at end
+ whitespace at end__
unchanged line
- CR at endQ
+ CR at end
EOF
git diff --ignore-space-at-eol > out
test_expect_success 'another test, with --ignore-space-at-eol' 'test_cmp expect out'
word_diff --word-diff=plain
'
+test_expect_success 'word-diff with no newline at EOF' '
+ cat >expect <<-\EOF &&
+ diff --git a/pre b/post
+ index 7bf316e..3dd0303 100644
+ --- a/pre
+ +++ b/post
+ @@ -1 +1 @@
+ a a [-a-]{+ab+} a a
+ EOF
+ printf "%s" "a a a a a" >pre &&
+ printf "%s" "a a ab a a" >post &&
+ word_diff --word-diff=plain
+'
+
test_done
test_expect_success 'am --signoff does not add Signed-off-by: line if already there' '
git format-patch --stdout HEAD^ >patch3 &&
- sed -e "/^Subject/ s,\[PATCH,Re: Re: Re: & 1/5 v2," patch3 >patch4 &&
+ sed -e "/^Subject/ s,\[PATCH,Re: Re: Re: & 1/5 v2] [foo," patch3 >patch4 &&
rm -fr .git/rebase-apply &&
git reset --hard &&
git checkout HEAD^ &&
git am --keep patch4 &&
! test -d .git/rebase-apply &&
git cat-file commit HEAD >actual &&
- grep "Re: Re: Re: \[PATCH 1/5 v2\] third" actual
+ grep "Re: Re: Re: \[PATCH 1/5 v2\] \[foo\] third" actual
+'
+
+test_expect_success 'am --keep-non-patch really keeps the non-patch part' '
+ rm -fr .git/rebase-apply &&
+ git reset --hard &&
+ git checkout HEAD^ &&
+ git am --keep-non-patch patch4 &&
+ ! test -d .git/rebase-apply &&
+ git cat-file commit HEAD >actual &&
+ grep "^\[foo\] third" actual
'
test_expect_success 'am -3 falls back to 3-way merge' '
cat <<-\EOT >read-request.sed &&
#!/bin/sed -nf
+ # Note that a request could ask for "tag $tagname"
/ in the git repository at:$/!d
n
/^$/ n
+ s/ tag \([^ ]*\)$/ tag--\1/
s/^[ ]*\(.*\) \([^ ]*\)/please pull\
\1\
\2/p
read branch
} <digest &&
{
+ test "$branch" = full ||
test "$branch" = master ||
test "$branch" = for-upstream
}
pull_to_client 3rd "refs/heads/A" $((1*3))
+test_expect_success 'single branch clone' '
+ git clone --single-branch "file://$(pwd)/." singlebranch
+'
+
+test_expect_success 'single branch object count' '
+ GIT_DIR=singlebranch/.git git count-objects -v |
+ grep "^in-pack:" > count.singlebranch &&
+ echo "in-pack: 198" >expected &&
+ test_cmp expected count.singlebranch
+'
+
test_expect_success 'clone shallow' '
- git clone --depth 2 "file://$(pwd)/." shallow
+ git clone --no-single-branch --depth 2 "file://$(pwd)/." shallow
'
test_expect_success 'clone shallow object count' '
grep "^count: 52" count.shallow
'
+test_expect_success 'clone shallow without --no-single-branch' '
+ git clone --depth 1 "file://$(pwd)/." shallow2
+'
+
+test_expect_success 'clone shallow object count' '
+ (
+ cd shallow2 &&
+ git count-objects -v
+ ) > count.shallow2 &&
+ grep "^in-pack: 6" count.shallow2
+'
+
+test_expect_success 'clone shallow with --branch' '
+ git clone --depth 1 --branch A "file://$(pwd)/." shallow3
+'
+
+test_expect_success 'clone shallow object count' '
+ echo "in-pack: 12" > count3.expected &&
+ GIT_DIR=shallow3/.git git count-objects -v |
+ grep "^in-pack" > count3.actual &&
+ test_cmp count3.expected count3.actual
+'
+
+test_expect_success 'clone shallow with detached HEAD' '
+ git checkout HEAD^ &&
+ git clone --depth 1 "file://$(pwd)/." shallow5 &&
+ git checkout - &&
+ GIT_DIR=shallow5/.git git rev-parse HEAD >actual &&
+ git rev-parse HEAD^ >expected &&
+ test_cmp expected actual
+'
+
+test_expect_success 'shallow clone pulling tags' '
+ git tag -a -m A TAGA1 A &&
+ git tag -a -m B TAGB1 B &&
+ git tag TAGA2 A &&
+ git tag TAGB2 B &&
+ git clone --depth 1 "file://$(pwd)/." shallow6 &&
+
+ cat >taglist.expected <<\EOF &&
+TAGB1
+TAGB2
+EOF
+ GIT_DIR=shallow6/.git git tag -l >taglist.actual &&
+ test_cmp taglist.expected taglist.actual &&
+
+ echo "in-pack: 7" > count6.expected &&
+ GIT_DIR=shallow6/.git git count-objects -v |
+ grep "^in-pack" > count6.actual &&
+ test_cmp count6.expected count6.actual
+'
+
+test_expect_success 'shallow cloning single tag' '
+ git clone --depth 1 --branch=TAGB1 "file://$(pwd)/." shallow7 &&
+ cat >taglist.expected <<\EOF &&
+TAGB1
+TAGB2
+EOF
+ GIT_DIR=shallow7/.git git tag -l >taglist.actual &&
+ test_cmp taglist.expected taglist.actual &&
+
+ echo "in-pack: 7" > count7.expected &&
+ GIT_DIR=shallow7/.git git count-objects -v |
+ grep "^in-pack" > count7.actual &&
+ test_cmp count7.expected count7.actual
+'
+
test_done
master_in_two=`cd ../two && git rev-parse master` &&
one_in_two=`cd ../two && git rev-parse one` &&
{
- echo "$master_in_two not-for-merge"
echo "$one_in_two "
+ echo "$master_in_two not-for-merge"
} >expected &&
cut -f -2 .git/FETCH_HEAD >actual &&
test_cmp expected actual'
# br-branches-default-merge
-754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
+754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../
# br-branches-default-merge branches-default
-754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
+754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../
# br-branches-default-octopus
-754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../
+754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../
# br-branches-default-octopus branches-default
-754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../
+754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../
# br-branches-one-merge
-8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge branch 'one' of ../
0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
+8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge branch 'one' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../
# br-branches-one-merge branches-one
-8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge branch 'one' of ../
0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
+8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge branch 'one' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../
# br-config-explicit-merge
+0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge branch 'one' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 not-for-merge branch 'two' of ../
-0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../
# br-config-explicit-merge config-explicit
+0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge branch 'one' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 not-for-merge branch 'two' of ../
-0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../
# br-config-explicit-octopus
-754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../
+754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
0567da4d5edd2ff4bb292a465ba9e64dcad9536b not-for-merge branch 'three' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
# br-config-explicit-octopus config-explicit
-754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../
+754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
0567da4d5edd2ff4bb292a465ba9e64dcad9536b not-for-merge branch 'three' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
# br-config-glob-merge
+0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge branch 'one' of ../
-0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 not-for-merge branch 'two' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
# br-config-glob-merge config-glob
+0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge branch 'one' of ../
-0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 not-for-merge branch 'two' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
# br-config-glob-octopus
-754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../
-0567da4d5edd2ff4bb292a465ba9e64dcad9536b not-for-merge branch 'three' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../
+754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
+0567da4d5edd2ff4bb292a465ba9e64dcad9536b not-for-merge branch 'three' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../
# br-config-glob-octopus config-glob
-754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../
-0567da4d5edd2ff4bb292a465ba9e64dcad9536b not-for-merge branch 'three' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../
+754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
+0567da4d5edd2ff4bb292a465ba9e64dcad9536b not-for-merge branch 'three' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../
# br-remote-explicit-merge
+0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge branch 'one' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 not-for-merge branch 'two' of ../
-0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../
# br-remote-explicit-merge remote-explicit
+0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge branch 'one' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 not-for-merge branch 'two' of ../
-0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../
# br-remote-explicit-octopus
-754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../
+754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
0567da4d5edd2ff4bb292a465ba9e64dcad9536b not-for-merge branch 'three' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
# br-remote-explicit-octopus remote-explicit
-754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../
+754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
0567da4d5edd2ff4bb292a465ba9e64dcad9536b not-for-merge branch 'three' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
# br-remote-glob-merge
+0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge branch 'one' of ../
-0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 not-for-merge branch 'two' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
# br-remote-glob-merge remote-glob
+0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge branch 'one' of ../
-0567da4d5edd2ff4bb292a465ba9e64dcad9536b branch 'three' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 not-for-merge branch 'two' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
# br-remote-glob-octopus
-754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../
-0567da4d5edd2ff4bb292a465ba9e64dcad9536b not-for-merge branch 'three' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../
+754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
+0567da4d5edd2ff4bb292a465ba9e64dcad9536b not-for-merge branch 'three' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../
# br-remote-glob-octopus remote-glob
-754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 branch 'one' of ../
-0567da4d5edd2ff4bb292a465ba9e64dcad9536b not-for-merge branch 'three' of ../
6134ee8f857693b96ff1cc98d3e2fd62b199e5a8 branch 'two' of ../
+754b754407bf032e9a2f9d5a9ad05ca79a6b228f not-for-merge branch 'master' of ../
+0567da4d5edd2ff4bb292a465ba9e64dcad9536b not-for-merge branch 'three' of ../
6c9dec2b923228c9ff994c6cfe4ae16c12408dc5 not-for-merge tag 'tag-master' of ../
8e32a6d901327a23ef831511badce7bf3bf46689 not-for-merge tag 'tag-one' of ../
22feea448b023a2d864ef94b013735af34d238ba not-for-merge tag 'tag-one-tree' of ../
! grep "Writing objects" err
'
+test_expect_success TTY 'quiet push' '
+ ensure_fresh_upstream &&
+
+ test_terminal git push --quiet --no-progress upstream master 2>&1 | tee output &&
+ test_cmp /dev/null output
+'
+
test_done
ROOT_PATH="$PWD"
LIB_HTTPD_PORT=${LIB_HTTPD_PORT-'5541'}
. "$TEST_DIRECTORY"/lib-httpd.sh
+. "$TEST_DIRECTORY"/lib-terminal.sh
start_httpd
test_expect_success 'setup remote repository' '
test_must_fail git show-ref --verify refs/remotes/origin/dev
'
+cat >"$HTTPD_DOCUMENT_ROOT_PATH/test_repo.git/hooks/update" <<EOF
+#!/bin/sh
+exit 1
+EOF
+chmod a+x "$HTTPD_DOCUMENT_ROOT_PATH/test_repo.git/hooks/update"
+
+cat >exp <<EOF
+remote: error: hook declined to update refs/heads/dev2
+To http://127.0.0.1:$LIB_HTTPD_PORT/smart/test_repo.git
+ ! [remote rejected] dev2 -> dev2 (hook declined)
+error: failed to push some refs to 'http://127.0.0.1:5541/smart/test_repo.git'
+EOF
+
+test_expect_success 'rejected update prints status' '
+ cd "$ROOT_PATH"/test_repo_clone &&
+ git checkout -b dev2 &&
+ : >path4 &&
+ git add path4 &&
+ test_tick &&
+ git commit -m dev2 &&
+ test_must_fail git push origin dev2 2>act &&
+ sed -e "/^remote: /s/ *$//" <act >cmp &&
+ test_cmp exp cmp
+'
+rm -f "$HTTPD_DOCUMENT_ROOT_PATH/test_repo.git/hooks/update"
+
cat >exp <<EOF
GET /smart/test_repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
POST /smart/test_repo.git/git-receive-pack HTTP/1.1 200
GET /smart/test_repo.git/info/refs?service=git-receive-pack HTTP/1.1 200
POST /smart/test_repo.git/git-receive-pack HTTP/1.1 200
+GET /smart/test_repo.git/info/refs?service=git-receive-pack HTTP/1.1 200
+POST /smart/test_repo.git/git-receive-pack HTTP/1.1 200
EOF
test_expect_success 'used receive-pack service' '
sed -e "
test $HEAD = $(git rev-parse --verify HEAD))
'
+test_expect_success 'push --all can push to empty repo' '
+ d=$HTTPD_DOCUMENT_ROOT_PATH/empty-all.git &&
+ git init --bare "$d" &&
+ git --git-dir="$d" config http.receivepack true &&
+ git push --all "$HTTPD_URL"/smart/empty-all.git
+'
+
+test_expect_success 'push --mirror can push to empty repo' '
+ d=$HTTPD_DOCUMENT_ROOT_PATH/empty-mirror.git &&
+ git init --bare "$d" &&
+ git --git-dir="$d" config http.receivepack true &&
+ git push --mirror "$HTTPD_URL"/smart/empty-mirror.git
+'
+
+test_expect_success 'push --all to repo with alternates' '
+ s=$HTTPD_DOCUMENT_ROOT_PATH/test_repo.git &&
+ d=$HTTPD_DOCUMENT_ROOT_PATH/alternates-all.git &&
+ git clone --bare --shared "$s" "$d" &&
+ git --git-dir="$d" config http.receivepack true &&
+ git --git-dir="$d" repack -adl &&
+ git push --all "$HTTPD_URL"/smart/alternates-all.git
+'
+
+test_expect_success 'push --mirror to repo with alternates' '
+ s=$HTTPD_DOCUMENT_ROOT_PATH/test_repo.git &&
+ d=$HTTPD_DOCUMENT_ROOT_PATH/alternates-mirror.git &&
+ git clone --bare --shared "$s" "$d" &&
+ git --git-dir="$d" config http.receivepack true &&
+ git --git-dir="$d" repack -adl &&
+ git push --mirror "$HTTPD_URL"/smart/alternates-mirror.git
+'
+
+test_expect_success TTY 'quiet push' '
+ cd "$ROOT_PATH"/test_repo_clone &&
+ test_commit quiet &&
+ test_terminal git push --quiet --no-progress 2>&1 | tee output &&
+ test_cmp /dev/null output
+'
+
stop_httpd
test_done
test_expect_success 'fetch packed objects' '
cp -R "$HTTPD_DOCUMENT_ROOT_PATH"/repo.git "$HTTPD_DOCUMENT_ROOT_PATH"/repo_pack.git &&
(cd "$HTTPD_DOCUMENT_ROOT_PATH"/repo_pack.git &&
- git --bare repack &&
- git --bare prune-packed
+ git --bare repack -a -d
) &&
git clone $HTTPD_URL/dumb/repo_pack.git
'
--- /dev/null
+#!/bin/sh
+
+test_description='test fetching over git protocol'
+. ./test-lib.sh
+
+LIB_GIT_DAEMON_PORT=${LIB_GIT_DAEMON_PORT-5570}
+. "$TEST_DIRECTORY"/lib-git-daemon.sh
+start_git_daemon
+
+test_expect_success 'setup repository' '
+ echo content >file &&
+ git add file &&
+ git commit -m one
+'
+
+test_expect_success 'create git-accessible bare repository' '
+ mkdir "$GIT_DAEMON_DOCUMENT_ROOT_PATH/repo.git" &&
+ (cd "$GIT_DAEMON_DOCUMENT_ROOT_PATH/repo.git" &&
+ git --bare init &&
+ : >git-daemon-export-ok
+ ) &&
+ git remote add public "$GIT_DAEMON_DOCUMENT_ROOT_PATH/repo.git" &&
+ git push public master:master
+'
+
+test_expect_success 'clone git repository' '
+ git clone "$GIT_DAEMON_URL/repo.git" clone &&
+ test_cmp file clone/file
+'
+
+test_expect_success 'fetch changes via git protocol' '
+ echo content >>file &&
+ git commit -a -m two &&
+ git push public &&
+ (cd clone && git pull) &&
+ test_cmp file clone/file
+'
+
+test_expect_failure 'remote detects correct HEAD' '
+ git push public master:other &&
+ (cd clone &&
+ git remote set-head -d origin &&
+ git remote set-head -a origin &&
+ git symbolic-ref refs/remotes/origin/HEAD > output &&
+ echo refs/remotes/origin/master > expect &&
+ test_cmp expect output
+ )
+'
+
+test_expect_success 'prepare pack objects' '
+ cp -R "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo.git "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_pack.git &&
+ (cd "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_pack.git &&
+ git --bare repack -a -d
+ )
+'
+
+test_expect_success 'fetch notices corrupt pack' '
+ cp -R "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_pack.git "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_bad1.git &&
+ (cd "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_bad1.git &&
+ p=`ls objects/pack/pack-*.pack` &&
+ chmod u+w $p &&
+ printf %0256d 0 | dd of=$p bs=256 count=1 seek=1 conv=notrunc
+ ) &&
+ mkdir repo_bad1.git &&
+ (cd repo_bad1.git &&
+ git --bare init &&
+ test_must_fail git --bare fetch "$GIT_DAEMON_URL/repo_bad1.git" &&
+ test 0 = `ls objects/pack/pack-*.pack | wc -l`
+ )
+'
+
+test_expect_success 'fetch notices corrupt idx' '
+ cp -R "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_pack.git "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_bad2.git &&
+ (cd "$GIT_DAEMON_DOCUMENT_ROOT_PATH"/repo_bad2.git &&
+ p=`ls objects/pack/pack-*.idx` &&
+ chmod u+w $p &&
+ printf %0256d 0 | dd of=$p bs=256 count=1 seek=1 conv=notrunc
+ ) &&
+ mkdir repo_bad2.git &&
+ (cd repo_bad2.git &&
+ git --bare init &&
+ test_must_fail git --bare fetch "$GIT_DAEMON_URL/repo_bad2.git" &&
+ test 0 = `ls objects/pack | wc -l`
+ )
+'
+
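+# test_remote_error [-x] [-n] <cmd> <repo> <expected-message>
+#
+# Run "git <cmd>" against "$GIT_DAEMON_URL/<repo>", expect it to fail,
+# and check that the client reports
+# "fatal: remote error: <expected-message>: /<repo>".
+# With -x, execute permission is first dropped from repo.git; with -n,
+# the repository is left unexported (no git-daemon-export-ok file).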
+test_remote_error()
+{
+ do_export=YesPlease
+ while test $# -gt 0
+ do
+ case $1 in
+ -x)
+ shift
+ chmod -x "$GIT_DAEMON_DOCUMENT_ROOT_PATH/repo.git"
+ ;;
+ -n)
+ shift
+ do_export=
+ ;;
+ *)
+ break
+ esac
+ done
+
+ if test $# -ne 3
+ then
+ error "invalid number of arguments"
+ fi
+
+ cmd=$1
+ repo=$2
+ msg=$3
+
+ if test -x "$GIT_DAEMON_DOCUMENT_ROOT_PATH/$repo"
+ then
+ if test -n "$do_export"
+ then
+ : >"$GIT_DAEMON_DOCUMENT_ROOT_PATH/$repo/git-daemon-export-ok"
+ else
+ rm -f "$GIT_DAEMON_DOCUMENT_ROOT_PATH/$repo/git-daemon-export-ok"
+ fi
+ fi
+
+ test_must_fail git "$cmd" "$GIT_DAEMON_URL/$repo" 2>output &&
+ echo "fatal: remote error: $msg: /$repo" >expect &&
+ test_cmp expect output
+ ret=$?
+ chmod +x "$GIT_DAEMON_DOCUMENT_ROOT_PATH/repo.git"
+ (exit $ret)
+}
+
+msg="access denied or repository not exported"
+test_expect_success 'clone non-existent' "test_remote_error clone nowhere.git '$msg'"
+test_expect_success 'push disabled' "test_remote_error push repo.git '$msg'"
+test_expect_success 'read access denied' "test_remote_error -x fetch repo.git '$msg'"
+test_expect_success 'not exported' "test_remote_error -n fetch repo.git '$msg'"
+
+stop_git_daemon
+start_git_daemon --informative-errors
+
+test_expect_success 'clone non-existent' "test_remote_error clone nowhere.git 'no such repository'"
+test_expect_success 'push disabled' "test_remote_error push repo.git 'service not enabled'"
+test_expect_success 'read access denied' "test_remote_error -x fetch repo.git 'no such repository'"
+test_expect_success 'not exported' "test_remote_error -n fetch repo.git 'repository not exported'"
+
+stop_git_daemon
+test_done
rm -fr .git &&
test_create_repo src &&
(
- cd src
- >file
- git add file
- git commit -m initial
+ cd src &&
+ >file &&
+ git add file &&
+ git commit -m initial &&
+ echo 1 >file &&
+ git add file &&
+ git commit -m updated
)
'
'
+test_expect_success 'clone --mirror with detached HEAD' '
+
+ ( cd src && git checkout HEAD^ && git rev-parse HEAD >../expected ) &&
+ git clone --mirror src mirror.detached &&
+ ( cd src && git checkout - ) &&
+ GIT_DIR=mirror.detached git rev-parse HEAD >actual &&
+ test_cmp expected actual
+
+'
+
+test_expect_success 'clone --bare with detached HEAD' '
+
+ ( cd src && git checkout HEAD^ && git rev-parse HEAD >../expected ) &&
+ git clone --bare src bare.detached &&
+ ( cd src && git checkout - ) &&
+ GIT_DIR=bare.detached git rev-parse HEAD >actual &&
+ test_cmp expected actual
+
+'
+
test_expect_success 'clone --bare names the local repository <name>.git' '
git clone --bare src &&
grep /src/\\.git/objects target-10/objects/info/alternates
'
+test_expect_success 'clone checking out a tag' '
+ git clone --branch=some-tag src dst.tag &&
+ GIT_DIR=src/.git git rev-parse some-tag >expected &&
+ test_cmp expected dst.tag/.git/HEAD &&
+ GIT_DIR=dst.tag/.git git config remote.origin.fetch >fetch.actual &&
+ echo "+refs/heads/*:refs/remotes/origin/*" >fetch.expected &&
+ test_cmp fetch.expected fetch.actual
+'
+
test_done
'
+test_expect_success 'empty bundle file is rejected' '
+
+ >empty-bundle && test_must_fail git fetch empty-bundle
+
+'
+
test_done
)
'
-test_expect_success 'clone -b with bogus branch chooses HEAD' '
- git clone -b bogus parent clone-bogus &&
- (cd clone-bogus &&
- check_HEAD master &&
- check_file one
- )
+test_expect_success 'clone -b with bogus branch' '
+ test_must_fail git clone -b bogus parent clone-bogus
'
test_done
test_cmp expect.gd-short actual.gd-short
'
+test_expect_success 'reflog identity' '
+ echo "C O Mitter:committer@example.com" >expect &&
+ git log -g -1 --format="%gn:%ge" >actual &&
+ test_cmp expect actual
+'
+
test_expect_success 'oneline with empty message' '
git commit -m "dummy" --allow-empty &&
git commit -m "dummy" --allow-empty &&
check_result 'I E C B A' --simplify-merges -- file
check_result 'I B A' -- file
check_result 'I B A' --topo-order -- file
+check_result 'H' --first-parent -- another-file
test_done
. "$TEST_DIRECTORY"/lib-pager.sh
. "$TEST_DIRECTORY"/lib-terminal.sh
-cleanup_fail() {
- echo >&2 cleanup failed
- (exit 1)
-}
-
test_expect_success 'setup' '
sane_unset GIT_PAGER GIT_PAGER_IN_USE &&
test_unconfig core.pager &&
'
test_expect_success TTY 'some commands use a pager' '
- rm -f paginated.out ||
- cleanup_fail &&
-
+ rm -f paginated.out &&
test_terminal git log &&
test -e paginated.out
'
'
test_expect_success TTY 'some commands do not use a pager' '
- rm -f paginated.out ||
- cleanup_fail &&
-
+ rm -f paginated.out &&
test_terminal git rev-list HEAD &&
! test -e paginated.out
'
test_expect_success 'no pager when stdout is a pipe' '
- rm -f paginated.out ||
- cleanup_fail &&
-
+ rm -f paginated.out &&
git log | cat &&
! test -e paginated.out
'
test_expect_success 'no pager when stdout is a regular file' '
- rm -f paginated.out ||
- cleanup_fail &&
-
+ rm -f paginated.out &&
git log >file &&
! test -e paginated.out
'
test_expect_success TTY 'git --paginate rev-list uses a pager' '
- rm -f paginated.out ||
- cleanup_fail &&
-
+ rm -f paginated.out &&
test_terminal git --paginate rev-list HEAD &&
test -e paginated.out
'
test_expect_success 'no pager even with --paginate when stdout is a pipe' '
- rm -f file paginated.out ||
- cleanup_fail &&
-
+ rm -f file paginated.out &&
git --paginate log | cat &&
! test -e paginated.out
'
test_expect_success TTY 'no pager with --no-pager' '
- rm -f paginated.out ||
- cleanup_fail &&
-
+ rm -f paginated.out &&
test_terminal git --no-pager log &&
! test -e paginated.out
'
}
test_expect_success 'tests can detect color' '
- rm -f colorful.log colorless.log ||
- cleanup_fail &&
-
+ rm -f colorful.log colorless.log &&
git log --no-color >colorless.log &&
git log --color >colorful.log &&
! colorful colorless.log &&
test_expect_success 'no color when stdout is a regular file' '
rm -f colorless.log &&
- test_config color.ui auto ||
- cleanup_fail &&
-
+ test_config color.ui auto &&
git log >colorless.log &&
! colorful colorless.log
'
test_expect_success TTY 'color when writing to a pager' '
rm -f paginated.out &&
- test_config color.ui auto ||
- cleanup_fail &&
-
+ test_config color.ui auto &&
(
TERM=vt100 &&
export TERM &&
test_expect_success 'color when writing to a file intended for a pager' '
rm -f colorful.log &&
- test_config color.ui auto ||
- cleanup_fail &&
-
+ test_config color.ui auto &&
(
TERM=vt100 &&
GIT_PAGER_IN_USE=true &&
$test_expectation SIMPLEPAGER,TTY "$cmd - default pager is used by default" "
sane_unset PAGER GIT_PAGER &&
test_unconfig core.pager &&
- rm -f default_pager_used ||
- cleanup_fail &&
-
+ rm -f default_pager_used &&
cat >\$less <<-\EOF &&
#!/bin/sh
wc >default_pager_used
$test_expectation TTY "$cmd - PAGER overrides default pager" "
sane_unset GIT_PAGER &&
test_unconfig core.pager &&
- rm -f PAGER_used ||
- cleanup_fail &&
-
+ rm -f PAGER_used &&
PAGER='wc >PAGER_used' &&
export PAGER &&
$full_command &&
$test_expectation TTY "$cmd - repository-local core.pager setting $used_if_wanted" "
sane_unset GIT_PAGER &&
- rm -f core.pager_used ||
- cleanup_fail &&
-
+ rm -f core.pager_used &&
PAGER=wc &&
export PAGER &&
test_config core.pager 'wc >core.pager_used' &&
$test_expectation TTY "$cmd - core.pager $used_if_wanted from subdirectory" "
sane_unset GIT_PAGER &&
rm -f core.pager_used &&
- rm -fr sub ||
- cleanup_fail &&
-
+ rm -fr sub &&
PAGER=wc &&
stampname=\$(pwd)/core.pager_used &&
export PAGER stampname &&
parse_args "$@"
$test_expectation TTY "$cmd - GIT_PAGER overrides core.pager" "
- rm -f GIT_PAGER_used ||
- cleanup_fail &&
-
+ rm -f GIT_PAGER_used &&
test_config core.pager wc &&
GIT_PAGER='wc >GIT_PAGER_used' &&
export GIT_PAGER &&
parse_args "$@"
$test_expectation TTY "no pager for '$cmd'" "
- rm -f GIT_PAGER_used ||
- cleanup_fail &&
-
+ rm -f GIT_PAGER_used &&
GIT_PAGER='wc >GIT_PAGER_used' &&
export GIT_PAGER &&
$full_command &&
)
'
+test_expect_success 'submodule add properly re-creates deeper level submodules' '
+ (cd super &&
+ git reset --hard master &&
+ rm -rf deeper/ &&
+ git submodule add ../submodule deeper/submodule
+ )
+'
+
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='signed commit tests'
+. ./test-lib.sh
+. "$TEST_DIRECTORY/lib-gpg.sh"
+
+test_expect_success GPG 'create signed commits' '
+ echo 1 >file && git add file &&
+ test_tick && git commit -S -m initial &&
+ git tag initial &&
+ git branch side &&
+
+ echo 2 >file && test_tick && git commit -a -S -m second &&
+ git tag second &&
+
+ git checkout side &&
+ echo 3 >elif && git add elif &&
+ test_tick && git commit -m "third on side" &&
+
+ git checkout master &&
+ test_tick && git merge -S side &&
+ git tag merge &&
+
+ echo 4 >file && test_tick && git commit -a -m "fourth unsigned" &&
+ git tag fourth-unsigned &&
+
+ test_tick && git commit --amend -S -m "fourth signed" &&
+ git tag fourth-signed
+'
+
+test_expect_success GPG 'show signatures' '
+ (
+ for commit in initial second merge master
+ do
+ git show --pretty=short --show-signature $commit >actual &&
+ grep "Good signature from" actual || exit 1
+ ! grep "BAD signature from" actual || exit 1
+ echo $commit OK
+ done
+ ) &&
+ (
+ for commit in merge^2 fourth-unsigned
+ do
+ git show --pretty=short --show-signature $commit >actual &&
+ grep "Good signature from" actual && exit 1
+ ! grep "BAD signature from" actual || exit 1
+ echo $commit OK
+ done
+ )
+'
+
+test_expect_success GPG 'detect fudged signature' '
+ git cat-file commit master >raw &&
+
+ sed -e "s/fourth signed/4th forged/" raw >forged1 &&
+ git hash-object -w -t commit forged1 >forged1.commit &&
+ git show --pretty=short --show-signature $(cat forged1.commit) >actual1 &&
+ grep "BAD signature from" actual1 &&
+ ! grep "Good signature from" actual1
+'
+
+test_expect_success GPG 'detect fudged signature with NUL' '
+ git cat-file commit master >raw &&
+ cat raw >forged2 &&
+ echo Qwik | tr "Q" "\000" >>forged2 &&
+ git hash-object -w -t commit forged2 >forged2.commit &&
+ git show --pretty=short --show-signature $(cat forged2.commit) >actual2 &&
+ grep "BAD signature from" actual2 &&
+ ! grep "Good signature from" actual2
+'
+
+test_expect_success GPG 'amending already signed commit' '
+ git checkout fourth-signed^0 &&
+ git commit --amend -S --no-edit &&
+ git show -s --show-signature HEAD >actual &&
+ grep "Good signature from" actual &&
+ ! grep "BAD signature from" actual
+'
+
+test_done
echo branch1 change >file1 &&
echo branch1 newfile >file2 &&
echo branch1 spaced >"spaced name" &&
+ echo branch1 both added >both &&
echo branch1 change file11 >file11 &&
echo branch1 change file13 >file13 &&
echo branch1 sub >subdir/file3 &&
git checkout -b submod-branch1
) &&
git add file1 "spaced name" file11 file13 file2 subdir/file3 submod &&
+ git add both &&
git rm file12 &&
git commit -m "branch1 changes" &&
echo master updated >file1 &&
echo master new >file2 &&
echo master updated spaced >"spaced name" &&
+ echo master both added >both &&
echo master updated file12 >file12 &&
echo master updated file14 >file14 &&
echo master new sub >subdir/file3 &&
git checkout -b submod-master
) &&
git add file1 "spaced name" file12 file14 file2 subdir/file3 submod &&
+ git add both &&
git rm file11 &&
git commit -m "master updates" &&
git config merge.tool mytool &&
git config mergetool.mytool.cmd "cat \"\$REMOTE\" >\"\$MERGED\"" &&
- git config mergetool.mytool.trustExitCode true
+ git config mergetool.mytool.trustExitCode true &&
+ git config mergetool.mybase.cmd "cat \"\$BASE\" >\"\$MERGED\"" &&
+ git config mergetool.mybase.trustExitCode true
'
test_expect_success 'custom mergetool' '
git checkout -b test1 branch1 &&
git submodule update -N &&
test_must_fail git merge master >/dev/null 2>&1 &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "" | git mergetool file1 file1 ) &&
( yes "" | git mergetool file2 "spaced name" >/dev/null 2>&1 ) &&
( yes "" | git mergetool subdir/file3 >/dev/null 2>&1 ) &&
( yes "" | git mergetool file1 >/dev/null 2>&1 ) &&
( yes "" | git mergetool file2 >/dev/null 2>&1 ) &&
( yes "" | git mergetool "spaced name" >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "" | git mergetool subdir/file3 >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file12 >/dev/null 2>&1 ) &&
cd subdir &&
( yes "" | git mergetool ../file1 >/dev/null 2>&1 ) &&
( yes "" | git mergetool ../file2 ../spaced\ name >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool ../both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool ../file11 >/dev/null 2>&1 ) &&
( yes "d" | git mergetool ../file12 >/dev/null 2>&1 ) &&
( yes "l" | git mergetool ../submod >/dev/null 2>&1 ) &&
test_must_fail git merge master &&
test -n "$(git ls-files -u)" &&
( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
( yes "r" | git mergetool submod ) &&
rmdir submod && mv submod-movedaside submod &&
test_must_fail git merge master &&
test -n "$(git ls-files -u)" &&
( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
( yes "l" | git mergetool submod ) &&
test ! -e submod &&
test_must_fail git merge test6 &&
test -n "$(git ls-files -u)" &&
( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
( yes "r" | git mergetool submod ) &&
test ! -e submod &&
test_must_fail git merge test6 &&
test -n "$(git ls-files -u)" &&
( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
( yes "l" | git mergetool submod ) &&
test "$(cat submod/bar)" = "master submodule" &&
test_must_fail git merge master &&
test -n "$(git ls-files -u)" &&
( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
( yes "r" | git mergetool submod ) &&
rmdir submod && mv submod-movedaside submod &&
test_must_fail git merge master &&
test -n "$(git ls-files -u)" &&
( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
( yes "l" | git mergetool submod ) &&
git submodule update -N &&
test_must_fail git merge test7 &&
test -n "$(git ls-files -u)" &&
( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
( yes "r" | git mergetool submod ) &&
test -d submod.orig &&
test_must_fail git merge test7 &&
test -n "$(git ls-files -u)" &&
( yes "" | git mergetool file1 file2 spaced\ name subdir/file3 >/dev/null 2>&1 ) &&
+	( yes "" | git mergetool both >/dev/null 2>&1 ) &&
( yes "d" | git mergetool file11 file12 >/dev/null 2>&1 ) &&
( yes "l" | git mergetool submod ) &&
test "$(cat submod/bar)" = "master submodule" &&
git submodule update -N
'
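+# "both" is added independently on each branch, so its conflict has no base
+# stage; the "mybase" tool copies $BASE into $MERGED, which should therefore
+# end up empty, as checked against the empty "expected" file below.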
+test_expect_success 'file with no base' '
+ git checkout -b test13 branch1 &&
+ test_must_fail git merge master &&
+ git mergetool --no-prompt --tool mybase -- both &&
+ >expected &&
+ test_cmp both expected &&
+ git reset --hard master >/dev/null 2>&1
+'
+
test_done
'
done
+cat >expected <<EOF
+file
+EOF
+test_expect_success 'grep -l -C' '
+ git grep -l -C1 foo >actual &&
+ test_cmp expected actual
+'
+
+cat >expected <<EOF
+file:5
+EOF
+test_expect_success 'grep -c -C' '
+ git grep -c -C1 foo >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'grep -L -C' '
+ git ls-files >expected &&
+ git grep -L -C1 nonexistent_string >actual &&
+ test_cmp expected actual
+'
+
cat >expected <<EOF
file:foo mmap bar_mmap
EOF
test_cmp expected actual
'
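+# With -W, git grep prints the whole function that contains each match.  The
+# function boundaries come from the funcname pattern, which this test overrides
+# for hello.c via a "diff=custom" attribute and diff.custom.xfuncname; in the
+# expected output, "=" marks the funcname line, ":" the matching line, and "-"
+# the rest of the function body.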
+cat >expected <<EOF
+hello.c= printf("Hello world.\n");
+hello.c: return 0;
+hello.c- /* char ?? */
+EOF
+
+test_expect_success 'grep -W with userdiff' '
+ test_when_finished "rm -f .gitattributes" &&
+ git config diff.custom.xfuncname "(printf.*|})$" &&
+ echo "hello.c diff=custom" >.gitattributes &&
+ git grep -W return >actual &&
+ test_cmp expected actual
+'
+
test_expect_success 'grep from a subdirectory to search wider area (1)' '
mkdir -p s &&
(
cat >helper <<'EOF'
#!/bin/sh
grep -q '^bin: ' "$1" || { echo "E: $1 is not \"binary\" file" 1>&2; exit 1; }
-sed 's/^bin: /converted: /' "$1"
+perl -p -e 's/^bin: /converted: /' "$1"
EOF
chmod +x helper
test_done
fi
-CVSROOT=$(pwd)/cvsroot
-CVSWORK=$(pwd)/cvswork
-GIT_DIR=$(pwd)/.git
+CVSROOT=$PWD/cvsroot
+CVSWORK=$PWD/cvswork
+GIT_DIR=$PWD/.git
export CVSROOT CVSWORK GIT_DIR
rm -rf "$CVSROOT" "$CVSWORK"
'path_info: project/branch:dir/' \
'gitweb_run "" "/.git/master:foo/"'
+test_expect_success \
+ 'path_info: project/branch (non-existent)' \
+ 'gitweb_run "" "/.git/non-existent"'
+
+test_expect_success \
+ 'path_info: project/branch:filename (non-existent branch)' \
+ 'gitweb_run "" "/.git/non-existent:non-existent"'
+
test_expect_success \
'path_info: project/branch:file (non-existent)' \
'gitweb_run "" "/.git/master:non-existent"'
--- /dev/null
+#!/bin/sh
+
+test_description='git-p4 tests'
+
+. ./lib-git-p4.sh
+
+test_expect_success 'start p4d' '
+ start_p4d
+'
+
+test_expect_success 'add p4 files' '
+ (
+ cd "$cli" &&
+ echo file1 >file1 &&
+ p4 add file1 &&
+ p4 submit -d "file1" &&
+ echo file2 >file2 &&
+ p4 add file2 &&
+ p4 submit -d "file2"
+ )
+'
+
+test_expect_success 'basic git-p4 clone' '
+ "$GITP4" clone --dest="$git" //depot &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git log --oneline >lines &&
+ test_line_count = 1 lines
+ )
+'
+
+test_expect_success 'git-p4 clone @all' '
+ "$GITP4" clone --dest="$git" //depot@all &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git log --oneline >lines &&
+ test_line_count = 2 lines
+ )
+'
+
+test_expect_success 'git-p4 sync uninitialized repo' '
+ test_create_repo "$git" &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ test_must_fail "$GITP4" sync
+ )
+'
+
+#
+# Create a git repo by hand. Add a commit so that HEAD is valid.
+# The test then imports a new p4 repository into a new git branch.
+#
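+# The --branch option names the ref to import into (refs/remotes/p4/depot here)
+# instead of the default p4/master ref.
+#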
+test_expect_success 'git-p4 sync new branch' '
+ test_create_repo "$git" &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ test_commit head &&
+ "$GITP4" sync --branch=refs/remotes/p4/depot //depot@all &&
+ git log --oneline p4/depot >lines &&
+ test_line_count = 2 lines
+ )
+'
+
+test_expect_success 'clone two dirs' '
+ (
+ cd "$cli" &&
+ mkdir sub1 sub2 &&
+ echo sub1/f1 >sub1/f1 &&
+ echo sub2/f2 >sub2/f2 &&
+ p4 add sub1/f1 &&
+ p4 submit -d "sub1/f1" &&
+ p4 add sub2/f2 &&
+ p4 submit -d "sub2/f2"
+ ) &&
+ "$GITP4" clone --dest="$git" //depot/sub1 //depot/sub2 &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git ls-files >lines &&
+ test_line_count = 2 lines &&
+ git log --oneline p4/master >lines &&
+ test_line_count = 1 lines
+ )
+'
+
+test_expect_success 'clone two dirs, @all' '
+ (
+ cd "$cli" &&
+ echo sub1/f3 >sub1/f3 &&
+ p4 add sub1/f3 &&
+ p4 submit -d "sub1/f3"
+ ) &&
+ "$GITP4" clone --dest="$git" //depot/sub1@all //depot/sub2@all &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git ls-files >lines &&
+ test_line_count = 3 lines &&
+ git log --oneline p4/master >lines &&
+ test_line_count = 3 lines
+ )
+'
+
+test_expect_success 'clone two dirs, @all, conflicting files' '
+ (
+ cd "$cli" &&
+ echo sub2/f3 >sub2/f3 &&
+ p4 add sub2/f3 &&
+ p4 submit -d "sub2/f3"
+ ) &&
+ "$GITP4" clone --dest="$git" //depot/sub1@all //depot/sub2@all &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git ls-files >lines &&
+ test_line_count = 3 lines &&
+ git log --oneline p4/master >lines &&
+ test_line_count = 4 lines &&
+ echo sub2/f3 >expected &&
+ test_cmp expected f3
+ )
+'
+
+test_expect_success 'exit when p4 fails to produce marshaled output' '
+ badp4dir="$TRASH_DIRECTORY/badp4dir" &&
+ mkdir "$badp4dir" &&
+ test_when_finished "rm \"$badp4dir/p4\" && rmdir \"$badp4dir\"" &&
+ cat >"$badp4dir"/p4 <<-EOF &&
+ #!$SHELL_PATH
+ exit 1
+ EOF
+ chmod 755 "$badp4dir"/p4 &&
+ PATH="$badp4dir:$PATH" "$GITP4" clone --dest="$git" //depot >errs 2>&1 ; retval=$? &&
+ test $retval -eq 1 &&
+ test_must_fail grep -q Traceback errs
+'
+
+test_expect_success 'add p4 files with wildcards in the names' '
+ (
+ cd "$cli" &&
+ echo file-wild-hash >file-wild#hash &&
+ echo file-wild-star >file-wild\*star &&
+ echo file-wild-at >file-wild@at &&
+ echo file-wild-percent >file-wild%percent &&
+ p4 add -f file-wild* &&
+ p4 submit -d "file wildcards"
+ )
+'
+
+test_expect_success 'wildcard files git-p4 clone' '
+ "$GITP4" clone --dest="$git" //depot &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ test -f file-wild#hash &&
+ test -f file-wild\*star &&
+ test -f file-wild@at &&
+ test -f file-wild%percent
+ )
+'
+
+test_expect_success 'clone bare' '
+ "$GITP4" clone --dest="$git" --bare //depot &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ test ! -d .git &&
+ bare=`git config --get core.bare` &&
+ test "$bare" = true
+ )
+'
+
+p4_add_user() {
+ name=$1 fullname=$2 &&
+ p4 user -f -i <<-EOF &&
+ User: $name
+ Email: $name@localhost
+ FullName: $fullname
+ EOF
+ p4 passwd -P secret $name
+}
+
+p4_grant_admin() {
+ name=$1 &&
+ {
+ p4 protect -o &&
+ echo " admin user $name * //depot/..."
+ } | p4 protect -i
+}
+
+p4_check_commit_author() {
+ file=$1 user=$2 &&
+ p4 changes -m 1 //depot/$file | grep -q $user
+}
+
+make_change_by_user() {
+ file=$1 name=$2 email=$3 &&
+ echo "username: a change by $name" >>"$file" &&
+ git add "$file" &&
+ git commit --author "$name <$email>" -m "a change by $name"
+}
+
+# Test username support, submitting as user 'alice'
+test_expect_success 'preserve users' '
+ p4_add_user alice Alice &&
+ p4_add_user bob Bob &&
+ p4_grant_admin alice &&
+ "$GITP4" clone --dest="$git" //depot &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ echo "username: a change by alice" >>file1 &&
+ echo "username: a change by bob" >>file2 &&
+ git commit --author "Alice <alice@localhost>" -m "a change by alice" file1 &&
+ git commit --author "Bob <bob@localhost>" -m "a change by bob" file2 &&
+ git config git-p4.skipSubmitEditCheck true &&
+ P4EDITOR=touch P4USER=alice P4PASSWD=secret "$GITP4" commit --preserve-user &&
+ p4_check_commit_author file1 alice &&
+ p4_check_commit_author file2 bob
+ )
+'
+
+# Test username support, submitting as bob, who lacks admin rights. Should
+# not submit change to p4 (git diff should show deltas).
+test_expect_success 'refuse to preserve users without perms' '
+ "$GITP4" clone --dest="$git" //depot &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git config git-p4.skipSubmitEditCheck true &&
+ echo "username-noperms: a change by alice" >>file1 &&
+ git commit --author "Alice <alice@localhost>" -m "perms: a change by alice" file1 &&
+ P4EDITOR=touch P4USER=bob P4PASSWD=secret test_must_fail "$GITP4" commit --preserve-user &&
+ test_must_fail git diff --exit-code HEAD..p4/master
+ )
+'
+
+# What happens with unknown author? Without allowMissingP4Users it should fail.
+test_expect_success 'preserve user where author is unknown to p4' '
+ "$GITP4" clone --dest="$git" //depot &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git config git-p4.skipSubmitEditCheck true &&
+ echo "username-bob: a change by bob" >>file1 &&
+ git commit --author "Bob <bob@localhost>" -m "preserve: a change by bob" file1 &&
+ echo "username-unknown: a change by charlie" >>file1 &&
+ git commit --author "Charlie <charlie@localhost>" -m "preserve: a change by charlie" file1 &&
+ P4EDITOR=touch P4USER=alice P4PASSWD=secret test_must_fail "$GITP4" commit --preserve-user &&
+ test_must_fail git diff --exit-code HEAD..p4/master &&
+
+ echo "$0: repeat with allowMissingP4Users enabled" &&
+ git config git-p4.allowMissingP4Users true &&
+ git config git-p4.preserveUser true &&
+ P4EDITOR=touch P4USER=alice P4PASSWD=secret "$GITP4" commit &&
+ git diff --exit-code HEAD..p4/master &&
+ p4_check_commit_author file1 alice
+ )
+'
+
+# If we're *not* using --preserve-user, git-p4 should warn if we're submitting
+# changes that are not all ours.
+# Test: user in p4 and user unknown to p4.
+# Test: warning disabled and user is the same.
+test_expect_success 'not preserving user with mixed authorship' '
+ "$GITP4" clone --dest="$git" //depot &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git config git-p4.skipSubmitEditCheck true &&
+ p4_add_user derek Derek &&
+
+ make_change_by_user usernamefile3 Derek derek@localhost &&
+ P4EDITOR=cat P4USER=alice P4PASSWD=secret "$GITP4" commit |\
+ grep "git author derek@localhost does not match" &&
+
+ make_change_by_user usernamefile3 Charlie charlie@localhost &&
+ P4EDITOR=cat P4USER=alice P4PASSWD=secret "$GITP4" commit |\
+ grep "git author charlie@localhost does not match" &&
+
+ make_change_by_user usernamefile3 alice alice@localhost &&
+		P4EDITOR=cat P4USER=alice P4PASSWD=secret "$GITP4" commit |\
+ test_must_fail grep "git author.*does not match" &&
+
+ git config git-p4.skipUserNameCheck true &&
+ make_change_by_user usernamefile3 Charlie charlie@localhost &&
+ P4EDITOR=cat P4USER=alice P4PASSWD=secret "$GITP4" commit |\
+ test_must_fail grep "git author.*does not match" &&
+
+ p4_check_commit_author usernamefile3 alice
+ )
+'
+
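+# "p4 -G" makes p4 write its results as Python-marshaled dictionaries on
+# stdout; marshal_dump loads one dictionary and prints a single field, e.g.
+#	p4 -G changes -m 1 //depot/... | marshal_dump change
+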
+marshal_dump() {
+ what=$1
+ "$PYTHON_PATH" -c 'import marshal, sys; d = marshal.load(sys.stdin); print d["'$what'"]'
+}
+
+# Sleep a bit so that the top-most p4 change did not happen "now". Then
+# import the repo and make sure that the initial import has the same time
+# as the top-most change.
+test_expect_success 'initial import time from top change time' '
+ p4change=$(p4 -G changes -m 1 //depot/... | marshal_dump change) &&
+ p4time=$(p4 -G changes -m 1 //depot/... | marshal_dump time) &&
+ sleep 3 &&
+ "$GITP4" clone --dest="$git" //depot &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ gittime=$(git show -s --raw --pretty=format:%at HEAD) &&
+ echo $p4time $gittime &&
+ test $p4time = $gittime
+ )
+'
+
+# Rename a file and confirm that rename is not detected in P4.
+# Rename the new file again with detectRenames option enabled and confirm that
+# this is detected in P4.
+# Rename the new file again adding an extra line, configure a big threshold in
+# detectRenames and confirm that rename is not detected in P4.
+# Repeat, this time with a smaller threshold and confirm that the rename is
+# detected in P4.
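+# git-p4.detectRenames maps onto git diff-tree's -M option: "true" enables
+# rename detection at the default similarity, while a numeric value is passed
+# to -M as the similarity score, e.g.
+#	git config git-p4.detectRenames 80
+# A rename that is propagated shows up in "p4 filelog" as a "branch from" entry.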
+test_expect_success 'detect renames' '
+ "$GITP4" clone --dest="$git" //depot@all &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git config git-p4.skipSubmitEditCheck true &&
+
+ git mv file1 file4 &&
+ git commit -a -m "Rename file1 to file4" &&
+ git diff-tree -r -M HEAD &&
+ "$GITP4" submit &&
+ p4 filelog //depot/file4 &&
+ p4 filelog //depot/file4 | test_must_fail grep -q "branch from" &&
+
+ git mv file4 file5 &&
+ git commit -a -m "Rename file4 to file5" &&
+ git diff-tree -r -M HEAD &&
+ git config git-p4.detectRenames true &&
+ "$GITP4" submit &&
+ p4 filelog //depot/file5 &&
+ p4 filelog //depot/file5 | grep -q "branch from //depot/file4" &&
+
+ git mv file5 file6 &&
+ echo update >>file6 &&
+ git add file6 &&
+ git commit -a -m "Rename file5 to file6 with changes" &&
+ git diff-tree -r -M HEAD &&
+ level=$(git diff-tree -r -M HEAD | sed 1d | cut -f1 | cut -d" " -f5 | sed "s/R0*//") &&
+ test -n "$level" && test "$level" -gt 0 && test "$level" -lt 98 &&
+ git config git-p4.detectRenames $(($level + 2)) &&
+ "$GITP4" submit &&
+ p4 filelog //depot/file6 &&
+ p4 filelog //depot/file6 | test_must_fail grep -q "branch from" &&
+
+ git mv file6 file7 &&
+ echo update >>file7 &&
+ git add file7 &&
+ git commit -a -m "Rename file6 to file7 with changes" &&
+ git diff-tree -r -M HEAD &&
+ level=$(git diff-tree -r -M HEAD | sed 1d | cut -f1 | cut -d" " -f5 | sed "s/R0*//") &&
+ test -n "$level" && test "$level" -gt 2 && test "$level" -lt 100 &&
+ git config git-p4.detectRenames $(($level - 2)) &&
+ "$GITP4" submit &&
+ p4 filelog //depot/file7 &&
+ p4 filelog //depot/file7 | grep -q "branch from //depot/file6"
+ )
+'
+
+# Copy a file and confirm that copy is not detected in P4.
+# Copy a file with detectCopies option enabled and confirm that copy is not
+# detected in P4.
+# Modify and copy a file with detectCopies option enabled and confirm that copy
+# is detected in P4.
+# Copy a file with detectCopies and detectCopiesHarder options enabled and
+# confirm that copy is detected in P4.
+# Modify and copy a file, configure a bigger threshold in detectCopies and
+# confirm that copy is not detected in P4.
+# Modify and copy a file, configure a smaller threshold in detectCopies and
+# confirm that copy is detected in P4.
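+# git-p4.detectCopies works like detectRenames but maps onto -C ("true" or a
+# similarity score), and git-p4.detectCopiesHarder additionally passes
+# --find-copies-harder so that unmodified files are considered as copy sources.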
+test_expect_success 'detect copies' '
+ "$GITP4" clone --dest="$git" //depot@all &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git config git-p4.skipSubmitEditCheck true &&
+
+ cp file2 file8 &&
+ git add file8 &&
+ git commit -a -m "Copy file2 to file8" &&
+ git diff-tree -r -C HEAD &&
+ "$GITP4" submit &&
+ p4 filelog //depot/file8 &&
+ p4 filelog //depot/file8 | test_must_fail grep -q "branch from" &&
+
+ cp file2 file9 &&
+ git add file9 &&
+ git commit -a -m "Copy file2 to file9" &&
+ git diff-tree -r -C HEAD &&
+ git config git-p4.detectCopies true &&
+ "$GITP4" submit &&
+ p4 filelog //depot/file9 &&
+ p4 filelog //depot/file9 | test_must_fail grep -q "branch from" &&
+
+ echo "file2" >>file2 &&
+ cp file2 file10 &&
+ git add file2 file10 &&
+ git commit -a -m "Modify and copy file2 to file10" &&
+ git diff-tree -r -C HEAD &&
+ "$GITP4" submit &&
+ p4 filelog //depot/file10 &&
+ p4 filelog //depot/file10 | grep -q "branch from //depot/file" &&
+
+ cp file2 file11 &&
+ git add file11 &&
+ git commit -a -m "Copy file2 to file11" &&
+ git diff-tree -r -C --find-copies-harder HEAD &&
+ src=$(git diff-tree -r -C --find-copies-harder HEAD | sed 1d | cut -f2) &&
+ test "$src" = file10 &&
+ git config git-p4.detectCopiesHarder true &&
+ "$GITP4" submit &&
+ p4 filelog //depot/file11 &&
+ p4 filelog //depot/file11 | grep -q "branch from //depot/file" &&
+
+ cp file2 file12 &&
+ echo "some text" >>file12 &&
+ git add file12 &&
+ git commit -a -m "Copy file2 to file12 with changes" &&
+ git diff-tree -r -C --find-copies-harder HEAD &&
+ level=$(git diff-tree -r -C --find-copies-harder HEAD | sed 1d | cut -f1 | cut -d" " -f5 | sed "s/C0*//") &&
+ test -n "$level" && test "$level" -gt 0 && test "$level" -lt 98 &&
+ src=$(git diff-tree -r -C --find-copies-harder HEAD | sed 1d | cut -f2) &&
+ test "$src" = file10 &&
+ git config git-p4.detectCopies $(($level + 2)) &&
+ "$GITP4" submit &&
+ p4 filelog //depot/file12 &&
+ p4 filelog //depot/file12 | test_must_fail grep -q "branch from" &&
+
+ cp file2 file13 &&
+ echo "different text" >>file13 &&
+ git add file13 &&
+ git commit -a -m "Copy file2 to file13 with changes" &&
+ git diff-tree -r -C --find-copies-harder HEAD &&
+ level=$(git diff-tree -r -C --find-copies-harder HEAD | sed 1d | cut -f1 | cut -d" " -f5 | sed "s/C0*//") &&
+ test -n "$level" && test "$level" -gt 2 && test "$level" -lt 100 &&
+ src=$(git diff-tree -r -C --find-copies-harder HEAD | sed 1d | cut -f2) &&
+ test "$src" = file10 &&
+ git config git-p4.detectCopies $(($level - 2)) &&
+ "$GITP4" submit &&
+ p4 filelog //depot/file13 &&
+ p4 filelog //depot/file13 | grep -q "branch from //depot/file"
+ )
+'
+
+test_expect_success 'kill p4d' '
+ kill_p4d
+'
+
+test_done
+++ /dev/null
-#!/bin/sh
-
-test_description='git-p4 tests'
-
-. ./lib-git-p4.sh
-
-test_expect_success 'start p4d' '
- start_p4d
-'
-
-test_expect_success 'add p4 files' '
- (
- cd "$cli" &&
- echo file1 >file1 &&
- p4 add file1 &&
- p4 submit -d "file1" &&
- echo file2 >file2 &&
- p4 add file2 &&
- p4 submit -d "file2"
- )
-'
-
-test_expect_success 'basic git-p4 clone' '
- "$GITP4" clone --dest="$git" //depot &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- git log --oneline >lines &&
- test_line_count = 1 lines
- )
-'
-
-test_expect_success 'git-p4 clone @all' '
- "$GITP4" clone --dest="$git" //depot@all &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- git log --oneline >lines &&
- test_line_count = 2 lines
- )
-'
-
-test_expect_success 'git-p4 sync uninitialized repo' '
- test_create_repo "$git" &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- test_must_fail "$GITP4" sync
- )
-'
-
-#
-# Create a git repo by hand. Add a commit so that HEAD is valid.
-# Test imports a new p4 repository into a new git branch.
-#
-test_expect_success 'git-p4 sync new branch' '
- test_create_repo "$git" &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- test_commit head &&
- "$GITP4" sync --branch=refs/remotes/p4/depot //depot@all &&
- git log --oneline p4/depot >lines &&
- test_line_count = 2 lines
- )
-'
-
-test_expect_success 'exit when p4 fails to produce marshaled output' '
- badp4dir="$TRASH_DIRECTORY/badp4dir" &&
- mkdir "$badp4dir" &&
- test_when_finished "rm \"$badp4dir/p4\" && rmdir \"$badp4dir\"" &&
- cat >"$badp4dir"/p4 <<-EOF &&
- #!$SHELL_PATH
- exit 1
- EOF
- chmod 755 "$badp4dir"/p4 &&
- PATH="$badp4dir:$PATH" "$GITP4" clone --dest="$git" //depot >errs 2>&1 ; retval=$? &&
- test $retval -eq 1 &&
- test_must_fail grep -q Traceback errs
-'
-
-test_expect_success 'add p4 files with wildcards in the names' '
- (
- cd "$cli" &&
- echo file-wild-hash >file-wild#hash &&
- echo file-wild-star >file-wild\*star &&
- echo file-wild-at >file-wild@at &&
- echo file-wild-percent >file-wild%percent &&
- p4 add -f file-wild* &&
- p4 submit -d "file wildcards"
- )
-'
-
-test_expect_success 'wildcard files git-p4 clone' '
- "$GITP4" clone --dest="$git" //depot &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- test -f file-wild#hash &&
- test -f file-wild\*star &&
- test -f file-wild@at &&
- test -f file-wild%percent
- )
-'
-
-test_expect_success 'clone bare' '
- "$GITP4" clone --dest="$git" --bare //depot &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- test ! -d .git &&
- bare=`git config --get core.bare` &&
- test "$bare" = true
- )
-'
-
-p4_add_user() {
- name=$1 fullname=$2 &&
- p4 user -f -i <<-EOF &&
- User: $name
- Email: $name@localhost
- FullName: $fullname
- EOF
- p4 passwd -P secret $name
-}
-
-p4_grant_admin() {
- name=$1 &&
- {
- p4 protect -o &&
- echo " admin user $name * //depot/..."
- } | p4 protect -i
-}
-
-p4_check_commit_author() {
- file=$1 user=$2 &&
- p4 changes -m 1 //depot/$file | grep -q $user
-}
-
-make_change_by_user() {
- file=$1 name=$2 email=$3 &&
- echo "username: a change by $name" >>"$file" &&
- git add "$file" &&
- git commit --author "$name <$email>" -m "a change by $name"
-}
-
-# Test username support, submitting as user 'alice'
-test_expect_success 'preserve users' '
- p4_add_user alice Alice &&
- p4_add_user bob Bob &&
- p4_grant_admin alice &&
- "$GITP4" clone --dest="$git" //depot &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- echo "username: a change by alice" >>file1 &&
- echo "username: a change by bob" >>file2 &&
- git commit --author "Alice <alice@localhost>" -m "a change by alice" file1 &&
- git commit --author "Bob <bob@localhost>" -m "a change by bob" file2 &&
- git config git-p4.skipSubmitEditCheck true &&
- P4EDITOR=touch P4USER=alice P4PASSWD=secret "$GITP4" commit --preserve-user &&
- p4_check_commit_author file1 alice &&
- p4_check_commit_author file2 bob
- )
-'
-
-# Test username support, submitting as bob, who lacks admin rights. Should
-# not submit change to p4 (git diff should show deltas).
-test_expect_success 'refuse to preserve users without perms' '
- "$GITP4" clone --dest="$git" //depot &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- git config git-p4.skipSubmitEditCheck true &&
- echo "username-noperms: a change by alice" >>file1 &&
- git commit --author "Alice <alice@localhost>" -m "perms: a change by alice" file1 &&
- P4EDITOR=touch P4USER=bob P4PASSWD=secret test_must_fail "$GITP4" commit --preserve-user &&
- test_must_fail git diff --exit-code HEAD..p4/master
- )
-'
-
-# What happens with unknown author? Without allowMissingP4Users it should fail.
-test_expect_success 'preserve user where author is unknown to p4' '
- "$GITP4" clone --dest="$git" //depot &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- git config git-p4.skipSubmitEditCheck true &&
- echo "username-bob: a change by bob" >>file1 &&
- git commit --author "Bob <bob@localhost>" -m "preserve: a change by bob" file1 &&
- echo "username-unknown: a change by charlie" >>file1 &&
- git commit --author "Charlie <charlie@localhost>" -m "preserve: a change by charlie" file1 &&
- P4EDITOR=touch P4USER=alice P4PASSWD=secret test_must_fail "$GITP4" commit --preserve-user &&
- test_must_fail git diff --exit-code HEAD..p4/master &&
-
- echo "$0: repeat with allowMissingP4Users enabled" &&
- git config git-p4.allowMissingP4Users true &&
- git config git-p4.preserveUser true &&
- P4EDITOR=touch P4USER=alice P4PASSWD=secret "$GITP4" commit &&
- git diff --exit-code HEAD..p4/master &&
- p4_check_commit_author file1 alice
- )
-'
-
-# If we're *not* using --preserve-user, git-p4 should warn if we're submitting
-# changes that are not all ours.
-# Test: user in p4 and user unknown to p4.
-# Test: warning disabled and user is the same.
-test_expect_success 'not preserving user with mixed authorship' '
- "$GITP4" clone --dest="$git" //depot &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- git config git-p4.skipSubmitEditCheck true &&
- p4_add_user derek Derek &&
-
- make_change_by_user usernamefile3 Derek derek@localhost &&
- P4EDITOR=cat P4USER=alice P4PASSWD=secret "$GITP4" commit |\
- grep "git author derek@localhost does not match" &&
-
- make_change_by_user usernamefile3 Charlie charlie@localhost &&
- P4EDITOR=cat P4USER=alice P4PASSWD=secret "$GITP4" commit |\
- grep "git author charlie@localhost does not match" &&
-
- make_change_by_user usernamefile3 alice alice@localhost &&
- P4EDITOR=cat P4USER=alice P4PASSWD=secret "$GITP4" |\
- test_must_fail grep "git author.*does not match" &&
-
- git config git-p4.skipUserNameCheck true &&
- make_change_by_user usernamefile3 Charlie charlie@localhost &&
- P4EDITOR=cat P4USER=alice P4PASSWD=secret "$GITP4" commit |\
- test_must_fail grep "git author.*does not match" &&
-
- p4_check_commit_author usernamefile3 alice
- )
-'
-
-marshal_dump() {
- what=$1
- "$PYTHON_PATH" -c 'import marshal, sys; d = marshal.load(sys.stdin); print d["'$what'"]'
-}
-
-# Sleep a bit so that the top-most p4 change did not happen "now". Then
-# import the repo and make sure that the initial import has the same time
-# as the top-most change.
-test_expect_success 'initial import time from top change time' '
- p4change=$(p4 -G changes -m 1 //depot/... | marshal_dump change) &&
- p4time=$(p4 -G changes -m 1 //depot/... | marshal_dump time) &&
- sleep 3 &&
- "$GITP4" clone --dest="$git" //depot &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- gittime=$(git show -s --raw --pretty=format:%at HEAD) &&
- echo $p4time $gittime &&
- test $p4time = $gittime
- )
-'
-
-# Rename a file and confirm that rename is not detected in P4.
-# Rename the new file again with detectRenames option enabled and confirm that
-# this is detected in P4.
-# Rename the new file again adding an extra line, configure a big threshold in
-# detectRenames and confirm that rename is not detected in P4.
-# Repeat, this time with a smaller threshold and confirm that the rename is
-# detected in P4.
-test_expect_success 'detect renames' '
- "$GITP4" clone --dest="$git" //depot@all &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- git config git-p4.skipSubmitEditCheck true &&
-
- git mv file1 file4 &&
- git commit -a -m "Rename file1 to file4" &&
- git diff-tree -r -M HEAD &&
- "$GITP4" submit &&
- p4 filelog //depot/file4 &&
- p4 filelog //depot/file4 | test_must_fail grep -q "branch from" &&
-
- git mv file4 file5 &&
- git commit -a -m "Rename file4 to file5" &&
- git diff-tree -r -M HEAD &&
- git config git-p4.detectRenames true &&
- "$GITP4" submit &&
- p4 filelog //depot/file5 &&
- p4 filelog //depot/file5 | grep -q "branch from //depot/file4" &&
-
- git mv file5 file6 &&
- echo update >>file6 &&
- git add file6 &&
- git commit -a -m "Rename file5 to file6 with changes" &&
- git diff-tree -r -M HEAD &&
- level=$(git diff-tree -r -M HEAD | sed 1d | cut -f1 | cut -d" " -f5 | sed "s/R0*//") &&
- test -n "$level" && test "$level" -gt 0 && test "$level" -lt 98 &&
- git config git-p4.detectRenames $(($level + 2)) &&
- "$GITP4" submit &&
- p4 filelog //depot/file6 &&
- p4 filelog //depot/file6 | test_must_fail grep -q "branch from" &&
-
- git mv file6 file7 &&
- echo update >>file7 &&
- git add file7 &&
- git commit -a -m "Rename file6 to file7 with changes" &&
- git diff-tree -r -M HEAD &&
- level=$(git diff-tree -r -M HEAD | sed 1d | cut -f1 | cut -d" " -f5 | sed "s/R0*//") &&
- test -n "$level" && test "$level" -gt 2 && test "$level" -lt 100 &&
- git config git-p4.detectRenames $(($level - 2)) &&
- "$GITP4" submit &&
- p4 filelog //depot/file7 &&
- p4 filelog //depot/file7 | grep -q "branch from //depot/file6"
- )
-'
-
-# Copy a file and confirm that copy is not detected in P4.
-# Copy a file with detectCopies option enabled and confirm that copy is not
-# detected in P4.
-# Modify and copy a file with detectCopies option enabled and confirm that copy
-# is detected in P4.
-# Copy a file with detectCopies and detectCopiesHarder options enabled and
-# confirm that copy is detected in P4.
-# Modify and copy a file, configure a bigger threshold in detectCopies and
-# confirm that copy is not detected in P4.
-# Modify and copy a file, configure a smaller threshold in detectCopies and
-# confirm that copy is detected in P4.
-test_expect_success 'detect copies' '
- "$GITP4" clone --dest="$git" //depot@all &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- git config git-p4.skipSubmitEditCheck true &&
-
- cp file2 file8 &&
- git add file8 &&
- git commit -a -m "Copy file2 to file8" &&
- git diff-tree -r -C HEAD &&
- "$GITP4" submit &&
- p4 filelog //depot/file8 &&
- p4 filelog //depot/file8 | test_must_fail grep -q "branch from" &&
-
- cp file2 file9 &&
- git add file9 &&
- git commit -a -m "Copy file2 to file9" &&
- git diff-tree -r -C HEAD &&
- git config git-p4.detectCopies true &&
- "$GITP4" submit &&
- p4 filelog //depot/file9 &&
- p4 filelog //depot/file9 | test_must_fail grep -q "branch from" &&
-
- echo "file2" >>file2 &&
- cp file2 file10 &&
- git add file2 file10 &&
- git commit -a -m "Modify and copy file2 to file10" &&
- git diff-tree -r -C HEAD &&
- "$GITP4" submit &&
- p4 filelog //depot/file10 &&
- p4 filelog //depot/file10 | grep -q "branch from //depot/file" &&
-
- cp file2 file11 &&
- git add file11 &&
- git commit -a -m "Copy file2 to file11" &&
- git diff-tree -r -C --find-copies-harder HEAD &&
- src=$(git diff-tree -r -C --find-copies-harder HEAD | sed 1d | cut -f2) &&
- test "$src" = file10 &&
- git config git-p4.detectCopiesHarder true &&
- "$GITP4" submit &&
- p4 filelog //depot/file11 &&
- p4 filelog //depot/file11 | grep -q "branch from //depot/file" &&
-
- cp file2 file12 &&
- echo "some text" >>file12 &&
- git add file12 &&
- git commit -a -m "Copy file2 to file12 with changes" &&
- git diff-tree -r -C --find-copies-harder HEAD &&
- level=$(git diff-tree -r -C --find-copies-harder HEAD | sed 1d | cut -f1 | cut -d" " -f5 | sed "s/C0*//") &&
- test -n "$level" && test "$level" -gt 0 && test "$level" -lt 98 &&
- src=$(git diff-tree -r -C --find-copies-harder HEAD | sed 1d | cut -f2) &&
- test "$src" = file10 &&
- git config git-p4.detectCopies $(($level + 2)) &&
- "$GITP4" submit &&
- p4 filelog //depot/file12 &&
- p4 filelog //depot/file12 | test_must_fail grep -q "branch from" &&
-
- cp file2 file13 &&
- echo "different text" >>file13 &&
- git add file13 &&
- git commit -a -m "Copy file2 to file13 with changes" &&
- git diff-tree -r -C --find-copies-harder HEAD &&
- level=$(git diff-tree -r -C --find-copies-harder HEAD | sed 1d | cut -f1 | cut -d" " -f5 | sed "s/C0*//") &&
- test -n "$level" && test "$level" -gt 2 && test "$level" -lt 100 &&
- src=$(git diff-tree -r -C --find-copies-harder HEAD | sed 1d | cut -f2) &&
- test "$src" = file10 &&
- git config git-p4.detectCopies $(($level - 2)) &&
- "$GITP4" submit &&
- p4 filelog //depot/file13 &&
- p4 filelog //depot/file13 | grep -q "branch from //depot/file"
- )
-'
-
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
-test_done
echo file1 >file1 &&
echo file2 >file2 &&
p4 add file1 file2 &&
- p4 submit -d "branch1" &&
+ p4 submit -d "Create branch1" &&
p4 integrate //depot/branch1/... //depot/branch2/... &&
- p4 submit -d "branch2" &&
+ p4 submit -d "Integrate branch2 from branch1" &&
echo file3 >file3 &&
p4 add file3 &&
p4 submit -d "add file3 in branch1" &&
echo update >>file2 &&
p4 submit -d "update file2 in branch1" &&
p4 integrate //depot/branch1/... //depot/branch3/... &&
- p4 submit -d "branch3"
+ p4 submit -d "Integrate branch3 from branch1"
)
'
test -f file1 &&
test -f file2 &&
test -f file3 &&
- grep -q update file2 &&
+ grep update file2 &&
git reset --hard p4/depot/branch2 &&
test -f file1 &&
test -f file2 &&
test ! -f file3 &&
- test_must_fail grep -q update file2 &&
+ ! grep update file2 &&
git reset --hard p4/depot/branch3 &&
test -f file1 &&
test -f file2 &&
test -f file3 &&
- grep -q update file2 &&
+ grep update file2 &&
cd "$cli" &&
cd branch1 &&
p4 edit file2 &&
cd "$git" &&
git reset --hard p4/depot/branch1 &&
"$GITP4" rebase &&
- grep -q file2_ file2
+ grep file2_ file2
+ )
+'
+
+# Create a complex branch structure in the P4 depot to check that the branches
+# are cloned correctly. The branches are created from older changelists to check
+# that git-p4 correctly detects their origin changelists.
+# The final expected structure is:
+# `branch1
+# | `- file1
+# | `- file2 (updated)
+# | `- file3
+# `branch2
+# | `- file1
+# | `- file2
+# `branch3
+# | `- file1
+# | `- file2 (updated)
+# | `- file3
+# `branch4
+# | `- file1
+# | `- file2
+# `branch5
+# `- file1
+# `- file2
+# `- file3
+test_expect_success 'git-p4 add complex branches' '
+ test_when_finished cleanup_git &&
+ test_create_repo "$git" &&
+ (
+ cd "$cli" &&
+ changelist=$(p4 changes -m1 //depot/... | cut -d" " -f2) &&
+ changelist=$(($changelist - 5)) &&
+ p4 integrate //depot/branch1/...@$changelist //depot/branch4/... &&
+ p4 submit -d "Integrate branch4 from branch1@${changelist}" &&
+ changelist=$(($changelist + 2)) &&
+ p4 integrate //depot/branch1/...@$changelist //depot/branch5/... &&
+ p4 submit -d "Integrate branch5 from branch1@${changelist}"
+ )
+'
+
+# Configure branches through git-config and clone them. git-p4 will only be able
+# to clone the original structure if it is able to detect the origin changelist
+# of each branch.
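+# Each git-p4.branchList entry has the form "source:destination", meaning the
+# depot branch on the right was integrated from the one on the left; additional
+# entries are appended with "git config --add".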
+test_expect_success 'git-p4 clone complex branches' '
+ test_when_finished cleanup_git &&
+ test_create_repo "$git" &&
+ (
+ cd "$git" &&
+ git config git-p4.branchList branch1:branch2 &&
+ git config --add git-p4.branchList branch1:branch3 &&
+ git config --add git-p4.branchList branch1:branch4 &&
+ git config --add git-p4.branchList branch1:branch5 &&
+ "$GITP4" clone --dest=. --detect-branches //depot@all &&
+ git log --all --graph --decorate --stat &&
+ git reset --hard p4/depot/branch1 &&
+ test_path_is_file file1 &&
+ test_path_is_file file2 &&
+ test_path_is_file file3 &&
+ grep update file2 &&
+ git reset --hard p4/depot/branch2 &&
+ test_path_is_file file1 &&
+ test_path_is_file file2 &&
+ test_path_is_missing file3 &&
+ ! grep update file2 &&
+ git reset --hard p4/depot/branch3 &&
+ test_path_is_file file1 &&
+ test_path_is_file file2 &&
+ test_path_is_file file3 &&
+ grep update file2 &&
+ git reset --hard p4/depot/branch4 &&
+ test_path_is_file file1 &&
+ test_path_is_file file2 &&
+ test_path_is_missing file3 &&
+ ! grep update file2 &&
+ git reset --hard p4/depot/branch5 &&
+ test_path_is_file file1 &&
+ test_path_is_file file2 &&
+ test_path_is_file file3 &&
+ ! grep update file2 &&
+ test_path_is_missing .git/git-p4-tmp
)
'
--- /dev/null
+#!/bin/sh
+
+test_description='git-p4 transparency to shell metachars in filenames'
+
+. ./lib-git-p4.sh
+
+test_expect_success 'start p4d' '
+ start_p4d
+'
+
+test_expect_success 'init depot' '
+ (
+ cd "$cli" &&
+ echo file1 >file1 &&
+ p4 add file1 &&
+ p4 submit -d "file1"
+ )
+'
+
+test_expect_success 'shell metachars in filenames' '
+ "$GITP4" clone --dest="$git" //depot &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git config git-p4.skipSubmitEditCheck true &&
+ echo f1 >foo\$bar &&
+ git add foo\$bar &&
+ echo f2 >"file with spaces" &&
+ git add "file with spaces" &&
+ git commit -m "add files" &&
+ P4EDITOR=touch "$GITP4" submit
+ ) &&
+ (
+ cd "$cli" &&
+ p4 sync ... &&
+ test -e "file with spaces" &&
+ test -e "foo\$bar"
+ )
+'
+
+test_expect_success 'deleting with shell metachars' '
+ "$GITP4" clone --dest="$git" //depot &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git config git-p4.skipSubmitEditCheck true &&
+ git rm foo\$bar &&
+ git rm file\ with\ spaces &&
+ git commit -m "remove files" &&
+ P4EDITOR=touch "$GITP4" submit
+ ) &&
+ (
+ cd "$cli" &&
+ p4 sync ... &&
+ test ! -e "file with spaces" &&
+ test ! -e foo\$bar
+ )
+'
+
+# Create a branch with a shell metachar in its name
+#
+# 1. //depot/main
+# 2. //depot/branch$3
+
+test_expect_success 'branch with shell char' '
+ test_when_finished cleanup_git &&
+ test_create_repo "$git" &&
+ (
+ cd "$cli" &&
+
+ mkdir -p main &&
+
+ echo f1 >main/f1 &&
+ p4 add main/f1 &&
+ p4 submit -d "main/f1" &&
+
+ p4 integrate //depot/main/... //depot/branch\$3/... &&
+ p4 submit -d "integrate main to branch\$3" &&
+
+ echo f1 >branch\$3/shell_char_branch_file &&
+ p4 add branch\$3/shell_char_branch_file &&
+ p4 submit -d "branch\$3/shell_char_branch_file" &&
+
+ p4 branch -i <<-EOF &&
+ Branch: branch\$3
+ View: //depot/main/... //depot/branch\$3/...
+ EOF
+
+ p4 edit main/f1 &&
+ echo "a change" >> main/f1 &&
+ p4 submit -d "a change" main/f1 &&
+
+ p4 integrate -b branch\$3 &&
+ p4 resolve -am branch\$3/... &&
+ p4 submit -d "integrate main to branch\$3" &&
+
+ cd "$git" &&
+
+ git config git-p4.branchList main:branch\$3 &&
+ "$GITP4" clone --dest=. --detect-branches //depot@all &&
+ git log --all --graph --decorate --stat &&
+ git reset --hard p4/depot/branch\$3 &&
+ test -f shell_char_branch_file &&
+ test -f f1
+ )
+'
+
+test_expect_success 'kill p4d' '
+ kill_p4d
+'
+
+test_done
+++ /dev/null
-#!/bin/sh
-
-test_description='git-p4 transparency to shell metachars in filenames'
-
-. ./lib-git-p4.sh
-
-test_expect_success 'start p4d' '
- start_p4d
-'
-
-test_expect_success 'init depot' '
- (
- cd "$cli" &&
- echo file1 >file1 &&
- p4 add file1 &&
- p4 submit -d "file1"
- )
-'
-
-test_expect_success 'shell metachars in filenames' '
- "$GITP4" clone --dest="$git" //depot &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- git config git-p4.skipSubmitEditCheck true &&
- echo f1 >foo\$bar &&
- git add foo\$bar &&
- echo f2 >"file with spaces" &&
- git add "file with spaces" &&
- git commit -m "add files" &&
- P4EDITOR=touch "$GITP4" submit
- ) &&
- (
- cd "$cli" &&
- p4 sync ... &&
- test -e "file with spaces" &&
- test -e "foo\$bar"
- )
-'
-
-test_expect_success 'deleting with shell metachars' '
- "$GITP4" clone --dest="$git" //depot &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- git config git-p4.skipSubmitEditCheck true &&
- git rm foo\$bar &&
- git rm file\ with\ spaces &&
- git commit -m "remove files" &&
- P4EDITOR=touch "$GITP4" submit
- ) &&
- (
- cd "$cli" &&
- p4 sync ... &&
- test ! -e "file with spaces" &&
- test ! -e foo\$bar
- )
-'
-
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
-test_done
--- /dev/null
+#!/bin/sh
+
+test_description='git-p4 p4 label tests'
+
+. ./lib-git-p4.sh
+
+test_expect_success 'start p4d' '
+ start_p4d
+'
+
+# Basic p4 label tests.
+#
+# Note: can't have more than one label per commit - others
+# are silently discarded.
+#
+test_expect_success 'basic p4 labels' '
+ test_when_finished cleanup_git &&
+ (
+ cd "$cli" &&
+ mkdir -p main &&
+
+ echo f1 >main/f1 &&
+ p4 add main/f1 &&
+ p4 submit -d "main/f1" &&
+
+ echo f2 >main/f2 &&
+ p4 add main/f2 &&
+ p4 submit -d "main/f2" &&
+
+ echo f3 >main/file_with_\$metachar &&
+ p4 add main/file_with_\$metachar &&
+ p4 submit -d "file with metachar" &&
+
+ p4 tag -l tag_f1_only main/f1 &&
+ p4 tag -l tag_with\$_shell_char main/... &&
+
+ echo f4 >main/f4 &&
+ p4 add main/f4 &&
+ p4 submit -d "main/f4" &&
+
+ p4 label -i <<-EOF &&
+ Label: long_label
+ Description:
+ A Label first line
+ A Label second line
+ View: //depot/...
+ EOF
+
+ p4 tag -l long_label ... &&
+
+ p4 labels ... &&
+
+ "$GITP4" clone --dest="$git" --detect-labels //depot@all &&
+ cd "$git" &&
+
+ git tag &&
+ git tag >taglist &&
+ test_line_count = 3 taglist &&
+
+ cd main &&
+ git checkout tag_tag_f1_only &&
+ ! test -f f2 &&
+ git checkout tag_tag_with\$_shell_char &&
+ test -f f1 && test -f f2 && test -f file_with_\$metachar &&
+
+ git show tag_long_label | grep -q "A Label second line"
+ )
+'
+
+# Test some label corner cases:
+#
+# - two labels on the same changelist; both should be available
+# - a tag that is only on one file; this kind of tag
+# cannot be imported (at least not easily).
+
+test_expect_failure 'two labels on the same changelist' '
+ test_when_finished cleanup_git &&
+ (
+ cd "$cli" &&
+ mkdir -p main &&
+
+ p4 edit main/f1 main/f2 &&
+ echo "hello world" >main/f1 &&
+ echo "not in the tag" >main/f2 &&
+ p4 submit -d "main/f[12]: testing two labels" &&
+
+ p4 tag -l tag_f1_1 main/... &&
+ p4 tag -l tag_f1_2 main/... &&
+
+ p4 labels ... &&
+
+ "$GITP4" clone --dest="$git" --detect-labels //depot@all &&
+ cd "$git" &&
+
+ git tag | grep tag_f1 &&
+ git tag | grep -q tag_f1_1 &&
+ git tag | grep -q tag_f1_2 &&
+
+ cd main &&
+
+ git checkout tag_tag_f1_1 &&
+ ls &&
+ test -f f1 &&
+
+ git checkout tag_tag_f1_2 &&
+ ls &&
+ test -f f1
+ )
+'
+
+test_expect_success 'kill p4d' '
+ kill_p4d
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description='git-p4 skipSubmitEdit config variables'
+
+. ./lib-git-p4.sh
+
+test_expect_success 'start p4d' '
+ start_p4d
+'
+
+test_expect_success 'init depot' '
+ (
+ cd "$cli" &&
+ echo file1 >file1 &&
+ p4 add file1 &&
+ p4 submit -d "change 1"
+ )
+'
+
+# this works because EDITOR is set to :
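+# (":" is a no-op editor, so the submit template is left unmodified; git-p4
+# then asks whether to go ahead anyway, and the tests below answer on stdin:
+# "y" submits, "n" aborts.)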
+test_expect_success 'no config, unedited, say yes' '
+ "$GITP4" clone --dest="$git" //depot &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ echo line >>file1 &&
+ git commit -a -m "change 2" &&
+ echo y | "$GITP4" submit &&
+ p4 changes //depot/... >wc &&
+ test_line_count = 2 wc
+ )
+'
+
+test_expect_success 'no config, unedited, say no' '
+ "$GITP4" clone --dest="$git" //depot &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ echo line >>file1 &&
+ git commit -a -m "change 3 (not really)" &&
+ printf "bad response\nn\n" | "$GITP4" submit &&
+ p4 changes //depot/... >wc &&
+ test_line_count = 2 wc
+ )
+'
+
+test_expect_success 'skipSubmitEdit' '
+ "$GITP4" clone --dest="$git" //depot &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git config git-p4.skipSubmitEdit true &&
+ # will fail if editor is even invoked
+ git config core.editor /bin/false &&
+ echo line >>file1 &&
+ git commit -a -m "change 3" &&
+ "$GITP4" submit &&
+ p4 changes //depot/... >wc &&
+ test_line_count = 3 wc
+ )
+'
+
+test_expect_success 'skipSubmitEditCheck' '
+ "$GITP4" clone --dest="$git" //depot &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git config git-p4.skipSubmitEditCheck true &&
+ echo line >>file1 &&
+ git commit -a -m "change 4" &&
+ "$GITP4" submit &&
+ p4 changes //depot/... >wc &&
+ test_line_count = 4 wc
+ )
+'
+
+# check the normal case, where the template really is edited
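+# git-p4 appears to treat the template as edited when its modification time
+# changes, so the fake editor below sleeps for a second and touches the file
+# to make sure the timestamp moves.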
+test_expect_success 'no config, edited' '
+ "$GITP4" clone --dest="$git" //depot &&
+ test_when_finished cleanup_git &&
+ ed="$TRASH_DIRECTORY/ed.sh" &&
+ test_when_finished "rm \"$ed\"" &&
+ cat >"$ed" <<-EOF &&
+ #!$SHELL_PATH
+ sleep 1
+ touch "\$1"
+ exit 0
+ EOF
+ chmod 755 "$ed" &&
+ (
+ cd "$git" &&
+ echo line >>file1 &&
+ git commit -a -m "change 5" &&
+ EDITOR="\"$ed\"" "$GITP4" submit &&
+ p4 changes //depot/... >wc &&
+ test_line_count = 5 wc
+ )
+'
+
+test_expect_success 'kill p4d' '
+ kill_p4d
+'
+
+test_done
+++ /dev/null
-#!/bin/sh
-
-test_description='git-p4 skipSubmitEdit config variables'
-
-. ./lib-git-p4.sh
-
-test_expect_success 'start p4d' '
- start_p4d
-'
-
-test_expect_success 'init depot' '
- (
- cd "$cli" &&
- echo file1 >file1 &&
- p4 add file1 &&
- p4 submit -d "change 1"
- )
-'
-
-# this works because EDITOR is set to :
-test_expect_success 'no config, unedited, say yes' '
- "$GITP4" clone --dest="$git" //depot &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- echo line >>file1 &&
- git commit -a -m "change 2" &&
- echo y | "$GITP4" submit &&
- p4 changes //depot/... >wc &&
- test_line_count = 2 wc
- )
-'
-
-test_expect_success 'no config, unedited, say no' '
- "$GITP4" clone --dest="$git" //depot &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- echo line >>file1 &&
- git commit -a -m "change 3 (not really)" &&
- printf "bad response\nn\n" | "$GITP4" submit &&
- p4 changes //depot/... >wc &&
- test_line_count = 2 wc
- )
-'
-
-test_expect_success 'skipSubmitEdit' '
- "$GITP4" clone --dest="$git" //depot &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- git config git-p4.skipSubmitEdit true &&
- # will fail if editor is even invoked
- git config core.editor /bin/false &&
- echo line >>file1 &&
- git commit -a -m "change 3" &&
- "$GITP4" submit &&
- p4 changes //depot/... >wc &&
- test_line_count = 3 wc
- )
-'
-
-test_expect_success 'skipSubmitEditCheck' '
- "$GITP4" clone --dest="$git" //depot &&
- test_when_finished cleanup_git &&
- (
- cd "$git" &&
- git config git-p4.skipSubmitEditCheck true &&
- echo line >>file1 &&
- git commit -a -m "change 4" &&
- "$GITP4" submit &&
- p4 changes //depot/... >wc &&
- test_line_count = 4 wc
- )
-'
-
-# check the normal case, where the template really is edited
-test_expect_success 'no config, edited' '
- "$GITP4" clone --dest="$git" //depot &&
- test_when_finished cleanup_git &&
- ed="$TRASH_DIRECTORY/ed.sh" &&
- test_when_finished "rm \"$ed\"" &&
- cat >"$ed" <<-EOF &&
- #!$SHELL_PATH
- sleep 1
- touch "\$1"
- exit 0
- EOF
- chmod 755 "$ed" &&
- (
- cd "$git" &&
- echo line >>file1 &&
- git commit -a -m "change 5" &&
- EDITOR="\"$ed\"" "$GITP4" submit &&
- p4 changes //depot/... >wc &&
- test_line_count = 5 wc
- )
-'
-
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
-test_done
--- /dev/null
+#!/bin/sh
+
+test_description='git-p4 options'
+
+. ./lib-git-p4.sh
+
+test_expect_success 'start p4d' '
+ start_p4d
+'
+
+test_expect_success 'init depot' '
+ (
+ cd "$cli" &&
+ echo file1 >file1 &&
+ p4 add file1 &&
+ p4 submit -d "change 1" &&
+ echo file2 >file2 &&
+ p4 add file2 &&
+ p4 submit -d "change 2" &&
+ echo file3 >file3 &&
+ p4 add file3 &&
+ p4 submit -d "change 3"
+ )
+'
+
+test_expect_success 'clone no --git-dir' '
+ test_must_fail "$GITP4" clone --git-dir=xx //depot
+'
+
+test_expect_success 'clone --branch' '
+ "$GITP4" clone --branch=refs/remotes/p4/sb --dest="$git" //depot &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git ls-files >files &&
+ test_line_count = 0 files &&
+ test_path_is_file .git/refs/remotes/p4/sb
+ )
+'
+
+test_expect_success 'clone --changesfile' '
+ cf="$TRASH_DIRECTORY/cf" &&
+ test_when_finished "rm \"$cf\"" &&
+ printf "1\n3\n" >"$cf" &&
+ "$GITP4" clone --changesfile="$cf" --dest="$git" //depot &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git log --oneline p4/master >lines &&
+		test_line_count = 2 lines &&
+ test_path_is_file file1 &&
+ test_path_is_missing file2 &&
+ test_path_is_file file3
+ )
+'
+
+test_expect_success 'clone --changesfile, @all' '
+ cf="$TRASH_DIRECTORY/cf" &&
+ test_when_finished "rm \"$cf\"" &&
+ printf "1\n3\n" >"$cf" &&
+ test_must_fail "$GITP4" clone --changesfile="$cf" --dest="$git" //depot@all
+'
+
+# imports both master and p4/master in refs/heads
+# requires --import-local on sync to find p4 refs/heads
+# does not update master on sync, just p4/master
+test_expect_success 'clone/sync --import-local' '
+ "$GITP4" clone --import-local --dest="$git" //depot@1,2 &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git log --oneline refs/heads/master >lines &&
+ test_line_count = 2 lines &&
+ git log --oneline refs/heads/p4/master >lines &&
+ test_line_count = 2 lines &&
+ test_must_fail "$GITP4" sync &&
+
+ "$GITP4" sync --import-local &&
+ git log --oneline refs/heads/master >lines &&
+ test_line_count = 2 lines &&
+ git log --oneline refs/heads/p4/master >lines &&
+ test_line_count = 3 lines
+ )
+'
+
+test_expect_success 'clone --max-changes' '
+ "$GITP4" clone --dest="$git" --max-changes 2 //depot@all &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ git log --oneline refs/heads/master >lines &&
+ test_line_count = 2 lines
+ )
+'
+
+test_expect_success 'clone --keep-path' '
+ (
+ cd "$cli" &&
+ mkdir -p sub/dir &&
+ echo f4 >sub/dir/f4 &&
+ p4 add sub/dir/f4 &&
+ p4 submit -d "change 4"
+ ) &&
+ "$GITP4" clone --dest="$git" --keep-path //depot/sub/dir@all &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ test_path_is_missing f4 &&
+ test_path_is_file sub/dir/f4
+ ) &&
+ cleanup_git &&
+ "$GITP4" clone --dest="$git" //depot/sub/dir@all &&
+ (
+ cd "$git" &&
+ test_path_is_file f4 &&
+ test_path_is_missing sub/dir/f4
+ )
+'
+
+# clone --use-client-spec must still be given a depot path; when it is, the
+# files are rearranged according to the client-spec view lines that match
+# that depot path.
+# XXX: should clone/sync just use the client spec exactly, rather
+# than needing depot paths?
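+# With --use-client-spec (or git-p4.useClientSpec set to true), git-p4 lays the
+# files out in git according to the client view; e.g. the mapping
+#	//depot/sub/... //client2/bus/...
+# used below makes //depot/sub/dir/f4 show up as bus/dir/f4 in the clone.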
+test_expect_success 'clone --use-client-spec' '
+ (
+ # big usage message
+ exec >/dev/null &&
+ test_must_fail "$GITP4" clone --dest="$git" --use-client-spec
+ ) &&
+ cli2="$TRASH_DIRECTORY/cli2" &&
+ mkdir -p "$cli2" &&
+ test_when_finished "rmdir \"$cli2\"" &&
+ (
+ cd "$cli2" &&
+ p4 client -i <<-EOF
+ Client: client2
+ Description: client2
+ Root: $cli2
+ View: //depot/sub/... //client2/bus/...
+ EOF
+ ) &&
+ P4CLIENT=client2 &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --dest="$git" --use-client-spec //depot/... &&
+ (
+ cd "$git" &&
+ test_path_is_file bus/dir/f4 &&
+ test_path_is_missing file1
+ ) &&
+ cleanup_git &&
+
+ # same thing again, this time with variable instead of option
+ mkdir "$git" &&
+ (
+ cd "$git" &&
+ git init &&
+ git config git-p4.useClientSpec true &&
+ "$GITP4" sync //depot/... &&
+ git checkout -b master p4/master &&
+ test_path_is_file bus/dir/f4 &&
+ test_path_is_missing file1
+ )
+'
+
+test_expect_success 'kill p4d' '
+ kill_p4d
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description='git-p4 submit'
+
+. ./lib-git-p4.sh
+
+test_expect_success 'start p4d' '
+ start_p4d
+'
+
+test_expect_success 'init depot' '
+ (
+ cd "$cli" &&
+ echo file1 >file1 &&
+ p4 add file1 &&
+ p4 submit -d "change 1"
+ )
+'
+
+test_expect_success 'submit with no client dir' '
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --dest="$git" //depot &&
+ (
+ cd "$git" &&
+ echo file2 >file2 &&
+ git add file2 &&
+ git commit -m "git commit 2" &&
+ rm -rf "$cli" &&
+ git config git-p4.skipSubmitEdit true &&
+ "$GITP4" submit
+ )
+'
+
+# make two commits, but tell git-p4 to submit only the commits after HEAD^
+test_expect_success 'submit --origin' '
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --dest="$git" //depot &&
+ (
+ cd "$git" &&
+ test_commit "file3" &&
+ test_commit "file4" &&
+ git config git-p4.skipSubmitEdit true &&
+ "$GITP4" submit --origin=HEAD^
+ ) &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ test_path_is_missing "file3.t" &&
+ test_path_is_file "file4.t"
+ )
+'
+
+test_expect_success 'submit with allowSubmit' '
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --dest="$git" //depot &&
+ (
+ cd "$git" &&
+ test_commit "file5" &&
+ git config git-p4.skipSubmitEdit true &&
+ git config git-p4.allowSubmit "nobranch" &&
+ test_must_fail "$GITP4" submit &&
+ git config git-p4.allowSubmit "nobranch,master" &&
+ "$GITP4" submit
+ )
+'
+
+test_expect_success 'submit with master branch name from argv' '
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --dest="$git" //depot &&
+ (
+ cd "$git" &&
+ test_commit "file6" &&
+ git config git-p4.skipSubmitEdit true &&
+ test_must_fail "$GITP4" submit nobranch &&
+ git branch otherbranch &&
+ git reset --hard HEAD^ &&
+ test_commit "file7" &&
+ "$GITP4" submit otherbranch
+ ) &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ test_path_is_file "file6.t" &&
+ test_path_is_missing "file7.t"
+ )
+'
+
+test_expect_success 'kill p4d' '
+ kill_p4d
+'
+
+test_done
+++ /dev/null
-#!/bin/sh
-
-test_description='git-p4 submit'
-
-. ./lib-git-p4.sh
-
-test_expect_success 'start p4d' '
- start_p4d
-'
-
-test_expect_success 'init depot' '
- (
- cd "$cli" &&
- echo file1 >file1 &&
- p4 add file1 &&
- p4 submit -d "change 1"
- )
-'
-
-test_expect_success 'submit with no client dir' '
- test_when_finished cleanup_git &&
- "$GITP4" clone --dest="$git" //depot &&
- (
- cd "$git" &&
- echo file2 >file2 &&
- git add file2 &&
- git commit -m "git commit 2" &&
- rm -rf "$cli" &&
- git config git-p4.skipSubmitEdit true &&
- "$GITP4" submit
- )
-'
-
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
-test_done
+++ /dev/null
-#!/bin/sh
-
-test_description='git-p4 relative chdir'
-
-. ./lib-git-p4.sh
-
-test_expect_success 'start p4d' '
- start_p4d
-'
-
-test_expect_success 'init depot' '
- (
- cd "$cli" &&
- echo file1 >file1 &&
- p4 add file1 &&
- p4 submit -d "change 1"
- )
-'
-
-# P4 reads from P4CONFIG file to find its server params, if the
-# environment variable is set
-test_expect_success 'P4CONFIG and absolute dir clone' '
- printf "P4PORT=$P4PORT\nP4CLIENT=$P4CLIENT\n" >p4config &&
- test_when_finished "rm \"$TRASH_DIRECTORY/p4config\"" &&
- test_when_finished cleanup_git &&
- (
- P4CONFIG=p4config && export P4CONFIG &&
- unset P4PORT P4CLIENT &&
- "$GITP4" clone --verbose --dest="$git" //depot
- )
-'
-
-# same thing, but with relative directory name, note missing $ on --dest
-test_expect_success 'P4CONFIG and relative dir clone' '
- printf "P4PORT=$P4PORT\nP4CLIENT=$P4CLIENT\n" >p4config &&
- test_when_finished "rm \"$TRASH_DIRECTORY/p4config\"" &&
- test_when_finished cleanup_git &&
- (
- P4CONFIG=p4config && export P4CONFIG &&
- unset P4PORT P4CLIENT &&
- "$GITP4" clone --verbose --dest="git" //depot
- )
-'
-
-test_expect_success 'kill p4d' '
- kill_p4d
-'
-
-test_done
--- /dev/null
+#!/bin/sh
+
+test_description='git-p4 relative chdir'
+
+. ./lib-git-p4.sh
+
+test_expect_success 'start p4d' '
+ start_p4d
+'
+
+test_expect_success 'init depot' '
+ (
+ cd "$cli" &&
+ echo file1 >file1 &&
+ p4 add file1 &&
+ p4 submit -d "change 1"
+ )
+'
+
+# If the P4CONFIG environment variable is set, p4 reads its server parameters
+# (such as P4PORT and P4CLIENT) from the file it names.
+test_expect_success 'P4CONFIG and absolute dir clone' '
+ printf "P4PORT=$P4PORT\nP4CLIENT=$P4CLIENT\n" >p4config &&
+ test_when_finished "rm \"$TRASH_DIRECTORY/p4config\"" &&
+ test_when_finished cleanup_git &&
+ (
+ P4CONFIG=p4config && export P4CONFIG &&
+ unset P4PORT P4CLIENT &&
+ "$GITP4" clone --verbose --dest="$git" //depot
+ )
+'
+
+# same thing, but with relative directory name, note missing $ on --dest
+test_expect_success 'P4CONFIG and relative dir clone' '
+ printf "P4PORT=$P4PORT\nP4CLIENT=$P4CLIENT\n" >p4config &&
+ test_when_finished "rm \"$TRASH_DIRECTORY/p4config\"" &&
+ test_when_finished cleanup_git &&
+ (
+ P4CONFIG=p4config && export P4CONFIG &&
+ unset P4PORT P4CLIENT &&
+ "$GITP4" clone --verbose --dest="git" //depot
+ )
+'
+
+test_expect_success 'kill p4d' '
+ kill_p4d
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description='git-p4 client view'
+
+. ./lib-git-p4.sh
+
+test_expect_success 'start p4d' '
+ start_p4d
+'
+
+#
+# Construct a client with this list of View lines
+#
+client_view() {
+ (
+ cat <<-EOF &&
+ Client: client
+ Description: client
+ Root: $cli
+ View:
+ EOF
+ for arg ; do
+ printf "\t$arg\n"
+ done
+ ) | p4 client -i
+}
+
+#
+# Verify that exactly these files exist.  The caller must first have
+# written the list of files actually present into the file "files".
+#
+check_files_exist() {
+ ok=0 &&
+	num=$# &&
+ for arg ; do
+ test_path_is_file "$arg" &&
+ ok=$(($ok + 1))
+ done &&
+ test $ok -eq $num &&
+ test_line_count = $num files
+}
+
+#
+# Sync up the p4 client, make sure the given files (and only
+# those) exist.
+#
+client_verify() {
+ (
+ cd "$cli" &&
+ p4 sync &&
+ find . -type f ! -name files >files &&
+ check_files_exist "$@"
+ )
+}
+
+#
+# Make sure that exactly the named files exist in the git repository.
+#
+git_verify() {
+ (
+ cd "$git" &&
+ git ls-files >files &&
+ check_files_exist "$@"
+ )
+}
+
+# //depot
+# - dir1
+# - file11
+# - file12
+# - dir2
+# - file21
+# - file22
+test_expect_success 'init depot' '
+ (
+ cd "$cli" &&
+ for d in 1 2 ; do
+ mkdir -p dir$d &&
+ for f in 1 2 ; do
+ echo dir$d/file$d$f >dir$d/file$d$f &&
+ p4 add dir$d/file$d$f &&
+ p4 submit -d "dir$d/file$d$f"
+ done
+ done &&
+ find . -type f ! -name files >files &&
+ check_files_exist dir1/file11 dir1/file12 \
+ dir2/file21 dir2/file22
+ )
+'
+
+# %% is written as %%%% so it survives the printf inside client_view
+test_expect_success 'unsupported view wildcard %%n' '
+ client_view "//depot/%%%%1/sub/... //client/sub/%%%%1/..." &&
+ test_when_finished cleanup_git &&
+ test_must_fail "$GITP4" clone --use-client-spec --dest="$git" //depot
+'
+
+test_expect_success 'unsupported view wildcard *' '
+ client_view "//depot/*/bar/... //client/*/bar/..." &&
+ test_when_finished cleanup_git &&
+ test_must_fail "$GITP4" clone --use-client-spec --dest="$git" //depot
+'
+
+test_expect_success 'wildcard ... only supported at end of spec 1' '
+ client_view "//depot/.../file11 //client/.../file11" &&
+ test_when_finished cleanup_git &&
+ test_must_fail "$GITP4" clone --use-client-spec --dest="$git" //depot
+'
+
+test_expect_success 'wildcard ... only supported at end of spec 2' '
+ client_view "//depot/.../a/... //client/.../a/..." &&
+ test_when_finished cleanup_git &&
+ test_must_fail "$GITP4" clone --use-client-spec --dest="$git" //depot
+'
+
+test_expect_success 'basic map' '
+ client_view "//depot/dir1/... //client/cli1/..." &&
+ files="cli1/file11 cli1/file12" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files
+'
+
+test_expect_success 'client view with no mappings' '
+ client_view &&
+ client_verify &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify
+'
+
+test_expect_success 'single file map' '
+ client_view "//depot/dir1/file11 //client/file11" &&
+ files="file11" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files
+'
+
+test_expect_success 'later mapping takes precedence (entire repo)' '
+ client_view "//depot/dir1/... //client/cli1/..." \
+ "//depot/... //client/cli2/..." &&
+ files="cli2/dir1/file11 cli2/dir1/file12
+ cli2/dir2/file21 cli2/dir2/file22" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files
+'
+
+test_expect_success 'later mapping takes precedence (partial repo)' '
+ client_view "//depot/dir1/... //client/..." \
+ "//depot/dir2/... //client/..." &&
+ files="file21 file22" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files
+'
+
+# Reading the view backwards,
+#   dir2 goes to cli12
+#   dir1 cannot go to cli12, since that client path was already filled by dir2
+#   dir1 does not go to cli3 either: the later dir1 rule matched it, but
+#   its client path was already taken
+test_expect_success 'depot path matching rejected client path' '
+ client_view "//depot/dir1/... //client/cli3/..." \
+ "//depot/dir1/... //client/cli12/..." \
+ "//depot/dir2/... //client/cli12/..." &&
+ files="cli12/file21 cli12/file22" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files
+'
+
+# since both have the same //client/..., the exclusion
+# rule keeps everything out
+test_expect_success 'exclusion wildcard, client rhs same (odd)' '
+ client_view "//depot/... //client/..." \
+ "-//depot/dir2/... //client/..." &&
+ client_verify &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify
+'
+
+test_expect_success 'exclusion wildcard, client rhs different (normal)' '
+ client_view "//depot/... //client/..." \
+ "-//depot/dir2/... //client/dir2/..." &&
+ files="dir1/file11 dir1/file12" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files
+'
+
+test_expect_success 'exclusion single file' '
+ client_view "//depot/... //client/..." \
+ "-//depot/dir2/file22 //client/file22" &&
+ files="dir1/file11 dir1/file12 dir2/file21" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files
+'
+
+test_expect_success 'overlay wildcard' '
+ client_view "//depot/dir1/... //client/cli/..." \
+		    "+//depot/dir2/... //client/cli/..." &&
+ files="cli/file11 cli/file12 cli/file21 cli/file22" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files
+'
+
+test_expect_success 'overlay single file' '
+ client_view "//depot/dir1/... //client/cli/..." \
+ "+//depot/dir2/file21 //client/cli/file21" &&
+ files="cli/file11 cli/file12 cli/file21" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files
+'
+
+test_expect_success 'exclusion with later inclusion' '
+ client_view "//depot/... //client/..." \
+ "-//depot/dir2/... //client/dir2/..." \
+ "//depot/dir2/... //client/dir2incl/..." &&
+ files="dir1/file11 dir1/file12 dir2incl/file21 dir2incl/file22" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files
+'
+
+test_expect_success 'quotes on rhs only' '
+ client_view "//depot/dir1/... \"//client/cdir 1/...\"" &&
+ client_verify "cdir 1/file11" "cdir 1/file12" &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify "cdir 1/file11" "cdir 1/file12"
+'
+
+#
+# What happens when two files of the same name are overlaid on top of
+# each other?  The last-listed file should take precedence.
+#
+# //depot
+# - dir1
+# - file11
+# - file12
+# - filecollide
+# - dir2
+# - file21
+# - file22
+# - filecollide
+#
+test_expect_success 'overlay collision setup' '
+ client_view "//depot/... //client/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir1/filecollide >dir1/filecollide &&
+ p4 add dir1/filecollide &&
+ p4 submit -d dir1/filecollide &&
+ echo dir2/filecollide >dir2/filecollide &&
+ p4 add dir2/filecollide &&
+ p4 submit -d dir2/filecollide
+ )
+'
+
+test_expect_success 'overlay collision 1 to 2' '
+ client_view "//depot/dir1/... //client/..." \
+ "+//depot/dir2/... //client/..." &&
+ files="file11 file12 file21 file22 filecollide" &&
+ echo dir2/filecollide >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/filecollide &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files &&
+ test_cmp actual "$git"/filecollide
+'
+
+test_expect_failure 'overlay collision 2 to 1' '
+ client_view "//depot/dir2/... //client/..." \
+ "+//depot/dir1/... //client/..." &&
+ files="file11 file12 file21 file22 filecollide" &&
+ echo dir1/filecollide >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/filecollide &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files &&
+ test_cmp actual "$git"/filecollide
+'
+
+test_expect_success 'overlay collision delete 2' '
+ client_view "//depot/... //client/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ p4 delete dir2/filecollide &&
+ p4 submit -d "remove dir2/filecollide"
+ )
+'
+
+# no filecollide: the dir2 copy was deleted, and dir2 wins the overlay
+test_expect_failure 'overlay collision 1 to 2, but 2 deleted' '
+ client_view "//depot/dir1/... //client/..." \
+ "+//depot/dir2/... //client/..." &&
+ files="file11 file12 file21 file22" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files
+'
+
+test_expect_success 'overlay collision update 1' '
+ client_view "//depot/dir1/... //client/dir1/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ p4 open dir1/filecollide &&
+ echo dir1/filecollide update >dir1/filecollide &&
+ p4 submit -d "update dir1/filecollide"
+ )
+'
+
+# still no filecollide, dir2 still wins with the deletion even though the
+# change to dir1 is more recent
+test_expect_failure 'overlay collision 1 to 2, but 2 deleted, then 1 updated' '
+ client_view "//depot/dir1/... //client/..." \
+ "+//depot/dir2/... //client/..." &&
+ files="file11 file12 file21 file22" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files
+'
+
+test_expect_success 'overlay collision delete filecollides' '
+ client_view "//depot/... //client/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ p4 delete dir1/filecollide dir2/filecollide &&
+ p4 submit -d "remove filecollides"
+ )
+'
+
+#
+# Overlays as part of sync, rather than initial checkout:
+# 1. add a file in dir1
+# 2. sync to include it
+# 3. add same file in dir2
+# 4. sync, make sure content switches as dir2 has priority
+# 5. add another file in dir1
+# 6. sync
+# 7. add/delete same file in dir2
+# 8. sync, make sure it disappears, again dir2 wins
+# 9. cleanup
+#
+# //depot
+# - dir1
+# - file11
+# - file12
+# - colA
+# - colB
+# - dir2
+# - file21
+# - file22
+# - colA
+# - colB
+#
+test_expect_success 'overlay sync: add colA in dir1' '
+ client_view "//depot/dir1/... //client/dir1/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir1/colA >dir1/colA &&
+ p4 add dir1/colA &&
+ p4 submit -d dir1/colA
+ )
+'
+
+test_expect_success 'overlay sync: initial git checkout' '
+ client_view "//depot/dir1/... //client/..." \
+ "+//depot/dir2/... //client/..." &&
+ files="file11 file12 file21 file22 colA" &&
+ echo dir1/colA >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/colA &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files &&
+ test_cmp actual "$git"/colA
+'
+
+test_expect_success 'overlay sync: add colA in dir2' '
+ client_view "//depot/dir2/... //client/dir2/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir2/colA >dir2/colA &&
+ p4 add dir2/colA &&
+ p4 submit -d dir2/colA
+ )
+'
+
+test_expect_success 'overlay sync: colA content switch' '
+ client_view "//depot/dir1/... //client/..." \
+ "+//depot/dir2/... //client/..." &&
+ files="file11 file12 file21 file22 colA" &&
+ echo dir2/colA >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/colA &&
+ (
+ cd "$git" &&
+ "$GITP4" sync --use-client-spec &&
+ git merge --ff-only p4/master
+ ) &&
+ git_verify $files &&
+ test_cmp actual "$git"/colA
+'
+
+test_expect_success 'overlay sync: add colB in dir1' '
+ client_view "//depot/dir1/... //client/dir1/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir1/colB >dir1/colB &&
+ p4 add dir1/colB &&
+ p4 submit -d dir1/colB
+ )
+'
+
+test_expect_success 'overlay sync: colB appears' '
+ client_view "//depot/dir1/... //client/..." \
+ "+//depot/dir2/... //client/..." &&
+ files="file11 file12 file21 file22 colA colB" &&
+ echo dir1/colB >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/colB &&
+ (
+ cd "$git" &&
+ "$GITP4" sync --use-client-spec &&
+ git merge --ff-only p4/master
+ ) &&
+ git_verify $files &&
+ test_cmp actual "$git"/colB
+'
+
+test_expect_success 'overlay sync: add/delete colB in dir2' '
+ client_view "//depot/dir2/... //client/dir2/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir2/colB >dir2/colB &&
+ p4 add dir2/colB &&
+ p4 submit -d dir2/colB &&
+ p4 delete dir2/colB &&
+ p4 submit -d "delete dir2/colB"
+ )
+'
+
+test_expect_success 'overlay sync: colB disappears' '
+ client_view "//depot/dir1/... //client/..." \
+ "+//depot/dir2/... //client/..." &&
+ files="file11 file12 file21 file22 colA" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ "$GITP4" sync --use-client-spec &&
+ git merge --ff-only p4/master
+ ) &&
+ git_verify $files
+'
+
+test_expect_success 'overlay sync: cleanup' '
+ client_view "//depot/... //client/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ p4 delete dir1/colA dir2/colA dir1/colB &&
+ p4 submit -d "remove overlay sync files"
+ )
+'
+
+#
+# Overlay tests again, but swapped so dir1 has priority.
+# 1. add a file in dir1
+# 2. sync to include it
+# 3. add same file in dir2
+# 4. sync, make sure content does not switch
+# 5. add another file in dir1
+# 6. sync
+# 7. add/delete same file in dir2
+# 8. sync, make sure it is still there
+# 9. cleanup
+#
+# //depot
+# - dir1
+# - file11
+# - file12
+# - colA
+# - colB
+# - dir2
+# - file21
+# - file22
+# - colA
+# - colB
+#
+test_expect_success 'overlay sync swap: add colA in dir1' '
+ client_view "//depot/dir1/... //client/dir1/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir1/colA >dir1/colA &&
+ p4 add dir1/colA &&
+ p4 submit -d dir1/colA
+ )
+'
+
+test_expect_success 'overlay sync swap: initial git checkout' '
+ client_view "//depot/dir2/... //client/..." \
+ "+//depot/dir1/... //client/..." &&
+ files="file11 file12 file21 file22 colA" &&
+ echo dir1/colA >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/colA &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify $files &&
+ test_cmp actual "$git"/colA
+'
+
+test_expect_success 'overlay sync swap: add colA in dir2' '
+ client_view "//depot/dir2/... //client/dir2/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir2/colA >dir2/colA &&
+ p4 add dir2/colA &&
+ p4 submit -d dir2/colA
+ )
+'
+
+test_expect_failure 'overlay sync swap: colA no content switch' '
+ client_view "//depot/dir2/... //client/..." \
+ "+//depot/dir1/... //client/..." &&
+ files="file11 file12 file21 file22 colA" &&
+ echo dir1/colA >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/colA &&
+ (
+ cd "$git" &&
+ "$GITP4" sync --use-client-spec &&
+ git merge --ff-only p4/master
+ ) &&
+ git_verify $files &&
+ test_cmp actual "$git"/colA
+'
+
+test_expect_success 'overlay sync swap: add colB in dir1' '
+ client_view "//depot/dir1/... //client/dir1/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir1/colB >dir1/colB &&
+ p4 add dir1/colB &&
+ p4 submit -d dir1/colB
+ )
+'
+
+test_expect_success 'overlay sync swap: colB appears' '
+ client_view "//depot/dir2/... //client/..." \
+ "+//depot/dir1/... //client/..." &&
+ files="file11 file12 file21 file22 colA colB" &&
+ echo dir1/colB >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/colB &&
+ (
+ cd "$git" &&
+ "$GITP4" sync --use-client-spec &&
+ git merge --ff-only p4/master
+ ) &&
+ git_verify $files &&
+ test_cmp actual "$git"/colB
+'
+
+test_expect_success 'overlay sync swap: add/delete colB in dir2' '
+ client_view "//depot/dir2/... //client/dir2/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ echo dir2/colB >dir2/colB &&
+ p4 add dir2/colB &&
+ p4 submit -d dir2/colB &&
+ p4 delete dir2/colB &&
+ p4 submit -d "delete dir2/colB"
+ )
+'
+
+test_expect_failure 'overlay sync swap: colB no change' '
+ client_view "//depot/dir2/... //client/..." \
+ "+//depot/dir1/... //client/..." &&
+ files="file11 file12 file21 file22 colA colB" &&
+ echo dir1/colB >actual &&
+ client_verify $files &&
+ test_cmp actual "$cli"/colB &&
+ test_when_finished cleanup_git &&
+ (
+ cd "$git" &&
+ "$GITP4" sync --use-client-spec &&
+ git merge --ff-only p4/master
+ ) &&
+ git_verify $files &&
+ test_cmp actual "$cli"/colB
+'
+
+test_expect_success 'overlay sync swap: cleanup' '
+ client_view "//depot/... //client/..." &&
+ (
+ cd "$cli" &&
+ p4 sync &&
+ p4 delete dir1/colA dir2/colA dir1/colB &&
+ p4 submit -d "remove overlay sync files"
+ )
+'
+
+#
+# Rename directories to test quoting in depot-side mappings
+# //depot
+# - "dir 1"
+# - file11
+# - file12
+# - "dir 2"
+# - file21
+# - file22
+#
+test_expect_success 'rename files to introduce spaces' '
+ client_view "//depot/... //client/..." &&
+ client_verify dir1/file11 dir1/file12 \
+ dir2/file21 dir2/file22 &&
+ (
+ cd "$cli" &&
+ p4 open dir1/... &&
+ p4 move dir1/... "dir 1"/... &&
+ p4 open dir2/... &&
+ p4 move dir2/... "dir 2"/... &&
+ p4 submit -d "rename with spaces"
+ ) &&
+ client_verify "dir 1/file11" "dir 1/file12" \
+ "dir 2/file21" "dir 2/file22"
+'
+
+test_expect_success 'quotes on lhs only' '
+ client_view "\"//depot/dir 1/...\" //client/cdir1/..." &&
+ files="cdir1/file11 cdir1/file12" &&
+ client_verify $files &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+	git_verify $files
+'
+
+test_expect_success 'quotes on both sides' '
+ client_view "\"//depot/dir 1/...\" \"//client/cdir 1/...\"" &&
+ client_verify "cdir 1/file11" "cdir 1/file12" &&
+ test_when_finished cleanup_git &&
+ "$GITP4" clone --use-client-spec --dest="$git" //depot &&
+ git_verify "cdir 1/file11" "cdir 1/file12"
+'
+
+test_expect_success 'kill p4d' '
+ kill_p4d
+'
+
+test_done
fi
exec 5>&1
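+# Keep a duplicate of the original standard input on fd 6.  Tests run with
+# stdin redirected from /dev/null (see test_eval_), but interactive helpers
+# such as test_pause can still read from fd 6.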
+exec 6<&0
if test "$verbose" = "t"
then
exec 4>&2 3>&1
export GIT_COMMITTER_DATE GIT_AUTHOR_DATE
}
+# Stop execution and start a shell. This is useful for debugging tests and
+# only makes sense together with "-v".
+#
+# Be sure to remove all invocations of this command before submitting.
+
+test_pause () {
+ if test "$verbose" = t; then
+ "$SHELL_PATH" <&6 >&3 2>&4
+ else
+ error >&5 "test_pause requires --verbose"
+ fi
+}
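+#
+# A typical invocation, with placeholder commands, looks like:
+#
+#	test_expect_success 'tricky test' '
+#		do_setup &&
+#		test_pause &&
+#		do_check
+#	'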
+
# Call test_commit with the arguments "<message> [<file> [<contents>]]"
#
# This will commit a file with the given contents and the given commit
test_eval_ () {
# This is a separate function because some tests use
# "return" to end a test_expect_success block early.
- eval >&3 2>&4 "$*"
+ eval </dev/null >&3 2>&4 "$*"
}
test_run_ () {
}
my $master_out = new IO::Pty;
my $master_err = new IO::Pty;
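+# Put both ptys into raw mode so the terminal layer does not translate
+# line endings or otherwise rewrite the child's output before the test
+# sees it.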
+$master_out->set_raw();
+$master_err->set_raw();
+$master_out->slave->set_raw();
+$master_err->slave->set_raw();
my $pid = start_child(\@ARGV, $master_out->slave, $master_err->slave);
close $master_out->slave;
close $master_err->slave;
return o;
}
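+/*
+ * Peel a chain of tags down to the object it ultimately refers to,
+ * parsing only the tags themselves along the way; unlike deref_tag(),
+ * the final destination object is never parsed or verified to exist.
+ */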
+struct object *deref_tag_noverify(struct object *o)
+{
+ while (o && o->type == OBJ_TAG) {
+ o = parse_object(o->sha1);
+ if (o && o->type == OBJ_TAG && ((struct tag *)o)->tagged)
+ o = ((struct tag *)o)->tagged;
+ else
+ o = NULL;
+ }
+ return o;
+}
+
struct tag *lookup_tag(const unsigned char *sha1)
{
struct object *obj = lookup_object(sha1);
extern int parse_tag_buffer(struct tag *item, const void *data, unsigned long size);
extern int parse_tag(struct tag *item);
extern struct object *deref_tag(struct object *, const char *, int);
+extern struct object *deref_tag_noverify(struct object *);
extern size_t parse_signature(const char *buf, unsigned long size);
#endif /* TAG_H */
int main(int argc, char *argv[])
{
struct strbuf sb = STRBUF_INIT;
- struct trp_root root = { ~0 };
+ struct trp_root root = { ~0U };
uint32_t item;
if (argc != 1)
rsync.argv = args;
rsync.stdout_to_stderr = 1;
args[0] = "rsync";
- args[1] = (transport->verbose > 0) ? "-rv" : "-r";
+ args[1] = (transport->verbose > 1) ? "-rv" : "-r";
args[2] = buf.buf;
args[3] = temp_dir.buf;
args[4] = NULL;
rsync.argv = args;
rsync.stdout_to_stderr = 1;
args[0] = "rsync";
- args[1] = (transport->verbose > 0) ? "-rv" : "-r";
+ args[1] = (transport->verbose > 1) ? "-rv" : "-r";
args[2] = "--ignore-existing";
args[3] = "--exclude";
args[4] = "info";
args[i++] = "-a";
if (flags & TRANSPORT_PUSH_DRY_RUN)
args[i++] = "--dry-run";
- if (transport->verbose > 0)
+ if (transport->verbose > 1)
args[i++] = "-v";
args[i++] = "--ignore-existing";
args[i++] = "--exclude";
} else if (!strcmp(name, TRANS_OPT_DEPTH)) {
if (!value)
opts->depth = 0;
- else
- opts->depth = atoi(value);
+ else {
+ char *end;
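+		/*
+		 * Unlike the atoi() call this replaces, strtol() lets us
+		 * detect and reject trailing junk in the value.
+		 */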
+ opts->depth = strtol(value, &end, 0);
+ if (*end)
+ die("transport: invalid depth option '%s'", value);
+ }
return 0;
}
return 1;
args.lock_pack = 1;
args.use_thin_pack = data->options.thin;
args.include_tag = data->options.followtags;
- args.verbose = (transport->verbose > 0);
+ args.verbose = (transport->verbose > 1);
args.quiet = (transport->verbose < 0);
args.no_progress = !transport->progress;
args.depth = data->options.depth;
void transport_set_verbosity(struct transport *transport, int verbosity,
int force_progress)
{
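+	/*
+	 * A single -v already marks the transport itself as verbose; the
+	 * helpers it drives (rsync, fetch-pack, ...) add their own chatter
+	 * only when transport->verbose > 1, i.e. with "-v -v".
+	 */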
- if (verbosity >= 2)
+ if (verbosity >= 1)
transport->verbose = verbosity <= 3 ? verbosity : 3;
if (verbosity < 0)
transport->verbose = -1;
diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
diff_opts.single_follow = opt->pathspec.raw[0];
diff_opts.break_opt = opt->break_opt;
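+	/* carry the user's rename threshold (-M<n>) into the rename-following diff */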
+ diff_opts.rename_score = opt->rename_score;
paths[0] = NULL;
diff_tree_setup_paths(paths, &diff_opts);
if (diff_setup_done(&diff_opts) < 0)
/*
* Match all directories. We'll try to match files
* later on.
+	 * max_depth is ignored, but we may consider supporting it in the
+	 * future; see
+	 * http://thread.gmane.org/gmane.comp.version-control.git/163757/focus=163840
*/
if (ps->recursive && S_ISDIR(entry->mode))
return entry_interesting;
return fd;
}
-static void unix_sockaddr_init(struct sockaddr_un *sa, const char *path)
+static int chdir_len(const char *orig, int len)
+{
+ char *path = xmemdupz(orig, len);
+ int r = chdir(path);
+ free(path);
+ return r;
+}
+
+struct unix_sockaddr_context {
+ char orig_dir[PATH_MAX];
+};
+
+static void unix_sockaddr_cleanup(struct unix_sockaddr_context *ctx)
+{
+ if (!ctx->orig_dir[0])
+ return;
+ /*
+ * If we fail, we can't just return an error, since we have
+ * moved the cwd of the whole process, which could confuse calling
+ * code. We are better off to just die.
+ */
+ if (chdir(ctx->orig_dir) < 0)
+ die("unable to restore original working directory");
+}
+
+static int unix_sockaddr_init(struct sockaddr_un *sa, const char *path,
+ struct unix_sockaddr_context *ctx)
{
int size = strlen(path) + 1;
- if (size > sizeof(sa->sun_path))
- die("socket path is too long to fit in sockaddr");
+
+ ctx->orig_dir[0] = '\0';
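+	/*
+	 * sun_path is very short on many systems (often around 100 bytes).
+	 * If the whole path does not fit, chdir() into its directory and
+	 * use only the final component; unix_sockaddr_cleanup() restores
+	 * the original working directory afterwards.
+	 */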
+ if (size > sizeof(sa->sun_path)) {
+ const char *slash = find_last_dir_sep(path);
+ const char *dir;
+
+ if (!slash) {
+ errno = ENAMETOOLONG;
+ return -1;
+ }
+
+ dir = path;
+ path = slash + 1;
+ size = strlen(path) + 1;
+ if (size > sizeof(sa->sun_path)) {
+ errno = ENAMETOOLONG;
+ return -1;
+ }
+
+ if (!getcwd(ctx->orig_dir, sizeof(ctx->orig_dir))) {
+ errno = ENAMETOOLONG;
+ return -1;
+ }
+ if (chdir_len(dir, slash - dir) < 0)
+ return -1;
+ }
+
memset(sa, 0, sizeof(*sa));
sa->sun_family = AF_UNIX;
memcpy(sa->sun_path, path, size);
+ return 0;
}
int unix_stream_connect(const char *path)
{
- int fd;
+ int fd, saved_errno;
struct sockaddr_un sa;
+ struct unix_sockaddr_context ctx;
- unix_sockaddr_init(&sa, path);
- fd = unix_stream_socket();
- if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
- close(fd);
+ if (unix_sockaddr_init(&sa, path, &ctx) < 0)
return -1;
- }
+ fd = unix_stream_socket();
+ if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
+ goto fail;
+ unix_sockaddr_cleanup(&ctx);
return fd;
+
+fail:
+ saved_errno = errno;
+ unix_sockaddr_cleanup(&ctx);
+ close(fd);
+ errno = saved_errno;
+ return -1;
}
int unix_stream_listen(const char *path)
{
- int fd;
+ int fd, saved_errno;
struct sockaddr_un sa;
+ struct unix_sockaddr_context ctx;
- unix_sockaddr_init(&sa, path);
+ if (unix_sockaddr_init(&sa, path, &ctx) < 0)
+ return -1;
fd = unix_stream_socket();
unlink(path);
- if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
- close(fd);
- return -1;
- }
+ if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
+ goto fail;
- if (listen(fd, 5) < 0) {
- close(fd);
- return -1;
- }
+ if (listen(fd, 5) < 0)
+ goto fail;
+ unix_sockaddr_cleanup(&ctx);
return fd;
+
+fail:
+ saved_errno = errno;
+ unix_sockaddr_cleanup(&ctx);
+ close(fd);
+ errno = saved_errno;
+ return -1;
}
write_str_in_full(debug_fd, "#S\n");
for (;;) {
struct object *o;
+ const char *features;
unsigned char sha1_buf[20];
len = packet_read_line(0, line, sizeof(line));
reset_timeout();
get_sha1_hex(line+5, sha1_buf))
die("git upload-pack: protocol error, "
"expected to get sha, not '%s'", line);
- if (strstr(line+45, "multi_ack_detailed"))
+
+ features = line + 45;
+
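+	/*
+	 * Look for capabilities as whole, delimited tokens rather than with
+	 * strstr(), so that one feature name appearing inside another name
+	 * or inside a value cannot produce a false match.
+	 */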
+ if (parse_feature_request(features, "multi_ack_detailed"))
multi_ack = 2;
- else if (strstr(line+45, "multi_ack"))
+ else if (parse_feature_request(features, "multi_ack"))
multi_ack = 1;
- if (strstr(line+45, "no-done"))
+ if (parse_feature_request(features, "no-done"))
no_done = 1;
- if (strstr(line+45, "thin-pack"))
+ if (parse_feature_request(features, "thin-pack"))
use_thin_pack = 1;
- if (strstr(line+45, "ofs-delta"))
+ if (parse_feature_request(features, "ofs-delta"))
use_ofs_delta = 1;
- if (strstr(line+45, "side-band-64k"))
+ if (parse_feature_request(features, "side-band-64k"))
use_sideband = LARGE_PACKET_MAX;
- else if (strstr(line+45, "side-band"))
+ else if (parse_feature_request(features, "side-band"))
use_sideband = DEFAULT_PACKET_MAX;
- if (strstr(line+45, "no-progress"))
+ if (parse_feature_request(features, "no-progress"))
no_progress = 1;
- if (strstr(line+45, "include-tag"))
+ if (parse_feature_request(features, "include-tag"))
use_include_tag = 1;
o = lookup_object(sha1_buf);
static const char *capabilities = "multi_ack thin-pack side-band"
" side-band-64k ofs-delta shallow no-progress"
" include-tag multi_ack_detailed";
- struct object *o = parse_object(sha1);
+ struct object *o = lookup_unknown_object(sha1);
const char *refname_nons = strip_namespace(refname);
- if (!o)
- die("git upload-pack: cannot find object %s:", sha1_to_hex(sha1));
+ if (o->type == OBJ_NONE) {
+ o->type = sha1_object_info(sha1, NULL);
+ if (o->type < 0)
+ die("git upload-pack: cannot find object %s:", sha1_to_hex(sha1));
+ }
if (capabilities)
packet_write(1, "%s %s%c%s%s\n", sha1_to_hex(sha1), refname_nons,
nr_our_refs++;
}
if (o->type == OBJ_TAG) {
- o = deref_tag(o, refname, 0);
+ o = deref_tag_noverify(o);
if (o)
packet_write(1, "%s %s^{}\n", sha1_to_hex(o->sha1), refname_nons);
}
static void repo_write_dirent(const uint32_t *path, uint32_t mode,
uint32_t content_offset, uint32_t del)
{
- uint32_t name, revision, dir_o = ~0, parent_dir_o = ~0;
+ uint32_t name, revision, dir_o = ~0U, parent_dir_o = ~0U;
struct repo_dir *dir;
struct repo_dirent *key;
struct repo_dirent *dent = NULL;
#include "obj_pool.h"
#include "string_pool.h"
-static struct trp_root tree = { ~0 };
+static struct trp_root tree = { ~0U };
struct node {
uint32_t offset;
uint32_t pool_tok_seq(uint32_t sz, uint32_t *seq, const char *delim, char *str)
{
char *context = NULL;
- uint32_t token = ~0;
+ uint32_t token = ~0U;
uint32_t length;
if (sz == 0)
static int xdl_emit_common(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
xdemitconf_t const *xecfg) {
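+	/*
+	 * Emit unchanged lines from the second (post-image) file.  When
+	 * whitespace differences are being ignored, the two sides need not
+	 * be byte-identical, and the post-image matches what the reader
+	 * ends up with after the change.
+	 */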
- xdfile_t *xdf = &xe->xdf1;
+ xdfile_t *xdf = &xe->xdf2;
const char *rchg = xdf->rchg;
long ix;
/*
* Emit pre-context.
*/
- for (; s1 < xch->i1; s1++)
- if (xdl_emit_record(&xe->xdf1, s1, " ", ecb) < 0)
+ for (; s2 < xch->i2; s2++)
+ if (xdl_emit_record(&xe->xdf2, s2, " ", ecb) < 0)
return -1;
for (s1 = xch->i1, s2 = xch->i2;; xch = xch->next) {
* Merge previous with current change atom.
*/
for (; s1 < xch->i1 && s2 < xch->i2; s1++, s2++)
- if (xdl_emit_record(&xe->xdf1, s1, " ", ecb) < 0)
+ if (xdl_emit_record(&xe->xdf2, s2, " ", ecb) < 0)
return -1;
/*
/*
* Emit post-context.
*/
- for (s1 = xche->i1 + xche->chg1; s1 < e1; s1++)
- if (xdl_emit_record(&xe->xdf1, s1, " ", ecb) < 0)
+ for (s2 = xche->i2 + xche->chg2; s2 < e2; s2++)
+ if (xdl_emit_record(&xe->xdf2, s2, " ", ecb) < 0)
return -1;
}