PenaltyBreakFirstLessLess: 0
PenaltyBreakString: 10
PenaltyExcessCharacter: 100
-PenaltyReturnTypeOnItsOwnLine: 5
+PenaltyReturnTypeOnItsOwnLine: 60
# Don't sort #include's
SortIncludes: false
- Use Git's gettext wrappers to make the user interface
translatable. See "Marking strings for translation" in po/README.
+ - Variables and functions local to a given source file should be marked
+ with "static". Variables that are visible to other source files
+ must be declared with "extern" in header files. However, function
+ declarations should not use "extern", as that is already the default.
+
For Perl programs:
- Most of the C guidelines above apply.
TECH_DOCS += technical/hash-function-transition
TECH_DOCS += technical/http-protocol
TECH_DOCS += technical/index-format
+TECH_DOCS += technical/long-running-process-protocol
TECH_DOCS += technical/pack-format
TECH_DOCS += technical/pack-heuristics
TECH_DOCS += technical/pack-protocol
--- /dev/null
+Git v2.16.2 Release Notes
+=========================
+
+Fixes since v2.16.1
+-------------------
+
+ * An old regression in "git describe --all $annotated_tag^0" has been
+ fixed.
+
+ * "git svn dcommit" did not take into account the fact that a
+ svn+ssh:// URL with a username@ (typically used for pushing) refers
+ to the same SVN repository without the username@ and failed when
+   the svn.pushmergeinfo option is set.
+
+ * "git merge -Xours/-Xtheirs" learned to use our/their version when
+   resolving conflicting updates to a symbolic link.
+
+ * "git clone $there $here" is allowed even when the $here directory exists
+ as long as it is an empty directory, but the command incorrectly
+ removed it upon a failure of the operation.
+
+ * "git stash -- <pathspec>" incorrectly blew away untracked files in
+ the directory that matched the pathspec, which has been corrected.
+
+ * "git add -p" was taught to ignore local changes to submodules as
+ they do not interfere with the partial addition of regular changes
+ anyway.
+
+
+Also contains various documentation updates and code clean-ups.
* "diff" family of commands learned "--find-object=<object-id>" option
to limit the findings to changes that involve the named object.
- (merge 4d8c51aa19 sb/diff-blobfind-pickaxe later to maint).
+
+ * "git format-patch" learned to give 72 columns to diffstat, which is
+ consistent with other line length limits the subcommand uses for
+ its output meant for e-mails.
+
+ * The log from "git daemon" can be redirected with a new option; one
+ relevant use case is to send the log to standard error (instead of
+ syslog) when running it from inetd.
+
+ * "git rebase" learned to take "--allow-empty-message" option.
+
+ * "git am" has learned the "--quit" option, in addition to the
+ existing "--abort" option; having the pair mirrors a few other
+ commands like "rebase" and "cherry-pick".
+
+ * "git worktree add" learned to run the post-checkout hook, just like
+ "git clone" runs it upon the initial checkout.
Performance, Internal Implementation, Development Support etc.
* More perf tests for threaded grep
- (merge 7b31b55db1 ab/perf-grep-threads later to maint).
* "perf" test output can be sent to codespeed server.
(merge 19cf57a92e cc/codespeed later to maint).
+ * The build procedure for the perl/ part has been greatly simplified by
+ weaning ourselves off of MakeMaker.
+
+ * In preparation for implementing narrow/partial clone, the machinery
+ for checking object connectivity used by gc and fsck has been
+ taught that a missing object is OK when it is referenced by a
+   packfile specially marked as coming from a trusted repository that
+   promises to make such objects available on-demand and lazily.
+
+ * The machinery to clone & fetch, which in turn involves packing and
+ unpacking objects, has been told how to omit certain objects using
+ the filtering mechanism introduced by another topic. It now knows
+ to mark the resulting pack as a promisor pack to tolerate missing
+   objects, laying the foundation for "narrow" clones.
+
+ * The first step to getting rid of the mru API and using the
+ doubly-linked list API directly instead.
+
+ * Retire the mru API, as it does not give enough abstraction over
+   the underlying list API to be worth it.
+
+ * Rewrite two more "git submodule" subcommands in C.
+
+ * The tracing machinery learned to report tweaking of environment
+ variables as well.
+ (merge 090a09272a nd/trace-with-env later to maint).
+
+ * Update Coccinelle rules to catch and optimize strbuf_addf(&buf, "%s", str)
+ (merge cd9a4b6d93 rs/strbuf-cocci-workaround later to maint).
+
+ * Prevent "clang-format" from breaking line after function return type.
+ (merge a3715d43e8 po/clang-format-functype-weight later to maint).
+
+ * The sequencer infrastructure is shared across "git cherry-pick",
+ "git rebase -i", etc., and has always spawned "git commit" when it
+ needs to create a commit. It has been taught to do so internally,
+ when able, by reusing the codepath "git commit" itself uses, which
+   gives a performance boost of a few tens of percent in some sample
+ scenarios.
+
+ * Push the submodule version of the collision-detecting SHA-1 hash
+ implementation a bit harder on builders.
+
+ * Avoid mmapping small files while using packed refs (especially ones
+ with zero size, which would cause later munmap() to fail).
+ (merge ba41a8b600 kg/packed-ref-cache-fix later to maint).
+
+ * Conversion from uchar[20] to struct object_id continues.
+
+ * More tests for wildmatch functions.
+
+ * The code to binary search starting from a fan-out table (which is
+ how the packfile is indexed with object names) has been refactored
+ into a reusable helper.
Also contains various documentation updates and code clean-ups.
* An old regression in "git describe --all $annotated_tag^0" has been
fixed.
- (merge 1bba00130a dk/describe-all-output-fix later to maint).
* "git status" after moving a path in the working tree (hence making
it appear "removed") and then adding with the -N option (hence
making that appear "added") detected it as a rename, but did not
report the old and new pathnames correctly.
- (merge 176ea74793 nd/ita-wt-renames-in-status later to maint).
* "git svn dcommit" did not take into account the fact that a
svn+ssh:// URL with a username@ (typically used for pushing) refers
to the same SVN repository without the username@ and failed when
   the svn.pushmergeinfo option is set.
- (merge 8aaed892fd jm/svn-pushmergeinfo-fix later to maint).
* API clean-up around revision traversal.
- (merge 6fcec2f9ae rs/lose-leak-pending later to maint).
* "git merge -Xours/-Xtheirs" learned to use our/their version when
   resolving conflicting updates to a symbolic link.
- (merge fd48b46474 jc/merge-symlink-ours-theirs later to maint).
 * "git clone $there $here" is allowed even when the $here directory exists
as long as it is an empty directory, but the command incorrectly
removed it upon a failure of the operation.
- (merge d45420c1c8 jk/abort-clone-with-existing-dest later to maint).
* "git commit --fixup" did not allow "-m<message>" option to be used
at the same time; allow it to annotate the resulting commit with more
text.
- (merge 30884c9afc ab/commit-m-with-fixup later to maint).
* When resetting the working tree files recursively, the working trees
of submodules are now also reset to match.
- (merge 7dcc1f4df8 sb/submodule-update-reset-fix later to maint).
* "git stash -- <pathspec>" incorrectly blew away untracked files in
the directory that matched the pathspec, which has been corrected.
- (merge bba067d2fa tg/stash-with-pathspec-fix later to maint).
* Instead of maintaining home-grown email address parsing code, ship
a copy of reasonably recent Mail::Address to be used as a fallback
* "git add -p" was taught to ignore local changes to submodules as
they do not interfere with the partial addition of regular changes
anyway.
- (merge 12434efc1d nd/add-i-ignore-submodules later to maint).
+
+ * Avoid showing a warning message in the middle of a line of "git
+ diff" output.
+ (merge 4e056c989f nd/diff-flush-before-warning later to maint).
+
+ * The http tracing code, often used to debug connection issues,
+ learned to redact potentially sensitive information from its output
+ so that it can be more safely sharable.
+ (merge 8ba18e6fa4 jt/http-redact-cookies later to maint).
+
+ * Crash fix for a corner case where an error codepath tried to unlock
+   what it had not acquired a lock on.
+ (merge 81fcb698e0 mr/packed-ref-store-fix later to maint).
+
+ * The split-index mode had a few corner case bugs fixed.
+ (merge ae59a4e44f tg/split-index-fixes later to maint).
+
+ * Assorted fixes to "git daemon".
+ (merge ed15e58efe jk/daemon-fixes later to maint).
+
+ * Completion of "git merge -s<strategy>" (in contrib/) did not work
+   well in a non-C locale.
+ (merge 7cc763aaa3 nd/list-merge-strategy later to maint).
+
+ * Workaround for segfault with more recent versions of SVN.
+ (merge 7f6f75e97a ew/svn-branch-segfault-fix later to maint).
+
+ * Plug recently introduced leaks in fsck.
+ (merge ba3a08ca0e jt/fsck-code-cleanup later to maint).
+
+ * "git pull --rebase" did not pass verbosity setting down when
+ recursing into a submodule.
+ (merge a56771a668 sb/pull-rebase-submodule later to maint).
+
+ * The way "git reset --hard" reports the commit the updated HEAD
+   points at is made consistent with the way the commit title is
+ generated by the other parts of the system. This matters when the
+ title is spread across physically multiple lines.
+ (merge 1cf823fb68 tg/reset-hard-show-head-with-pretty later to maint).
+
+ * Test fixes.
+ (merge 63b1a175ee sg/test-i18ngrep later to maint).
+
+ * Some bugs around "untracked cache" feature have been fixed. This
+ will notice corrupt data in the untracked cache left by old and
+ buggy code and issue a warning---the index can be fixed by clearing
+ the untracked cache from it.
+ (merge 0cacebf099 nd/fix-untracked-cache-invalidation later to maint).
+ (merge 7bf0be7501 ab/untracked-cache-invalidation-docs later to maint).
+
+ * "git blame HEAD COPYING" in a bare repository failed to run, while
+   "git blame HEAD -- COPYING" ran just fine. This has been corrected.
+
+ * "git add" files in the same directory, but spelling the directory
+   path in different cases on a case-insensitive filesystem, corrupted
+ the name hash data structure and led to unexpected results. This
+ has been corrected.
+ (merge c95525e90d bp/name-hash-dirname-fix later to maint).
+
+ * "git rebase -p" mangled log messages of a merge commit, which is
+ now fixed.
+ (merge ed5144d7eb js/fix-merge-arg-quoting-in-rebase-p later to maint).
+
+ * Some low-level protocol codepaths could crash when they get an
+ unexpected flush packet, which is now fixed.
+ (merge bb1356dc64 js/packet-read-line-check-null later to maint).
+
+ * "git check-ignore" with multiple paths got confused when one is a
+ file and the other is a directory, which has been fixed.
+ (merge d60771e930 rs/check-ignore-multi later to maint).
+
+ * "git describe $garbage" stopped giving any errors when the garbage
+ happens to be a string with 40 hexadecimal letters.
+ (merge a8e7a2bf0f sb/describe-blob later to maint).
+
+ * Code to unquote single-quoted strings (used in the parser for
+ configuration files, etc.) did not diagnose bogus input correctly
+ and produced bogus results instead.
+ (merge ddbbf8eb25 jk/sq-dequote-on-bogus-input later to maint).
+
+ * Many places in "git apply" knew that "/dev/null", which signals
+   "there is no such file on this side of the diff", can be followed by
+   whitespace and garbage when parsing a patch, except for one place,
+   which caused otherwise valid patches (e.g. ones from subversion) to
+   be rejected.
+ (merge e454ad4bec tk/apply-dev-null-verify-name-fix later to maint).
+
+ * We no longer create any *.spec file, so "make clean" should not
+ remove it.
+ (merge 4321bdcabb tz/do-not-clean-spec-file later to maint).
+
+ * "git push" over http transport did not unquote the push-options
+ correctly.
+ (merge 90dce21eb0 jk/push-options-via-transport-fix later to maint).
* Other minor doc, test and build updates and code cleanups.
(merge e2a5a028c7 bw/oidmap-autoinit later to maint).
- (merge f0a6068a9f ys/bisect-object-id-missing-conversion-fix later to maint).
- (merge 30221a3389 as/read-tree-prefix-doc-fix later to maint).
- (merge 9bd2ce5432 ab/doc-cat-file-e-still-shows-errors later to maint).
+ (merge ec3b4b06f8 cl/t9001-cleanup later to maint).
+ (merge e1b3f3dd38 ks/submodule-doc-updates later to maint).
+ (merge fbac558a9b rs/describe-unique-abbrev later to maint).
+ (merge 8462ff43e4 tb/crlf-conv-flags later to maint).
+ (merge 7d68bb0766 rb/hashmap-h-compilation-fix later to maint).
+ (merge 3449847168 cc/sha1-file-name later to maint).
+ (merge ad622a256f ds/use-get-be64 later to maint).
+ (merge f919ffebed sg/cocci-move-array later to maint).
+ (merge 4e801463c7 jc/mailinfo-cleanup-fix later to maint).
+ (merge ef5b3a6c5e nd/shared-index-fix later to maint).
+ (merge 9f5258cbb8 tz/doc-show-defaults-to-head later to maint).
+ (merge b780e4407d jc/worktree-add-short-help later to maint).
+ (merge ae239fc8e5 rs/cocci-strbuf-addf-to-addstr later to maint).
+ (merge 2e22a85e5c nd/ignore-glob-doc-update later to maint).
+ (merge 3738031581 jk/gettext-poison later to maint).
+ (merge 54360a1956 rj/sparse-updates later to maint).
+ (merge 12e31a6b12 sg/doc-test-must-fail-args later to maint).
+ (merge 760f1ad101 bc/doc-interpret-trailers-grammofix later to maint).
+ (merge 4ccf461f56 bp/fsmonitor later to maint).
+ (merge a6119f82b1 jk/test-hashmap-updates later to maint).
+ (merge 5aea9fe6cc rd/typofix later to maint).
+ (merge e4e5da2796 sb/status-doc-fix later to maint).
+ (merge 7976e901c8 gs/test-unset-xdg-cache-home later to maint).
+ (merge d023df1ee6 tg/worktree-create-tracking later to maint).
fetch.prune::
If true, fetch will automatically behave as if the `--prune`
- option was given on the command line. See also `remote.<name>.prune`.
+ option was given on the command line. See also `remote.<name>.prune`
+ and the PRUNING section of linkgit:git-fetch[1].
+
+fetch.pruneTags::
+ If true, fetch will automatically behave as if the
+ `refs/tags/*:refs/tags/*` refspec was provided when pruning,
+ if not set already. This allows for setting both this option
+ and `fetch.prune` to maintain a 1=1 mapping to upstream
+ refs. See also `remote.<name>.pruneTags` and the PRUNING
+ section of linkgit:git-fetch[1].
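++
+For example, a minimal sketch that turns on pruning of both branches
+and tags on every fetch:
++
+----------------
+$ git config --global fetch.prune true
+$ git config --global fetch.pruneTags true
+----------------
+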
fetch.output::
Control how ref update status is printed. Valid values are
remote (as if the `--prune` option was given on the command line).
Overrides `fetch.prune` settings, if any.
+remote.<name>.pruneTags::
+ When set to true, fetching from this remote by default will also
+ remove any local tags that no longer exist on the remote if pruning
+ is activated in general via `remote.<name>.prune`, `fetch.prune` or
+ `--prune`. Overrides `fetch.pruneTags` settings, if any.
++
+See also `remote.<name>.prune` and the PRUNING section of
+linkgit:git-fetch[1].
+
remotes.<group>::
The list of remotes which are fetched by "git remote update
<group>". See linkgit:git-remote[1].
was run. I.e., `upload-pack` will feed input intended for
`pack-objects` to the hook, and expects a completed packfile on
stdout.
+
+uploadpack.allowFilter::
+ If this option is set, `upload-pack` will advertise partial
+ clone and partial fetch object filtering.
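++
+For example, a server operator might enable it per repository (a
+minimal sketch; the repository path is illustrative):
++
+----------------
+$ git -C /srv/git/project.git config uploadpack.allowFilter true
+----------------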
+
Note that this configuration variable is ignored if it is seen in the
repository-level config (this is a safety measure against fetching from
are fetched due to an explicit refspec (either on the command
line or in the remote configuration, for example if the remote
was cloned with the --mirror option), then they are also
- subject to pruning.
+ subject to pruning. Supplying `--prune-tags` is a shorthand for
+ providing the tag refspec.
++
+See the PRUNING section below for more details.
+
+-P::
+--prune-tags::
+ Before fetching, remove any local tags that no longer exist on
+	the remote if `--prune` is enabled. This option should be used
+	more carefully than `--prune`: it will remove any local
+	references (local tags) that have been created. This option is
+	a shorthand for providing the explicit tag refspec along with
+	`--prune`; see the discussion about that in its documentation.
++
+See the PRUNING section below for more details.
+
endif::git-pull[]
ifndef::git-pull[]
[--exclude=<path>] [--include=<path>] [--reject] [-q | --quiet]
[--[no-]scissors] [-S[<keyid>]] [--patch-format=<format>]
[(<mbox> | <Maildir>)...]
-'git am' (--continue | --skip | --abort)
+'git am' (--continue | --skip | --abort | --quit | --show-current-patch)
DESCRIPTION
-----------
--abort::
Restore the original branch and abort the patching operation.
+--quit::
+ Abort the patching operation but keep HEAD and the index
+ untouched.
+
+--show-current-patch::
+ Show the patch being applied when "git am" is stopped because
+ of conflicts.
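++
+For example, when the operation has stopped because of conflicts, one
+might inspect the offending patch with (a minimal sketch):
++
+----------------
+$ git am --show-current-patch
+----------------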
+
DISCUSSION
----------
[--inetd |
[--listen=<host_or_ipaddr>] [--port=<n>]
[--user=<user> [--group=<group>]]]
+ [--log-destination=(stderr|syslog|none)]
[<directory>...]
DESCRIPTION
do not have the 'git-daemon-export-ok' file.
--inetd::
- Have the server run as an inetd service. Implies --syslog.
+ Have the server run as an inetd service. Implies --syslog (may be
+ overridden with `--log-destination=`).
Incompatible with --detach, --port, --listen, --user and --group
options.
zero for no limit.
--syslog::
- Log to syslog instead of stderr. Note that this option does not imply
- --verbose, thus by default only error conditions will be logged.
+ Short for `--log-destination=syslog`.
+
+--log-destination=<destination>::
+ Send log messages to the specified destination.
+ Note that this option does not imply --verbose,
+ thus by default only error conditions will be logged.
+ The <destination> must be one of:
++
+--
+stderr::
+ Write to standard error.
+ Note that if `--detach` is specified,
+ the process disconnects from the real standard error,
+ making this destination effectively equivalent to `none`.
+syslog::
+ Write to syslog, using the `git-daemon` identifier.
+none::
+ Disable all logging.
+--
++
+The default destination is `syslog` if `--inetd` or `--detach` is specified,
+otherwise `stderr`.
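++
+For example, to run the daemon from inetd while keeping the log on
+standard error (a minimal sketch; the exported directory is
+illustrative and the inetd setup varies by system):
++
+----------------
+git daemon --inetd --log-destination=stderr /srv/git
+----------------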
--user-path::
--user-path=<path>::
overridden by giving the `--refmap=<refspec>` parameter(s) on the
command line.
+PRUNING
+-------
+
+Git has a default disposition of keeping data unless it's explicitly
+thrown away; this extends to holding onto local references to branches
+on remotes that have themselves deleted those branches.
+
+If left to accumulate, these stale references might make performance
+worse on big and busy repos that have a lot of branch churn, and
+e.g. make the output of commands like `git branch -a --contains
+<commit>` needlessly verbose, as well as impact anything else
+that works with the complete set of known references.
+
+These remote-tracking references can be deleted as a one-off with
+either of:
+
+------------------------------------------------
+# While fetching
+$ git fetch --prune <name>
+
+# Only prune, don't fetch
+$ git remote prune <name>
+------------------------------------------------
+
+To prune references as part of your normal workflow without needing to
+remember to run that, set `fetch.prune` globally, or
+`remote.<name>.prune` per-remote in the config. See
+linkgit:git-config[1].
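+
+For example (a minimal sketch; "origin" is an illustrative remote
+name):
+
+------------------------------------------------
+# Prune on every fetch, for all remotes
+$ git config --global fetch.prune true
+
+# Or prune only when fetching from one remote
+$ git config remote.origin.prune true
+------------------------------------------------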
+
+Here's where things get tricky and more specific. The pruning feature
+doesn't actually care about branches; instead, it prunes local <->
+remote references as a function of the refspec of the remote (see
+`<refspec>` and <<CRTB,CONFIGURED REMOTE-TRACKING BRANCHES>> above).
+
+Therefore, if the refspec for the remote includes
+e.g. `refs/tags/*:refs/tags/*`, or you manually run e.g. `git fetch
+--prune <name> "refs/tags/*:refs/tags/*"`, it won't be stale
+remote-tracking branches that are deleted, but any local tags that
+don't exist on the remote.
+
+This might not be what you expect: you want to prune remote
+`<name>` and also explicitly fetch tags from it, but then fetching
+from it deletes all your local tags, most of which may not have
+come from the `<name>` remote in the first place.
+
+So be careful when using this with a refspec like
+`refs/tags/*:refs/tags/*`, or any other refspec which might map
+references from multiple remotes to the same local namespace.
+
+Since keeping up-to-date with both branches and tags on the remote is
+a common use-case, the `--prune-tags` option can be supplied along with
+`--prune` to prune local tags that don't exist on the remote, and
+force-update those tags that differ. Tag pruning can also be enabled
+with `fetch.pruneTags` or `remote.<name>.pruneTags` in the config. See
+linkgit:git-config[1].
+
+The `--prune-tags` option is equivalent to having
+`refs/tags/*:refs/tags/*` declared in the refspecs of the remote. This
+can lead to some seemingly strange interactions:
+
+------------------------------------------------
+# These both fetch tags
+$ git fetch --no-tags origin 'refs/tags/*:refs/tags/*'
+$ git fetch --no-tags --prune-tags origin
+------------------------------------------------
+
+The reason it doesn't error out when provided without `--prune` or
+its config versions is to keep the configured versions flexible, and
+to maintain a 1=1 mapping between what the command-line flags do and
+what the configuration versions do.
+
+It's reasonable to e.g. configure `fetch.pruneTags=true` in
+`~/.gitconfig` to have tags pruned whenever `git fetch --prune` is
+run, without making every invocation of `git fetch` without `--prune`
+an error.
+
+Pruning tags with `--prune-tags` also works when fetching a URL
+instead of a named remote. These will all prune tags not found on
+origin:
+
+------------------------------------------------
+$ git fetch origin --prune --prune-tags
+$ git fetch origin --prune 'refs/tags/*:refs/tags/*'
+$ git fetch <url of origin> --prune --prune-tags
+$ git fetch <url of origin> --prune 'refs/tags/*:refs/tags/*'
+------------------------------------------------
+
OUTPUT
------
will be added before the new trailer.
Existing trailers are extracted from the input message by looking for
-a group of one or more lines that (i) are all trailers, or (ii) contains at
+a group of one or more lines that (i) is all trailers, or (ii) contains at
least one Git-generated or user-configured trailer and consists of at
least 25% trailers.
The group must be preceded by one or more empty (or whitespace-only) lines.
The form '--missing=allow-any' will allow object traversal to continue
if a missing object is encountered. Missing objects will silently be
omitted from the results.
++
+The form '--missing=allow-promisor' is like 'allow-any', but will only
+allow object traversal to continue for EXPECTED promisor missing objects.
+Unexpected missing objects will raise an error.
+
+--exclude-promisor-objects::
+ Omit objects that are known to be in the promisor remote. (This
+ option has the purpose of operating only on locally created objects,
+ so that when we repack, we still maintain a distinction between
+ locally created objects [without .promisor] and objects from the
+ promisor remote [with .promisor].) This is used with partial clone.
SEE ALSO
--------
[<upstream> [<branch>]]
'git rebase' [-i | --interactive] [options] [--exec <cmd>] [--onto <newbase>]
--root [<branch>]
-'git rebase' --continue | --skip | --abort | --quit | --edit-todo
+'git rebase' --continue | --skip | --abort | --quit | --edit-todo | --show-current-patch
DESCRIPTION
-----------
	Keep the commits that do not change anything from their
parents in the result.
+--allow-empty-message::
+ By default, rebasing commits with an empty message will fail.
+ This option overrides that behavior, allowing commits with empty
+ messages to be rebased.
+
--skip::
Restart the rebasing process by skipping the current patch.
--edit-todo::
Edit the todo list during an interactive rebase.
+--show-current-patch::
+ Show the current patch in an interactive rebase or when rebase
+ is stopped because of conflicts. This is the equivalent of
+ `git show REBASE_HEAD`.
+
-m::
--merge::
Use merging strategies to rebase. When the recursive (default) merge
'prune'::
-Deletes all stale remote-tracking branches under <name>.
-These stale branches have already been removed from the remote repository
-referenced by <name>, but are still locally available in
-"remotes/<name>".
+Deletes stale references associated with <name>. By default, stale
+remote-tracking branches under <name> are deleted, but depending on
+global configuration and the configuration of the remote we might even
+prune local tags that haven't been pushed there. Equivalent to `git
+fetch --prune <name>`, except that no new references will be fetched.
++
+See the PRUNING section of linkgit:git-fetch[1] for what it'll prune
+depending on various configuration settings.
+
With the `--dry-run` option, report what branches will be pruned, but do not
actually prune them.
configuration parameter remote.<name>.skipDefaultUpdate set to true will
be updated. (See linkgit:git-config[1]).
+
-With `--prune` option, prune all the remotes that are updated.
+With the `--prune` option, run pruning against all the remotes that are updated.
DISCUSSION
SYNOPSIS
--------
[verse]
-'git show' [options] <object>...
+'git show' [options] [<object>...]
DESCRIPTION
-----------
OPTIONS
-------
<object>...::
- The names of objects to show.
+ The names of objects to show (defaults to 'HEAD').
For a more complete list of ways to spell object names, see
"SPECIFYING REVISIONS" section in linkgit:gitrevisions[7].
X Y Meaning
-------------------------------------------------
- [MD] not updated
+ [AMD] not updated
M [ MD] updated in index
A [ MD] added to index
- D [ M] deleted from index
+ D deleted from index
R [ MD] renamed in index
C [ MD] copied in index
[MARC] index and work tree matches
Show the status of the submodules. This will print the SHA-1 of the
currently checked out commit for each submodule, along with the
submodule path and the output of 'git describe' for the
- SHA-1. Each SHA-1 will be prefixed with `-` if the submodule is not
- initialized, `+` if the currently checked out submodule commit
+ SHA-1. Each SHA-1 will possibly be prefixed with `-` if the submodule is
+ not initialized, `+` if the currently checked out submodule commit
does not match the SHA-1 found in the index of the containing
repository and `U` if the submodule has merge conflicts.
+
the submodules. The "updating" can be done in several ways depending
on command line options and the value of `submodule.<name>.update`
configuration variable. The command line option takes precedence over
-the configuration variable. if neither is given, a checkout is performed.
-update procedures supported both from the command line as well as setting
-`submodule.<name>.update`:
+the configuration variable. If neither is given, a 'checkout' is performed.
+The 'update' procedures supported both from the command line and
+through the `submodule.<name>.update` configuration are:
checkout;; the commit recorded in the superproject will be
checked out in the submodule on a detached HEAD.
+
If `--force` is specified, the submodule will be checked out (using
-`git checkout --force` if appropriate), even if the commit specified
+`git checkout --force`), even if the commit specified
in the index of the containing repository already matches the commit
checked out in the submodule.
merge;; the commit recorded in the superproject will be merged
into the current branch in the submodule.
-The following procedures are only available via the `submodule.<name>.update`
-configuration variable:
+The following 'update' procedures are only available via the
+`submodule.<name>.update` configuration variable:
custom command;; arbitrary shell command that takes a single
argument (the sha1 of the commit recorded in the
SYNOPSIS
--------
[verse]
-'git tag' [-a | -s | -u <keyid>] [-f] [-m <msg> | -F <file>]
+'git tag' [-a | -s | -u <keyid>] [-f] [-m <msg> | -F <file>] [-e]
<tagname> [<commit> | <object>]
'git tag' -d <tagname>...
'git tag' [-n[<num>]] -l [--contains <commit>] [--no-contains <commit>]
Implies `-a` if none of `-a`, `-s`, or `-u <keyid>`
is given.
+-e::
+--edit::
+	The message taken from a file with `-F` or from the command line
+	with `-m` is usually used as the tag message unmodified.
+ This option lets you further edit the message taken from these sources.
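++
+For example, to take the message from a file but still adjust it in an
+editor before the tag is created (a minimal sketch; the file name is
+illustrative):
++
+----------------
+$ git tag -a -F relnotes.txt -e v2.17.0
+----------------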
+
--cleanup=<mode>::
This option sets how the tag message is cleaned up.
The '<mode>' can be one of 'verbatim', 'whitespace' and 'strip'. The
are used, the untracked cache is immediately added to or removed from
the index.
+Before 2.17, the untracked cache had a bug where replacing a directory
+with a symlink to another directory could cause it to incorrectly show
+files tracked by git as untracked. See the "status: add a failing test
+showing a core.untrackedCache bug" commit to git.git. A workaround for
+that is (and this might work for other undiscovered bugs in the
+future):
+
+----------------
+$ git -c core.untrackedCache=false status
+----------------
+
+This bug has also been shown to affect non-symlink cases of replacing
+a directory with a file when it comes to the internal structures of
+the untracked cache, but no case has been reported where this resulted in
+wrong "git status" output.
+
+There are also cases where existing indexes written by git versions
+before 2.17 will reference directories that don't exist anymore,
+potentially causing many "could not open directory" warnings to be
+printed on "git status". These are new warnings for existing issues
+that were previously silently discarded.
+
+As with the bug described above, the solution is to do a one-off "git
+status" run with `core.untrackedCache=false` to flush out the leftover
+bad data.
+
File System Monitor
-------------------
linkgit:git-config[1]) than using the `--fsmonitor` option to
`git update-index` in each repository, especially if you want to do so
across all repositories you use, because you can set the configuration
-variable to `true` (or `false`) in your `$HOME/.gitconfig` just once
-and have it affect all repositories you touch.
+variable in your `$HOME/.gitconfig` just once and have it affect all
+repositories you touch.
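+
+For example (a minimal sketch; the hook path is illustrative and must
+point at a working file system monitor command, such as the sample
+Watchman hook):
+
+----------------
+$ git config --global core.fsmonitor ~/.git-hooks/fsmonitor-watchman
+----------------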
When the `core.fsmonitor` configuration variable is changed, the
file system monitor is added to or removed from the index the next time
directory specific files such as HEAD, index, etc. `-` may also be
specified as `<commit-ish>`; it is synonymous with `@{-1}`.
+
-If <commit-ish> is a branch name (call it `<branch>` and is not found,
+If <commit-ish> is a branch name (call it `<branch>`) and is not found,
and neither `-b` nor `-B` nor `--detach` are used, but there does
exist a tracking branch in exactly one remote (call it `<remote>`)
-with a matching name, treat as equivalent to
+with a matching name, treat as equivalent to:
++
------------
$ git worktree add --track -b <branch> <path> <remote>/<branch>
------------
variable.
See `GIT_TRACE` for available trace output options.
+`GIT_TRACE_CURL_NO_DATA`::
+ When a curl trace is enabled (see `GIT_TRACE_CURL` above), do not dump
+ data (that is, only dump info lines and headers).
+
+`GIT_REDACT_COOKIES`::
+ This can be set to a comma-separated list of strings. When a curl trace
+ is enabled (see `GIT_TRACE_CURL` above), whenever a "Cookies:" header
+ sent by the client is dumped, values of cookies whose key is in that
+ list (case-sensitive) are redacted.
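++
+For example, to capture a curl trace while hiding the value of a
+session cookie (a minimal sketch; "sid" is an illustrative cookie
+name):
++
+----------------
+$ GIT_TRACE_CURL=/tmp/curl.log GIT_REDACT_COOKIES=sid git fetch
+----------------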
+
`GIT_LITERAL_PATHSPECS`::
Setting this variable to `1` will cause Git to treat all
pathspecs literally, rather than as glob patterns. For example,
If the filter command (a string value) is defined via
`filter.<driver>.process` then Git can process all blobs with a
single filter invocation for the entire life of a single Git
-command. This is achieved by using a packet format (pkt-line,
-see technical/protocol-common.txt) based protocol over standard
-input and standard output as follows. All packets, except for the
-"*CONTENT" packets and the "0000" flush packet, are considered
-text and therefore are terminated by a LF.
-
-Git starts the filter when it encounters the first file
-that needs to be cleaned or smudged. After the filter started
-Git sends a welcome message ("git-filter-client"), a list of supported
-protocol version numbers, and a flush packet. Git expects to read a welcome
-response message ("git-filter-server"), exactly one protocol version number
-from the previously sent list, and a flush packet. All further
-communication will be based on the selected version. The remaining
-protocol description below documents "version=2". Please note that
-"version=42" in the example below does not exist and is only there
-to illustrate how the protocol would look like with more than one
-version.
-
-After the version negotiation Git sends a list of all capabilities that
-it supports and a flush packet. Git expects to read a list of desired
-capabilities, which must be a subset of the supported capabilities list,
-and a flush packet as response:
-------------------------
-packet: git> git-filter-client
-packet: git> version=2
-packet: git> version=42
-packet: git> 0000
-packet: git< git-filter-server
-packet: git< version=2
-packet: git< 0000
-packet: git> capability=clean
-packet: git> capability=smudge
-packet: git> capability=not-yet-invented
-packet: git> 0000
-packet: git< capability=clean
-packet: git< capability=smudge
-packet: git< 0000
-------------------------
-Supported filter capabilities in version 2 are "clean", "smudge",
-and "delay".
+command. This is achieved by using the long-running process protocol
+(described in technical/long-running-process-protocol.txt).
+
+When Git encounters the first file that needs to be cleaned or smudged,
+it starts the filter and performs the handshake. In the handshake, the
+welcome message sent by Git is "git-filter-client", only version 2 is
+supported, and the supported capabilities are "clean", "smudge", and
+"delay".
Afterwards Git sends a list of "key=value" pairs terminated with
a flush packet. The list will contain at least the filter command
with the next file that needs to be processed. Depending on the
`filter.<driver>.required` flag Git will interpret that as error.
-After the filter has processed a command it is expected to wait for
-a "key=value" list containing the next command. Git will close
-the command pipe on exit. The filter is expected to detect EOF
-and exit gracefully on its own. Git will wait until the filter
-process has stopped.
-
Delay
^^^^^
(relative to the toplevel of the work tree if not from a
`.gitignore` file).
- - Otherwise, Git treats the pattern as a shell glob suitable
- for consumption by fnmatch(3) with the FNM_PATHNAME flag:
- wildcards in the pattern will not match a / in the pathname.
- For example, "Documentation/{asterisk}.html" matches
- "Documentation/git.html" but not "Documentation/ppc/ppc.html"
- or "tools/perf/Documentation/perf.html".
+ - Otherwise, Git treats the pattern as a shell glob: "`*`" matches
+ anything except "`/`", "`?`" matches any one character except "`/`"
+ and "`[]`" matches one character in a selected range. See
+ fnmatch(3) and the FNM_PATHNAME flag for a more detailed
+ description.
- A leading slash matches the beginning of the pathname.
For example, "/{asterisk}.c" matches "cat-file.c" but not
Transmit <string> as a push option. As the push option
must not contain LF or NUL characters, the string is not encoded.
+'option from-promisor' {'true'|'false'}::
+ Indicate that these objects are being fetched from a promisor.
+
+'option no-dependents' {'true'|'false'}::
+ Indicate that only the objects wanted need to be fetched, not
+ their dependents.
+
SEE ALSO
--------
linkgit:git-remote[1]
superproject expects the submodule’s working directory to be at.
The section `submodule.foo.*` in the `.gitmodules` file gives additional
-hints to Gits porcelain layer such as where to obtain the submodule via
-the `submodule.foo.url` setting.
+hints to Git's porcelain layer. For example, the `submodule.foo.url`
+setting specifies where to obtain the submodule.
Submodules can be used for at least two different use cases:
2. Splitting a (logically single) project into multiple
repositories and tying them back together. This can be used to
- overcome current limitations of Gits implementation to have
+ overcome current limitations of Git's implementation to have
finer grained access:
- * Size of the git repository:
+ * Size of the Git repository:
In its current form Git scales up poorly for large repositories containing
content that is not compressed by delta computation between trees.
- However you can also use submodules to e.g. hold large binary assets
- and these repositories are then shallowly cloned such that you do not
+ For example, you can use submodules to hold large binary assets
+ and these repositories can be shallowly cloned such that you do not
have a large history locally.
* Transfer size:
In its current form Git requires the whole working tree present. It
does not allow partial trees to be transferred in fetch or clone.
+ If the project you work on consists of multiple repositories tied
+ together as submodules in a superproject, you can avoid fetching the
+ working trees of the repositories you are not interested in.
* Access control:
By restricting user access to submodules, this can be used to implement
read/write policies for different users.
Submodule operations can be configured using the following mechanisms
(from highest to lowest precedence):
- * The command line for those commands that support taking submodule specs.
- Most commands have a boolean flag '--recurse-submodules' whether to
- recurse into submodules. Examples are `ls-files` or `checkout`.
+ * The command line for those commands that support taking submodules
+ as part of their pathspecs. Most commands have a boolean flag
+    `--recurse-submodules` which specifies whether to recurse into submodules.
+ Examples are `grep` and `checkout`.
Some commands take enums, such as `fetch` and `push`, where you can
specify how submodules are affected.
For example an effect from the submodule's `.gitignore` file
would be observed when you run `git status --ignore-submodules=none` in
the superproject. This collects information from the submodule's working
-directory by running `status` in the submodule, which does pay attention
-to its `.gitignore` file.
+directory by running `status` in the submodule while paying attention
+to the `.gitignore` file of the submodule.
+
The submodule's `$GIT_DIR/config` file would come into play when running
`git push --recurse-submodules=check` in the superproject, as this would
file.
* The configuration file `$GIT_DIR/config` in the superproject.
- Typical configuration at this place is controlling if a submodule
- is recursed into at all via the `active` flag for example.
+ Git only recurses into active submodules (see "ACTIVE SUBMODULES"
+ section below).
+
If the submodule is not yet initialized, then the configuration
-inside the submodule does not exist yet, so configuration where to
+inside the submodule does not exist yet, so where to
obtain the submodule from is configured here, for example.
- * the `.gitmodules` file inside the superproject. Additionally to the
- required mapping between submodule's name and path, a project usually
+ * The `.gitmodules` file inside the superproject. A project usually
uses this file to suggest defaults for the upstream collection
- of repositories.
+ of repositories for the mapping that is required between a
+ submodule's name and its path.
+
-This file mainly serves as the mapping between name and path in
-the superproject, such that the submodule's git directory can be
+This file mainly serves as the mapping between the name and path of submodules
+in the superproject, such that the submodule's Git directory can be
located.
+
If the submodule has never been initialized, this is the only place
of the superproject.
* Deinitialized submodule: A `gitlink`, and a `.gitmodules` entry,
-but no submodule working directory. The submodule’s git directory
-may be there as after deinitializing the git directory is kept around.
+but no submodule working directory. The submodule’s Git directory
+may still be there, since deinitializing keeps the Git directory around.
The directory which is supposed to be the working directory is empty instead.
+
A submodule can be deinitialized by running `git submodule deinit`.
To completely remove a submodule, manually delete
`$GIT_DIR/modules/<name>/`.
+ACTIVE SUBMODULES
+-----------------
+
+A submodule is considered active,
+
+ (a) if `submodule.<name>.active` is set to `true`
+ or
+ (b) if the submodule's path matches the pathspec in `submodule.active`
+ or
+ (c) if `submodule.<name>.url` is set.
+
+and these are evaluated in this order.
+
+For example:
+
+ [submodule "foo"]
+ active = false
+ url = https://example.org/foo
+ [submodule "bar"]
+ active = true
+ url = https://example.org/bar
+ [submodule "baz"]
+ url = https://example.org/baz
+
+In the above config only the submodules 'bar' and 'baz' are active:
+'bar' due to (a) and 'baz' due to (c). 'foo' is inactive because
+(a) takes precedence over (c).
+
+Note that (c) is a historical artefact and will be ignored if
+(a) and (b) specify that the submodule is not active. In other words,
+if we have `submodule.<name>.active` set to `false` or if the
+submodule's path is excluded in the pathspec in `submodule.active`, the
+url does not matter, whether it is present or not. This is illustrated in
+the example that follows.
+
+ [submodule "foo"]
+ active = true
+ url = https://example.org/foo
+ [submodule "bar"]
+ url = https://example.org/bar
+ [submodule "baz"]
+ url = https://example.org/baz
+ [submodule "bob"]
+ ignore = true
+ [submodule]
+ active = b*
+ active = :(exclude) baz
+
+Here, all submodules except 'baz' ('foo', 'bar', and 'bob') are
+active: 'foo' due to its own active flag, and all the others due to
+the submodule active pathspec, which specifies that any submodule
+starting with 'b' except 'baz' is active, regardless of the
+presence of the .url field.
+
Workflow for a third party library
----------------------------------
if a missing object is encountered. Missing objects will silently be
omitted from the results.
+
+The form '--missing=allow-promisor' is like 'allow-any', but will only
+allow object traversal to continue for EXPECTED promisor missing objects.
+Unexpected missing objects will raise an error.
++
The form '--missing=print' is like 'allow-any', but will also print a
list of the missing objects. Object IDs are prefixed with a ``?'' character.
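++
+For example, one might list the objects a partial clone lacks (a
+minimal sketch, used together with `--objects`):
++
+----------------
+$ git rev-list --objects --missing=print HEAD
+----------------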
endif::git-rev-list[]
+--exclude-promisor-objects::
+ (For internal use only.) Prefilter object traversal at
+ promisor boundary. This is used with partial clone. This is
+ stronger than `--missing=allow-promisor` because it limits the
+ traversal, rather than just silencing errors about missing
+ objects.
+
--no-walk[=(sorted|unsorted)]::
Only show the given commits, but do not traverse their ancestors.
This has no effect if a range is specified. If the argument
* read_object_with_reference()
* has_sha1_file()
* write_sha1_file()
-* pretend_sha1_file()
+* pretend_object_file()
* lookup_{object,commit,tag,blob,tree}
* parse_{object,commit,tag,blob,tree}
* Use of object flags
The submodule config cache API allows reading submodule
configurations/information from specified revisions. Internally
information is lazily read into a cache that is used to avoid
-unnecessary parsing of the same .gitmodule files. Lookups can be done by
+unnecessary parsing of the same .gitmodules files. Lookups can be done by
submodule path or name.
Usage
--- /dev/null
+Long-running process protocol
+=============================
+
+This protocol is used when Git needs to communicate with an external
+process throughout the entire life of a single Git command. All
+communication is in pkt-line format (see technical/protocol-common.txt)
+over standard input and standard output.
+
+Handshake
+---------
+
+Git starts by sending a welcome message (for example,
+"git-filter-client"), a list of supported protocol version numbers, and
+a flush packet. Git expects to read the welcome message with "server"
+instead of "client" (for example, "git-filter-server"), exactly one
+protocol version number from the previously sent list, and a flush
+packet. All further communication will be based on the selected version.
+The remaining protocol description below documents "version=2". Please
+note that "version=42" in the example below does not exist and is only
+there to illustrate how the protocol would look with more than one
+version.
+
+After the version negotiation Git sends a list of all capabilities that
+it supports and a flush packet. Git expects to read a list of desired
+capabilities, which must be a subset of the supported capabilities list,
+and a flush packet as response:
+------------------------
+packet: git> git-filter-client
+packet: git> version=2
+packet: git> version=42
+packet: git> 0000
+packet: git< git-filter-server
+packet: git< version=2
+packet: git< 0000
+packet: git> capability=clean
+packet: git> capability=smudge
+packet: git> capability=not-yet-invented
+packet: git> 0000
+packet: git< capability=clean
+packet: git< capability=smudge
+packet: git< 0000
+------------------------
+
+Shutdown
+--------
+
+Git will close
+the command pipe on exit. The filter is expected to detect EOF
+and exit gracefully on its own. Git will wait until the filter
+process has stopped.
upload-request = want-list
*shallow-line
*1depth-request
+ [filter-request]
flush-pkt
want-list = first-want
additional-want = PKT-LINE("want" SP obj-id)
depth = 1*DIGIT
+
+ filter-request = PKT-LINE("filter" SP filter-spec)
----
Clients MUST send all the obj-ids they want from the reference
result are defined as shallow and marked as such in the server. This
information is sent back to the client in the next step.
+The client can optionally request that pack-objects omit various
+objects from the packfile using one of several filtering techniques.
+These are intended for use with partial clone and partial fetch
+operations. See `rev-list` for possible "filter-spec" values.
+
Once all the 'want's and 'shallow's (and optional 'deepen') are
transferred, clients MUST send a flush-pkt, to tell the server side
that it is done sending the list.
included in the push certificate. A send-pack client MUST NOT
send a push-cert packet unless the receive-pack server advertises
this capability.
+
+filter
+------
+
+If the upload-pack server advertises the 'filter' capability,
+fetch-pack may send "filter" commands to request a partial clone
+or partial fetch and request that the server omit various objects
+from the packfile.
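+
+For example, a client that supports object filtering could request a
+blob-less clone from such a server (a minimal sketch; it assumes
+`uploadpack.allowFilter` is enabled on the server):
+
+----------------
+$ git clone --filter=blob:none <url>
+----------------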
When the config key `extensions.preciousObjects` is set to `true`,
objects in the repository MUST NOT be deleted (e.g., by `git-prune` or
`git repack -d`).
+
+`partialclone`
+~~~~~~~~~~~~~~
+
+When the config key `extensions.partialclone` is set, it indicates
+that the repo was created with a partial clone (or later performed
+a partial fetch) and that the remote may have omitted sending
+certain unwanted objects. Such a remote is called a "promisor remote"
+and it promises that all such omitted objects can be fetched from it
+in the future.
+
+The value of this key is the name of the promisor remote.
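+
+The clone/fetch machinery normally records this itself; setting it by
+hand would amount to (a minimal sketch; "origin" is an illustrative
+remote name):
+
+----------------
+$ git config extensions.partialclone origin
+----------------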
GIT_EXEC_PATH=`pwd`
PATH=`pwd`:$PATH
- GITPERLLIB=`pwd`/perl/blib/lib
+ GITPERLLIB=`pwd`/perl/build/lib
export GIT_EXEC_PATH PATH GITPERLLIB
+ - By default (unless NO_PERL is provided) Git will ship various perl
+ scripts & libraries it needs. However, for simplicity it doesn't
+ use the ExtUtils::MakeMaker toolchain to decide where to place the
+ perl libraries. Depending on the system this can result in the perl
+ libraries not being where you'd like them if they're expected to be
+ used by things other than Git itself.
+
+ Manually supplying a perllibdir prefix should fix this, if this is
+ a problem you care about, e.g.:
+
+ prefix=/usr perllibdir=/usr/$(/usr/bin/perl -MConfig -wle 'print substr $Config{installsitelib}, 1 + length $Config{siteprefixexp}')
+
+   This will result in e.g. perllibdir=/usr/share/perl/5.26.1 on Debian,
+ perllibdir=/usr/share/perl5 (which we'd use by default) on CentOS.
+
- Git is reasonably self-sufficient, but does depend on a few external
programs and libraries. Git can be used without most of them by adding
the appropriate "NO_<LIBRARY>=YesPlease" to the make command line or
#
# Define PERL_PATH to the path of your Perl binary (usually /usr/bin/perl).
#
-# Define NO_PERL_MAKEMAKER if you cannot use Makefiles generated by perl's
-# MakeMaker (e.g. using ActiveState under Cygwin).
-#
# Define NO_PERL if you do not want Perl scripts or libraries at all.
#
# Define PYTHON_PATH to the path of your Python binary (often /usr/bin/python
mergetoolsdir = $(gitexecdir)/mergetools
sharedir = $(prefix)/share
gitwebdir = $(sharedir)/gitweb
+perllibdir = $(sharedir)/perl5
localedir = $(sharedir)/locale
template_dir = share/git-core/templates
htmldir = $(prefix)/share/doc/git-doc
infodir_relative = $(patsubst $(prefix)/%,%,$(infodir))
htmldir_relative = $(patsubst $(prefix)/%,%,$(htmldir))
-export prefix bindir sharedir sysconfdir gitwebdir localedir
+export prefix bindir sharedir sysconfdir gitwebdir perllibdir localedir
CC = cc
AR = ar
LIB_OBJS += ewah/ewah_io.o
LIB_OBJS += ewah/ewah_rlw.o
LIB_OBJS += exec_cmd.o
+LIB_OBJS += fetch-object.o
LIB_OBJS += fetch-pack.o
LIB_OBJS += fsck.o
LIB_OBJS += fsmonitor.o
LIB_OBJS += merge-blobs.o
LIB_OBJS += merge-recursive.o
LIB_OBJS += mergesort.o
-LIB_OBJS += mru.o
LIB_OBJS += name-hash.o
LIB_OBJS += notes.o
LIB_OBJS += notes-cache.o
LIB_OBJS += sha1dc_git.o
ifdef DC_SHA1_EXTERNAL
ifdef DC_SHA1_SUBMODULE
+ ifneq ($(DC_SHA1_SUBMODULE),auto)
$(error Only set DC_SHA1_EXTERNAL or DC_SHA1_SUBMODULE, not both)
+ endif
endif
BASIC_CFLAGS += -DDC_SHA1_EXTERNAL
EXTLIBS += -lsha1detectcoll
LIB_OBJS += compat/sha1-chunked.o
BASIC_CFLAGS += -DSHA1_MAX_BLOCK_SIZE="$(SHA1_MAX_BLOCK_SIZE)"
endif
-ifdef NO_PERL_MAKEMAKER
- export NO_PERL_MAKEMAKER
-endif
ifdef NO_HSTRERROR
COMPAT_CFLAGS += -DNO_HSTRERROR
COMPAT_OBJS += compat/hstrerror.o
DESTDIR_SQ = $(subst ','\'',$(DESTDIR))
bindir_SQ = $(subst ','\'',$(bindir))
bindir_relative_SQ = $(subst ','\'',$(bindir_relative))
+mandir_SQ = $(subst ','\'',$(mandir))
mandir_relative_SQ = $(subst ','\'',$(mandir_relative))
infodir_relative_SQ = $(subst ','\'',$(infodir_relative))
+perllibdir_SQ = $(subst ','\'',$(perllibdir))
localedir_SQ = $(subst ','\'',$(localedir))
gitexecdir_SQ = $(subst ','\'',$(gitexecdir))
template_dir_SQ = $(subst ','\'',$(template_dir))
ifndef NO_TCLTK
$(QUIET_SUBDIR0)git-gui $(QUIET_SUBDIR1) gitexecdir='$(gitexec_instdir_SQ)' all
$(QUIET_SUBDIR0)gitk-git $(QUIET_SUBDIR1) all
-endif
-ifndef NO_PERL
- $(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' localedir='$(localedir_SQ)' all
endif
$(QUIET_SUBDIR0)templates $(QUIET_SUBDIR1) SHELL_PATH='$(SHELL_PATH_SQ)' PERL_PATH='$(PERL_PATH_SQ)'
SCRIPT_DEFINES = $(SHELL_PATH_SQ):$(DIFF_SQ):$(GIT_VERSION):\
$(localedir_SQ):$(NO_CURL):$(USE_GETTEXT_SCHEME):$(SANE_TOOL_PATH_SQ):\
- $(gitwebdir_SQ):$(PERL_PATH_SQ):$(SANE_TEXT_GREP):$(PAGER_ENV)
+ $(gitwebdir_SQ):$(PERL_PATH_SQ):$(SANE_TEXT_GREP):$(PAGER_ENV):\
+ $(perllibdir_SQ)
define cmd_munge_script
$(RM) $@ $@+ && \
sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
$(SCRIPT_PERL_GEN): GIT-BUILD-OPTIONS
ifndef NO_PERL
-$(SCRIPT_PERL_GEN): perl/perl.mak
+$(SCRIPT_PERL_GEN):
-perl/perl.mak: perl/PM.stamp
-
-perl/PM.stamp: FORCE
- @$(FIND) perl -type f -name '*.pm' | sort >$@+ && \
- $(PERL_PATH) -V >>$@+ && \
- { cmp $@+ $@ >/dev/null 2>/dev/null || mv $@+ $@; } && \
- $(RM) $@+
-
-perl/perl.mak: GIT-CFLAGS GIT-PREFIX perl/Makefile perl/Makefile.PL
- $(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' $(@F)
-
-PERL_DEFINES = $(PERL_PATH_SQ):$(PERLLIB_EXTRA_SQ)
-$(SCRIPT_PERL_GEN): % : %.perl perl/perl.mak GIT-PERL-DEFINES GIT-VERSION-FILE
+PERL_DEFINES = $(PERL_PATH_SQ):$(PERLLIB_EXTRA_SQ):$(perllibdir_SQ)
+$(SCRIPT_PERL_GEN): % : %.perl GIT-PERL-DEFINES GIT-VERSION-FILE
$(QUIET_GEN)$(RM) $@ $@+ && \
- INSTLIBDIR=`MAKEFLAGS= $(MAKE) -C perl -s --no-print-directory instlibdir` && \
+ INSTLIBDIR='$(perllibdir_SQ)' && \
INSTLIBDIR_EXTRA='$(PERLLIB_EXTRA_SQ)' && \
INSTLIBDIR="$$INSTLIBDIR$${INSTLIBDIR_EXTRA:+:$$INSTLIBDIR_EXTRA}" && \
sed -e '1{' \
http-push.sp http.sp http-walker.sp remote-curl.sp imap-send.sp: SPARSE_FLAGS += \
-DCURL_DISABLE_TYPECHECK
+pack-revindex.sp: SPARSE_FLAGS += -Wno-memcpy-max-count
+
ifdef NO_EXPAT
http-walker.sp http-walker.s http-walker.o: EXTRA_CPPFLAGS = -DNO_EXPAT
endif
po/build/locale/%/LC_MESSAGES/git.mo: po/%.po
$(QUIET_MSGFMT)mkdir -p $(dir $@) && $(MSGFMT) -o $@ $<
+LIB_PERL := $(wildcard perl/Git.pm perl/Git/*.pm perl/Git/*/*.pm perl/Git/*/*/*.pm)
+LIB_PERL_GEN := $(patsubst perl/%.pm,perl/build/lib/%.pm,$(LIB_PERL))
+
+ifndef NO_PERL
+all:: $(LIB_PERL_GEN)
+endif
+
+perl/build/lib/%.pm: perl/%.pm
+ $(QUIET_GEN)mkdir -p $(dir $@) && \
+ sed -e 's|@@LOCALEDIR@@|$(localedir_SQ)|g' < $< > $@
+
+perl/build/man/man3/Git.3pm: perl/Git.pm
+ $(QUIET_GEN)mkdir -p $(dir $@) && \
+ pod2man $< $@
+
FIND_SOURCE_FILES = ( \
git ls-files \
'*.[hcS]' \
(cd '$(DESTDIR_SQ)$(localedir_SQ)' && umask 022 && $(TAR) xof -)
endif
ifndef NO_PERL
- $(MAKE) -C perl prefix='$(prefix_SQ)' DESTDIR='$(DESTDIR_SQ)' install
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perllibdir_SQ)'
+ (cd perl/build/lib && $(TAR) cf - .) | \
+ (cd '$(DESTDIR_SQ)$(perllibdir_SQ)' && umask 022 && $(TAR) xof -)
$(MAKE) -C gitweb install
endif
ifndef NO_TCLTK
install-gitweb:
$(MAKE) -C gitweb install
-install-doc:
+install-doc: install-man-perl
$(MAKE) -C Documentation install
-install-man:
+install-man: install-man-perl
$(MAKE) -C Documentation install-man
+install-man-perl: perl/build/man/man3/Git.3pm
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(mandir_SQ)/man3'
+ (cd perl/build/man/man3 && $(TAR) cf - .) | \
+ (cd '$(DESTDIR_SQ)$(mandir_SQ)/man3' && umask 022 && $(TAR) xof -)
+
install-html:
$(MAKE) -C Documentation install-html
$(GIT_TARNAME)/configure \
$(GIT_TARNAME)/version \
$(GIT_TARNAME)/git-gui/version
+ifdef DC_SHA1_SUBMODULE
+ @mkdir -p $(GIT_TARNAME)/sha1collisiondetection/lib
+ @cp sha1collisiondetection/LICENSE.txt \
+ $(GIT_TARNAME)/sha1collisiondetection/
+ @cp sha1collisiondetection/lib/sha1.[ch] \
+ $(GIT_TARNAME)/sha1collisiondetection/lib/
+ @cp sha1collisiondetection/lib/ubc_check.[ch] \
+ $(GIT_TARNAME)/sha1collisiondetection/lib/
+ $(TAR) rf $(GIT_TARNAME).tar \
+ $(GIT_TARNAME)/sha1collisiondetection/LICENSE.txt \
+ $(GIT_TARNAME)/sha1collisiondetection/lib/sha1.[ch] \
+ $(GIT_TARNAME)/sha1collisiondetection/lib/ubc_check.[ch]
+endif
@$(RM) -r $(GIT_TARNAME)
gzip -f -9 $(GIT_TARNAME).tar
$(RM) $(TEST_PROGRAMS) $(NO_INSTALL)
$(RM) -r bin-wrappers $(dep_dirs)
$(RM) -r po/build/
- $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h $(ETAGS_TARGET) tags cscope*
+ $(RM) *.pyc *.pyo */*.pyc */*.pyo common-cmds.h $(ETAGS_TARGET) tags cscope*
$(RM) -r $(GIT_TARNAME) .doc-tmp-dir
$(RM) $(GIT_TARNAME).tar.gz git-core_$(GIT_VERSION)-*.tar.gz
$(RM) $(htmldocs).tar.gz $(manpages).tar.gz
$(MAKE) -C Documentation/ clean
ifndef NO_PERL
$(MAKE) -C gitweb clean
- $(MAKE) -C perl clean
+ $(RM) -r perl/build/
endif
$(MAKE) -C templates/ clean
$(MAKE) -C t/ clean
}
free(another);
} else {
- if (!starts_with(line, "/dev/null\n"))
+ if (!is_dev_null(line))
return error(_("git apply: bad git-diff - expected /dev/null on line %d"), state->linenr);
}
static int read_old_data(struct stat *st, struct patch *patch,
const char *path, struct strbuf *buf)
{
- enum safe_crlf safe_crlf = patch->crlf_in_old ?
- SAFE_CRLF_KEEP_CRLF : SAFE_CRLF_RENORMALIZE;
+ int conv_flags = patch->crlf_in_old ?
+ CONV_EOL_KEEP_CRLF : CONV_EOL_RENORMALIZE;
switch (st->st_mode & S_IFMT) {
case S_IFLNK:
if (strbuf_readlink(buf, path, st->st_size) < 0)
* should never look at the index when explicit crlf option
* is given.
*/
- convert_to_git(NULL, path, buf->buf, buf->len, buf, safe_crlf);
+ convert_to_git(NULL, path, buf->buf, buf->len, buf, conv_flags);
return 0;
default:
return -1;
* See if the old one matches what the patch
* applies to.
*/
- hash_sha1_file(img->buf, img->len, blob_type, oid.hash);
+ hash_object_file(img->buf, img->len, blob_type, &oid);
if (strcmp(oid_to_hex(&oid), patch->old_sha1_prefix))
return error(_("the patch applies to '%s' (%s), "
"which does not match the "
name);
/* verify that the result matches */
- hash_sha1_file(img->buf, img->len, blob_type, oid.hash);
+ hash_object_file(img->buf, img->len, blob_type, &oid);
if (strcmp(oid_to_hex(&oid), patch->new_sha1_prefix))
return error(_("binary patch to '%s' creates incorrect result (expecting %s, got %s)"),
name, patch->new_sha1_prefix, oid_to_hex(&oid));
/* Preimage the patch was prepared for */
if (patch->is_new)
- write_sha1_file("", 0, blob_type, pre_oid.hash);
+ write_object_file("", 0, blob_type, &pre_oid);
else if (get_oid(patch->old_sha1_prefix, &pre_oid) ||
read_blob_object(&buf, &pre_oid, patch->old_mode))
return error(_("repository lacks the necessary blob to fall back on 3-way merge."));
return -1;
}
/* post_oid is theirs */
- write_sha1_file(tmp_image.buf, tmp_image.len, blob_type, post_oid.hash);
+ write_object_file(tmp_image.buf, tmp_image.len, blob_type, &post_oid);
clear_image(&tmp_image);
/* our_oid is ours */
return error(_("cannot read the current contents of '%s'"),
patch->old_name);
}
- write_sha1_file(tmp_image.buf, tmp_image.len, blob_type, our_oid.hash);
+ write_object_file(tmp_image.buf, tmp_image.len, blob_type, &our_oid);
clear_image(&tmp_image);
/* in-core three-way merge between post and our using pre as base */
}
fill_stat_cache_info(ce, &st);
}
- if (write_sha1_file(buf, size, blob_type, ce->oid.hash) < 0) {
+ if (write_object_file(buf, size, blob_type, &ce->oid) < 0) {
free(ce);
return error(_("unable to create backing store "
"for newly created file %s"), path);
convert_to_git(&the_index, path, buf.buf, buf.len, &buf, 0);
origin->file.ptr = buf.buf;
origin->file.size = buf.len;
- pretend_sha1_file(buf.buf, buf.len, OBJ_BLOB, origin->blob_oid.hash);
+ pretend_object_file(buf.buf, buf.len, OBJ_BLOB, &origin->blob_oid);
/*
* Read the current index, replace the path entry with
if (mkdir(state->dir, 0777) < 0 && errno != EEXIST)
die_errno(_("failed to create directory '%s'"), state->dir);
+ delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
if (split_mail(state, patch_format, paths, keep_cr) < 0) {
am_destroy(state);
}
write_state_text(state, "scissors", str);
- sq_quote_argv(&sb, state->git_apply_opts.argv, 0);
+ sq_quote_argv(&sb, state->git_apply_opts.argv);
write_state_text(state, "apply-opt", sb.buf);
if (state->rebasing)
oidclr(&state->orig_commit);
unlink(am_path(state, "original-commit"));
+ delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
if (!get_oid("HEAD", &head))
write_state_text(state, "abort-safety", oid_to_hex(&head));
oidcpy(&state->orig_commit, &commit_oid);
write_state_text(state, "original-commit", oid_to_hex(&commit_oid));
+ update_ref("am", "REBASE_HEAD", &commit_oid,
+ NULL, REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR);
return 0;
}
setenv("GIT_COMMITTER_DATE",
state->ignore_date ? "" : state->author_date, 1);
- if (commit_tree(state->msg, state->msg_len, tree.hash, parents, commit.hash,
- author, state->sign_commit))
+ if (commit_tree(state->msg, state->msg_len, &tree, parents, &commit,
+ author, state->sign_commit))
die(_("failed to write commit object"));
reflog_msg = getenv("GIT_REFLOG_ACTION");
git_config_get_bool("advice.amworkdir", &advice_amworkdir);
if (advice_amworkdir)
- printf_ln(_("The copy of the patch that failed is found in: %s"),
- am_path(state, "patch"));
+ printf_ln(_("Use 'git am --show-current-patch' to see the failed patch"));
die_user_resolve(state);
}
am_destroy(state);
}
+static int show_patch(struct am_state *state)
+{
+ struct strbuf sb = STRBUF_INIT;
+ const char *patch_path;
+ int len;
+
+ if (!is_null_oid(&state->orig_commit)) {
+ const char *av[4] = { "show", NULL, "--", NULL };
+ char *new_oid_str;
+ int ret;
+
+ av[1] = new_oid_str = xstrdup(oid_to_hex(&state->orig_commit));
+ ret = run_command_v_opt(av, RUN_GIT_CMD);
+ free(new_oid_str);
+ return ret;
+ }
+
+ patch_path = am_path(state, msgnum(state));
+ len = strbuf_read_file(&sb, patch_path, 0);
+ if (len < 0)
+ die_errno(_("failed to read '%s'"), patch_path);
+
+ setup_pager();
+ write_in_full(1, sb.buf, sb.len);
+ strbuf_release(&sb);
+ return 0;
+}
+
/**
* parse_options() callback that validates and sets opt->value to the
* PATCH_FORMAT_* enum value corresponding to `arg`.
RESUME_APPLY,
RESUME_RESOLVED,
RESUME_SKIP,
- RESUME_ABORT
+ RESUME_ABORT,
+ RESUME_QUIT,
+ RESUME_SHOW_PATCH
};
static int git_am_config(const char *k, const char *v, void *cb)
int patch_format = PATCH_FORMAT_UNKNOWN;
enum resume_mode resume = RESUME_FALSE;
int in_progress;
+ int ret = 0;
const char * const usage[] = {
N_("git am [<options>] [(<mbox> | <Maildir>)...]"),
OPT_CMDMODE(0, "abort", &resume,
N_("restore the original branch and abort the patching operation."),
RESUME_ABORT),
+ OPT_CMDMODE(0, "quit", &resume,
+ N_("abort the patching operation but keep HEAD where it is."),
+ RESUME_QUIT),
+ OPT_CMDMODE(0, "show-current-patch", &resume,
+ N_("show the patch being applied."),
+ RESUME_SHOW_PATCH),
OPT_BOOL(0, "committer-date-is-author-date",
&state.committer_date_is_author_date,
N_("lie about committer date")),
* stray directories.
*/
if (file_exists(state.dir) && !state.rebasing) {
- if (resume == RESUME_ABORT) {
+ if (resume == RESUME_ABORT || resume == RESUME_QUIT) {
am_destroy(&state);
am_state_release(&state);
return 0;
case RESUME_ABORT:
am_abort(&state);
break;
+ case RESUME_QUIT:
+ am_rerere_clear();
+ am_destroy(&state);
+ break;
+ case RESUME_SHOW_PATCH:
+ ret = show_patch(&state);
+ break;
default:
die("BUG: invalid resume value");
}
am_state_release(&state);
- return 0;
+ return ret;
}
buf = packet_read_line(fd[0], NULL);
if (!buf)
- die(_("git archive: expected ACK/NAK, got EOF"));
+ die(_("git archive: expected ACK/NAK, got a flush packet"));
if (strcmp(buf, "ACK")) {
if (starts_with(buf, "NACK "))
die(_("git archive: NACK %s"), buf + 5);
return 0;
}
+static int is_a_rev(const char *name)
+{
+ struct object_id oid;
+
+ if (get_oid(name, &oid))
+ return 0;
+ return OBJ_NONE < sha1_object_info(oid.hash, NULL);
+}
+
int cmd_blame(int argc, const char **argv, const char *prefix)
{
struct rev_info revs;
} else {
if (argc < 2)
usage_with_options(blame_opt_usage, options);
- path = add_prefix(prefix, argv[argc - 1]);
- if (argc == 3 && !file_exists(path)) { /* (2b) */
+ if (argc == 3 && is_a_rev(argv[argc - 1])) { /* (2b) */
path = add_prefix(prefix, argv[1]);
argv[1] = argv[2];
+ } else { /* (2a) */
+ if (argc == 2 && is_a_rev(argv[1]) && !get_git_work_tree())
+ die("missing <path> to blame");
+ path = add_prefix(prefix, argv[argc - 1]);
}
argv[argc - 1] = "--";
-
- setup_work_tree();
- if (!file_exists(path))
- die_errno("cannot stat path '%s'", path);
}
revs.disable_stdin = 1;
for_each_loose_object(batch_loose_object, &sa, 0);
for_each_packed_object(batch_packed_object, &sa, 0);
+ if (repository_format_partial_clone)
+ warning("This repository has extensions.partialClone set. Some objects may not be loaded.");
cb.opt = opt;
cb.expand = &data;
{
const char *full_path;
char *seen;
- int num_ignored = 0, dtype = DT_UNKNOWN, i;
+ int num_ignored = 0, i;
struct exclude *exclude;
struct pathspec pathspec;
full_path = pathspec.items[i].match;
exclude = NULL;
if (!seen[i]) {
+ int dtype = DT_UNKNOWN;
exclude = last_exclude_matching(dir, &the_index,
full_path, &dtype);
}
* (it also writes the merge result to the object database even
* when it may contain conflicts).
*/
- if (write_sha1_file(result_buf.ptr, result_buf.size,
- blob_type, oid.hash))
+ if (write_object_file(result_buf.ptr, result_buf.size, blob_type, &oid))
die(_("Unable to add merge result for '%s'"), path);
free(result_buf.ptr);
ce = make_cache_entry(mode, oid.hash, path, 2, 0);
#include "run-command.h"
#include "connected.h"
#include "packfile.h"
+#include "list-objects-filter-options.h"
/*
* Overall FIXMEs:
static int option_dissociate;
static int max_jobs = -1;
static struct string_list option_recurse_submodules = STRING_LIST_INIT_NODUP;
+static struct list_objects_filter_options filter_options;
static int recurse_submodules_cb(const struct option *opt,
const char *arg, int unset)
TRANSPORT_FAMILY_IPV4),
OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
TRANSPORT_FAMILY_IPV6),
+ OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
OPT_END()
};
struct refspec *refspec;
const char *fetch_pattern;
+ fetch_if_missing = 0;
+
packet_trace_identity("clone");
argc = parse_options(argc, argv, prefix, builtin_clone_options,
builtin_clone_usage, 0);
warning(_("--shallow-since is ignored in local clones; use file:// instead."));
if (option_not.nr)
warning(_("--shallow-exclude is ignored in local clones; use file:// instead."));
+ if (filter_options.choice)
+ warning(_("--filter is ignored in local clones; use file:// instead."));
if (!access(mkpath("%s/shallow", path), F_OK)) {
if (option_local > 0)
warning(_("source repository is shallow, ignoring --local"));
transport_set_option(transport, TRANS_OPT_UPLOADPACK,
option_upload_pack);
- if (transport->smart_options && !deepen)
+ if (filter_options.choice) {
+ transport_set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER,
+ filter_options.filter_spec);
+ transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
+ }
+
+ if (transport->smart_options && !deepen && !filter_options.choice)
transport->smart_options->check_self_contained_and_connected = 1;
refs = transport_get_remote_refs(transport);
write_refspec_config(src_ref_prefix, our_head_points_at,
remote_head_points_at, &branch_top);
+ if (filter_options.choice)
+ partial_clone_register("origin", &filter_options);
+
if (is_local)
clone_local(path, git_dir);
else if (refs && complete_refs_before_fetch)
transport_fetch_refs(transport, mapped_refs);
update_remote_refs(refs, mapped_refs, remote_head_points_at,
- branch_top.buf, reflog_msg.buf, transport, !is_local);
+ branch_top.buf, reflog_msg.buf, transport,
+ !is_local && !filter_options.choice);
update_head(our_head_points_at, remote_head, reflog_msg.buf);
}
junk_mode = JUNK_LEAVE_REPO;
+ fetch_if_missing = 1;
err = checkout(submodule_progress);
strbuf_release(&reflog_msg);
die_errno("git commit-tree: failed to read");
}
- if (commit_tree(buffer.buf, buffer.len, tree_oid.hash, parents,
- commit_oid.hash, NULL, sign_commit)) {
+ if (commit_tree(buffer.buf, buffer.len, &tree_oid, parents, &commit_oid,
+ NULL, sign_commit)) {
strbuf_release(&buffer);
return 1;
}
#include "gpg-interface.h"
#include "column.h"
#include "sequencer.h"
-#include "notes-utils.h"
#include "mailmap.h"
-#include "sigchain.h"
static const char * const builtin_commit_usage[] = {
N_("git commit [<options>] [--] <pathspec>..."),
NULL
};
-static const char implicit_ident_advice_noconfig[] =
-N_("Your name and email address were configured automatically based\n"
-"on your username and hostname. Please check that they are accurate.\n"
-"You can suppress this message by setting them explicitly. Run the\n"
-"following command and follow the instructions in your editor to edit\n"
-"your configuration file:\n"
-"\n"
-" git config --global --edit\n"
-"\n"
-"After doing this, you may fix the identity used for this commit with:\n"
-"\n"
-" git commit --amend --reset-author\n");
-
-static const char implicit_ident_advice_config[] =
-N_("Your name and email address were configured automatically based\n"
-"on your username and hostname. Please check that they are accurate.\n"
-"You can suppress this message by setting them explicitly:\n"
-"\n"
-" git config --global user.name \"Your Name\"\n"
-" git config --global user.email you@example.com\n"
-"\n"
-"After doing this, you may fix the identity used for this commit with:\n"
-"\n"
-" git commit --amend --reset-author\n");
-
static const char empty_amend_advice[] =
N_("You asked to amend the most recent commit, but doing so would make\n"
"it empty. You can repeat your command with --allow-empty, or you can\n"
"Then \"git cherry-pick --continue\" will resume cherry-picking\n"
"the remaining commits.\n");
-static GIT_PATH_FUNC(git_path_commit_editmsg, "COMMIT_EDITMSG")
-
static const char *use_message_buffer;
static struct lock_file index_lock; /* real index */
static struct lock_file false_lock; /* used only for partial commits */
* if editor is used, and only the whitespaces if the message
* is specified explicitly.
*/
-static enum {
- CLEANUP_SPACE,
- CLEANUP_NONE,
- CLEANUP_SCISSORS,
- CLEANUP_ALL
-} cleanup_mode;
+static enum commit_msg_cleanup_mode cleanup_mode;
static const char *cleanup_arg;
static enum commit_whence whence;
struct strbuf sb = STRBUF_INIT;
const char *hook_arg1 = NULL;
const char *hook_arg2 = NULL;
- int clean_message_contents = (cleanup_mode != CLEANUP_NONE);
+ int clean_message_contents = (cleanup_mode != COMMIT_MSG_CLEANUP_NONE);
int old_display_comment_prefix;
/* This checks and barfs if author is badly specified */
struct ident_split ci, ai;
if (whence != FROM_COMMIT) {
- if (cleanup_mode == CLEANUP_SCISSORS)
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS)
wt_status_add_cut_line(s->fp);
status_printf_ln(s, GIT_COLOR_NORMAL,
whence == FROM_MERGE
}
fprintf(s->fp, "\n");
- if (cleanup_mode == CLEANUP_ALL)
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_ALL)
status_printf(s, GIT_COLOR_NORMAL,
_("Please enter the commit message for your changes."
" Lines starting\nwith '%c' will be ignored, and an empty"
" message aborts the commit.\n"), comment_line_char);
- else if (cleanup_mode == CLEANUP_SCISSORS && whence == FROM_COMMIT)
+ else if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS &&
+ whence == FROM_COMMIT)
wt_status_add_cut_line(s->fp);
- else /* CLEANUP_SPACE, that is. */
+ else /* COMMIT_MSG_CLEANUP_SPACE, that is. */
status_printf(s, GIT_COLOR_NORMAL,
_("Please enter the commit message for your changes."
" Lines starting\n"
return 1;
}
-static int rest_is_empty(struct strbuf *sb, int start)
-{
- int i, eol;
- const char *nl;
-
- /* Check if the rest is just whitespace and Signed-off-by's. */
- for (i = start; i < sb->len; i++) {
- nl = memchr(sb->buf + i, '\n', sb->len - i);
- if (nl)
- eol = nl - sb->buf;
- else
- eol = sb->len;
-
- if (strlen(sign_off_header) <= eol - i &&
- starts_with(sb->buf + i, sign_off_header)) {
- i = eol;
- continue;
- }
- while (i < eol)
- if (!isspace(sb->buf[i++]))
- return 0;
- }
-
- return 1;
-}
-
-/*
- * Find out if the message in the strbuf contains only whitespace and
- * Signed-off-by lines.
- */
-static int message_is_empty(struct strbuf *sb)
-{
- if (cleanup_mode == CLEANUP_NONE && sb->len)
- return 0;
- return rest_is_empty(sb, 0);
-}
-
-/*
- * See if the user edited the message in the editor or left what
- * was in the template intact
- */
-static int template_untouched(struct strbuf *sb)
-{
- struct strbuf tmpl = STRBUF_INIT;
- const char *start;
-
- if (cleanup_mode == CLEANUP_NONE && sb->len)
- return 0;
-
- if (!template_file || strbuf_read_file(&tmpl, template_file, 0) <= 0)
- return 0;
-
- strbuf_stripspace(&tmpl, cleanup_mode == CLEANUP_ALL);
- if (!skip_prefix(sb->buf, tmpl.buf, &start))
- start = sb->buf;
- strbuf_release(&tmpl);
- return rest_is_empty(sb, start - sb->buf);
-}
-
static const char *find_author_by_nickname(const char *name)
{
struct rev_info revs;
if (argc == 0 && (also || (only && !amend && !allow_empty)))
die(_("No paths with --include/--only does not make sense."));
if (!cleanup_arg || !strcmp(cleanup_arg, "default"))
- cleanup_mode = use_editor ? CLEANUP_ALL : CLEANUP_SPACE;
+ cleanup_mode = use_editor ? COMMIT_MSG_CLEANUP_ALL :
+ COMMIT_MSG_CLEANUP_SPACE;
else if (!strcmp(cleanup_arg, "verbatim"))
- cleanup_mode = CLEANUP_NONE;
+ cleanup_mode = COMMIT_MSG_CLEANUP_NONE;
else if (!strcmp(cleanup_arg, "whitespace"))
- cleanup_mode = CLEANUP_SPACE;
+ cleanup_mode = COMMIT_MSG_CLEANUP_SPACE;
else if (!strcmp(cleanup_arg, "strip"))
- cleanup_mode = CLEANUP_ALL;
+ cleanup_mode = COMMIT_MSG_CLEANUP_ALL;
else if (!strcmp(cleanup_arg, "scissors"))
- cleanup_mode = use_editor ? CLEANUP_SCISSORS : CLEANUP_SPACE;
+ cleanup_mode = use_editor ? COMMIT_MSG_CLEANUP_SCISSORS :
+ COMMIT_MSG_CLEANUP_SPACE;
else
die(_("Invalid cleanup mode %s"), cleanup_arg);
return 0;
}
-static const char *implicit_ident_advice(void)
-{
- char *user_config = expand_user_path("~/.gitconfig", 0);
- char *xdg_config = xdg_config_home("config");
- int config_exists = file_exists(user_config) || file_exists(xdg_config);
-
- free(user_config);
- free(xdg_config);
-
- if (config_exists)
- return _(implicit_ident_advice_config);
- else
- return _(implicit_ident_advice_noconfig);
-
-}
-
-static void print_summary(const char *prefix, const struct object_id *oid,
- int initial_commit)
-{
- struct rev_info rev;
- struct commit *commit;
- struct strbuf format = STRBUF_INIT;
- const char *head;
- struct pretty_print_context pctx = {0};
- struct strbuf author_ident = STRBUF_INIT;
- struct strbuf committer_ident = STRBUF_INIT;
-
- commit = lookup_commit(oid);
- if (!commit)
- die(_("couldn't look up newly created commit"));
- if (parse_commit(commit))
- die(_("could not parse newly created commit"));
-
- strbuf_addstr(&format, "format:%h] %s");
-
- format_commit_message(commit, "%an <%ae>", &author_ident, &pctx);
- format_commit_message(commit, "%cn <%ce>", &committer_ident, &pctx);
- if (strbuf_cmp(&author_ident, &committer_ident)) {
- strbuf_addstr(&format, "\n Author: ");
- strbuf_addbuf_percentquote(&format, &author_ident);
- }
- if (author_date_is_interesting()) {
- struct strbuf date = STRBUF_INIT;
- format_commit_message(commit, "%ad", &date, &pctx);
- strbuf_addstr(&format, "\n Date: ");
- strbuf_addbuf_percentquote(&format, &date);
- strbuf_release(&date);
- }
- if (!committer_ident_sufficiently_given()) {
- strbuf_addstr(&format, "\n Committer: ");
- strbuf_addbuf_percentquote(&format, &committer_ident);
- if (advice_implicit_identity) {
- strbuf_addch(&format, '\n');
- strbuf_addstr(&format, implicit_ident_advice());
- }
- }
- strbuf_release(&author_ident);
- strbuf_release(&committer_ident);
-
- init_revisions(&rev, prefix);
- setup_revisions(0, NULL, &rev, NULL);
-
- rev.diff = 1;
- rev.diffopt.output_format =
- DIFF_FORMAT_SHORTSTAT | DIFF_FORMAT_SUMMARY;
-
- rev.verbose_header = 1;
- rev.show_root_diff = 1;
- get_commit_format(format.buf, &rev);
- rev.always_show_header = 0;
- rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
- rev.diffopt.break_opt = 0;
- diff_setup_done(&rev.diffopt);
-
- head = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
- if (!head)
- die_errno(_("unable to resolve HEAD after creating commit"));
- if (!strcmp(head, "HEAD"))
- head = _("detached HEAD");
- else
- skip_prefix(head, "refs/heads/", &head);
- printf("[%s%s ", head, initial_commit ? _(" (root-commit)") : "");
-
- if (!log_tree_commit(&rev, commit)) {
- rev.always_show_header = 1;
- rev.use_terminator = 1;
- log_tree_commit(&rev, commit);
- }
-
- strbuf_release(&format);
-}
-
static int git_commit_config(const char *k, const char *v, void *cb)
{
struct wt_status *s = cb;
return git_status_config(k, v, s);
}
-static int run_rewrite_hook(const struct object_id *oldoid,
- const struct object_id *newoid)
-{
- struct child_process proc = CHILD_PROCESS_INIT;
- const char *argv[3];
- int code;
- struct strbuf sb = STRBUF_INIT;
-
- argv[0] = find_hook("post-rewrite");
- if (!argv[0])
- return 0;
-
- argv[1] = "amend";
- argv[2] = NULL;
-
- proc.argv = argv;
- proc.in = -1;
- proc.stdout_to_stderr = 1;
-
- code = start_command(&proc);
- if (code)
- return code;
- strbuf_addf(&sb, "%s %s\n", oid_to_hex(oldoid), oid_to_hex(newoid));
- sigchain_push(SIGPIPE, SIG_IGN);
- write_in_full(proc.in, sb.buf, sb.len);
- close(proc.in);
- strbuf_release(&sb);
- sigchain_pop(SIGPIPE);
- return finish_command(&proc);
-}
-
int run_commit_hook(int editor_is_used, const char *index_file, const char *name, ...)
{
struct argv_array hook_env = ARGV_ARRAY_INIT;
struct strbuf sb = STRBUF_INIT;
struct strbuf author_ident = STRBUF_INIT;
const char *index_file, *reflog_msg;
- char *nl;
struct object_id oid;
struct commit_list *parents = NULL;
struct stat statbuf;
struct commit *current_head = NULL;
struct commit_extra_header *extra = NULL;
- struct ref_transaction *transaction;
struct strbuf err = STRBUF_INIT;
if (argc == 2 && !strcmp(argv[1], "-h"))
}
if (verbose || /* Truncate the message just before the diff, if any. */
- cleanup_mode == CLEANUP_SCISSORS)
+ cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS)
strbuf_setlen(&sb, wt_status_locate_end(sb.buf, sb.len));
- if (cleanup_mode != CLEANUP_NONE)
- strbuf_stripspace(&sb, cleanup_mode == CLEANUP_ALL);
+ if (cleanup_mode != COMMIT_MSG_CLEANUP_NONE)
+ strbuf_stripspace(&sb, cleanup_mode == COMMIT_MSG_CLEANUP_ALL);
- if (message_is_empty(&sb) && !allow_empty_message) {
+ if (message_is_empty(&sb, cleanup_mode) && !allow_empty_message) {
rollback_index_files();
fprintf(stderr, _("Aborting commit due to empty commit message.\n"));
exit(1);
}
- if (template_untouched(&sb) && !allow_empty_message) {
+ if (template_untouched(&sb, template_file, cleanup_mode) && !allow_empty_message) {
rollback_index_files();
fprintf(stderr, _("Aborting commit; you did not edit the message.\n"));
exit(1);
append_merge_tag_headers(parents, &tail);
}
- if (commit_tree_extended(sb.buf, sb.len, active_cache_tree->oid.hash,
- parents, oid.hash, author_ident.buf, sign_commit, extra)) {
+ if (commit_tree_extended(sb.buf, sb.len, &active_cache_tree->oid,
+ parents, &oid, author_ident.buf, sign_commit,
+ extra)) {
rollback_index_files();
die(_("failed to write commit object"));
}
strbuf_release(&author_ident);
free_commit_extra_headers(extra);
- nl = strchr(sb.buf, '\n');
- if (nl)
- strbuf_setlen(&sb, nl + 1 - sb.buf);
- else
- strbuf_addch(&sb, '\n');
- strbuf_insert(&sb, 0, reflog_msg, strlen(reflog_msg));
- strbuf_insert(&sb, strlen(reflog_msg), ": ", 2);
-
- transaction = ref_transaction_begin(&err);
- if (!transaction ||
- ref_transaction_update(transaction, "HEAD", &oid,
- current_head
-			       ? &current_head->object.oid : &null_oid,
- 0, sb.buf, &err) ||
- ref_transaction_commit(transaction, &err)) {
+ if (update_head_with_reflog(current_head, &oid, reflog_msg, &sb,
+ &err)) {
rollback_index_files();
die("%s", err.buf);
}
- ref_transaction_free(transaction);
unlink(git_path_cherry_pick_head());
unlink(git_path_revert_head());
rerere(0);
run_commit_hook(use_editor, get_index_file(), "post-commit", NULL);
if (amend && !no_post_rewrite) {
- struct notes_rewrite_cfg *cfg;
- cfg = init_copy_notes_for_rewrite("amend");
- if (cfg) {
- /* we are amending, so current_head is not NULL */
-			copy_note_for_rewrite(cfg, &current_head->object.oid, &oid);
- finish_copy_notes_for_rewrite(cfg, "Notes added by 'git commit --amend'");
- }
-		run_rewrite_hook(&current_head->object.oid, &oid);
+ commit_post_rewrite(current_head, &oid);
+ }
+ if (!quiet) {
+ unsigned int flags = 0;
+
+ if (!current_head)
+ flags |= SUMMARY_INITIAL_COMMIT;
+ if (author_date_is_interesting())
+ flags |= SUMMARY_SHOW_AUTHOR_DATE;
+ print_commit_summary(prefix, &oid, flags);
}
- if (!quiet)
- print_summary(prefix, &oid, !current_head);
UNLEAK(err);
UNLEAK(sb);
if (!match_cnt) {
struct object_id *cmit_oid = &cmit->object.oid;
if (always) {
- strbuf_addstr(dst, find_unique_abbrev(cmit_oid->hash, abbrev));
+ strbuf_add_unique_abbrev(dst, cmit_oid->hash, abbrev);
if (suffix)
strbuf_addstr(dst, suffix);
return;
if (cmit)
describe_commit(&oid, &sb);
- else if (lookup_blob(&oid))
+ else if (sha1_object_info(oid.hash, NULL) == OBJ_BLOB)
describe_blob(oid, &sb);
else
die(_("%s is neither a commit nor blob"), arg);
struct oid_array shallow = OID_ARRAY_INIT;
struct string_list deepen_not = STRING_LIST_INIT_DUP;
+ fetch_if_missing = 0;
+
packet_trace_identity("fetch-pack");
memset(&args, 0, sizeof(args));
args.update_shallow = 1;
continue;
}
+ if (!strcmp("--from-promisor", arg)) {
+ args.from_promisor = 1;
+ continue;
+ }
+ if (!strcmp("--no-dependents", arg)) {
+ args.no_dependents = 1;
+ continue;
+ }
+ if (skip_prefix(arg, ("--" CL_ARG__FILTER "="), &arg)) {
+ parse_list_objects_filter(&args.filter_options, arg);
+ continue;
+ }
+ if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) {
+ list_objects_filter_set_no_filter(&args.filter_options);
+ continue;
+ }
usage(fetch_pack_usage);
}
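
The two new options above are matched with skip_prefix(), which tests a known prefix and, on a hit, points the caller past it, so "--filter=blob:none" hands "blob:none" straight to parse_list_objects_filter(). A minimal standalone rendering of that idiom (the main() driver is a hypothetical caller, not fetch-pack code):

    #include <stdio.h>
    #include <string.h>

    /* Same contract as Git's skip_prefix(): return 1 and advance *out
     * past the prefix on a match; return 0 and leave *out alone otherwise. */
    static int skip_prefix(const char *str, const char *prefix, const char **out)
    {
            size_t len = strlen(prefix);

            if (strncmp(str, prefix, len))
                    return 0;
            *out = str + len;
            return 1;
    }

    int main(void)
    {
            const char *arg = "--filter=blob:none";
            const char *spec;

            if (skip_prefix(arg, "--filter=", &spec))
                    printf("filter spec: %s\n", spec); /* prints "blob:none" */
            return 0;
    }
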
if (deepen_not.nr)
#include "argv-array.h"
#include "utf8.h"
#include "packfile.h"
+#include "list-objects-filter-options.h"
static const char * const builtin_fetch_usage[] = {
N_("git fetch [<options>] [<repository> [<refspec>...]]"),
static int prune = -1; /* unspecified */
#define PRUNE_BY_DEFAULT 0 /* do we prune by default? */
+static int fetch_prune_tags_config = -1; /* unspecified */
+static int prune_tags = -1; /* unspecified */
+#define PRUNE_TAGS_BY_DEFAULT 0 /* do we prune tags by default? */
+
static int all, append, dry_run, force, keep, multiple, update_head_ok, verbosity, deepen_relative;
static int progress = -1;
static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen;
static int shown_url = 0;
static int refmap_alloc, refmap_nr;
static const char **refmap_array;
+static struct list_objects_filter_options filter_options;
static int git_fetch_config(const char *k, const char *v, void *cb)
{
return 0;
}
+ if (!strcmp(k, "fetch.prunetags")) {
+ fetch_prune_tags_config = git_config_bool(k, v);
+ return 0;
+ }
+
if (!strcmp(k, "submodule.recurse")) {
int r = git_config_bool(k, v) ?
RECURSE_SUBMODULES_ON : RECURSE_SUBMODULES_OFF;
N_("number of submodules fetched in parallel")),
OPT_BOOL('p', "prune", &prune,
N_("prune remote-tracking branches no longer on remote")),
+ OPT_BOOL('P', "prune-tags", &prune_tags,
+ N_("prune local tags no longer on remote and clobber changed tags")),
{ OPTION_CALLBACK, 0, "recurse-submodules", &recurse_submodules, N_("on-demand"),
N_("control recursive fetching of submodules"),
PARSE_OPT_OPTARG, option_fetch_parse_recurse_submodules },
TRANSPORT_FAMILY_IPV4),
OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
TRANSPORT_FAMILY_IPV6),
+ OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
OPT_END()
};
set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, "yes");
if (update_shallow)
set_option(transport, TRANS_OPT_UPDATE_SHALLOW, "yes");
+ if (filter_options.choice) {
+ set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER,
+ filter_options.filter_spec);
+ set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
+ }
return transport;
}
argv_array_push(argv, "--dry-run");
if (prune != -1)
argv_array_push(argv, prune ? "--prune" : "--no-prune");
+ if (prune_tags != -1)
+ argv_array_push(argv, prune_tags ? "--prune-tags" : "--no-prune-tags");
if (update_head_ok)
argv_array_push(argv, "--update-head-ok");
if (force)
return result;
}
-static int fetch_one(struct remote *remote, int argc, const char **argv)
+/*
+ * Fetching from the promisor remote should use the given filter-spec
+ * or inherit the default filter-spec from the config.
+ */
+static inline void fetch_one_setup_partial(struct remote *remote)
+{
+ /*
+ * Explicit --no-filter argument overrides everything, regardless
+ * of any prior partial clones and fetches.
+ */
+ if (filter_options.no_filter)
+ return;
+
+ /*
+ * If no prior partial clone/fetch and the current fetch DID NOT
+ * request a partial-fetch, do a normal fetch.
+ */
+ if (!repository_format_partial_clone && !filter_options.choice)
+ return;
+
+ /*
+ * If this is the FIRST partial-fetch request, we enable partial
+ * on this repo and remember the given filter-spec as the default
+ * for subsequent fetches to this remote.
+ */
+ if (!repository_format_partial_clone && filter_options.choice) {
+ partial_clone_register(remote->name, &filter_options);
+ return;
+ }
+
+ /*
+ * We are currently limited to only ONE promisor remote and only
+ * allow partial-fetches from the promisor remote.
+ */
+ if (strcmp(remote->name, repository_format_partial_clone)) {
+ if (filter_options.choice)
+ die(_("--filter can only be used with the remote configured in core.partialClone"));
+ return;
+ }
+
+ /*
+ * Do a partial-fetch from the promisor remote using either the
+ * explicitly given filter-spec or inherit the filter-spec from
+ * the config.
+ */
+ if (!filter_options.choice)
+ partial_clone_get_default_filter_spec(&filter_options);
+ return;
+}
+
+static int fetch_one(struct remote *remote, int argc, const char **argv, int prune_tags_ok)
{
static const char **refs = NULL;
struct refspec *refspec;
int ref_nr = 0;
+ int j = 0;
int exit_code;
+ int maybe_prune_tags;
+ int remote_via_config = remote_is_configured(remote, 0);
if (!remote)
die(_("No remote repository specified. Please, specify either a URL or a\n"
if (prune < 0) {
/* no command line request */
- if (0 <= gtransport->remote->prune)
- prune = gtransport->remote->prune;
+ if (0 <= remote->prune)
+ prune = remote->prune;
else if (0 <= fetch_prune_config)
prune = fetch_prune_config;
else
prune = PRUNE_BY_DEFAULT;
}
+ if (prune_tags < 0) {
+ /* no command line request */
+ if (0 <= remote->prune_tags)
+ prune_tags = remote->prune_tags;
+ else if (0 <= fetch_prune_tags_config)
+ prune_tags = fetch_prune_tags_config;
+ else
+ prune_tags = PRUNE_TAGS_BY_DEFAULT;
+ }
+
+ maybe_prune_tags = prune_tags_ok && prune_tags;
+ if (maybe_prune_tags && remote_via_config)
+ add_prune_tags_to_fetch_refspec(remote);
+
+ if (argc > 0 || (maybe_prune_tags && !remote_via_config)) {
+ size_t nr_alloc = st_add3(argc, maybe_prune_tags, 1);
+ refs = xcalloc(nr_alloc, sizeof(const char *));
+ if (maybe_prune_tags) {
+ refs[j++] = xstrdup("refs/tags/*:refs/tags/*");
+ ref_nr++;
+ }
+ }
+
if (argc > 0) {
- int j = 0;
int i;
- refs = xcalloc(st_add(argc, 1), sizeof(const char *));
for (i = 0; i < argc; i++) {
if (!strcmp(argv[i], "tag")) {
i++;
argv[i], argv[i]);
} else
refs[j++] = argv[i];
+ ref_nr++;
}
- refs[j] = NULL;
- ref_nr = j;
}
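
The xcalloc() above is sized with st_add3(), Git's overflow-checked size_t addition (argc entries, plus the optional prune-tags refspec, plus the NULL terminator), so the count can never silently wrap. A toy sketch of the same guard, assuming only standard C; Git's real st_add() is a macro that die()s with its own message:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Abort instead of wrapping, so a later calloc can never be under-sized. */
    static size_t st_add(size_t a, size_t b)
    {
            if (SIZE_MAX - a < b) {
                    fprintf(stderr, "size_t overflow: %zu + %zu\n", a, b);
                    exit(1);
            }
            return a + b;
    }

    static size_t st_add3(size_t a, size_t b, size_t c)
    {
            return st_add(st_add(a, b), c);
    }

    int main(void)
    {
            /* argc entries + optional prune-tags refspec + NULL terminator */
            size_t nr_alloc = st_add3(2, 1, 1);
            printf("%zu slots\n", nr_alloc); /* 4 */
            return 0;
    }
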
sigchain_push_common(unlock_pack_on_signal);
{
int i;
struct string_list list = STRING_LIST_INIT_DUP;
- struct remote *remote;
+ struct remote *remote = NULL;
int result = 0;
+ int prune_tags_ok = 1;
struct argv_array argv_gc_auto = ARGV_ARRAY_INIT;
packet_trace_identity("fetch");
+ fetch_if_missing = 0;
+
/* Record the command line for the reflog */
strbuf_addstr(&default_rla, "fetch");
for (i = 1; i < argc; i++)
if (depth || deepen_since || deepen_not.nr)
deepen = 1;
+ if (filter_options.choice && !repository_format_partial_clone)
+ die("--filter can only be used when extensions.partialClone is set");
+
if (all) {
if (argc == 1)
die(_("fetch --all does not take a repository argument"));
else if (argc > 1)
die(_("fetch --all does not make sense with refspecs"));
(void) for_each_remote(get_one_remote_for_fetch, &list);
- result = fetch_multiple(&list);
} else if (argc == 0) {
/* No arguments -- use default remote */
remote = remote_get(NULL);
- result = fetch_one(remote, argc, argv);
} else if (multiple) {
/* All arguments are assumed to be remotes or groups */
for (i = 0; i < argc; i++)
if (!add_remote_or_group(argv[i], &list))
die(_("No such remote or remote group: %s"), argv[i]);
- result = fetch_multiple(&list);
} else {
/* Single remote or group */
(void) add_remote_or_group(argv[0], &list);
/* More than one remote */
if (argc > 1)
die(_("Fetching a group and specifying refspecs does not make sense"));
- result = fetch_multiple(&list);
} else {
/* Zero or one remotes */
remote = remote_get(argv[0]);
- result = fetch_one(remote, argc-1, argv+1);
+ prune_tags_ok = (argc == 1);
+ argc--;
+ argv++;
}
}
+ if (remote) {
+ if (filter_options.choice || repository_format_partial_clone)
+ fetch_one_setup_partial(remote);
+ result = fetch_one(remote, argc, argv, prune_tags_ok);
+ } else {
+ if (filter_options.choice)
+ die(_("--filter can only be used with the remote configured in core.partialClone"));
+ /* TODO should this also die if we have a previous partial-clone? */
+ result = fetch_multiple(&list);
+ }
+
if (!result && (recurse_submodules != RECURSE_SUBMODULES_OFF)) {
struct argv_array options = ARGV_ARRAY_INIT;
if (obj->flags & REACHABLE)
return 0;
obj->flags |= REACHABLE;
+
+ if (is_promisor_object(&obj->oid))
+ /*
+ * Further recursion does not need to be performed on this
+ * object since it is a promisor object (so it does not need to
+ * be added to "pending").
+ */
+ return 0;
+
if (!(obj->flags & HAS_OBJ)) {
if (parent && !has_object_file(&obj->oid)) {
printf("broken link from %7s %s\n",
static int traverse_one_object(struct object *obj)
{
- return fsck_walk(obj, obj, &fsck_walk_options);
+ int result = fsck_walk(obj, obj, &fsck_walk_options);
+
+ if (obj->type == OBJ_TREE) {
+ struct tree *tree = (struct tree *)obj;
+ free_tree_buffer(tree);
+ }
+ return result;
}
static int traverse_reachable(void)
* do a full fsck
*/
if (!(obj->flags & HAS_OBJ)) {
+ if (is_promisor_object(&obj->oid))
+ return;
if (has_sha1_pack(obj->oid.hash))
return; /* it is in pack - forget about it */
printf("missing %s %s\n", printable_type(obj),
xstrfmt("%s@{%"PRItime"}", refname, timestamp));
obj->flags |= USED;
mark_object_reachable(obj);
- } else {
+ } else if (!is_promisor_object(oid)) {
error("%s: invalid reflog entry %s", refname, oid_to_hex(oid));
errors_found |= ERROR_REACHABLE;
}
obj = parse_object(oid);
if (!obj) {
+ if (is_promisor_object(oid)) {
+ /*
+ * Increment default_refs anyway, because this is a
+ * valid ref.
+ */
+ default_refs++;
+ return 0;
+ }
error("%s: invalid sha1 pointer %s", refname, oid_to_hex(oid));
errors_found |= ERROR_REACHABLE;
/* We'll continue with the rest despite the error.. */
int i;
struct alternate_object_database *alt;
+ /* fsck knows how to handle missing promisor objects */
+ fetch_if_missing = 0;
+
errors_found = 0;
check_replace_refs = 0;
struct object *obj = lookup_object(oid.hash);
if (!obj || !(obj->flags & HAS_OBJ)) {
+ if (is_promisor_object(&oid))
+ continue;
error("%s: object missing", oid_to_hex(&oid));
errors_found |= ERROR_OBJECT;
continue;
argv_array_push(&prune, prune_expire);
if (quiet)
argv_array_push(&prune, "--no-progress");
+ if (repository_format_partial_clone)
+ argv_array_push(&prune,
+ "--exclude-promisor-objects");
if (run_command_v_opt(prune.argv, RUN_GIT_CMD))
return error(FAILED_RUN, prune.argv[0]);
}
if (strbuf_read(&buf, fd, 4096) < 0)
ret = -1;
else
- ret = hash_sha1_file_literally(buf.buf, buf.len, type, oid, flags);
+ ret = hash_object_file_literally(buf.buf, buf.len, type, oid,
+ flags);
strbuf_release(&buf);
return ret;
}
static off_t consumed_bytes;
static off_t max_input_size;
static unsigned deepest_delta;
-static git_SHA_CTX input_ctx;
+static git_hash_ctx input_ctx;
static uint32_t input_crc32;
static int input_fd, output_fd;
static const char *curr_pack;
if (input_offset) {
if (output_fd >= 0)
write_or_die(output_fd, input_buffer, input_offset);
- git_SHA1_Update(&input_ctx, input_buffer, input_offset);
+ the_hash_algo->update_fn(&input_ctx, input_buffer, input_offset);
memmove(input_buffer, input_buffer + input_offset, input_len);
input_offset = 0;
}
output_fd = -1;
nothread_data.pack_fd = input_fd;
}
- git_SHA1_Init(&input_ctx);
+ the_hash_algo->init_fn(&input_ctx);
return pack_name;
}
}
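
Throughout index-pack the direct git_SHA1_Init/Update/Final calls give way to the_hash_algo, a struct of function pointers (init_fn, update_fn, final_fn, plus the digest width rawsz) introduced for the hash-function transition, so the pack-parsing code no longer hard-codes SHA-1. A self-contained toy showing just that vtable shape; the checksum is a stand-in, not SHA-1, and all names here are illustrative:

    #include <stdio.h>
    #include <string.h>

    typedef struct { unsigned long sum; } toy_ctx;

    static void toy_init(toy_ctx *c) { c->sum = 0; }
    static void toy_update(toy_ctx *c, const void *buf, size_t len)
    {
            const unsigned char *p = buf;
            while (len--)
                    c->sum = c->sum * 31 + *p++;
    }
    static void toy_final(unsigned char *out, toy_ctx *c)
    {
            memcpy(out, &c->sum, sizeof(c->sum));
    }

    /* The shape of the vtable: width of the digest plus the three steps. */
    struct hash_algo {
            size_t rawsz;
            void (*init_fn)(toy_ctx *);
            void (*update_fn)(toy_ctx *, const void *, size_t);
            void (*final_fn)(unsigned char *, toy_ctx *);
    };

    static const struct hash_algo toy_algo = {
            sizeof(unsigned long), toy_init, toy_update, toy_final
    };

    int main(void)
    {
            const struct hash_algo *algo = &toy_algo; /* like the_hash_algo */
            toy_ctx ctx;
            unsigned char digest[sizeof(unsigned long)];

            algo->init_fn(&ctx);
            algo->update_fn(&ctx, "pack data", 9);
            algo->final_fn(digest, &ctx);
            printf("rawsz = %zu\n", algo->rawsz);
            return 0;
    }
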
static void *unpack_entry_data(off_t offset, unsigned long size,
- enum object_type type, unsigned char *sha1)
+ enum object_type type, struct object_id *oid)
{
static char fixed_buf[8192];
int status;
git_zstream stream;
void *buf;
- git_SHA_CTX c;
+ git_hash_ctx c;
char hdr[32];
int hdrlen;
if (!is_delta_type(type)) {
hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), size) + 1;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, hdrlen);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
} else
- sha1 = NULL;
+ oid = NULL;
if (type == OBJ_BLOB && size > big_file_threshold)
buf = fixed_buf;
else
stream.avail_in = input_len;
status = git_inflate(&stream, 0);
use(input_len - stream.avail_in);
- if (sha1)
- git_SHA1_Update(&c, last_out, stream.next_out - last_out);
+ if (oid)
+ the_hash_algo->update_fn(&c, last_out, stream.next_out - last_out);
if (buf == fixed_buf) {
stream.next_out = buf;
stream.avail_out = sizeof(fixed_buf);
if (stream.total_out != size || status != Z_STREAM_END)
bad_object(offset, _("inflate returned %d"), status);
git_inflate_end(&stream);
- if (sha1)
- git_SHA1_Final(sha1, &c);
+ if (oid)
+ the_hash_algo->final_fn(oid->hash, &c);
return buf == fixed_buf ? NULL : buf;
}
static void *unpack_raw_entry(struct object_entry *obj,
off_t *ofs_offset,
- unsigned char *ref_sha1,
- unsigned char *sha1)
+ struct object_id *ref_oid,
+ struct object_id *oid)
{
unsigned char *p;
unsigned long size, c;
switch (obj->type) {
case OBJ_REF_DELTA:
- hashcpy(ref_sha1, fill(20));
- use(20);
+ hashcpy(ref_oid->hash, fill(the_hash_algo->rawsz));
+ use(the_hash_algo->rawsz);
break;
case OBJ_OFS_DELTA:
p = fill(1);
}
obj->hdr_size = consumed_bytes - obj->idx.offset;
- data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, sha1);
+ data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, oid);
obj->idx.crc32 = input_crc32;
return data;
}
free(delta_data);
if (!result->data)
bad_object(delta_obj->idx.offset, _("failed to apply delta"));
- hash_sha1_file(result->data, result->size,
- typename(delta_obj->real_type),
- delta_obj->idx.oid.hash);
+ hash_object_file(result->data, result->size,
+ typename(delta_obj->real_type), &delta_obj->idx.oid);
sha1_object(result->data, NULL, result->size, delta_obj->real_type,
&delta_obj->idx.oid);
counter_lock();
* - calculate SHA1 of all non-delta objects;
* - remember base (SHA1 or offset) for all deltas.
*/
-static void parse_pack_objects(unsigned char *sha1)
+static void parse_pack_objects(unsigned char *hash)
{
int i, nr_delays = 0;
struct ofs_delta_entry *ofs_delta = ofs_deltas;
- unsigned char ref_delta_sha1[20];
+ struct object_id ref_delta_oid;
struct stat st;
if (verbose)
for (i = 0; i < nr_objects; i++) {
struct object_entry *obj = &objects[i];
void *data = unpack_raw_entry(obj, &ofs_delta->offset,
- ref_delta_sha1,
- obj->idx.oid.hash);
+ &ref_delta_oid,
+ &obj->idx.oid);
obj->real_type = obj->type;
if (obj->type == OBJ_OFS_DELTA) {
nr_ofs_deltas++;
ofs_delta++;
} else if (obj->type == OBJ_REF_DELTA) {
ALLOC_GROW(ref_deltas, nr_ref_deltas + 1, ref_deltas_alloc);
- hashcpy(ref_deltas[nr_ref_deltas].sha1, ref_delta_sha1);
+ hashcpy(ref_deltas[nr_ref_deltas].sha1, ref_delta_oid.hash);
ref_deltas[nr_ref_deltas].obj_no = i;
nr_ref_deltas++;
} else if (!data) {
/* Check pack integrity */
flush();
- git_SHA1_Final(sha1, &input_ctx);
- if (hashcmp(fill(20), sha1))
+ the_hash_algo->final_fn(hash, &input_ctx);
+ if (hashcmp(fill(the_hash_algo->rawsz), hash))
die(_("pack is corrupted (SHA1 mismatch)"));
- use(20);
+ use(the_hash_algo->rawsz);
/* If input_fd is a file, we should have reached its end now. */
if (fstat(input_fd, &st))
/*
* Third pass:
* - append objects to convert thin pack to full pack if required
- * - write the final 20-byte SHA-1
+ * - write the final pack hash
*/
-static void fix_unresolved_deltas(struct sha1file *f);
-static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_sha1)
+static void fix_unresolved_deltas(struct hashfile *f);
+static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_hash)
{
if (nr_ref_deltas + nr_ofs_deltas == nr_resolved_deltas) {
stop_progress(&progress);
- /* Flush remaining pack final 20-byte SHA1. */
+ /* Flush remaining pack final hash. */
flush();
return;
}
if (fix_thin_pack) {
- struct sha1file *f;
- unsigned char read_sha1[20], tail_sha1[20];
+ struct hashfile *f;
+ unsigned char read_hash[GIT_MAX_RAWSZ], tail_hash[GIT_MAX_RAWSZ];
struct strbuf msg = STRBUF_INIT;
int nr_unresolved = nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas;
int nr_objects_initial = nr_objects;
REALLOC_ARRAY(objects, nr_objects + nr_unresolved + 1);
memset(objects + nr_objects + 1, 0,
nr_unresolved * sizeof(*objects));
- f = sha1fd(output_fd, curr_pack);
+ f = hashfd(output_fd, curr_pack);
fix_unresolved_deltas(f);
strbuf_addf(&msg, Q_("completed with %d local object",
"completed with %d local objects",
nr_objects - nr_objects_initial);
stop_progress_msg(&progress, msg.buf);
strbuf_release(&msg);
- sha1close(f, tail_sha1, 0);
- hashcpy(read_sha1, pack_sha1);
- fixup_pack_header_footer(output_fd, pack_sha1,
+ hashclose(f, tail_hash, 0);
+ hashcpy(read_hash, pack_hash);
+ fixup_pack_header_footer(output_fd, pack_hash,
curr_pack, nr_objects,
- read_sha1, consumed_bytes-20);
- if (hashcmp(read_sha1, tail_sha1) != 0)
+ read_hash, consumed_bytes-the_hash_algo->rawsz);
+ if (hashcmp(read_hash, tail_hash) != 0)
die(_("Unexpected tail checksum for %s "
"(disk corruption?)"), curr_pack);
}
nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas);
}
-static int write_compressed(struct sha1file *f, void *in, unsigned int size)
+static int write_compressed(struct hashfile *f, void *in, unsigned int size)
{
git_zstream stream;
int status;
stream.next_out = outbuf;
stream.avail_out = sizeof(outbuf);
status = git_deflate(&stream, Z_FINISH);
- sha1write(f, outbuf, sizeof(outbuf) - stream.avail_out);
+ hashwrite(f, outbuf, sizeof(outbuf) - stream.avail_out);
} while (status == Z_OK);
if (status != Z_STREAM_END)
return size;
}
-static struct object_entry *append_obj_to_pack(struct sha1file *f,
+static struct object_entry *append_obj_to_pack(struct hashfile *f,
const unsigned char *sha1, void *buf,
unsigned long size, enum object_type type)
{
}
header[n++] = c;
crc32_begin(f);
- sha1write(f, header, n);
+ hashwrite(f, header, n);
obj[0].size = size;
obj[0].hdr_size = n;
obj[0].type = type;
obj[1].idx.offset = obj[0].idx.offset + n;
obj[1].idx.offset += write_compressed(f, buf, size);
obj[0].idx.crc32 = crc32_end(f);
- sha1flush(f);
+ hashflush(f);
hashcpy(obj->idx.oid.hash, sha1);
return obj;
}
return a->obj_no - b->obj_no;
}
-static void fix_unresolved_deltas(struct sha1file *f)
+static void fix_unresolved_deltas(struct hashfile *f)
{
struct ref_delta_entry **sorted_by_pos;
int i;
free(sorted_by_pos);
}
+static const char *derive_filename(const char *pack_name, const char *suffix,
+ struct strbuf *buf)
+{
+ size_t len;
+ if (!strip_suffix(pack_name, ".pack", &len))
+ die(_("packfile name '%s' does not end with '.pack'"),
+ pack_name);
+ strbuf_add(buf, pack_name, len);
+ strbuf_addch(buf, '.');
+ strbuf_addstr(buf, suffix);
+ return buf->buf;
+}
+
+static void write_special_file(const char *suffix, const char *msg,
+ const char *pack_name, const unsigned char *hash,
+ const char **report)
+{
+ struct strbuf name_buf = STRBUF_INIT;
+ const char *filename;
+ int fd;
+ int msg_len = strlen(msg);
+
+ if (pack_name)
+ filename = derive_filename(pack_name, suffix, &name_buf);
+ else
+ filename = odb_pack_name(&name_buf, hash, suffix);
+
+ fd = odb_pack_keep(filename);
+ if (fd < 0) {
+ if (errno != EEXIST)
+ die_errno(_("cannot write %s file '%s'"),
+ suffix, filename);
+ } else {
+ if (msg_len > 0) {
+ write_or_die(fd, msg, msg_len);
+ write_or_die(fd, "\n", 1);
+ }
+ if (close(fd) != 0)
+ die_errno(_("cannot close written %s file '%s'"),
+ suffix, filename);
+ if (report)
+ *report = suffix;
+ }
+ strbuf_release(&name_buf);
+}
+
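
Note that derive_filename() now takes its suffix without a leading dot and adds the dot itself (callers below pass "idx" rather than ".idx"), which lets write_special_file() drop a sidecar like pack-*.keep or pack-*.promisor next to any *.pack. A standalone approximation of the derivation; where Git's version die()s on a name that does not end in .pack, this sketch merely reports failure:

    #include <stdio.h>
    #include <string.h>

    /* "pack-f00d.pack" + "promisor" -> "pack-f00d.promisor" */
    static int derive(const char *pack_name, const char *suffix,
                      char *out, size_t outlen)
    {
            const char *dot = ".pack";
            size_t len = strlen(pack_name);
            size_t dlen = strlen(dot);

            if (len < dlen || strcmp(pack_name + len - dlen, dot))
                    return -1; /* not a *.pack name */
            snprintf(out, outlen, "%.*s.%s",
                     (int)(len - dlen), pack_name, suffix);
            return 0;
    }

    int main(void)
    {
            char buf[64];

            if (!derive("pack-f00d.pack", "promisor", buf, sizeof(buf)))
                    printf("%s\n", buf); /* pack-f00d.promisor */
            return 0;
    }
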
static void final(const char *final_pack_name, const char *curr_pack_name,
const char *final_index_name, const char *curr_index_name,
- const char *keep_name, const char *keep_msg,
- unsigned char *sha1)
+ const char *keep_msg, const char *promisor_msg,
+ unsigned char *hash)
{
const char *report = "pack";
struct strbuf pack_name = STRBUF_INIT;
struct strbuf index_name = STRBUF_INIT;
- struct strbuf keep_name_buf = STRBUF_INIT;
int err;
if (!from_stdin) {
die_errno(_("error while closing pack file"));
}
- if (keep_msg) {
- int keep_fd, keep_msg_len = strlen(keep_msg);
-
- if (!keep_name)
- keep_name = odb_pack_name(&keep_name_buf, sha1, "keep");
-
- keep_fd = odb_pack_keep(keep_name);
- if (keep_fd < 0) {
- if (errno != EEXIST)
- die_errno(_("cannot write keep file '%s'"),
- keep_name);
- } else {
- if (keep_msg_len > 0) {
- write_or_die(keep_fd, keep_msg, keep_msg_len);
- write_or_die(keep_fd, "\n", 1);
- }
- if (close(keep_fd) != 0)
- die_errno(_("cannot close written keep file '%s'"),
- keep_name);
- report = "keep";
- }
- }
+ if (keep_msg)
+ write_special_file("keep", keep_msg, final_pack_name, hash,
+ &report);
+ if (promisor_msg)
+ write_special_file("promisor", promisor_msg, final_pack_name,
+ hash, NULL);
if (final_pack_name != curr_pack_name) {
if (!final_pack_name)
- final_pack_name = odb_pack_name(&pack_name, sha1, "pack");
+ final_pack_name = odb_pack_name(&pack_name, hash, "pack");
if (finalize_object_file(curr_pack_name, final_pack_name))
die(_("cannot store pack file"));
} else if (from_stdin)
if (final_index_name != curr_index_name) {
if (!final_index_name)
- final_index_name = odb_pack_name(&index_name, sha1, "idx");
+ final_index_name = odb_pack_name(&index_name, hash, "idx");
if (finalize_object_file(curr_index_name, final_index_name))
die(_("cannot store index file"));
} else
chmod(final_index_name, 0444);
if (!from_stdin) {
- printf("%s\n", sha1_to_hex(sha1));
+ printf("%s\n", sha1_to_hex(hash));
} else {
struct strbuf buf = STRBUF_INIT;
- strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(sha1));
+ strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(hash));
write_or_die(1, buf.buf, buf.len);
strbuf_release(&buf);
strbuf_release(&index_name);
strbuf_release(&pack_name);
- strbuf_release(&keep_name_buf);
}
static int git_index_pack_config(const char *k, const char *v, void *cb)
}
}
-static const char *derive_filename(const char *pack_name, const char *suffix,
- struct strbuf *buf)
-{
- size_t len;
- if (!strip_suffix(pack_name, ".pack", &len))
- die(_("packfile name '%s' does not end with '.pack'"),
- pack_name);
- strbuf_add(buf, pack_name, len);
- strbuf_addstr(buf, suffix);
- return buf->buf;
-}
-
int cmd_index_pack(int argc, const char **argv, const char *prefix)
{
int i, fix_thin_pack = 0, verify = 0, stat_only = 0;
const char *curr_index;
const char *index_name = NULL, *pack_name = NULL;
- const char *keep_name = NULL, *keep_msg = NULL;
- struct strbuf index_name_buf = STRBUF_INIT,
- keep_name_buf = STRBUF_INIT;
+ const char *keep_msg = NULL;
+ const char *promisor_msg = NULL;
+ struct strbuf index_name_buf = STRBUF_INIT;
struct pack_idx_entry **idx_objects;
struct pack_idx_option opts;
- unsigned char pack_sha1[20];
+ unsigned char pack_hash[GIT_MAX_RAWSZ];
unsigned foreign_nr = 1; /* zero is a "good" value, assume bad */
int report_end_of_input = 0;
+ /*
+ * index-pack never needs to fetch missing objects, since it only
+ * accesses the repo to do hash collision checks
+ */
+ fetch_if_missing = 0;
+
if (argc == 2 && !strcmp(argv[1], "-h"))
usage(index_pack_usage);
stat_only = 1;
} else if (skip_to_optional_arg(arg, "--keep", &keep_msg)) {
; /* nothing to do */
+ } else if (skip_to_optional_arg(arg, "--promisor", &promisor_msg)) {
+ ; /* already parsed */
} else if (starts_with(arg, "--threads=")) {
char *end;
nr_threads = strtoul(arg+10, &end, 0);
if (from_stdin && !startup_info->have_repository)
die(_("--stdin requires a git repository"));
if (!index_name && pack_name)
- index_name = derive_filename(pack_name, ".idx", &index_name_buf);
- if (keep_msg && !keep_name && pack_name)
- keep_name = derive_filename(pack_name, ".keep", &keep_name_buf);
+ index_name = derive_filename(pack_name, "idx", &index_name_buf);
if (verify) {
if (!index_name)
if (show_stat)
obj_stat = xcalloc(st_add(nr_objects, 1), sizeof(struct object_stat));
ofs_deltas = xcalloc(nr_objects, sizeof(struct ofs_delta_entry));
- parse_pack_objects(pack_sha1);
+ parse_pack_objects(pack_hash);
if (report_end_of_input)
write_in_full(2, "\0", 1);
resolve_deltas();
- conclude_pack(fix_thin_pack, curr_pack, pack_sha1);
+ conclude_pack(fix_thin_pack, curr_pack, pack_hash);
free(ofs_deltas);
free(ref_deltas);
if (strict)
ALLOC_ARRAY(idx_objects, nr_objects);
for (i = 0; i < nr_objects; i++)
idx_objects[i] = &objects[i].idx;
- curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_sha1);
+ curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_hash);
free(idx_objects);
if (!verify)
final(pack_name, curr_pack,
index_name, curr_index,
- keep_name, keep_msg,
- pack_sha1);
+ keep_msg, promisor_msg,
+ pack_hash);
else
close(input_fd);
free(objects);
strbuf_release(&index_name_buf);
- strbuf_release(&keep_name_buf);
if (pack_name == NULL)
free((void *) curr_pack);
if (index_name == NULL)
#include "gpg-interface.h"
#include "progress.h"
+#define MAIL_DEFAULT_WRAP 72
+
/* Set a default date-time format for git log ("log.date" config variable) */
static const char *default_date_mode = NULL;
shortlog_init(&log);
log.wrap_lines = 1;
- log.wrap = 72;
+ log.wrap = MAIL_DEFAULT_WRAP;
log.in1 = 2;
log.in2 = 4;
log.file = rev->diffopt.file;
memcpy(&opts, &rev->diffopt, sizeof(opts));
opts.output_format = DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
+ opts.stat_width = MAIL_DEFAULT_WRAP;
diff_setup_done(&opts);
(!rev.diffopt.output_format ||
rev.diffopt.output_format == DIFF_FORMAT_PATCH))
rev.diffopt.output_format = DIFF_FORMAT_DIFFSTAT | DIFF_FORMAT_SUMMARY;
+ if (!rev.diffopt.stat_width)
+ rev.diffopt.stat_width = MAIL_DEFAULT_WRAP;
/* Always generate a patch */
rev.diffopt.output_format |= DIFF_FORMAT_PATCH;
pptr = commit_list_append(head, pptr);
pptr = commit_list_append(remoteheads->item, pptr);
prepare_to_commit(remoteheads);
- if (commit_tree(merge_msg.buf, merge_msg.len, result_tree.hash, parents,
- result_commit.hash, NULL, sign_commit))
+ if (commit_tree(merge_msg.buf, merge_msg.len, &result_tree, parents,
+ &result_commit, NULL, sign_commit))
die(_("failed to write commit object"));
finish(head, remoteheads, &result_commit, "In-index merge");
drop_save();
commit_list_insert(head, &parents);
strbuf_addch(&merge_msg, '\n');
prepare_to_commit(remoteheads);
- if (commit_tree(merge_msg.buf, merge_msg.len, result_tree->hash, parents,
- result_commit.hash, NULL, sign_commit))
+ if (commit_tree(merge_msg.buf, merge_msg.len, result_tree, parents,
+ &result_commit, NULL, sign_commit))
die(_("failed to write commit object"));
strbuf_addf(&buf, "Merge made by the '%s' strategy.", wt_strategy);
finish(head, remoteheads, &result_commit, buf.buf);
int cmd_mktag(int argc, const char **argv, const char *prefix)
{
struct strbuf buf = STRBUF_INIT;
- unsigned char result_sha1[20];
+ struct object_id result;
if (argc != 1)
usage("git mktag");
if (verify_tag(buf.buf, buf.len) < 0)
die("invalid tag signature file");
- if (write_sha1_file(buf.buf, buf.len, tag_type, result_sha1) < 0)
+ if (write_object_file(buf.buf, buf.len, tag_type, &result) < 0)
die("unable to write tag file");
strbuf_release(&buf);
- printf("%s\n", sha1_to_hex(result_sha1));
+ printf("%s\n", oid_to_hex(&result));
return 0;
}
b->name, b->len, b->mode);
}
-static void write_tree(unsigned char *sha1)
+static void write_tree(struct object_id *oid)
{
struct strbuf buf;
size_t size;
strbuf_add(&buf, ent->sha1, 20);
}
- write_sha1_file(buf.buf, buf.len, tree_type, sha1);
+ write_object_file(buf.buf, buf.len, tree_type, oid);
strbuf_release(&buf);
}
int cmd_mktree(int ac, const char **av, const char *prefix)
{
struct strbuf sb = STRBUF_INIT;
- unsigned char sha1[20];
+ struct object_id oid;
int nul_term_line = 0;
int allow_missing = 0;
int is_batch_mode = 0;
*/
; /* skip creating an empty tree */
} else {
- write_tree(sha1);
- puts(sha1_to_hex(sha1));
+ write_tree(&oid);
+ puts(oid_to_hex(&oid));
fflush(stdout);
}
used=0; /* reset tree entry buffer for re-use in batch mode */
pos = cache_name_pos(src, strlen(src));
assert(pos >= 0);
- if (!show_only)
- rename_cache_entry_at(pos, dst);
+ rename_cache_entry_at(pos, dst);
}
if (gitmodules_modified)
}
}
-static void write_note_data(struct note_data *d, unsigned char *sha1)
+static void write_note_data(struct note_data *d, struct object_id *oid)
{
- if (write_sha1_file(d->buf.buf, d->buf.len, blob_type, sha1)) {
+ if (write_object_file(d->buf.buf, d->buf.len, blob_type, oid)) {
error(_("unable to write note object"));
if (d->edit_path)
error(_("the note contents have been left in %s"),
prepare_note_data(&object, &d, note ? note->hash : NULL);
if (d.buf.len || allow_empty) {
- write_note_data(&d, new_note.hash);
+ write_note_data(&d, &new_note);
if (add_note(t, &object, &new_note, combine_notes_overwrite))
die("BUG: combine_notes_overwrite failed");
commit_notes(t, "Notes added by 'git notes add'");
}
if (d.buf.len || allow_empty) {
- write_note_data(&d, new_note.hash);
+ write_note_data(&d, &new_note);
if (add_note(t, &object, &new_note, combine_notes_overwrite))
die("BUG: combine_notes_overwrite failed");
logmsg = xstrfmt("Notes added by 'git notes %s'", argv[0]);
#include "reachable.h"
#include "sha1-array.h"
#include "argv-array.h"
-#include "mru.h"
+#include "list.h"
#include "packfile.h"
static const char *pack_usage[] = {
static int write_bitmap_index;
static uint16_t write_bitmap_options;
+static int exclude_promisor_objects;
+
static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
static unsigned long cache_max_small_delta_size = 1000;
static struct list_objects_filter_options filter_options;
enum missing_action {
- MA_ERROR = 0, /* fail if any missing objects are encountered */
- MA_ALLOW_ANY, /* silently allow ALL missing objects */
+ MA_ERROR = 0, /* fail if any missing objects are encountered */
+ MA_ALLOW_ANY, /* silently allow ALL missing objects */
+ MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
};
static enum missing_action arg_missing_action;
static show_object_fn fn_show_object;
return stream.total_out;
}
-static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f,
+static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
const struct object_id *oid)
{
git_zstream stream;
stream.next_out = obuf;
stream.avail_out = sizeof(obuf);
zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
- sha1write(f, obuf, stream.next_out - obuf);
+ hashwrite(f, obuf, stream.next_out - obuf);
olen += stream.next_out - obuf;
}
if (stream.avail_in)
stream.total_in == len) ? 0 : -1;
}
-static void copy_pack_data(struct sha1file *f,
+static void copy_pack_data(struct hashfile *f,
struct packed_git *p,
struct pack_window **w_curs,
off_t offset,
in = use_pack(p, w_curs, offset, &avail);
if (avail > len)
avail = (unsigned long)len;
- sha1write(f, in, avail);
+ hashwrite(f, in, avail);
offset += avail;
len -= avail;
}
}
/* Return 0 if we will bust the pack-size limit */
-static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
+static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
unsigned long limit, int usable_delta)
{
unsigned long size, datalen;
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, dheader + pos, sizeof(dheader) - pos);
hdrlen += sizeof(dheader) - pos;
} else if (type == OBJ_REF_DELTA) {
/*
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, entry->delta->idx.oid.hash, 20);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, entry->delta->idx.oid.hash, 20);
hdrlen += 20;
} else {
if (limit && hdrlen + datalen + 20 >= limit) {
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
+ hashwrite(f, header, hdrlen);
}
if (st) {
datalen = write_large_blob_data(st, f, &entry->idx.oid);
close_istream(st);
} else {
- sha1write(f, buf, datalen);
+ hashwrite(f, buf, datalen);
free(buf);
}
}
/* Return 0 if we will bust the pack-size limit */
-static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
+static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
unsigned long limit, int usable_delta)
{
struct packed_git *p = entry->in_pack;
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, dheader + pos, sizeof(dheader) - pos);
hdrlen += sizeof(dheader) - pos;
reused_delta++;
} else if (type == OBJ_REF_DELTA) {
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, entry->delta->idx.oid.hash, 20);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, entry->delta->idx.oid.hash, 20);
hdrlen += 20;
reused_delta++;
} else {
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
+ hashwrite(f, header, hdrlen);
}
copy_pack_data(f, p, &w_curs, offset, datalen);
unuse_pack(&w_curs);
}
/* Return 0 if we will bust the pack-size limit */
-static off_t write_object(struct sha1file *f,
+static off_t write_object(struct hashfile *f,
struct object_entry *entry,
off_t write_offset)
{
WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};
-static enum write_one_status write_one(struct sha1file *f,
+static enum write_one_status write_one(struct hashfile *f,
struct object_entry *e,
off_t *offset)
{
return wo;
}
-static off_t write_reused_pack(struct sha1file *f)
+static off_t write_reused_pack(struct hashfile *f)
{
unsigned char buffer[8192];
off_t to_write, total;
if (read_pack > to_write)
read_pack = to_write;
- sha1write(f, buffer, read_pack);
+ hashwrite(f, buffer, read_pack);
to_write -= read_pack;
/*
static void write_pack_file(void)
{
uint32_t i = 0, j;
- struct sha1file *f;
+ struct hashfile *f;
off_t offset;
uint32_t nr_remaining = nr_result;
time_t last_mtime = 0;
char *pack_tmp_name = NULL;
if (pack_to_stdout)
- f = sha1fd_throughput(1, "<stdout>", progress_state);
+ f = hashfd_throughput(1, "<stdout>", progress_state);
else
f = create_tmp_packfile(&pack_tmp_name);
* If so, rewrite it like in fast-import
*/
if (pack_to_stdout) {
- sha1close(f, oid.hash, CSUM_CLOSE);
+ hashclose(f, oid.hash, CSUM_CLOSE);
} else if (nr_written == nr_remaining) {
- sha1close(f, oid.hash, CSUM_FSYNC);
+ hashclose(f, oid.hash, CSUM_FSYNC);
} else {
- int fd = sha1close(f, oid.hash, 0);
+ int fd = hashclose(f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
nr_written, oid.hash, offset);
close(fd);
struct packed_git **found_pack,
off_t *found_offset)
{
- struct mru_entry *entry;
int want;
+ struct list_head *pos;
if (!exclude && local && has_loose_object_nonlocal(oid->hash))
return 0;
return want;
}
- for (entry = packed_git_mru.head; entry; entry = entry->next) {
- struct packed_git *p = entry->item;
+ list_for_each(pos, &packed_git_mru) {
+ struct packed_git *p = list_entry(pos, struct packed_git, mru);
off_t offset;
if (p == *found_pack)
}
want = want_found_object(exclude, p);
if (!exclude && want > 0)
- mru_mark(&packed_git_mru, entry);
+ list_move(&p->mru, &packed_git_mru);
if (want != -1)
return want;
}
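(The hunk above replaces the bespoke "mru" API with the generic kernel-style
list macros from list.h: the MRU link is now embedded in struct packed_git
itself. A minimal sketch of the iterate-and-promote pattern, assuming list.h
is included; pack_wanted() is a hypothetical predicate standing in for
want_found_object():

	struct list_head *pos;

	list_for_each(pos, &packed_git_mru) {
		struct packed_git *p = list_entry(pos, struct packed_git, mru);
		if (!pack_wanted(p))
			continue;
		/* promote the hit to the front of the MRU list ... */
		list_move(&p->mru, &packed_git_mru);
		break;	/* ... and stop: the iterator is stale after list_move() */
	}

As in the real code above, iteration ends once an entry has been moved.)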
show_object(obj, name, data);
}
+static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data)
+{
+ assert(arg_missing_action == MA_ALLOW_PROMISOR);
+
+ /*
+ * Quietly ignore EXPECTED missing objects. This avoids problems with
+ * staging them now and getting an odd error later.
+ */
+ if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid))
+ return;
+
+ show_object(obj, name, data);
+}
+
static int option_parse_missing_action(const struct option *opt,
const char *arg, int unset)
{
if (!strcmp(arg, "allow-any")) {
arg_missing_action = MA_ALLOW_ANY;
+ fetch_if_missing = 0;
fn_show_object = show_object__ma_allow_any;
return 0;
}
+ if (!strcmp(arg, "allow-promisor")) {
+ arg_missing_action = MA_ALLOW_PROMISOR;
+ fetch_if_missing = 0;
+ fn_show_object = show_object__ma_allow_promisor;
+ return 0;
+ }
+
die(_("invalid value for --missing"));
return 0;
}
if (!packlist_find(&to_pack, oid.hash, NULL) &&
!has_sha1_pack_kept_or_nonlocal(&oid) &&
!loosened_object_can_be_discarded(&oid, p->mtime))
- if (force_object_loose(oid.hash, p->mtime))
+ if (force_object_loose(&oid, p->mtime))
die("unable to force loose object");
}
}
{ OPTION_CALLBACK, 0, "missing", NULL, N_("action"),
N_("handling for missing objects"), PARSE_OPT_NONEG,
option_parse_missing_action },
+ OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
+ N_("do not pack objects in promisor packfiles")),
OPT_END(),
};
argv_array_push(&rp, "--unpacked");
}
+ if (exclude_promisor_objects) {
+ use_internal_rev_list = 1;
+ fetch_if_missing = 0;
+ argv_array_push(&rp, "--exclude-promisor-objects");
+ }
+
if (!reuse_object)
reuse_delta = 0;
if (pack_compression_level == -1)
{
struct rev_info revs;
struct progress *progress = NULL;
+ int exclude_promisor_objects = 0;
const struct option options[] = {
OPT__DRY_RUN(&show_only, N_("do not remove, show only")),
OPT__VERBOSE(&verbose, N_("report pruned objects")),
OPT_BOOL(0, "progress", &show_progress, N_("show progress")),
OPT_EXPIRY_DATE(0, "expire", &expire,
N_("expire objects older than <time>")),
+ OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
+ N_("limit traversal to objects outside promisor packfiles")),
OPT_END()
};
char *s;
show_progress = isatty(2);
if (show_progress)
progress = start_delayed_progress(_("Checking connectivity"), 0);
+ if (exclude_promisor_objects) {
+ fetch_if_missing = 0;
+ revs.exclude_promisor_objects = 1;
+ }
mark_reachable_objects(&revs, 1, expire, progress);
stop_progress(&progress);
cp.no_stdin = 1;
argv_array_pushl(&cp.args, "submodule", "update",
"--recursive", "--rebase", NULL);
+ argv_push_verbosity(&cp.args);
return run_command(&cp);
}
cp.no_stdin = 1;
argv_array_pushl(&cp.args, "submodule", "update",
"--recursive", "--checkout", NULL);
+ argv_push_verbosity(&cp.args);
return run_command(&cp);
}
struct option options[] = {
OPT_BOOL(0, "ff", &opts.allow_ff, N_("allow fast-forward")),
OPT_BOOL(0, "keep-empty", &keep_empty, N_("keep empty commits")),
+ OPT_BOOL(0, "allow-empty-message", &opts.allow_empty_message,
+ N_("allow commits with empty messages")),
OPT_CMDMODE(0, "continue", &command, N_("continue rebase"),
CONTINUE),
OPT_CMDMODE(0, "abort", &command, N_("abort rebase"),
OPT_END()
};
- git_config(git_default_config, NULL);
+ sequencer_init_config(&opts);
git_config_get_bool("rebase.abbreviatecommands", &abbreviate_commands);
opts.action = REPLAY_INTERACTIVE_REBASE;
static int shallow_update;
static const char *alt_shallow_file;
static struct strbuf push_cert = STRBUF_INIT;
-static unsigned char push_cert_sha1[20];
+static struct object_id push_cert_oid;
static struct signature_check sigcheck;
static const char *push_cert_nonce;
static const char *cert_nonce_seed;
int bogs /* beginning_of_gpg_sig */;
already_done = 1;
- if (write_sha1_file(push_cert.buf, push_cert.len, "blob", push_cert_sha1))
- hashclr(push_cert_sha1);
+ if (write_object_file(push_cert.buf, push_cert.len, "blob",
+ &push_cert_oid))
+ oidclr(&push_cert_oid);
memset(&sigcheck, '\0', sizeof(sigcheck));
sigcheck.result = 'N';
strbuf_release(&gpg_status);
nonce_status = check_nonce(push_cert.buf, bogs);
}
- if (!is_null_sha1(push_cert_sha1)) {
+ if (!is_null_oid(&push_cert_oid)) {
argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT=%s",
- sha1_to_hex(push_cert_sha1));
+ oid_to_hex(&push_cert_oid));
argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_SIGNER=%s",
sigcheck.signer ? sigcheck.signer : "");
argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_KEY=%s",
/*
 * Adds to the fname list the hex strings of all packs that do not
- * have a corresponding .keep file.
+ * have a corresponding .keep or .promisor file. These packs are not to
+ * be kept if we are going to pack everything into one file.
*/
static void get_non_kept_pack_filenames(struct string_list *fname_list)
{
fname = xmemdupz(e->d_name, len);
- if (!file_exists(mkpath("%s/%s.keep", packdir, fname)))
+ if (!file_exists(mkpath("%s/%s.keep", packdir, fname)) &&
+ !file_exists(mkpath("%s/%s.promisor", packdir, fname)))
string_list_append_nodup(fname_list, fname);
else
free(fname);
argv_array_push(&cmd.args, "--all");
argv_array_push(&cmd.args, "--reflog");
argv_array_push(&cmd.args, "--indexed-objects");
+ if (repository_format_partial_clone)
+ argv_array_push(&cmd.args, "--exclude-promisor-objects");
if (window)
argv_array_pushf(&cmd.args, "--window=%s", window);
if (window_memory)
struct tag *tag;
int i;
- hash_sha1_file(extra->value, extra->len, typename(OBJ_TAG), tag_oid.hash);
+ hash_object_file(extra->value, extra->len, typename(OBJ_TAG), &tag_oid);
tag = lookup_tag(&tag_oid);
if (!tag)
die(_("bad mergetag in commit '%s'"), ref);
check_mergetags(commit, argc, argv);
- if (write_sha1_file(buf.buf, buf.len, commit_type, new.hash))
+ if (write_object_file(buf.buf, buf.len, commit_type, &new))
die(_("could not write replacement commit for: '%s'"), old_ref);
strbuf_release(&buf);
static void print_new_head_line(struct commit *commit)
{
- const char *hex, *body;
- const char *msg;
-
- hex = find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV);
- printf(_("HEAD is now at %s"), hex);
- msg = logmsg_reencode(commit, NULL, get_log_output_encoding());
- body = strstr(msg, "\n\n");
- if (body) {
- const char *eol;
- size_t len;
- body = skip_blank_lines(body + 2);
- eol = strchr(body, '\n');
- len = eol ? eol - body : strlen(body);
- printf(" %.*s\n", (int) len, body);
- }
- else
- printf("\n");
- unuse_commit_buffer(commit, msg);
+ struct strbuf buf = STRBUF_INIT;
+
+ printf(_("HEAD is now at %s"),
+ find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV));
+
+ pp_commit_easy(CMIT_FMT_ONELINE, commit, &buf);
+ if (buf.len > 0)
+ printf(" %s", buf.buf);
+ putchar('\n');
+ strbuf_release(&buf);
}
static void update_index_from_diff(struct diff_queue_struct *q,
#include "progress.h"
#include "reflog-walk.h"
#include "oidset.h"
+#include "packfile.h"
static const char rev_list_usage[] =
"git rev-list [OPTION] <commit-id>... [ -- paths... ]\n"
MA_ERROR = 0, /* fail if any missing objects are encountered */
MA_ALLOW_ANY, /* silently allow ALL missing objects */
MA_PRINT, /* print ALL missing objects in special section */
+ MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
};
static enum missing_action arg_missing_action;
static inline void finish_object__ma(struct object *obj)
{
+ /*
+ * Whether or not we try to dynamically fetch missing objects
+ * from the server, we currently DO NOT have the object. We
+ * can either print, allow (ignore), or conditionally allow
+ * (ignore) them.
+ */
switch (arg_missing_action) {
case MA_ERROR:
die("missing blob object '%s'", oid_to_hex(&obj->oid));
oidset_insert(&missing_objects, &obj->oid);
return;
+ case MA_ALLOW_PROMISOR:
+ if (is_promisor_object(&obj->oid))
+ return;
+ die("unexpected missing blob object '%s'",
+ oid_to_hex(&obj->oid));
+ return;
+
default:
BUG("unhandled missing_action");
return;
}
}
-static void finish_object(struct object *obj, const char *name, void *cb_data)
+static int finish_object(struct object *obj, const char *name, void *cb_data)
{
struct rev_list_info *info = cb_data;
- if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid))
+ if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid)) {
finish_object__ma(obj);
+ return 1;
+ }
if (info->revs->verify_objects && !obj->parsed && obj->type != OBJ_COMMIT)
parse_object(&obj->oid);
+ return 0;
}
static void show_object(struct object *obj, const char *name, void *cb_data)
{
struct rev_list_info *info = cb_data;
- finish_object(obj, name, cb_data);
+ if (finish_object(obj, name, cb_data))
+ return;
display_progress(progress, ++progress_counter);
if (info->flags & REV_LIST_QUIET)
return;
if (!strcmp(value, "allow-any")) {
arg_missing_action = MA_ALLOW_ANY;
+ fetch_if_missing = 0;
return 1;
}
if (!strcmp(value, "print")) {
arg_missing_action = MA_PRINT;
+ fetch_if_missing = 0;
+ return 1;
+ }
+
+ if (!strcmp(value, "allow-promisor")) {
+ arg_missing_action = MA_ALLOW_PROMISOR;
+ fetch_if_missing = 0;
return 1;
}
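(Note that every recognized "--missing" value also clears fetch_if_missing: a
caller asking about missing objects wants to observe the gaps, not have a
partial-clone fetch fill them in transparently. For example,
"git rev-list --objects --missing=print HEAD" lists missing objects with a
"?" prefix instead of fetching them.)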
init_revisions(&revs, prefix);
revs.abbrev = DEFAULT_ABBREV;
revs.commit_format = CMIT_FMT_UNSPECIFIED;
+
+ /*
+ * Scan the argument list before invoking setup_revisions(), so that we
+ * know if fetch_if_missing needs to be set to 0.
+ *
+ * "--exclude-promisor-objects" acts as a pre-filter on missing objects
+ * by not crossing the boundary from realized objects to promisor
+ * objects.
+ *
+ * Let "--missing" to conditionally set fetch_if_missing.
+ */
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (!strcmp(arg, "--exclude-promisor-objects")) {
+ fetch_if_missing = 0;
+ revs.exclude_promisor_objects = 1;
+ break;
+ }
+ }
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (skip_prefix(arg, "--missing=", &arg)) {
+ if (revs.exclude_promisor_objects)
+ die(_("cannot combine --exclude-promisor-objects and --missing"));
+ if (parse_missing_action_value(arg))
+ break;
+ }
+ }
+
argc = setup_revisions(argc, argv, &revs, NULL);
memset(&info, 0, sizeof(info));
continue;
}
if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) {
- list_objects_filter_release(&filter_options);
+ list_objects_filter_set_no_filter(&filter_options);
continue;
}
if (!strcmp(arg, "--filter-print-omitted")) {
continue;
}
- if (skip_prefix(arg, "--missing=", &arg) &&
- parse_missing_action_value(arg))
- continue;
+ if (!strcmp(arg, "--exclude-promisor-objects"))
+ continue; /* already handled above */
+ if (skip_prefix(arg, "--missing=", &arg))
+ continue; /* already handled above */
usage(rev_list_usage);
PARSE_OPT_SHELL_EVAL);
strbuf_addstr(&parsed, " --");
- sq_quote_argv(&parsed, argv, 0);
+ sq_quote_argv(&parsed, argv);
puts(parsed.buf);
return 0;
}
struct strbuf buf = STRBUF_INIT;
if (argc)
- sq_quote_argv(&buf, argv, 0);
+ sq_quote_argv(&buf, argv);
printf("%s\n", buf.buf);
strbuf_release(&buf);
if (isatty(0))
opts.edit = 1;
opts.action = REPLAY_REVERT;
- git_config(git_default_config, NULL);
+ sequencer_init_config(&opts);
res = run_sequencer(argc, argv, &opts);
if (res < 0)
die(_("revert failed"));
int res;
opts.action = REPLAY_PICK;
- git_config(git_default_config, NULL);
+ sequencer_init_config(&opts);
res = run_sequencer(argc, argv, &opts);
if (res < 0)
die(_("cherry-pick failed"));
#define OPT_QUIET (1 << 0)
#define OPT_CACHED (1 << 1)
#define OPT_RECURSIVE (1 << 2)
+#define OPT_FORCE (1 << 3)
typedef void (*each_submodule_fn)(const struct cache_entry *list_item,
void *cb_data);
return ret;
}
+static int print_default_remote(int argc, const char **argv, const char *prefix)
+{
+ const char *remote;
+
+ if (argc != 1)
+ die(_("submodule--helper print-default-remote takes no arguments"));
+
+ remote = get_default_remote();
+ if (remote)
+ printf("%s\n", remote);
+
+ return 0;
+}
+
static int starts_with_dot_slash(const char *str)
{
return str[0] == '.' && is_dir_sep(str[1]);
*list = active_modules;
}
+static char *get_up_path(const char *path)
+{
+ int i;
+ struct strbuf sb = STRBUF_INIT;
+
+ for (i = count_slashes(path); i; i--)
+ strbuf_addstr(&sb, "../");
+
+ /*
+	 * Check whether 'path' ends with a slash, so that
+	 * dir/sub_dir and dir/sub_dir/ produce the same
+	 * output
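+	 * (e.g. get_up_path("dir/sub_dir") and
+	 * get_up_path("dir/sub_dir/") both return "../../")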
+ */
+ if (!is_dir_sep(path[strlen(path) - 1]))
+ strbuf_addstr(&sb, "../");
+
+ return strbuf_detach(&sb, NULL);
+}
+
static int module_list(int argc, const char **argv, const char *prefix)
{
int i;
return 0;
}
+struct sync_cb {
+ const char *prefix;
+ unsigned int flags;
+};
+
+#define SYNC_CB_INIT { NULL, 0 }
+
+static void sync_submodule(const char *path, const char *prefix,
+ unsigned int flags)
+{
+ const struct submodule *sub;
+ char *remote_key = NULL;
+ char *sub_origin_url, *super_config_url, *displaypath;
+ struct strbuf sb = STRBUF_INIT;
+ struct child_process cp = CHILD_PROCESS_INIT;
+ char *sub_config_path = NULL;
+
+ if (!is_submodule_active(the_repository, path))
+ return;
+
+ sub = submodule_from_path(&null_oid, path);
+
+ if (sub && sub->url) {
+ if (starts_with_dot_dot_slash(sub->url) ||
+ starts_with_dot_slash(sub->url)) {
+ char *remote_url, *up_path;
+ char *remote = get_default_remote();
+ strbuf_addf(&sb, "remote.%s.url", remote);
+
+ if (git_config_get_string(sb.buf, &remote_url))
+ remote_url = xgetcwd();
+
+ up_path = get_up_path(path);
+ sub_origin_url = relative_url(remote_url, sub->url, up_path);
+ super_config_url = relative_url(remote_url, sub->url, NULL);
+
+ free(remote);
+ free(up_path);
+ free(remote_url);
+ } else {
+ sub_origin_url = xstrdup(sub->url);
+ super_config_url = xstrdup(sub->url);
+ }
+ } else {
+ sub_origin_url = xstrdup("");
+ super_config_url = xstrdup("");
+ }
+
+ displaypath = get_submodule_displaypath(path, prefix);
+
+ if (!(flags & OPT_QUIET))
+ printf(_("Synchronizing submodule url for '%s'\n"),
+ displaypath);
+
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "submodule.%s.url", sub->name);
+ if (git_config_set_gently(sb.buf, super_config_url))
+ die(_("failed to register url for submodule path '%s'"),
+ displaypath);
+
+ if (!is_submodule_populated_gently(path, NULL))
+ goto cleanup;
+
+ prepare_submodule_repo_env(&cp.env_array);
+ cp.git_cmd = 1;
+ cp.dir = path;
+ argv_array_pushl(&cp.args, "submodule--helper",
+ "print-default-remote", NULL);
+
+ strbuf_reset(&sb);
+ if (capture_command(&cp, &sb, 0))
+ die(_("failed to get the default remote for submodule '%s'"),
+ path);
+
+ strbuf_strip_suffix(&sb, "\n");
+ remote_key = xstrfmt("remote.%s.url", sb.buf);
+
+ strbuf_reset(&sb);
+ submodule_to_gitdir(&sb, path);
+ strbuf_addstr(&sb, "/config");
+
+ if (git_config_set_in_file_gently(sb.buf, remote_key, sub_origin_url))
+ die(_("failed to update remote for submodule '%s'"),
+ path);
+
+ if (flags & OPT_RECURSIVE) {
+ struct child_process cpr = CHILD_PROCESS_INIT;
+
+ cpr.git_cmd = 1;
+ cpr.dir = path;
+ prepare_submodule_repo_env(&cpr.env_array);
+
+ argv_array_push(&cpr.args, "--super-prefix");
+ argv_array_pushf(&cpr.args, "%s/", displaypath);
+ argv_array_pushl(&cpr.args, "submodule--helper", "sync",
+ "--recursive", NULL);
+
+ if (flags & OPT_QUIET)
+ argv_array_push(&cpr.args, "--quiet");
+
+ if (run_command(&cpr))
+ die(_("failed to recurse into submodule '%s'"),
+ path);
+ }
+
+cleanup:
+ free(super_config_url);
+ free(sub_origin_url);
+ strbuf_release(&sb);
+ free(remote_key);
+ free(displaypath);
+ free(sub_config_path);
+}
+
+static void sync_submodule_cb(const struct cache_entry *list_item, void *cb_data)
+{
+ struct sync_cb *info = cb_data;
+ sync_submodule(list_item->name, info->prefix, info->flags);
+}
+
+static int module_sync(int argc, const char **argv, const char *prefix)
+{
+ struct sync_cb info = SYNC_CB_INIT;
+ struct pathspec pathspec;
+ struct module_list list = MODULE_LIST_INIT;
+ int quiet = 0;
+ int recursive = 0;
+
+ struct option module_sync_options[] = {
+ OPT__QUIET(&quiet, N_("Suppress output of synchronizing submodule url")),
+ OPT_BOOL(0, "recursive", &recursive,
+ N_("Recurse into nested submodules")),
+ OPT_END()
+ };
+
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule--helper sync [--quiet] [--recursive] [<path>]"),
+ NULL
+ };
+
+ argc = parse_options(argc, argv, prefix, module_sync_options,
+ git_submodule_helper_usage, 0);
+
+ if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
+ return 1;
+
+ info.prefix = prefix;
+ if (quiet)
+ info.flags |= OPT_QUIET;
+ if (recursive)
+ info.flags |= OPT_RECURSIVE;
+
+ for_each_listed_submodule(&list, sync_submodule_cb, &info);
+
+ return 0;
+}
+
+struct deinit_cb {
+ const char *prefix;
+ unsigned int flags;
+};
+#define DEINIT_CB_INIT { NULL, 0 }
+
+static void deinit_submodule(const char *path, const char *prefix,
+ unsigned int flags)
+{
+ const struct submodule *sub;
+ char *displaypath = NULL;
+ struct child_process cp_config = CHILD_PROCESS_INIT;
+ struct strbuf sb_config = STRBUF_INIT;
+ char *sub_git_dir = xstrfmt("%s/.git", path);
+
+ sub = submodule_from_path(&null_oid, path);
+
+ if (!sub || !sub->name)
+ goto cleanup;
+
+ displaypath = get_submodule_displaypath(path, prefix);
+
+ /* remove the submodule work tree (unless the user already did it) */
+ if (is_directory(path)) {
+ struct strbuf sb_rm = STRBUF_INIT;
+ const char *format;
+
+ /*
+ * protect submodules containing a .git directory
+ * NEEDSWORK: instead of dying, automatically call
+ * absorbgitdirs and (possibly) warn.
+ */
+ if (is_directory(sub_git_dir))
+ die(_("Submodule work tree '%s' contains a .git "
+ "directory (use 'rm -rf' if you really want "
+ "to remove it including all of its history)"),
+ displaypath);
+
+ if (!(flags & OPT_FORCE)) {
+ struct child_process cp_rm = CHILD_PROCESS_INIT;
+ cp_rm.git_cmd = 1;
+ argv_array_pushl(&cp_rm.args, "rm", "-qn",
+ path, NULL);
+
+ if (run_command(&cp_rm))
+ die(_("Submodule work tree '%s' contains local "
+ "modifications; use '-f' to discard them"),
+ displaypath);
+ }
+
+ strbuf_addstr(&sb_rm, path);
+
+ if (!remove_dir_recursively(&sb_rm, 0))
+ format = _("Cleared directory '%s'\n");
+ else
+ format = _("Could not remove submodule work tree '%s'\n");
+
+ if (!(flags & OPT_QUIET))
+ printf(format, displaypath);
+
+ strbuf_release(&sb_rm);
+ }
+
+ if (mkdir(path, 0777))
+ printf(_("could not create empty submodule directory %s"),
+ displaypath);
+
+ cp_config.git_cmd = 1;
+ argv_array_pushl(&cp_config.args, "config", "--get-regexp", NULL);
+ argv_array_pushf(&cp_config.args, "submodule.%s\\.", sub->name);
+
+ /* remove the .git/config entries (unless the user already did it) */
+ if (!capture_command(&cp_config, &sb_config, 0) && sb_config.len) {
+ char *sub_key = xstrfmt("submodule.%s", sub->name);
+ /*
+ * remove the whole section so we have a clean state when
+ * the user later decides to init this submodule again
+ */
+ git_config_rename_section_in_file(NULL, sub_key, NULL);
+ if (!(flags & OPT_QUIET))
+ printf(_("Submodule '%s' (%s) unregistered for path '%s'\n"),
+ sub->name, sub->url, displaypath);
+ free(sub_key);
+ }
+
+cleanup:
+ free(displaypath);
+ free(sub_git_dir);
+ strbuf_release(&sb_config);
+}
+
+static void deinit_submodule_cb(const struct cache_entry *list_item,
+ void *cb_data)
+{
+ struct deinit_cb *info = cb_data;
+ deinit_submodule(list_item->name, info->prefix, info->flags);
+}
+
+static int module_deinit(int argc, const char **argv, const char *prefix)
+{
+ struct deinit_cb info = DEINIT_CB_INIT;
+ struct pathspec pathspec;
+ struct module_list list = MODULE_LIST_INIT;
+ int quiet = 0;
+ int force = 0;
+ int all = 0;
+
+ struct option module_deinit_options[] = {
+ OPT__QUIET(&quiet, N_("Suppress submodule status output")),
+ OPT__FORCE(&force, N_("Remove submodule working trees even if they contain local changes")),
+ OPT_BOOL(0, "all", &all, N_("Unregister all submodules")),
+ OPT_END()
+ };
+
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule deinit [--quiet] [-f | --force] [--all | [--] [<path>...]]"),
+ NULL
+ };
+
+ argc = parse_options(argc, argv, prefix, module_deinit_options,
+ git_submodule_helper_usage, 0);
+
+ if (all && argc) {
+ error("pathspec and --all are incompatible");
+ usage_with_options(git_submodule_helper_usage,
+ module_deinit_options);
+ }
+
+ if (!argc && !all)
+ die(_("Use '--all' if you really want to deinitialize all submodules"));
+
+ if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
+ BUG("module_list_compute should not choke on empty pathspec");
+
+ info.prefix = prefix;
+ if (quiet)
+ info.flags |= OPT_QUIET;
+ if (force)
+ info.flags |= OPT_FORCE;
+
+ for_each_listed_submodule(&list, deinit_submodule_cb, &info);
+
+ return 0;
+}
+
static int clone_submodule(const char *path, const char *gitdir, const char *url,
const char *depth, struct string_list *reference,
int quiet, int progress)
{"resolve-relative-url-test", resolve_relative_url_test, 0},
{"init", module_init, SUPPORT_SUPER_PREFIX},
{"status", module_status, SUPPORT_SUPER_PREFIX},
+ {"print-default-remote", print_default_remote, 0},
+ {"sync", module_sync, SUPPORT_SUPER_PREFIX},
+ {"deinit", module_deinit, 0},
{"remote-branch", resolve_remote_submodule_branch, 0},
{"push-check", push_check, 0},
{"absorb-git-dirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX},
{
if (sign && do_sign(buf) < 0)
return error(_("unable to sign the tag"));
- if (write_sha1_file(buf->buf, buf->len, tag_type, result->hash) < 0)
+ if (write_object_file(buf->buf, buf->len, tag_type, result) < 0)
return error(_("unable to write tag file"));
return 0;
}
struct create_tag_options {
unsigned int message_given:1;
+ unsigned int use_editor:1;
unsigned int sign;
enum {
CLEANUP_NONE,
tag,
git_committer_info(IDENT_STRICT));
- if (!opt->message_given) {
+ if (!opt->message_given || opt->use_editor) {
int fd;
/* write the template message before editing: */
if (fd < 0)
die_errno(_("could not create file '%s'"), path);
- if (!is_null_oid(prev)) {
+ if (opt->message_given) {
+ write_or_die(fd, buf->buf, buf->len);
+ strbuf_reset(buf);
+ } else if (!is_null_oid(prev)) {
write_tag_body(fd, prev);
} else {
struct strbuf buf = STRBUF_INIT;
static struct ref_sorting *sorting = NULL, **sorting_tail = &sorting;
struct ref_format format = REF_FORMAT_INIT;
int icase = 0;
+ int edit_flag = 0;
struct option options[] = {
OPT_CMDMODE('l', "list", &cmdmode, N_("list tag names"), 'l'),
{ OPTION_INTEGER, 'n', NULL, &filter.lines, N_("n"),
OPT_CALLBACK('m', "message", &msg, N_("message"),
N_("tag message"), parse_msg_arg),
OPT_FILENAME('F', "file", &msgfile, N_("read message from file")),
+ OPT_BOOL('e', "edit", &edit_flag, N_("force edit of tag message")),
OPT_BOOL('s', "sign", &opt.sign, N_("annotated and GPG-signed tag")),
OPT_STRING(0, "cleanup", &cleanup_arg, N_("mode"),
N_("how to strip spaces and #comments from message")),
die(_("tag '%s' already exists"), tag);
opt.message_given = msg.given || msgfile;
+ opt.use_editor = edit_flag;
if (!cleanup_arg || !strcmp(cleanup_arg, "strip"))
opt.cleanup_mode = CLEANUP_ALL;
static unsigned int offset, len;
static off_t consumed_bytes;
static off_t max_input_size;
-static git_SHA_CTX ctx;
+static git_hash_ctx ctx;
static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
/*
if (min > sizeof(buffer))
die("cannot fill %d bytes", min);
if (offset) {
- git_SHA1_Update(&ctx, buffer, offset);
+ the_hash_algo->update_fn(&ctx, buffer, offset);
memmove(buffer, buffer + offset, len);
offset = 0;
}
{
struct object_id oid;
- if (write_sha1_file(obj_buf->buffer, obj_buf->size, typename(obj->type), oid.hash) < 0)
+ if (write_object_file(obj_buf->buffer, obj_buf->size,
+ typename(obj->type), &oid) < 0)
die("failed to write object %s", oid_to_hex(&obj->oid));
obj->flags |= FLAG_WRITTEN;
}
void *buf, unsigned long size)
{
if (!strict) {
- if (write_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash) < 0)
+ if (write_object_file(buf, size, typename(type),
+ &obj_list[nr].oid) < 0)
die("failed to write object");
added_object(nr, type, buf, size);
free(buf);
obj_list[nr].obj = NULL;
} else if (type == OBJ_BLOB) {
struct blob *blob;
- if (write_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash) < 0)
+ if (write_object_file(buf, size, typename(type),
+ &obj_list[nr].oid) < 0)
die("failed to write object");
added_object(nr, type, buf, size);
free(buf);
} else {
struct object *obj;
int eaten;
- hash_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash);
+ hash_object_file(buf, size, typename(type), &obj_list[nr].oid);
added_object(nr, type, buf, size);
obj = parse_object_buffer(&obj_list[nr].oid, type, size, buf,
&eaten);
struct object_id base_oid;
if (type == OBJ_REF_DELTA) {
- hashcpy(base_oid.hash, fill(GIT_SHA1_RAWSZ));
- use(GIT_SHA1_RAWSZ);
+ hashcpy(base_oid.hash, fill(the_hash_algo->rawsz));
+ use(the_hash_algo->rawsz);
delta_data = get_data(delta_size);
if (dry_run || !delta_data) {
free(delta_data);
/* We don't take any non-flag arguments now.. Maybe some day */
usage(unpack_usage);
}
- git_SHA1_Init(&ctx);
+ the_hash_algo->init_fn(&ctx);
unpack_all();
- git_SHA1_Update(&ctx, buffer, offset);
- git_SHA1_Final(oid.hash, &ctx);
+ the_hash_algo->update_fn(&ctx, buffer, offset);
+ the_hash_algo->final_fn(oid.hash, &ctx);
if (strict)
write_rest();
- if (hashcmp(fill(GIT_SHA1_RAWSZ), oid.hash))
+ if (hashcmp(fill(the_hash_algo->rawsz), oid.hash))
die("final sha1 did not match");
- use(GIT_SHA1_RAWSZ);
+ use(the_hash_algo->rawsz);
/* Write the last part of the buffer to stdout */
while (len) {
#include "worktree.h"
static const char * const worktree_usage[] = {
- N_("git worktree add [<options>] <path> [<branch>]"),
+ N_("git worktree add [<options>] <path> [<commit-ish>]"),
N_("git worktree list [<options>]"),
N_("git worktree lock [<options>] <path>"),
N_("git worktree prune [<options>]"),
* Hook failure does not warrant worktree deletion, so run hook after
* is_junk is cleared, but do return appropriate code when hook fails.
*/
- if (!ret && opts->checkout)
- ret = run_hook_le(NULL, "post-checkout", oid_to_hex(&null_oid),
- oid_to_hex(&commit->object.oid), "1", NULL);
+ if (!ret && opts->checkout) {
+ const char *hook = find_hook("post-checkout");
+ if (hook) {
+ const char *env[] = { "GIT_DIR", "GIT_WORK_TREE", NULL };
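+			/* bare names (no '=') unset these in the hook's environment */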
+ cp.git_cmd = 0;
+ cp.no_stdin = 1;
+ cp.stdout_to_stderr = 1;
+ cp.dir = path;
+ cp.env = env;
+ cp.argv = NULL;
+ argv_array_pushl(&cp.args, absolute_path(hook),
+ oid_to_hex(&null_oid),
+ oid_to_hex(&commit->object.oid),
+ "1", NULL);
+ ret = run_command(&cp);
+ }
+ }
argv_array_clear(&child_env);
strbuf_release(&sb);
unsigned plugged:1;
char *pack_tmp_name;
- struct sha1file *f;
+ struct hashfile *f;
off_t offset;
struct pack_idx_option pack_idx_opts;
unlink(state->pack_tmp_name);
goto clear_exit;
} else if (state->nr_written == 1) {
- sha1close(state->f, oid.hash, CSUM_FSYNC);
+ hashclose(state->f, oid.hash, CSUM_FSYNC);
} else {
- int fd = sha1close(state->f, oid.hash, 0);
+ int fd = hashclose(state->f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name,
state->nr_written, oid.hash,
state->offset);
* with a new pack.
*/
static int stream_to_pack(struct bulk_checkin_state *state,
- git_SHA_CTX *ctx, off_t *already_hashed_to,
+ git_hash_ctx *ctx, off_t *already_hashed_to,
int fd, size_t size, enum object_type type,
const char *path, unsigned flags)
{
if (rsize < hsize)
hsize = rsize;
if (hsize)
- git_SHA1_Update(ctx, ibuf, hsize);
+ the_hash_algo->update_fn(ctx, ibuf, hsize);
*already_hashed_to = offset;
}
s.next_in = ibuf;
return -1;
}
- sha1write(state->f, obuf, written);
+ hashwrite(state->f, obuf, written);
state->offset += written;
}
s.next_out = obuf;
unsigned flags)
{
off_t seekback, already_hashed_to;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
unsigned char obuf[16384];
unsigned header_len;
- struct sha1file_checkpoint checkpoint;
+ struct hashfile_checkpoint checkpoint;
struct pack_idx_entry *idx = NULL;
seekback = lseek(fd, 0, SEEK_CUR);
header_len = xsnprintf((char *)obuf, sizeof(obuf), "%s %" PRIuMAX,
typename(type), (uintmax_t)size) + 1;
- git_SHA1_Init(&ctx);
- git_SHA1_Update(&ctx, obuf, header_len);
+ the_hash_algo->init_fn(&ctx);
+ the_hash_algo->update_fn(&ctx, obuf, header_len);
/* Note: idx is non-NULL when we are writing */
if ((flags & HASH_WRITE_OBJECT) != 0)
while (1) {
prepare_to_stream(state, flags);
if (idx) {
- sha1file_checkpoint(state->f, &checkpoint);
+ hashfile_checkpoint(state->f, &checkpoint);
idx->offset = state->offset;
crc32_begin(state->f);
}
*/
if (!idx)
die("BUG: should not happen");
- sha1file_truncate(state->f, &checkpoint);
+ hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
finish_bulk_checkin(state);
if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
return error("cannot seek back");
}
- git_SHA1_Final(result_sha1, &ctx);
+ the_hash_algo->final_fn(result_sha1, &ctx);
if (!idx)
return 0;
idx->crc32 = crc32_end(state->f);
if (already_written(state, result_sha1)) {
- sha1file_truncate(state->f, &checkpoint);
+ hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
free(idx);
} else {
down->namelen = pathlen;
if (pos < it->subtree_nr)
- memmove(it->down + pos + 1,
- it->down + pos,
- sizeof(down) * (it->subtree_nr - pos - 1));
+ MOVE_ARRAY(it->down + pos + 1, it->down + pos,
+ it->subtree_nr - pos - 1);
it->down[pos] = down;
return down;
}
}
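(MOVE_ARRAY, used above in place of an open-coded memmove, takes an element
count rather than a byte count and infers the element size from its pointer
arguments, which also lets the compiler check that source and destination
types agree. Roughly, under the definitions in git-compat-util.h:

	/* MOVE_ARRAY(dst, src, n) behaves like: */
	memmove(dst, src, st_mult(sizeof(*dst), n));

st_mult() is the overflow-checked multiplication helper, so a bogus count
dies instead of silently wrapping around.)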
if (repair) {
- unsigned char sha1[20];
- hash_sha1_file(buffer.buf, buffer.len, tree_type, sha1);
- if (has_sha1_file(sha1))
- hashcpy(it->oid.hash, sha1);
+ struct object_id oid;
+ hash_object_file(buffer.buf, buffer.len, tree_type, &oid);
+ if (has_sha1_file(oid.hash))
+ oidcpy(&it->oid, &oid);
else
to_invalidate = 1;
- } else if (dryrun)
- hash_sha1_file(buffer.buf, buffer.len, tree_type,
- it->oid.hash);
- else if (write_sha1_file(buffer.buf, buffer.len, tree_type, it->oid.hash)) {
+ } else if (dryrun) {
+ hash_object_file(buffer.buf, buffer.len, tree_type, &it->oid);
+ } else if (write_object_file(buffer.buf, buffer.len, tree_type,
+ &it->oid)) {
strbuf_release(&buffer);
return -1;
}
hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR);
- entries = read_index_from(index_state, index_path);
+ entries = read_index_from(index_state, index_path, get_git_dir());
if (entries < 0) {
ret = WRITE_TREE_UNREADABLE_INDEX;
goto out;
#include "git-compat-util.h"
#include "strbuf.h"
#include "hashmap.h"
-#include "mru.h"
+#include "list.h"
#include "advice.h"
#include "gettext.h"
#include "convert.h"
#include "sha1-array.h"
#include "repository.h"
-#ifndef platform_SHA_CTX
-/*
- * platform's underlying implementation of SHA-1; could be OpenSSL,
- * blk_SHA, Apple CommonCrypto, etc... Note that including
- * SHA1_HEADER may have already defined platform_SHA_CTX for our
- * own implementations like block-sha1 and ppc-sha1, so we list
- * the default for OpenSSL compatible SHA-1 implementations here.
- */
-#define platform_SHA_CTX SHA_CTX
-#define platform_SHA1_Init SHA1_Init
-#define platform_SHA1_Update SHA1_Update
-#define platform_SHA1_Final SHA1_Final
-#endif
-
-#define git_SHA_CTX platform_SHA_CTX
-#define git_SHA1_Init platform_SHA1_Init
-#define git_SHA1_Update platform_SHA1_Update
-#define git_SHA1_Final platform_SHA1_Final
-
-#ifdef SHA1_MAX_BLOCK_SIZE
-#include "compat/sha1-chunked.h"
-#undef git_SHA1_Update
-#define git_SHA1_Update git_SHA1_Update_Chunked
-#endif
-
#include <zlib.h>
typedef struct git_zstream {
z_stream z;
struct split_index *split_index;
struct cache_time timestamp;
unsigned name_hash_initialized : 1,
- initialized : 1;
+ initialized : 1,
+ drop_cache_tree : 1;
struct hashmap name_hash;
struct hashmap dir_hash;
unsigned char sha1[20];
#define active_cache_tree (the_index.cache_tree)
#define read_cache() read_index(&the_index)
-#define read_cache_from(path) read_index_from(&the_index, (path))
+#define read_cache_from(path) read_index_from(&the_index, (path), (get_git_dir()))
#define read_cache_preload(pathspec) read_index_preload(&the_index, (pathspec))
#define is_cache_unborn() is_index_unborn(&the_index)
#define read_cache_unmerged() read_index_unmerged(&the_index)
extern int read_index_preload(struct index_state *, const struct pathspec *pathspec);
extern int do_read_index(struct index_state *istate, const char *path,
			 int must_exist); /* for testing only! */
-extern int read_index_from(struct index_state *, const char *path);
+extern int read_index_from(struct index_state *, const char *path,
+ const char *gitdir);
extern int is_index_unborn(struct index_state *);
extern int read_index_unmerged(struct index_state *);
#define GIT_REPO_VERSION 0
#define GIT_REPO_VERSION_READ 1
extern int repository_format_precious_objects;
+extern char *repository_format_partial_clone;
+extern const char *core_partial_clone_filter_default;
struct repository_format {
int version;
int precious_objects;
+ char *partial_clone; /* value of extensions.partialclone */
int is_bare;
int hash_algo;
char *work_tree;
#define TYPE_CHANGED 0x0040
/*
- * Return the name of the file in the local object database that would
- * be used to store a loose object with the specified sha1. The
- * return value is a pointer to a statically allocated buffer that is
- * overwritten each time the function is called.
+ * Put in `buf` the name of the file in the local object database that
+ * would be used to store a loose object with the specified sha1.
*/
-extern const char *sha1_file_name(const unsigned char *sha1);
+extern void sha1_file_name(struct strbuf *buf, const unsigned char *sha1);
/*
* Return an abbreviated sha1 unique within this repository's object database.
static inline void oidclr(struct object_id *oid)
{
- hashclr(oid->hash);
+ memset(oid->hash, 0, GIT_MAX_RAWSZ);
}
"\xe6\x9d\xe2\x9b\xb2\xd1\xd6\x43\x4b\x8b" \
"\x29\xae\x77\x5a\xd8\xc2\xe4\x8c\x53\x91"
extern const struct object_id empty_blob_oid;
-#define EMPTY_BLOB_SHA1_BIN (empty_blob_oid.hash)
-
static inline int is_empty_blob_sha1(const unsigned char *sha1)
{
/* Read and unpack a sha1 file into memory, write memory to a sha1 file */
extern int sha1_object_info(const unsigned char *, unsigned long *);
-extern int hash_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1);
-extern int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *return_sha1);
-extern int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type, struct object_id *oid, unsigned flags);
-extern int pretend_sha1_file(void *, unsigned long, enum object_type, unsigned char *);
-extern int force_object_loose(const unsigned char *sha1, time_t mtime);
+
+extern int hash_object_file(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid);
+
+extern int write_object_file(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid);
+
+extern int hash_object_file_literally(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ unsigned flags);
+
+extern int pretend_object_file(void *, unsigned long, enum object_type,
+ struct object_id *oid);
+
+extern int force_object_loose(const struct object_id *oid, time_t mtime);
+
extern int git_open_cloexec(const char *name, int flags);
#define git_open(name) git_open_cloexec(name, O_RDONLY)
extern void *map_sha1_file(const unsigned char *sha1, unsigned long *size);
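(A minimal sketch of the renamed object-file API, assuming cache.h is
included and ignoring error handling beyond the basics:

	struct object_id oid;
	const char *msg = "hello\n";

	/* compute the object name without writing anything */
	hash_object_file(msg, strlen(msg), "blob", &oid);

	/* ... or compute it and store the loose object */
	if (write_object_file(msg, strlen(msg), "blob", &oid) < 0)
		die("unable to write blob %s", oid_to_hex(&oid));

Both fill in a struct object_id instead of a bare unsigned char array, which
is the point of the conversion.)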
extern struct packed_git {
struct packed_git *next;
+ struct list_head mru;
struct pack_window *windows;
off_t pack_size;
const void *index_data;
unsigned pack_local:1,
pack_keep:1,
freshened:1,
- do_not_close:1;
+ do_not_close:1,
+ pack_promisor:1;
unsigned char sha1[20];
struct revindex_entry *revindex;
/* something like ".git/objects/pack/xxxxx.pack" */
} *packed_git;
/*
- * A most-recently-used ordered version of the packed_git list, which can
- * be iterated instead of packed_git (and marked via mru_mark).
+ * A most-recently-used ordered version of the packed_git list.
*/
-extern struct mru packed_git_mru;
+extern struct list_head packed_git_mru;
struct pack_entry {
off_t offset;
#define OBJECT_INFO_QUICK 8
extern int sha1_object_info_extended(const unsigned char *, struct object_info *, unsigned flags);
+/*
+ * Set this to 0 to prevent sha1_object_info_extended() from fetching missing
+ * blobs. This makes a difference only if extensions.partialClone is set.
+ *
+ * Its default value is 1.
+ */
+extern int fetch_if_missing;
+
/* Dumb servers support */
extern int update_server_info(int);
fi
}
-good_trees_file="$HOME/travis-cache/good-trees"
-
# Save some info about the current commit's tree, so we can skip the build
# job if we encounter the same tree again and can provide a useful info
# message.
# and installing dependencies.
set -ex
-mkdir -p "$HOME/travis-cache"
+cache_dir="$HOME/travis-cache"
+good_trees_file="$cache_dir/good-trees"
+
+mkdir -p "$cache_dir"
skip_branch_tip_with_tag
skip_good_tree
# Build and test Git in a 32-bit environment
#
# Usage:
-# run-linux32-build.sh [host-user-id]
+# run-linux32-build.sh <host-user-id>
#
-set -x
+set -ex
+
+if test $# -ne 1 || test -z "$1"
+then
+ echo >&2 "usage: run-linux32-build.sh <host-user-id>"
+ exit 1
+fi
# Update packages to the latest available versions
linux32 --32bit i386 sh -c '
apt update >/dev/null &&
apt install -y build-essential libcurl4-openssl-dev libssl-dev \
libexpat-dev gettext python >/dev/null
-' &&
+'
# If this script runs inside a docker container, then all commands are
# usually executed as root. Consequently, the host user might not be
# able to access the test output files.
-# If a host user id is given, then create a user "ci" with the host user
-# id to make everything accessible to the host user.
-HOST_UID=$1 &&
-CI_USER=$USER &&
-test -z $HOST_UID || (CI_USER="ci" && useradd -u $HOST_UID $CI_USER) &&
+# If a nonzero host user id is given, then create a user "ci" with that
+# user id to make everything accessible to the host user.
+HOST_UID=$1
+if test $HOST_UID -eq 0
+then
+ # Just in case someone does want to run the test suite as root.
+ CI_USER=root
+else
+ CI_USER=ci
+ if test "$(id -u $CI_USER 2>/dev/null)" = $HOST_UID
+ then
+ echo "user '$CI_USER' already exists with the requested ID $HOST_UID"
+ else
+ useradd -u $HOST_UID $CI_USER
+ fi
+
+ # Due to a bug the test suite was run as root in the past, so
+ # a prove state file created back then is only accessible by
+ # root. Now that bug is fixed, the test suite is run as a
+ # regular user, but the prove state file coming from Travis
+ # CI's cache might still be owned by root.
+ # Make sure that this user has rights to any cached files,
+ # including an existing prove state file.
+ test -n "$cache_dir" && chown -R $HOST_UID:$HOST_UID "$cache_dir"
+fi
# Build and test
linux32 --32bit i386 su -m -l $CI_USER -c '
- cd /usr/src/git &&
- ln -s /tmp/travis-cache/.prove t/.prove &&
- make --jobs=2 &&
- make --quiet test
+ set -ex
+ cd /usr/src/git
+ test -n "$cache_dir" && ln -s "$cache_dir/.prove" t/.prove
+ make --jobs=2
+ make --quiet test
'
# Use the following command to debug the docker build locally:
# $ docker run -itv "${PWD}:/usr/src/git" --entrypoint /bin/bash daald/ubuntu32:xenial
-# root@container:/# /usr/src/git/ci/run-linux32-build.sh
+# root@container:/# /usr/src/git/ci/run-linux32-build.sh <host-user-id>
+
+container_cache_dir=/tmp/travis-cache
docker run \
--interactive \
--env GIT_PROVE_OPTS \
--env GIT_TEST_OPTS \
--env GIT_TEST_CLONE_2GB \
+ --env cache_dir="$container_cache_dir" \
--volume "${PWD}:/usr/src/git" \
- --volume "${HOME}/travis-cache:/tmp/travis-cache" \
+ --volume "$cache_dir:$container_cache_dir" \
daald/ubuntu32:xenial \
/usr/src/git/ci/run-linux32-build.sh $(id -u $USER)
. ${0%/*}/lib-travisci.sh
-ln -s $HOME/travis-cache/.prove t/.prove
+ln -s "$cache_dir/.prove" t/.prove
+
make --quiet test
+if test "$jobname" = "linux-gcc"
+then
+ GIT_TEST_SPLIT_INDEX=YesPlease make --quiet test
+fi
check_unignored_build_artifacts
return color_parse_mem(value, strlen(value), dst);
}
-void color_set(char *dst, const char *color_bytes)
-{
- xsnprintf(dst, COLOR_MAXLEN, "%s", color_bytes);
-}
-
/*
* Write the ANSI color codes for "c" to "out"; the string should
* already have the ANSI escape code in it. "out" should have enough
return r;
}
-
-
int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
{
va_list args;
int git_color_default_config(const char *var, const char *value, void *cb);
/*
- * Set the color buffer (which must be COLOR_MAXLEN bytes)
- * to the raw color bytes; this is useful for initializing
- * default color variables.
+ * Parse a config option, which can be a boolean or one of
+ * "never", "auto", "always". Return a constant of
+ * GIT_COLOR_NEVER for "never" or negative boolean,
+ * GIT_COLOR_ALWAYS for "always" or a positive boolean,
+ * and GIT_COLOR_AUTO for "auto".
*/
-void color_set(char *dst, const char *color_bytes);
-
int git_config_colorbool(const char *var, const char *value);
+
+/*
+ * Return a boolean indicating whether to use color, where the argument 'var' is
+ * one of GIT_COLOR_UNKNOWN, GIT_COLOR_NEVER, GIT_COLOR_ALWAYS, GIT_COLOR_AUTO.
+ */
int want_color(int var);
+
+/*
+ * Translate a Git color from 'value' into a string that the terminal can
+ * interpret and store it into 'dst'. The Git color values are of the form
+ * "foreground [background] [attr]" where fore- and background can be a color
+ * name ("red"), a RGB code (#0xFF0000) or a 256-color-mode from the terminal.
+ */
int color_parse(const char *value, char *dst);
int color_parse_mem(const char *value, int len, char *dst);
+
+/*
+ * Output the formatted string in the specified color (and then reset to normal
+ * color so subsequent output is uncolored). Omits the color encapsulation if
+ * `color` is NULL. The `color_fprintf_ln` prints a new line after resetting
+ * the color. The `color_print_strbuf` prints the contents of the given
+ * strbuf (BUG: but only up to its first NUL character).
+ */
__attribute__((format (printf, 3, 4)))
int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
__attribute__((format (printf, 3, 4)))
int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
void color_print_strbuf(FILE *fp, const char *color, const struct strbuf *sb);
+/*
+ * Check if the given color is GIT_COLOR_NIL, which means "no color selected".
+ * The caller needs to replace the color with the actual desired color.
+ */
int color_is_nil(const char *color);
#endif /* COLOR_H */
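(A small usage sketch for the API documented above, assuming color.h is
included; "path" is a hypothetical variable from the caller's context:

	char col[COLOR_MAXLEN];

	if (color_parse("bold red", col) < 0)
		die("bad color value");
	if (want_color(GIT_COLOR_AUTO))
		color_fprintf(stderr, col, "conflict in %s", path);

color_parse() fills 'col' with the terminal escape sequence, and
color_fprintf() wraps the formatted output in that color and a reset.)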
if (is_file) {
struct strbuf buf = STRBUF_INIT;
- if (convert_to_git(&the_index, elem->path, result, len, &buf, safe_crlf)) {
+ if (convert_to_git(&the_index, elem->path, result, len, &buf, global_conv_flags_eol)) {
free(result);
result = strbuf_detach(&buf, &len);
result_size = len;
ALLOC_GROW(commit_graft, commit_graft_nr + 1, commit_graft_alloc);
commit_graft_nr++;
if (pos < commit_graft_nr)
- memmove(commit_graft + pos + 1,
- commit_graft + pos,
- (commit_graft_nr - pos - 1) *
- sizeof(*commit_graft));
+ MOVE_ARRAY(commit_graft + pos + 1, commit_graft + pos,
+ commit_graft_nr - pos - 1);
commit_graft[pos] = graft;
return 0;
}
}
}
-int commit_tree(const char *msg, size_t msg_len,
- const unsigned char *tree,
- struct commit_list *parents, unsigned char *ret,
+int commit_tree(const char *msg, size_t msg_len, const struct object_id *tree,
+ struct commit_list *parents, struct object_id *ret,
const char *author, const char *sign_commit)
{
struct commit_extra_header *extra = NULL, **tail = &extra;
"variable i18n.commitencoding to the encoding your project uses.\n");
int commit_tree_extended(const char *msg, size_t msg_len,
- const unsigned char *tree,
- struct commit_list *parents, unsigned char *ret,
+ const struct object_id *tree,
+ struct commit_list *parents, struct object_id *ret,
const char *author, const char *sign_commit,
struct commit_extra_header *extra)
{
int encoding_is_utf8;
struct strbuf buffer;
- assert_sha1_type(tree, OBJ_TREE);
+ assert_sha1_type(tree->hash, OBJ_TREE);
if (memchr(msg, '\0', msg_len))
return error("a NUL byte in commit log message not allowed.");
encoding_is_utf8 = is_encoding_utf8(git_commit_encoding);
strbuf_init(&buffer, 8192); /* should avoid reallocs for the headers */
- strbuf_addf(&buffer, "tree %s\n", sha1_to_hex(tree));
+ strbuf_addf(&buffer, "tree %s\n", oid_to_hex(tree));
/*
* NOTE! This ordering means that the same exact tree merged with a
goto out;
}
- result = write_sha1_file(buffer.buf, buffer.len, commit_type, ret);
+ result = write_object_file(buffer.buf, buffer.len, commit_type, ret);
out:
strbuf_release(&buffer);
return result;
struct commit_extra_header ***tail);
extern int commit_tree(const char *msg, size_t msg_len,
- const unsigned char *tree,
- struct commit_list *parents, unsigned char *ret,
+ const struct object_id *tree,
+ struct commit_list *parents, struct object_id *ret,
const char *author, const char *sign_commit);
extern int commit_tree_extended(const char *msg, size_t msg_len,
- const unsigned char *tree,
- struct commit_list *parents, unsigned char *ret,
- const char *author, const char *sign_commit,
+ const struct object_id *tree,
+ struct commit_list *parents,
+ struct object_id *ret, const char *author,
+ const char *sign_commit,
struct commit_extra_header *);
extern struct commit_extra_header *read_commit_extra_headers(struct commit *, const char **);
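(A hedged sketch of the updated calling convention; tree_oid and parents are
assumed to have been prepared by the caller, and passing NULL for author and
sign_commit falls back to the default ident and disables signing:

	struct object_id commit_oid;
	struct strbuf msg = STRBUF_INIT;

	strbuf_addstr(&msg, "my subject line\n");
	if (commit_tree(msg.buf, msg.len, &tree_oid, parents,
			&commit_oid, NULL, NULL) < 0)
		die("failed to write commit object");
	strbuf_release(&msg);

The tree and the resulting commit are now both struct object_id, matching
the rest of the converted call chain.)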
}
if (!strcmp(var, "core.safecrlf")) {
+ int eol_rndtrp_die;
if (value && !strcasecmp(value, "warn")) {
- safe_crlf = SAFE_CRLF_WARN;
+ global_conv_flags_eol = CONV_EOL_RNDTRP_WARN;
return 0;
}
- safe_crlf = git_config_bool(var, value);
+ eol_rndtrp_die = git_config_bool(var, value);
+ global_conv_flags_eol = eol_rndtrp_die ?
+ CONV_EOL_RNDTRP_DIE : CONV_EOL_RNDTRP_WARN;
return 0;
}
return 0;
}
+ if (!strcmp(var, "core.partialclonefilter")) {
+ return git_config_string(&core_partial_clone_filter_default,
+ var, value);
+ }
+
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
X = .exe
UNRELIABLE_FSTAT = UnfortunatelyYes
- SPARSE_FLAGS = -isystem /usr/include/w32api -Wno-one-bit-signed-bitfield
OBJECT_CREATION_USES_RENAMES = UnfortunatelyNeedsTo
MMAP_PREVENTS_DELETE = UnfortunatelyYes
COMPAT_OBJS += compat/cygwin.o
argv_array_push(&rev_list.args,"rev-list");
argv_array_push(&rev_list.args, "--objects");
argv_array_push(&rev_list.args, "--stdin");
+ if (repository_format_partial_clone)
+ argv_array_push(&rev_list.args, "--exclude-promisor-objects");
argv_array_push(&rev_list.args, "--not");
argv_array_push(&rev_list.args, "--all");
argv_array_push(&rev_list.args, "--quiet");
@ strbuf_addf_with_format_only @
expression E;
-constant fmt;
-@@
- strbuf_addf(E,
-(
- fmt
-|
- _(fmt)
-)
- );
-
-@ script:python @
-fmt << strbuf_addf_with_format_only.fmt;
-@@
-cocci.include_match("%" not in fmt)
-
-@ extends strbuf_addf_with_format_only @
+constant fmt !~ "%";
@@
- strbuf_addf
+ strbuf_addstr
@@
expression E1, E2;
+format F =~ "s";
@@
-- strbuf_addf(E1, "%s", E2);
+- strbuf_addf(E1, "%@F@", E2);
+ strbuf_addstr(E1, E2);
@@
track=""
;;
*)
- for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD; do
+ for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD REBASE_HEAD; do
case "$i" in
$match*)
if [ -e "$dir/$i" ]; then
__git_list_merge_strategies ()
{
- git merge -s help 2>&1 |
+ LANG=C LC_ALL=C git merge -s help 2>&1 |
sed -n -e '/[Aa]vailable strategies are: /,/^$/{
s/\.$//
s/.*://
{
__git_find_repo_path
if [ -d "$__git_repo_path"/rebase-apply ]; then
- __gitcomp "--skip --continue --resolved --abort"
+ __gitcomp "--skip --continue --resolved --abort --quit --show-current-patch"
return
fi
case "$cur" in
__git_fetch_options="
--quiet --verbose --append --upload-pack --force --keep --depth=
--tags --no-tags --all --prune --dry-run --recurse-submodules=
- --unshallow --update-shallow
+ --unshallow --update-shallow --prune-tags
"
_git_fetch ()
{
__git_find_repo_path
if [ -f "$__git_repo_path"/rebase-merge/interactive ]; then
- __gitcomp "--continue --skip --abort --quit --edit-todo"
+ __gitcomp "--continue --skip --abort --quit --edit-todo --show-current-patch"
return
elif [ -d "$__git_repo_path"/rebase-apply ] || \
[ -d "$__git_repo_path"/rebase-merge ]; then
- __gitcomp "--continue --skip --abort --quit"
+ __gitcomp "--continue --skip --abort --quit --show-current-patch"
return
fi
__git_complete_strategy && return
use 5.008;
use strict;
use warnings;
-use Error qw(:try);
+use Git::Error qw(:try);
use File::Basename qw(dirname);
use File::Copy;
use File::Find;
Subtrees are not to be confused with submodules, which are meant for
the same task. Unlike submodules, subtrees do not need any special
-constructions (like .gitmodule files or gitlinks) be present in
+constructions (like .gitmodules files or gitlinks) to be present in
your repository, and do not force end-users of your
repository to do anything special or to understand how subtrees
work. A subtree is just a subdirectory that can be
return core_eol;
}
-static void check_safe_crlf(const char *path, enum crlf_action crlf_action,
+static void check_global_conv_flags_eol(const char *path, enum crlf_action crlf_action,
struct text_stat *old_stats, struct text_stat *new_stats,
- enum safe_crlf checksafe)
+ int conv_flags)
{
if (old_stats->crlf && !new_stats->crlf ) {
/*
* CRLFs would not be restored by checkout
*/
- if (checksafe == SAFE_CRLF_WARN)
+ if (conv_flags & CONV_EOL_RNDTRP_DIE)
+ die(_("CRLF would be replaced by LF in %s."), path);
+ else if (conv_flags & CONV_EOL_RNDTRP_WARN)
warning(_("CRLF will be replaced by LF in %s.\n"
"The file will have its original line"
" endings in your working directory."), path);
- else /* i.e. SAFE_CRLF_FAIL */
- die(_("CRLF would be replaced by LF in %s."), path);
} else if (old_stats->lonelf && !new_stats->lonelf ) {
/*
* CRLFs would be added by checkout
*/
- if (checksafe == SAFE_CRLF_WARN)
+ if (conv_flags & CONV_EOL_RNDTRP_DIE)
+ die(_("LF would be replaced by CRLF in %s"), path);
+ else if (conv_flags & CONV_EOL_RNDTRP_WARN)
warning(_("LF will be replaced by CRLF in %s.\n"
"The file will have its original line"
" endings in your working directory."), path);
- else /* i.e. SAFE_CRLF_FAIL */
- die(_("LF would be replaced by CRLF in %s"), path);
}
}
static int crlf_to_git(const struct index_state *istate,
const char *path, const char *src, size_t len,
struct strbuf *buf,
- enum crlf_action crlf_action, enum safe_crlf checksafe)
+ enum crlf_action crlf_action, int conv_flags)
{
struct text_stat stats;
char *dst;
* unless we want to renormalize in a merge or
* cherry-pick.
*/
- if ((checksafe != SAFE_CRLF_RENORMALIZE) &&
+ if ((!(conv_flags & CONV_EOL_RENORMALIZE)) &&
has_crlf_in_index(istate, path))
convert_crlf_into_lf = 0;
}
- if ((checksafe == SAFE_CRLF_WARN ||
- (checksafe == SAFE_CRLF_FAIL)) && len) {
+ if (((conv_flags & CONV_EOL_RNDTRP_WARN) ||
+ ((conv_flags & CONV_EOL_RNDTRP_DIE) && len))) {
struct text_stat new_stats;
memcpy(&new_stats, &stats, sizeof(new_stats));
/* simulate "git add" */
new_stats.crlf += new_stats.lonelf;
new_stats.lonelf = 0;
}
- check_safe_crlf(path, crlf_action, &stats, &new_stats, checksafe);
+ check_global_conv_flags_eol(path, crlf_action, &stats, &new_stats, conv_flags);
}
if (!convert_crlf_into_lf)
return 0;
static int ident_to_worktree(const char *path, const char *src, size_t len,
struct strbuf *buf, int ident)
{
- unsigned char sha1[20];
+ struct object_id oid;
char *to_free = NULL, *dollar, *spc;
int cnt;
/* are we "faking" in place editing ? */
if (src == buf->buf)
to_free = strbuf_detach(buf, NULL);
- hash_sha1_file(src, len, "blob", sha1);
+ hash_object_file(src, len, "blob", &oid);
strbuf_grow(buf, len + cnt * 43);
for (;;) {
/* step 4: substitute */
strbuf_addstr(buf, "Id: ");
- strbuf_add(buf, sha1_to_hex(sha1), 40);
+ strbuf_addstr(buf, oid_to_hex(&oid));
strbuf_addstr(buf, " $");
}
strbuf_add(buf, src, len);
int convert_to_git(const struct index_state *istate,
const char *path, const char *src, size_t len,
- struct strbuf *dst, enum safe_crlf checksafe)
+ struct strbuf *dst, int conv_flags)
{
int ret = 0;
struct conv_attrs ca;
src = dst->buf;
len = dst->len;
}
- if (checksafe != SAFE_CRLF_KEEP_CRLF) {
- ret |= crlf_to_git(istate, path, src, len, dst, ca.crlf_action, checksafe);
+ if (!(conv_flags & CONV_EOL_KEEP_CRLF)) {
+ ret |= crlf_to_git(istate, path, src, len, dst, ca.crlf_action, conv_flags);
if (ret && dst) {
src = dst->buf;
len = dst->len;
void convert_to_git_filter_fd(const struct index_state *istate,
const char *path, int fd, struct strbuf *dst,
- enum safe_crlf checksafe)
+ int conv_flags)
{
struct conv_attrs ca;
convert_attrs(&ca, path);
if (!apply_filter(path, NULL, 0, fd, dst, ca.drv, CAP_CLEAN, NULL))
die("%s: clean filter '%s' failed", path, ca.drv->name);
- crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, checksafe);
+ crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, conv_flags);
ident_to_git(path, dst->buf, dst->len, dst, ca.ident);
}
src = dst->buf;
len = dst->len;
}
- return ret | convert_to_git(istate, path, src, len, dst, SAFE_CRLF_RENORMALIZE);
+ return ret | convert_to_git(istate, path, src, len, dst, CONV_EOL_RENORMALIZE);
}
/*****************************************************************
struct index_state;
-enum safe_crlf {
- SAFE_CRLF_FALSE = 0,
- SAFE_CRLF_FAIL = 1,
- SAFE_CRLF_WARN = 2,
- SAFE_CRLF_RENORMALIZE = 3,
- SAFE_CRLF_KEEP_CRLF = 4
-};
+#define CONV_EOL_RNDTRP_DIE (1<<0) /* Die if CRLF to LF to CRLF is different */
+#define CONV_EOL_RNDTRP_WARN (1<<1) /* Warn if CRLF to LF to CRLF is different */
+#define CONV_EOL_RENORMALIZE (1<<2) /* Convert CRLF to LF */
+#define CONV_EOL_KEEP_CRLF (1<<3) /* Keep CRLF line endings as is */
-extern enum safe_crlf safe_crlf;
+extern int global_conv_flags_eol;
enum auto_crlf {
AUTO_CRLF_FALSE = 0,
/* returns 1 if *dst was used */
extern int convert_to_git(const struct index_state *istate,
const char *path, const char *src, size_t len,
- struct strbuf *dst, enum safe_crlf checksafe);
+ struct strbuf *dst, int conv_flags);
extern int convert_to_working_tree(const char *path, const char *src,
size_t len, struct strbuf *dst);
extern int async_convert_to_working_tree(const char *path, const char *src,
extern void convert_to_git_filter_fd(const struct index_state *istate,
const char *path, int fd,
struct strbuf *dst,
- enum safe_crlf checksafe);
+ int conv_flags);
extern int would_convert_to_git_filter_fd(const char *path);
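(Since the enum has become an OR-able flag word, callers can now combine
behaviors; a minimal sketch, with istate/path/src/len assumed to come from
the caller's context:

	struct strbuf dst = STRBUF_INIT;
	int conv_flags = CONV_EOL_RNDTRP_WARN | CONV_EOL_RENORMALIZE;

	if (convert_to_git(istate, path, src, len, &dst, conv_flags)) {
		src = dst.buf;	/* conversion happened; use the new buffer */
		len = dst.len;
	}

The old SAFE_CRLF_* values map roughly one-to-one onto single flags; the
gain is that independent concerns no longer compete for one enum slot.)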
/*****************************************************************
#include "progress.h"
#include "csum-file.h"
-static void flush(struct sha1file *f, const void *buf, unsigned int count)
+static void flush(struct hashfile *f, const void *buf, unsigned int count)
{
if (0 <= f->check_fd && count) {
unsigned char check_buffer[8192];
}
}
-void sha1flush(struct sha1file *f)
+void hashflush(struct hashfile *f)
{
unsigned offset = f->offset;
if (offset) {
- git_SHA1_Update(&f->ctx, f->buffer, offset);
+ the_hash_algo->update_fn(&f->ctx, f->buffer, offset);
flush(f, f->buffer, offset);
f->offset = 0;
}
}
-int sha1close(struct sha1file *f, unsigned char *result, unsigned int flags)
+int hashclose(struct hashfile *f, unsigned char *result, unsigned int flags)
{
int fd;
- sha1flush(f);
- git_SHA1_Final(f->buffer, &f->ctx);
+ hashflush(f);
+ the_hash_algo->final_fn(f->buffer, &f->ctx);
if (result)
hashcpy(result, f->buffer);
if (flags & (CSUM_CLOSE | CSUM_FSYNC)) {
/* write checksum and close fd */
- flush(f, f->buffer, 20);
+ flush(f, f->buffer, the_hash_algo->rawsz);
if (flags & CSUM_FSYNC)
fsync_or_die(f->fd, f->name);
if (close(f->fd))
return fd;
}
-void sha1write(struct sha1file *f, const void *buf, unsigned int count)
+void hashwrite(struct hashfile *f, const void *buf, unsigned int count)
{
while (count) {
unsigned offset = f->offset;
buf = (char *) buf + nr;
left -= nr;
if (!left) {
- git_SHA1_Update(&f->ctx, data, offset);
+ the_hash_algo->update_fn(&f->ctx, data, offset);
flush(f, data, offset);
offset = 0;
}
}
}
-struct sha1file *sha1fd(int fd, const char *name)
+struct hashfile *hashfd(int fd, const char *name)
{
- return sha1fd_throughput(fd, name, NULL);
+ return hashfd_throughput(fd, name, NULL);
}
-struct sha1file *sha1fd_check(const char *name)
+struct hashfile *hashfd_check(const char *name)
{
int sink, check;
- struct sha1file *f;
+ struct hashfile *f;
sink = open("/dev/null", O_WRONLY);
if (sink < 0)
check = open(name, O_RDONLY);
if (check < 0)
die_errno("unable to open '%s'", name);
- f = sha1fd(sink, name);
+ f = hashfd(sink, name);
f->check_fd = check;
return f;
}
-struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp)
+struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp)
{
- struct sha1file *f = xmalloc(sizeof(*f));
+ struct hashfile *f = xmalloc(sizeof(*f));
f->fd = fd;
f->check_fd = -1;
f->offset = 0;
f->tp = tp;
f->name = name;
f->do_crc = 0;
- git_SHA1_Init(&f->ctx);
+ the_hash_algo->init_fn(&f->ctx);
return f;
}
-void sha1file_checkpoint(struct sha1file *f, struct sha1file_checkpoint *checkpoint)
+void hashfile_checkpoint(struct hashfile *f, struct hashfile_checkpoint *checkpoint)
{
- sha1flush(f);
+ hashflush(f);
checkpoint->offset = f->total;
checkpoint->ctx = f->ctx;
}
-int sha1file_truncate(struct sha1file *f, struct sha1file_checkpoint *checkpoint)
+int hashfile_truncate(struct hashfile *f, struct hashfile_checkpoint *checkpoint)
{
off_t offset = checkpoint->offset;
return -1;
f->total = offset;
f->ctx = checkpoint->ctx;
- f->offset = 0; /* sha1flush() was called in checkpoint */
+ f->offset = 0; /* hashflush() was called in checkpoint */
return 0;
}
-void crc32_begin(struct sha1file *f)
+void crc32_begin(struct hashfile *f)
{
f->crc32 = crc32(0, NULL, 0);
f->do_crc = 1;
}
-uint32_t crc32_end(struct sha1file *f)
+uint32_t crc32_end(struct hashfile *f)
{
f->do_crc = 0;
return f->crc32;
struct progress;
/* A SHA1-protected file */
-struct sha1file {
+struct hashfile {
int fd;
int check_fd;
unsigned int offset;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
off_t total;
struct progress *tp;
const char *name;
};
/* Checkpoint */
-struct sha1file_checkpoint {
+struct hashfile_checkpoint {
off_t offset;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
};
-extern void sha1file_checkpoint(struct sha1file *, struct sha1file_checkpoint *);
-extern int sha1file_truncate(struct sha1file *, struct sha1file_checkpoint *);
+extern void hashfile_checkpoint(struct hashfile *, struct hashfile_checkpoint *);
+extern int hashfile_truncate(struct hashfile *, struct hashfile_checkpoint *);
-/* sha1close flags */
+/* hashclose flags */
#define CSUM_CLOSE 1
#define CSUM_FSYNC 2
-extern struct sha1file *sha1fd(int fd, const char *name);
-extern struct sha1file *sha1fd_check(const char *name);
-extern struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp);
-extern int sha1close(struct sha1file *, unsigned char *, unsigned int);
-extern void sha1write(struct sha1file *, const void *, unsigned int);
-extern void sha1flush(struct sha1file *f);
-extern void crc32_begin(struct sha1file *);
-extern uint32_t crc32_end(struct sha1file *);
+extern struct hashfile *hashfd(int fd, const char *name);
+extern struct hashfile *hashfd_check(const char *name);
+extern struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp);
+extern int hashclose(struct hashfile *, unsigned char *, unsigned int);
+extern void hashwrite(struct hashfile *, const void *, unsigned int);
+extern void hashflush(struct hashfile *f);
+extern void crc32_begin(struct hashfile *);
+extern uint32_t crc32_end(struct hashfile *);
-static inline void sha1write_u8(struct sha1file *f, uint8_t data)
+static inline void hashwrite_u8(struct hashfile *f, uint8_t data)
{
- sha1write(f, &data, sizeof(data));
+ hashwrite(f, &data, sizeof(data));
}
-static inline void sha1write_be32(struct sha1file *f, uint32_t data)
+static inline void hashwrite_be32(struct hashfile *f, uint32_t data)
{
data = htonl(data);
- sha1write(f, &data, sizeof(data));
+ hashwrite(f, &data, sizeof(data));
}
#endif
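As a quick orientation to the renamed interface, a minimal usage sketch; the open descriptor fd and the buffer buf of len bytes are assumed context, and error handling is omitted:

    struct hashfile *f = hashfd(fd, "out.pack");
    hashwrite(f, buf, len);          /* bytes are hashed as they are written */
    hashclose(f, NULL, CSUM_CLOSE);  /* append the trailing hash, then close fd */

Passing a non-NULL result buffer to hashclose() also hands the final hash back to the caller, and CSUM_FSYNC additionally fsyncs before closing, per the flags handling above.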
#define initgroups(x, y) (0) /* nothing */
#endif
-static int log_syslog;
+static enum log_destination {
+ LOG_DESTINATION_UNSET = -1,
+ LOG_DESTINATION_NONE = 0,
+ LOG_DESTINATION_STDERR = 1,
+ LOG_DESTINATION_SYSLOG = 2,
+} log_destination = LOG_DESTINATION_UNSET;
static int verbose;
static int reuseaddr;
static int informative_errors;
" [--access-hook=<path>]\n"
" [--inetd | [--listen=<host_or_ipaddr>] [--port=<n>]\n"
" [--detach] [--user=<user> [--group=<group>]]\n"
+" [--log-destination=(stderr|syslog|none)]\n"
" [<directory>...]";
/* List of acceptable pathname prefixes */
static void logreport(int priority, const char *err, va_list params)
{
- if (log_syslog) {
+ switch (log_destination) {
+ case LOG_DESTINATION_SYSLOG: {
char buf[1024];
vsnprintf(buf, sizeof(buf), err, params);
syslog(priority, "%s", buf);
- } else {
+ break;
+ }
+ case LOG_DESTINATION_STDERR:
/*
* Since stderr is set to buffered mode, the
* logging of different processes will not overlap
vfprintf(stderr, err, params);
fputc('\n', stderr);
fflush(stderr);
+ break;
+ case LOG_DESTINATION_NONE:
+ break;
+ case LOG_DESTINATION_UNSET:
+ BUG("log destination not initialized correctly");
}
}
if (strncasecmp("host=", extra_args, 5) == 0) {
val = extra_args + 5;
vallen = strlen(val) + 1;
+ loginfo("Extended attribute \"host\": %s", val);
if (*val) {
/* Split <host>:<port> at colon. */
char *host;
}
}
- if (git_protocol.len > 0)
+ if (git_protocol.len > 0) {
+ loginfo("Extended attribute \"protocol\": %s", git_protocol.buf);
argv_array_pushf(env, GIT_PROTOCOL_ENVIRONMENT "=%s",
git_protocol.buf);
+ }
strbuf_release(&git_protocol);
}
alarm(0);
len = strlen(line);
- if (pktlen != len)
- loginfo("Extended attributes (%d bytes) exist <%.*s>",
- (int) pktlen - len,
- (int) pktlen - len, line + len + 1);
- if (len && line[len-1] == '\n') {
- line[--len] = 0;
- pktlen--;
- }
+ if (len && line[len-1] == '\n')
+ line[len-1] = 0;
/* parse additional args hidden behind a NUL byte */
if (len != pktlen)
}
if (!strcmp(arg, "--inetd")) {
inetd_mode = 1;
- log_syslog = 1;
continue;
}
if (!strcmp(arg, "--verbose")) {
continue;
}
if (!strcmp(arg, "--syslog")) {
- log_syslog = 1;
+ log_destination = LOG_DESTINATION_SYSLOG;
continue;
}
+ if (skip_prefix(arg, "--log-destination=", &v)) {
+ if (!strcmp(v, "syslog")) {
+ log_destination = LOG_DESTINATION_SYSLOG;
+ continue;
+ } else if (!strcmp(v, "stderr")) {
+ log_destination = LOG_DESTINATION_STDERR;
+ continue;
+ } else if (!strcmp(v, "none")) {
+ log_destination = LOG_DESTINATION_NONE;
+ continue;
+ } else
+ die("unknown log destination '%s'", v);
+ }
if (!strcmp(arg, "--export-all")) {
export_all_trees = 1;
continue;
}
if (!strcmp(arg, "--detach")) {
detach = 1;
- log_syslog = 1;
continue;
}
if (skip_prefix(arg, "--user=", &v)) {
usage(daemon_usage);
}
- if (log_syslog) {
+ if (log_destination == LOG_DESTINATION_UNSET) {
+ if (inetd_mode || detach)
+ log_destination = LOG_DESTINATION_SYSLOG;
+ else
+ log_destination = LOG_DESTINATION_STDERR;
+ }
+
+ if (log_destination == LOG_DESTINATION_SYSLOG) {
openlog("git-daemon", LOG_PID, LOG_DAEMON);
set_die_routine(daemon_die);
} else
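The net effect: syslog remains the default only where it was previously implied (--inetd and --detach), stderr becomes the default otherwise, and an explicit "git daemon --inetd --log-destination=stderr" (or "none") overrides either default.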
int diff_unmerged_stage = revs->max_count;
unsigned ce_option = ((option & DIFF_RACY_IS_MODIFIED)
? CE_MATCH_RACY_IS_DIRTY : 0);
+ uint64_t start = getnanotime();
diff_set_mnemonic_prefix(&revs->diffopt, "i/", "w/");
}
diffcore_std(&revs->diffopt);
diff_flush(&revs->diffopt);
+ trace_performance_since(start, "diff-files");
return 0;
}
int run_diff_index(struct rev_info *revs, int cached)
{
struct object_array_entry *ent;
+ uint64_t start = getnanotime();
ent = revs->pending.objects;
if (diff_cache(revs, &ent->item->oid, ent->name, cached))
diffcore_fix_diff_index(&revs->diffopt);
diffcore_std(&revs->diffopt);
diff_flush(&revs->diffopt);
+ trace_performance_since(start, "diff-index");
return 0;
}
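Both probes feed the existing performance trace, so running a command with GIT_TRACE_PERFORMANCE set should now include "diff-files" and "diff-index" timings; the exact output format comes from trace.c rather than these hunks.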
{
int size_only = flags & CHECK_SIZE_ONLY;
int err = 0;
+ int conv_flags = global_conv_flags_eol;
/*
* demote FAIL to WARN to allow inspecting the situation
* instead of refusing.
*/
- enum safe_crlf crlf_warn = (safe_crlf == SAFE_CRLF_FAIL
- ? SAFE_CRLF_WARN
- : safe_crlf);
+ if (conv_flags & CONV_EOL_RNDTRP_DIE)
+ conv_flags = CONV_EOL_RNDTRP_WARN;
if (!DIFF_FILE_VALID(s))
die("internal error: asking to populate invalid file.");
/*
* Convert from working tree format to canonical git format
*/
- if (convert_to_git(&the_index, s->path, s->data, s->size, &buf, crlf_warn)) {
+ if (convert_to_git(&the_index, s->path, s->data, s->size, &buf, conv_flags)) {
size_t size = 0;
munmap(s->data, s->size);
s->should_munmap = 0;
void diff_warn_rename_limit(const char *varname, int needed, int degraded_cc)
{
+ fflush(stdout);
if (degraded_cc)
warning(_(degrade_cc_to_c_warning));
else if (needed)
ALLOC_GROW(rename_dst, rename_dst_nr + 1, rename_dst_alloc);
rename_dst_nr++;
if (first < rename_dst_nr)
- memmove(rename_dst + first + 1, rename_dst + first,
- (rename_dst_nr - first - 1) * sizeof(*rename_dst));
+ MOVE_ARRAY(rename_dst + first + 1, rename_dst + first,
+ rename_dst_nr - first - 1);
rename_dst[first].two = alloc_filespec(two->path);
fill_filespec(rename_dst[first].two, &two->oid, two->oid_valid,
two->mode);
ALLOC_GROW(rename_src, rename_src_nr + 1, rename_src_alloc);
rename_src_nr++;
if (first < rename_src_nr)
- memmove(rename_src + first + 1, rename_src + first,
- (rename_src_nr - first - 1) * sizeof(*rename_src));
+ MOVE_ARRAY(rename_src + first + 1, rename_src + first,
+ rename_src_nr - first - 1);
rename_src[first].p = p;
rename_src[first].score = score;
return &(rename_src[first]);
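For context, MOVE_ARRAY(dst, src, n) is the array-aware counterpart of these memmove calls: it supplies the element size itself and (as defined in git-compat-util.h) statically checks that dst and src have matching element sizes, so the shorter call sites above are equivalent to the removed ones.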
if (!filespec->oid_valid) {
if (diff_populate_filespec(filespec, 0))
return 0;
- hash_sha1_file(filespec->data, filespec->size, "blob",
- filespec->oid.hash);
+ hash_object_file(filespec->data, filespec->size, "blob",
+ &filespec->oid);
}
return sha1hash(filespec->oid.hash);
}
* 1 along with { data, size } of the (possibly augmented) buffer
* when successful.
*
- * Optionally updates the given sha1_stat with the given OID (when valid).
+ * Optionally updates the given oid_stat with the given OID (when valid).
*/
-static int do_read_blob(const struct object_id *oid,
- struct sha1_stat *sha1_stat,
- size_t *size_out,
- char **data_out)
+static int do_read_blob(const struct object_id *oid, struct oid_stat *oid_stat,
+ size_t *size_out, char **data_out)
{
enum object_type type;
unsigned long sz;
return -1;
}
- if (sha1_stat) {
- memset(&sha1_stat->stat, 0, sizeof(sha1_stat->stat));
- hashcpy(sha1_stat->sha1, oid->hash);
+ if (oid_stat) {
+ memset(&oid_stat->stat, 0, sizeof(oid_stat->stat));
+ oidcpy(&oid_stat->oid, oid);
}
if (sz == 0) {
static int read_skip_worktree_file_from_index(const struct index_state *istate,
const char *path,
- size_t *size_out,
- char **data_out,
- struct sha1_stat *sha1_stat)
+ size_t *size_out, char **data_out,
+ struct oid_stat *oid_stat)
{
int pos, len;
if (!ce_skip_worktree(istate->cache[pos]))
return -1;
- return do_read_blob(&istate->cache[pos]->oid, sha1_stat, size_out, data_out);
+ return do_read_blob(&istate->cache[pos]->oid, oid_stat, size_out, data_out);
}
/*
FLEX_ALLOC_MEM(d, name, name, len);
ALLOC_GROW(dir->dirs, dir->dirs_nr + 1, dir->dirs_alloc);
- memmove(dir->dirs + first + 1, dir->dirs + first,
- (dir->dirs_nr - first) * sizeof(*dir->dirs));
+ MOVE_ARRAY(dir->dirs + first + 1, dir->dirs + first,
+ dir->dirs_nr - first);
dir->dirs_nr++;
dir->dirs[first] = d;
return d;
struct untracked_cache_dir *dir)
{
int i;
- uc->dir_invalidated++;
+
+ /*
+ * Invalidation increment here is just roughly correct. If
+ * untracked_nr or any of dirs[].recurse is non-zero, we
+ * should increment dir_invalidated too. But that's more
+ * expensive to do.
+ */
+ if (dir->valid)
+ uc->dir_invalidated++;
+
dir->valid = 0;
dir->untracked_nr = 0;
for (i = 0; i < dir->dirs_nr; i++)
* ss_valid is non-zero, "ss" must contain good value as input.
*/
static int add_excludes(const char *fname, const char *base, int baselen,
- struct exclude_list *el,
- struct index_state *istate,
- struct sha1_stat *sha1_stat)
+ struct exclude_list *el, struct index_state *istate,
+ struct oid_stat *oid_stat)
{
struct stat st;
int r;
return -1;
r = read_skip_worktree_file_from_index(istate, fname,
&size, &buf,
- sha1_stat);
+ oid_stat);
if (r != 1)
return r;
} else {
size = xsize_t(st.st_size);
if (size == 0) {
- if (sha1_stat) {
- fill_stat_data(&sha1_stat->stat, &st);
- hashcpy(sha1_stat->sha1, EMPTY_BLOB_SHA1_BIN);
- sha1_stat->valid = 1;
+ if (oid_stat) {
+ fill_stat_data(&oid_stat->stat, &st);
+ oidcpy(&oid_stat->oid, &empty_blob_oid);
+ oid_stat->valid = 1;
}
close(fd);
return 0;
}
buf[size++] = '\n';
close(fd);
- if (sha1_stat) {
+ if (oid_stat) {
int pos;
- if (sha1_stat->valid &&
- !match_stat_data_racy(istate, &sha1_stat->stat, &st))
+ if (oid_stat->valid &&
+ !match_stat_data_racy(istate, &oid_stat->stat, &st))
; /* no content change, oid_stat->oid still good */
else if (istate &&
(pos = index_name_pos(istate, fname, strlen(fname))) >= 0 &&
!ce_stage(istate->cache[pos]) &&
ce_uptodate(istate->cache[pos]) &&
!would_convert_to_git(istate, fname))
- hashcpy(sha1_stat->sha1,
- istate->cache[pos]->oid.hash);
+ oidcpy(&oid_stat->oid,
+ &istate->cache[pos]->oid);
else
- hash_sha1_file(buf, size, "blob", sha1_stat->sha1);
- fill_stat_data(&sha1_stat->stat, &st);
- sha1_stat->valid = 1;
+ hash_object_file(buf, size, "blob",
+ &oid_stat->oid);
+ fill_stat_data(&oid_stat->stat, &st);
+ oid_stat->valid = 1;
}
}
* Used to set up core.excludesfile and .git/info/exclude lists.
*/
static void add_excludes_from_file_1(struct dir_struct *dir, const char *fname,
- struct sha1_stat *sha1_stat)
+ struct oid_stat *oid_stat)
{
struct exclude_list *el;
/*
if (!dir->untracked)
dir->unmanaged_exclude_files++;
el = add_exclude_list(dir, EXC_FILE, fname);
- if (add_excludes(fname, "", 0, el, NULL, sha1_stat) < 0)
+ if (add_excludes(fname, "", 0, el, NULL, oid_stat) < 0)
die("cannot use %s as an exclude file", fname);
}
while (current < baselen) {
const char *cp;
- struct sha1_stat sha1_stat;
+ struct oid_stat oid_stat;
stk = xcalloc(1, sizeof(*stk));
if (current < 0) {
}
/* Try to read per-directory file */
- hashclr(sha1_stat.sha1);
- sha1_stat.valid = 0;
+ oidclr(&oid_stat.oid);
+ oid_stat.valid = 0;
if (dir->exclude_per_dir &&
/*
* If we know that no files have been added in
strbuf_addstr(&sb, dir->exclude_per_dir);
el->src = strbuf_detach(&sb, NULL);
add_excludes(el->src, el->src, stk->baselen, el, istate,
- untracked ? &sha1_stat : NULL);
+ untracked ? &oid_stat : NULL);
}
/*
* NEEDSWORK: when untracked cache is enabled, prep_exclude()
* order, though, if you do that.
*/
if (untracked &&
- hashcmp(sha1_stat.sha1, untracked->exclude_sha1)) {
+ hashcmp(oid_stat.oid.hash, untracked->exclude_sha1)) {
invalidate_gitignore(dir->untracked, untracked);
- hashcpy(untracked->exclude_sha1, sha1_stat.sha1);
+ hashcpy(untracked->exclude_sha1, oid_stat.oid.hash);
}
dir->exclude_stack = stk;
current = stk->baselen;
if (!de)
return treat_path_fast(dir, untracked, cdir, istate, path,
baselen, pathspec);
- if (is_dot_or_dotdot(de->d_name) || !strcmp(de->d_name, ".git"))
+ if (is_dot_or_dotdot(de->d_name) || !fspathcmp(de->d_name, ".git"))
return path_none;
strbuf_setlen(path, baselen);
strbuf_addstr(path, de->d_name);
*/
refresh_fsmonitor(istate);
if (!(dir->untracked->use_fsmonitor && untracked->valid)) {
- if (stat(path->len ? path->buf : ".", &st)) {
- invalidate_directory(dir->untracked, untracked);
+ if (lstat(path->len ? path->buf : ".", &st)) {
memset(&untracked->stat_data, 0, sizeof(untracked->stat_data));
return 0;
}
if (!untracked->valid ||
match_stat_data_racy(istate, &untracked->stat_data, &st)) {
- if (untracked->valid)
- invalidate_directory(dir->untracked, untracked);
fill_stat_data(&untracked->stat_data, &st);
return 0;
}
}
- if (untracked->check_only != !!check_only) {
- invalidate_directory(dir->untracked, untracked);
+ if (untracked->check_only != !!check_only)
return 0;
- }
/*
* prep_exclude will be called eventually on this directory,
struct strbuf *path,
int check_only)
{
+ const char *c_path;
+
memset(cdir, 0, sizeof(*cdir));
cdir->untracked = untracked;
if (valid_cached_dir(dir, untracked, istate, path, check_only))
return 0;
- cdir->fdir = opendir(path->len ? path->buf : ".");
- if (dir->untracked)
+ c_path = path->len ? path->buf : ".";
+ cdir->fdir = opendir(c_path);
+ if (!cdir->fdir)
+ warning_errno(_("could not open directory '%s'"), c_path);
+ if (dir->untracked) {
+ invalidate_directory(dir->untracked, untracked);
dir->untracked->dir_opened++;
+ }
if (!cdir->fdir)
return -1;
return 0;
/* Validate $GIT_DIR/info/exclude and core.excludesfile */
root = dir->untracked->root;
- if (hashcmp(dir->ss_info_exclude.sha1,
- dir->untracked->ss_info_exclude.sha1)) {
+ if (oidcmp(&dir->ss_info_exclude.oid,
+ &dir->untracked->ss_info_exclude.oid)) {
invalidate_gitignore(dir->untracked, root);
dir->untracked->ss_info_exclude = dir->ss_info_exclude;
}
- if (hashcmp(dir->ss_excludes_file.sha1,
- dir->untracked->ss_excludes_file.sha1)) {
+ if (oidcmp(&dir->ss_excludes_file.oid,
+ &dir->untracked->ss_excludes_file.oid)) {
invalidate_gitignore(dir->untracked, root);
dir->untracked->ss_excludes_file = dir->ss_excludes_file;
}
const char *path, int len, const struct pathspec *pathspec)
{
struct untracked_cache_dir *untracked;
+ uint64_t start = getnanotime();
if (has_symlink_leading_path(path, len))
return dir->nr;
dir->nr = i;
}
+ trace_performance_since(start, "read directory %.*s", len, path);
if (dir->untracked) {
static struct trace_key trace_untracked_stats = TRACE_KEY_INIT(UNTRACKED_STATS);
trace_printf_key(&trace_untracked_stats,
FLEX_ALLOC_MEM(ouc, exclude_per_dir, untracked->exclude_per_dir, len);
stat_data_to_disk(&ouc->info_exclude_stat, &untracked->ss_info_exclude.stat);
stat_data_to_disk(&ouc->excludes_file_stat, &untracked->ss_excludes_file.stat);
- hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.sha1);
- hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.sha1);
+ hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.oid.hash);
+ hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.oid.hash);
ouc->dir_flags = htonl(untracked->dir_flags);
varint_len = encode_varint(untracked->ident.len, varbuf);
rd->data += 20;
}
-static void load_sha1_stat(struct sha1_stat *sha1_stat,
- const unsigned char *data,
- const unsigned char *sha1)
+static void load_oid_stat(struct oid_stat *oid_stat, const unsigned char *data,
+ const unsigned char *sha1)
{
- stat_data_from_disk(&sha1_stat->stat, data);
- hashcpy(sha1_stat->sha1, sha1);
- sha1_stat->valid = 1;
+ stat_data_from_disk(&oid_stat->stat, data);
+ hashcpy(oid_stat->oid.hash, sha1);
+ oid_stat->valid = 1;
}
struct untracked_cache *read_untracked_extension(const void *data, unsigned long sz)
uc = xcalloc(1, sizeof(*uc));
strbuf_init(&uc->ident, ident_len);
strbuf_add(&uc->ident, ident, ident_len);
- load_sha1_stat(&uc->ss_info_exclude,
- next + ouc_offset(info_exclude_stat),
- next + ouc_offset(info_exclude_sha1));
- load_sha1_stat(&uc->ss_excludes_file,
- next + ouc_offset(excludes_file_stat),
- next + ouc_offset(excludes_file_sha1));
+ load_oid_stat(&uc->ss_info_exclude,
+ next + ouc_offset(info_exclude_stat),
+ next + ouc_offset(info_exclude_sha1));
+ load_oid_stat(&uc->ss_excludes_file,
+ next + ouc_offset(excludes_file_stat),
+ next + ouc_offset(excludes_file_sha1));
uc->dir_flags = get_be32(next + ouc_offset(dir_flags));
exclude_per_dir = (const char *)next + ouc_offset(exclude_per_dir);
uc->exclude_per_dir = xstrdup(exclude_per_dir);
}
void untracked_cache_invalidate_path(struct index_state *istate,
- const char *path)
+ const char *path, int safe_path)
{
if (!istate->untracked || !istate->untracked->root)
return;
+ if (!safe_path && !verify_path(path))
+ return;
invalidate_one_component(istate->untracked, istate->untracked->root,
path, strlen(path));
}
void untracked_cache_remove_from_index(struct index_state *istate,
const char *path)
{
- untracked_cache_invalidate_path(istate, path);
+ untracked_cache_invalidate_path(istate, path, 1);
}
void untracked_cache_add_to_index(struct index_state *istate,
const char *path)
{
- untracked_cache_invalidate_path(istate, path);
+ untracked_cache_invalidate_path(istate, path, 1);
}
/* Update gitfile and core.worktree setting to connect work tree and git dir */
struct exclude_list *el;
};
-struct sha1_stat {
+struct oid_stat {
struct stat_data stat;
- unsigned char sha1[20];
+ struct object_id oid;
int valid;
};
};
struct untracked_cache {
- struct sha1_stat ss_info_exclude;
- struct sha1_stat ss_excludes_file;
+ struct oid_stat ss_info_exclude;
+ struct oid_stat ss_excludes_file;
const char *exclude_per_dir;
struct strbuf ident;
/*
/* Enable untracked file cache if set */
struct untracked_cache *untracked;
- struct sha1_stat ss_info_exclude;
- struct sha1_stat ss_excludes_file;
+ struct oid_stat ss_info_exclude;
+ struct oid_stat ss_excludes_file;
unsigned unmanaged_exclude_files;
};
int cmp_dir_entry(const void *p1, const void *p2);
int check_dir_entry_contains(const struct dir_entry *out, const struct dir_entry *in);
-void untracked_cache_invalidate_path(struct index_state *, const char *);
+void untracked_cache_invalidate_path(struct index_state *, const char *, int safe_path);
void untracked_cache_remove_from_index(struct index_state *, const char *);
void untracked_cache_add_to_index(struct index_state *, const char *);
int warn_on_object_refname_ambiguity = 1;
int ref_paranoia = -1;
int repository_format_precious_objects;
+char *repository_format_partial_clone;
+const char *core_partial_clone_filter_default;
const char *git_commit_encoding;
const char *git_log_output_encoding;
const char *apply_default_whitespace;
int check_replace_refs = 1;
char *git_replace_ref_base;
enum eol core_eol = EOL_UNSET;
-enum safe_crlf safe_crlf = SAFE_CRLF_WARN;
+int global_conv_flags_eol = CONV_EOL_RNDTRP_WARN;
unsigned whitespace_rule_cfg = WS_DEFAULT_RULE;
enum branch_track git_branch_track = BRANCH_TRACK_REMOTE;
enum rebase_setup_type autorebase = AUTOREBASE_NEVER;
/* The .pack file being generated */
static struct pack_idx_option pack_idx_opts;
static unsigned int pack_id;
-static struct sha1file *pack_file;
+static struct hashfile *pack_file;
static struct packed_git *pack_data;
static struct packed_git **all_packs;
static off_t pack_size;
p->pack_fd = pack_fd;
p->do_not_close = 1;
- pack_file = sha1fd(pack_fd, p->pack_name);
+ pack_file = hashfd(pack_fd, p->pack_name);
hdr.hdr_signature = htonl(PACK_SIGNATURE);
hdr.hdr_version = htonl(2);
hdr.hdr_entries = 0;
- sha1write(pack_file, &hdr, sizeof(hdr));
+ hashwrite(pack_file, &hdr, sizeof(hdr));
pack_data = p;
pack_size = sizeof(hdr);
struct tag *t;
close_pack_windows(pack_data);
- sha1close(pack_file, cur_pack_oid.hash, 0);
+ hashclose(pack_file, cur_pack_oid.hash, 0);
fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1,
pack_data->pack_name, object_count,
cur_pack_oid.hash, pack_size);
unsigned char hdr[96];
struct object_id oid;
unsigned long hdrlen, deltalen;
- git_SHA_CTX c;
+ git_hash_ctx c;
git_zstream s;
hdrlen = xsnprintf((char *)hdr, sizeof(hdr), "%s %lu",
typename(type), (unsigned long)dat->len) + 1;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, hdrlen);
- git_SHA1_Update(&c, dat->buf, dat->len);
- git_SHA1_Final(oid.hash, &c);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
+ the_hash_algo->update_fn(&c, dat->buf, dat->len);
+ the_hash_algo->final_fn(oid.hash, &c);
if (oidout)
oidcpy(oidout, &oid);
return 1;
}
- if (last && last->data.buf && last->depth < max_depth && dat->len > 20) {
+ if (last && last->data.buf && last->depth < max_depth
+ && dat->len > the_hash_algo->rawsz) {
+
delta_count_attempts_by_type[type]++;
delta = diff_delta(last->data.buf, last->data.len,
dat->buf, dat->len,
- &deltalen, dat->len - 20);
+ &deltalen, dat->len - the_hash_algo->rawsz);
} else
delta = NULL;
hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
OBJ_OFS_DELTA, deltalen);
- sha1write(pack_file, hdr, hdrlen);
+ hashwrite(pack_file, hdr, hdrlen);
pack_size += hdrlen;
hdr[pos] = ofs & 127;
while (ofs >>= 7)
hdr[--pos] = 128 | (--ofs & 127);
- sha1write(pack_file, hdr + pos, sizeof(hdr) - pos);
+ hashwrite(pack_file, hdr + pos, sizeof(hdr) - pos);
pack_size += sizeof(hdr) - pos;
} else {
e->depth = 0;
hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
type, dat->len);
- sha1write(pack_file, hdr, hdrlen);
+ hashwrite(pack_file, hdr, hdrlen);
pack_size += hdrlen;
}
- sha1write(pack_file, out, s.total_out);
+ hashwrite(pack_file, out, s.total_out);
pack_size += s.total_out;
e->idx.crc32 = crc32_end(pack_file);
return 0;
}
-static void truncate_pack(struct sha1file_checkpoint *checkpoint)
+static void truncate_pack(struct hashfile_checkpoint *checkpoint)
{
- if (sha1file_truncate(pack_file, checkpoint))
+ if (hashfile_truncate(pack_file, checkpoint))
die_errno("cannot truncate pack to skip duplicate");
pack_size = checkpoint->offset;
}
struct object_id oid;
unsigned long hdrlen;
off_t offset;
- git_SHA_CTX c;
+ git_hash_ctx c;
git_zstream s;
- struct sha1file_checkpoint checkpoint;
+ struct hashfile_checkpoint checkpoint;
int status = Z_OK;
/* Determine if we should auto-checkpoint. */
|| (pack_size + 60 + len) < pack_size)
cycle_packfile();
- sha1file_checkpoint(pack_file, &checkpoint);
+ hashfile_checkpoint(pack_file, &checkpoint);
offset = checkpoint.offset;
hdrlen = xsnprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, out_buf, hdrlen);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, out_buf, hdrlen);
crc32_begin(pack_file);
if (!n && feof(stdin))
die("EOF in data (%" PRIuMAX " bytes remaining)", len);
- git_SHA1_Update(&c, in_buf, n);
+ the_hash_algo->update_fn(&c, in_buf, n);
s.next_in = in_buf;
s.avail_in = n;
len -= n;
if (!s.avail_out || status == Z_STREAM_END) {
size_t n = s.next_out - out_buf;
- sha1write(pack_file, out_buf, n);
+ hashwrite(pack_file, out_buf, n);
pack_size += n;
s.next_out = out_buf;
s.avail_out = out_sz;
}
}
git_deflate_end(&s);
- git_SHA1_Final(oid.hash, &c);
+ the_hash_algo->final_fn(oid.hash, &c);
if (oidout)
oidcpy(oidout, &oid);
{
enum object_type type;
struct packed_git *p = all_packs[oe->pack_id];
- if (p == pack_data && p->pack_size < (pack_size + 20)) {
+ if (p == pack_data && p->pack_size < (pack_size + the_hash_algo->rawsz)) {
/* The object is stored in the packfile we are writing to
* and we have modified it since the last time we scanned
* back to read a previously written object. If an old
- * window covered [p->pack_size, p->pack_size + 20) its
+ * window covered [p->pack_size, p->pack_size + rawsz) its
* data is stale and is not valid. Closing all windows
* and updating the packfile length ensures we can read
* the newly written data.
*/
close_pack_windows(p);
- sha1flush(pack_file);
+ hashflush(pack_file);
- /* We have to offer 20 bytes additional on the end of
+ /* We have to offer rawsz bytes additional on the end of
* the packfile as the core unpacker code assumes the
* footer is present at the file end and must promise
- * at least 20 bytes within any window it maps. But
+ * at least rawsz bytes within any window it maps. But
* we don't actually create the footer here.
*/
- p->pack_size = pack_size + 20;
+ p->pack_size = pack_size + the_hash_algo->rawsz;
}
return unpack_entry(p, oe->idx.offset, &type, sizep);
}
unsigned char fanout, char *path)
{
unsigned int i = 0, j = 0;
- if (fanout >= 20)
+ if (fanout >= the_hash_algo->rawsz)
die("Too large fanout (%u)", fanout);
while (fanout) {
path[i++] = hex_sha1[j++];
path[i++] = '/';
fanout--;
}
- memcpy(path + i, hex_sha1 + j, GIT_SHA1_HEXSZ - j);
- path[i + GIT_SHA1_HEXSZ - j] = '\0';
+ memcpy(path + i, hex_sha1 + j, the_hash_algo->hexsz - j);
+ path[i + the_hash_algo->hexsz - j] = '\0';
}
static uintmax_t do_change_note_fanout(
--- /dev/null
+#include "cache.h"
+#include "packfile.h"
+#include "pkt-line.h"
+#include "strbuf.h"
+#include "transport.h"
+#include "fetch-object.h"
+
+static void fetch_refs(const char *remote_name, struct ref *ref)
+{
+ struct remote *remote;
+ struct transport *transport;
+ int original_fetch_if_missing = fetch_if_missing;
+
+ fetch_if_missing = 0;
+ remote = remote_get(remote_name);
+ if (!remote->url[0])
+ die(_("Remote with no URL"));
+ transport = transport_get(remote, remote->url[0]);
+
+ transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
+ transport_set_option(transport, TRANS_OPT_NO_DEPENDENTS, "1");
+ transport_fetch_refs(transport, ref);
+ fetch_if_missing = original_fetch_if_missing;
+}
+
+void fetch_object(const char *remote_name, const unsigned char *sha1)
+{
+ struct ref *ref = alloc_ref(sha1_to_hex(sha1));
+ hashcpy(ref->old_oid.hash, sha1);
+ fetch_refs(remote_name, ref);
+}
+
+void fetch_objects(const char *remote_name, const struct oid_array *to_fetch)
+{
+ struct ref *ref = NULL;
+ int i;
+
+ for (i = 0; i < to_fetch->nr; i++) {
+ struct ref *new_ref = alloc_ref(oid_to_hex(&to_fetch->oid[i]));
+ oidcpy(&new_ref->old_oid, &to_fetch->oid[i]);
+ new_ref->next = ref;
+ ref = new_ref;
+ }
+ fetch_refs(remote_name, ref);
+}
--- /dev/null
+#ifndef FETCH_OBJECT_H
+#define FETCH_OBJECT_H
+
+#include "sha1-array.h"
+
+extern void fetch_object(const char *remote_name, const unsigned char *sha1);
+
+extern void fetch_objects(const char *remote_name,
+ const struct oid_array *to_fetch);
+
+#endif
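To sketch how this API is meant to be driven (a hypothetical call site, not code from this series): when an object lookup misses in a partial clone, the missing object can be fetched from the promisor remote and the lookup retried. fetch_if_missing and repository_format_partial_clone are the globals introduced elsewhere in this series, and sha1 is the missing object's name:

    if (fetch_if_missing && repository_format_partial_clone) {
        fetch_object(repository_format_partial_clone, sha1);
        /* ... retry the original lookup ... */
    }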
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
+static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;
char *line = packet_read_line(fd, &len);
const char *arg;
- if (!len)
- die(_("git fetch-pack: expected ACK/NAK, got EOF"));
+ if (!line)
+ die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
if (!strcmp(line, "NAK"))
return NAK;
if (skip_prefix(line, "ACK ", &arg)) {
if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
if (agent_supported) strbuf_addf(&c, " agent=%s",
git_user_agent_sanitized());
+ if (args->filter_options.choice)
+ strbuf_addstr(&c, " filter");
packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
strbuf_release(&c);
} else
packet_buf_write(&req_buf, "deepen-not %s", s->string);
}
}
+ if (server_supports_filtering && args->filter_options.choice)
+ packet_buf_write(&req_buf, "filter %s",
+ args->filter_options.filter_spec);
packet_buf_flush(&req_buf);
state_len = req_buf.len;
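So a client sends the "filter <spec>" request line only once the server has advertised the capability; the hunk below that sets server_supports_filtering (and warns when the server lacks it) is the other half of that handshake.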
flushes = 0;
retval = -1;
+ if (args->no_dependents)
+ goto done;
while ((oid = get_rev())) {
packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
print_verbose(args, "have %s", oid_to_hex(oid));
{
struct ref *ref;
int retval;
+ int old_save_commit_buffer = save_commit_buffer;
timestamp_t cutoff = 0;
save_commit_buffer = 0;
}
}
- if (!args->deepen) {
- for_each_ref(mark_complete_oid, NULL);
- for_each_cached_alternate(mark_alternate_complete);
- commit_list_sort_by_date(&complete);
- if (cutoff)
- mark_recent_complete_commits(args, cutoff);
- }
+ if (!args->no_dependents) {
+ if (!args->deepen) {
+ for_each_ref(mark_complete_oid, NULL);
+ for_each_cached_alternate(mark_alternate_complete);
+ commit_list_sort_by_date(&complete);
+ if (cutoff)
+ mark_recent_complete_commits(args, cutoff);
+ }
- /*
- * Mark all complete remote refs as common refs.
- * Don't mark them common yet; the server has to be told so first.
- */
- for (ref = *refs; ref; ref = ref->next) {
- struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
- NULL, 0);
+ /*
+ * Mark all complete remote refs as common refs.
+ * Don't mark them common yet; the server has to be told so first.
+ */
+ for (ref = *refs; ref; ref = ref->next) {
+ struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
+ NULL, 0);
- if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
- continue;
+ if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
+ continue;
- if (!(o->flags & SEEN)) {
- rev_list_push((struct commit *)o, COMMON_REF | SEEN);
+ if (!(o->flags & SEEN)) {
+ rev_list_push((struct commit *)o, COMMON_REF | SEEN);
- mark_common((struct commit *)o, 1, 1);
+ mark_common((struct commit *)o, 1, 1);
+ }
}
}
print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
ref->name);
}
+
+ save_commit_buffer = old_save_commit_buffer;
+
return retval;
}
argv_array_push(&cmd.args, alternate_shallow_file);
}
- if (do_keep) {
+ if (do_keep || args->from_promisor) {
if (pack_lockfile)
cmd.out = -1;
cmd_name = "index-pack";
argv_array_push(&cmd.args, "-v");
if (args->use_thin_pack)
argv_array_push(&cmd.args, "--fix-thin");
- if (args->lock_pack || unpack_limit) {
+ if (do_keep && (args->lock_pack || unpack_limit)) {
char hostname[HOST_NAME_MAX + 1];
if (xgethostname(hostname, sizeof(hostname)))
xsnprintf(hostname, sizeof(hostname), "localhost");
}
if (args->check_self_contained_and_connected)
argv_array_push(&cmd.args, "--check-self-contained-and-connected");
+ if (args->from_promisor)
+ argv_array_push(&cmd.args, "--promisor");
}
else {
cmd_name = "unpack-objects";
else
prefer_ofs_delta = 0;
+ if (server_supports("filter")) {
+ server_supports_filtering = 1;
+ print_verbose(args, _("Server supports filter"));
+ } else if (args->filter_options.choice) {
+ warning("filtering not recognized by server, ignoring");
+ }
+
if ((agent_feature = server_feature_value("agent", &agent_len))) {
agent_supported = 1;
if (agent_len)
#include "string-list.h"
#include "run-command.h"
+#include "list-objects-filter-options.h"
struct oid_array;
int depth;
const char *deepen_since;
const struct string_list *deepen_not;
+ struct list_objects_filter_options filter_options;
unsigned deepen_relative:1;
unsigned quiet:1;
unsigned keep_pack:1;
unsigned cloning:1;
unsigned update_shallow:1;
unsigned deepen:1;
+ unsigned from_promisor:1;
+
+ /*
+ * If 1, fetch_pack() will also not modify any object flags.
+ * This allows fetch_pack() to safely be called by any function,
+ * regardless of which object flags it uses (if any).
+ */
+ unsigned no_dependents:1;
};
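These two bits are what fetch-object.c drives through the TRANS_OPT_FROM_PROMISOR and TRANS_OPT_NO_DEPENDENTS transport options: from_promisor makes the index-pack invocation pass --promisor, and no_dependents short-circuits the have/complete-ref negotiation, per the fetch-pack.c hunks above.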
/*
* as it could be a new untracked file.
*/
trace_printf_key(&trace_fsmonitor, "fsmonitor_refresh_callback '%s'", name);
- untracked_cache_invalidate_path(istate, name);
+ untracked_cache_invalidate_path(istate, name, 0);
}
void refresh_fsmonitor(struct index_state *istate)
{
if (core_fsmonitor) {
ce->ce_flags &= ~CE_FSMONITOR_VALID;
- untracked_cache_invalidate_path(istate, ce->name);
+ untracked_cache_invalidate_path(istate, ce->name, 1);
trace_printf_key(&trace_fsmonitor, "mark_fsmonitor_invalid '%s'", ce->name);
}
}
move_to_original_branch
return
;;
+show-current-patch)
+ exec git am --show-current-patch
+ ;;
esac
if test -z "$rebase_root"
# makes this easy
git cherry-pick ${gpg_sign_opt:+"$gpg_sign_opt"} --allow-empty \
$allow_rerere_autoupdate --right-only "$revisions" \
+ $allow_empty_message \
${restrict_revision+^$restrict_revision}
ret=$?
else
die_with_patch () {
echo "$1" > "$state_dir"/stopped-sha
+ git update-ref REBASE_HEAD "$1"
make_patch "$1"
die "$2"
}
exit_with_patch () {
echo "$1" > "$state_dir"/stopped-sha
+ git update-ref REBASE_HEAD "$1"
make_patch $1
git rev-parse --verify HEAD > "$amend"
gpg_sign_opt_quoted=${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")}
test -d "$rewritten" &&
pick_one_preserving_merges "$@" && return
- output eval git cherry-pick $allow_rerere_autoupdate \
+ output eval git cherry-pick $allow_rerere_autoupdate $allow_empty_message \
${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")} \
"$strategy_args" $empty_args $ff "$@"
--sq-quote "$gpg_sign_opt")} \
$allow_rerere_autoupdate "$merge_args" \
"$strategy_args" \
- -m $(git rev-parse --sq-quote "$msg_content") \
+ -m "$(git rev-parse --sq-quote "$msg_content")" \
"$new_parents"
then
printf "%s\n" "$msg_content" > "$GIT_DIR"/MERGE_MSG
;;
*)
output eval git cherry-pick $allow_rerere_autoupdate \
+ $allow_empty_message \
${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")} \
"$strategy_args" "$@" ||
die_with_patch $sha1 "$(eval_gettext "Could not pick \$sha1")"
mark_action_done
do_pick $sha1 "$rest"
- git commit --amend --no-post-rewrite ${gpg_sign_opt:+"$gpg_sign_opt"} || {
+ git commit --amend --no-post-rewrite ${gpg_sign_opt:+"$gpg_sign_opt"} \
+ $allow_empty_message || {
warn "$(eval_gettext "\
Could not amend commit after successfully picking \$sha1... \$rest
This is most likely due to an empty commit message, or the pre-commit hook
# This is an intermediate commit; its message will only be
# used in case of trouble. So use the long version:
do_with_author output git commit --amend --no-verify -F "$squash_msg" \
- ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+ ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
die_failed_squash $sha1 "$rest"
;;
*)
if test -f "$fixup_msg"
then
do_with_author git commit --amend --no-verify -F "$fixup_msg" \
- ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+ ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
die_failed_squash $sha1 "$rest"
else
cp "$squash_msg" "$GIT_DIR"/SQUASH_MSG || exit
rm -f "$GIT_DIR"/MERGE_MSG
do_with_author git commit --amend --no-verify -F "$GIT_DIR"/SQUASH_MSG -e \
- ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+ ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
die_failed_squash $sha1 "$rest"
fi
rm -f "$squash_msg" "$fixup_msg"
continue)
if test ! -d "$rewritten"
then
- exec git rebase--helper ${force_rebase:+--no-ff} --continue
+ exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
+ --continue
fi
# do we have anything to commit?
if git diff-index --cached --quiet HEAD --
You have uncommitted changes in your working tree. Please commit them
first and then run 'git rebase --continue' again.")"
do_with_author git commit --amend --no-verify -F "$msg" -e \
- ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+ ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
die "$(gettext "Could not commit staged changes.")"
else
do_with_author git commit --no-verify -F "$msg" -e \
- ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+ ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
die "$(gettext "Could not commit staged changes.")"
fi
fi
if test ! -d "$rewritten"
then
- exec git rebase--helper ${force_rebase:+--no-ff} --continue
+ exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
+ --continue
fi
do_rest
return 0
exit
;;
+show-current-patch)
+ exec git show REBASE_HEAD --
+ ;;
esac
comment_for_reflog start
orig_head=$(git rev-parse --verify HEAD) || die "$(gettext "No HEAD?")"
mkdir -p "$state_dir" || die "$(eval_gettext "Could not create temporary \$state_dir")"
+rm -f "$(git rev-parse --git-path REBASE_HEAD)"
: > "$state_dir"/interactive || die "$(gettext "Could not mark as interactive")"
write_basic_state
if test -z "$rebase_root" && test ! -d "$rewritten"
then
require_clean_work_tree "rebase"
- exec git rebase--helper ${force_rebase:+--no-ff} --continue
+ exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
+ --continue
fi
do_rest
cmt=$(cat "$state_dir/current")
if ! git diff-index --quiet --ignore-submodules HEAD --
then
- if ! git commit ${gpg_sign_opt:+"$gpg_sign_opt"} --no-verify -C "$cmt"
+ if ! git commit ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message \
+ --no-verify -C "$cmt"
then
echo "Commit failed, please do not call \"git commit\""
echo "directly, but instead do one of the following: "
echo "$msgnum" >"$state_dir/msgnum"
cmt="$(cat "$state_dir/cmt.$msgnum")"
echo "$cmt" > "$state_dir/current"
+ git update-ref REBASE_HEAD "$cmt"
hd=$(git rev-parse --verify HEAD)
cmt_name=$(git symbolic-ref HEAD 2> /dev/null || echo HEAD)
eval GITHEAD_$cmt='"${cmt_name##refs/heads/}~$(($end - $msgnum))"'
finish_rb_merge
return
;;
+show-current-patch)
+ exec git show REBASE_HEAD --
+ ;;
esac
mkdir -p "$state_dir"
echo "$onto_name" > "$state_dir/onto_name"
write_basic_state
+rm -f "$(git rev-parse --git-path REBASE_HEAD)"
msgnum=0
for cmt in $(git rev-list --reverse --no-merges "$revisions")
i,interactive! let the user edit the list of commits to rebase
x,exec=! add exec lines after each commit of the editable list
k,keep-empty preserve empty commits during rebase
+allow-empty-message allow rebasing commits with empty messages
f,force-rebase! force rebase even if branch is up to date
X,strategy-option=! pass the argument through to the merge strategy
stat! display a diffstat of what changed upstream
skip! skip current patch and continue
edit-todo! edit the todo list during an interactive rebase
quit! abort but keep HEAD where it is
+show-current-patch! show the patch file being applied or merged
"
. git-sh-setup
set_reflog_action rebase
preserve_merges=
autosquash=
keep_empty=
+allow_empty_message=
test "$(git config --bool rebase.autosquash)" = "true" && autosquash=t
case "$(git config --bool commit.gpgsign)" in
true) gpg_sign_opt=-S ;;
}
finish_rebase () {
+ rm -f "$(git rev-parse --git-path REBASE_HEAD)"
apply_autostash &&
{ git gc --auto || true; } &&
rm -rf "$state_dir"
--verify)
ok_to_skip_pre_rebase=
;;
- --continue|--skip|--abort|--quit|--edit-todo)
+ --continue|--skip|--abort|--quit|--edit-todo|--show-current-patch)
test $total_argc -eq 2 || usage
action=${1##--}
;;
--keep-empty)
keep_empty=yes
;;
+ --allow-empty-message)
+ allow_empty_message=--allow-empty-message
+ ;;
--preserve-merges)
preserve_merges=t
test -z "$interactive_rebase" && interactive_rebase=implied
edit-todo)
run_specific_rebase
;;
+show-current-patch)
+ run_specific_rebase
+ die "BUG: run_specific_rebase is not supposed to return here"
+ ;;
esac
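In practice, each backend now records the commit it is about to apply in REBASE_HEAD (and removes the ref when a rebase starts or finishes), so a user stopped on a conflict can run "git rebase --show-current-patch", which resolves to "git show REBASE_HEAD --" (or to "git am --show-current-patch" for the am backend).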
# Make sure no rebase is in progress
use Term::ANSIColor;
use File::Temp qw/ tempdir tempfile /;
use File::Spec::Functions qw(catdir catfile);
-use Error qw(:try);
+use Git::Error qw(:try);
use Cwd qw(abs_path cwd);
use Git;
use Git::I18N;
die __("Cannot run git format-patch from outside a repository\n")
if $format_patch and not $repo;
+die __("`batch-size` and `relogin` must be specified together " .
+ "(via command-line or configuration option)\n")
+ if defined $relogin_delay and not defined $batch_size;
+
# Now, let's fill any that aren't set in with defaults:
sub read_config {
# First decide what scheme to use...
GIT_INTERNAL_GETTEXT_SH_SCHEME=fallthrough
-if test -n "@@USE_GETTEXT_SCHEME@@"
+if test -n "$GIT_GETTEXT_POISON"
+then
+ GIT_INTERNAL_GETTEXT_SH_SCHEME=poison
+elif test -n "@@USE_GETTEXT_SCHEME@@"
then
GIT_INTERNAL_GETTEXT_SH_SCHEME="@@USE_GETTEXT_SCHEME@@"
elif test -n "$GIT_INTERNAL_GETTEXT_TEST_FALLBACKS"
then
: no probing necessary
-elif test -n "$GIT_GETTEXT_POISON"
-then
- GIT_INTERNAL_GETTEXT_SH_SCHEME=poison
elif type gettext.sh >/dev/null 2>&1
then
# GNU libintl's gettext.sh
shift
done
- if test -n "$deinit_all" && test "$#" -ne 0
- then
- echo >&2 "$(eval_gettext "pathspec and --all are incompatible")"
- usage
- fi
- if test $# = 0 && test -z "$deinit_all"
- then
- die "$(eval_gettext "Use '--all' if you really want to deinitialize all submodules")"
- fi
-
- {
- git submodule--helper list --prefix "$wt_prefix" "$@" ||
- echo "#unmatched" $?
- } |
- while read -r mode sha1 stage sm_path
- do
- die_if_unmatched "$mode" "$sha1"
- name=$(git submodule--helper name "$sm_path") || exit
-
- displaypath=$(git submodule--helper relative-path "$sm_path" "$wt_prefix")
-
- # Remove the submodule work tree (unless the user already did it)
- if test -d "$sm_path"
- then
- # Protect submodules containing a .git directory
- if test -d "$sm_path/.git"
- then
- die "$(eval_gettext "\
-Submodule work tree '\$displaypath' contains a .git directory
-(use 'rm -rf' if you really want to remove it including all of its history)")"
- fi
-
- if test -z "$force"
- then
- git rm -qn "$sm_path" ||
- die "$(eval_gettext "Submodule work tree '\$displaypath' contains local modifications; use '-f' to discard them")"
- fi
- rm -rf "$sm_path" &&
- say "$(eval_gettext "Cleared directory '\$displaypath'")" ||
- say "$(eval_gettext "Could not remove submodule work tree '\$displaypath'")"
- fi
-
- mkdir "$sm_path" || say "$(eval_gettext "Could not create empty submodule directory '\$displaypath'")"
-
- # Remove the .git/config entries (unless the user already did it)
- if test -n "$(git config --get-regexp submodule."$name\.")"
- then
- # Remove the whole section so we have a clean state when
- # the user later decides to init this submodule again
- url=$(git config submodule."$name".url)
- git config --remove-section submodule."$name" 2>/dev/null &&
- say "$(eval_gettext "Submodule '\$name' (\$url) unregistered for path '\$displaypath'")"
- fi
- done
+ git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${GIT_QUIET:+--quiet} ${prefix:+--prefix "$prefix"} ${force:+--force} ${deinit_all:+--all} "$@"
}
is_tip_reachable () (
;;
esac
done
- cd_to_toplevel
- {
- git submodule--helper list --prefix "$wt_prefix" "$@" ||
- echo "#unmatched" $?
- } |
- while read -r mode sha1 stage sm_path
- do
- die_if_unmatched "$mode" "$sha1"
-
- # skip inactive submodules
- if ! git submodule--helper is-active "$sm_path"
- then
- continue
- fi
-
- name=$(git submodule--helper name "$sm_path")
- url=$(git config -f .gitmodules --get submodule."$name".url)
-
- # Possibly a url relative to parent
- case "$url" in
- ./*|../*)
- # rewrite foo/bar as ../.. to find path from
- # submodule work tree to superproject work tree
- up_path="$(printf '%s\n' "$sm_path" | sed "s/[^/][^/]*/../g")" &&
- # guarantee a trailing /
- up_path=${up_path%/}/ &&
- # path from submodule work tree to submodule origin repo
- sub_origin_url=$(git submodule--helper resolve-relative-url "$url" "$up_path") &&
- # path from superproject work tree to submodule origin repo
- super_config_url=$(git submodule--helper resolve-relative-url "$url") || exit
- ;;
- *)
- sub_origin_url="$url"
- super_config_url="$url"
- ;;
- esac
-
- displaypath=$(git submodule--helper relative-path "$prefix$sm_path" "$wt_prefix")
- say "$(eval_gettext "Synchronizing submodule url for '\$displaypath'")"
- git config submodule."$name".url "$super_config_url"
-
- if test -e "$sm_path"/.git
- then
- (
- sanitize_submodule_env
- cd "$sm_path"
- remote=$(get_default_remote)
- git config remote."$remote".url "$sub_origin_url"
- if test -n "$recursive"
- then
- prefix="$prefix$sm_path/"
- eval cmd_sync
- fi
- )
- fi
- done
+ git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper sync ${GIT_QUIET:+--quiet} ${recursive:+--recursive} "$@"
}
cmd_absorbgitdirs()
$ctx->copy($src, $rev, $dst)
unless $_dry_run;
+ # Release resources held by ctx before creating another SVN::Ra
+ # so destruction is orderly. This seems necessary with SVN 1.9.5
+ # to avoid segfaults.
+ $ctx = undef;
+
$gs->fetch_all;
}
#include "run-command.h"
const char git_usage_string[] =
- "git [--version] [--help] [-C <path>] [-c name=value]\n"
- " [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]\n"
- " [-p | --paginate | --no-pager] [--no-replace-objects] [--bare]\n"
- " [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]\n"
- " <command> [<args>]";
+ N_("git [--version] [--help] [-C <path>] [-c <name>=<value>]\n"
+ " [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]\n"
+ " [-p | --paginate | --no-pager] [--no-replace-objects] [--bare]\n"
+ " [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]\n"
+ " <command> [<args>]");
const char git_more_info_string[] =
N_("'git help -a' and 'git help -g' list available subcommands and some\n"
*envchanged = 1;
} else if (!strcmp(cmd, "--git-dir")) {
if (*argc < 2) {
- fprintf(stderr, "No directory given for --git-dir.\n" );
+ fprintf(stderr, _("no directory given for --git-dir\n" ));
usage(git_usage_string);
}
setenv(GIT_DIR_ENVIRONMENT, (*argv)[1], 1);
*envchanged = 1;
} else if (!strcmp(cmd, "--namespace")) {
if (*argc < 2) {
- fprintf(stderr, "No namespace given for --namespace.\n" );
+ fprintf(stderr, _("no namespace given for --namespace\n" ));
usage(git_usage_string);
}
setenv(GIT_NAMESPACE_ENVIRONMENT, (*argv)[1], 1);
*envchanged = 1;
} else if (!strcmp(cmd, "--work-tree")) {
if (*argc < 2) {
- fprintf(stderr, "No directory given for --work-tree.\n" );
+ fprintf(stderr, _("no directory given for --work-tree\n" ));
usage(git_usage_string);
}
setenv(GIT_WORK_TREE_ENVIRONMENT, (*argv)[1], 1);
*envchanged = 1;
} else if (!strcmp(cmd, "--super-prefix")) {
if (*argc < 2) {
- fprintf(stderr, "No prefix given for --super-prefix.\n" );
+ fprintf(stderr, _("no prefix given for --super-prefix\n" ));
usage(git_usage_string);
}
setenv(GIT_SUPER_PREFIX_ENVIRONMENT, (*argv)[1], 1);
*envchanged = 1;
} else if (!strcmp(cmd, "-c")) {
if (*argc < 2) {
- fprintf(stderr, "-c expects a configuration string\n" );
+ fprintf(stderr, _("-c expects a configuration string\n" ));
usage(git_usage_string);
}
git_config_push_parameter((*argv)[1]);
*envchanged = 1;
} else if (!strcmp(cmd, "-C")) {
if (*argc < 2) {
- fprintf(stderr, "No directory given for -C.\n" );
+ fprintf(stderr, _("no directory given for -C\n" ));
usage(git_usage_string);
}
if ((*argv)[1][0]) {
if (chdir((*argv)[1]))
- die_errno("Cannot change to '%s'", (*argv)[1]);
+ die_errno("cannot change to '%s'", (*argv)[1]);
if (envchanged)
*envchanged = 1;
}
list_builtins();
exit(0);
} else {
- fprintf(stderr, "Unknown option: %s\n", cmd);
+ fprintf(stderr, _("unknown option: %s\n"), cmd);
usage(git_usage_string);
}
if (ret >= 0) /* normal exit */
exit(ret);
- die_errno("While expanding alias '%s': '%s'",
+ die_errno("while expanding alias '%s': '%s'",
alias_command, alias_string + 1);
}
count = split_cmdline(alias_string, &new_argv);
split_cmdline_strerror(count));
option_count = handle_options(&new_argv, &count, &envchanged);
if (envchanged)
- die("alias '%s' changes environment variables\n"
- "You can use '!git' in the alias to do this.",
+ die("alias '%s' changes environment variables.\n"
+ "You can use '!git' in the alias to do this",
alias_command);
memmove(new_argv - option_count, new_argv,
count * sizeof(char *));
if (errno != ENOENT)
break;
if (was_alias) {
- fprintf(stderr, "Expansion of alias '%s' failed; "
- "'%s' is not a git command\n",
+ fprintf(stderr, _("expansion of alias '%s' failed; "
+ "'%s' is not a git command\n"),
cmd, argv[0]);
exit(1);
}
break;
}
- fprintf(stderr, "Failed to run command '%s': %s\n",
+ fprintf(stderr, _("failed to run command '%s': %s\n"),
cmd, strerror(errno));
return 1;
fwrite(buf, size, 1, stdout);
}
+static void color_set(char *dst, const char *color_bytes)
+{
+ xsnprintf(dst, COLOR_MAXLEN, "%s", color_bytes);
+}
+
/*
* Initialize the grep_defaults template with hardcoded defaults.
* We could let the compiler do this, but without C99 initializers
#include "block-sha1/sha1.h"
#endif
+#ifndef platform_SHA_CTX
+/*
+ * platform's underlying implementation of SHA-1; could be OpenSSL,
+ * blk_SHA, Apple CommonCrypto, etc... Note that the relevant
+ * SHA-1 header may have already defined platform_SHA_CTX for our
+ * own implementations like block-sha1 and ppc-sha1, so we list
+ * the default for OpenSSL compatible SHA-1 implementations here.
+ */
+#define platform_SHA_CTX SHA_CTX
+#define platform_SHA1_Init SHA1_Init
+#define platform_SHA1_Update SHA1_Update
+#define platform_SHA1_Final SHA1_Final
+#endif
+
+#define git_SHA_CTX platform_SHA_CTX
+#define git_SHA1_Init platform_SHA1_Init
+#define git_SHA1_Update platform_SHA1_Update
+#define git_SHA1_Final platform_SHA1_Final
+
+#ifdef SHA1_MAX_BLOCK_SIZE
+#include "compat/sha1-chunked.h"
+#undef git_SHA1_Update
+#define git_SHA1_Update git_SHA1_Update_Chunked
+#endif
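The SHA1_MAX_BLOCK_SIZE branch covers platform implementations whose SHA1_Update() cannot digest arbitrarily large buffers in a single call (Apple's CommonCrypto is the usual example); the chunked wrapper feeds the data in bounded pieces while keeping the git_SHA1_Update name for callers.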
+
/*
* Note that these constants are suitable for indexing the hash_algos array and
* comparing against each other, but are otherwise arbitrary, so they should not
/* Number of algorithms supported (including unknown). */
#define GIT_HASH_NALGOS (GIT_HASH_SHA1 + 1)
-typedef void (*git_hash_init_fn)(void *ctx);
-typedef void (*git_hash_update_fn)(void *ctx, const void *in, size_t len);
-typedef void (*git_hash_final_fn)(unsigned char *hash, void *ctx);
+/* A suitably aligned type for stack allocations of hash contexts. */
+union git_hash_ctx {
+ git_SHA_CTX sha1;
+};
+typedef union git_hash_ctx git_hash_ctx;
+
+typedef void (*git_hash_init_fn)(git_hash_ctx *ctx);
+typedef void (*git_hash_update_fn)(git_hash_ctx *ctx, const void *in, size_t len);
+typedef void (*git_hash_final_fn)(unsigned char *hash, git_hash_ctx *ctx);
struct git_hash_algo {
/*
/* A four-byte version identifier, used in pack indices. */
uint32_t format_id;
- /* The size of a hash context (e.g. git_SHA_CTX). */
- size_t ctxsz;
-
/* The length of the hash in binary. */
size_t rawsz;
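With the typedefs now taking git_hash_ctx, an implementation plugs into struct git_hash_algo through small adapters; a sketch along the lines of the SHA-1 wiring (function names here are illustrative):

    static void git_hash_sha1_init(git_hash_ctx *ctx)
    {
        git_SHA1_Init(&ctx->sha1);
    }

    static void git_hash_sha1_update(git_hash_ctx *ctx, const void *data, size_t len)
    {
        git_SHA1_Update(&ctx->sha1, data, len);
    }

    static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx)
    {
        git_SHA1_Final(hash, &ctx->sha1);
    }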
*/
static inline void hashmap_enable_item_counting(struct hashmap *map)
{
- void *item;
unsigned int n = 0;
struct hashmap_iter iter;
return;
hashmap_iter_init(map, &iter);
- while ((item = hashmap_iter_next(&iter)))
+ while (hashmap_iter_next(&iter))
n++;
map->do_count_items = 1;
lock->timeout = -1;
}
XML_ParserFree(parser);
+ } else {
+ fprintf(stderr,
+ "error: curl result=%d, HTTP code=%ld\n",
+ results.curl_result, results.http_code);
}
} else {
fprintf(stderr, "Unable to start LOCK request\n");
} else if (hashcmp(obj_req->sha1, req->real_sha1)) {
ret = error("File %s has bad hash", hex);
} else if (req->rename < 0) {
- ret = error("unable to write sha1 filename %s",
- sha1_file_name(req->sha1));
+ struct strbuf buf = STRBUF_INIT;
+ sha1_file_name(&buf, req->sha1);
+ ret = error("unable to write sha1 filename %s", buf.buf);
+ strbuf_release(&buf);
}
release_http_object_request(req);
#include "transport.h"
#include "packfile.h"
#include "protocol.h"
+#include "string-list.h"
static struct trace_key trace_curl = TRACE_KEY_INIT(CURL);
+static int trace_curl_data = 1;
+static struct string_list cookies_to_redact = STRING_LIST_INIT_DUP;
#if LIBCURL_VERSION_NUM >= 0x070a08
long int git_curl_ipresolve = CURL_IPRESOLVE_WHATEVER;
#else
/* Everything else is opaque and possibly sensitive */
strbuf_setlen(header, sensitive_header - header->buf);
strbuf_addstr(header, " <redacted>");
+ } else if (cookies_to_redact.nr &&
+ skip_prefix(header->buf, "Cookie:", &sensitive_header)) {
+ struct strbuf redacted_header = STRBUF_INIT;
+ char *cookie;
+
+ while (isspace(*sensitive_header))
+ sensitive_header++;
+
+ /*
+ * The contents of header starting from sensitive_header will
+ * subsequently be overridden, so it is fine to mutate this
+ * string (hence the assignment to "char *").
+ */
+ cookie = (char *) sensitive_header;
+
+ while (cookie) {
+ char *equals;
+ char *semicolon = strstr(cookie, "; ");
+ if (semicolon)
+ *semicolon = 0;
+ equals = strchrnul(cookie, '=');
+ if (!equals) {
+ /* invalid cookie, just append and continue */
+ strbuf_addstr(&redacted_header, cookie);
+ continue;
+ }
+ *equals = 0; /* temporarily set to NUL for lookup */
+ if (string_list_lookup(&cookies_to_redact, cookie)) {
+ strbuf_addstr(&redacted_header, cookie);
+ strbuf_addstr(&redacted_header, "=<redacted>");
+ } else {
+ *equals = '=';
+ strbuf_addstr(&redacted_header, cookie);
+ }
+ if (semicolon) {
+ /*
+ * There are more cookies. (Or, for some
+ * reason, the input string ends in "; ".)
+ */
+ strbuf_addstr(&redacted_header, "; ");
+ cookie = semicolon + strlen("; ");
+ } else {
+ cookie = NULL;
+ }
+ }
+
+ strbuf_setlen(header, sensitive_header - header->buf);
+ strbuf_addbuf(header, &redacted_header);
}
}
curl_dump_header(text, (unsigned char *)data, size, DO_FILTER);
break;
case CURLINFO_DATA_OUT:
- text = "=> Send data";
- curl_dump_data(text, (unsigned char *)data, size);
+ if (trace_curl_data) {
+ text = "=> Send data";
+ curl_dump_data(text, (unsigned char *)data, size);
+ }
break;
case CURLINFO_SSL_DATA_OUT:
- text = "=> Send SSL data";
- curl_dump_data(text, (unsigned char *)data, size);
+ if (trace_curl_data) {
+ text = "=> Send SSL data";
+ curl_dump_data(text, (unsigned char *)data, size);
+ }
break;
case CURLINFO_HEADER_IN:
text = "<= Recv header";
curl_dump_header(text, (unsigned char *)data, size, NO_FILTER);
break;
case CURLINFO_DATA_IN:
- text = "<= Recv data";
- curl_dump_data(text, (unsigned char *)data, size);
+ if (trace_curl_data) {
+ text = "<= Recv data";
+ curl_dump_data(text, (unsigned char *)data, size);
+ }
break;
case CURLINFO_SSL_DATA_IN:
- text = "<= Recv SSL data";
- curl_dump_data(text, (unsigned char *)data, size);
+ if (trace_curl_data) {
+ text = "<= Recv SSL data";
+ curl_dump_data(text, (unsigned char *)data, size);
+ }
break;
default: /* we ignore unknown types by default */
if (getenv("GIT_CURL_VERBOSE"))
curl_easy_setopt(result, CURLOPT_VERBOSE, 1L);
setup_curl_trace(result);
+ if (getenv("GIT_TRACE_CURL_NO_DATA"))
+ trace_curl_data = 0;
+ if (getenv("GIT_REDACT_COOKIES")) {
+ string_list_split(&cookies_to_redact,
+ getenv("GIT_REDACT_COOKIES"), ',', -1);
+ string_list_sort(&cookies_to_redact);
+ }
curl_easy_setopt(result, CURLOPT_USERAGENT,
user_agent ? user_agent : git_user_agent());
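
A sketch of how the two new knobs combine with the existing
GIT_TRACE_CURL tracing (the log path and cookie names are only
examples):

    GIT_TRACE_CURL=/tmp/curl.log \
    GIT_TRACE_CURL_NO_DATA=1 \
    GIT_REDACT_COOKIES=sid,token \
    git fetch origin

This logs the curl exchange to /tmp/curl.log, suppresses the raw data
dumps, and redacts the values of the "sid" and "token" cookies.
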
unsigned char *sha1)
{
char *hex = sha1_to_hex(sha1);
- const char *filename;
+ struct strbuf filename = STRBUF_INIT;
char prevfile[PATH_MAX];
int prevlocal;
char prev_buf[PREV_BUF_SIZE];
hashcpy(freq->sha1, sha1);
freq->localfile = -1;
- filename = sha1_file_name(sha1);
+ sha1_file_name(&filename, sha1);
snprintf(freq->tmpfile, sizeof(freq->tmpfile),
- "%s.temp", filename);
+ "%s.temp", filename.buf);
- snprintf(prevfile, sizeof(prevfile), "%s.prev", filename);
+ snprintf(prevfile, sizeof(prevfile), "%s.prev", filename.buf);
unlink_or_warn(prevfile);
rename(freq->tmpfile, prevfile);
unlink_or_warn(freq->tmpfile);
+ strbuf_release(&filename);
if (freq->localfile != -1)
error("fd leakage in start: %d", freq->localfile);
int finish_http_object_request(struct http_object_request *freq)
{
struct stat st;
+ struct strbuf filename = STRBUF_INIT;
close(freq->localfile);
freq->localfile = -1;
unlink_or_warn(freq->tmpfile);
return -1;
}
- freq->rename =
- finalize_object_file(freq->tmpfile, sha1_file_name(freq->sha1));
+
+ sha1_file_name(&filename, freq->sha1);
+ freq->rename = finalize_object_file(freq->tmpfile, filename.buf);
+ strbuf_release(&filename);
return freq->rename;
}
* subordinate commands when necessary. We also "intern" the arg for
* the convenience of the current command.
*/
-int parse_list_objects_filter(struct list_objects_filter_options *filter_options,
- const char *arg)
+static int gently_parse_list_objects_filter(
+ struct list_objects_filter_options *filter_options,
+ const char *arg,
+ struct strbuf *errbuf)
{
const char *v0;
- if (filter_options->choice)
- die(_("multiple object filter types cannot be combined"));
+ if (filter_options->choice) {
+ if (errbuf) {
+ strbuf_init(errbuf, 0);
+ strbuf_addstr(
+ errbuf,
+ _("multiple filter-specs cannot be combined"));
+ }
+ return 1;
+ }
filter_options->filter_spec = strdup(arg);
if (!strcmp(arg, "blob:none")) {
filter_options->choice = LOFC_BLOB_NONE;
return 0;
- }
- if (skip_prefix(arg, "blob:limit=", &v0)) {
- if (!git_parse_ulong(v0, &filter_options->blob_limit_value))
- die(_("invalid filter-spec expression '%s'"), arg);
- filter_options->choice = LOFC_BLOB_LIMIT;
- return 0;
- }
+ } else if (skip_prefix(arg, "blob:limit=", &v0)) {
+ if (git_parse_ulong(v0, &filter_options->blob_limit_value)) {
+ filter_options->choice = LOFC_BLOB_LIMIT;
+ return 0;
+ }
- if (skip_prefix(arg, "sparse:oid=", &v0)) {
+ } else if (skip_prefix(arg, "sparse:oid=", &v0)) {
struct object_context oc;
struct object_id sparse_oid;
filter_options->sparse_oid_value = oiddup(&sparse_oid);
filter_options->choice = LOFC_SPARSE_OID;
return 0;
- }
- if (skip_prefix(arg, "sparse:path=", &v0)) {
+ } else if (skip_prefix(arg, "sparse:path=", &v0)) {
filter_options->choice = LOFC_SPARSE_PATH;
filter_options->sparse_path_value = strdup(v0);
return 0;
}
- die(_("invalid filter-spec expression '%s'"), arg);
+ if (errbuf) {
+ strbuf_init(errbuf, 0);
+ strbuf_addf(errbuf, _("invalid filter-spec '%s'"), arg);
+ }
+ memset(filter_options, 0, sizeof(*filter_options));
+ return 1;
+}
+
+int parse_list_objects_filter(struct list_objects_filter_options *filter_options,
+ const char *arg)
+{
+ struct strbuf buf = STRBUF_INIT;
+ if (gently_parse_list_objects_filter(filter_options, arg, &buf))
+ die("%s", buf.buf);
return 0;
}
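
As a sketch of the grammar the parser above accepts (the limit value is
illustrative; git_parse_ulong() understands unit suffixes such as "k"
and "m"):

    blob:none              omit all blobs
    blob:limit=1m          omit blobs larger than 1 MiB
    sparse:oid=<blob-ish>  use a sparse specification stored in a blob
    sparse:path=<path>     use a sparse specification from a local file

A caller that wants hard failure on a bad spec keeps using the
non-gentle wrapper:

    struct list_objects_filter_options opts = { 0 };
    parse_list_objects_filter(&opts, "blob:limit=1m"); /* dies on error */
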
struct list_objects_filter_options *filter_options = opt->value;
if (unset || !arg) {
- list_objects_filter_release(filter_options);
+ list_objects_filter_set_no_filter(filter_options);
return 0;
}
free(filter_options->sparse_path_value);
memset(filter_options, 0, sizeof(*filter_options));
}
+
+void partial_clone_register(
+ const char *remote,
+ const struct list_objects_filter_options *filter_options)
+{
+ /*
+ * Record the name of the partial clone remote in the
+ * config and in the global variable -- the latter is
+ * used throughout to indicate that partial clone is
+ * enabled and to expect missing objects.
+ */
+ if (repository_format_partial_clone &&
+ *repository_format_partial_clone &&
+ strcmp(remote, repository_format_partial_clone))
+ die(_("cannot change partial clone promisor remote"));
+
+ git_config_set("core.repositoryformatversion", "1");
+ git_config_set("extensions.partialclone", remote);
+
+ repository_format_partial_clone = xstrdup(remote);
+
+ /*
+ * Record the initial filter-spec in the config as
+ * the default for subsequent fetches from this remote.
+ */
+ core_partial_clone_filter_default =
+ xstrdup(filter_options->filter_spec);
+ git_config_set("core.partialclonefilter",
+ core_partial_clone_filter_default);
+}
+
+void partial_clone_get_default_filter_spec(
+ struct list_objects_filter_options *filter_options)
+{
+ /*
+ * Parse default value, but silently ignore it if it is invalid.
+ */
+ gently_parse_list_objects_filter(filter_options,
+ core_partial_clone_filter_default,
+ NULL);
+}
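
For a concrete picture of what partial_clone_register() writes
(assuming the promisor remote is called "origin" and the initial
filter-spec was "blob:none"), the resulting repository configuration
would be:

    [core]
        repositoryformatversion = 1
        partialclonefilter = blob:none
    [extensions]
        partialclone = origin

Later fetches recover the default filter through
partial_clone_get_default_filter_spec(), which deliberately parses the
configured value in the "gentle" mode so that a bad config entry does
not abort the command.
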
*/
enum list_objects_filter_choice choice;
+ /*
+ * Choice is LOFC_DISABLED because "--no-filter" was requested.
+ */
+ unsigned int no_filter : 1;
+
/*
* Parsed values (fields) from within the filter-spec. These are
* choice-specific; not all values will be defined for any given
void list_objects_filter_release(
struct list_objects_filter_options *filter_options);
+static inline void list_objects_filter_set_no_filter(
+ struct list_objects_filter_options *filter_options)
+{
+ list_objects_filter_release(filter_options);
+ filter_options->no_filter = 1;
+}
+
+void partial_clone_register(
+ const char *remote,
+ const struct list_objects_filter_options *filter_options);
+void partial_clone_get_default_filter_spec(
+ struct list_objects_filter_options *filter_options);
+
#endif /* LIST_OBJECTS_FILTER_OPTIONS_H */
#include "list-objects.h"
#include "list-objects-filter.h"
#include "list-objects-filter-options.h"
+#include "packfile.h"
static void process_blob(struct rev_info *revs,
struct blob *blob,
if (obj->flags & (UNINTERESTING | SEEN))
return;
+ /*
+ * Pre-filter known-missing objects when explicitly requested.
+ * Otherwise, a missing object error message may be reported
+ * later (depending on other filtering criteria).
+ *
+ * Note that this "--exclude-promisor-objects" pre-filtering
+ * may cause the actual filter to report an incomplete list
+ * of missing objects.
+ */
+ if (revs->exclude_promisor_objects &&
+ !has_object_file(&obj->oid) &&
+ is_promisor_object(&obj->oid))
+ return;
+
pathlen = path->len;
strbuf_addstr(path, name);
if (filter_fn)
all_entries_interesting: entry_not_interesting;
int baselen = base->len;
enum list_objects_filter_result r = LOFR_MARK_SEEN | LOFR_DO_SHOW;
+ int gently = revs->ignore_missing_links ||
+ revs->exclude_promisor_objects;
if (!revs->tree_objects)
return;
die("bad tree object");
if (obj->flags & (UNINTERESTING | SEEN))
return;
- if (parse_tree_gently(tree, revs->ignore_missing_links) < 0) {
+ if (parse_tree_gently(tree, gently) < 0) {
if (revs->ignore_missing_links)
return;
+
+ /*
+ * Pre-filter known-missing tree objects when explicitly
+ * requested. This may cause the actual filter to report
+ * an incomplete list of missing objects.
+ */
+ if (revs->exclude_promisor_objects &&
+ is_promisor_object(&obj->oid))
+ return;
+
die("bad tree object %s", oid_to_hex(&obj->oid));
}
int status, nth;
size_t payload_size, gpg_message_offset;
- hash_sha1_file(extra->value, extra->len, typename(OBJ_TAG), oid.hash);
+ hash_object_file(extra->value, extra->len, typename(OBJ_TAG), &oid);
tag = lookup_tag(&oid);
if (!tag)
return; /* error message already given */
strbuf_release(&mi->inbody_header_accum);
free(mi->message_id);
- for (i = 0; mi->p_hdr_data[i]; i++)
- strbuf_release(mi->p_hdr_data[i]);
+ if (mi->p_hdr_data)
+ for (i = 0; mi->p_hdr_data[i]; i++)
+ strbuf_release(mi->p_hdr_data[i]);
free(mi->p_hdr_data);
- for (i = 0; mi->s_hdr_data[i]; i++)
- strbuf_release(mi->s_hdr_data[i]);
+ if (mi->s_hdr_data)
+ for (i = 0; mi->s_hdr_data[i]; i++)
+ strbuf_release(mi->s_hdr_data[i]);
free(mi->s_hdr_data);
while (mi->content < mi->content_top) {
}
/*
- * A tree "hash1" has a subdirectory at "prefix". Come up with a
- * tree object by replacing it with another tree "hash2".
+ * A tree "oid1" has a subdirectory at "prefix". Come up with a tree object by
+ * replacing it with another tree "oid2".
*/
-static int splice_tree(const unsigned char *hash1,
- const char *prefix,
- const unsigned char *hash2,
- unsigned char *result)
+static int splice_tree(const struct object_id *oid1, const char *prefix,
+ const struct object_id *oid2, struct object_id *result)
{
char *subpath;
int toplen;
char *buf;
unsigned long sz;
struct tree_desc desc;
- unsigned char *rewrite_here;
- const unsigned char *rewrite_with;
- unsigned char subtree[20];
+ struct object_id *rewrite_here;
+ const struct object_id *rewrite_with;
+ struct object_id subtree;
enum object_type type;
int status;
if (*subpath)
subpath++;
- buf = read_sha1_file(hash1, &type, &sz);
+ buf = read_sha1_file(oid1->hash, &type, &sz);
if (!buf)
- die("cannot read tree %s", sha1_to_hex(hash1));
+ die("cannot read tree %s", oid_to_hex(oid1));
init_tree_desc(&desc, buf, sz);
rewrite_here = NULL;
if (strlen(name) == toplen &&
!memcmp(name, prefix, toplen)) {
if (!S_ISDIR(mode))
- die("entry %s in tree %s is not a tree",
- name, sha1_to_hex(hash1));
- rewrite_here = (unsigned char *) oid->hash;
+ die("entry %s in tree %s is not a tree", name,
+ oid_to_hex(oid1));
+ rewrite_here = (struct object_id *)oid;
break;
}
update_tree_entry(&desc);
}
if (!rewrite_here)
- die("entry %.*s not found in tree %s",
- toplen, prefix, sha1_to_hex(hash1));
+ die("entry %.*s not found in tree %s", toplen, prefix,
+ oid_to_hex(oid1));
if (*subpath) {
- status = splice_tree(rewrite_here, subpath, hash2, subtree);
+ status = splice_tree(rewrite_here, subpath, oid2, &subtree);
if (status)
return status;
- rewrite_with = subtree;
+ rewrite_with = &subtree;
+ } else {
+ rewrite_with = oid2;
}
- else
- rewrite_with = hash2;
- hashcpy(rewrite_here, rewrite_with);
- status = write_sha1_file(buf, sz, tree_type, result);
+ oidcpy(rewrite_here, rewrite_with);
+ status = write_object_file(buf, sz, tree_type, result);
free(buf);
return status;
}
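
A minimal usage sketch (identifiers hypothetical): to graft tree S into
tree T in place of the existing subtree at "a/b":

    struct object_id result;
    if (splice_tree(&T_oid, "a/b", &S_oid, &result))
        die("failed to splice tree");

splice_tree() peels one path component per recursion level and rewrites
each enclosing tree object on the way back up, so "result" names a new
top-level tree.
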
if (!*add_prefix)
return;
- splice_tree(hash1->hash, add_prefix, hash2->hash, shifted->hash);
+ splice_tree(hash1, add_prefix, hash2, shifted);
}
/*
* shift tree2 down by adding shift_prefix above it
* to match tree1.
*/
- splice_tree(hash1->hash, shift_prefix, hash2->hash, shifted->hash);
+ splice_tree(hash1, shift_prefix, hash2, shifted);
else
/*
* shift tree2 up by removing shift_prefix from it
struct rename {
struct diff_filepair *pair;
+ /*
+ * Purpose of src_entry and dst_entry:
+ *
+ * If 'before' is renamed to 'after' then src_entry will contain
+ * the versions of 'before' from the merge_base, HEAD, and MERGE in
+ * stages 1, 2, and 3; dst_entry will contain the respective
+ * versions of 'after' in corresponding locations. Thus, we have a
+ * total of six modes and oids, though some will be null. (Stage 0
+ * is ignored; we're interested in handling conflicts.)
+ *
+ * Since we don't turn on break-rewrites by default, neither
+ * src_entry nor dst_entry can have all three of their stages have
+ * non-null oids, meaning at most four of the six will be non-null.
+ * Also, since this is a rename, both src_entry and dst_entry will
+ * have at least one non-null oid, meaning at least two will be
+ * non-null. Of the six oids, a typical rename will have three be
+ * non-null. Only two implies a rename/delete, and four implies a
+ * rename/add.
+ */
struct stage_data *src_entry;
struct stage_data *dst_entry;
unsigned processed:1;
if ((merge_status < 0) || !result_buf.ptr)
ret = err(o, _("Failed to execute internal merge"));
- if (!ret && write_sha1_file(result_buf.ptr, result_buf.size,
- blob_type, result->oid.hash))
+ if (!ret &&
+ write_object_file(result_buf.ptr, result_buf.size,
+ blob_type, &result->oid))
ret = err(o, _("Unable to add %s to database"),
a->path);
get_files_dirs(o, merge);
entries = get_unmerged();
- record_df_conflict_files(o, entries);
re_head = get_renames(o, head, common, head, merge, entries);
re_merge = get_renames(o, merge, common, head, merge, entries);
clean = process_renames(o, re_head, re_merge);
+ record_df_conflict_files(o, entries);
if (clean < 0)
goto cleanup;
for (i = entries->nr-1; 0 <= i; i--) {
+++ /dev/null
-#include "cache.h"
-#include "mru.h"
-
-void mru_append(struct mru *mru, void *item)
-{
- struct mru_entry *cur = xmalloc(sizeof(*cur));
- cur->item = item;
- cur->prev = mru->tail;
- cur->next = NULL;
-
- if (mru->tail)
- mru->tail->next = cur;
- else
- mru->head = cur;
- mru->tail = cur;
-}
-
-void mru_mark(struct mru *mru, struct mru_entry *entry)
-{
- /* If we're already at the front of the list, nothing to do */
- if (mru->head == entry)
- return;
-
- /* Otherwise, remove us from our current slot... */
- if (entry->prev)
- entry->prev->next = entry->next;
- if (entry->next)
- entry->next->prev = entry->prev;
- else
- mru->tail = entry->prev;
-
- /* And insert us at the beginning. */
- entry->prev = NULL;
- entry->next = mru->head;
- if (mru->head)
- mru->head->prev = entry;
- mru->head = entry;
-}
-
-void mru_clear(struct mru *mru)
-{
- struct mru_entry *p = mru->head;
-
- while (p) {
- struct mru_entry *to_free = p;
- p = p->next;
- free(to_free);
- }
- mru->head = mru->tail = NULL;
-}
+++ /dev/null
-#ifndef MRU_H
-#define MRU_H
-
-/**
- * A simple most-recently-used cache, backed by a doubly-linked list.
- *
- * Usage is roughly:
- *
- * // Create a list. Zero-initialization is required.
- * static struct mru cache;
- * mru_append(&cache, item);
- * ...
- *
- * // Iterate in MRU order.
- * struct mru_entry *p;
- * for (p = cache.head; p; p = p->next) {
- * if (matches(p->item))
- * break;
- * }
- *
- * // Mark an item as used, moving it to the front of the list.
- * mru_mark(&cache, p);
- *
- * // Reset the list to empty, cleaning up all resources.
- * mru_clear(&cache);
- *
- * Note that you SHOULD NOT call mru_mark() and then continue traversing the
- * list; it reorders the marked item to the front of the list, and therefore
- * you will begin traversing the whole list again.
- */
-
-struct mru_entry {
- void *item;
- struct mru_entry *prev, *next;
-};
-
-struct mru {
- struct mru_entry *head, *tail;
-};
-
-void mru_append(struct mru *mru, void *item);
-void mru_mark(struct mru *mru, struct mru_entry *entry);
-void mru_clear(struct mru *mru);
-
-#endif /* MRU_H */
static void lazy_init_name_hash(struct index_state *istate)
{
+ uint64_t start = getnanotime();
+
if (istate->name_hash_initialized)
return;
hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr);
}
istate->name_hash_initialized = 1;
+ trace_performance_since(start, "initialize name hash");
}
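
This follows the usual trace_performance_since() pattern: record a
getnanotime() timestamp on entry and report on the way out. Without
asserting the exact formatting, a run with GIT_TRACE_PERFORMANCE=1
would emit a line along the lines of:

    performance: 0.001234567 s: initialize name hash
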
/*
if (*ptr == '/') {
struct dir_entry *dir;
- ptr++;
- dir = find_dir_entry(istate, name, ptr - name + 1);
+ dir = find_dir_entry(istate, name, ptr - name);
if (dir) {
memcpy((void *)startPtr, dir->name + (startPtr - name), ptr - startPtr);
- startPtr = ptr;
+ startPtr = ptr + 1;
}
+ ptr++;
}
}
}
if (!c->tree.dirty)
return 0;
- if (write_notes_tree(&c->tree, tree_oid.hash))
+ if (write_notes_tree(&c->tree, &tree_oid))
return -1;
- if (commit_tree(c->validity, strlen(c->validity), tree_oid.hash, NULL,
- commit_oid.hash, NULL, NULL) < 0)
+ if (commit_tree(c->validity, strlen(c->validity), &tree_oid, NULL,
+ &commit_oid, NULL, NULL) < 0)
return -1;
if (update_ref("update notes cache", c->tree.update_ref, &commit_oid,
NULL, 0, UPDATE_REFS_QUIET_ON_ERR) < 0)
{
struct object_id value_oid;
- if (write_sha1_file(data, size, "blob", value_oid.hash) < 0)
+ if (write_object_file(data, size, "blob", &value_oid) < 0)
return -1;
return add_note(&c->tree, key_oid, &value_oid, NULL);
}
struct commit_list *parents = NULL;
commit_list_insert(remote, &parents); /* LIFO order */
commit_list_insert(local, &parents);
- create_notes_commit(local_tree, parents,
- o->commit_msg.buf, o->commit_msg.len,
- result_oid->hash);
+ create_notes_commit(local_tree, parents, o->commit_msg.buf,
+ o->commit_msg.len, result_oid);
}
found_result:
strbuf_setlen(&path, baselen);
}
- create_notes_commit(partial_tree, partial_commit->parents,
- msg, strlen(msg), result_oid->hash);
+ create_notes_commit(partial_tree, partial_commit->parents, msg,
+ strlen(msg), result_oid);
unuse_commit_buffer(partial_commit, buffer);
if (o->verbosity >= 4)
printf("Finalized notes merge commit: %s\n",
void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
const char *msg, size_t msg_len,
- unsigned char *result_sha1)
+ struct object_id *result_oid)
{
struct object_id tree_oid;
assert(t->initialized);
- if (write_notes_tree(t, tree_oid.hash))
+ if (write_notes_tree(t, &tree_oid))
die("Failed to write notes tree to database");
if (!parents) {
/* else: t->ref points to nothing, assume root/orphan commit */
}
- if (commit_tree(msg, msg_len, tree_oid.hash, parents, result_sha1, NULL, NULL))
+ if (commit_tree(msg, msg_len, &tree_oid, parents, result_oid, NULL,
+ NULL))
die("Failed to commit notes tree to database");
}
strbuf_addstr(&buf, msg);
strbuf_complete_line(&buf);
- create_notes_commit(t, NULL, buf.buf, buf.len, commit_oid.hash);
+ create_notes_commit(t, NULL, buf.buf, buf.len, &commit_oid);
strbuf_insert(&buf, 0, "notes: ", 7); /* commit message starts at index 7 */
update_ref(buf.buf, t->update_ref, &commit_oid, NULL, 0,
UPDATE_REFS_DIE_ON_ERR);
 * The resulting commit object id is stored in result_oid.
*/
void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
- const char *msg, size_t msg_len, unsigned char *result_sha1);
+ const char *msg, size_t msg_len,
+ struct object_id *result_oid);
void commit_notes(struct notes_tree *t, const char *msg);
if (!oidcmp(&l->val_oid, &entry->val_oid))
return 0;
- ret = combine_notes(l->val_oid.hash,
- entry->val_oid.hash);
+ ret = combine_notes(&l->val_oid,
+ &entry->val_oid);
if (!ret && is_null_oid(&l->val_oid))
note_tree_remove(t, tree, n, entry);
free(entry);
ret = tree_write_stack_finish_subtree(n);
if (ret)
return ret;
- ret = write_sha1_file(n->buf.buf, n->buf.len, tree_type, s.hash);
+ ret = write_object_file(n->buf.buf, n->buf.len, tree_type, &s);
if (ret)
return ret;
strbuf_release(&n->buf);
return 0;
}
-int combine_notes_concatenate(unsigned char *cur_sha1,
- const unsigned char *new_sha1)
+int combine_notes_concatenate(struct object_id *cur_oid,
+ const struct object_id *new_oid)
{
char *cur_msg = NULL, *new_msg = NULL, *buf;
unsigned long cur_len, new_len, buf_len;
int ret;
/* read in both note blob objects */
- if (!is_null_sha1(new_sha1))
- new_msg = read_sha1_file(new_sha1, &new_type, &new_len);
+ if (!is_null_oid(new_oid))
+ new_msg = read_sha1_file(new_oid->hash, &new_type, &new_len);
if (!new_msg || !new_len || new_type != OBJ_BLOB) {
free(new_msg);
return 0;
}
- if (!is_null_sha1(cur_sha1))
- cur_msg = read_sha1_file(cur_sha1, &cur_type, &cur_len);
+ if (!is_null_oid(cur_oid))
+ cur_msg = read_sha1_file(cur_oid->hash, &cur_type, &cur_len);
if (!cur_msg || !cur_len || cur_type != OBJ_BLOB) {
free(cur_msg);
free(new_msg);
- hashcpy(cur_sha1, new_sha1);
+ oidcpy(cur_oid, new_oid);
return 0;
}
free(new_msg);
/* create a new blob object from buf */
- ret = write_sha1_file(buf, buf_len, blob_type, cur_sha1);
+ ret = write_object_file(buf, buf_len, blob_type, cur_oid);
free(buf);
return ret;
}
-int combine_notes_overwrite(unsigned char *cur_sha1,
- const unsigned char *new_sha1)
+int combine_notes_overwrite(struct object_id *cur_oid,
+ const struct object_id *new_oid)
{
- hashcpy(cur_sha1, new_sha1);
+ oidcpy(cur_oid, new_oid);
return 0;
}
-int combine_notes_ignore(unsigned char *cur_sha1,
- const unsigned char *new_sha1)
+int combine_notes_ignore(struct object_id *cur_oid,
+ const struct object_id *new_oid)
{
return 0;
}
* newlines removed.
*/
static int string_list_add_note_lines(struct string_list *list,
- const unsigned char *sha1)
+ const struct object_id *oid)
{
char *data;
unsigned long len;
enum object_type t;
- if (is_null_sha1(sha1))
+ if (is_null_oid(oid))
return 0;
/* read_sha1_file NUL-terminates */
- data = read_sha1_file(sha1, &t, &len);
+ data = read_sha1_file(oid->hash, &t, &len);
if (t != OBJ_BLOB || !data || !len) {
free(data);
return t != OBJ_BLOB || !data;
return 0;
}
-int combine_notes_cat_sort_uniq(unsigned char *cur_sha1,
- const unsigned char *new_sha1)
+int combine_notes_cat_sort_uniq(struct object_id *cur_oid,
+ const struct object_id *new_oid)
{
struct string_list sort_uniq_list = STRING_LIST_INIT_DUP;
struct strbuf buf = STRBUF_INIT;
int ret = 1;
/* read both note blob objects into unique_lines */
- if (string_list_add_note_lines(&sort_uniq_list, cur_sha1))
+ if (string_list_add_note_lines(&sort_uniq_list, cur_oid))
goto out;
- if (string_list_add_note_lines(&sort_uniq_list, new_sha1))
+ if (string_list_add_note_lines(&sort_uniq_list, new_oid))
goto out;
string_list_remove_empty_items(&sort_uniq_list, 0);
string_list_sort(&sort_uniq_list);
string_list_join_lines_helper, &buf))
goto out;
- ret = write_sha1_file(buf.buf, buf.len, blob_type, cur_sha1);
+ ret = write_object_file(buf.buf, buf.len, blob_type, cur_oid);
out:
strbuf_release(&buf);
return for_each_note_helper(t, t->root, 0, 0, flags, fn, cb_data);
}
-int write_notes_tree(struct notes_tree *t, unsigned char *result)
+int write_notes_tree(struct notes_tree *t, struct object_id *result)
{
struct tree_write_stack root;
struct write_each_note_data cb_data;
int ret;
+ int flags;
if (!t)
t = &default_notes_tree;
cb_data.next_non_note = t->first_non_note;
/* Write tree objects representing current notes tree */
- ret = for_each_note(t, FOR_EACH_NOTE_DONT_UNPACK_SUBTREES |
- FOR_EACH_NOTE_YIELD_SUBTREES,
- write_each_note, &cb_data) ||
- write_each_non_note_until(NULL, &cb_data) ||
- tree_write_stack_finish_subtree(&root) ||
- write_sha1_file(root.buf.buf, root.buf.len, tree_type, result);
+ flags = FOR_EACH_NOTE_DONT_UNPACK_SUBTREES |
+ FOR_EACH_NOTE_YIELD_SUBTREES;
+ ret = for_each_note(t, flags, write_each_note, &cb_data) ||
+ write_each_non_note_until(NULL, &cb_data) ||
+ tree_write_stack_finish_subtree(&root) ||
+ write_object_file(root.buf.buf, root.buf.len, tree_type, result);
strbuf_release(&root.buf);
return ret;
}
* When adding a new note annotating the same object as an existing note, it is
* up to the caller to decide how to combine the two notes. The decision is
* made by passing in a function of the following form. The function accepts
- * two SHA1s -- of the existing note and the new note, respectively. The
+ * two object_ids -- of the existing note and the new note, respectively. The
* function then combines the notes in whatever way it sees fit, and writes the
- * resulting SHA1 into the first SHA1 argument (cur_sha1). A non-zero return
+ * resulting oid into the first argument (cur_oid). A non-zero return
* value indicates failure.
*
- * The two given SHA1s shall both be non-NULL and different from each other.
- * Either of them (but not both) may be == null_sha1, which indicates an
- * empty/non-existent note. If the resulting SHA1 (cur_sha1) is == null_sha1,
+ * The two given object_ids shall both be non-NULL and different from each
+ * other. Either of them (but not both) may be == null_oid, which indicates an
+ * empty/non-existent note. If the resulting oid (cur_oid) is == null_oid,
* the note will be removed from the notes tree.
*
* The default combine_notes function (you get this when passing NULL) is
* combine_notes_concatenate(), which appends the contents of the new note to
* the contents of the existing note.
*/
-typedef int (*combine_notes_fn)(unsigned char *cur_sha1, const unsigned char *new_sha1);
+typedef int (*combine_notes_fn)(struct object_id *cur_oid,
+ const struct object_id *new_oid);
/* Common notes combinators */
-int combine_notes_concatenate(unsigned char *cur_sha1, const unsigned char *new_sha1);
-int combine_notes_overwrite(unsigned char *cur_sha1, const unsigned char *new_sha1);
-int combine_notes_ignore(unsigned char *cur_sha1, const unsigned char *new_sha1);
-int combine_notes_cat_sort_uniq(unsigned char *cur_sha1, const unsigned char *new_sha1);
+int combine_notes_concatenate(struct object_id *cur_oid,
+ const struct object_id *new_oid);
+int combine_notes_overwrite(struct object_id *cur_oid,
+ const struct object_id *new_oid);
+int combine_notes_ignore(struct object_id *cur_oid,
+ const struct object_id *new_oid);
+int combine_notes_cat_sort_uniq(struct object_id *cur_oid,
+ const struct object_id *new_oid);
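
A minimal sketch of a caller-supplied combiner that satisfies the
contract above (this one keeps the existing note, i.e. it behaves like
combine_notes_ignore(); the name is hypothetical):

    static int combine_notes_keep_first(struct object_id *cur_oid,
                                        const struct object_id *new_oid)
    {
        return 0; /* cur_oid untouched; non-zero would signal failure */
    }

Passing NULL as the combine_notes_fn selects the default,
combine_notes_concatenate().
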
/*
* Notes tree object
* Write the given notes_tree structure to the object database
*
* Creates a new tree object encapsulating the current state of the given
- * notes_tree, and stores its SHA1 into the 'result' argument.
+ * notes_tree, and stores its object id into the 'result' argument.
*
* Returns zero on success, non-zero on failure.
*
* this function has returned zero. Please also remember to create a
* corresponding commit object, and update the appropriate notes ref.
*/
-int write_notes_tree(struct notes_tree *t, unsigned char *result);
+int write_notes_tree(struct notes_tree *t, struct object_id *result);
/* Flags controlling the operation of prune */
#define NOTES_PRUNE_VERBOSE 1
if (obj && obj->parsed)
return obj;
- if ((obj && obj->type == OBJ_BLOB) ||
+ if ((obj && obj->type == OBJ_BLOB && has_object_file(oid)) ||
(!obj && has_object_file(oid) &&
sha1_object_info(oid->hash, NULL) == OBJ_BLOB)) {
if (check_sha1_signature(repl, NULL, 0, NULL) < 0) {
}
-static int sha1write_ewah_helper(void *f, const void *buf, size_t len)
+static int hashwrite_ewah_helper(void *f, const void *buf, size_t len)
{
- /* sha1write will die on error */
- sha1write(f, buf, len);
+ /* hashwrite will die on error */
+ hashwrite(f, buf, len);
return len;
}
/**
* Write the bitmap index to disk
*/
-static inline void dump_bitmap(struct sha1file *f, struct ewah_bitmap *bitmap)
+static inline void dump_bitmap(struct hashfile *f, struct ewah_bitmap *bitmap)
{
- if (ewah_serialize_to(bitmap, sha1write_ewah_helper, f) < 0)
+ if (ewah_serialize_to(bitmap, hashwrite_ewah_helper, f) < 0)
die("Failed to write bitmap index");
}
return index[pos]->oid.hash;
}
-static void write_selected_commits_v1(struct sha1file *f,
+static void write_selected_commits_v1(struct hashfile *f,
struct pack_idx_entry **index,
uint32_t index_nr)
{
if (commit_pos < 0)
die("BUG: trying to write commit not in index");
- sha1write_be32(f, commit_pos);
- sha1write_u8(f, stored->xor_offset);
- sha1write_u8(f, stored->flags);
+ hashwrite_be32(f, commit_pos);
+ hashwrite_u8(f, stored->xor_offset);
+ hashwrite_u8(f, stored->flags);
dump_bitmap(f, stored->write_as);
}
}
-static void write_hash_cache(struct sha1file *f,
+static void write_hash_cache(struct hashfile *f,
struct pack_idx_entry **index,
uint32_t index_nr)
{
for (i = 0; i < index_nr; ++i) {
struct object_entry *entry = (struct object_entry *)index[i];
uint32_t hash_value = htonl(entry->hash);
- sha1write(f, &hash_value, sizeof(hash_value));
+ hashwrite(f, &hash_value, sizeof(hash_value));
}
}
static uint16_t default_version = 1;
static uint16_t flags = BITMAP_OPT_FULL_DAG;
struct strbuf tmp_file = STRBUF_INIT;
- struct sha1file *f;
+ struct hashfile *f;
struct bitmap_disk_header header;
int fd = odb_mkstemp(&tmp_file, "pack/tmp_bitmap_XXXXXX");
- f = sha1fd(fd, tmp_file.buf);
+ f = hashfd(fd, tmp_file.buf);
memcpy(header.magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE));
header.version = htons(default_version);
header.entry_count = htonl(writer.selected_nr);
hashcpy(header.checksum, writer.pack_checksum);
- sha1write(f, &header, sizeof(header));
+ hashwrite(f, &header, sizeof(header));
dump_bitmap(f, writer.commits);
dump_bitmap(f, writer.trees);
dump_bitmap(f, writer.blobs);
if (options & BITMAP_OPT_HASH_CACHE)
write_hash_cache(f, index, index_nr);
- sha1close(f, NULL, CSUM_FSYNC);
+ hashclose(f, NULL, CSUM_FSYNC);
if (adjust_shared_perm(tmp_file.buf))
die_errno("unable to make temporary bitmap file readable");
} while (len);
index_crc = p->index_data;
- index_crc += 2 + 256 + p->num_objects * (20/4) + nr;
+ index_crc += 2 + 256 + p->num_objects * (the_hash_algo->rawsz/4) + nr;
return data_crc != ntohl(*index_crc);
}
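
To unpack the pointer arithmetic above: index_crc is a uint32_t pointer
into the mapped version-2 .idx file, so every offset is counted in
4-byte words -- 2 words of header (magic and version), 256 fanout
entries, num_objects object names of rawsz/4 words each (20/4 = 5 for
SHA-1), and finally entry nr within the CRC32 table itself.
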
{
off_t index_size = p->index_size;
const unsigned char *index_base = p->index_data;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
unsigned char hash[GIT_MAX_RAWSZ], *pack_sig;
off_t offset = 0, pack_sig_ofs = 0;
uint32_t nr_objects, i;
if (!is_pack_valid(p))
return error("packfile %s cannot be accessed", p->pack_name);
- git_SHA1_Init(&ctx);
+ the_hash_algo->init_fn(&ctx);
do {
unsigned long remaining;
unsigned char *in = use_pack(p, w_curs, offset, &remaining);
offset += remaining;
if (!pack_sig_ofs)
- pack_sig_ofs = p->pack_size - 20;
+ pack_sig_ofs = p->pack_size - the_hash_algo->rawsz;
if (offset > pack_sig_ofs)
remaining -= (unsigned int)(offset - pack_sig_ofs);
- git_SHA1_Update(&ctx, in, remaining);
+ the_hash_algo->update_fn(&ctx, in, remaining);
} while (offset < pack_sig_ofs);
- git_SHA1_Final(hash, &ctx);
+ the_hash_algo->final_fn(hash, &ctx);
pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL);
if (hashcmp(hash, pack_sig))
- err = error("%s SHA1 checksum mismatch",
+ err = error("%s pack checksum mismatch",
p->pack_name);
- if (hashcmp(index_base + index_size - 40, pack_sig))
- err = error("%s SHA1 does not match its index",
+ if (hashcmp(index_base + index_size - the_hash_algo->hexsz, pack_sig))
+ err = error("%s pack checksum does not match its index",
p->pack_name);
unuse_pack(w_curs);
{
off_t index_size;
const unsigned char *index_base;
- git_SHA_CTX ctx;
- unsigned char sha1[20];
+ git_hash_ctx ctx;
+ unsigned char hash[GIT_MAX_RAWSZ];
int err = 0;
if (open_pack_index(p))
index_base = p->index_data;
 /* Verify the checksum of the index file */
- git_SHA1_Init(&ctx);
- git_SHA1_Update(&ctx, index_base, (unsigned int)(index_size - 20));
- git_SHA1_Final(sha1, &ctx);
- if (hashcmp(sha1, index_base + index_size - 20))
- err = error("Packfile index for %s SHA1 mismatch",
+ the_hash_algo->init_fn(&ctx);
+ the_hash_algo->update_fn(&ctx, index_base, (unsigned int)(index_size - the_hash_algo->rawsz));
+ the_hash_algo->final_fn(hash, &ctx);
+ if (hashcmp(hash, index_base + index_size - the_hash_algo->rawsz))
+ err = error("Packfile index for %s hash mismatch",
p->pack_name);
return err;
}
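
These conversions all go through the_hash_algo, the hash-function
transition's per-algorithm vtable: its init_fn/update_fn/final_fn
members replace the SHA-1-specific git_SHA1_*() calls, while rawsz
(20 bytes for SHA-1) and hexsz (40) replace the formerly hard-coded
lengths.
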
if (!(off & 0x80000000)) {
p->revindex[i].offset = off;
} else {
- p->revindex[i].offset =
- ((uint64_t)ntohl(*off_64++)) << 32;
- p->revindex[i].offset |=
- ntohl(*off_64++);
+ p->revindex[i].offset = get_be64(off_64);
+ off_64 += 2;
}
p->revindex[i].nr = i;
}
int nr_objects, const struct pack_idx_option *opts,
const unsigned char *sha1)
{
- struct sha1file *f;
+ struct hashfile *f;
struct pack_idx_entry **sorted_by_sha, **list, **last;
off_t last_obj_offset = 0;
uint32_t array[256];
if (opts->flags & WRITE_IDX_VERIFY) {
assert(index_name);
- f = sha1fd_check(index_name);
+ f = hashfd_check(index_name);
} else {
if (!index_name) {
struct strbuf tmp_file = STRBUF_INIT;
if (fd < 0)
die_errno("unable to create '%s'", index_name);
}
- f = sha1fd(fd, index_name);
+ f = hashfd(fd, index_name);
}
/* if last object's offset is >= 2^31 we should use index V2 */
struct pack_idx_header hdr;
hdr.idx_signature = htonl(PACK_IDX_SIGNATURE);
hdr.idx_version = htonl(index_version);
- sha1write(f, &hdr, sizeof(hdr));
+ hashwrite(f, &hdr, sizeof(hdr));
}
/*
array[i] = htonl(next - sorted_by_sha);
list = next;
}
- sha1write(f, array, 256 * 4);
+ hashwrite(f, array, 256 * 4);
/*
 * Write the actual SHA1 entries.
struct pack_idx_entry *obj = *list++;
if (index_version < 2) {
uint32_t offset = htonl(obj->offset);
- sha1write(f, &offset, 4);
+ hashwrite(f, &offset, 4);
}
- sha1write(f, obj->oid.hash, 20);
+ hashwrite(f, obj->oid.hash, the_hash_algo->rawsz);
if ((opts->flags & WRITE_IDX_STRICT) &&
(i && !oidcmp(&list[-2]->oid, &obj->oid)))
die("The same object %s appears twice in the pack",
for (i = 0; i < nr_objects; i++) {
struct pack_idx_entry *obj = *list++;
uint32_t crc32_val = htonl(obj->crc32);
- sha1write(f, &crc32_val, 4);
+ hashwrite(f, &crc32_val, 4);
}
/* write the 32-bit offset table */
? (0x80000000 | nr_large_offset++)
: obj->offset);
offset = htonl(offset);
- sha1write(f, &offset, 4);
+ hashwrite(f, &offset, 4);
}
/* write the large offset table */
continue;
split[0] = htonl(offset >> 32);
split[1] = htonl(offset & 0xffffffff);
- sha1write(f, split, 8);
+ hashwrite(f, split, 8);
nr_large_offset--;
}
}
- sha1write(f, sha1, 20);
- sha1close(f, NULL, ((opts->flags & WRITE_IDX_VERIFY)
+ hashwrite(f, sha1, the_hash_algo->rawsz);
+ hashclose(f, NULL, ((opts->flags & WRITE_IDX_VERIFY)
? CSUM_CLOSE : CSUM_FSYNC));
return index_name;
}
-off_t write_pack_header(struct sha1file *f, uint32_t nr_entries)
+off_t write_pack_header(struct hashfile *f, uint32_t nr_entries)
{
struct pack_header hdr;
hdr.hdr_signature = htonl(PACK_SIGNATURE);
hdr.hdr_version = htonl(PACK_VERSION);
hdr.hdr_entries = htonl(nr_entries);
- sha1write(f, &hdr, sizeof(hdr));
+ hashwrite(f, &hdr, sizeof(hdr));
return sizeof(hdr);
}
 * interested in the resulting checksum of pack data above partial_pack_offset.
*/
void fixup_pack_header_footer(int pack_fd,
- unsigned char *new_pack_sha1,
+ unsigned char *new_pack_hash,
const char *pack_name,
uint32_t object_count,
- unsigned char *partial_pack_sha1,
+ unsigned char *partial_pack_hash,
off_t partial_pack_offset)
{
int aligned_sz, buf_sz = 8 * 1024;
- git_SHA_CTX old_sha1_ctx, new_sha1_ctx;
+ git_hash_ctx old_hash_ctx, new_hash_ctx;
struct pack_header hdr;
char *buf;
ssize_t read_result;
- git_SHA1_Init(&old_sha1_ctx);
- git_SHA1_Init(&new_sha1_ctx);
+ the_hash_algo->init_fn(&old_hash_ctx);
+ the_hash_algo->init_fn(&new_hash_ctx);
if (lseek(pack_fd, 0, SEEK_SET) != 0)
die_errno("Failed seeking to start of '%s'", pack_name);
pack_name);
if (lseek(pack_fd, 0, SEEK_SET) != 0)
die_errno("Failed seeking to start of '%s'", pack_name);
- git_SHA1_Update(&old_sha1_ctx, &hdr, sizeof(hdr));
+ the_hash_algo->update_fn(&old_hash_ctx, &hdr, sizeof(hdr));
hdr.hdr_entries = htonl(object_count);
- git_SHA1_Update(&new_sha1_ctx, &hdr, sizeof(hdr));
+ the_hash_algo->update_fn(&new_hash_ctx, &hdr, sizeof(hdr));
write_or_die(pack_fd, &hdr, sizeof(hdr));
partial_pack_offset -= sizeof(hdr);
aligned_sz = buf_sz - sizeof(hdr);
for (;;) {
ssize_t m, n;
- m = (partial_pack_sha1 && partial_pack_offset < aligned_sz) ?
+ m = (partial_pack_hash && partial_pack_offset < aligned_sz) ?
partial_pack_offset : aligned_sz;
n = xread(pack_fd, buf, m);
if (!n)
break;
if (n < 0)
die_errno("Failed to checksum '%s'", pack_name);
- git_SHA1_Update(&new_sha1_ctx, buf, n);
+ the_hash_algo->update_fn(&new_hash_ctx, buf, n);
aligned_sz -= n;
if (!aligned_sz)
aligned_sz = buf_sz;
- if (!partial_pack_sha1)
+ if (!partial_pack_hash)
continue;
- git_SHA1_Update(&old_sha1_ctx, buf, n);
+ the_hash_algo->update_fn(&old_hash_ctx, buf, n);
partial_pack_offset -= n;
if (partial_pack_offset == 0) {
- unsigned char sha1[20];
- git_SHA1_Final(sha1, &old_sha1_ctx);
- if (hashcmp(sha1, partial_pack_sha1) != 0)
+ unsigned char hash[GIT_MAX_RAWSZ];
+ the_hash_algo->final_fn(hash, &old_hash_ctx);
+ if (hashcmp(hash, partial_pack_hash) != 0)
die("Unexpected checksum for %s "
"(disk corruption?)", pack_name);
/*
* pack, which also means making partial_pack_offset
* big enough not to matter anymore.
*/
- git_SHA1_Init(&old_sha1_ctx);
+ the_hash_algo->init_fn(&old_hash_ctx);
partial_pack_offset = ~partial_pack_offset;
partial_pack_offset -= MSB(partial_pack_offset, 1);
}
}
free(buf);
- if (partial_pack_sha1)
- git_SHA1_Final(partial_pack_sha1, &old_sha1_ctx);
- git_SHA1_Final(new_pack_sha1, &new_sha1_ctx);
- write_or_die(pack_fd, new_pack_sha1, 20);
+ if (partial_pack_hash)
+ the_hash_algo->final_fn(partial_pack_hash, &old_hash_ctx);
+ the_hash_algo->final_fn(new_pack_hash, &new_hash_ctx);
+ write_or_die(pack_fd, new_pack_hash, the_hash_algo->rawsz);
fsync_or_die(pack_fd, pack_name);
}
char *index_pack_lockfile(int ip_out)
{
- char packname[46];
+ char packname[GIT_MAX_HEXSZ + 6];
+ const int len = the_hash_algo->hexsz + 6;
/*
* The first thing we expect from index-pack's output
* case, we need it to remove the corresponding .keep file
* later on. If we don't get that then tough luck with it.
*/
- if (read_in_full(ip_out, packname, 46) == 46 && packname[45] == '\n') {
+ if (read_in_full(ip_out, packname, len) == len && packname[len-1] == '\n') {
const char *name;
- packname[45] = 0;
+ packname[len-1] = 0;
if (skip_prefix(packname, "keep\t", &name))
return xstrfmt("%s/pack/pack-%s.keep",
get_object_directory(), name);
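
The length arithmetic reproduces the old hard-coded 46:
strlen("keep\t") + hexsz + strlen("\n") is 5 + 40 + 1 for SHA-1. In
other words, index-pack is expected to emit a line of the form

    keep\t<40 hex digits>\n

from which the pack name is extracted to build the .keep lockfile path.
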
return n;
}
-struct sha1file *create_tmp_packfile(char **pack_tmp_name)
+struct hashfile *create_tmp_packfile(char **pack_tmp_name)
{
struct strbuf tmpname = STRBUF_INIT;
int fd;
fd = odb_mkstemp(&tmpname, "pack/tmp_pack_XXXXXX");
*pack_tmp_name = strbuf_detach(&tmpname, NULL);
- return sha1fd(fd, *pack_tmp_name);
+ return hashfd(fd, *pack_tmp_name);
}
void finish_tmp_packfile(struct strbuf *name_buffer,
extern int check_pack_crc(struct packed_git *p, struct pack_window **w_curs, off_t offset, off_t len, unsigned int nr);
extern int verify_pack_index(struct packed_git *);
extern int verify_pack(struct packed_git *, verify_fn fn, struct progress *, uint32_t);
-extern off_t write_pack_header(struct sha1file *f, uint32_t);
+extern off_t write_pack_header(struct hashfile *f, uint32_t);
extern void fixup_pack_header_footer(int, unsigned char *, const char *, uint32_t, unsigned char *, off_t);
extern char *index_pack_lockfile(int fd);
#define PH_ERROR_PROTOCOL (-3)
extern int read_pack_header(int fd, struct pack_header *);
-extern struct sha1file *create_tmp_packfile(char **pack_tmp_name);
+extern struct hashfile *create_tmp_packfile(char **pack_tmp_name);
extern void finish_tmp_packfile(struct strbuf *name_buffer, const char *pack_tmp_name, struct pack_idx_entry **written_list, uint32_t nr_written, struct pack_idx_option *pack_idx_opts, unsigned char sha1[]);
#endif
#include "cache.h"
-#include "mru.h"
+#include "list.h"
#include "pack.h"
#include "dir.h"
#include "mergesort.h"
#include "list.h"
#include "streaming.h"
#include "sha1-lookup.h"
+#include "commit.h"
+#include "object.h"
+#include "tag.h"
+#include "tree-walk.h"
+#include "tree.h"
char *odb_pack_name(struct strbuf *buf,
const unsigned char *sha1,
static size_t peak_pack_mapped;
static size_t pack_mapped;
struct packed_git *packed_git;
-struct mru packed_git_mru;
+LIST_HEAD(packed_git_mru);
#define SZ_FMT PRIuMAX
static inline uintmax_t sz_fmt(size_t s) { return s; }
return NULL;
/*
- * ".pack" is long enough to hold any suffix we're adding (and
+ * ".promisor" is long enough to hold any suffix we're adding (and
 * the use of xsnprintf double-checks that)
*/
- alloc = st_add3(path_len, strlen(".pack"), 1);
+ alloc = st_add3(path_len, strlen(".promisor"), 1);
p = alloc_packed_git(alloc);
memcpy(p->pack_name, path, path_len);
if (!access(p->pack_name, F_OK))
p->pack_keep = 1;
+ xsnprintf(p->pack_name + path_len, alloc - path_len, ".promisor");
+ if (!access(p->pack_name, F_OK))
+ p->pack_promisor = 1;
+
xsnprintf(p->pack_name + path_len, alloc - path_len, ".pack");
if (stat(p->pack_name, &st) || !S_ISREG(st.st_mode)) {
free(p);
if (ends_with(de->d_name, ".idx") ||
ends_with(de->d_name, ".pack") ||
ends_with(de->d_name, ".bitmap") ||
- ends_with(de->d_name, ".keep"))
+ ends_with(de->d_name, ".keep") ||
+ ends_with(de->d_name, ".promisor"))
string_list_append(&garbage, path.buf);
else
report_garbage(PACKDIR_FILE_GARBAGE, path.buf);
{
struct packed_git *p;
- mru_clear(&packed_git_mru);
+ INIT_LIST_HEAD(&packed_git_mru);
+
for (p = packed_git; p; p = p->next)
- mru_append(&packed_git_mru, p);
+ list_add_tail(&p->mru, &packed_git_mru);
}
static int prepare_packed_git_run_once = 0;
return off;
index += p->num_objects * 4 + (off & 0x7fffffff) * 8;
check_pack_index_ptr(p, index);
- return (((uint64_t)ntohl(*((uint32_t *)(index + 0)))) << 32) |
- ntohl(*((uint32_t *)(index + 4)));
+ return get_be64(index);
}
}
{
const uint32_t *level1_ofs = p->index_data;
const unsigned char *index = p->index_data;
- unsigned hi, lo, stride;
- static int debug_lookup = -1;
-
- if (debug_lookup < 0)
- debug_lookup = !!getenv("GIT_DEBUG_LOOKUP");
+ unsigned stride;
+ uint32_t result;
if (!index) {
if (open_pack_index(p))
index += 8;
}
index += 4 * 256;
- hi = ntohl(level1_ofs[*sha1]);
- lo = ((*sha1 == 0x0) ? 0 : ntohl(level1_ofs[*sha1 - 1]));
if (p->index_version > 1) {
stride = 20;
} else {
index += 4;
}
- if (debug_lookup)
- printf("%02x%02x%02x... lo %u hi %u nr %"PRIu32"\n",
- sha1[0], sha1[1], sha1[2], lo, hi, p->num_objects);
-
- while (lo < hi) {
- unsigned mi = lo + (hi - lo) / 2;
- int cmp = hashcmp(index + mi * stride, sha1);
-
- if (debug_lookup)
- printf("lo %u hi %u rg %u mi %u\n",
- lo, hi, hi - lo, mi);
- if (!cmp)
- return nth_packed_object_offset(p, mi);
- if (cmp > 0)
- hi = mi;
- else
- lo = mi+1;
- }
+ if (bsearch_hash(sha1, level1_ofs, index, stride, &result))
+ return nth_packed_object_offset(p, result);
return 0;
}
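
The open-coded binary search -- along with its GIT_DEBUG_LOOKUP
debugging output -- is replaced by the shared bsearch_hash() helper.
Without restating its exact signature, the contract is: given the
fanout table and the entry table with its stride, it returns non-zero
when the sha1 is present and stores the entry's index in its result
argument, which is then translated to a pack offset via
nth_packed_object_offset() exactly as before.
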
*/
int find_pack_entry(const unsigned char *sha1, struct pack_entry *e)
{
- struct mru_entry *p;
+ struct list_head *pos;
prepare_packed_git();
if (!packed_git)
return 0;
- for (p = packed_git_mru.head; p; p = p->next) {
- if (fill_pack_entry(sha1, e, p->item)) {
- mru_mark(&packed_git_mru, p);
+ list_for_each(pos, &packed_git_mru) {
+ struct packed_git *p = list_entry(pos, struct packed_git, mru);
+ if (fill_pack_entry(sha1, e, p)) {
+ list_move(&p->mru, &packed_git_mru);
return 1;
}
}
for (p = packed_git; p; p = p->next) {
if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
continue;
+ if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) &&
+ !p->pack_promisor)
+ continue;
if (open_pack_index(p)) {
pack_errors = 1;
continue;
}
return r ? r : pack_errors;
}
+
+static int add_promisor_object(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *set_)
+{
+ struct oidset *set = set_;
+ struct object *obj = parse_object(oid);
+ if (!obj)
+ return 1;
+
+ oidset_insert(set, oid);
+
+ /*
+ * If this is a tree, commit, or tag, the objects it refers
+ * to are also promisor objects. (Blobs refer to no objects.)
+ */
+ if (obj->type == OBJ_TREE) {
+ struct tree *tree = (struct tree *)obj;
+ struct tree_desc desc;
+ struct name_entry entry;
+ if (init_tree_desc_gently(&desc, tree->buffer, tree->size))
+ /*
+ * Error messages are given when packs are
+ * verified, so do not print any here.
+ */
+ return 0;
+ while (tree_entry_gently(&desc, &entry))
+ oidset_insert(set, entry.oid);
+ } else if (obj->type == OBJ_COMMIT) {
+ struct commit *commit = (struct commit *) obj;
+ struct commit_list *parents = commit->parents;
+
+ oidset_insert(set, &commit->tree->object.oid);
+ for (; parents; parents = parents->next)
+ oidset_insert(set, &parents->item->object.oid);
+ } else if (obj->type == OBJ_TAG) {
+ struct tag *tag = (struct tag *) obj;
+ oidset_insert(set, &tag->tagged->oid);
+ }
+ return 0;
+}
+
+int is_promisor_object(const struct object_id *oid)
+{
+ static struct oidset promisor_objects;
+ static int promisor_objects_prepared;
+
+ if (!promisor_objects_prepared) {
+ if (repository_format_partial_clone) {
+ for_each_packed_object(add_promisor_object,
+ &promisor_objects,
+ FOR_EACH_OBJECT_PROMISOR_ONLY);
+ }
+ promisor_objects_prepared = 1;
+ }
+ return oidset_contains(&promisor_objects, oid);
+}
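
A minimal sketch of the intended use, mirroring the pre-filtering added
to list-objects earlier in this series (the helper name is
hypothetical):

    static void check_missing(const struct object_id *oid)
    {
        if (has_object_file(oid))
            return; /* present; nothing to check */
        if (is_promisor_object(oid))
            return; /* absent but promised by the promisor remote */
        die("missing object %s", oid_to_hex(oid));
    }

The first call to is_promisor_object() walks every pack marked
FOR_EACH_OBJECT_PROMISOR_ONLY once and caches the resulting closure in
a static oidset.
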
#ifndef PACKFILE_H
#define PACKFILE_H
+#include "oidset.h"
+
/*
* Generate the filename to be used for a pack file with checksum "sha1" and
* extension "ext". The result is written into the strbuf "buf", overwriting
extern int has_pack_index(const unsigned char *sha1);
+/*
+ * Only iterate over packs obtained from the promisor remote.
+ */
+#define FOR_EACH_OBJECT_PROMISOR_ONLY 2
+
/*
* Iterate over packed objects in both the local
* repository and any alternates repositories (unless the
void *data);
extern int for_each_packed_object(each_packed_object_fn, void *, unsigned flags);
+/*
+ * Return 1 if an object in a promisor packfile is or refers to the given
+ * object, 0 otherwise.
+ */
+extern int is_promisor_object(const struct object_id *oid);
+
#endif
int parse_options_end(struct parse_opt_ctx_t *ctx)
{
- memmove(ctx->out + ctx->cpidx, ctx->argv, ctx->argc * sizeof(*ctx->out));
+ MOVE_ARRAY(ctx->out + ctx->cpidx, ctx->argv, ctx->argc);
ctx->out[ctx->cpidx + ctx->argc] = NULL;
return ctx->cpidx + ctx->argc;
}
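
MOVE_ARRAY() (from git-compat-util.h) wraps memmove() but derives the
byte count from the destination's element type, removing the
error-prone "n * sizeof(*x)" multiplication at call sites. Roughly:

    #define MOVE_ARRAY(dst, src, n) \
        memmove((dst), (src), st_mult(sizeof(*(dst)), (n)))

(a sketch only: the real macro additionally build-asserts that dst and
src have matching element sizes).
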
-perl.mak
-perl.mak.old
-MYMETA.json
-MYMETA.yml
-blib
-blibdirs
-pm_to_blib
-PM.stamp
+/build/
use Carp qw(carp croak); # but croak is bad - throw instead
-use Error qw(:try);
+use Git::Error qw(:try);
use Cwd qw(abs_path cwd);
use IPC::Open2 qw(open2);
use Fcntl qw(SEEK_SET SEEK_CUR);
--- /dev/null
+package Git::Error;
+use 5.008;
+use strict;
+use warnings;
+
+=head1 NAME
+
+Git::Error - Wrapper for the L<Error> module, in case it's not installed
+
+=head1 DESCRIPTION
+
+Wraps the import function for the L<Error> module.
+
+This module is only intended to be used for code shipping in the
+C<git.git> repository. Use it for anything else at your peril!
+
+=cut
+
+sub import {
+ shift;
+ my $caller = caller;
+
+ eval {
+ require Error;
+ 1;
+ } or do {
+ my $error = $@ || "Zombie Error";
+
+ my $Git_Error_pm_path = $INC{"Git/Error.pm"} || die "BUG: Should have our own path from %INC!";
+
+ require File::Basename;
+ my $Git_Error_pm_root = File::Basename::dirname($Git_Error_pm_path) || die "BUG: Can't figure out lib/Git dirname from '$Git_Error_pm_path'!";
+
+ require File::Spec;
+ my $Git_pm_FromCPAN_root = File::Spec->catdir($Git_Error_pm_root, 'FromCPAN');
+ die "BUG: '$Git_pm_FromCPAN_root' should be a directory!" unless -d $Git_pm_FromCPAN_root;
+
+ local @INC = ($Git_pm_FromCPAN_root, @INC);
+ require Error;
+ };
+
+ unshift @_, $caller;
+ goto &Error::import;
+}
+
+1;
--- /dev/null
+# Error.pm
+#
+# Copyright (c) 1997-8 Graham Barr <gbarr@ti.com>. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+#
+# Based on my original Error.pm, and Exceptions.pm by Peter Seibel
+# <peter@weblogic.com> and adapted by Jesse Glick <jglick@sig.bsh.com>.
+#
+# but modified ***significantly***
+
+package Error;
+
+use strict;
+use vars qw($VERSION);
+use 5.004;
+
+$VERSION = "0.15009";
+
+use overload (
+ '""' => 'stringify',
+ '0+' => 'value',
+ 'bool' => sub { return 1; },
+ 'fallback' => 1
+);
+
+$Error::Depth = 0; # Depth to pass to caller()
+$Error::Debug = 0; # Generate verbose stack traces
+@Error::STACK = (); # Clause stack for try
+$Error::THROWN = undef; # last error thrown, a workaround until die $ref works
+
+my $LAST; # Last error created
+my %ERROR; # Last error associated with package
+
+sub throw_Error_Simple
+{
+ my $args = shift;
+ return Error::Simple->new($args->{'text'});
+}
+
+$Error::ObjectifyCallback = \&throw_Error_Simple;
+
+
+# Exported subs are defined in Error::subs
+
+sub import {
+ shift;
+ local $Exporter::ExportLevel = $Exporter::ExportLevel + 1;
+ Error::subs->import(@_);
+}
+
+# I really want to use last for the name of this method, but it is a keyword
+# which prevents the syntax "last Error"
+
+sub prior {
+ shift; # ignore
+
+ return $LAST unless @_;
+
+ my $pkg = shift;
+ return exists $ERROR{$pkg} ? $ERROR{$pkg} : undef
+ unless ref($pkg);
+
+ my $obj = $pkg;
+ my $err = undef;
+ if($obj->isa('HASH')) {
+ $err = $obj->{'__Error__'}
+ if exists $obj->{'__Error__'};
+ }
+ elsif($obj->isa('GLOB')) {
+ $err = ${*$obj}{'__Error__'}
+ if exists ${*$obj}{'__Error__'};
+ }
+
+ $err;
+}
+
+sub flush {
+ shift; #ignore
+
+ unless (@_) {
+ $LAST = undef;
+ return;
+ }
+
+ my $pkg = shift;
+ return unless ref($pkg);
+
+ undef $ERROR{$pkg} if defined $ERROR{$pkg};
+}
+
+# Return as much information as possible about where the error
+# happened. The -stacktrace element only exists if $Error::DEBUG
+# was set when the error was created
+
+sub stacktrace {
+ my $self = shift;
+
+ return $self->{'-stacktrace'}
+ if exists $self->{'-stacktrace'};
+
+ my $text = exists $self->{'-text'} ? $self->{'-text'} : "Died";
+
+ $text .= sprintf(" at %s line %d.\n", $self->file, $self->line)
+ unless($text =~ /\n$/s);
+
+ $text;
+}
+
+# Allow error propagation, ie
+#
+# $ber->encode(...) or
+# return Error->prior($ber)->associate($ldap);
+
+sub associate {
+ my $err = shift;
+ my $obj = shift;
+
+ return unless ref($obj);
+
+ if($obj->isa('HASH')) {
+ $obj->{'__Error__'} = $err;
+ }
+ elsif($obj->isa('GLOB')) {
+ ${*$obj}{'__Error__'} = $err;
+ }
+ $ERROR{ ref($obj) } = $err;
+
+ return;
+}
+
+sub new {
+ my $self = shift;
+ my($pkg,$file,$line) = caller($Error::Depth);
+
+ my $err = bless {
+ '-package' => $pkg,
+ '-file' => $file,
+ '-line' => $line,
+ @_
+ }, $self;
+
+ $err->associate($err->{'-object'})
+ if(exists $err->{'-object'});
+
+ # To always create a stacktrace would be very inefficient, so
+ # we only do it if $Error::Debug is set
+
+ if($Error::Debug) {
+ require Carp;
+ local $Carp::CarpLevel = $Error::Depth;
+ my $text = defined($err->{'-text'}) ? $err->{'-text'} : "Error";
+ my $trace = Carp::longmess($text);
+ # Remove try calls from the trace
+ $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
+ $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::run_clauses[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
+ $err->{'-stacktrace'} = $trace
+ }
+
+ $@ = $LAST = $ERROR{$pkg} = $err;
+}
+
+# Throw an error. This contains some very gory code.
+
+sub throw {
+ my $self = shift;
+ local $Error::Depth = $Error::Depth + 1;
+
+ # if we are not rethrow-ing then create the object to throw
+ $self = $self->new(@_) unless ref($self);
+
+ die $Error::THROWN = $self;
+}
+
+# syntactic sugar for
+#
+# die with Error( ... );
+
+sub with {
+ my $self = shift;
+ local $Error::Depth = $Error::Depth + 1;
+
+ $self->new(@_);
+}
+
+# syntactic sugar for
+#
+# record Error( ... ) and return;
+
+sub record {
+ my $self = shift;
+ local $Error::Depth = $Error::Depth + 1;
+
+ $self->new(@_);
+}
+
+# catch clause for
+#
+# try { ... } catch CLASS with { ... }
+
+sub catch {
+ my $pkg = shift;
+ my $code = shift;
+ my $clauses = shift || {};
+ my $catch = $clauses->{'catch'} ||= [];
+
+ unshift @$catch, $pkg, $code;
+
+ $clauses;
+}
+
+# Object query methods
+
+sub object {
+ my $self = shift;
+ exists $self->{'-object'} ? $self->{'-object'} : undef;
+}
+
+sub file {
+ my $self = shift;
+ exists $self->{'-file'} ? $self->{'-file'} : undef;
+}
+
+sub line {
+ my $self = shift;
+ exists $self->{'-line'} ? $self->{'-line'} : undef;
+}
+
+sub text {
+ my $self = shift;
+ exists $self->{'-text'} ? $self->{'-text'} : undef;
+}
+
+# overload methods
+
+sub stringify {
+ my $self = shift;
+ defined $self->{'-text'} ? $self->{'-text'} : "Died";
+}
+
+sub value {
+ my $self = shift;
+ exists $self->{'-value'} ? $self->{'-value'} : undef;
+}
+
+package Error::Simple;
+
+@Error::Simple::ISA = qw(Error);
+
+sub new {
+ my $self = shift;
+ my $text = "" . shift;
+ my $value = shift;
+ my(@args) = ();
+
+ local $Error::Depth = $Error::Depth + 1;
+
+ @args = ( -file => $1, -line => $2)
+ if($text =~ s/\s+at\s+(\S+)\s+line\s+(\d+)(?:,\s*<[^>]*>\s+line\s+\d+)?\.?\n?$//s);
+ push(@args, '-value', 0 + $value)
+ if defined($value);
+
+ $self->SUPER::new(-text => $text, @args);
+}
+
+sub stringify {
+ my $self = shift;
+ my $text = $self->SUPER::stringify;
+ $text .= sprintf(" at %s line %d.\n", $self->file, $self->line)
+ unless($text =~ /\n$/s);
+ $text;
+}
+
+##########################################################################
+##########################################################################
+
+# Inspired by code from Jesse Glick <jglick@sig.bsh.com> and
+# Peter Seibel <peter@weblogic.com>
+
+package Error::subs;
+
+use Exporter ();
+use vars qw(@EXPORT_OK @ISA %EXPORT_TAGS);
+
+@EXPORT_OK = qw(try with finally except otherwise);
+%EXPORT_TAGS = (try => \@EXPORT_OK);
+
+@ISA = qw(Exporter);
+
+
+sub blessed {
+ my $item = shift;
+ local $@; # don't kill an outer $@
+ ref $item and eval { $item->can('can') };
+}
+
+
+sub run_clauses ($$$\@) {
+ my($clauses,$err,$wantarray,$result) = @_;
+ my $code = undef;
+
+ $err = $Error::ObjectifyCallback->({'text' =>$err}) unless ref($err);
+
+ CATCH: {
+
+ # catch
+ my $catch;
+ if(defined($catch = $clauses->{'catch'})) {
+ my $i = 0;
+
+ CATCHLOOP:
+ for( ; $i < @$catch ; $i += 2) {
+ my $pkg = $catch->[$i];
+ unless(defined $pkg) {
+ #except
+ splice(@$catch,$i,2,$catch->[$i+1]->());
+ $i -= 2;
+ next CATCHLOOP;
+ }
+ elsif(blessed($err) && $err->isa($pkg)) {
+ $code = $catch->[$i+1];
+ while(1) {
+ my $more = 0;
+ local($Error::THROWN);
+ my $ok = eval {
+ if($wantarray) {
+ @{$result} = $code->($err,\$more);
+ }
+ elsif(defined($wantarray)) {
+ @{$result} = ();
+ $result->[0] = $code->($err,\$more);
+ }
+ else {
+ $code->($err,\$more);
+ }
+ 1;
+ };
+ if( $ok ) {
+ next CATCHLOOP if $more;
+ undef $err;
+ }
+ else {
+ $err = defined($Error::THROWN)
+ ? $Error::THROWN : $@;
+ $err = $Error::ObjectifyCallback->({'text' =>$err})
+ unless ref($err);
+ }
+ last CATCH;
+ };
+ }
+ }
+ }
+
+ # otherwise
+ my $owise;
+ if(defined($owise = $clauses->{'otherwise'})) {
+ my $code = $clauses->{'otherwise'};
+ my $more = 0;
+ my $ok = eval {
+ if($wantarray) {
+ @{$result} = $code->($err,\$more);
+ }
+ elsif(defined($wantarray)) {
+ @{$result} = ();
+ $result->[0] = $code->($err,\$more);
+ }
+ else {
+ $code->($err,\$more);
+ }
+ 1;
+ };
+ if( $ok ) {
+ undef $err;
+ }
+ else {
+ $err = defined($Error::THROWN)
+ ? $Error::THROWN : $@;
+
+ $err = $Error::ObjectifyCallback->({'text' =>$err})
+ unless ref($err);
+ }
+ }
+ }
+ $err;
+}
+
+sub try (&;$) {
+ my $try = shift;
+ my $clauses = @_ ? shift : {};
+ my $ok = 0;
+ my $err = undef;
+ my @result = ();
+
+ unshift @Error::STACK, $clauses;
+
+ my $wantarray = wantarray();
+
+ do {
+ local $Error::THROWN = undef;
+ local $@ = undef;
+
+ $ok = eval {
+ if($wantarray) {
+ @result = $try->();
+ }
+ elsif(defined $wantarray) {
+ $result[0] = $try->();
+ }
+ else {
+ $try->();
+ }
+ 1;
+ };
+
+ $err = defined($Error::THROWN) ? $Error::THROWN : $@
+ unless $ok;
+ };
+
+ shift @Error::STACK;
+
+ $err = run_clauses($clauses,$err,wantarray,@result)
+ unless($ok);
+
+ $clauses->{'finally'}->()
+ if(defined($clauses->{'finally'}));
+
+ if (defined($err))
+ {
+ if (blessed($err) && $err->can('throw'))
+ {
+ throw $err;
+ }
+ else
+ {
+ die $err;
+ }
+ }
+
+ wantarray ? @result : $result[0];
+}
+
+# Each clause adds a sub to the list of clauses. The finally clause is
+# always the last, and the otherwise clause is always added just before
+# the finally clause.
+#
+# All clauses, except the finally clause, add a sub which takes one argument;
+# this argument will be the error being thrown. The sub will return a code ref
+# if that clause can handle that error, otherwise undef is returned.
+#
+# The otherwise clause adds a sub which unconditionally returns the user's
+# code reference; this is why it is forced to be last.
+#
+# The catch clause is defined in Error.pm, as the syntax causes it to
+# be called as a method
+
+sub with (&;$) {
+ @_
+}
+
+sub finally (&) {
+ my $code = shift;
+ my $clauses = { 'finally' => $code };
+ $clauses;
+}
+
+# The except clause is a block which returns a hashref or a list of
+# key-value pairs, where the keys are the classes and the values are subs.
+
+sub except (&;$) {
+ my $code = shift;
+ my $clauses = shift || {};
+ my $catch = $clauses->{'catch'} ||= [];
+
+ my $sub = sub {
+ my $ref;
+ my(@array) = $code->($_[0]);
+ if(@array == 1 && ref($array[0])) {
+ $ref = $array[0];
+ $ref = [ %$ref ]
+ if(UNIVERSAL::isa($ref,'HASH'));
+ }
+ else {
+ $ref = \@array;
+ }
+ @$ref
+ };
+
+ unshift @{$catch}, undef, $sub;
+
+ $clauses;
+}
+
+sub otherwise (&;$) {
+ my $code = shift;
+ my $clauses = shift || {};
+
+ if(exists $clauses->{'otherwise'}) {
+ require Carp;
+ Carp::croak("Multiple otherwise clauses");
+ }
+
+ $clauses->{'otherwise'} = $code;
+
+ $clauses;
+}
+
+1;
+__END__
+
+=head1 NAME
+
+Error - Error/exception handling in an OO-ish way
+
+=head1 SYNOPSIS
+
+ use Error qw(:try);
+
+ throw Error::Simple( "A simple error");
+
+ sub xyz {
+ ...
+ record Error::Simple("A simple error")
+ and return;
+ }
+
+ unlink($file) or throw Error::Simple("$file: $!",$!);
+
+ try {
+ do_some_stuff();
+ die "error!" if $condition;
+ throw Error::Simple -text => "Oops!" if $other_condition;
+ }
+ catch Error::IO with {
+ my $E = shift;
+ print STDERR "File ", $E->{'-file'}, " had a problem\n";
+ }
+ except {
+ my $E = shift;
+ my $general_handler=sub {send_message $E->{-description}};
+ return {
+ UserException1 => $general_handler,
+ UserException2 => $general_handler
+ };
+ }
+ otherwise {
+ print STDERR "Well I don't know what to say\n";
+ }
+ finally {
+ close_the_garage_door_already(); # Should be reliable
+ }; # Don't forget the trailing ; or you might be surprised
+
+=head1 DESCRIPTION
+
+The C<Error> package provides two interfaces. Firstly C<Error> provides
+a procedural interface to exception handling. Secondly C<Error> is a
+base class for errors/exceptions that can either be thrown, for
+subsequent catch, or can simply be recorded.
+
+Errors in the class C<Error> should not be thrown directly, but the
+user should throw errors from a sub-class of C<Error>.
+
+=head1 PROCEDURAL INTERFACE
+
+C<Error> exports subroutines to perform exception handling. These will
+be exported if the C<:try> tag is used in the C<use> line.
+
+=over 4
+
+=item try BLOCK CLAUSES
+
+C<try> is the main subroutine called by the user. All other subroutines
+exported are clauses to the try subroutine.
+
+The BLOCK will be evaluated and, if no error is thrown, try will return
+the result of the block.
+
+C<CLAUSES> are the subroutines below, which describe what to do in the
+event of an error being thrown within BLOCK.
+
+=item catch CLASS with BLOCK
+
+This clause will cause all errors that satisfy C<$err-E<gt>isa(CLASS)>
+to be caught and handled by evaluating C<BLOCK>.
+
+C<BLOCK> will be passed two arguments. The first is the error being
+thrown. The second is a reference to a scalar variable. If this
+variable is set by the catch block then, on return from the catch
+block, try will continue processing as if the catch block was never
+found (see the sketch after this list).
+
+To propagate the error, the catch block may call C<$err-E<gt>throw>.
+
+If the scalar referenced by the second argument is not set, and the
+error is not thrown, then the current try block will return with the
+result from the catch block.
+
+=item except BLOCK
+
+When C<try> is looking for a handler, if an except clause is found,
+C<BLOCK> is evaluated. The return value from this block should be a
+HASHREF or a list of key-value pairs, where the keys are class names
+and the values are CODE references to handlers for errors of that
+type.
+
+=item otherwise BLOCK
+
+Catch any error by executing the code in C<BLOCK>.
+
+When evaluated, C<BLOCK> will be passed one argument, which will be the
+error being processed.
+
+Only one otherwise block may be specified per try block.
+
+=item finally BLOCK
+
+Execute the code in C<BLOCK> either after the code in the try block
+has successfully completed, or, if the try block throws an error,
+after the handler has completed.
+
+If the handler throws an error then the error will be caught, the
+finally block will be executed, and the error will be re-thrown.
+
+Only one finally block may be specified per try block.
+
+=back
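+
+As an illustration, here is a minimal sketch (C<MyError::Transient> and
+the helper subs are hypothetical) of a handler that uses its second
+argument to resume the clause search, plus a C<finally> block:
+
+    use Error qw(:try);
+
+    try {
+        risky_operation();              # assumed to possibly throw
+    }
+    catch MyError::Transient with {
+        my ($E, $more) = @_;
+        # Setting the referenced scalar makes try keep looking for a
+        # later clause, as if this catch block had never matched.
+        $$more = 1 unless we_can_handle($E);   # we_can_handle() is made up
+    }
+    finally {
+        release_resources();            # runs whether or not an error was thrown
+    };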
+
+=head1 CLASS INTERFACE
+
+=head2 CONSTRUCTORS
+
+The C<Error> object is implemented as a HASH. This HASH is initialized
+with the arguments that are passed to its constructor. The elements
+that are used by, or are retrievable by, the C<Error> class are listed
+below; other classes may add to these.
+
+ -file
+ -line
+ -text
+ -value
+ -object
+
+If C<-file> or C<-line> are not specified in the constructor arguments
+then these will be initialized with the file name and line number where
+the constructor was called from.
+
+If the error is associated with an object then the object should be
+passed as the C<-object> argument. This will allow the C<Error> package
+to associate the error with the object.
+
+The C<Error> package remembers the last error created, and also the
+last error associated with a package. This could either be the last
+error created by a sub in that package, or the last error which passed
+an object blessed into that package as the C<-object> argument.
+
+=over 4
+
+=item throw ( [ ARGS ] )
+
+Create a new C<Error> object and throw an error, which will be caught
+by a surrounding C<try> block, if there is one. Otherwise it will cause
+the program to exit.
+
+C<throw> may also be called on an existing error to re-throw it (a
+subclass sketch follows this list).
+
+=item with ( [ ARGS ] )
+
+Creates a new C<Error> object and returns it. This is defined as
+syntactic sugar, e.g.
+
+ die with Some::Error ( ... );
+
+=item record ( [ ARGS ] )
+
+Creates a new C<Error> object and returns it. This is defined as
+syntactic sugar, e.g.
+
+ record Some::Error ( ... )
+ and return;
+
+=back
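+
+For example, a minimal sketch of a user-defined subclass (the package
+name is illustrative); bumping C<$Error::Depth> makes C<-file> and
+C<-line> point at the caller of C<throw> rather than at the
+constructor itself:
+
+    package MyApp::Error;
+    use base 'Error';
+
+    sub new {
+        my $self = shift;
+        my $text = shift;
+        local $Error::Depth = $Error::Depth + 1;
+        $self->SUPER::new(-text => $text, @_);
+    }
+
+    package main;
+    use Error qw(:try);
+
+    try {
+        throw MyApp::Error("something went wrong");
+    }
+    catch MyApp::Error with {
+        print STDERR shift->stringify, "\n";
+    };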
+
+=head2 STATIC METHODS
+
+=over 4
+
+=item prior ( [ PACKAGE ] )
+
+Return the last error created, or the last error associated with
+C<PACKAGE>.
+
+=item flush ( [ PACKAGE ] )
+
+Flush the last error created, or the last error associated with
+C<PACKAGE>. It is necessary to clear the error stack before exiting the
+package, or uncaught errors generated using C<record> will be reported
+(see the sketch after this list).
+
+ $Error->flush;
+
+=back
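+
+A minimal sketch (hypothetical package) of recording an error in one
+place and inspecting it in another:
+
+    package MyApp::Parser;
+    use Error;
+
+    sub parse {
+        record Error::Simple("bad input")
+            and return;
+    }
+
+    package main;
+
+    MyApp::Parser::parse();
+    if (my $E = Error->prior) {   # the last error created
+        warn "parse failed: $E\n";
+        Error->flush;             # clear it, so it is not seen again
+    }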
+
+=head2 OBJECT METHODS
+
+=over 4
+
+=item stacktrace
+
+If the variable C<$Error::Debug> was non-zero when the error was
+created, then C<stacktrace> returns a string created by calling
+C<Carp::longmess>. If the variable was zero, then C<stacktrace> returns
+the text of the error appended with the filename and line number of
+where the error was created, provided the text does not end with a
+newline (see the example after this list).
+
+=item object
+
+The object this error was associated with
+
+=item file
+
+The file where the constructor of this error was called from
+
+=item line
+
+The line where the constructor of this error was called from
+
+=item text
+
+The text of the error
+
+=back
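+
+For instance, a minimal sketch of capturing a full trace; the variable
+must be set before the error object is created:
+
+    use Error qw(:try);
+
+    $Error::Debug = 1;   # ask for Carp::longmess() at creation time
+
+    try {
+        throw Error::Simple("oops");
+    }
+    otherwise {
+        print STDERR shift->stacktrace;
+    };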
+
+=head2 OVERLOAD METHODS
+
+=over 4
+
+=item stringify
+
+A method that converts the object into a string. This method may simply
+return the same as the C<text> method, or it may append more
+information, for example the file name and line number.
+
+By default this method returns the C<-text> argument that was passed to
+the constructor, or the string C<"Died"> if none was given.
+
+=item value
+
+A method that will return a value that can be associated with the
+error. For example, if an error was created due to the failure of a
+system call, then this may return the numeric value of C<$!> at the
+time.
+
+By default this method returns the C<-value> argument that was passed
+to the constructor.
+
+=back
+
+=head1 PRE-DEFINED ERROR CLASSES
+
+=over 4
+
+=item Error::Simple
+
+This class can be used to hold simple error strings and values. Its
+constructor takes two arguments. The first is a text value, the second
+is a numeric value. These values are what will be returned by the
+overload methods.
+
+If the text value ends with C<at file line 1> as $@ strings do, then
+this information will be used to set the C<-file> and C<-line> arguments
+of the error object.
+
+This class is used internally if an eval'd block dies with an error
+that is a plain string (unless C<$Error::ObjectifyCallback> is modified).
+
+=back
+
+=head1 $Error::ObjectifyCallback
+
+This variable holds a reference to a subroutine that converts errors that
+are plain strings to objects. It is used by Error.pm to convert textual
+errors to objects, and can be overridden by the user.
+
+It accepts a single argument, which is a hash reference of named
+parameters. Currently the only named parameter passed is C<'text'>,
+which is the text of the error, but others may be available in the
+future.
+
+For example the following code will cause Error.pm to throw objects of the
+class MyError::Bar by default:
+
+ sub throw_MyError_Bar
+ {
+ my $args = shift;
+ my $err = MyError::Bar->new();
+ $err->{'MyBarText'} = $args->{'text'};
+ return $err;
+ }
+
+ {
+ local $Error::ObjectifyCallback = \&throw_MyError_Bar;
+
+ # Error handling here.
+ }
+
+=head1 KNOWN BUGS
+
+None, but that does not mean there are not any.
+
+=head1 AUTHORS
+
+Graham Barr <gbarr@pobox.com>
+
+The code that inspired me to write this was originally written by
+Peter Seibel <peter@weblogic.com> and adapted by Jesse Glick
+<jglick@sig.bsh.com>.
+
+=head1 MAINTAINER
+
+Shlomi Fish <shlomif@iglu.org.il>
+
+=head1 PAST MAINTAINERS
+
+Arun Kumar U <u_arunkumar@yahoo.com>
+
+=cut
sub __bootstrap_locale_messages {
our $TEXTDOMAIN = 'git';
- our $TEXTDOMAINDIR = $ENV{GIT_TEXTDOMAINDIR} || '++LOCALEDIR++';
+ our $TEXTDOMAINDIR = $ENV{GIT_TEXTDOMAINDIR} || '@@LOCALEDIR@@';
require POSIX;
POSIX->import(qw(setlocale));
+++ /dev/null
-#
-# Makefile for perl support modules and routine
-#
-makfile:=perl.mak
-modules =
-
-PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH))
-prefix_SQ = $(subst ','\'',$(prefix))
-localedir_SQ = $(subst ','\'',$(localedir))
-
-ifndef V
- QUIET = @
-endif
-
-all install instlibdir: $(makfile)
- $(QUIET)$(MAKE) -f $(makfile) $@
-
-clean:
- $(QUIET)test -f $(makfile) && $(MAKE) -f $(makfile) $@ || exit 0
- $(RM) ppport.h
- $(RM) $(makfile)
- $(RM) $(makfile).old
- $(RM) PM.stamp
-
-$(makfile): PM.stamp
-
-ifdef NO_PERL_MAKEMAKER
-instdir_SQ = $(subst ','\'',$(prefix)/lib)
-
-modules += Git
-modules += Git/I18N
-modules += Git/IndexInfo
-modules += Git/Packet
-modules += Git/SVN
-modules += Git/SVN/Memoize/YAML
-modules += Git/SVN/Fetcher
-modules += Git/SVN/Editor
-modules += Git/SVN/GlobSpec
-modules += Git/SVN/Log
-modules += Git/SVN/Migration
-modules += Git/SVN/Prompt
-modules += Git/SVN/Ra
-modules += Git/SVN/Utils
-
-$(makfile): ../GIT-CFLAGS Makefile
- echo all: private-Error.pm Git.pm Git/I18N.pm > $@
- set -e; \
- for i in $(modules); \
- do \
- if test $$i = $${i%/*}; \
- then \
- subdir=; \
- else \
- subdir=/$${i%/*}; \
- fi; \
- echo ' $(RM) blib/lib/'$$i'.pm' >> $@; \
- echo ' mkdir -p blib/lib'$$subdir >> $@; \
- echo ' cp '$$i'.pm blib/lib/'$$i'.pm' >> $@; \
- done
- echo ' $(RM) blib/lib/Error.pm' >> $@
- '$(PERL_PATH_SQ)' -MError -e 'exit($$Error::VERSION < 0.15009)' || \
- echo ' cp private-Error.pm blib/lib/Error.pm' >> $@
- echo install: >> $@
- set -e; \
- for i in $(modules); \
- do \
- if test $$i = $${i%/*}; \
- then \
- subdir=; \
- else \
- subdir=/$${i%/*}; \
- fi; \
- echo ' $(RM) "$$(DESTDIR)$(instdir_SQ)/'$$i'.pm"' >> $@; \
- echo ' mkdir -p "$$(DESTDIR)$(instdir_SQ)'$$subdir'"' >> $@; \
- echo ' cp '$$i'.pm "$$(DESTDIR)$(instdir_SQ)/'$$i'.pm"' >> $@; \
- done
- echo ' $(RM) "$$(DESTDIR)$(instdir_SQ)/Error.pm"' >> $@
- '$(PERL_PATH_SQ)' -MError -e 'exit($$Error::VERSION < 0.15009)' || \
- echo ' cp private-Error.pm "$$(DESTDIR)$(instdir_SQ)/Error.pm"' >> $@
- echo instlibdir: >> $@
- echo ' echo $(instdir_SQ)' >> $@
-else
-$(makfile): Makefile.PL ../GIT-CFLAGS
- $(PERL_PATH) $< PREFIX='$(prefix_SQ)' INSTALL_BASE='' --localedir='$(localedir_SQ)'
-endif
-
-# this is just added comfort for calling make directly in perl dir
-# (even though GIT-CFLAGS aren't used yet. If ever)
-../GIT-CFLAGS:
- $(MAKE) -C .. GIT-CFLAGS
+++ /dev/null
-use strict;
-use warnings;
-use ExtUtils::MakeMaker;
-use Getopt::Long;
-use File::Find;
-
-# Don't forget to update the perl/Makefile, too.
-# Don't forget to test with NO_PERL_MAKEMAKER=YesPlease
-
-# Sanity: die at first unknown option
-Getopt::Long::Configure qw/ pass_through /;
-
-my $localedir = '';
-GetOptions("localedir=s" => \$localedir);
-
-sub MY::postamble {
- return <<'MAKE_FRAG';
-instlibdir:
- @echo '$(INSTALLSITELIB)'
-
-ifneq (,$(DESTDIR))
-ifeq (0,$(shell expr '$(MM_VERSION)' '>' 6.10))
-$(error ExtUtils::MakeMaker version "$(MM_VERSION)" is older than 6.11 and so \
- is likely incompatible with the DESTDIR mechanism. Try setting \
- NO_PERL_MAKEMAKER=1 instead)
-endif
-endif
-
-MAKE_FRAG
-}
-
-# Find all the .pm files in "Git/" and Git.pm
-my %pm;
-find sub {
- return unless /\.pm$/;
-
- # sometimes File::Find prepends a ./ Strip it.
- my $pm_path = $File::Find::name;
- $pm_path =~ s{^\./}{};
-
- $pm{$pm_path} = '$(INST_LIBDIR)/'.$pm_path;
-}, "Git", "Git.pm";
-
-
-# We come with our own bundled Error.pm. It's not in the set of default
-# Perl modules so install it if it's not available on the system yet.
-if ( !eval { require Error } || $Error::VERSION < 0.15009) {
- $pm{'private-Error.pm'} = '$(INST_LIBDIR)/Error.pm';
-}
-
-# redirect stdout, otherwise the message "Writing perl.mak for Git"
-# disrupts the output for the target 'instlibdir'
-open STDOUT, ">&STDERR";
-
-WriteMakefile(
- NAME => 'Git',
- VERSION_FROM => 'Git.pm',
- PM => \%pm,
- PM_FILTER => qq[\$(PERL) -pe "s<\\Q++LOCALEDIR++\\E><$localedir>"],
- MAKEFILE => 'perl.mak',
- INSTALLSITEMAN3DIR => '$(SITEPREFIX)/share/man/man3'
-);
+++ /dev/null
-# Error.pm
-#
-# Copyright (c) 1997-8 Graham Barr <gbarr@ti.com>. All rights reserved.
-# This program is free software; you can redistribute it and/or
-# modify it under the same terms as Perl itself.
-#
-# Based on my original Error.pm, and Exceptions.pm by Peter Seibel
-# <peter@weblogic.com> and adapted by Jesse Glick <jglick@sig.bsh.com>.
-#
-# but modified ***significantly***
-
-package Error;
-
-use strict;
-use vars qw($VERSION);
-use 5.004;
-
-$VERSION = "0.15009";
-
-use overload (
- '""' => 'stringify',
- '0+' => 'value',
- 'bool' => sub { return 1; },
- 'fallback' => 1
-);
-
-$Error::Depth = 0; # Depth to pass to caller()
-$Error::Debug = 0; # Generate verbose stack traces
-@Error::STACK = (); # Clause stack for try
-$Error::THROWN = undef; # last error thrown, a workaround until die $ref works
-
-my $LAST; # Last error created
-my %ERROR; # Last error associated with package
-
-sub throw_Error_Simple
-{
- my $args = shift;
- return Error::Simple->new($args->{'text'});
-}
-
-$Error::ObjectifyCallback = \&throw_Error_Simple;
-
-
-# Exported subs are defined in Error::subs
-
-sub import {
- shift;
- local $Exporter::ExportLevel = $Exporter::ExportLevel + 1;
- Error::subs->import(@_);
-}
-
-# I really want to use last for the name of this method, but it is a keyword
-# which prevent the syntax last Error
-
-sub prior {
- shift; # ignore
-
- return $LAST unless @_;
-
- my $pkg = shift;
- return exists $ERROR{$pkg} ? $ERROR{$pkg} : undef
- unless ref($pkg);
-
- my $obj = $pkg;
- my $err = undef;
- if($obj->isa('HASH')) {
- $err = $obj->{'__Error__'}
- if exists $obj->{'__Error__'};
- }
- elsif($obj->isa('GLOB')) {
- $err = ${*$obj}{'__Error__'}
- if exists ${*$obj}{'__Error__'};
- }
-
- $err;
-}
-
-sub flush {
- shift; #ignore
-
- unless (@_) {
- $LAST = undef;
- return;
- }
-
- my $pkg = shift;
- return unless ref($pkg);
-
- undef $ERROR{$pkg} if defined $ERROR{$pkg};
-}
-
-# Return as much information as possible about where the error
-# happened. The -stacktrace element only exists if $Error::DEBUG
-# was set when the error was created
-
-sub stacktrace {
- my $self = shift;
-
- return $self->{'-stacktrace'}
- if exists $self->{'-stacktrace'};
-
- my $text = exists $self->{'-text'} ? $self->{'-text'} : "Died";
-
- $text .= sprintf(" at %s line %d.\n", $self->file, $self->line)
- unless($text =~ /\n$/s);
-
- $text;
-}
-
-# Allow error propagation, ie
-#
-# $ber->encode(...) or
-# return Error->prior($ber)->associate($ldap);
-
-sub associate {
- my $err = shift;
- my $obj = shift;
-
- return unless ref($obj);
-
- if($obj->isa('HASH')) {
- $obj->{'__Error__'} = $err;
- }
- elsif($obj->isa('GLOB')) {
- ${*$obj}{'__Error__'} = $err;
- }
- $obj = ref($obj);
- $ERROR{ ref($obj) } = $err;
-
- return;
-}
-
-sub new {
- my $self = shift;
- my($pkg,$file,$line) = caller($Error::Depth);
-
- my $err = bless {
- '-package' => $pkg,
- '-file' => $file,
- '-line' => $line,
- @_
- }, $self;
-
- $err->associate($err->{'-object'})
- if(exists $err->{'-object'});
-
- # To always create a stacktrace would be very inefficient, so
- # we only do it if $Error::Debug is set
-
- if($Error::Debug) {
- require Carp;
- local $Carp::CarpLevel = $Error::Depth;
- my $text = defined($err->{'-text'}) ? $err->{'-text'} : "Error";
- my $trace = Carp::longmess($text);
- # Remove try calls from the trace
- $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
- $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::run_clauses[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
- $err->{'-stacktrace'} = $trace
- }
-
- $@ = $LAST = $ERROR{$pkg} = $err;
-}
-
-# Throw an error. this contains some very gory code.
-
-sub throw {
- my $self = shift;
- local $Error::Depth = $Error::Depth + 1;
-
- # if we are not rethrow-ing then create the object to throw
- $self = $self->new(@_) unless ref($self);
-
- die $Error::THROWN = $self;
-}
-
-# syntactic sugar for
-#
-# die with Error( ... );
-
-sub with {
- my $self = shift;
- local $Error::Depth = $Error::Depth + 1;
-
- $self->new(@_);
-}
-
-# syntactic sugar for
-#
-# record Error( ... ) and return;
-
-sub record {
- my $self = shift;
- local $Error::Depth = $Error::Depth + 1;
-
- $self->new(@_);
-}
-
-# catch clause for
-#
-# try { ... } catch CLASS with { ... }
-
-sub catch {
- my $pkg = shift;
- my $code = shift;
- my $clauses = shift || {};
- my $catch = $clauses->{'catch'} ||= [];
-
- unshift @$catch, $pkg, $code;
-
- $clauses;
-}
-
-# Object query methods
-
-sub object {
- my $self = shift;
- exists $self->{'-object'} ? $self->{'-object'} : undef;
-}
-
-sub file {
- my $self = shift;
- exists $self->{'-file'} ? $self->{'-file'} : undef;
-}
-
-sub line {
- my $self = shift;
- exists $self->{'-line'} ? $self->{'-line'} : undef;
-}
-
-sub text {
- my $self = shift;
- exists $self->{'-text'} ? $self->{'-text'} : undef;
-}
-
-# overload methods
-
-sub stringify {
- my $self = shift;
- defined $self->{'-text'} ? $self->{'-text'} : "Died";
-}
-
-sub value {
- my $self = shift;
- exists $self->{'-value'} ? $self->{'-value'} : undef;
-}
-
-package Error::Simple;
-
-@Error::Simple::ISA = qw(Error);
-
-sub new {
- my $self = shift;
- my $text = "" . shift;
- my $value = shift;
- my(@args) = ();
-
- local $Error::Depth = $Error::Depth + 1;
-
- @args = ( -file => $1, -line => $2)
- if($text =~ s/\s+at\s+(\S+)\s+line\s+(\d+)(?:,\s*<[^>]*>\s+line\s+\d+)?\.?\n?$//s);
- push(@args, '-value', 0 + $value)
- if defined($value);
-
- $self->SUPER::new(-text => $text, @args);
-}
-
-sub stringify {
- my $self = shift;
- my $text = $self->SUPER::stringify;
- $text .= sprintf(" at %s line %d.\n", $self->file, $self->line)
- unless($text =~ /\n$/s);
- $text;
-}
-
-##########################################################################
-##########################################################################
-
-# Inspired by code from Jesse Glick <jglick@sig.bsh.com> and
-# Peter Seibel <peter@weblogic.com>
-
-package Error::subs;
-
-use Exporter ();
-use vars qw(@EXPORT_OK @ISA %EXPORT_TAGS);
-
-@EXPORT_OK = qw(try with finally except otherwise);
-%EXPORT_TAGS = (try => \@EXPORT_OK);
-
-@ISA = qw(Exporter);
-
-
-sub blessed {
- my $item = shift;
- local $@; # don't kill an outer $@
- ref $item and eval { $item->can('can') };
-}
-
-
-sub run_clauses ($$$\@) {
- my($clauses,$err,$wantarray,$result) = @_;
- my $code = undef;
-
- $err = $Error::ObjectifyCallback->({'text' =>$err}) unless ref($err);
-
- CATCH: {
-
- # catch
- my $catch;
- if(defined($catch = $clauses->{'catch'})) {
- my $i = 0;
-
- CATCHLOOP:
- for( ; $i < @$catch ; $i += 2) {
- my $pkg = $catch->[$i];
- unless(defined $pkg) {
- #except
- splice(@$catch,$i,2,$catch->[$i+1]->());
- $i -= 2;
- next CATCHLOOP;
- }
- elsif(blessed($err) && $err->isa($pkg)) {
- $code = $catch->[$i+1];
- while(1) {
- my $more = 0;
- local($Error::THROWN);
- my $ok = eval {
- if($wantarray) {
- @{$result} = $code->($err,\$more);
- }
- elsif(defined($wantarray)) {
- @{$result} = ();
- $result->[0] = $code->($err,\$more);
- }
- else {
- $code->($err,\$more);
- }
- 1;
- };
- if( $ok ) {
- next CATCHLOOP if $more;
- undef $err;
- }
- else {
- $err = defined($Error::THROWN)
- ? $Error::THROWN : $@;
- $err = $Error::ObjectifyCallback->({'text' =>$err})
- unless ref($err);
- }
- last CATCH;
- };
- }
- }
- }
-
- # otherwise
- my $owise;
- if(defined($owise = $clauses->{'otherwise'})) {
- my $code = $clauses->{'otherwise'};
- my $more = 0;
- my $ok = eval {
- if($wantarray) {
- @{$result} = $code->($err,\$more);
- }
- elsif(defined($wantarray)) {
- @{$result} = ();
- $result->[0] = $code->($err,\$more);
- }
- else {
- $code->($err,\$more);
- }
- 1;
- };
- if( $ok ) {
- undef $err;
- }
- else {
- $err = defined($Error::THROWN)
- ? $Error::THROWN : $@;
-
- $err = $Error::ObjectifyCallback->({'text' =>$err})
- unless ref($err);
- }
- }
- }
- $err;
-}
-
-sub try (&;$) {
- my $try = shift;
- my $clauses = @_ ? shift : {};
- my $ok = 0;
- my $err = undef;
- my @result = ();
-
- unshift @Error::STACK, $clauses;
-
- my $wantarray = wantarray();
-
- do {
- local $Error::THROWN = undef;
- local $@ = undef;
-
- $ok = eval {
- if($wantarray) {
- @result = $try->();
- }
- elsif(defined $wantarray) {
- $result[0] = $try->();
- }
- else {
- $try->();
- }
- 1;
- };
-
- $err = defined($Error::THROWN) ? $Error::THROWN : $@
- unless $ok;
- };
-
- shift @Error::STACK;
-
- $err = run_clauses($clauses,$err,wantarray,@result)
- unless($ok);
-
- $clauses->{'finally'}->()
- if(defined($clauses->{'finally'}));
-
- if (defined($err))
- {
- if (blessed($err) && $err->can('throw'))
- {
- throw $err;
- }
- else
- {
- die $err;
- }
- }
-
- wantarray ? @result : $result[0];
-}
-
-# Each clause adds a sub to the list of clauses. The finally clause is
-# always the last, and the otherwise clause is always added just before
-# the finally clause.
-#
-# All clauses, except the finally clause, add a sub which takes one argument
-# this argument will be the error being thrown. The sub will return a code ref
-# if that clause can handle that error, otherwise undef is returned.
-#
-# The otherwise clause adds a sub which unconditionally returns the users
-# code reference, this is why it is forced to be last.
-#
-# The catch clause is defined in Error.pm, as the syntax causes it to
-# be called as a method
-
-sub with (&;$) {
- @_
-}
-
-sub finally (&) {
- my $code = shift;
- my $clauses = { 'finally' => $code };
- $clauses;
-}
-
-# The except clause is a block which returns a hashref or a list of
-# key-value pairs, where the keys are the classes and the values are subs.
-
-sub except (&;$) {
- my $code = shift;
- my $clauses = shift || {};
- my $catch = $clauses->{'catch'} ||= [];
-
- my $sub = sub {
- my $ref;
- my(@array) = $code->($_[0]);
- if(@array == 1 && ref($array[0])) {
- $ref = $array[0];
- $ref = [ %$ref ]
- if(UNIVERSAL::isa($ref,'HASH'));
- }
- else {
- $ref = \@array;
- }
- @$ref
- };
-
- unshift @{$catch}, undef, $sub;
-
- $clauses;
-}
-
-sub otherwise (&;$) {
- my $code = shift;
- my $clauses = shift || {};
-
- if(exists $clauses->{'otherwise'}) {
- require Carp;
- Carp::croak("Multiple otherwise clauses");
- }
-
- $clauses->{'otherwise'} = $code;
-
- $clauses;
-}
-
-1;
-__END__
-
-=head1 NAME
-
-Error - Error/exception handling in an OO-ish way
-
-=head1 SYNOPSIS
-
- use Error qw(:try);
-
- throw Error::Simple( "A simple error");
-
- sub xyz {
- ...
- record Error::Simple("A simple error")
- and return;
- }
-
- unlink($file) or throw Error::Simple("$file: $!",$!);
-
- try {
- do_some_stuff();
- die "error!" if $condition;
- throw Error::Simple -text => "Oops!" if $other_condition;
- }
- catch Error::IO with {
- my $E = shift;
- print STDERR "File ", $E->{'-file'}, " had a problem\n";
- }
- except {
- my $E = shift;
- my $general_handler=sub {send_message $E->{-description}};
- return {
- UserException1 => $general_handler,
- UserException2 => $general_handler
- };
- }
- otherwise {
- print STDERR "Well I don't know what to say\n";
- }
- finally {
- close_the_garage_door_already(); # Should be reliable
- }; # Don't forget the trailing ; or you might be surprised
-
-=head1 DESCRIPTION
-
-The C<Error> package provides two interfaces. Firstly C<Error> provides
-a procedural interface to exception handling. Secondly C<Error> is a
-base class for errors/exceptions that can either be thrown, for
-subsequent catch, or can simply be recorded.
-
-Errors in the class C<Error> should not be thrown directly, but the
-user should throw errors from a sub-class of C<Error>.
-
-=head1 PROCEDURAL INTERFACE
-
-C<Error> exports subroutines to perform exception handling. These will
-be exported if the C<:try> tag is used in the C<use> line.
-
-=over 4
-
-=item try BLOCK CLAUSES
-
-C<try> is the main subroutine called by the user. All other subroutines
-exported are clauses to the try subroutine.
-
-The BLOCK will be evaluated and, if no error is throw, try will return
-the result of the block.
-
-C<CLAUSES> are the subroutines below, which describe what to do in the
-event of an error being thrown within BLOCK.
-
-=item catch CLASS with BLOCK
-
-This clauses will cause all errors that satisfy C<$err-E<gt>isa(CLASS)>
-to be caught and handled by evaluating C<BLOCK>.
-
-C<BLOCK> will be passed two arguments. The first will be the error
-being thrown. The second is a reference to a scalar variable. If this
-variable is set by the catch block then, on return from the catch
-block, try will continue processing as if the catch block was never
-found.
-
-To propagate the error the catch block may call C<$err-E<gt>throw>
-
-If the scalar reference by the second argument is not set, and the
-error is not thrown. Then the current try block will return with the
-result from the catch block.
-
-=item except BLOCK
-
-When C<try> is looking for a handler, if an except clause is found
-C<BLOCK> is evaluated. The return value from this block should be a
-HASHREF or a list of key-value pairs, where the keys are class names
-and the values are CODE references for the handler of errors of that
-type.
-
-=item otherwise BLOCK
-
-Catch any error by executing the code in C<BLOCK>
-
-When evaluated C<BLOCK> will be passed one argument, which will be the
-error being processed.
-
-Only one otherwise block may be specified per try block
-
-=item finally BLOCK
-
-Execute the code in C<BLOCK> either after the code in the try block has
-successfully completed, or if the try block throws an error then
-C<BLOCK> will be executed after the handler has completed.
-
-If the handler throws an error then the error will be caught, the
-finally block will be executed and the error will be re-thrown.
-
-Only one finally block may be specified per try block
-
-=back
-
-=head1 CLASS INTERFACE
-
-=head2 CONSTRUCTORS
-
-The C<Error> object is implemented as a HASH. This HASH is initialized
-with the arguments that are passed to its constructor. The elements
-that are used by, or are retrievable by the C<Error> class are listed
-below, other classes may add to these.
-
- -file
- -line
- -text
- -value
- -object
-
-If C<-file> or C<-line> are not specified in the constructor arguments
-then these will be initialized with the file name and line number where
-the constructor was called from.
-
-If the error is associated with an object then the object should be
-passed as the C<-object> argument. This will allow the C<Error> package
-to associate the error with the object.
-
-The C<Error> package remembers the last error created, and also the
-last error associated with a package. This could either be the last
-error created by a sub in that package, or the last error which passed
-an object blessed into that package as the C<-object> argument.
-
-=over 4
-
-=item throw ( [ ARGS ] )
-
-Create a new C<Error> object and throw an error, which will be caught
-by a surrounding C<try> block, if there is one. Otherwise it will cause
-the program to exit.
-
-C<throw> may also be called on an existing error to re-throw it.
-
-=item with ( [ ARGS ] )
-
-Create a new C<Error> object and returns it. This is defined for
-syntactic sugar, eg
-
- die with Some::Error ( ... );
-
-=item record ( [ ARGS ] )
-
-Create a new C<Error> object and returns it. This is defined for
-syntactic sugar, eg
-
- record Some::Error ( ... )
- and return;
-
-=back
-
-=head2 STATIC METHODS
-
-=over 4
-
-=item prior ( [ PACKAGE ] )
-
-Return the last error created, or the last error associated with
-C<PACKAGE>
-
-=item flush ( [ PACKAGE ] )
-
-Flush the last error created, or the last error associated with
-C<PACKAGE>.It is necessary to clear the error stack before exiting the
-package or uncaught errors generated using C<record> will be reported.
-
- $Error->flush;
-
-=cut
-
-=back
-
-=head2 OBJECT METHODS
-
-=over 4
-
-=item stacktrace
-
-If the variable C<$Error::Debug> was non-zero when the error was
-created, then C<stacktrace> returns a string created by calling
-C<Carp::longmess>. If the variable was zero the C<stacktrace> returns
-the text of the error appended with the filename and line number of
-where the error was created, providing the text does not end with a
-newline.
-
-=item object
-
-The object this error was associated with
-
-=item file
-
-The file where the constructor of this error was called from
-
-=item line
-
-The line where the constructor of this error was called from
-
-=item text
-
-The text of the error
-
-=back
-
-=head2 OVERLOAD METHODS
-
-=over 4
-
-=item stringify
-
-A method that converts the object into a string. This method may simply
-return the same as the C<text> method, or it may append more
-information. For example the file name and line number.
-
-By default this method returns the C<-text> argument that was passed to
-the constructor, or the string C<"Died"> if none was given.
-
-=item value
-
-A method that will return a value that can be associated with the
-error. For example if an error was created due to the failure of a
-system call, then this may return the numeric value of C<$!> at the
-time.
-
-By default this method returns the C<-value> argument that was passed
-to the constructor.
-
-=back
-
-=head1 PRE-DEFINED ERROR CLASSES
-
-=over 4
-
-=item Error::Simple
-
-This class can be used to hold simple error strings and values. Its
-constructor takes two arguments. The first is a text value, the second
-is a numeric value. These values are what will be returned by the
-overload methods.
-
-If the text value ends with C<at file line 1> as $@ strings do, then
-this information will be used to set the C<-file> and C<-line> arguments
-of the error object.
-
-This class is used internally if an eval'd block die's with an error
-that is a plain string. (Unless C<$Error::ObjectifyCallback> is modified)
-
-=back
-
-=head1 $Error::ObjectifyCallback
-
-This variable holds a reference to a subroutine that converts errors that
-are plain strings to objects. It is used by Error.pm to convert textual
-errors to objects, and can be overridden by the user.
-
-It accepts a single argument which is a hash reference to named parameters.
-Currently the only named parameter passed is C<'text'> which is the text
-of the error, but others may be available in the future.
-
-For example the following code will cause Error.pm to throw objects of the
-class MyError::Bar by default:
-
- sub throw_MyError_Bar
- {
- my $args = shift;
- my $err = MyError::Bar->new();
- $err->{'MyBarText'} = $args->{'text'};
- return $err;
- }
-
- {
- local $Error::ObjectifyCallback = \&throw_MyError_Bar;
-
- # Error handling here.
- }
-
-=head1 KNOWN BUGS
-
-None, but that does not mean there are not any.
-
-=head1 AUTHORS
-
-Graham Barr <gbarr@pobox.com>
-
-The code that inspired me to write this was originally written by
-Peter Seibel <peter@weblogic.com> and adapted by Jesse Glick
-<jglick@sig.bsh.com>.
-
-=head1 MAINTAINER
-
-Shlomi Fish <shlomif@iglu.org.il>
-
-=head1 PAST MAINTAINERS
-
-Arun Kumar U <u_arunkumar@yahoo.com>
-
-=cut
{
int threads, i, work, offset;
struct thread_data data[MAX_PARALLEL];
+ uint64_t start = getnanotime();
if (!core_preload_index)
return;
if (pthread_join(p->pthread, NULL))
die("unable to join threaded lstat");
}
+ trace_performance_since(start, "preload index");
}
#endif
free(to_free);
}
+void sq_quote_buf_pretty(struct strbuf *dst, const char *src)
+{
+ static const char ok_punct[] = "+,-./:=@_^";
+ const char *p;
+
+ for (p = src; *p; p++) {
+ if (!isalpha(*p) && !isdigit(*p) && !strchr(ok_punct, *p)) {
+ sq_quote_buf(dst, src);
+ return;
+ }
+ }
+
+ /* if we get here, we did not need quoting */
+ strbuf_addstr(dst, src);
+}
+
void sq_quotef(struct strbuf *dst, const char *fmt, ...)
{
struct strbuf src = STRBUF_INIT;
strbuf_release(&src);
}
-void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
+void sq_quote_argv(struct strbuf *dst, const char **argv)
{
int i;
for (i = 0; argv[i]; ++i) {
strbuf_addch(dst, ' ');
sq_quote_buf(dst, argv[i]);
- if (maxlen && dst->len > maxlen)
- die("Too many or long arguments");
+ }
+}
+
+void sq_quote_argv_pretty(struct strbuf *dst, const char **argv)
+{
+ int i;
+
+ for (i = 0; argv[i]; i++) {
+ strbuf_addch(dst, ' ');
+ sq_quote_buf_pretty(dst, argv[i]);
}
}
*next = NULL;
return arg;
case '\\':
- c = *++src;
- if (need_bs_quote(c) && *++src == '\'') {
- *dst++ = c;
+ /*
+ * Allow backslashed characters outside of
+ * single-quotes only if they need escaping,
+ * and only if we resume the single-quoted part
+ * afterward.
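+			 * For example, the quoted input <'a'\''b'>
+			 * dequotes to <a'b>.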
+ */
+ if (need_bs_quote(src[1]) && src[2] == '\'') {
+ *dst++ = src[1];
+ src += 2;
continue;
}
/* Fallthrough */
*/
extern void sq_quote_buf(struct strbuf *, const char *src);
-extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen);
+extern void sq_quote_argv(struct strbuf *, const char **argv);
extern void sq_quotef(struct strbuf *, const char *fmt, ...);
+/*
+ * These match their non-pretty variants, except that they avoid
+ * quoting when there are no exotic characters. These should only be used for
+ * human-readable output, as sq_dequote() is not smart enough to dequote
+ * the result.
+ */
+void sq_quote_buf_pretty(struct strbuf *, const char *src);
+void sq_quote_argv_pretty(struct strbuf *, const char **argv);
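+
+/*
+ * Illustration: pretty-quoting leaves simple tokens untouched and falls
+ * back to full quoting otherwise. For instance:
+ *
+ *     struct strbuf buf = STRBUF_INIT;
+ *     sq_quote_buf_pretty(&buf, "README");       // appends: README
+ *     sq_quote_buf_pretty(&buf, "hello world");  // appends: 'hello world'
+ */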
+
/* This unwraps what sq_quote() produces in place, but returns
* NULL if the input does not look like what sq_quote would have
* produced.
void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
{
- unsigned char sha1[20];
- if (write_sha1_file("", 0, blob_type, sha1))
+ struct object_id oid;
+ if (write_object_file("", 0, blob_type, &oid))
die("cannot create an empty blob in the object database");
- hashcpy(ce->oid.hash, sha1);
+ oidcpy(&ce->oid, &oid);
}
int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
/* Add it in.. */
istate->cache_nr++;
if (istate->cache_nr > pos + 1)
- memmove(istate->cache + pos + 1,
- istate->cache + pos,
- (istate->cache_nr - pos - 1) * sizeof(ce));
+ MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos,
+ istate->cache_nr - pos - 1);
set_index_entry(istate, pos, ce);
istate->cache_changed |= CE_ENTRY_ADDED;
return 0;
const char *typechange_fmt;
const char *added_fmt;
const char *unmerged_fmt;
+ uint64_t start = getnanotime();
modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n");
replace_index_entry(istate, i, new);
}
+ trace_performance_since(start, "refresh index");
return has_errors;
}
static int verify_hdr(struct cache_header *hdr, unsigned long size)
{
- git_SHA_CTX c;
- unsigned char sha1[20];
+ git_hash_ctx c;
+ unsigned char hash[GIT_MAX_RAWSZ];
int hdr_version;
if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
if (!verify_index_checksum)
return 0;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, size - 20);
- git_SHA1_Final(sha1, &c);
- if (hashcmp(sha1, (unsigned char *)hdr + size - 20))
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
+ the_hash_algo->final_fn(hash, &c);
+ if (hashcmp(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
return error("bad index file sha1 signature");
return 0;
}
int read_index(struct index_state *istate)
{
- return read_index_from(istate, get_index_file());
+ return read_index_from(istate, get_index_file(), get_git_dir());
}
static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *ondisk,
die_errno("cannot stat the open index");
mmap_size = xsize_t(st.st_size);
- if (mmap_size < sizeof(struct cache_header) + 20)
+ if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
die("index file smaller than expected");
mmap = xmmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (verify_hdr(hdr, mmap_size) < 0)
goto unmap;
- hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - 20);
+ hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
istate->version = ntohl(hdr->hdr_version);
istate->cache_nr = ntohl(hdr->hdr_entries);
istate->cache_alloc = alloc_nr(istate->cache_nr);
istate->timestamp.sec = st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
- while (src_offset <= mmap_size - 20 - 8) {
+ while (src_offset <= mmap_size - the_hash_algo->rawsz - 8) {
/* After an array of active_nr index entries,
* there can be arbitrary number of extended
* sections, each of which is prefixed with
* This way, shared index can be removed if they have not been used
* for some time.
*/
-static void freshen_shared_index(char *base_sha1_hex, int warn)
+static void freshen_shared_index(const char *shared_index, int warn)
{
- char *shared_index = git_pathdup("sharedindex.%s", base_sha1_hex);
if (!check_and_freshen_file(shared_index, 1) && warn)
warning("could not freshen shared index '%s'", shared_index);
- free(shared_index);
}
-int read_index_from(struct index_state *istate, const char *path)
+int read_index_from(struct index_state *istate, const char *path,
+ const char *gitdir)
{
+ uint64_t start = getnanotime();
struct split_index *split_index;
int ret;
char *base_sha1_hex;
- const char *base_path;
+ char *base_path;
/* istate->initialized covers both .git/index and .git/sharedindex.xxx */
if (istate->initialized)
return istate->cache_nr;
ret = do_read_index(istate, path, 0);
+ trace_performance_since(start, "read cache %s", path);
split_index = istate->split_index;
if (!split_index || is_null_sha1(split_index->base_sha1)) {
split_index->base = xcalloc(1, sizeof(*split_index->base));
base_sha1_hex = sha1_to_hex(split_index->base_sha1);
- base_path = git_path("sharedindex.%s", base_sha1_hex);
+ base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_sha1_hex);
ret = do_read_index(split_index->base, base_path, 1);
if (hashcmp(split_index->base_sha1, split_index->base->sha1))
die("broken index, expect %s in %s, got %s",
base_sha1_hex, base_path,
sha1_to_hex(split_index->base->sha1));
- freshen_shared_index(base_sha1_hex, 0);
+ freshen_shared_index(base_path, 0);
merge_base_index(istate);
post_read_index_from(istate);
+ trace_performance_since(start, "read cache %s", base_path);
+ free(base_path);
return ret;
}
static unsigned char write_buffer[WRITE_BUFFER_SIZE];
static unsigned long write_buffer_len;
-static int ce_write_flush(git_SHA_CTX *context, int fd)
+static int ce_write_flush(git_hash_ctx *context, int fd)
{
unsigned int buffered = write_buffer_len;
if (buffered) {
- git_SHA1_Update(context, write_buffer, buffered);
+ the_hash_algo->update_fn(context, write_buffer, buffered);
if (write_in_full(fd, write_buffer, buffered) < 0)
return -1;
write_buffer_len = 0;
return 0;
}
-static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len)
+static int ce_write(git_hash_ctx *context, int fd, void *data, unsigned int len)
{
while (len) {
unsigned int buffered = write_buffer_len;
return 0;
}
-static int write_index_ext_header(git_SHA_CTX *context, int fd,
+static int write_index_ext_header(git_hash_ctx *context, int fd,
unsigned int ext, unsigned int sz)
{
ext = htonl(ext);
(ce_write(context, fd, &sz, 4) < 0)) ? -1 : 0;
}
-static int ce_flush(git_SHA_CTX *context, int fd, unsigned char *sha1)
+static int ce_flush(git_hash_ctx *context, int fd, unsigned char *hash)
{
unsigned int left = write_buffer_len;
if (left) {
write_buffer_len = 0;
- git_SHA1_Update(context, write_buffer, left);
+ the_hash_algo->update_fn(context, write_buffer, left);
}
- /* Flush first if not enough space for SHA1 signature */
- if (left + 20 > WRITE_BUFFER_SIZE) {
+ /* Flush first if not enough space for hash signature */
+ if (left + the_hash_algo->rawsz > WRITE_BUFFER_SIZE) {
if (write_in_full(fd, write_buffer, left) < 0)
return -1;
left = 0;
}
- /* Append the SHA1 signature at the end */
- git_SHA1_Final(write_buffer + left, context);
- hashcpy(sha1, write_buffer + left);
- left += 20;
+ /* Append the hash signature at the end */
+ the_hash_algo->final_fn(write_buffer + left, context);
+ hashcpy(hash, write_buffer + left);
+ left += the_hash_algo->rawsz;
return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0;
}
}
}
-static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce,
+static int ce_write_entry(git_hash_ctx *c, int fd, struct cache_entry *ce,
struct strbuf *previous_name, struct ondisk_cache_entry *ondisk)
{
int size;
int fd;
ssize_t n;
struct stat st;
- unsigned char sha1[20];
+ unsigned char hash[GIT_MAX_RAWSZ];
if (!istate->initialized)
return 0;
if (fstat(fd, &st))
goto out;
- if (st.st_size < sizeof(struct cache_header) + 20)
+ if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
goto out;
- n = pread_in_full(fd, sha1, 20, st.st_size - 20);
- if (n != 20)
+ n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz);
+ if (n != the_hash_algo->rawsz)
goto out;
- if (hashcmp(istate->sha1, sha1))
+ if (hashcmp(istate->sha1, hash))
goto out;
close(fd);
static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
int strip_extensions)
{
+ uint64_t start = getnanotime();
int newfd = tempfile->fd;
- git_SHA_CTX c;
+ git_hash_ctx c;
struct cache_header hdr;
int i, err = 0, removed, extended, hdr_version;
struct cache_entry **cache = istate->cache;
struct stat st;
struct ondisk_cache_entry_extended ondisk;
struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
- int drop_cache_tree = 0;
+ int drop_cache_tree = istate->drop_cache_tree;
for (i = removed = extended = 0; i < entries; i++) {
if (cache[i]->ce_flags & CE_REMOVE)
hdr.hdr_version = htonl(hdr_version);
hdr.hdr_entries = htonl(entries - removed);
- git_SHA1_Init(&c);
+ the_hash_algo->init_fn(&c);
if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
return -1;
return -1;
istate->timestamp.sec = (unsigned int)st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
+ trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
return 0;
}
}
static int write_shared_index(struct index_state *istate,
- struct lock_file *lock, unsigned flags)
+ struct tempfile **temp)
{
- struct tempfile *temp;
struct split_index *si = istate->split_index;
int ret;
- temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
- if (!temp) {
- hashclr(si->base_sha1);
- return do_write_locked_index(istate, lock, flags);
- }
move_cache_to_base_index(istate);
- ret = do_write_index(si->base, temp, 1);
- if (ret) {
- delete_tempfile(&temp);
+ ret = do_write_index(si->base, *temp, 1);
+ if (ret)
return ret;
- }
- ret = adjust_shared_perm(get_tempfile_path(temp));
+ ret = adjust_shared_perm(get_tempfile_path(*temp));
if (ret) {
- int save_errno = errno;
- error("cannot fix permission bits on %s", get_tempfile_path(temp));
- delete_tempfile(&temp);
- errno = save_errno;
+ error("cannot fix permission bits on %s", get_tempfile_path(*temp));
return ret;
}
- ret = rename_tempfile(&temp,
+ ret = rename_tempfile(temp,
git_path("sharedindex.%s", sha1_to_hex(si->base->sha1)));
if (!ret) {
hashcpy(si->base_sha1, si->base->sha1);
new_shared_index = istate->cache_changed & SPLIT_INDEX_ORDERED;
if (new_shared_index) {
- ret = write_shared_index(istate, lock, flags);
+ struct tempfile *temp;
+ int saved_errno;
+
+ temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
+ if (!temp) {
+ hashclr(si->base_sha1);
+ ret = do_write_locked_index(istate, lock, flags);
+ goto out;
+ }
+ ret = write_shared_index(istate, &temp);
+
+ saved_errno = errno;
+ if (is_tempfile_active(temp))
+ delete_tempfile(&temp);
+ errno = saved_errno;
+
if (ret)
goto out;
}
ret = write_split_index(istate, lock, flags);
/* Freshen the shared index only if the split-index was written */
- if (!ret && !new_shared_index)
- freshen_shared_index(sha1_to_hex(si->base_sha1), 1);
+ if (!ret && !new_shared_index) {
+ const char *shared_index = git_path("sharedindex.%s",
+ sha1_to_hex(si->base_sha1));
+ freshen_shared_index(shared_index, 1);
+ }
out:
if (flags & COMMIT_LOCK)
if (initial_ref_transaction_commit(packed_transaction, err)) {
ret = TRANSACTION_GENERIC_ERROR;
- goto cleanup;
}
+ packed_refs_unlock(refs->packed_ref_store);
cleanup:
if (packed_transaction)
ref_transaction_free(packed_transaction);
- packed_refs_unlock(refs->packed_ref_store);
transaction->state = REF_TRANSACTION_CLOSED;
string_list_clear(&affected_refnames, 0);
return ret;
int mmapped;
/*
- * The contents of the `packed-refs` file. If the file was
- * already sorted, this points at the mmapped contents of the
- * file. If not, this points at heap-allocated memory
- * containing the contents, sorted. If there were no contents
- * (e.g., because the file didn't exist), `buf` and `eof` are
- * both NULL.
+ * The contents of the `packed-refs` file:
+ *
+ * - buf -- a pointer to the start of the memory
+ * - start -- a pointer to the first byte of actual references
+ * (i.e., after the header line, if one is present)
+ * - eof -- a pointer just past the end of the reference
+ * contents
+ *
+ * If the `packed-refs` file was already sorted, `buf` points
+ * at the mmapped contents of the file. If not, it points at
+ * heap-allocated memory containing the contents, sorted. If
+ * there were no contents (e.g., because the file didn't
+ * exist), `buf`, `start`, and `eof` are all NULL.
*/
- char *buf, *eof;
-
- /* The size of the header line, if any; otherwise, 0: */
- size_t header_len;
+ char *buf, *start, *eof;
/*
* What is the peeled state of the `packed-refs` file that
} else {
free(snapshot->buf);
}
- snapshot->buf = snapshot->eof = NULL;
- snapshot->header_len = 0;
+ snapshot->buf = snapshot->start = snapshot->eof = NULL;
}
/*
size_t len, i;
char *new_buffer, *dst;
- pos = snapshot->buf + snapshot->header_len;
+ pos = snapshot->start;
eof = snapshot->eof;
- len = eof - pos;
- if (!len)
+ if (pos == eof)
return;
+ len = eof - pos;
+
/*
* Initialize records based on a crude estimate of the number
* of references in the file (we'll grow it below if needed):
* place:
*/
clear_snapshot_buffer(snapshot);
- snapshot->buf = new_buffer;
+ snapshot->buf = snapshot->start = new_buffer;
snapshot->eof = new_buffer + len;
- snapshot->header_len = 0;
cleanup:
free(records);
*/
static void verify_buffer_safe(struct snapshot *snapshot)
{
- const char *buf = snapshot->buf + snapshot->header_len;
+ const char *start = snapshot->start;
const char *eof = snapshot->eof;
const char *last_line;
- if (buf == eof)
+ if (start == eof)
return;
- last_line = find_start_of_record(buf, eof - 1);
+ last_line = find_start_of_record(start, eof - 1);
if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
die_invalid_line(snapshot->refs->path,
last_line, eof - last_line);
}
+#define SMALL_FILE_SIZE (32*1024)
+
/*
* Depending on `mmap_strategy`, either mmap or read the contents of
* the `packed-refs` file into the snapshot. Return 1 if the file
- * existed and was read, or 0 if the file was absent. Die on errors.
+ * existed and was read, or 0 if the file was absent or empty. Die on
+ * errors.
*/
static int load_contents(struct snapshot *snapshot)
{
die_errno("couldn't stat %s", snapshot->refs->path);
size = xsize_t(st.st_size);
- switch (mmap_strategy) {
- case MMAP_NONE:
+ if (!size) {
+ return 0;
+ } else if (mmap_strategy == MMAP_NONE || size <= SMALL_FILE_SIZE) {
snapshot->buf = xmalloc(size);
bytes_read = read_in_full(fd, snapshot->buf, size);
if (bytes_read < 0 || bytes_read != size)
die_errno("couldn't read %s", snapshot->refs->path);
- snapshot->eof = snapshot->buf + size;
snapshot->mmapped = 0;
- break;
- case MMAP_TEMPORARY:
- case MMAP_OK:
+ } else {
snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
- snapshot->eof = snapshot->buf + size;
snapshot->mmapped = 1;
- break;
}
close(fd);
+ snapshot->start = snapshot->buf;
+ snapshot->eof = snapshot->buf + size;
+
return 1;
}
* `refname` starts. If `mustexist` is true and the reference doesn't
* exist, then return NULL. If `mustexist` is false and the reference
* doesn't exist, then return the point where that reference would be
- * inserted. In the latter mode, `refname` doesn't have to be a proper
- * reference name; for example, one could search for "refs/replace/"
- * to find the start of any replace references.
+ * inserted, or `snapshot->eof` (which might be NULL) if it would be
+ * inserted at the end of the file. In the latter mode, `refname`
+ * doesn't have to be a proper reference name; for example, one could
+ * search for "refs/replace/" to find the start of any replace
+ * references.
*
* The record is sought using a binary search, so `snapshot->buf` must
* be sorted.
* preceding records all have reference names that come
* *before* `refname`.
*/
- const char *lo = snapshot->buf + snapshot->header_len;
+ const char *lo = snapshot->start;
/*
* A pointer to a the first character of a record whose
*/
const char *hi = snapshot->eof;
- while (lo < hi) {
+ while (lo != hi) {
const char *mid, *rec;
int cmp;
/* If the file has a header line, process it: */
if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
- struct strbuf tmp = STRBUF_INIT;
- char *p;
- const char *eol;
+ char *tmp, *p, *eol;
struct string_list traits = STRING_LIST_INIT_NODUP;
eol = memchr(snapshot->buf, '\n',
snapshot->buf,
snapshot->eof - snapshot->buf);
- strbuf_add(&tmp, snapshot->buf, eol - snapshot->buf);
+ tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);
- if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
+ if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
die_invalid_line(refs->path,
snapshot->buf,
snapshot->eof - snapshot->buf);
/* perhaps other traits later as well */
/* The "+ 1" is for the LF character. */
- snapshot->header_len = eol + 1 - snapshot->buf;
+ snapshot->start = eol + 1;
string_list_clear(&traits, 0);
- strbuf_release(&tmp);
+ free(tmp);
}
verify_buffer_safe(snapshot);
* We don't want to leave the file mmapped, so we are
* forced to make a copy now:
*/
- size_t size = snapshot->eof -
- (snapshot->buf + snapshot->header_len);
+ size_t size = snapshot->eof - snapshot->start;
char *buf_copy = xmalloc(size);
- memcpy(buf_copy, snapshot->buf + snapshot->header_len, size);
+ memcpy(buf_copy, snapshot->start, size);
clear_snapshot_buffer(snapshot);
- snapshot->buf = buf_copy;
+ snapshot->buf = snapshot->start = buf_copy;
snapshot->eof = buf_copy + size;
}
*/
snapshot = get_snapshot(refs);
- if (!snapshot->buf)
+ if (prefix && *prefix)
+ start = find_reference_location(snapshot, prefix, 0);
+ else
+ start = snapshot->start;
+
+ if (start == snapshot->eof)
return empty_ref_iterator_begin();
iter = xcalloc(1, sizeof(*iter));
iter->snapshot = snapshot;
acquire_snapshot(snapshot);
- if (prefix && *prefix)
- start = find_reference_location(snapshot, prefix, 0);
- else
- start = snapshot->buf + snapshot->header_len;
-
iter->pos = start;
iter->eof = snapshot->eof;
strbuf_init(&iter->refname_buf, 0);
return -1;
entry = dir->entries[entry_index];
- memmove(&dir->entries[entry_index],
- &dir->entries[entry_index + 1],
- (dir->nr - entry_index - 1) * sizeof(*dir->entries)
- );
+ MOVE_ARRAY(&dir->entries[entry_index],
+ &dir->entries[entry_index + 1], dir->nr - entry_index - 1);
dir->nr--;
if (dir->sorted > entry_index)
dir->sorted--;
#include "credential.h"
#include "sha1-array.h"
#include "send-pack.h"
+#include "quote.h"
static struct remote *remote;
/* always ends with a trailing slash */
char *deepen_since;
struct string_list deepen_not;
struct string_list push_options;
+ char *filter;
unsigned progress : 1,
check_self_contained_and_connected : 1,
cloning : 1,
thin : 1,
/* One of the SEND_PACK_PUSH_CERT_* constants. */
push_cert : 2,
- deepen_relative : 1;
+ deepen_relative : 1,
+ from_promisor : 1,
+ no_dependents : 1;
};
static struct options options;
static struct string_list cas_options = STRING_LIST_INIT_DUP;
return -1;
return 0;
} else if (!strcmp(name, "push-option")) {
- string_list_append(&options.push_options, value);
+ if (*value != '"')
+ string_list_append(&options.push_options, value);
+ else {
+ struct strbuf unquoted = STRBUF_INIT;
+ if (unquote_c_style(&unquoted, value, NULL) < 0)
+ die("invalid quoting in push-option value");
+ string_list_append_nodup(&options.push_options,
+ strbuf_detach(&unquoted, NULL));
+ }
return 0;
#if LIBCURL_VERSION_NUM >= 0x070a08
return -1;
return 0;
#endif /* LIBCURL_VERSION_NUM >= 0x070a08 */
+ } else if (!strcmp(name, "from-promisor")) {
+ options.from_promisor = 1;
+ return 0;
+ } else if (!strcmp(name, "no-dependents")) {
+ options.no_dependents = 1;
+ return 0;
+ } else if (!strcmp(name, "filter")) {
+		options.filter = xstrdup(value);
+ return 0;
} else {
return 1 /* unsupported */;
}
* pkt-line matches our request.
*/
line = packet_read_line_buf(&last->buf, &last->len, NULL);
+ if (!line)
+ die("invalid server response; expected service, got flush packet");
strbuf_reset(&exp);
strbuf_addf(&exp, "# service=%s", service);
options.deepen_not.items[i].string);
if (options.deepen_relative && options.depth)
argv_array_push(&args, "--deepen-relative");
+ if (options.from_promisor)
+ argv_array_push(&args, "--from-promisor");
+ if (options.no_dependents)
+ argv_array_push(&args, "--no-dependents");
+ if (options.filter)
+ argv_array_pushf(&args, "--filter=%s", options.filter);
argv_array_push(&args, url.buf);
for (i = 0; i < nr_heads; i++) {
"refs/tags/*"
};
+/* See TAG_REFSPEC for the string version */
const struct refspec *tag_refspec = &s_tag_refspec;
struct counted_string {
remote->fetch_refspec[remote->fetch_refspec_nr++] = ref;
}
+void add_prune_tags_to_fetch_refspec(struct remote *remote)
+{
+ int nr = remote->fetch_refspec_nr;
+ int bufsize = nr + 1;
+ int size = sizeof(struct refspec);
+
+ remote->fetch = xrealloc(remote->fetch, size * bufsize);
+ memcpy(&remote->fetch[nr], tag_refspec, size);
+ add_fetch_refspec(remote, xstrdup(TAG_REFSPEC));
+}
+
static void add_url(struct remote *remote, const char *url)
{
ALLOC_GROW(remote->url, remote->url_nr + 1, remote->url_alloc);
ret = xcalloc(1, sizeof(struct remote));
ret->prune = -1; /* unspecified */
+ ret->prune_tags = -1; /* unspecified */
ALLOC_GROW(remotes, remotes_nr + 1, remotes_alloc);
remotes[remotes_nr++] = ret;
ret->name = xstrndup(name, len);
remote->skip_default_update = git_config_bool(key, value);
else if (!strcmp(subkey, "prune"))
remote->prune = git_config_bool(key, value);
+ else if (!strcmp(subkey, "prunetags"))
+ remote->prune_tags = git_config_bool(key, value);
else if (!strcmp(subkey, "url")) {
const char *v;
if (git_config_string(&v, key, value))
int skip_default_update;
int mirror;
int prune;
+ int prune_tags;
const char *receivepack;
const char *uploadpack;
extern int is_empty_cas(const struct push_cas_option *);
void apply_push_cas(struct push_cas_option *, struct remote *, struct ref *);
+#define TAG_REFSPEC "refs/tags/*:refs/tags/*"
+
+void add_prune_tags_to_fetch_refspec(struct remote *remote);
+
#endif
ALLOC_GROW(replace_object, replace_object_nr + 1, replace_object_alloc);
replace_object_nr++;
if (pos < replace_object_nr)
- memmove(replace_object + pos + 1,
- replace_object + pos,
- (replace_object_nr - pos - 1) *
- sizeof(*replace_object));
+ MOVE_ARRAY(replace_object + pos + 1, replace_object + pos,
+ replace_object_nr - pos - 1);
replace_object[pos] = replace;
return 0;
}
if (!repo->index)
repo->index = xcalloc(1, sizeof(*repo->index));
- return read_index_from(repo->index, repo->index_file);
+ return read_index_from(repo->index, repo->index_file, repo->gitdir);
}
ALLOC_GROW(rerere_dir, rerere_dir_nr + 1, rerere_dir_alloc);
/* ... and add it in. */
rerere_dir_nr++;
- memmove(rerere_dir + pos + 1, rerere_dir + pos,
- (rerere_dir_nr - pos - 1) * sizeof(*rerere_dir));
+ MOVE_ARRAY(rerere_dir + pos + 1, rerere_dir + pos,
+ rerere_dir_nr - pos - 1);
rerere_dir[pos] = rr_dir;
scan_rerere_dir(rr_dir);
}
if (!object) {
if (revs->ignore_missing)
return object;
+ if (revs->exclude_promisor_objects && is_promisor_object(oid))
+ return NULL;
die("bad object %s", name);
}
object->flags |= flags;
for (parent = commit->parents; parent; parent = parent->next) {
struct commit *p = parent->item;
-
- if (parse_commit_gently(p, revs->ignore_missing_links) < 0)
+ int gently = revs->ignore_missing_links ||
+ revs->exclude_promisor_objects;
+ if (parse_commit_gently(p, gently) < 0) {
+ if (revs->exclude_promisor_objects &&
+ is_promisor_object(&p->object.oid)) {
+ if (revs->first_parent_only)
+ break;
+ continue;
+ }
return -1;
+ }
if (revs->show_source && !p->util)
p->util = commit->util;
p->object.flags |= left_flag;
continue; /* current index already taken care of */
if (read_index_from(&istate,
- worktree_git_path(wt, "index")) > 0)
+ worktree_git_path(wt, "index"),
+ get_worktree_git_dir(wt)) > 0)
do_add_index_objects_to_pending(revs, &istate);
discard_index(&istate);
}
revs->limited = 1;
} else if (!strcmp(arg, "--ignore-missing")) {
revs->ignore_missing = 1;
+ } else if (!strcmp(arg, "--exclude-promisor-objects")) {
+ if (fetch_if_missing)
+ die("BUG: exclude_promisor_objects can only be used when fetch_if_missing is 0");
+ revs->exclude_promisor_objects = 1;
} else {
int opts = diff_opt_parse(&revs->diffopt, argv, argc, revs->prefix);
if (!opts)
clear_object_flags(SEEN | ADDED | SHOWN);
}
+static int mark_uninteresting(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *unused)
+{
+ struct object *o = parse_object(oid);
+ o->flags |= UNINTERESTING | SEEN;
+ return 0;
+}
+
int prepare_revision_walk(struct rev_info *revs)
{
int i;
(revs->limited && limiting_can_increase_treesame(revs)))
revs->treesame.name = "treesame";
+ if (revs->exclude_promisor_objects) {
+ for_each_packed_object(mark_uninteresting, NULL,
+ FOR_EACH_OBJECT_PROMISOR_ONLY);
+ }
+
if (revs->no_walk != REVISION_WALK_NO_WALK_UNSORTED)
commit_list_sort_by_date(&revs->commits);
if (revs->no_walk)
ancestry_path:1,
first_parent_only:1,
line_level_traverse:1,
- tree_blobs_in_commit_order:1;
+ tree_blobs_in_commit_order:1,
+
+ /* for internal use only */
+ exclude_promisor_objects:1;
/* Diff flags */
unsigned int diff:1,
#include "thread-utils.h"
#include "strbuf.h"
#include "string-list.h"
+#include "quote.h"
void child_process_init(struct child_process *child)
{
return code;
}
+static void trace_add_env(struct strbuf *dst, const char *const *deltaenv)
+{
+ struct string_list envs = STRING_LIST_INIT_DUP;
+ const char *const *e;
+ int i;
+ int printed_unset = 0;
+
+ /* Last one wins, see run-command.c:prep_childenv() for context */
+ for (e = deltaenv; e && *e; e++) {
+ struct strbuf key = STRBUF_INIT;
+ char *equals = strchr(*e, '=');
+
+ if (equals) {
+ strbuf_add(&key, *e, equals - *e);
+ string_list_insert(&envs, key.buf)->util = equals + 1;
+ } else {
+ string_list_insert(&envs, *e)->util = NULL;
+ }
+ strbuf_release(&key);
+ }
+
+ /* "unset X Y...;" */
+ for (i = 0; i < envs.nr; i++) {
+ const char *var = envs.items[i].string;
+ const char *val = envs.items[i].util;
+
+ if (val || !getenv(var))
+ continue;
+
+ if (!printed_unset) {
+ strbuf_addstr(dst, " unset");
+ printed_unset = 1;
+ }
+ strbuf_addf(dst, " %s", var);
+ }
+ if (printed_unset)
+ strbuf_addch(dst, ';');
+
+ /* ... followed by "A=B C=D ..." */
+ for (i = 0; i < envs.nr; i++) {
+ const char *var = envs.items[i].string;
+ const char *val = envs.items[i].util;
+ const char *oldval;
+
+ if (!val)
+ continue;
+
+ oldval = getenv(var);
+ if (oldval && !strcmp(val, oldval))
+ continue;
+
+ strbuf_addf(dst, " %s=", var);
+ sq_quote_buf_pretty(dst, val);
+ }
+ string_list_clear(&envs, 0);
+}
+
+static void trace_run_command(const struct child_process *cp)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ if (!trace_want(&trace_default_key))
+ return;
+
+ strbuf_addf(&buf, "trace: run_command:");
+ if (cp->dir) {
+ strbuf_addstr(&buf, " cd ");
+ sq_quote_buf_pretty(&buf, cp->dir);
+ strbuf_addch(&buf, ';');
+ }
+ /*
+ * The caller is responsible for initializing cp->env from
+ * cp->env_array if needed. We only check one place.
+ */
+ if (cp->env)
+ trace_add_env(&buf, cp->env);
+ if (cp->git_cmd)
+ strbuf_addstr(&buf, " git");
+ sq_quote_argv_pretty(&buf, cp->argv);
+
+ trace_printf("%s", buf.buf);
+ strbuf_release(&buf);
+}
+
int start_command(struct child_process *cmd)
{
int need_in, need_out, need_err;
cmd->err = fderr[0];
}
- trace_argv_printf(cmd->argv, "trace: run_command:");
+ trace_run_command(cmd);
+
fflush(NULL);
#ifndef GIT_WINDOWS_NATIVE
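For context, exercising the new tracing needs nothing beyond a populated
child_process; a minimal sketch (command and environment variable invented
for illustration):

    struct child_process cp = CHILD_PROCESS_INIT;

    argv_array_pushl(&cp.args, "git", "status", NULL);
    /* the delta shows up in the trace line ahead of the argv */
    argv_array_push(&cp.env_array, "GIT_PAGER=cat");
    if (run_command(&cp))   /* start_command() invokes trace_run_command() */
            die("git status failed");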
static int receive_unpack_status(int in)
{
const char *line = packet_read_line(in, NULL);
+ if (!line)
+ return error(_("unexpected flush packet while reading remote unpack status"));
if (!skip_prefix(line, "unpack ", &line))
return error(_("unable to parse remote unpack status: %s"), line);
if (strcmp(line, "ok"))
#include "cache.h"
#include "config.h"
#include "lockfile.h"
-#include "sequencer.h"
#include "dir.h"
#include "object.h"
#include "commit.h"
+#include "sequencer.h"
#include "tag.h"
#include "run-command.h"
#include "exec_cmd.h"
#include "log-tree.h"
#include "wt-status.h"
#include "hashmap.h"
+#include "notes-utils.h"
+#include "sigchain.h"
#define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
const char sign_off_header[] = "Signed-off-by: ";
static const char cherry_picked_prefix[] = "(cherry picked from commit ";
+GIT_PATH_FUNC(git_path_commit_editmsg, "COMMIT_EDITMSG")
+
GIT_PATH_FUNC(git_path_seq_dir, "sequencer")
static GIT_PATH_FUNC(git_path_todo_file, "sequencer/todo")
static GIT_PATH_FUNC(rebase_path_strategy_opts, "rebase-merge/strategy_opts")
static GIT_PATH_FUNC(rebase_path_allow_rerere_autoupdate, "rebase-merge/allow_rerere_autoupdate")
+static int git_sequencer_config(const char *k, const char *v, void *cb)
+{
+ struct replay_opts *opts = cb;
+ int status;
+
+ if (!strcmp(k, "commit.cleanup")) {
+ const char *s;
+
+ status = git_config_string(&s, k, v);
+ if (status)
+ return status;
+
+ if (!strcmp(s, "verbatim"))
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_NONE;
+ else if (!strcmp(s, "whitespace"))
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SPACE;
+ else if (!strcmp(s, "strip"))
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_ALL;
+ else if (!strcmp(s, "scissors"))
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SPACE;
+ else
+ warning(_("invalid commit message cleanup mode '%s'"),
+ s);
+
+ return status;
+ }
+
+ if (!strcmp(k, "commit.gpgsign")) {
+ opts->gpg_sign = git_config_bool(k, v) ? xstrdup("") : NULL;
+ return 0;
+ }
+
+ status = git_gpg_config(k, v, NULL);
+ if (status)
+ return status;
+
+ return git_diff_basic_config(k, v, NULL);
+}
+
+void sequencer_init_config(struct replay_opts *opts)
+{
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_NONE;
+ git_config(git_sequencer_config, opts);
+}
+
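The declaration in sequencer.h (further down) notes that this must run before
option parsing; a sketch of the expected pattern in a builtin (assumed,
mirroring how builtin/revert.c is meant to use it):

    struct replay_opts opts = REPLAY_OPTS_INIT;

    sequencer_init_config(&opts);   /* defaults from commit.cleanup, commit.gpgsign */
    /* ... parse_options() may then override whatever the config set ... */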
static inline int is_rebase_i(const struct replay_opts *opts)
{
return opts->action == REPLAY_INTERACTIVE_REBASE;
_(action_name(opts)));
rollback_lock_file(&index_lock);
- if (opts->signoff)
- append_signoff(msgbuf, 0, 0);
-
if (!clean)
append_conflicts_hint(msgbuf);
return 0;
}
+static char *get_author(const char *message)
+{
+ size_t len;
+ const char *a;
+
+ a = find_commit_header(message, "author", &len);
+ if (a)
+ return xmemdupz(a, len);
+
+ return NULL;
+}
+
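For context, a hedged sketch of how get_author() is meant to be consumed (the
real call sites appear further down in this patch):

    const char *msg = get_commit_buffer(commit, NULL);
    char *author = get_author(msg);   /* ident tail of the "author " header, or NULL */

    unuse_commit_buffer(commit, msg);
    if (!author)
            return error(_("unable to parse commit author"));
    /* ... hand "author" to the commit machinery, then free(author) ... */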
static const char staged_changes_advice[] =
N_("you have staged changes in your working tree\n"
"If these changes are meant to be squashed into the previous commit, run:\n"
argv_array_push(&cmd.args, "--amend");
if (opts->gpg_sign)
argv_array_pushf(&cmd.args, "-S%s", opts->gpg_sign);
- if (opts->signoff)
- argv_array_push(&cmd.args, "-s");
if (defmsg)
argv_array_pushl(&cmd.args, "-F", defmsg, NULL);
if ((flags & CLEANUP_MSG))
return run_command(&cmd);
}
+static int rest_is_empty(const struct strbuf *sb, int start)
+{
+ int i, eol;
+ const char *nl;
+
+ /* Check if the rest is just whitespace and Signed-off-by's. */
+ for (i = start; i < sb->len; i++) {
+ nl = memchr(sb->buf + i, '\n', sb->len - i);
+ if (nl)
+ eol = nl - sb->buf;
+ else
+ eol = sb->len;
+
+ if (strlen(sign_off_header) <= eol - i &&
+ starts_with(sb->buf + i, sign_off_header)) {
+ i = eol;
+ continue;
+ }
+ while (i < eol)
+ if (!isspace(sb->buf[i++]))
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Find out if the message in the strbuf contains only whitespace and
+ * Signed-off-by lines.
+ */
+int message_is_empty(const struct strbuf *sb,
+ enum commit_msg_cleanup_mode cleanup_mode)
+{
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_NONE && sb->len)
+ return 0;
+ return rest_is_empty(sb, 0);
+}
+
+/*
+ * See if the user edited the message in the editor or left what
+ * was in the template intact.
+ */
+int template_untouched(const struct strbuf *sb, const char *template_file,
+ enum commit_msg_cleanup_mode cleanup_mode)
+{
+ struct strbuf tmpl = STRBUF_INIT;
+ const char *start;
+
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_NONE && sb->len)
+ return 0;
+
+ if (!template_file || strbuf_read_file(&tmpl, template_file, 0) <= 0)
+ return 0;
+
+ strbuf_stripspace(&tmpl, cleanup_mode == COMMIT_MSG_CLEANUP_ALL);
+ if (!skip_prefix(sb->buf, tmpl.buf, &start))
+ start = sb->buf;
+ strbuf_release(&tmpl);
+ return rest_is_empty(sb, start - sb->buf);
+}
+
+int update_head_with_reflog(const struct commit *old_head,
+ const struct object_id *new_head,
+ const char *action, const struct strbuf *msg,
+ struct strbuf *err)
+{
+ struct ref_transaction *transaction;
+ struct strbuf sb = STRBUF_INIT;
+ const char *nl;
+ int ret = 0;
+
+ if (action) {
+ strbuf_addstr(&sb, action);
+ strbuf_addstr(&sb, ": ");
+ }
+
+ nl = strchr(msg->buf, '\n');
+ if (nl) {
+ strbuf_add(&sb, msg->buf, nl + 1 - msg->buf);
+ } else {
+ strbuf_addbuf(&sb, msg);
+ strbuf_addch(&sb, '\n');
+ }
+
+ transaction = ref_transaction_begin(err);
+ if (!transaction ||
+ ref_transaction_update(transaction, "HEAD", new_head,
+ old_head ? &old_head->object.oid : &null_oid,
+ 0, sb.buf, err) ||
+ ref_transaction_commit(transaction, err)) {
+ ret = -1;
+ }
+ ref_transaction_free(transaction);
+ strbuf_release(&sb);
+
+ return ret;
+}
+
+static int run_rewrite_hook(const struct object_id *oldoid,
+ const struct object_id *newoid)
+{
+ struct child_process proc = CHILD_PROCESS_INIT;
+ const char *argv[3];
+ int code;
+ struct strbuf sb = STRBUF_INIT;
+
+ argv[0] = find_hook("post-rewrite");
+ if (!argv[0])
+ return 0;
+
+ argv[1] = "amend";
+ argv[2] = NULL;
+
+ proc.argv = argv;
+ proc.in = -1;
+ proc.stdout_to_stderr = 1;
+
+ code = start_command(&proc);
+ if (code)
+ return code;
+ strbuf_addf(&sb, "%s %s\n", oid_to_hex(oldoid), oid_to_hex(newoid));
+ sigchain_push(SIGPIPE, SIG_IGN);
+ write_in_full(proc.in, sb.buf, sb.len);
+ close(proc.in);
+ strbuf_release(&sb);
+ sigchain_pop(SIGPIPE);
+ return finish_command(&proc);
+}
+
+void commit_post_rewrite(const struct commit *old_head,
+ const struct object_id *new_head)
+{
+ struct notes_rewrite_cfg *cfg;
+
+ cfg = init_copy_notes_for_rewrite("amend");
+ if (cfg) {
+ /* we are amending, so old_head is not NULL */
+ copy_note_for_rewrite(cfg, &old_head->object.oid, new_head);
+ finish_copy_notes_for_rewrite(cfg, "Notes added by 'git commit --amend'");
+ }
+ run_rewrite_hook(&old_head->object.oid, new_head);
+}
+
+static int run_prepare_commit_msg_hook(struct strbuf *msg, const char *commit)
+{
+ struct argv_array hook_env = ARGV_ARRAY_INIT;
+ int ret;
+ const char *name;
+
+ name = git_path_commit_editmsg();
+ if (write_message(msg->buf, msg->len, name, 0))
+ return -1;
+
+ argv_array_pushf(&hook_env, "GIT_INDEX_FILE=%s", get_index_file());
+ argv_array_push(&hook_env, "GIT_EDITOR=:");
+ if (commit)
+ ret = run_hook_le(hook_env.argv, "prepare-commit-msg", name,
+ "commit", commit, NULL);
+ else
+ ret = run_hook_le(hook_env.argv, "prepare-commit-msg", name,
+ "message", NULL);
+ if (ret)
+ ret = error(_("'prepare-commit-msg' hook failed"));
+ argv_array_clear(&hook_env);
+
+ return ret;
+}
+
+static const char implicit_ident_advice_noconfig[] =
+N_("Your name and email address were configured automatically based\n"
+"on your username and hostname. Please check that they are accurate.\n"
+"You can suppress this message by setting them explicitly. Run the\n"
+"following command and follow the instructions in your editor to edit\n"
+"your configuration file:\n"
+"\n"
+" git config --global --edit\n"
+"\n"
+"After doing this, you may fix the identity used for this commit with:\n"
+"\n"
+" git commit --amend --reset-author\n");
+
+static const char implicit_ident_advice_config[] =
+N_("Your name and email address were configured automatically based\n"
+"on your username and hostname. Please check that they are accurate.\n"
+"You can suppress this message by setting them explicitly:\n"
+"\n"
+" git config --global user.name \"Your Name\"\n"
+" git config --global user.email you@example.com\n"
+"\n"
+"After doing this, you may fix the identity used for this commit with:\n"
+"\n"
+" git commit --amend --reset-author\n");
+
+static const char *implicit_ident_advice(void)
+{
+ char *user_config = expand_user_path("~/.gitconfig", 0);
+ char *xdg_config = xdg_config_home("config");
+ int config_exists = file_exists(user_config) || file_exists(xdg_config);
+
+ free(user_config);
+ free(xdg_config);
+
+ if (config_exists)
+ return _(implicit_ident_advice_config);
+ else
+ return _(implicit_ident_advice_noconfig);
+}
+
+void print_commit_summary(const char *prefix, const struct object_id *oid,
+ unsigned int flags)
+{
+ struct rev_info rev;
+ struct commit *commit;
+ struct strbuf format = STRBUF_INIT;
+ const char *head;
+ struct pretty_print_context pctx = {0};
+ struct strbuf author_ident = STRBUF_INIT;
+ struct strbuf committer_ident = STRBUF_INIT;
+
+ commit = lookup_commit(oid);
+ if (!commit)
+ die(_("couldn't look up newly created commit"));
+ if (parse_commit(commit))
+ die(_("could not parse newly created commit"));
+
+ strbuf_addstr(&format, "format:%h] %s");
+
+ format_commit_message(commit, "%an <%ae>", &author_ident, &pctx);
+ format_commit_message(commit, "%cn <%ce>", &committer_ident, &pctx);
+ if (strbuf_cmp(&author_ident, &committer_ident)) {
+ strbuf_addstr(&format, "\n Author: ");
+ strbuf_addbuf_percentquote(&format, &author_ident);
+ }
+ if (flags & SUMMARY_SHOW_AUTHOR_DATE) {
+ struct strbuf date = STRBUF_INIT;
+
+ format_commit_message(commit, "%ad", &date, &pctx);
+ strbuf_addstr(&format, "\n Date: ");
+ strbuf_addbuf_percentquote(&format, &date);
+ strbuf_release(&date);
+ }
+ if (!committer_ident_sufficiently_given()) {
+ strbuf_addstr(&format, "\n Committer: ");
+ strbuf_addbuf_percentquote(&format, &committer_ident);
+ if (advice_implicit_identity) {
+ strbuf_addch(&format, '\n');
+ strbuf_addstr(&format, implicit_ident_advice());
+ }
+ }
+ strbuf_release(&author_ident);
+ strbuf_release(&committer_ident);
+
+ init_revisions(&rev, prefix);
+ setup_revisions(0, NULL, &rev, NULL);
+
+ rev.diff = 1;
+ rev.diffopt.output_format =
+ DIFF_FORMAT_SHORTSTAT | DIFF_FORMAT_SUMMARY;
+
+ rev.verbose_header = 1;
+ rev.show_root_diff = 1;
+ get_commit_format(format.buf, &rev);
+ rev.always_show_header = 0;
+ rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
+ rev.diffopt.break_opt = 0;
+ diff_setup_done(&rev.diffopt);
+
+ head = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
+ if (!head)
+ die_errno(_("unable to resolve HEAD after creating commit"));
+ if (!strcmp(head, "HEAD"))
+ head = _("detached HEAD");
+ else
+ skip_prefix(head, "refs/heads/", &head);
+ printf("[%s%s ", head, (flags & SUMMARY_INITIAL_COMMIT) ?
+ _(" (root-commit)") : "");
+
+ if (!log_tree_commit(&rev, commit)) {
+ rev.always_show_header = 1;
+ rev.use_terminator = 1;
+ log_tree_commit(&rev, commit);
+ }
+
+ strbuf_release(&format);
+}
+
+static int parse_head(struct commit **head)
+{
+ struct commit *current_head;
+ struct object_id oid;
+
+ if (get_oid("HEAD", &oid)) {
+ current_head = NULL;
+ } else {
+ current_head = lookup_commit_reference(&oid);
+ if (!current_head)
+ return error(_("could not parse HEAD"));
+ if (oidcmp(&oid, &current_head->object.oid)) {
+ warning(_("HEAD %s is not a commit!"),
+ oid_to_hex(&oid));
+ }
+ if (parse_commit(current_head))
+ return error(_("could not parse HEAD commit"));
+ }
+ *head = current_head;
+
+ return 0;
+}
+
+/*
+ * Try to commit without forking 'git commit'. In some cases we need
+ * to run 'git commit' to display an error message.
+ *
+ * Returns:
+ * -1 - error, unable to commit
+ * 0 - success
+ * 1 - run 'git commit'
+ */
+static int try_to_commit(struct strbuf *msg, const char *author,
+ struct replay_opts *opts, unsigned int flags,
+ struct object_id *oid)
+{
+ struct object_id tree;
+ struct commit *current_head;
+ struct commit_list *parents = NULL;
+ struct commit_extra_header *extra = NULL;
+ struct strbuf err = STRBUF_INIT;
+ struct strbuf commit_msg = STRBUF_INIT;
+ char *amend_author = NULL;
+ const char *hook_commit = NULL;
+ enum commit_msg_cleanup_mode cleanup;
+ int res = 0;
+
+ if (parse_head(&current_head))
+ return -1;
+
+ if (flags & AMEND_MSG) {
+ const char *exclude_gpgsig[] = { "gpgsig", NULL };
+ const char *out_enc = get_commit_output_encoding();
+ const char *message = logmsg_reencode(current_head, NULL,
+ out_enc);
+
+ if (!msg) {
+ const char *orig_message = NULL;
+
+ find_commit_subject(message, &orig_message);
+ msg = &commit_msg;
+ strbuf_addstr(msg, orig_message);
+ hook_commit = "HEAD";
+ }
+ author = amend_author = get_author(message);
+ unuse_commit_buffer(current_head, message);
+ if (!author) {
+ res = error(_("unable to parse commit author"));
+ goto out;
+ }
+ parents = copy_commit_list(current_head->parents);
+ extra = read_commit_extra_headers(current_head, exclude_gpgsig);
+ } else if (current_head) {
+ commit_list_insert(current_head, &parents);
+ }
+
+ if (write_cache_as_tree(tree.hash, 0, NULL)) {
+ res = error(_("git write-tree failed to write a tree"));
+ goto out;
+ }
+
+ if (!(flags & ALLOW_EMPTY) && !oidcmp(current_head ?
+ &current_head->tree->object.oid :
+ &empty_tree_oid, &tree)) {
+ res = 1; /* run 'git commit' to display error message */
+ goto out;
+ }
+
+ if (find_hook("prepare-commit-msg")) {
+ res = run_prepare_commit_msg_hook(msg, hook_commit);
+ if (res)
+ goto out;
+ if (strbuf_read_file(&commit_msg, git_path_commit_editmsg(),
+ 2048) < 0) {
+ res = error_errno(_("unable to read commit message "
+ "from '%s'"),
+ git_path_commit_editmsg());
+ goto out;
+ }
+ msg = &commit_msg;
+ }
+
+ cleanup = (flags & CLEANUP_MSG) ? COMMIT_MSG_CLEANUP_ALL :
+ opts->default_msg_cleanup;
+
+ if (cleanup != COMMIT_MSG_CLEANUP_NONE)
+ strbuf_stripspace(msg, cleanup == COMMIT_MSG_CLEANUP_ALL);
+ if (!opts->allow_empty_message && message_is_empty(msg, cleanup)) {
+ res = 1; /* run 'git commit' to display error message */
+ goto out;
+ }
+
+ if (commit_tree_extended(msg->buf, msg->len, &tree, parents,
+ oid, author, opts->gpg_sign, extra)) {
+ res = error(_("failed to write commit object"));
+ goto out;
+ }
+
+ if (update_head_with_reflog(current_head, oid,
+ getenv("GIT_REFLOG_ACTION"), msg, &err)) {
+ res = error("%s", err.buf);
+ goto out;
+ }
+
+ if (flags & AMEND_MSG)
+ commit_post_rewrite(current_head, oid);
+
+out:
+ free_commit_extra_headers(extra);
+ strbuf_release(&err);
+ strbuf_release(&commit_msg);
+ free(amend_author);
+
+ return res;
+}
+
+static int do_commit(const char *msg_file, const char *author,
+ struct replay_opts *opts, unsigned int flags)
+{
+ int res = 1;
+
+ if (!(flags & EDIT_MSG) && !(flags & VERIFY_MSG)) {
+ struct object_id oid;
+ struct strbuf sb = STRBUF_INIT;
+
+ if (msg_file && strbuf_read_file(&sb, msg_file, 2048) < 0)
+ return error_errno(_("unable to read commit message "
+ "from '%s'"),
+ msg_file);
+
+ res = try_to_commit(msg_file ? &sb : NULL, author, opts, flags,
+ &oid);
+ strbuf_release(&sb);
+ if (!res) {
+ unlink(git_path_cherry_pick_head());
+ unlink(git_path_merge_msg());
+ if (!is_rebase_i(opts))
+ print_commit_summary(NULL, &oid,
+ SUMMARY_SHOW_AUTHOR_DATE);
+ return res;
+ }
+ }
+ if (res == 1)
+ return run_git_commit(msg_file, opts, flags);
+
+ return res;
+}
+
static int is_original_commit_empty(struct commit *commit)
{
const struct object_id *ptree_oid;
struct object_id head;
struct commit *base, *next, *parent;
const char *base_label, *next_label;
+ char *author = NULL;
struct commit_message msg = { NULL, NULL, NULL, NULL };
struct strbuf msgbuf = STRBUF_INIT;
int res, unborn = 0, allow;
strbuf_addstr(&msgbuf, oid_to_hex(&commit->object.oid));
strbuf_addstr(&msgbuf, ")\n");
}
+ if (!is_fixup(command))
+ author = get_author(msg.message);
}
if (command == TODO_REWORD)
}
}
+ if (opts->signoff)
+ append_signoff(&msgbuf, 0, 0);
+
if (is_rebase_i(opts) && write_author_script(msg.message) < 0)
res = -1;
else if (!opts->strategy || !strcmp(opts->strategy, "recursive") || command == TODO_REVERT) {
goto leave;
} else if (allow)
flags |= ALLOW_EMPTY;
- if (!opts->no_commit)
+ if (!opts->no_commit) {
fast_forward_edit:
- res = run_git_commit(msg_file, opts, flags);
+ if (author || command == TODO_REVERT || (flags & AMEND_MSG))
+ res = do_commit(msg_file, author, opts, flags);
+ else
+ res = error(_("unable to parse commit author"));
+ }
if (!res && final_fixup) {
unlink(rebase_path_fixup_msg());
leave:
free_message(commit, &msg);
+ free(author);
update_abort_safety_file();
return res;
p = short_commit_name(commit);
if (write_message(p, strlen(p), rebase_path_stopped_sha(), 1) < 0)
return -1;
+ if (update_ref("rebase", "REBASE_HEAD", &commit->object.oid,
+ NULL, REF_NO_DEREF, UPDATE_REFS_MSG_ON_ERR))
+ res |= error(_("could not update %s"), "REBASE_HEAD");
strbuf_addf(&buf, "%s/patch", get_dir(opts));
memset(&log_tree_opt, 0, sizeof(log_tree_opt));
unlink(rebase_path_author_script());
unlink(rebase_path_stopped_sha());
unlink(rebase_path_amend());
+ delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
}
if (item->command <= TODO_SQUASH) {
if (is_rebase_i(opts))
#ifndef SEQUENCER_H
#define SEQUENCER_H
+const char *git_path_commit_editmsg(void);
const char *git_path_seq_dir(void);
#define APPEND_SIGNOFF_DEDUP (1u << 0)
REPLAY_INTERACTIVE_REBASE
};
+enum commit_msg_cleanup_mode {
+ COMMIT_MSG_CLEANUP_SPACE,
+ COMMIT_MSG_CLEANUP_NONE,
+ COMMIT_MSG_CLEANUP_SCISSORS,
+ COMMIT_MSG_CLEANUP_ALL
+};
+
struct replay_opts {
enum replay_action action;
int mainline;
char *gpg_sign;
+ enum commit_msg_cleanup_mode default_msg_cleanup;
/* Merge strategy */
char *strategy;
};
#define REPLAY_OPTS_INIT { -1 }
+/* Call this to setup defaults before parsing command line options */
+void sequencer_init_config(struct replay_opts *opts);
int sequencer_pick_revisions(struct replay_opts *opts);
int sequencer_continue(struct replay_opts *opts);
int sequencer_rollback(struct replay_opts *opts);
void append_signoff(struct strbuf *msgbuf, int ignore_footer, unsigned flag);
void append_conflicts_hint(struct strbuf *msgbuf);
+int message_is_empty(const struct strbuf *sb,
+ enum commit_msg_cleanup_mode cleanup_mode);
+int template_untouched(const struct strbuf *sb, const char *template_file,
+ enum commit_msg_cleanup_mode cleanup_mode);
+int update_head_with_reflog(const struct commit *old_head,
+ const struct object_id *new_head,
+ const char *action, const struct strbuf *msg,
+ struct strbuf *err);
+void commit_post_rewrite(const struct commit *current_head,
+ const struct object_id *new_head);
+#define SUMMARY_INITIAL_COMMIT (1 << 0)
+#define SUMMARY_SHOW_AUTHOR_DATE (1 << 1)
+void print_commit_summary(const char *prefix, const struct object_id *oid,
+ unsigned int flags);
#endif
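A usage sketch for the summary flags declared above (the call site is
invented for illustration):

    /* after creating the first commit on an unborn branch */
    print_commit_summary(prefix, &oid,
                         SUMMARY_INITIAL_COMMIT | SUMMARY_SHOW_AUTHOR_DATE);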
{
char *r = prefix_path_gently(prefix, len, NULL, path);
if (!r)
- die("'%s' is outside repository", path);
+ die(_("'%s' is outside repository"), path);
return r;
}
free(to_free);
return 0; /* file does not exist */
}
- die_errno("failed to stat '%s'", arg);
+ die_errno(_("failed to stat '%s'"), arg);
}
static void NORETURN die_verify_filename(const char *prefix,
int diagnose_misspelt_rev)
{
if (*arg == '-')
- die("option '%s' must come before non-option arguments", arg);
+ die(_("option '%s' must come before non-option arguments"), arg);
if (looks_like_pathspec(arg) || check_filename(prefix, arg))
return;
die_verify_filename(prefix, arg, diagnose_misspelt_rev);
return;
if (work_tree_config_is_bogus)
- die("unable to set up work tree using invalid config");
+ die(_("unable to set up work tree using invalid config"));
work_tree = get_git_work_tree();
git_dir = get_git_dir();
if (!is_absolute_path(git_dir))
git_dir = real_path(get_git_dir());
if (!work_tree || chdir(work_tree))
- die("This operation must be run in a work tree");
+ die(_("this operation must be run in a work tree"));
/*
* Make sure subsequent git processes find correct worktree
;
else if (!strcmp(ext, "preciousobjects"))
data->precious_objects = git_config_bool(var, value);
- else
+ else if (!strcmp(ext, "partialclone")) {
+ if (!value)
+ return config_error_nonbool(var);
+ data->partial_clone = xstrdup(value);
+ } else
string_list_append(&data->unknown_extensions, ext);
} else if (strcmp(var, "core.bare") == 0) {
data->is_bare = git_config_bool(var, value);
}
repository_format_precious_objects = candidate->precious_objects;
+ repository_format_partial_clone = candidate->partial_clone;
string_list_clear(&candidate->unknown_extensions, 0);
if (!has_common) {
if (candidate->is_bare != -1) {
/* non-fatal; follow return path */
break;
case READ_GITFILE_ERR_OPEN_FAILED:
- die_errno("Error opening '%s'", path);
+ die_errno(_("error opening '%s'"), path);
case READ_GITFILE_ERR_TOO_LARGE:
- die("Too large to be a .git file: '%s'", path);
+ die(_("too large to be a .git file: '%s'"), path);
case READ_GITFILE_ERR_READ_FAILED:
- die("Error reading %s", path);
+ die(_("error reading %s"), path);
case READ_GITFILE_ERR_INVALID_FORMAT:
- die("Invalid gitfile format: %s", path);
+ die(_("invalid gitfile format: %s"), path);
case READ_GITFILE_ERR_NO_PATH:
- die("No path in gitfile: %s", path);
+ die(_("no path in gitfile: %s"), path);
case READ_GITFILE_ERR_NOT_A_REPO:
- die("Not a git repository: %s", dir);
+ die(_("not a git repository: %s"), dir);
default:
die("BUG: unknown error code");
}
int offset;
if (PATH_MAX - 40 < strlen(gitdirenv))
- die("'$%s' too big", GIT_DIR_ENVIRONMENT);
+ die(_("'$%s' too big"), GIT_DIR_ENVIRONMENT);
gitfile = (char*)read_gitfile(gitdirenv);
if (gitfile) {
free(gitfile);
return NULL;
}
- die("Not a git repository: '%s'", gitdirenv);
+ die(_("not a git repository: '%s'"), gitdirenv);
}
if (check_repository_format_gently(gitdirenv, repo_fmt, nongit_ok)) {
else {
char *core_worktree;
if (chdir(gitdirenv))
- die_errno("Could not chdir to '%s'", gitdirenv);
+ die_errno(_("cannot chdir to '%s'"), gitdirenv);
if (chdir(git_work_tree_cfg))
- die_errno("Could not chdir to '%s'", git_work_tree_cfg);
+ die_errno(_("cannot chdir to '%s'"), git_work_tree_cfg);
core_worktree = xgetcwd();
if (chdir(cwd->buf))
- die_errno("Could not come back to cwd");
+ die_errno(_("cannot come back to cwd"));
set_git_work_tree(core_worktree);
free(core_worktree);
}
if (offset >= 0) { /* cwd inside worktree? */
set_git_dir(real_path(gitdirenv));
if (chdir(worktree))
- die_errno("Could not chdir to '%s'", worktree);
+ die_errno(_("cannot chdir to '%s'"), worktree);
strbuf_addch(cwd, '/');
free(gitfile);
return cwd->buf + offset;
if (offset != cwd->len && !is_absolute_path(gitdir))
gitdir = to_free = real_pathdup(gitdir, 1);
if (chdir(cwd->buf))
- die_errno("Could not come back to cwd");
+ die_errno(_("cannot come back to cwd"));
ret = setup_explicit_git_dir(gitdir, cwd, repo_fmt, nongit_ok);
free(to_free);
return ret;
if (is_bare_repository_cfg > 0) {
set_git_dir(offset == cwd->len ? gitdir : real_path(gitdir));
if (chdir(cwd->buf))
- die_errno("Could not come back to cwd");
+ die_errno(_("cannot come back to cwd"));
return NULL;
}
gitdir = offset == cwd->len ? "." : xmemdupz(cwd->buf, offset);
if (chdir(cwd->buf))
- die_errno("Could not come back to cwd");
+ die_errno(_("cannot come back to cwd"));
return setup_explicit_git_dir(gitdir, cwd, repo_fmt, nongit_ok);
}
inside_work_tree = 0;
if (offset != cwd->len) {
if (chdir(cwd->buf))
- die_errno("Cannot come back to cwd");
+ die_errno(_("cannot come back to cwd"));
root_len = offset_1st_component(cwd->buf);
strbuf_setlen(cwd, offset > root_len ? offset : root_len);
set_git_dir(cwd->buf);
static const char *setup_nongit(const char *cwd, int *nongit_ok)
{
if (!nongit_ok)
- die(_("Not a git repository (or any of the parent directories): %s"), DEFAULT_GIT_DIR_ENVIRONMENT);
+ die(_("not a git repository (or any of the parent directories): %s"), DEFAULT_GIT_DIR_ENVIRONMENT);
if (chdir(cwd))
- die_errno(_("Cannot come back to cwd"));
+ die_errno(_("cannot come back to cwd"));
*nongit_ok = 1;
return NULL;
}
{
struct stat buf;
if (stat(path, &buf)) {
- die_errno("failed to stat '%*s%s%s'",
+ die_errno(_("failed to stat '%*s%s%s'"),
prefix_len,
prefix ? prefix : "",
prefix ? "/" : "", path);
break;
case GIT_DIR_DISCOVERED:
if (dir.len < cwd.len && chdir(dir.buf))
- die(_("Cannot change to '%s'"), dir.buf);
+ die(_("cannot change to '%s'"), dir.buf);
prefix = setup_discovered_git_dir(gitdir.buf, &cwd, dir.len,
&repo_fmt, nongit_ok);
break;
case GIT_DIR_BARE:
if (dir.len < cwd.len && chdir(dir.buf))
- die(_("Cannot change to '%s'"), dir.buf);
+ die(_("cannot change to '%s'"), dir.buf);
prefix = setup_bare_git_dir(&cwd, dir.len, &repo_fmt, nongit_ok);
break;
case GIT_DIR_HIT_CEILING:
strbuf_release(&dir);
return NULL;
}
- die(_("Not a git repository (or any parent up to mount point %s)\n"
+ die(_("not a git repository (or any parent up to mount point %s)\n"
"Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set)."),
dir.buf);
default:
/* A filemode value was given: 0xxx */
if ((i & 0600) != 0600)
- die(_("Problem with core.sharedRepository filemode value "
+ die(_("problem with core.sharedRepository filemode value "
"(0%.3o).\nThe owner of files must always have "
"read and write permissions."), i);
while (fd != -1 && fd < 2)
fd = dup(fd);
if (fd == -1)
- die_errno("open /dev/null or dup failed");
+ die_errno(_("open /dev/null or dup failed"));
if (fd > 2)
close(fd);
}
case 0:
break;
case -1:
- die_errno("fork failed");
+ die_errno(_("fork failed"));
default:
exit(0);
}
if (setsid() == -1)
- die_errno("setsid failed");
+ die_errno(_("setsid failed"));
close(0);
close(1);
close(2);
} while (lo < hi);
return -lo-1;
}
+
+int bsearch_hash(const unsigned char *sha1, const uint32_t *fanout_nbo,
+ const unsigned char *table, size_t stride, uint32_t *result)
+{
+ uint32_t hi, lo;
+
+ hi = ntohl(fanout_nbo[*sha1]);
+ lo = ((*sha1 == 0x0) ? 0 : ntohl(fanout_nbo[*sha1 - 1]));
+
+ while (lo < hi) {
+ unsigned mi = lo + (hi - lo) / 2;
+ int cmp = hashcmp(table + mi * stride, sha1);
+
+ if (!cmp) {
+ if (result)
+ *result = mi;
+ return 1;
+ }
+ if (cmp > 0)
+ hi = mi;
+ else
+ lo = mi + 1;
+ }
+
+ if (result)
+ *result = lo;
+ return 0;
+}
void *table,
size_t nr,
sha1_access_fn fn);
+
+/*
+ * Searches for sha1 in table, using the given fanout table to determine the
+ * interval to search, then using binary search. Returns 1 if found, 0 if not.
+ *
+ * Takes the following parameters:
+ *
+ * - sha1: the hash to search for
+ * - fanout_nbo: a 256-element array of NETWORK-order 32-bit integers; the
+ * integer at position i represents the number of elements in table whose
+ * first byte is less than or equal to i
+ * - table: a sorted list of hashes with optional extra information in between
+ * - stride: distance between two consecutive elements in table (should be
+ * GIT_MAX_RAWSZ or greater)
+ * - result: if not NULL, this function stores the element index of the
+ * position found (if the search is successful) or the index of the least
+ * element that is greater than sha1 (if the search is not successful)
+ *
+ * This function does not verify the validity of the fanout table.
+ */
+int bsearch_hash(const unsigned char *sha1, const uint32_t *fanout_nbo,
+ const unsigned char *table, size_t stride, uint32_t *result);
#endif
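A minimal sketch of a lookup against a pack-.idx-style table, i.e. a
256-entry network-order fanout followed by tightly packed 20-byte hashes
(helper name invented):

    static int table_lookup(const unsigned char *sha1,
                            const uint32_t *fanout,   /* 256 network-order counts */
                            const unsigned char *entries)
    {
            uint32_t pos;

            if (bsearch_hash(sha1, fanout, entries, GIT_SHA1_RAWSZ, &pos))
                    return 1;   /* hash sits at entries + pos * GIT_SHA1_RAWSZ */
            return 0;           /* pos is where the hash would be inserted */
    }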
#include "bulk-checkin.h"
#include "streaming.h"
#include "dir.h"
-#include "mru.h"
#include "list.h"
#include "mergesort.h"
#include "quote.h"
#include "packfile.h"
+#include "fetch-object.h"
const unsigned char null_sha1[GIT_MAX_RAWSZ];
const struct object_id null_oid;
EMPTY_BLOB_SHA1_BIN_LITERAL
};
-static void git_hash_sha1_init(void *ctx)
+static void git_hash_sha1_init(git_hash_ctx *ctx)
{
- git_SHA1_Init((git_SHA_CTX *)ctx);
+ git_SHA1_Init(&ctx->sha1);
}
-static void git_hash_sha1_update(void *ctx, const void *data, size_t len)
+static void git_hash_sha1_update(git_hash_ctx *ctx, const void *data, size_t len)
{
- git_SHA1_Update((git_SHA_CTX *)ctx, data, len);
+ git_SHA1_Update(&ctx->sha1, data, len);
}
-static void git_hash_sha1_final(unsigned char *hash, void *ctx)
+static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx)
{
- git_SHA1_Final(hash, (git_SHA_CTX *)ctx);
+ git_SHA1_Final(hash, &ctx->sha1);
}
-static void git_hash_unknown_init(void *ctx)
+static void git_hash_unknown_init(git_hash_ctx *ctx)
{
die("trying to init unknown hash");
}
-static void git_hash_unknown_update(void *ctx, const void *data, size_t len)
+static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
{
die("trying to update unknown hash");
}
-static void git_hash_unknown_final(unsigned char *hash, void *ctx)
+static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
{
die("trying to finalize unknown hash");
}
0x00000000,
0,
0,
- 0,
git_hash_unknown_init,
git_hash_unknown_update,
git_hash_unknown_final,
"sha-1",
/* "sha1", big-endian */
0x73686131,
- sizeof(git_SHA_CTX),
GIT_SHA1_RAWSZ,
GIT_SHA1_HEXSZ,
git_hash_sha1_init,
}
-static enum safe_crlf get_safe_crlf(unsigned flags)
+static int get_conv_flags(unsigned flags)
{
if (flags & HASH_RENORMALIZE)
- return SAFE_CRLF_RENORMALIZE;
+ return CONV_EOL_RENORMALIZE;
else if (flags & HASH_WRITE_OBJECT)
- return safe_crlf;
+ return global_conv_flags_eol;
else
- return SAFE_CRLF_FALSE;
+ return 0;
}
}
}
-const char *sha1_file_name(const unsigned char *sha1)
+void sha1_file_name(struct strbuf *buf, const unsigned char *sha1)
{
- static struct strbuf buf = STRBUF_INIT;
-
- strbuf_reset(&buf);
- strbuf_addf(&buf, "%s/", get_object_directory());
-
- fill_sha1_path(&buf, sha1);
- return buf.buf;
+ strbuf_addstr(buf, get_object_directory());
+ strbuf_addch(buf, '/');
+ fill_sha1_path(buf, sha1);
}
struct strbuf *alt_scratch_buf(struct alternate_object_database *alt)
static int check_and_freshen_local(const unsigned char *sha1, int freshen)
{
- return check_and_freshen_file(sha1_file_name(sha1), freshen);
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(&buf, sha1);
+
+ return check_and_freshen_file(buf.buf, freshen);
}
static int check_and_freshen_nonlocal(const unsigned char *sha1, int freshen)
int check_sha1_signature(const unsigned char *sha1, void *map,
unsigned long size, const char *type)
{
- unsigned char real_sha1[20];
+ struct object_id real_oid;
enum object_type obj_type;
struct git_istream *st;
- git_SHA_CTX c;
+ git_hash_ctx c;
char hdr[32];
int hdrlen;
if (map) {
- hash_sha1_file(map, size, type, real_sha1);
- return hashcmp(sha1, real_sha1) ? -1 : 0;
+ hash_object_file(map, size, type, &real_oid);
+ return hashcmp(sha1, real_oid.hash) ? -1 : 0;
}
st = open_istream(sha1, &obj_type, &size, NULL);
hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(obj_type), size) + 1;
/* Sha1.. */
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, hdrlen);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
for (;;) {
char buf[1024 * 16];
ssize_t readlen = read_istream(st, buf, sizeof(buf));
}
if (!readlen)
break;
- git_SHA1_Update(&c, buf, readlen);
+ the_hash_algo->update_fn(&c, buf, readlen);
}
- git_SHA1_Final(real_sha1, &c);
+ the_hash_algo->final_fn(real_oid.hash, &c);
close_istream(st);
- return hashcmp(sha1, real_sha1) ? -1 : 0;
+ return hashcmp(sha1, real_oid.hash) ? -1 : 0;
}
int git_open_cloexec(const char *name, int flags)
const char **path)
{
struct alternate_object_database *alt;
- *path = sha1_file_name(sha1);
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(&buf, sha1);
+ *path = buf.buf;
if (!lstat(*path, st))
return 0;
int fd;
struct alternate_object_database *alt;
int most_interesting_errno;
- *path = sha1_file_name(sha1);
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(&buf, sha1);
+ *path = buf.buf;
fd = git_open(*path);
if (fd >= 0)
return fd;
return (status < 0) ? status : 0;
}
+int fetch_if_missing = 1;
+
int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, unsigned flags)
{
static struct object_info blank_oi = OBJECT_INFO_INIT;
const unsigned char *real = (flags & OBJECT_INFO_LOOKUP_REPLACE) ?
lookup_replace_object(sha1) :
sha1;
+ int already_retried = 0;
if (is_null_sha1(real))
return -1;
}
}
- if (!find_pack_entry(real, &e)) {
+ while (1) {
+ if (find_pack_entry(real, &e))
+ break;
+
/* Most likely it's a loose object. */
if (!sha1_loose_object_info(real, oi, flags))
return 0;
/* Not a loose object; someone else may have just packed it. */
- if (flags & OBJECT_INFO_QUICK) {
- return -1;
- } else {
- reprepare_packed_git();
- if (!find_pack_entry(real, &e))
- return -1;
+ reprepare_packed_git();
+ if (find_pack_entry(real, &e))
+ break;
+
+ /* Check if it is a missing object */
+ if (fetch_if_missing && repository_format_partial_clone &&
+ !already_retried) {
+ /*
+ * TODO Investigate having fetch_object() return
+ * TODO error/success and stopping the music here.
+ */
+ fetch_object(repository_format_partial_clone, real);
+ already_retried = 1;
+ continue;
}
+
+ return -1;
}
if (oi == &blank_oi)
* information below, so return early.
*/
return 0;
-
rtype = packed_object_info(e.p, e.offset, oi);
if (rtype < 0) {
mark_bad_packed_object(e.p, real);
return content;
}
-int pretend_sha1_file(void *buf, unsigned long len, enum object_type type,
- unsigned char *sha1)
+int pretend_object_file(void *buf, unsigned long len, enum object_type type,
+ struct object_id *oid)
{
struct cached_object *co;
- hash_sha1_file(buf, len, typename(type), sha1);
- if (has_sha1_file(sha1) || find_cached_object(sha1))
+ hash_object_file(buf, len, typename(type), oid);
+ if (has_sha1_file(oid->hash) || find_cached_object(oid->hash))
return 0;
ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc);
co = &cached_objects[cached_object_nr++];
co->type = type;
co->buf = xmalloc(len);
memcpy(co->buf, buf, len);
- hashcpy(co->sha1, sha1);
+ hashcpy(co->sha1, oid->hash);
return 0;
}
}
}
-static void write_sha1_file_prepare(const void *buf, unsigned long len,
- const char *type, unsigned char *sha1,
- char *hdr, int *hdrlen)
+static void write_object_file_prepare(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ char *hdr, int *hdrlen)
{
- git_SHA_CTX c;
+ git_hash_ctx c;
/* Generate the header */
*hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1;
/* Sha1.. */
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, *hdrlen);
- git_SHA1_Update(&c, buf, len);
- git_SHA1_Final(sha1, &c);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, *hdrlen);
+ the_hash_algo->update_fn(&c, buf, len);
+ the_hash_algo->final_fn(oid->hash, &c);
}
/*
return 0;
}
-int hash_sha1_file(const void *buf, unsigned long len, const char *type,
- unsigned char *sha1)
+int hash_object_file(const void *buf, unsigned long len, const char *type,
+ struct object_id *oid)
{
char hdr[32];
int hdrlen = sizeof(hdr);
- write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
+ write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
return 0;
}
return fd;
}
-static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
- const void *buf, unsigned long len, time_t mtime)
+static int write_loose_object(const struct object_id *oid, char *hdr,
+ int hdrlen, const void *buf, unsigned long len,
+ time_t mtime)
{
int fd, ret;
unsigned char compressed[4096];
git_zstream stream;
- git_SHA_CTX c;
- unsigned char parano_sha1[20];
+ git_hash_ctx c;
+ struct object_id parano_oid;
static struct strbuf tmp_file = STRBUF_INIT;
- const char *filename = sha1_file_name(sha1);
+ static struct strbuf filename = STRBUF_INIT;
+
+ strbuf_reset(&filename);
+ sha1_file_name(&filename, oid->hash);
- fd = create_tmpfile(&tmp_file, filename);
+ fd = create_tmpfile(&tmp_file, filename.buf);
if (fd < 0) {
if (errno == EACCES)
return error("insufficient permission for adding an object to repository database %s", get_object_directory());
git_deflate_init(&stream, zlib_compression_level);
stream.next_out = compressed;
stream.avail_out = sizeof(compressed);
- git_SHA1_Init(&c);
+ the_hash_algo->init_fn(&c);
/* First header.. */
stream.next_in = (unsigned char *)hdr;
stream.avail_in = hdrlen;
while (git_deflate(&stream, 0) == Z_OK)
; /* nothing */
- git_SHA1_Update(&c, hdr, hdrlen);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
/* Then the data itself.. */
stream.next_in = (void *)buf;
do {
unsigned char *in0 = stream.next_in;
ret = git_deflate(&stream, Z_FINISH);
- git_SHA1_Update(&c, in0, stream.next_in - in0);
+ the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
die("unable to write sha1 file");
stream.next_out = compressed;
} while (ret == Z_OK);
if (ret != Z_STREAM_END)
- die("unable to deflate new object %s (%d)", sha1_to_hex(sha1), ret);
+ die("unable to deflate new object %s (%d)", oid_to_hex(oid),
+ ret);
ret = git_deflate_end_gently(&stream);
if (ret != Z_OK)
- die("deflateEnd on object %s failed (%d)", sha1_to_hex(sha1), ret);
- git_SHA1_Final(parano_sha1, &c);
- if (hashcmp(sha1, parano_sha1) != 0)
- die("confused by unstable object source data for %s", sha1_to_hex(sha1));
+ die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
+ ret);
+ the_hash_algo->final_fn(parano_oid.hash, &c);
+ if (oidcmp(oid, &parano_oid) != 0)
+ die("confused by unstable object source data for %s",
+ oid_to_hex(oid));
close_sha1_file(fd);
warning_errno("failed utime() on %s", tmp_file.buf);
}
- return finalize_object_file(tmp_file.buf, filename);
+ return finalize_object_file(tmp_file.buf, filename.buf);
}
static int freshen_loose_object(const unsigned char *sha1)
return 1;
}
-int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1)
+int write_object_file(const void *buf, unsigned long len, const char *type,
+ struct object_id *oid)
{
char hdr[32];
int hdrlen = sizeof(hdr);
/* Normally if we have it in the pack then we do not bother writing
* it out into .git/objects/??/?{38} file.
*/
- write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
- if (freshen_packed_object(sha1) || freshen_loose_object(sha1))
+ write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
+ if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
return 0;
- return write_loose_object(sha1, hdr, hdrlen, buf, len, 0);
+ return write_loose_object(oid, hdr, hdrlen, buf, len, 0);
}
-int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type,
- struct object_id *oid, unsigned flags)
+int hash_object_file_literally(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ unsigned flags)
{
char *header;
int hdrlen, status = 0;
/* type string, SP, %lu of the length plus NUL must fit this */
hdrlen = strlen(type) + 32;
header = xmalloc(hdrlen);
- write_sha1_file_prepare(buf, len, type, oid->hash, header, &hdrlen);
+ write_object_file_prepare(buf, len, type, oid, header, &hdrlen);
if (!(flags & HASH_WRITE_OBJECT))
goto cleanup;
if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
goto cleanup;
- status = write_loose_object(oid->hash, header, hdrlen, buf, len, 0);
+ status = write_loose_object(oid, header, hdrlen, buf, len, 0);
cleanup:
free(header);
return status;
}
-int force_object_loose(const unsigned char *sha1, time_t mtime)
+int force_object_loose(const struct object_id *oid, time_t mtime)
{
void *buf;
unsigned long len;
int hdrlen;
int ret;
- if (has_loose_object(sha1))
+ if (has_loose_object(oid->hash))
return 0;
- buf = read_object(sha1, &type, &len);
+ buf = read_object(oid->hash, &type, &len);
if (!buf)
- return error("cannot read sha1_file for %s", sha1_to_hex(sha1));
+ return error("cannot read sha1_file for %s", oid_to_hex(oid));
hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1;
- ret = write_loose_object(sha1, hdr, hdrlen, buf, len, mtime);
+ ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
free(buf);
return ret;
if ((type == OBJ_BLOB) && path) {
struct strbuf nbuf = STRBUF_INIT;
if (convert_to_git(&the_index, path, buf, size, &nbuf,
- get_safe_crlf(flags))) {
+ get_conv_flags(flags))) {
buf = strbuf_detach(&nbuf, &size);
re_allocated = 1;
}
}
if (write_object)
- ret = write_sha1_file(buf, size, typename(type), oid->hash);
+ ret = write_object_file(buf, size, typename(type), oid);
else
- ret = hash_sha1_file(buf, size, typename(type), oid->hash);
+ ret = hash_object_file(buf, size, typename(type), oid);
if (re_allocated)
free(buf);
return ret;
assert(would_convert_to_git_filter_fd(path));
convert_to_git_filter_fd(&the_index, path, fd, &sbuf,
- get_safe_crlf(flags));
+ get_conv_flags(flags));
if (write_object)
- ret = write_sha1_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB),
- oid->hash);
+ ret = write_object_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB),
+ oid);
else
- ret = hash_sha1_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB),
- oid->hash);
+ ret = hash_object_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB),
+ oid);
strbuf_release(&sbuf);
return ret;
}
if (strbuf_readlink(&sb, path, st->st_size))
return error_errno("readlink(\"%s\")", path);
if (!(flags & HASH_WRITE_OBJECT))
- hash_sha1_file(sb.buf, sb.len, blob_type, oid->hash);
- else if (write_sha1_file(sb.buf, sb.len, blob_type, oid->hash))
+ hash_object_file(sb.buf, sb.len, blob_type, oid);
+ else if (write_object_file(sb.buf, sb.len, blob_type, oid))
rc = error("%s: failed to insert into database", path);
strbuf_release(&sb);
break;
const char *path,
const unsigned char *expected_sha1)
{
- git_SHA_CTX c;
+ git_hash_ctx c;
unsigned char real_sha1[GIT_MAX_RAWSZ];
unsigned char buf[4096];
unsigned long total_read;
int status = Z_OK;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, stream->total_out);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, stream->total_out);
/*
* We already read some bytes into hdr, but the ones up to the NUL
if (size - total_read < stream->avail_out)
stream->avail_out = size - total_read;
status = git_inflate(stream, Z_FINISH);
- git_SHA1_Update(&c, buf, stream->next_out - buf);
+ the_hash_algo->update_fn(&c, buf, stream->next_out - buf);
total_read += stream->next_out - buf;
}
git_inflate_end(stream);
return -1;
}
- git_SHA1_Final(real_sha1, &c);
+ the_hash_algo->final_fn(real_sha1, &c);
if (hashcmp(expected_sha1, real_sha1)) {
error("sha1 mismatch for %s (expected %s)", path,
sha1_to_hex(expected_sha1));
/* Plumbing with collision-detecting SHA1 code */
-#ifdef DC_SHA1_SUBMODULE
-#include "sha1collisiondetection/lib/sha1.h"
-#elif defined(DC_SHA1_EXTERNAL)
+#ifdef DC_SHA1_EXTERNAL
#include <sha1dc/sha1.h>
+#elif defined(DC_SHA1_SUBMODULE)
+#include "sha1collisiondetection/lib/sha1.h"
#else
#include "sha1dc/sha1.h"
#endif
ALLOC_GROW(entries, nr_entries+1, nr_alloc);
entries[nr_entries++] = ce;
}
+ if (is_null_oid(&ce->oid))
+ istate->drop_cache_tree = 1;
}
}
}
/*
- * Perform the version and capability negotiation as described in the "Long
- * Running Filter Process" section of the gitattributes documentation using the
+ * Perform the version and capability negotiation as described in the
+ * "Handshake" section of long-running-process-protocol.txt using the
* given requested versions and capabilities. The "versions" and "capabilities"
* parameters are arrays terminated by a 0 or blank struct.
*
/*
* submodule cache lookup structure
* There is one shared set of 'struct submodule' entries which can be
- * looked up by their sha1 blob id of the .gitmodule file and either
+ * looked up by their sha1 blob id of the .gitmodules file and either
* using path or name as key.
* for_path stores submodule entries with path as key
* for_name stores submodule entries with name as key
/*
* We iterate over the name hash here to be symmetric with the
* allocation of struct submodule entries. Each is allocated by
- * their .gitmodule blob sha1 and submodule name.
+ * their .gitmodules blob sha1 and submodule name.
*/
hashmap_iter_init(&cache->for_name, &iter);
while ((entry = hashmap_iter_next(&iter)))
test_expect_code 1 git merge "merge msg" B master
'
- - test_must_fail <git-command>
+ - test_must_fail [<options>] <git-command>
Run a git command and ensure it fails in a controlled way. Use
this instead of "! <git-command>". When git-command dies due to a
treats it as just another expected failure, which would let such a
bug go unnoticed.
- - test_might_fail <git-command>
+ Accepts the following options:
+
+ ok=<signal-name>[,<...>]:
+ Don't treat an exit caused by the given signal as error.
+ Multiple signals can be specified as a comma separated list.
+ Currently recognized signal names are: sigpipe, success.
+ (Don't use 'success', use 'test_might_fail' instead.)
+
+ - test_might_fail [<options>] <git-command>
Similar to test_must_fail, but tolerate success, too. Use this
instead of "<git-command> || :" to catch failures due to segv.
+ Accepts the same options as test_must_fail.
+
- test_cmp <expected> <actual>
Check whether the content of the <actual> file matches the
printf("no untracked cache\n");
return 0;
}
- printf("info/exclude %s\n", sha1_to_hex(uc->ss_info_exclude.sha1));
- printf("core.excludesfile %s\n", sha1_to_hex(uc->ss_excludes_file.sha1));
+ printf("info/exclude %s\n", oid_to_hex(&uc->ss_info_exclude.oid));
+ printf("core.excludesfile %s\n", oid_to_hex(&uc->ss_excludes_file.oid));
printf("exclude_per_dir %s\n", uc->exclude_per_dir);
printf("flags %08x\n", uc->dir_flags);
if (uc->root)
#include "git-compat-util.h"
#include "hashmap.h"
+#include "strbuf.h"
struct test_entry
{
return strcmp(e1->key, key ? key : e2->key);
}
-static struct test_entry *alloc_test_entry(int hash, char *key, int klen,
- char *value, int vlen)
+static struct test_entry *alloc_test_entry(unsigned int hash,
+ char *key, char *value)
{
- struct test_entry *entry = malloc(sizeof(struct test_entry) + klen
- + vlen + 2);
+ size_t klen = strlen(key);
+ size_t vlen = strlen(value);
+ struct test_entry *entry = xmalloc(st_add4(sizeof(*entry), klen, vlen, 2));
hashmap_entry_init(entry, hash);
memcpy(entry->key, key, klen + 1);
memcpy(entry->key + klen + 1, value, vlen + 1);
unsigned int *hashes;
unsigned int i, j;
- entries = malloc(TEST_SIZE * sizeof(struct test_entry *));
- hashes = malloc(TEST_SIZE * sizeof(int));
+ ALLOC_ARRAY(entries, TEST_SIZE);
+ ALLOC_ARRAY(hashes, TEST_SIZE);
for (i = 0; i < TEST_SIZE; i++) {
- snprintf(buf, sizeof(buf), "%i", i);
- entries[i] = alloc_test_entry(0, buf, strlen(buf), "", 0);
+ xsnprintf(buf, sizeof(buf), "%i", i);
+ entries[i] = alloc_test_entry(0, buf, "");
hashes[i] = hash(method, i, entries[i]->key);
}
*/
int cmd_main(int argc, const char **argv)
{
- char line[1024];
+ struct strbuf line = STRBUF_INIT;
struct hashmap map;
int icase;
hashmap_init(&map, test_entry_cmp, &icase, 0);
/* process commands from stdin */
- while (fgets(line, sizeof(line), stdin)) {
+ while (strbuf_getline(&line, stdin) != EOF) {
char *cmd, *p1 = NULL, *p2 = NULL;
- int l1 = 0, l2 = 0, hash = 0;
+ unsigned int hash = 0;
struct test_entry *entry;
/* break line into command and up to two parameters */
- cmd = strtok(line, DELIM);
+ cmd = strtok(line.buf, DELIM);
/* ignore empty lines */
if (!cmd || *cmd == '#')
continue;
p1 = strtok(NULL, DELIM);
if (p1) {
- l1 = strlen(p1);
hash = icase ? strihash(p1) : strhash(p1);
p2 = strtok(NULL, DELIM);
- if (p2)
- l2 = strlen(p2);
}
- if (!strcmp("hash", cmd) && l1) {
+ if (!strcmp("hash", cmd) && p1) {
/* print results of different hash functions */
- printf("%u %u %u %u\n", strhash(p1), memhash(p1, l1),
- strihash(p1), memihash(p1, l1));
+ printf("%u %u %u %u\n",
+ strhash(p1), memhash(p1, strlen(p1)),
+ strihash(p1), memihash(p1, strlen(p1)));
- } else if (!strcmp("add", cmd) && l1 && l2) {
+ } else if (!strcmp("add", cmd) && p1 && p2) {
/* create entry with key = p1, value = p2 */
- entry = alloc_test_entry(hash, p1, l1, p2, l2);
+ entry = alloc_test_entry(hash, p1, p2);
/* add to hashmap */
hashmap_add(&map, entry);
- } else if (!strcmp("put", cmd) && l1 && l2) {
+ } else if (!strcmp("put", cmd) && p1 && p2) {
/* create entry with key = p1, value = p2 */
- entry = alloc_test_entry(hash, p1, l1, p2, l2);
+ entry = alloc_test_entry(hash, p1, p2);
/* add / replace entry */
entry = hashmap_put(&map, entry);
puts(entry ? get_value(entry) : "NULL");
free(entry);
- } else if (!strcmp("get", cmd) && l1) {
+ } else if (!strcmp("get", cmd) && p1) {
/* lookup entry in hashmap */
entry = hashmap_get_from_hash(&map, hash, p1);
entry = hashmap_get_next(&map, entry);
}
- } else if (!strcmp("remove", cmd) && l1) {
+ } else if (!strcmp("remove", cmd) && p1) {
/* setup static key */
struct hashmap_entry key;
printf("%u %u\n", map.tablesize,
hashmap_get_size(&map));
- } else if (!strcmp("intern", cmd) && l1) {
+ } else if (!strcmp("intern", cmd) && p1) {
/* test that strintern works */
const char *i1 = strintern(p1);
else
printf("%s\n", i1);
- } else if (!strcmp("perfhashmap", cmd) && l1 && l2) {
+ } else if (!strcmp("perfhashmap", cmd) && p1 && p2) {
perf_hashmap(atoi(p1), atoi(p2));
}
}
+ strbuf_release(&line);
hashmap_free(&map, 1);
return 0;
}
struct child_process proc = CHILD_PROCESS_INIT;
int jobs;
+ if (argc < 3)
+ return 1;
+ while (!strcmp(argv[1], "env")) {
+ if (!argv[2])
+ die("env specifier without a value");
+ argv_array_push(&proc.env_array, argv[2]);
+ argv += 2;
+ argc -= 2;
+ }
if (argc < 3)
return 1;
proc.argv = (const char **)argv + 2;
return !!wildmatch(argv[3], argv[2], WM_PATHNAME | WM_CASEFOLD);
else if (!strcmp(argv[1], "pathmatch"))
return !!wildmatch(argv[3], argv[2], 0);
+ else if (!strcmp(argv[1], "ipathmatch"))
+ return !!wildmatch(argv[3], argv[2], WM_CASEFOLD);
else
return 1;
}
GIT_DAEMON_PID=
GIT_DAEMON_DOCUMENT_ROOT_PATH="$PWD"/repo
-GIT_DAEMON_URL=git://127.0.0.1:$LIB_GIT_DAEMON_PORT
+GIT_DAEMON_HOST_PORT=127.0.0.1:$LIB_GIT_DAEMON_PORT
+GIT_DAEMON_URL=git://$GIT_DAEMON_HOST_PORT
start_git_daemon() {
if test -n "$GIT_DAEMON_PID"
"$@" "$GIT_DAEMON_DOCUMENT_ROOT_PATH" \
>&3 2>git_daemon_output &
GIT_DAEMON_PID=$!
+ >daemon.log
{
- read line <&7
- echo >&4 "$line"
- cat <&7 >&4 &
- } 7<git_daemon_output &&
+ read -r line <&7
+ printf "%s\n" "$line"
+ printf >&4 "%s\n" "$line"
+ (
+ while read -r line <&7
+ do
+ printf "%s\n" "$line"
+ printf >&4 "%s\n" "$line"
+ done
+ ) &
+ } 7<git_daemon_output >>"$TRASH_DIRECTORY/daemon.log" &&
# Check expected output
if test x"$(expr "$line" : "\[[0-9]*\] \(.*\)")" != x"Ready to rumble"
GIT_DAEMON_PID=
rm -f git_daemon_output
}
+
+# A stripped-down version of a netcat client, that connects to a "host:port"
+# given in $1, sends its stdin followed by EOF, then dumps the response (until
+# EOF) to stdout.
+fake_nc() {
+ if ! test_declared_prereq FAKENC
+ then
+ echo >&4 "fake_nc: need to declare FAKENC prerequisite"
+ return 127
+ fi
+ perl -Mstrict -MIO::Socket::INET -e '
+ my $s = IO::Socket::INET->new(shift)
+ or die "unable to open socket: $!";
+ print $s <STDIN>;
+ $s->shutdown(1);
+ print <$s>;
+ ' "$@"
+}
+
+test_lazy_prereq FAKENC '
+ perl -MIO::Socket::INET -e "exit 0"
+'
#!/usr/bin/perl
-use lib '../../perl/blib/lib';
+use lib '../../perl/build/lib';
use strict;
use warnings;
use JSON;
return $out;
}
-my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests, $codespeed);
+my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests,
+ $codespeed, $subsection, $reponame);
while (scalar @ARGV) {
my $arg = $ARGV[0];
my $dir;
shift @ARGV;
next;
}
+ if ($arg eq "--subsection") {
+ shift @ARGV;
+ $subsection = $ARGV[0];
+ shift @ARGV;
+ if (! $subsection) {
+ die "empty subsection";
+ }
+ next;
+ }
+ if ($arg eq "--reponame") {
+ shift @ARGV;
+ $reponame = $ARGV[0];
+ shift @ARGV;
+ if (! $reponame) {
+ die "empty reponame";
+ }
+ next;
+ }
last if -f $arg or $arg eq "--";
if (! -d $arg) {
my $rev = Git::command_oneline(qw(rev-parse --verify), $arg);
}
my $resultsdir = "test-results";
-my $results_section = "";
-if (exists $ENV{GIT_PERF_SUBSECTION} and $ENV{GIT_PERF_SUBSECTION} ne "") {
- $resultsdir .= "/" . $ENV{GIT_PERF_SUBSECTION};
- $results_section = $ENV{GIT_PERF_SUBSECTION};
+
+if (! $subsection and
+ exists $ENV{GIT_PERF_SUBSECTION} and
+ $ENV{GIT_PERF_SUBSECTION} ne "") {
+ $subsection = $ENV{GIT_PERF_SUBSECTION};
+}
+
+if ($subsection) {
+ $resultsdir .= "/" . $subsection;
}
my @subtests;
}
sub print_codespeed_results {
- my ($results_section) = @_;
+ my ($subsection) = @_;
my $project = "Git";
my $executable = `uname -s -m`;
chomp $executable;
- if ($results_section ne "") {
- $executable .= ", " . $results_section;
+ if ($subsection) {
+ $executable .= ", " . $subsection;
}
my $environment;
- if (exists $ENV{GIT_PERF_REPO_NAME} and $ENV{GIT_PERF_REPO_NAME} ne "") {
+ if ($reponame) {
+ $environment = $reponame;
+ } elsif (exists $ENV{GIT_PERF_REPO_NAME} and $ENV{GIT_PERF_REPO_NAME} ne "") {
$environment = $ENV{GIT_PERF_REPO_NAME};
} elsif (exists $ENV{GIT_TEST_INSTALLED} and $ENV{GIT_TEST_INSTALLED} ne "") {
$environment = $ENV{GIT_TEST_INSTALLED};
}
}
- print to_json(\@data, {utf8 => 1, pretty => 1}), "\n";
+ print to_json(\@data, {utf8 => 1, pretty => 1, canonical => 1}), "\n";
}
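+# (The "canonical" flag above makes the JSON encoder emit object keys in
+# sorted order, so repeated runs produce byte-identical output.)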
binmode STDOUT, ":utf8" or die "PANIC on binmode: $!";
if ($codespeed) {
- print_codespeed_results($results_section);
+ print_codespeed_results($subsection);
} else {
print_default_results();
}
echo "$1" | sed -e 's|\(..\)|\1/|'
}
-objck() {
- p=$(objpath "$1")
- if test ! -f "$REAL/objects/$p"
- then
- echo "Object not found: $REAL/objects/$p"
- false
- fi
-}
-
test_expect_success 'initial setup' '
REAL="$(pwd)/.real" &&
mv .git "$REAL"
test_expect_success 'bad setup: invalid .git file format' '
echo "gitdir $REAL" >.git &&
- if git rev-parse 2>.err
- then
- echo "git rev-parse accepted an invalid .git file"
- false
- fi &&
- if ! grep "Invalid gitfile format" .err
- then
- echo "git rev-parse returned wrong error"
- false
- fi
+ test_must_fail git rev-parse 2>.err &&
+ test_i18ngrep "invalid gitfile format" .err
'
test_expect_success 'bad setup: invalid .git file path' '
echo "gitdir: $REAL.not" >.git &&
- if git rev-parse 2>.err
- then
- echo "git rev-parse accepted an invalid .git file path"
- false
- fi &&
- if ! grep "Not a git repository" .err
- then
- echo "git rev-parse returned wrong error"
- false
- fi
+ test_must_fail git rev-parse 2>.err &&
+ test_i18ngrep "not a git repository" .err
'
test_expect_success 'final setup + check rev-parse --git-dir' '
test_expect_success 'check hash-object' '
echo "foo" >bar &&
SHA=$(cat bar | git hash-object -w --stdin) &&
- objck $SHA
+ test_path_is_file "$REAL/objects/$(objpath $SHA)"
'
test_expect_success 'check cat-file' '
'
test_expect_success 'check update-index' '
- if test -f "$REAL/index"
- then
- echo "Hmm, $REAL/index exists?"
- false
- fi &&
+ test_path_is_missing "$REAL/index" &&
rm -f "$REAL/objects/$(objpath $SHA)" &&
git update-index --add bar &&
- if ! test -f "$REAL/index"
- then
- echo "$REAL/index not found"
- false
- fi &&
- objck $SHA
+ test_path_is_file "$REAL/index" &&
+ test_path_is_file "$REAL/objects/$(objpath $SHA)"
'
test_expect_success 'check write-tree' '
SHA=$(git write-tree) &&
- objck $SHA
+ test_path_is_file "$REAL/objects/$(objpath $SHA)"
'
test_expect_success 'check commit-tree' '
SHA=$(echo "commit bar" | git commit-tree $SHA) &&
- objck $SHA
+ test_path_is_file "$REAL/objects/$(objpath $SHA)"
'
test_expect_success 'check rev-list' '
cd .git &&
test_check_ignore "foo" 128
) &&
- stderr_contains "fatal: This operation must be run in a work tree"
+ stderr_contains "fatal: this operation must be run in a work tree"
'
############################################################################
echo "$response" | grep "^:: two"
'
+test_expect_success 'existing file and directory' '
+ test_when_finished "rm one" &&
+ test_when_finished "rmdir top-level-dir" &&
+ >one &&
+ mkdir top-level-dir &&
+ git check-ignore one top-level-dir >actual &&
+ grep one actual &&
+ grep top-level-dir actual
+'
+
+test_expect_success 'existing directory and file' '
+ test_when_finished "rm one" &&
+ test_when_finished "rmdir top-level-dir" &&
+ >one &&
+ mkdir top-level-dir &&
+ git check-ignore top-level-dir one >actual &&
+ grep one actual &&
+ grep top-level-dir actual
+'
+
############################################################################
#
# test whitespace handling
git merge topic
'
-
+test_expect_success CASE_INSENSITIVE_FS 'add directory (with different case)' '
+ git reset --hard initial &&
+ mkdir -p dir1/dir2 &&
+ echo >dir1/dir2/a &&
+ echo >dir1/dir2/b &&
+ git add dir1/dir2/a &&
+ git add dir1/DIR2/b &&
+ git ls-files >actual &&
+ cat >expected <<-\EOF &&
+ camelcase
+ dir1/dir2/a
+ dir1/dir2/b
+ EOF
+ test_cmp expected actual
+'
test_expect_failure CASE_INSENSITIVE_FS 'add (with different case)' '
git reset --hard initial &&
test_cmp expect actual
'
+test_trace () {
+ expect="$1"
+ shift
+ GIT_TRACE=1 test-run-command "$@" run-command true 2>&1 >/dev/null | \
+ sed 's/.* run_command: //' >actual &&
+ echo "$expect true" >expect &&
+ test_cmp expect actual
+}
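+
+# test_trace asserts on the trace line emitted by run-command: the first
+# case below, for example, expects "run_command: abc=1 def=2 true", i.e.
+# only the effective environment changes are printed before the command.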
+
+test_expect_success 'GIT_TRACE with environment variables' '
+ test_trace "abc=1 def=2" env abc=1 env def=2 &&
+ test_trace "abc=2" env abc env abc=1 env abc=2 &&
+ test_trace "abc=2" env abc env abc=2 &&
+ (
+ abc=1 && export abc &&
+ test_trace "def=1" env abc=1 env def=1
+ ) &&
+ (
+ abc=1 && export abc &&
+ test_trace "def=1" env abc env abc=1 env def=1
+ ) &&
+ test_trace "def=1" env non-exist env def=1 &&
+ test_trace "abc=2" env abc=1 env abc env abc=2 &&
+ (
+ abc=1 def=2 && export abc def &&
+ test_trace "unset abc def;" env abc env def
+ ) &&
+ (
+ abc=1 def=2 && export abc def &&
+ test_trace "unset def; abc=3" env abc env def env abc=3
+ ) &&
+ (
+ abc=1 && export abc &&
+ test_trace "unset abc;" env abc=2 env abc
+ )
+'
+
test_done
. ./lib-gettext.sh
-test_expect_success GETTEXT_POISON "sanity: \$GIT_INTERNAL_GETTEXT_SH_SCHEME is set (to $GIT_INTERNAL_GETTEXT_SH_SCHEME)" '
- test -n "$GIT_INTERNAL_GETTEXT_SH_SCHEME"
-'
-
test_expect_success GETTEXT_POISON 'sanity: $GIT_INTERNAL_GETTEXT_SH_SCHEME is poison' '
test "$GIT_INTERNAL_GETTEXT_SH_SCHEME" = "poison"
'
--- /dev/null
+#!/bin/sh
+
+test_description='partial clone'
+
+. ./test-lib.sh
+
+delete_object () {
+ rm $1/.git/objects/$(echo $2 | sed -e 's|^..|&/|')
+}
+
+pack_as_from_promisor () {
+ HASH=$(git -C repo pack-objects .git/objects/pack/pack) &&
+ >repo/.git/objects/pack/pack-$HASH.promisor &&
+ echo $HASH
+}
+
+promise_and_delete () {
+ HASH=$(git -C repo rev-parse "$1") &&
+ git -C repo tag -a -m message my_annotated_tag "$HASH" &&
+ git -C repo rev-parse my_annotated_tag | pack_as_from_promisor &&
+ # tag -d prints a message to stdout, so redirect it
+ git -C repo tag -d my_annotated_tag >/dev/null &&
+ delete_object repo "$HASH"
+}
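+
+# In other words: pack_as_from_promisor packs the object names fed on its
+# stdin into .git/objects/pack/ and drops an empty pack-<hash>.promisor
+# file next to the result, which is what marks the pack as having come
+# from a promisor remote; promise_and_delete additionally removes the
+# loose object so that only the promise remains.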
+
+test_expect_success 'missing reflog object, but promised by a commit, passes fsck' '
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+ C=$(git -C repo commit-tree -m c -p $A HEAD^{tree}) &&
+
+ # Reference $A only from reflog, and delete it
+ git -C repo branch my_branch "$A" &&
+ git -C repo branch -f my_branch my_commit &&
+ delete_object repo "$A" &&
+
+ # State that we got $C, which refers to $A, from promisor
+ printf "$C\n" | pack_as_from_promisor &&
+
+ # Normally, it fails
+ test_must_fail git -C repo fsck &&
+
+ # But with the extension, it succeeds
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo fsck
+'
+
+test_expect_success 'missing reflog object, but promised by a tag, passes fsck' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+ git -C repo tag -a -m d my_tag_name $A &&
+ T=$(git -C repo rev-parse my_tag_name) &&
+ git -C repo tag -d my_tag_name &&
+
+ # Reference $A only from reflog, and delete it
+ git -C repo branch my_branch "$A" &&
+ git -C repo branch -f my_branch my_commit &&
+ delete_object repo "$A" &&
+
+ # State that we got $T, which refers to $A, from promisor
+ printf "$T\n" | pack_as_from_promisor &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo fsck
+'
+
+test_expect_success 'missing reflog object alone fails fsck, even with extension set' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+ B=$(git -C repo commit-tree -m b HEAD^{tree}) &&
+
+ # Reference $A only from reflog, and delete it
+ git -C repo branch my_branch "$A" &&
+ git -C repo branch -f my_branch my_commit &&
+ delete_object repo "$A" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ test_must_fail git -C repo fsck
+'
+
+test_expect_success 'missing ref object, but promised, passes fsck' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+
+ # Reference $A only from ref
+ git -C repo branch my_branch "$A" &&
+ promise_and_delete "$A" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo fsck
+'
+
+test_expect_success 'missing object, but promised, passes fsck' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo 1 &&
+ test_commit -C repo 2 &&
+ test_commit -C repo 3 &&
+ git -C repo tag -a annotated_tag -m "annotated tag" &&
+
+ C=$(git -C repo rev-parse 1) &&
+ T=$(git -C repo rev-parse 2^{tree}) &&
+ B=$(git hash-object repo/3.t) &&
+ AT=$(git -C repo rev-parse annotated_tag) &&
+
+ promise_and_delete "$C" &&
+ promise_and_delete "$T" &&
+ promise_and_delete "$B" &&
+ promise_and_delete "$AT" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo fsck
+'
+
+test_expect_success 'missing CLI object, but promised, passes fsck' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+ promise_and_delete "$A" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo fsck "$A"
+'
+
+test_expect_success 'fetching of missing objects' '
+ rm -rf repo &&
+ test_create_repo server &&
+ test_commit -C server foo &&
+ git -C server repack -a -d --write-bitmap-index &&
+
+ git clone "file://$(pwd)/server" repo &&
+ HASH=$(git -C repo rev-parse foo) &&
+ rm -rf repo/.git/objects/* &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "origin" &&
+ git -C repo cat-file -p "$HASH" &&
+
+ # Ensure that the .promisor file is written, and check that its
+ # associated packfile contains the object
+ ls repo/.git/objects/pack/pack-*.promisor >promisorlist &&
+ test_line_count = 1 promisorlist &&
+ IDX=$(cat promisorlist | sed "s/promisor$/idx/") &&
+ git verify-pack --verbose "$IDX" | grep "$HASH"
+'
+
+test_expect_success 'rev-list stops traversal at missing and promised commit' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo foo &&
+ test_commit -C repo bar &&
+
+ FOO=$(git -C repo rev-parse foo) &&
+ promise_and_delete "$FOO" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo rev-list --exclude-promisor-objects --objects bar >out &&
+ grep $(git -C repo rev-parse bar) out &&
+ ! grep $FOO out
+'
+
+test_expect_success 'rev-list stops traversal at missing and promised tree' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo foo &&
+ mkdir repo/a_dir &&
+ echo something >repo/a_dir/something &&
+ git -C repo add a_dir/something &&
+ git -C repo commit -m bar &&
+
+ # foo^{tree} (tree referenced from commit)
+ TREE=$(git -C repo rev-parse foo^{tree}) &&
+
+ # a tree referenced by HEAD^{tree} (tree referenced from tree)
+ TREE2=$(git -C repo ls-tree HEAD^{tree} | grep " tree " | head -1 | cut -b13-52) &&
+
+ promise_and_delete "$TREE" &&
+ promise_and_delete "$TREE2" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo rev-list --exclude-promisor-objects --objects HEAD >out &&
+ grep $(git -C repo rev-parse foo) out &&
+ ! grep $TREE out &&
+ grep $(git -C repo rev-parse HEAD) out &&
+ ! grep $TREE2 out
+'
+
+test_expect_success 'rev-list stops traversal at missing and promised blob' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ echo something >repo/something &&
+ git -C repo add something &&
+ git -C repo commit -m foo &&
+
+ BLOB=$(git -C repo hash-object -w something) &&
+ promise_and_delete "$BLOB" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo rev-list --exclude-promisor-objects --objects HEAD >out &&
+ grep $(git -C repo rev-parse HEAD) out &&
+ ! grep $BLOB out
+'
+
+test_expect_success 'rev-list stops traversal at promisor commit, tree, and blob' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo foo &&
+ test_commit -C repo bar &&
+ test_commit -C repo baz &&
+
+ COMMIT=$(git -C repo rev-parse foo) &&
+ TREE=$(git -C repo rev-parse bar^{tree}) &&
+ BLOB=$(git hash-object repo/baz.t) &&
+ printf "%s\n%s\n%s\n" $COMMIT $TREE $BLOB | pack_as_from_promisor &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo rev-list --exclude-promisor-objects --objects HEAD >out &&
+ ! grep $COMMIT out &&
+ ! grep $TREE out &&
+ ! grep $BLOB out &&
+ grep $(git -C repo rev-parse bar) out # sanity check that some walking was done
+'
+
+test_expect_success 'rev-list accepts missing and promised objects on command line' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo foo &&
+ test_commit -C repo bar &&
+ test_commit -C repo baz &&
+
+ COMMIT=$(git -C repo rev-parse foo) &&
+ TREE=$(git -C repo rev-parse bar^{tree}) &&
+ BLOB=$(git hash-object repo/baz.t) &&
+
+ promise_and_delete $COMMIT &&
+ promise_and_delete $TREE &&
+ promise_and_delete $BLOB &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo rev-list --exclude-promisor-objects --objects "$COMMIT" "$TREE" "$BLOB"
+'
+
+test_expect_success 'gc does not repack promisor objects' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ TREE_HASH=$(git -C repo rev-parse HEAD^{tree}) &&
+ HASH=$(printf "$TREE_HASH\n" | pack_as_from_promisor) &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo gc &&
+
+ # Ensure that the promisor packfile still exists, and remove it
+ test -e repo/.git/objects/pack/pack-$HASH.pack &&
+ rm repo/.git/objects/pack/pack-$HASH.* &&
+
+ # Ensure that the single other pack contains the commit, but not the tree
+ ls repo/.git/objects/pack/pack-*.pack >packlist &&
+ test_line_count = 1 packlist &&
+ git verify-pack repo/.git/objects/pack/pack-*.pack -v >out &&
+ grep "$(git -C repo rev-parse HEAD)" out &&
+ ! grep "$TREE_HASH" out
+'
+
+test_expect_success 'gc stops traversal when a missing but promised object is reached' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ TREE_HASH=$(git -C repo rev-parse HEAD^{tree}) &&
+ HASH=$(promise_and_delete $TREE_HASH) &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo gc &&
+
+ # Ensure that the promisor packfile still exists, and remove it
+ test -e repo/.git/objects/pack/pack-$HASH.pack &&
+ rm repo/.git/objects/pack/pack-$HASH.* &&
+
+ # Ensure that the single other pack contains the commit, but not the tree
+ ls repo/.git/objects/pack/pack-*.pack >packlist &&
+ test_line_count = 1 packlist &&
+ git verify-pack repo/.git/objects/pack/pack-*.pack -v >out &&
+ grep "$(git -C repo rev-parse HEAD)" out &&
+ ! grep "$TREE_HASH" out
+'
+
+LIB_HTTPD_PORT=12345 # the default port, 410, is privileged and cannot be used by a non-root user
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'fetching of missing objects from an HTTP server' '
+ rm -rf repo &&
+ SERVER="$HTTPD_DOCUMENT_ROOT_PATH/server" &&
+ test_create_repo "$SERVER" &&
+ test_commit -C "$SERVER" foo &&
+ git -C "$SERVER" repack -a -d --write-bitmap-index &&
+
+ git clone $HTTPD_URL/smart/server repo &&
+ HASH=$(git -C repo rev-parse foo) &&
+ rm -rf repo/.git/objects/* &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "origin" &&
+ git -C repo cat-file -p "$HASH" &&
+
+ # Ensure that the .promisor file is written, and check that its
+ # associated packfile contains the object
+ ls repo/.git/objects/pack/pack-*.promisor >promisorlist &&
+ test_line_count = 1 promisorlist &&
+ IDX=$(cat promisorlist | sed "s/promisor$/idx/") &&
+ git verify-pack --verbose "$IDX" | grep "$HASH"
+'
+
+stop_httpd
+
+test_done
GIT_CONFIG_PARAMETERS="" git -c x.one=1 config --list
'
+sq="'"
+test_expect_success 'detect bogus GIT_CONFIG_PARAMETERS' '
+ cat >expect <<-\EOF &&
+ env.one one
+ env.two two
+ EOF
+ GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq} ${sq}env.two=two${sq}" \
+ git config --get-regexp "env.*" >actual &&
+ test_cmp expect actual &&
+
+ cat >expect <<-EOF &&
+ env.one one${sq}
+ env.two two
+ EOF
+ GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq}\\$sq$sq$sq ${sq}env.two=two${sq}" \
+ git config --get-regexp "env.*" >actual &&
+ test_cmp expect actual &&
+
+ test_must_fail env \
+ GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq}\\$sq ${sq}env.two=two${sq}" \
+ git config --get-regexp "env.*"
+'
+
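+# A worked reading of the cases above: the second value expands to
+#
+#   'env.one=one'\''' 'env.two=two'
+#
+# where '\'' is the usual shell-quoting escape for a single quote, so
+# env.one comes out as "one'". The third value ends a quoted word with a
+# bare \' that is never followed by a reopening quote, which git rejects
+# as a bogus GIT_CONFIG_PARAMETERS.
+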
test_expect_success 'git config --edit works' '
git config -f tmp test.value no &&
echo test.value=yes >expect &&
test_expect_success 'relative path outside worktree' '
test_must_fail git rev-parse HEAD:../file.txt >output 2>error &&
test -z "$(cat output)" &&
- grep "outside repository" error
+ test_i18ngrep "outside repository" error
'
test_expect_success 'relative path when cwd is outside worktree' '
0642 -rw-r---w-
EOF
+test_expect_success POSIXPERM,SANITY 'graceful handling when splitting index is not allowed' '
+ test_create_repo ro &&
+ (
+ cd ro &&
+ test_commit initial &&
+ git update-index --split-index &&
+ test -f .git/sharedindex.*
+ ) &&
+ cp ro/.git/index new-index &&
+ test_when_finished "chmod u+w ro/.git" &&
+ chmod u-w ro/.git &&
+ GIT_INDEX_FILE="$(pwd)/new-index" git -C ro update-index --split-index &&
+ chmod u+w ro/.git &&
+ rm ro/.git/sharedindex.* &&
+ GIT_INDEX_FILE=new-index git ls-files >actual &&
+ echo initial.t >expected &&
+ test_cmp expected actual
+'
+
+test_expect_success 'writing split index with null sha1 does not write cache tree' '
+ git config core.splitIndex true &&
+ git config splitIndex.maxPercentChange 0 &&
+ git commit -m "commit" &&
+ {
+ git ls-tree HEAD &&
+ printf "160000 commit $_z40\\tbroken\\n"
+ } >broken-tree &&
+ echo "add broken entry" >msg &&
+
+ tree=$(git mktree <broken-tree) &&
+ test_tick &&
+ commit=$(git commit-tree $tree -p HEAD <msg) &&
+ git update-ref HEAD "$commit" &&
+ GIT_ALLOW_NULL_SHA1=1 git reset --hard &&
+ (test-dump-cache-tree >cache-tree.out || true) &&
+ test_line_count = 0 cache-tree.out
+'
+
test_done
'
post_checkout_hook () {
- test_when_finished "rm -f .git/hooks/post-checkout" &&
- mkdir -p .git/hooks &&
- write_script .git/hooks/post-checkout <<-\EOF
- echo $* >hook.actual
+ gitdir=${1:-.git}
+ test_when_finished "rm -f $gitdir/hooks/post-checkout" &&
+ mkdir -p $gitdir/hooks &&
+ write_script $gitdir/hooks/post-checkout <<-\EOF
+ {
+ echo $*
+ git rev-parse --git-dir --show-toplevel
+ } >hook.actual
EOF
}
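+
+# hook.actual thus records the hook arguments "<old-oid> <new-oid> 1"
+# followed by the output of "git rev-parse --git-dir --show-toplevel",
+# which is exactly what the hook.expect blocks below reproduce.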
test_expect_success '"add" invokes post-checkout hook (branch)' '
post_checkout_hook &&
- printf "%s %s 1\n" $_z40 $(git rev-parse HEAD) >hook.expect &&
+ {
+ echo $_z40 $(git rev-parse HEAD) 1 &&
+ echo $(pwd)/.git/worktrees/gumby &&
+ echo $(pwd)/gumby
+ } >hook.expect &&
git worktree add gumby &&
- test_cmp hook.expect hook.actual
+ test_cmp hook.expect gumby/hook.actual
'
test_expect_success '"add" invokes post-checkout hook (detached)' '
post_checkout_hook &&
- printf "%s %s 1\n" $_z40 $(git rev-parse HEAD) >hook.expect &&
+ {
+ echo $_z40 $(git rev-parse HEAD) 1 &&
+ echo $(pwd)/.git/worktrees/grumpy &&
+ echo $(pwd)/grumpy
+ } >hook.expect &&
git worktree add --detach grumpy &&
- test_cmp hook.expect hook.actual
+ test_cmp hook.expect grumpy/hook.actual
'
test_expect_success '"add --no-checkout" suppresses post-checkout hook' '
post_checkout_hook &&
rm -f hook.actual &&
git worktree add --no-checkout gloopy &&
- test_path_is_missing hook.actual
+ test_path_is_missing gloopy/hook.actual
+'
+
+test_expect_success '"add" in other worktree invokes post-checkout hook' '
+ post_checkout_hook &&
+ {
+ echo $_z40 $(git rev-parse HEAD) 1 &&
+ echo $(pwd)/.git/worktrees/guppy &&
+ echo $(pwd)/guppy
+ } >hook.expect &&
+ git -C gloopy worktree add --detach ../guppy &&
+ test_cmp hook.expect guppy/hook.actual
+'
+
+test_expect_success '"add" in bare repo invokes post-checkout hook' '
+ rm -rf bare &&
+ git clone --bare . bare &&
+ {
+ echo $_z40 $(git --git-dir=bare rev-parse HEAD) 1 &&
+ echo $(pwd)/bare/worktrees/goozy &&
+ echo $(pwd)/goozy
+ } >hook.expect &&
+ post_checkout_hook bare &&
+ git -C bare worktree add --detach ../goozy &&
+ test_cmp hook.expect goozy/hook.actual
'
test_done
. ./test-lib.sh
-match() {
- if [ $1 = 1 ]; then
- test_expect_success "wildmatch: match '$3' '$4'" "
- test-wildmatch wildmatch '$3' '$4'
- "
- else
- test_expect_success "wildmatch: no match '$3' '$4'" "
- ! test-wildmatch wildmatch '$3' '$4'
- "
- fi
+should_create_test_file() {
+ file=$1
+
+ case $file in
+ # `touch .` will succeed but obviously not do what we intend
+ # here.
+ ".")
+ return 1
+ ;;
+ # We cannot create a file with an empty filename.
+ "")
+ return 1
+ ;;
+	# Tests checking that e.g. foo//bar is matched by foo/*/bar
+	# can't be carried out on a real filesystem, since there's no
+	# way to create a path containing a double slash.
+ *//*)
+ return 1
+ ;;
+ # When testing the difference between foo/bar and foo/bar/ we
+ # can't test the latter.
+ */)
+ return 1
+ ;;
+ # On Windows, \ in paths is silently converted to /, which
+ # would result in the "touch" below working, but the test
+ # itself failing. See 6fd1106aa4 ("t3700: Skip a test with
+ # backslashes in pathspec", 2009-03-13) for prior art and
+ # details.
+ *\\*)
+ if ! test_have_prereq BSLASHPSPEC
+ then
+ return 1
+ fi
+ # NOTE: The ;;& bash extension is not portable, so
+ # this test needs to be at the end of the pattern
+ # list.
+ #
+ # If we want to add more conditional returns we either
+ # need a new case statement, or turn this whole thing
+ # into a series of "if" tests.
+ ;;
+ esac
+
+	# On Windows proper (i.e. not Cygwin), many file names that
+	# Cygwin would emulate are simply not allowed.
+ if test_have_prereq MINGW
+ then
+ case $file in
+ " ")
+ # Files called " " are forbidden on Windows
+ return 1
+ ;;
+ *\<*|*\>*|*:*|*\"*|*\|*|*\?*|*\**)
+ # Files with various special characters aren't
+ # allowed on Windows. Sourced from
+ # https://stackoverflow.com/a/31976060
+ return 1
+ ;;
+ esac
+ fi
+
+ return 0
}
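+
+# (A return of 0 means the name is representable on this filesystem;
+# when it is not, the ls-files-based checks below are turned into known
+# breakages, i.e. effectively skipped.)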
-imatch() {
- if [ $1 = 1 ]; then
- test_expect_success "iwildmatch: match '$2' '$3'" "
- test-wildmatch iwildmatch '$2' '$3'
- "
- else
- test_expect_success "iwildmatch: no match '$2' '$3'" "
- ! test-wildmatch iwildmatch '$2' '$3'
- "
- fi
+match_with_function() {
+ text=$1
+ pattern=$2
+ match_expect=$3
+ match_function=$4
+
+ if test "$match_expect" = 1
+ then
+ test_expect_success "$match_function: match '$text' '$pattern'" "
+ test-wildmatch $match_function '$text' '$pattern'
+ "
+ elif test "$match_expect" = 0
+ then
+ test_expect_success "$match_function: no match '$text' '$pattern'" "
+ test_must_fail test-wildmatch $match_function '$text' '$pattern'
+ "
+ else
+ test_expect_success "PANIC: Test framework error. Unknown matches value $match_expect" 'false'
+ fi
+
+}
+
+match_with_ls_files() {
+ text=$1
+ pattern=$2
+ match_expect=$3
+ match_function=$4
+ ls_files_args=$5
+
+ match_stdout_stderr_cmp="
+ tr -d '\0' <actual.raw >actual &&
+ >expect.err &&
+ test_cmp expect.err actual.err &&
+ test_cmp expect actual"
+
+ if test "$match_expect" = 'E'
+ then
+ if test -e .git/created_test_file
+ then
+ test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match dies on '$pattern' '$text'" "
+ printf '%s' '$text' >expect &&
+ test_must_fail git$ls_files_args ls-files -z -- '$pattern'
+ "
+ else
+ test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match skip '$pattern' '$text'" 'false'
+ fi
+ elif test "$match_expect" = 1
+ then
+ if test -e .git/created_test_file
+ then
+ test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match '$pattern' '$text'" "
+ printf '%s' '$text' >expect &&
+ git$ls_files_args ls-files -z -- '$pattern' >actual.raw 2>actual.err &&
+ $match_stdout_stderr_cmp
+ "
+ else
+ test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match skip '$pattern' '$text'" 'false'
+ fi
+ elif test "$match_expect" = 0
+ then
+ if test -e .git/created_test_file
+ then
+ test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): no match '$pattern' '$text'" "
+ >expect &&
+ git$ls_files_args ls-files -z -- '$pattern' >actual.raw 2>actual.err &&
+ $match_stdout_stderr_cmp
+ "
+ else
+ test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): no match skip '$pattern' '$text'" 'false'
+ fi
+ else
+ test_expect_success "PANIC: Test framework error. Unknown matches value $match_expect" 'false'
+ fi
}
-pathmatch() {
- if [ $1 = 1 ]; then
- test_expect_success "pathmatch: match '$2' '$3'" "
- test-wildmatch pathmatch '$2' '$3'
- "
- else
- test_expect_success "pathmatch: no match '$2' '$3'" "
- ! test-wildmatch pathmatch '$2' '$3'
- "
- fi
+match() {
+ if test "$#" = 6
+ then
+ # When test-wildmatch and git ls-files produce the same
+ # result.
+ match_glob=$1
+ match_file_glob=$match_glob
+ match_iglob=$2
+ match_file_iglob=$match_iglob
+ match_pathmatch=$3
+ match_file_pathmatch=$match_pathmatch
+ match_pathmatchi=$4
+ match_file_pathmatchi=$match_pathmatchi
+ text=$5
+ pattern=$6
+ elif test "$#" = 10
+ then
+ match_glob=$1
+ match_iglob=$2
+ match_pathmatch=$3
+ match_pathmatchi=$4
+ match_file_glob=$5
+ match_file_iglob=$6
+ match_file_pathmatch=$7
+ match_file_pathmatchi=$8
+ text=$9
+ pattern=${10}
+ fi
+
+ test_expect_success EXPENSIVE_ON_WINDOWS 'cleanup after previous file test' '
+ if test -e .git/created_test_file
+ then
+ git reset &&
+ git clean -df
+ fi
+ '
+
+ printf '%s' "$text" >.git/expected_test_file
+
+ test_expect_success EXPENSIVE_ON_WINDOWS "setup match file test for $text" '
+ file=$(cat .git/expected_test_file) &&
+ if should_create_test_file "$file"
+ then
+ dirs=${file%/*}
+ if test "$file" != "$dirs"
+ then
+ mkdir -p -- "$dirs" &&
+ touch -- "./$text"
+ else
+ touch -- "./$file"
+ fi &&
+ git add -A &&
+ printf "%s" "$file" >.git/created_test_file
+ elif test -e .git/created_test_file
+ then
+ rm .git/created_test_file
+ fi
+ '
+
+ # $1: Case sensitive glob match: test-wildmatch & ls-files
+ match_with_function "$text" "$pattern" $match_glob "wildmatch"
+ match_with_ls_files "$text" "$pattern" $match_file_glob "wildmatch" " --glob-pathspecs"
+
+ # $2: Case insensitive glob match: test-wildmatch & ls-files
+ match_with_function "$text" "$pattern" $match_iglob "iwildmatch"
+ match_with_ls_files "$text" "$pattern" $match_file_iglob "iwildmatch" " --glob-pathspecs --icase-pathspecs"
+
+ # $3: Case sensitive path match: test-wildmatch & ls-files
+ match_with_function "$text" "$pattern" $match_pathmatch "pathmatch"
+ match_with_ls_files "$text" "$pattern" $match_file_pathmatch "pathmatch" ""
+
+ # $4: Case insensitive path match: test-wildmatch & ls-files
+ match_with_function "$text" "$pattern" $match_pathmatchi "ipathmatch"
+ match_with_ls_files "$text" "$pattern" $match_file_pathmatchi "ipathmatch" " --icase-pathspecs"
}
-# Basic wildmat features
-match 1 1 foo foo
-match 0 0 foo bar
-match 1 1 '' ""
-match 1 1 foo '???'
-match 0 0 foo '??'
-match 1 1 foo '*'
-match 1 1 foo 'f*'
-match 0 0 foo '*f'
-match 1 1 foo '*foo*'
-match 1 1 foobar '*ob*a*r*'
-match 1 1 aaaaaaabababab '*ab'
-match 1 1 'foo*' 'foo\*'
-match 0 0 foobar 'foo\*bar'
-match 1 1 'f\oo' 'f\\oo'
-match 1 1 ball '*[al]?'
-match 0 0 ten '[ten]'
-match 0 1 ten '**[!te]'
-match 0 0 ten '**[!ten]'
-match 1 1 ten 't[a-g]n'
-match 0 0 ten 't[!a-g]n'
-match 1 1 ton 't[!a-g]n'
-match 1 1 ton 't[^a-g]n'
-match 1 x 'a]b' 'a[]]b'
-match 1 x a-b 'a[]-]b'
-match 1 x 'a]b' 'a[]-]b'
-match 0 x aab 'a[]-]b'
-match 1 x aab 'a[]a-]b'
-match 1 1 ']' ']'
+# Basic wildmatch features
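+# (A worked reading: "match 0 0 1 1 'foo/baz/bar' 'foo*bar'" below means
+# wildmatch and iwildmatch must not match (0 0) while pathmatch and
+# ipathmatch must (1 1); with six arguments the same expectations are
+# reused for the ls-files checks, and the ten-argument form supplies the
+# four file-based expectations separately.)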
+match 1 1 1 1 foo foo
+match 0 0 0 0 foo bar
+match 1 1 1 1 '' ""
+match 1 1 1 1 foo '???'
+match 0 0 0 0 foo '??'
+match 1 1 1 1 foo '*'
+match 1 1 1 1 foo 'f*'
+match 0 0 0 0 foo '*f'
+match 1 1 1 1 foo '*foo*'
+match 1 1 1 1 foobar '*ob*a*r*'
+match 1 1 1 1 aaaaaaabababab '*ab'
+match 1 1 1 1 'foo*' 'foo\*'
+match 0 0 0 0 foobar 'foo\*bar'
+match 1 1 1 1 'f\oo' 'f\\oo'
+match 1 1 1 1 ball '*[al]?'
+match 0 0 0 0 ten '[ten]'
+match 0 0 1 1 ten '**[!te]'
+match 0 0 0 0 ten '**[!ten]'
+match 1 1 1 1 ten 't[a-g]n'
+match 0 0 0 0 ten 't[!a-g]n'
+match 1 1 1 1 ton 't[!a-g]n'
+match 1 1 1 1 ton 't[^a-g]n'
+match 1 1 1 1 'a]b' 'a[]]b'
+match 1 1 1 1 a-b 'a[]-]b'
+match 1 1 1 1 'a]b' 'a[]-]b'
+match 0 0 0 0 aab 'a[]-]b'
+match 1 1 1 1 aab 'a[]a-]b'
+match 1 1 1 1 ']' ']'
# Extended slash-matching features
-match 0 0 'foo/baz/bar' 'foo*bar'
-match 0 0 'foo/baz/bar' 'foo**bar'
-match 0 1 'foobazbar' 'foo**bar'
-match 1 1 'foo/baz/bar' 'foo/**/bar'
-match 1 0 'foo/baz/bar' 'foo/**/**/bar'
-match 1 0 'foo/b/a/z/bar' 'foo/**/bar'
-match 1 0 'foo/b/a/z/bar' 'foo/**/**/bar'
-match 1 0 'foo/bar' 'foo/**/bar'
-match 1 0 'foo/bar' 'foo/**/**/bar'
-match 0 0 'foo/bar' 'foo?bar'
-match 0 0 'foo/bar' 'foo[/]bar'
-match 0 0 'foo/bar' 'foo[^a-z]bar'
-match 0 0 'foo/bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
-match 1 1 'foo-bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
-match 1 0 'foo' '**/foo'
-match 1 x 'XXX/foo' '**/foo'
-match 1 0 'bar/baz/foo' '**/foo'
-match 0 0 'bar/baz/foo' '*/foo'
-match 0 0 'foo/bar/baz' '**/bar*'
-match 1 0 'deep/foo/bar/baz' '**/bar/*'
-match 0 0 'deep/foo/bar/baz/' '**/bar/*'
-match 1 0 'deep/foo/bar/baz/' '**/bar/**'
-match 0 0 'deep/foo/bar' '**/bar/*'
-match 1 0 'deep/foo/bar/' '**/bar/**'
-match 0 0 'foo/bar/baz' '**/bar**'
-match 1 0 'foo/bar/baz/x' '*/bar/**'
-match 0 0 'deep/foo/bar/baz/x' '*/bar/**'
-match 1 0 'deep/foo/bar/baz/x' '**/bar/*/*'
+match 0 0 1 1 'foo/baz/bar' 'foo*bar'
+match 0 0 1 1 'foo/baz/bar' 'foo**bar'
+match 0 0 1 1 'foobazbar' 'foo**bar'
+match 1 1 1 1 'foo/baz/bar' 'foo/**/bar'
+match 1 1 0 0 'foo/baz/bar' 'foo/**/**/bar'
+match 1 1 1 1 'foo/b/a/z/bar' 'foo/**/bar'
+match 1 1 1 1 'foo/b/a/z/bar' 'foo/**/**/bar'
+match 1 1 0 0 'foo/bar' 'foo/**/bar'
+match 1 1 0 0 'foo/bar' 'foo/**/**/bar'
+match 0 0 1 1 'foo/bar' 'foo?bar'
+match 0 0 1 1 'foo/bar' 'foo[/]bar'
+match 0 0 1 1 'foo/bar' 'foo[^a-z]bar'
+match 0 0 1 1 'foo/bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
+match 1 1 1 1 'foo-bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
+match 1 1 0 0 'foo' '**/foo'
+match 1 1 1 1 'XXX/foo' '**/foo'
+match 1 1 1 1 'bar/baz/foo' '**/foo'
+match 0 0 1 1 'bar/baz/foo' '*/foo'
+match 0 0 1 1 'foo/bar/baz' '**/bar*'
+match 1 1 1 1 'deep/foo/bar/baz' '**/bar/*'
+match 0 0 1 1 'deep/foo/bar/baz/' '**/bar/*'
+match 1 1 1 1 'deep/foo/bar/baz/' '**/bar/**'
+match 0 0 0 0 'deep/foo/bar' '**/bar/*'
+match 1 1 1 1 'deep/foo/bar/' '**/bar/**'
+match 0 0 1 1 'foo/bar/baz' '**/bar**'
+match 1 1 1 1 'foo/bar/baz/x' '*/bar/**'
+match 0 0 1 1 'deep/foo/bar/baz/x' '*/bar/**'
+match 1 1 1 1 'deep/foo/bar/baz/x' '**/bar/*/*'
# Various additional tests
-match 0 0 'acrt' 'a[c-c]st'
-match 1 1 'acrt' 'a[c-c]rt'
-match 0 0 ']' '[!]-]'
-match 1 x 'a' '[!]-]'
-match 0 0 '' '\'
-match 0 x '\' '\'
-match 0 x 'XXX/\' '*/\'
-match 1 x 'XXX/\' '*/\\'
-match 1 1 'foo' 'foo'
-match 1 1 '@foo' '@foo'
-match 0 0 'foo' '@foo'
-match 1 1 '[ab]' '\[ab]'
-match 1 1 '[ab]' '[[]ab]'
-match 1 x '[ab]' '[[:]ab]'
-match 0 x '[ab]' '[[::]ab]'
-match 1 x '[ab]' '[[:digit]ab]'
-match 1 x '[ab]' '[\[:]ab]'
-match 1 1 '?a?b' '\??\?b'
-match 1 1 'abc' '\a\b\c'
-match 0 0 'foo' ''
-match 1 0 'foo/bar/baz/to' '**/t[o]'
+match 0 0 0 0 'acrt' 'a[c-c]st'
+match 1 1 1 1 'acrt' 'a[c-c]rt'
+match 0 0 0 0 ']' '[!]-]'
+match 1 1 1 1 'a' '[!]-]'
+match 0 0 0 0 '' '\'
+match 0 0 0 0 \
+ 1 1 1 1 '\' '\'
+match 0 0 0 0 'XXX/\' '*/\'
+match 1 1 1 1 'XXX/\' '*/\\'
+match 1 1 1 1 'foo' 'foo'
+match 1 1 1 1 '@foo' '@foo'
+match 0 0 0 0 'foo' '@foo'
+match 1 1 1 1 '[ab]' '\[ab]'
+match 1 1 1 1 '[ab]' '[[]ab]'
+match 1 1 1 1 '[ab]' '[[:]ab]'
+match 0 0 0 0 '[ab]' '[[::]ab]'
+match 1 1 1 1 '[ab]' '[[:digit]ab]'
+match 1 1 1 1 '[ab]' '[\[:]ab]'
+match 1 1 1 1 '?a?b' '\??\?b'
+match 1 1 1 1 'abc' '\a\b\c'
+match 0 0 0 0 \
+ E E E E 'foo' ''
+match 1 1 1 1 'foo/bar/baz/to' '**/t[o]'
# Character class tests
-match 1 x 'a1B' '[[:alpha:]][[:digit:]][[:upper:]]'
-match 0 x 'a' '[[:digit:][:upper:][:space:]]'
-match 1 x 'A' '[[:digit:][:upper:][:space:]]'
-match 1 x '1' '[[:digit:][:upper:][:space:]]'
-match 0 x '1' '[[:digit:][:upper:][:spaci:]]'
-match 1 x ' ' '[[:digit:][:upper:][:space:]]'
-match 0 x '.' '[[:digit:][:upper:][:space:]]'
-match 1 x '.' '[[:digit:][:punct:][:space:]]'
-match 1 x '5' '[[:xdigit:]]'
-match 1 x 'f' '[[:xdigit:]]'
-match 1 x 'D' '[[:xdigit:]]'
-match 1 x '_' '[[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:graph:][:lower:][:print:][:punct:][:space:][:upper:][:xdigit:]]'
-match 1 x '.' '[^[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:lower:][:space:][:upper:][:xdigit:]]'
-match 1 x '5' '[a-c[:digit:]x-z]'
-match 1 x 'b' '[a-c[:digit:]x-z]'
-match 1 x 'y' '[a-c[:digit:]x-z]'
-match 0 x 'q' '[a-c[:digit:]x-z]'
-
-# Additional tests, including some malformed wildmats
-match 1 x ']' '[\\-^]'
-match 0 0 '[' '[\\-^]'
-match 1 x '-' '[\-_]'
-match 1 x ']' '[\]]'
-match 0 0 '\]' '[\]]'
-match 0 0 '\' '[\]]'
-match 0 0 'ab' 'a[]b'
-match 0 x 'a[]b' 'a[]b'
-match 0 x 'ab[' 'ab['
-match 0 0 'ab' '[!'
-match 0 0 'ab' '[-'
-match 1 1 '-' '[-]'
-match 0 0 '-' '[a-'
-match 0 0 '-' '[!a-'
-match 1 x '-' '[--A]'
-match 1 x '5' '[--A]'
-match 1 1 ' ' '[ --]'
-match 1 1 '$' '[ --]'
-match 1 1 '-' '[ --]'
-match 0 0 '0' '[ --]'
-match 1 x '-' '[---]'
-match 1 x '-' '[------]'
-match 0 0 'j' '[a-e-n]'
-match 1 x '-' '[a-e-n]'
-match 1 x 'a' '[!------]'
-match 0 0 '[' '[]-a]'
-match 1 x '^' '[]-a]'
-match 0 0 '^' '[!]-a]'
-match 1 x '[' '[!]-a]'
-match 1 1 '^' '[a^bc]'
-match 1 x '-b]' '[a-]b]'
-match 0 0 '\' '[\]'
-match 1 1 '\' '[\\]'
-match 0 0 '\' '[!\\]'
-match 1 1 'G' '[A-\\]'
-match 0 0 'aaabbb' 'b*a'
-match 0 0 'aabcaa' '*ba*'
-match 1 1 ',' '[,]'
-match 1 1 ',' '[\\,]'
-match 1 1 '\' '[\\,]'
-match 1 1 '-' '[,-.]'
-match 0 0 '+' '[,-.]'
-match 0 0 '-.]' '[,-.]'
-match 1 1 '2' '[\1-\3]'
-match 1 1 '3' '[\1-\3]'
-match 0 0 '4' '[\1-\3]'
-match 1 1 '\' '[[-\]]'
-match 1 1 '[' '[[-\]]'
-match 1 1 ']' '[[-\]]'
-match 0 0 '-' '[[-\]]'
+match 1 1 1 1 'a1B' '[[:alpha:]][[:digit:]][[:upper:]]'
+match 0 1 0 1 'a' '[[:digit:][:upper:][:space:]]'
+match 1 1 1 1 'A' '[[:digit:][:upper:][:space:]]'
+match 1 1 1 1 '1' '[[:digit:][:upper:][:space:]]'
+match 0 0 0 0 '1' '[[:digit:][:upper:][:spaci:]]'
+match 1 1 1 1 ' ' '[[:digit:][:upper:][:space:]]'
+match 0 0 0 0 '.' '[[:digit:][:upper:][:space:]]'
+match 1 1 1 1 '.' '[[:digit:][:punct:][:space:]]'
+match 1 1 1 1 '5' '[[:xdigit:]]'
+match 1 1 1 1 'f' '[[:xdigit:]]'
+match 1 1 1 1 'D' '[[:xdigit:]]'
+match 1 1 1 1 '_' '[[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:graph:][:lower:][:print:][:punct:][:space:][:upper:][:xdigit:]]'
+match 1 1 1 1 '.' '[^[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:lower:][:space:][:upper:][:xdigit:]]'
+match 1 1 1 1 '5' '[a-c[:digit:]x-z]'
+match 1 1 1 1 'b' '[a-c[:digit:]x-z]'
+match 1 1 1 1 'y' '[a-c[:digit:]x-z]'
+match 0 0 0 0 'q' '[a-c[:digit:]x-z]'
-# Test recursion and the abort code (use "wildtest -i" to see iteration counts)
-match 1 1 '-adobe-courier-bold-o-normal--12-120-75-75-m-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
-match 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-X-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
-match 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-/-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
-match 1 1 'XXX/adobe/courier/bold/o/normal//12/120/75/75/m/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
-match 0 0 'XXX/adobe/courier/bold/o/normal//12/120/75/75/X/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
-match 1 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txt' '**/*a*b*g*n*t'
-match 0 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txtz' '**/*a*b*g*n*t'
-match 0 x foo '*/*/*'
-match 0 x foo/bar '*/*/*'
-match 1 x foo/bba/arr '*/*/*'
-match 0 x foo/bb/aa/rr '*/*/*'
-match 1 x foo/bb/aa/rr '**/**/**'
-match 1 x abcXdefXghi '*X*i'
-match 0 x ab/cXd/efXg/hi '*X*i'
-match 1 x ab/cXd/efXg/hi '*/*X*/*/*i'
-match 1 x ab/cXd/efXg/hi '**/*X*/**/*i'
+# Additional tests, including some malformed wildmatch patterns
+match 1 1 1 1 ']' '[\\-^]'
+match 0 0 0 0 '[' '[\\-^]'
+match 1 1 1 1 '-' '[\-_]'
+match 1 1 1 1 ']' '[\]]'
+match 0 0 0 0 '\]' '[\]]'
+match 0 0 0 0 '\' '[\]]'
+match 0 0 0 0 'ab' 'a[]b'
+match 0 0 0 0 \
+ 1 1 1 1 'a[]b' 'a[]b'
+match 0 0 0 0 \
+ 1 1 1 1 'ab[' 'ab['
+match 0 0 0 0 'ab' '[!'
+match 0 0 0 0 'ab' '[-'
+match 1 1 1 1 '-' '[-]'
+match 0 0 0 0 '-' '[a-'
+match 0 0 0 0 '-' '[!a-'
+match 1 1 1 1 '-' '[--A]'
+match 1 1 1 1 '5' '[--A]'
+match 1 1 1 1 ' ' '[ --]'
+match 1 1 1 1 '$' '[ --]'
+match 1 1 1 1 '-' '[ --]'
+match 0 0 0 0 '0' '[ --]'
+match 1 1 1 1 '-' '[---]'
+match 1 1 1 1 '-' '[------]'
+match 0 0 0 0 'j' '[a-e-n]'
+match 1 1 1 1 '-' '[a-e-n]'
+match 1 1 1 1 'a' '[!------]'
+match 0 0 0 0 '[' '[]-a]'
+match 1 1 1 1 '^' '[]-a]'
+match 0 0 0 0 '^' '[!]-a]'
+match 1 1 1 1 '[' '[!]-a]'
+match 1 1 1 1 '^' '[a^bc]'
+match 1 1 1 1 '-b]' '[a-]b]'
+match 0 0 0 0 '\' '[\]'
+match 1 1 1 1 '\' '[\\]'
+match 0 0 0 0 '\' '[!\\]'
+match 1 1 1 1 'G' '[A-\\]'
+match 0 0 0 0 'aaabbb' 'b*a'
+match 0 0 0 0 'aabcaa' '*ba*'
+match 1 1 1 1 ',' '[,]'
+match 1 1 1 1 ',' '[\\,]'
+match 1 1 1 1 '\' '[\\,]'
+match 1 1 1 1 '-' '[,-.]'
+match 0 0 0 0 '+' '[,-.]'
+match 0 0 0 0 '-.]' '[,-.]'
+match 1 1 1 1 '2' '[\1-\3]'
+match 1 1 1 1 '3' '[\1-\3]'
+match 0 0 0 0 '4' '[\1-\3]'
+match 1 1 1 1 '\' '[[-\]]'
+match 1 1 1 1 '[' '[[-\]]'
+match 1 1 1 1 ']' '[[-\]]'
+match 0 0 0 0 '-' '[[-\]]'
-pathmatch 1 foo foo
-pathmatch 0 foo fo
-pathmatch 1 foo/bar foo/bar
-pathmatch 1 foo/bar 'foo/*'
-pathmatch 1 foo/bba/arr 'foo/*'
-pathmatch 1 foo/bba/arr 'foo/**'
-pathmatch 1 foo/bba/arr 'foo*'
-pathmatch 1 foo/bba/arr 'foo**'
-pathmatch 1 foo/bba/arr 'foo/*arr'
-pathmatch 1 foo/bba/arr 'foo/**arr'
-pathmatch 0 foo/bba/arr 'foo/*z'
-pathmatch 0 foo/bba/arr 'foo/**z'
-pathmatch 1 foo/bar 'foo?bar'
-pathmatch 1 foo/bar 'foo[/]bar'
-pathmatch 1 foo/bar 'foo[^a-z]bar'
-pathmatch 0 foo '*/*/*'
-pathmatch 0 foo/bar '*/*/*'
-pathmatch 1 foo/bba/arr '*/*/*'
-pathmatch 1 foo/bb/aa/rr '*/*/*'
-pathmatch 1 abcXdefXghi '*X*i'
-pathmatch 1 ab/cXd/efXg/hi '*/*X*/*/*i'
-pathmatch 1 ab/cXd/efXg/hi '*Xg*i'
+# Test recursion
+match 1 1 1 1 '-adobe-courier-bold-o-normal--12-120-75-75-m-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
+match 0 0 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-X-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
+match 0 0 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-/-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
+match 1 1 1 1 'XXX/adobe/courier/bold/o/normal//12/120/75/75/m/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
+match 0 0 0 0 'XXX/adobe/courier/bold/o/normal//12/120/75/75/X/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
+match 1 1 1 1 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txt' '**/*a*b*g*n*t'
+match 0 0 0 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txtz' '**/*a*b*g*n*t'
+match 0 0 0 0 foo '*/*/*'
+match 0 0 0 0 foo/bar '*/*/*'
+match 1 1 1 1 foo/bba/arr '*/*/*'
+match 0 0 1 1 foo/bb/aa/rr '*/*/*'
+match 1 1 1 1 foo/bb/aa/rr '**/**/**'
+match 1 1 1 1 abcXdefXghi '*X*i'
+match 0 0 1 1 ab/cXd/efXg/hi '*X*i'
+match 1 1 1 1 ab/cXd/efXg/hi '*/*X*/*/*i'
+match 1 1 1 1 ab/cXd/efXg/hi '**/*X*/**/*i'
-# Case-sensitivity features
-match 0 x 'a' '[A-Z]'
-match 1 x 'A' '[A-Z]'
-match 0 x 'A' '[a-z]'
-match 1 x 'a' '[a-z]'
-match 0 x 'a' '[[:upper:]]'
-match 1 x 'A' '[[:upper:]]'
-match 0 x 'A' '[[:lower:]]'
-match 1 x 'a' '[[:lower:]]'
-match 0 x 'A' '[B-Za]'
-match 1 x 'a' '[B-Za]'
-match 0 x 'A' '[B-a]'
-match 1 x 'a' '[B-a]'
-match 0 x 'z' '[Z-y]'
-match 1 x 'Z' '[Z-y]'
+# Extra pathmatch tests
+match 0 0 0 0 foo fo
+match 1 1 1 1 foo/bar foo/bar
+match 1 1 1 1 foo/bar 'foo/*'
+match 0 0 1 1 foo/bba/arr 'foo/*'
+match 1 1 1 1 foo/bba/arr 'foo/**'
+match 0 0 1 1 foo/bba/arr 'foo*'
+match 0 0 1 1 \
+ 1 1 1 1 foo/bba/arr 'foo**'
+match 0 0 1 1 foo/bba/arr 'foo/*arr'
+match 0 0 1 1 foo/bba/arr 'foo/**arr'
+match 0 0 0 0 foo/bba/arr 'foo/*z'
+match 0 0 0 0 foo/bba/arr 'foo/**z'
+match 0 0 1 1 foo/bar 'foo?bar'
+match 0 0 1 1 foo/bar 'foo[/]bar'
+match 0 0 1 1 foo/bar 'foo[^a-z]bar'
+match 0 0 1 1 ab/cXd/efXg/hi '*Xg*i'
-imatch 1 'a' '[A-Z]'
-imatch 1 'A' '[A-Z]'
-imatch 1 'A' '[a-z]'
-imatch 1 'a' '[a-z]'
-imatch 1 'a' '[[:upper:]]'
-imatch 1 'A' '[[:upper:]]'
-imatch 1 'A' '[[:lower:]]'
-imatch 1 'a' '[[:lower:]]'
-imatch 1 'A' '[B-Za]'
-imatch 1 'a' '[B-Za]'
-imatch 1 'A' '[B-a]'
-imatch 1 'a' '[B-a]'
-imatch 1 'z' '[Z-y]'
-imatch 1 'Z' '[Z-y]'
+# Extra case-sensitivity tests
+match 0 1 0 1 'a' '[A-Z]'
+match 1 1 1 1 'A' '[A-Z]'
+match 0 1 0 1 'A' '[a-z]'
+match 1 1 1 1 'a' '[a-z]'
+match 0 1 0 1 'a' '[[:upper:]]'
+match 1 1 1 1 'A' '[[:upper:]]'
+match 0 1 0 1 'A' '[[:lower:]]'
+match 1 1 1 1 'a' '[[:lower:]]'
+match 0 1 0 1 'A' '[B-Za]'
+match 1 1 1 1 'a' '[B-Za]'
+match 0 1 0 1 'A' '[B-a]'
+match 1 1 1 1 'a' '[B-a]'
+match 0 1 0 1 'z' '[Z-y]'
+match 1 1 1 1 'Z' '[Z-y]'
test_done
test_cmp From_.msg out
'
+test_expect_success 'rebase--am.sh and --show-current-patch' '
+ test_create_repo conflict-apply &&
+ (
+ cd conflict-apply &&
+ test_commit init &&
+ echo one >>init.t &&
+ git commit -a -m one &&
+ echo two >>init.t &&
+ git commit -a -m two &&
+ git tag two &&
+ test_must_fail git rebase --onto init HEAD^ &&
+ GIT_TRACE=1 git rebase --show-current-patch >/dev/null 2>stderr &&
+ grep "show.*$(git rev-parse two)" stderr
+ )
+'
+
+test_expect_success 'rebase--merge.sh and --show-current-patch' '
+ test_create_repo conflict-merge &&
+ (
+ cd conflict-merge &&
+ test_commit init &&
+ echo one >>init.t &&
+ git commit -a -m one &&
+ echo two >>init.t &&
+ git commit -a -m two &&
+ git tag two &&
+ test_must_fail git rebase --merge --onto init HEAD^ &&
+ git rebase --show-current-patch >actual.patch &&
+ GIT_TRACE=1 git rebase --show-current-patch >/dev/null 2>stderr &&
+ grep "show.*REBASE_HEAD" stderr &&
+ test "$(git rev-parse REBASE_HEAD)" = "$(git rev-parse two)"
+ )
+'
+
test_done
test 0 = $(grep -c "^[^#]" < .git/rebase-merge/git-rebase-todo)
'
+test_expect_success 'show conflicted patch' '
+ GIT_TRACE=1 git rebase --show-current-patch >/dev/null 2>stderr &&
+ grep "show.*REBASE_HEAD" stderr &&
+ # the original stopped-sha1 is abbreviated
+ stopped_sha1="$(git rev-parse $(cat ".git/rebase-merge/stopped-sha"))" &&
+ test "$(git rev-parse REBASE_HEAD)" = "$stopped_sha1"
+'
+
test_expect_success 'abort' '
git rebase --abort &&
test $(git rev-parse new-branch1) = $(git rev-parse HEAD) &&
git rebase -i $base &&
git cat-file commit HEAD | sed -e 1,/^\$/d > actual-squash-fixup &&
test_cmp expect-squash-fixup actual-squash-fixup &&
+ git cat-file commit HEAD@{2} |
+ grep "^# This is a combination of 3 commits\." &&
+ git cat-file commit HEAD@{3} |
+ grep "^# This is a combination of 2 commits\." &&
git checkout to-be-rebased &&
git branch -D squash-fixup
'
SQ="'"
test_expect_success 'rebase -i --gpg-sign=<key-id>' '
+ test_when_finished "test_might_fail git rebase --abort" &&
+ set_fake_editor &&
+ FAKE_LINES="edit 1" git rebase -i --gpg-sign="\"S I Gner\"" HEAD^ \
+ >out 2>err &&
+ test_i18ngrep "$SQ-S\"S I Gner\"$SQ" err
+'
+
+test_expect_success 'rebase -i --gpg-sign=<key-id> overrides commit.gpgSign' '
+ test_when_finished "test_might_fail git rebase --abort" &&
+ test_config commit.gpgsign true &&
set_fake_editor &&
FAKE_LINES="edit 1" git rebase -i --gpg-sign="\"S I Gner\"" HEAD^ \
>out 2>err &&
test_description='rebase should handle arbitrary git message'
. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-rebase.sh
cat >F <<\EOF
This is an example of a commit log message
test_tick &&
git commit -m "Initial commit" &&
git branch diff-in-message &&
+ git branch empty-message-merge &&
git checkout -b multi-line-subject &&
cat F >file2 &&
git cat-file commit HEAD | sed -e "1,/^\$/d" >G0 &&
+ git checkout empty-message-merge &&
+ echo file3 >file3 &&
+ git add file3 &&
+ git commit --allow-empty-message -m "" &&
+
git checkout master &&
echo One >file1 &&
test_cmp G G0
'
+test_expect_success 'rebase -m commit with empty message' '
+ test_must_fail git rebase -m master empty-message-merge &&
+ git rebase --abort &&
+ git rebase -m --allow-empty-message master empty-message-merge
+'
+
+test_expect_success 'rebase -i commit with empty message' '
+ git checkout diff-in-message &&
+ set_fake_editor &&
+ test_must_fail env FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \
+ git rebase -i HEAD^ &&
+ git rebase --abort &&
+ FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \
+ git rebase -i --allow-empty-message HEAD^
+'
+
test_done
>elif &&
git add elif &&
test_tick &&
- git commit -m second
+ git commit -m second &&
+ git checkout -b side2 &&
+ >afile &&
+ git add afile &&
+ test_tick &&
+ git commit -m third &&
+ echo hello >afile &&
+ test_tick &&
+ git commit -a -m fourth &&
+ git checkout -b side-merge &&
+ git reset --hard HEAD^^ &&
+ git merge --no-ff -m "A merge commit log message that has a long
+summary that spills over multiple lines.
+
+But otherwise with a sane description." side2 &&
+ git branch side-merge-original
'
test_expect_success rebase '
git cat-file commit side@{1} | sed -e "1,/^\$/d" >expect &&
test_cmp expect actual
+'
+test_expect_success rebasep '
+
+ git checkout side-merge &&
+ git rebase -p side &&
+ git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
+ git cat-file commit side-merge-original | sed -e "1,/^\$/d" >expect &&
+ test_cmp expect actual
+
'
test_done
test_cmp expect actual
'
-test_expect_success 'cherry-pick works with dirty renamed file' '
+test_expect_failure 'cherry-pick works with dirty renamed file' '
test_commit to-rename &&
git checkout -b unrelated &&
test_commit unrelated &&
test_tick &&
git commit -m renamed &&
echo modified >renamed &&
- git cherry-pick refs/heads/unrelated
+ test_must_fail git cherry-pick refs/heads/unrelated >out &&
+ test_i18ngrep "Refusing to lose dirty file at renamed" out &&
+ test $(git rev-parse :0:renamed) = $(git rev-parse HEAD^:to-rename.t) &&
+ grep -q "^modified$" renamed
'
test_done
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-submodule-update.sh
-KNOWN_FAILURE_CHERRY_PICK_SEES_EMPTY_COMMIT=1
KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1
KNOWN_FAILURE_NOFF_MERGE_ATTEMPTS_TO_MERGE_REMOVED_SUBMODULE_FILES=1
test_submodule_switch "git cherry-pick"
git revert HEAD
}
-KNOWN_FAILURE_CHERRY_PICK_SEES_EMPTY_COMMIT=1
KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1
test_submodule_switch "git_revert"
git rm path1 &&
mkdir subdir &&
git mv another-path subdir/path1 &&
- git status | test_i18ngrep "renamed: .*path1 -> subdir/path1"'
+ git status >out &&
+ test_i18ngrep "renamed: .*path1 -> subdir/path1" out
+'
test_expect_success 'favour same basenames even with minor differences' '
git show HEAD:path1 | sed "s/15/16/" > subdir/path1 &&
- git status | test_i18ngrep "renamed: .*path1 -> subdir/path1"'
+ git status >out &&
+ test_i18ngrep "renamed: .*path1 -> subdir/path1" out
+'
test_expect_success 'two files with same basename and same content' '
git reset --hard &&
git add dir &&
git commit -m 2 &&
git mv dir other-dir &&
- git status | test_i18ngrep "renamed: .*dir/A/file -> other-dir/A/file"
+ git status >out &&
+ test_i18ngrep "renamed: .*dir/A/file -> other-dir/A/file" out
'
test_expect_success 'setup for many rename source candidates' '
git commit -m message "$name"
'
+cat >expect72 <<-'EOF'
+ ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 +
+EOF
+test_expect_success "format-patch: small change with long name gives more space to the name" '
+ git format-patch -1 --stdout >output &&
+ grep " | " output >actual &&
+ test_cmp expect72 actual
+'
+
while read cmd args
do
- cat >expect <<-'EOF'
+ cat >expect80 <<-'EOF'
...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 +
EOF
test_expect_success "$cmd: small change with long name gives more space to the name" '
git $cmd $args >output &&
grep " | " output >actual &&
- test_cmp expect actual
+ test_cmp expect80 actual
'
+done <<\EOF
+diff HEAD^ HEAD --stat
+show --stat
+log -1 --stat
+EOF
+while read cmd args
+do
cat >expect <<-'EOF'
...aaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 +
EOF
git commit -m message abcd
'
-cat >expect80 <<'EOF'
- abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+cat >expect72 <<'EOF'
+ abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
EOF
-cat >expect80-graph <<'EOF'
-| abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+cat >expect72-graph <<'EOF'
+| abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
EOF
cat >expect200 <<'EOF'
abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
test_cmp "$expect-graph" actual
'
done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
respects expect200 diff HEAD^ HEAD --stat
respects expect200 show --stat
respects expect200 log -1 --stat
test_cmp "$expect-graph" actual
'
done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
respects expect40 diff HEAD^ HEAD --stat
respects expect40 show --stat
respects expect40 log -1 --stat
test_cmp "$expect-graph" actual
'
done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
respects expect40 diff HEAD^ HEAD --stat
respects expect40 show --stat
respects expect40 log -1 --stat
log -1 --stat
EOF
-cat >expect80 <<'EOF'
- ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 ++++++++++++++++++++
+cat >expect72 <<'EOF'
+ ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++
EOF
-cat >expect80-graph <<'EOF'
-| ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 ++++++++++++++++++++
+cat >expect72-graph <<'EOF'
+| ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++
EOF
cat >expect200 <<'EOF'
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
test_cmp "$expect-graph" actual
'
done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
respects expect200 diff HEAD^ HEAD --stat
respects expect200 show --stat
respects expect200 log -1 --stat
test_cmp "$expect-graph" actual
'
done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
respects expect1 diff HEAD^ HEAD --stat
respects expect1 show --stat
respects expect1 log -1 --stat
test_cmp expected "post image.txt"
'
+cat >diff-from-svn <<\EOF
+Index: Makefile
+===================================================================
+diff --git a/branches/Makefile
+deleted file mode 100644
+--- a/branches/Makefile (revision 13)
++++ /dev/null (nonexistent)
+@@ -1 +0,0 @@
+-
+EOF
+
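+# Note that "git apply -p2" strips two leading path components, so
+# "a/branches/Makefile" in the diff above targets the top-level Makefile
+# created in the test.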
+test_expect_success 'apply handles a diff generated by Subversion' '
+ >Makefile &&
+ git apply -p2 diff-from-svn &&
+ test_path_is_missing Makefile
+'
+
test_done
test -d .git/rebase-apply
'
+test_expect_success 'am --show-current-patch' '
+ git am --show-current-patch >actual.patch &&
+ test_cmp .git/rebase-apply/0001 actual.patch
+'
+
test_expect_success 'am --skip works' '
echo goodbye >expected &&
git am --skip &&
git cat-file commit HEAD | grep "^$LONG$"
'
+test_expect_success 'am --quit keeps HEAD where it is' '
+ mkdir .git/rebase-apply &&
+ >.git/rebase-apply/last &&
+ >.git/rebase-apply/next &&
+ git rev-parse HEAD^ >.git/ORIG_HEAD &&
+ git rev-parse HEAD >expected &&
+ git am --quit &&
+ test_path_is_missing .git/rebase-apply &&
+ git rev-parse HEAD >actual &&
+ test_cmp expected actual
+'
+
test_done
)
'
+test_expect_success 'filtering by size' '
+ rm -rf server client &&
+ test_create_repo server &&
+ test_commit -C server one &&
+ test_config -C server uploadpack.allowfilter 1 &&
+
+ test_create_repo client &&
+ git -C client fetch-pack --filter=blob:limit=0 ../server HEAD &&
+
+ # Ensure that object is not inadvertently fetched
+ test_must_fail git -C client cat-file -e $(git hash-object server/one.t)
+'
+
+test_expect_success 'filtering by size has no effect if support for it is not advertised' '
+ rm -rf server client &&
+ test_create_repo server &&
+ test_commit -C server one &&
+
+ test_create_repo client &&
+ git -C client fetch-pack --filter=blob:limit=0 ../server HEAD 2> err &&
+
+ # Ensure that object is fetched
+ git -C client cat-file -e $(git hash-object server/one.t) &&
+
+ test_i18ngrep "filtering not recognized by server" err
+'
+
+fetch_filter_blob_limit_zero () {
+ SERVER="$1"
+ URL="$2"
+
+ rm -rf "$SERVER" client &&
+ test_create_repo "$SERVER" &&
+ test_commit -C "$SERVER" one &&
+ test_config -C "$SERVER" uploadpack.allowfilter 1 &&
+
+ git clone "$URL" client &&
+ test_config -C client extensions.partialclone origin &&
+
+ test_commit -C "$SERVER" two &&
+
+ git -C client fetch --filter=blob:limit=0 origin HEAD:somewhere &&
+
+ # Ensure that commit is fetched, but blob is not
+ test_config -C client extensions.partialclone "arbitrary string" &&
+ git -C client cat-file -e $(git -C "$SERVER" rev-parse two) &&
+ test_must_fail git -C client cat-file -e $(git hash-object "$SERVER/two.t")
+}
+
+test_expect_success 'fetch with --filter=blob:limit=0' '
+ fetch_filter_blob_limit_zero server server
+'
+
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'fetch with --filter=blob:limit=0 and HTTP' '
+ fetch_filter_blob_limit_zero "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server"
+'
+
+stop_httpd
+
+
test_done
(
cd descriptive &&
git fetch o 2>actual &&
- grep " -> refs/crazyheads/descriptive-branch$" actual |
- test_i18ngrep "new branch" &&
- grep " -> descriptive-tag$" actual |
- test_i18ngrep "new tag" &&
- grep " -> crazy$" actual |
- test_i18ngrep "new ref"
+ test_i18ngrep "new branch.* -> refs/crazyheads/descriptive-branch$" actual &&
+ test_i18ngrep "new tag.* -> descriptive-tag$" actual &&
+ test_i18ngrep "new ref.* -> crazy$" actual
) &&
git checkout master
'
set_config_tristate () {
# var=$1 val=$2
case "$2" in
- unset) test_unconfig "$1" ;;
- *) git config "$1" "$2" ;;
+ unset)
+ test_unconfig "$1"
+ ;;
+ *)
+ git config "$1" "$2"
+ key=$(echo $1 | sed -e 's/^remote\.origin/fetch/')
+ git_fetch_c="$git_fetch_c -c $key=$2"
+ ;;
esac
}
test_configured_prune () {
- fetch_prune=$1 remote_origin_prune=$2 cmdline=$3 expected=$4
+ test_configured_prune_type "$@" "name"
+ test_configured_prune_type "$@" "link"
+}
- test_expect_success "prune fetch.prune=$1 remote.origin.prune=$2${3:+ $3}; $4" '
+test_configured_prune_type () {
+ fetch_prune=$1
+ remote_origin_prune=$2
+ fetch_prune_tags=$3
+ remote_origin_prune_tags=$4
+ expected_branch=$5
+ expected_tag=$6
+ cmdline=$7
+ mode=$8
+
+ if test -z "$cmdline_setup"
+ then
+ test_expect_success 'setup cmdline_setup variable for subsequent test' '
+ remote_url="file://$(git -C one config remote.origin.url)" &&
+ remote_fetch="$(git -C one config remote.origin.fetch)" &&
+ cmdline_setup="\"$remote_url\" \"$remote_fetch\""
+ '
+ fi
+
+ if test "$mode" = 'link'
+ then
+ new_cmdline=""
+
+ if test "$cmdline" = ""
+ then
+ new_cmdline=$cmdline_setup
+ else
+ new_cmdline=$(printf "%s" "$cmdline" | perl -pe 's[origin(?!/)]["'"$remote_url"'"]g')
+ fi
+
+ if test "$fetch_prune_tags" = 'true' ||
+ test "$remote_origin_prune_tags" = 'true'
+ then
+			if ! printf '%s' "$cmdline" | grep -q refs/remotes/origin/
+ then
+ new_cmdline="$new_cmdline refs/tags/*:refs/tags/*"
+ fi
+ fi
+
+ cmdline="$new_cmdline"
+ fi
+
+ test_expect_success "$mode prune fetch.prune=$1 remote.origin.prune=$2 fetch.pruneTags=$3 remote.origin.pruneTags=$4${7:+ $7}; branch:$5 tag:$6" '
# make sure a newbranch is there in . and also in one
git branch -f newbranch &&
+ git tag -f newtag &&
(
cd one &&
test_unconfig fetch.prune &&
+ test_unconfig fetch.pruneTags &&
test_unconfig remote.origin.prune &&
- git fetch &&
- git rev-parse --verify refs/remotes/origin/newbranch
+ test_unconfig remote.origin.pruneTags &&
+ git fetch '"$cmdline_setup"' &&
+ git rev-parse --verify refs/remotes/origin/newbranch &&
+ git rev-parse --verify refs/tags/newtag
) &&
# now remove it
git branch -d newbranch &&
+ git tag -d newtag &&
# then test
(
cd one &&
+ git_fetch_c="" &&
set_config_tristate fetch.prune $fetch_prune &&
+ set_config_tristate fetch.pruneTags $fetch_prune_tags &&
set_config_tristate remote.origin.prune $remote_origin_prune &&
-
- git fetch $cmdline &&
- case "$expected" in
+ set_config_tristate remote.origin.pruneTags $remote_origin_prune_tags &&
+
+ if test "$mode" != "link"
+ then
+ git_fetch_c=""
+ fi &&
+ git$git_fetch_c fetch '"$cmdline"' &&
+ case "$expected_branch" in
pruned)
test_must_fail git rev-parse --verify refs/remotes/origin/newbranch
;;
kept)
git rev-parse --verify refs/remotes/origin/newbranch
;;
+ esac &&
+ case "$expected_tag" in
+ pruned)
+ test_must_fail git rev-parse --verify refs/tags/newtag
+ ;;
+ kept)
+ git rev-parse --verify refs/tags/newtag
+ ;;
esac
)
'
}
-test_configured_prune unset unset "" kept
-test_configured_prune unset unset "--no-prune" kept
-test_configured_prune unset unset "--prune" pruned
-
-test_configured_prune false unset "" kept
-test_configured_prune false unset "--no-prune" kept
-test_configured_prune false unset "--prune" pruned
-
-test_configured_prune true unset "" pruned
-test_configured_prune true unset "--prune" pruned
-test_configured_prune true unset "--no-prune" kept
-
-test_configured_prune unset false "" kept
-test_configured_prune unset false "--no-prune" kept
-test_configured_prune unset false "--prune" pruned
-
-test_configured_prune false false "" kept
-test_configured_prune false false "--no-prune" kept
-test_configured_prune false false "--prune" pruned
-
-test_configured_prune true false "" kept
-test_configured_prune true false "--prune" pruned
-test_configured_prune true false "--no-prune" kept
-
-test_configured_prune unset true "" pruned
-test_configured_prune unset true "--no-prune" kept
-test_configured_prune unset true "--prune" pruned
-
-test_configured_prune false true "" pruned
-test_configured_prune false true "--no-prune" kept
-test_configured_prune false true "--prune" pruned
-
-test_configured_prune true true "" pruned
-test_configured_prune true true "--prune" pruned
-test_configured_prune true true "--no-prune" kept
+# $1 config: fetch.prune
+# $2 config: remote.<name>.prune
+# $3 config: fetch.pruneTags
+# $4 config: remote.<name>.pruneTags
+# $5 expect: branch to be pruned?
+# $6 expect: tag to be pruned?
+# $7 git-fetch $cmdline:
+#
+# $1 $2 $3 $4 $5 $6 $7
+test_configured_prune unset unset unset unset kept kept ""
+test_configured_prune unset unset unset unset kept kept "--no-prune"
+test_configured_prune unset unset unset unset pruned kept "--prune"
+test_configured_prune unset unset unset unset kept pruned \
+ "--prune origin refs/tags/*:refs/tags/*"
+test_configured_prune unset unset unset unset pruned pruned \
+ "--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*"
+
+test_configured_prune false unset unset unset kept kept ""
+test_configured_prune false unset unset unset kept kept "--no-prune"
+test_configured_prune false unset unset unset pruned kept "--prune"
+
+test_configured_prune true unset unset unset pruned kept ""
+test_configured_prune true unset unset unset pruned kept "--prune"
+test_configured_prune true unset unset unset kept kept "--no-prune"
+
+test_configured_prune unset false unset unset kept kept ""
+test_configured_prune unset false unset unset kept kept "--no-prune"
+test_configured_prune unset false unset unset pruned kept "--prune"
+
+test_configured_prune false false unset unset kept kept ""
+test_configured_prune false false unset unset kept kept "--no-prune"
+test_configured_prune false false unset unset pruned kept "--prune"
+test_configured_prune false false unset unset kept pruned \
+ "--prune origin refs/tags/*:refs/tags/*"
+test_configured_prune false false unset unset pruned pruned \
+ "--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*"
+
+test_configured_prune true false unset unset kept kept ""
+test_configured_prune true false unset unset pruned kept "--prune"
+test_configured_prune true false unset unset kept kept "--no-prune"
+
+test_configured_prune unset true unset unset pruned kept ""
+test_configured_prune unset true unset unset kept kept "--no-prune"
+test_configured_prune unset true unset unset pruned kept "--prune"
+
+test_configured_prune false true unset unset pruned kept ""
+test_configured_prune false true unset unset kept kept "--no-prune"
+test_configured_prune false true unset unset pruned kept "--prune"
+
+test_configured_prune true true unset unset pruned kept ""
+test_configured_prune true true unset unset pruned kept "--prune"
+test_configured_prune true true unset unset kept kept "--no-prune"
+test_configured_prune true true unset unset kept pruned \
+ "--prune origin refs/tags/*:refs/tags/*"
+test_configured_prune true true unset unset pruned pruned \
+ "--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*"
+
+# --prune-tags on its own does nothing; it needs --prune as well. The
+# same goes for fetch.pruneTags without fetch.prune
+test_configured_prune unset unset unset unset kept kept "--prune-tags"
+test_configured_prune unset unset true unset kept kept ""
+test_configured_prune unset unset unset true kept kept ""
+
+# These will prune the tags
+test_configured_prune unset unset unset unset pruned pruned "--prune --prune-tags"
+test_configured_prune true unset true unset pruned pruned ""
+test_configured_prune unset true unset true pruned pruned ""
+
+# remote.<name>.pruneTags overrides fetch.pruneTags, just like
+# remote.<name>.prune overrides fetch.prune if set.
+test_configured_prune true unset true unset pruned pruned ""
+test_configured_prune false true false true pruned pruned ""
+test_configured_prune true false true false kept kept ""
+
+# When --prune-tags is supplied it's ignored if an explicit refspec is
+# given, same for the configuration options.
+test_configured_prune unset unset unset unset pruned kept \
+ "--prune --prune-tags origin +refs/heads/*:refs/remotes/origin/*"
+test_configured_prune unset unset true unset pruned kept \
+ "--prune origin +refs/heads/*:refs/remotes/origin/*"
+test_configured_prune unset unset unset true pruned kept \
+ "--prune origin +refs/heads/*:refs/remotes/origin/*"
+
+# Pruning also takes place if a file:// url replaces a named
+# remote. However, because there's no implicit
+# +refs/heads/*:refs/remotes/origin/* refspec and supplying it on the
+# command-line negates --prune-tags, the branches will not be pruned.
+test_configured_prune_type unset unset unset unset kept kept "origin --prune-tags" "name"
+test_configured_prune_type unset unset unset unset kept kept "origin --prune-tags" "link"
+test_configured_prune_type unset unset unset unset pruned pruned "origin --prune --prune-tags" "name"
+test_configured_prune_type unset unset unset unset kept pruned "origin --prune --prune-tags" "link"
+test_configured_prune_type unset unset unset unset pruned pruned "--prune --prune-tags origin" "name"
+test_configured_prune_type unset unset unset unset kept pruned "--prune --prune-tags origin" "link"
+test_configured_prune_type unset unset true unset pruned pruned "--prune origin" "name"
+test_configured_prune_type unset unset true unset kept pruned "--prune origin" "link"
+test_configured_prune_type unset unset unset true pruned pruned "--prune origin" "name"
+test_configured_prune_type unset unset unset true kept pruned "--prune origin" "link"
+test_configured_prune_type true unset true unset pruned pruned "origin" "name"
+test_configured_prune_type true unset true unset kept pruned "origin" "link"
+test_configured_prune_type unset true true unset pruned pruned "origin" "name"
+test_configured_prune_type unset true true unset kept pruned "origin" "link"
+test_configured_prune_type unset true unset true pruned pruned "origin" "name"
+test_configured_prune_type unset true unset true kept pruned "origin" "link"
+
+# When all remote.origin.fetch settings are deleted, a "--prune
+# --prune-tags" fetch still implicitly supplies refs/tags/*:refs/tags/*,
+# so tags, but not tracking branches, will be deleted.
+test_expect_success 'remove remote.origin.fetch "one"' '
+ (
+ cd one &&
+ git config --unset-all remote.origin.fetch
+ )
+'
+test_configured_prune_type unset unset unset unset kept pruned "origin --prune --prune-tags" "name"
+test_configured_prune_type unset unset unset unset kept pruned "origin --prune --prune-tags" "link"
test_expect_success 'all boundary commits are excluded' '
test_commit base &&
)
'
-test_expect_success "'fetch.recurseSubmodules=on-demand' works also without .gitmodule entry" '
+test_expect_success "'fetch.recurseSubmodules=on-demand' works also without .gitmodules entry" '
(
cd downstream &&
git fetch --recurse-submodules
cat >expected &&
# We're not interested in the error
# "fatal: The remote end hung up unexpectedly":
- test_i18ngrep -E '^(fatal|warning):' <error | grep -v 'hung up' >actual | sort &&
+	test_i18ngrep -E '^(fatal|warning):' error | grep -v 'hung up' | sort >actual &&
test_i18ncmp expected actual
}
test_commit no-progress &&
test_terminal git push --no-progress >output 2>&1 &&
test_i18ngrep "^To http" output &&
- test_i18ngrep ! "^Writing objects"
+ test_i18ngrep ! "^Writing objects" output
'
test_expect_success 'push --progress shows progress to non-tty' '
test_refs master HEAD@{1}
'
+test_expect_success 'push options keep quoted characters intact (direct)' '
+ mk_repo_pair &&
+ git -C upstream config receive.advertisePushOptions true &&
+ test_commit -C workbench one &&
+ git -C workbench push --push-option="\"embedded quotes\"" up master &&
+ echo "\"embedded quotes\"" >expect &&
+ test_cmp expect upstream/.git/hooks/pre-receive.push_options
+'
+
. "$TEST_DIRECTORY"/lib-httpd.sh
start_httpd
-test_expect_success 'push option denied properly by http server' '
+# Set up an http repository for fetching/pushing, with its
+# receive.advertisePushOptions config set to $1.
+mk_http_pair () {
test_when_finished "rm -rf test_http_clone" &&
- test_when_finished "rm -rf \"$HTTPD_DOCUMENT_ROOT_PATH\"/upstream.git" &&
+ test_when_finished 'rm -rf "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git' &&
mk_repo_pair &&
- git -C upstream config receive.advertisePushOptions false &&
+ git -C upstream config receive.advertisePushOptions "$1" &&
git -C upstream config http.receivepack true &&
cp -R upstream/.git "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git &&
- git clone "$HTTPD_URL"/smart/upstream test_http_clone &&
+ git clone "$HTTPD_URL"/smart/upstream test_http_clone
+}
+
+test_expect_success 'push option denied properly by http server' '
+ mk_http_pair false &&
test_commit -C test_http_clone one &&
test_must_fail git -C test_http_clone push --push-option=asdf origin master 2>actual &&
test_i18ngrep "the receiving end does not support push options" actual &&
'
test_expect_success 'push options work properly across http' '
- test_when_finished "rm -rf test_http_clone" &&
- test_when_finished "rm -rf \"$HTTPD_DOCUMENT_ROOT_PATH\"/upstream.git" &&
- mk_repo_pair &&
- git -C upstream config receive.advertisePushOptions true &&
- git -C upstream config http.receivepack true &&
- cp -R upstream/.git "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git &&
- git clone "$HTTPD_URL"/smart/upstream test_http_clone &&
+ mk_http_pair true &&
test_commit -C test_http_clone one &&
git -C test_http_clone push origin master &&
test_cmp expect actual
'
+test_expect_success 'push options keep quoted characters intact (http)' '
+ mk_http_pair true &&
+
+ test_commit -C test_http_clone one &&
+ git -C test_http_clone push --push-option="\"embedded quotes\"" origin master &&
+ echo "\"embedded quotes\"" >expect &&
+ test_cmp expect "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git/hooks/pre-receive.push_options
+'
+
stop_httpd
test_done
submodule update sub
'
+test_expect_success 'GIT_REDACT_COOKIES redacts cookies' '
+ rm -rf clone &&
+ echo "Set-Cookie: Foo=1" >cookies &&
+ echo "Set-Cookie: Bar=2" >>cookies &&
+ GIT_TRACE_CURL=true GIT_REDACT_COOKIES=Bar,Baz \
+ git -c "http.cookieFile=$(pwd)/cookies" clone \
+ $HTTPD_URL/smart/repo.git clone 2>err &&
+ grep "Cookie:.*Foo=1" err &&
+ grep "Cookie:.*Bar=<redacted>" err &&
+ ! grep "Cookie:.*Bar=2" err
+'
+
+test_expect_success 'GIT_REDACT_COOKIES handles empty values' '
+ rm -rf clone &&
+ echo "Set-Cookie: Foo=" >cookies &&
+ GIT_TRACE_CURL=true GIT_REDACT_COOKIES=Foo \
+ git -c "http.cookieFile=$(pwd)/cookies" clone \
+ $HTTPD_URL/smart/repo.git clone 2>err &&
+ grep "Cookie:.*Foo=<redacted>" err
+'
+
+test_expect_success 'GIT_TRACE_CURL_NO_DATA prevents data from being traced' '
+ rm -rf clone &&
+ GIT_TRACE_CURL=true \
+ git clone $HTTPD_URL/smart/repo.git clone 2>err &&
+ grep "=> Send data" err &&
+
+ rm -rf clone &&
+ GIT_TRACE_CURL=true GIT_TRACE_CURL_NO_DATA=1 \
+ git clone $HTTPD_URL/smart/repo.git clone 2>err &&
+ ! grep "=> Send data" err
+'
+
stop_httpd
test_done
git init --bare "$repo" &&
git push "$repo" HEAD &&
>"$repo"/git-daemon-export-ok &&
- rm -rf tmp.git &&
GIT_OVERRIDE_VIRTUAL_HOST=localhost \
- git clone --bare "$GIT_DAEMON_URL/interp.git" tmp.git &&
- rm -rf tmp.git &&
+ git ls-remote "$GIT_DAEMON_URL/interp.git" &&
GIT_OVERRIDE_VIRTUAL_HOST=LOCALHOST \
- git clone --bare "$GIT_DAEMON_URL/interp.git" tmp.git
+ git ls-remote "$GIT_DAEMON_URL/interp.git"
'
test_expect_success 'hostname cannot break out of directory' '
- rm -rf tmp.git &&
repo="$GIT_DAEMON_DOCUMENT_ROOT_PATH/../escape.git" &&
git init --bare "$repo" &&
git push "$repo" HEAD &&
>"$repo"/git-daemon-export-ok &&
test_must_fail \
env GIT_OVERRIDE_VIRTUAL_HOST=.. \
- git clone --bare "$GIT_DAEMON_URL/escape.git" tmp.git
+ git ls-remote "$GIT_DAEMON_URL/escape.git"
+'
+
+test_expect_success 'daemon log records all attributes' '
+ cat >expect <<-\EOF &&
+ Extended attribute "host": localhost
+ Extended attribute "protocol": version=1
+ EOF
+ >daemon.log &&
+ GIT_OVERRIDE_VIRTUAL_HOST=localhost \
+ git -c protocol.version=1 \
+ ls-remote "$GIT_DAEMON_URL/interp.git" &&
+ grep -i extended.attribute daemon.log | cut -d" " -f2- >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success FAKENC 'hostname interpolation works after LF-stripping' '
+ {
+ printf "git-upload-pack /interp.git\n\0host=localhost" | packetize
+ printf "0000"
+ } >input &&
+ fake_nc "$GIT_DAEMON_HOST_PORT" <input >output &&
+ depacketize <output >output.raw &&
+
+ # just pick out the value of master, which avoids any protocol
+ # particulars
+ perl -lne "print \$1 if m{^(\\S+) refs/heads/master}" <output.raw >actual &&
+ git -C "$repo" rev-parse master >expect &&
+ test_cmp expect actual
'
stop_git_daemon
)
'
+partial_clone () {
+ SERVER="$1" &&
+ URL="$2" &&
+
+ rm -rf "$SERVER" client &&
+ test_create_repo "$SERVER" &&
+ test_commit -C "$SERVER" one &&
+ HASH1=$(git hash-object "$SERVER/one.t") &&
+ git -C "$SERVER" revert HEAD &&
+ test_commit -C "$SERVER" two &&
+ HASH2=$(git hash-object "$SERVER/two.t") &&
+ test_config -C "$SERVER" uploadpack.allowfilter 1 &&
+ test_config -C "$SERVER" uploadpack.allowanysha1inwant 1 &&
+
+ git clone --filter=blob:limit=0 "$URL" client &&
+
+ git -C client fsck &&
+
+ # Ensure that unneeded blobs are not inadvertently fetched.
+ test_config -C client extensions.partialclone "not a remote" &&
+ test_must_fail git -C client cat-file -e "$HASH1" &&
+
+ # But this blob was fetched, because clone performs an initial checkout
+ git -C client cat-file -e "$HASH2"
+}
+
+test_expect_success 'partial clone' '
+ partial_clone server "file://$(pwd)/server"
+'
+
+test_expect_success 'partial clone: warn if server does not support object filtering' '
+ rm -rf server client &&
+ test_create_repo server &&
+ test_commit -C server one &&
+
+ git clone --filter=blob:limit=0 "file://$(pwd)/server" client 2> err &&
+
+ test_i18ngrep "filtering not recognized by server" err
+'
+
+test_expect_success 'batch missing blob request during checkout' '
+ rm -rf server client &&
+
+ test_create_repo server &&
+ echo a >server/a &&
+ echo b >server/b &&
+ git -C server add a b &&
+
+ git -C server commit -m x &&
+ echo aa >server/a &&
+ echo bb >server/b &&
+ git -C server add a b &&
+ git -C server commit -m x &&
+
+ test_config -C server uploadpack.allowfilter 1 &&
+ test_config -C server uploadpack.allowanysha1inwant 1 &&
+
+ git clone --filter=blob:limit=0 "file://$(pwd)/server" client &&
+
+	# Ensure that there is only one negotiation by checking that there is
+	# only one "done" line sent. ("done" marks the end of negotiation.)
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client checkout HEAD^ &&
+ grep "git> done" trace >done_lines &&
+ test_line_count = 1 done_lines
+'
+
+test_expect_success 'batch missing blob request does not inadvertently try to fetch gitlinks' '
+ rm -rf server client &&
+
+ test_create_repo repo_for_submodule &&
+ test_commit -C repo_for_submodule x &&
+
+ test_create_repo server &&
+ echo a >server/a &&
+ echo b >server/b &&
+ git -C server add a b &&
+ git -C server commit -m x &&
+
+ echo aa >server/a &&
+ echo bb >server/b &&
+ # Also add a gitlink pointing to an arbitrary repository
+ git -C server submodule add "$(pwd)/repo_for_submodule" c &&
+ git -C server add a b c &&
+ git -C server commit -m x &&
+
+ test_config -C server uploadpack.allowfilter 1 &&
+ test_config -C server uploadpack.allowanysha1inwant 1 &&
+
+ # Make sure that it succeeds
+ git clone --filter=blob:limit=0 "file://$(pwd)/server" client
+'
+
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'partial clone using HTTP' '
+ partial_clone "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server"
+'
+
+stop_httpd
+
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='git partial clone'
+
+. ./test-lib.sh
+
+# create a normal "src" repo where we can later create new commits.
+# expect_1.oids will contain a list of the OIDs of all blobs.
+test_expect_success 'setup normal src repo' '
+ echo "{print \$1}" >print_1.awk &&
+ echo "{print \$2}" >print_2.awk &&
+
+ git init src &&
+ for n in 1 2 3 4
+ do
+ echo "This is file: $n" > src/file.$n.txt
+ git -C src add file.$n.txt
+ git -C src commit -m "file $n"
+ git -C src ls-files -s file.$n.txt >>temp
+ done &&
+ awk -f print_2.awk <temp | sort >expect_1.oids &&
+ test_line_count = 4 expect_1.oids
+'
+
+# bare clone "src" giving "srv.bare" for use as our server.
+test_expect_success 'setup bare clone for server' '
+ git clone --bare "file://$(pwd)/src" srv.bare &&
+ git -C srv.bare config --local uploadpack.allowfilter 1 &&
+ git -C srv.bare config --local uploadpack.allowanysha1inwant 1
+'
+
+# do basic partial clone from "srv.bare"
+# confirm we are missing all of the known blobs.
+# confirm partial clone was registered in the local config.
+test_expect_success 'do partial clone 1' '
+ git clone --no-checkout --filter=blob:none "file://$(pwd)/srv.bare" pc1 &&
+ git -C pc1 rev-list HEAD --quiet --objects --missing=print \
+ | awk -f print_1.awk \
+ | sed "s/?//" \
+ | sort >observed.oids &&
+ test_cmp expect_1.oids observed.oids &&
+ test "$(git -C pc1 config --local core.repositoryformatversion)" = "1" &&
+ test "$(git -C pc1 config --local extensions.partialclone)" = "origin" &&
+ test "$(git -C pc1 config --local core.partialclonefilter)" = "blob:none"
+'
+
+# checkout master to force dynamic object fetch of blobs at HEAD.
+test_expect_success 'verify checkout with dynamic object fetch' '
+ git -C pc1 rev-list HEAD --quiet --objects --missing=print >observed &&
+ test_line_count = 4 observed &&
+ git -C pc1 checkout master &&
+ git -C pc1 rev-list HEAD --quiet --objects --missing=print >observed &&
+ test_line_count = 0 observed
+'
+
+# create new commits in "src" repo to establish a blame history on file.1.txt
+# and push to "srv.bare".
+test_expect_success 'push new commits to server' '
+ git -C src remote add srv "file://$(pwd)/srv.bare" &&
+ for x in a b c d e
+ do
+ echo "Mod file.1.txt $x" >>src/file.1.txt
+ git -C src add file.1.txt
+ git -C src commit -m "mod $x"
+ done &&
+ git -C src blame master -- file.1.txt >expect.blame &&
+ git -C src push -u srv master
+'
+
+# (partial) fetch in the partial clone repo from the promisor remote.
+# verify that fetch inherited the filter-spec from the config and DOES NOT
+# have the new blobs.
+test_expect_success 'partial fetch inherits filter settings' '
+ git -C pc1 fetch origin &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+ test_line_count = 5 observed
+'
+
+# force dynamic object fetch using diff.
+# we should only get 1 new blob (for the file in origin/master).
+test_expect_success 'verify diff causes dynamic object fetch' '
+ git -C pc1 diff master..origin/master -- file.1.txt &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+ test_line_count = 4 observed
+'
+
+# force full dynamic object fetch of the file's history using blame.
+# we should get the intermediate blobs for the file.
+test_expect_success 'verify blame causes dynamic object fetch' '
+ git -C pc1 blame origin/master -- file.1.txt >observed.blame &&
+ test_cmp expect.blame observed.blame &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+ test_line_count = 0 observed
+'
+
+# create new commits in "src" repo to establish a history on file.2.txt
+# and push to "srv.bare".
+test_expect_success 'push new commits to server for file.2.txt' '
+ for x in a b c d e f
+ do
+ echo "Mod file.2.txt $x" >>src/file.2.txt
+ git -C src add file.2.txt
+ git -C src commit -m "mod $x"
+ done &&
+ git -C src push -u srv master
+'
+
+# Do FULL fetch by disabling inherited filter-spec using --no-filter.
+# Verify we have all the new blobs.
+test_expect_success 'override inherited filter-spec using --no-filter' '
+ git -C pc1 fetch --no-filter origin &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+ test_line_count = 0 observed
+'
+
+# create new commits in "src" repo to establish a history on file.3.txt
+# and push to "srv.bare".
+test_expect_success 'push new commits to server for file.3.txt' '
+ for x in a b c d e f
+ do
+ echo "Mod file.3.txt $x" >>src/file.3.txt
+ git -C src add file.3.txt
+ git -C src commit -m "mod $x"
+ done &&
+ git -C src push -u srv master
+'
+
+# Do a partial fetch and then try to manually fetch the missing objects.
+# This can be used as the basis of a pre-command hook to bulk-fetch objects,
+# perhaps combined with running the actual command in dry-run mode.
+test_expect_success 'manual prefetch of missing objects' '
+ git -C pc1 fetch --filter=blob:none origin &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print \
+ | awk -f print_1.awk \
+ | sed "s/?//" \
+ | sort >observed.oids &&
+ test_line_count = 6 observed.oids &&
+ git -C pc1 fetch-pack --stdin "file://$(pwd)/srv.bare" <observed.oids &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print \
+ | awk -f print_1.awk \
+ | sed "s/?//" \
+ | sort >observed.oids &&
+ test_line_count = 0 observed.oids
+'
+
+test_done
test_must_fail env GIT_ALLOW_PROTOCOL=http:https \
GIT_SMART_HTTP=0 \
git clone "$HTTPD_URL/ftp-redir/repo.git" 2>stderr &&
- {
- test_i18ngrep "ftp.*disabled" stderr ||
- test_i18ngrep "your curl version is too old"
- }
+ test_i18ngrep -E "(ftp.*disabled|your curl version is too old)" stderr
'
test_expect_success 'curl limits redirects' '
rm -f A M N &&
git reset --hard &&
git checkout change+rename &&
- GIT_MERGE_VERBOSITY=3 git merge change | test_i18ngrep "^Skipped B" &&
+ GIT_MERGE_VERBOSITY=3 git merge change >out &&
+ test_i18ngrep "^Skipped B" out &&
git reset --hard HEAD^ &&
git checkout change &&
- GIT_MERGE_VERBOSITY=3 git merge change+rename | test_i18ngrep "^Skipped B"
+ GIT_MERGE_VERBOSITY=3 git merge change+rename >out &&
+ test_i18ngrep "^Skipped B" out
'
test_expect_success 'setup for rename + d/f conflicts' '
check_describe tags/c --all c
check_describe heads/branch_A --all --match='branch_*' branch_A
+test_expect_success 'describe complains about tree object' '
+ test_must_fail git describe HEAD^{tree}
+'
+
+test_expect_success 'describe complains about missing object' '
+ test_must_fail git describe $_z40
+'
+
test_done
for i in "--perl --shell" "-s --python" "--python --tcl" "--tcl --perl"; do
test_expect_success "more than one quoting style: $i" "
- git for-each-ref $i 2>&1 | (read line &&
- case \$line in
- \"error: more than one quoting style\"*) : happy;;
- *) false
- esac)
+ test_must_fail git for-each-ref $i 2>err &&
+ grep '^error: more than one quoting style' err
"
done
'git diff-tree -r -M --name-status HEAD^ HEAD | \
grep "^R100..*path1/COPYING..*path0/COPYING"'
+test_expect_success \
+ 'mv --dry-run does not move file' \
+ 'git mv -n path0/COPYING MOVED &&
+ test -f path0/COPYING &&
+ test ! -f MOVED'
+
test_expect_success \
'checking -k on non-existing file' \
'git mv -k idontexist path0'
test_cmp expect actual
'
+get_tag_header annotated-tag-edit $commit commit $time >expect
+echo "An edited message" >>expect
+test_expect_success 'set up editor' '
+ write_script fakeeditor <<-\EOF
+ sed -e "s/A message/An edited message/g" <"$1" >"$1-"
+ mv "$1-" "$1"
+ EOF
+'
+test_expect_success \
+ 'creating an annotated tag with -m message --edit should succeed' '
+ GIT_EDITOR=./fakeeditor git tag -m "A message" --edit annotated-tag-edit &&
+ get_tag_msg annotated-tag-edit >actual &&
+ test_cmp expect actual
+'
+
cat >msgfile <<EOF
Another message
in a file.
test_cmp expect actual
'
+get_tag_header file-annotated-tag-edit $commit commit $time >expect
+sed -e "s/Another message/Another edited message/g" msgfile >>expect
+test_expect_success 'set up editor' '
+ write_script fakeeditor <<-\EOF
+ sed -e "s/Another message/Another edited message/g" <"$1" >"$1-"
+ mv "$1-" "$1"
+ EOF
+'
+test_expect_success \
+ 'creating an annotated tag with -F messagefile --edit should succeed' '
+ GIT_EDITOR=./fakeeditor git tag -F msgfile --edit file-annotated-tag-edit &&
+ get_tag_msg file-annotated-tag-edit >actual &&
+ test_cmp expect actual
+'
+
cat >inputmsg <<EOF
A message from the
standard input
sleep 1
}
+status_is_clean () {
+ >../status.expect &&
+ git status --porcelain >../status.actual &&
+ test_cmp ../status.expect ../status.actual
+}
+
test_lazy_prereq UNTRACKED_CACHE '
{ git update-index --test-untracked-cache; ret=$?; } &&
test $ret -ne 1
test_cmp ../before ../after
'
+test_expect_success 'teardown worktree' '
+ cd ..
+'
+
+test_expect_success SYMLINKS 'setup worktree for symlink test' '
+ git init worktree-symlink &&
+ cd worktree-symlink &&
+ git config core.untrackedCache true &&
+ mkdir one two &&
+ touch one/file two/file &&
+ git add one/file two/file &&
+ git commit -m"first commit" &&
+ git rm -rf one &&
+ ln -s two one &&
+ git add one &&
+ git commit -m"second commit"
+'
+
+test_expect_success SYMLINKS '"status" after symlink replacement should be clean with UC=true' '
+ git checkout HEAD~ &&
+ status_is_clean &&
+ status_is_clean &&
+ git checkout master &&
+ avoid_racy &&
+ status_is_clean &&
+ status_is_clean
+'
+
+test_expect_success SYMLINKS '"status" after symlink replacement should be clean with UC=false' '
+ git config core.untrackedCache false &&
+ git checkout HEAD~ &&
+ status_is_clean &&
+ status_is_clean &&
+ git checkout master &&
+ avoid_racy &&
+ status_is_clean &&
+ status_is_clean
+'
+
+test_expect_success 'setup worktree for non-symlink test' '
+ git init worktree-non-symlink &&
+ cd worktree-non-symlink &&
+ git config core.untrackedCache true &&
+ mkdir one two &&
+ touch one/file two/file &&
+ git add one/file two/file &&
+ git commit -m"first commit" &&
+ git rm -rf one &&
+ cp two/file one &&
+ git add one &&
+ git commit -m"second commit"
+'
+
+test_expect_success '"status" after file replacement should be clean with UC=true' '
+ git checkout HEAD~ &&
+ status_is_clean &&
+ status_is_clean &&
+ git checkout master &&
+ avoid_racy &&
+ status_is_clean &&
+ test-dump-untracked-cache >../actual &&
+ grep -F "recurse valid" ../actual >../actual.grep &&
+ cat >../expect.grep <<EOF &&
+/ 0000000000000000000000000000000000000000 recurse valid
+/two/ 0000000000000000000000000000000000000000 recurse valid
+EOF
+ status_is_clean &&
+ test_cmp ../expect.grep ../actual.grep
+'
+
+test_expect_success '"status" after file replacement should be clean with UC=false' '
+ git config core.untrackedCache false &&
+ git checkout HEAD~ &&
+ status_is_clean &&
+ status_is_clean &&
+ git checkout master &&
+ avoid_racy &&
+ status_is_clean &&
+ status_is_clean
+'
+
test_done
test_description='Test submodules on detached working tree
This test verifies that "git submodule" initialization, update and addition works
-on detahced working trees
+on detached working trees
'
TEST_NO_CREATE_REPO=1
. ./test-lib.sh
+test_expect_success 'set up commits for rebasing' '
+ test_commit root &&
+ test_commit a a a &&
+ test_commit b b b &&
+ git checkout -b rebase-me root &&
+ test_commit rebase-a a aa &&
+ test_commit rebase-b b bb &&
+ for i in $(test_seq 1 13)
+ do
+ test_commit rebase-$i c $i
+ done &&
+ git checkout master &&
+
+ cat >rebase-todo <<-EOF
+ pick $(git rev-parse rebase-a)
+ pick $(git rev-parse rebase-b)
+ fixup $(git rev-parse rebase-1)
+ fixup $(git rev-parse rebase-2)
+ pick $(git rev-parse rebase-3)
+ fixup $(git rev-parse rebase-4)
+ squash $(git rev-parse rebase-5)
+ reword $(git rev-parse rebase-6)
+ squash $(git rev-parse rebase-7)
+ fixup $(git rev-parse rebase-8)
+ fixup $(git rev-parse rebase-9)
+ edit $(git rev-parse rebase-10)
+ squash $(git rev-parse rebase-11)
+ squash $(git rev-parse rebase-12)
+ edit $(git rev-parse rebase-13)
+ EOF
+'
+
test_expect_success 'with no hook' '
echo "foo" > file &&
echo "#!$SHELL_PATH" > "$HOOK"
cat >> "$HOOK" <<'EOF'
-if test "$2" = commit; then
- source=$(git rev-parse "$3")
+GIT_DIR=$(git rev-parse --git-dir)
+if test -d "$GIT_DIR/rebase-merge"
+then
+ rebasing=1
else
- source=${2-default}
+ rebasing=0
fi
-if test "$GIT_EDITOR" = :; then
- sed -e "1s/.*/$source (no editor)/" "$1" > msg.tmp
+
+get_last_cmd () {
+ tail -n1 "$GIT_DIR/rebase-merge/done" | {
+ read cmd id _
+ git log --pretty="[$cmd %s]" -n1 $id
+ }
+}
+
+if test "$2" = commit
+then
+ if test $rebasing = 1
+ then
+ source="$3"
+ else
+ source=$(git rev-parse "$3")
+ fi
else
- sed -e "1s/.*/$source/" "$1" > msg.tmp
+ source=${2-default}
+fi
+test "$GIT_EDITOR" = : && source="$source (no editor)"
+
+if test $rebasing = 1
+then
+ echo "$source $(get_last_cmd)" >"$1"
+else
+ sed -e "1s/.*/$source/" "$1" >msg.tmp
+ mv msg.tmp "$1"
fi
-mv msg.tmp "$1"
exit 0
EOF
chmod +x "$HOOK"
test "$(git log -1 --pretty=format:%s)" = "merge"
'
+test_rebase () {
+ expect=$1 &&
+ mode=$2 &&
+ test_expect_$expect C_LOCALE_OUTPUT "with hook (rebase $mode)" '
+ test_when_finished "\
+ git rebase --abort
+ git checkout -f master
+ git branch -D tmp" &&
+ git checkout -b tmp rebase-me &&
+ GIT_SEQUENCE_EDITOR="cp rebase-todo" &&
+ GIT_EDITOR="\"$FAKE_EDITOR\"" &&
+ (
+ export GIT_SEQUENCE_EDITOR GIT_EDITOR &&
+ test_must_fail git rebase $mode b &&
+ echo x >a &&
+ git add a &&
+ test_must_fail git rebase --continue &&
+ echo x >b &&
+ git add b &&
+ git commit &&
+ git rebase --continue &&
+ echo y >a &&
+ git add a &&
+ git commit &&
+ git rebase --continue &&
+ echo y >b &&
+ git add b &&
+ git rebase --continue
+ ) &&
+ if test $mode = -p # reword amended after pick
+ then
+ n=18
+ else
+ n=17
+ fi &&
+ git log --pretty=%s -g -n$n HEAD@{1} >actual &&
+ test_cmp "$TEST_DIRECTORY/t7505/expected-rebase$mode" actual
+ '
+}
+
+test_rebase success -i
+test_rebase success -p
+
+test_expect_success 'with hook (cherry-pick)' '
+ test_when_finished "git checkout -f master" &&
+ git checkout -B other b &&
+ git cherry-pick rebase-1 &&
+ test "$(git log -1 --pretty=format:%s)" = "message (no editor)"
+'
+
+test_expect_success 'with hook and editor (cherry-pick)' '
+ test_when_finished "git checkout -f master" &&
+ git checkout -B other b &&
+ git cherry-pick -e rebase-1 &&
+ test "$(git log -1 --pretty=format:%s)" = merge
+'
+
cat > "$HOOK" <<'EOF'
#!/bin/sh
exit 1
'
+test_expect_success C_LOCALE_OUTPUT 'with failing hook (cherry-pick)' '
+ test_when_finished "git checkout -f master" &&
+ git checkout -B other b &&
+ test_must_fail git cherry-pick rebase-1 2>actual &&
+ test $(grep -c prepare-commit-msg actual) = 1
+'
+
test_done
--- /dev/null
+message [edit rebase-13]
+message (no editor) [edit rebase-13]
+message [squash rebase-12]
+message (no editor) [squash rebase-11]
+default [edit rebase-10]
+message (no editor) [edit rebase-10]
+message [fixup rebase-9]
+message (no editor) [fixup rebase-8]
+message (no editor) [squash rebase-7]
+message [reword rebase-6]
+message [squash rebase-5]
+message (no editor) [fixup rebase-4]
+message (no editor) [pick rebase-3]
+message (no editor) [fixup rebase-2]
+message (no editor) [fixup rebase-1]
+merge [pick rebase-b]
+message [pick rebase-a]
--- /dev/null
+message [edit rebase-13]
+message (no editor) [edit rebase-13]
+message [squash rebase-12]
+message (no editor) [squash rebase-11]
+default [edit rebase-10]
+message (no editor) [edit rebase-10]
+message [fixup rebase-9]
+message (no editor) [fixup rebase-8]
+message (no editor) [squash rebase-7]
+HEAD [reword rebase-6]
+message (no editor) [reword rebase-6]
+message [squash rebase-5]
+message (no editor) [fixup rebase-4]
+message (no editor) [pick rebase-3]
+message (no editor) [fixup rebase-2]
+message (no editor) [fixup rebase-1]
+merge [pick rebase-b]
+message [pick rebase-a]
test_cmp expect actual
'
+test_expect_success UNTRACKED_CACHE 'ignore .git changes when invalidating UNTR' '
+ test_create_repo dot-git &&
+ (
+ cd dot-git &&
+ mkdir -p .git/hooks &&
+ : >tracked &&
+ : >modified &&
+ mkdir dir1 &&
+ : >dir1/tracked &&
+ : >dir1/modified &&
+ mkdir dir2 &&
+ : >dir2/tracked &&
+ : >dir2/modified &&
+ write_integration_script &&
+ git config core.fsmonitor .git/hooks/fsmonitor-test &&
+ git update-index --untracked-cache &&
+ git update-index --fsmonitor &&
+ GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace-before" \
+ git status &&
+ test-dump-untracked-cache >../before
+ ) &&
+ cat >>dot-git/.git/hooks/fsmonitor-test <<-\EOF &&
+ printf ".git\0"
+ printf ".git/index\0"
+ printf "dir1/.git\0"
+ printf "dir1/.git/index\0"
+ EOF
+ (
+ cd dot-git &&
+ GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace-after" \
+ git status &&
+ test-dump-untracked-cache >../after
+ ) &&
+ grep "directory invalidation" trace-before >>before &&
+ grep "directory invalidation" trace-after >>after &&
+ # UNTR extension unchanged, dir invalidation count unchanged
+ test_cmp before after
+'
+
test_done
git mv c1.c other.c &&
git commit -m rename &&
cp important other.c &&
- git merge c1a &&
+ test_must_fail git merge c1a >out &&
+ test_i18ngrep "Refusing to lose dirty file at other.c" out &&
+ test_path_is_file other.c~HEAD &&
+ test $(git hash-object other.c~HEAD) = $(git rev-parse c1a:c1.c) &&
test_cmp important other.c
'
# May be altered later in the test
PREREQ="PERL"
+replace_variable_fields () {
+ sed -e "s/^\(Date:\).*/\1 DATE-STRING/" \
+ -e "s/^\(Message-Id:\).*/\1 MESSAGE-ID-STRING/" \
+ -e "s/^\(X-Mailer:\).*/\1 X-MAILER-STRING/"
+}
+
test_expect_success $PREREQ 'prepare reference tree' '
echo "1A quick brown fox jumps over the" >file &&
echo "lazy dog" >>file &&
--bcc=bcc@example.com \
--in-reply-to="<unique-message-id@example.com>" \
--smtp-server relay.example.com \
- $patches |
- sed -e "s/^\(Date:\).*/\1 DATE-STRING/" \
- -e "s/^\(Message-Id:\).*/\1 MESSAGE-ID-STRING/" \
- -e "s/^\(X-Mailer:\).*/\1 X-MAILER-STRING/" \
+ $patches | replace_variable_fields \
>actual-show-all-headers &&
test_cmp expected-show-all-headers actual-show-all-headers
'
EOF
"
-replace_variable_fields () {
- sed -e "s/^\(Date:\).*/\1 DATE-STRING/" \
- -e "s/^\(Message-Id:\).*/\1 MESSAGE-ID-STRING/" \
- -e "s/^\(X-Mailer:\).*/\1 X-MAILER-STRING/"
-}
-
test_suppression () {
git send-email \
--dry-run \
#
# Writing this as "! git checkout ../outerspace" is wrong, because
# the failure could be due to a segv. We want a controlled failure.
+#
+# Accepts the following options:
+#
+# ok=<signal-name>[,<...>]:
+# Don't treat an exit caused by the given signal as error.
+# Multiple signals can be specified as a comma separated list.
+# Currently recognized signal names are: sigpipe, success.
+# (Don't use 'success', use 'test_might_fail' instead.)
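+#
+# For example (an illustrative sketch; the remote name is made up):
+#
+#	test_must_fail git checkout ../outerspace &&
+#	test_must_fail ok=sigpipe git push pipe-closing-remote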
test_must_fail () {
case "$1" in
#
# Writing "git config --unset all.configuration || :" would be wrong,
# because we want to notice if it fails due to segv.
+#
+# Accepts the same options as test_must_fail.
test_might_fail () {
test_must_fail ok=success "$@"
cmp "$@"
}
+# Use this instead of test_cmp to compare files that contain expected and
+# actual output from git commands that can be translated. When running
+# under GETTEXT_POISON this pretends that the command produced expected
+# results.
+test_i18ncmp () {
+ test -n "$GETTEXT_POISON" || test_cmp "$@"
+}
+
+# Use this instead of "grep expected-string actual" to see if the
+# output from a git command that can be translated either contains an
+# expected string, or does not contain an unwanted one. When running
+# under GETTEXT_POISON this pretends that the command produced expected
+# results.
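+#
+# For example (illustrative):
+#
+#	test_i18ngrep "new branch" actual &&
+#	test_i18ngrep ! "deleted" actual
+#
+# The file to search must be the last argument; piping output into
+# test_i18ngrep no longer works, since the helper now insists on a
+# readable file.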
+test_i18ngrep () {
+ eval "last_arg=\${$#}"
+
+ test -f "$last_arg" ||
+ error "bug in the test script: test_i18ngrep requires a file" \
+ "to read as the last parameter"
+
+ if test $# -lt 2 ||
+ { test "x!" = "x$1" && test $# -lt 3 ; }
+ then
+ error "bug in the test script: too few parameters to test_i18ngrep"
+ fi
+
+ if test -n "$GETTEXT_POISON"
+ then
+ # pretend success
+ return 0
+ fi
+
+ if test "x!" = "x$1"
+ then
+ shift
+ ! grep "$@" && return 0
+
+ echo >&2 "error: '! grep $@' did find a match in:"
+ else
+ grep "$@" && return 0
+
+ echo >&2 "error: 'grep $@' didn't find a match in:"
+ fi
+
+ if test -s "$last_arg"
+ then
+ cat >&2 "$last_arg"
+ else
+ echo >&2 "<File '$last_arg' is empty>"
+ fi
+
+ return 1
+}
+
# Call any command "$@" but be more verbose about its
# failure. This is handy for commands like "test" which do
# not output anything when they fail.
"$@"
)
}
+
+# Convert stdin to pktline representation; note that empty input becomes an
+# empty packet, not a flush packet (for that you can just print 0000 yourself).
+packetize () {
+	cat >packetize.tmp &&
+	len=$(wc -c <packetize.tmp) &&
+	printf '%04x' "$(($len + 4))" &&
+	cat packetize.tmp &&
+	rm -f packetize.tmp
+}
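+#
+# For example (illustrative):
+#
+#	printf "hello" | packetize
+#
+# emits "0009hello": the 4-byte hex length prefix counts itself plus the
+# 5-byte payload.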
+
+# Parse the input as a series of pktlines, writing the result to stdout.
+# Sideband markers are removed automatically, and the output is routed to
+# stderr if appropriate.
+#
+# NUL bytes are converted to "\\0" for ease of parsing with text tools.
+depacketize () {
+ perl -e '
+ while (read(STDIN, $len, 4) == 4) {
+ if ($len eq "0000") {
+ print "FLUSH\n";
+ } else {
+ read(STDIN, $buf, hex($len) - 4);
+ $buf =~ s/\0/\\0/g;
+ if ($buf =~ s/^[\x2\x3]//) {
+ print STDERR $buf;
+ } else {
+ $buf =~ s/^\x1//;
+ print $buf;
+ }
+ }
+ }
+ '
+}
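+#
+# For example (illustrative):
+#
+#	printf "000ahello\n0000" | depacketize
+#
+# prints the 6-byte "hello" payload (including its newline) followed by a
+# FLUSH marker for the terminating 0000 packet.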
my @vars = grep(/^GIT_/ && !/^GIT_($ok)/o, @env);
print join("\n", @vars);
')
+unset XDG_CACHE_HOME
unset XDG_CONFIG_HOME
unset GITPERLLIB
GIT_AUTHOR_EMAIL=author@example.com
fi
fi
-GITPERLLIB="$GIT_BUILD_DIR"/perl/blib/lib:"$GIT_BUILD_DIR"/perl/blib/arch/auto/Git
+GITPERLLIB="$GIT_BUILD_DIR"/perl/build/lib
export GITPERLLIB
test -d "$GIT_BUILD_DIR"/templates/blt || {
error "You haven't built things yet, have you?"
test_set_prereq C_LOCALE_OUTPUT
fi
-# Use this instead of test_cmp to compare files that contain expected and
-# actual output from git commands that can be translated. When running
-# under GETTEXT_POISON this pretends that the command produced expected
-# results.
-test_i18ncmp () {
- test -n "$GETTEXT_POISON" || test_cmp "$@"
-}
-
-# Use this instead of "grep expected-string actual" to see if the
-# output from a git command that can be translated either contains an
-# expected string, or does not contain an unwanted one. When running
-# under GETTEXT_POISON this pretends that the command produced expected
-# results.
-test_i18ngrep () {
- if test -n "$GETTEXT_POISON"
- then
- : # pretend success
- elif test "x!" = "x$1"
- then
- shift
- ! grep "$@"
- else
- grep "$@"
- fi
-}
-
test_lazy_prereq PIPE '
# test whether the filesystem supports FIFOs
test_have_prereq !MINGW,!CYGWIN &&
test -n "$GIT_TEST_LONG"
'
+test_lazy_prereq EXPENSIVE_ON_WINDOWS '
+ test_have_prereq EXPENSIVE || test_have_prereq !MINGW,!CYGWIN
+'
+
test_lazy_prereq USR_BIN_TIME '
test -x /usr/bin/time
'
{
strbuf_complete_line(buf);
trace_write(key, buf->buf, buf->len);
- strbuf_release(buf);
}
static void trace_vprintf_fl(const char *file, int line, struct trace_key *key,
strbuf_vaddf(&buf, format, ap);
print_trace_line(key, &buf);
+ strbuf_release(&buf);
}
static void trace_argv_vprintf_fl(const char *file, int line,
strbuf_vaddf(&buf, format, ap);
- sq_quote_argv(&buf, argv, 0);
+ sq_quote_argv_pretty(&buf, argv);
print_trace_line(&trace_default_key, &buf);
+ strbuf_release(&buf);
}
void trace_strbuf_fl(const char *file, int line, struct trace_key *key,
strbuf_addbuf(&buf, data);
print_trace_line(key, &buf);
+ strbuf_release(&buf);
}
static void trace_performance_vprintf_fl(const char *file, int line,
}
print_trace_line(&trace_perf_key, &buf);
+ strbuf_release(&buf);
}
#ifndef HAVE_VARIADIC_MACROS
atexit(print_command_performance_atexit);
strbuf_reset(&command_line);
- sq_quote_argv(&command_line, argv, 0);
+ sq_quote_argv_pretty(&command_line, argv);
command_start_time = getnanotime();
}
if (data->transport_options.update_shallow)
set_helper_option(transport, "update-shallow", "true");
+ if (data->transport_options.filter_options.choice)
+ set_helper_option(
+ transport, "filter",
+ data->transport_options.filter_options.filter_spec);
+
if (data->fetch)
return fetch_with_fetch(transport, nr_heads, to_fetch);
} else if (!strcmp(name, TRANS_OPT_DEEPEN_RELATIVE)) {
opts->deepen_relative = !!value;
return 0;
+ } else if (!strcmp(name, TRANS_OPT_FROM_PROMISOR)) {
+ opts->from_promisor = !!value;
+ return 0;
+ } else if (!strcmp(name, TRANS_OPT_NO_DEPENDENTS)) {
+ opts->no_dependents = !!value;
+ return 0;
+ } else if (!strcmp(name, TRANS_OPT_LIST_OBJECTS_FILTER)) {
+ parse_list_objects_filter(&opts->filter_options, value);
+ return 0;
}
return 1;
}
data->options.check_self_contained_and_connected;
args.cloning = transport->cloning;
args.update_shallow = data->options.update_shallow;
+ args.from_promisor = data->options.from_promisor;
+ args.no_dependents = data->options.no_dependents;
+ args.filter_options = data->options.filter_options;
if (!data->got_remote_heads) {
connect_setup(transport, 0);
#include "cache.h"
#include "run-command.h"
#include "remote.h"
+#include "list-objects-filter-options.h"
struct string_list;
unsigned self_contained_and_connected : 1;
unsigned update_shallow : 1;
unsigned deepen_relative : 1;
+ unsigned from_promisor : 1;
+ unsigned no_dependents : 1;
int depth;
const char *deepen_since;
const struct string_list *deepen_not;
const char *uploadpack;
const char *receivepack;
struct push_cas_option *cas;
+ struct list_objects_filter_options filter_options;
};
enum transport_family {
/* Send push certificates */
#define TRANS_OPT_PUSH_CERT "pushcert"
+/* Indicate that these objects are being fetched by a promisor */
+#define TRANS_OPT_FROM_PROMISOR "from-promisor"
+
+/*
+ * Indicate that only the objects wanted need to be fetched, not their
+ * dependents
+ */
+#define TRANS_OPT_NO_DEPENDENTS "no-dependents"
+
+/* Filter objects for partial clone and fetch */
+#define TRANS_OPT_LIST_OBJECTS_FILTER "filter"
+
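+/*
+ * A minimal sketch of how a caller might request a partial fetch with
+ * these options (illustrative; assumes an already-initialized transport):
+ *
+ *	transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
+ *	transport_set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER,
+ *			     "blob:none");
+ */
+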
/**
* Returns 0 if the option was used, non-zero otherwise. Prints a
* message to stderr if the option is not used.
#include "submodule.h"
#include "submodule-config.h"
#include "fsmonitor.h"
+#include "fetch-object.h"
/*
* Error messages expected by scripts out of plumbing commands such as
load_gitmodules_file(index, &state);
enable_delayed_checkout(&state);
+ if (repository_format_partial_clone && o->update && !o->dry_run) {
+ /*
+ * Prefetch the objects that are to be checked out in the loop
+ * below.
+ */
+ struct oid_array to_fetch = OID_ARRAY_INIT;
+ int fetch_if_missing_store = fetch_if_missing;
+ fetch_if_missing = 0;
+ for (i = 0; i < index->cache_nr; i++) {
+ struct cache_entry *ce = index->cache[i];
+ if ((ce->ce_flags & CE_UPDATE) &&
+ !S_ISGITLINK(ce->ce_mode)) {
+ if (!has_object_file(&ce->oid))
+ oid_array_append(&to_fetch, &ce->oid);
+ }
+ }
+ if (to_fetch.nr)
+ fetch_objects(repository_format_partial_clone,
+ &to_fetch);
+ fetch_if_missing = fetch_if_missing_store;
+ }
for (i = 0; i < index->cache_nr; i++) {
struct cache_entry *ce = index->cache[i];
if (!ce)
return;
cache_tree_invalidate_path(o->src_index, ce->name);
- untracked_cache_invalidate_path(o->src_index, ce->name);
+ untracked_cache_invalidate_path(o->src_index, ce->name, 1);
}
/*
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
+#include "list-objects-filter.h"
+#include "list-objects-filter-options.h"
#include "run-command.h"
#include "connect.h"
#include "sigchain.h"
#include "argv-array.h"
#include "prio-queue.h"
#include "protocol.h"
+#include "quote.h"
static const char * const upload_pack_usage[] = {
N_("git upload-pack [<options>] <dir>"),
static int stateless_rpc;
static const char *pack_objects_hook;
+static int filter_capability_requested;
+static int filter_advertise;
+static struct list_objects_filter_options filter_options;
+
static void reset_timeout(void)
{
alarm(timeout);
argv_array_push(&pack_objects.args, "--delta-base-offset");
if (use_include_tag)
argv_array_push(&pack_objects.args, "--include-tag");
+ if (filter_options.filter_spec) {
+ if (pack_objects.use_shell) {
+ struct strbuf buf = STRBUF_INIT;
+ sq_quote_buf(&buf, filter_options.filter_spec);
+ argv_array_pushf(&pack_objects.args, "--filter=%s", buf.buf);
+ strbuf_release(&buf);
+ } else {
+ argv_array_pushf(&pack_objects.args, "--filter=%s",
+ filter_options.filter_spec);
+ }
+ }
pack_objects.in = -1;
pack_objects.out = -1;
deepen_rev_list = 1;
continue;
}
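+	/*
+	 * Illustrative protocol detail: once the "filter" capability has
+	 * been negotiated, the client may send a pkt-line such as
+	 * "filter blob:none" or "filter blob:limit=512" alongside its
+	 * "want" lines.
+	 */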
+ if (skip_prefix(line, "filter ", &arg)) {
+ if (!filter_capability_requested)
+ die("git upload-pack: filtering capability not negotiated");
+ parse_list_objects_filter(&filter_options, arg);
+ continue;
+ }
if (!skip_prefix(line, "want ", &arg) ||
get_oid_hex(arg, &oid_buf))
die("git upload-pack: protocol error, "
no_progress = 1;
if (parse_feature_request(features, "include-tag"))
use_include_tag = 1;
+ if (parse_feature_request(features, "filter"))
+ filter_capability_requested = 1;
o = parse_object(&oid_buf);
if (!o) {
struct strbuf symref_info = STRBUF_INIT;
format_symref_info(&symref_info, cb_data);
- packet_write_fmt(1, "%s %s%c%s%s%s%s%s agent=%s\n",
+ packet_write_fmt(1, "%s %s%c%s%s%s%s%s%s agent=%s\n",
oid_to_hex(oid), refname_nons,
0, capabilities,
(allow_unadvertised_object_request & ALLOW_TIP_SHA1) ?
" allow-reachable-sha1-in-want" : "",
stateless_rpc ? " no-done" : "",
symref_info.buf,
+ filter_advertise ? " filter" : "",
git_user_agent_sanitized());
strbuf_release(&symref_info);
} else {
} else if (current_config_scope() != CONFIG_SCOPE_REPO) {
if (!strcmp("uploadpack.packobjectshook", var))
return git_config_string(&pack_objects_hook, var, value);
+ } else if (!strcmp("uploadpack.allowfilter", var)) {
+ filter_advertise = git_config_bool(var, value);
}
return parse_hide_refs_config(var, value, "uploadpack");
}
GIT_TEMPLATE_DIR='@@BUILD_DIR@@/templates/blt'
export GIT_TEMPLATE_DIR
fi
-GITPERLLIB='@@BUILD_DIR@@/perl/blib/lib'"${GITPERLLIB:+:$GITPERLLIB}"
+GITPERLLIB='@@BUILD_DIR@@/perl/build/lib'"${GITPERLLIB:+:$GITPERLLIB}"
GIT_TEXTDOMAINDIR='@@BUILD_DIR@@/po/build/locale'
PATH='@@BUILD_DIR@@/bin-wrappers:'"$PATH"