PenaltyBreakFirstLessLess: 0
PenaltyBreakString: 10
PenaltyExcessCharacter: 100
-PenaltyReturnTypeOnItsOwnLine: 5
+PenaltyReturnTypeOnItsOwnLine: 60
# Don't sort #include's
SortIncludes: false
compiler:
addons:
before_install:
- before_script:
script:
- >
test "$TRAVIS_REPO_SLUG" != "git/git" ||
services:
- docker
before_install:
- before_script:
script: ci/run-linux32-docker.sh
- env: jobname=StaticAnalysis
os: linux
packages:
- coccinelle
before_install:
- before_script:
script: ci/run-static-analysis.sh
after_failure:
- env: jobname=Documentation
- asciidoc
- xmlto
before_install:
- before_script:
script: ci/test-documentation.sh
after_failure:
before_install: ci/install-dependencies.sh
-before_script: ci/run-build.sh
-script: ci/run-tests.sh
+script: ci/run-build-and-tests.sh
after_failure: ci/print-test-failures.sh
notifications:
- Use Git's gettext wrappers to make the user interface
translatable. See "Marking strings for translation" in po/README.
+ - Variables and functions local to a given source file should be marked
+ with "static". Variables that are visible to other source files
+ must be declared with "extern" in header files. However, function
+ declarations should not use "extern", as that is already the default.
+
For Perl programs:
- Most of the C guidelines above apply.
TECH_DOCS += technical/hash-function-transition
TECH_DOCS += technical/http-protocol
TECH_DOCS += technical/index-format
+TECH_DOCS += technical/long-running-process-protocol
TECH_DOCS += technical/pack-format
TECH_DOCS += technical/pack-heuristics
TECH_DOCS += technical/pack-protocol
--- /dev/null
+Git 2.17 Release Notes
+======================
+
+Updates since v2.16
+-------------------
+
+UI, Workflows & Features
+
+ * "diff" family of commands learned "--find-object=<object-id>" option
+ to limit the findings to changes that involve the named object.
+
+ * "git format-patch" learned to give 72-cols to diffstat, which is
+ consistent with other line length limits the subcommand uses for
+ its output meant for e-mails.
+
+ * The log from "git daemon" can be redirected with a new option; one
+ relevant use case is to send the log to standard error (instead of
+ syslog) when running it from inetd.
+
+ * "git rebase" learned to take "--allow-empty-message" option.
+
+ * "git am" has learned the "--quit" option, in addition to the
+ existing "--abort" option; having the pair mirrors a few other
+ commands like "rebase" and "cherry-pick".
+
+ * "git worktree add" learned to run the post-checkout hook, just like
+ "git clone" runs it upon the initial checkout.
+
+ * "git tag" learned an explicit "--edit" option that allows the
+ message given via "-m" and "-F" to be further edited.
+
+ * "git fetch --prune-tags" may be used as a handy short-hand for
+ getting rid of stale tags that are locally held.
+
+ * The new "--show-current-patch" option gives an end-user facing way
+ to get the diff being applied when "git rebase" (and "git am")
+ stops with a conflict.
+
+ * "git add -p" used to offer "/" (look for a matching hunk) as a
+ choice, even there was only one hunk, which has been corrected.
+ Also the single-key help is now given only for keys that are
+ enabled (e.g. help for '/' won't be shown when there is only one
+ hunk).
+
+ * Since Git 1.7.9, "git merge" defaulted to --no-ff (i.e. even when
+ the side branch being merged is a descendant of the current commit,
+ create a merge commit instead of fast-forwarding) when merging a
+   tag object. This was an appropriate default for integrators who
+   pull signed tags from their downstream contributors, but caused
+   unnecessary merges when used by downstream contributors who
+ habitually "catch up" their topic branches with tagged releases
+ from the upstream. Update "git merge" to default to --no-ff only
+ when merging a tag object that does *not* sit at its usual place in
+ refs/tags/ hierarchy, and allow fast-forwarding otherwise, to
+ mitigate the problem.
+
+ * "git status" can spend a lot of cycles to compute the relation
+ between the current branch and its upstream, which can now be
+ disabled with "--no-ahead-behind" option.
+
+ * "git diff" and friends learned funcname patterns for Go language
+ source files.
+
+ * "git send-email" learned "--reply-to=<address>" option.
+
+ * Funcname pattern used for C# now recognizes "async" keyword.
+
+
+Performance, Internal Implementation, Development Support etc.
+
+ * More perf tests for threaded grep
+
+ * "perf" test output can be sent to codespeed server.
+
+ * The build procedure for perl/ part has been greatly simplified by
+ weaning ourselves off of MakeMaker.
+
+ * In preparation for implementing narrow/partial clone, the machinery
+ for checking object connectivity used by gc and fsck has been
+   taught that a missing object is OK when it is referenced by a
+   packfile specially marked as coming from a trusted repository that
+   promises to make such objects available on demand.
+
+ * The machinery to clone & fetch, which in turn involves packing and
+ unpacking objects, has been told how to omit certain objects using
+ the filtering mechanism introduced by another topic. It now knows
+ to mark the resulting pack as a promisor pack to tolerate missing
+ objects, laying foundation for "narrow" clones.
+
+ * The first step to getting rid of mru API and using the
+ doubly-linked list API directly instead.
+
+ * Retire mru API as it does not give enough abstraction over
+ underlying list API to be worth it.
+
+ * Rewrite two more "git submodule" subcommands in C.
+
+ * The tracing machinery learned to report tweaking of environment
+ variables as well.
+
+ * Update Coccinelle rules to catch and optimize strbuf_addf(&buf, "%s", str).
+
+ * Prevent "clang-format" from breaking line after function return type.
+
+ * The sequencer infrastructure is shared across "git cherry-pick",
+ "git rebase -i", etc., and has always spawned "git commit" when it
+ needs to create a commit. It has been taught to do so internally,
+ when able, by reusing the codepath "git commit" itself uses, which
+   gives a performance boost of a few tens of percent in some sample
+   scenarios.
+
+ * Push the submodule version of collision-detecting SHA-1 hash
+ implementation a bit harder on builders.
+
+ * Avoid mmapping small files while using packed refs (especially ones
+ with zero size, which would cause later munmap() to fail).
+
+ * Conversion from uchar[20] to struct object_id continues.
+
+ * More tests for wildmatch functions.
+
+ * The code to binary search starting from a fan-out table (which is
+ how the packfile is indexed with object names) has been refactored
+ into a reusable helper.
+
+ * We now avoid using identifiers that clash with C++ keywords. Even
+ though it is not a goal to compile Git with C++ compilers, changes
+   like this help the use of code analysis tools that target C++ on
+   our codebase.
+
+ * The executable is now built in 'script' phase in Travis CI integration,
+ to follow the established practice, rather than during 'before_script'
+   phase. This allows the CI to categorize the failures better
+   ('failed' is the project's fault, 'errored' is the build
+   environment's).
+ (merge 3c93b82920 sg/travis-build-during-script-phase later to maint).
+
+ * Writing out the index file when the only thing that changed in it
+ is the untracked cache information is often wasteful, and this has
+ been optimized out.
+
+ * Various pieces of the Perl code we carry have been cleaned up.
+
+
+Also contains various documentation updates and code clean-ups.
+
+
+Fixes since v2.16
+-----------------
+
+ * An old regression in "git describe --all $annotated_tag^0" has been
+ fixed.
+
+ * "git status" after moving a path in the working tree (hence making
+ it appear "removed") and then adding with the -N option (hence
+ making that appear "added") detected it as a rename, but did not
+ report the old and new pathnames correctly.
+
+ * "git svn dcommit" did not take into account the fact that a
+ svn+ssh:// URL with a username@ (typically used for pushing) refers
+ to the same SVN repository without the username@ and failed when
+ svn.pushmergeinfo option is set.
+
+ * API clean-up around revision traversal.
+
+ * "git merge -Xours/-Xtheirs" learned to use our/their version when
+   resolving conflicting updates to a symbolic link.
+
+ * "git clone $there $here" is allowed even when here directory exists
+ as long as it is an empty directory, but the command incorrectly
+ removed it upon a failure of the operation.
+
+ * "git commit --fixup" did not allow "-m<message>" option to be used
+ at the same time; allow it to annotate resulting commit with more
+ text.
+
+ * When resetting the working tree files recursively, the working tree
+ of submodules are now also reset to match.
+
+ * "git stash -- <pathspec>" incorrectly blew away untracked files in
+ the directory that matched the pathspec, which has been corrected.
+
+ * Instead of maintaining home-grown email address parsing code, ship
+ a copy of reasonably recent Mail::Address to be used as a fallback
+ in 'git send-email' when the platform lacks it.
+ (merge d60be8acab mm/send-email-fallback-to-local-mail-address later to maint).
+
+ * "git add -p" was taught to ignore local changes to submodules as
+ they do not interfere with the partial addition of regular changes
+ anyway.
+
+ * Avoid showing a warning message in the middle of a line of "git
+ diff" output.
+ (merge 4e056c989f nd/diff-flush-before-warning later to maint).
+
+ * The http tracing code, often used to debug connection issues,
+ learned to redact potentially sensitive information from its output
+ so that it can be more safely sharable.
+ (merge 8ba18e6fa4 jt/http-redact-cookies later to maint).
+
+ * Crash fix for a corner case where an error codepath tried to unlock
+ what it did not acquire lock on.
+ (merge 81fcb698e0 mr/packed-ref-store-fix later to maint).
+
+ * The split-index mode had a few corner case bugs fixed.
+ (merge ae59a4e44f tg/split-index-fixes later to maint).
+
+ * Assorted fixes to "git daemon".
+ (merge ed15e58efe jk/daemon-fixes later to maint).
+
+ * Completion of "git merge -s<strategy>" (in contrib/) did not work
+ well in non-C locale.
+ (merge 7cc763aaa3 nd/list-merge-strategy later to maint).
+
+ * Workaround for segfault with more recent versions of SVN.
+ (merge 7f6f75e97a ew/svn-branch-segfault-fix later to maint).
+
+ * Plug recently introduced leaks in fsck.
+ (merge ba3a08ca0e jt/fsck-code-cleanup later to maint).
+
+ * "git pull --rebase" did not pass verbosity setting down when
+ recursing into a submodule.
+ (merge a56771a668 sb/pull-rebase-submodule later to maint).
+
+ * The way "git reset --hard" reports the commit the updated HEAD
+   points at is made consistent with the way the commit title is
+   generated by the other parts of the system. This matters when the
+   title is spread across multiple physical lines.
+ (merge 1cf823fb68 tg/reset-hard-show-head-with-pretty later to maint).
+
+ * Test fixes.
+ (merge 63b1a175ee sg/test-i18ngrep later to maint).
+
+ * Some bugs around "untracked cache" feature have been fixed. This
+ will notice corrupt data in the untracked cache left by old and
+ buggy code and issue a warning---the index can be fixed by clearing
+ the untracked cache from it.
+ (merge 0cacebf099 nd/fix-untracked-cache-invalidation later to maint).
+ (merge 7bf0be7501 ab/untracked-cache-invalidation-docs later to maint).
+
+ * "git blame HEAD COPYING" in a bare repository failed to run, while
+   "git blame HEAD -- COPYING" ran just fine. This has been corrected.
+
+ * "git add" files in the same directory, but spelling the directory
+ path in different cases on case insensitive filesystem, corrupted
+ the name hash data structure and led to unexpected results. This
+ has been corrected.
+ (merge c95525e90d bp/name-hash-dirname-fix later to maint).
+
+ * "git rebase -p" mangled log messages of a merge commit, which is
+ now fixed.
+ (merge ed5144d7eb js/fix-merge-arg-quoting-in-rebase-p later to maint).
+
+ * Some low level protocol codepaths could crash when they got an
+   unexpected flush packet, which is now fixed.
+ (merge bb1356dc64 js/packet-read-line-check-null later to maint).
+
+ * "git check-ignore" with multiple paths got confused when one is a
+ file and the other is a directory, which has been fixed.
+ (merge d60771e930 rs/check-ignore-multi later to maint).
+
+ * "git describe $garbage" stopped giving any errors when the garbage
+ happens to be a string with 40 hexadecimal letters.
+ (merge a8e7a2bf0f sb/describe-blob later to maint).
+
+ * Code to unquote single-quoted string (used in the parser for
+ configuration files, etc.) did not diagnose bogus input correctly
+ and produced bogus results instead.
+ (merge ddbbf8eb25 jk/sq-dequote-on-bogus-input later to maint).
+
+ * Many places in "git apply" knew that "/dev/null" that signals
+ "there is no such file on this side of the diff" can be followed by
+ whitespace and garbage when parsing a patch, except for one, which
+ made an otherwise valid patch (e.g. ones from subversion) rejected.
+ (merge e454ad4bec tk/apply-dev-null-verify-name-fix later to maint).
+
+ * We no longer create any *.spec file, so "make clean" should not
+ remove it.
+ (merge 4321bdcabb tz/do-not-clean-spec-file later to maint).
+
+ * "git push" over http transport did not unquote the push-options
+ correctly.
+ (merge 90dce21eb0 jk/push-options-via-transport-fix later to maint).
+
+ * "git send-email" learned to complain when the batch-size option is
+ not defined when the relogin-delay option is, since these two are
+ mutually required.
+ (merge 9caa70697b xz/send-email-batch-size later to maint).
+
+ * Y2k20 fix ;-) for our perl scripts.
+ (merge a40e06ee33 bw/perl-timegm-timelocal-fix later to maint).
+
+ * Threaded "git grep" has been optimized to avoid allocation in code
+ section that is covered under a mutex.
+ (merge 38ef24dccf rv/grep-cleanup later to maint).
+
+ * "git subtree" script (in contrib/) scripted around "git log", whose
+   output got affected by end-user configuration like log.showsignature.
+ (merge 8841b5222c sg/subtree-signed-commits later to maint).
+
+ * While finding unique object name abbreviation, the code may
+ accidentally have read beyond the end of the array of object names
+ in a pack.
+ (merge 21abed500c ds/find-unique-abbrev-optim later to maint).
+
+ * Micro optimization in revision traversal code.
+ (merge ebbed3ba04 ds/mark-parents-uninteresting-optim later to maint).
+
+ * "git commit" used to run "gc --auto" near the end, which was lost
+ when the command was reimplemented in C by mistake.
+ (merge 095c741edd ab/gc-auto-in-commit later to maint).
+
+ * Allow running a couple of tests with "sh -x".
+ (merge c20bf94abc sg/cvs-tests-with-x later to maint).
+
+ * Other minor doc, test and build updates and code cleanups.
+ (merge e2a5a028c7 bw/oidmap-autoinit later to maint).
+ (merge ec3b4b06f8 cl/t9001-cleanup later to maint).
+ (merge e1b3f3dd38 ks/submodule-doc-updates later to maint).
+ (merge fbac558a9b rs/describe-unique-abbrev later to maint).
+ (merge 8462ff43e4 tb/crlf-conv-flags later to maint).
+ (merge 7d68bb0766 rb/hashmap-h-compilation-fix later to maint).
+ (merge 3449847168 cc/sha1-file-name later to maint).
+ (merge ad622a256f ds/use-get-be64 later to maint).
+ (merge f919ffebed sg/cocci-move-array later to maint).
+ (merge 4e801463c7 jc/mailinfo-cleanup-fix later to maint).
+ (merge ef5b3a6c5e nd/shared-index-fix later to maint).
+ (merge 9f5258cbb8 tz/doc-show-defaults-to-head later to maint).
+ (merge b780e4407d jc/worktree-add-short-help later to maint).
+ (merge ae239fc8e5 rs/cocci-strbuf-addf-to-addstr later to maint).
+ (merge 2e22a85e5c nd/ignore-glob-doc-update later to maint).
+ (merge 3738031581 jk/gettext-poison later to maint).
+ (merge 54360a1956 rj/sparse-updates later to maint).
+ (merge 12e31a6b12 sg/doc-test-must-fail-args later to maint).
+ (merge 760f1ad101 bc/doc-interpret-trailers-grammofix later to maint).
+ (merge 4ccf461f56 bp/fsmonitor later to maint).
+ (merge a6119f82b1 jk/test-hashmap-updates later to maint).
+ (merge 5aea9fe6cc rd/typofix later to maint).
+ (merge e4e5da2796 sb/status-doc-fix later to maint).
+ (merge 7976e901c8 gs/test-unset-xdg-cache-home later to maint).
+ (merge d023df1ee6 tg/worktree-create-tracking later to maint).
+ (merge 4cbe92fd41 sm/mv-dry-run-update later to maint).
+ (merge 75e5e9c3f7 sb/color-h-cleanup later to maint).
+ (merge 2708ef4af6 sg/t6300-modernize later to maint).
+ (merge d88e92d4e0 bw/doc-submodule-recurse-config-with-clone later to maint).
+ (merge f74bbc8dd2 jk/cached-commit-buffer later to maint).
+ (merge 1316416903 ms/non-ascii-ticks later to maint).
+ (merge 878056005e rs/strbuf-read-file-or-whine later to maint).
+ (merge 79f0ba1547 jk/strbuf-read-file-close-error later to maint).
+ (merge edfb8ba068 ot/ref-filter-cleanup later to maint).
+ (merge 11395a3b4b jc/test-must-be-empty later to maint).
+ (merge 768b9d6db7 mk/doc-pretty-fill later to maint).
+ (merge 2caa7b8d27 ab/man-sec-list later to maint).
fetch.prune::
If true, fetch will automatically behave as if the `--prune`
- option was given on the command line. See also `remote.<name>.prune`.
+ option was given on the command line. See also `remote.<name>.prune`
+ and the PRUNING section of linkgit:git-fetch[1].
+
+fetch.pruneTags::
+ If true, fetch will automatically behave as if the
+ `refs/tags/*:refs/tags/*` refspec was provided when pruning,
+ if not set already. This allows for setting both this option
+ and `fetch.prune` to maintain a 1=1 mapping to upstream
+ refs. See also `remote.<name>.pruneTags` and the PRUNING
+ section of linkgit:git-fetch[1].
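++
+For example, a sketch of opting into both kinds of pruning globally
+(editing the configuration file directly is equivalent):
++
+------------
+$ git config --global fetch.prune true
+$ git config --global fetch.pruneTags true
+------------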
fetch.output::
Control how ref update status is printed. Valid values are
remote (as if the `--prune` option was given on the command line).
Overrides `fetch.prune` settings, if any.
+remote.<name>.pruneTags::
+ When set to true, fetching from this remote by default will also
+ remove any local tags that no longer exist on the remote if pruning
+ is activated in general via `remote.<name>.prune`, `fetch.prune` or
+ `--prune`. Overrides `fetch.pruneTags` settings, if any.
++
+See also `remote.<name>.prune` and the PRUNING section of
+linkgit:git-fetch[1].
+
remotes.<group>::
The list of remotes which are fetched by "git remote update
<group>". See linkgit:git-remote[1].
submodule.recurse::
Specifies if commands recurse into submodules by default. This
- applies to all commands that have a `--recurse-submodules` option.
+ applies to all commands that have a `--recurse-submodules` option,
+ except `clone`.
Defaults to false.
submodule.fetchJobs::
was run. I.e., `upload-pack` will feed input intended for
`pack-objects` to the hook, and expects a completed packfile on
stdout.
+
+uploadpack.allowFilter::
+ If this option is set, `upload-pack` will advertise partial
+ clone and partial fetch object filtering.
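++
+For example, a server operator might enable this per repository (a
+sketch; client-side partial clone support was still experimental at
+this point):
++
+------------
+$ git config uploadpack.allowFilter true
+------------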
+
Note that this configuration variable is ignored if it is seen in the
repository-level config (this is a safety measure against fetching from
These parameters can also be set individually with `--stat-width=<width>`,
`--stat-name-width=<name-width>` and `--stat-count=<count>`.
+--compact-summary::
+ Output a condensed summary of extended header information such
+ as file creations or deletions ("new" or "gone", optionally "+l"
+ if it's a symlink) and mode changes ("+x" or "-x" for adding
+ or removing executable bit respectively) in diffstat. The
+	information is put between the filename part and the graph
+ part. Implies `--stat`.
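++
+For example, to see the condensed summary for the commit at `HEAD` (a
+usage sketch):
++
+------------
+$ git show --compact-summary
+------------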
+
--numstat::
Similar to `--stat`, but shows number of added and
deleted lines in decimal notation and pathname without
See the 'pickaxe' entry in linkgit:gitdiffcore[7] for more
information.
+--find-object=<object-id>::
+ Look for differences that change the number of occurrences of
+	the specified object. Similar to `-S`, but the argument differs
+	in that it doesn't search for a specific string but for a
+	specific object id.
++
+The object can be a blob or a submodule commit. It implies the `-t` option in
+`git-log` to also find trees.
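++
+A usage sketch: to list commits that change how often a given blob
+occurs (the `v2.16.0:Makefile` revision:path here is only an
+illustration of one way to obtain an object id):
++
+------------
+$ git log --find-object=$(git rev-parse v2.16.0:Makefile)
+------------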
+
--pickaxe-all::
When `-S` or `-G` finds a change, show all the changes in that
changeset, not just the files that contain the change
--pickaxe-regex::
Treat the <string> given to `-S` as an extended POSIX regular
expression to match.
+
endif::git-format-patch[]
-O<orderfile>::
are fetched due to an explicit refspec (either on the command
line or in the remote configuration, for example if the remote
was cloned with the --mirror option), then they are also
- subject to pruning.
+ subject to pruning. Supplying `--prune-tags` is a shorthand for
+ providing the tag refspec.
++
+See the PRUNING section below for more details.
+
+-P::
+--prune-tags::
+ Before fetching, remove any local tags that no longer exist on
+ the remote if `--prune` is enabled. This option should be used
+	more carefully; unlike `--prune`, it will remove any local
+	references (local tags) that have been created. This option is
+	a shorthand for providing the explicit tag refspec along with
+	`--prune`; see the discussion about that in its documentation.
++
+See the PRUNING section below for more details.
+
endif::git-pull[]
ifndef::git-pull[]
[--exclude=<path>] [--include=<path>] [--reject] [-q | --quiet]
[--[no-]scissors] [-S[<keyid>]] [--patch-format=<format>]
[(<mbox> | <Maildir>)...]
-'git am' (--continue | --skip | --abort)
+'git am' (--continue | --skip | --abort | --quit | --show-current-patch)
DESCRIPTION
-----------
--abort::
Restore the original branch and abort the patching operation.
+--quit::
+ Abort the patching operation but keep HEAD and the index
+ untouched.
+
+--show-current-patch::
+ Show the patch being applied when "git am" is stopped because
+ of conflicts.
+
DISCUSSION
----------
using `--file`, `--global`, etc) and `on` when searching all
config files.
+CONFIGURATION
+-------------
+`pager.config` is only respected when listing configuration, i.e., when
+using `--list` or any of the `--get-*` options, which may return
+multiple results. The default is to use a pager.
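+
+For example, to list all configuration without a pager (a sketch):
+
+------------
+$ git -c pager.config=false config --list
+------------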
+
[[FILES]]
FILES
-----
[--inetd |
[--listen=<host_or_ipaddr>] [--port=<n>]
[--user=<user> [--group=<group>]]]
+ [--log-destination=(stderr|syslog|none)]
[<directory>...]
DESCRIPTION
do not have the 'git-daemon-export-ok' file.
--inetd::
- Have the server run as an inetd service. Implies --syslog.
+ Have the server run as an inetd service. Implies --syslog (may be
+ overridden with `--log-destination=`).
Incompatible with --detach, --port, --listen, --user and --group
options.
zero for no limit.
--syslog::
- Log to syslog instead of stderr. Note that this option does not imply
- --verbose, thus by default only error conditions will be logged.
+ Short for `--log-destination=syslog`.
+
+--log-destination=<destination>::
+ Send log messages to the specified destination.
+ Note that this option does not imply --verbose,
+ thus by default only error conditions will be logged.
+ The <destination> must be one of:
++
+--
+stderr::
+ Write to standard error.
+ Note that if `--detach` is specified,
+ the process disconnects from the real standard error,
+ making this destination effectively equivalent to `none`.
+syslog::
+ Write to syslog, using the `git-daemon` identifier.
+none::
+ Disable all logging.
+--
++
+The default destination is `syslog` if `--inetd` or `--detach` is specified,
+otherwise `stderr`.
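++
+For example, a sketch of the inetd use case mentioned above, sending
+the log to standard error instead of the syslog default (the
+repository path is a placeholder):
++
+------------
+$ git daemon --inetd --log-destination=stderr /srv/git
+------------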
--user-path::
--user-path=<path>::
overridden by giving the `--refmap=<refspec>` parameter(s) on the
command line.
+PRUNING
+-------
+
+Git has a default disposition of keeping data unless it's explicitly
+thrown away; this extends to holding onto local references to branches
+on remotes that have themselves deleted those branches.
+
+If left to accumulate, these stale references might make performance
+worse on big and busy repos that have a lot of branch churn, and
+e.g. make the output of commands like `git branch -a --contains
+<commit>` needlessly verbose, as well as impacting anything else
+that'll work with the complete set of known references.
+
+These remote-tracking references can be deleted as a one-off with
+either of:
+
+------------------------------------------------
+# While fetching
+$ git fetch --prune <name>
+
+# Only prune, don't fetch
+$ git remote prune <name>
+------------------------------------------------
+
+To prune references as part of your normal workflow without needing to
+remember to run that, set `fetch.prune` globally, or
+`remote.<name>.prune` per-remote in the config. See
+linkgit:git-config[1].
+
+Here's where things get tricky and more specific. The pruning feature
+doesn't actually care about branches; instead it'll prune local <->
+remote-references as a function of the refspec of the remote (see
+`<refspec>` and <<CRTB,CONFIGURED REMOTE-TRACKING BRANCHES>> above).
+
+Therefore if the refspec for the remote includes
+e.g. `refs/tags/*:refs/tags/*`, or you manually run e.g. `git fetch
+--prune <name> "refs/tags/*:refs/tags/*"` it won't be stale remote
+tracking branches that are deleted, but any local tag that doesn't
+exist on the remote.
+
+This might not be what you expect, i.e. you want to prune remote
+`<name>`, but also explicitly fetch tags from it, so when you fetch
+from it you delete all your local tags, most of which may not have
+come from the `<name>` remote in the first place.
+
+So be careful when using this with a refspec like
+`refs/tags/*:refs/tags/*`, or any other refspec which might map
+references from multiple remotes to the same local namespace.
+
+Since keeping up-to-date with both branches and tags on the remote is
+a common use-case the `--prune-tags` option can be supplied along with
+`--prune` to prune local tags that don't exist on the remote, and
+force-update those tags that differ. Tag pruning can also be enabled
+with `fetch.pruneTags` or `remote.<name>.pruneTags` in the config. See
+linkgit:git-config[1].
+
+The `--prune-tags` option is equivalent to having
+`refs/tags/*:refs/tags/*` declared in the refspecs of the remote. This
+can lead to some seemingly strange interactions:
+
+------------------------------------------------
+# These both fetch tags
+$ git fetch --no-tags origin 'refs/tags/*:refs/tags/*'
+$ git fetch --no-tags --prune-tags origin
+------------------------------------------------
+
+The reason it doesn't error out when provided without `--prune` or its
+config versions is for flexibility of the configured versions, and to
+maintain a 1=1 mapping between what the command line flags do, and
+what the configuration versions do.
+
+It's reasonable to e.g. configure `fetch.pruneTags=true` in
+`~/.gitconfig` to have tags pruned whenever `git fetch --prune` is
+run, without making every invocation of `git fetch` without `--prune`
+an error.
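+
+A configuration sketch of that setup:
+
+------------------------------------------------
+# In ~/.gitconfig: tags are pruned only when --prune is in effect
+[fetch]
+	pruneTags = true
+------------------------------------------------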
+
+Pruning tags with `--prune-tags` also works when fetching a URL
+instead of a named remote. These will all prune tags not found on
+origin:
+
+------------------------------------------------
+$ git fetch origin --prune --prune-tags
+$ git fetch origin --prune 'refs/tags/*:refs/tags/*'
+$ git fetch <url of origin> --prune --prune-tags
+$ git fetch <url of origin> --prune 'refs/tags/*:refs/tags/*'
+------------------------------------------------
+
OUTPUT
------
--check-self-contained-and-connected::
Die if the pack contains broken links. For internal use only.
+--fsck-objects::
+ Die if the pack contains broken objects. For internal use only.
+
--threads=<n>::
Specifies the number of threads to spawn when resolving
deltas. This requires that index-pack be compiled with
will be added before the new trailer.
Existing trailers are extracted from the input message by looking for
-a group of one or more lines that (i) are all trailers, or (ii) contains at
+a group of one or more lines that (i) is all trailers, or (ii) contains at
least one Git-generated or user-configured trailer and consists of at
least 25% trailers.
The group must be preceded by one or more empty (or whitespace-only) lines.
The form '--missing=allow-any' will allow object traversal to continue
if a missing object is encountered. Missing objects will silently be
omitted from the results.
++
+The form '--missing=allow-promisor' is like 'allow-any', but will only
+allow object traversal to continue for EXPECTED promisor missing objects.
+Unexpected missing objects will raise an error.
+
+--exclude-promisor-objects::
+ Omit objects that are known to be in the promisor remote. (This
+ option has the purpose of operating only on locally created objects,
+ so that when we repack, we still maintain a distinction between
+ locally created objects [without .promisor] and objects from the
+ promisor remote [with .promisor].) This is used with partial clone.
SEE ALSO
--------
[<upstream> [<branch>]]
'git rebase' [-i | --interactive] [options] [--exec <cmd>] [--onto <newbase>]
--root [<branch>]
-'git rebase' --continue | --skip | --abort | --quit | --edit-todo
+'git rebase' --continue | --skip | --abort | --quit | --edit-todo | --show-current-patch
DESCRIPTION
-----------
	Keep the commits that do not change anything from their
parents in the result.
+--allow-empty-message::
+ By default, rebasing commits with an empty message will fail.
+ This option overrides that behavior, allowing commits with empty
+ messages to be rebased.
+
--skip::
Restart the rebasing process by skipping the current patch.
--edit-todo::
Edit the todo list during an interactive rebase.
+--show-current-patch::
+ Show the current patch in an interactive rebase or when rebase
+ is stopped because of conflicts. This is the equivalent of
+ `git show REBASE_HEAD`.
+
-m::
--merge::
Use merging strategies to rebase. When the recursive (default) merge
'prune'::
-Deletes all stale remote-tracking branches under <name>.
-These stale branches have already been removed from the remote repository
-referenced by <name>, but are still locally available in
-"remotes/<name>".
+Deletes stale references associated with <name>. By default, stale
+remote-tracking branches under <name> are deleted, but depending on
+global configuration and the configuration of the remote we might even
+prune local tags that haven't been pushed there. Equivalent to `git
+fetch --prune <name>`, except that no new references will be fetched.
++
+See the PRUNING section of linkgit:git-fetch[1] for what it'll prune
+depending on various configuration.
+
With `--dry-run` option, report what branches will be pruned, but do not
actually prune them.
configuration parameter remote.<name>.skipDefaultUpdate set to true will
be updated. (See linkgit:git-config[1]).
+
-With `--prune` option, prune all the remotes that are updated.
+With `--prune` option, run pruning against all the remotes that are updated.
DISCUSSION
the value of GIT_AUTHOR_IDENT, or GIT_COMMITTER_IDENT if that is not
set, as returned by "git var -l".
+--reply-to=<address>::
+ Specify the address where replies from recipients should go to.
+ Use this if replies to messages should go to another address than what
+ is specified with the --from parameter.
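+
+A usage sketch (the addresses and patch file are placeholders):
+
+------------
+$ git send-email --reply-to="list@example.org" \
+	--to="maintainer@example.org" 0001-some-fix.patch
+------------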
+
--in-reply-to=<identifier>::
Make the first mail (or all the mails with `--no-thread`) appear as a
reply to the given Message-Id, which avoids breaking threads to
SYNOPSIS
--------
[verse]
-'git show' [options] <object>...
+'git show' [options] [<object>...]
DESCRIPTION
-----------
OPTIONS
-------
<object>...::
- The names of objects to show.
+ The names of objects to show (defaults to 'HEAD').
For a more complete list of ways to spell object names, see
"SPECIFYING REVISIONS" section in linkgit:gitrevisions[7].
without options are equivalent to 'always' and 'never'
respectively.
+--ahead-behind::
+--no-ahead-behind::
+ Display or do not display detailed ahead/behind counts for the
+ branch relative to its upstream branch. Defaults to true.
+
<pathspec>...::
See the 'pathspec' entry in linkgit:gitglossary[7].
X Y Meaning
-------------------------------------------------
- [MD] not updated
+ [AMD] not updated
M [ MD] updated in index
A [ MD] added to index
- D [ M] deleted from index
+ D deleted from index
R [ MD] renamed in index
C [ MD] copied in index
[MARC] index and work tree matches
Show the status of the submodules. This will print the SHA-1 of the
currently checked out commit for each submodule, along with the
submodule path and the output of 'git describe' for the
- SHA-1. Each SHA-1 will be prefixed with `-` if the submodule is not
- initialized, `+` if the currently checked out submodule commit
+ SHA-1. Each SHA-1 will possibly be prefixed with `-` if the submodule is
+ not initialized, `+` if the currently checked out submodule commit
does not match the SHA-1 found in the index of the containing
repository and `U` if the submodule has merge conflicts.
+
the submodules. The "updating" can be done in several ways depending
on command line options and the value of `submodule.<name>.update`
configuration variable. The command line option takes precedence over
-the configuration variable. if neither is given, a checkout is performed.
-update procedures supported both from the command line as well as setting
-`submodule.<name>.update`:
+the configuration variable. If neither is given, a 'checkout' is performed.
+The 'update' procedures supported both from the command line as well as
+through the `submodule.<name>.update` configuration are:
checkout;; the commit recorded in the superproject will be
checked out in the submodule on a detached HEAD.
+
If `--force` is specified, the submodule will be checked out (using
-`git checkout --force` if appropriate), even if the commit specified
+`git checkout --force`), even if the commit specified
in the index of the containing repository already matches the commit
checked out in the submodule.
merge;; the commit recorded in the superproject will be merged
into the current branch in the submodule.
-The following procedures are only available via the `submodule.<name>.update`
-configuration variable:
+The following 'update' procedures are only available via the
+`submodule.<name>.update` configuration variable:
custom command;; arbitrary shell command that takes a single
argument (the sha1 of the commit recorded in the
SYNOPSIS
--------
[verse]
-'git tag' [-a | -s | -u <keyid>] [-f] [-m <msg> | -F <file>]
+'git tag' [-a | -s | -u <keyid>] [-f] [-m <msg> | -F <file>] [-e]
<tagname> [<commit> | <object>]
'git tag' -d <tagname>...
'git tag' [-n[<num>]] -l [--contains <commit>] [--no-contains <commit>]
Implies `-a` if none of `-a`, `-s`, or `-u <keyid>`
is given.
+-e::
+--edit::
+	The message taken from file with `-F` or command line with
+	`-m` is usually used as the tag message unmodified.
+ This option lets you further edit the message taken from these sources.
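++
+For example, a sketch that starts from a prepared message file but
+opens the editor for final tweaks:
++
+------------
+$ git tag -a -F release-notes.txt --edit v2.17.0
+------------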
+
--cleanup=<mode>::
This option sets how the tag message is cleaned up.
The '<mode>' can be one of 'verbatim', 'whitespace' and 'strip'. The
are used, the untracked cache is immediately added to or removed from
the index.
+Before 2.17, the untracked cache had a bug where replacing a directory
+with a symlink to another directory could cause it to incorrectly show
+files tracked by Git as untracked. See the "status: add a failing test
+showing a core.untrackedCache bug" commit to git.git. A workaround for
+that (and this might work for other undiscovered bugs in the future)
+is:
+
+----------------
+$ git -c core.untrackedCache=false status
+----------------
+
+This bug has also been shown to affect non-symlink cases of replacing
+a directory with a file when it comes to the internal structures of
+the untracked cache, but no case has been reported where this resulted in
+wrong "git status" output.
+
+There are also cases where existing indexes written by git versions
+before 2.17 will reference directories that don't exist anymore,
+potentially causing many "could not open directory" warnings to be
+printed on "git status". These are new warnings for existing issues
+that were previously silently discarded.
+
+As with the bug described above, the solution is to do a one-off "git
+status" run with `core.untrackedCache=false` to flush out the leftover
+bad data.
+
File System Monitor
-------------------
linkgit:git-config[1]) than using the `--fsmonitor` option to
`git update-index` in each repository, especially if you want to do so
across all repositories you use, because you can set the configuration
-variable to `true` (or `false`) in your `$HOME/.gitconfig` just once
-and have it affect all repositories you touch.
+variable in your `$HOME/.gitconfig` just once and have it affect all
+repositories you touch.
When the `core.fsmonitor` configuration variable is changed, the
file system monitor is added to or removed from the index the next time
'git worktree add' [-f] [--detach] [--checkout] [--lock] [-b <new-branch>] <path> [<commit-ish>]
'git worktree list' [--porcelain]
'git worktree lock' [--reason <string>] <worktree>
+'git worktree move' <worktree> <new-path>
'git worktree prune' [-n] [-v] [--expire <expire>]
+'git worktree remove' [--force] <worktree>
'git worktree unlock' <worktree>
DESCRIPTION
`git worktree prune` in the main or any linked working tree to
clean up any stale administrative files.
-If you move a linked working tree, you need to manually update the
-administrative files so that they do not get pruned automatically. See
-section "DETAILS" for more information.
-
If a linked working tree is stored on a portable device or network share
which is not always mounted, you can prevent its administrative files from
being pruned by issuing the `git worktree lock` command, optionally
directory specific files such as HEAD, index, etc. `-` may also be
specified as `<commit-ish>`; it is synonymous with `@{-1}`.
+
-If <commit-ish> is a branch name (call it `<branch>` and is not found,
+If <commit-ish> is a branch name (call it `<branch>`) and is not found,
and neither `-b` nor `-B` nor `--detach` are used, but there does
exist a tracking branch in exactly one remote (call it `<remote>`)
-with a matching name, treat as equivalent to
+with a matching name, treat as equivalent to:
++
------------
$ git worktree add --track -b <branch> <path> <remote>/<branch>
------------
being moved or deleted. Optionally, specify a reason for the lock
with `--reason`.
+move::
+
+Move a working tree to a new location. Note that the main working tree
+or linked working trees containing submodules cannot be moved.
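++
+For example (a usage sketch; both paths are placeholders):
++
+------------
+$ git worktree move ../hotfix ../hotfix-v2
+------------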
+
prune::
Prune working tree information in $GIT_DIR/worktrees.
+remove::
+
+Remove a working tree. Only clean working trees (no untracked files
+and no modification in tracked files) can be removed. Unclean working
+trees or ones with submodules can be removed with `--force`. The main
+working tree cannot be removed.
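++
+For example (a usage sketch; pass `--force` only if the working tree
+is unclean):
++
+------------
+$ git worktree remove ../hotfix-v2
+------------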
+
unlock::
Unlock a working tree, allowing it to be pruned, moved or deleted.
-f::
--force::
- By default, `add` refuses to create a new working tree when `<commit-ish>` is a branch name and
- is already checked out by another working tree. This option overrides
- that safeguard.
+ By default, `add` refuses to create a new working tree when
+ `<commit-ish>` is a branch name and is already checked out by
+ another working tree and `remove` refuses to remove an unclean
+ working tree. This option overrides that safeguard.
-b <new-branch>::
-B <new-branch>::
$GIT_DIR or $GIT_COMMON_DIR when you need to directly access something
inside $GIT_DIR. Use `git rev-parse --git-path` to get the final path.
-If you move a linked working tree, you need to update the 'gitdir' file
+If you manually move a linked working tree, you need to update the 'gitdir' file
in the entry's directory. For example, if a linked working tree is moved
to `/newpath/test-next` and its `.git` file points to
`/path/main/.git/worktrees/test-next`, then update
for submodules is incomplete. It is NOT recommended to make multiple
checkouts of a superproject.
-git-worktree could provide more automation for tasks currently
-performed manually, such as:
-
-- `remove` to remove a linked working tree and its administrative files (and
- warn if the working tree is dirty)
-- `mv` to move or rename a working tree and update its administrative files
-
GIT
---
Part of the linkgit:git[1] suite
variable.
See `GIT_TRACE` for available trace output options.
+`GIT_TRACE_CURL_NO_DATA`::
+ When a curl trace is enabled (see `GIT_TRACE_CURL` above), do not dump
+ data (that is, only dump info lines and headers).
+
+`GIT_REDACT_COOKIES`::
+ This can be set to a comma-separated list of strings. When a curl trace
+ is enabled (see `GIT_TRACE_CURL` above), whenever a "Cookies:" header
+ sent by the client is dumped, values of cookies whose key is in that
+ list (case-sensitive) are redacted.
+
`GIT_LITERAL_PATHSPECS`::
Setting this variable to `1` will cause Git to treat all
pathspecs literally, rather than as glob patterns. For example,
development and maintenance is primarily done. You do not have to be
subscribed to the list to send a message there.
+Issues which are security relevant should be disclosed privately to
+the Git Security mailing list <git-security@googlegroups.com>.
+
SEE ALSO
--------
linkgit:gittutorial[7], linkgit:gittutorial-2[7],
If the filter command (a string value) is defined via
`filter.<driver>.process` then Git can process all blobs with a
single filter invocation for the entire life of a single Git
-command. This is achieved by using a packet format (pkt-line,
-see technical/protocol-common.txt) based protocol over standard
-input and standard output as follows. All packets, except for the
-"*CONTENT" packets and the "0000" flush packet, are considered
-text and therefore are terminated by a LF.
-
-Git starts the filter when it encounters the first file
-that needs to be cleaned or smudged. After the filter started
-Git sends a welcome message ("git-filter-client"), a list of supported
-protocol version numbers, and a flush packet. Git expects to read a welcome
-response message ("git-filter-server"), exactly one protocol version number
-from the previously sent list, and a flush packet. All further
-communication will be based on the selected version. The remaining
-protocol description below documents "version=2". Please note that
-"version=42" in the example below does not exist and is only there
-to illustrate how the protocol would look like with more than one
-version.
-
-After the version negotiation Git sends a list of all capabilities that
-it supports and a flush packet. Git expects to read a list of desired
-capabilities, which must be a subset of the supported capabilities list,
-and a flush packet as response:
-------------------------
-packet: git> git-filter-client
-packet: git> version=2
-packet: git> version=42
-packet: git> 0000
-packet: git< git-filter-server
-packet: git< version=2
-packet: git< 0000
-packet: git> capability=clean
-packet: git> capability=smudge
-packet: git> capability=not-yet-invented
-packet: git> 0000
-packet: git< capability=clean
-packet: git< capability=smudge
-packet: git< 0000
-------------------------
-Supported filter capabilities in version 2 are "clean", "smudge",
-and "delay".
+command. This is achieved by using the long-running process protocol
+(described in technical/long-running-process-protocol.txt).
+
+When Git encounters the first file that needs to be cleaned or smudged,
+it starts the filter and performs the handshake. In the handshake, the
+welcome message sent by Git is "git-filter-client", only version 2 is
+supported, and the supported capabilities are "clean", "smudge", and
+"delay".
Afterwards Git sends a list of "key=value" pairs terminated with
a flush packet. The list will contain at least the filter command
with the next file that needs to be processed. Depending on the
`filter.<driver>.required` flag Git will interpret that as an error.
-After the filter has processed a command it is expected to wait for
-a "key=value" list containing the next command. Git will close
-the command pipe on exit. The filter is expected to detect EOF
-and exit gracefully on its own. Git will wait until the filter
-process has stopped.
-
Delay
^^^^^
- `fountain` suitable for Fountain documents.
+- `golang` suitable for source code in the Go language.
+
- `html` suitable for HTML/XHTML documents.
- `java` suitable for source code in the Java language.
(relative to the toplevel of the work tree if not from a
`.gitignore` file).
- - Otherwise, Git treats the pattern as a shell glob suitable
- for consumption by fnmatch(3) with the FNM_PATHNAME flag:
- wildcards in the pattern will not match a / in the pathname.
- For example, "Documentation/{asterisk}.html" matches
- "Documentation/git.html" but not "Documentation/ppc/ppc.html"
- or "tools/perf/Documentation/perf.html".
+ - Otherwise, Git treats the pattern as a shell glob: "`*`" matches
+ anything except "`/`", "`?`" matches any one character except "`/`"
+ and "`[]`" matches one character in a selected range. See
+ fnmatch(3) and the FNM_PATHNAME flag for a more detailed
+ description.
- A leading slash matches the beginning of the pathname.
For example, "/{asterisk}.c" matches "cat-file.c" but not
Transmit <string> as a push option. As the push option
must not contain LF or NUL characters, the string is not encoded.
+'option from-promisor' {'true'|'false'}::
+ Indicate that these objects are being fetched from a promisor.
+
+'option no-dependents' {'true'|'false'}::
+ Indicate that only the objects wanted need to be fetched, not
+ their dependents.
+
SEE ALSO
--------
linkgit:git-remote[1]
consists of (i) a Git directory located under the `$GIT_DIR/modules/`
directory of its superproject, (ii) a working directory inside the
superproject's working directory, and a `.git` file at the root of
-the submodule’s working directory pointing to (i).
+the submodule's working directory pointing to (i).
Assuming the submodule has a Git directory at `$GIT_DIR/modules/foo/`
and a working directory at `path/to/bar/`, the superproject tracks the
`submodule.foo.path = path/to/bar`.
The `gitlink` entry contains the object name of the commit that the
-superproject expects the submodule’s working directory to be at.
+superproject expects the submodule's working directory to be at.
The section `submodule.foo.*` in the `.gitmodules` file gives additional
-hints to Gits porcelain layer such as where to obtain the submodule via
-the `submodule.foo.url` setting.
+hints to Git's porcelain layer. For example, the `submodule.foo.url`
+setting specifies where to obtain the submodule.
Submodules can be used for at least two different use cases:
2. Splitting a (logically single) project into multiple
repositories and tying them back together. This can be used to
- overcome current limitations of Gits implementation to have
+ overcome current limitations of Git's implementation to have
finer grained access:
- * Size of the git repository:
+ * Size of the Git repository:
In its current form Git scales up poorly for large repositories containing
content that is not compressed by delta computation between trees.
- However you can also use submodules to e.g. hold large binary assets
- and these repositories are then shallowly cloned such that you do not
+ For example, you can use submodules to hold large binary assets
+ and these repositories can be shallowly cloned such that you do not
have a large history locally.
* Transfer size:
In its current form Git requires the whole working tree present. It
does not allow partial trees to be transferred in fetch or clone.
+ If the project you work on consists of multiple repositories tied
+ together as submodules in a superproject, you can avoid fetching the
+ working trees of the repositories you are not interested in.
* Access control:
By restricting user access to submodules, this can be used to implement
read/write policies for different users.
Submodule operations can be configured using the following mechanisms
(from highest to lowest precedence):
- * The command line for those commands that support taking submodule specs.
- Most commands have a boolean flag '--recurse-submodules' whether to
- recurse into submodules. Examples are `ls-files` or `checkout`.
+ * The command line for those commands that support taking submodules
+ as part of their pathspecs. Most commands have a boolean flag
+   `--recurse-submodules` which specifies whether to recurse into submodules.
+ Examples are `grep` and `checkout`.
Some commands take enums, such as `fetch` and `push`, where you can
specify how submodules are affected.
For example an effect from the submodule's `.gitignore` file
would be observed when you run `git status --ignore-submodules=none` in
the superproject. This collects information from the submodule's working
-directory by running `status` in the submodule, which does pay attention
-to its `.gitignore` file.
+directory by running `status` in the submodule while paying attention
+to the `.gitignore` file of the submodule.
+
The submodule's `$GIT_DIR/config` file would come into play when running
`git push --recurse-submodules=check` in the superproject, as this would
file.
* The configuration file `$GIT_DIR/config` in the superproject.
- Typical configuration at this place is controlling if a submodule
- is recursed into at all via the `active` flag for example.
+ Git only recurses into active submodules (see "ACTIVE SUBMODULES"
+ section below).
+
If the submodule is not yet initialized, then the configuration
-inside the submodule does not exist yet, so configuration where to
+inside the submodule does not exist yet, so where to
obtain the submodule from is configured here for example.
- * the `.gitmodules` file inside the superproject. Additionally to the
- required mapping between submodule's name and path, a project usually
+ * The `.gitmodules` file inside the superproject. A project usually
uses this file to suggest defaults for the upstream collection
- of repositories.
+ of repositories for the mapping that is required between a
+ submodule's name and its path.
+
-This file mainly serves as the mapping between name and path in
-the superproject, such that the submodule's git directory can be
+This file mainly serves as the mapping between the name and path of submodules
+in the superproject, such that the submodule's Git directory can be
located.
+
If the submodule has never been initialized, this is the only place
+
It is possible to construct these old form repositories manually.
+
-When deinitialized or deleted (see below), the submodule’s Git
+When deinitialized or deleted (see below), the submodule's Git
directory is automatically moved to `$GIT_DIR/modules/<name>/`
of the superproject.
* Deinitialized submodule: A `gitlink`, and a `.gitmodules` entry,
-but no submodule working directory. The submodule’s git directory
-may be there as after deinitializing the git directory is kept around.
+but no submodule working directory. The submodule's Git directory
+may be there as after deinitializing the Git directory is kept around.
The directory which is supposed to be the working directory is empty instead.
+
A submodule can be deinitialized by running `git submodule deinit`.
Besides emptying the working directory, this command only modifies
-the superproject’s `$GIT_DIR/config` file, so the superproject’s history
+the superproject's `$GIT_DIR/config` file, so the superproject's history
is not affected. This can be undone using `git submodule init`.
* Deleted submodule: A submodule can be deleted by running
`git rm <submodule path> && git commit`. This can be undone
using `git revert`.
+
-The deletion removes the superproject’s tracking data, which are
+The deletion removes the superproject's tracking data, which are
both the `gitlink` entry and the section in the `.gitmodules` file.
-The submodule’s working directory is removed from the file
+The submodule's working directory is removed from the file
system, but the Git directory is kept around to make it
possible to check out past commits without requiring fetching
from another repository.
To completely remove a submodule, manually delete
`$GIT_DIR/modules/<name>/`.
+ACTIVE SUBMODULES
+-----------------
+
+A submodule is considered active,
+
+ (a) if `submodule.<name>.active` is set to `true`
+ or
+ (b) if the submodule's path matches the pathspec in `submodule.active`
+ or
+ (c) if `submodule.<name>.url` is set.
+
+and these are evaluated in this order.
+
+For example:
+
+ [submodule "foo"]
+ active = false
+ url = https://example.org/foo
+ [submodule "bar"]
+ active = true
+ url = https://example.org/bar
+ [submodule "baz"]
+ url = https://example.org/baz
+
+In the above config only the submodules 'bar' and 'baz' are active:
+'bar' due to (a) and 'baz' due to (c). 'foo' is inactive because
+(a) takes precedence over (c).
+
+Note that (c) is a historical artefact and will be ignored if
+(a) and (b) specify that the submodule is not active. In other words,
+if we have a `submodule.<name>.active` set to `false` or if the
+submodule's path is excluded in the pathspec in `submodule.active`, the
+url does not matter, whether it is present or not. This is illustrated
+in the example that follows.
+
+ [submodule "foo"]
+ active = true
+ url = https://example.org/foo
+ [submodule "bar"]
+ url = https://example.org/bar
+ [submodule "baz"]
+ url = https://example.org/baz
+ [submodule "bob"]
+ ignore = true
+ [submodule]
+ active = b*
+ active = :(exclude) baz
+
+Here, all submodules except 'baz' ('foo', 'bar', and 'bob') are active:
+'foo' due to its own active flag and all the others due to the
+submodule active pathspec, which specifies that any submodule
+starting with 'b' except 'baz' is also active, regardless of the
+presence of the .url field.
+
Workflow for a third party library
----------------------------------
--no-ff::
Create a merge commit even when the merge resolves as a
fast-forward. This is the default behaviour when merging an
- annotated (and possibly signed) tag.
+ annotated (and possibly signed) tag that is not stored in
+ its natural place in 'refs/tags/' hierarchy.
--ff-only::
Refuse to merge and exit with a non-zero status unless the
- '%>>(<N>)', '%>>|(<N>)': similar to '%>(<N>)', '%>|(<N>)'
respectively, except that if the next placeholder takes more spaces
than given and there are spaces on its left, use those spaces
-- '%><(<N>)', '%><|(<N>)': similar to '% <(<N>)', '%<|(<N>)'
+- '%><(<N>)', '%><|(<N>)': similar to '%<(<N>)', '%<|(<N>)'
respectively, but padding both sides (i.e. the text is centered)
- %(trailers[:options]): display the trailers of the body as interpreted
by linkgit:git-interpret-trailers[1]. The `trailers` string may be
if a missing object is encountered. Missing objects will silently be
omitted from the results.
+
+The form '--missing=allow-promisor' is like 'allow-any', but will only
+allow object traversal to continue for EXPECTED promisor missing objects.
+Unexpected missing objects will raise an error.
++
The form '--missing=print' is like 'allow-any', but will also print a
list of the missing objects. Object IDs are prefixed with a ``?'' character.
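++
+A usage sketch that lists only the missing objects (the leading "?" is
+the prefix described above):
++
+------------
+$ git rev-list --objects --missing=print HEAD | grep '^?'
+------------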
endif::git-rev-list[]
+--exclude-promisor-objects::
+ (For internal use only.) Prefilter object traversal at
+ promisor boundary. This is used with partial clone. This is
+ stronger than `--missing=allow-promisor` because it limits the
+ traversal, rather than just silencing errors about missing
+ objects.
+
--no-walk[=(sorted|unsorted)]::
Only show the given commits, but do not traverse their ancestors.
This has no effect if a range is specified. If the argument
* read_object_with_reference()
* has_sha1_file()
* write_sha1_file()
-* pretend_sha1_file()
+* pretend_object_file()
* lookup_{object,commit,tag,blob,tree}
* parse_{object,commit,tag,blob,tree}
* Use of object flags
The submodule config cache API allows to read submodule
configurations/information from specified revisions. Internally
information is lazily read into a cache that is used to avoid
-unnecessary parsing of the same .gitmodule files. Lookups can be done by
+unnecessary parsing of the same .gitmodules files. Lookups can be done by
submodule path or name.
Usage
S: Cache-Control: no-cache
S:
S: 001e# service=git-upload-pack\n
+ S: 0000
S: 004895dcfa3633004da0049d3d0fa03f80589cbcaf31 refs/heads/maint\0multi_ack\n
S: 0042d049f6c27a2244e12041955e262a404c7faba355 refs/heads/master\n
S: 003c2cb58b79488a98d2721cea644875a8dd0026b115 refs/tags/v1.0\n
S: 003fa3c2e2402b99163d1d59756e5f207ae21cccba4c refs/tags/v1.0^{}\n
+ S: 0000
The client may send Extra Parameters (see
Documentation/technical/pack-protocol.txt) as a colon-separated string
Extra Parameter.
smart_reply = PKT-LINE("# service=$servicename" LF)
+ "0000"
*1("version 1")
ref_list
"0000"
--- /dev/null
+Long-running process protocol
+=============================
+
+This protocol is used when Git needs to communicate with an external
+process throughout the entire life of a single Git command. All
+communication is in pkt-line format (see technical/protocol-common.txt)
+over standard input and standard output.
+
+Handshake
+---------
+
+Git starts by sending a welcome message (for example,
+"git-filter-client"), a list of supported protocol version numbers, and
+a flush packet. Git expects to read the welcome message with "server"
+instead of "client" (for example, "git-filter-server"), exactly one
+protocol version number from the previously sent list, and a flush
+packet. All further communication will be based on the selected version.
+The remaining protocol description below documents "version=2". Please
+note that "version=42" in the example below does not exist and is only
+there to illustrate what the protocol would look like with more than
+one version.
+
+After the version negotiation Git sends a list of all capabilities that
+it supports and a flush packet. Git expects to read a list of desired
+capabilities, which must be a subset of the supported capabilities list,
+and a flush packet as response:
+------------------------
+packet: git> git-filter-client
+packet: git> version=2
+packet: git> version=42
+packet: git> 0000
+packet: git< git-filter-server
+packet: git< version=2
+packet: git< 0000
+packet: git> capability=clean
+packet: git> capability=smudge
+packet: git> capability=not-yet-invented
+packet: git> 0000
+packet: git< capability=clean
+packet: git< capability=smudge
+packet: git< 0000
+------------------------
+
+Shutdown
+--------
+
+Git will close the command pipe on exit. The filter is expected to
+detect EOF and exit gracefully on its own. Git will wait until the
+filter process has stopped.
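+
+Example
+-------
+
+Below is a minimal sketch, in C, of the filter side of the handshake
+described above. It is illustrative only and is not part of Git: the
+helpers read_pkt/write_pkt are simplified stand-ins for a real
+pkt-line implementation, and a real filter would also validate the
+offered versions and implement the per-command protocol that follows
+the handshake.
+
+------------------------
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/*
+ * Read one pkt-line payload into buf; return the payload length,
+ * 0 for a flush packet ("0000"), or -1 on EOF or error.
+ */
+static int read_pkt(char *buf, size_t size)
+{
+	char hdr[5] = "";
+	unsigned long len;
+
+	if (fread(hdr, 1, 4, stdin) != 4)
+		return -1;
+	len = strtoul(hdr, NULL, 16);
+	if (!len)
+		return 0;	/* flush-pkt */
+	len -= 4;	/* the length includes the 4-byte header itself */
+	if (len >= size || fread(buf, 1, len, stdin) != len)
+		return -1;
+	buf[len] = '\0';
+	return (int)len;
+}
+
+/* Write one pkt-line; handshake payloads end with LF. */
+static void write_pkt(const char *s)
+{
+	printf("%04lx%s", (unsigned long)(strlen(s) + 4), s);
+}
+
+static void write_flush(void)
+{
+	fputs("0000", stdout);
+	fflush(stdout);
+}
+
+int main(void)
+{
+	char buf[65520];
+
+	/* Read "git-filter-client" and the version list, up to the flush. */
+	while (read_pkt(buf, sizeof(buf)) > 0)
+		; /* a real filter would check for "version=2" here */
+	write_pkt("git-filter-server\n");
+	write_pkt("version=2\n");
+	write_flush();
+
+	/* Read the advertised capabilities, then answer with a subset. */
+	while (read_pkt(buf, sizeof(buf)) > 0)
+		; /* e.g. "capability=clean", "capability=smudge" */
+	write_pkt("capability=clean\n");
+	write_pkt("capability=smudge\n");
+	write_flush();
+
+	/* The per-command filter loop would follow here, until EOF. */
+	return 0;
+}
+------------------------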
upload-request = want-list
*shallow-line
*1depth-request
+ [filter-request]
flush-pkt
want-list = first-want
additional-want = PKT-LINE("want" SP obj-id)
depth = 1*DIGIT
+
+ filter-request = PKT-LINE("filter" SP filter-spec)
----
Clients MUST send all the obj-ids they want from the reference
result are defined as shallow and marked as such in the server. This
information is sent back to the client in the next step.
+The client can optionally request that pack-objects omit various
+objects from the packfile using one of several filtering techniques.
+These are intended for use with partial clone and partial fetch
+operations. See `rev-list` for possible "filter-spec" values.
+
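+For illustration, a request asking the server to omit all blobs would
+use the "blob:none" filter-spec (documented for `rev-list`), sending,
+in addition to its "want" lines, a packet such as:
+
+   filter blob:none
+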
Once all the 'want's and 'shallow's (and optional 'deepen') are
transferred, clients MUST send a flush-pkt, to tell the server side
that it is done sending the list.
included in the push certificate. A send-pack client MUST NOT
send a push-cert packet unless the receive-pack server advertises
this capability.
+
+filter
+------
+
+If the upload-pack server advertises the 'filter' capability,
+fetch-pack may send "filter" commands to request a partial clone
+or partial fetch, asking the server to omit various objects from
+the packfile.
When the config key `extensions.preciousObjects` is set to `true`,
objects in the repository MUST NOT be deleted (e.g., by `git-prune` or
`git repack -d`).
+
+`partialclone`
+~~~~~~~~~~~~~~
+
+When the config key `extensions.partialclone` is set, it indicates
+that the repo was created with a partial clone (or later performed
+a partial fetch) and that the remote may have omitted sending
+certain unwanted objects. Such a remote is called a "promisor remote"
+and it promises that all such omitted objects can be fetched from it
+in the future.
+
+The value of this key is the name of the promisor remote.
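+
+For example, in a repository partially cloned from a remote named
+"origin" (an illustrative name), ".git/config" would contain
+something like:
+
+	[extensions]
+		partialclone = origin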
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v2.16.2
+DEF_VER=v2.17.0-rc0
LF='
'
GIT_EXEC_PATH=`pwd`
PATH=`pwd`:$PATH
- GITPERLLIB=`pwd`/perl/blib/lib
+ GITPERLLIB=`pwd`/perl/build/lib
export GIT_EXEC_PATH PATH GITPERLLIB
+ - By default (unless NO_PERL is provided) Git will ship various perl
+ scripts. However, for simplicity it doesn't use the
+ ExtUtils::MakeMaker toolchain to decide where to place the perl
+   libraries. Depending on the system, this can result in the perl
+   libraries not being where you'd like them if they're expected to be
+   used by things other than Git itself.
+
+   If this is a problem you care about, manually supplying a perllibdir
+   prefix should fix it, e.g.:
+
+ prefix=/usr perllibdir=/usr/$(/usr/bin/perl -MConfig -wle 'print substr $Config{installsitelib}, 1 + length $Config{siteprefixexp}')
+
+   This will result in e.g. perllibdir=/usr/share/perl/5.26.1 on
+   Debian, or perllibdir=/usr/share/perl5 (which we'd use by default)
+   on CentOS.
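+
+   For example, on such a Debian system, something like
+
+       make prefix=/usr perllibdir=/usr/share/perl/5.26.1 install
+
+   would place the perl libraries in the distribution's usual
+   site-wide directory (paths here are illustrative).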
+
+ - Unless NO_PERL is provided, Git will ship various perl libraries it
+   needs. Distributors of Git who do not provide NO_PERL will usually
+   want to set NO_PERL_CPAN_FALLBACKS so that their own packaged copies
+   of the CPAN modules Git needs are used instead.
+
- Git is reasonably self-sufficient, but does depend on a few external
programs and libraries. Git can be used without most of them by adding
the appropriate "NO_<LIBRARY>=YesPlease" to the make command line or
#
# Define PERL_PATH to the path of your Perl binary (usually /usr/bin/perl).
#
-# Define NO_PERL_MAKEMAKER if you cannot use Makefiles generated by perl's
-# MakeMaker (e.g. using ActiveState under Cygwin).
-#
# Define NO_PERL if you do not want Perl scripts or libraries at all.
#
+# Define NO_PERL_CPAN_FALLBACKS if you do not want to install bundled
+# copies of CPAN modules that serve as a fallback in case the modules
+# are not available on the system. This option is intended for
+# distributions that want to use their packaged versions of Perl
+# modules, instead of the fallbacks shipped with Git.
+#
# Define PYTHON_PATH to the path of your Python binary (often /usr/bin/python
# but /usr/bin/python2.7 on some platforms).
#
mergetoolsdir = $(gitexecdir)/mergetools
sharedir = $(prefix)/share
gitwebdir = $(sharedir)/gitweb
+perllibdir = $(sharedir)/perl5
localedir = $(sharedir)/locale
template_dir = share/git-core/templates
htmldir = $(prefix)/share/doc/git-doc
infodir_relative = $(patsubst $(prefix)/%,%,$(infodir))
htmldir_relative = $(patsubst $(prefix)/%,%,$(htmldir))
-export prefix bindir sharedir sysconfdir gitwebdir localedir
+export prefix bindir sharedir sysconfdir gitwebdir perllibdir localedir
CC = cc
AR = ar
LIB_OBJS += ewah/ewah_io.o
LIB_OBJS += ewah/ewah_rlw.o
LIB_OBJS += exec_cmd.o
+LIB_OBJS += fetch-object.o
LIB_OBJS += fetch-pack.o
LIB_OBJS += fsck.o
LIB_OBJS += fsmonitor.o
LIB_OBJS += merge-blobs.o
LIB_OBJS += merge-recursive.o
LIB_OBJS += mergesort.o
-LIB_OBJS += mru.o
LIB_OBJS += name-hash.o
LIB_OBJS += notes.o
LIB_OBJS += notes-cache.o
LIB_OBJS += sha1dc_git.o
ifdef DC_SHA1_EXTERNAL
ifdef DC_SHA1_SUBMODULE
+ ifneq ($(DC_SHA1_SUBMODULE),auto)
$(error Only set DC_SHA1_EXTERNAL or DC_SHA1_SUBMODULE, not both)
+ endif
endif
BASIC_CFLAGS += -DDC_SHA1_EXTERNAL
EXTLIBS += -lsha1detectcoll
LIB_OBJS += compat/sha1-chunked.o
BASIC_CFLAGS += -DSHA1_MAX_BLOCK_SIZE="$(SHA1_MAX_BLOCK_SIZE)"
endif
-ifdef NO_PERL_MAKEMAKER
- export NO_PERL_MAKEMAKER
-endif
ifdef NO_HSTRERROR
COMPAT_CFLAGS += -DNO_HSTRERROR
COMPAT_OBJS += compat/hstrerror.o
DESTDIR_SQ = $(subst ','\'',$(DESTDIR))
bindir_SQ = $(subst ','\'',$(bindir))
bindir_relative_SQ = $(subst ','\'',$(bindir_relative))
+mandir_SQ = $(subst ','\'',$(mandir))
mandir_relative_SQ = $(subst ','\'',$(mandir_relative))
infodir_relative_SQ = $(subst ','\'',$(infodir_relative))
+perllibdir_SQ = $(subst ','\'',$(perllibdir))
localedir_SQ = $(subst ','\'',$(localedir))
gitexecdir_SQ = $(subst ','\'',$(gitexecdir))
template_dir_SQ = $(subst ','\'',$(template_dir))
ifndef NO_TCLTK
$(QUIET_SUBDIR0)git-gui $(QUIET_SUBDIR1) gitexecdir='$(gitexec_instdir_SQ)' all
$(QUIET_SUBDIR0)gitk-git $(QUIET_SUBDIR1) all
-endif
-ifndef NO_PERL
- $(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' localedir='$(localedir_SQ)' all
endif
$(QUIET_SUBDIR0)templates $(QUIET_SUBDIR1) SHELL_PATH='$(SHELL_PATH_SQ)' PERL_PATH='$(PERL_PATH_SQ)'
SCRIPT_DEFINES = $(SHELL_PATH_SQ):$(DIFF_SQ):$(GIT_VERSION):\
$(localedir_SQ):$(NO_CURL):$(USE_GETTEXT_SCHEME):$(SANE_TOOL_PATH_SQ):\
- $(gitwebdir_SQ):$(PERL_PATH_SQ):$(SANE_TEXT_GREP):$(PAGER_ENV)
+ $(gitwebdir_SQ):$(PERL_PATH_SQ):$(SANE_TEXT_GREP):$(PAGER_ENV):\
+ $(perllibdir_SQ)
define cmd_munge_script
$(RM) $@ $@+ && \
sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
$(SCRIPT_PERL_GEN): GIT-BUILD-OPTIONS
ifndef NO_PERL
-$(SCRIPT_PERL_GEN): perl/perl.mak
+$(SCRIPT_PERL_GEN):
-perl/perl.mak: perl/PM.stamp
-
-perl/PM.stamp: FORCE
- @$(FIND) perl -type f -name '*.pm' | sort >$@+ && \
- $(PERL_PATH) -V >>$@+ && \
- { cmp $@+ $@ >/dev/null 2>/dev/null || mv $@+ $@; } && \
- $(RM) $@+
-
-perl/perl.mak: GIT-CFLAGS GIT-PREFIX perl/Makefile perl/Makefile.PL
- $(QUIET_SUBDIR0)perl $(QUIET_SUBDIR1) PERL_PATH='$(PERL_PATH_SQ)' prefix='$(prefix_SQ)' $(@F)
-
-PERL_DEFINES = $(PERL_PATH_SQ):$(PERLLIB_EXTRA_SQ)
-$(SCRIPT_PERL_GEN): % : %.perl perl/perl.mak GIT-PERL-DEFINES GIT-VERSION-FILE
+PERL_DEFINES = $(PERL_PATH_SQ):$(PERLLIB_EXTRA_SQ):$(perllibdir_SQ)
+$(SCRIPT_PERL_GEN): % : %.perl GIT-PERL-DEFINES GIT-VERSION-FILE
$(QUIET_GEN)$(RM) $@ $@+ && \
- INSTLIBDIR=`MAKEFLAGS= $(MAKE) -C perl -s --no-print-directory instlibdir` && \
+ INSTLIBDIR='$(perllibdir_SQ)' && \
INSTLIBDIR_EXTRA='$(PERLLIB_EXTRA_SQ)' && \
INSTLIBDIR="$$INSTLIBDIR$${INSTLIBDIR_EXTRA:+:$$INSTLIBDIR_EXTRA}" && \
sed -e '1{' \
http-push.sp http.sp http-walker.sp remote-curl.sp imap-send.sp: SPARSE_FLAGS += \
-DCURL_DISABLE_TYPECHECK
+pack-revindex.sp: SPARSE_FLAGS += -Wno-memcpy-max-count
+
ifdef NO_EXPAT
http-walker.sp http-walker.s http-walker.o: EXTRA_CPPFLAGS = -DNO_EXPAT
endif
export DEFAULT_EDITOR DEFAULT_PAGER
-.PHONY: doc man html info pdf
-doc:
+.PHONY: doc man man-perl html info pdf
+doc: man-perl
$(MAKE) -C Documentation all
-man:
+man: man-perl
$(MAKE) -C Documentation man
+man-perl: perl/build/man/man3/Git.3pm
+
html:
$(MAKE) -C Documentation html
po/build/locale/%/LC_MESSAGES/git.mo: po/%.po
$(QUIET_MSGFMT)mkdir -p $(dir $@) && $(MSGFMT) -o $@ $<
+LIB_PERL := $(wildcard perl/Git.pm perl/Git/*.pm perl/Git/*/*.pm perl/Git/*/*/*.pm)
+LIB_PERL_GEN := $(patsubst perl/%.pm,perl/build/lib/%.pm,$(LIB_PERL))
+LIB_CPAN := $(wildcard perl/FromCPAN/*.pm perl/FromCPAN/*/*.pm)
+LIB_CPAN_GEN := $(patsubst perl/%.pm,perl/build/lib/%.pm,$(LIB_CPAN))
+
+ifndef NO_PERL
+all:: $(LIB_PERL_GEN)
+ifndef NO_PERL_CPAN_FALLBACKS
+all:: $(LIB_CPAN_GEN)
+endif
+NO_PERL_CPAN_FALLBACKS_SQ = $(subst ','\'',$(NO_PERL_CPAN_FALLBACKS))
+endif
+
+perl/build/lib/%.pm: perl/%.pm
+ $(QUIET_GEN)mkdir -p $(dir $@) && \
+ sed -e 's|@@LOCALEDIR@@|$(localedir_SQ)|g' \
+ -e 's|@@NO_PERL_CPAN_FALLBACKS@@|$(NO_PERL_CPAN_FALLBACKS_SQ)|g' \
+ < $< > $@
+
+perl/build/man/man3/Git.3pm: perl/Git.pm
+ $(QUIET_GEN)mkdir -p $(dir $@) && \
+ pod2man $< $@
+
FIND_SOURCE_FILES = ( \
git ls-files \
'*.[hcS]' \
(cd '$(DESTDIR_SQ)$(localedir_SQ)' && umask 022 && $(TAR) xof -)
endif
ifndef NO_PERL
- $(MAKE) -C perl prefix='$(prefix_SQ)' DESTDIR='$(DESTDIR_SQ)' install
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perllibdir_SQ)'
+ (cd perl/build/lib && $(TAR) cf - .) | \
+ (cd '$(DESTDIR_SQ)$(perllibdir_SQ)' && umask 022 && $(TAR) xof -)
$(MAKE) -C gitweb install
endif
ifndef NO_TCLTK
done && \
./check_bindir "z$$bindir" "z$$execdir" "$$bindir/git-add$X"
-.PHONY: install-gitweb install-doc install-man install-html install-info install-pdf
+.PHONY: install-gitweb install-doc install-man install-man-perl install-html install-info install-pdf
.PHONY: quick-install-doc quick-install-man quick-install-html
install-gitweb:
$(MAKE) -C gitweb install
-install-doc:
+install-doc: install-man-perl
$(MAKE) -C Documentation install
-install-man:
+install-man: install-man-perl
$(MAKE) -C Documentation install-man
+install-man-perl: man-perl
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(mandir_SQ)/man3'
+ (cd perl/build/man/man3 && $(TAR) cf - .) | \
+ (cd '$(DESTDIR_SQ)$(mandir_SQ)/man3' && umask 022 && $(TAR) xof -)
+
install-html:
$(MAKE) -C Documentation install-html
$(GIT_TARNAME)/configure \
$(GIT_TARNAME)/version \
$(GIT_TARNAME)/git-gui/version
+ifdef DC_SHA1_SUBMODULE
+ @mkdir -p $(GIT_TARNAME)/sha1collisiondetection/lib
+ @cp sha1collisiondetection/LICENSE.txt \
+ $(GIT_TARNAME)/sha1collisiondetection/
+ @cp sha1collisiondetection/lib/sha1.[ch] \
+ $(GIT_TARNAME)/sha1collisiondetection/lib/
+ @cp sha1collisiondetection/lib/ubc_check.[ch] \
+ $(GIT_TARNAME)/sha1collisiondetection/lib/
+ $(TAR) rf $(GIT_TARNAME).tar \
+ $(GIT_TARNAME)/sha1collisiondetection/LICENSE.txt \
+ $(GIT_TARNAME)/sha1collisiondetection/lib/sha1.[ch] \
+ $(GIT_TARNAME)/sha1collisiondetection/lib/ubc_check.[ch]
+endif
@$(RM) -r $(GIT_TARNAME)
gzip -f -9 $(GIT_TARNAME).tar
$(RM) $(TEST_PROGRAMS) $(NO_INSTALL)
$(RM) -r bin-wrappers $(dep_dirs)
$(RM) -r po/build/
- $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo common-cmds.h $(ETAGS_TARGET) tags cscope*
+ $(RM) *.pyc *.pyo */*.pyc */*.pyo common-cmds.h $(ETAGS_TARGET) tags cscope*
$(RM) -r $(GIT_TARNAME) .doc-tmp-dir
$(RM) $(GIT_TARNAME).tar.gz git-core_$(GIT_VERSION)-*.tar.gz
$(RM) $(htmldocs).tar.gz $(manpages).tar.gz
$(MAKE) -C Documentation/ clean
ifndef NO_PERL
$(MAKE) -C gitweb clean
- $(MAKE) -C perl clean
+ $(RM) -r perl/build/
endif
$(MAKE) -C templates/ clean
$(MAKE) -C t/ clean
-Documentation/RelNotes/2.16.2.txt
\ No newline at end of file
+Documentation/RelNotes/2.17.0.txt
\ No newline at end of file
}
free(another);
} else {
- if (!starts_with(line, "/dev/null\n"))
+ if (!is_dev_null(line))
return error(_("git apply: bad git-diff - expected /dev/null on line %d"), state->linenr);
}
static int read_old_data(struct stat *st, struct patch *patch,
const char *path, struct strbuf *buf)
{
- enum safe_crlf safe_crlf = patch->crlf_in_old ?
- SAFE_CRLF_KEEP_CRLF : SAFE_CRLF_RENORMALIZE;
+ int conv_flags = patch->crlf_in_old ?
+ CONV_EOL_KEEP_CRLF : CONV_EOL_RENORMALIZE;
switch (st->st_mode & S_IFMT) {
case S_IFLNK:
if (strbuf_readlink(buf, path, st->st_size) < 0)
* should never look at the index when explicit crlf option
* is given.
*/
- convert_to_git(NULL, path, buf->buf, buf->len, buf, safe_crlf);
+ convert_to_git(NULL, path, buf->buf, buf->len, buf, conv_flags);
return 0;
default:
return -1;
size_t len, size_t postlen)
{
int i, ctx, reduced;
- char *new, *old, *fixed;
+ char *new_buf, *old_buf, *fixed;
struct image fixed_preimage;
/*
* We trust the caller to tell us if the update can be done
* in place (postlen==0) or not.
*/
- old = postimage->buf;
+ old_buf = postimage->buf;
if (postlen)
- new = postimage->buf = xmalloc(postlen);
+ new_buf = postimage->buf = xmalloc(postlen);
else
- new = old;
+ new_buf = old_buf;
fixed = preimage->buf;
for (i = reduced = ctx = 0; i < postimage->nr; i++) {
size_t l_len = postimage->line[i].len;
if (!(postimage->line[i].flag & LINE_COMMON)) {
/* an added line -- no counterparts in preimage */
- memmove(new, old, l_len);
- old += l_len;
- new += l_len;
+ memmove(new_buf, old_buf, l_len);
+ old_buf += l_len;
+ new_buf += l_len;
continue;
}
/* a common context -- skip it in the original postimage */
- old += l_len;
+ old_buf += l_len;
/* and find the corresponding one in the fixed preimage */
while (ctx < preimage->nr &&
/* and copy it in, while fixing the line length */
l_len = preimage->line[ctx].len;
- memcpy(new, fixed, l_len);
- new += l_len;
+ memcpy(new_buf, fixed, l_len);
+ new_buf += l_len;
fixed += l_len;
postimage->line[i].len = l_len;
ctx++;
}
if (postlen
- ? postlen < new - postimage->buf
- : postimage->len < new - postimage->buf)
+ ? postlen < new_buf - postimage->buf
+ : postimage->len < new_buf - postimage->buf)
die("BUG: caller miscounted postlen: asked %d, orig = %d, used = %d",
- (int)postlen, (int) postimage->len, (int)(new - postimage->buf));
+ (int)postlen, (int) postimage->len, (int)(new_buf - postimage->buf));
/* Fix the length of the whole thing */
- postimage->len = new - postimage->buf;
+ postimage->len = new_buf - postimage->buf;
postimage->nr -= reduced;
}
static int line_by_line_fuzzy_match(struct image *img,
struct image *preimage,
struct image *postimage,
- unsigned long try,
- int try_lno,
+ unsigned long current,
+ int current_lno,
int preimage_limit)
{
int i;
for (i = 0; i < preimage_limit; i++) {
size_t prelen = preimage->line[i].len;
- size_t imglen = img->line[try_lno+i].len;
+ size_t imglen = img->line[current_lno+i].len;
- if (!fuzzy_matchlines(img->buf + try + imgoff, imglen,
+ if (!fuzzy_matchlines(img->buf + current + imgoff, imglen,
preimage->buf + preoff, prelen))
return 0;
if (preimage->line[i].flag & LINE_COMMON)
*/
extra_chars = preimage_end - preimage_eof;
strbuf_init(&fixed, imgoff + extra_chars);
- strbuf_add(&fixed, img->buf + try, imgoff);
+ strbuf_add(&fixed, img->buf + current, imgoff);
strbuf_add(&fixed, preimage_eof, extra_chars);
fixed_buf = strbuf_detach(&fixed, &fixed_len);
update_pre_post_images(preimage, postimage,
struct image *img,
struct image *preimage,
struct image *postimage,
- unsigned long try,
- int try_lno,
+ unsigned long current,
+ int current_lno,
unsigned ws_rule,
int match_beginning, int match_end)
{
size_t fixed_len, postlen;
int preimage_limit;
- if (preimage->nr + try_lno <= img->nr) {
+ if (preimage->nr + current_lno <= img->nr) {
/*
* The hunk falls within the boundaries of img.
*/
preimage_limit = preimage->nr;
- if (match_end && (preimage->nr + try_lno != img->nr))
+ if (match_end && (preimage->nr + current_lno != img->nr))
return 0;
} else if (state->ws_error_action == correct_ws_error &&
(ws_rule & WS_BLANK_AT_EOF)) {
* match with img, and the remainder of the preimage
* must be blank.
*/
- preimage_limit = img->nr - try_lno;
+ preimage_limit = img->nr - current_lno;
} else {
/*
* The hunk extends beyond the end of the img and
return 0;
}
- if (match_beginning && try_lno)
+ if (match_beginning && current_lno)
return 0;
/* Quick hash check */
for (i = 0; i < preimage_limit; i++)
- if ((img->line[try_lno + i].flag & LINE_PATCHED) ||
- (preimage->line[i].hash != img->line[try_lno + i].hash))
+ if ((img->line[current_lno + i].flag & LINE_PATCHED) ||
+ (preimage->line[i].hash != img->line[current_lno + i].hash))
return 0;
if (preimage_limit == preimage->nr) {
/*
* Do we have an exact match? If we were told to match
- * at the end, size must be exactly at try+fragsize,
- * otherwise try+fragsize must be still within the preimage,
+ * at the end, size must be exactly at current+fragsize,
+ * otherwise current+fragsize must be still within the preimage,
* and either case, the old piece should match the preimage
* exactly.
*/
if ((match_end
- ? (try + preimage->len == img->len)
- : (try + preimage->len <= img->len)) &&
- !memcmp(img->buf + try, preimage->buf, preimage->len))
+ ? (current + preimage->len == img->len)
+ : (current + preimage->len <= img->len)) &&
+ !memcmp(img->buf + current, preimage->buf, preimage->len))
return 1;
} else {
/*
*/
if (state->ws_ignore_action == ignore_ws_change)
return line_by_line_fuzzy_match(img, preimage, postimage,
- try, try_lno, preimage_limit);
+ current, current_lno, preimage_limit);
if (state->ws_error_action != correct_ws_error)
return 0;
*/
strbuf_init(&fixed, preimage->len + 1);
orig = preimage->buf;
- target = img->buf + try;
+ target = img->buf + current;
for (i = 0; i < preimage_limit; i++) {
size_t oldlen = preimage->line[i].len;
- size_t tgtlen = img->line[try_lno + i].len;
+ size_t tgtlen = img->line[current_lno + i].len;
size_t fixstart = fixed.len;
struct strbuf tgtfix;
int match;
int match_beginning, int match_end)
{
int i;
- unsigned long backwards, forwards, try;
- int backwards_lno, forwards_lno, try_lno;
+ unsigned long backwards, forwards, current;
+ int backwards_lno, forwards_lno, current_lno;
/*
* If match_beginning or match_end is specified, there is no
if ((size_t) line > img->nr)
line = img->nr;
- try = 0;
+ current = 0;
for (i = 0; i < line; i++)
- try += img->line[i].len;
+ current += img->line[i].len;
/*
* There's probably some smart way to do this, but I'll leave
* that to the smart and beautiful people. I'm simple and stupid.
*/
- backwards = try;
+ backwards = current;
backwards_lno = line;
- forwards = try;
+ forwards = current;
forwards_lno = line;
- try_lno = line;
+ current_lno = line;
for (i = 0; ; i++) {
if (match_fragment(state, img, preimage, postimage,
- try, try_lno, ws_rule,
+ current, current_lno, ws_rule,
match_beginning, match_end))
- return try_lno;
+ return current_lno;
again:
if (backwards_lno == 0 && forwards_lno == img->nr)
}
backwards_lno--;
backwards -= img->line[backwards_lno].len;
- try = backwards;
- try_lno = backwards_lno;
+ current = backwards;
+ current_lno = backwards_lno;
} else {
if (forwards_lno == img->nr) {
i++;
}
forwards += img->line[forwards_lno].len;
forwards_lno++;
- try = forwards;
- try_lno = forwards_lno;
+ current = forwards;
+ current_lno = forwards_lno;
}
}
* See if the old one matches what the patch
* applies to.
*/
- hash_sha1_file(img->buf, img->len, blob_type, oid.hash);
+ hash_object_file(img->buf, img->len, blob_type, &oid);
if (strcmp(oid_to_hex(&oid), patch->old_sha1_prefix))
return error(_("the patch applies to '%s' (%s), "
"which does not match the "
name);
/* verify that the result matches */
- hash_sha1_file(img->buf, img->len, blob_type, oid.hash);
+ hash_object_file(img->buf, img->len, blob_type, &oid);
if (strcmp(oid_to_hex(&oid), patch->new_sha1_prefix))
return error(_("binary patch to '%s' creates incorrect result (expecting %s, got %s)"),
name, patch->new_sha1_prefix, oid_to_hex(&oid));
/* Preimage the patch was prepared for */
if (patch->is_new)
- write_sha1_file("", 0, blob_type, pre_oid.hash);
+ write_object_file("", 0, blob_type, &pre_oid);
else if (get_oid(patch->old_sha1_prefix, &pre_oid) ||
read_blob_object(&buf, &pre_oid, patch->old_mode))
return error(_("repository lacks the necessary blob to fall back on 3-way merge."));
return -1;
}
/* post_oid is theirs */
- write_sha1_file(tmp_image.buf, tmp_image.len, blob_type, post_oid.hash);
+ write_object_file(tmp_image.buf, tmp_image.len, blob_type, &post_oid);
clear_image(&tmp_image);
/* our_oid is ours */
return error(_("cannot read the current contents of '%s'"),
patch->old_name);
}
- write_sha1_file(tmp_image.buf, tmp_image.len, blob_type, our_oid.hash);
+ write_object_file(tmp_image.buf, tmp_image.len, blob_type, &our_oid);
clear_image(&tmp_image);
/* in-core three-way merge between post and our using pre as base */
static void show_rename_copy(struct patch *p)
{
const char *renamecopy = p->is_rename ? "rename" : "copy";
- const char *old, *new;
+ const char *old_name, *new_name;
/* Find common prefix */
- old = p->old_name;
- new = p->new_name;
+ old_name = p->old_name;
+ new_name = p->new_name;
while (1) {
const char *slash_old, *slash_new;
- slash_old = strchr(old, '/');
- slash_new = strchr(new, '/');
+ slash_old = strchr(old_name, '/');
+ slash_new = strchr(new_name, '/');
if (!slash_old ||
!slash_new ||
- slash_old - old != slash_new - new ||
- memcmp(old, new, slash_new - new))
+ slash_old - old_name != slash_new - new_name ||
+ memcmp(old_name, new_name, slash_new - new_name))
break;
- old = slash_old + 1;
- new = slash_new + 1;
+ old_name = slash_old + 1;
+ new_name = slash_new + 1;
}
- /* p->old_name thru old is the common prefix, and old and new
+ /* p->old_name thru old_name is the common prefix, and old_name and new_name
* through the end of names are renames
*/
- if (old != p->old_name)
+ if (old_name != p->old_name)
printf(" %s %.*s{%s => %s} (%d%%)\n", renamecopy,
- (int)(old - p->old_name), p->old_name,
- old, new, p->score);
+ (int)(old_name - p->old_name), p->old_name,
+ old_name, new_name, p->score);
else
printf(" %s %s => %s (%d%%)\n", renamecopy,
p->old_name, p->new_name, p->score);
}
fill_stat_cache_info(ce, &st);
}
- if (write_sha1_file(buf, size, blob_type, ce->oid.hash) < 0) {
+ if (write_object_file(buf, size, blob_type, &ce->oid) < 0) {
free(ce);
return error(_("unable to create backing store "
"for newly created file %s"), path);
N_("make sure the patch is applicable to the current index")),
OPT_BOOL(0, "cached", &state->cached,
N_("apply a patch without touching the working tree")),
- OPT_BOOL(0, "unsafe-paths", &state->unsafe_paths,
- N_("accept a patch that touches outside the working area")),
+ OPT_BOOL_F(0, "unsafe-paths", &state->unsafe_paths,
+ N_("accept a patch that touches outside the working area"),
+ PARSE_OPT_NOCOMPLETE),
OPT_BOOL(0, "apply", force_apply,
N_("also apply the patch (use with --stat/--summary/--check)")),
OPT_BOOL('3', "3way", &state->threeway,
convert_to_git(&the_index, path, buf.buf, buf.len, &buf, 0);
origin->file.ptr = buf.buf;
origin->file.size = buf.len;
- pretend_sha1_file(buf.buf, buf.len, OBJ_BLOB, origin->blob_oid.hash);
+ pretend_object_file(buf.buf, buf.len, OBJ_BLOB, &origin->blob_oid);
/*
* Read the current index, replace the path entry with
}
/*
- * best_so_far[] and this[] are both a split of an existing blame_entry
- * that passes blame to the parent. Maintain best_so_far the best split
- * so far, by comparing this and best_so_far and copying this into
+ * best_so_far[] and potential[] are both a split of an existing blame_entry
+ * that passes blame to the parent. Maintain best_so_far as the best split
+ * so far, by comparing potential and best_so_far and copying potential into
 * best_so_far as needed.
*/
static void copy_split_if_better(struct blame_scoreboard *sb,
struct blame_entry *best_so_far,
- struct blame_entry *this)
+ struct blame_entry *potential)
{
int i;
- if (!this[1].suspect)
+ if (!potential[1].suspect)
return;
if (best_so_far[1].suspect) {
- if (blame_entry_score(sb, &this[1]) < blame_entry_score(sb, &best_so_far[1]))
+ if (blame_entry_score(sb, &potential[1]) <
+ blame_entry_score(sb, &best_so_far[1]))
return;
}
for (i = 0; i < 3; i++)
- blame_origin_incref(this[i].suspect);
+ blame_origin_incref(potential[i].suspect);
decref_split(best_so_far);
- memcpy(best_so_far, this, sizeof(struct blame_entry [3]));
+ memcpy(best_so_far, potential, sizeof(struct blame_entry[3]));
}
/*
if (ent->num_lines <= tlno)
return;
if (tlno < same) {
- struct blame_entry this[3];
+ struct blame_entry potential[3];
tlno += ent->s_lno;
same += ent->s_lno;
- split_overlap(this, ent, tlno, plno, same, parent);
- copy_split_if_better(sb, split, this);
- decref_split(this);
+ split_overlap(potential, ent, tlno, plno, same, parent);
+ copy_split_if_better(sb, split, potential);
+ decref_split(potential);
}
}
struct diff_filepair *p = diff_queued_diff.queue[i];
struct blame_origin *norigin;
mmfile_t file_p;
- struct blame_entry this[3];
+ struct blame_entry potential[3];
if (!DIFF_FILE_VALID(p->one))
continue; /* does not exist in parent */
for (j = 0; j < num_ents; j++) {
find_copy_in_blob(sb, blame_list[j].ent,
- norigin, this, &file_p);
+ norigin, potential, &file_p);
copy_split_if_better(sb, blame_list[j].split,
- this);
- decref_split(this);
+ potential);
+ decref_split(potential);
}
blame_origin_decref(norigin);
}
OPT_BOOL('i', "interactive", &add_interactive, N_("interactive picking")),
OPT_BOOL('p', "patch", &patch_interactive, N_("select hunks interactively")),
OPT_BOOL('e', "edit", &edit_interactive, N_("edit current diff and apply")),
- OPT__FORCE(&ignored_too, N_("allow adding otherwise ignored files")),
+ OPT__FORCE(&ignored_too, N_("allow adding otherwise ignored files"), 0),
OPT_BOOL('u', "update", &take_worktree_changes, N_("update tracked files")),
OPT_BOOL(0, "renormalize", &add_renormalize, N_("renormalize EOL of tracked files (implies -u)")),
OPT_BOOL('N', "intent-to-add", &intent_to_add, N_("record only the fact that the path will be added later")),
unplug_bulk_checkin();
finish:
- if (active_cache_changed) {
- if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
- die(_("Unable to write new index file"));
- }
+ if (write_locked_index(&the_index, &lock_file,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
+ die(_("Unable to write new index file"));
UNLEAK(pathspec);
UNLEAK(dir);
if (mkdir(state->dir, 0777) < 0 && errno != EEXIST)
die_errno(_("failed to create directory '%s'"), state->dir);
+ delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
if (split_mail(state, patch_format, paths, keep_cr) < 0) {
am_destroy(state);
}
write_state_text(state, "scissors", str);
- sq_quote_argv(&sb, state->git_apply_opts.argv, 0);
+ sq_quote_argv(&sb, state->git_apply_opts.argv);
write_state_text(state, "apply-opt", sb.buf);
if (state->rebasing)
oidclr(&state->orig_commit);
unlink(am_path(state, "original-commit"));
+ delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
if (!get_oid("HEAD", &head))
write_state_text(state, "abort-safety", oid_to_hex(&head));
oidcpy(&state->orig_commit, &commit_oid);
write_state_text(state, "original-commit", oid_to_hex(&commit_oid));
+ update_ref("am", "REBASE_HEAD", &commit_oid,
+ NULL, REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR);
return 0;
}
setenv("GIT_COMMITTER_DATE",
state->ignore_date ? "" : state->author_date, 1);
- if (commit_tree(state->msg, state->msg_len, tree.hash, parents, commit.hash,
- author, state->sign_commit))
+ if (commit_tree(state->msg, state->msg_len, &tree, parents, &commit,
+ author, state->sign_commit))
die(_("failed to write commit object"));
reflog_msg = getenv("GIT_REFLOG_ACTION");
git_config_get_bool("advice.amworkdir", &advice_amworkdir);
if (advice_amworkdir)
- printf_ln(_("The copy of the patch that failed is found in: %s"),
- am_path(state, "patch"));
+ printf_ln(_("Use 'git am --show-current-patch' to see the failed patch"));
die_user_resolve(state);
}
am_destroy(state);
}
+static int show_patch(struct am_state *state)
+{
+ struct strbuf sb = STRBUF_INIT;
+ const char *patch_path;
+ int len;
+
+ if (!is_null_oid(&state->orig_commit)) {
+ const char *av[4] = { "show", NULL, "--", NULL };
+ char *new_oid_str;
+ int ret;
+
+ av[1] = new_oid_str = xstrdup(oid_to_hex(&state->orig_commit));
+ ret = run_command_v_opt(av, RUN_GIT_CMD);
+ free(new_oid_str);
+ return ret;
+ }
+
+ patch_path = am_path(state, msgnum(state));
+ len = strbuf_read_file(&sb, patch_path, 0);
+ if (len < 0)
+ die_errno(_("failed to read '%s'"), patch_path);
+
+ setup_pager();
+ write_in_full(1, sb.buf, sb.len);
+ strbuf_release(&sb);
+ return 0;
+}
+
/**
* parse_options() callback that validates and sets opt->value to the
* PATCH_FORMAT_* enum value corresponding to `arg`.
RESUME_APPLY,
RESUME_RESOLVED,
RESUME_SKIP,
- RESUME_ABORT
+ RESUME_ABORT,
+ RESUME_QUIT,
+ RESUME_SHOW_PATCH
};
static int git_am_config(const char *k, const char *v, void *cb)
int patch_format = PATCH_FORMAT_UNKNOWN;
enum resume_mode resume = RESUME_FALSE;
int in_progress;
+ int ret = 0;
const char * const usage[] = {
N_("git am [<options>] [(<mbox> | <Maildir>)...]"),
OPT_CMDMODE(0, "abort", &resume,
N_("restore the original branch and abort the patching operation."),
RESUME_ABORT),
+ OPT_CMDMODE(0, "quit", &resume,
+ N_("abort the patching operation but keep HEAD where it is."),
+ RESUME_QUIT),
+ OPT_CMDMODE(0, "show-current-patch", &resume,
+ N_("show the patch being applied."),
+ RESUME_SHOW_PATCH),
OPT_BOOL(0, "committer-date-is-author-date",
&state.committer_date_is_author_date,
N_("lie about committer date")),
* stray directories.
*/
if (file_exists(state.dir) && !state.rebasing) {
- if (resume == RESUME_ABORT) {
+ if (resume == RESUME_ABORT || resume == RESUME_QUIT) {
am_destroy(&state);
am_state_release(&state);
return 0;
case RESUME_ABORT:
am_abort(&state);
break;
+ case RESUME_QUIT:
+ am_rerere_clear();
+ am_destroy(&state);
+ break;
+ case RESUME_SHOW_PATCH:
+ ret = show_patch(&state);
+ break;
default:
die("BUG: invalid resume value");
}
am_state_release(&state);
- return 0;
+ return ret;
}
buf = packet_read_line(fd[0], NULL);
if (!buf)
- die(_("git archive: expected ACK/NAK, got EOF"));
+ die(_("git archive: expected ACK/NAK, got a flush packet"));
if (strcmp(buf, "ACK")) {
if (starts_with(buf, "NACK "))
die(_("git archive: NACK %s"), buf + 5);
return 0;
}
+static int is_a_rev(const char *name)
+{
+ struct object_id oid;
+
+ if (get_oid(name, &oid))
+ return 0;
+ return OBJ_NONE < sha1_object_info(oid.hash, NULL);
+}
+
int cmd_blame(int argc, const char **argv, const char *prefix)
{
struct rev_info revs;
} else {
if (argc < 2)
usage_with_options(blame_opt_usage, options);
- path = add_prefix(prefix, argv[argc - 1]);
- if (argc == 3 && !file_exists(path)) { /* (2b) */
+ if (argc == 3 && is_a_rev(argv[argc - 1])) { /* (2b) */
path = add_prefix(prefix, argv[1]);
argv[1] = argv[2];
+ } else { /* (2a) */
+ if (argc == 2 && is_a_rev(argv[1]) && !get_git_work_tree())
+ die("missing <path> to blame");
+ path = add_prefix(prefix, argv[argc - 1]);
}
argv[argc - 1] = "--";
-
- setup_work_tree();
- if (!file_exists(path))
- die_errno("cannot stat path '%s'", path);
}
revs.disable_stdin = 1;
OPT_BOOL('l', "create-reflog", &reflog, N_("create the branch's reflog")),
OPT_BOOL(0, "edit-description", &edit_description,
N_("edit the description for the branch")),
- OPT__FORCE(&force, N_("force creation, move/rename, deletion")),
+ OPT__FORCE(&force, N_("force creation, move/rename, deletion"), PARSE_OPT_NOCOMPLETE),
OPT_MERGED(&filter, N_("print only branches that are merged")),
OPT_NO_MERGED(&filter, N_("print only branches that are not merged")),
OPT_COLUMN(0, "column", &colopts, N_("list branches in columns")),
buf = NULL;
switch (opt) {
case 't':
- oi.typename = &sb;
+ oi.type_name = &sb;
if (sha1_object_info_extended(oid.hash, &oi, flags) < 0)
die("git cat-file: could not get object info");
if (sb.len) {
if (data->mark_query)
data->info.typep = &data->type;
else
- strbuf_addstr(sb, typename(data->type));
+ strbuf_addstr(sb, type_name(data->type));
} else if (is_atom("objectsize", atom, len)) {
if (data->mark_query)
data->info.sizep = &data->size;
for_each_loose_object(batch_loose_object, &sa, 0);
for_each_packed_object(batch_packed_object, &sa, 0);
+ if (repository_format_partial_clone)
+ warning("This repository has extensions.partialClone set. Some objects may not be loaded.");
cb.opt = opt;
cb.expand = &data;
{
const char *full_path;
char *seen;
- int num_ignored = 0, dtype = DT_UNKNOWN, i;
+ int num_ignored = 0, i;
struct exclude *exclude;
struct pathspec pathspec;
full_path = pathspec.items[i].match;
exclude = NULL;
if (!seen[i]) {
+ int dtype = DT_UNKNOWN;
exclude = last_exclude_matching(dir, &the_index,
full_path, &dtype);
}
struct option builtin_checkout_index_options[] = {
OPT_BOOL('a', "all", &all,
N_("check out all files in the index")),
- OPT__FORCE(&force, N_("force overwrite of existing files")),
+ OPT__FORCE(&force, N_("force overwrite of existing files"), 0),
OPT__QUIET(&quiet,
N_("no warning for existing files and files not in index")),
OPT_BOOL('n', "no-create", ¬_new,
struct tree *source_tree;
};
-static int post_checkout_hook(struct commit *old, struct commit *new,
+static int post_checkout_hook(struct commit *old_commit, struct commit *new_commit,
int changed)
{
return run_hook_le(NULL, "post-checkout",
- oid_to_hex(old ? &old->object.oid : &null_oid),
- oid_to_hex(new ? &new->object.oid : &null_oid),
+ oid_to_hex(old_commit ? &old_commit->object.oid : &null_oid),
+ oid_to_hex(new_commit ? &new_commit->object.oid : &null_oid),
changed ? "1" : "0", NULL);
- /* "new" can be NULL when checking out from the index before
+ /* "new_commit" can be NULL when checking out from the index before
a commit exists. */
}
* (it also writes the merge result to the object database even
* when it may contain conflicts).
*/
- if (write_sha1_file(result_buf.ptr, result_buf.size,
- blob_type, oid.hash))
+ if (write_object_file(result_buf.ptr, result_buf.size, blob_type, &oid))
die(_("Unable to add merge result for '%s'"), path);
free(result_buf.ptr);
ce = make_cache_entry(mode, oid.hash, path, 2, 0);
}
static int merge_working_tree(const struct checkout_opts *opts,
- struct branch_info *old,
- struct branch_info *new,
+ struct branch_info *old_branch_info,
+ struct branch_info *new_branch_info,
int *writeout_error)
{
int ret;
resolve_undo_clear();
if (opts->force) {
- ret = reset_tree(new->commit->tree, opts, 1, writeout_error);
+ ret = reset_tree(new_branch_info->commit->tree, opts, 1, writeout_error);
if (ret)
return ret;
} else {
topts.initial_checkout = is_cache_unborn();
topts.update = 1;
topts.merge = 1;
- topts.gently = opts->merge && old->commit;
+ topts.gently = opts->merge && old_branch_info->commit;
topts.verbose_update = opts->show_progress;
topts.fn = twoway_merge;
if (opts->overwrite_ignore) {
topts.dir->flags |= DIR_SHOW_IGNORED;
setup_standard_excludes(topts.dir);
}
- tree = parse_tree_indirect(old->commit ?
- &old->commit->object.oid :
+ tree = parse_tree_indirect(old_branch_info->commit ?
+ &old_branch_info->commit->object.oid :
the_hash_algo->empty_tree);
init_tree_desc(&trees[0], tree->buffer, tree->size);
- tree = parse_tree_indirect(&new->commit->object.oid);
+ tree = parse_tree_indirect(&new_branch_info->commit->object.oid);
init_tree_desc(&trees[1], tree->buffer, tree->size);
ret = unpack_trees(2, trees, &topts);
return 1;
/*
- * Without old->commit, the below is the same as
+ * Without old_branch_info->commit, the below is the same as
* the two-tree unpack we already tried and failed.
*/
- if (!old->commit)
+ if (!old_branch_info->commit)
return 1;
/* Do more real merge */
o.verbosity = 0;
work = write_tree_from_memory(&o);
- ret = reset_tree(new->commit->tree, opts, 1,
+ ret = reset_tree(new_branch_info->commit->tree, opts, 1,
writeout_error);
if (ret)
return ret;
- o.ancestor = old->name;
- o.branch1 = new->name;
+ o.ancestor = old_branch_info->name;
+ o.branch1 = new_branch_info->name;
o.branch2 = "local";
- ret = merge_trees(&o, new->commit->tree, work,
- old->commit->tree, &result);
+ ret = merge_trees(&o, new_branch_info->commit->tree, work,
+ old_branch_info->commit->tree, &result);
if (ret < 0)
exit(128);
- ret = reset_tree(new->commit->tree, opts, 0,
+ ret = reset_tree(new_branch_info->commit->tree, opts, 0,
writeout_error);
strbuf_release(&o.obuf);
if (ret)
die(_("unable to write new index file"));
if (!opts->force && !opts->quiet)
- show_local_changes(&new->commit->object, &opts->diff_options);
+ show_local_changes(&new_branch_info->commit->object, &opts->diff_options);
return 0;
}
-static void report_tracking(struct branch_info *new)
+static void report_tracking(struct branch_info *new_branch_info)
{
struct strbuf sb = STRBUF_INIT;
- struct branch *branch = branch_get(new->name);
+ struct branch *branch = branch_get(new_branch_info->name);
- if (!format_tracking_info(branch, &sb))
+ if (!format_tracking_info(branch, &sb, AHEAD_BEHIND_FULL))
return;
fputs(sb.buf, stdout);
strbuf_release(&sb);
}
static void update_refs_for_switch(const struct checkout_opts *opts,
- struct branch_info *old,
- struct branch_info *new)
+ struct branch_info *old_branch_info,
+ struct branch_info *new_branch_info)
{
struct strbuf msg = STRBUF_INIT;
const char *old_desc, *reflog_msg;
free(refname);
}
else
- create_branch(opts->new_branch, new->name,
+ create_branch(opts->new_branch, new_branch_info->name,
opts->new_branch_force ? 1 : 0,
opts->new_branch_force ? 1 : 0,
opts->new_branch_log,
opts->quiet,
opts->track);
- new->name = opts->new_branch;
- setup_branch_path(new);
+ new_branch_info->name = opts->new_branch;
+ setup_branch_path(new_branch_info);
}
- old_desc = old->name;
- if (!old_desc && old->commit)
- old_desc = oid_to_hex(&old->commit->object.oid);
+ old_desc = old_branch_info->name;
+ if (!old_desc && old_branch_info->commit)
+ old_desc = oid_to_hex(&old_branch_info->commit->object.oid);
reflog_msg = getenv("GIT_REFLOG_ACTION");
if (!reflog_msg)
strbuf_addf(&msg, "checkout: moving from %s to %s",
- old_desc ? old_desc : "(invalid)", new->name);
+ old_desc ? old_desc : "(invalid)", new_branch_info->name);
else
strbuf_insert(&msg, 0, reflog_msg, strlen(reflog_msg));
- if (!strcmp(new->name, "HEAD") && !new->path && !opts->force_detach) {
+ if (!strcmp(new_branch_info->name, "HEAD") && !new_branch_info->path && !opts->force_detach) {
/* Nothing to do. */
- } else if (opts->force_detach || !new->path) { /* No longer on any branch. */
- update_ref(msg.buf, "HEAD", &new->commit->object.oid, NULL,
+ } else if (opts->force_detach || !new_branch_info->path) { /* No longer on any branch. */
+ update_ref(msg.buf, "HEAD", &new_branch_info->commit->object.oid, NULL,
REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR);
if (!opts->quiet) {
- if (old->path &&
+ if (old_branch_info->path &&
advice_detached_head && !opts->force_detach)
- detach_advice(new->name);
- describe_detached_head(_("HEAD is now at"), new->commit);
+ detach_advice(new_branch_info->name);
+ describe_detached_head(_("HEAD is now at"), new_branch_info->commit);
}
- } else if (new->path) { /* Switch branches. */
- if (create_symref("HEAD", new->path, msg.buf) < 0)
+ } else if (new_branch_info->path) { /* Switch branches. */
+ if (create_symref("HEAD", new_branch_info->path, msg.buf) < 0)
die(_("unable to update HEAD"));
if (!opts->quiet) {
- if (old->path && !strcmp(new->path, old->path)) {
+ if (old_branch_info->path && !strcmp(new_branch_info->path, old_branch_info->path)) {
if (opts->new_branch_force)
fprintf(stderr, _("Reset branch '%s'\n"),
- new->name);
+ new_branch_info->name);
else
fprintf(stderr, _("Already on '%s'\n"),
- new->name);
+ new_branch_info->name);
} else if (opts->new_branch) {
if (opts->branch_exists)
- fprintf(stderr, _("Switched to and reset branch '%s'\n"), new->name);
+ fprintf(stderr, _("Switched to and reset branch '%s'\n"), new_branch_info->name);
else
- fprintf(stderr, _("Switched to a new branch '%s'\n"), new->name);
+ fprintf(stderr, _("Switched to a new branch '%s'\n"), new_branch_info->name);
} else {
fprintf(stderr, _("Switched to branch '%s'\n"),
- new->name);
+ new_branch_info->name);
}
}
- if (old->path && old->name) {
- if (!ref_exists(old->path) && reflog_exists(old->path))
- delete_reflog(old->path);
+ if (old_branch_info->path && old_branch_info->name) {
+ if (!ref_exists(old_branch_info->path) && reflog_exists(old_branch_info->path))
+ delete_reflog(old_branch_info->path);
}
}
remove_branch_state();
strbuf_release(&msg);
if (!opts->quiet &&
- (new->path || (!opts->force_detach && !strcmp(new->name, "HEAD"))))
- report_tracking(new);
+ (new_branch_info->path || (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD"))))
+ report_tracking(new_branch_info);
}
static int add_pending_uninteresting_ref(const char *refname,
* HEAD. If it is not reachable from any ref, this is the last chance
* for the user to do so without resorting to reflog.
*/
-static void orphaned_commit_warning(struct commit *old, struct commit *new)
+static void orphaned_commit_warning(struct commit *old_commit, struct commit *new_commit)
{
struct rev_info revs;
- struct object *object = &old->object;
+ struct object *object = &old_commit->object;
init_revisions(&revs, NULL);
setup_revisions(0, NULL, &revs, NULL);
add_pending_object(&revs, object, oid_to_hex(&object->oid));
for_each_ref(add_pending_uninteresting_ref, &revs);
- add_pending_oid(&revs, "HEAD", &new->object.oid, UNINTERESTING);
+ add_pending_oid(&revs, "HEAD", &new_commit->object.oid, UNINTERESTING);
if (prepare_revision_walk(&revs))
die(_("internal error in revision walk"));
- if (!(old->object.flags & UNINTERESTING))
- suggest_reattach(old, &revs);
+ if (!(old_commit->object.flags & UNINTERESTING))
+ suggest_reattach(old_commit, &revs);
else
- describe_detached_head(_("Previous HEAD position was"), old);
+ describe_detached_head(_("Previous HEAD position was"), old_commit);
/* Clean up objects used, as they will be reused. */
clear_commit_marks_all(ALL_REV_FLAGS);
}
static int switch_branches(const struct checkout_opts *opts,
- struct branch_info *new)
+ struct branch_info *new_branch_info)
{
int ret = 0;
- struct branch_info old;
+ struct branch_info old_branch_info;
void *path_to_free;
struct object_id rev;
int flag, writeout_error = 0;
- memset(&old, 0, sizeof(old));
- old.path = path_to_free = resolve_refdup("HEAD", 0, &rev, &flag);
- if (old.path)
- old.commit = lookup_commit_reference_gently(&rev, 1);
+ memset(&old_branch_info, 0, sizeof(old_branch_info));
+ old_branch_info.path = path_to_free = resolve_refdup("HEAD", 0, &rev, &flag);
+ if (old_branch_info.path)
+ old_branch_info.commit = lookup_commit_reference_gently(&rev, 1);
if (!(flag & REF_ISSYMREF))
- old.path = NULL;
+ old_branch_info.path = NULL;
- if (old.path)
- skip_prefix(old.path, "refs/heads/", &old.name);
+ if (old_branch_info.path)
+ skip_prefix(old_branch_info.path, "refs/heads/", &old_branch_info.name);
- if (!new->name) {
- new->name = "HEAD";
- new->commit = old.commit;
- if (!new->commit)
+ if (!new_branch_info->name) {
+ new_branch_info->name = "HEAD";
+ new_branch_info->commit = old_branch_info.commit;
+ if (!new_branch_info->commit)
die(_("You are on a branch yet to be born"));
- parse_commit_or_die(new->commit);
+ parse_commit_or_die(new_branch_info->commit);
}
- ret = merge_working_tree(opts, &old, new, &writeout_error);
+ ret = merge_working_tree(opts, &old_branch_info, new_branch_info, &writeout_error);
if (ret) {
free(path_to_free);
return ret;
}
- if (!opts->quiet && !old.path && old.commit && new->commit != old.commit)
- orphaned_commit_warning(old.commit, new->commit);
+ if (!opts->quiet && !old_branch_info.path && old_branch_info.commit && new_branch_info->commit != old_branch_info.commit)
+ orphaned_commit_warning(old_branch_info.commit, new_branch_info->commit);
- update_refs_for_switch(opts, &old, new);
+ update_refs_for_switch(opts, &old_branch_info, new_branch_info);
- ret = post_checkout_hook(old.commit, new->commit, 1);
+ ret = post_checkout_hook(old_branch_info.commit, new_branch_info->commit, 1);
free(path_to_free);
return ret || writeout_error;
}
static int parse_branchname_arg(int argc, const char **argv,
int dwim_new_local_branch_ok,
- struct branch_info *new,
+ struct branch_info *new_branch_info,
struct checkout_opts *opts,
struct object_id *rev)
{
argv++;
argc--;
- new->name = arg;
- setup_branch_path(new);
+ new_branch_info->name = arg;
+ setup_branch_path(new_branch_info);
- if (!check_refname_format(new->path, 0) &&
- !read_ref(new->path, &branch_rev))
+ if (!check_refname_format(new_branch_info->path, 0) &&
+ !read_ref(new_branch_info->path, &branch_rev))
oidcpy(rev, &branch_rev);
else
- new->path = NULL; /* not an existing branch */
+ new_branch_info->path = NULL; /* not an existing branch */
- new->commit = lookup_commit_reference_gently(rev, 1);
- if (!new->commit) {
+ new_branch_info->commit = lookup_commit_reference_gently(rev, 1);
+ if (!new_branch_info->commit) {
/* not a commit */
*source_tree = parse_tree_indirect(rev);
} else {
- parse_commit_or_die(new->commit);
- *source_tree = new->commit->tree;
+ parse_commit_or_die(new_branch_info->commit);
+ *source_tree = new_branch_info->commit->tree;
}
if (!*source_tree) /* case (1): want a tree */
}
static int checkout_branch(struct checkout_opts *opts,
- struct branch_info *new)
+ struct branch_info *new_branch_info)
{
if (opts->pathspec.nr)
die(_("paths cannot be used with switching branches"));
} else if (opts->track == BRANCH_TRACK_UNSPECIFIED)
opts->track = git_branch_track;
- if (new->name && !new->commit)
+ if (new_branch_info->name && !new_branch_info->commit)
die(_("Cannot switch branch to a non-commit '%s'"),
- new->name);
+ new_branch_info->name);
- if (new->path && !opts->force_detach && !opts->new_branch &&
+ if (new_branch_info->path && !opts->force_detach && !opts->new_branch &&
!opts->ignore_other_worktrees) {
int flag;
char *head_ref = resolve_refdup("HEAD", 0, NULL, &flag);
if (head_ref &&
- (!(flag & REF_ISSYMREF) || strcmp(head_ref, new->path)))
- die_if_checked_out(new->path, 1);
+ (!(flag & REF_ISSYMREF) || strcmp(head_ref, new_branch_info->path)))
+ die_if_checked_out(new_branch_info->path, 1);
free(head_ref);
}
- if (!new->commit && opts->new_branch) {
+ if (!new_branch_info->commit && opts->new_branch) {
struct object_id rev;
int flag;
(flag & REF_ISSYMREF) && is_null_oid(&rev))
return switch_unborn_to_new_branch(opts);
}
- return switch_branches(opts, new);
+ return switch_branches(opts, new_branch_info);
}
int cmd_checkout(int argc, const char **argv, const char *prefix)
{
struct checkout_opts opts;
- struct branch_info new;
+ struct branch_info new_branch_info;
char *conflict_style = NULL;
int dwim_new_local_branch = 1;
struct option options[] = {
2),
OPT_SET_INT('3', "theirs", &opts.writeout_stage, N_("checkout their version for unmerged files"),
3),
- OPT__FORCE(&opts.force, N_("force checkout (throw away local modifications)")),
+ OPT__FORCE(&opts.force, N_("force checkout (throw away local modifications)"),
+ PARSE_OPT_NOCOMPLETE),
OPT_BOOL('m', "merge", &opts.merge, N_("perform a 3-way merge with the new branch")),
- OPT_BOOL(0, "overwrite-ignore", &opts.overwrite_ignore, N_("update ignored files (default)")),
+ OPT_BOOL_F(0, "overwrite-ignore", &opts.overwrite_ignore,
+ N_("update ignored files (default)"),
+ PARSE_OPT_NOCOMPLETE),
OPT_STRING(0, "conflict", &conflict_style, N_("style"),
N_("conflict style (merge or diff3)")),
OPT_BOOL('p', "patch", &opts.patch_mode, N_("select hunks interactively")),
};
memset(&opts, 0, sizeof(opts));
- memset(&new, 0, sizeof(new));
+ memset(&new_branch_info, 0, sizeof(new_branch_info));
opts.overwrite_ignore = 1;
opts.prefix = prefix;
opts.show_progress = -1;
opts.track == BRANCH_TRACK_UNSPECIFIED &&
!opts.new_branch;
int n = parse_branchname_arg(argc, argv, dwim_ok,
- &new, &opts, &rev);
+ &new_branch_info, &opts, &rev);
argv += n;
argc -= n;
}
UNLEAK(opts);
if (opts.patch_mode || opts.pathspec.nr)
- return checkout_paths(&opts, new.name);
+ return checkout_paths(&opts, new_branch_info.name);
else
- return checkout_branch(&opts, &new);
+ return checkout_branch(&opts, &new_branch_info);
}
struct option options[] = {
OPT__QUIET(&quiet, N_("do not print names of files removed")),
OPT__DRY_RUN(&dry_run, N_("dry run")),
- OPT__FORCE(&force, N_("force")),
+ OPT__FORCE(&force, N_("force"), PARSE_OPT_NOCOMPLETE),
OPT_BOOL('i', "interactive", &interactive, N_("interactive cleaning")),
OPT_BOOL('d', NULL, &remove_directories,
N_("remove whole directories")),
#include "run-command.h"
#include "connected.h"
#include "packfile.h"
+#include "list-objects-filter-options.h"
/*
* Overall FIXMEs:
static int option_dissociate;
static int max_jobs = -1;
static struct string_list option_recurse_submodules = STRING_LIST_INIT_NODUP;
+static struct list_objects_filter_options filter_options;
static int recurse_submodules_cb(const struct option *opt,
const char *arg, int unset)
TRANSPORT_FAMILY_IPV4),
OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
TRANSPORT_FAMILY_IPV6),
+ OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
OPT_END()
};
struct refspec *refspec;
const char *fetch_pattern;
+ fetch_if_missing = 0;
+
packet_trace_identity("clone");
argc = parse_options(argc, argv, prefix, builtin_clone_options,
builtin_clone_usage, 0);
warning(_("--shallow-since is ignored in local clones; use file:// instead."));
if (option_not.nr)
warning(_("--shallow-exclude is ignored in local clones; use file:// instead."));
+ if (filter_options.choice)
+ warning(_("--filter is ignored in local clones; use file:// instead."));
if (!access(mkpath("%s/shallow", path), F_OK)) {
if (option_local > 0)
warning(_("source repository is shallow, ignoring --local"));
transport_set_option(transport, TRANS_OPT_UPLOADPACK,
option_upload_pack);
- if (transport->smart_options && !deepen)
+ if (filter_options.choice) {
+ transport_set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER,
+ filter_options.filter_spec);
+ transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
+ }
+
+ if (transport->smart_options && !deepen && !filter_options.choice)
transport->smart_options->check_self_contained_and_connected = 1;
refs = transport_get_remote_refs(transport);
write_refspec_config(src_ref_prefix, our_head_points_at,
remote_head_points_at, &branch_top);
+ if (filter_options.choice)
+ partial_clone_register("origin", &filter_options);
+
if (is_local)
clone_local(path, git_dir);
else if (refs && complete_refs_before_fetch)
transport_fetch_refs(transport, mapped_refs);
update_remote_refs(refs, mapped_refs, remote_head_points_at,
- branch_top.buf, reflog_msg.buf, transport, !is_local);
+ branch_top.buf, reflog_msg.buf, transport,
+ !is_local && !filter_options.choice);
update_head(our_head_points_at, remote_head, reflog_msg.buf);
}
junk_mode = JUNK_LEAVE_REPO;
+ fetch_if_missing = 1;
err = checkout(submodule_progress);
strbuf_release(&reflog_msg);
die_errno("git commit-tree: failed to read");
}
- if (commit_tree(buffer.buf, buffer.len, tree_oid.hash, parents,
- commit_oid.hash, NULL, sign_commit)) {
+ if (commit_tree(buffer.buf, buffer.len, &tree_oid, parents, &commit_oid,
+ NULL, sign_commit)) {
strbuf_release(&buffer);
return 1;
}
#include "gpg-interface.h"
#include "column.h"
#include "sequencer.h"
-#include "notes-utils.h"
#include "mailmap.h"
-#include "sigchain.h"
static const char * const builtin_commit_usage[] = {
N_("git commit [<options>] [--] <pathspec>..."),
NULL
};
-static const char implicit_ident_advice_noconfig[] =
-N_("Your name and email address were configured automatically based\n"
-"on your username and hostname. Please check that they are accurate.\n"
-"You can suppress this message by setting them explicitly. Run the\n"
-"following command and follow the instructions in your editor to edit\n"
-"your configuration file:\n"
-"\n"
-" git config --global --edit\n"
-"\n"
-"After doing this, you may fix the identity used for this commit with:\n"
-"\n"
-" git commit --amend --reset-author\n");
-
-static const char implicit_ident_advice_config[] =
-N_("Your name and email address were configured automatically based\n"
-"on your username and hostname. Please check that they are accurate.\n"
-"You can suppress this message by setting them explicitly:\n"
-"\n"
-" git config --global user.name \"Your Name\"\n"
-" git config --global user.email you@example.com\n"
-"\n"
-"After doing this, you may fix the identity used for this commit with:\n"
-"\n"
-" git commit --amend --reset-author\n");
-
static const char empty_amend_advice[] =
N_("You asked to amend the most recent commit, but doing so would make\n"
"it empty. You can repeat your command with --allow-empty, or you can\n"
"Then \"git cherry-pick --continue\" will resume cherry-picking\n"
"the remaining commits.\n");
-static GIT_PATH_FUNC(git_path_commit_editmsg, "COMMIT_EDITMSG")
-
static const char *use_message_buffer;
static struct lock_file index_lock; /* real index */
static struct lock_file false_lock; /* used only for partial commits */
* if editor is used, and only the whitespaces if the message
* is specified explicitly.
*/
-static enum {
- CLEANUP_SPACE,
- CLEANUP_NONE,
- CLEANUP_SCISSORS,
- CLEANUP_ALL
-} cleanup_mode;
+static enum commit_msg_cleanup_mode cleanup_mode;
static const char *cleanup_arg;
static enum commit_whence whence;
if (active_cache_changed
|| !cache_tree_fully_valid(active_cache_tree))
update_main_cache_tree(WRITE_TREE_SILENT);
- if (active_cache_changed) {
- if (write_locked_index(&the_index, &index_lock,
- COMMIT_LOCK))
- die(_("unable to write new_index file"));
- } else {
- rollback_lock_file(&index_lock);
- }
+ if (write_locked_index(&the_index, &index_lock,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
+ die(_("unable to write new_index file"));
commit_style = COMMIT_AS_IS;
ret = get_index_file();
goto out;
struct strbuf sb = STRBUF_INIT;
const char *hook_arg1 = NULL;
const char *hook_arg2 = NULL;
- int clean_message_contents = (cleanup_mode != CLEANUP_NONE);
+ int clean_message_contents = (cleanup_mode != COMMIT_MSG_CLEANUP_NONE);
int old_display_comment_prefix;
/* This checks and barfs if author is badly specified */
struct ident_split ci, ai;
if (whence != FROM_COMMIT) {
- if (cleanup_mode == CLEANUP_SCISSORS)
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS)
wt_status_add_cut_line(s->fp);
status_printf_ln(s, GIT_COLOR_NORMAL,
whence == FROM_MERGE
}
fprintf(s->fp, "\n");
- if (cleanup_mode == CLEANUP_ALL)
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_ALL)
status_printf(s, GIT_COLOR_NORMAL,
_("Please enter the commit message for your changes."
" Lines starting\nwith '%c' will be ignored, and an empty"
" message aborts the commit.\n"), comment_line_char);
- else if (cleanup_mode == CLEANUP_SCISSORS && whence == FROM_COMMIT)
+ else if (cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS &&
+ whence == FROM_COMMIT)
wt_status_add_cut_line(s->fp);
- else /* CLEANUP_SPACE, that is. */
+ else /* COMMIT_MSG_CLEANUP_SPACE, that is. */
status_printf(s, GIT_COLOR_NORMAL,
_("Please enter the commit message for your changes."
" Lines starting\n"
return 1;
}
-static int rest_is_empty(struct strbuf *sb, int start)
-{
- int i, eol;
- const char *nl;
-
- /* Check if the rest is just whitespace and Signed-off-by's. */
- for (i = start; i < sb->len; i++) {
- nl = memchr(sb->buf + i, '\n', sb->len - i);
- if (nl)
- eol = nl - sb->buf;
- else
- eol = sb->len;
-
- if (strlen(sign_off_header) <= eol - i &&
- starts_with(sb->buf + i, sign_off_header)) {
- i = eol;
- continue;
- }
- while (i < eol)
- if (!isspace(sb->buf[i++]))
- return 0;
- }
-
- return 1;
-}
-
-/*
- * Find out if the message in the strbuf contains only whitespace and
- * Signed-off-by lines.
- */
-static int message_is_empty(struct strbuf *sb)
-{
- if (cleanup_mode == CLEANUP_NONE && sb->len)
- return 0;
- return rest_is_empty(sb, 0);
-}
-
-/*
- * See if the user edited the message in the editor or left what
- * was in the template intact
- */
-static int template_untouched(struct strbuf *sb)
-{
- struct strbuf tmpl = STRBUF_INIT;
- const char *start;
-
- if (cleanup_mode == CLEANUP_NONE && sb->len)
- return 0;
-
- if (!template_file || strbuf_read_file(&tmpl, template_file, 0) <= 0)
- return 0;
-
- strbuf_stripspace(&tmpl, cleanup_mode == CLEANUP_ALL);
- if (!skip_prefix(sb->buf, tmpl.buf, &start))
- start = sb->buf;
- strbuf_release(&tmpl);
- return rest_is_empty(sb, start - sb->buf);
-}
-
static const char *find_author_by_nickname(const char *name)
{
struct rev_info revs;
s->show_branch = status_deferred_config.show_branch;
if (s->show_branch < 0)
s->show_branch = 0;
+
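+ /*
+ * Unless the user chose otherwise (--[no-]ahead-behind), default to
+ * computing the full, possibly expensive, ahead/behind counts.
+ */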
+ if (s->ahead_behind_flags == AHEAD_BEHIND_UNSPECIFIED)
+ s->ahead_behind_flags = AHEAD_BEHIND_FULL;
}
static int parse_and_validate_options(int argc, const char *argv[],
if (argc == 0 && (also || (only && !amend && !allow_empty)))
die(_("No paths with --include/--only does not make sense."));
if (!cleanup_arg || !strcmp(cleanup_arg, "default"))
- cleanup_mode = use_editor ? CLEANUP_ALL : CLEANUP_SPACE;
+ cleanup_mode = use_editor ? COMMIT_MSG_CLEANUP_ALL :
+ COMMIT_MSG_CLEANUP_SPACE;
else if (!strcmp(cleanup_arg, "verbatim"))
- cleanup_mode = CLEANUP_NONE;
+ cleanup_mode = COMMIT_MSG_CLEANUP_NONE;
else if (!strcmp(cleanup_arg, "whitespace"))
- cleanup_mode = CLEANUP_SPACE;
+ cleanup_mode = COMMIT_MSG_CLEANUP_SPACE;
else if (!strcmp(cleanup_arg, "strip"))
- cleanup_mode = CLEANUP_ALL;
+ cleanup_mode = COMMIT_MSG_CLEANUP_ALL;
else if (!strcmp(cleanup_arg, "scissors"))
- cleanup_mode = use_editor ? CLEANUP_SCISSORS : CLEANUP_SPACE;
+ cleanup_mode = use_editor ? COMMIT_MSG_CLEANUP_SCISSORS :
+ COMMIT_MSG_CLEANUP_SPACE;
else
die(_("Invalid cleanup mode %s"), cleanup_arg);
N_("show branch information")),
OPT_BOOL(0, "show-stash", &s.show_stash,
N_("show stash information")),
+ OPT_BOOL(0, "ahead-behind", &s.ahead_behind_flags,
+ N_("compute full ahead/behind values")),
{ OPTION_CALLBACK, 0, "porcelain", &status_format,
N_("version"), N_("machine-readable output"),
PARSE_OPT_OPTARG, opt_parse_porcelain },
return 0;
}
-static const char *implicit_ident_advice(void)
-{
- char *user_config = expand_user_path("~/.gitconfig", 0);
- char *xdg_config = xdg_config_home("config");
- int config_exists = file_exists(user_config) || file_exists(xdg_config);
-
- free(user_config);
- free(xdg_config);
-
- if (config_exists)
- return _(implicit_ident_advice_config);
- else
- return _(implicit_ident_advice_noconfig);
-
-}
-
-static void print_summary(const char *prefix, const struct object_id *oid,
- int initial_commit)
-{
- struct rev_info rev;
- struct commit *commit;
- struct strbuf format = STRBUF_INIT;
- const char *head;
- struct pretty_print_context pctx = {0};
- struct strbuf author_ident = STRBUF_INIT;
- struct strbuf committer_ident = STRBUF_INIT;
-
- commit = lookup_commit(oid);
- if (!commit)
- die(_("couldn't look up newly created commit"));
- if (parse_commit(commit))
- die(_("could not parse newly created commit"));
-
- strbuf_addstr(&format, "format:%h] %s");
-
- format_commit_message(commit, "%an <%ae>", &author_ident, &pctx);
- format_commit_message(commit, "%cn <%ce>", &committer_ident, &pctx);
- if (strbuf_cmp(&author_ident, &committer_ident)) {
- strbuf_addstr(&format, "\n Author: ");
- strbuf_addbuf_percentquote(&format, &author_ident);
- }
- if (author_date_is_interesting()) {
- struct strbuf date = STRBUF_INIT;
- format_commit_message(commit, "%ad", &date, &pctx);
- strbuf_addstr(&format, "\n Date: ");
- strbuf_addbuf_percentquote(&format, &date);
- strbuf_release(&date);
- }
- if (!committer_ident_sufficiently_given()) {
- strbuf_addstr(&format, "\n Committer: ");
- strbuf_addbuf_percentquote(&format, &committer_ident);
- if (advice_implicit_identity) {
- strbuf_addch(&format, '\n');
- strbuf_addstr(&format, implicit_ident_advice());
- }
- }
- strbuf_release(&author_ident);
- strbuf_release(&committer_ident);
-
- init_revisions(&rev, prefix);
- setup_revisions(0, NULL, &rev, NULL);
-
- rev.diff = 1;
- rev.diffopt.output_format =
- DIFF_FORMAT_SHORTSTAT | DIFF_FORMAT_SUMMARY;
-
- rev.verbose_header = 1;
- rev.show_root_diff = 1;
- get_commit_format(format.buf, &rev);
- rev.always_show_header = 0;
- rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
- rev.diffopt.break_opt = 0;
- diff_setup_done(&rev.diffopt);
-
- head = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
- if (!head)
- die_errno(_("unable to resolve HEAD after creating commit"));
- if (!strcmp(head, "HEAD"))
- head = _("detached HEAD");
- else
- skip_prefix(head, "refs/heads/", &head);
- printf("[%s%s ", head, initial_commit ? _(" (root-commit)") : "");
-
- if (!log_tree_commit(&rev, commit)) {
- rev.always_show_header = 1;
- rev.use_terminator = 1;
- log_tree_commit(&rev, commit);
- }
-
- strbuf_release(&format);
-}
-
static int git_commit_config(const char *k, const char *v, void *cb)
{
struct wt_status *s = cb;
return git_status_config(k, v, s);
}
-static int run_rewrite_hook(const struct object_id *oldoid,
- const struct object_id *newoid)
-{
- struct child_process proc = CHILD_PROCESS_INIT;
- const char *argv[3];
- int code;
- struct strbuf sb = STRBUF_INIT;
-
- argv[0] = find_hook("post-rewrite");
- if (!argv[0])
- return 0;
-
- argv[1] = "amend";
- argv[2] = NULL;
-
- proc.argv = argv;
- proc.in = -1;
- proc.stdout_to_stderr = 1;
-
- code = start_command(&proc);
- if (code)
- return code;
- strbuf_addf(&sb, "%s %s\n", oid_to_hex(oldoid), oid_to_hex(newoid));
- sigchain_push(SIGPIPE, SIG_IGN);
- write_in_full(proc.in, sb.buf, sb.len);
- close(proc.in);
- strbuf_release(&sb);
- sigchain_pop(SIGPIPE);
- return finish_command(&proc);
-}
-
int run_commit_hook(int editor_is_used, const char *index_file, const char *name, ...)
{
struct argv_array hook_env = ARGV_ARRAY_INIT;
int cmd_commit(int argc, const char **argv, const char *prefix)
{
+ const char *argv_gc_auto[] = {"gc", "--auto", NULL};
static struct wt_status s;
static struct option builtin_commit_options[] = {
OPT__QUIET(&quiet, N_("suppress summary after successful commit")),
OPT_SET_INT(0, "short", &status_format, N_("show status concisely"),
STATUS_FORMAT_SHORT),
OPT_BOOL(0, "branch", &s.show_branch, N_("show branch information")),
+ OPT_BOOL(0, "ahead-behind", &s.ahead_behind_flags,
+ N_("compute full ahead/behind values")),
OPT_SET_INT(0, "porcelain", &status_format,
N_("machine-readable output"), STATUS_FORMAT_PORCELAIN),
OPT_SET_INT(0, "long", &status_format,
struct strbuf sb = STRBUF_INIT;
struct strbuf author_ident = STRBUF_INIT;
const char *index_file, *reflog_msg;
- char *nl;
struct object_id oid;
struct commit_list *parents = NULL;
struct stat statbuf;
struct commit *current_head = NULL;
struct commit_extra_header *extra = NULL;
- struct ref_transaction *transaction;
struct strbuf err = STRBUF_INIT;
if (argc == 2 && !strcmp(argv[1], "-h"))
}
if (verbose || /* Truncate the message just before the diff, if any. */
- cleanup_mode == CLEANUP_SCISSORS)
+ cleanup_mode == COMMIT_MSG_CLEANUP_SCISSORS)
strbuf_setlen(&sb, wt_status_locate_end(sb.buf, sb.len));
- if (cleanup_mode != CLEANUP_NONE)
- strbuf_stripspace(&sb, cleanup_mode == CLEANUP_ALL);
+ if (cleanup_mode != COMMIT_MSG_CLEANUP_NONE)
+ strbuf_stripspace(&sb, cleanup_mode == COMMIT_MSG_CLEANUP_ALL);
- if (message_is_empty(&sb) && !allow_empty_message) {
+ if (message_is_empty(&sb, cleanup_mode) && !allow_empty_message) {
rollback_index_files();
fprintf(stderr, _("Aborting commit due to empty commit message.\n"));
exit(1);
}
- if (template_untouched(&sb) && !allow_empty_message) {
+ if (template_untouched(&sb, template_file, cleanup_mode) && !allow_empty_message) {
rollback_index_files();
fprintf(stderr, _("Aborting commit; you did not edit the message.\n"));
exit(1);
append_merge_tag_headers(parents, &tail);
}
- if (commit_tree_extended(sb.buf, sb.len, active_cache_tree->oid.hash,
- parents, oid.hash, author_ident.buf, sign_commit, extra)) {
+ if (commit_tree_extended(sb.buf, sb.len, &active_cache_tree->oid,
+ parents, &oid, author_ident.buf, sign_commit,
+ extra)) {
rollback_index_files();
die(_("failed to write commit object"));
}
strbuf_release(&author_ident);
free_commit_extra_headers(extra);
- nl = strchr(sb.buf, '\n');
- if (nl)
- strbuf_setlen(&sb, nl + 1 - sb.buf);
- else
- strbuf_addch(&sb, '\n');
- strbuf_insert(&sb, 0, reflog_msg, strlen(reflog_msg));
- strbuf_insert(&sb, strlen(reflog_msg), ": ", 2);
-
- transaction = ref_transaction_begin(&err);
- if (!transaction ||
- ref_transaction_update(transaction, "HEAD", &oid,
- current_head
- ? &current_head->object.oid : &null_oid,
- 0, sb.buf, &err) ||
- ref_transaction_commit(transaction, &err)) {
+ if (update_head_with_reflog(current_head, &oid, reflog_msg, &sb,
+ &err)) {
rollback_index_files();
die("%s", err.buf);
}
- ref_transaction_free(transaction);
unlink(git_path_cherry_pick_head());
unlink(git_path_revert_head());
"not exceeded, and then \"git reset HEAD\" to recover."));
rerere(0);
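+ /* housekeeping: spawn "git gc --auto" after creating the commit */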
+ run_command_v_opt(argv_gc_auto, RUN_GIT_CMD);
run_commit_hook(use_editor, get_index_file(), "post-commit", NULL);
if (amend && !no_post_rewrite) {
- struct notes_rewrite_cfg *cfg;
- cfg = init_copy_notes_for_rewrite("amend");
- if (cfg) {
- /* we are amending, so current_head is not NULL */
- copy_note_for_rewrite(cfg, &current_head->object.oid, &oid);
- finish_copy_notes_for_rewrite(cfg, "Notes added by 'git commit --amend'");
- }
- run_rewrite_hook(&current_head->object.oid, &oid);
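+ /*
+ * commit_post_rewrite() copies notes over to the amended commit
+ * and runs the post-rewrite hook.
+ */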
+ commit_post_rewrite(current_head, &oid);
+ }
+ if (!quiet) {
+ unsigned int flags = 0;
+
+ if (!current_head)
+ flags |= SUMMARY_INITIAL_COMMIT;
+ if (author_date_is_interesting())
+ flags |= SUMMARY_SHOW_AUTHOR_DATE;
+ print_commit_summary(prefix, &oid, flags);
}
- if (!quiet)
- print_summary(prefix, &oid, !current_head);
UNLEAK(err);
UNLEAK(sb);
#define ACTION_GET_COLORBOOL (1<<14)
#define ACTION_GET_URLMATCH (1<<15)
+/*
+ * The actions "ACTION_LIST | ACTION_GET_*" which may produce more than
+ * one line of output and which should therefore be paged.
+ */
+#define PAGING_ACTIONS (ACTION_LIST | ACTION_GET_ALL | \
+ ACTION_GET_REGEXP | ACTION_GET_URLMATCH)
+
#define TYPE_BOOL (1<<0)
#define TYPE_INT (1<<1)
#define TYPE_BOOL_OR_INT (1<<2)
usage_with_options(builtin_config_usage, builtin_config_options);
}
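+ /*
+ * List/get actions can produce many lines of output; honor
+ * pager.config, with the second argument presumably serving as the
+ * default (paging on) when that configuration is unset.
+ */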
+ if (actions & PAGING_ACTIONS)
+ setup_auto_pager("config", 1);
+
if (actions == ACTION_LIST) {
check_argc(argc, 0, 0);
if (config_with_options(show_all_config, NULL,
if (!match_cnt) {
struct object_id *cmit_oid = &cmit->object.oid;
if (always) {
- strbuf_addstr(dst, find_unique_abbrev(cmit_oid->hash, abbrev));
+ strbuf_add_unique_abbrev(dst, cmit_oid->hash, abbrev);
if (suffix)
strbuf_addstr(dst, suffix);
return;
if (cmit)
describe_commit(&oid, &sb);
- else if (lookup_blob(&oid))
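+ /*
+ * sha1_object_info() reads the object header, so this verifies that
+ * the blob actually exists on disk; lookup_blob() would only create
+ * an in-core object without checking for its existence.
+ */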
+ else if (sha1_object_info(oid.hash, NULL) == OBJ_BLOB)
describe_blob(oid, &sb);
else
die(_("%s is neither a commit nor blob"), arg);
if (obj->type == OBJ_TREE)
return stdin_diff_trees((struct tree *)obj, p);
error("Object %s is a %s, not a commit or tree",
- oid_to_hex(&oid), typename(obj->type));
+ oid_to_hex(&oid), type_name(obj->type));
return -1;
}
buf = read_sha1_file(oid->hash, &type, &size);
if (!buf)
die ("Could not read blob %s", oid_to_hex(oid));
- if (check_sha1_signature(oid->hash, buf, size, typename(type)) < 0)
+ if (check_sha1_signature(oid->hash, buf, size, type_name(type)) < 0)
die("sha1 mismatch in blob %s", oid_to_hex(oid));
object = parse_object_buffer(oid, type, size, buf, &eaten);
}
if (tagged->type != OBJ_COMMIT) {
die ("Tag %s tags unexported %s!",
oid_to_hex(&tag->object.oid),
- typename(tagged->type));
+ type_name(tagged->type));
}
p = (struct commit *)tagged;
for (;;) {
if (!commit) {
warning("%s: Unexpected object of type %s, skipping.",
e->name,
- typename(e->item->type));
+ type_name(e->item->type));
continue;
}
continue;
default: /* OBJ_TAG (nested tags) is already handled */
warning("Tag points to object of unexpected type %s, skipping.",
- typename(commit->object.type));
+ type_name(commit->object.type));
continue;
}
struct oid_array shallow = OID_ARRAY_INIT;
struct string_list deepen_not = STRING_LIST_INIT_DUP;
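+ /*
+ * fetch-pack is itself the machinery that fetches; disable lazy
+ * fetching of missing objects so a partial clone cannot recurse
+ * into another fetch.
+ */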
+ fetch_if_missing = 0;
+
packet_trace_identity("fetch-pack");
memset(&args, 0, sizeof(args));
args.update_shallow = 1;
continue;
}
+ if (!strcmp("--from-promisor", arg)) {
+ args.from_promisor = 1;
+ continue;
+ }
+ if (!strcmp("--no-dependents", arg)) {
+ args.no_dependents = 1;
+ continue;
+ }
+ if (skip_prefix(arg, ("--" CL_ARG__FILTER "="), &arg)) {
+ parse_list_objects_filter(&args.filter_options, arg);
+ continue;
+ }
+ if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) {
+ list_objects_filter_set_no_filter(&args.filter_options);
+ continue;
+ }
usage(fetch_pack_usage);
}
if (deepen_not.nr)
#include "argv-array.h"
#include "utf8.h"
#include "packfile.h"
+#include "list-objects-filter-options.h"
static const char * const builtin_fetch_usage[] = {
N_("git fetch [<options>] [<repository> [<refspec>...]]"),
static int prune = -1; /* unspecified */
#define PRUNE_BY_DEFAULT 0 /* do we prune by default? */
+static int fetch_prune_tags_config = -1; /* unspecified */
+static int prune_tags = -1; /* unspecified */
+#define PRUNE_TAGS_BY_DEFAULT 0 /* do we prune tags by default? */
+
static int all, append, dry_run, force, keep, multiple, update_head_ok, verbosity, deepen_relative;
static int progress = -1;
static int tags = TAGS_DEFAULT, unshallow, update_shallow, deepen;
static int shown_url = 0;
static int refmap_alloc, refmap_nr;
static const char **refmap_array;
+static struct list_objects_filter_options filter_options;
static int git_fetch_config(const char *k, const char *v, void *cb)
{
return 0;
}
+ if (!strcmp(k, "fetch.prunetags")) {
+ fetch_prune_tags_config = git_config_bool(k, v);
+ return 0;
+ }
+
if (!strcmp(k, "submodule.recurse")) {
int r = git_config_bool(k, v) ?
RECURSE_SUBMODULES_ON : RECURSE_SUBMODULES_OFF;
N_("append to .git/FETCH_HEAD instead of overwriting")),
OPT_STRING(0, "upload-pack", &upload_pack, N_("path"),
N_("path to upload pack on remote end")),
- OPT__FORCE(&force, N_("force overwrite of local branch")),
+ OPT__FORCE(&force, N_("force overwrite of local branch"), 0),
OPT_BOOL('m', "multiple", &multiple,
N_("fetch from multiple remotes")),
OPT_SET_INT('t', "tags", &tags,
N_("number of submodules fetched in parallel")),
OPT_BOOL('p', "prune", &prune,
N_("prune remote-tracking branches no longer on remote")),
+ OPT_BOOL('P', "prune-tags", &prune_tags,
+ N_("prune local tags no longer on remote and clobber changed tags")),
{ OPTION_CALLBACK, 0, "recurse-submodules", &recurse_submodules, N_("on-demand"),
N_("control recursive fetching of submodules"),
PARSE_OPT_OPTARG, option_fetch_parse_recurse_submodules },
TRANSPORT_FAMILY_IPV4),
OPT_SET_INT('6', "ipv6", &family, N_("use IPv6 addresses only"),
TRANSPORT_FAMILY_IPV6),
+ OPT_PARSE_LIST_OBJECTS_FILTER(&filter_options),
OPT_END()
};
set_option(transport, TRANS_OPT_DEEPEN_RELATIVE, "yes");
if (update_shallow)
set_option(transport, TRANS_OPT_UPDATE_SHALLOW, "yes");
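+ /*
+ * Hand the object filter to the transport and mark the incoming
+ * pack as coming from a promisor remote, so that it is recorded
+ * with a ".promisor" file.
+ */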
+ if (filter_options.choice) {
+ set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER,
+ filter_options.filter_spec);
+ set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
+ }
return transport;
}
argv_array_push(argv, "--dry-run");
if (prune != -1)
argv_array_push(argv, prune ? "--prune" : "--no-prune");
+ if (prune_tags != -1)
+ argv_array_push(argv, prune_tags ? "--prune-tags" : "--no-prune-tags");
if (update_head_ok)
argv_array_push(argv, "--update-head-ok");
if (force)
return result;
}
-static int fetch_one(struct remote *remote, int argc, const char **argv)
+/*
+ * Fetching from the promisor remote should use the given filter-spec
+ * or inherit the default filter-spec from the config.
+ */
+static inline void fetch_one_setup_partial(struct remote *remote)
+{
+ /*
+ * Explicit --no-filter argument overrides everything, regardless
+ * of any prior partial clones and fetches.
+ */
+ if (filter_options.no_filter)
+ return;
+
+ /*
+ * If no prior partial clone/fetch and the current fetch DID NOT
+ * request a partial-fetch, do a normal fetch.
+ */
+ if (!repository_format_partial_clone && !filter_options.choice)
+ return;
+
+ /*
+ * If this is the FIRST partial-fetch request, we enable partial
+ * on this repo and remember the given filter-spec as the default
+ * for subsequent fetches to this remote.
+ */
+ if (!repository_format_partial_clone && filter_options.choice) {
+ partial_clone_register(remote->name, &filter_options);
+ return;
+ }
+
+ /*
+ * We are currently limited to only ONE promisor remote and only
+ * allow partial-fetches from the promisor remote.
+ */
+ if (strcmp(remote->name, repository_format_partial_clone)) {
+ if (filter_options.choice)
+ die(_("--filter can only be used with the remote configured in core.partialClone"));
+ return;
+ }
+
+ /*
+ * Do a partial-fetch from the promisor remote using either the
+ * explicitly given filter-spec or inherit the filter-spec from
+ * the config.
+ */
+ if (!filter_options.choice)
+ partial_clone_get_default_filter_spec(&filter_options);
+ return;
+}
+
+static int fetch_one(struct remote *remote, int argc, const char **argv, int prune_tags_ok)
{
static const char **refs = NULL;
struct refspec *refspec;
int ref_nr = 0;
+ int j = 0;
int exit_code;
+ int maybe_prune_tags;
+ int remote_via_config = remote_is_configured(remote, 0);
if (!remote)
die(_("No remote repository specified. Please, specify either a URL or a\n"
if (prune < 0) {
/* no command line request */
- if (0 <= gtransport->remote->prune)
- prune = gtransport->remote->prune;
+ if (0 <= remote->prune)
+ prune = remote->prune;
else if (0 <= fetch_prune_config)
prune = fetch_prune_config;
else
prune = PRUNE_BY_DEFAULT;
}
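+ /*
+ * Resolve --prune-tags the same way as --prune: command line first,
+ * then remote.<name>.pruneTags, then fetch.pruneTags, then off.
+ */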
+ if (prune_tags < 0) {
+ /* no command line request */
+ if (0 <= remote->prune_tags)
+ prune_tags = remote->prune_tags;
+ else if (0 <= fetch_prune_tags_config)
+ prune_tags = fetch_prune_tags_config;
+ else
+ prune_tags = PRUNE_TAGS_BY_DEFAULT;
+ }
+
+ maybe_prune_tags = prune_tags_ok && prune_tags;
+ if (maybe_prune_tags && remote_via_config)
+ add_prune_tags_to_fetch_refspec(remote);
+
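+ /*
+ * Reserve room for the explicit refspecs, an optional tag-pruning
+ * refspec, and the terminating NULL; st_add3() checks the addition
+ * for overflow.
+ */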
+ if (argc > 0 || (maybe_prune_tags && !remote_via_config)) {
+ size_t nr_alloc = st_add3(argc, maybe_prune_tags, 1);
+ refs = xcalloc(nr_alloc, sizeof(const char *));
+ if (maybe_prune_tags) {
+ refs[j++] = xstrdup("refs/tags/*:refs/tags/*");
+ ref_nr++;
+ }
+ }
+
if (argc > 0) {
- int j = 0;
int i;
- refs = xcalloc(st_add(argc, 1), sizeof(const char *));
for (i = 0; i < argc; i++) {
if (!strcmp(argv[i], "tag")) {
i++;
argv[i], argv[i]);
} else
refs[j++] = argv[i];
+ ref_nr++;
}
- refs[j] = NULL;
- ref_nr = j;
}
sigchain_push_common(unlock_pack_on_signal);
{
int i;
struct string_list list = STRING_LIST_INIT_DUP;
- struct remote *remote;
+ struct remote *remote = NULL;
int result = 0;
+ int prune_tags_ok = 1;
struct argv_array argv_gc_auto = ARGV_ARRAY_INIT;
packet_trace_identity("fetch");
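+ /* we are fetching already; do not lazily fetch missing objects on the side */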
+ fetch_if_missing = 0;
+
/* Record the command line for the reflog */
strbuf_addstr(&default_rla, "fetch");
for (i = 1; i < argc; i++)
if (depth || deepen_since || deepen_not.nr)
deepen = 1;
+ if (filter_options.choice && !repository_format_partial_clone)
+ die("--filter can only be used when extensions.partialClone is set");
+
if (all) {
if (argc == 1)
die(_("fetch --all does not take a repository argument"));
else if (argc > 1)
die(_("fetch --all does not make sense with refspecs"));
(void) for_each_remote(get_one_remote_for_fetch, &list);
- result = fetch_multiple(&list);
} else if (argc == 0) {
/* No arguments -- use default remote */
remote = remote_get(NULL);
- result = fetch_one(remote, argc, argv);
} else if (multiple) {
/* All arguments are assumed to be remotes or groups */
for (i = 0; i < argc; i++)
if (!add_remote_or_group(argv[i], &list))
die(_("No such remote or remote group: %s"), argv[i]);
- result = fetch_multiple(&list);
} else {
/* Single remote or group */
(void) add_remote_or_group(argv[0], &list);
/* More than one remote */
if (argc > 1)
die(_("Fetching a group and specifying refspecs does not make sense"));
- result = fetch_multiple(&list);
} else {
/* Zero or one remotes */
remote = remote_get(argv[0]);
- result = fetch_one(remote, argc-1, argv+1);
+ prune_tags_ok = (argc == 1);
+ argc--;
+ argv++;
}
}
+ if (remote) {
+ if (filter_options.choice || repository_format_partial_clone)
+ fetch_one_setup_partial(remote);
+ result = fetch_one(remote, argc, argv, prune_tags_ok);
+ } else {
+ if (filter_options.choice)
+ die(_("--filter can only be used with the remote configured in core.partialClone"));
+ /* TODO should this also die if we have a previous partial-clone? */
+ result = fetch_multiple(&list);
+ }
+
if (!result && (recurse_submodules != RECURSE_SUBMODULES_OFF)) {
struct argv_array options = ARGV_ARRAY_INIT;
object_as_type(obj, type, 0);
}
- ret = typename(obj->type);
+ ret = type_name(obj->type);
if (!ret)
ret = "unknown";
printf("broken link from %7s %s\n",
printable_type(parent), describe_object(parent));
printf("broken link from %7s %s\n",
- (type == OBJ_ANY ? "unknown" : typename(type)), "unknown");
+ (type == OBJ_ANY ? "unknown" : type_name(type)), "unknown");
errors_found |= ERROR_REACHABLE;
return 1;
}
if (obj->flags & REACHABLE)
return 0;
obj->flags |= REACHABLE;
+
+ if (is_promisor_object(&obj->oid))
+ /*
+ * Further recursion does not need to be performed on this
+ * object since it is a promisor object (so it does not need to
+ * be added to "pending").
+ */
+ return 0;
+
if (!(obj->flags & HAS_OBJ)) {
if (parent && !has_object_file(&obj->oid)) {
printf("broken link from %7s %s\n",
static int traverse_one_object(struct object *obj)
{
- return fsck_walk(obj, obj, &fsck_walk_options);
+ int result = fsck_walk(obj, obj, &fsck_walk_options);
+
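+ /*
+ * fsck_walk() may have loaded the tree's buffer; release it here so
+ * that a full traversal does not keep every tree in memory at once.
+ */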
+ if (obj->type == OBJ_TREE) {
+ struct tree *tree = (struct tree *)obj;
+ free_tree_buffer(tree);
+ }
+ return result;
}
static int traverse_reachable(void)
* do a full fsck
*/
if (!(obj->flags & HAS_OBJ)) {
+ if (is_promisor_object(&obj->oid))
+ return;
if (has_sha1_pack(obj->oid.hash))
return; /* it is in pack - forget about it */
printf("missing %s %s\n", printable_type(obj),
xstrfmt("%s@{%"PRItime"}", refname, timestamp));
obj->flags |= USED;
mark_object_reachable(obj);
- } else {
+ } else if (!is_promisor_object(oid)) {
error("%s: invalid reflog entry %s", refname, oid_to_hex(oid));
errors_found |= ERROR_REACHABLE;
}
obj = parse_object(oid);
if (!obj) {
+ if (is_promisor_object(oid)) {
+ /*
+ * Increment default_refs anyway, because this is a
+ * valid ref.
+ */
+ default_refs++;
+ return 0;
+ }
error("%s: invalid sha1 pointer %s", refname, oid_to_hex(oid));
errors_found |= ERROR_REACHABLE;
/* We'll continue with the rest despite the error.. */
int i;
struct alternate_object_database *alt;
+ /* fsck knows how to handle missing promisor objects */
+ fetch_if_missing = 0;
+
errors_found = 0;
check_replace_refs = 0;
struct object *obj = lookup_object(oid.hash);
if (!obj || !(obj->flags & HAS_OBJ)) {
+ if (is_promisor_object(&oid))
+ continue;
error("%s: object missing", oid_to_hex(&oid));
errors_found |= ERROR_OBJECT;
continue;
N_("prune unreferenced objects"),
PARSE_OPT_OPTARG, NULL, (intptr_t)prune_expire },
OPT_BOOL(0, "aggressive", &aggressive, N_("be more thorough (increased runtime)")),
- OPT_BOOL(0, "auto", &auto_gc, N_("enable auto-gc mode")),
- OPT_BOOL(0, "force", &force, N_("force running gc even if there may be another gc running")),
+ OPT_BOOL_F(0, "auto", &auto_gc, N_("enable auto-gc mode"),
+ PARSE_OPT_NOCOMPLETE),
+ OPT_BOOL_F(0, "force", &force,
+ N_("force running gc even if there may be another gc running"),
+ PARSE_OPT_NOCOMPLETE),
OPT_END()
};
argv_array_push(&prune, prune_expire);
if (quiet)
argv_array_push(&prune, "--no-progress");
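+ /*
+ * In a partial clone, objects reachable via promisor objects may be
+ * absent by design; tell prune to leave promisor objects out of its
+ * traversal rather than treat them as corruption.
+ */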
+ if (repository_format_partial_clone)
+ argv_array_push(&prune,
+ "--exclude-promisor-objects");
if (run_command_v_opt(prune.argv, RUN_GIT_CMD))
return error(FAILED_RUN, prune.argv[0]);
}
static int skip_first_line;
-static void add_work(struct grep_opt *opt, enum grep_source_type type,
- const char *name, const char *path, const void *id)
+static void add_work(struct grep_opt *opt, const struct grep_source *gs)
{
grep_lock();
pthread_cond_wait(&cond_write, &grep_mutex);
}
- grep_source_init(&todo[todo_end].source, type, name, path, id);
+ todo[todo_end].source = *gs;
if (opt->binary != GREP_BINARY_TEXT)
grep_source_load_driver(&todo[todo_end].source);
todo[todo_end].done = 0;
const char *path)
{
struct strbuf pathbuf = STRBUF_INIT;
+ struct grep_source gs;
if (opt->relative && opt->prefix_length) {
quote_path_relative(filename + tree_name_len, opt->prefix, &pathbuf);
strbuf_addstr(&pathbuf, filename);
}
+ grep_source_init(&gs, GREP_SOURCE_OID, pathbuf.buf, path, oid);
+ strbuf_release(&pathbuf);
+
#ifndef NO_PTHREADS
if (num_threads) {
- add_work(opt, GREP_SOURCE_OID, pathbuf.buf, path, oid);
- strbuf_release(&pathbuf);
+ /*
+ * add_work() copies gs and thus assumes ownership of
+ * its fields, so do not call grep_source_clear()
+ */
+ add_work(opt, &gs);
return 0;
} else
#endif
{
- struct grep_source gs;
int hit;
- grep_source_init(&gs, GREP_SOURCE_OID, pathbuf.buf, path, oid);
- strbuf_release(&pathbuf);
hit = grep_source(opt, &gs);
grep_source_clear(&gs);
static int grep_file(struct grep_opt *opt, const char *filename)
{
struct strbuf buf = STRBUF_INIT;
+ struct grep_source gs;
if (opt->relative && opt->prefix_length)
quote_path_relative(filename, opt->prefix, &buf);
else
strbuf_addstr(&buf, filename);
+ grep_source_init(&gs, GREP_SOURCE_FILE, buf.buf, filename, filename);
+ strbuf_release(&buf);
+
#ifndef NO_PTHREADS
if (num_threads) {
- add_work(opt, GREP_SOURCE_FILE, buf.buf, filename, filename);
- strbuf_release(&buf);
+ /*
+ * add_work() copies gs and thus assumes ownership of
+ * its fields, so do not call grep_source_clear()
+ */
+ add_work(opt, &gs);
return 0;
} else
#endif
{
- struct grep_source gs;
int hit;
- grep_source_init(&gs, GREP_SOURCE_FILE, buf.buf, filename, filename);
- strbuf_release(&buf);
hit = grep_source(opt, &gs);
grep_source_clear(&gs);
free(data);
return hit;
}
- die(_("unable to grep from object of type %s"), typename(obj->type));
+ die(_("unable to grep from object of type %s"), type_name(obj->type));
}
static int grep_objects(struct grep_opt *opt, const struct pathspec *pathspec,
OPT_BOOL('L', "files-without-match",
&opt.unmatch_name_only,
N_("show only the names of files without match")),
- OPT_BOOL('z', "null", &opt.null_following_name,
- N_("print NUL after filenames")),
+ OPT_BOOL_F('z', "null", &opt.null_following_name,
+ N_("print NUL after filenames"),
+ PARSE_OPT_NOCOMPLETE),
OPT_BOOL('c', "count", &opt.count,
N_("show the number of matches instead of matching lines")),
OPT__COLOR(&opt.color, N_("highlight matches")),
OPT_GROUP(""),
{ OPTION_STRING, 'O', "open-files-in-pager", &show_in_pager,
N_("pager"), N_("show matching files in the pager"),
- PARSE_OPT_OPTARG, NULL, (intptr_t)default_pager },
- OPT_BOOL(0, "ext-grep", &external_grep_allowed__ignored,
- N_("allow calling of grep(1) (ignored by this build)")),
+ PARSE_OPT_OPTARG | PARSE_OPT_NOCOMPLETE,
+ NULL, (intptr_t)default_pager },
+ OPT_BOOL_F(0, "ext-grep", &external_grep_allowed__ignored,
+ N_("allow calling of grep(1) (ignored by this build)"),
+ PARSE_OPT_NOCOMPLETE),
OPT_END()
};
if (strbuf_read(&buf, fd, 4096) < 0)
ret = -1;
else
- ret = hash_sha1_file_literally(buf.buf, buf.len, type, oid, flags);
+ ret = hash_object_file_literally(buf.buf, buf.len, type, oid,
+ flags);
strbuf_release(&buf);
return ret;
}
size_t len,
const char *value)
{
- struct man_viewer_info_list *new;
- FLEX_ALLOC_MEM(new, name, name, len);
- new->info = xstrdup(value);
- new->next = man_viewer_info_list;
- man_viewer_info_list = new;
+ struct man_viewer_info_list *new_man_viewer;
+ FLEX_ALLOC_MEM(new_man_viewer, name, name, len);
+ new_man_viewer->info = xstrdup(value);
+ new_man_viewer->next = man_viewer_info_list;
+ man_viewer_info_list = new_man_viewer;
}
static int add_man_viewer_path(const char *name,
int pack_fd;
};
+/* Remember to update object flag allocation in object.h */
#define FLAG_LINK (1u<<20)
#define FLAG_CHECKED (1u<<21)
static off_t consumed_bytes;
static off_t max_input_size;
static unsigned deepest_delta;
-static git_SHA_CTX input_ctx;
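+/*
+ * input_ctx and the init/update/final calls below go through
+ * the_hash_algo (struct git_hash_algo), keeping this code independent
+ * of the exact hash function in use.
+ */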
+static git_hash_ctx input_ctx;
static uint32_t input_crc32;
static int input_fd, output_fd;
static const char *curr_pack;
if (type != obj->type)
die(_("object %s: expected type %s, found %s"),
oid_to_hex(&obj->oid),
- typename(obj->type), typename(type));
+ type_name(obj->type), type_name(type));
obj->flags |= FLAG_CHECKED;
return 1;
}
if (input_offset) {
if (output_fd >= 0)
write_or_die(output_fd, input_buffer, input_offset);
- git_SHA1_Update(&input_ctx, input_buffer, input_offset);
+ the_hash_algo->update_fn(&input_ctx, input_buffer, input_offset);
memmove(input_buffer, input_buffer + input_offset, input_len);
input_offset = 0;
}
output_fd = -1;
nothread_data.pack_fd = input_fd;
}
- git_SHA1_Init(&input_ctx);
+ the_hash_algo->init_fn(&input_ctx);
return pack_name;
}
}
static void *unpack_entry_data(off_t offset, unsigned long size,
- enum object_type type, unsigned char *sha1)
+ enum object_type type, struct object_id *oid)
{
static char fixed_buf[8192];
int status;
git_zstream stream;
void *buf;
- git_SHA_CTX c;
+ git_hash_ctx c;
char hdr[32];
int hdrlen;
if (!is_delta_type(type)) {
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), size) + 1;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, hdrlen);
+ hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), size) + 1;
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
} else
- sha1 = NULL;
+ oid = NULL;
if (type == OBJ_BLOB && size > big_file_threshold)
buf = fixed_buf;
else
stream.avail_in = input_len;
status = git_inflate(&stream, 0);
use(input_len - stream.avail_in);
- if (sha1)
- git_SHA1_Update(&c, last_out, stream.next_out - last_out);
+ if (oid)
+ the_hash_algo->update_fn(&c, last_out, stream.next_out - last_out);
if (buf == fixed_buf) {
stream.next_out = buf;
stream.avail_out = sizeof(fixed_buf);
if (stream.total_out != size || status != Z_STREAM_END)
bad_object(offset, _("inflate returned %d"), status);
git_inflate_end(&stream);
- if (sha1)
- git_SHA1_Final(sha1, &c);
+ if (oid)
+ the_hash_algo->final_fn(oid->hash, &c);
return buf == fixed_buf ? NULL : buf;
}
static void *unpack_raw_entry(struct object_entry *obj,
off_t *ofs_offset,
- unsigned char *ref_sha1,
- unsigned char *sha1)
+ struct object_id *ref_oid,
+ struct object_id *oid)
{
unsigned char *p;
unsigned long size, c;
switch (obj->type) {
case OBJ_REF_DELTA:
- hashcpy(ref_sha1, fill(20));
- use(20);
+ hashcpy(ref_oid->hash, fill(the_hash_algo->rawsz));
+ use(the_hash_algo->rawsz);
break;
case OBJ_OFS_DELTA:
p = fill(1);
}
obj->hdr_size = consumed_bytes - obj->idx.offset;
- data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, sha1);
+ data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, oid);
obj->idx.crc32 = input_crc32;
return data;
}
free(has_data);
}
- if (strict) {
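+ /*
+ * --fsck-objects asks for each object to be checked even without
+ * --strict; the full connectivity walk (fsck_walk) below remains
+ * strict-only.
+ */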
+ if (strict || do_fsck_object) {
read_lock();
if (type == OBJ_BLOB) {
struct blob *blob = lookup_blob(oid);
obj = parse_object_buffer(oid, type, size, buf,
&eaten);
if (!obj)
- die(_("invalid %s"), typename(type));
+ die(_("invalid %s"), type_name(type));
if (do_fsck_object &&
fsck_object(obj, buf, size, &fsck_options))
die(_("Error in object"));
- if (fsck_walk(obj, NULL, &fsck_options))
+ if (strict && fsck_walk(obj, NULL, &fsck_options))
die(_("Not all child objects of %s are reachable"), oid_to_hex(&obj->oid));
if (obj->type == OBJ_TREE) {
free(delta_data);
if (!result->data)
bad_object(delta_obj->idx.offset, _("failed to apply delta"));
- hash_sha1_file(result->data, result->size,
- typename(delta_obj->real_type),
- delta_obj->idx.oid.hash);
+ hash_object_file(result->data, result->size,
+ type_name(delta_obj->real_type), &delta_obj->idx.oid);
sha1_object(result->data, NULL, result->size, delta_obj->real_type,
&delta_obj->idx.oid);
counter_lock();
* - calculate SHA1 of all non-delta objects;
* - remember base (SHA1 or offset) for all deltas.
*/
-static void parse_pack_objects(unsigned char *sha1)
+static void parse_pack_objects(unsigned char *hash)
{
int i, nr_delays = 0;
struct ofs_delta_entry *ofs_delta = ofs_deltas;
- unsigned char ref_delta_sha1[20];
+ struct object_id ref_delta_oid;
struct stat st;
if (verbose)
for (i = 0; i < nr_objects; i++) {
struct object_entry *obj = &objects[i];
void *data = unpack_raw_entry(obj, &ofs_delta->offset,
- ref_delta_sha1,
- obj->idx.oid.hash);
+ &ref_delta_oid,
+ &obj->idx.oid);
obj->real_type = obj->type;
if (obj->type == OBJ_OFS_DELTA) {
nr_ofs_deltas++;
ofs_delta++;
} else if (obj->type == OBJ_REF_DELTA) {
ALLOC_GROW(ref_deltas, nr_ref_deltas + 1, ref_deltas_alloc);
- hashcpy(ref_deltas[nr_ref_deltas].sha1, ref_delta_sha1);
+ hashcpy(ref_deltas[nr_ref_deltas].sha1, ref_delta_oid.hash);
ref_deltas[nr_ref_deltas].obj_no = i;
nr_ref_deltas++;
} else if (!data) {
/* Check pack integrity */
flush();
- git_SHA1_Final(sha1, &input_ctx);
- if (hashcmp(fill(20), sha1))
+ the_hash_algo->final_fn(hash, &input_ctx);
+ if (hashcmp(fill(the_hash_algo->rawsz), hash))
die(_("pack is corrupted (SHA1 mismatch)"));
- use(20);
+ use(the_hash_algo->rawsz);
/* If input_fd is a file, we should have reached its end now. */
if (fstat(input_fd, &st))
/*
* Third pass:
* - append objects to convert thin pack to full pack if required
- * - write the final 20-byte SHA-1
+ * - write the final pack hash
*/
-static void fix_unresolved_deltas(struct sha1file *f);
-static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_sha1)
+static void fix_unresolved_deltas(struct hashfile *f);
+static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_hash)
{
if (nr_ref_deltas + nr_ofs_deltas == nr_resolved_deltas) {
stop_progress(&progress);
- /* Flush remaining pack final 20-byte SHA1. */
+ /* Flush remaining pack final hash. */
flush();
return;
}
if (fix_thin_pack) {
- struct sha1file *f;
- unsigned char read_sha1[20], tail_sha1[20];
+ struct hashfile *f;
+ unsigned char read_hash[GIT_MAX_RAWSZ], tail_hash[GIT_MAX_RAWSZ];
struct strbuf msg = STRBUF_INIT;
int nr_unresolved = nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas;
int nr_objects_initial = nr_objects;
REALLOC_ARRAY(objects, nr_objects + nr_unresolved + 1);
memset(objects + nr_objects + 1, 0,
nr_unresolved * sizeof(*objects));
- f = sha1fd(output_fd, curr_pack);
+ f = hashfd(output_fd, curr_pack);
fix_unresolved_deltas(f);
strbuf_addf(&msg, Q_("completed with %d local object",
"completed with %d local objects",
nr_objects - nr_objects_initial);
stop_progress_msg(&progress, msg.buf);
strbuf_release(&msg);
- sha1close(f, tail_sha1, 0);
- hashcpy(read_sha1, pack_sha1);
- fixup_pack_header_footer(output_fd, pack_sha1,
+ hashclose(f, tail_hash, 0);
+ hashcpy(read_hash, pack_hash);
+ fixup_pack_header_footer(output_fd, pack_hash,
curr_pack, nr_objects,
- read_sha1, consumed_bytes-20);
- if (hashcmp(read_sha1, tail_sha1) != 0)
+ read_hash, consumed_bytes-the_hash_algo->rawsz);
+ if (hashcmp(read_hash, tail_hash) != 0)
die(_("Unexpected tail checksum for %s "
"(disk corruption?)"), curr_pack);
}
nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas);
}
-static int write_compressed(struct sha1file *f, void *in, unsigned int size)
+static int write_compressed(struct hashfile *f, void *in, unsigned int size)
{
git_zstream stream;
int status;
stream.next_out = outbuf;
stream.avail_out = sizeof(outbuf);
status = git_deflate(&stream, Z_FINISH);
- sha1write(f, outbuf, sizeof(outbuf) - stream.avail_out);
+ hashwrite(f, outbuf, sizeof(outbuf) - stream.avail_out);
} while (status == Z_OK);
if (status != Z_STREAM_END)
return size;
}
-static struct object_entry *append_obj_to_pack(struct sha1file *f,
+static struct object_entry *append_obj_to_pack(struct hashfile *f,
const unsigned char *sha1, void *buf,
unsigned long size, enum object_type type)
{
}
header[n++] = c;
crc32_begin(f);
- sha1write(f, header, n);
+ hashwrite(f, header, n);
obj[0].size = size;
obj[0].hdr_size = n;
obj[0].type = type;
obj[1].idx.offset = obj[0].idx.offset + n;
obj[1].idx.offset += write_compressed(f, buf, size);
obj[0].idx.crc32 = crc32_end(f);
- sha1flush(f);
+ hashflush(f);
hashcpy(obj->idx.oid.hash, sha1);
return obj;
}
return a->obj_no - b->obj_no;
}
-static void fix_unresolved_deltas(struct sha1file *f)
+static void fix_unresolved_deltas(struct hashfile *f)
{
struct ref_delta_entry **sorted_by_pos;
int i;
continue;
if (check_sha1_signature(d->sha1, base_obj->data,
- base_obj->size, typename(type)))
+ base_obj->size, type_name(type)))
die(_("local object %s is corrupt"), sha1_to_hex(d->sha1));
base_obj->obj = append_obj_to_pack(f, d->sha1,
base_obj->data, base_obj->size, type);
free(sorted_by_pos);
}
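+/*
+ * Note: the suffix is passed without a leading dot ("idx", "keep",
+ * "promisor"); derive_filename() inserts the dot itself.
+ */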
+static const char *derive_filename(const char *pack_name, const char *suffix,
+ struct strbuf *buf)
+{
+ size_t len;
+ if (!strip_suffix(pack_name, ".pack", &len))
+ die(_("packfile name '%s' does not end with '.pack'"),
+ pack_name);
+ strbuf_add(buf, pack_name, len);
+ strbuf_addch(buf, '.');
+ strbuf_addstr(buf, suffix);
+ return buf->buf;
+}
+
+static void write_special_file(const char *suffix, const char *msg,
+ const char *pack_name, const unsigned char *hash,
+ const char **report)
+{
+ struct strbuf name_buf = STRBUF_INIT;
+ const char *filename;
+ int fd;
+ int msg_len = strlen(msg);
+
+ if (pack_name)
+ filename = derive_filename(pack_name, suffix, &name_buf);
+ else
+ filename = odb_pack_name(&name_buf, hash, suffix);
+
+ fd = odb_pack_keep(filename);
+ if (fd < 0) {
+ if (errno != EEXIST)
+ die_errno(_("cannot write %s file '%s'"),
+ suffix, filename);
+ } else {
+ if (msg_len > 0) {
+ write_or_die(fd, msg, msg_len);
+ write_or_die(fd, "\n", 1);
+ }
+ if (close(fd) != 0)
+ die_errno(_("cannot close written %s file '%s'"),
+ suffix, filename);
+ if (report)
+ *report = suffix;
+ }
+ strbuf_release(&name_buf);
+}
+
static void final(const char *final_pack_name, const char *curr_pack_name,
const char *final_index_name, const char *curr_index_name,
- const char *keep_name, const char *keep_msg,
- unsigned char *sha1)
+ const char *keep_msg, const char *promisor_msg,
+ unsigned char *hash)
{
const char *report = "pack";
struct strbuf pack_name = STRBUF_INIT;
struct strbuf index_name = STRBUF_INIT;
- struct strbuf keep_name_buf = STRBUF_INIT;
int err;
if (!from_stdin) {
die_errno(_("error while closing pack file"));
}
- if (keep_msg) {
- int keep_fd, keep_msg_len = strlen(keep_msg);
-
- if (!keep_name)
- keep_name = odb_pack_name(&keep_name_buf, sha1, "keep");
-
- keep_fd = odb_pack_keep(keep_name);
- if (keep_fd < 0) {
- if (errno != EEXIST)
- die_errno(_("cannot write keep file '%s'"),
- keep_name);
- } else {
- if (keep_msg_len > 0) {
- write_or_die(keep_fd, keep_msg, keep_msg_len);
- write_or_die(keep_fd, "\n", 1);
- }
- if (close(keep_fd) != 0)
- die_errno(_("cannot close written keep file '%s'"),
- keep_name);
- report = "keep";
- }
- }
+ if (keep_msg)
+ write_special_file("keep", keep_msg, final_pack_name, hash,
+ &report);
+ if (promisor_msg)
+ write_special_file("promisor", promisor_msg, final_pack_name,
+ hash, NULL);
if (final_pack_name != curr_pack_name) {
if (!final_pack_name)
- final_pack_name = odb_pack_name(&pack_name, sha1, "pack");
+ final_pack_name = odb_pack_name(&pack_name, hash, "pack");
if (finalize_object_file(curr_pack_name, final_pack_name))
die(_("cannot store pack file"));
} else if (from_stdin)
if (final_index_name != curr_index_name) {
if (!final_index_name)
- final_index_name = odb_pack_name(&index_name, sha1, "idx");
+ final_index_name = odb_pack_name(&index_name, hash, "idx");
if (finalize_object_file(curr_index_name, final_index_name))
die(_("cannot store index file"));
} else
chmod(final_index_name, 0444);
if (!from_stdin) {
- printf("%s\n", sha1_to_hex(sha1));
+ printf("%s\n", sha1_to_hex(hash));
} else {
struct strbuf buf = STRBUF_INIT;
- strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(sha1));
+ strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(hash));
write_or_die(1, buf.buf, buf.len);
strbuf_release(&buf);
strbuf_release(&index_name);
strbuf_release(&pack_name);
- strbuf_release(&keep_name_buf);
}
static int git_index_pack_config(const char *k, const char *v, void *cb)
continue;
printf("%s %-6s %lu %lu %"PRIuMAX,
oid_to_hex(&obj->idx.oid),
- typename(obj->real_type), obj->size,
+ type_name(obj->real_type), obj->size,
(unsigned long)(obj[1].idx.offset - obj->idx.offset),
(uintmax_t)obj->idx.offset);
if (is_delta_type(obj->type)) {
}
}
-static const char *derive_filename(const char *pack_name, const char *suffix,
- struct strbuf *buf)
-{
- size_t len;
- if (!strip_suffix(pack_name, ".pack", &len))
- die(_("packfile name '%s' does not end with '.pack'"),
- pack_name);
- strbuf_add(buf, pack_name, len);
- strbuf_addstr(buf, suffix);
- return buf->buf;
-}
-
int cmd_index_pack(int argc, const char **argv, const char *prefix)
{
int i, fix_thin_pack = 0, verify = 0, stat_only = 0;
const char *curr_index;
const char *index_name = NULL, *pack_name = NULL;
- const char *keep_name = NULL, *keep_msg = NULL;
- struct strbuf index_name_buf = STRBUF_INIT,
- keep_name_buf = STRBUF_INIT;
+ const char *keep_msg = NULL;
+ const char *promisor_msg = NULL;
+ struct strbuf index_name_buf = STRBUF_INIT;
struct pack_idx_entry **idx_objects;
struct pack_idx_option opts;
- unsigned char pack_sha1[20];
+ unsigned char pack_hash[GIT_MAX_RAWSZ];
unsigned foreign_nr = 1; /* zero is a "good" value, assume bad */
int report_end_of_input = 0;
+ /*
+ * index-pack never needs to fetch missing objects, since it only
+ * accesses the repo to do hash collision checks
+ */
+ fetch_if_missing = 0;
+
if (argc == 2 && !strcmp(argv[1], "-h"))
usage(index_pack_usage);
} else if (!strcmp(arg, "--check-self-contained-and-connected")) {
strict = 1;
check_self_contained_and_connected = 1;
+ } else if (!strcmp(arg, "--fsck-objects")) {
+ do_fsck_object = 1;
} else if (!strcmp(arg, "--verify")) {
verify = 1;
} else if (!strcmp(arg, "--verify-stat")) {
stat_only = 1;
} else if (skip_to_optional_arg(arg, "--keep", &keep_msg)) {
; /* nothing to do */
+ } else if (skip_to_optional_arg(arg, "--promisor", &promisor_msg)) {
+ ; /* already parsed */
} else if (starts_with(arg, "--threads=")) {
char *end;
nr_threads = strtoul(arg+10, &end, 0);
if (from_stdin && !startup_info->have_repository)
die(_("--stdin requires a git repository"));
if (!index_name && pack_name)
- index_name = derive_filename(pack_name, ".idx", &index_name_buf);
- if (keep_msg && !keep_name && pack_name)
- keep_name = derive_filename(pack_name, ".keep", &keep_name_buf);
+ index_name = derive_filename(pack_name, "idx", &index_name_buf);
if (verify) {
if (!index_name)
if (show_stat)
obj_stat = xcalloc(st_add(nr_objects, 1), sizeof(struct object_stat));
ofs_deltas = xcalloc(nr_objects, sizeof(struct ofs_delta_entry));
- parse_pack_objects(pack_sha1);
+ parse_pack_objects(pack_hash);
if (report_end_of_input)
write_in_full(2, "\0", 1);
resolve_deltas();
- conclude_pack(fix_thin_pack, curr_pack, pack_sha1);
+ conclude_pack(fix_thin_pack, curr_pack, pack_hash);
free(ofs_deltas);
free(ref_deltas);
if (strict)
ALLOC_ARRAY(idx_objects, nr_objects);
for (i = 0; i < nr_objects; i++)
idx_objects[i] = &objects[i].idx;
- curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_sha1);
+ curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_hash);
free(idx_objects);
if (!verify)
final(pack_name, curr_pack,
index_name, curr_index,
- keep_name, keep_msg,
- pack_sha1);
+ keep_msg, promisor_msg,
+ pack_hash);
else
close(input_fd);
free(objects);
strbuf_release(&index_name_buf);
- strbuf_release(&keep_name_buf);
if (pack_name == NULL)
free((void *) curr_pack);
if (index_name == NULL)
static int init_shared_repository = -1;
static const char *init_db_template_dir;
-static void copy_templates_1(struct strbuf *path, struct strbuf *template,
+static void copy_templates_1(struct strbuf *path, struct strbuf *template_path,
DIR *dir)
{
size_t path_baselen = path->len;
- size_t template_baselen = template->len;
+ size_t template_baselen = template_path->len;
struct dirent *de;
/* Note: if ".git/hooks" file exists in the repository being
int exists = 0;
strbuf_setlen(path, path_baselen);
- strbuf_setlen(template, template_baselen);
+ strbuf_setlen(template_path, template_baselen);
if (de->d_name[0] == '.')
continue;
strbuf_addstr(path, de->d_name);
- strbuf_addstr(template, de->d_name);
+ strbuf_addstr(template_path, de->d_name);
if (lstat(path->buf, &st_git)) {
if (errno != ENOENT)
die_errno(_("cannot stat '%s'"), path->buf);
else
exists = 1;
- if (lstat(template->buf, &st_template))
- die_errno(_("cannot stat template '%s'"), template->buf);
+ if (lstat(template_path->buf, &st_template))
+ die_errno(_("cannot stat template '%s'"), template_path->buf);
if (S_ISDIR(st_template.st_mode)) {
- DIR *subdir = opendir(template->buf);
+ DIR *subdir = opendir(template_path->buf);
if (!subdir)
- die_errno(_("cannot opendir '%s'"), template->buf);
+ die_errno(_("cannot opendir '%s'"), template_path->buf);
strbuf_addch(path, '/');
- strbuf_addch(template, '/');
- copy_templates_1(path, template, subdir);
+ strbuf_addch(template_path, '/');
+ copy_templates_1(path, template_path, subdir);
closedir(subdir);
}
else if (exists)
continue;
else if (S_ISLNK(st_template.st_mode)) {
struct strbuf lnk = STRBUF_INIT;
- if (strbuf_readlink(&lnk, template->buf, 0) < 0)
- die_errno(_("cannot readlink '%s'"), template->buf);
+ if (strbuf_readlink(&lnk, template_path->buf, 0) < 0)
+ die_errno(_("cannot readlink '%s'"), template_path->buf);
if (symlink(lnk.buf, path->buf))
die_errno(_("cannot symlink '%s' '%s'"),
lnk.buf, path->buf);
strbuf_release(&lnk);
}
else if (S_ISREG(st_template.st_mode)) {
- if (copy_file(path->buf, template->buf, st_template.st_mode))
+ if (copy_file(path->buf, template_path->buf, st_template.st_mode))
die_errno(_("cannot copy '%s' to '%s'"),
- template->buf, path->buf);
+ template_path->buf, path->buf);
}
else
- error(_("ignoring template %s"), template->buf);
+ error(_("ignoring template %s"), template_path->buf);
}
}
#include "gpg-interface.h"
#include "progress.h"
+#define MAIL_DEFAULT_WRAP 72
+
/* Set a default date-time format for git log ("log.date" config variable) */
static const char *default_date_mode = NULL;
if (rev->show_notes)
init_display_notes(&rev->notes_opt);
- if (rev->diffopt.pickaxe || rev->diffopt.filter ||
- rev->diffopt.flags.follow_renames)
+ if ((rev->diffopt.pickaxe_opts & DIFF_PICKAXE_KINDS_MASK) ||
+ rev->diffopt.filter || rev->diffopt.flags.follow_renames)
rev->always_show_header = 0;
if (source)
shortlog_init(&log);
log.wrap_lines = 1;
- log.wrap = 72;
+ log.wrap = MAIL_DEFAULT_WRAP;
log.in1 = 2;
log.in2 = 4;
log.file = rev->diffopt.file;
memcpy(&opts, &rev->diffopt, sizeof(opts));
opts.output_format = DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
+ opts.stat_width = MAIL_DEFAULT_WRAP;
diff_setup_done(&opts);
(!rev.diffopt.output_format ||
rev.diffopt.output_format == DIFF_FORMAT_PATCH))
rev.diffopt.output_format = DIFF_FORMAT_DIFFSTAT | DIFF_FORMAT_SUMMARY;
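+ /* e-mailed patches get the full mail width for their diffstat by default */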
+ if (!rev.diffopt.stat_width)
+ rev.diffopt.stat_width = MAIL_DEFAULT_WRAP;
/* Always generate a patch */
rev.diffopt.output_format |= DIFF_FORMAT_PATCH;
OPT_BIT(0, "refs", &flags, N_("do not show peeled tags"), REF_NORMAL),
OPT_BOOL(0, "get-url", &get_url,
N_("take url.<base>.insteadOf into account")),
- OPT_SET_INT(0, "exit-code", &status,
- N_("exit with exit code 2 if no matching refs are found"), 2),
+ OPT_SET_INT_F(0, "exit-code", &status,
+ N_("exit with exit code 2 if no matching refs are found"),
+ 2, PARSE_OPT_NOCOMPLETE),
OPT_BOOL(0, "symref", &show_symref_target,
N_("show underlying ref in addition to the object pointed by it")),
OPT_END()
#include "sequencer.h"
#include "string-list.h"
#include "packfile.h"
+#include "tag.h"
#define DEFAULT_TWOHEAD (1<<0)
#define DEFAULT_OCTOPUS (1<<1)
if (desc && desc->obj && desc->obj->type == OBJ_TAG) {
strbuf_addf(msg, "%s\t\t%s '%s'\n",
oid_to_hex(&desc->obj->oid),
- typename(desc->obj->type),
+ type_name(desc->obj->type),
remote);
goto cleanup;
}
hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
refresh_cache(REFRESH_QUIET);
- if (active_cache_changed &&
- write_locked_index(&the_index, &lock, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
return error(_("Unable to write index."));
- rollback_lock_file(&lock);
if (!strcmp(strategy, "recursive") || !strcmp(strategy, "subtree")) {
int clean, x;
remoteheads->item, reversed, &result);
if (clean < 0)
exit(128);
- if (active_cache_changed &&
- write_locked_index(&the_index, &lock, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
die (_("unable to write %s"), get_index_file());
- rollback_lock_file(&lock);
return clean ? 0 : 1;
} else {
return try_merge_command(strategy, xopts_nr, xopts,
hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
refresh_cache(REFRESH_QUIET);
- if (active_cache_changed &&
- write_locked_index(&the_index, &lock, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
return error(_("Unable to write index."));
- rollback_lock_file(&lock);
write_tree_trivial(&result_tree);
printf(_("Wonderful.\n"));
pptr = commit_list_append(head, pptr);
pptr = commit_list_append(remoteheads->item, pptr);
prepare_to_commit(remoteheads);
- if (commit_tree(merge_msg.buf, merge_msg.len, result_tree.hash, parents,
- result_commit.hash, NULL, sign_commit))
+ if (commit_tree(merge_msg.buf, merge_msg.len, &result_tree, parents,
+ &result_commit, NULL, sign_commit))
die(_("failed to write commit object"));
finish(head, remoteheads, &result_commit, "In-index merge");
drop_save();
commit_list_insert(head, &parents);
strbuf_addch(&merge_msg, '\n');
prepare_to_commit(remoteheads);
- if (commit_tree(merge_msg.buf, merge_msg.len, result_tree->hash, parents,
- result_commit.hash, NULL, sign_commit))
+ if (commit_tree(merge_msg.buf, merge_msg.len, result_tree, parents,
+ &result_commit, NULL, sign_commit))
die(_("failed to write commit object"));
strbuf_addf(&buf, "Merge made by the '%s' strategy.", wt_strategy);
finish(head, remoteheads, &result_commit, buf.buf);
return remoteheads;
}
+static int merging_a_throwaway_tag(struct commit *commit)
+{
+ char *tag_ref;
+ struct object_id oid;
+ int is_throwaway_tag = 0;
+
+ /* Are we merging a tag? */
+ if (!merge_remote_util(commit) ||
+ !merge_remote_util(commit)->obj ||
+ merge_remote_util(commit)->obj->type != OBJ_TAG)
+ return is_throwaway_tag;
+
+ /*
+ * Now we know we are merging a tag object. Are we downstream
+ * and following the tags from upstream? If so, we must have
+ * the tag object pointed at by "refs/tags/$T" where $T is the
+ * tagname recorded in the tag object. We want to allow such
+ * a "just to catch up" merge to fast-forward.
+ *
+ * Otherwise, we are playing an integrator's role, making a
+ * merge with a throw-away tag from a contributor with
+ * something like "git pull $contributor $signed_tag".
+ * We want to forbid such a merge from fast-forwarding
+ * by default; otherwise we would not keep the signature
+ * anywhere.
+ */
+ tag_ref = xstrfmt("refs/tags/%s",
+ ((struct tag *)merge_remote_util(commit)->obj)->tag);
+ if (!read_ref(tag_ref, &oid) &&
+ !oidcmp(&oid, &merge_remote_util(commit)->obj->oid))
+ is_throwaway_tag = 0;
+ else
+ is_throwaway_tag = 1;
+ free(tag_ref);
+ return is_throwaway_tag;
+}
+
int cmd_merge(int argc, const char **argv, const char *prefix)
{
struct object_id result_tree, stash, head_oid;
oid_to_hex(&commit->object.oid));
setenv(buf.buf, merge_remote_util(commit)->name, 1);
strbuf_reset(&buf);
- if (fast_forward != FF_ONLY &&
- merge_remote_util(commit) &&
- merge_remote_util(commit)->obj &&
- merge_remote_util(commit)->obj->type == OBJ_TAG)
+ if (fast_forward != FF_ONLY && merging_a_throwaway_tag(commit))
fast_forward = FF_NO;
}
int cmd_mktag(int argc, const char **argv, const char *prefix)
{
struct strbuf buf = STRBUF_INIT;
- unsigned char result_sha1[20];
+ struct object_id result;
if (argc != 1)
usage("git mktag");
if (verify_tag(buf.buf, buf.len) < 0)
die("invalid tag signature file");
- if (write_sha1_file(buf.buf, buf.len, tag_type, result_sha1) < 0)
+ if (write_object_file(buf.buf, buf.len, tag_type, &result) < 0)
die("unable to write tag file");
strbuf_release(&buf);
- printf("%s\n", sha1_to_hex(result_sha1));
+ printf("%s\n", oid_to_hex(&result));
return 0;
}
b->name, b->len, b->mode);
}
-static void write_tree(unsigned char *sha1)
+static void write_tree(struct object_id *oid)
{
struct strbuf buf;
size_t size;
strbuf_add(&buf, ent->sha1, 20);
}
- write_sha1_file(buf.buf, buf.len, tree_type, sha1);
+ write_object_file(buf.buf, buf.len, tree_type, oid);
strbuf_release(&buf);
}
mode_type = object_type(mode);
if (mode_type != type_from_string(ptr)) {
die("entry '%s' object type (%s) doesn't match mode type (%s)",
- path, ptr, typename(mode_type));
+ path, ptr, type_name(mode_type));
}
/* Check the type of object identified by sha1 */
* because the new tree entry will never be correct.
*/
die("entry '%s' object %s is a %s but specified type was (%s)",
- path, sha1_to_hex(sha1), typename(obj_type), typename(mode_type));
+ path, sha1_to_hex(sha1), type_name(obj_type), type_name(mode_type));
}
}
int cmd_mktree(int ac, const char **av, const char *prefix)
{
struct strbuf sb = STRBUF_INIT;
- unsigned char sha1[20];
+ struct object_id oid;
int nul_term_line = 0;
int allow_missing = 0;
int is_batch_mode = 0;
*/
; /* skip creating an empty tree */
} else {
- write_tree(sha1);
- puts(sha1_to_hex(sha1));
+ write_tree(&oid);
+ puts(oid_to_hex(&oid));
fflush(stdout);
}
used=0; /* reset tree entry buffer for re-use in batch mode */
struct option builtin_mv_options[] = {
OPT__VERBOSE(&verbose, N_("be verbose")),
OPT__DRY_RUN(&show_only, N_("dry run")),
- OPT__FORCE(&force, N_("force move/rename even if target exists")),
+ OPT__FORCE(&force, N_("force move/rename even if target exists"),
+ PARSE_OPT_NOCOMPLETE),
OPT_BOOL('k', NULL, &ignore_errors, N_("skip move/rename errors")),
OPT_END(),
};
pos = cache_name_pos(src, strlen(src));
assert(pos >= 0);
- if (!show_only)
- rename_cache_entry_at(pos, dst);
+ rename_cache_entry_at(pos, dst);
}
if (gitmodules_modified)
stage_updated_gitmodules(&the_index);
- if (active_cache_changed &&
- write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock_file,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
die(_("Unable to write new index file"));
return 0;
}
}
-static void write_note_data(struct note_data *d, unsigned char *sha1)
+static void write_note_data(struct note_data *d, struct object_id *oid)
{
- if (write_sha1_file(d->buf.buf, d->buf.len, blob_type, sha1)) {
+ if (write_object_file(d->buf.buf, d->buf.len, blob_type, oid)) {
error(_("unable to write note object"));
if (d->edit_path)
error(_("the note contents have been left in %s"),
parse_reuse_arg},
OPT_BOOL(0, "allow-empty", &allow_empty,
N_("allow storing empty note")),
- OPT__FORCE(&force, N_("replace existing notes")),
+ OPT__FORCE(&force, N_("replace existing notes"), PARSE_OPT_NOCOMPLETE),
OPT_END()
};
prepare_note_data(&object, &d, note ? note->hash : NULL);
if (d.buf.len || allow_empty) {
- write_note_data(&d, new_note.hash);
+ write_note_data(&d, &new_note);
if (add_note(t, &object, &new_note, combine_notes_overwrite))
die("BUG: combine_notes_overwrite failed");
commit_notes(t, "Notes added by 'git notes add'");
struct notes_tree *t;
const char *rewrite_cmd = NULL;
struct option options[] = {
- OPT__FORCE(&force, N_("replace existing notes")),
+ OPT__FORCE(&force, N_("replace existing notes"), PARSE_OPT_NOCOMPLETE),
OPT_BOOL(0, "stdin", &from_stdin, N_("read objects from stdin")),
OPT_STRING(0, "for-rewrite", &rewrite_cmd, N_("command"),
N_("load rewriting config for <command> (implies "
}
if (d.buf.len || allow_empty) {
- write_note_data(&d, new_note.hash);
+ write_note_data(&d, &new_note);
if (add_note(t, &object, &new_note, combine_notes_overwrite))
die("BUG: combine_notes_overwrite failed");
logmsg = xstrfmt("Notes added by 'git notes %s'", argv[0]);
#include "reachable.h"
#include "sha1-array.h"
#include "argv-array.h"
-#include "mru.h"
+#include "list.h"
#include "packfile.h"
static const char *pack_usage[] = {
static int write_bitmap_index;
static uint16_t write_bitmap_options;
+static int exclude_promisor_objects;
+
static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
static unsigned long cache_max_small_delta_size = 1000;
static struct list_objects_filter_options filter_options;
enum missing_action {
- MA_ERROR = 0, /* fail if any missing objects are encountered */
- MA_ALLOW_ANY, /* silently allow ALL missing objects */
+ MA_ERROR = 0, /* fail if any missing objects are encountered */
+ MA_ALLOW_ANY, /* silently allow ALL missing objects */
+ MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
};
static enum missing_action arg_missing_action;
static show_object_fn fn_show_object;
return stream.total_out;
}
-static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f,
+static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
const struct object_id *oid)
{
git_zstream stream;
stream.next_out = obuf;
stream.avail_out = sizeof(obuf);
zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
- sha1write(f, obuf, stream.next_out - obuf);
+ hashwrite(f, obuf, stream.next_out - obuf);
olen += stream.next_out - obuf;
}
if (stream.avail_in)
stream.total_in == len) ? 0 : -1;
}
-static void copy_pack_data(struct sha1file *f,
+static void copy_pack_data(struct hashfile *f,
struct packed_git *p,
struct pack_window **w_curs,
off_t offset,
in = use_pack(p, w_curs, offset, &avail);
if (avail > len)
avail = (unsigned long)len;
- sha1write(f, in, avail);
+ hashwrite(f, in, avail);
offset += avail;
len -= avail;
}
}
/* Return 0 if we will bust the pack-size limit */
-static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
+static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
unsigned long limit, int usable_delta)
{
unsigned long size, datalen;
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, dheader + pos, sizeof(dheader) - pos);
hdrlen += sizeof(dheader) - pos;
} else if (type == OBJ_REF_DELTA) {
/*
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, entry->delta->idx.oid.hash, 20);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, entry->delta->idx.oid.hash, 20);
hdrlen += 20;
} else {
if (limit && hdrlen + datalen + 20 >= limit) {
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
+ hashwrite(f, header, hdrlen);
}
if (st) {
datalen = write_large_blob_data(st, f, &entry->idx.oid);
close_istream(st);
} else {
- sha1write(f, buf, datalen);
+ hashwrite(f, buf, datalen);
free(buf);
}
}
/* Return 0 if we will bust the pack-size limit */
-static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
+static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
unsigned long limit, int usable_delta)
{
struct packed_git *p = entry->in_pack;
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, dheader + pos, sizeof(dheader) - pos);
hdrlen += sizeof(dheader) - pos;
reused_delta++;
} else if (type == OBJ_REF_DELTA) {
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, entry->delta->idx.oid.hash, 20);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, entry->delta->idx.oid.hash, 20);
hdrlen += 20;
reused_delta++;
} else {
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
+ hashwrite(f, header, hdrlen);
}
copy_pack_data(f, p, &w_curs, offset, datalen);
unuse_pack(&w_curs);
}
/* Return 0 if we will bust the pack-size limit */
-static off_t write_object(struct sha1file *f,
+static off_t write_object(struct hashfile *f,
struct object_entry *entry,
off_t write_offset)
{
WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};
-static enum write_one_status write_one(struct sha1file *f,
+static enum write_one_status write_one(struct hashfile *f,
struct object_entry *e,
off_t *offset)
{
return wo;
}
-static off_t write_reused_pack(struct sha1file *f)
+static off_t write_reused_pack(struct hashfile *f)
{
unsigned char buffer[8192];
off_t to_write, total;
if (read_pack > to_write)
read_pack = to_write;
- sha1write(f, buffer, read_pack);
+ hashwrite(f, buffer, read_pack);
to_write -= read_pack;
/*
static void write_pack_file(void)
{
uint32_t i = 0, j;
- struct sha1file *f;
+ struct hashfile *f;
off_t offset;
uint32_t nr_remaining = nr_result;
time_t last_mtime = 0;
char *pack_tmp_name = NULL;
if (pack_to_stdout)
- f = sha1fd_throughput(1, "<stdout>", progress_state);
+ f = hashfd_throughput(1, "<stdout>", progress_state);
else
f = create_tmp_packfile(&pack_tmp_name);
* If so, rewrite it like in fast-import
*/
if (pack_to_stdout) {
- sha1close(f, oid.hash, CSUM_CLOSE);
+ hashclose(f, oid.hash, CSUM_CLOSE);
} else if (nr_written == nr_remaining) {
- sha1close(f, oid.hash, CSUM_FSYNC);
+ hashclose(f, oid.hash, CSUM_FSYNC);
} else {
- int fd = sha1close(f, oid.hash, 0);
+ int fd = hashclose(f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
nr_written, oid.hash, offset);
close(fd);
struct packed_git **found_pack,
off_t *found_offset)
{
- struct mru_entry *entry;
int want;
+ struct list_head *pos;
if (!exclude && local && has_loose_object_nonlocal(oid->hash))
return 0;
return want;
}
- for (entry = packed_git_mru.head; entry; entry = entry->next) {
- struct packed_git *p = entry->item;
+ list_for_each(pos, &packed_git_mru) {
+ struct packed_git *p = list_entry(pos, struct packed_git, mru);
off_t offset;
if (p == *found_pack)
}
want = want_found_object(exclude, p);
if (!exclude && want > 0)
- mru_mark(&packed_git_mru, entry);
+ list_move(&p->mru, &packed_git_mru);
if (want != -1)
return want;
}
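The hunk above replaces the bespoke "mru" structure with the generic embedded "struct list_head" from list.h: iterate with list_for_each, recover the containing object with list_entry, and promote a hit with list_move. A self-contained sketch of the same kernel-style intrusive-list idiom (simplified; Git's list.h has more helpers):

    #include <stdio.h>
    #include <stddef.h>

    struct list_head {
        struct list_head *next, *prev;
    };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }
    #define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_for_each(pos, head) \
        for (pos = (head)->next; pos != (head); pos = pos->next)

    static void list_add(struct list_head *new, struct list_head *head)
    {
        new->prev = head;
        new->next = head->next;
        head->next->prev = new;
        head->next = new;
    }

    static void list_del(struct list_head *entry)
    {
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
    }

    /* Unlink and re-insert at the front: the MRU "mark" operation. */
    static void list_move(struct list_head *entry, struct list_head *head)
    {
        list_del(entry);
        list_add(entry, head);
    }

    struct packed_git {
        const char *name;
        struct list_head mru; /* node embedded in the payload */
    };

    int main(void)
    {
        static struct list_head packed_git_mru = LIST_HEAD_INIT(packed_git_mru);
        struct packed_git a = { "pack-a" }, b = { "pack-b" };
        struct list_head *pos;

        list_add(&a.mru, &packed_git_mru);
        list_add(&b.mru, &packed_git_mru); /* order: b, a */
        list_move(&a.mru, &packed_git_mru); /* hit on a: a, b */

        list_for_each(pos, &packed_git_mru)
            puts(list_entry(pos, struct packed_git, mru)->name);
        return 0;
    }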
it = pbase_tree;
pbase_tree = NULL;
while (it) {
- struct pbase_tree *this = it;
- it = this->next;
- free(this->pcache.tree_data);
- free(this);
+ struct pbase_tree *tmp = it;
+ it = tmp->next;
+ free(tmp->pcache.tree_data);
+ free(tmp);
}
for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
}
}
+/* Remember to update object flag allocation in object.h */
#define OBJECT_ADDED (1u<<20)
static void show_commit(struct commit *commit, void *data)
show_object(obj, name, data);
}
+static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data)
+{
+ assert(arg_missing_action == MA_ALLOW_PROMISOR);
+
+ /*
+ * Quietly ignore EXPECTED missing objects. This avoids problems with
+ * staging them now and getting an odd error later.
+ */
+ if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid))
+ return;
+
+ show_object(obj, name, data);
+}
+
static int option_parse_missing_action(const struct option *opt,
const char *arg, int unset)
{
if (!strcmp(arg, "allow-any")) {
arg_missing_action = MA_ALLOW_ANY;
+ fetch_if_missing = 0;
fn_show_object = show_object__ma_allow_any;
return 0;
}
+ if (!strcmp(arg, "allow-promisor")) {
+ arg_missing_action = MA_ALLOW_PROMISOR;
+ fetch_if_missing = 0;
+ fn_show_object = show_object__ma_allow_promisor;
+ return 0;
+ }
+
die(_("invalid value for --missing"));
return 0;
}
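Note that "--missing=allow-any" and the new "--missing=allow-promisor" both pick a traversal callback and flip the global fetch_if_missing off, so a partial clone does not try to fault in the very objects the option is asking it to tolerate. A hedged sketch of that string-to-enum dispatch (simplified, standalone):

    #include <stdio.h>
    #include <string.h>

    enum missing_action {
        MA_ERROR = 0,      /* fail on any missing object */
        MA_ALLOW_ANY,      /* silently allow all missing objects */
        MA_ALLOW_PROMISOR, /* allow only promisor-backed misses */
    };

    static enum missing_action arg_missing_action;
    static int fetch_if_missing = 1;

    /* Sketch of option_parse_missing_action(): map the argument
     * to an action and disable dynamic object fetching. */
    static int parse_missing_action(const char *arg)
    {
        if (!strcmp(arg, "error")) {
            arg_missing_action = MA_ERROR;
            return 0;
        }
        if (!strcmp(arg, "allow-any")) {
            arg_missing_action = MA_ALLOW_ANY;
            fetch_if_missing = 0;
            return 0;
        }
        if (!strcmp(arg, "allow-promisor")) {
            arg_missing_action = MA_ALLOW_PROMISOR;
            fetch_if_missing = 0;
            return 0;
        }
        return -1; /* invalid value for --missing */
    }

    int main(void)
    {
        parse_missing_action("allow-promisor");
        printf("action=%d fetch_if_missing=%d\n",
               arg_missing_action, fetch_if_missing);
        return 0;
    }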
if (!packlist_find(&to_pack, oid.hash, NULL) &&
!has_sha1_pack_kept_or_nonlocal(&oid) &&
!loosened_object_can_be_discarded(&oid, p->mtime))
- if (force_object_loose(oid.hash, p->mtime))
+ if (force_object_loose(&oid, p->mtime))
die("unable to force loose object");
}
}
{ OPTION_CALLBACK, 0, "missing", NULL, N_("action"),
N_("handling for missing objects"), PARSE_OPT_NONEG,
option_parse_missing_action },
+ OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
+ N_("do not pack objects in promisor packfiles")),
OPT_END(),
};
argv_array_push(&rp, "--unpacked");
}
+ if (exclude_promisor_objects) {
+ use_internal_rev_list = 1;
+ fetch_if_missing = 0;
+ argv_array_push(&rp, "--exclude-promisor-objects");
+ }
+
if (!reuse_object)
reuse_delta = 0;
if (pack_compression_level == -1)
static inline struct llist_item *llist_item_get(void)
{
- struct llist_item *new;
+ struct llist_item *new_item;
if ( free_nodes ) {
- new = free_nodes;
+ new_item = free_nodes;
free_nodes = free_nodes->next;
} else {
int i = 1;
- ALLOC_ARRAY(new, BLKSIZE);
+ ALLOC_ARRAY(new_item, BLKSIZE);
for (; i < BLKSIZE; i++)
- llist_item_put(&new[i]);
+ llist_item_put(&new_item[i]);
}
- return new;
+ return new_item;
}
static void llist_free(struct llist *list)
static struct llist * llist_copy(struct llist *list)
{
struct llist *ret;
- struct llist_item *new, *old, *prev;
+ struct llist_item *new_item, *old_item, *prev;
llist_init(&ret);
if ((ret->size = list->size) == 0)
return ret;
- new = ret->front = llist_item_get();
- new->sha1 = list->front->sha1;
+ new_item = ret->front = llist_item_get();
+ new_item->sha1 = list->front->sha1;
- old = list->front->next;
- while (old) {
- prev = new;
- new = llist_item_get();
- prev->next = new;
- new->sha1 = old->sha1;
- old = old->next;
+ old_item = list->front->next;
+ while (old_item) {
+ prev = new_item;
+ new_item = llist_item_get();
+ prev->next = new_item;
+ new_item->sha1 = old_item->sha1;
+ old_item = old_item->next;
}
- new->next = NULL;
- ret->back = new;
+ new_item->next = NULL;
+ ret->back = new_item;
return ret;
}
struct llist_item *after,
const unsigned char *sha1)
{
- struct llist_item *new = llist_item_get();
- new->sha1 = sha1;
- new->next = NULL;
+ struct llist_item *new_item = llist_item_get();
+ new_item->sha1 = sha1;
+ new_item->next = NULL;
if (after != NULL) {
- new->next = after->next;
- after->next = new;
+ new_item->next = after->next;
+ after->next = new_item;
if (after == list->back)
- list->back = new;
+ list->back = new_item;
} else {/* insert in front */
if (list->size == 0)
- list->back = new;
+ list->back = new_item;
else
- new->next = list->front;
- list->front = new;
+ new_item->next = list->front;
+ list->front = new_item;
}
list->size++;
- return new;
+ return new_item;
}
static inline struct llist_item *llist_insert_back(struct llist *list,
if (show_only || verbose) {
enum object_type type = sha1_object_info(oid->hash, NULL);
printf("%s %s\n", oid_to_hex(oid),
- (type > 0) ? typename(type) : "unknown");
+ (type > 0) ? type_name(type) : "unknown");
}
if (!show_only)
unlink_or_warn(fullpath);
{
struct rev_info revs;
struct progress *progress = NULL;
+ int exclude_promisor_objects = 0;
const struct option options[] = {
OPT__DRY_RUN(&show_only, N_("do not remove, show only")),
OPT__VERBOSE(&verbose, N_("report pruned objects")),
OPT_BOOL(0, "progress", &show_progress, N_("show progress")),
OPT_EXPIRY_DATE(0, "expire", &expire,
N_("expire objects older than <time>")),
+ OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
+ N_("limit traversal to objects outside promisor packfiles")),
OPT_END()
};
char *s;
show_progress = isatty(2);
if (show_progress)
progress = start_delayed_progress(_("Checking connectivity"), 0);
+ if (exclude_promisor_objects) {
+ fetch_if_missing = 0;
+ revs.exclude_promisor_objects = 1;
+ }
mark_reachable_objects(&revs, 1, expire, progress);
stop_progress(&progress);
OPT_PASSTHRU(0, "upload-pack", &opt_upload_pack, N_("path"),
N_("path to upload pack on remote end"),
0),
- OPT__FORCE(&opt_force, N_("force overwrite of local branch")),
+ OPT__FORCE(&opt_force, N_("force overwrite of local branch"), 0),
OPT_PASSTHRU('t', "tags", &opt_tags, NULL,
N_("fetch all tags and associated objects"),
PARSE_OPT_NOARG),
cp.no_stdin = 1;
argv_array_pushl(&cp.args, "submodule", "update",
"--recursive", "--rebase", NULL);
+ argv_push_verbosity(&cp.args);
return run_command(&cp);
}
cp.no_stdin = 1;
argv_array_pushl(&cp.args, "submodule", "update",
"--recursive", "--checkout", NULL);
+ argv_push_verbosity(&cp.args);
return run_command(&cp);
}
{ OPTION_CALLBACK, 0, "recurse-submodules", &recurse_submodules, "check|on-demand|no",
N_("control recursive pushing of submodules"),
PARSE_OPT_OPTARG, option_parse_recurse_submodules },
- OPT_BOOL( 0 , "thin", &thin, N_("use thin pack")),
+ OPT_BOOL_F( 0 , "thin", &thin, N_("use thin pack"), PARSE_OPT_NOCOMPLETE),
OPT_STRING( 0 , "receive-pack", &receivepack, "receive-pack", N_("receive pack program")),
OPT_STRING( 0 , "exec", &receivepack, "receive-pack", N_("receive pack program")),
OPT_BIT('u', "set-upstream", &flags, N_("set upstream for git pull/status"),
struct option options[] = {
OPT_BOOL(0, "ff", &opts.allow_ff, N_("allow fast-forward")),
OPT_BOOL(0, "keep-empty", &keep_empty, N_("keep empty commits")),
+ OPT_BOOL(0, "allow-empty-message", &opts.allow_empty_message,
+ N_("allow commits with empty messages")),
OPT_CMDMODE(0, "continue", &command, N_("continue rebase"),
CONTINUE),
OPT_CMDMODE(0, "abort", &command, N_("abort rebase"),
OPT_END()
};
- git_config(git_default_config, NULL);
+ sequencer_init_config(&opts);
git_config_get_bool("rebase.abbreviatecommands", &abbreviate_commands);
opts.action = REPLAY_INTERACTIVE_REBASE;
static int shallow_update;
static const char *alt_shallow_file;
static struct strbuf push_cert = STRBUF_INIT;
-static unsigned char push_cert_sha1[20];
+static struct object_id push_cert_oid;
static struct signature_check sigcheck;
static const char *push_cert_nonce;
static const char *cert_nonce_seed;
int bogs /* beginning_of_gpg_sig */;
already_done = 1;
- if (write_sha1_file(push_cert.buf, push_cert.len, "blob", push_cert_sha1))
- hashclr(push_cert_sha1);
+ if (write_object_file(push_cert.buf, push_cert.len, "blob",
+ &push_cert_oid))
+ oidclr(&push_cert_oid);
memset(&sigcheck, '\0', sizeof(sigcheck));
sigcheck.result = 'N';
strbuf_release(&gpg_status);
nonce_status = check_nonce(push_cert.buf, bogs);
}
- if (!is_null_sha1(push_cert_sha1)) {
+ if (!is_null_oid(&push_cert_oid)) {
argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT=%s",
- sha1_to_hex(push_cert_sha1));
+ oid_to_hex(&push_cert_oid));
argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_SIGNER=%s",
sigcheck.signer ? sigcheck.signer : "");
argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_KEY=%s",
int nr;
};
+/* Remember to update object flag allocation in object.h */
#define INCOMPLETE (1u<<10)
#define STUDYING (1u<<11)
#define REACHABLE (1u<<12)
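These "remember to update object flag allocation" reminders exist because object flags are one shared bitfield on struct object: each command claims a few bits, and the central table in object.h must stay in sync so two users of the same walk never collide. An illustrative sketch of the shared flag space (bit values taken from the hunks above):

    #include <stdio.h>

    /*
     * Sketch of the shared flag space: every user of struct object
     * carves its bits out of the same word, so allocations must be
     * coordinated in one place (object.h in Git).
     */
    #define INCOMPLETE   (1u << 10) /* e.g. reflog expiry */
    #define STUDYING     (1u << 11)
    #define REACHABLE    (1u << 12)
    #define OBJECT_ADDED (1u << 20) /* e.g. pack-objects */

    struct object {
        unsigned flags;
    };

    int main(void)
    {
        struct object obj = { 0 };

        obj.flags |= REACHABLE;
        if (obj.flags & REACHABLE)
            puts("marked reachable");
        obj.flags &= ~REACHABLE; /* walkers must clear their bits */
        return 0;
    }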
const char *message, void *cb_data)
{
struct expire_reflog_policy_cb *cb = cb_data;
- struct commit *old, *new;
+ struct commit *old_commit, *new_commit;
if (timestamp < cb->cmd.expire_total)
return 1;
- old = new = NULL;
+ old_commit = new_commit = NULL;
if (cb->cmd.stalefix &&
- (!keep_entry(&old, ooid) || !keep_entry(&new, noid)))
+ (!keep_entry(&old_commit, ooid) || !keep_entry(&new_commit, noid)))
return 1;
if (timestamp < cb->cmd.expire_unreachable) {
if (cb->unreachable_expire_kind == UE_ALWAYS)
return 1;
- if (unreachable(cb, old, ooid) || unreachable(cb, new, noid))
+ if (unreachable(cb, old_commit, ooid) || unreachable(cb, new_commit, noid))
return 1;
}
OPT_STRING('m', "master", &master, N_("branch"), N_("master branch")),
{ OPTION_CALLBACK, 0, "mirror", &mirror, N_("push|fetch"),
N_("set up remote as a mirror to push to or fetch from"),
- PARSE_OPT_OPTARG, parse_mirror_opt },
+ PARSE_OPT_OPTARG | PARSE_OPT_COMP_ARG, parse_mirror_opt },
OPT_END()
};
struct ref_states {
struct remote *remote;
- struct string_list new, stale, tracked, heads, push;
+ struct string_list new_refs, stale, tracked, heads, push;
int queried;
};
die(_("Could not get fetch map for refspec %s"),
states->remote->fetch_refspec[i]);
- states->new.strdup_strings = 1;
+ states->new_refs.strdup_strings = 1;
states->tracked.strdup_strings = 1;
states->stale.strdup_strings = 1;
for (ref = fetch_map; ref; ref = ref->next) {
if (!ref->peer_ref || !ref_exists(ref->peer_ref->name))
- string_list_append(&states->new, abbrev_branch(ref->name));
+ string_list_append(&states->new_refs, abbrev_branch(ref->name));
else
string_list_append(&states->tracked, abbrev_branch(ref->name));
}
free_refs(stale_refs);
free_refs(fetch_map);
- string_list_sort(&states->new);
+ string_list_sort(&states->new_refs);
string_list_sort(&states->tracked);
string_list_sort(&states->stale);
}
struct rename_info {
- const char *old;
- const char *new;
+ const char *old_name;
+ const char *new_name;
struct string_list *remote_branches;
};
int flag;
const char *symref;
- strbuf_addf(&buf, "refs/remotes/%s/", rename->old);
+ strbuf_addf(&buf, "refs/remotes/%s/", rename->old_name);
if (starts_with(refname, buf.buf)) {
item = string_list_append(rename->remote_branches, xstrdup(refname));
symref = resolve_ref_unsafe(refname, RESOLVE_REF_READING,
if (argc != 3)
usage_with_options(builtin_remote_rename_usage, options);
- rename.old = argv[1];
- rename.new = argv[2];
+ rename.old_name = argv[1];
+ rename.new_name = argv[2];
rename.remote_branches = &remote_branches;
- oldremote = remote_get(rename.old);
+ oldremote = remote_get(rename.old_name);
if (!remote_is_configured(oldremote, 1))
- die(_("No such remote: %s"), rename.old);
+ die(_("No such remote: %s"), rename.old_name);
- if (!strcmp(rename.old, rename.new) && oldremote->origin != REMOTE_CONFIG)
+ if (!strcmp(rename.old_name, rename.new_name) && oldremote->origin != REMOTE_CONFIG)
return migrate_file(oldremote);
- newremote = remote_get(rename.new);
+ newremote = remote_get(rename.new_name);
if (remote_is_configured(newremote, 1))
- die(_("remote %s already exists."), rename.new);
+ die(_("remote %s already exists."), rename.new_name);
- strbuf_addf(&buf, "refs/heads/test:refs/remotes/%s/test", rename.new);
+ strbuf_addf(&buf, "refs/heads/test:refs/remotes/%s/test", rename.new_name);
if (!valid_fetch_refspec(buf.buf))
- die(_("'%s' is not a valid remote name"), rename.new);
+ die(_("'%s' is not a valid remote name"), rename.new_name);
strbuf_reset(&buf);
- strbuf_addf(&buf, "remote.%s", rename.old);
- strbuf_addf(&buf2, "remote.%s", rename.new);
+ strbuf_addf(&buf, "remote.%s", rename.old_name);
+ strbuf_addf(&buf2, "remote.%s", rename.new_name);
if (git_config_rename_section(buf.buf, buf2.buf) < 1)
return error(_("Could not rename config section '%s' to '%s'"),
buf.buf, buf2.buf);
strbuf_reset(&buf);
- strbuf_addf(&buf, "remote.%s.fetch", rename.new);
+ strbuf_addf(&buf, "remote.%s.fetch", rename.new_name);
git_config_set_multivar(buf.buf, NULL, NULL, 1);
- strbuf_addf(&old_remote_context, ":refs/remotes/%s/", rename.old);
+ strbuf_addf(&old_remote_context, ":refs/remotes/%s/", rename.old_name);
for (i = 0; i < oldremote->fetch_refspec_nr; i++) {
char *ptr;
refspec_updated = 1;
strbuf_splice(&buf2,
ptr-buf2.buf + strlen(":refs/remotes/"),
- strlen(rename.old), rename.new,
- strlen(rename.new));
+ strlen(rename.old_name), rename.new_name,
+ strlen(rename.new_name));
} else
warning(_("Not updating non-default fetch refspec\n"
"\t%s\n"
for (i = 0; i < branch_list.nr; i++) {
struct string_list_item *item = branch_list.items + i;
struct branch_info *info = item->util;
- if (info->remote_name && !strcmp(info->remote_name, rename.old)) {
+ if (info->remote_name && !strcmp(info->remote_name, rename.old_name)) {
strbuf_reset(&buf);
strbuf_addf(&buf, "branch.%s.remote", item->string);
- git_config_set(buf.buf, rename.new);
+ git_config_set(buf.buf, rename.new_name);
}
}
continue;
strbuf_reset(&buf);
strbuf_addstr(&buf, item->string);
- strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old),
- rename.new, strlen(rename.new));
+ strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old_name),
+ rename.new_name, strlen(rename.new_name));
strbuf_reset(&buf2);
strbuf_addf(&buf2, "remote: renamed %s to %s",
item->string, buf.buf);
continue;
strbuf_reset(&buf);
strbuf_addstr(&buf, item->string);
- strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old),
- rename.new, strlen(rename.new));
+ strbuf_splice(&buf, strlen("refs/remotes/"), strlen(rename.old_name),
+ rename.new_name, strlen(rename.new_name));
strbuf_reset(&buf2);
strbuf_addstr(&buf2, item->util);
- strbuf_splice(&buf2, strlen("refs/remotes/"), strlen(rename.old),
- rename.new, strlen(rename.new));
+ strbuf_splice(&buf2, strlen("refs/remotes/"), strlen(rename.old_name),
+ rename.new_name, strlen(rename.new_name));
strbuf_reset(&buf3);
strbuf_addf(&buf3, "remote: renamed %s to %s",
item->string, buf.buf);
static void free_remote_ref_states(struct ref_states *states)
{
- string_list_clear(&states->new, 0);
+ string_list_clear(&states->new_refs, 0);
string_list_clear(&states->stale, 1);
string_list_clear(&states->tracked, 0);
string_list_clear(&states->heads, 0);
if (states->queried) {
const char *fmt = "%s";
const char *arg = "";
- if (string_list_has_string(&states->new, name)) {
+ if (string_list_has_string(&states->new_refs, name)) {
fmt = _(" new (next fetch will store in remotes/%s)");
arg = states->remote->name;
} else if (string_list_has_string(&states->tracked, name))
/* remote branch info */
info.width = 0;
- for_each_string_list(&states.new, add_remote_to_show_info, &info);
+ for_each_string_list(&states.new_refs, add_remote_to_show_info, &info);
for_each_string_list(&states.tracked, add_remote_to_show_info, &info);
for_each_string_list(&states.stale, add_remote_to_show_info, &info);
if (info.list->nr)
/*
* Adds all packs hex strings to the fname list, which do not
- * have a corresponding .keep file.
+ * have a corresponding .keep or .promisor file. These packs are not to
+ * be kept if we are going to pack everything into one file.
*/
static void get_non_kept_pack_filenames(struct string_list *fname_list)
{
fname = xmemdupz(e->d_name, len);
- if (!file_exists(mkpath("%s/%s.keep", packdir, fname)))
+ if (!file_exists(mkpath("%s/%s.keep", packdir, fname)) &&
+ !file_exists(mkpath("%s/%s.promisor", packdir, fname)))
string_list_append_nodup(fname_list, fname);
else
free(fname);
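So repack now treats a ".promisor" sibling file exactly like a ".keep": such packs are excluded from the everything-into-one-pack list. A hedged standalone sketch of the sibling-file probe using access(2) (paths are illustrative):

    #include <stdio.h>
    #include <unistd.h>

    /*
     * Sketch: given "pack-1234" in packdir, the pack is kept when
     * either marker file exists next to it.
     */
    static int pack_is_kept(const char *packdir, const char *base)
    {
        char path[4096];

        snprintf(path, sizeof(path), "%s/%s.keep", packdir, base);
        if (access(path, F_OK) == 0)
            return 1;
        snprintf(path, sizeof(path), "%s/%s.promisor", packdir, base);
        return access(path, F_OK) == 0;
    }

    int main(void)
    {
        printf("kept: %d\n", pack_is_kept(".git/objects/pack", "pack-1234"));
        return 0;
    }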
argv_array_push(&cmd.args, "--all");
argv_array_push(&cmd.args, "--reflog");
argv_array_push(&cmd.args, "--indexed-objects");
+ if (repository_format_partial_clone)
+ argv_array_push(&cmd.args, "--exclude-promisor-objects");
if (window)
argv_array_pushf(&cmd.args, "--window=%s", window);
if (window_memory)
obj_type = sha1_object_info(object.hash, NULL);
repl_type = sha1_object_info(oid->hash, NULL);
- printf("%s (%s) -> %s (%s)\n", refname, typename(obj_type),
- oid_to_hex(oid), typename(repl_type));
+ printf("%s (%s) -> %s (%s)\n", refname, type_name(obj_type),
+ oid_to_hex(oid), type_name(repl_type));
}
}
die("Objects must be of the same type.\n"
"'%s' points to a replaced object of type '%s'\n"
"while '%s' points to a replacement object of type '%s'.",
- object_ref, typename(obj_type),
- replace_ref, typename(repl_type));
+ object_ref, type_name(obj_type),
+ replace_ref, type_name(repl_type));
check_ref_valid(object, &prev, &ref, force);
argv_array_push(&cmd.args, "--no-replace-objects");
argv_array_push(&cmd.args, "cat-file");
if (raw)
- argv_array_push(&cmd.args, typename(type));
+ argv_array_push(&cmd.args, type_name(type));
else
argv_array_push(&cmd.args, "-p");
argv_array_push(&cmd.args, oid_to_hex(oid));
{
char *tmpfile = git_pathdup("REPLACE_EDITOBJ");
enum object_type type;
- struct object_id old, new, prev;
+ struct object_id old_oid, new_oid, prev;
struct strbuf ref = STRBUF_INIT;
- if (get_oid(object_ref, &old) < 0)
+ if (get_oid(object_ref, &old_oid) < 0)
die("Not a valid object name: '%s'", object_ref);
- type = sha1_object_info(old.hash, NULL);
+ type = sha1_object_info(old_oid.hash, NULL);
if (type < 0)
- die("unable to get object type for %s", oid_to_hex(&old));
+ die("unable to get object type for %s", oid_to_hex(&old_oid));
- check_ref_valid(&old, &prev, &ref, force);
+ check_ref_valid(&old_oid, &prev, &ref, force);
strbuf_release(&ref);
- export_object(&old, type, raw, tmpfile);
+ export_object(&old_oid, type, raw, tmpfile);
if (launch_editor(tmpfile, NULL, NULL) < 0)
die("editing object file failed");
- import_object(&new, type, raw, tmpfile);
+ import_object(&new_oid, type, raw, tmpfile);
free(tmpfile);
- if (!oidcmp(&old, &new))
- return error("new object is the same as the old one: '%s'", oid_to_hex(&old));
+ if (!oidcmp(&old_oid, &new_oid))
+ return error("new object is the same as the old one: '%s'", oid_to_hex(&old_oid));
- return replace_object_oid(object_ref, &old, "replacement", &new, force);
+ return replace_object_oid(object_ref, &old_oid, "replacement", &new_oid, force);
}
static void replace_parents(struct strbuf *buf, int argc, const char **argv)
struct tag *tag;
int i;
- hash_sha1_file(extra->value, extra->len, typename(OBJ_TAG), tag_oid.hash);
+ hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &tag_oid);
tag = lookup_tag(&tag_oid);
if (!tag)
die(_("bad mergetag in commit '%s'"), ref);
static int create_graft(int argc, const char **argv, int force)
{
- struct object_id old, new;
+ struct object_id old_oid, new_oid;
const char *old_ref = argv[0];
struct commit *commit;
struct strbuf buf = STRBUF_INIT;
const char *buffer;
unsigned long size;
- if (get_oid(old_ref, &old) < 0)
+ if (get_oid(old_ref, &old_oid) < 0)
die(_("Not a valid object name: '%s'"), old_ref);
- commit = lookup_commit_or_die(&old, old_ref);
+ commit = lookup_commit_or_die(&old_oid, old_ref);
buffer = get_commit_buffer(commit, &size);
strbuf_add(&buf, buffer, size);
check_mergetags(commit, argc, argv);
- if (write_sha1_file(buf.buf, buf.len, commit_type, new.hash))
+ if (write_object_file(buf.buf, buf.len, commit_type, &new_oid))
die(_("could not write replacement commit for: '%s'"), old_ref);
strbuf_release(&buf);
- if (!oidcmp(&old, &new))
- return error("new commit is the same as the old one: '%s'", oid_to_hex(&old));
+ if (!oidcmp(&old_oid, &new_oid))
+ return error("new commit is the same as the old one: '%s'", oid_to_hex(&old_oid));
- return replace_object_oid(old_ref, &old, "replacement", &new, force);
+ return replace_object_oid(old_ref, &old_oid, "replacement", &new_oid, force);
}
int cmd_replace(int argc, const char **argv, const char *prefix)
OPT_CMDMODE('d', "delete", &cmdmode, N_("delete replace refs"), MODE_DELETE),
OPT_CMDMODE('e', "edit", &cmdmode, N_("edit existing object"), MODE_EDIT),
OPT_CMDMODE('g', "graft", &cmdmode, N_("change a commit's parents"), MODE_GRAFT),
- OPT_BOOL('f', "force", &force, N_("replace the ref if it exists")),
+ OPT_BOOL_F('f', "force", &force, N_("replace the ref if it exists"),
+ PARSE_OPT_NOCOMPLETE),
OPT_BOOL(0, "raw", &raw, N_("do not pretty-print contents for --edit")),
OPT_STRING(0, "format", &format, N_("format"), N_("use this format")),
OPT_END()
static void print_new_head_line(struct commit *commit)
{
- const char *hex, *body;
- const char *msg;
-
- hex = find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV);
- printf(_("HEAD is now at %s"), hex);
- msg = logmsg_reencode(commit, NULL, get_log_output_encoding());
- body = strstr(msg, "\n\n");
- if (body) {
- const char *eol;
- size_t len;
- body = skip_blank_lines(body + 2);
- eol = strchr(body, '\n');
- len = eol ? eol - body : strlen(body);
- printf(" %.*s\n", (int) len, body);
- }
- else
- printf("\n");
- unuse_commit_buffer(commit, msg);
+ struct strbuf buf = STRBUF_INIT;
+
+ printf(_("HEAD is now at %s"),
+ find_unique_abbrev(commit->object.oid.hash, DEFAULT_ABBREV));
+
+ pp_commit_easy(CMIT_FMT_ONELINE, commit, &buf);
+ if (buf.len > 0)
+ printf(" %s", buf.buf);
+ putchar('\n');
+ strbuf_release(&buf);
}
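The rewrite above delegates to the pretty-printer (pp_commit_easy() with CMIT_FMT_ONELINE) instead of scanning the raw commit buffer by hand. For comparison, the manual approach the hunk deletes boils down to "find the blank line, then print the first body line"; a standalone sketch of that subject extraction:

    #include <stdio.h>
    #include <string.h>

    /* Sketch of the old manual approach: locate the blank line that
     * separates commit headers from the message, then print the
     * first non-empty line (the subject). */
    static void print_subject(const char *commit_buffer)
    {
        const char *body = strstr(commit_buffer, "\n\n");
        const char *eol;

        if (!body)
            return;
        body += 2;
        while (*body == '\n')
            body++; /* skip_blank_lines() */
        eol = strchr(body, '\n');
        printf("%.*s\n", (int)(eol ? eol - body : strlen(body)), body);
    }

    int main(void)
    {
        print_subject("tree 1234\nauthor A\n\nsubject line\n\nmore\n");
        return 0;
    }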
static void update_index_from_diff(struct diff_queue_struct *q,
#include "progress.h"
#include "reflog-walk.h"
#include "oidset.h"
+#include "packfile.h"
static const char rev_list_usage[] =
"git rev-list [OPTION] <commit-id>... [ -- paths... ]\n"
MA_ERROR = 0, /* fail if any missing objects are encountered */
MA_ALLOW_ANY, /* silently allow ALL missing objects */
MA_PRINT, /* print ALL missing objects in special section */
+ MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
};
static enum missing_action arg_missing_action;
else
putchar('\n');
- if (revs->verbose_header && get_cached_commit_buffer(commit, NULL)) {
+ if (revs->verbose_header) {
struct strbuf buf = STRBUF_INIT;
struct pretty_print_context ctx = {0};
ctx.abbrev = revs->abbrev;
static inline void finish_object__ma(struct object *obj)
{
+ /*
+ * Whether or not we try to dynamically fetch missing objects
+ * from the server, we currently DO NOT have the object. We
+ * can either print, allow (ignore), or conditionally allow
+ * (ignore) them.
+ */
switch (arg_missing_action) {
case MA_ERROR:
die("missing blob object '%s'", oid_to_hex(&obj->oid));
oidset_insert(&missing_objects, &obj->oid);
return;
+ case MA_ALLOW_PROMISOR:
+ if (is_promisor_object(&obj->oid))
+ return;
+ die("unexpected missing blob object '%s'",
+ oid_to_hex(&obj->oid));
+ return;
+
default:
BUG("unhandled missing_action");
return;
}
}
-static void finish_object(struct object *obj, const char *name, void *cb_data)
+static int finish_object(struct object *obj, const char *name, void *cb_data)
{
struct rev_list_info *info = cb_data;
- if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid))
+ if (obj->type == OBJ_BLOB && !has_object_file(&obj->oid)) {
finish_object__ma(obj);
+ return 1;
+ }
if (info->revs->verify_objects && !obj->parsed && obj->type != OBJ_COMMIT)
parse_object(&obj->oid);
+ return 0;
}
static void show_object(struct object *obj, const char *name, void *cb_data)
{
struct rev_list_info *info = cb_data;
- finish_object(obj, name, cb_data);
+ if (finish_object(obj, name, cb_data))
+ return;
display_progress(progress, ++progress_counter);
if (info->flags & REV_LIST_QUIET)
return;
if (!strcmp(value, "allow-any")) {
arg_missing_action = MA_ALLOW_ANY;
+ fetch_if_missing = 0;
return 1;
}
if (!strcmp(value, "print")) {
arg_missing_action = MA_PRINT;
+ fetch_if_missing = 0;
+ return 1;
+ }
+
+ if (!strcmp(value, "allow-promisor")) {
+ arg_missing_action = MA_ALLOW_PROMISOR;
+ fetch_if_missing = 0;
return 1;
}
init_revisions(&revs, prefix);
revs.abbrev = DEFAULT_ABBREV;
revs.commit_format = CMIT_FMT_UNSPECIFIED;
+
+ /*
+ * Scan the argument list before invoking setup_revisions(), so that we
+ * know if fetch_if_missing needs to be set to 0.
+ *
+ * "--exclude-promisor-objects" acts as a pre-filter on missing objects
+ * by not crossing the boundary from realized objects to promisor
+ * objects.
+ *
+	 * Let "--missing" conditionally set fetch_if_missing.
+ */
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (!strcmp(arg, "--exclude-promisor-objects")) {
+ fetch_if_missing = 0;
+ revs.exclude_promisor_objects = 1;
+ break;
+ }
+ }
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (skip_prefix(arg, "--missing=", &arg)) {
+ if (revs.exclude_promisor_objects)
+ die(_("cannot combine --exclude-promisor-objects and --missing"));
+ if (parse_missing_action_value(arg))
+ break;
+ }
+ }
+
argc = setup_revisions(argc, argv, &revs, NULL);
memset(&info, 0, sizeof(info));
continue;
}
if (!strcmp(arg, ("--no-" CL_ARG__FILTER))) {
- list_objects_filter_release(&filter_options);
+ list_objects_filter_set_no_filter(&filter_options);
continue;
}
if (!strcmp(arg, "--filter-print-omitted")) {
continue;
}
- if (skip_prefix(arg, "--missing=", &arg) &&
- parse_missing_action_value(arg))
- continue;
+ if (!strcmp(arg, "--exclude-promisor-objects"))
+ continue; /* already handled above */
+ if (skip_prefix(arg, "--missing=", &arg))
+ continue; /* already handled above */
usage(rev_list_usage);
static int try_difference(const char *arg)
{
char *dotdot;
- struct object_id oid;
- struct object_id end;
- const char *next;
- const char *this;
+ struct object_id start_oid;
+ struct object_id end_oid;
+ const char *end;
+ const char *start;
int symmetric;
static const char head_by_default[] = "HEAD";
if (!(dotdot = strstr(arg, "..")))
return 0;
- next = dotdot + 2;
- this = arg;
- symmetric = (*next == '.');
+ end = dotdot + 2;
+ start = arg;
+ symmetric = (*end == '.');
*dotdot = 0;
- next += symmetric;
+ end += symmetric;
- if (!*next)
- next = head_by_default;
+ if (!*end)
+ end = head_by_default;
if (dotdot == arg)
- this = head_by_default;
+ start = head_by_default;
- if (this == head_by_default && next == head_by_default &&
+ if (start == head_by_default && end == head_by_default &&
!symmetric) {
/*
* Just ".."? That is not a range but the
return 0;
}
- if (!get_oid_committish(this, &oid) && !get_oid_committish(next, &end)) {
- show_rev(NORMAL, &end, next);
- show_rev(symmetric ? NORMAL : REVERSED, &oid, this);
+ if (!get_oid_committish(start, &start_oid) && !get_oid_committish(end, &end_oid)) {
+ show_rev(NORMAL, &end_oid, end);
+ show_rev(symmetric ? NORMAL : REVERSED, &start_oid, start);
if (symmetric) {
struct commit_list *exclude;
struct commit *a, *b;
- a = lookup_commit_reference(&oid);
- b = lookup_commit_reference(&end);
+ a = lookup_commit_reference(&start_oid);
+ b = lookup_commit_reference(&end_oid);
exclude = get_merge_bases(a, b);
while (exclude) {
struct commit *commit = pop_commit(&exclude);
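The rename in try_difference() makes the "A..B"/"A...B" parsing read naturally: a start and an end, either of which defaults to HEAD when omitted. A standalone sketch of the same in-place tokenizing (no object lookup, just the string handling):

    #include <stdio.h>
    #include <string.h>

    /* Sketch of try_difference()'s tokenizing: split "A..B" or
     * "A...B" in place, defaulting missing sides to HEAD. */
    static int split_range(char *arg, const char **start,
                           const char **end, int *symmetric)
    {
        static const char head_by_default[] = "HEAD";
        char *dotdot = strstr(arg, "..");

        if (!dotdot)
            return 0; /* not a range */
        *end = dotdot + 2;
        *start = arg;
        *symmetric = (**end == '.');
        *dotdot = '\0';     /* terminate the start side */
        *end += *symmetric; /* skip the third dot of "..." */
        if (!**end)
            *end = head_by_default;
        if (dotdot == arg)
            *start = head_by_default;
        return 1;
    }

    int main(void)
    {
        char arg[] = "topic...";
        const char *start, *end;
        int symmetric;

        if (split_range(arg, &start, &end, &symmetric))
            printf("%s %s %s\n", start, end,
                   symmetric ? "(symmetric)" : "");
        return 0;
    }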
PARSE_OPT_SHELL_EVAL);
strbuf_addstr(&parsed, " --");
- sq_quote_argv(&parsed, argv, 0);
+ sq_quote_argv(&parsed, argv);
puts(parsed.buf);
return 0;
}
struct strbuf buf = STRBUF_INIT;
if (argc)
- sq_quote_argv(&buf, argv, 0);
+ sq_quote_argv(&buf, argv);
printf("%s\n", buf.buf);
strbuf_release(&buf);
if (isatty(0))
opts.edit = 1;
opts.action = REPLAY_REVERT;
- git_config(git_default_config, NULL);
+ sequencer_init_config(&opts);
res = run_sequencer(argc, argv, &opts);
if (res < 0)
die(_("revert failed"));
int res;
opts.action = REPLAY_PICK;
- git_config(git_default_config, NULL);
+ sequencer_init_config(&opts);
res = run_sequencer(argc, argv, &opts);
if (res < 0)
die(_("cherry-pick failed"));
OPT__DRY_RUN(&show_only, N_("dry run")),
OPT__QUIET(&quiet, N_("do not list removed files")),
OPT_BOOL( 0 , "cached", &index_only, N_("only remove from the index")),
- OPT__FORCE(&force, N_("override the up-to-date check")),
+ OPT__FORCE(&force, N_("override the up-to-date check"), PARSE_OPT_NOCOMPLETE),
OPT_BOOL('r', NULL, &recursive, N_("allow recursive removal")),
OPT_BOOL( 0 , "ignore-unmatch", &ignore_unmatch,
N_("exit with a zero status even if nothing matched")),
stage_updated_gitmodules(&the_index);
}
- if (active_cache_changed) {
- if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
- die(_("Unable to write new index file"));
- }
+ if (write_locked_index(&the_index, &lock_file,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
+ die(_("Unable to write new index file"));
return 0;
}
#define OPT_QUIET (1 << 0)
#define OPT_CACHED (1 << 1)
#define OPT_RECURSIVE (1 << 2)
+#define OPT_FORCE (1 << 3)
typedef void (*each_submodule_fn)(const struct cache_entry *list_item,
void *cb_data);
return ret;
}
+static int print_default_remote(int argc, const char **argv, const char *prefix)
+{
+ const char *remote;
+
+ if (argc != 1)
+ die(_("submodule--helper print-default-remote takes no arguments"));
+
+ remote = get_default_remote();
+ if (remote)
+ printf("%s\n", remote);
+
+ return 0;
+}
+
static int starts_with_dot_slash(const char *str)
{
return str[0] == '.' && is_dir_sep(str[1]);
*list = active_modules;
}
+static char *get_up_path(const char *path)
+{
+ int i;
+ struct strbuf sb = STRBUF_INIT;
+
+ for (i = count_slashes(path); i; i--)
+ strbuf_addstr(&sb, "../");
+
+ /*
+	 * Check whether 'path' ends with a slash, so that
+	 * dir/sub_dir and dir/sub_dir/ produce the same output.
+ */
+ if (!is_dir_sep(path[strlen(path) - 1]))
+ strbuf_addstr(&sb, "../");
+
+ return strbuf_detach(&sb, NULL);
+}
+
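get_up_path() turns a submodule path into the "../" prefix that climbs back to the superproject: one "../" per slash, plus one more when the path lacks a trailing slash. A self-contained sketch of the same computation with plain libc:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Sketch of get_up_path(): emit one "../" per path component. */
    static char *get_up_path(const char *path)
    {
        size_t len = strlen(path);
        size_t i, slashes = 0;
        char *out, *p;

        for (i = 0; i < len; i++)
            if (path[i] == '/')
                slashes++;
        /* "dir/sub" and "dir/sub/" must give the same answer. */
        if (len && path[len - 1] != '/')
            slashes++;

        p = out = malloc(3 * slashes + 1);
        for (i = 0; i < slashes; i++, p += 3)
            memcpy(p, "../", 3);
        *p = '\0';
        return out;
    }

    int main(void)
    {
        char *up = get_up_path("dir/sub_dir");

        printf("%s\n", up); /* ../../ */
        free(up);
        return 0;
    }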
static int module_list(int argc, const char **argv, const char *prefix)
{
int i;
return 0;
}
+struct sync_cb {
+ const char *prefix;
+ unsigned int flags;
+};
+
+#define SYNC_CB_INIT { NULL, 0 }
+
+static void sync_submodule(const char *path, const char *prefix,
+ unsigned int flags)
+{
+ const struct submodule *sub;
+ char *remote_key = NULL;
+ char *sub_origin_url, *super_config_url, *displaypath;
+ struct strbuf sb = STRBUF_INIT;
+ struct child_process cp = CHILD_PROCESS_INIT;
+ char *sub_config_path = NULL;
+
+ if (!is_submodule_active(the_repository, path))
+ return;
+
+ sub = submodule_from_path(&null_oid, path);
+
+ if (sub && sub->url) {
+ if (starts_with_dot_dot_slash(sub->url) ||
+ starts_with_dot_slash(sub->url)) {
+ char *remote_url, *up_path;
+ char *remote = get_default_remote();
+ strbuf_addf(&sb, "remote.%s.url", remote);
+
+ if (git_config_get_string(sb.buf, &remote_url))
+ remote_url = xgetcwd();
+
+ up_path = get_up_path(path);
+ sub_origin_url = relative_url(remote_url, sub->url, up_path);
+ super_config_url = relative_url(remote_url, sub->url, NULL);
+
+ free(remote);
+ free(up_path);
+ free(remote_url);
+ } else {
+ sub_origin_url = xstrdup(sub->url);
+ super_config_url = xstrdup(sub->url);
+ }
+ } else {
+ sub_origin_url = xstrdup("");
+ super_config_url = xstrdup("");
+ }
+
+ displaypath = get_submodule_displaypath(path, prefix);
+
+ if (!(flags & OPT_QUIET))
+ printf(_("Synchronizing submodule url for '%s'\n"),
+ displaypath);
+
+ strbuf_reset(&sb);
+ strbuf_addf(&sb, "submodule.%s.url", sub->name);
+ if (git_config_set_gently(sb.buf, super_config_url))
+ die(_("failed to register url for submodule path '%s'"),
+ displaypath);
+
+ if (!is_submodule_populated_gently(path, NULL))
+ goto cleanup;
+
+ prepare_submodule_repo_env(&cp.env_array);
+ cp.git_cmd = 1;
+ cp.dir = path;
+ argv_array_pushl(&cp.args, "submodule--helper",
+ "print-default-remote", NULL);
+
+ strbuf_reset(&sb);
+ if (capture_command(&cp, &sb, 0))
+ die(_("failed to get the default remote for submodule '%s'"),
+ path);
+
+ strbuf_strip_suffix(&sb, "\n");
+ remote_key = xstrfmt("remote.%s.url", sb.buf);
+
+ strbuf_reset(&sb);
+ submodule_to_gitdir(&sb, path);
+ strbuf_addstr(&sb, "/config");
+
+ if (git_config_set_in_file_gently(sb.buf, remote_key, sub_origin_url))
+ die(_("failed to update remote for submodule '%s'"),
+ path);
+
+ if (flags & OPT_RECURSIVE) {
+ struct child_process cpr = CHILD_PROCESS_INIT;
+
+ cpr.git_cmd = 1;
+ cpr.dir = path;
+ prepare_submodule_repo_env(&cpr.env_array);
+
+ argv_array_push(&cpr.args, "--super-prefix");
+ argv_array_pushf(&cpr.args, "%s/", displaypath);
+ argv_array_pushl(&cpr.args, "submodule--helper", "sync",
+ "--recursive", NULL);
+
+ if (flags & OPT_QUIET)
+ argv_array_push(&cpr.args, "--quiet");
+
+ if (run_command(&cpr))
+ die(_("failed to recurse into submodule '%s'"),
+ path);
+ }
+
+cleanup:
+ free(super_config_url);
+ free(sub_origin_url);
+ strbuf_release(&sb);
+ free(remote_key);
+ free(displaypath);
+ free(sub_config_path);
+}
+
+static void sync_submodule_cb(const struct cache_entry *list_item, void *cb_data)
+{
+ struct sync_cb *info = cb_data;
+ sync_submodule(list_item->name, info->prefix, info->flags);
+}
+
+static int module_sync(int argc, const char **argv, const char *prefix)
+{
+ struct sync_cb info = SYNC_CB_INIT;
+ struct pathspec pathspec;
+ struct module_list list = MODULE_LIST_INIT;
+ int quiet = 0;
+ int recursive = 0;
+
+ struct option module_sync_options[] = {
+ OPT__QUIET(&quiet, N_("Suppress output of synchronizing submodule url")),
+ OPT_BOOL(0, "recursive", &recursive,
+ N_("Recurse into nested submodules")),
+ OPT_END()
+ };
+
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule--helper sync [--quiet] [--recursive] [<path>]"),
+ NULL
+ };
+
+ argc = parse_options(argc, argv, prefix, module_sync_options,
+ git_submodule_helper_usage, 0);
+
+ if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
+ return 1;
+
+ info.prefix = prefix;
+ if (quiet)
+ info.flags |= OPT_QUIET;
+ if (recursive)
+ info.flags |= OPT_RECURSIVE;
+
+ for_each_listed_submodule(&list, sync_submodule_cb, &info);
+
+ return 0;
+}
+
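sync_submodule() shells back into "git submodule--helper print-default-remote" inside the submodule, captures one line of output, and strips the trailing newline before building the "remote.<name>.url" key. A hedged standalone sketch of that capture-and-strip step using popen (Git itself uses its own capture_command(), not popen):

    #include <stdio.h>
    #include <string.h>

    /* Sketch: run a command, capture its first output line, and
     * strip the trailing newline, like capture_command() plus
     * strbuf_strip_suffix(&sb, "\n") in the hunk above. */
    static int capture_line(const char *cmd, char *buf, size_t len)
    {
        FILE *fp = popen(cmd, "r");
        size_t n;

        if (!fp)
            return -1;
        if (!fgets(buf, (int)len, fp)) {
            pclose(fp);
            return -1;
        }
        n = strlen(buf);
        if (n && buf[n - 1] == '\n')
            buf[n - 1] = '\0';
        return pclose(fp) ? -1 : 0;
    }

    int main(void)
    {
        char remote[256];

        if (!capture_line("echo origin", remote, sizeof(remote)))
            printf("remote.%s.url\n", remote);
        return 0;
    }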
+struct deinit_cb {
+ const char *prefix;
+ unsigned int flags;
+};
+#define DEINIT_CB_INIT { NULL, 0 }
+
+static void deinit_submodule(const char *path, const char *prefix,
+ unsigned int flags)
+{
+ const struct submodule *sub;
+ char *displaypath = NULL;
+ struct child_process cp_config = CHILD_PROCESS_INIT;
+ struct strbuf sb_config = STRBUF_INIT;
+ char *sub_git_dir = xstrfmt("%s/.git", path);
+
+ sub = submodule_from_path(&null_oid, path);
+
+ if (!sub || !sub->name)
+ goto cleanup;
+
+ displaypath = get_submodule_displaypath(path, prefix);
+
+ /* remove the submodule work tree (unless the user already did it) */
+ if (is_directory(path)) {
+ struct strbuf sb_rm = STRBUF_INIT;
+ const char *format;
+
+ /*
+ * protect submodules containing a .git directory
+ * NEEDSWORK: instead of dying, automatically call
+ * absorbgitdirs and (possibly) warn.
+ */
+ if (is_directory(sub_git_dir))
+ die(_("Submodule work tree '%s' contains a .git "
+ "directory (use 'rm -rf' if you really want "
+ "to remove it including all of its history)"),
+ displaypath);
+
+ if (!(flags & OPT_FORCE)) {
+ struct child_process cp_rm = CHILD_PROCESS_INIT;
+ cp_rm.git_cmd = 1;
+ argv_array_pushl(&cp_rm.args, "rm", "-qn",
+ path, NULL);
+
+ if (run_command(&cp_rm))
+ die(_("Submodule work tree '%s' contains local "
+ "modifications; use '-f' to discard them"),
+ displaypath);
+ }
+
+ strbuf_addstr(&sb_rm, path);
+
+ if (!remove_dir_recursively(&sb_rm, 0))
+ format = _("Cleared directory '%s'\n");
+ else
+ format = _("Could not remove submodule work tree '%s'\n");
+
+ if (!(flags & OPT_QUIET))
+ printf(format, displaypath);
+
+ strbuf_release(&sb_rm);
+ }
+
+ if (mkdir(path, 0777))
+ printf(_("could not create empty submodule directory %s"),
+ displaypath);
+
+ cp_config.git_cmd = 1;
+ argv_array_pushl(&cp_config.args, "config", "--get-regexp", NULL);
+ argv_array_pushf(&cp_config.args, "submodule.%s\\.", sub->name);
+
+ /* remove the .git/config entries (unless the user already did it) */
+ if (!capture_command(&cp_config, &sb_config, 0) && sb_config.len) {
+ char *sub_key = xstrfmt("submodule.%s", sub->name);
+ /*
+ * remove the whole section so we have a clean state when
+ * the user later decides to init this submodule again
+ */
+ git_config_rename_section_in_file(NULL, sub_key, NULL);
+ if (!(flags & OPT_QUIET))
+ printf(_("Submodule '%s' (%s) unregistered for path '%s'\n"),
+ sub->name, sub->url, displaypath);
+ free(sub_key);
+ }
+
+cleanup:
+ free(displaypath);
+ free(sub_git_dir);
+ strbuf_release(&sb_config);
+}
+
+static void deinit_submodule_cb(const struct cache_entry *list_item,
+ void *cb_data)
+{
+ struct deinit_cb *info = cb_data;
+ deinit_submodule(list_item->name, info->prefix, info->flags);
+}
+
+static int module_deinit(int argc, const char **argv, const char *prefix)
+{
+ struct deinit_cb info = DEINIT_CB_INIT;
+ struct pathspec pathspec;
+ struct module_list list = MODULE_LIST_INIT;
+ int quiet = 0;
+ int force = 0;
+ int all = 0;
+
+ struct option module_deinit_options[] = {
+ OPT__QUIET(&quiet, N_("Suppress submodule status output")),
+ OPT__FORCE(&force, N_("Remove submodule working trees even if they contain local changes"), 0),
+ OPT_BOOL(0, "all", &all, N_("Unregister all submodules")),
+ OPT_END()
+ };
+
+ const char *const git_submodule_helper_usage[] = {
+ N_("git submodule deinit [--quiet] [-f | --force] [--all | [--] [<path>...]]"),
+ NULL
+ };
+
+ argc = parse_options(argc, argv, prefix, module_deinit_options,
+ git_submodule_helper_usage, 0);
+
+ if (all && argc) {
+ error("pathspec and --all are incompatible");
+ usage_with_options(git_submodule_helper_usage,
+ module_deinit_options);
+ }
+
+ if (!argc && !all)
+ die(_("Use '--all' if you really want to deinitialize all submodules"));
+
+ if (module_list_compute(argc, argv, prefix, &pathspec, &list) < 0)
+ BUG("module_list_compute should not choke on empty pathspec");
+
+ info.prefix = prefix;
+ if (quiet)
+ info.flags |= OPT_QUIET;
+ if (force)
+ info.flags |= OPT_FORCE;
+
+ for_each_listed_submodule(&list, deinit_submodule_cb, &info);
+
+ return 0;
+}
+
static int clone_submodule(const char *path, const char *gitdir, const char *url,
const char *depth, struct string_list *reference,
int quiet, int progress)
{"resolve-relative-url-test", resolve_relative_url_test, 0},
{"init", module_init, SUPPORT_SUPER_PREFIX},
{"status", module_status, SUPPORT_SUPER_PREFIX},
+ {"print-default-remote", print_default_remote, 0},
+ {"sync", module_sync, SUPPORT_SUPER_PREFIX},
+ {"deinit", module_deinit, 0},
{"remote-branch", resolve_remote_submodule_branch, 0},
{"push-check", push_check, 0},
{"absorb-git-dirs", absorb_git_dirs, SUPPORT_SUPER_PREFIX},
{
if (sign && do_sign(buf) < 0)
return error(_("unable to sign the tag"));
- if (write_sha1_file(buf->buf, buf->len, tag_type, result->hash) < 0)
+ if (write_object_file(buf->buf, buf->len, tag_type, result) < 0)
return error(_("unable to write tag file"));
return 0;
}
struct create_tag_options {
unsigned int message_given:1;
+ unsigned int use_editor:1;
unsigned int sign;
enum {
CLEANUP_NONE,
"tag %s\n"
"tagger %s\n\n",
oid_to_hex(object),
- typename(type),
+ type_name(type),
tag,
git_committer_info(IDENT_STRICT));
- if (!opt->message_given) {
+ if (!opt->message_given || opt->use_editor) {
int fd;
/* write the template message before editing: */
if (fd < 0)
die_errno(_("could not create file '%s'"), path);
- if (!is_null_oid(prev)) {
+ if (opt->message_given) {
+ write_or_die(fd, buf->buf, buf->len);
+ strbuf_reset(buf);
+ } else if (!is_null_oid(prev)) {
write_tag_body(fd, prev);
} else {
struct strbuf buf = STRBUF_INIT;
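With the new --edit flag, the editor is launched when no -m/-F message was given or when the user explicitly asked for it; when a message was given, it is written out first so the editor opens seeded with it. A tiny sketch of that decision table (illustrative, not Git's actual code):

    #include <stdio.h>

    /* Sketch of the create_tag() decision added above: when is the
     * editor launched, and what seeds the template? */
    static void plan_tag_message(int message_given, int use_editor)
    {
        if (!message_given || use_editor)
            printf("launch editor, template: %s\n",
                   message_given ? "the -m/-F message"
                                 : "previous tag body or help text");
        else
            printf("use the -m/-F message as-is\n");
    }

    int main(void)
    {
        plan_tag_message(1, 0); /* -m only: no editor */
        plan_tag_message(1, 1); /* -m plus --edit: editor, seeded */
        plan_tag_message(0, 0); /* neither: editor */
        return 0;
    }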
static struct ref_sorting *sorting = NULL, **sorting_tail = &sorting;
struct ref_format format = REF_FORMAT_INIT;
int icase = 0;
+ int edit_flag = 0;
struct option options[] = {
OPT_CMDMODE('l', "list", &cmdmode, N_("list tag names"), 'l'),
{ OPTION_INTEGER, 'n', NULL, &filter.lines, N_("n"),
OPT_CALLBACK('m', "message", &msg, N_("message"),
N_("tag message"), parse_msg_arg),
OPT_FILENAME('F', "file", &msgfile, N_("read message from file")),
+ OPT_BOOL('e', "edit", &edit_flag, N_("force edit of tag message")),
OPT_BOOL('s', "sign", &opt.sign, N_("annotated and GPG-signed tag")),
OPT_STRING(0, "cleanup", &cleanup_arg, N_("mode"),
N_("how to strip spaces and #comments from message")),
OPT_STRING('u', "local-user", &keyid, N_("key-id"),
N_("use another key to sign the tag")),
- OPT__FORCE(&force, N_("replace the tag if exists")),
+ OPT__FORCE(&force, N_("replace the tag if exists"), 0),
OPT_BOOL(0, "create-reflog", &create_reflog, N_("create a reflog")),
OPT_GROUP(N_("Tag listing options")),
die(_("tag '%s' already exists"), tag);
opt.message_given = msg.given || msgfile;
+ opt.use_editor = edit_flag;
if (!cleanup_arg || !strcmp(cleanup_arg, "strip"))
opt.cleanup_mode = CLEANUP_ALL;
static unsigned int offset, len;
static off_t consumed_bytes;
static off_t max_input_size;
-static git_SHA_CTX ctx;
+static git_hash_ctx ctx;
static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
/*
if (min > sizeof(buffer))
die("cannot fill %d bytes", min);
if (offset) {
- git_SHA1_Update(&ctx, buffer, offset);
+ the_hash_algo->update_fn(&ctx, buffer, offset);
memmove(buffer, buffer + offset, len);
offset = 0;
}
struct object *obj;
};
+/* Remember to update object flag allocation in object.h */
#define FLAG_OPEN (1u<<20)
#define FLAG_WRITTEN (1u<<21)
{
struct object_id oid;
- if (write_sha1_file(obj_buf->buffer, obj_buf->size, typename(obj->type), oid.hash) < 0)
+ if (write_object_file(obj_buf->buffer, obj_buf->size,
+ type_name(obj->type), &oid) < 0)
die("failed to write object %s", oid_to_hex(&obj->oid));
obj->flags |= FLAG_WRITTEN;
}
void *buf, unsigned long size)
{
if (!strict) {
- if (write_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash) < 0)
+ if (write_object_file(buf, size, type_name(type),
+ &obj_list[nr].oid) < 0)
die("failed to write object");
added_object(nr, type, buf, size);
free(buf);
obj_list[nr].obj = NULL;
} else if (type == OBJ_BLOB) {
struct blob *blob;
- if (write_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash) < 0)
+ if (write_object_file(buf, size, type_name(type),
+ &obj_list[nr].oid) < 0)
die("failed to write object");
added_object(nr, type, buf, size);
free(buf);
} else {
struct object *obj;
int eaten;
- hash_sha1_file(buf, size, typename(type), obj_list[nr].oid.hash);
+ hash_object_file(buf, size, type_name(type), &obj_list[nr].oid);
added_object(nr, type, buf, size);
obj = parse_object_buffer(&obj_list[nr].oid, type, size, buf,
&eaten);
if (!obj)
- die("invalid %s", typename(type));
+ die("invalid %s", type_name(type));
add_object_buffer(obj, buf, size);
obj->flags |= FLAG_OPEN;
obj_list[nr].obj = obj;
struct object_id base_oid;
if (type == OBJ_REF_DELTA) {
- hashcpy(base_oid.hash, fill(GIT_SHA1_RAWSZ));
- use(GIT_SHA1_RAWSZ);
+ hashcpy(base_oid.hash, fill(the_hash_algo->rawsz));
+ use(the_hash_algo->rawsz);
delta_data = get_data(delta_size);
if (dry_run || !delta_data) {
free(delta_data);
/* We don't take any non-flag arguments now.. Maybe some day */
usage(unpack_usage);
}
- git_SHA1_Init(&ctx);
+ the_hash_algo->init_fn(&ctx);
unpack_all();
- git_SHA1_Update(&ctx, buffer, offset);
- git_SHA1_Final(oid.hash, &ctx);
+ the_hash_algo->update_fn(&ctx, buffer, offset);
+ the_hash_algo->final_fn(oid.hash, &ctx);
if (strict)
write_rest();
- if (hashcmp(fill(GIT_SHA1_RAWSZ), oid.hash))
+ if (hashcmp(fill(the_hash_algo->rawsz), oid.hash))
die("final sha1 did not match");
- use(GIT_SHA1_RAWSZ);
+ use(the_hash_algo->rawsz);
/* Write the last part of the buffer to stdout */
while (len) {
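These unpack-objects hunks stop naming SHA-1 directly and go through the_hash_algo, a table of function pointers (init_fn/update_fn/final_fn plus rawsz) that readies the code for other hash algorithms. A self-contained sketch of the vtable shape, with a toy byte-sum standing in for a real hash purely to keep the example runnable:

    #include <stdio.h>
    #include <stddef.h>

    /* Sketch of the the_hash_algo indirection; the "algorithm" is
     * a toy byte sum, only to show the vtable shape. */
    typedef struct { unsigned sum; } toy_ctx;

    struct git_hash_algo {
        size_t rawsz; /* size of a binary hash */
        void (*init_fn)(toy_ctx *);
        void (*update_fn)(toy_ctx *, const void *, size_t);
        void (*final_fn)(unsigned char *, toy_ctx *);
    };

    static void toy_init(toy_ctx *c) { c->sum = 0; }
    static void toy_update(toy_ctx *c, const void *d, size_t n)
    {
        const unsigned char *p = d;
        while (n--)
            c->sum += *p++;
    }
    static void toy_final(unsigned char *out, toy_ctx *c)
    {
        out[0] = (unsigned char)c->sum;
    }

    static const struct git_hash_algo toy_algo = {
        1, toy_init, toy_update, toy_final
    };
    static const struct git_hash_algo *the_hash_algo = &toy_algo;

    int main(void)
    {
        toy_ctx ctx;
        unsigned char result[1];

        /* Callers never name the algorithm directly: */
        the_hash_algo->init_fn(&ctx);
        the_hash_algo->update_fn(&ctx, "abc", 3);
        the_hash_algo->final_fn(result, &ctx);
        printf("rawsz=%zu digest=%02x\n", the_hash_algo->rawsz, result[0]);
        return 0;
    }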
{
int force = 0;
struct option options[] = {
- OPT__FORCE(&force, N_("update the info files from scratch")),
+ OPT__FORCE(&force, N_("update the info files from scratch"), 0),
OPT_END()
};
return error("%s: unable to read file.", name);
if (type != OBJ_COMMIT)
return error("%s: cannot verify a non-commit object of type %s.",
- name, typename(type));
+ name, type_name(type));
ret = run_gpg_verify(&oid, buf, size, flags);
#include "worktree.h"
static const char * const worktree_usage[] = {
- N_("git worktree add [<options>] <path> [<branch>]"),
+ N_("git worktree add [<options>] <path> [<commit-ish>]"),
N_("git worktree list [<options>]"),
N_("git worktree lock [<options>] <path>"),
+ N_("git worktree move <worktree> <new-path>"),
N_("git worktree prune [<options>]"),
+ N_("git worktree remove [<options>] <worktree>"),
N_("git worktree unlock <path>"),
NULL
};
* Hook failure does not warrant worktree deletion, so run hook after
* is_junk is cleared, but do return appropriate code when hook fails.
*/
- if (!ret && opts->checkout)
- ret = run_hook_le(NULL, "post-checkout", oid_to_hex(&null_oid),
- oid_to_hex(&commit->object.oid), "1", NULL);
+ if (!ret && opts->checkout) {
+ const char *hook = find_hook("post-checkout");
+ if (hook) {
+ const char *env[] = { "GIT_DIR", "GIT_WORK_TREE", NULL };
+ cp.git_cmd = 0;
+ cp.no_stdin = 1;
+ cp.stdout_to_stderr = 1;
+ cp.dir = path;
+ cp.env = env;
+ cp.argv = NULL;
+ argv_array_pushl(&cp.args, absolute_path(hook),
+ oid_to_hex(&null_oid),
+ oid_to_hex(&commit->object.oid),
+ "1", NULL);
+ ret = run_command(&cp);
+ }
+ }
argv_array_clear(&child_env);
strbuf_release(&sb);
const char *branch;
const char *opt_track = NULL;
struct option options[] = {
- OPT__FORCE(&opts.force, N_("checkout <branch> even if already checked out in other worktree")),
+ OPT__FORCE(&opts.force,
+ N_("checkout <branch> even if already checked out in other worktree"),
+ PARSE_OPT_NOCOMPLETE),
OPT_STRING('b', NULL, &opts.new_branch, N_("branch"),
N_("create a new branch")),
OPT_STRING('B', NULL, &new_branch_force, N_("branch"),
return ret;
}
+static void validate_no_submodules(const struct worktree *wt)
+{
+ struct index_state istate = { NULL };
+ int i, found_submodules = 0;
+
+ if (read_index_from(&istate, worktree_git_path(wt, "index"),
+ get_worktree_git_dir(wt)) > 0) {
+ for (i = 0; i < istate.cache_nr; i++) {
+ struct cache_entry *ce = istate.cache[i];
+
+ if (S_ISGITLINK(ce->ce_mode)) {
+ found_submodules = 1;
+ break;
+ }
+ }
+ }
+ discard_index(&istate);
+
+ if (found_submodules)
+ die(_("working trees containing submodules cannot be moved or removed"));
+}
+
+static int move_worktree(int ac, const char **av, const char *prefix)
+{
+ struct option options[] = {
+ OPT_END()
+ };
+ struct worktree **worktrees, *wt;
+ struct strbuf dst = STRBUF_INIT;
+ struct strbuf errmsg = STRBUF_INIT;
+ const char *reason;
+ char *path;
+
+ ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
+ if (ac != 2)
+ usage_with_options(worktree_usage, options);
+
+ path = prefix_filename(prefix, av[1]);
+ strbuf_addstr(&dst, path);
+ free(path);
+
+ worktrees = get_worktrees(0);
+ wt = find_worktree(worktrees, prefix, av[0]);
+ if (!wt)
+ die(_("'%s' is not a working tree"), av[0]);
+ if (is_main_worktree(wt))
+ die(_("'%s' is a main working tree"), av[0]);
+ if (is_directory(dst.buf)) {
+ const char *sep = find_last_dir_sep(wt->path);
+
+ if (!sep)
+ die(_("could not figure out destination name from '%s'"),
+ wt->path);
+ strbuf_trim_trailing_dir_sep(&dst);
+ strbuf_addstr(&dst, sep);
+ }
+ if (file_exists(dst.buf))
+ die(_("target '%s' already exists"), dst.buf);
+
+ validate_no_submodules(wt);
+
+ reason = is_worktree_locked(wt);
+ if (reason) {
+ if (*reason)
+ die(_("cannot move a locked working tree, lock reason: %s"),
+ reason);
+ die(_("cannot move a locked working tree"));
+ }
+ if (validate_worktree(wt, &errmsg, 0))
+ die(_("validation failed, cannot move working tree: %s"),
+ errmsg.buf);
+ strbuf_release(&errmsg);
+
+ if (rename(wt->path, dst.buf) == -1)
+ die_errno(_("failed to move '%s' to '%s'"), wt->path, dst.buf);
+
+ update_worktree_location(wt, dst.buf);
+
+ strbuf_release(&dst);
+ free_worktrees(worktrees);
+ return 0;
+}
+
+/*
+ * Note, "git status --porcelain" is used to determine if it's safe to
+ * delete a whole worktree. "git status" does not ignore user
+ * configuration, so if a normal "git status" shows "clean" for the
+ * user, then it's ok to remove it.
+ *
+ * This assumption may be a bad one. We may want to ignore
+ * (potentially bad) user settings and only delete a worktree when
+ * it's absolutely safe to do so from _our_ point of view because we
+ * know better.
+ */
+static void check_clean_worktree(struct worktree *wt,
+ const char *original_path)
+{
+ struct argv_array child_env = ARGV_ARRAY_INIT;
+ struct child_process cp;
+ char buf[1];
+ int ret;
+
+ /*
+ * Until we sort this out, all submodules are "dirty" and
+ * will abort this function.
+ */
+ validate_no_submodules(wt);
+
+ argv_array_pushf(&child_env, "%s=%s/.git",
+ GIT_DIR_ENVIRONMENT, wt->path);
+ argv_array_pushf(&child_env, "%s=%s",
+ GIT_WORK_TREE_ENVIRONMENT, wt->path);
+ memset(&cp, 0, sizeof(cp));
+ argv_array_pushl(&cp.args, "status",
+ "--porcelain", "--ignore-submodules=none",
+ NULL);
+ cp.env = child_env.argv;
+ cp.git_cmd = 1;
+ cp.dir = wt->path;
+ cp.out = -1;
+ ret = start_command(&cp);
+ if (ret)
+ die_errno(_("failed to run 'git status' on '%s'"),
+ original_path);
+ ret = xread(cp.out, buf, sizeof(buf));
+ if (ret)
+ die(_("'%s' is dirty, use --force to delete it"),
+ original_path);
+ close(cp.out);
+ ret = finish_command(&cp);
+ if (ret)
+ die_errno(_("failed to run 'git status' on '%s', code %d"),
+ original_path, ret);
+}
+
+static int delete_git_work_tree(struct worktree *wt)
+{
+ struct strbuf sb = STRBUF_INIT;
+ int ret = 0;
+
+ strbuf_addstr(&sb, wt->path);
+ if (remove_dir_recursively(&sb, 0)) {
+ error_errno(_("failed to delete '%s'"), sb.buf);
+ ret = -1;
+ }
+ strbuf_release(&sb);
+ return ret;
+}
+
+static int delete_git_dir(struct worktree *wt)
+{
+ struct strbuf sb = STRBUF_INIT;
+ int ret = 0;
+
+ strbuf_addstr(&sb, git_common_path("worktrees/%s", wt->id));
+ if (remove_dir_recursively(&sb, 0)) {
+ error_errno(_("failed to delete '%s'"), sb.buf);
+ ret = -1;
+ }
+ strbuf_release(&sb);
+ return ret;
+}
+
+static int remove_worktree(int ac, const char **av, const char *prefix)
+{
+ int force = 0;
+ struct option options[] = {
+ OPT_BOOL(0, "force", &force,
+ N_("force removing even if the worktree is dirty")),
+ OPT_END()
+ };
+ struct worktree **worktrees, *wt;
+ struct strbuf errmsg = STRBUF_INIT;
+ const char *reason;
+ int ret = 0;
+
+ ac = parse_options(ac, av, prefix, options, worktree_usage, 0);
+ if (ac != 1)
+ usage_with_options(worktree_usage, options);
+
+ worktrees = get_worktrees(0);
+ wt = find_worktree(worktrees, prefix, av[0]);
+ if (!wt)
+ die(_("'%s' is not a working tree"), av[0]);
+ if (is_main_worktree(wt))
+ die(_("'%s' is a main working tree"), av[0]);
+ reason = is_worktree_locked(wt);
+ if (reason) {
+ if (*reason)
+ die(_("cannot remove a locked working tree, lock reason: %s"),
+ reason);
+ die(_("cannot remove a locked working tree"));
+ }
+ if (validate_worktree(wt, &errmsg, WT_VALIDATE_WORKTREE_MISSING_OK))
+ die(_("validation failed, cannot remove working tree: %s"),
+ errmsg.buf);
+ strbuf_release(&errmsg);
+
+ if (file_exists(wt->path)) {
+ if (!force)
+ check_clean_worktree(wt, av[0]);
+
+ ret |= delete_git_work_tree(wt);
+ }
+ /*
+	 * continue on even if ret is non-zero; there's no going back
+	 * from here.
+ */
+ ret |= delete_git_dir(wt);
+
+ free_worktrees(worktrees);
+ return ret;
+}
+
int cmd_worktree(int ac, const char **av, const char *prefix)
{
struct option options[] = {
return lock_worktree(ac - 1, av + 1, prefix);
if (!strcmp(av[1], "unlock"))
return unlock_worktree(ac - 1, av + 1, prefix);
+ if (!strcmp(av[1], "move"))
+ return move_worktree(ac - 1, av + 1, prefix);
+ if (!strcmp(av[1], "remove"))
+ return remove_worktree(ac - 1, av + 1, prefix);
usage_with_options(worktree_usage, options);
}
unsigned plugged:1;
char *pack_tmp_name;
- struct sha1file *f;
+ struct hashfile *f;
off_t offset;
struct pack_idx_option pack_idx_opts;
unlink(state->pack_tmp_name);
goto clear_exit;
} else if (state->nr_written == 1) {
- sha1close(state->f, oid.hash, CSUM_FSYNC);
+ hashclose(state->f, oid.hash, CSUM_FSYNC);
} else {
- int fd = sha1close(state->f, oid.hash, 0);
+ int fd = hashclose(state->f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name,
state->nr_written, oid.hash,
state->offset);
* with a new pack.
*/
static int stream_to_pack(struct bulk_checkin_state *state,
- git_SHA_CTX *ctx, off_t *already_hashed_to,
+ git_hash_ctx *ctx, off_t *already_hashed_to,
int fd, size_t size, enum object_type type,
const char *path, unsigned flags)
{
if (rsize < hsize)
hsize = rsize;
if (hsize)
- git_SHA1_Update(ctx, ibuf, hsize);
+ the_hash_algo->update_fn(ctx, ibuf, hsize);
*already_hashed_to = offset;
}
s.next_in = ibuf;
return -1;
}
- sha1write(state->f, obuf, written);
+ hashwrite(state->f, obuf, written);
state->offset += written;
}
s.next_out = obuf;
unsigned flags)
{
off_t seekback, already_hashed_to;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
unsigned char obuf[16384];
unsigned header_len;
- struct sha1file_checkpoint checkpoint;
+ struct hashfile_checkpoint checkpoint;
struct pack_idx_entry *idx = NULL;
seekback = lseek(fd, 0, SEEK_CUR);
return error("cannot find the current offset");
header_len = xsnprintf((char *)obuf, sizeof(obuf), "%s %" PRIuMAX,
- typename(type), (uintmax_t)size) + 1;
- git_SHA1_Init(&ctx);
- git_SHA1_Update(&ctx, obuf, header_len);
+ type_name(type), (uintmax_t)size) + 1;
+ the_hash_algo->init_fn(&ctx);
+ the_hash_algo->update_fn(&ctx, obuf, header_len);
/* Note: idx is non-NULL when we are writing */
if ((flags & HASH_WRITE_OBJECT) != 0)
while (1) {
prepare_to_stream(state, flags);
if (idx) {
- sha1file_checkpoint(state->f, &checkpoint);
+ hashfile_checkpoint(state->f, &checkpoint);
idx->offset = state->offset;
crc32_begin(state->f);
}
*/
if (!idx)
die("BUG: should not happen");
- sha1file_truncate(state->f, &checkpoint);
+ hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
finish_bulk_checkin(state);
if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
return error("cannot seek back");
}
- git_SHA1_Final(result_sha1, &ctx);
+ the_hash_algo->final_fn(result_sha1, &ctx);
if (!idx)
return 0;
idx->crc32 = crc32_end(state->f);
if (already_written(state, result_sha1)) {
- sha1file_truncate(state->f, &checkpoint);
+ hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
free(idx);
} else {
down->namelen = pathlen;
if (pos < it->subtree_nr)
- memmove(it->down + pos + 1,
- it->down + pos,
- sizeof(down) * (it->subtree_nr - pos - 1));
+ MOVE_ARRAY(it->down + pos + 1, it->down + pos,
+ it->subtree_nr - pos - 1);
it->down[pos] = down;
return down;
}
}
if (repair) {
- unsigned char sha1[20];
- hash_sha1_file(buffer.buf, buffer.len, tree_type, sha1);
- if (has_sha1_file(sha1))
- hashcpy(it->oid.hash, sha1);
+ struct object_id oid;
+ hash_object_file(buffer.buf, buffer.len, tree_type, &oid);
+ if (has_sha1_file(oid.hash))
+ oidcpy(&it->oid, &oid);
else
to_invalidate = 1;
- } else if (dryrun)
- hash_sha1_file(buffer.buf, buffer.len, tree_type,
- it->oid.hash);
- else if (write_sha1_file(buffer.buf, buffer.len, tree_type, it->oid.hash)) {
+ } else if (dryrun) {
+ hash_object_file(buffer.buf, buffer.len, tree_type, &it->oid);
+ } else if (write_object_file(buffer.buf, buffer.len, tree_type,
+ &it->oid)) {
strbuf_release(&buffer);
return -1;
}
hold_lock_file_for_update(&lock_file, index_path, LOCK_DIE_ON_ERROR);
- entries = read_index_from(index_state, index_path);
+ entries = read_index_from(index_state, index_path, get_git_dir());
if (entries < 0) {
ret = WRITE_TREE_UNREADABLE_INDEX;
goto out;
#include "git-compat-util.h"
#include "strbuf.h"
#include "hashmap.h"
-#include "mru.h"
+#include "list.h"
#include "advice.h"
#include "gettext.h"
#include "convert.h"
#include "sha1-array.h"
#include "repository.h"
-#ifndef platform_SHA_CTX
-/*
- * platform's underlying implementation of SHA-1; could be OpenSSL,
- * blk_SHA, Apple CommonCrypto, etc... Note that including
- * SHA1_HEADER may have already defined platform_SHA_CTX for our
- * own implementations like block-sha1 and ppc-sha1, so we list
- * the default for OpenSSL compatible SHA-1 implementations here.
- */
-#define platform_SHA_CTX SHA_CTX
-#define platform_SHA1_Init SHA1_Init
-#define platform_SHA1_Update SHA1_Update
-#define platform_SHA1_Final SHA1_Final
-#endif
-
-#define git_SHA_CTX platform_SHA_CTX
-#define git_SHA1_Init platform_SHA1_Init
-#define git_SHA1_Update platform_SHA1_Update
-#define git_SHA1_Final platform_SHA1_Final
-
-#ifdef SHA1_MAX_BLOCK_SIZE
-#include "compat/sha1-chunked.h"
-#undef git_SHA1_Update
-#define git_SHA1_Update git_SHA1_Update_Chunked
-#endif
-
#include <zlib.h>
typedef struct git_zstream {
z_stream z;
struct split_index *split_index;
struct cache_time timestamp;
unsigned name_hash_initialized : 1,
- initialized : 1;
+ initialized : 1,
+ drop_cache_tree : 1;
struct hashmap name_hash;
struct hashmap dir_hash;
unsigned char sha1[20];
#define active_cache_tree (the_index.cache_tree)
#define read_cache() read_index(&the_index)
-#define read_cache_from(path) read_index_from(&the_index, (path))
+#define read_cache_from(path) read_index_from(&the_index, (path), (get_git_dir()))
#define read_cache_preload(pathspec) read_index_preload(&the_index, (pathspec))
#define is_cache_unborn() is_index_unborn(&the_index)
#define read_cache_unmerged() read_index_unmerged(&the_index)
extern int read_index_preload(struct index_state *, const struct pathspec *pathspec);
extern int do_read_index(struct index_state *istate, const char *path,
		 int must_exist); /* for testing only! */
-extern int read_index_from(struct index_state *, const char *path);
+extern int read_index_from(struct index_state *, const char *path,
+ const char *gitdir);
extern int is_index_unborn(struct index_state *);
extern int read_index_unmerged(struct index_state *);
/* For use with `write_locked_index()`. */
#define COMMIT_LOCK (1 << 0)
+#define SKIP_IF_UNCHANGED (1 << 1)
/*
* Write the index while holding an already-taken lock. Close the lock,
* With `COMMIT_LOCK`, the lock is always committed or rolled back.
* Without it, the lock is closed, but neither committed nor rolled
* back.
+ *
+ * If `SKIP_IF_UNCHANGED` is given and the index is unchanged, nothing
+ * is written (and the lock is rolled back if `COMMIT_LOCK` is given).
*/
extern int write_locked_index(struct index_state *, struct lock_file *lock, unsigned flags);
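
As a minimal sketch of the new flag (assuming a caller that has already
taken the lock via hold_locked_index()), a write can be requested
unconditionally while still leaving an unchanged index file untouched:

    struct lock_file lock = LOCK_INIT;
    hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
    /* ... possibly modify the_index ... */
    if (write_locked_index(&the_index, &lock,
                           COMMIT_LOCK | SKIP_IF_UNCHANGED))
        die(_("unable to write new index file"));
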
#define GIT_REPO_VERSION 0
#define GIT_REPO_VERSION_READ 1
extern int repository_format_precious_objects;
+extern char *repository_format_partial_clone;
+extern const char *core_partial_clone_filter_default;
struct repository_format {
int version;
int precious_objects;
+ char *partial_clone; /* value of extensions.partialclone */
int is_bare;
int hash_algo;
char *work_tree;
#define TYPE_CHANGED 0x0040
/*
- * Return the name of the file in the local object database that would
- * be used to store a loose object with the specified sha1. The
- * return value is a pointer to a statically allocated buffer that is
- * overwritten each time the function is called.
+ * Put in `buf` the name of the file in the local object database that
+ * would be used to store a loose object with the specified sha1.
*/
-extern const char *sha1_file_name(const unsigned char *sha1);
+extern void sha1_file_name(struct strbuf *buf, const unsigned char *sha1);
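
A minimal usage sketch of the new calling convention, in which the caller
owns and releases the buffer:

    struct strbuf path = STRBUF_INIT;
    sha1_file_name(&path, sha1);  /* appends e.g. ".git/objects/ab/cd..." */
    /* ... use path.buf ... */
    strbuf_release(&path);
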
/*
* Return an abbreviated sha1 unique within this repository's object database.
static inline void oidclr(struct object_id *oid)
{
- hashclr(oid->hash);
+ memset(oid->hash, 0, GIT_MAX_RAWSZ);
}
"\xe6\x9d\xe2\x9b\xb2\xd1\xd6\x43\x4b\x8b" \
"\x29\xae\x77\x5a\xd8\xc2\xe4\x8c\x53\x91"
extern const struct object_id empty_blob_oid;
-#define EMPTY_BLOB_SHA1_BIN (empty_blob_oid.hash)
-
static inline int is_empty_blob_sha1(const unsigned char *sha1)
{
/* Read and unpack a sha1 file into memory, write memory to a sha1 file */
extern int sha1_object_info(const unsigned char *, unsigned long *);
-extern int hash_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1);
-extern int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *return_sha1);
-extern int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type, struct object_id *oid, unsigned flags);
-extern int pretend_sha1_file(void *, unsigned long, enum object_type, unsigned char *);
-extern int force_object_loose(const unsigned char *sha1, time_t mtime);
+
+extern int hash_object_file(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid);
+
+extern int write_object_file(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid);
+
+extern int hash_object_file_literally(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ unsigned flags);
+
+extern int pretend_object_file(void *, unsigned long, enum object_type,
+ struct object_id *oid);
+
+extern int force_object_loose(const struct object_id *oid, time_t mtime);
+
extern int git_open_cloexec(const char *name, int flags);
#define git_open(name) git_open_cloexec(name, O_RDONLY)
extern void *map_sha1_file(const unsigned char *sha1, unsigned long *size);
extern struct packed_git {
struct packed_git *next;
+ struct list_head mru;
struct pack_window *windows;
off_t pack_size;
const void *index_data;
unsigned pack_local:1,
pack_keep:1,
freshened:1,
- do_not_close:1;
+ do_not_close:1,
+ pack_promisor:1;
unsigned char sha1[20];
struct revindex_entry *revindex;
/* something like ".git/objects/pack/xxxxx.pack" */
} *packed_git;
/*
- * A most-recently-used ordered version of the packed_git list, which can
- * be iterated instead of packed_git (and marked via mru_mark).
+ * A most-recently-used ordered version of the packed_git list.
*/
-extern struct mru packed_git_mru;
+extern struct list_head packed_git_mru;
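
With the bespoke mru structure gone, callers iterate the MRU list with the
generic macros from list.h; a sketch:

    struct list_head *pos;
    list_for_each(pos, &packed_git_mru) {
        struct packed_git *p = list_entry(pos, struct packed_git, mru);
        /* ... try to find the object in pack "p" ... */
    }
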
struct pack_entry {
off_t offset;
* usual "XXXXXX" trailer, and the resulting filename is written into the
* "template" buffer. Returns the open descriptor.
*/
-extern int odb_mkstemp(struct strbuf *template, const char *pattern);
+extern int odb_mkstemp(struct strbuf *temp_filename, const char *pattern);
/*
* Create a pack .keep file named "name" (which should generally be the output
unsigned long *sizep;
off_t *disk_sizep;
unsigned char *delta_base_sha1;
- struct strbuf *typename;
+ struct strbuf *type_name;
void **contentp;
/* Response */
#define OBJECT_INFO_QUICK 8
extern int sha1_object_info_extended(const unsigned char *, struct object_info *, unsigned flags);
+/*
+ * Set this to 0 to prevent sha1_object_info_extended() from fetching missing
+ * blobs. This makes a difference only if extensions.partialClone is set.
+ *
+ * Its default value is 1.
+ */
+extern int fetch_if_missing;
+
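
A minimal sketch of the intended use (hypothetical caller): turn the flag
off around a probe that must not trigger a lazy fetch from the promisor
remote:

    fetch_if_missing = 0;
    if (sha1_object_info(sha1, NULL) < 0)
        ; /* the object is genuinely absent locally */
    fetch_if_missing = 1;
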
/* Dumb servers support */
extern int update_server_info(int);
fi
}
-good_trees_file="$HOME/travis-cache/good-trees"
-
# Save some info about the current commit's tree, so we can skip the
# build job if we encounter the same tree again, and can provide a
# useful message when we do.
# and installing dependencies.
set -ex
-mkdir -p "$HOME/travis-cache"
+cache_dir="$HOME/travis-cache"
+good_trees_file="$cache_dir/good-trees"
+
+mkdir -p "$cache_dir"
skip_branch_tip_with_tag
skip_good_tree
export DEVELOPER=1
export DEFAULT_TEST_TARGET=prove
export GIT_PROVE_OPTS="--timer --jobs 3 --state=failed,slow,save"
-export GIT_TEST_OPTS="--verbose-log"
+export GIT_TEST_OPTS="--verbose-log -x"
export GIT_TEST_CLONE_2GB=YesPlease
case "$jobname" in
--- /dev/null
+#!/bin/sh
+#
+# Build and test Git
+#
+
+. ${0%/*}/lib-travisci.sh
+
+ln -s "$cache_dir/.prove" t/.prove
+
+make --jobs=2
+make --quiet test
+if test "$jobname" = "linux-gcc"
+then
+ GIT_TEST_SPLIT_INDEX=YesPlease make --quiet test
+fi
+
+check_unignored_build_artifacts
+
+save_good_tree
+++ /dev/null
-#!/bin/sh
-#
-# Build Git
-#
-
-. ${0%/*}/lib-travisci.sh
-
-make --jobs=2
# Build and test Git in a 32-bit environment
#
# Usage:
-# run-linux32-build.sh [host-user-id]
+# run-linux32-build.sh <host-user-id>
#
-set -x
+set -ex
+
+if test $# -ne 1 || test -z "$1"
+then
+ echo >&2 "usage: run-linux32-build.sh <host-user-id>"
+ exit 1
+fi
# Update packages to the latest available versions
linux32 --32bit i386 sh -c '
apt update >/dev/null &&
apt install -y build-essential libcurl4-openssl-dev libssl-dev \
libexpat-dev gettext python >/dev/null
-' &&
+'
# If this script runs inside a docker container, then all commands are
# usually executed as root. Consequently, the host user might not be
# able to access the test output files.
-# If a host user id is given, then create a user "ci" with the host user
-# id to make everything accessible to the host user.
-HOST_UID=$1 &&
-CI_USER=$USER &&
-test -z $HOST_UID || (CI_USER="ci" && useradd -u $HOST_UID $CI_USER) &&
+# If a non-zero host user id is given, then create a user "ci" with that
+# user id to make everything accessible to the host user.
+HOST_UID=$1
+if test $HOST_UID -eq 0
+then
+ # Just in case someone does want to run the test suite as root.
+ CI_USER=root
+else
+ CI_USER=ci
+ if test "$(id -u $CI_USER 2>/dev/null)" = $HOST_UID
+ then
+ echo "user '$CI_USER' already exists with the requested ID $HOST_UID"
+ else
+ useradd -u $HOST_UID $CI_USER
+ fi
+
+	# Due to a bug, the test suite was run as root in the past, so
+ # a prove state file created back then is only accessible by
+ # root. Now that bug is fixed, the test suite is run as a
+ # regular user, but the prove state file coming from Travis
+ # CI's cache might still be owned by root.
+ # Make sure that this user has rights to any cached files,
+ # including an existing prove state file.
+ test -n "$cache_dir" && chown -R $HOST_UID:$HOST_UID "$cache_dir"
+fi
# Build and test
linux32 --32bit i386 su -m -l $CI_USER -c '
- cd /usr/src/git &&
- ln -s /tmp/travis-cache/.prove t/.prove &&
- make --jobs=2 &&
- make --quiet test
+ set -ex
+ cd /usr/src/git
+ test -n "$cache_dir" && ln -s "$cache_dir/.prove" t/.prove
+ make --jobs=2
+ make --quiet test
'
# Use the following command to debug the docker build locally:
# $ docker run -itv "${PWD}:/usr/src/git" --entrypoint /bin/bash daald/ubuntu32:xenial
-# root@container:/# /usr/src/git/ci/run-linux32-build.sh
+# root@container:/# /usr/src/git/ci/run-linux32-build.sh <host-user-id>
+
+container_cache_dir=/tmp/travis-cache
docker run \
--interactive \
--env GIT_PROVE_OPTS \
--env GIT_TEST_OPTS \
--env GIT_TEST_CLONE_2GB \
+ --env cache_dir="$container_cache_dir" \
--volume "${PWD}:/usr/src/git" \
- --volume "${HOME}/travis-cache:/tmp/travis-cache" \
+ --volume "$cache_dir:$container_cache_dir" \
daald/ubuntu32:xenial \
/usr/src/git/ci/run-linux32-build.sh $(id -u $USER)
+++ /dev/null
-#!/bin/sh
-#
-# Test Git
-#
-
-. ${0%/*}/lib-travisci.sh
-
-ln -s $HOME/travis-cache/.prove t/.prove
-make --quiet test
-
-check_unignored_build_artifacts
-
-save_good_tree
return color_parse_mem(value, strlen(value), dst);
}
-void color_set(char *dst, const char *color_bytes)
-{
- xsnprintf(dst, COLOR_MAXLEN, "%s", color_bytes);
-}
-
/*
* Write the ANSI color codes for "c" to "out"; the string should
* already have the ANSI escape code in it. "out" should have enough
return r;
}
-
-
int color_fprintf(FILE *fp, const char *color, const char *fmt, ...)
{
va_list args;
int git_color_default_config(const char *var, const char *value, void *cb);
/*
- * Set the color buffer (which must be COLOR_MAXLEN bytes)
- * to the raw color bytes; this is useful for initializing
- * default color variables.
+ * Parse a config option, which can be a boolean or one of
+ * "never", "auto", "always". Return GIT_COLOR_NEVER for
+ * "never" or a false boolean, GIT_COLOR_ALWAYS for "always"
+ * or a true boolean, and GIT_COLOR_AUTO for "auto".
*/
-void color_set(char *dst, const char *color_bytes);
-
int git_config_colorbool(const char *var, const char *value);
+
+/*
+ * Return a boolean indicating whether to use color, where the argument
+ * 'var' is one of GIT_COLOR_UNKNOWN, GIT_COLOR_NEVER, GIT_COLOR_ALWAYS,
+ * GIT_COLOR_AUTO.
+ */
int want_color(int var);
+
+/*
+ * Translate a Git color from 'value' into a string that the terminal can
+ * interpret and store it into 'dst'. The Git color values are of the form
+ * "foreground [background] [attr]" where fore- and background can be a color
+ * name ("red"), a RGB code (#0xFF0000) or a 256-color-mode from the terminal.
+ */
int color_parse(const char *value, char *dst);
int color_parse_mem(const char *value, int len, char *dst);
+
+/*
+ * Output the formatted string in the specified color (and then reset to normal
+ * color so subsequent output is uncolored). Omits the color encapsulation if
+ * `color` is NULL. The `color_fprintf_ln` prints a new line after resetting
+ * the color. The `color_print_strbuf` prints the contents of the given
+ * strbuf (BUG: but only up to its first NUL character).
+ */
__attribute__((format (printf, 3, 4)))
int color_fprintf(FILE *fp, const char *color, const char *fmt, ...);
__attribute__((format (printf, 3, 4)))
int color_fprintf_ln(FILE *fp, const char *color, const char *fmt, ...);
void color_print_strbuf(FILE *fp, const char *color, const struct strbuf *sb);
+/*
+ * Check if the given color is GIT_COLOR_NIL, which means "no color selected".
+ * The caller needs to replace the color with the actual desired color.
+ */
int color_is_nil(const char *color);
#endif /* COLOR_H */
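
Taken together, a minimal sketch of the documented flow (configuration
value and color illustrative):

    char color[COLOR_MAXLEN];
    int use = git_config_colorbool("color.ui", "auto");
    if (want_color(use) && !color_parse("red bold", color))
        color_fprintf(stdout, color, "warning: %s", "example");
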
/* Coalesce new lines into base by finding LCS */
static struct lline *coalesce_lines(struct lline *base, int *lenbase,
- struct lline *new, int lennew,
+ struct lline *newline, int lennew,
unsigned long parent, long flags)
{
int **lcs;
struct lline *baseend, *newend = NULL;
int i, j, origbaselen = *lenbase;
- if (new == NULL)
+ if (newline == NULL)
return base;
if (base == NULL) {
*lenbase = lennew;
- return new;
+ return newline;
}
/*
directions[0][j] = NEW;
for (i = 1, baseend = base; i < origbaselen + 1; i++) {
- for (j = 1, newend = new; j < lennew + 1; j++) {
+ for (j = 1, newend = newline; j < lennew + 1; j++) {
if (match_string_spaces(baseend->line, baseend->len,
newend->line, newend->len, flags)) {
lcs[i][j] = lcs[i - 1][j - 1] + 1;
if (lline->prev)
lline->prev->next = lline->next;
else
- new = lline->next;
+ newline = lline->next;
if (lline->next)
lline->next->prev = lline->prev;
}
}
- newend = new;
+ newend = newline;
while (newend) {
struct lline *lline = newend;
newend = newend->next;
if (is_file) {
struct strbuf buf = STRBUF_INIT;
- if (convert_to_git(&the_index, elem->path, result, len, &buf, safe_crlf)) {
+ if (convert_to_git(&the_index, elem->path, result, len, &buf, global_conv_flags_eol)) {
free(result);
result = strbuf_detach(&buf, &len);
result_size = len;
opt->flags.follow_renames ||
opt->break_opt != -1 ||
opt->detect_rename ||
- opt->pickaxe ||
+ (opt->pickaxe_opts & DIFF_PICKAXE_KINDS_MASK) ||
opt->filter;
ALLOC_GROW(commit_graft, commit_graft_nr + 1, commit_graft_alloc);
commit_graft_nr++;
if (pos < commit_graft_nr)
- memmove(commit_graft + pos + 1,
- commit_graft + pos,
- (commit_graft_nr - pos - 1) *
- sizeof(*commit_graft));
+ MOVE_ARRAY(commit_graft + pos + 1, commit_graft + pos,
+ commit_graft_nr - pos - 1);
commit_graft[pos] = graft;
return 0;
}
oid_to_hex(&commit->object.oid));
if (type != OBJ_COMMIT)
die("expected commit for %s, got %s",
- oid_to_hex(&commit->object.oid), typename(type));
+ oid_to_hex(&commit->object.oid), type_name(type));
if (sizep)
*sizep = size;
}
commit_list_insert(in->item, &ret);
for (i = in->next; i; i = i->next) {
- struct commit_list *new = NULL, *end = NULL;
+ struct commit_list *new_commits = NULL, *end = NULL;
for (j = ret; j; j = j->next) {
struct commit_list *bases;
bases = get_merge_bases(i->item, j->item);
- if (!new)
- new = bases;
+ if (!new_commits)
+ new_commits = bases;
else
end->next = bases;
for (k = bases; k; k = k->next)
end = k;
}
- ret = new;
+ ret = new_commits;
}
return ret;
}
}
}
-int commit_tree(const char *msg, size_t msg_len,
- const unsigned char *tree,
- struct commit_list *parents, unsigned char *ret,
+int commit_tree(const char *msg, size_t msg_len, const struct object_id *tree,
+ struct commit_list *parents, struct object_id *ret,
const char *author, const char *sign_commit)
{
struct commit_extra_header *extra = NULL, **tail = &extra;
"variable i18n.commitencoding to the encoding your project uses.\n");
int commit_tree_extended(const char *msg, size_t msg_len,
- const unsigned char *tree,
- struct commit_list *parents, unsigned char *ret,
+ const struct object_id *tree,
+ struct commit_list *parents, struct object_id *ret,
const char *author, const char *sign_commit,
struct commit_extra_header *extra)
{
int encoding_is_utf8;
struct strbuf buffer;
- assert_sha1_type(tree, OBJ_TREE);
+ assert_sha1_type(tree->hash, OBJ_TREE);
if (memchr(msg, '\0', msg_len))
return error("a NUL byte in commit log message not allowed.");
encoding_is_utf8 = is_encoding_utf8(git_commit_encoding);
strbuf_init(&buffer, 8192); /* should avoid reallocs for the headers */
- strbuf_addf(&buffer, "tree %s\n", sha1_to_hex(tree));
+ strbuf_addf(&buffer, "tree %s\n", oid_to_hex(tree));
/*
* NOTE! This ordering means that the same exact tree merged with a
goto out;
}
- result = write_sha1_file(buffer.buf, buffer.len, commit_type, ret);
+ result = write_object_file(buffer.buf, buffer.len, commit_type, ret);
out:
strbuf_release(&buffer);
return result;
struct commit_list **commit_list_append(struct commit *commit,
struct commit_list **next)
{
- struct commit_list *new = xmalloc(sizeof(struct commit_list));
- new->item = commit;
- *next = new;
- new->next = NULL;
- return &new->next;
+ struct commit_list *new_commit = xmalloc(sizeof(struct commit_list));
+ new_commit->item = commit;
+ *next = new_commit;
+ new_commit->next = NULL;
+ return &new_commit->next;
}
const char *find_commit_header(const char *msg, const char *key, size_t *out_len)
struct commit_extra_header ***tail);
extern int commit_tree(const char *msg, size_t msg_len,
- const unsigned char *tree,
- struct commit_list *parents, unsigned char *ret,
+ const struct object_id *tree,
+ struct commit_list *parents, struct object_id *ret,
const char *author, const char *sign_commit);
extern int commit_tree_extended(const char *msg, size_t msg_len,
- const unsigned char *tree,
- struct commit_list *parents, unsigned char *ret,
- const char *author, const char *sign_commit,
+ const struct object_id *tree,
+ struct commit_list *parents,
+ struct object_id *ret, const char *author,
+ const char *sign_commit,
struct commit_extra_header *);
extern struct commit_extra_header *read_commit_extra_headers(struct commit *, const char **);
}
if (!strcmp(var, "core.safecrlf")) {
+ int eol_rndtrp_die;
if (value && !strcasecmp(value, "warn")) {
- safe_crlf = SAFE_CRLF_WARN;
+ global_conv_flags_eol = CONV_EOL_RNDTRP_WARN;
return 0;
}
- safe_crlf = git_config_bool(var, value);
+ eol_rndtrp_die = git_config_bool(var, value);
+ global_conv_flags_eol = eol_rndtrp_die ?
+ CONV_EOL_RNDTRP_DIE : CONV_EOL_RNDTRP_WARN;
return 0;
}
return 0;
}
+ if (!strcmp(var, "core.partialclonefilter")) {
+ return git_config_string(&core_partial_clone_filter_default,
+ var, value);
+ }
+
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
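
An illustrative configuration pairing the new default-filter knob with the
partial-clone extension handled elsewhere in this series (remote name and
filter spec hypothetical):

    [core]
        partialCloneFilter = blob:none
    [extensions]
        partialclone = origin
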
NO_ST_BLOCKS_IN_STRUCT_STAT = YesPlease
X = .exe
UNRELIABLE_FSTAT = UnfortunatelyYes
- SPARSE_FLAGS = -isystem /usr/include/w32api -Wno-one-bit-signed-bitfield
OBJECT_CREATION_USES_RENAMES = UnfortunatelyNeedsTo
MMAP_PREVENTS_DELETE = UnfortunatelyYes
COMPAT_OBJS += compat/cygwin.o
argv_array_push(&rev_list.args,"rev-list");
argv_array_push(&rev_list.args, "--objects");
argv_array_push(&rev_list.args, "--stdin");
+ if (repository_format_partial_clone)
+ argv_array_push(&rev_list.args, "--exclude-promisor-objects");
argv_array_push(&rev_list.args, "--not");
argv_array_push(&rev_list.args, "--all");
argv_array_push(&rev_list.args, "--quiet");
@ strbuf_addf_with_format_only @
expression E;
-constant fmt;
-@@
- strbuf_addf(E,
-(
- fmt
-|
- _(fmt)
-)
- );
-
-@ script:python @
-fmt << strbuf_addf_with_format_only.fmt;
-@@
-cocci.include_match("%" not in fmt)
-
-@ extends strbuf_addf_with_format_only @
+constant fmt !~ "%";
@@
- strbuf_addf
+ strbuf_addstr
@@
expression E1, E2;
+format F =~ "s";
@@
-- strbuf_addf(E1, "%s", E2);
+- strbuf_addf(E1, "%@F@", E2);
+ strbuf_addstr(E1, E2);
@@
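
The effect of the tightened rules, as an illustrative before/after: a
constant format with no '%' becomes strbuf_addstr() directly, and the
"%s"-only form is likewise collapsed:

    - strbuf_addf(&buf, "literal text");
    + strbuf_addstr(&buf, "literal text");
    - strbuf_addf(&buf, "%s", str);
    + strbuf_addstr(&buf, str);
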
esac
}
+# This function is equivalent to
+#
+# __gitcomp "$(git xxx --git-completion-helper) ..."
+#
+# except that the output is cached. Accepts 1-3 arguments:
+# 1: the git command to execute, this is also the cache key
+# 2: extra options to be added on top (e.g. negative forms)
+# 3: options to be excluded
+__gitcomp_builtin ()
+{
+ # spaces must be replaced with underscore for multi-word
+ # commands, e.g. "git remote add" becomes remote_add.
+ local cmd="$1"
+ local incl="$2"
+ local excl="$3"
+
+ local var=__gitcomp_builtin_"${cmd/-/_}"
+ local options
+ eval "options=\$$var"
+
+ if [ -z "$options" ]; then
+ # leading and trailing spaces are significant to make
+ # option removal work correctly.
+ options=" $(__git ${cmd/_/ } --git-completion-helper) $incl "
+ for i in $excl; do
+ options="${options/ $i / }"
+ done
+ eval "$var=\"$options\""
+ fi
+
+ __gitcomp "$options"
+}
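
An illustrative call: completing "git remote add" caches the helper output
under __gitcomp_builtin_remote_add and layers the extra negative form on
top:

    __gitcomp_builtin remote_add "--no-tags"
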
+
# Variation of __gitcomp_nl () that appends to the existing list of
# completion candidates, COMPREPLY.
__gitcomp_nl_append ()
track=""
;;
*)
- for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD; do
+ for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD REBASE_HEAD; do
case "$i" in
$match*)
if [ -e "$dir/$i" ]; then
__git_list_merge_strategies ()
{
- git merge -s help 2>&1 |
+ LANG=C LC_ALL=C git merge -s help 2>&1 |
sed -n -e '/[Aa]vailable strategies are: /,/^$/{
s/\.$//
s/.*://
}
__git_whitespacelist="nowarn warn error error-all fix"
+__git_am_inprogress_options="--skip --continue --resolved --abort --quit --show-current-patch"
_git_am ()
{
__git_find_repo_path
if [ -d "$__git_repo_path"/rebase-apply ]; then
- __gitcomp "--skip --continue --resolved --abort"
+ __gitcomp "$__git_am_inprogress_options"
return
fi
case "$cur" in
return
;;
--*)
- __gitcomp "
- --3way --committer-date-is-author-date --ignore-date
- --ignore-whitespace --ignore-space-change
- --interactive --keep --no-utf8 --signoff --utf8
- --whitespace= --scissors
- "
+ __gitcomp_builtin am "--no-utf8" \
+ "$__git_am_inprogress_options"
return
esac
}
return
;;
--*)
- __gitcomp "
- --stat --numstat --summary --check --index
- --cached --index-info --reverse --reject --unidiff-zero
- --apply --no-add --exclude=
- --ignore-whitespace --ignore-space-change
- --whitespace= --inaccurate-eof --verbose
- --recount --directory=
- "
+ __gitcomp_builtin apply
return
esac
}
{
case "$cur" in
--*)
- __gitcomp "
- --interactive --refresh --patch --update --dry-run
- --ignore-errors --intent-to-add --force --edit --chmod=
- "
+ __gitcomp_builtin add
return
esac
__git_complete_refs --cur="${cur##--set-upstream-to=}"
;;
--*)
- __gitcomp "
- --color --no-color --verbose --abbrev= --no-abbrev
- --track --no-track --contains --no-contains --merged --no-merged
- --set-upstream-to= --edit-description --list
- --unset-upstream --delete --move --copy --remotes
- --column --no-column --sort= --points-at
+ __gitcomp_builtin branch "--no-color --no-abbrev
+ --no-track --no-column
"
;;
*)
__gitcomp "diff3 merge" "" "${cur##--conflict=}"
;;
--*)
- __gitcomp "
- --quiet --ours --theirs --track --no-track --merge
- --conflict= --orphan --patch --detach --ignore-skip-worktree-bits
- --recurse-submodules --no-recurse-submodules
- "
+ __gitcomp_builtin checkout "--no-track --no-recurse-submodules"
;;
*)
# check if --track, --no-track, or --no-guess was specified
__git_complete_refs
}
+__git_cherry_pick_inprogress_options="--continue --quit --abort"
+
_git_cherry_pick ()
{
__git_find_repo_path
if [ -f "$__git_repo_path"/CHERRY_PICK_HEAD ]; then
- __gitcomp "--continue --quit --abort"
+ __gitcomp "$__git_cherry_pick_inprogress_options"
return
fi
case "$cur" in
--*)
- __gitcomp "--edit --no-commit --signoff --strategy= --mainline"
+ __gitcomp_builtin cherry-pick "" \
+ "$__git_cherry_pick_inprogress_options"
;;
*)
__git_complete_refs
{
case "$cur" in
--*)
- __gitcomp "--dry-run --quiet"
+ __gitcomp_builtin clean
return
;;
esac
{
case "$cur" in
--*)
- __gitcomp "
- --local
- --no-hardlinks
- --shared
- --reference
- --quiet
- --no-checkout
- --bare
- --mirror
- --origin
- --upload-pack
- --template=
- --depth
- --single-branch
- --no-tags
- --branch
- --recurse-submodules
- --no-single-branch
- --shallow-submodules
- "
+ __gitcomp_builtin clone "--no-single-branch"
return
;;
esac
return
;;
--*)
- __gitcomp "
- --all --author= --signoff --verify --no-verify
- --edit --no-edit
- --amend --include --only --interactive
- --dry-run --reuse-message= --reedit-message=
- --reset-author --file= --message= --template=
- --cleanup= --untracked-files --untracked-files=
- --verbose --quiet --fixup= --squash=
- --patch --short --date --allow-empty
- "
+ __gitcomp_builtin commit "--no-edit --verify"
return
esac
{
case "$cur" in
--*)
- __gitcomp "
- --all --tags --contains --abbrev= --candidates=
- --exact-match --debug --long --match --always --first-parent
- --exclude --dirty --broken
- "
+ __gitcomp_builtin describe
return
esac
__git_complete_refs
--dirstat --dirstat= --dirstat-by-file
--dirstat-by-file= --cumulative
--diff-algorithm=
- --submodule --submodule=
+ --submodule --submodule= --ignore-submodules
"
_git_diff ()
return
;;
--*)
- __gitcomp "--cached --staged --pickaxe-all --pickaxe-regex
- --base --ours --theirs
- --no-renames --diff-filter= --find-copies-harder
- --relative --ignore-submodules
- --tool="
+ __gitcomp_builtin difftool "$__git_diff_common_options
+ --base --cached --ours --theirs
+ --pickaxe-all --pickaxe-regex
+ --relative --staged
+ "
return
;;
esac
__git_fetch_recurse_submodules="yes on-demand no"
-__git_fetch_options="
- --quiet --verbose --append --upload-pack --force --keep --depth=
- --tags --no-tags --all --prune --dry-run --recurse-submodules=
- --unshallow --update-shallow
-"
-
_git_fetch ()
{
case "$cur" in
return
;;
--*)
- __gitcomp "$__git_fetch_options"
+ __gitcomp_builtin fetch "--no-tags"
return
;;
esac
{
case "$cur" in
--*)
- __gitcomp "
- --tags --root --unreachable --cache --no-reflogs --full
- --strict --verbose --lost-found --name-objects
- "
+ __gitcomp_builtin fsck "--no-reflogs"
return
;;
esac
{
case "$cur" in
--*)
- __gitcomp "--prune --aggressive"
+ __gitcomp_builtin gc
return
;;
esac
case "$cur" in
--*)
- __gitcomp "
- --cached
- --text --ignore-case --word-regexp --invert-match
- --full-name --line-number
- --extended-regexp --basic-regexp --fixed-strings
- --perl-regexp
- --threads
- --files-with-matches --name-only
- --files-without-match
- --max-depth
- --count
- --and --or --not --all-match
- --break --heading --show-function --function-context
- --untracked --no-index
- "
+ __gitcomp_builtin grep
return
;;
esac
{
case "$cur" in
--*)
- __gitcomp "--all --guides --info --man --web"
+ __gitcomp_builtin help
return
;;
esac
return
;;
--*)
- __gitcomp "--quiet --bare --template= --shared --shared="
+ __gitcomp_builtin init
return
;;
esac
{
case "$cur" in
--*)
- __gitcomp "--cached --deleted --modified --others --ignored
- --stage --directory --no-empty-directory --unmerged
- --killed --exclude= --exclude-from=
- --exclude-per-directory= --exclude-standard
- --error-unmatch --with-tree= --full-name
- --abbrev --ignored --exclude-per-directory
- "
+ __gitcomp_builtin ls-files "--no-empty-directory"
return
;;
esac
{
case "$cur" in
--*)
- __gitcomp "--heads --tags --refs --get-url --symref"
+ __gitcomp_builtin ls-remote
return
;;
esac
__git_complete_revlist
}
-# Common merge options shared by git-merge(1) and git-pull(1).
-__git_merge_options="
- --no-commit --no-stat --log --no-log --squash --strategy
- --commit --stat --no-squash --ff --no-ff --ff-only --edit --no-edit
- --verify-signatures --no-verify-signatures --gpg-sign
- --quiet --verbose --progress --no-progress
-"
-
_git_merge ()
{
__git_complete_strategy && return
case "$cur" in
--*)
- __gitcomp "$__git_merge_options
- --rerere-autoupdate --no-rerere-autoupdate --abort --continue"
+ __gitcomp_builtin merge "--no-rerere-autoupdate
+ --no-commit --no-edit --no-ff
+ --no-log --no-progress
+ --no-squash --no-stat
+ --no-verify-signatures
+ "
return
esac
__git_complete_refs
{
case "$cur" in
--*)
- __gitcomp "--octopus --independent --is-ancestor --fork-point"
+ __gitcomp_builtin merge-base
return
;;
esac
{
case "$cur" in
--*)
- __gitcomp "--dry-run"
+ __gitcomp_builtin mv
return
;;
esac
_git_name_rev ()
{
- __gitcomp "--tags --all --stdin"
+ __gitcomp_builtin name-rev
}
_git_notes ()
{
- local subcommands='add append copy edit list prune remove show'
+ local subcommands='add append copy edit get-ref list merge prune remove show'
local subcommand="$(__git_find_on_cmdline "$subcommands")"
case "$subcommand,$cur" in
,--*)
- __gitcomp '--ref'
+ __gitcomp_builtin notes
;;
,*)
case "$prev" in
;;
esac
;;
- add,--reuse-message=*|append,--reuse-message=*|\
- add,--reedit-message=*|append,--reedit-message=*)
+ *,--reuse-message=*|*,--reedit-message=*)
__git_complete_refs --cur="${cur#*=}"
;;
- add,--*|append,--*)
- __gitcomp '--file= --message= --reedit-message=
- --reuse-message='
+ *,--*)
+ __gitcomp_builtin notes_$subcommand
;;
- copy,--*)
- __gitcomp '--stdin'
- ;;
- prune,--*)
- __gitcomp '--dry-run --verbose'
- ;;
- prune,*)
+ prune,*|get-ref,*)
+ # this command does not take a ref, do not complete it
;;
*)
case "$prev" in
return
;;
--*)
- __gitcomp "
- --rebase --no-rebase
- --autostash --no-autostash
- $__git_merge_options
- $__git_fetch_options
- "
+ __gitcomp_builtin pull "--no-autostash --no-commit --no-edit
+ --no-ff --no-log --no-progress --no-rebase
+ --no-squash --no-stat --no-tags
+ --no-verify-signatures"
+
return
;;
esac
return
;;
--*)
- __gitcomp "
- --all --mirror --tags --dry-run --force --verbose
- --quiet --prune --delete --follow-tags
- --receive-pack= --repo= --set-upstream
- --force-with-lease --force-with-lease= --recurse-submodules=
- "
+ __gitcomp_builtin push
return
;;
esac
{
__git_find_repo_path
if [ -f "$__git_repo_path"/rebase-merge/interactive ]; then
- __gitcomp "--continue --skip --abort --quit --edit-todo"
+ __gitcomp "--continue --skip --abort --quit --edit-todo --show-current-patch"
return
elif [ -d "$__git_repo_path"/rebase-apply ] || \
[ -d "$__git_repo_path"/rebase-merge ]; then
- __gitcomp "--continue --skip --abort --quit"
+ __gitcomp "--continue --skip --abort --quit --show-current-patch"
return
fi
__git_complete_strategy && return
--autostash --no-autostash
--verify --no-verify
--keep-empty --root --force-rebase --no-ff
+ --rerere-autoupdate
--exec
"
--compose --confirm= --dry-run --envelope-sender
--from --identity
--in-reply-to --no-chain-reply-to --no-signed-off-by-cc
- --no-suppress-from --no-thread --quiet
+ --no-suppress-from --no-thread --quiet --reply-to
--signed-off-by-cc --smtp-pass --smtp-server
--smtp-server-port --smtp-encryption= --smtp-user
--subject --suppress-cc= --suppress-from --thread --to
return
;;
--*)
- __gitcomp "
- --short --branch --porcelain --long --verbose
- --untracked-files= --ignore-submodules= --ignored
- --column= --no-column
- "
+ __gitcomp_builtin status "--no-column"
return
;;
esac
esac
case "$cur" in
--*)
- __gitcomp "
- --system --global --local --file=
- --list --replace-all
- --get --get-all --get-regexp
- --add --unset --unset-all
- --remove-section --rename-section
- --name-only
- "
+ __gitcomp_builtin config
return
;;
branch.*.*)
if [ -z "$subcommand" ]; then
case "$cur" in
--*)
- __gitcomp "--verbose"
+ __gitcomp_builtin remote
;;
*)
__gitcomp "$subcommands"
case "$subcommand,$cur" in
add,--*)
- __gitcomp "--track --master --fetch --tags --no-tags --mirror="
+ __gitcomp_builtin remote_add "--no-tags"
;;
add,*)
;;
set-head,--*)
- __gitcomp "--auto --delete"
+ __gitcomp_builtin remote_set-head
;;
set-branches,--*)
- __gitcomp "--add"
+ __gitcomp_builtin remote_set-branches
;;
set-head,*|set-branches,*)
__git_complete_remote_or_refspec
;;
update,--*)
- __gitcomp "--prune"
+ __gitcomp_builtin remote_update
;;
update,*)
__gitcomp "$(__git_get_config_variables "remotes")"
;;
set-url,--*)
- __gitcomp "--push --add --delete"
+ __gitcomp_builtin remote_set-url
;;
get-url,--*)
- __gitcomp "--push --all"
+ __gitcomp_builtin remote_get-url
;;
prune,--*)
- __gitcomp "--dry-run"
+ __gitcomp_builtin remote_prune
;;
*)
__gitcomp_nl "$(__git_remotes)"
{
case "$cur" in
--*)
- __gitcomp "--edit --graft --format= --list --delete"
+ __gitcomp_builtin replace
return
;;
esac
case "$cur" in
--*)
- __gitcomp "--merge --mixed --hard --soft --patch --keep"
+ __gitcomp_builtin reset
return
;;
esac
__git_complete_refs
}
+__git_revert_inprogress_options="--continue --quit --abort"
+
_git_revert ()
{
__git_find_repo_path
if [ -f "$__git_repo_path"/REVERT_HEAD ]; then
- __gitcomp "--continue --quit --abort"
+ __gitcomp "$__git_revert_inprogress_options"
return
fi
case "$cur" in
--*)
- __gitcomp "
- --edit --mainline --no-edit --no-commit --signoff
- --strategy= --strategy-option=
- "
+ __gitcomp_builtin revert "--no-edit" \
+ "$__git_revert_inprogress_options"
return
;;
esac
{
case "$cur" in
--*)
- __gitcomp "--cached --dry-run --ignore-unmatch --quiet"
+ __gitcomp_builtin rm
return
;;
esac
{
case "$cur" in
--*)
- __gitcomp "
- --all --remotes --topo-order --date-order --current --more=
- --list --independent --merge-base --no-name
- --color --no-color
- --sha1-name --sparse --topics --reflog
- "
+ __gitcomp_builtin show-branch "--no-color"
return
;;
esac
case "$cur" in
--*)
- __gitcomp "
- --list --delete --verify --annotate --message --file
- --sign --cleanup --local-user --force --column --sort=
- --contains --no-contains --points-at --merged --no-merged --create-reflog
- "
+ __gitcomp_builtin tag
;;
esac
}
_git_worktree ()
{
- local subcommands="add list lock prune unlock"
+ local subcommands="add list lock move prune remove unlock"
local subcommand="$(__git_find_on_cmdline "$subcommands")"
if [ -z "$subcommand" ]; then
__gitcomp "$subcommands"
else
case "$subcommand,$cur" in
add,--*)
- __gitcomp "--detach"
+ __gitcomp_builtin worktree_add
;;
list,--*)
- __gitcomp "--porcelain"
+ __gitcomp_builtin worktree_list
;;
lock,--*)
- __gitcomp "--reason"
+ __gitcomp_builtin worktree_lock
;;
prune,--*)
- __gitcomp "--dry-run --expire --verbose"
+ __gitcomp_builtin worktree_prune
+ ;;
+ remove,--*)
+ __gitcomp "--force"
;;
*)
;;
static void show_new(enum object_type type, unsigned char *sha1_new)
{
- fprintf(stderr, " %s: %s\n", typename(type),
+ fprintf(stderr, " %s: %s\n", type_name(type),
find_unique_abbrev(sha1_new, DEFAULT_ABBREV));
}
use 5.008;
use strict;
use warnings;
-use Error qw(:try);
+use Git::LoadCPAN::Error qw(:try);
use File::Basename qw(dirname);
use File::Copy;
use File::Find;
my($d) = @_;
$d =~ m#(\d\d\d\d)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)#
or die "Unparseable date: $d\n";
- my $y=$1; $y-=1900 if $y>1900;
+ my $y=$1; $y+=1900 if $y<1000;
return timegm($6||0,$5,$4,$3,$2-1,$y);
}
# ln -sf /usr/share/git-core/contrib/hooks/pre-auto-gc-battery \
# hooks/pre-auto-gc
-if test -x /sbin/on_ac_power && /sbin/on_ac_power
+if test -x /sbin/on_ac_power && (/sbin/on_ac_power;test $? -ne 1)
then
exit 0
elif test "$(cat /sys/class/power_supply/AC/online 2>/dev/null)" = 1
main=
sub=
git log --grep="^git-subtree-dir: $dir/*\$" \
- --pretty=format:'START %H%n%s%n%n%b%nEND%n' HEAD |
+ --no-show-signature --pretty=format:'START %H%n%s%n%n%b%nEND%n' HEAD |
while read a b junk
do
debug "$a $b $junk"
main=
sub=
git log --grep="^git-subtree-dir: $dir/*\$" \
- --pretty=format:'START %H%n%s%n%n%b%nEND%n' $revs |
+ --no-show-signature --pretty=format:'START %H%n%s%n%n%b%nEND%n' $revs |
while read a b junk
do
case "$a" in
# We're going to set some environment vars here, so
# do it in a subshell to get rid of them safely later
debug copy_commit "{$1}" "{$2}" "{$3}"
- git log -1 --pretty=format:'%an%n%ae%n%aD%n%cn%n%ce%n%cD%n%B' "$1" |
+ git log -1 --no-show-signature --pretty=format:'%an%n%ae%n%aD%n%cn%n%ce%n%cD%n%B' "$1" |
(
read GIT_AUTHOR_NAME
read GIT_AUTHOR_EMAIL
oldsub_short=$(git rev-parse --short "$oldsub")
echo "Squashed '$dir/' changes from $oldsub_short..$newsub_short"
echo
- git log --pretty=tformat:'%h %s' "$oldsub..$newsub"
- git log --pretty=tformat:'REVERT: %h %s' "$newsub..$oldsub"
+ git log --no-show-signature --pretty=tformat:'%h %s' "$oldsub..$newsub"
+ git log --no-show-signature --pretty=tformat:'REVERT: %h %s' "$newsub..$oldsub"
else
echo "Squashed '$dir/' content from commit $newsub_short"
fi
toptree_for_commit () {
commit="$1"
- git log -1 --pretty=format:'%T' "$commit" -- || exit $?
+ git rev-parse --verify "$commit^{tree}" || exit $?
}
subtree_for_commit () {
Subtrees are not to be confused with submodules, which are meant for
the same task. Unlike submodules, subtrees do not need any special
-constructions (like .gitmodule files or gitlinks) be present in
+constructions (like .gitmodules files or gitlinks) to be present in
your repository, and do not force end-users of your
repository to do anything special or to understand how subtrees
work. A subtree is just a subdirectory that can be
return core_eol;
}
-static void check_safe_crlf(const char *path, enum crlf_action crlf_action,
+static void check_global_conv_flags_eol(const char *path, enum crlf_action crlf_action,
struct text_stat *old_stats, struct text_stat *new_stats,
- enum safe_crlf checksafe)
+ int conv_flags)
{
if (old_stats->crlf && !new_stats->crlf ) {
/*
* CRLFs would not be restored by checkout
*/
- if (checksafe == SAFE_CRLF_WARN)
+ if (conv_flags & CONV_EOL_RNDTRP_DIE)
+ die(_("CRLF would be replaced by LF in %s."), path);
+ else if (conv_flags & CONV_EOL_RNDTRP_WARN)
warning(_("CRLF will be replaced by LF in %s.\n"
"The file will have its original line"
" endings in your working directory."), path);
- else /* i.e. SAFE_CRLF_FAIL */
- die(_("CRLF would be replaced by LF in %s."), path);
} else if (old_stats->lonelf && !new_stats->lonelf ) {
/*
* CRLFs would be added by checkout
*/
- if (checksafe == SAFE_CRLF_WARN)
+ if (conv_flags & CONV_EOL_RNDTRP_DIE)
+ die(_("LF would be replaced by CRLF in %s"), path);
+ else if (conv_flags & CONV_EOL_RNDTRP_WARN)
warning(_("LF will be replaced by CRLF in %s.\n"
"The file will have its original line"
" endings in your working directory."), path);
- else /* i.e. SAFE_CRLF_FAIL */
- die(_("LF would be replaced by CRLF in %s"), path);
}
}
static int crlf_to_git(const struct index_state *istate,
const char *path, const char *src, size_t len,
struct strbuf *buf,
- enum crlf_action crlf_action, enum safe_crlf checksafe)
+ enum crlf_action crlf_action, int conv_flags)
{
struct text_stat stats;
char *dst;
* unless we want to renormalize in a merge or
* cherry-pick.
*/
- if ((checksafe != SAFE_CRLF_RENORMALIZE) &&
+ if ((!(conv_flags & CONV_EOL_RENORMALIZE)) &&
has_crlf_in_index(istate, path))
convert_crlf_into_lf = 0;
}
- if ((checksafe == SAFE_CRLF_WARN ||
- (checksafe == SAFE_CRLF_FAIL)) && len) {
+ if (((conv_flags & CONV_EOL_RNDTRP_WARN) ||
+ ((conv_flags & CONV_EOL_RNDTRP_DIE) && len))) {
struct text_stat new_stats;
memcpy(&new_stats, &stats, sizeof(new_stats));
/* simulate "git add" */
new_stats.crlf += new_stats.lonelf;
new_stats.lonelf = 0;
}
- check_safe_crlf(path, crlf_action, &stats, &new_stats, checksafe);
+ check_global_conv_flags_eol(path, crlf_action, &stats, &new_stats, conv_flags);
}
if (!convert_crlf_into_lf)
return 0;
static int ident_to_worktree(const char *path, const char *src, size_t len,
struct strbuf *buf, int ident)
{
- unsigned char sha1[20];
+ struct object_id oid;
char *to_free = NULL, *dollar, *spc;
int cnt;
/* are we "faking" in place editing ? */
if (src == buf->buf)
to_free = strbuf_detach(buf, NULL);
- hash_sha1_file(src, len, "blob", sha1);
+ hash_object_file(src, len, "blob", &oid);
strbuf_grow(buf, len + cnt * 43);
for (;;) {
/* step 4: substitute */
strbuf_addstr(buf, "Id: ");
- strbuf_add(buf, sha1_to_hex(sha1), 40);
+ strbuf_addstr(buf, oid_to_hex(&oid));
strbuf_addstr(buf, " $");
}
strbuf_add(buf, src, len);
int convert_to_git(const struct index_state *istate,
const char *path, const char *src, size_t len,
- struct strbuf *dst, enum safe_crlf checksafe)
+ struct strbuf *dst, int conv_flags)
{
int ret = 0;
struct conv_attrs ca;
src = dst->buf;
len = dst->len;
}
- if (checksafe != SAFE_CRLF_KEEP_CRLF) {
- ret |= crlf_to_git(istate, path, src, len, dst, ca.crlf_action, checksafe);
+ if (!(conv_flags & CONV_EOL_KEEP_CRLF)) {
+ ret |= crlf_to_git(istate, path, src, len, dst, ca.crlf_action, conv_flags);
if (ret && dst) {
src = dst->buf;
len = dst->len;
void convert_to_git_filter_fd(const struct index_state *istate,
const char *path, int fd, struct strbuf *dst,
- enum safe_crlf checksafe)
+ int conv_flags)
{
struct conv_attrs ca;
convert_attrs(&ca, path);
if (!apply_filter(path, NULL, 0, fd, dst, ca.drv, CAP_CLEAN, NULL))
die("%s: clean filter '%s' failed", path, ca.drv->name);
- crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, checksafe);
+ crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, conv_flags);
ident_to_git(path, dst->buf, dst->len, dst, ca.ident);
}
src = dst->buf;
len = dst->len;
}
- return ret | convert_to_git(istate, path, src, len, dst, SAFE_CRLF_RENORMALIZE);
+ return ret | convert_to_git(istate, path, src, len, dst, CONV_EOL_RENORMALIZE);
}
/*****************************************************************
struct index_state;
-enum safe_crlf {
- SAFE_CRLF_FALSE = 0,
- SAFE_CRLF_FAIL = 1,
- SAFE_CRLF_WARN = 2,
- SAFE_CRLF_RENORMALIZE = 3,
- SAFE_CRLF_KEEP_CRLF = 4
-};
+#define CONV_EOL_RNDTRP_DIE (1<<0) /* Die if CRLF to LF to CRLF is different */
+#define CONV_EOL_RNDTRP_WARN (1<<1) /* Warn if CRLF to LF to CRLF is different */
+#define CONV_EOL_RENORMALIZE (1<<2) /* Convert CRLF to LF */
+#define CONV_EOL_KEEP_CRLF (1<<3) /* Keep CRLF line endings as is */
-extern enum safe_crlf safe_crlf;
+extern int global_conv_flags_eol;
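
Because the flags form a bitmask rather than mutually exclusive enum
values, callers can now combine them; a minimal sketch (path and buffer
names hypothetical):

    int conv_flags = CONV_EOL_RENORMALIZE | CONV_EOL_RNDTRP_WARN;
    convert_to_git(&the_index, path, src, len, &dst, conv_flags);
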
enum auto_crlf {
AUTO_CRLF_FALSE = 0,
/* returns 1 if *dst was used */
extern int convert_to_git(const struct index_state *istate,
const char *path, const char *src, size_t len,
- struct strbuf *dst, enum safe_crlf checksafe);
+ struct strbuf *dst, int conv_flags);
extern int convert_to_working_tree(const char *path, const char *src,
size_t len, struct strbuf *dst);
extern int async_convert_to_working_tree(const char *path, const char *src,
extern void convert_to_git_filter_fd(const struct index_state *istate,
const char *path, int fd,
struct strbuf *dst,
- enum safe_crlf checksafe);
+ int conv_flags);
extern int would_convert_to_git_filter_fd(const char *path);
/*****************************************************************
#include "progress.h"
#include "csum-file.h"
-static void flush(struct sha1file *f, const void *buf, unsigned int count)
+static void flush(struct hashfile *f, const void *buf, unsigned int count)
{
if (0 <= f->check_fd && count) {
unsigned char check_buffer[8192];
}
}
-void sha1flush(struct sha1file *f)
+void hashflush(struct hashfile *f)
{
unsigned offset = f->offset;
if (offset) {
- git_SHA1_Update(&f->ctx, f->buffer, offset);
+ the_hash_algo->update_fn(&f->ctx, f->buffer, offset);
flush(f, f->buffer, offset);
f->offset = 0;
}
}
-int sha1close(struct sha1file *f, unsigned char *result, unsigned int flags)
+int hashclose(struct hashfile *f, unsigned char *result, unsigned int flags)
{
int fd;
- sha1flush(f);
- git_SHA1_Final(f->buffer, &f->ctx);
+ hashflush(f);
+ the_hash_algo->final_fn(f->buffer, &f->ctx);
if (result)
hashcpy(result, f->buffer);
if (flags & (CSUM_CLOSE | CSUM_FSYNC)) {
/* write checksum and close fd */
- flush(f, f->buffer, 20);
+ flush(f, f->buffer, the_hash_algo->rawsz);
if (flags & CSUM_FSYNC)
fsync_or_die(f->fd, f->name);
if (close(f->fd))
return fd;
}
-void sha1write(struct sha1file *f, const void *buf, unsigned int count)
+void hashwrite(struct hashfile *f, const void *buf, unsigned int count)
{
while (count) {
unsigned offset = f->offset;
buf = (char *) buf + nr;
left -= nr;
if (!left) {
- git_SHA1_Update(&f->ctx, data, offset);
+ the_hash_algo->update_fn(&f->ctx, data, offset);
flush(f, data, offset);
offset = 0;
}
}
}
-struct sha1file *sha1fd(int fd, const char *name)
+struct hashfile *hashfd(int fd, const char *name)
{
- return sha1fd_throughput(fd, name, NULL);
+ return hashfd_throughput(fd, name, NULL);
}
-struct sha1file *sha1fd_check(const char *name)
+struct hashfile *hashfd_check(const char *name)
{
int sink, check;
- struct sha1file *f;
+ struct hashfile *f;
sink = open("/dev/null", O_WRONLY);
if (sink < 0)
check = open(name, O_RDONLY);
if (check < 0)
die_errno("unable to open '%s'", name);
- f = sha1fd(sink, name);
+ f = hashfd(sink, name);
f->check_fd = check;
return f;
}
-struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp)
+struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp)
{
- struct sha1file *f = xmalloc(sizeof(*f));
+ struct hashfile *f = xmalloc(sizeof(*f));
f->fd = fd;
f->check_fd = -1;
f->offset = 0;
f->tp = tp;
f->name = name;
f->do_crc = 0;
- git_SHA1_Init(&f->ctx);
+ the_hash_algo->init_fn(&f->ctx);
return f;
}
-void sha1file_checkpoint(struct sha1file *f, struct sha1file_checkpoint *checkpoint)
+void hashfile_checkpoint(struct hashfile *f, struct hashfile_checkpoint *checkpoint)
{
- sha1flush(f);
+ hashflush(f);
checkpoint->offset = f->total;
checkpoint->ctx = f->ctx;
}
-int sha1file_truncate(struct sha1file *f, struct sha1file_checkpoint *checkpoint)
+int hashfile_truncate(struct hashfile *f, struct hashfile_checkpoint *checkpoint)
{
off_t offset = checkpoint->offset;
return -1;
f->total = offset;
f->ctx = checkpoint->ctx;
- f->offset = 0; /* sha1flush() was called in checkpoint */
+ f->offset = 0; /* hashflush() was called in checkpoint */
return 0;
}
-void crc32_begin(struct sha1file *f)
+void crc32_begin(struct hashfile *f)
{
f->crc32 = crc32(0, NULL, 0);
f->do_crc = 1;
}
-uint32_t crc32_end(struct sha1file *f)
+uint32_t crc32_end(struct hashfile *f)
{
f->do_crc = 0;
return f->crc32;
struct progress;
/* A SHA1-protected file */
-struct sha1file {
+struct hashfile {
int fd;
int check_fd;
unsigned int offset;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
off_t total;
struct progress *tp;
const char *name;
};
/* Checkpoint */
-struct sha1file_checkpoint {
+struct hashfile_checkpoint {
off_t offset;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
};
-extern void sha1file_checkpoint(struct sha1file *, struct sha1file_checkpoint *);
-extern int sha1file_truncate(struct sha1file *, struct sha1file_checkpoint *);
+extern void hashfile_checkpoint(struct hashfile *, struct hashfile_checkpoint *);
+extern int hashfile_truncate(struct hashfile *, struct hashfile_checkpoint *);
-/* sha1close flags */
+/* hashclose flags */
#define CSUM_CLOSE 1
#define CSUM_FSYNC 2
-extern struct sha1file *sha1fd(int fd, const char *name);
-extern struct sha1file *sha1fd_check(const char *name);
-extern struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp);
-extern int sha1close(struct sha1file *, unsigned char *, unsigned int);
-extern void sha1write(struct sha1file *, const void *, unsigned int);
-extern void sha1flush(struct sha1file *f);
-extern void crc32_begin(struct sha1file *);
-extern uint32_t crc32_end(struct sha1file *);
+extern struct hashfile *hashfd(int fd, const char *name);
+extern struct hashfile *hashfd_check(const char *name);
+extern struct hashfile *hashfd_throughput(int fd, const char *name, struct progress *tp);
+extern int hashclose(struct hashfile *, unsigned char *, unsigned int);
+extern void hashwrite(struct hashfile *, const void *, unsigned int);
+extern void hashflush(struct hashfile *f);
+extern void crc32_begin(struct hashfile *);
+extern uint32_t crc32_end(struct hashfile *);
-static inline void sha1write_u8(struct sha1file *f, uint8_t data)
+static inline void hashwrite_u8(struct hashfile *f, uint8_t data)
{
- sha1write(f, &data, sizeof(data));
+ hashwrite(f, &data, sizeof(data));
}
-static inline void sha1write_be32(struct sha1file *f, uint32_t data)
+static inline void hashwrite_be32(struct hashfile *f, uint32_t data)
{
data = htonl(data);
- sha1write(f, &data, sizeof(data));
+ hashwrite(f, &data, sizeof(data));
}
#endif
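
A minimal writer sketch against the renamed API (descriptor and file name
hypothetical):

    struct hashfile *f = hashfd(fd, "out.pack");
    hashwrite(f, buf, len);
    hashwrite_be32(f, 42);
    /* append the trailing hash, fsync, and close the descriptor */
    hashclose(f, NULL, CSUM_FSYNC);
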
#define initgroups(x, y) (0) /* nothing */
#endif
-static int log_syslog;
+static enum log_destination {
+ LOG_DESTINATION_UNSET = -1,
+ LOG_DESTINATION_NONE = 0,
+ LOG_DESTINATION_STDERR = 1,
+ LOG_DESTINATION_SYSLOG = 2,
+} log_destination = LOG_DESTINATION_UNSET;
static int verbose;
static int reuseaddr;
static int informative_errors;
" [--access-hook=<path>]\n"
" [--inetd | [--listen=<host_or_ipaddr>] [--port=<n>]\n"
" [--detach] [--user=<user> [--group=<group>]]\n"
+" [--log-destination=(stderr|syslog|none)]\n"
" [<directory>...]";
/* List of acceptable pathname prefixes */
static void logreport(int priority, const char *err, va_list params)
{
- if (log_syslog) {
+ switch (log_destination) {
+ case LOG_DESTINATION_SYSLOG: {
char buf[1024];
vsnprintf(buf, sizeof(buf), err, params);
syslog(priority, "%s", buf);
- } else {
+ break;
+ }
+ case LOG_DESTINATION_STDERR:
/*
* Since stderr is set to buffered mode, the
* logging of different processes will not overlap
vfprintf(stderr, err, params);
fputc('\n', stderr);
fflush(stderr);
+ break;
+ case LOG_DESTINATION_NONE:
+ break;
+ case LOG_DESTINATION_UNSET:
+ BUG("log destination not initialized correctly");
}
}
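
For illustration, the destination can now be forced independently of the
run mode; absent an explicit option, the initialization further below
falls back to syslog under --inetd or --detach, and to stderr otherwise:

    git daemon --inetd --log-destination=stderr
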
if (strncasecmp("host=", extra_args, 5) == 0) {
val = extra_args + 5;
vallen = strlen(val) + 1;
+ loginfo("Extended attribute \"host\": %s", val);
if (*val) {
/* Split <host>:<port> at colon. */
char *host;
}
}
- if (git_protocol.len > 0)
+ if (git_protocol.len > 0) {
+ loginfo("Extended attribute \"protocol\": %s", git_protocol.buf);
argv_array_pushf(env, GIT_PROTOCOL_ENVIRONMENT "=%s",
git_protocol.buf);
+ }
strbuf_release(&git_protocol);
}
alarm(0);
len = strlen(line);
- if (pktlen != len)
- loginfo("Extended attributes (%d bytes) exist <%.*s>",
- (int) pktlen - len,
- (int) pktlen - len, line + len + 1);
- if (len && line[len-1] == '\n') {
- line[--len] = 0;
- pktlen--;
- }
+ if (len && line[len-1] == '\n')
+ line[len-1] = 0;
/* parse additional args hidden behind a NUL byte */
if (len != pktlen)
}
if (!strcmp(arg, "--inetd")) {
inetd_mode = 1;
- log_syslog = 1;
continue;
}
if (!strcmp(arg, "--verbose")) {
continue;
}
if (!strcmp(arg, "--syslog")) {
- log_syslog = 1;
+ log_destination = LOG_DESTINATION_SYSLOG;
continue;
}
+ if (skip_prefix(arg, "--log-destination=", &v)) {
+ if (!strcmp(v, "syslog")) {
+ log_destination = LOG_DESTINATION_SYSLOG;
+ continue;
+ } else if (!strcmp(v, "stderr")) {
+ log_destination = LOG_DESTINATION_STDERR;
+ continue;
+ } else if (!strcmp(v, "none")) {
+ log_destination = LOG_DESTINATION_NONE;
+ continue;
+ } else
+ die("unknown log destination '%s'", v);
+ }
if (!strcmp(arg, "--export-all")) {
export_all_trees = 1;
continue;
}
if (!strcmp(arg, "--detach")) {
detach = 1;
- log_syslog = 1;
continue;
}
if (skip_prefix(arg, "--user=", &v)) {
usage(daemon_usage);
}
- if (log_syslog) {
+ if (log_destination == LOG_DESTINATION_UNSET) {
+ if (inetd_mode || detach)
+ log_destination = LOG_DESTINATION_SYSLOG;
+ else
+ log_destination = LOG_DESTINATION_STDERR;
+ }
+
+ if (log_destination == LOG_DESTINATION_SYSLOG) {
openlog("git-daemon", LOG_PID, LOG_DAEMON);
set_die_routine(daemon_die);
} else
int diff_unmerged_stage = revs->max_count;
unsigned ce_option = ((option & DIFF_RACY_IS_MODIFIED)
? CE_MATCH_RACY_IS_DIRTY : 0);
+ uint64_t start = getnanotime();
diff_set_mnemonic_prefix(&revs->diffopt, "i/", "w/");
}
diffcore_std(&revs->diffopt);
diff_flush(&revs->diffopt);
+ trace_performance_since(start, "diff-files");
return 0;
}
}
static void show_new_file(struct rev_info *revs,
- const struct cache_entry *new,
+ const struct cache_entry *new_file,
int cached, int match_missing)
{
const struct object_id *oid;
* New file in the index: it might actually be different in
* the working tree.
*/
- if (get_stat_data(new, &oid, &mode, cached, match_missing,
+ if (get_stat_data(new_file, &oid, &mode, cached, match_missing,
&dirty_submodule, &revs->diffopt) < 0)
return;
- diff_index_show_file(revs, "+", new, oid, !is_null_oid(oid), mode, dirty_submodule);
+ diff_index_show_file(revs, "+", new_file, oid, !is_null_oid(oid), mode, dirty_submodule);
}
static int show_modified(struct rev_info *revs,
- const struct cache_entry *old,
- const struct cache_entry *new,
+ const struct cache_entry *old_entry,
+ const struct cache_entry *new_entry,
int report_missing,
int cached, int match_missing)
{
const struct object_id *oid;
unsigned dirty_submodule = 0;
- if (get_stat_data(new, &oid, &mode, cached, match_missing,
+ if (get_stat_data(new_entry, &oid, &mode, cached, match_missing,
&dirty_submodule, &revs->diffopt) < 0) {
if (report_missing)
- diff_index_show_file(revs, "-", old,
- &old->oid, 1, old->ce_mode,
+ diff_index_show_file(revs, "-", old_entry,
+ &old_entry->oid, 1, old_entry->ce_mode,
0);
return -1;
}
if (revs->combine_merges && !cached &&
- (oidcmp(oid, &old->oid) || oidcmp(&old->oid, &new->oid))) {
+ (oidcmp(oid, &old_entry->oid) || oidcmp(&old_entry->oid, &new_entry->oid))) {
struct combine_diff_path *p;
- int pathlen = ce_namelen(new);
+ int pathlen = ce_namelen(new_entry);
p = xmalloc(combine_diff_path_size(2, pathlen));
p->path = (char *) &p->parent[2];
p->next = NULL;
- memcpy(p->path, new->name, pathlen);
+ memcpy(p->path, new_entry->name, pathlen);
p->path[pathlen] = 0;
p->mode = mode;
oidclr(&p->oid);
memset(p->parent, 0, 2 * sizeof(struct combine_diff_parent));
p->parent[0].status = DIFF_STATUS_MODIFIED;
- p->parent[0].mode = new->ce_mode;
- oidcpy(&p->parent[0].oid, &new->oid);
+ p->parent[0].mode = new_entry->ce_mode;
+ oidcpy(&p->parent[0].oid, &new_entry->oid);
p->parent[1].status = DIFF_STATUS_MODIFIED;
- p->parent[1].mode = old->ce_mode;
- oidcpy(&p->parent[1].oid, &old->oid);
+ p->parent[1].mode = old_entry->ce_mode;
+ oidcpy(&p->parent[1].oid, &old_entry->oid);
show_combined_diff(p, 2, revs->dense_combined_merges, revs);
free(p);
return 0;
}
- oldmode = old->ce_mode;
- if (mode == oldmode && !oidcmp(oid, &old->oid) && !dirty_submodule &&
+ oldmode = old_entry->ce_mode;
+ if (mode == oldmode && !oidcmp(oid, &old_entry->oid) && !dirty_submodule &&
!revs->diffopt.flags.find_copies_harder)
return 0;
diff_change(&revs->diffopt, oldmode, mode,
- &old->oid, oid, 1, !is_null_oid(oid),
- old->name, 0, dirty_submodule);
+ &old_entry->oid, oid, 1, !is_null_oid(oid),
+ old_entry->name, 0, dirty_submodule);
return 0;
}
int run_diff_index(struct rev_info *revs, int cached)
{
struct object_array_entry *ent;
+ uint64_t start = getnanotime();
ent = revs->pending.objects;
if (diff_cache(revs, &ent->item->oid, ent->name, cached))
diffcore_fix_diff_index(&revs->diffopt);
diffcore_std(&revs->diffopt);
diff_flush(&revs->diffopt);
+ trace_performance_since(start, "diff-index");
return 0;
}
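
run_diff_files() and run_diff_index() now share the same lightweight probe: getnanotime() on entry, trace_performance_since() on exit. Running with GIT_TRACE_PERFORMANCE set prints one elapsed-time line per call, tagged "diff-files" or "diff-index" respectively (the exact line format comes from trace.c and is not shown in this patch).
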
struct diff_words_style {
enum diff_words_type type;
- struct diff_words_style_elem new, old, ctx;
+ struct diff_words_style_elem new_word, old_word, ctx;
const char *newline;
};
}
if (minus_begin != minus_end) {
fn_out_diff_words_write_helper(diff_words->opt,
- &style->old, style->newline,
+ &style->old_word, style->newline,
minus_end - minus_begin, minus_begin);
}
if (plus_begin != plus_end) {
fn_out_diff_words_write_helper(diff_words->opt,
- &style->new, style->newline,
+ &style->new_word, style->newline,
plus_end - plus_begin, plus_begin);
}
emit_diff_symbol(diff_words->opt, DIFF_SYMBOL_WORD_DIFF,
line_prefix, strlen(line_prefix), 0);
fn_out_diff_words_write_helper(diff_words->opt,
- &style->old, style->newline,
+ &style->old_word, style->newline,
diff_words->minus.text.size,
diff_words->minus.text.ptr);
diff_words->minus.text.size = 0;
}
if (want_color(o->use_color)) {
struct diff_words_style *st = ecbdata->diff_words->style;
- st->old.color = diff_get_color_opt(o, DIFF_FILE_OLD);
- st->new.color = diff_get_color_opt(o, DIFF_FILE_NEW);
+ st->old_word.color = diff_get_color_opt(o, DIFF_FILE_OLD);
+ st->new_word.color = diff_get_color_opt(o, DIFF_FILE_NEW);
st->ctx.color = diff_get_color_opt(o, DIFF_CONTEXT);
}
}
}
}
-static char *pprint_rename(const char *a, const char *b)
+static void pprint_rename(struct strbuf *name, const char *a, const char *b)
{
- const char *old = a;
- const char *new = b;
- struct strbuf name = STRBUF_INIT;
+ const char *old_name = a;
+ const char *new_name = b;
int pfx_length, sfx_length;
int pfx_adjust_for_slash;
int len_a = strlen(a);
int qlen_b = quote_c_style(b, NULL, NULL, 0);
if (qlen_a || qlen_b) {
- quote_c_style(a, &name, NULL, 0);
- strbuf_addstr(&name, " => ");
- quote_c_style(b, &name, NULL, 0);
- return strbuf_detach(&name, NULL);
+ quote_c_style(a, name, NULL, 0);
+ strbuf_addstr(name, " => ");
+ quote_c_style(b, name, NULL, 0);
+ return;
}
/* Find common prefix */
pfx_length = 0;
- while (*old && *new && *old == *new) {
- if (*old == '/')
- pfx_length = old - a + 1;
- old++;
- new++;
+ while (*old_name && *new_name && *old_name == *new_name) {
+ if (*old_name == '/')
+ pfx_length = old_name - a + 1;
+ old_name++;
+ new_name++;
}
/* Find common suffix */
- old = a + len_a;
- new = b + len_b;
+ old_name = a + len_a;
+ new_name = b + len_b;
sfx_length = 0;
/*
* If there is a common prefix, it must end in a slash. In
* underrun the input strings.
*/
pfx_adjust_for_slash = (pfx_length ? 1 : 0);
- while (a + pfx_length - pfx_adjust_for_slash <= old &&
- b + pfx_length - pfx_adjust_for_slash <= new &&
- *old == *new) {
- if (*old == '/')
- sfx_length = len_a - (old - a);
- old--;
- new--;
+ while (a + pfx_length - pfx_adjust_for_slash <= old_name &&
+ b + pfx_length - pfx_adjust_for_slash <= new_name &&
+ *old_name == *new_name) {
+ if (*old_name == '/')
+ sfx_length = len_a - (old_name - a);
+ old_name--;
+ new_name--;
}
/*
if (b_midlen < 0)
b_midlen = 0;
- strbuf_grow(&name, pfx_length + a_midlen + b_midlen + sfx_length + 7);
+ strbuf_grow(name, pfx_length + a_midlen + b_midlen + sfx_length + 7);
if (pfx_length + sfx_length) {
- strbuf_add(&name, a, pfx_length);
- strbuf_addch(&name, '{');
+ strbuf_add(name, a, pfx_length);
+ strbuf_addch(name, '{');
}
- strbuf_add(&name, a + pfx_length, a_midlen);
- strbuf_addstr(&name, " => ");
- strbuf_add(&name, b + pfx_length, b_midlen);
+ strbuf_add(name, a + pfx_length, a_midlen);
+ strbuf_addstr(name, " => ");
+ strbuf_add(name, b + pfx_length, b_midlen);
if (pfx_length + sfx_length) {
- strbuf_addch(&name, '}');
- strbuf_add(&name, a + len_a - sfx_length, sfx_length);
+ strbuf_addch(name, '}');
+ strbuf_add(name, a + len_a - sfx_length, sfx_length);
}
- return strbuf_detach(&name, NULL);
}
struct diffstat_t {
char *from_name;
char *name;
char *print_name;
+ const char *comments;
unsigned is_unmerged:1;
unsigned is_binary:1;
unsigned is_renamed:1;
static void fill_print_name(struct diffstat_file *file)
{
- char *pname;
+ struct strbuf pname = STRBUF_INIT;
if (file->print_name)
return;
- if (!file->is_renamed) {
- struct strbuf buf = STRBUF_INIT;
- if (quote_c_style(file->name, &buf, NULL, 0)) {
- pname = strbuf_detach(&buf, NULL);
- } else {
- pname = file->name;
- strbuf_release(&buf);
- }
- } else {
- pname = pprint_rename(file->from_name, file->name);
- }
- file->print_name = pname;
+ if (file->is_renamed)
+ pprint_rename(&pname, file->from_name, file->name);
+ else
+ quote_c_style(file->name, &pname, NULL, 0);
+
+ if (file->comments)
+ strbuf_addf(&pname, " (%s)", file->comments);
+
+ file->print_name = strbuf_detach(&pname, NULL);
}
static void print_stat_summary_inserts_deletes(struct diff_options *options,
static long gather_dirstat(struct diff_options *opt, struct dirstat_dir *dir,
unsigned long changed, const char *base, int baselen)
{
- unsigned long this_dir = 0;
+ unsigned long sum_changes = 0;
unsigned int sources = 0;
const char *line_prefix = diff_line_prefix(opt);
while (dir->nr) {
struct dirstat_file *f = dir->files;
int namelen = strlen(f->name);
- unsigned long this;
+ unsigned long changes;
char *slash;
if (namelen < baselen)
slash = strchr(f->name + baselen, '/');
if (slash) {
int newbaselen = slash + 1 - f->name;
- this = gather_dirstat(opt, dir, changed, f->name, newbaselen);
+ changes = gather_dirstat(opt, dir, changed, f->name, newbaselen);
sources++;
} else {
- this = f->changed;
+ changes = f->changed;
dir->files++;
dir->nr--;
sources += 2;
}
- this_dir += this;
+ sum_changes += changes;
}
/*
* under this directory (sources == 1).
*/
if (baselen && sources != 1) {
- if (this_dir) {
- int permille = this_dir * 1000 / changed;
+ if (sum_changes) {
+ int permille = sum_changes * 1000 / changed;
if (permille >= dir->permille) {
fprintf(opt->file, "%s%4d.%01d%% %.*s\n", line_prefix,
permille / 10, permille % 10, baselen, base);
}
}
}
- return this_dir;
+ return sum_changes;
}
static int dirstat_compare(const void *_a, const void *_b)
int i;
for (i = 0; i < diffstat->nr; i++) {
struct diffstat_file *f = diffstat->files[i];
- if (f->name != f->print_name)
- free(f->print_name);
+ free(f->print_name);
free(f->name);
free(f->from_name);
free(f);
return;
}
+static const char *get_compact_summary(const struct diff_filepair *p, int is_renamed)
+{
+ if (!is_renamed) {
+ if (p->status == DIFF_STATUS_ADDED) {
+ if (S_ISLNK(p->two->mode))
+ return "new +l";
+ else if ((p->two->mode & 0777) == 0755)
+ return "new +x";
+ else
+ return "new";
+ } else if (p->status == DIFF_STATUS_DELETED)
+ return "gone";
+ }
+ if (S_ISLNK(p->one->mode) && !S_ISLNK(p->two->mode))
+ return "mode -l";
+ else if (!S_ISLNK(p->one->mode) && S_ISLNK(p->two->mode))
+ return "mode +l";
+ else if ((p->one->mode & 0777) == 0644 &&
+ (p->two->mode & 0777) == 0755)
+ return "mode +x";
+ else if ((p->one->mode & 0777) == 0755 &&
+ (p->two->mode & 0777) == 0644)
+ return "mode -x";
+ return NULL;
+}
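
These short strings end up appended, in parentheses, to the pathname column of the diffstat when --compact-summary is in effect (see the " (%s)" in fill_print_name() above), so a newly added executable shows up roughly as "foo.sh (new +x) | 2 ++" (illustrative; the usual diffstat width logic still applies).
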
+
static void builtin_diffstat(const char *name_a, const char *name_b,
struct diff_filespec *one,
struct diff_filespec *two,
data = diffstat_add(diffstat, name_a, name_b);
data->is_interesting = p->status != DIFF_STATUS_UNKNOWN;
+ if (o->flags.stat_with_summary)
+ data->comments = get_compact_summary(p, data->is_renamed);
if (!one || !two) {
data->is_unmerged = 1;
{
int size_only = flags & CHECK_SIZE_ONLY;
int err = 0;
+ int conv_flags = global_conv_flags_eol;
/*
* demote FAIL to WARN to allow inspecting the situation
* instead of refusing.
*/
- enum safe_crlf crlf_warn = (safe_crlf == SAFE_CRLF_FAIL
- ? SAFE_CRLF_WARN
- : safe_crlf);
+ if (conv_flags & CONV_EOL_RNDTRP_DIE)
+ conv_flags = CONV_EOL_RNDTRP_WARN;
if (!DIFF_FILE_VALID(s))
die("internal error: asking to populate invalid file.");
/*
* Convert from working tree format to canonical git format
*/
- if (convert_to_git(&the_index, s->path, s->data, s->size, &buf, crlf_warn)) {
+ if (convert_to_git(&the_index, s->path, s->data, s->size, &buf, conv_flags)) {
size_t size = 0;
munmap(s->data, s->size);
s->should_munmap = 0;
int mode)
{
struct strbuf buf = STRBUF_INIT;
- struct strbuf template = STRBUF_INIT;
+ struct strbuf tempfile = STRBUF_INIT;
char *path_dup = xstrdup(path);
const char *base = basename(path_dup);
/* Generate "XXXXXX_basename.ext" */
- strbuf_addstr(&template, "XXXXXX_");
- strbuf_addstr(&template, base);
+ strbuf_addstr(&tempfile, "XXXXXX_");
+ strbuf_addstr(&tempfile, base);
- temp->tempfile = mks_tempfile_ts(template.buf, strlen(base) + 1);
+ temp->tempfile = mks_tempfile_ts(tempfile.buf, strlen(base) + 1);
if (!temp->tempfile)
die_errno("unable to create temp-file");
if (convert_to_working_tree(path,
oid_to_hex_r(temp->hex, oid);
xsnprintf(temp->mode, sizeof(temp->mode), "%06o", mode);
strbuf_release(&buf);
- strbuf_release(&template);
+ strbuf_release(&tempfile);
free(path_dup);
}
options->interhunkcontext = diff_interhunk_context_default;
options->ws_error_highlight = ws_error_highlight_default;
options->flags.rename_empty = 1;
+ options->objfind = NULL;
/* pathchange left =NULL by default */
options->change = diff_change;
void diff_setup_done(struct diff_options *options)
{
- int count = 0;
+ unsigned check_mask = DIFF_FORMAT_NAME |
+ DIFF_FORMAT_NAME_STATUS |
+ DIFF_FORMAT_CHECKDIFF |
+ DIFF_FORMAT_NO_OUTPUT;
if (options->set_default)
options->set_default(options);
- if (options->output_format & DIFF_FORMAT_NAME)
- count++;
- if (options->output_format & DIFF_FORMAT_NAME_STATUS)
- count++;
- if (options->output_format & DIFF_FORMAT_CHECKDIFF)
- count++;
- if (options->output_format & DIFF_FORMAT_NO_OUTPUT)
- count++;
- if (count > 1)
+ if (HAS_MULTI_BITS(options->output_format & check_mask))
die(_("--name-only, --name-status, --check and -s are mutually exclusive"));
+ if (HAS_MULTI_BITS(options->pickaxe_opts & DIFF_PICKAXE_KINDS_MASK))
+ die(_("-G, -S and --find-object are mutually exclusive"));
+
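
A quick aside on HAS_MULTI_BITS, since both new checks lean on it: it is the classic clear-the-lowest-set-bit test from git-compat-util.h. A minimal sketch of the idea:

	/*
	 * True when more than one bit is set: i & (i - 1) clears the
	 * lowest set bit, so anything left over means a second bit.
	 */
	#define HAS_MULTI_BITS(i) ((i) & ((i) - 1))

so each die() above fires exactly when two or more of the mutually exclusive options were combined.
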
/*
* Most of the time we can say "there are changes"
* only by checking if there are changed paths, but
/*
* Also pickaxe would not work very well if you do not say recursive
*/
- if (options->pickaxe)
+ if (options->pickaxe_opts & DIFF_PICKAXE_KINDS_MASK)
options->flags.recursive = 1;
/*
* When patches are generated, submodules diffed against the work tree
return 1;
}
+static int parse_objfind_opt(struct diff_options *opt, const char *arg)
+{
+ struct object_id oid;
+
+ if (get_oid(arg, &oid))
+ return error("unable to resolve '%s'", arg);
+
+ if (!opt->objfind)
+ opt->objfind = xcalloc(1, sizeof(*opt->objfind));
+
+ opt->pickaxe_opts |= DIFF_PICKAXE_KIND_OBJFIND;
+ opt->flags.recursive = 1;
+ opt->flags.tree_in_recursive = 1;
+ oidset_insert(opt->objfind, &oid);
+ return 1;
+}
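
parse_objfind_opt() lazily allocates an oidset, git's set of object IDs built on the oidmap hashmap. A minimal sketch of the two calls the feature relies on (per oidset.h; report_match() is a hypothetical placeholder):

	struct oidset seen = OIDSET_INIT;

	oidset_insert(&seen, &oid);	/* returns 1 if it was already present */
	if (oidset_contains(&seen, &oid))
		report_match(&oid);	/* hypothetical */

The matching side lives in the diffcore-pickaxe.c hunk further down, which tests both halves of each filepair against this set.
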
+
int diff_opt_parse(struct diff_options *options,
const char **av, int ac, const char *prefix)
{
else if (starts_with(arg, "--stat"))
/* --stat, --stat-width, --stat-name-width, or --stat-count */
return stat_opt(options, av);
+ else if (!strcmp(arg, "--compact-summary")) {
+ options->flags.stat_with_summary = 1;
+ options->output_format |= DIFF_FORMAT_DIFFSTAT;
+ } else if (!strcmp(arg, "--no-compact-summary"))
+ options->flags.stat_with_summary = 0;
/* renames options */
else if (starts_with(arg, "-B") ||
else if ((argcount = short_opt('O', av, &optarg))) {
options->orderfile = prefix_filename(prefix, optarg);
return argcount;
- }
+ } else if (skip_prefix(arg, "--find-object=", &arg))
+ return parse_objfind_opt(options, arg);
else if ((argcount = parse_long_opt("diff-filter", av, &optarg))) {
int offending = parse_diff_filter_opt(optarg, options);
if (offending)
struct diff_filepair *p)
{
struct strbuf sb = STRBUF_INIT;
- char *names = pprint_rename(p->one->path, p->two->path);
+ struct strbuf names = STRBUF_INIT;
+
+ pprint_rename(&names, p->one->path, p->two->path);
strbuf_addf(&sb, " %s %s (%d%%)\n",
- renamecopy, names, similarity_index(p));
- free(names);
+ renamecopy, names.buf, similarity_index(p));
+ strbuf_release(&names);
emit_diff_symbol(opt, DIFF_SYMBOL_SUMMARY,
sb.buf, sb.len, 0);
show_mode_change(opt, p, 0);
void diff_warn_rename_limit(const char *varname, int needed, int degraded_cc)
{
+ fflush(stdout);
if (degraded_cc)
warning(_(degrade_cc_to_c_warning));
else if (needed)
if (options->break_opt != -1)
diffcore_merge_broken();
}
- if (options->pickaxe)
+ if (options->pickaxe_opts & DIFF_PICKAXE_KINDS_MASK)
diffcore_pickaxe(options);
if (options->orderfile)
diffcore_order(options->orderfile);
#include "tree-walk.h"
#include "pathspec.h"
#include "object.h"
+#include "oidset.h"
struct rev_info;
struct diff_options;
unsigned override_submodule_config:1;
unsigned dirstat_by_line:1;
unsigned funccontext:1;
- unsigned pickaxe_ignore_case:1;
unsigned default_follow_renames:1;
+ unsigned stat_with_summary:1;
};
static inline void diff_flags_or(struct diff_flags *a,
int skip_stat_unmatch;
int line_termination;
int output_format;
- int pickaxe_opts;
+ unsigned pickaxe_opts;
int rename_score;
int rename_limit;
int needed_rename_limit;
enum diff_words_type word_diff;
enum diff_submodule_format submodule_format;
+ struct oidset *objfind;
+
/* this is set by diffcore for DIFF_FORMAT_PATCH */
int found_changes;
#define DIFF_PICKAXE_KIND_S 4 /* traditional plumbing counter */
#define DIFF_PICKAXE_KIND_G 8 /* grep in the patch */
+#define DIFF_PICKAXE_KIND_OBJFIND 16 /* specific object IDs */
+
+#define DIFF_PICKAXE_KINDS_MASK (DIFF_PICKAXE_KIND_S | \
+ DIFF_PICKAXE_KIND_G | \
+ DIFF_PICKAXE_KIND_OBJFIND)
+
+#define DIFF_PICKAXE_IGNORE_CASE 32
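
The low bits of pickaxe_opts now carry three mutually exclusive kinds (-S is 4, -G is 8, --find-object is 16, grouped by DIFF_PICKAXE_KINDS_MASK and policed by the die() added in diff_setup_done() above) plus an independent case-insensitivity bit (32) that replaces the old pickaxe_ignore_case flag in diff_flags.
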
extern void diffcore_std(struct diff_options *);
extern void diffcore_fix_diff_index(struct diff_options *);
static struct spanhash_top *spanhash_rehash(struct spanhash_top *orig)
{
- struct spanhash_top *new;
+ struct spanhash_top *new_spanhash;
int i;
int osz = 1 << orig->alloc_log2;
int sz = osz << 1;
- new = xmalloc(st_add(sizeof(*orig),
+ new_spanhash = xmalloc(st_add(sizeof(*orig),
st_mult(sizeof(struct spanhash), sz)));
- new->alloc_log2 = orig->alloc_log2 + 1;
- new->free = INITIAL_FREE(new->alloc_log2);
- memset(new->data, 0, sizeof(struct spanhash) * sz);
+ new_spanhash->alloc_log2 = orig->alloc_log2 + 1;
+ new_spanhash->free = INITIAL_FREE(new_spanhash->alloc_log2);
+ memset(new_spanhash->data, 0, sizeof(struct spanhash) * sz);
for (i = 0; i < osz; i++) {
struct spanhash *o = &(orig->data[i]);
int bucket;
continue;
bucket = o->hashval & (sz - 1);
while (1) {
- struct spanhash *h = &(new->data[bucket++]);
+ struct spanhash *h = &(new_spanhash->data[bucket++]);
if (!h->cnt) {
h->hashval = o->hashval;
h->cnt = o->cnt;
- new->free--;
+ new_spanhash->free--;
break;
}
if (sz <= bucket)
}
}
free(orig);
- return new;
+ return new_spanhash;
}
static struct spanhash_top *add_spanhash(struct spanhash_top *top,
mmfile_t mf1, mf2;
int ret;
- if (!o->pickaxe[0])
- return 0;
-
/* ignore unmerged */
if (!DIFF_FILE_VALID(p->one) && !DIFF_FILE_VALID(p->two))
return 0;
+ if (o->objfind) {
+ return (DIFF_FILE_VALID(p->one) &&
+ oidset_contains(o->objfind, &p->one->oid)) ||
+ (DIFF_FILE_VALID(p->two) &&
+ oidset_contains(o->objfind, &p->two->oid));
+ }
+
+ if (!o->pickaxe[0])
+ return 0;
+
if (o->flags.allow_textconv) {
textconv_one = get_textconv(p->one);
textconv_two = get_textconv(p->two);
if (opts & (DIFF_PICKAXE_REGEX | DIFF_PICKAXE_KIND_G)) {
int cflags = REG_EXTENDED | REG_NEWLINE;
- if (o->flags.pickaxe_ignore_case)
+ if (o->pickaxe_opts & DIFF_PICKAXE_IGNORE_CASE)
cflags |= REG_ICASE;
regcomp_or_die(&regex, needle, cflags);
regexp = &regex;
- } else if (o->flags.pickaxe_ignore_case &&
- has_non_ascii(needle)) {
- struct strbuf sb = STRBUF_INIT;
- int cflags = REG_NEWLINE | REG_ICASE;
-
- basic_regex_quote_buf(&sb, needle);
- regcomp_or_die(&regex, sb.buf, cflags);
- strbuf_release(&sb);
- regexp = &regex;
- } else {
- kws = kwsalloc(o->flags.pickaxe_ignore_case
- ? tolower_trans_tbl : NULL);
- kwsincr(kws, needle, strlen(needle));
- kwsprep(kws);
+ } else if (opts & DIFF_PICKAXE_KIND_S) {
+ if (o->pickaxe_opts & DIFF_PICKAXE_IGNORE_CASE &&
+ has_non_ascii(needle)) {
+ struct strbuf sb = STRBUF_INIT;
+ int cflags = REG_NEWLINE | REG_ICASE;
+
+ basic_regex_quote_buf(&sb, needle);
+ regcomp_or_die(&regex, sb.buf, cflags);
+ strbuf_release(&sb);
+ regexp = &regex;
+ } else {
+ kws = kwsalloc(o->pickaxe_opts & DIFF_PICKAXE_IGNORE_CASE
+ ? tolower_trans_tbl : NULL);
+ kwsincr(kws, needle, strlen(needle));
+ kwsprep(kws);
+ }
}
- /* Might want to warn when both S and G are on; I don't care... */
pickaxe(&diff_queued_diff, o, regexp, kws,
(opts & DIFF_PICKAXE_KIND_G) ? diff_grep : has_changes);
if (regexp)
regfree(regexp);
- else
+ if (kws)
kwsfree(kws);
return;
}
ALLOC_GROW(rename_dst, rename_dst_nr + 1, rename_dst_alloc);
rename_dst_nr++;
if (first < rename_dst_nr)
- memmove(rename_dst + first + 1, rename_dst + first,
- (rename_dst_nr - first - 1) * sizeof(*rename_dst));
+ MOVE_ARRAY(rename_dst + first + 1, rename_dst + first,
+ rename_dst_nr - first - 1);
rename_dst[first].two = alloc_filespec(two->path);
fill_filespec(rename_dst[first].two, &two->oid, two->oid_valid,
two->mode);
ALLOC_GROW(rename_src, rename_src_nr + 1, rename_src_alloc);
rename_src_nr++;
if (first < rename_src_nr)
- memmove(rename_src + first + 1, rename_src + first,
- (rename_src_nr - first - 1) * sizeof(*rename_src));
+ MOVE_ARRAY(rename_src + first + 1, rename_src + first,
+ rename_src_nr - first - 1);
rename_src[first].p = p;
rename_src[first].score = score;
return &(rename_src[first]);
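
MOVE_ARRAY replaces the open-coded memmove-with-sizeof calls here and in the dir.c hunk below. Per git-compat-util.h it is a thin wrapper that computes the byte count itself, with an overflow-checked multiply and a compile-time check that source and destination elements have the same size; roughly:

	static inline void move_array(void *dst, const void *src,
				      size_t n, size_t size)
	{
		if (n)
			memmove(dst, src, st_mult(size, n)); /* st_mult dies on overflow */
	}

which removes the easy-to-botch "(nr - first - 1) * sizeof(*array)" arithmetic from every call site.
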
if (!filespec->oid_valid) {
if (diff_populate_filespec(filespec, 0))
return 0;
- hash_sha1_file(filespec->data, filespec->size, "blob",
- filespec->oid.hash);
+ hash_object_file(filespec->data, filespec->size, "blob",
+ &filespec->oid);
}
return sha1hash(filespec->oid.hash);
}
* 1 along with { data, size } of the (possibly augmented) buffer
* when successful.
*
- * Optionally updates the given sha1_stat with the given OID (when valid).
+ * Optionally updates the given oid_stat with the given OID (when valid).
*/
-static int do_read_blob(const struct object_id *oid,
- struct sha1_stat *sha1_stat,
- size_t *size_out,
- char **data_out)
+static int do_read_blob(const struct object_id *oid, struct oid_stat *oid_stat,
+ size_t *size_out, char **data_out)
{
enum object_type type;
unsigned long sz;
return -1;
}
- if (sha1_stat) {
- memset(&sha1_stat->stat, 0, sizeof(sha1_stat->stat));
- hashcpy(sha1_stat->sha1, oid->hash);
+ if (oid_stat) {
+ memset(&oid_stat->stat, 0, sizeof(oid_stat->stat));
+ oidcpy(&oid_stat->oid, oid);
}
if (sz == 0) {
static int read_skip_worktree_file_from_index(const struct index_state *istate,
const char *path,
- size_t *size_out,
- char **data_out,
- struct sha1_stat *sha1_stat)
+ size_t *size_out, char **data_out,
+ struct oid_stat *oid_stat)
{
int pos, len;
if (!ce_skip_worktree(istate->cache[pos]))
return -1;
- return do_read_blob(&istate->cache[pos]->oid, sha1_stat, size_out, data_out);
+ return do_read_blob(&istate->cache[pos]->oid, oid_stat, size_out, data_out);
}
/*
FLEX_ALLOC_MEM(d, name, name, len);
ALLOC_GROW(dir->dirs, dir->dirs_nr + 1, dir->dirs_alloc);
- memmove(dir->dirs + first + 1, dir->dirs + first,
- (dir->dirs_nr - first) * sizeof(*dir->dirs));
+ MOVE_ARRAY(dir->dirs + first + 1, dir->dirs + first,
+ dir->dirs_nr - first);
dir->dirs_nr++;
dir->dirs[first] = d;
return d;
struct untracked_cache_dir *dir)
{
int i;
- uc->dir_invalidated++;
+
+ /*
+ * Invalidation increment here is just roughly correct. If
+ * untracked_nr or any of dirs[].recurse is non-zero, we
+ * should increment dir_invalidated too. But that's more
+ * expensive to do.
+ */
+ if (dir->valid)
+ uc->dir_invalidated++;
+
dir->valid = 0;
dir->untracked_nr = 0;
for (i = 0; i < dir->dirs_nr; i++)
* ss_valid is non-zero, "ss" must contain good value as input.
*/
static int add_excludes(const char *fname, const char *base, int baselen,
- struct exclude_list *el,
- struct index_state *istate,
- struct sha1_stat *sha1_stat)
+ struct exclude_list *el, struct index_state *istate,
+ struct oid_stat *oid_stat)
{
struct stat st;
int r;
return -1;
r = read_skip_worktree_file_from_index(istate, fname,
&size, &buf,
- sha1_stat);
+ oid_stat);
if (r != 1)
return r;
} else {
size = xsize_t(st.st_size);
if (size == 0) {
- if (sha1_stat) {
- fill_stat_data(&sha1_stat->stat, &st);
- hashcpy(sha1_stat->sha1, EMPTY_BLOB_SHA1_BIN);
- sha1_stat->valid = 1;
+ if (oid_stat) {
+ fill_stat_data(&oid_stat->stat, &st);
+ oidcpy(&oid_stat->oid, &empty_blob_oid);
+ oid_stat->valid = 1;
}
close(fd);
return 0;
}
buf[size++] = '\n';
close(fd);
- if (sha1_stat) {
+ if (oid_stat) {
int pos;
- if (sha1_stat->valid &&
- !match_stat_data_racy(istate, &sha1_stat->stat, &st))
+ if (oid_stat->valid &&
+ !match_stat_data_racy(istate, &oid_stat->stat, &st))
; /* no content change, oid_stat->oid still good */
else if (istate &&
(pos = index_name_pos(istate, fname, strlen(fname))) >= 0 &&
!ce_stage(istate->cache[pos]) &&
ce_uptodate(istate->cache[pos]) &&
!would_convert_to_git(istate, fname))
- hashcpy(sha1_stat->sha1,
- istate->cache[pos]->oid.hash);
+ oidcpy(&oid_stat->oid,
+ &istate->cache[pos]->oid);
else
- hash_sha1_file(buf, size, "blob", sha1_stat->sha1);
- fill_stat_data(&sha1_stat->stat, &st);
- sha1_stat->valid = 1;
+ hash_object_file(buf, size, "blob",
+ &oid_stat->oid);
+ fill_stat_data(&oid_stat->stat, &st);
+ oid_stat->valid = 1;
}
}
* Used to set up core.excludesfile and .git/info/exclude lists.
*/
static void add_excludes_from_file_1(struct dir_struct *dir, const char *fname,
- struct sha1_stat *sha1_stat)
+ struct oid_stat *oid_stat)
{
struct exclude_list *el;
/*
if (!dir->untracked)
dir->unmanaged_exclude_files++;
el = add_exclude_list(dir, EXC_FILE, fname);
- if (add_excludes(fname, "", 0, el, NULL, sha1_stat) < 0)
+ if (add_excludes(fname, "", 0, el, NULL, oid_stat) < 0)
die("cannot use %s as an exclude file", fname);
}
while (current < baselen) {
const char *cp;
- struct sha1_stat sha1_stat;
+ struct oid_stat oid_stat;
stk = xcalloc(1, sizeof(*stk));
if (current < 0) {
}
/* Try to read per-directory file */
- hashclr(sha1_stat.sha1);
- sha1_stat.valid = 0;
+ oidclr(&oid_stat.oid);
+ oid_stat.valid = 0;
if (dir->exclude_per_dir &&
/*
* If we know that no files have been added in
strbuf_addstr(&sb, dir->exclude_per_dir);
el->src = strbuf_detach(&sb, NULL);
add_excludes(el->src, el->src, stk->baselen, el, istate,
- untracked ? &sha1_stat : NULL);
+ untracked ? &oid_stat : NULL);
}
/*
* NEEDSWORK: when untracked cache is enabled, prep_exclude()
* order, though, if you do that.
*/
if (untracked &&
- hashcmp(sha1_stat.sha1, untracked->exclude_sha1)) {
+ hashcmp(oid_stat.oid.hash, untracked->exclude_sha1)) {
invalidate_gitignore(dir->untracked, untracked);
- hashcpy(untracked->exclude_sha1, sha1_stat.sha1);
+ hashcpy(untracked->exclude_sha1, oid_stat.oid.hash);
}
dir->exclude_stack = stk;
current = stk->baselen;
if (!de)
return treat_path_fast(dir, untracked, cdir, istate, path,
baselen, pathspec);
- if (is_dot_or_dotdot(de->d_name) || !strcmp(de->d_name, ".git"))
+ if (is_dot_or_dotdot(de->d_name) || !fspathcmp(de->d_name, ".git"))
return path_none;
strbuf_setlen(path, baselen);
strbuf_addstr(path, de->d_name);
*/
refresh_fsmonitor(istate);
if (!(dir->untracked->use_fsmonitor && untracked->valid)) {
- if (stat(path->len ? path->buf : ".", &st)) {
- invalidate_directory(dir->untracked, untracked);
+ if (lstat(path->len ? path->buf : ".", &st)) {
memset(&untracked->stat_data, 0, sizeof(untracked->stat_data));
return 0;
}
if (!untracked->valid ||
match_stat_data_racy(istate, &untracked->stat_data, &st)) {
- if (untracked->valid)
- invalidate_directory(dir->untracked, untracked);
fill_stat_data(&untracked->stat_data, &st);
return 0;
}
}
- if (untracked->check_only != !!check_only) {
- invalidate_directory(dir->untracked, untracked);
+ if (untracked->check_only != !!check_only)
return 0;
- }
/*
* prep_exclude will be called eventually on this directory,
struct strbuf *path,
int check_only)
{
+ const char *c_path;
+
memset(cdir, 0, sizeof(*cdir));
cdir->untracked = untracked;
if (valid_cached_dir(dir, untracked, istate, path, check_only))
return 0;
- cdir->fdir = opendir(path->len ? path->buf : ".");
- if (dir->untracked)
+ c_path = path->len ? path->buf : ".";
+ cdir->fdir = opendir(c_path);
+ if (!cdir->fdir)
+ warning_errno(_("could not open directory '%s'"), c_path);
+ if (dir->untracked) {
+ invalidate_directory(dir->untracked, untracked);
dir->untracked->dir_opened++;
+ }
if (!cdir->fdir)
return -1;
return 0;
const struct pathspec *pathspec)
{
struct untracked_cache_dir *root;
+ static int untracked_cache_disabled = -1;
- if (!dir->untracked || getenv("GIT_DISABLE_UNTRACKED_CACHE"))
+ if (!dir->untracked)
+ return NULL;
+ if (untracked_cache_disabled < 0)
+ untracked_cache_disabled = git_env_bool("GIT_DISABLE_UNTRACKED_CACHE", 0);
+ if (untracked_cache_disabled)
return NULL;
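
Memoizing git_env_bool() in a function-local static means GIT_DISABLE_UNTRACKED_CACHE is consulted once per process rather than on every call; note also the subtle semantic shift from mere presence of the variable (getenv) to a parsed boolean, so GIT_DISABLE_UNTRACKED_CACHE=0 no longer disables the cache.
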
/*
/* Validate $GIT_DIR/info/exclude and core.excludesfile */
root = dir->untracked->root;
- if (hashcmp(dir->ss_info_exclude.sha1,
- dir->untracked->ss_info_exclude.sha1)) {
+ if (oidcmp(&dir->ss_info_exclude.oid,
+ &dir->untracked->ss_info_exclude.oid)) {
invalidate_gitignore(dir->untracked, root);
dir->untracked->ss_info_exclude = dir->ss_info_exclude;
}
- if (hashcmp(dir->ss_excludes_file.sha1,
- dir->untracked->ss_excludes_file.sha1)) {
+ if (oidcmp(&dir->ss_excludes_file.oid,
+ &dir->untracked->ss_excludes_file.oid)) {
invalidate_gitignore(dir->untracked, root);
dir->untracked->ss_excludes_file = dir->ss_excludes_file;
}
const char *path, int len, const struct pathspec *pathspec)
{
struct untracked_cache_dir *untracked;
+ uint64_t start = getnanotime();
if (has_symlink_leading_path(path, len))
return dir->nr;
dir->nr = i;
}
+ trace_performance_since(start, "read directory %.*s", len, path);
if (dir->untracked) {
+ static int force_untracked_cache = -1;
static struct trace_key trace_untracked_stats = TRACE_KEY_INIT(UNTRACKED_STATS);
+
+ if (force_untracked_cache < 0)
+ force_untracked_cache =
+ git_env_bool("GIT_FORCE_UNTRACKED_CACHE", 0);
trace_printf_key(&trace_untracked_stats,
"node creation: %u\n"
"gitignore invalidation: %u\n"
dir->untracked->gitignore_invalidated,
dir->untracked->dir_invalidated,
dir->untracked->dir_opened);
- if (dir->untracked == istate->untracked &&
+ if (force_untracked_cache &&
+ dir->untracked == istate->untracked &&
(dir->untracked->dir_opened ||
dir->untracked->gitignore_invalidated ||
dir->untracked->dir_invalidated))
FLEX_ALLOC_MEM(ouc, exclude_per_dir, untracked->exclude_per_dir, len);
stat_data_to_disk(&ouc->info_exclude_stat, &untracked->ss_info_exclude.stat);
stat_data_to_disk(&ouc->excludes_file_stat, &untracked->ss_excludes_file.stat);
- hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.sha1);
- hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.sha1);
+ hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.oid.hash);
+ hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.oid.hash);
ouc->dir_flags = htonl(untracked->dir_flags);
varint_len = encode_varint(untracked->ident.len, varbuf);
rd->data += 20;
}
-static void load_sha1_stat(struct sha1_stat *sha1_stat,
- const unsigned char *data,
- const unsigned char *sha1)
+static void load_oid_stat(struct oid_stat *oid_stat, const unsigned char *data,
+ const unsigned char *sha1)
{
- stat_data_from_disk(&sha1_stat->stat, data);
- hashcpy(sha1_stat->sha1, sha1);
- sha1_stat->valid = 1;
+ stat_data_from_disk(&oid_stat->stat, data);
+ hashcpy(oid_stat->oid.hash, sha1);
+ oid_stat->valid = 1;
}
struct untracked_cache *read_untracked_extension(const void *data, unsigned long sz)
uc = xcalloc(1, sizeof(*uc));
strbuf_init(&uc->ident, ident_len);
strbuf_add(&uc->ident, ident, ident_len);
- load_sha1_stat(&uc->ss_info_exclude,
- next + ouc_offset(info_exclude_stat),
- next + ouc_offset(info_exclude_sha1));
- load_sha1_stat(&uc->ss_excludes_file,
- next + ouc_offset(excludes_file_stat),
- next + ouc_offset(excludes_file_sha1));
+ load_oid_stat(&uc->ss_info_exclude,
+ next + ouc_offset(info_exclude_stat),
+ next + ouc_offset(info_exclude_sha1));
+ load_oid_stat(&uc->ss_excludes_file,
+ next + ouc_offset(excludes_file_stat),
+ next + ouc_offset(excludes_file_sha1));
uc->dir_flags = get_be32(next + ouc_offset(dir_flags));
exclude_per_dir = (const char *)next + ouc_offset(exclude_per_dir);
uc->exclude_per_dir = xstrdup(exclude_per_dir);
}
void untracked_cache_invalidate_path(struct index_state *istate,
- const char *path)
+ const char *path, int safe_path)
{
if (!istate->untracked || !istate->untracked->root)
return;
+ if (!safe_path && !verify_path(path))
+ return;
invalidate_one_component(istate->untracked, istate->untracked->root,
path, strlen(path));
}
void untracked_cache_remove_from_index(struct index_state *istate,
const char *path)
{
- untracked_cache_invalidate_path(istate, path);
+ untracked_cache_invalidate_path(istate, path, 1);
}
void untracked_cache_add_to_index(struct index_state *istate,
const char *path)
{
- untracked_cache_invalidate_path(istate, path);
+ untracked_cache_invalidate_path(istate, path, 1);
}
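
The new safe_path flag lets callers whose paths already passed index validation (the two wrappers above, and mark_fsmonitor_invalid in the fsmonitor.c hunk below) skip re-checking, while the fsmonitor refresh callback passes 0 so that names arriving from the external monitor go through verify_path() before they can touch the untracked cache.
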
/* Update gitfile and core.worktree setting to connect work tree and git dir */
struct exclude_list *el;
};
-struct sha1_stat {
+struct oid_stat {
struct stat_data stat;
- unsigned char sha1[20];
+ struct object_id oid;
int valid;
};
};
struct untracked_cache {
- struct sha1_stat ss_info_exclude;
- struct sha1_stat ss_excludes_file;
+ struct oid_stat ss_info_exclude;
+ struct oid_stat ss_excludes_file;
const char *exclude_per_dir;
struct strbuf ident;
/*
/* Enable untracked file cache if set */
struct untracked_cache *untracked;
- struct sha1_stat ss_info_exclude;
- struct sha1_stat ss_excludes_file;
+ struct oid_stat ss_info_exclude;
+ struct oid_stat ss_excludes_file;
unsigned unmanaged_exclude_files;
};
int cmp_dir_entry(const void *p1, const void *p2);
int check_dir_entry_contains(const struct dir_entry *out, const struct dir_entry *in);
-void untracked_cache_invalidate_path(struct index_state *, const char *);
+void untracked_cache_invalidate_path(struct index_state *, const char *, int safe_path);
void untracked_cache_remove_from_index(struct index_state *, const char *);
void untracked_cache_add_to_index(struct index_state *, const char *);
static void *read_blob_entry(const struct cache_entry *ce, unsigned long *size)
{
enum object_type type;
- void *new = read_sha1_file(ce->oid.hash, &type, size);
+ void *blob_data = read_sha1_file(ce->oid.hash, &type, size);
- if (new) {
+ if (blob_data) {
if (type == OBJ_BLOB)
- return new;
- free(new);
+ return blob_data;
+ free(blob_data);
}
return NULL;
}
unsigned int ce_mode_s_ifmt = ce->ce_mode & S_IFMT;
struct delayed_checkout *dco = state->delayed_checkout;
int fd, ret, fstat_done = 0;
- char *new;
+ char *new_blob;
struct strbuf buf = STRBUF_INIT;
unsigned long size;
ssize_t wrote;
switch (ce_mode_s_ifmt) {
case S_IFLNK:
- new = read_blob_entry(ce, &size);
- if (!new)
+ new_blob = read_blob_entry(ce, &size);
+ if (!new_blob)
return error("unable to read sha1 file of %s (%s)",
path, oid_to_hex(&ce->oid));
if (!has_symlinks || to_tempfile)
goto write_file_entry;
- ret = symlink(new, path);
- free(new);
+ ret = symlink(new_blob, path);
+ free(new_blob);
if (ret)
return error_errno("unable to create symlink %s", path);
break;
* bother reading it at all.
*/
if (dco && dco->state == CE_RETRY) {
- new = NULL;
+ new_blob = NULL;
size = 0;
} else {
- new = read_blob_entry(ce, &size);
- if (!new)
+ new_blob = read_blob_entry(ce, &size);
+ if (!new_blob)
return error("unable to read sha1 file of %s (%s)",
path, oid_to_hex(&ce->oid));
}
* Convert from git internal format to working tree format
*/
if (dco && dco->state != CE_NO_DELAY) {
- ret = async_convert_to_working_tree(ce->name, new,
+ ret = async_convert_to_working_tree(ce->name, new_blob,
size, &buf, dco);
if (ret && string_list_has_string(&dco->paths, ce->name)) {
- free(new);
+ free(new_blob);
goto delayed;
}
} else
- ret = convert_to_working_tree(ce->name, new, size, &buf);
+ ret = convert_to_working_tree(ce->name, new_blob, size, &buf);
if (ret) {
- free(new);
- new = strbuf_detach(&buf, &newsize);
+ free(new_blob);
+ new_blob = strbuf_detach(&buf, &newsize);
size = newsize;
}
/*
write_file_entry:
fd = open_output_fd(path, ce, to_tempfile);
if (fd < 0) {
- free(new);
+ free(new_blob);
return error_errno("unable to create file %s", path);
}
- wrote = write_in_full(fd, new, size);
+ wrote = write_in_full(fd, new_blob, size);
if (!to_tempfile)
fstat_done = fstat_output(fd, state, &st);
close(fd);
- free(new);
+ free(new_blob);
if (wrote < 0)
return error("unable to write file %s", path);
break;
int warn_on_object_refname_ambiguity = 1;
int ref_paranoia = -1;
int repository_format_precious_objects;
+char *repository_format_partial_clone;
+const char *core_partial_clone_filter_default;
const char *git_commit_encoding;
const char *git_log_output_encoding;
const char *apply_default_whitespace;
int check_replace_refs = 1;
char *git_replace_ref_base;
enum eol core_eol = EOL_UNSET;
-enum safe_crlf safe_crlf = SAFE_CRLF_WARN;
+int global_conv_flags_eol = CONV_EOL_RNDTRP_WARN;
unsigned whitespace_rule_cfg = WS_DEFAULT_RULE;
enum branch_track git_branch_track = BRANCH_TRACK_REMOTE;
enum rebase_setup_type autorebase = AUTOREBASE_NEVER;
/* This is set by setup_git_dir_gently() and/or git_default_config() */
char *git_work_tree_cfg;
-static char *namespace;
+static char *git_namespace;
static const char *super_prefix;
free(git_replace_ref_base);
git_replace_ref_base = xstrdup(replace_ref_base ? replace_ref_base
: "refs/replace/");
- free(namespace);
- namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT));
+ free(git_namespace);
+ git_namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT));
shallow_file = getenv(GIT_SHALLOW_FILE_ENVIRONMENT);
if (shallow_file)
set_alternate_shallow_file(shallow_file, 0);
const char *get_git_namespace(void)
{
- if (!namespace)
+ if (!git_namespace)
BUG("git environment hasn't been setup");
- return namespace;
+ return git_namespace;
}
const char *strip_namespace(const char *namespaced_ref)
return the_repository->objectdir;
}
-int odb_mkstemp(struct strbuf *template, const char *pattern)
+int odb_mkstemp(struct strbuf *temp_filename, const char *pattern)
{
int fd;
/*
* restrictive except to remove write permission.
*/
int mode = 0444;
- git_path_buf(template, "objects/%s", pattern);
- fd = git_mkstemp_mode(template->buf, mode);
+ git_path_buf(temp_filename, "objects/%s", pattern);
+ fd = git_mkstemp_mode(temp_filename->buf, mode);
if (0 <= fd)
return fd;
/* slow path */
- /* some mkstemp implementations erase template on failure */
- git_path_buf(template, "objects/%s", pattern);
- safe_create_leading_directories(template->buf);
- return xmkstemp_mode(template->buf, mode);
+ /* some mkstemp implementations erase temp_filename on failure */
+ git_path_buf(temp_filename, "objects/%s", pattern);
+ safe_create_leading_directories(temp_filename->buf);
+ return xmkstemp_mode(temp_filename->buf, mode);
}
int odb_pack_keep(const char *name)
/* The .pack file being generated */
static struct pack_idx_option pack_idx_opts;
static unsigned int pack_id;
-static struct sha1file *pack_file;
+static struct hashfile *pack_file;
static struct packed_git *pack_data;
static struct packed_git **all_packs;
static off_t pack_size;
p->pack_fd = pack_fd;
p->do_not_close = 1;
- pack_file = sha1fd(pack_fd, p->pack_name);
+ pack_file = hashfd(pack_fd, p->pack_name);
hdr.hdr_signature = htonl(PACK_SIGNATURE);
hdr.hdr_version = htonl(2);
hdr.hdr_entries = 0;
- sha1write(pack_file, &hdr, sizeof(hdr));
+ hashwrite(pack_file, &hdr, sizeof(hdr));
pack_data = p;
pack_size = sizeof(hdr);
struct tag *t;
close_pack_windows(pack_data);
- sha1close(pack_file, cur_pack_oid.hash, 0);
+ hashclose(pack_file, cur_pack_oid.hash, 0);
fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1,
pack_data->pack_name, object_count,
cur_pack_oid.hash, pack_size);
unsigned char hdr[96];
struct object_id oid;
unsigned long hdrlen, deltalen;
- git_SHA_CTX c;
+ git_hash_ctx c;
git_zstream s;
hdrlen = xsnprintf((char *)hdr, sizeof(hdr), "%s %lu",
- typename(type), (unsigned long)dat->len) + 1;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, hdrlen);
- git_SHA1_Update(&c, dat->buf, dat->len);
- git_SHA1_Final(oid.hash, &c);
+ type_name(type), (unsigned long)dat->len) + 1;
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
+ the_hash_algo->update_fn(&c, dat->buf, dat->len);
+ the_hash_algo->final_fn(oid.hash, &c);
if (oidout)
oidcpy(oidout, &oid);
return 1;
}
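
From here on, fast-import's direct SHA-1 calls go through the_hash_algo, the vtable added for the hash-function transition. A sketch of the members this patch relies on (field order and remaining members elided; see hash.h):

	struct git_hash_algo {
		git_hash_init_fn init_fn;	/* replaces git_SHA1_Init */
		git_hash_update_fn update_fn;	/* replaces git_SHA1_Update */
		git_hash_final_fn final_fn;	/* replaces git_SHA1_Final */
		size_t rawsz;			/* 20 for SHA-1 */
		size_t hexsz;			/* 40 for SHA-1 */
	};

so the hard-coded 20s and GIT_SHA1_HEXSZ uses below become rawsz/hexsz lookups.
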
- if (last && last->data.buf && last->depth < max_depth && dat->len > 20) {
+ if (last && last->data.buf && last->depth < max_depth
+ && dat->len > the_hash_algo->rawsz) {
+
delta_count_attempts_by_type[type]++;
delta = diff_delta(last->data.buf, last->data.len,
dat->buf, dat->len,
- &deltalen, dat->len - 20);
+ &deltalen, dat->len - the_hash_algo->rawsz);
} else
delta = NULL;
hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
OBJ_OFS_DELTA, deltalen);
- sha1write(pack_file, hdr, hdrlen);
+ hashwrite(pack_file, hdr, hdrlen);
pack_size += hdrlen;
hdr[pos] = ofs & 127;
while (ofs >>= 7)
hdr[--pos] = 128 | (--ofs & 127);
- sha1write(pack_file, hdr + pos, sizeof(hdr) - pos);
+ hashwrite(pack_file, hdr + pos, sizeof(hdr) - pos);
pack_size += sizeof(hdr) - pos;
} else {
e->depth = 0;
hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
type, dat->len);
- sha1write(pack_file, hdr, hdrlen);
+ hashwrite(pack_file, hdr, hdrlen);
pack_size += hdrlen;
}
- sha1write(pack_file, out, s.total_out);
+ hashwrite(pack_file, out, s.total_out);
pack_size += s.total_out;
e->idx.crc32 = crc32_end(pack_file);
return 0;
}
-static void truncate_pack(struct sha1file_checkpoint *checkpoint)
+static void truncate_pack(struct hashfile_checkpoint *checkpoint)
{
- if (sha1file_truncate(pack_file, checkpoint))
+ if (hashfile_truncate(pack_file, checkpoint))
die_errno("cannot truncate pack to skip duplicate");
pack_size = checkpoint->offset;
}
struct object_id oid;
unsigned long hdrlen;
off_t offset;
- git_SHA_CTX c;
+ git_hash_ctx c;
git_zstream s;
- struct sha1file_checkpoint checkpoint;
+ struct hashfile_checkpoint checkpoint;
int status = Z_OK;
/* Determine if we should auto-checkpoint. */
|| (pack_size + 60 + len) < pack_size)
cycle_packfile();
- sha1file_checkpoint(pack_file, &checkpoint);
+ hashfile_checkpoint(pack_file, &checkpoint);
offset = checkpoint.offset;
hdrlen = xsnprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, out_buf, hdrlen);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, out_buf, hdrlen);
crc32_begin(pack_file);
if (!n && feof(stdin))
die("EOF in data (%" PRIuMAX " bytes remaining)", len);
- git_SHA1_Update(&c, in_buf, n);
+ the_hash_algo->update_fn(&c, in_buf, n);
s.next_in = in_buf;
s.avail_in = n;
len -= n;
if (!s.avail_out || status == Z_STREAM_END) {
size_t n = s.next_out - out_buf;
- sha1write(pack_file, out_buf, n);
+ hashwrite(pack_file, out_buf, n);
pack_size += n;
s.next_out = out_buf;
s.avail_out = out_sz;
}
}
git_deflate_end(&s);
- git_SHA1_Final(oid.hash, &c);
+ the_hash_algo->final_fn(oid.hash, &c);
if (oidout)
oidcpy(oidout, &oid);
{
enum object_type type;
struct packed_git *p = all_packs[oe->pack_id];
- if (p == pack_data && p->pack_size < (pack_size + 20)) {
+ if (p == pack_data && p->pack_size < (pack_size + the_hash_algo->rawsz)) {
/* The object is stored in the packfile we are writing to
* and we have modified it since the last time we scanned
* back to read a previously written object. If an old
- * window covered [p->pack_size, p->pack_size + 20) its
+ * window covered [p->pack_size, p->pack_size + rawsz) its
* data is stale and is not valid. Closing all windows
* and updating the packfile length ensures we can read
* the newly written data.
*/
close_pack_windows(p);
- sha1flush(pack_file);
+ hashflush(pack_file);
- /* We have to offer 20 bytes additional on the end of
+ /* We have to offer rawsz bytes additional on the end of
* the packfile as the core unpacker code assumes the
* footer is present at the file end and must promise
- * at least 20 bytes within any window it maps. But
+ * at least rawsz bytes within any window it maps. But
* we don't actually create the footer here.
*/
- p->pack_size = pack_size + 20;
+ p->pack_size = pack_size + the_hash_algo->rawsz;
}
return unpack_entry(p, oe->idx.offset, &type, sizep);
}
unsigned char fanout, char *path)
{
unsigned int i = 0, j = 0;
- if (fanout >= 20)
+ if (fanout >= the_hash_algo->rawsz)
die("Too large fanout (%u)", fanout);
while (fanout) {
path[i++] = hex_sha1[j++];
path[i++] = '/';
fanout--;
}
- memcpy(path + i, hex_sha1 + j, GIT_SHA1_HEXSZ - j);
- path[i + GIT_SHA1_HEXSZ - j] = '\0';
+ memcpy(path + i, hex_sha1 + j, the_hash_algo->hexsz - j);
+ path[i + the_hash_algo->hexsz - j] = '\0';
}
static uintmax_t do_change_note_fanout(
else if (oe) {
if (oe->type != OBJ_COMMIT)
die("Not a commit (actually a %s): %s",
- typename(oe->type), command_buf.buf);
+ type_name(oe->type), command_buf.buf);
}
/*
* Accept the sha1 without checking; it is expected to be in
command_buf.buf);
if (type != expected)
die("Not a %s (actually a %s): %s",
- typename(expected), typename(type),
+ type_name(expected), type_name(type),
command_buf.buf);
}
} else if (oe) {
if (oe->type != OBJ_BLOB)
die("Not a blob (actually a %s): %s",
- typename(oe->type), command_buf.buf);
+ type_name(oe->type), command_buf.buf);
} else if (!is_null_oid(&oid)) {
enum object_type type = sha1_object_info(oid.hash, NULL);
if (type < 0)
die("Blob not found: %s", command_buf.buf);
if (type != OBJ_BLOB)
die("Not a blob (actually a %s): %s",
- typename(type), command_buf.buf);
+ type_name(type), command_buf.buf);
}
construct_path_with_fanout(oid_to_hex(&commit_oid), *old_fanout, path);
"object %s\n"
"type %s\n"
"tag %s\n",
- oid_to_hex(&oid), typename(type), t->name);
+ oid_to_hex(&oid), type_name(type), t->name);
if (tagger)
strbuf_addf(&new_data,
"tagger %s\n", tagger);
die("Can't read object %s", oid_to_hex(oid));
if (type != OBJ_BLOB)
die("Object %s is a %s but a blob was expected.",
- oid_to_hex(oid), typename(type));
+ oid_to_hex(oid), type_name(type));
strbuf_reset(&line);
strbuf_addf(&line, "%s %s %lu\n", oid_to_hex(oid),
- typename(type), size);
+ type_name(type), size);
cat_blob_write(line.buf, line.len);
strbuf_release(&line);
cat_blob_write(buf, size);
--- /dev/null
+#include "cache.h"
+#include "packfile.h"
+#include "pkt-line.h"
+#include "strbuf.h"
+#include "transport.h"
+#include "fetch-object.h"
+
+static void fetch_refs(const char *remote_name, struct ref *ref)
+{
+ struct remote *remote;
+ struct transport *transport;
+ int original_fetch_if_missing = fetch_if_missing;
+
+ fetch_if_missing = 0;
+ remote = remote_get(remote_name);
+ if (!remote->url[0])
+ die(_("Remote with no URL"));
+ transport = transport_get(remote, remote->url[0]);
+
+ transport_set_option(transport, TRANS_OPT_FROM_PROMISOR, "1");
+ transport_set_option(transport, TRANS_OPT_NO_DEPENDENTS, "1");
+ transport_fetch_refs(transport, ref);
+ fetch_if_missing = original_fetch_if_missing;
+}
+
+void fetch_object(const char *remote_name, const unsigned char *sha1)
+{
+ struct ref *ref = alloc_ref(sha1_to_hex(sha1));
+ hashcpy(ref->old_oid.hash, sha1);
+ fetch_refs(remote_name, ref);
+}
+
+void fetch_objects(const char *remote_name, const struct oid_array *to_fetch)
+{
+ struct ref *ref = NULL;
+ int i;
+
+ for (i = 0; i < to_fetch->nr; i++) {
+ struct ref *new_ref = alloc_ref(oid_to_hex(&to_fetch->oid[i]));
+ oidcpy(&new_ref->old_oid, &to_fetch->oid[i]);
+ new_ref->next = ref;
+ ref = new_ref;
+ }
+ fetch_refs(remote_name, ref);
+}
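
This new file is the client half of partial clone: it fetches specific objects from a promisor remote on demand. The two transport options it sets map onto the from_promisor and no_dependents bits added to fetch_pack_args below; as far as these hunks show, those in turn make index-pack mark the resulting pack as promisor-provided (--promisor) and skip the usual have/common negotiation, since dependents of the requested objects are deliberately not fetched.
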
--- /dev/null
+#ifndef FETCH_OBJECT_H
+#define FETCH_OBJECT_H
+
+#include "sha1-array.h"
+
+extern void fetch_object(const char *remote_name, const unsigned char *sha1);
+
+extern void fetch_objects(const char *remote_name,
+ const struct oid_array *to_fetch);
+
+#endif
static int fetch_fsck_objects = -1;
static int transfer_fsck_objects = -1;
static int agent_supported;
+static int server_supports_filtering;
static struct lock_file shallow_lock;
static const char *alternate_shallow_file;
char *line = packet_read_line(fd, &len);
const char *arg;
- if (!len)
- die(_("git fetch-pack: expected ACK/NAK, got EOF"));
+ if (!line)
+ die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
if (!strcmp(line, "NAK"))
return NAK;
if (skip_prefix(line, "ACK ", &arg)) {
if (deepen_not_ok) strbuf_addstr(&c, " deepen-not");
if (agent_supported) strbuf_addf(&c, " agent=%s",
git_user_agent_sanitized());
+ if (args->filter_options.choice)
+ strbuf_addstr(&c, " filter");
packet_buf_write(&req_buf, "want %s%s\n", remote_hex, c.buf);
strbuf_release(&c);
} else
packet_buf_write(&req_buf, "deepen-not %s", s->string);
}
}
+ if (server_supports_filtering && args->filter_options.choice)
+ packet_buf_write(&req_buf, "filter %s",
+ args->filter_options.filter_spec);
packet_buf_flush(&req_buf);
state_len = req_buf.len;
flushes = 0;
retval = -1;
+ if (args->no_dependents)
+ goto done;
while ((oid = get_rev())) {
packet_buf_write(&req_buf, "have %s\n", oid_to_hex(oid));
print_verbose(args, "have %s", oid_to_hex(oid));
{
struct ref *ref;
int retval;
+ int old_save_commit_buffer = save_commit_buffer;
timestamp_t cutoff = 0;
save_commit_buffer = 0;
}
}
- if (!args->deepen) {
- for_each_ref(mark_complete_oid, NULL);
- for_each_cached_alternate(mark_alternate_complete);
- commit_list_sort_by_date(&complete);
- if (cutoff)
- mark_recent_complete_commits(args, cutoff);
- }
+ if (!args->no_dependents) {
+ if (!args->deepen) {
+ for_each_ref(mark_complete_oid, NULL);
+ for_each_cached_alternate(mark_alternate_complete);
+ commit_list_sort_by_date(&complete);
+ if (cutoff)
+ mark_recent_complete_commits(args, cutoff);
+ }
- /*
- * Mark all complete remote refs as common refs.
- * Don't mark them common yet; the server has to be told so first.
- */
- for (ref = *refs; ref; ref = ref->next) {
- struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
- NULL, 0);
+ /*
+ * Mark all complete remote refs as common refs.
+ * Don't mark them common yet; the server has to be told so first.
+ */
+ for (ref = *refs; ref; ref = ref->next) {
+ struct object *o = deref_tag(lookup_object(ref->old_oid.hash),
+ NULL, 0);
- if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
- continue;
+ if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
+ continue;
- if (!(o->flags & SEEN)) {
- rev_list_push((struct commit *)o, COMMON_REF | SEEN);
+ if (!(o->flags & SEEN)) {
+ rev_list_push((struct commit *)o, COMMON_REF | SEEN);
- mark_common((struct commit *)o, 1, 1);
+ mark_common((struct commit *)o, 1, 1);
+ }
}
}
print_verbose(args, _("already have %s (%s)"), oid_to_hex(remote),
ref->name);
}
+
+ save_commit_buffer = old_save_commit_buffer;
+
return retval;
}
argv_array_push(&cmd.args, alternate_shallow_file);
}
- if (do_keep) {
+ if (do_keep || args->from_promisor) {
if (pack_lockfile)
cmd.out = -1;
cmd_name = "index-pack";
argv_array_push(&cmd.args, "-v");
if (args->use_thin_pack)
argv_array_push(&cmd.args, "--fix-thin");
- if (args->lock_pack || unpack_limit) {
+ if (do_keep && (args->lock_pack || unpack_limit)) {
char hostname[HOST_NAME_MAX + 1];
if (xgethostname(hostname, sizeof(hostname)))
xsnprintf(hostname, sizeof(hostname), "localhost");
}
if (args->check_self_contained_and_connected)
argv_array_push(&cmd.args, "--check-self-contained-and-connected");
+ if (args->from_promisor)
+ argv_array_push(&cmd.args, "--promisor");
}
else {
cmd_name = "unpack-objects";
? fetch_fsck_objects
: transfer_fsck_objects >= 0
? transfer_fsck_objects
- : 0)
- argv_array_push(&cmd.args, "--strict");
+ : 0) {
+ if (args->from_promisor)
+ /*
+ * We cannot use --strict in index-pack because it
+ * checks both broken objects and links, but we only
+ * want to check for broken objects.
+ */
+ argv_array_push(&cmd.args, "--fsck-objects");
+ else
+ argv_array_push(&cmd.args, "--strict");
+ }
cmd.in = demux.out;
cmd.git_cmd = 1;
else
prefer_ofs_delta = 0;
+ if (server_supports("filter")) {
+ server_supports_filtering = 1;
+ print_verbose(args, _("Server supports filter"));
+ } else if (args->filter_options.choice) {
+ warning("filtering not recognized by server, ignoring");
+ }
+
if ((agent_feature = server_feature_value("agent", &agent_len))) {
agent_supported = 1;
if (agent_len)
#include "string-list.h"
#include "run-command.h"
+#include "list-objects-filter-options.h"
struct oid_array;
int depth;
const char *deepen_since;
const struct string_list *deepen_not;
+ struct list_objects_filter_options filter_options;
unsigned deepen_relative:1;
unsigned quiet:1;
unsigned keep_pack:1;
unsigned cloning:1;
unsigned update_shallow:1;
unsigned deepen:1;
+ unsigned from_promisor:1;
+
+ /*
+ * If 1, fetch_pack() will also not modify any object flags.
+ * This allows fetch_pack() to safely be called by any function,
+ * regardless of which object flags it uses (if any).
+ */
+ unsigned no_dependents:1;
};
/*
ret = report(options, &tag->object,
FSCK_MSG_TAG_OBJECT_NOT_TAG,
"expected tag got %s",
- typename(type));
+ type_name(type));
goto done;
}
}
* as it could be a new untracked file.
*/
trace_printf_key(&trace_fsmonitor, "fsmonitor_refresh_callback '%s'", name);
- untracked_cache_invalidate_path(istate, name);
+ untracked_cache_invalidate_path(istate, name, 0);
}
void refresh_fsmonitor(struct index_state *istate)
{
if (core_fsmonitor) {
ce->ce_flags &= ~CE_FSMONITOR_VALID;
- untracked_cache_invalidate_path(istate, ce->name);
+ untracked_cache_invalidate_path(istate, ce->name, 1);
trace_printf_key(&trace_fsmonitor, "mark_fsmonitor_invalid '%s'", ce->name);
}
}
sub run_git_apply {
my $cmd = shift;
my $fh;
- open $fh, '| git ' . $cmd . " --recount --allow-overlap";
+ open $fh, '| git ' . $cmd . " --allow-overlap";
print $fh @_;
return close $fh;
}
}
my (@hunk) = { TEXT => [], DISPLAY => [], TYPE => 'header' };
+ if (@colored && @colored != @diff) {
+ print STDERR
+ "fatal: mismatched output from interactive.diffFilter\n",
+ "hint: Your filter must maintain a one-to-one correspondence\n",
+ "hint: between its input and output lines.\n";
+ exit 1;
+ }
+
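
This guards setups like interactive.diffFilter=diff-highlight: the filter's colored output is paired line-for-line with the plain diff when hunks are split and displayed, so a filter that adds or removes lines would silently desynchronize the two arrays; failing loudly up front is the safer behavior.
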
for (my $i = 0; $i < @diff; $i++) {
if ($diff[$i] =~ /^@@ /) {
push @hunk, { TEXT => [], DISPLAY => [],
return ($o_ofs, $o_cnt, $n_ofs, $n_cnt);
}
+sub format_hunk_header {
+ my ($o_ofs, $o_cnt, $n_ofs, $n_cnt) = @_;
+ return ("@@ -$o_ofs" .
+ (($o_cnt != 1) ? ",$o_cnt" : '') .
+ " +$n_ofs" .
+ (($n_cnt != 1) ? ",$n_cnt" : '') .
+ " @@\n");
+}
+
sub split_hunk {
my ($text, $display) = @_;
my @split = ();
while (++$i < @$text) {
my $line = $text->[$i];
my $display = $display->[$i];
+ if ($line =~ /^\\/) {
+ push @{$this->{TEXT}}, $line;
+ push @{$this->{DISPLAY}}, $display;
+ next;
+ }
if ($line =~ /^ /) {
if ($this->{ADDDEL} &&
!defined $next_hunk_start) {
my $o_cnt = $hunk->{OCNT};
my $n_cnt = $hunk->{NCNT};
- my $head = ("@@ -$o_ofs" .
- (($o_cnt != 1) ? ",$o_cnt" : '') .
- " +$n_ofs" .
- (($n_cnt != 1) ? ",$n_cnt" : '') .
- " @@\n");
+ my $head = format_hunk_header($o_ofs, $o_cnt, $n_ofs, $n_cnt);
my $display_head = $head;
unshift @{$hunk->{TEXT}}, $head;
if ($diff_use_color) {
$n_cnt++;
push @line, $line;
next;
+ } elsif ($line =~ /^\\/) {
+ push @line, $line;
+ next;
}
last if ($o1_ofs <= $ofs);
$n_cnt++;
push @line, $line;
next;
+ } elsif ($line =~ /^\\/) {
+ push @line, $line;
+ next;
}
$ofs++;
$o_cnt++;
}
push @line, $line;
}
- my $head = ("@@ -$o0_ofs" .
- (($o_cnt != 1) ? ",$o_cnt" : '') .
- " +$n0_ofs" .
- (($n_cnt != 1) ? ",$n_cnt" : '') .
- " @@\n");
+ my $head = format_hunk_header($o0_ofs, $o_cnt, $n0_ofs, $n_cnt);
@{$prev->{TEXT}} = ($head, @line);
}
my @out = ();
my ($last_o_ctx, $last_was_dirty);
+ my $ofs_delta = 0;
- for (grep { $_->{USE} } @in) {
+ for (@in) {
if ($_->{TYPE} ne 'hunk') {
push @out, $_;
next;
}
my $text = $_->{TEXT};
- my ($o_ofs) = parse_hunk_header($text->[0]);
+ my ($o_ofs, $o_cnt, $n_ofs, $n_cnt) =
+ parse_hunk_header($text->[0]);
+ unless ($_->{USE}) {
+ $ofs_delta += $o_cnt - $n_cnt;
+ # If this hunk has been edited then subtract
+ # the delta that is due to the edit.
+ if ($_->{OFS_DELTA}) {
+ $ofs_delta -= $_->{OFS_DELTA};
+ }
+ next;
+ }
+ if ($ofs_delta) {
+ $n_ofs += $ofs_delta;
+ $_->{TEXT}->[0] = format_hunk_header($o_ofs, $o_cnt,
+ $n_ofs, $n_cnt);
+ }
+ # If this hunk was edited then adjust the offset delta
+ # to reflect the edit.
+ if ($_->{OFS_DELTA}) {
+ $ofs_delta += $_->{OFS_DELTA};
+ }
if (defined $last_o_ctx &&
$o_ofs <= $last_o_ctx &&
!$_->{DIRTY} &&
marked for applying."),
);
+sub recount_edited_hunk {
+ local $_;
+ my ($oldtext, $newtext) = @_;
+ my ($o_cnt, $n_cnt) = (0, 0);
+ for (@{$newtext}[1..$#{$newtext}]) {
+ my $mode = substr($_, 0, 1);
+ if ($mode eq '-') {
+ $o_cnt++;
+ } elsif ($mode eq '+') {
+ $n_cnt++;
+ } elsif ($mode eq ' ') {
+ $o_cnt++;
+ $n_cnt++;
+ }
+ }
+ my ($o_ofs, undef, $n_ofs, undef) =
+ parse_hunk_header($newtext->[0]);
+ $newtext->[0] = format_hunk_header($o_ofs, $o_cnt, $n_ofs, $n_cnt);
+ my (undef, $orig_o_cnt, undef, $orig_n_cnt) =
+ parse_hunk_header($oldtext->[0]);
+ # Return the change in the number of lines inserted by this hunk
+ return $orig_o_cnt - $orig_n_cnt - $o_cnt + $n_cnt;
+}
+
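
A worked example of the recount: if editing turns a "@@ -10,3 +10,3 @@" hunk into one with four '-' lines and six '+' lines, the header is rewritten to "@@ -10,4 +10,6 @@" and the function returns (3 - 3) - 4 + 6 = 2, i.e. the edit inserts two more lines than the original hunk did. coalesce_overlapping_hunks() above then carries that OFS_DELTA forward so the +side offsets of every later hunk header are shifted to match.
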
sub edit_hunk_manually {
my ($oldtext) = @_;
}
sub edit_hunk_loop {
- my ($head, $hunk, $ix) = @_;
- my $text = $hunk->[$ix]->{TEXT};
+ my ($head, $hunks, $ix) = @_;
+ my $hunk = $hunks->[$ix];
+ my $text = $hunk->{TEXT};
while (1) {
- $text = edit_hunk_manually($text);
- if (!defined $text) {
+ my $newtext = edit_hunk_manually($text);
+ if (!defined $newtext) {
return undef;
}
my $newhunk = {
- TEXT => $text,
- TYPE => $hunk->[$ix]->{TYPE},
+ TEXT => $newtext,
+ TYPE => $hunk->{TYPE},
USE => 1,
DIRTY => 1,
};
+ $newhunk->{OFS_DELTA} = recount_edited_hunk($text, $newtext);
+ # If this hunk has already been edited then add the
+ # offset delta of the previous edit to get the real
+ # delta from the original unedited hunk.
+ $hunk->{OFS_DELTA} and
+ $newhunk->{OFS_DELTA} += $hunk->{OFS_DELTA};
if (diff_applies($head,
- @{$hunk}[0..$ix-1],
+ @{$hunks}[0..$ix-1],
$newhunk,
- @{$hunk}[$ix+1..$#{$hunk}])) {
- $newhunk->{DISPLAY} = [color_diff(@{$text})];
+ @{$hunks}[$ix+1..$#{$hunks}])) {
+ $newhunk->{DISPLAY} = [color_diff(@{$newtext})];
return $newhunk;
}
else {
);
sub help_patch_cmd {
- print colored $help_color, __($help_patch_modes{$patch_mode}), "\n", __ <<EOF ;
+ local $_;
+ my $other = $_[0] . ",?";
+ print colored $help_color, __($help_patch_modes{$patch_mode}), "\n",
+ map { "$_\n" } grep {
+ my $c = quotemeta(substr($_, 0, 1));
+ $other =~ /,$c/
+ } split "\n", __ <<EOF ;
g - select a hunk to go to
/ - search for a hunk matching the given regex
j - leave this hunk undecided, see next undecided hunk
my %patch_update_prompt_modes = (
stage => {
- mode => N__("Stage mode change [y,n,q,a,d,/%s,?]? "),
- deletion => N__("Stage deletion [y,n,q,a,d,/%s,?]? "),
- hunk => N__("Stage this hunk [y,n,q,a,d,/%s,?]? "),
+ mode => N__("Stage mode change [y,n,q,a,d%s,?]? "),
+ deletion => N__("Stage deletion [y,n,q,a,d%s,?]? "),
+ hunk => N__("Stage this hunk [y,n,q,a,d%s,?]? "),
},
stash => {
- mode => N__("Stash mode change [y,n,q,a,d,/%s,?]? "),
- deletion => N__("Stash deletion [y,n,q,a,d,/%s,?]? "),
- hunk => N__("Stash this hunk [y,n,q,a,d,/%s,?]? "),
+ mode => N__("Stash mode change [y,n,q,a,d%s,?]? "),
+ deletion => N__("Stash deletion [y,n,q,a,d%s,?]? "),
+ hunk => N__("Stash this hunk [y,n,q,a,d%s,?]? "),
},
reset_head => {
- mode => N__("Unstage mode change [y,n,q,a,d,/%s,?]? "),
- deletion => N__("Unstage deletion [y,n,q,a,d,/%s,?]? "),
- hunk => N__("Unstage this hunk [y,n,q,a,d,/%s,?]? "),
+ mode => N__("Unstage mode change [y,n,q,a,d%s,?]? "),
+ deletion => N__("Unstage deletion [y,n,q,a,d%s,?]? "),
+ hunk => N__("Unstage this hunk [y,n,q,a,d%s,?]? "),
},
reset_nothead => {
- mode => N__("Apply mode change to index [y,n,q,a,d,/%s,?]? "),
- deletion => N__("Apply deletion to index [y,n,q,a,d,/%s,?]? "),
- hunk => N__("Apply this hunk to index [y,n,q,a,d,/%s,?]? "),
+ mode => N__("Apply mode change to index [y,n,q,a,d%s,?]? "),
+ deletion => N__("Apply deletion to index [y,n,q,a,d%s,?]? "),
+ hunk => N__("Apply this hunk to index [y,n,q,a,d%s,?]? "),
},
checkout_index => {
- mode => N__("Discard mode change from worktree [y,n,q,a,d,/%s,?]? "),
- deletion => N__("Discard deletion from worktree [y,n,q,a,d,/%s,?]? "),
- hunk => N__("Discard this hunk from worktree [y,n,q,a,d,/%s,?]? "),
+ mode => N__("Discard mode change from worktree [y,n,q,a,d%s,?]? "),
+ deletion => N__("Discard deletion from worktree [y,n,q,a,d%s,?]? "),
+ hunk => N__("Discard this hunk from worktree [y,n,q,a,d%s,?]? "),
},
checkout_head => {
- mode => N__("Discard mode change from index and worktree [y,n,q,a,d,/%s,?]? "),
- deletion => N__("Discard deletion from index and worktree [y,n,q,a,d,/%s,?]? "),
- hunk => N__("Discard this hunk from index and worktree [y,n,q,a,d,/%s,?]? "),
+ mode => N__("Discard mode change from index and worktree [y,n,q,a,d%s,?]? "),
+ deletion => N__("Discard deletion from index and worktree [y,n,q,a,d%s,?]? "),
+ hunk => N__("Discard this hunk from index and worktree [y,n,q,a,d%s,?]? "),
},
checkout_nothead => {
- mode => N__("Apply mode change to index and worktree [y,n,q,a,d,/%s,?]? "),
- deletion => N__("Apply deletion to index and worktree [y,n,q,a,d,/%s,?]? "),
- hunk => N__("Apply this hunk to index and worktree [y,n,q,a,d,/%s,?]? "),
+ mode => N__("Apply mode change to index and worktree [y,n,q,a,d%s,?]? "),
+ deletion => N__("Apply deletion to index and worktree [y,n,q,a,d%s,?]? "),
+ hunk => N__("Apply this hunk to index and worktree [y,n,q,a,d%s,?]? "),
},
);
$other .= ',J';
}
if ($num > 1) {
- $other .= ',g';
+ $other .= ',g,/';
}
for ($i = 0; $i < $num; $i++) {
if (!defined $hunk[$i]{USE}) {
}
next;
}
- elsif ($other =~ /g/ && $line =~ /^g(.*)/) {
+ elsif ($line =~ /^g(.*)/) {
my $response = $1;
+ unless ($other =~ /g/) {
+ error_msg __("No other hunks to goto\n");
+ next;
+ }
my $no = $ix > 10 ? $ix - 10 : 0;
while ($response eq '') {
$no = display_hunks(\@hunk, $no);
}
elsif ($line =~ m|^/(.*)|) {
my $regex = $1;
+ unless ($other =~ m|/|) {
+ error_msg __("No other hunks to search\n");
+ next;
+ }
if ($1 eq "") {
print colored $prompt_color, __("search for regex? ");
$regex = <STDIN>;
next;
}
}
- elsif ($other =~ /s/ && $line =~ /^s/) {
+ elsif ($line =~ /^s/) {
+ unless ($other =~ /s/) {
+ error_msg __("Sorry, cannot split this hunk\n");
+ next;
+ }
my @split = split_hunk($hunk[$ix]{TEXT}, $hunk[$ix]{DISPLAY});
if (1 < @split) {
print colored $header_color, sprintf(
$num = scalar @hunk;
next;
}
- elsif ($other =~ /e/ && $line =~ /^e/) {
+ elsif ($line =~ /^e/) {
+ unless ($other =~ /e/) {
+ error_msg __("Sorry, cannot edit this hunk\n");
+ next;
+ }
my $newhunk = edit_hunk_loop($head, \@hunk, $ix);
if (defined $newhunk) {
splice @hunk, $ix, 1, $newhunk;
extern int xdup(int fd);
extern FILE *xfopen(const char *path, const char *mode);
extern FILE *xfdopen(int fd, const char *mode);
-extern int xmkstemp(char *template);
-extern int xmkstemp_mode(char *template, int mode);
+extern int xmkstemp(char *temp_filename);
+extern int xmkstemp_mode(char *temp_filename, int mode);
extern char *xgetcwd(void);
extern FILE *fopen_for_writing(const char *path);
extern FILE *fopen_or_warn(const char *path, const char *mode);
my ($d) = @_;
m#(\d{2,4})/(\d\d)/(\d\d)\s(\d\d):(\d\d)(?::(\d\d))?#
or die "Unparseable date: $d\n";
- my $y=$1; $y-=1900 if $y>1900;
+ my $y=$1;
+ $y+=100 if $y<70;
+ $y+=1900 if $y<1000;
return timegm($6||0,$5,$4,$3,$2-1,$y);
}
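
The same normalization in C, as a sketch (the two-digit-year convention is
the one the Perl above adopts; four-digit years pass through unchanged):

    /* 00-69 -> 2000-2069, 70-99 -> 1970-1999, 100-999 (years since
     * 1900) -> +1900; four-digit years are already absolute. */
    static int normalize_year(int y)
    {
            if (y < 70)
                    y += 100;       /* 5 -> 105 */
            if (y < 1000)
                    y += 1900;      /* 105 -> 2005, 98 -> 1998 */
            return y;
    }
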
move_to_original_branch
return
;;
+show-current-patch)
+ exec git am --show-current-patch
+ ;;
esac
if test -z "$rebase_root"
# makes this easy
git cherry-pick ${gpg_sign_opt:+"$gpg_sign_opt"} --allow-empty \
$allow_rerere_autoupdate --right-only "$revisions" \
+ $allow_empty_message \
${restrict_revision+^$restrict_revision}
ret=$?
else
die_with_patch () {
echo "$1" > "$state_dir"/stopped-sha
+ git update-ref REBASE_HEAD "$1"
make_patch "$1"
die "$2"
}
exit_with_patch () {
echo "$1" > "$state_dir"/stopped-sha
+ git update-ref REBASE_HEAD "$1"
make_patch $1
git rev-parse --verify HEAD > "$amend"
gpg_sign_opt_quoted=${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")}
test -d "$rewritten" &&
pick_one_preserving_merges "$@" && return
- output eval git cherry-pick $allow_rerere_autoupdate \
+ output eval git cherry-pick $allow_rerere_autoupdate $allow_empty_message \
${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")} \
"$strategy_args" $empty_args $ff "$@"
--sq-quote "$gpg_sign_opt")} \
$allow_rerere_autoupdate "$merge_args" \
"$strategy_args" \
- -m $(git rev-parse --sq-quote "$msg_content") \
+ -m "$(git rev-parse --sq-quote "$msg_content")" \
"$new_parents"
then
printf "%s\n" "$msg_content" > "$GIT_DIR"/MERGE_MSG
;;
*)
output eval git cherry-pick $allow_rerere_autoupdate \
+ $allow_empty_message \
${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")} \
"$strategy_args" "$@" ||
die_with_patch $sha1 "$(eval_gettext "Could not pick \$sha1")"
mark_action_done
do_pick $sha1 "$rest"
- git commit --amend --no-post-rewrite ${gpg_sign_opt:+"$gpg_sign_opt"} || {
+ git commit --amend --no-post-rewrite ${gpg_sign_opt:+"$gpg_sign_opt"} \
+ $allow_empty_message || {
warn "$(eval_gettext "\
Could not amend commit after successfully picking \$sha1... \$rest
This is most likely due to an empty commit message, or the pre-commit hook
# This is an intermediate commit; its message will only be
# used in case of trouble. So use the long version:
do_with_author output git commit --amend --no-verify -F "$squash_msg" \
- ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+ ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
die_failed_squash $sha1 "$rest"
;;
*)
if test -f "$fixup_msg"
then
do_with_author git commit --amend --no-verify -F "$fixup_msg" \
- ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+ ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
die_failed_squash $sha1 "$rest"
else
cp "$squash_msg" "$GIT_DIR"/SQUASH_MSG || exit
rm -f "$GIT_DIR"/MERGE_MSG
do_with_author git commit --amend --no-verify -F "$GIT_DIR"/SQUASH_MSG -e \
- ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+ ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
die_failed_squash $sha1 "$rest"
fi
rm -f "$squash_msg" "$fixup_msg"
continue)
if test ! -d "$rewritten"
then
- exec git rebase--helper ${force_rebase:+--no-ff} --continue
+ exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
+ --continue
fi
# do we have anything to commit?
if git diff-index --cached --quiet HEAD --
You have uncommitted changes in your working tree. Please commit them
first and then run 'git rebase --continue' again.")"
do_with_author git commit --amend --no-verify -F "$msg" -e \
- ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+ ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
die "$(gettext "Could not commit staged changes.")"
else
do_with_author git commit --no-verify -F "$msg" -e \
- ${gpg_sign_opt:+"$gpg_sign_opt"} ||
+ ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message ||
die "$(gettext "Could not commit staged changes.")"
fi
fi
if test ! -d "$rewritten"
then
- exec git rebase--helper ${force_rebase:+--no-ff} --continue
+ exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
+ --continue
fi
do_rest
return 0
exit
;;
+show-current-patch)
+ exec git show REBASE_HEAD --
+ ;;
esac
comment_for_reflog start
orig_head=$(git rev-parse --verify HEAD) || die "$(gettext "No HEAD?")"
mkdir -p "$state_dir" || die "$(eval_gettext "Could not create temporary \$state_dir")"
+rm -f "$(git rev-parse --git-path REBASE_HEAD)"
: > "$state_dir"/interactive || die "$(gettext "Could not mark as interactive")"
write_basic_state
if test -z "$rebase_root" && test ! -d "$rewritten"
then
require_clean_work_tree "rebase"
- exec git rebase--helper ${force_rebase:+--no-ff} --continue
+ exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
+ --continue
fi
do_rest
cmt=$(cat "$state_dir/current")
if ! git diff-index --quiet --ignore-submodules HEAD --
then
- if ! git commit ${gpg_sign_opt:+"$gpg_sign_opt"} --no-verify -C "$cmt"
+ if ! git commit ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message \
+ --no-verify -C "$cmt"
then
echo "Commit failed, please do not call \"git commit\""
echo "directly, but instead do one of the following: "
echo "$msgnum" >"$state_dir/msgnum"
cmt="$(cat "$state_dir/cmt.$msgnum")"
echo "$cmt" > "$state_dir/current"
+ git update-ref REBASE_HEAD "$cmt"
hd=$(git rev-parse --verify HEAD)
cmt_name=$(git symbolic-ref HEAD 2> /dev/null || echo HEAD)
eval GITHEAD_$cmt='"${cmt_name##refs/heads/}~$(($end - $msgnum))"'
finish_rb_merge
return
;;
+show-current-patch)
+ exec git show REBASE_HEAD --
+ ;;
esac
mkdir -p "$state_dir"
echo "$onto_name" > "$state_dir/onto_name"
write_basic_state
+rm -f "$(git rev-parse --git-path REBASE_HEAD)"
msgnum=0
for cmt in $(git rev-list --reverse --no-merges "$revisions")
i,interactive! let the user edit the list of commits to rebase
x,exec=! add exec lines after each commit of the editable list
k,keep-empty preserve empty commits during rebase
+allow-empty-message allow rebasing commits with empty messages
f,force-rebase! force rebase even if branch is up to date
X,strategy-option=! pass the argument through to the merge strategy
stat! display a diffstat of what changed upstream
skip! skip current patch and continue
edit-todo! edit the todo list during an interactive rebase
quit! abort but keep HEAD where it is
+show-current-patch! show the patch file being applied or merged
"
. git-sh-setup
set_reflog_action rebase
preserve_merges=
autosquash=
keep_empty=
+allow_empty_message=
test "$(git config --bool rebase.autosquash)" = "true" && autosquash=t
case "$(git config --bool commit.gpgsign)" in
true) gpg_sign_opt=-S ;;
}
finish_rebase () {
+ rm -f "$(git rev-parse --git-path REBASE_HEAD)"
apply_autostash &&
{ git gc --auto || true; } &&
rm -rf "$state_dir"
--verify)
ok_to_skip_pre_rebase=
;;
- --continue|--skip|--abort|--quit|--edit-todo)
+ --continue|--skip|--abort|--quit|--edit-todo|--show-current-patch)
test $total_argc -eq 2 || usage
action=${1##--}
;;
--keep-empty)
keep_empty=yes
;;
+ --allow-empty-message)
+ allow_empty_message=--allow-empty-message
+ ;;
--preserve-merges)
preserve_merges=t
test -z "$interactive_rebase" && interactive_rebase=implied
edit-todo)
run_specific_rebase
;;
+show-current-patch)
+ run_specific_rebase
+ die "BUG: run_specific_rebase is not supposed to return here"
+ ;;
esac
# Make sure no rebase is in progress
use Term::ANSIColor;
use File::Temp qw/ tempdir tempfile /;
use File::Spec::Functions qw(catdir catfile);
-use Error qw(:try);
+use Git::LoadCPAN::Error qw(:try);
use Cwd qw(abs_path cwd);
use Git;
use Git::I18N;
+use Net::Domain ();
+use Net::SMTP ();
+use Git::LoadCPAN::Mail::Address;
Getopt::Long::Configure qw/ pass_through /;
--[no-]cc <str> * Email Cc:
--[no-]bcc <str> * Email Bcc:
--subject <str> * Email "Subject:"
+ --reply-to <str> * Email "Reply-To:"
--in-reply-to <str> * Email "In-Reply-To:"
--[no-]xmailer * Add "X-Mailer:" header (default).
--[no-]annotate * Review each patch that will be sent in an editor.
# Variables we fill in automatically, or via prompting:
my (@to,$no_to,@initial_to,@cc,$no_cc,@initial_cc,@bcclist,$no_bcc,@xh,
- $initial_reply_to,$initial_subject,@files,
+ $initial_in_reply_to,$reply_to,$initial_subject,@files,
$author,$sender,$smtp_authpass,$annotate,$use_xmailer,$compose,$time);
my $envelope_sender;
# Example reply to:
-#$initial_reply_to = ''; #<20050203173208.GA23964@foobar.com>';
+#$initial_in_reply_to = ''; #<20050203173208.GA23964@foobar.com>';
my $repo = eval { Git->repository() };
my @repo = $repo ? ($repo) : ();
if !$help and $dump_aliases and @ARGV;
$rc = GetOptions(
"sender|from=s" => \$sender,
- "in-reply-to=s" => \$initial_reply_to,
+ "in-reply-to=s" => \$initial_in_reply_to,
+ "reply-to=s" => \$reply_to,
"subject=s" => \$initial_subject,
"to=s" => \@initial_to,
"to-cmd=s" => \$to_cmd,
die __("Cannot run git format-patch from outside a repository\n")
if $format_patch and not $repo;
+die __("`batch-size` and `relogin` must be specified together " .
+ "(via command-line or configuration option)\n")
+ if defined $relogin_delay and not defined $batch_size;
+
# Now, let's fill any that aren't set in with defaults:
sub read_config {
($repocommitter) = Git::ident_person(@repo, 'committer');
sub parse_address_line {
- return Git::parse_mailboxes($_[0]);
+ return map { $_->format } Mail::Address->parse($_[0]);
}
sub split_addrs {
my $tpl_sender = $sender || $repoauthor || $repocommitter || '';
my $tpl_subject = $initial_subject || '';
- my $tpl_reply_to = $initial_reply_to || '';
+ my $tpl_in_reply_to = $initial_in_reply_to || '';
+ my $tpl_reply_to = $reply_to || '';
print $c <<EOT1, Git::prefix_lines("GIT: ", __ <<EOT2), <<EOT3;
From $tpl_sender # This line is ignored.
Clear the body content if you don't wish to send a summary.
EOT2
From: $tpl_sender
+Reply-To: $tpl_reply_to
Subject: $tpl_subject
-In-Reply-To: $tpl_reply_to
+In-Reply-To: $tpl_in_reply_to
EOT3
for my $f (@files) {
do_edit($compose_filename);
}
- open my $c2, ">", $compose_filename . ".final"
- or die sprintf(__("Failed to open %s.final: %s"), $compose_filename, $!);
-
open $c, "<", $compose_filename
or die sprintf(__("Failed to open %s: %s"), $compose_filename, $!);
- my $need_8bit_cte = file_has_nonascii($compose_filename);
- my $in_body = 0;
- my $summary_empty = 1;
if (!defined $compose_encoding) {
$compose_encoding = "UTF-8";
}
- while(<$c>) {
- next if m/^GIT:/;
- if ($in_body) {
- $summary_empty = 0 unless (/^\n$/);
- } elsif (/^\n$/) {
- $in_body = 1;
- if ($need_8bit_cte) {
- print $c2 "MIME-Version: 1.0\n",
- "Content-Type: text/plain; ",
- "charset=$compose_encoding\n",
- "Content-Transfer-Encoding: 8bit\n";
- }
- } elsif (/^MIME-Version:/i) {
- $need_8bit_cte = 0;
- } elsif (/^Subject:\s*(.+)\s*$/i) {
- $initial_subject = $1;
- my $subject = $initial_subject;
- $_ = "Subject: " .
- quote_subject($subject, $compose_encoding) .
- "\n";
- } elsif (/^In-Reply-To:\s*(.+)\s*$/i) {
- $initial_reply_to = $1;
- next;
- } elsif (/^From:\s*(.+)\s*$/i) {
- $sender = $1;
- next;
- } elsif (/^(?:To|Cc|Bcc):/i) {
- print __("To/Cc/Bcc fields are not interpreted yet, they have been ignored\n");
- next;
+
+ my %parsed_email;
+ while (my $line = <$c>) {
+ next if $line =~ m/^GIT:/;
+ parse_header_line($line, \%parsed_email);
+ if ($line =~ /^$/) {
+ $parsed_email{'body'} = filter_body($c);
}
- print $c2 $_;
}
close $c;
- close $c2;
- if ($summary_empty) {
+ open my $c2, ">", $compose_filename . ".final"
+ or die sprintf(__("Failed to open %s.final: %s"), $compose_filename, $!);
+
+ if ($parsed_email{'From'}) {
+ $sender = delete($parsed_email{'From'});
+ }
+ if ($parsed_email{'In-Reply-To'}) {
+ $initial_in_reply_to = delete($parsed_email{'In-Reply-To'});
+ }
+ if ($parsed_email{'Reply-To'}) {
+ $reply_to = delete($parsed_email{'Reply-To'});
+ }
+ if ($parsed_email{'Subject'}) {
+ $initial_subject = delete($parsed_email{'Subject'});
+ print $c2 "Subject: " .
+ quote_subject($initial_subject, $compose_encoding) .
+ "\n";
+ }
+
+ if ($parsed_email{'MIME-Version'}) {
+ print $c2 "MIME-Version: $parsed_email{'MIME-Version'}\n",
+ "Content-Type: $parsed_email{'Content-Type'};\n",
+ "Content-Transfer-Encoding: $parsed_email{'Content-Transfer-Encoding'}\n";
+ delete($parsed_email{'MIME-Version'});
+ delete($parsed_email{'Content-Type'});
+ delete($parsed_email{'Content-Transfer-Encoding'});
+ } elsif (file_has_nonascii($compose_filename)) {
+ my $content_type = (delete($parsed_email{'Content-Type'}) or
+ "text/plain; charset=$compose_encoding");
+ print $c2 "MIME-Version: 1.0\n",
+ "Content-Type: $content_type\n",
+ "Content-Transfer-Encoding: 8bit\n";
+ }
+	# Preserve unknown headers
+	foreach my $key (keys %parsed_email) {
+		next if $key eq 'body';
+		my $value = $parsed_email{$key};
+		# address headers were parsed into array refs above
+		$value = join(", ", @$value) if ref($value) eq 'ARRAY';
+		print $c2 "$key: $value\n";
+	}
+
+ if ($parsed_email{'body'}) {
+ print $c2 "\n$parsed_email{'body'}\n";
+ delete($parsed_email{'body'});
+ } else {
print __("Summary email is empty, skipping it\n");
$compose = -1;
}
+
+ close $c2;
+
} elsif ($annotate) {
do_edit(@files);
}
return;
}
+sub parse_header_line {
+ my $lines = shift;
+ my $parsed_line = shift;
+ my $addr_pat = join "|", qw(To Cc Bcc);
+
+ foreach (split(/\n/, $lines)) {
+ if (/^($addr_pat):\s*(.+)$/i) {
+ $parsed_line->{$1} = [ parse_address_line($2) ];
+ } elsif (/^([^:]*):\s*(.+)\s*$/i) {
+ $parsed_line->{$1} = $2;
+ }
+ }
+}
+
+sub filter_body {
+ my $c = shift;
+ my $body = "";
+ while (my $body_line = <$c>) {
+ if ($body_line !~ m/^GIT:/) {
+ $body .= $body_line;
+ }
+ }
+ return $body;
+}
+
+
my %broken_encoding;
sub file_declares_8bit_cte {
@initial_cc = process_address_list(@initial_cc);
@bcclist = process_address_list(@bcclist);
-if ($thread && !defined $initial_reply_to && $prompting) {
- $initial_reply_to = ask(
+if ($thread && !defined $initial_in_reply_to && $prompting) {
+ $initial_in_reply_to = ask(
__("Message-ID to be used as In-Reply-To for the first email (if any)? "),
default => "",
valid_re => qr/\@.*\./, confirm_only => 1);
}
-if (defined $initial_reply_to) {
- $initial_reply_to =~ s/^\s*<?//;
- $initial_reply_to =~ s/>?\s*$//;
- $initial_reply_to = "<$initial_reply_to>" if $initial_reply_to ne '';
+if (defined $initial_in_reply_to) {
+ $initial_in_reply_to =~ s/^\s*<?//;
+ $initial_in_reply_to =~ s/>?\s*$//;
+ $initial_in_reply_to = "<$initial_in_reply_to>" if $initial_in_reply_to ne '';
+}
+
+if (defined $reply_to) {
+ $reply_to =~ s/^\s+|\s+$//g;
+ ($reply_to) = expand_aliases($reply_to);
+ $reply_to = sanitize_address($reply_to);
}
if (!defined $smtp_server) {
}
# Variables we set as part of the loop over files
-our ($message_id, %mail, $subject, $reply_to, $references, $message,
+our ($message_id, %mail, $subject, $in_reply_to, $references, $message,
$needs_confirm, $message_num, $ask_default);
sub extract_valid_address {
sub maildomain_net {
my $maildomain;
- if (eval { require Net::Domain; 1 }) {
- my $domain = Net::Domain::domainname();
- $maildomain = $domain if valid_fqdn($domain);
- }
+ my $domain = Net::Domain::domainname();
+ $maildomain = $domain if valid_fqdn($domain);
return $maildomain;
}
sub maildomain_mta {
my $maildomain;
- if (eval { require Net::SMTP; 1 }) {
- for my $host (qw(mailhost localhost)) {
- my $smtp = Net::SMTP->new($host);
- if (defined $smtp) {
- my $domain = $smtp->domain;
- $smtp->quit;
+ for my $host (qw(mailhost localhost)) {
+ my $smtp = Net::SMTP->new($host);
+ if (defined $smtp) {
+ my $domain = $smtp->domain;
+ $smtp->quit;
-				$maildomain = $domain if valid_fqdn($domain);
-				last if $maildomain;
-			}
+			$maildomain = $domain if valid_fqdn($domain);
+			last if $maildomain;
}
}
if ($use_xmailer) {
$header .= "X-Mailer: git-send-email $gitversion\n";
}
-	if ($reply_to) {
-		$header .= "In-Reply-To: $reply_to\n";
+	if ($in_reply_to) {
+		$header .= "In-Reply-To: $in_reply_to\n";
$header .= "References: $references\n";
}
+ if ($reply_to) {
+ $header .= "Reply-To: $reply_to\n";
+ }
if (@xh) {
$header .= join("\n", @xh) . "\n";
}
return 1;
}
-$reply_to = $initial_reply_to;
-$references = $initial_reply_to || '';
+$in_reply_to = $initial_in_reply_to;
+$references = $initial_in_reply_to || '';
$subject = $initial_subject;
$message_num = 0;
# set up for the next message
if ($thread && $message_was_sent &&
- ($chain_reply_to || !defined $reply_to || length($reply_to) == 0 ||
+ ($chain_reply_to || !defined $in_reply_to || length($in_reply_to) == 0 ||
$message_num == 1)) {
- $reply_to = $message_id;
+ $in_reply_to = $message_id;
if (length $references > 0) {
$references .= "\n $message_id";
} else {
# First decide what scheme to use...
GIT_INTERNAL_GETTEXT_SH_SCHEME=fallthrough
-if test -n "@@USE_GETTEXT_SCHEME@@"
+if test -n "$GIT_GETTEXT_POISON"
+then
+ GIT_INTERNAL_GETTEXT_SH_SCHEME=poison
+elif test -n "@@USE_GETTEXT_SCHEME@@"
then
GIT_INTERNAL_GETTEXT_SH_SCHEME="@@USE_GETTEXT_SCHEME@@"
elif test -n "$GIT_INTERNAL_GETTEXT_TEST_FALLBACKS"
then
: no probing necessary
-elif test -n "$GIT_GETTEXT_POISON"
-then
- GIT_INTERNAL_GETTEXT_SH_SCHEME=poison
elif type gettext.sh >/dev/null 2>&1
then
# GNU libintl's gettext.sh
shift
done
- if test -n "$deinit_all" && test "$#" -ne 0
- then
- echo >&2 "$(eval_gettext "pathspec and --all are incompatible")"
- usage
- fi
- if test $# = 0 && test -z "$deinit_all"
- then
- die "$(eval_gettext "Use '--all' if you really want to deinitialize all submodules")"
- fi
-
- {
- git submodule--helper list --prefix "$wt_prefix" "$@" ||
- echo "#unmatched" $?
- } |
- while read -r mode sha1 stage sm_path
- do
- die_if_unmatched "$mode" "$sha1"
- name=$(git submodule--helper name "$sm_path") || exit
-
- displaypath=$(git submodule--helper relative-path "$sm_path" "$wt_prefix")
-
- # Remove the submodule work tree (unless the user already did it)
- if test -d "$sm_path"
- then
- # Protect submodules containing a .git directory
- if test -d "$sm_path/.git"
- then
- die "$(eval_gettext "\
-Submodule work tree '\$displaypath' contains a .git directory
-(use 'rm -rf' if you really want to remove it including all of its history)")"
- fi
-
- if test -z "$force"
- then
- git rm -qn "$sm_path" ||
- die "$(eval_gettext "Submodule work tree '\$displaypath' contains local modifications; use '-f' to discard them")"
- fi
- rm -rf "$sm_path" &&
- say "$(eval_gettext "Cleared directory '\$displaypath'")" ||
- say "$(eval_gettext "Could not remove submodule work tree '\$displaypath'")"
- fi
-
- mkdir "$sm_path" || say "$(eval_gettext "Could not create empty submodule directory '\$displaypath'")"
-
- # Remove the .git/config entries (unless the user already did it)
- if test -n "$(git config --get-regexp submodule."$name\.")"
- then
- # Remove the whole section so we have a clean state when
- # the user later decides to init this submodule again
- url=$(git config submodule."$name".url)
- git config --remove-section submodule."$name" 2>/dev/null &&
- say "$(eval_gettext "Submodule '\$name' (\$url) unregistered for path '\$displaypath'")"
- fi
- done
+ git ${wt_prefix:+-C "$wt_prefix"} submodule--helper deinit ${GIT_QUIET:+--quiet} ${prefix:+--prefix "$prefix"} ${force:+--force} ${deinit_all:+--all} "$@"
}
is_tip_reachable () (
;;
esac
done
- cd_to_toplevel
- {
- git submodule--helper list --prefix "$wt_prefix" "$@" ||
- echo "#unmatched" $?
- } |
- while read -r mode sha1 stage sm_path
- do
- die_if_unmatched "$mode" "$sha1"
-
- # skip inactive submodules
- if ! git submodule--helper is-active "$sm_path"
- then
- continue
- fi
-
- name=$(git submodule--helper name "$sm_path")
- url=$(git config -f .gitmodules --get submodule."$name".url)
-
- # Possibly a url relative to parent
- case "$url" in
- ./*|../*)
- # rewrite foo/bar as ../.. to find path from
- # submodule work tree to superproject work tree
- up_path="$(printf '%s\n' "$sm_path" | sed "s/[^/][^/]*/../g")" &&
- # guarantee a trailing /
- up_path=${up_path%/}/ &&
- # path from submodule work tree to submodule origin repo
- sub_origin_url=$(git submodule--helper resolve-relative-url "$url" "$up_path") &&
- # path from superproject work tree to submodule origin repo
- super_config_url=$(git submodule--helper resolve-relative-url "$url") || exit
- ;;
- *)
- sub_origin_url="$url"
- super_config_url="$url"
- ;;
- esac
-
- displaypath=$(git submodule--helper relative-path "$prefix$sm_path" "$wt_prefix")
- say "$(eval_gettext "Synchronizing submodule url for '\$displaypath'")"
- git config submodule."$name".url "$super_config_url"
-
- if test -e "$sm_path"/.git
- then
- (
- sanitize_submodule_env
- cd "$sm_path"
- remote=$(get_default_remote)
- git config remote."$remote".url "$sub_origin_url"
- if test -n "$recursive"
- then
- prefix="$prefix$sm_path/"
- eval cmd_sync
- fi
- )
- fi
- done
+ git ${wt_prefix:+-C "$wt_prefix"} ${prefix:+--super-prefix "$prefix"} submodule--helper sync ${GIT_QUIET:+--quiet} ${recursive:+--recursive} "$@"
}
cmd_absorbgitdirs()
$ctx->copy($src, $rev, $dst)
unless $_dry_run;
+ # Release resources held by ctx before creating another SVN::Ra
+ # so destruction is orderly. This seems necessary with SVN 1.9.5
+ # to avoid segfaults.
+ $ctx = undef;
+
$gs->fetch_all;
}
#include "run-command.h"
const char git_usage_string[] =
- "git [--version] [--help] [-C <path>] [-c name=value]\n"
- " [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]\n"
- " [-p | --paginate | --no-pager] [--no-replace-objects] [--bare]\n"
- " [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]\n"
- " <command> [<args>]";
+ N_("git [--version] [--help] [-C <path>] [-c <name>=<value>]\n"
+ " [--exec-path[=<path>]] [--html-path] [--man-path] [--info-path]\n"
+ " [-p | --paginate | --no-pager] [--no-replace-objects] [--bare]\n"
+ " [--git-dir=<path>] [--work-tree=<path>] [--namespace=<name>]\n"
+ " <command> [<args>]");
const char git_more_info_string[] =
N_("'git help -a' and 'git help -g' list available subcommands and some\n"
*envchanged = 1;
} else if (!strcmp(cmd, "--git-dir")) {
if (*argc < 2) {
- fprintf(stderr, "No directory given for --git-dir.\n" );
+ fprintf(stderr, _("no directory given for --git-dir\n" ));
usage(git_usage_string);
}
setenv(GIT_DIR_ENVIRONMENT, (*argv)[1], 1);
*envchanged = 1;
} else if (!strcmp(cmd, "--namespace")) {
if (*argc < 2) {
- fprintf(stderr, "No namespace given for --namespace.\n" );
+ fprintf(stderr, _("no namespace given for --namespace\n" ));
usage(git_usage_string);
}
setenv(GIT_NAMESPACE_ENVIRONMENT, (*argv)[1], 1);
*envchanged = 1;
} else if (!strcmp(cmd, "--work-tree")) {
if (*argc < 2) {
- fprintf(stderr, "No directory given for --work-tree.\n" );
+ fprintf(stderr, _("no directory given for --work-tree\n" ));
usage(git_usage_string);
}
setenv(GIT_WORK_TREE_ENVIRONMENT, (*argv)[1], 1);
*envchanged = 1;
} else if (!strcmp(cmd, "--super-prefix")) {
if (*argc < 2) {
- fprintf(stderr, "No prefix given for --super-prefix.\n" );
+ fprintf(stderr, _("no prefix given for --super-prefix\n" ));
usage(git_usage_string);
}
setenv(GIT_SUPER_PREFIX_ENVIRONMENT, (*argv)[1], 1);
*envchanged = 1;
} else if (!strcmp(cmd, "-c")) {
if (*argc < 2) {
- fprintf(stderr, "-c expects a configuration string\n" );
+ fprintf(stderr, _("-c expects a configuration string\n" ));
usage(git_usage_string);
}
git_config_push_parameter((*argv)[1]);
*envchanged = 1;
} else if (!strcmp(cmd, "-C")) {
if (*argc < 2) {
- fprintf(stderr, "No directory given for -C.\n" );
+ fprintf(stderr, _("no directory given for -C\n" ));
usage(git_usage_string);
}
if ((*argv)[1][0]) {
if (chdir((*argv)[1]))
- die_errno("Cannot change to '%s'", (*argv)[1]);
+ die_errno("cannot change to '%s'", (*argv)[1]);
if (envchanged)
*envchanged = 1;
}
list_builtins();
exit(0);
} else {
- fprintf(stderr, "Unknown option: %s\n", cmd);
+ fprintf(stderr, _("unknown option: %s\n"), cmd);
usage(git_usage_string);
}
if (ret >= 0) /* normal exit */
exit(ret);
- die_errno("While expanding alias '%s': '%s'",
+ die_errno("while expanding alias '%s': '%s'",
alias_command, alias_string + 1);
}
count = split_cmdline(alias_string, &new_argv);
split_cmdline_strerror(count));
option_count = handle_options(&new_argv, &count, &envchanged);
if (envchanged)
- die("alias '%s' changes environment variables\n"
- "You can use '!git' in the alias to do this.",
+ die("alias '%s' changes environment variables.\n"
+ "You can use '!git' in the alias to do this",
alias_command);
memmove(new_argv - option_count, new_argv,
count * sizeof(char *));
{ "column", cmd_column, RUN_SETUP_GENTLY },
{ "commit", cmd_commit, RUN_SETUP | NEED_WORK_TREE },
{ "commit-tree", cmd_commit_tree, RUN_SETUP },
- { "config", cmd_config, RUN_SETUP_GENTLY },
+ { "config", cmd_config, RUN_SETUP_GENTLY | DELAY_PAGER_CONFIG },
{ "count-objects", cmd_count_objects, RUN_SETUP },
{ "credential", cmd_credential, RUN_SETUP_GENTLY },
{ "describe", cmd_describe, RUN_SETUP },
if (errno != ENOENT)
break;
if (was_alias) {
- fprintf(stderr, "Expansion of alias '%s' failed; "
- "'%s' is not a git command\n",
+ fprintf(stderr, _("expansion of alias '%s' failed; "
+ "'%s' is not a git command\n"),
cmd, argv[0]);
exit(1);
}
break;
}
- fprintf(stderr, "Failed to run command '%s': %s\n",
+ fprintf(stderr, _("failed to run command '%s': %s\n"),
cmd, strerror(errno));
return 1;
------------
- Core git tools
- - Perl
+ - Perl 5.8
- Perl modules: CGI, Encode, Fcntl, File::Find, File::Basename.
- web server
The following optional Perl modules are required for extra features
- - Digest::MD5 - for gravatar support
- CGI::Fast and FCGI - for running gitweb as FastCGI script
- HTML::TagCloud - for fancy tag cloud in project list view
- HTTP::Date or Time::ParseDate - to support If-Modified-Since for feeds
use File::Find qw();
use File::Basename qw(basename);
use Time::HiRes qw(gettimeofday tv_interval);
+use Digest::MD5 qw(md5_hex);
+
binmode STDOUT, ':utf8';
if (!defined($CGI::VERSION) || $CGI::VERSION < 4.08) {
# Currently available providers are gravatar and picon.
# If an unknown provider is specified, the feature is disabled.
- # Gravatar depends on Digest::MD5.
# Picon currently relies on the indiana.edu database.
# To enable system wide have in $GITWEB_CONFIG
our @snapshot_fmts = gitweb_get_feature('snapshot');
@snapshot_fmts = filter_snapshot_fmts(@snapshot_fmts);
- # check that the avatar feature is set to a known provider name,
- # and for each provider check if the dependencies are satisfied.
- # if the provider name is invalid or the dependencies are not met,
- # reset $git_avatar to the empty string.
our ($git_avatar) = gitweb_get_feature('avatar');
- if ($git_avatar eq 'gravatar') {
- $git_avatar = '' unless (eval { require Digest::MD5; 1; });
- } elsif ($git_avatar eq 'picon') {
- # no dependencies
- } else {
- $git_avatar = '';
- }
+ $git_avatar = '' unless $git_avatar =~ /^(?:gravatar|picon)$/s;
our @extra_branch_refs = gitweb_get_feature('extra-branch-refs');
@extra_branch_refs = filter_and_validate_refs (@extra_branch_refs);
my $size = shift;
$avatar_cache{$email} ||=
"//www.gravatar.com/avatar/" .
- Digest::MD5::md5_hex($email) . "?s=";
+ md5_hex($email) . "?s=";
return $avatar_cache{$email} . $size;
}
fwrite(buf, size, 1, stdout);
}
+static void color_set(char *dst, const char *color_bytes)
+{
+ xsnprintf(dst, COLOR_MAXLEN, "%s", color_bytes);
+}
+
/*
* Initialize the grep_defaults template with hardcoded defaults.
* We could let the compiler do this, but without C99 initializers
#include "block-sha1/sha1.h"
#endif
+#ifndef platform_SHA_CTX
+/*
+ * platform's underlying implementation of SHA-1; could be OpenSSL,
+ * blk_SHA, Apple CommonCrypto, etc.  Note that the relevant
+ * SHA-1 header may have already defined platform_SHA_CTX for our
+ * own implementations like block-sha1 and ppc-sha1, so we list
+ * the defaults for OpenSSL-compatible SHA-1 implementations here.
+ */
+#define platform_SHA_CTX SHA_CTX
+#define platform_SHA1_Init SHA1_Init
+#define platform_SHA1_Update SHA1_Update
+#define platform_SHA1_Final SHA1_Final
+#endif
+
+#define git_SHA_CTX platform_SHA_CTX
+#define git_SHA1_Init platform_SHA1_Init
+#define git_SHA1_Update platform_SHA1_Update
+#define git_SHA1_Final platform_SHA1_Final
+
+#ifdef SHA1_MAX_BLOCK_SIZE
+#include "compat/sha1-chunked.h"
+#undef git_SHA1_Update
+#define git_SHA1_Update git_SHA1_Update_Chunked
+#endif
+
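
To see the indirection in action, this is how a platform header could select
another backend before the defaults above kick in. The CommonCrypto names are
real, but the snippet is purely illustrative and not part of the series; a
real port may need thin wrappers (CC_SHA1_Update takes a 32-bit length, for
example):

    #include <CommonCrypto/CommonDigest.h>

    #define platform_SHA_CTX        CC_SHA1_CTX
    #define platform_SHA1_Init      CC_SHA1_Init
    #define platform_SHA1_Update    CC_SHA1_Update
    #define platform_SHA1_Final     CC_SHA1_Final
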
/*
* Note that these constants are suitable for indexing the hash_algos array and
* comparing against each other, but are otherwise arbitrary, so they should not
/* Number of algorithms supported (including unknown). */
#define GIT_HASH_NALGOS (GIT_HASH_SHA1 + 1)
-typedef void (*git_hash_init_fn)(void *ctx);
-typedef void (*git_hash_update_fn)(void *ctx, const void *in, size_t len);
-typedef void (*git_hash_final_fn)(unsigned char *hash, void *ctx);
+/* A suitably aligned type for stack allocations of hash contexts. */
+union git_hash_ctx {
+ git_SHA_CTX sha1;
+};
+typedef union git_hash_ctx git_hash_ctx;
+
+typedef void (*git_hash_init_fn)(git_hash_ctx *ctx);
+typedef void (*git_hash_update_fn)(git_hash_ctx *ctx, const void *in, size_t len);
+typedef void (*git_hash_final_fn)(unsigned char *hash, git_hash_ctx *ctx);
struct git_hash_algo {
/*
/* A four-byte version identifier, used in pack indices. */
uint32_t format_id;
- /* The size of a hash context (e.g. git_SHA_CTX). */
- size_t ctxsz;
-
/* The length of the hash in binary. */
size_t rawsz;
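
The typedefs above are consumed through struct git_hash_algo. A usage sketch
(the init_fn/update_fn/final_fn member names are inferred from the typedef
names; only format_id and rawsz are visible in this hunk):

    /* Hash `len` bytes of `buf`; `out` must hold algo->rawsz bytes. */
    static void hash_buf(const struct git_hash_algo *algo,
                         const void *buf, size_t len, unsigned char *out)
    {
            git_hash_ctx ctx;

            algo->init_fn(&ctx);
            algo->update_fn(&ctx, buf, len);
            algo->final_fn(out, &ctx);
    }
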
*/
static inline void hashmap_enable_item_counting(struct hashmap *map)
{
- void *item;
unsigned int n = 0;
struct hashmap_iter iter;
return;
hashmap_iter_init(map, &iter);
- while ((item = hashmap_iter_next(&iter)))
+ while (hashmap_iter_next(&iter))
n++;
map->do_count_items = 1;
git_zstream stream;
unpacked = read_sha1_file(request->obj->oid.hash, &type, &len);
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1;
+ hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
/* Set it up */
git_deflate_init(&stream, zlib_compression_level);
lock->timeout = -1;
}
XML_ParserFree(parser);
+ } else {
+ fprintf(stderr,
+ "error: curl result=%d, HTTP code=%ld\n",
+ results.curl_result, results.http_code);
}
} else {
fprintf(stderr, "Unable to start LOCK request\n");
} else if (hashcmp(obj_req->sha1, req->real_sha1)) {
ret = error("File %s has bad hash", hex);
} else if (req->rename < 0) {
- ret = error("unable to write sha1 filename %s",
- sha1_file_name(req->sha1));
+ struct strbuf buf = STRBUF_INIT;
+ sha1_file_name(&buf, req->sha1);
+ ret = error("unable to write sha1 filename %s", buf.buf);
+ strbuf_release(&buf);
}
release_http_object_request(req);
#include "transport.h"
#include "packfile.h"
#include "protocol.h"
+#include "string-list.h"
static struct trace_key trace_curl = TRACE_KEY_INIT(CURL);
+static int trace_curl_data = 1;
+static struct string_list cookies_to_redact = STRING_LIST_INIT_DUP;
#if LIBCURL_VERSION_NUM >= 0x070a08
long int git_curl_ipresolve = CURL_IPRESOLVE_WHATEVER;
#else
/* Everything else is opaque and possibly sensitive */
strbuf_setlen(header, sensitive_header - header->buf);
strbuf_addstr(header, " <redacted>");
+ } else if (cookies_to_redact.nr &&
+ skip_prefix(header->buf, "Cookie:", &sensitive_header)) {
+ struct strbuf redacted_header = STRBUF_INIT;
+ char *cookie;
+
+ while (isspace(*sensitive_header))
+ sensitive_header++;
+
+ /*
+ * The contents of header starting from sensitive_header will
+ * subsequently be overridden, so it is fine to mutate this
+ * string (hence the assignment to "char *").
+ */
+ cookie = (char *) sensitive_header;
+
+ while (cookie) {
+ char *equals;
+ char *semicolon = strstr(cookie, "; ");
+ if (semicolon)
+ *semicolon = 0;
+ equals = strchrnul(cookie, '=');
+			if (!*equals) {
+				/*
+				 * Invalid cookie: strchrnul() never returns
+				 * NULL, so test the character it points at;
+				 * an empty one means there was no '='.
+				 * Append the cookie verbatim and fall through
+				 * to the separator handling below.
+				 */
+				strbuf_addstr(&redacted_header, cookie);
+			} else {
+				*equals = 0; /* temporarily set to NUL for lookup */
+				if (string_list_lookup(&cookies_to_redact, cookie)) {
+					strbuf_addstr(&redacted_header, cookie);
+					strbuf_addstr(&redacted_header, "=<redacted>");
+				} else {
+					*equals = '=';
+					strbuf_addstr(&redacted_header, cookie);
+				}
+			}
+ if (semicolon) {
+ /*
+ * There are more cookies. (Or, for some
+ * reason, the input string ends in "; ".)
+ */
+ strbuf_addstr(&redacted_header, "; ");
+ cookie = semicolon + strlen("; ");
+ } else {
+ cookie = NULL;
+ }
+ }
+
+ strbuf_setlen(header, sensitive_header - header->buf);
+ strbuf_addbuf(header, &redacted_header);
}
}
curl_dump_header(text, (unsigned char *)data, size, DO_FILTER);
break;
case CURLINFO_DATA_OUT:
- text = "=> Send data";
- curl_dump_data(text, (unsigned char *)data, size);
+ if (trace_curl_data) {
+ text = "=> Send data";
+ curl_dump_data(text, (unsigned char *)data, size);
+ }
break;
case CURLINFO_SSL_DATA_OUT:
- text = "=> Send SSL data";
- curl_dump_data(text, (unsigned char *)data, size);
+ if (trace_curl_data) {
+ text = "=> Send SSL data";
+ curl_dump_data(text, (unsigned char *)data, size);
+ }
break;
case CURLINFO_HEADER_IN:
text = "<= Recv header";
curl_dump_header(text, (unsigned char *)data, size, NO_FILTER);
break;
case CURLINFO_DATA_IN:
- text = "<= Recv data";
- curl_dump_data(text, (unsigned char *)data, size);
+ if (trace_curl_data) {
+ text = "<= Recv data";
+ curl_dump_data(text, (unsigned char *)data, size);
+ }
break;
case CURLINFO_SSL_DATA_IN:
- text = "<= Recv SSL data";
- curl_dump_data(text, (unsigned char *)data, size);
+ if (trace_curl_data) {
+ text = "<= Recv SSL data";
+ curl_dump_data(text, (unsigned char *)data, size);
+ }
break;
default: /* we ignore unknown types by default */
if (getenv("GIT_CURL_VERBOSE"))
curl_easy_setopt(result, CURLOPT_VERBOSE, 1L);
setup_curl_trace(result);
+ if (getenv("GIT_TRACE_CURL_NO_DATA"))
+ trace_curl_data = 0;
+ if (getenv("GIT_REDACT_COOKIES")) {
+ string_list_split(&cookies_to_redact,
+ getenv("GIT_REDACT_COOKIES"), ',', -1);
+ string_list_sort(&cookies_to_redact);
+ }
curl_easy_setopt(result, CURLOPT_USERAGENT,
user_agent ? user_agent : git_user_agent());
void add_fill_function(void *data, int (*fill)(void *))
{
- struct fill_chain *new = xmalloc(sizeof(*new));
+ struct fill_chain *new_fill = xmalloc(sizeof(*new_fill));
struct fill_chain **linkp = &fill_cfg;
- new->data = data;
- new->fill = fill;
- new->next = NULL;
+ new_fill->data = data;
+ new_fill->fill = fill;
+ new_fill->next = NULL;
while (*linkp)
linkp = &(*linkp)->next;
- *linkp = new;
+ *linkp = new_fill;
}
void fill_active_slots(void)
unsigned char *sha1)
{
char *hex = sha1_to_hex(sha1);
- const char *filename;
+ struct strbuf filename = STRBUF_INIT;
char prevfile[PATH_MAX];
int prevlocal;
char prev_buf[PREV_BUF_SIZE];
hashcpy(freq->sha1, sha1);
freq->localfile = -1;
- filename = sha1_file_name(sha1);
+ sha1_file_name(&filename, sha1);
snprintf(freq->tmpfile, sizeof(freq->tmpfile),
- "%s.temp", filename);
+ "%s.temp", filename.buf);
- snprintf(prevfile, sizeof(prevfile), "%s.prev", filename);
+ snprintf(prevfile, sizeof(prevfile), "%s.prev", filename.buf);
unlink_or_warn(prevfile);
rename(freq->tmpfile, prevfile);
unlink_or_warn(freq->tmpfile);
+ strbuf_release(&filename);
if (freq->localfile != -1)
error("fd leakage in start: %d", freq->localfile);
int finish_http_object_request(struct http_object_request *freq)
{
struct stat st;
+ struct strbuf filename = STRBUF_INIT;
close(freq->localfile);
freq->localfile = -1;
unlink_or_warn(freq->tmpfile);
return -1;
}
- freq->rename =
- finalize_object_file(freq->tmpfile, sha1_file_name(freq->sha1));
+
+ sha1_file_name(&filename, freq->sha1);
+ freq->rename = finalize_object_file(freq->tmpfile, filename.buf);
+ strbuf_release(&filename);
return freq->rename;
}
*/
static void lf_to_crlf(struct strbuf *msg)
{
- char *new;
+ char *new_msg;
size_t i, j;
char lastc;
- /* First pass: tally, in j, the size of the new string: */
+	/* First pass: tally, in j, the size of the new message: */
for (i = j = 0, lastc = '\0'; i < msg->len; i++) {
if (msg->buf[i] == '\n' && lastc != '\r')
j++; /* a CR will need to be added here */
j++;
}
- new = xmallocz(j);
+ new_msg = xmallocz(j);
/*
- * Second pass: write the new string. Note that this loop is
+	 * Second pass: write the new message.  Note that this loop is
* otherwise identical to the first pass.
*/
for (i = j = 0, lastc = '\0'; i < msg->len; i++) {
if (msg->buf[i] == '\n' && lastc != '\r')
- new[j++] = '\r';
- lastc = new[j++] = msg->buf[i];
+ new_msg[j++] = '\r';
+ lastc = new_msg[j++] = msg->buf[i];
}
- strbuf_attach(msg, new, j, j + 1);
+ strbuf_attach(msg, new_msg, j, j + 1);
}
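
A quick usage sketch for the helper (a hypothetical caller in the same file,
since the function is static): lone LFs gain a CR, existing CRLF pairs are
left untouched.

    struct strbuf msg = STRBUF_INIT;

    strbuf_addstr(&msg, "a\nb\r\nc");
    lf_to_crlf(&msg);               /* msg.buf is now "a\r\nb\r\nc" */
    strbuf_release(&msg);
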
/*
assert(out->nr == 0);
while (i < a->nr || j < b->nr) {
- struct range *new;
+ struct range *new_range;
if (i < a->nr && j < b->nr) {
if (ra[i].start < rb[j].start)
- new = &ra[i++];
+ new_range = &ra[i++];
else if (ra[i].start > rb[j].start)
- new = &rb[j++];
+ new_range = &rb[j++];
else if (ra[i].end < rb[j].end)
- new = &ra[i++];
+ new_range = &ra[i++];
else
- new = &rb[j++];
+ new_range = &rb[j++];
} else if (i < a->nr) /* b exhausted */
- new = &ra[i++];
+ new_range = &ra[i++];
else /* a exhausted */
- new = &rb[j++];
- if (new->start == new->end)
+ new_range = &rb[j++];
+ if (new_range->start == new_range->end)
; /* empty range */
- else if (!out->nr || out->ranges[out->nr-1].end < new->start) {
+ else if (!out->nr || out->ranges[out->nr-1].end < new_range->start) {
range_set_grow(out, 1);
- out->ranges[out->nr].start = new->start;
- out->ranges[out->nr].end = new->end;
+ out->ranges[out->nr].start = new_range->start;
+ out->ranges[out->nr].end = new_range->end;
out->nr++;
- } else if (out->ranges[out->nr-1].end < new->end) {
- out->ranges[out->nr-1].end = new->end;
+ } else if (out->ranges[out->nr-1].end < new_range->end) {
+ out->ranges[out->nr-1].end = new_range->end;
}
}
}
static void add_line_range(struct rev_info *revs, struct commit *commit,
struct line_log_data *range)
{
- struct line_log_data *old = NULL;
- struct line_log_data *new = NULL;
+ struct line_log_data *old_line = NULL;
+ struct line_log_data *new_line = NULL;
- old = lookup_decoration(&revs->line_log_data, &commit->object);
- if (old && range) {
- new = line_log_data_merge(old, range);
- free_line_log_data(old);
+ old_line = lookup_decoration(&revs->line_log_data, &commit->object);
+ if (old_line && range) {
+ new_line = line_log_data_merge(old_line, range);
+ free_line_log_data(old_line);
} else if (range)
- new = line_log_data_copy(range);
+ new_line = line_log_data_copy(range);
- if (new)
- add_decoration(&revs->line_log_data, &commit->object, new);
+ if (new_line)
+ add_decoration(&revs->line_log_data, &commit->object, new_line);
}
static void clear_commit_line_range(struct rev_info *revs, struct commit *commit)
static struct diff_filepair *diff_filepair_dup(struct diff_filepair *pair)
{
- struct diff_filepair *new = xmalloc(sizeof(struct diff_filepair));
- new->one = pair->one;
- new->two = pair->two;
- new->one->count++;
- new->two->count++;
- return new;
+ struct diff_filepair *new_filepair = xmalloc(sizeof(struct diff_filepair));
+ new_filepair->one = pair->one;
+ new_filepair->two = pair->two;
+ new_filepair->one->count++;
+ new_filepair->two->count++;
+ return new_filepair;
}
static void free_diffqueues(int n, struct diff_queue_struct *dq)
* subordinate commands when necessary. We also "intern" the arg for
* the convenience of the current command.
*/
-int parse_list_objects_filter(struct list_objects_filter_options *filter_options,
- const char *arg)
+static int gently_parse_list_objects_filter(
+ struct list_objects_filter_options *filter_options,
+ const char *arg,
+ struct strbuf *errbuf)
{
const char *v0;
- if (filter_options->choice)
- die(_("multiple object filter types cannot be combined"));
+ if (filter_options->choice) {
+ if (errbuf) {
+ strbuf_init(errbuf, 0);
+ strbuf_addstr(
+ errbuf,
+ _("multiple filter-specs cannot be combined"));
+ }
+ return 1;
+ }
filter_options->filter_spec = strdup(arg);
if (!strcmp(arg, "blob:none")) {
filter_options->choice = LOFC_BLOB_NONE;
return 0;
- }
- if (skip_prefix(arg, "blob:limit=", &v0)) {
- if (!git_parse_ulong(v0, &filter_options->blob_limit_value))
- die(_("invalid filter-spec expression '%s'"), arg);
- filter_options->choice = LOFC_BLOB_LIMIT;
- return 0;
- }
+ } else if (skip_prefix(arg, "blob:limit=", &v0)) {
+ if (git_parse_ulong(v0, &filter_options->blob_limit_value)) {
+ filter_options->choice = LOFC_BLOB_LIMIT;
+ return 0;
+ }
- if (skip_prefix(arg, "sparse:oid=", &v0)) {
+ } else if (skip_prefix(arg, "sparse:oid=", &v0)) {
struct object_context oc;
struct object_id sparse_oid;
filter_options->sparse_oid_value = oiddup(&sparse_oid);
filter_options->choice = LOFC_SPARSE_OID;
return 0;
- }
- if (skip_prefix(arg, "sparse:path=", &v0)) {
+ } else if (skip_prefix(arg, "sparse:path=", &v0)) {
filter_options->choice = LOFC_SPARSE_PATH;
filter_options->sparse_path_value = strdup(v0);
return 0;
}
- die(_("invalid filter-spec expression '%s'"), arg);
+ if (errbuf) {
+ strbuf_init(errbuf, 0);
+		strbuf_addf(errbuf, _("invalid filter-spec '%s'"), arg);
+ }
+ memset(filter_options, 0, sizeof(*filter_options));
+ return 1;
+}
+
+int parse_list_objects_filter(struct list_objects_filter_options *filter_options,
+ const char *arg)
+{
+ struct strbuf buf = STRBUF_INIT;
+ if (gently_parse_list_objects_filter(filter_options, arg, &buf))
+ die("%s", buf.buf);
return 0;
}
struct list_objects_filter_options *filter_options = opt->value;
if (unset || !arg) {
- list_objects_filter_release(filter_options);
+ list_objects_filter_set_no_filter(filter_options);
return 0;
}
free(filter_options->sparse_path_value);
memset(filter_options, 0, sizeof(*filter_options));
}
+
+void partial_clone_register(
+ const char *remote,
+ const struct list_objects_filter_options *filter_options)
+{
+ /*
+ * Record the name of the partial clone remote in the
+ * config and in the global variable -- the latter is
+ * used throughout to indicate that partial clone is
+ * enabled and to expect missing objects.
+ */
+ if (repository_format_partial_clone &&
+ *repository_format_partial_clone &&
+ strcmp(remote, repository_format_partial_clone))
+ die(_("cannot change partial clone promisor remote"));
+
+ git_config_set("core.repositoryformatversion", "1");
+ git_config_set("extensions.partialclone", remote);
+
+ repository_format_partial_clone = xstrdup(remote);
+
+ /*
+ * Record the initial filter-spec in the config as
+ * the default for subsequent fetches from this remote.
+ */
+ core_partial_clone_filter_default =
+ xstrdup(filter_options->filter_spec);
+ git_config_set("core.partialclonefilter",
+ core_partial_clone_filter_default);
+}
+
+void partial_clone_get_default_filter_spec(
+ struct list_objects_filter_options *filter_options)
+{
+	/*
+	 * Parse the default value, but silently ignore it if it is
+	 * unset or invalid.
+	 */
+	if (!core_partial_clone_filter_default)
+		return;
+	gently_parse_list_objects_filter(filter_options,
+					 core_partial_clone_filter_default,
+					 NULL);
+}
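
A hypothetical caller sketch (the remote name and filter-spec are chosen for
illustration). Per the code above, the repository config afterwards carries
core.repositoryformatversion=1, extensions.partialclone=origin and
core.partialclonefilter=blob:none:

    struct list_objects_filter_options filter_options;

    memset(&filter_options, 0, sizeof(filter_options));
    parse_list_objects_filter(&filter_options, "blob:none"); /* dies on bad spec */
    partial_clone_register("origin", &filter_options);
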
*/
enum list_objects_filter_choice choice;
+ /*
+ * Choice is LOFC_DISABLED because "--no-filter" was requested.
+ */
+ unsigned int no_filter : 1;
+
/*
* Parsed values (fields) from within the filter-spec. These are
* choice-specific; not all values will be defined for any given
void list_objects_filter_release(
struct list_objects_filter_options *filter_options);
+static inline void list_objects_filter_set_no_filter(
+ struct list_objects_filter_options *filter_options)
+{
+ list_objects_filter_release(filter_options);
+ filter_options->no_filter = 1;
+}
+
+void partial_clone_register(
+ const char *remote,
+ const struct list_objects_filter_options *filter_options);
+void partial_clone_get_default_filter_spec(
+ struct list_objects_filter_options *filter_options);
+
#endif /* LIST_OBJECTS_FILTER_OPTIONS_H */
#include "list-objects.h"
#include "list-objects-filter.h"
#include "list-objects-filter-options.h"
+#include "packfile.h"
static void process_blob(struct rev_info *revs,
struct blob *blob,
if (obj->flags & (UNINTERESTING | SEEN))
return;
+ /*
+ * Pre-filter known-missing objects when explicitly requested.
+ * Otherwise, a missing object error message may be reported
+ * later (depending on other filtering criteria).
+ *
+ * Note that this "--exclude-promisor-objects" pre-filtering
+ * may cause the actual filter to report an incomplete list
+ * of missing objects.
+ */
+ if (revs->exclude_promisor_objects &&
+ !has_object_file(&obj->oid) &&
+ is_promisor_object(&obj->oid))
+ return;
+
pathlen = path->len;
strbuf_addstr(path, name);
if (filter_fn)
all_entries_interesting: entry_not_interesting;
int baselen = base->len;
enum list_objects_filter_result r = LOFR_MARK_SEEN | LOFR_DO_SHOW;
+ int gently = revs->ignore_missing_links ||
+ revs->exclude_promisor_objects;
if (!revs->tree_objects)
return;
die("bad tree object");
if (obj->flags & (UNINTERESTING | SEEN))
return;
- if (parse_tree_gently(tree, revs->ignore_missing_links) < 0) {
+ if (parse_tree_gently(tree, gently) < 0) {
if (revs->ignore_missing_links)
return;
+
+ /*
+ * Pre-filter known-missing tree objects when explicitly
+ * requested. This may cause the actual filter to report
+ * an incomplete list of missing objects.
+ */
+ if (revs->exclude_promisor_objects &&
+ is_promisor_object(&obj->oid))
+ return;
+
die("bad tree object %s", oid_to_hex(&obj->oid));
}
int status, nth;
size_t payload_size, gpg_message_offset;
- hash_sha1_file(extra->value, extra->len, typename(OBJ_TAG), oid.hash);
+ hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &oid);
tag = lookup_tag(&oid);
if (!tag)
return; /* error message already given */
show_mergetag(opt, commit);
}
- if (!get_cached_commit_buffer(commit, NULL))
- return;
-
if (opt->show_notes) {
int raw;
struct strbuf notebuf = STRBUF_INIT;
strbuf_release(&mi->inbody_header_accum);
free(mi->message_id);
- for (i = 0; mi->p_hdr_data[i]; i++)
- strbuf_release(mi->p_hdr_data[i]);
+ if (mi->p_hdr_data)
+ for (i = 0; mi->p_hdr_data[i]; i++)
+ strbuf_release(mi->p_hdr_data[i]);
free(mi->p_hdr_data);
- for (i = 0; mi->s_hdr_data[i]; i++)
- strbuf_release(mi->s_hdr_data[i]);
+ if (mi->s_hdr_data)
+ for (i = 0; mi->s_hdr_data[i]; i++)
+ strbuf_release(mi->s_hdr_data[i]);
free(mi->s_hdr_data);
while (mi->content < mi->content_top) {
}
/*
- * A tree "hash1" has a subdirectory at "prefix". Come up with a
- * tree object by replacing it with another tree "hash2".
+ * A tree "oid1" has a subdirectory at "prefix". Come up with a tree object by
+ * replacing it with another tree "oid2".
*/
-static int splice_tree(const unsigned char *hash1,
- const char *prefix,
- const unsigned char *hash2,
- unsigned char *result)
+static int splice_tree(const struct object_id *oid1, const char *prefix,
+ const struct object_id *oid2, struct object_id *result)
{
char *subpath;
int toplen;
char *buf;
unsigned long sz;
struct tree_desc desc;
- unsigned char *rewrite_here;
- const unsigned char *rewrite_with;
- unsigned char subtree[20];
+ struct object_id *rewrite_here;
+ const struct object_id *rewrite_with;
+ struct object_id subtree;
enum object_type type;
int status;
if (*subpath)
subpath++;
- buf = read_sha1_file(hash1, &type, &sz);
+ buf = read_sha1_file(oid1->hash, &type, &sz);
if (!buf)
- die("cannot read tree %s", sha1_to_hex(hash1));
+ die("cannot read tree %s", oid_to_hex(oid1));
init_tree_desc(&desc, buf, sz);
rewrite_here = NULL;
if (strlen(name) == toplen &&
!memcmp(name, prefix, toplen)) {
if (!S_ISDIR(mode))
- die("entry %s in tree %s is not a tree",
- name, sha1_to_hex(hash1));
- rewrite_here = (unsigned char *) oid->hash;
+ die("entry %s in tree %s is not a tree", name,
+ oid_to_hex(oid1));
+ rewrite_here = (struct object_id *)oid;
break;
}
update_tree_entry(&desc);
}
if (!rewrite_here)
- die("entry %.*s not found in tree %s",
- toplen, prefix, sha1_to_hex(hash1));
+ die("entry %.*s not found in tree %s", toplen, prefix,
+ oid_to_hex(oid1));
if (*subpath) {
- status = splice_tree(rewrite_here, subpath, hash2, subtree);
+ status = splice_tree(rewrite_here, subpath, oid2, &subtree);
if (status)
return status;
- rewrite_with = subtree;
+ rewrite_with = &subtree;
+ } else {
+ rewrite_with = oid2;
}
- else
- rewrite_with = hash2;
- hashcpy(rewrite_here, rewrite_with);
- status = write_sha1_file(buf, sz, tree_type, result);
+ oidcpy(rewrite_here, rewrite_with);
+ status = write_object_file(buf, sz, tree_type, result);
free(buf);
return status;
}
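
For orientation, a caller sketch with made-up object ids; splice_tree()
propagates the status of the final write_object_file():

    struct object_id result;

    /* replace the subtree at "a/b" of `root` with `repl` */
    if (splice_tree(&root, "a/b", &repl, &result))
            die("failed to write spliced tree");
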
if (!*add_prefix)
return;
- splice_tree(hash1->hash, add_prefix, hash2->hash, shifted->hash);
+ splice_tree(hash1, add_prefix, hash2, shifted);
}
/*
* shift tree2 down by adding shift_prefix above it
* to match tree1.
*/
- splice_tree(hash1->hash, shift_prefix, hash2->hash, shifted->hash);
+ splice_tree(hash1, shift_prefix, hash2, shifted);
else
/*
* shift tree2 up by removing shift_prefix from it
struct rename {
struct diff_filepair *pair;
+ /*
+ * Purpose of src_entry and dst_entry:
+ *
+ * If 'before' is renamed to 'after' then src_entry will contain
+ * the versions of 'before' from the merge_base, HEAD, and MERGE in
+ * stages 1, 2, and 3; dst_entry will contain the respective
+ * versions of 'after' in corresponding locations. Thus, we have a
+ * total of six modes and oids, though some will be null. (Stage 0
+ * is ignored; we're interested in handling conflicts.)
+ *
+ * Since we don't turn on break-rewrites by default, neither
+ * src_entry nor dst_entry can have all three of their stages have
+ * non-null oids, meaning at most four of the six will be non-null.
+ * Also, since this is a rename, both src_entry and dst_entry will
+ * have at least one non-null oid, meaning at least two will be
+ * non-null. Of the six oids, a typical rename will have three be
+ * non-null. Only two implies a rename/delete, and four implies a
+ * rename/add.
+ */
struct stage_data *src_entry;
struct stage_data *dst_entry;
unsigned processed:1;
if ((merge_status < 0) || !result_buf.ptr)
ret = err(o, _("Failed to execute internal merge"));
- if (!ret && write_sha1_file(result_buf.ptr, result_buf.size,
- blob_type, result->oid.hash))
+ if (!ret &&
+ write_object_file(result_buf.ptr, result_buf.size,
+ blob_type, &result->oid))
ret = err(o, _("Unable to add %s to database"),
a->path);
get_files_dirs(o, merge);
entries = get_unmerged();
- record_df_conflict_files(o, entries);
re_head = get_renames(o, head, common, head, merge, entries);
re_merge = get_renames(o, merge, common, head, merge, entries);
clean = process_renames(o, re_head, re_merge);
+ record_df_conflict_files(o, entries);
if (clean < 0)
goto cleanup;
for (i = entries->nr-1; 0 <= i; i--) {
hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
clean = merge_recursive(o, head_commit, next_commit, ca,
result);
- if (clean < 0)
+ if (clean < 0) {
+ rollback_lock_file(&lock);
return clean;
+ }
- if (active_cache_changed &&
- write_locked_index(&the_index, &lock, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &lock,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
return err(o, _("Unable to write index."));
return clean ? 0 : 1;
setup_unpack_trees_porcelain(&opts, "merge");
trees[nr_trees] = parse_tree_indirect(head);
- if (!trees[nr_trees++])
+ if (!trees[nr_trees++]) {
+ rollback_lock_file(&lock_file);
return -1;
+ }
trees[nr_trees] = parse_tree_indirect(remote);
- if (!trees[nr_trees++])
+ if (!trees[nr_trees++]) {
+ rollback_lock_file(&lock_file);
return -1;
+ }
for (i = 0; i < nr_trees; i++) {
parse_tree(trees[i]);
init_tree_desc(t+i, trees[i]->buffer, trees[i]->size);
}
- if (unpack_trees(nr_trees, t, &opts))
+ if (unpack_trees(nr_trees, t, &opts)) {
+ rollback_lock_file(&lock_file);
return -1;
+ }
if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
return error(_("unable to write new index file"));
return 0;
+++ /dev/null
-#include "cache.h"
-#include "mru.h"
-
-void mru_append(struct mru *mru, void *item)
-{
- struct mru_entry *cur = xmalloc(sizeof(*cur));
- cur->item = item;
- cur->prev = mru->tail;
- cur->next = NULL;
-
- if (mru->tail)
- mru->tail->next = cur;
- else
- mru->head = cur;
- mru->tail = cur;
-}
-
-void mru_mark(struct mru *mru, struct mru_entry *entry)
-{
- /* If we're already at the front of the list, nothing to do */
- if (mru->head == entry)
- return;
-
- /* Otherwise, remove us from our current slot... */
- if (entry->prev)
- entry->prev->next = entry->next;
- if (entry->next)
- entry->next->prev = entry->prev;
- else
- mru->tail = entry->prev;
-
- /* And insert us at the beginning. */
- entry->prev = NULL;
- entry->next = mru->head;
- if (mru->head)
- mru->head->prev = entry;
- mru->head = entry;
-}
-
-void mru_clear(struct mru *mru)
-{
- struct mru_entry *p = mru->head;
-
- while (p) {
- struct mru_entry *to_free = p;
- p = p->next;
- free(to_free);
- }
- mru->head = mru->tail = NULL;
-}
+++ /dev/null
-#ifndef MRU_H
-#define MRU_H
-
-/**
- * A simple most-recently-used cache, backed by a doubly-linked list.
- *
- * Usage is roughly:
- *
- * // Create a list. Zero-initialization is required.
- * static struct mru cache;
- * mru_append(&cache, item);
- * ...
- *
- * // Iterate in MRU order.
- * struct mru_entry *p;
- * for (p = cache.head; p; p = p->next) {
- * if (matches(p->item))
- * break;
- * }
- *
- * // Mark an item as used, moving it to the front of the list.
- * mru_mark(&cache, p);
- *
- * // Reset the list to empty, cleaning up all resources.
- * mru_clear(&cache);
- *
- * Note that you SHOULD NOT call mru_mark() and then continue traversing the
- * list; it reorders the marked item to the front of the list, and therefore
- * you will begin traversing the whole list again.
- */
-
-struct mru_entry {
- void *item;
- struct mru_entry *prev, *next;
-};
-
-struct mru {
- struct mru_entry *head, *tail;
-};
-
-void mru_append(struct mru *mru, void *item);
-void mru_mark(struct mru *mru, struct mru_entry *entry);
-void mru_clear(struct mru *mru);
-
-#endif /* MRU_H */
static void lazy_init_name_hash(struct index_state *istate)
{
+ uint64_t start = getnanotime();
+
if (istate->name_hash_initialized)
return;
hashmap_init(&istate->name_hash, cache_entry_cmp, NULL, istate->cache_nr);
}
istate->name_hash_initialized = 1;
+ trace_performance_since(start, "initialize name hash");
}
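/*
 * Editor's note: trace_performance_since() reports through Git's
 * GIT_TRACE_PERFORMANCE tracing key, so the timing line added above
 * appears only when that environment variable is set.
 */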
/*
if (*ptr == '/') {
struct dir_entry *dir;
- ptr++;
- dir = find_dir_entry(istate, name, ptr - name + 1);
+ dir = find_dir_entry(istate, name, ptr - name);
if (dir) {
memcpy((void *)startPtr, dir->name + (startPtr - name), ptr - startPtr);
- startPtr = ptr;
+ startPtr = ptr + 1;
}
+ ptr++;
}
}
}
if (!c->tree.dirty)
return 0;
- if (write_notes_tree(&c->tree, tree_oid.hash))
+ if (write_notes_tree(&c->tree, &tree_oid))
return -1;
- if (commit_tree(c->validity, strlen(c->validity), tree_oid.hash, NULL,
- commit_oid.hash, NULL, NULL) < 0)
+ if (commit_tree(c->validity, strlen(c->validity), &tree_oid, NULL,
+ &commit_oid, NULL, NULL) < 0)
return -1;
if (update_ref("update notes cache", c->tree.update_ref, &commit_oid,
NULL, 0, UPDATE_REFS_QUIET_ON_ERR) < 0)
{
struct object_id value_oid;
- if (write_sha1_file(data, size, "blob", value_oid.hash) < 0)
+ if (write_object_file(data, size, "blob", &value_oid) < 0)
return -1;
return add_note(&c->tree, key_oid, &value_oid, NULL);
}
struct commit_list *parents = NULL;
commit_list_insert(remote, &parents); /* LIFO order */
commit_list_insert(local, &parents);
- create_notes_commit(local_tree, parents,
- o->commit_msg.buf, o->commit_msg.len,
- result_oid->hash);
+ create_notes_commit(local_tree, parents, o->commit_msg.buf,
+ o->commit_msg.len, result_oid);
}
found_result:
strbuf_setlen(&path, baselen);
}
- create_notes_commit(partial_tree, partial_commit->parents,
- msg, strlen(msg), result_oid->hash);
+ create_notes_commit(partial_tree, partial_commit->parents, msg,
+ strlen(msg), result_oid);
unuse_commit_buffer(partial_commit, buffer);
if (o->verbosity >= 4)
printf("Finalized notes merge commit: %s\n",
void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
const char *msg, size_t msg_len,
- unsigned char *result_sha1)
+ struct object_id *result_oid)
{
struct object_id tree_oid;
assert(t->initialized);
- if (write_notes_tree(t, tree_oid.hash))
+ if (write_notes_tree(t, &tree_oid))
die("Failed to write notes tree to database");
if (!parents) {
/* else: t->ref points to nothing, assume root/orphan commit */
}
- if (commit_tree(msg, msg_len, tree_oid.hash, parents, result_sha1, NULL, NULL))
+ if (commit_tree(msg, msg_len, &tree_oid, parents, result_oid, NULL,
+ NULL))
die("Failed to commit notes tree to database");
}
strbuf_addstr(&buf, msg);
strbuf_complete_line(&buf);
- create_notes_commit(t, NULL, buf.buf, buf.len, commit_oid.hash);
+ create_notes_commit(t, NULL, buf.buf, buf.len, &commit_oid);
strbuf_insert(&buf, 0, "notes: ", 7); /* commit message starts at index 7 */
update_ref(buf.buf, t->update_ref, &commit_oid, NULL, 0,
UPDATE_REFS_DIE_ON_ERR);
 * The resulting commit's object ID is stored in result_oid.
*/
void create_notes_commit(struct notes_tree *t, struct commit_list *parents,
- const char *msg, size_t msg_len, unsigned char *result_sha1);
+ const char *msg, size_t msg_len,
+ struct object_id *result_oid);
void commit_notes(struct notes_tree *t, const char *msg);
if (!oidcmp(&l->val_oid, &entry->val_oid))
return 0;
- ret = combine_notes(l->val_oid.hash,
- entry->val_oid.hash);
+ ret = combine_notes(&l->val_oid,
+ &entry->val_oid);
if (!ret && is_null_oid(&l->val_oid))
note_tree_remove(t, tree, n, entry);
free(entry);
ret = tree_write_stack_finish_subtree(n);
if (ret)
return ret;
- ret = write_sha1_file(n->buf.buf, n->buf.len, tree_type, s.hash);
+ ret = write_object_file(n->buf.buf, n->buf.len, tree_type, &s);
if (ret)
return ret;
strbuf_release(&n->buf);
return 0;
}
-int combine_notes_concatenate(unsigned char *cur_sha1,
- const unsigned char *new_sha1)
+int combine_notes_concatenate(struct object_id *cur_oid,
+ const struct object_id *new_oid)
{
char *cur_msg = NULL, *new_msg = NULL, *buf;
unsigned long cur_len, new_len, buf_len;
int ret;
/* read in both note blob objects */
- if (!is_null_sha1(new_sha1))
- new_msg = read_sha1_file(new_sha1, &new_type, &new_len);
+ if (!is_null_oid(new_oid))
+ new_msg = read_sha1_file(new_oid->hash, &new_type, &new_len);
if (!new_msg || !new_len || new_type != OBJ_BLOB) {
free(new_msg);
return 0;
}
- if (!is_null_sha1(cur_sha1))
- cur_msg = read_sha1_file(cur_sha1, &cur_type, &cur_len);
+ if (!is_null_oid(cur_oid))
+ cur_msg = read_sha1_file(cur_oid->hash, &cur_type, &cur_len);
if (!cur_msg || !cur_len || cur_type != OBJ_BLOB) {
free(cur_msg);
free(new_msg);
- hashcpy(cur_sha1, new_sha1);
+ oidcpy(cur_oid, new_oid);
return 0;
}
free(new_msg);
/* create a new blob object from buf */
- ret = write_sha1_file(buf, buf_len, blob_type, cur_sha1);
+ ret = write_object_file(buf, buf_len, blob_type, cur_oid);
free(buf);
return ret;
}
-int combine_notes_overwrite(unsigned char *cur_sha1,
- const unsigned char *new_sha1)
+int combine_notes_overwrite(struct object_id *cur_oid,
+ const struct object_id *new_oid)
{
- hashcpy(cur_sha1, new_sha1);
+ oidcpy(cur_oid, new_oid);
return 0;
}
-int combine_notes_ignore(unsigned char *cur_sha1,
- const unsigned char *new_sha1)
+int combine_notes_ignore(struct object_id *cur_oid,
+ const struct object_id *new_oid)
{
return 0;
}
* newlines removed.
*/
static int string_list_add_note_lines(struct string_list *list,
- const unsigned char *sha1)
+ const struct object_id *oid)
{
char *data;
unsigned long len;
enum object_type t;
- if (is_null_sha1(sha1))
+ if (is_null_oid(oid))
return 0;
/* read_sha1_file NUL-terminates */
- data = read_sha1_file(sha1, &t, &len);
+ data = read_sha1_file(oid->hash, &t, &len);
if (t != OBJ_BLOB || !data || !len) {
free(data);
return t != OBJ_BLOB || !data;
return 0;
}
-int combine_notes_cat_sort_uniq(unsigned char *cur_sha1,
- const unsigned char *new_sha1)
+int combine_notes_cat_sort_uniq(struct object_id *cur_oid,
+ const struct object_id *new_oid)
{
struct string_list sort_uniq_list = STRING_LIST_INIT_DUP;
struct strbuf buf = STRBUF_INIT;
int ret = 1;
/* read both note blob objects into unique_lines */
- if (string_list_add_note_lines(&sort_uniq_list, cur_sha1))
+ if (string_list_add_note_lines(&sort_uniq_list, cur_oid))
goto out;
- if (string_list_add_note_lines(&sort_uniq_list, new_sha1))
+ if (string_list_add_note_lines(&sort_uniq_list, new_oid))
goto out;
string_list_remove_empty_items(&sort_uniq_list, 0);
string_list_sort(&sort_uniq_list);
string_list_join_lines_helper, &buf))
goto out;
- ret = write_sha1_file(buf.buf, buf.len, blob_type, cur_sha1);
+ ret = write_object_file(buf.buf, buf.len, blob_type, cur_oid);
out:
strbuf_release(&buf);
return for_each_note_helper(t, t->root, 0, 0, flags, fn, cb_data);
}
-int write_notes_tree(struct notes_tree *t, unsigned char *result)
+int write_notes_tree(struct notes_tree *t, struct object_id *result)
{
struct tree_write_stack root;
struct write_each_note_data cb_data;
int ret;
+ int flags;
if (!t)
t = &default_notes_tree;
cb_data.next_non_note = t->first_non_note;
/* Write tree objects representing current notes tree */
- ret = for_each_note(t, FOR_EACH_NOTE_DONT_UNPACK_SUBTREES |
- FOR_EACH_NOTE_YIELD_SUBTREES,
- write_each_note, &cb_data) ||
- write_each_non_note_until(NULL, &cb_data) ||
- tree_write_stack_finish_subtree(&root) ||
- write_sha1_file(root.buf.buf, root.buf.len, tree_type, result);
+ flags = FOR_EACH_NOTE_DONT_UNPACK_SUBTREES |
+ FOR_EACH_NOTE_YIELD_SUBTREES;
+ ret = for_each_note(t, flags, write_each_note, &cb_data) ||
+ write_each_non_note_until(NULL, &cb_data) ||
+ tree_write_stack_finish_subtree(&root) ||
+ write_object_file(root.buf.buf, root.buf.len, tree_type, result);
strbuf_release(&root.buf);
return ret;
}
* When adding a new note annotating the same object as an existing note, it is
* up to the caller to decide how to combine the two notes. The decision is
* made by passing in a function of the following form. The function accepts
- * two SHA1s -- of the existing note and the new note, respectively. The
+ * two object_ids -- of the existing note and the new note, respectively. The
* function then combines the notes in whatever way it sees fit, and writes the
- * resulting SHA1 into the first SHA1 argument (cur_sha1). A non-zero return
+ * resulting oid into the first argument (cur_oid). A non-zero return
* value indicates failure.
*
- * The two given SHA1s shall both be non-NULL and different from each other.
- * Either of them (but not both) may be == null_sha1, which indicates an
- * empty/non-existent note. If the resulting SHA1 (cur_sha1) is == null_sha1,
+ * The two given object_ids shall both be non-NULL and different from each
+ * other. Either of them (but not both) may be == null_oid, which indicates an
+ * empty/non-existent note. If the resulting oid (cur_oid) is == null_oid,
* the note will be removed from the notes tree.
*
* The default combine_notes function (you get this when passing NULL) is
* combine_notes_concatenate(), which appends the contents of the new note to
* the contents of the existing note.
*/
-typedef int (*combine_notes_fn)(unsigned char *cur_sha1, const unsigned char *new_sha1);
+typedef int (*combine_notes_fn)(struct object_id *cur_oid,
+ const struct object_id *new_oid);
/* Common notes combinators */
-int combine_notes_concatenate(unsigned char *cur_sha1, const unsigned char *new_sha1);
-int combine_notes_overwrite(unsigned char *cur_sha1, const unsigned char *new_sha1);
-int combine_notes_ignore(unsigned char *cur_sha1, const unsigned char *new_sha1);
-int combine_notes_cat_sort_uniq(unsigned char *cur_sha1, const unsigned char *new_sha1);
+int combine_notes_concatenate(struct object_id *cur_oid,
+ const struct object_id *new_oid);
+int combine_notes_overwrite(struct object_id *cur_oid,
+ const struct object_id *new_oid);
+int combine_notes_ignore(struct object_id *cur_oid,
+ const struct object_id *new_oid);
+int combine_notes_cat_sort_uniq(struct object_id *cur_oid,
+ const struct object_id *new_oid);
/*
* Notes tree object
* Write the given notes_tree structure to the object database
*
* Creates a new tree object encapsulating the current state of the given
- * notes_tree, and stores its SHA1 into the 'result' argument.
+ * notes_tree, and stores its object id into the 'result' argument.
*
* Returns zero on success, non-zero on failure.
*
* this function has returned zero. Please also remember to create a
* corresponding commit object, and update the appropriate notes ref.
*/
-int write_notes_tree(struct notes_tree *t, unsigned char *result);
+int write_notes_tree(struct notes_tree *t, struct object_id *result);
/* Flags controlling the operation of prune */
#define NOTES_PRUNE_VERBOSE 1
"tag", /* OBJ_TAG = 4 */
};
-const char *typename(unsigned int type)
+const char *type_name(unsigned int type)
{
if (type >= ARRAY_SIZE(object_type_strings))
return NULL;
if (!quiet)
error("object %s is a %s, not a %s",
oid_to_hex(&obj->oid),
- typename(obj->type), typename(type));
+ type_name(obj->type), type_name(type));
return NULL;
}
}
if (obj && obj->parsed)
return obj;
- if ((obj && obj->type == OBJ_BLOB) ||
+ if ((obj && obj->type == OBJ_BLOB && has_object_file(oid)) ||
(!obj && has_object_file(oid) &&
sha1_object_info(oid->hash, NULL) == OBJ_BLOB)) {
if (check_sha1_signature(repl, NULL, 0, NULL) < 0) {
buffer = read_sha1_file(oid->hash, &type, &size);
if (buffer) {
- if (check_sha1_signature(repl, buffer, size, typename(type)) < 0) {
+ if (check_sha1_signature(repl, buffer, size, type_name(type)) < 0) {
free(buffer);
error("sha1 mismatch %s", sha1_to_hex(repl));
return NULL;
#define TYPE_BITS 3
/*
* object flag allocation:
- * revision.h: 0---------10 26
- * fetch-pack.c: 0---5
- * walker.c: 0-2
- * upload-pack.c: 4 11----------------19
- * builtin/blame.c: 12-13
- * bisect.c: 16
- * bundle.c: 16
- * http-push.c: 16-----19
- * commit.c: 16-----19
- * sha1_name.c: 20
- * list-objects-filter.c: 21
- * builtin/fsck.c: 0--3
+ * revision.h: 0---------10 26
+ * fetch-pack.c: 0----5
+ * walker.c: 0-2
+ * upload-pack.c: 4 11----------------19
+ * builtin/blame.c: 12-13
+ * bisect.c: 16
+ * bundle.c: 16
+ * http-push.c: 16-----19
+ * commit.c: 16-----19
+ * sha1_name.c: 20
+ * list-objects-filter.c: 21
+ * builtin/fsck.c: 0--3
+ * builtin/index-pack.c: 2021
+ * builtin/pack-objects.c: 20
+ * builtin/reflog.c: 10--12
+ * builtin/unpack-objects.c: 2021
*/
#define FLAG_BITS 27
struct object_id oid;
};
-extern const char *typename(unsigned int type);
+extern const char *type_name(unsigned int type);
extern int type_from_string_gently(const char *str, ssize_t, int gentle);
#define type_from_string(str) type_from_string_gently(str, -1, 0)
void *oidmap_get(const struct oidmap *map, const struct object_id *key)
{
+ if (!map->map.cmpfn)
+ return NULL;
+
return hashmap_get_from_hash(&map->map, hash(key), key);
}
void *oidmap_remove(struct oidmap *map, const struct object_id *key)
{
struct hashmap_entry entry;
+
+ if (!map->map.cmpfn)
+ oidmap_init(map, 0);
+
hashmap_entry_init(&entry, hash(key));
return hashmap_remove(&map->map, &entry, key);
}
void *oidmap_put(struct oidmap *map, void *entry)
{
struct oidmap_entry *to_put = entry;
+
+ if (!map->map.cmpfn)
+ oidmap_init(map, 0);
+
hashmap_entry_init(&to_put->internal_entry, hash(&to_put->oid));
return hashmap_put(&map->map, to_put);
}
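/*
 * Editor's usage sketch (names illustrative, not part of the patch):
 * with these guards a zero-initialized map becomes directly usable --
 *
 *     static struct oidmap seen;            // zeroed, never oidmap_init()ed
 *     if (!oidmap_get(&seen, &oid))         // miss on the empty map, no crash
 *             oidmap_put(&seen, entry);     // initializes the hashmap lazily
 */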
}
-static int sha1write_ewah_helper(void *f, const void *buf, size_t len)
+static int hashwrite_ewah_helper(void *f, const void *buf, size_t len)
{
- /* sha1write will die on error */
- sha1write(f, buf, len);
+ /* hashwrite will die on error */
+ hashwrite(f, buf, len);
return len;
}
/**
* Write the bitmap index to disk
*/
-static inline void dump_bitmap(struct sha1file *f, struct ewah_bitmap *bitmap)
+static inline void dump_bitmap(struct hashfile *f, struct ewah_bitmap *bitmap)
{
- if (ewah_serialize_to(bitmap, sha1write_ewah_helper, f) < 0)
+ if (ewah_serialize_to(bitmap, hashwrite_ewah_helper, f) < 0)
die("Failed to write bitmap index");
}
return index[pos]->oid.hash;
}
-static void write_selected_commits_v1(struct sha1file *f,
+static void write_selected_commits_v1(struct hashfile *f,
struct pack_idx_entry **index,
uint32_t index_nr)
{
if (commit_pos < 0)
die("BUG: trying to write commit not in index");
- sha1write_be32(f, commit_pos);
- sha1write_u8(f, stored->xor_offset);
- sha1write_u8(f, stored->flags);
+ hashwrite_be32(f, commit_pos);
+ hashwrite_u8(f, stored->xor_offset);
+ hashwrite_u8(f, stored->flags);
dump_bitmap(f, stored->write_as);
}
}
-static void write_hash_cache(struct sha1file *f,
+static void write_hash_cache(struct hashfile *f,
struct pack_idx_entry **index,
uint32_t index_nr)
{
for (i = 0; i < index_nr; ++i) {
struct object_entry *entry = (struct object_entry *)index[i];
uint32_t hash_value = htonl(entry->hash);
- sha1write(f, &hash_value, sizeof(hash_value));
+ hashwrite(f, &hash_value, sizeof(hash_value));
}
}
static uint16_t default_version = 1;
static uint16_t flags = BITMAP_OPT_FULL_DAG;
struct strbuf tmp_file = STRBUF_INIT;
- struct sha1file *f;
+ struct hashfile *f;
struct bitmap_disk_header header;
int fd = odb_mkstemp(&tmp_file, "pack/tmp_bitmap_XXXXXX");
- f = sha1fd(fd, tmp_file.buf);
+ f = hashfd(fd, tmp_file.buf);
memcpy(header.magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE));
header.version = htons(default_version);
header.entry_count = htonl(writer.selected_nr);
hashcpy(header.checksum, writer.pack_checksum);
- sha1write(f, &header, sizeof(header));
+ hashwrite(f, &header, sizeof(header));
dump_bitmap(f, writer.commits);
dump_bitmap(f, writer.trees);
dump_bitmap(f, writer.blobs);
if (options & BITMAP_OPT_HASH_CACHE)
write_hash_cache(f, index, index_nr);
- sha1close(f, NULL, CSUM_FSYNC);
+ hashclose(f, NULL, CSUM_FSYNC);
if (adjust_shared_perm(tmp_file.buf))
die_errno("unable to make temporary bitmap file readable");
} while (len);
index_crc = p->index_data;
- index_crc += 2 + 256 + p->num_objects * (20/4) + nr;
+ index_crc += 2 + 256 + p->num_objects * (the_hash_algo->rawsz/4) + nr;
return data_crc != ntohl(*index_crc);
}
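/*
 * Editor's note on the arithmetic above (v2 pack-index layout): the
 * CRC table follows a two-word header, the 256-entry fanout, and
 * num_objects object names of rawsz bytes each; since index_crc is a
 * uint32_t pointer, the name table advances it by rawsz/4 words per
 * object -- previously hard-coded as 20/4 for SHA-1.
 */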
{
off_t index_size = p->index_size;
const unsigned char *index_base = p->index_data;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
unsigned char hash[GIT_MAX_RAWSZ], *pack_sig;
off_t offset = 0, pack_sig_ofs = 0;
uint32_t nr_objects, i;
if (!is_pack_valid(p))
return error("packfile %s cannot be accessed", p->pack_name);
- git_SHA1_Init(&ctx);
+ the_hash_algo->init_fn(&ctx);
do {
unsigned long remaining;
unsigned char *in = use_pack(p, w_curs, offset, &remaining);
offset += remaining;
if (!pack_sig_ofs)
- pack_sig_ofs = p->pack_size - 20;
+ pack_sig_ofs = p->pack_size - the_hash_algo->rawsz;
if (offset > pack_sig_ofs)
remaining -= (unsigned int)(offset - pack_sig_ofs);
- git_SHA1_Update(&ctx, in, remaining);
+ the_hash_algo->update_fn(&ctx, in, remaining);
} while (offset < pack_sig_ofs);
- git_SHA1_Final(hash, &ctx);
+ the_hash_algo->final_fn(hash, &ctx);
pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL);
if (hashcmp(hash, pack_sig))
- err = error("%s SHA1 checksum mismatch",
+ err = error("%s pack checksum mismatch",
p->pack_name);
- if (hashcmp(index_base + index_size - 40, pack_sig))
- err = error("%s SHA1 does not match its index",
+ if (hashcmp(index_base + index_size - the_hash_algo->hexsz, pack_sig))
+ err = error("%s pack checksum does not match its index",
p->pack_name);
unuse_pack(w_curs);
err = error("cannot unpack %s from %s at offset %"PRIuMAX"",
oid_to_hex(entries[i].oid.oid), p->pack_name,
(uintmax_t)entries[i].offset);
- else if (check_sha1_signature(entries[i].oid.hash, data, size, typename(type)))
+ else if (check_sha1_signature(entries[i].oid.hash, data, size, type_name(type)))
err = error("packed %s from %s is corrupt",
oid_to_hex(entries[i].oid.oid), p->pack_name);
else if (fn) {
{
off_t index_size;
const unsigned char *index_base;
- git_SHA_CTX ctx;
- unsigned char sha1[20];
+ git_hash_ctx ctx;
+ unsigned char hash[GIT_MAX_RAWSZ];
int err = 0;
if (open_pack_index(p))
index_base = p->index_data;
	/* Verify the checksum of the index file */
- git_SHA1_Init(&ctx);
- git_SHA1_Update(&ctx, index_base, (unsigned int)(index_size - 20));
- git_SHA1_Final(sha1, &ctx);
- if (hashcmp(sha1, index_base + index_size - 20))
- err = error("Packfile index for %s SHA1 mismatch",
+ the_hash_algo->init_fn(&ctx);
+ the_hash_algo->update_fn(&ctx, index_base, (unsigned int)(index_size - the_hash_algo->rawsz));
+ the_hash_algo->final_fn(hash, &ctx);
+ if (hashcmp(hash, index_base + index_size - the_hash_algo->rawsz))
+ err = error("Packfile index for %s hash mismatch",
p->pack_name);
return err;
}
if (!(off & 0x80000000)) {
p->revindex[i].offset = off;
} else {
- p->revindex[i].offset =
- ((uint64_t)ntohl(*off_64++)) << 32;
- p->revindex[i].offset |=
- ntohl(*off_64++);
+ p->revindex[i].offset = get_be64(off_64);
+ off_64 += 2;
}
p->revindex[i].nr = i;
}
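/*
 * Editor's note: get_be64() reads the 8-byte big-endian offset in one
 * call; off_64 remains a 32-bit pointer, hence the explicit "+= 2" to
 * step over the two halves the old code read individually.
 */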
int nr_objects, const struct pack_idx_option *opts,
const unsigned char *sha1)
{
- struct sha1file *f;
+ struct hashfile *f;
struct pack_idx_entry **sorted_by_sha, **list, **last;
off_t last_obj_offset = 0;
uint32_t array[256];
if (opts->flags & WRITE_IDX_VERIFY) {
assert(index_name);
- f = sha1fd_check(index_name);
+ f = hashfd_check(index_name);
} else {
if (!index_name) {
struct strbuf tmp_file = STRBUF_INIT;
if (fd < 0)
die_errno("unable to create '%s'", index_name);
}
- f = sha1fd(fd, index_name);
+ f = hashfd(fd, index_name);
}
/* if last object's offset is >= 2^31 we should use index V2 */
struct pack_idx_header hdr;
hdr.idx_signature = htonl(PACK_IDX_SIGNATURE);
hdr.idx_version = htonl(index_version);
- sha1write(f, &hdr, sizeof(hdr));
+ hashwrite(f, &hdr, sizeof(hdr));
}
/*
array[i] = htonl(next - sorted_by_sha);
list = next;
}
- sha1write(f, array, 256 * 4);
+ hashwrite(f, array, 256 * 4);
/*
* Write the actual SHA1 entries..
struct pack_idx_entry *obj = *list++;
if (index_version < 2) {
uint32_t offset = htonl(obj->offset);
- sha1write(f, &offset, 4);
+ hashwrite(f, &offset, 4);
}
- sha1write(f, obj->oid.hash, 20);
+ hashwrite(f, obj->oid.hash, the_hash_algo->rawsz);
if ((opts->flags & WRITE_IDX_STRICT) &&
(i && !oidcmp(&list[-2]->oid, &obj->oid)))
die("The same object %s appears twice in the pack",
for (i = 0; i < nr_objects; i++) {
struct pack_idx_entry *obj = *list++;
uint32_t crc32_val = htonl(obj->crc32);
- sha1write(f, &crc32_val, 4);
+ hashwrite(f, &crc32_val, 4);
}
/* write the 32-bit offset table */
? (0x80000000 | nr_large_offset++)
: obj->offset);
offset = htonl(offset);
- sha1write(f, &offset, 4);
+ hashwrite(f, &offset, 4);
}
/* write the large offset table */
continue;
split[0] = htonl(offset >> 32);
split[1] = htonl(offset & 0xffffffff);
- sha1write(f, split, 8);
+ hashwrite(f, split, 8);
nr_large_offset--;
}
}
- sha1write(f, sha1, 20);
- sha1close(f, NULL, ((opts->flags & WRITE_IDX_VERIFY)
+ hashwrite(f, sha1, the_hash_algo->rawsz);
+ hashclose(f, NULL, ((opts->flags & WRITE_IDX_VERIFY)
? CSUM_CLOSE : CSUM_FSYNC));
return index_name;
}
-off_t write_pack_header(struct sha1file *f, uint32_t nr_entries)
+off_t write_pack_header(struct hashfile *f, uint32_t nr_entries)
{
struct pack_header hdr;
hdr.hdr_signature = htonl(PACK_SIGNATURE);
hdr.hdr_version = htonl(PACK_VERSION);
hdr.hdr_entries = htonl(nr_entries);
- sha1write(f, &hdr, sizeof(hdr));
+ hashwrite(f, &hdr, sizeof(hdr));
return sizeof(hdr);
}
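/*
 * Editor's note: the pack header is three network-order 32-bit words
 * (the "PACK" signature, the version, and the object count), so the
 * sizeof(hdr) returned above is 12 bytes regardless of which hash
 * algorithm is in use.
 */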
 * interested in the resulting hash of pack data above partial_pack_offset.
*/
void fixup_pack_header_footer(int pack_fd,
- unsigned char *new_pack_sha1,
+ unsigned char *new_pack_hash,
const char *pack_name,
uint32_t object_count,
- unsigned char *partial_pack_sha1,
+ unsigned char *partial_pack_hash,
off_t partial_pack_offset)
{
int aligned_sz, buf_sz = 8 * 1024;
- git_SHA_CTX old_sha1_ctx, new_sha1_ctx;
+ git_hash_ctx old_hash_ctx, new_hash_ctx;
struct pack_header hdr;
char *buf;
ssize_t read_result;
- git_SHA1_Init(&old_sha1_ctx);
- git_SHA1_Init(&new_sha1_ctx);
+ the_hash_algo->init_fn(&old_hash_ctx);
+ the_hash_algo->init_fn(&new_hash_ctx);
if (lseek(pack_fd, 0, SEEK_SET) != 0)
die_errno("Failed seeking to start of '%s'", pack_name);
pack_name);
if (lseek(pack_fd, 0, SEEK_SET) != 0)
die_errno("Failed seeking to start of '%s'", pack_name);
- git_SHA1_Update(&old_sha1_ctx, &hdr, sizeof(hdr));
+ the_hash_algo->update_fn(&old_hash_ctx, &hdr, sizeof(hdr));
hdr.hdr_entries = htonl(object_count);
- git_SHA1_Update(&new_sha1_ctx, &hdr, sizeof(hdr));
+ the_hash_algo->update_fn(&new_hash_ctx, &hdr, sizeof(hdr));
write_or_die(pack_fd, &hdr, sizeof(hdr));
partial_pack_offset -= sizeof(hdr);
aligned_sz = buf_sz - sizeof(hdr);
for (;;) {
ssize_t m, n;
- m = (partial_pack_sha1 && partial_pack_offset < aligned_sz) ?
+ m = (partial_pack_hash && partial_pack_offset < aligned_sz) ?
partial_pack_offset : aligned_sz;
n = xread(pack_fd, buf, m);
if (!n)
break;
if (n < 0)
die_errno("Failed to checksum '%s'", pack_name);
- git_SHA1_Update(&new_sha1_ctx, buf, n);
+ the_hash_algo->update_fn(&new_hash_ctx, buf, n);
aligned_sz -= n;
if (!aligned_sz)
aligned_sz = buf_sz;
- if (!partial_pack_sha1)
+ if (!partial_pack_hash)
continue;
- git_SHA1_Update(&old_sha1_ctx, buf, n);
+ the_hash_algo->update_fn(&old_hash_ctx, buf, n);
partial_pack_offset -= n;
if (partial_pack_offset == 0) {
- unsigned char sha1[20];
- git_SHA1_Final(sha1, &old_sha1_ctx);
- if (hashcmp(sha1, partial_pack_sha1) != 0)
+ unsigned char hash[GIT_MAX_RAWSZ];
+ the_hash_algo->final_fn(hash, &old_hash_ctx);
+ if (hashcmp(hash, partial_pack_hash) != 0)
die("Unexpected checksum for %s "
"(disk corruption?)", pack_name);
/*
* pack, which also means making partial_pack_offset
* big enough not to matter anymore.
*/
- git_SHA1_Init(&old_sha1_ctx);
+ the_hash_algo->init_fn(&old_hash_ctx);
partial_pack_offset = ~partial_pack_offset;
partial_pack_offset -= MSB(partial_pack_offset, 1);
}
}
free(buf);
- if (partial_pack_sha1)
- git_SHA1_Final(partial_pack_sha1, &old_sha1_ctx);
- git_SHA1_Final(new_pack_sha1, &new_sha1_ctx);
- write_or_die(pack_fd, new_pack_sha1, 20);
+ if (partial_pack_hash)
+ the_hash_algo->final_fn(partial_pack_hash, &old_hash_ctx);
+ the_hash_algo->final_fn(new_pack_hash, &new_hash_ctx);
+ write_or_die(pack_fd, new_pack_hash, the_hash_algo->rawsz);
fsync_or_die(pack_fd, pack_name);
}
char *index_pack_lockfile(int ip_out)
{
- char packname[46];
+ char packname[GIT_MAX_HEXSZ + 6];
+ const int len = the_hash_algo->hexsz + 6;
/*
* The first thing we expect from index-pack's output
* case, we need it to remove the corresponding .keep file
* later on. If we don't get that then tough luck with it.
*/
- if (read_in_full(ip_out, packname, 46) == 46 && packname[45] == '\n') {
+ if (read_in_full(ip_out, packname, len) == len && packname[len-1] == '\n') {
const char *name;
- packname[45] = 0;
+ packname[len-1] = 0;
if (skip_prefix(packname, "keep\t", &name))
return xstrfmt("%s/pack/pack-%s.keep",
get_object_directory(), name);
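/*
 * Editor's note (derived from the constants above): the "+ 6" covers
 * the "keep\t" prefix (five bytes) plus the trailing newline, so with
 * SHA-1 the expected line is 5 + 40 + 1 = 46 bytes -- the magic number
 * this change replaces.
 */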
return n;
}
-struct sha1file *create_tmp_packfile(char **pack_tmp_name)
+struct hashfile *create_tmp_packfile(char **pack_tmp_name)
{
struct strbuf tmpname = STRBUF_INIT;
int fd;
fd = odb_mkstemp(&tmpname, "pack/tmp_pack_XXXXXX");
*pack_tmp_name = strbuf_detach(&tmpname, NULL);
- return sha1fd(fd, *pack_tmp_name);
+ return hashfd(fd, *pack_tmp_name);
}
void finish_tmp_packfile(struct strbuf *name_buffer,
extern int check_pack_crc(struct packed_git *p, struct pack_window **w_curs, off_t offset, off_t len, unsigned int nr);
extern int verify_pack_index(struct packed_git *);
extern int verify_pack(struct packed_git *, verify_fn fn, struct progress *, uint32_t);
-extern off_t write_pack_header(struct sha1file *f, uint32_t);
+extern off_t write_pack_header(struct hashfile *f, uint32_t);
extern void fixup_pack_header_footer(int, unsigned char *, const char *, uint32_t, unsigned char *, off_t);
extern char *index_pack_lockfile(int fd);
#define PH_ERROR_PROTOCOL (-3)
extern int read_pack_header(int fd, struct pack_header *);
-extern struct sha1file *create_tmp_packfile(char **pack_tmp_name);
+extern struct hashfile *create_tmp_packfile(char **pack_tmp_name);
extern void finish_tmp_packfile(struct strbuf *name_buffer, const char *pack_tmp_name, struct pack_idx_entry **written_list, uint32_t nr_written, struct pack_idx_option *pack_idx_opts, unsigned char sha1[]);
#endif
#include "cache.h"
-#include "mru.h"
+#include "list.h"
#include "pack.h"
#include "dir.h"
#include "mergesort.h"
#include "list.h"
#include "streaming.h"
#include "sha1-lookup.h"
+#include "commit.h"
+#include "object.h"
+#include "tag.h"
+#include "tree-walk.h"
+#include "tree.h"
char *odb_pack_name(struct strbuf *buf,
const unsigned char *sha1,
static size_t peak_pack_mapped;
static size_t pack_mapped;
struct packed_git *packed_git;
-struct mru packed_git_mru;
+LIST_HEAD(packed_git_mru);
#define SZ_FMT PRIuMAX
static inline uintmax_t sz_fmt(size_t s) { return s; }
return NULL;
/*
- * ".pack" is long enough to hold any suffix we're adding (and
+ * ".promisor" is long enough to hold any suffix we're adding (and
	 * the use of xsnprintf double-checks that)
*/
- alloc = st_add3(path_len, strlen(".pack"), 1);
+ alloc = st_add3(path_len, strlen(".promisor"), 1);
p = alloc_packed_git(alloc);
memcpy(p->pack_name, path, path_len);
if (!access(p->pack_name, F_OK))
p->pack_keep = 1;
+ xsnprintf(p->pack_name + path_len, alloc - path_len, ".promisor");
+ if (!access(p->pack_name, F_OK))
+ p->pack_promisor = 1;
+
xsnprintf(p->pack_name + path_len, alloc - path_len, ".pack");
if (stat(p->pack_name, &st) || !S_ISREG(st.st_mode)) {
free(p);
if (ends_with(de->d_name, ".idx") ||
ends_with(de->d_name, ".pack") ||
ends_with(de->d_name, ".bitmap") ||
- ends_with(de->d_name, ".keep"))
+ ends_with(de->d_name, ".keep") ||
+ ends_with(de->d_name, ".promisor"))
string_list_append(&garbage, path.buf);
else
report_garbage(PACKDIR_FILE_GARBAGE, path.buf);
{
struct packed_git *p;
- mru_clear(&packed_git_mru);
+ INIT_LIST_HEAD(&packed_git_mru);
+
for (p = packed_git; p; p = p->next)
- mru_append(&packed_git_mru, p);
+ list_add_tail(&p->mru, &packed_git_mru);
}
static int prepare_packed_git_run_once = 0;
*oi->disk_sizep = revidx[1].offset - obj_offset;
}
- if (oi->typep || oi->typename) {
+ if (oi->typep || oi->type_name) {
enum object_type ptot;
ptot = packed_to_object_type(p, obj_offset, type, &w_curs,
curpos);
if (oi->typep)
*oi->typep = ptot;
- if (oi->typename) {
- const char *tn = typename(ptot);
+ if (oi->type_name) {
+ const char *tn = type_name(ptot);
if (tn)
- strbuf_addstr(oi->typename, tn);
+ strbuf_addstr(oi->type_name, tn);
}
if (ptot < 0) {
type = OBJ_BAD;
return off;
index += p->num_objects * 4 + (off & 0x7fffffff) * 8;
check_pack_index_ptr(p, index);
- return (((uint64_t)ntohl(*((uint32_t *)(index + 0)))) << 32) |
- ntohl(*((uint32_t *)(index + 4)));
+ return get_be64(index);
}
}
{
const uint32_t *level1_ofs = p->index_data;
const unsigned char *index = p->index_data;
- unsigned hi, lo, stride;
- static int debug_lookup = -1;
-
- if (debug_lookup < 0)
- debug_lookup = !!getenv("GIT_DEBUG_LOOKUP");
+ unsigned stride;
+ uint32_t result;
if (!index) {
if (open_pack_index(p))
index += 8;
}
index += 4 * 256;
- hi = ntohl(level1_ofs[*sha1]);
- lo = ((*sha1 == 0x0) ? 0 : ntohl(level1_ofs[*sha1 - 1]));
if (p->index_version > 1) {
stride = 20;
} else {
index += 4;
}
- if (debug_lookup)
- printf("%02x%02x%02x... lo %u hi %u nr %"PRIu32"\n",
- sha1[0], sha1[1], sha1[2], lo, hi, p->num_objects);
-
- while (lo < hi) {
- unsigned mi = lo + (hi - lo) / 2;
- int cmp = hashcmp(index + mi * stride, sha1);
-
- if (debug_lookup)
- printf("lo %u hi %u rg %u mi %u\n",
- lo, hi, hi - lo, mi);
- if (!cmp)
- return nth_packed_object_offset(p, mi);
- if (cmp > 0)
- hi = mi;
- else
- lo = mi+1;
- }
+ if (bsearch_hash(sha1, level1_ofs, index, stride, &result))
+ return nth_packed_object_offset(p, result);
return 0;
}
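/*
 * Editor's sketch (an assumption about the bsearch_hash() helper the
 * hunk above now calls; the real declaration lives in sha1-lookup.h):
 * it narrows the search range via the byte-indexed fanout table, then
 * binary-searches the sorted table of hashes spaced "stride" bytes
 * apart, returning 1 with the position on a hit -- the same loop the
 * deleted code open-coded.
 */
static int bsearch_hash_sketch(const unsigned char *sha1,
			       const uint32_t *fanout_nbo,
			       const unsigned char *table,
			       size_t stride, uint32_t *result)
{
	uint32_t hi, lo;

	/* Restrict the range using the fanout entry for the first byte. */
	hi = ntohl(fanout_nbo[*sha1]);
	lo = *sha1 ? ntohl(fanout_nbo[*sha1 - 1]) : 0;

	while (lo < hi) {
		unsigned mi = lo + (hi - lo) / 2;
		int cmp = hashcmp(table + mi * stride, sha1);

		if (!cmp) {
			*result = mi;
			return 1;
		}
		if (cmp > 0)
			hi = mi;
		else
			lo = mi + 1;
	}
	*result = lo;	/* position where the hash would be inserted */
	return 0;
}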
*/
int find_pack_entry(const unsigned char *sha1, struct pack_entry *e)
{
- struct mru_entry *p;
+ struct list_head *pos;
prepare_packed_git();
if (!packed_git)
return 0;
- for (p = packed_git_mru.head; p; p = p->next) {
- if (fill_pack_entry(sha1, e, p->item)) {
- mru_mark(&packed_git_mru, p);
+ list_for_each(pos, &packed_git_mru) {
+ struct packed_git *p = list_entry(pos, struct packed_git, mru);
+ if (fill_pack_entry(sha1, e, p)) {
+ list_move(&p->mru, &packed_git_mru);
return 1;
}
}
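/*
 * Editor's note: list_move() unlinks the entry and re-adds it at the
 * head of packed_git_mru, giving the same most-recently-used promotion
 * that the deleted mru_mark() provided, but via the generic list.h
 * macros instead of the bespoke mru structure.
 */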
for (p = packed_git; p; p = p->next) {
if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
continue;
+ if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) &&
+ !p->pack_promisor)
+ continue;
if (open_pack_index(p)) {
pack_errors = 1;
continue;
}
return r ? r : pack_errors;
}
+
+static int add_promisor_object(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *set_)
+{
+ struct oidset *set = set_;
+ struct object *obj = parse_object(oid);
+ if (!obj)
+ return 1;
+
+ oidset_insert(set, oid);
+
+ /*
+ * If this is a tree, commit, or tag, the objects it refers
+ * to are also promisor objects. (Blobs refer to no objects.)
+ */
+ if (obj->type == OBJ_TREE) {
+ struct tree *tree = (struct tree *)obj;
+ struct tree_desc desc;
+ struct name_entry entry;
+ if (init_tree_desc_gently(&desc, tree->buffer, tree->size))
+ /*
+ * Error messages are given when packs are
+ * verified, so do not print any here.
+ */
+ return 0;
+ while (tree_entry_gently(&desc, &entry))
+ oidset_insert(set, entry.oid);
+ } else if (obj->type == OBJ_COMMIT) {
+ struct commit *commit = (struct commit *) obj;
+ struct commit_list *parents = commit->parents;
+
+ oidset_insert(set, &commit->tree->object.oid);
+ for (; parents; parents = parents->next)
+ oidset_insert(set, &parents->item->object.oid);
+ } else if (obj->type == OBJ_TAG) {
+ struct tag *tag = (struct tag *) obj;
+ oidset_insert(set, &tag->tagged->oid);
+ }
+ return 0;
+}
+
+int is_promisor_object(const struct object_id *oid)
+{
+ static struct oidset promisor_objects;
+ static int promisor_objects_prepared;
+
+ if (!promisor_objects_prepared) {
+ if (repository_format_partial_clone) {
+ for_each_packed_object(add_promisor_object,
+ &promisor_objects,
+ FOR_EACH_OBJECT_PROMISOR_ONLY);
+ }
+ promisor_objects_prepared = 1;
+ }
+ return oidset_contains(&promisor_objects, oid);
+}
#ifndef PACKFILE_H
#define PACKFILE_H
+#include "oidset.h"
+
/*
* Generate the filename to be used for a pack file with checksum "sha1" and
* extension "ext". The result is written into the strbuf "buf", overwriting
extern int has_pack_index(const unsigned char *sha1);
+/*
+ * Only iterate over packs obtained from the promisor remote.
+ */
+#define FOR_EACH_OBJECT_PROMISOR_ONLY 2
+
/*
* Iterate over packed objects in both the local
* repository and any alternates repositories (unless the
void *data);
extern int for_each_packed_object(each_packed_object_fn, void *, unsigned flags);
+/*
+ * Return 1 if an object in a promisor packfile is or refers to the given
+ * object, 0 otherwise.
+ */
+extern int is_promisor_object(const struct object_id *oid);
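/*
 * Editor's usage sketch (hypothetical caller, not part of the patch):
 * a connectivity check may tolerate a missing object when the promisor
 * remote has promised it, e.g.
 *
 *     if (!has_object_file(&oid) && is_promisor_object(&oid))
 *             return 0;   // expected to be fetched on demand
 */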
+
#endif
parse_options_check(options);
}
+/*
+ * TODO: we are not completing the --no-XXX form yet because there are
+ * many options that do not suppress the negated form properly.
+ */
+static int show_gitcomp(struct parse_opt_ctx_t *ctx,
+ const struct option *opts)
+{
+ for (; opts->type != OPTION_END; opts++) {
+ const char *suffix = "";
+
+ if (!opts->long_name)
+ continue;
+ if (opts->flags & (PARSE_OPT_HIDDEN | PARSE_OPT_NOCOMPLETE))
+ continue;
+
+ switch (opts->type) {
+ case OPTION_GROUP:
+ continue;
+ case OPTION_STRING:
+ case OPTION_FILENAME:
+ case OPTION_INTEGER:
+ case OPTION_MAGNITUDE:
+ case OPTION_CALLBACK:
+ if (opts->flags & PARSE_OPT_NOARG)
+ break;
+ if (opts->flags & PARSE_OPT_OPTARG)
+ break;
+ if (opts->flags & PARSE_OPT_LASTARG_DEFAULT)
+ break;
+ suffix = "=";
+ break;
+ default:
+ break;
+ }
+ if (opts->flags & PARSE_OPT_COMP_ARG)
+ suffix = "=";
+ printf(" --%s%s", opts->long_name, suffix);
+ }
+ fputc('\n', stdout);
+ exit(0);
+}
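/*
 * Editor's illustration (output is hypothetical): with this helper,
 * "git <cmd> --git-completion-helper" prints one line such as
 *
 *     --quiet --verbose --force --template=
 *
 * which git-completion.bash word-splits into candidate completions;
 * options whose argument is mandatory carry the "=" suffix so the
 * completion stops right after the equals sign.
 */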
+
static int usage_with_options_internal(struct parse_opt_ctx_t *,
const char * const *,
const struct option *, int, int);
if (internal_help && ctx->total == 1 && !strcmp(arg + 1, "h"))
goto show_usage;
+	/* a lone --git-completion-helper is used by git-completion.bash */
+ if (ctx->total == 1 && !strcmp(arg + 1, "-git-completion-helper"))
+ return show_gitcomp(ctx, options);
+
if (arg[1] != '-') {
ctx->opt = arg + 1;
switch (parse_short_opt(ctx, options)) {
int parse_options_end(struct parse_opt_ctx_t *ctx)
{
- memmove(ctx->out + ctx->cpidx, ctx->argv, ctx->argc * sizeof(*ctx->out));
+ MOVE_ARRAY(ctx->out + ctx->cpidx, ctx->argv, ctx->argc);
ctx->out[ctx->cpidx + ctx->argc] = NULL;
return ctx->cpidx + ctx->argc;
}
PARSE_OPT_LASTARG_DEFAULT = 16,
PARSE_OPT_NODASH = 32,
PARSE_OPT_LITERAL_ARGHELP = 64,
- PARSE_OPT_SHELL_EVAL = 256
+ PARSE_OPT_SHELL_EVAL = 256,
+ PARSE_OPT_NOCOMPLETE = 512,
+ PARSE_OPT_COMP_ARG = 1024
};
struct option;
* PARSE_OPT_LITERAL_ARGHELP: says that argh shouldn't be enclosed in brackets
* (i.e. '<argh>') in the help message.
* Useful for options with multiple parameters.
+ * PARSE_OPT_NOCOMPLETE: by default all visible options are completable
+ * by git-completion.bash. This option suppresses that.
+ * PARSE_OPT_COMP_ARG: this option forces git-completion.bash to
+ *		       complete an option as --name= (not --name) even if
+ *		       the option takes an optional argument.
*
* `callback`::
* pointer to the callback to use for OPTION_CALLBACK or
intptr_t defval;
};
+#define OPT_BIT_F(s, l, v, h, b, f) { OPTION_BIT, (s), (l), (v), NULL, (h), \
+ PARSE_OPT_NOARG|(f), NULL, (b) }
+#define OPT_COUNTUP_F(s, l, v, h, f) { OPTION_COUNTUP, (s), (l), (v), NULL, \
+ (h), PARSE_OPT_NOARG|(f) }
+#define OPT_SET_INT_F(s, l, v, h, i, f) { OPTION_SET_INT, (s), (l), (v), NULL, \
+ (h), PARSE_OPT_NOARG | (f), NULL, (i) }
+#define OPT_BOOL_F(s, l, v, h, f) OPT_SET_INT_F(s, l, v, h, 1, f)
+
#define OPT_END() { OPTION_END }
#define OPT_ARGUMENT(l, h) { OPTION_ARGUMENT, 0, (l), NULL, NULL, \
(h), PARSE_OPT_NOARG}
#define OPT_GROUP(h) { OPTION_GROUP, 0, NULL, NULL, NULL, (h) }
-#define OPT_BIT(s, l, v, h, b) { OPTION_BIT, (s), (l), (v), NULL, (h), \
- PARSE_OPT_NOARG, NULL, (b) }
+#define OPT_BIT(s, l, v, h, b) OPT_BIT_F(s, l, v, h, b, 0)
#define OPT_NEGBIT(s, l, v, h, b) { OPTION_NEGBIT, (s), (l), (v), NULL, \
(h), PARSE_OPT_NOARG, NULL, (b) }
-#define OPT_COUNTUP(s, l, v, h) { OPTION_COUNTUP, (s), (l), (v), NULL, \
- (h), PARSE_OPT_NOARG }
-#define OPT_SET_INT(s, l, v, h, i) { OPTION_SET_INT, (s), (l), (v), NULL, \
- (h), PARSE_OPT_NOARG, NULL, (i) }
-#define OPT_BOOL(s, l, v, h) OPT_SET_INT(s, l, v, h, 1)
+#define OPT_COUNTUP(s, l, v, h) OPT_COUNTUP_F(s, l, v, h, 0)
+#define OPT_SET_INT(s, l, v, h, i) OPT_SET_INT_F(s, l, v, h, i, 0)
+#define OPT_BOOL(s, l, v, h) OPT_BOOL_F(s, l, v, h, 0)
#define OPT_HIDDEN_BOOL(s, l, v, h) { OPTION_SET_INT, (s), (l), (v), NULL, \
(h), PARSE_OPT_NOARG | PARSE_OPT_HIDDEN, NULL, 1}
#define OPT_CMDMODE(s, l, v, h, i) { OPTION_CMDMODE, (s), (l), (v), NULL, \
{ OPTION_CALLBACK, 'q', "quiet", (var), NULL, N_("be more quiet"), \
PARSE_OPT_NOARG, &parse_opt_verbosity_cb, 0 }
#define OPT__DRY_RUN(var, h) OPT_BOOL('n', "dry-run", (var), (h))
-#define OPT__FORCE(var, h) OPT_COUNTUP('f', "force", (var), (h))
+#define OPT__FORCE(var, h, f) OPT_COUNTUP_F('f', "force", (var), (h), (f))
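/*
 * Editor's sketch (hypothetical caller): a command that wants -f/--force
 * to keep working but not be advertised to the completion script would
 * now write
 *
 *     OPT__FORCE(&force, N_("force removal"), PARSE_OPT_NOCOMPLETE),
 *
 * instead of the old two-argument OPT__FORCE().
 */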
#define OPT__ABBREV(var) \
{ OPTION_CALLBACK, 0, "abbrev", (var), N_("n"), \
N_("use <n> digits to display SHA-1s"), \
-perl.mak
-perl.mak.old
-MYMETA.json
-MYMETA.yml
-blib
-blibdirs
-pm_to_blib
-PM.stamp
+/build/
--- /dev/null
+/Error.pm whitespace=-blank-at-eof
--- /dev/null
+# Error.pm
+#
+# Copyright (c) 1997-8 Graham Barr <gbarr@ti.com>. All rights reserved.
+# This program is free software; you can redistribute it and/or
+# modify it under the same terms as Perl itself.
+#
+# Based on my original Error.pm, and Exceptions.pm by Peter Seibel
+# <peter@weblogic.com> and adapted by Jesse Glick <jglick@sig.bsh.com>.
+#
+# but modified ***significantly***
+
+package Error;
+
+use strict;
+use warnings;
+
+use vars qw($VERSION);
+use 5.004;
+
+$VERSION = "0.17025";
+
+use overload (
+ '""' => 'stringify',
+ '0+' => 'value',
+ 'bool' => sub { return 1; },
+ 'fallback' => 1
+);
+
+$Error::Depth = 0; # Depth to pass to caller()
+$Error::Debug = 0; # Generate verbose stack traces
+@Error::STACK = (); # Clause stack for try
+$Error::THROWN = undef; # last error thrown, a workaround until die $ref works
+
+my $LAST; # Last error created
+my %ERROR; # Last error associated with package
+
+sub _throw_Error_Simple
+{
+ my $args = shift;
+ return Error::Simple->new($args->{'text'});
+}
+
+$Error::ObjectifyCallback = \&_throw_Error_Simple;
+
+
+# Exported subs are defined in Error::subs
+
+use Scalar::Util ();
+
+sub import {
+ shift;
+ my @tags = @_;
+ local $Exporter::ExportLevel = $Exporter::ExportLevel + 1;
+
+ @tags = grep {
+ if( $_ eq ':warndie' ) {
+ Error::WarnDie->import();
+ 0;
+ }
+ else {
+ 1;
+ }
+ } @tags;
+
+ Error::subs->import(@tags);
+}
+
+# I really want to use "last" for the name of this method, but it is a
+# keyword, which prevents the syntax "last Error".
+
+sub prior {
+ shift; # ignore
+
+ return $LAST unless @_;
+
+ my $pkg = shift;
+ return exists $ERROR{$pkg} ? $ERROR{$pkg} : undef
+ unless ref($pkg);
+
+ my $obj = $pkg;
+ my $err = undef;
+ if($obj->isa('HASH')) {
+ $err = $obj->{'__Error__'}
+ if exists $obj->{'__Error__'};
+ }
+ elsif($obj->isa('GLOB')) {
+ $err = ${*$obj}{'__Error__'}
+ if exists ${*$obj}{'__Error__'};
+ }
+
+ $err;
+}
+
+sub flush {
+ shift; #ignore
+
+ unless (@_) {
+ $LAST = undef;
+ return;
+ }
+
+ my $pkg = shift;
+ return unless ref($pkg);
+
+ undef $ERROR{$pkg} if defined $ERROR{$pkg};
+}
+
+# Return as much information as possible about where the error
+# happened. The -stacktrace element only exists if $Error::Debug
+# was set when the error was created
+
+sub stacktrace {
+ my $self = shift;
+
+ return $self->{'-stacktrace'}
+ if exists $self->{'-stacktrace'};
+
+ my $text = exists $self->{'-text'} ? $self->{'-text'} : "Died";
+
+ $text .= sprintf(" at %s line %d.\n", $self->file, $self->line)
+ unless($text =~ /\n$/s);
+
+ $text;
+}
+
+
+sub associate {
+ my $err = shift;
+ my $obj = shift;
+
+ return unless ref($obj);
+
+ if($obj->isa('HASH')) {
+ $obj->{'__Error__'} = $err;
+ }
+ elsif($obj->isa('GLOB')) {
+ ${*$obj}{'__Error__'} = $err;
+ }
+ $obj = ref($obj);
+ $ERROR{ ref($obj) } = $err;
+
+ return;
+}
+
+
+sub new {
+ my $self = shift;
+ my($pkg,$file,$line) = caller($Error::Depth);
+
+ my $err = bless {
+ '-package' => $pkg,
+ '-file' => $file,
+ '-line' => $line,
+ @_
+ }, $self;
+
+ $err->associate($err->{'-object'})
+ if(exists $err->{'-object'});
+
+ # To always create a stacktrace would be very inefficient, so
+ # we only do it if $Error::Debug is set
+
+ if($Error::Debug) {
+ require Carp;
+ local $Carp::CarpLevel = $Error::Depth;
+ my $text = defined($err->{'-text'}) ? $err->{'-text'} : "Error";
+ my $trace = Carp::longmess($text);
+ # Remove try calls from the trace
+ $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
+ $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::run_clauses[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
+ $err->{'-stacktrace'} = $trace
+ }
+
+ $@ = $LAST = $ERROR{$pkg} = $err;
+}
+
+# Throw an error. This contains some very gory code.
+
+sub throw {
+ my $self = shift;
+ local $Error::Depth = $Error::Depth + 1;
+
+	# if we are not rethrowing then create the object to throw
+ $self = $self->new(@_) unless ref($self);
+
+ die $Error::THROWN = $self;
+}
+
+# syntactic sugar for
+#
+# die with Error( ... );
+
+sub with {
+ my $self = shift;
+ local $Error::Depth = $Error::Depth + 1;
+
+ $self->new(@_);
+}
+
+# syntactic sugar for
+#
+# record Error( ... ) and return;
+
+sub record {
+ my $self = shift;
+ local $Error::Depth = $Error::Depth + 1;
+
+ $self->new(@_);
+}
+
+# catch clause for
+#
+# try { ... } catch CLASS with { ... }
+
+sub catch {
+ my $pkg = shift;
+ my $code = shift;
+ my $clauses = shift || {};
+ my $catch = $clauses->{'catch'} ||= [];
+
+ unshift @$catch, $pkg, $code;
+
+ $clauses;
+}
+
+# Object query methods
+
+sub object {
+ my $self = shift;
+ exists $self->{'-object'} ? $self->{'-object'} : undef;
+}
+
+sub file {
+ my $self = shift;
+ exists $self->{'-file'} ? $self->{'-file'} : undef;
+}
+
+sub line {
+ my $self = shift;
+ exists $self->{'-line'} ? $self->{'-line'} : undef;
+}
+
+sub text {
+ my $self = shift;
+ exists $self->{'-text'} ? $self->{'-text'} : undef;
+}
+
+# overload methods
+
+sub stringify {
+ my $self = shift;
+ defined $self->{'-text'} ? $self->{'-text'} : "Died";
+}
+
+sub value {
+ my $self = shift;
+ exists $self->{'-value'} ? $self->{'-value'} : undef;
+}
+
+package Error::Simple;
+
+use vars qw($VERSION);
+
+$VERSION = "0.17025";
+
+@Error::Simple::ISA = qw(Error);
+
+sub new {
+ my $self = shift;
+ my $text = "" . shift;
+ my $value = shift;
+ my(@args) = ();
+
+ local $Error::Depth = $Error::Depth + 1;
+
+ @args = ( -file => $1, -line => $2)
+ if($text =~ s/\s+at\s+(\S+)\s+line\s+(\d+)(?:,\s*<[^>]*>\s+line\s+\d+)?\.?\n?$//s);
+ push(@args, '-value', 0 + $value)
+ if defined($value);
+
+ $self->SUPER::new(-text => $text, @args);
+}
+
+sub stringify {
+ my $self = shift;
+ my $text = $self->SUPER::stringify;
+ $text .= sprintf(" at %s line %d.\n", $self->file, $self->line)
+ unless($text =~ /\n$/s);
+ $text;
+}
+
+##########################################################################
+##########################################################################
+
+# Inspired by code from Jesse Glick <jglick@sig.bsh.com> and
+# Peter Seibel <peter@weblogic.com>
+
+package Error::subs;
+
+use Exporter ();
+use vars qw(@EXPORT_OK @ISA %EXPORT_TAGS);
+
+@EXPORT_OK = qw(try with finally except otherwise);
+%EXPORT_TAGS = (try => \@EXPORT_OK);
+
+@ISA = qw(Exporter);
+
+sub run_clauses ($$$\@) {
+ my($clauses,$err,$wantarray,$result) = @_;
+ my $code = undef;
+
+ $err = $Error::ObjectifyCallback->({'text' =>$err}) unless ref($err);
+
+ CATCH: {
+
+ # catch
+ my $catch;
+ if(defined($catch = $clauses->{'catch'})) {
+ my $i = 0;
+
+ CATCHLOOP:
+ for( ; $i < @$catch ; $i += 2) {
+ my $pkg = $catch->[$i];
+ unless(defined $pkg) {
+ #except
+ splice(@$catch,$i,2,$catch->[$i+1]->($err));
+ $i -= 2;
+ next CATCHLOOP;
+ }
+ elsif(Scalar::Util::blessed($err) && $err->isa($pkg)) {
+ $code = $catch->[$i+1];
+ while(1) {
+ my $more = 0;
+ local($Error::THROWN, $@);
+ my $ok = eval {
+ $@ = $err;
+ if($wantarray) {
+ @{$result} = $code->($err,\$more);
+ }
+ elsif(defined($wantarray)) {
+ @{$result} = ();
+ $result->[0] = $code->($err,\$more);
+ }
+ else {
+ $code->($err,\$more);
+ }
+ 1;
+ };
+ if( $ok ) {
+ next CATCHLOOP if $more;
+ undef $err;
+ }
+ else {
+ $err = $@ || $Error::THROWN;
+ $err = $Error::ObjectifyCallback->({'text' =>$err})
+ unless ref($err);
+ }
+ last CATCH;
+ };
+ }
+ }
+ }
+
+ # otherwise
+ my $owise;
+ if(defined($owise = $clauses->{'otherwise'})) {
+ my $code = $clauses->{'otherwise'};
+ my $more = 0;
+ local($Error::THROWN, $@);
+ my $ok = eval {
+ $@ = $err;
+ if($wantarray) {
+ @{$result} = $code->($err,\$more);
+ }
+ elsif(defined($wantarray)) {
+ @{$result} = ();
+ $result->[0] = $code->($err,\$more);
+ }
+ else {
+ $code->($err,\$more);
+ }
+ 1;
+ };
+ if( $ok ) {
+ undef $err;
+ }
+ else {
+ $err = $@ || $Error::THROWN;
+
+ $err = $Error::ObjectifyCallback->({'text' =>$err})
+ unless ref($err);
+ }
+ }
+ }
+ $err;
+}
+
+sub try (&;$) {
+ my $try = shift;
+ my $clauses = @_ ? shift : {};
+ my $ok = 0;
+ my $err = undef;
+ my @result = ();
+
+ unshift @Error::STACK, $clauses;
+
+ my $wantarray = wantarray();
+
+ do {
+ local $Error::THROWN = undef;
+ local $@ = undef;
+
+ $ok = eval {
+ if($wantarray) {
+ @result = $try->();
+ }
+ elsif(defined $wantarray) {
+ $result[0] = $try->();
+ }
+ else {
+ $try->();
+ }
+ 1;
+ };
+
+ $err = $@ || $Error::THROWN
+ unless $ok;
+ };
+
+ shift @Error::STACK;
+
+ $err = run_clauses($clauses,$err,wantarray,@result)
+ unless($ok);
+
+ $clauses->{'finally'}->()
+ if(defined($clauses->{'finally'}));
+
+ if (defined($err))
+ {
+ if (Scalar::Util::blessed($err) && $err->can('throw'))
+ {
+ throw $err;
+ }
+ else
+ {
+ die $err;
+ }
+ }
+
+ wantarray ? @result : $result[0];
+}
+
+# Each clause adds a sub to the list of clauses. The finally clause is
+# always the last, and the otherwise clause is always added just before
+# the finally clause.
+#
+# All clauses, except the finally clause, add a sub which takes one argument;
+# this argument will be the error being thrown. The sub will return a code ref
+# if that clause can handle that error, otherwise undef is returned.
+#
+# The otherwise clause adds a sub which unconditionally returns the user's
+# code reference; this is why it is forced to be last.
+#
+# The catch clause is defined in Error.pm, as the syntax causes it to
+# be called as a method
+
+sub with (&;$) {
+ @_
+}
+
+sub finally (&) {
+ my $code = shift;
+ my $clauses = { 'finally' => $code };
+ $clauses;
+}
+
+# The except clause is a block which returns a hashref or a list of
+# key-value pairs, where the keys are the classes and the values are subs.
+
+sub except (&;$) {
+ my $code = shift;
+ my $clauses = shift || {};
+ my $catch = $clauses->{'catch'} ||= [];
+
+ my $sub = sub {
+ my $ref;
+ my(@array) = $code->($_[0]);
+ if(@array == 1 && ref($array[0])) {
+ $ref = $array[0];
+ $ref = [ %$ref ]
+ if(UNIVERSAL::isa($ref,'HASH'));
+ }
+ else {
+ $ref = \@array;
+ }
+ @$ref
+ };
+
+ unshift @{$catch}, undef, $sub;
+
+ $clauses;
+}
+
+sub otherwise (&;$) {
+ my $code = shift;
+ my $clauses = shift || {};
+
+ if(exists $clauses->{'otherwise'}) {
+ require Carp;
+ Carp::croak("Multiple otherwise clauses");
+ }
+
+ $clauses->{'otherwise'} = $code;
+
+ $clauses;
+}
+
+1;
+
+package Error::WarnDie;
+
+sub gen_callstack($)
+{
+ my ( $start ) = @_;
+
+ require Carp;
+ local $Carp::CarpLevel = $start;
+ my $trace = Carp::longmess("");
+ # Remove try calls from the trace
+ $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
+ $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::run_clauses[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
+ my @callstack = split( m/\n/, $trace );
+ return @callstack;
+}
+
+my $old_DIE;
+my $old_WARN;
+
+sub DEATH
+{
+ my ( $e ) = @_;
+
+ local $SIG{__DIE__} = $old_DIE if( defined $old_DIE );
+
+ die @_ if $^S;
+
+ my ( $etype, $message, $location, @callstack );
+ if ( ref($e) && $e->isa( "Error" ) ) {
+ $etype = "exception of type " . ref( $e );
+ $message = $e->text;
+ $location = $e->file . ":" . $e->line;
+ @callstack = split( m/\n/, $e->stacktrace );
+ }
+ else {
+ # Don't apply subsequent layer of message formatting
+ die $e if( $e =~ m/^\nUnhandled perl error caught at toplevel:\n\n/ );
+ $etype = "perl error";
+ my $stackdepth = 0;
+ while( caller( $stackdepth ) =~ m/^Error(?:$|::)/ ) {
+ $stackdepth++
+ }
+
+ @callstack = gen_callstack( $stackdepth + 1 );
+
+ $message = "$e";
+ chomp $message;
+
+ if ( $message =~ s/ at (.*?) line (\d+)\.$// ) {
+ $location = $1 . ":" . $2;
+ }
+ else {
+ my @caller = caller( $stackdepth );
+ $location = $caller[1] . ":" . $caller[2];
+ }
+ }
+
+ shift @callstack;
+ # Do it this way in case there are no elements; we don't print a spurious \n
+ my $callstack = join( "", map { "$_\n"} @callstack );
+
+ die "\nUnhandled $etype caught at toplevel:\n\n $message\n\nThrown from: $location\n\nFull stack trace:\n\n$callstack\n";
+}
+
+sub TAXES
+{
+ my ( $message ) = @_;
+
+ local $SIG{__WARN__} = $old_WARN if( defined $old_WARN );
+
+ $message =~ s/ at .*? line \d+\.$//;
+ chomp $message;
+
+ my @callstack = gen_callstack( 1 );
+ my $location = shift @callstack;
+
+ # $location already starts in a leading space
+ $message .= $location;
+
+ # Do it this way in case there are no elements; we don't print a spurious \n
+ my $callstack = join( "", map { "$_\n"} @callstack );
+
+ warn "$message:\n$callstack";
+}
+
+sub import
+{
+ $old_DIE = $SIG{__DIE__};
+ $old_WARN = $SIG{__WARN__};
+
+ $SIG{__DIE__} = \&DEATH;
+ $SIG{__WARN__} = \&TAXES;
+}
+
+1;
+
+__END__
+
+=head1 NAME
+
+Error - Error/exception handling in an OO-ish way
+
+=head1 WARNING
+
+Using the "Error" module is B<no longer recommended> due to the black-magical
+nature of its syntactic sugar, which often tends to break. Its maintainers
+have stopped actively writing code that uses it, and discourage people
+from doing so. See the "SEE ALSO" section below for better recommendations.
+
+=head1 SYNOPSIS
+
+ use Error qw(:try);
+
+ throw Error::Simple( "A simple error");
+
+ sub xyz {
+ ...
+ record Error::Simple("A simple error")
+ and return;
+ }
+
+ unlink($file) or throw Error::Simple("$file: $!",$!);
+
+ try {
+ do_some_stuff();
+ die "error!" if $condition;
+ throw Error::Simple "Oops!" if $other_condition;
+ }
+ catch Error::IO with {
+ my $E = shift;
+ print STDERR "File ", $E->{'-file'}, " had a problem\n";
+ }
+ except {
+ my $E = shift;
+ my $general_handler=sub {send_message $E->{-description}};
+ return {
+ UserException1 => $general_handler,
+ UserException2 => $general_handler
+ };
+ }
+ otherwise {
+ print STDERR "Well I don't know what to say\n";
+ }
+ finally {
+ close_the_garage_door_already(); # Should be reliable
+ }; # Don't forget the trailing ; or you might be surprised
+
+=head1 DESCRIPTION
+
+The C<Error> package provides two interfaces. Firstly C<Error> provides
+a procedural interface to exception handling. Secondly C<Error> is a
+base class for errors/exceptions that can either be thrown, for
+subsequent catch, or can simply be recorded.
+
+Errors in the class C<Error> should not be thrown directly, but the
+user should throw errors from a sub-class of C<Error>.
+
+=head1 PROCEDURAL INTERFACE
+
+C<Error> exports subroutines to perform exception handling. These will
+be exported if the C<:try> tag is used in the C<use> line.
+
+=over 4
+
+=item try BLOCK CLAUSES
+
+C<try> is the main subroutine called by the user. All other subroutines
+exported are clauses to the try subroutine.
+
+The BLOCK will be evaluated and, if no error is thrown, try will return
+the result of the block.
+
+C<CLAUSES> are the subroutines below, which describe what to do in the
+event of an error being thrown within BLOCK.
+
+=item catch CLASS with BLOCK
+
+This clause will cause all errors that satisfy C<$err-E<gt>isa(CLASS)>
+to be caught and handled by evaluating C<BLOCK>.
+
+C<BLOCK> will be passed two arguments. The first will be the error
+being thrown. The second is a reference to a scalar variable. If this
+variable is set by the catch block then, on return from the catch
+block, try will continue processing as if the catch block was never
+found. The error will also be available in C<$@>.
+
+To propagate the error the catch block may call C<$err-E<gt>throw>.
+
+If the scalar referenced by the second argument is not set, and the
+error is not re-thrown, then the current try block will return with the
+result from the catch block.
+
+=item except BLOCK
+
+When C<try> is looking for a handler, if an except clause is found,
+C<BLOCK> is evaluated. The return value from this block should be a
+HASHREF or a list of key-value pairs, where the keys are class names
+and the values are CODE references for the handler of errors of that
+type.
+
+=item otherwise BLOCK
+
+Catch any error by executing the code in C<BLOCK>.
+
+When evaluated C<BLOCK> will be passed one argument, which will be the
+error being processed. The error will also be available in C<$@>.
+
+Only one otherwise block may be specified per try block.
+
+=item finally BLOCK
+
+Execute the code in C<BLOCK> either after the code in the try block has
+successfully completed, or, if the try block throws an error, after
+the handler has completed.
+
+If the handler throws an error then the error will be caught, the
+finally block will be executed and the error will be re-thrown.
+
+Only one finally block may be specified per try block.
+
+=back
+
+=head1 COMPATIBILITY
+
+L<Moose> exports a keyword called C<with> which clashes with Error's. This
+example returns a prototype mismatch error:
+
+ package MyTest;
+
+ use warnings;
+ use Moose;
+ use Error qw(:try);
+
+(Thanks to C<maik.hentsche@amd.com> for the report.)
+
+=head1 CLASS INTERFACE
+
+=head2 CONSTRUCTORS
+
+The C<Error> object is implemented as a HASH. This HASH is initialized
+with the arguments that are passed to its constructor. The elements
+that are used by, or are retrievable by, the C<Error> class are listed
+below; other classes may add to these.
+
+ -file
+ -line
+ -text
+ -value
+ -object
+
+If C<-file> or C<-line> are not specified in the constructor arguments
+then these will be initialized with the file name and line number where
+the constructor was called from.
+
+If the error is associated with an object then the object should be
+passed as the C<-object> argument. This will allow the C<Error> package
+to associate the error with the object.
+
+The C<Error> package remembers the last error created, and also the
+last error associated with a package. This could either be the last
+error created by a sub in that package, or the last error which passed
+an object blessed into that package as the C<-object> argument.
+
+=over 4
+
+=item Error->new()
+
+See the Error::Simple documentation.
+
+=item throw ( [ ARGS ] )
+
+Creates a new C<Error> object and throws an error, which will be caught
+by a surrounding C<try> block, if there is one. Otherwise it will cause
+the program to exit.
+
+C<throw> may also be called on an existing error to re-throw it.
+
+=item with ( [ ARGS ] )
+
+Creates a new C<Error> object and returns it. This is defined for
+syntactic sugar, e.g.
+
+ die with Some::Error ( ... );
+
+=item record ( [ ARGS ] )
+
+Creates a new C<Error> object and returns it. This is defined for
+syntactic sugar, e.g.
+
+ record Some::Error ( ... )
+ and return;
+
+=back
+
+=head2 STATIC METHODS
+
+=over 4
+
+=item prior ( [ PACKAGE ] )
+
+Return the last error created, or the last error associated with
+C<PACKAGE>
+
+=item flush ( [ PACKAGE ] )
+
+Flush the last error created, or the last error associated with
+C<PACKAGE>. It is necessary to clear the error stack before exiting the
+package, or uncaught errors generated using C<record> will be reported.
+
+ $Error->flush;
+
+=back
+
+=cut
+
+=head2 OBJECT METHODS
+
+=over 4
+
+=item stacktrace
+
+If the variable C<$Error::Debug> was non-zero when the error was
+created, then C<stacktrace> returns a string created by calling
+C<Carp::longmess>. If the variable was zero, C<stacktrace> returns
+the text of the error appended with the filename and line number of
+where the error was created, provided the text does not end with a
+newline.
+
+=item object
+
+The object this error was associated with
+
+=item file
+
+The file where the constructor of this error was called from
+
+=item line
+
+The line where the constructor of this error was called from
+
+=item text
+
+The text of the error
+
+=item $err->associate($obj)
+
+Associates an error with an object to allow error propagation, e.g.:
+
+ $ber->encode(...) or
+ return Error->prior($ber)->associate($ldap);
+
+=back
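+
+For example (a small sketch), the accessors above can be applied to the
+most recently created error, as returned by C<prior>:
+
+    if (my $err = Error->prior) {
+        printf STDERR "%s at %s line %d\n",
+            $err->text, $err->file, $err->line;
+    }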
+
+=head2 OVERLOAD METHODS
+
+=over 4
+
+=item stringify
+
+A method that converts the object into a string. This method may simply
+return the same as the C<text> method, or it may append more
+information, for example the file name and line number.
+
+By default this method returns the C<-text> argument that was passed to
+the constructor, or the string C<"Died"> if none was given.
+
+=item value
+
+A method that will return a value that can be associated with the
+error. For example if an error was created due to the failure of a
+system call, then this may return the numeric value of C<$!> at the
+time.
+
+By default this method returns the C<-value> argument that was passed
+to the constructor.
+
+=back
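+
+Because C<Error> overloads stringification and numification to call
+these methods, they are usually invoked implicitly, for example:
+
+    print STDERR "failed: $err\n";   # calls $err->stringify
+    my $code = 0 + $err;             # calls $err->value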
+
+=head1 PRE-DEFINED ERROR CLASSES
+
+=head2 Error::Simple
+
+This class can be used to hold simple error strings and values. Its
+constructor takes two arguments. The first is a text value, the second
+is a numeric value. These values are what will be returned by the
+overload methods.
+
+If the text value ends with C<at file line 1> as $@ strings do, then
+this information will be used to set the C<-file> and C<-line> arguments
+of the error object.
+
+This class is used internally if an eval'd block dies with an error
+that is a plain string. (Unless C<$Error::ObjectifyCallback> is modified.)
+
+
+=head1 $Error::ObjectifyCallback
+
+This variable holds a reference to a subroutine that converts errors that
+are plain strings to objects. It is used by Error.pm to convert textual
+errors to objects, and can be overridden by the user.
+
+It accepts a single argument, which is a hash reference of named
+parameters. Currently the only named parameter passed is C<'text'>,
+which is the text of the error, but others may be available in the future.
+
+For example the following code will cause Error.pm to throw objects of the
+class MyError::Bar by default:
+
+ sub throw_MyError_Bar
+ {
+ my $args = shift;
+ my $err = MyError::Bar->new();
+ $err->{'MyBarText'} = $args->{'text'};
+ return $err;
+ }
+
+ {
+ local $Error::ObjectifyCallback = \&throw_MyError_Bar;
+
+ # Error handling here.
+ }
+
+=cut
+
+=head1 MESSAGE HANDLERS
+
+C<Error> also provides handlers to extend the output of Perl's C<warn()>
+function, and to handle the printing of a thrown C<Error> that is not
+caught or otherwise handled. These are not installed by default, but are
+requested using the C<:warndie> tag in the C<use> line.
+
+ use Error qw( :warndie );
+
+These new error handlers are installed in C<$SIG{__WARN__}> and
+C<$SIG{__DIE__}>. If these handlers are already defined when the tag is
+imported, the old values are stored, and used during the new code. Thus, to
+arrange for custom handling of warnings and errors, you will need to perform
+something like the following:
+
+ BEGIN {
+ $SIG{__WARN__} = sub {
+ print STDERR "My special warning handler: $_[0]"
+ };
+ }
+
+ use Error qw( :warndie );
+
+Note that setting C<$SIG{__WARN__}> after the C<:warndie> tag has been
+imported will overwrite the handler that C<Error> provides. If this cannot be
+avoided, then the tag can be explicitly C<import>ed later
+
+ use Error;
+
+ $SIG{__WARN__} = ...;
+
+ import Error qw( :warndie );
+
+=head2 EXAMPLE
+
+The C<__DIE__> handler turns messages such as
+
+ Can't call method "foo" on an undefined value at examples/warndie.pl line 16.
+
+into
+
+ Unhandled perl error caught at toplevel:
+
+ Can't call method "foo" on an undefined value
+
+ Thrown from: examples/warndie.pl:16
+
+ Full stack trace:
+
+ main::inner('undef') called at examples/warndie.pl line 20
+ main::outer('undef') called at examples/warndie.pl line 23
+
+=cut
+
+=head1 SEE ALSO
+
+See L<Exception::Class> for a different module providing Object-Oriented
+exception handling, along with a convenient syntax for declaring hierarchies
+for them. It doesn't provide Error's syntactic sugar of C<try { ... }>,
+C<catch { ... }>, etc. which may be a good thing or a bad thing based
+on what you want. (Because Error's syntactic sugar tends to break.)
+
+L<Error::Exception> aims to combine L<Error> and L<Exception::Class>
+"with correct stringification".
+
+L<TryCatch> and L<Try::Tiny> are similar in concept to Error.pm, but
+provide a syntax that hopefully breaks less.
+
+=head1 KNOWN BUGS
+
+None, but that does not mean there are not any.
+
+=head1 AUTHORS
+
+Graham Barr <gbarr@pobox.com>
+
+The code that inspired me to write this was originally written by
+Peter Seibel <peter@weblogic.com> and adapted by Jesse Glick
+<jglick@sig.bsh.com>.
+
+C<:warndie> handlers added by Paul Evans <leonerd@leonerd.org.uk>
+
+=head1 MAINTAINER
+
+Shlomi Fish, L<http://www.shlomifish.org/> .
+
+=head1 PAST MAINTAINERS
+
+Arun Kumar U <u_arunkumar@yahoo.com>
+
+=head1 COPYRIGHT
+
+Copyright (c) 1997-8 Graham Barr. All rights reserved.
+This program is free software; you can redistribute it and/or modify it
+under the same terms as Perl itself.
+
+=cut
--- /dev/null
+# Copyrights 1995-2018 by [Mark Overmeer].
+# For other contributors see ChangeLog.
+# See the manual pages for details on the licensing terms.
+# Pod stripped from pm file by OODoc 2.02.
+# This code is part of the bundle MailTools. Meta-POD processed with
+# OODoc into POD and HTML manual-pages. See README.md for Copyright.
+# Licensed under the same terms as Perl itself.
+
+package Mail::Address;
+use vars '$VERSION';
+$VERSION = '2.20';
+
+use strict;
+
+use Carp;
+
+# use locale; removed in version 1.78, because it causes taint problems
+
+sub Version { our $VERSION }
+
+
+
+# given a comment, attempt to extract a person's name
+sub _extract_name
+{ # This function can be called as a method as well
+ my $self = @_ && ref $_[0] ? shift : undef;
+
+ local $_ = shift
+ or return '';
+
+ # Using encodings, too hard. See Mail::Message::Field::Full.
+ return '' if m/\=\?.*?\?\=/;
+
+ # trim whitespace
+ s/^\s+//;
+ s/\s+$//;
+ s/\s+/ /;
+
+ # Disregard numeric names (e.g. 123456.1234@compuserve.com)
+ return "" if /^[\d ]+$/;
+
+ s/^\((.*)\)$/$1/; # remove outermost parenthesis
+ s/^"(.*)"$/$1/; # remove outer quotation marks
+ s/\(.*?\)//g; # remove minimal embedded comments
+ s/\\//g; # remove all escapes
+ s/^"(.*)"$/$1/; # remove internal quotation marks
+ s/^([^\s]+) ?, ?(.*)$/$2 $1/; # reverse "Last, First M." if applicable
+ s/,.*//;
+
+ # Change casing only when the name contains only upper or only
+ # lower cased characters.
+ unless( m/[A-Z]/ && m/[a-z]/ )
+ { # Set the case of the name to first char upper rest lower
+ s/\b(\w+)/\L\u$1/igo; # Upcase first letter on name
+ s/\bMc(\w)/Mc\u$1/igo; # Scottish names such as 'McLeod'
+ s/\bo'(\w)/O'\u$1/igo; # Irish names such as 'O'Malley, O'Reilly'
+ s/\b(x*(ix)?v*(iv)?i*)\b/\U$1/igo; # Roman numerals, eg 'Level III Support'
+ }
+
+ # some cleanup
+ s/\[[^\]]*\]//g;
+ s/(^[\s'"]+|[\s'"]+$)//g;
+ s/\s{2,}/ /g;
+
+ $_;
+}
+
+sub _tokenise
+{ local $_ = join ',', @_;
+ my (@words,$snippet,$field);
+
+ s/\A\s+//;
+ s/[\r\n]+/ /g;
+
+ while ($_ ne '')
+ { $field = '';
+ if(s/^\s*\(/(/ ) # (...)
+ { my $depth = 0;
+
+ PAREN: while(s/^(\(([^\(\)\\]|\\.)*)//)
+ { $field .= $1;
+ $depth++;
+ while(s/^(([^\(\)\\]|\\.)*\)\s*)//)
+ { $field .= $1;
+ last PAREN unless --$depth;
+ $field .= $1 if s/^(([^\(\)\\]|\\.)+)//;
+ }
+ }
+
+ carp "Unmatched () '$field' '$_'"
+ if $depth;
+
+ $field =~ s/\s+\Z//;
+ push @words, $field;
+
+ next;
+ }
+
+ if( s/^("(?:[^"\\]+|\\.)*")\s*// # "..."
+ || s/^(\[(?:[^\]\\]+|\\.)*\])\s*// # [...]
+ || s/^([^\s()<>\@,;:\\".[\]]+)\s*//
+ || s/^([()<>\@,;:\\".[\]])\s*//
+ )
+ { push @words, $1;
+ next;
+ }
+
+ croak "Unrecognised line: $_";
+ }
+
+ push @words, ",";
+ \@words;
+}
+
+sub _find_next
+{ my ($idx, $tokens, $len) = @_;
+
+ while($idx < $len)
+ { my $c = $tokens->[$idx];
+ return $c if $c eq ',' || $c eq ';' || $c eq '<';
+ $idx++;
+ }
+
+ "";
+}
+
+sub _complete
+{ my ($class, $phrase, $address, $comment) = @_;
+
+ @$phrase || @$comment || @$address
+ or return undef;
+
+ my $o = $class->new(join(" ",@$phrase), join("",@$address), join(" ",@$comment));
+ @$phrase = @$address = @$comment = ();
+ $o;
+}
+
+#------------
+
+sub new(@)
+{ my $class = shift;
+ bless [@_], $class;
+}
+
+
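+# Parse one or more strings of comma-separated RFC 822 addresses into a
+# list of Mail::Address objects, e.g. (the address is made up):
+#
+#   my @addrs = Mail::Address->parse('Full Name <user@example.com>');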
+sub parse(@)
+{ my $class = shift;
+ my @line = grep {defined} @_;
+ my $line = join '', @line;
+
+ my (@phrase, @comment, @address, @objs);
+ my ($depth, $idx) = (0, 0);
+
+ my $tokens = _tokenise @line;
+ my $len = @$tokens;
+ my $next = _find_next $idx, $tokens, $len;
+
+ local $_;
+ for(my $idx = 0; $idx < $len; $idx++)
+ { $_ = $tokens->[$idx];
+
+ if(substr($_,0,1) eq '(') { push @comment, $_ }
+ elsif($_ eq '<') { $depth++ }
+ elsif($_ eq '>') { $depth-- if $depth }
+ elsif($_ eq ',' || $_ eq ';')
+ { warn "Unmatched '<>' in $line" if $depth;
+ my $o = $class->_complete(\@phrase, \@address, \@comment);
+ push @objs, $o if defined $o;
+ $depth = 0;
+ $next = _find_next $idx+1, $tokens, $len;
+ }
+ elsif($depth) { push @address, $_ }
+ elsif($next eq '<') { push @phrase, $_ }
+ elsif( /^[.\@:;]$/ || !@address || $address[-1] =~ /^[.\@:;]$/ )
+ { push @address, $_ }
+ else
+ { warn "Unmatched '<>' in $line" if $depth;
+ my $o = $class->_complete(\@phrase, \@address, \@comment);
+ push @objs, $o if defined $o;
+ $depth = 0;
+ push @address, $_;
+ }
+ }
+ @objs;
+}
+
+#------------
+
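+# Accessors for the three fields: with no argument they return the
+# field, with an argument they set it and return the previous value,
+# e.g. $addr->phrase("New Name").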
+sub phrase { shift->set_or_get(0, @_) }
+sub address { shift->set_or_get(1, @_) }
+sub comment { shift->set_or_get(2, @_) }
+
+sub set_or_get($)
+{ my ($self, $i) = (shift, shift);
+ @_ or return $self->[$i];
+
+ my $val = $self->[$i];
+ $self->[$i] = shift if @_;
+ $val;
+}
+
+
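+# Render a list of Mail::Address objects back into a single address
+# line, quoting each phrase that contains characters outside $atext.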
+my $atext = '[\-\w !#$%&\'*+/=?^`{|}~]';
+sub format
+{ my @addrs;
+
+ foreach (@_)
+ { my ($phrase, $email, $comment) = @$_;
+ my @addr;
+
+ if(defined $phrase && length $phrase)
+ { push @addr
+ , $phrase =~ /^(?:\s*$atext\s*)+$/o ? $phrase
+ : $phrase =~ /(?<!\\)"/ ? $phrase
+ : qq("$phrase");
+
+ push @addr, "<$email>"
+ if defined $email && length $email;
+ }
+ elsif(defined $email && length $email)
+ { push @addr, $email;
+ }
+
+ if(defined $comment && $comment =~ /\S/)
+ { $comment =~ s/^\s*\(?/(/;
+ $comment =~ s/\)?\s*$/)/;
+ }
+
+ push @addr, $comment
+ if defined $comment && length $comment;
+
+ push @addrs, join(" ", @addr)
+ if @addr;
+ }
+
+ join ", ", @addrs;
+}
+
+#------------
+
+sub name
+{ my $self = shift;
+ my $phrase = $self->phrase;
+ my $addr = $self->address;
+
+ $phrase = $self->comment
+ unless defined $phrase && length $phrase;
+
+ my $name = $self->_extract_name($phrase);
+
+ # first.last@domain address
+ if($name eq '' && $addr =~ /([^\%\.\@_]+([\._][^\%\.\@_]+)+)[\@\%]/)
+ { ($name = $1) =~ s/[\._]+/ /g;
+ $name = _extract_name $name;
+ }
+
+ if($name eq '' && $addr =~ m#/g=#i) # X400 style address
+ { my ($f) = $addr =~ m#g=([^/]*)#i;
+ my ($l) = $addr =~ m#s=([^/]*)#i;
+ $name = _extract_name "$f $l";
+ }
+
+ length $name ? $name : undef;
+}
+
+
+sub host
+{ my $addr = shift->address || '';
+ my $i = rindex $addr, '@';
+ $i >= 0 ? substr($addr, $i+1) : undef;
+}
+
+
+sub user
+{ my $addr = shift->address || '';
+ my $i = rindex $addr, '@';
+ $i >= 0 ? substr($addr,0,$i) : $addr;
+}
+
+1;
use 5.008;
use strict;
+use warnings;
+use File::Temp ();
+use File::Spec ();
BEGIN {
use Carp qw(carp croak); # but croak is bad - throw instead
-use Error qw(:try);
+use Git::LoadCPAN::Error qw(:try);
use Cwd qw(abs_path cwd);
use IPC::Open2 qw(open2);
use Fcntl qw(SEEK_SET SEEK_CUR);
};
if ($dir) {
- _verify_require();
File::Spec->file_name_is_absolute($dir) or $dir = $opts{Directory} . '/' . $dir;
$opts{Repository} = abs_path($dir);
sub get_tz_offset {
# some systems don't handle or mishandle %z, so be creative.
my $t = shift || time;
- my $gm = timegm(localtime($t));
+ my @t = localtime($t);
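+	# localtime() gives the year as an offset from 1900; make it
+	# absolute so timegm() below does not guess a century.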
+ $t[5] += 1900;
+ my $gm = timegm(@t);
my $sign = qw( + + - )[ $gm <=> $t ];
return sprintf("%s%02d%02d", $sign, (gmtime(abs($t - $gm)))[2,1]);
}
return "$ident[0] <$ident[1]>";
}
-=item parse_mailboxes
-
-Return an array of mailboxes extracted from a string.
-
-=cut
-
-# Very close to Mail::Address's parser, but we still have minor
-# differences in some cases (see t9000 for examples).
-sub parse_mailboxes {
- my $re_comment = qr/\((?:[^)]*)\)/;
- my $re_quote = qr/"(?:[^\"\\]|\\.)*"/;
- my $re_word = qr/(?:[^]["\s()<>:;@\\,.]|\\.)+/;
-
- # divide the string in tokens of the above form
- my $re_token = qr/(?:$re_quote|$re_word|$re_comment|\S)/;
- my @tokens = map { $_ =~ /\s*($re_token)\s*/g } @_;
- my $end_of_addr_seen = 0;
-
- # add a delimiter to simplify treatment for the last mailbox
- push @tokens, ",";
-
- my (@addr_list, @phrase, @address, @comment, @buffer) = ();
- foreach my $token (@tokens) {
- if ($token =~ /^[,;]$/) {
- # if buffer still contains undeterminated strings
- # append it at the end of @address or @phrase
- if ($end_of_addr_seen) {
- push @phrase, @buffer;
- } else {
- push @address, @buffer;
- }
-
- my $str_phrase = join ' ', @phrase;
- my $str_address = join '', @address;
- my $str_comment = join ' ', @comment;
-
- # quote are necessary if phrase contains
- # special characters
- if ($str_phrase =~ /[][()<>:;@\\,.\000-\037\177]/) {
- $str_phrase =~ s/(^|[^\\])"/$1/g;
- $str_phrase = qq["$str_phrase"];
- }
-
- # add "<>" around the address if necessary
- if ($str_address ne "" && $str_phrase ne "") {
- $str_address = qq[<$str_address>];
- }
-
- my $str_mailbox = "$str_phrase $str_address $str_comment";
- $str_mailbox =~ s/^\s*|\s*$//g;
- push @addr_list, $str_mailbox if ($str_mailbox);
-
- @phrase = @address = @comment = @buffer = ();
- $end_of_addr_seen = 0;
- } elsif ($token =~ /^\(/) {
- push @comment, $token;
- } elsif ($token eq "<") {
- push @phrase, (splice @address), (splice @buffer);
- } elsif ($token eq ">") {
- $end_of_addr_seen = 1;
- push @address, (splice @buffer);
- } elsif ($token eq "@" && !$end_of_addr_seen) {
- push @address, (splice @buffer), "@";
- } else {
- push @buffer, $token;
- }
- }
-
- return @addr_list;
-}
-
=item hash_object ( TYPE, FILENAME )
Compute the SHA1 object id of the given C<FILENAME> considering it is
sub _temp_cache {
my ($self, $name) = _maybe_self(@_);
- _verify_require();
-
my $temp_fd = \$TEMP_FILEMAP{$name};
if (defined $$temp_fd and $$temp_fd->opened) {
if ($TEMP_FILES{$$temp_fd}{locked}) {
$$temp_fd;
}
-sub _verify_require {
- eval { require File::Temp; require File::Spec; };
- $@ and throw Error::Simple($@);
-}
-
=item temp_reset ( FILEHANDLE )
Truncates and resets the position of the C<FILEHANDLE>.
# Pipe implementation for ActiveState Perl.
package Git::activestate_pipe;
-use strict;
sub TIEHANDLE {
my ($class, @params) = @_;
sub __bootstrap_locale_messages {
our $TEXTDOMAIN = 'git';
- our $TEXTDOMAINDIR = $ENV{GIT_TEXTDOMAINDIR} || '++LOCALEDIR++';
+ our $TEXTDOMAINDIR = $ENV{GIT_TEXTDOMAINDIR} || '@@LOCALEDIR@@';
require POSIX;
POSIX->import(qw(setlocale));
--- /dev/null
+package Git::LoadCPAN;
+use 5.008;
+use strict;
+use warnings;
+
+=head1 NAME
+
+Git::LoadCPAN - Wrapper for loading modules from the CPAN (OS) or Git's own copy
+
+=head1 DESCRIPTION
+
+The Perl code in Git depends on some modules from the CPAN, but we
+don't want to make those a hard requirement for anyone building from
+source.
+
+Therefore the L<Git::LoadCPAN> namespace shipped with Git contains
+wrapper modules like C<Git::LoadCPAN::Module::Name> that will first
+attempt to load C<Module::Name> from the OS, and if that doesn't work
+will fall back on C<FromCPAN::Module::Name> shipped with Git itself.
+
+Usually distributors will not ship Git's Git::FromCPAN tree at all,
+via the C<NO_PERL_CPAN_FALLBACKS> option, preferring to use their own
+packaging of CPAN modules instead.
+
+This module is only intended to be used for code shipping in the
+C<git.git> repository. Use it for anything else at your peril!
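+
+For example, Git's own F<Git.pm> loads the L<Error> module through its
+wrapper:
+
+    use Git::LoadCPAN::Error qw(:try);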
+
+=cut
+
+# NO_PERL_CPAN_FALLBACKS_STR evades the sed search-replace from the
+# Makefile, and allows for detecting whether the module is loaded from
+# perl/Git as opposed to perl/build/Git, which is useful for one-off
+# testing without having Error.pm et al installed.
+use constant NO_PERL_CPAN_FALLBACKS_STR => '@@' . 'NO_PERL_CPAN_FALLBACKS' . '@@';
+use constant NO_PERL_CPAN_FALLBACKS => (
+ q[@@NO_PERL_CPAN_FALLBACKS@@] ne ''
+ and
+ q[@@NO_PERL_CPAN_FALLBACKS@@] ne NO_PERL_CPAN_FALLBACKS_STR
+);
+
+sub import {
+ shift;
+ my $caller = caller;
+ my %args = @_;
+ my $module = exists $args{module} ? delete $args{module} : die "BUG: Expected 'module' parameter!";
+ my $import = exists $args{import} ? delete $args{import} : die "BUG: Expected 'import' parameter!";
+ die "BUG: Too many arguments!" if keys %args;
+
+ # Foo::Bar to Foo/Bar.pm
+ my $package_pm = $module;
+ $package_pm =~ s[::][/]g;
+ $package_pm .= '.pm';
+
+ eval {
+ require $package_pm;
+ 1;
+ } or do {
+ my $error = $@ || "Zombie Error";
+
+ if (NO_PERL_CPAN_FALLBACKS) {
+ chomp(my $error = sprintf <<'THEY_PROMISED', $module);
+BUG: The '%s' module is not here, but NO_PERL_CPAN_FALLBACKS was set!
+
+Git needs this Perl module from the CPAN, and will by default ship
+with a copy of it. This Git was built with NO_PERL_CPAN_FALLBACKS,
+meaning that whoever built it promised to provide this module.
+
+You're seeing this error because they broke that promise, and we can't
+load our fallback version, since we were asked not to install it.
+
+If you're seeing this error and didn't package Git yourself, the
+package you're using is broken, or your system is broken. This error
+won't appear if Git is built without NO_PERL_CPAN_FALLBACKS (instead
+we'll use our fallback version of the module).
+THEY_PROMISED
+ die $error;
+ }
+
+ my $Git_LoadCPAN_pm_path = $INC{"Git/LoadCPAN.pm"} || die "BUG: Should have our own path from %INC!";
+
+ require File::Basename;
+ my $Git_LoadCPAN_pm_root = File::Basename::dirname($Git_LoadCPAN_pm_path) || die "BUG: Can't figure out lib/Git dirname from '$Git_LoadCPAN_pm_path'!";
+
+ require File::Spec;
+ my $Git_pm_FromCPAN_root = File::Spec->catdir($Git_LoadCPAN_pm_root, '..', 'FromCPAN');
+ die "BUG: '$Git_pm_FromCPAN_root' should be a directory!" unless -d $Git_pm_FromCPAN_root;
+
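+		# Prepend the bundled FromCPAN copy and retry the require.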
+ local @INC = ($Git_pm_FromCPAN_root, @INC);
+ require $package_pm;
+ };
+
+ if ($import) {
+ no strict 'refs';
+ *{"${caller}::import"} = sub {
+ shift;
+ use strict 'refs';
+ unshift @_, $module;
+ goto &{"${module}::import"};
+ };
+ use strict 'refs';
+ }
+}
+
+1;
--- /dev/null
+package Git::LoadCPAN::Error;
+use 5.008;
+use strict;
+use warnings;
+use Git::LoadCPAN (
+ module => 'Error',
+ import => 1,
+);
+
+1;
--- /dev/null
+package Git::LoadCPAN::Mail::Address;
+use 5.008;
+use strict;
+use warnings;
+use Git::LoadCPAN (
+ module => 'Mail::Address',
+ import => 0,
+);
+
+1;
$ENV{TZ} = 'UTC';
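+	# $Y is already an absolute four-digit year here; passing
+	# "$Y - 1900" would trigger Time::Local's century heuristic.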
my $epoch_in_UTC =
- Time::Local::timelocal($S, $M, $H, $d, $m - 1, $Y - 1900);
+ Time::Local::timelocal($S, $M, $H, $d, $m - 1, $Y);
# Determine our local timezone (including DST) at the
# time of $epoch_in_UTC. $Git::SVN::Log::TZ stored the
+++ /dev/null
-#
-# Makefile for perl support modules and routine
-#
-makfile:=perl.mak
-modules =
-
-PERL_PATH_SQ = $(subst ','\'',$(PERL_PATH))
-prefix_SQ = $(subst ','\'',$(prefix))
-localedir_SQ = $(subst ','\'',$(localedir))
-
-ifndef V
- QUIET = @
-endif
-
-all install instlibdir: $(makfile)
- $(QUIET)$(MAKE) -f $(makfile) $@
-
-clean:
- $(QUIET)test -f $(makfile) && $(MAKE) -f $(makfile) $@ || exit 0
- $(RM) ppport.h
- $(RM) $(makfile)
- $(RM) $(makfile).old
- $(RM) PM.stamp
-
-$(makfile): PM.stamp
-
-ifdef NO_PERL_MAKEMAKER
-instdir_SQ = $(subst ','\'',$(prefix)/lib)
-
-modules += Git
-modules += Git/I18N
-modules += Git/IndexInfo
-modules += Git/Packet
-modules += Git/SVN
-modules += Git/SVN/Memoize/YAML
-modules += Git/SVN/Fetcher
-modules += Git/SVN/Editor
-modules += Git/SVN/GlobSpec
-modules += Git/SVN/Log
-modules += Git/SVN/Migration
-modules += Git/SVN/Prompt
-modules += Git/SVN/Ra
-modules += Git/SVN/Utils
-
-$(makfile): ../GIT-CFLAGS Makefile
- echo all: private-Error.pm Git.pm Git/I18N.pm > $@
- set -e; \
- for i in $(modules); \
- do \
- if test $$i = $${i%/*}; \
- then \
- subdir=; \
- else \
- subdir=/$${i%/*}; \
- fi; \
- echo ' $(RM) blib/lib/'$$i'.pm' >> $@; \
- echo ' mkdir -p blib/lib'$$subdir >> $@; \
- echo ' cp '$$i'.pm blib/lib/'$$i'.pm' >> $@; \
- done
- echo ' $(RM) blib/lib/Error.pm' >> $@
- '$(PERL_PATH_SQ)' -MError -e 'exit($$Error::VERSION < 0.15009)' || \
- echo ' cp private-Error.pm blib/lib/Error.pm' >> $@
- echo install: >> $@
- set -e; \
- for i in $(modules); \
- do \
- if test $$i = $${i%/*}; \
- then \
- subdir=; \
- else \
- subdir=/$${i%/*}; \
- fi; \
- echo ' $(RM) "$$(DESTDIR)$(instdir_SQ)/'$$i'.pm"' >> $@; \
- echo ' mkdir -p "$$(DESTDIR)$(instdir_SQ)'$$subdir'"' >> $@; \
- echo ' cp '$$i'.pm "$$(DESTDIR)$(instdir_SQ)/'$$i'.pm"' >> $@; \
- done
- echo ' $(RM) "$$(DESTDIR)$(instdir_SQ)/Error.pm"' >> $@
- '$(PERL_PATH_SQ)' -MError -e 'exit($$Error::VERSION < 0.15009)' || \
- echo ' cp private-Error.pm "$$(DESTDIR)$(instdir_SQ)/Error.pm"' >> $@
- echo instlibdir: >> $@
- echo ' echo $(instdir_SQ)' >> $@
-else
-$(makfile): Makefile.PL ../GIT-CFLAGS
- $(PERL_PATH) $< PREFIX='$(prefix_SQ)' INSTALL_BASE='' --localedir='$(localedir_SQ)'
-endif
-
-# this is just added comfort for calling make directly in perl dir
-# (even though GIT-CFLAGS aren't used yet. If ever)
-../GIT-CFLAGS:
- $(MAKE) -C .. GIT-CFLAGS
+++ /dev/null
-use strict;
-use warnings;
-use ExtUtils::MakeMaker;
-use Getopt::Long;
-use File::Find;
-
-# Don't forget to update the perl/Makefile, too.
-# Don't forget to test with NO_PERL_MAKEMAKER=YesPlease
-
-# Sanity: die at first unknown option
-Getopt::Long::Configure qw/ pass_through /;
-
-my $localedir = '';
-GetOptions("localedir=s" => \$localedir);
-
-sub MY::postamble {
- return <<'MAKE_FRAG';
-instlibdir:
- @echo '$(INSTALLSITELIB)'
-
-ifneq (,$(DESTDIR))
-ifeq (0,$(shell expr '$(MM_VERSION)' '>' 6.10))
-$(error ExtUtils::MakeMaker version "$(MM_VERSION)" is older than 6.11 and so \
- is likely incompatible with the DESTDIR mechanism. Try setting \
- NO_PERL_MAKEMAKER=1 instead)
-endif
-endif
-
-MAKE_FRAG
-}
-
-# Find all the .pm files in "Git/" and Git.pm
-my %pm;
-find sub {
- return unless /\.pm$/;
-
- # sometimes File::Find prepends a ./ Strip it.
- my $pm_path = $File::Find::name;
- $pm_path =~ s{^\./}{};
-
- $pm{$pm_path} = '$(INST_LIBDIR)/'.$pm_path;
-}, "Git", "Git.pm";
-
-
-# We come with our own bundled Error.pm. It's not in the set of default
-# Perl modules so install it if it's not available on the system yet.
-if ( !eval { require Error } || $Error::VERSION < 0.15009) {
- $pm{'private-Error.pm'} = '$(INST_LIBDIR)/Error.pm';
-}
-
-# redirect stdout, otherwise the message "Writing perl.mak for Git"
-# disrupts the output for the target 'instlibdir'
-open STDOUT, ">&STDERR";
-
-WriteMakefile(
- NAME => 'Git',
- VERSION_FROM => 'Git.pm',
- PM => \%pm,
- PM_FILTER => qq[\$(PERL) -pe "s<\\Q++LOCALEDIR++\\E><$localedir>"],
- MAKEFILE => 'perl.mak',
- INSTALLSITEMAN3DIR => '$(SITEPREFIX)/share/man/man3'
-);
+++ /dev/null
-# Error.pm
-#
-# Copyright (c) 1997-8 Graham Barr <gbarr@ti.com>. All rights reserved.
-# This program is free software; you can redistribute it and/or
-# modify it under the same terms as Perl itself.
-#
-# Based on my original Error.pm, and Exceptions.pm by Peter Seibel
-# <peter@weblogic.com> and adapted by Jesse Glick <jglick@sig.bsh.com>.
-#
-# but modified ***significantly***
-
-package Error;
-
-use strict;
-use vars qw($VERSION);
-use 5.004;
-
-$VERSION = "0.15009";
-
-use overload (
- '""' => 'stringify',
- '0+' => 'value',
- 'bool' => sub { return 1; },
- 'fallback' => 1
-);
-
-$Error::Depth = 0; # Depth to pass to caller()
-$Error::Debug = 0; # Generate verbose stack traces
-@Error::STACK = (); # Clause stack for try
-$Error::THROWN = undef; # last error thrown, a workaround until die $ref works
-
-my $LAST; # Last error created
-my %ERROR; # Last error associated with package
-
-sub throw_Error_Simple
-{
- my $args = shift;
- return Error::Simple->new($args->{'text'});
-}
-
-$Error::ObjectifyCallback = \&throw_Error_Simple;
-
-
-# Exported subs are defined in Error::subs
-
-sub import {
- shift;
- local $Exporter::ExportLevel = $Exporter::ExportLevel + 1;
- Error::subs->import(@_);
-}
-
-# I really want to use last for the name of this method, but it is a keyword
-# which prevent the syntax last Error
-
-sub prior {
- shift; # ignore
-
- return $LAST unless @_;
-
- my $pkg = shift;
- return exists $ERROR{$pkg} ? $ERROR{$pkg} : undef
- unless ref($pkg);
-
- my $obj = $pkg;
- my $err = undef;
- if($obj->isa('HASH')) {
- $err = $obj->{'__Error__'}
- if exists $obj->{'__Error__'};
- }
- elsif($obj->isa('GLOB')) {
- $err = ${*$obj}{'__Error__'}
- if exists ${*$obj}{'__Error__'};
- }
-
- $err;
-}
-
-sub flush {
- shift; #ignore
-
- unless (@_) {
- $LAST = undef;
- return;
- }
-
- my $pkg = shift;
- return unless ref($pkg);
-
- undef $ERROR{$pkg} if defined $ERROR{$pkg};
-}
-
-# Return as much information as possible about where the error
-# happened. The -stacktrace element only exists if $Error::DEBUG
-# was set when the error was created
-
-sub stacktrace {
- my $self = shift;
-
- return $self->{'-stacktrace'}
- if exists $self->{'-stacktrace'};
-
- my $text = exists $self->{'-text'} ? $self->{'-text'} : "Died";
-
- $text .= sprintf(" at %s line %d.\n", $self->file, $self->line)
- unless($text =~ /\n$/s);
-
- $text;
-}
-
-# Allow error propagation, ie
-#
-# $ber->encode(...) or
-# return Error->prior($ber)->associate($ldap);
-
-sub associate {
- my $err = shift;
- my $obj = shift;
-
- return unless ref($obj);
-
- if($obj->isa('HASH')) {
- $obj->{'__Error__'} = $err;
- }
- elsif($obj->isa('GLOB')) {
- ${*$obj}{'__Error__'} = $err;
- }
- $obj = ref($obj);
- $ERROR{ ref($obj) } = $err;
-
- return;
-}
-
-sub new {
- my $self = shift;
- my($pkg,$file,$line) = caller($Error::Depth);
-
- my $err = bless {
- '-package' => $pkg,
- '-file' => $file,
- '-line' => $line,
- @_
- }, $self;
-
- $err->associate($err->{'-object'})
- if(exists $err->{'-object'});
-
- # To always create a stacktrace would be very inefficient, so
- # we only do it if $Error::Debug is set
-
- if($Error::Debug) {
- require Carp;
- local $Carp::CarpLevel = $Error::Depth;
- my $text = defined($err->{'-text'}) ? $err->{'-text'} : "Error";
- my $trace = Carp::longmess($text);
- # Remove try calls from the trace
- $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
- $trace =~ s/(\n\s+\S+__ANON__[^\n]+)?\n\s+eval[^\n]+\n\s+Error::subs::run_clauses[^\n]+\n\s+Error::subs::try[^\n]+(?=\n)//sog;
- $err->{'-stacktrace'} = $trace
- }
-
- $@ = $LAST = $ERROR{$pkg} = $err;
-}
-
-# Throw an error. this contains some very gory code.
-
-sub throw {
- my $self = shift;
- local $Error::Depth = $Error::Depth + 1;
-
- # if we are not rethrow-ing then create the object to throw
- $self = $self->new(@_) unless ref($self);
-
- die $Error::THROWN = $self;
-}
-
-# syntactic sugar for
-#
-# die with Error( ... );
-
-sub with {
- my $self = shift;
- local $Error::Depth = $Error::Depth + 1;
-
- $self->new(@_);
-}
-
-# syntactic sugar for
-#
-# record Error( ... ) and return;
-
-sub record {
- my $self = shift;
- local $Error::Depth = $Error::Depth + 1;
-
- $self->new(@_);
-}
-
-# catch clause for
-#
-# try { ... } catch CLASS with { ... }
-
-sub catch {
- my $pkg = shift;
- my $code = shift;
- my $clauses = shift || {};
- my $catch = $clauses->{'catch'} ||= [];
-
- unshift @$catch, $pkg, $code;
-
- $clauses;
-}
-
-# Object query methods
-
-sub object {
- my $self = shift;
- exists $self->{'-object'} ? $self->{'-object'} : undef;
-}
-
-sub file {
- my $self = shift;
- exists $self->{'-file'} ? $self->{'-file'} : undef;
-}
-
-sub line {
- my $self = shift;
- exists $self->{'-line'} ? $self->{'-line'} : undef;
-}
-
-sub text {
- my $self = shift;
- exists $self->{'-text'} ? $self->{'-text'} : undef;
-}
-
-# overload methods
-
-sub stringify {
- my $self = shift;
- defined $self->{'-text'} ? $self->{'-text'} : "Died";
-}
-
-sub value {
- my $self = shift;
- exists $self->{'-value'} ? $self->{'-value'} : undef;
-}
-
-package Error::Simple;
-
-@Error::Simple::ISA = qw(Error);
-
-sub new {
- my $self = shift;
- my $text = "" . shift;
- my $value = shift;
- my(@args) = ();
-
- local $Error::Depth = $Error::Depth + 1;
-
- @args = ( -file => $1, -line => $2)
- if($text =~ s/\s+at\s+(\S+)\s+line\s+(\d+)(?:,\s*<[^>]*>\s+line\s+\d+)?\.?\n?$//s);
- push(@args, '-value', 0 + $value)
- if defined($value);
-
- $self->SUPER::new(-text => $text, @args);
-}
-
-sub stringify {
- my $self = shift;
- my $text = $self->SUPER::stringify;
- $text .= sprintf(" at %s line %d.\n", $self->file, $self->line)
- unless($text =~ /\n$/s);
- $text;
-}
-
-##########################################################################
-##########################################################################
-
-# Inspired by code from Jesse Glick <jglick@sig.bsh.com> and
-# Peter Seibel <peter@weblogic.com>
-
-package Error::subs;
-
-use Exporter ();
-use vars qw(@EXPORT_OK @ISA %EXPORT_TAGS);
-
-@EXPORT_OK = qw(try with finally except otherwise);
-%EXPORT_TAGS = (try => \@EXPORT_OK);
-
-@ISA = qw(Exporter);
-
-
-sub blessed {
- my $item = shift;
- local $@; # don't kill an outer $@
- ref $item and eval { $item->can('can') };
-}
-
-
-sub run_clauses ($$$\@) {
- my($clauses,$err,$wantarray,$result) = @_;
- my $code = undef;
-
- $err = $Error::ObjectifyCallback->({'text' =>$err}) unless ref($err);
-
- CATCH: {
-
- # catch
- my $catch;
- if(defined($catch = $clauses->{'catch'})) {
- my $i = 0;
-
- CATCHLOOP:
- for( ; $i < @$catch ; $i += 2) {
- my $pkg = $catch->[$i];
- unless(defined $pkg) {
- #except
- splice(@$catch,$i,2,$catch->[$i+1]->());
- $i -= 2;
- next CATCHLOOP;
- }
- elsif(blessed($err) && $err->isa($pkg)) {
- $code = $catch->[$i+1];
- while(1) {
- my $more = 0;
- local($Error::THROWN);
- my $ok = eval {
- if($wantarray) {
- @{$result} = $code->($err,\$more);
- }
- elsif(defined($wantarray)) {
- @{$result} = ();
- $result->[0] = $code->($err,\$more);
- }
- else {
- $code->($err,\$more);
- }
- 1;
- };
- if( $ok ) {
- next CATCHLOOP if $more;
- undef $err;
- }
- else {
- $err = defined($Error::THROWN)
- ? $Error::THROWN : $@;
- $err = $Error::ObjectifyCallback->({'text' =>$err})
- unless ref($err);
- }
- last CATCH;
- };
- }
- }
- }
-
- # otherwise
- my $owise;
- if(defined($owise = $clauses->{'otherwise'})) {
- my $code = $clauses->{'otherwise'};
- my $more = 0;
- my $ok = eval {
- if($wantarray) {
- @{$result} = $code->($err,\$more);
- }
- elsif(defined($wantarray)) {
- @{$result} = ();
- $result->[0] = $code->($err,\$more);
- }
- else {
- $code->($err,\$more);
- }
- 1;
- };
- if( $ok ) {
- undef $err;
- }
- else {
- $err = defined($Error::THROWN)
- ? $Error::THROWN : $@;
-
- $err = $Error::ObjectifyCallback->({'text' =>$err})
- unless ref($err);
- }
- }
- }
- $err;
-}
-
-sub try (&;$) {
- my $try = shift;
- my $clauses = @_ ? shift : {};
- my $ok = 0;
- my $err = undef;
- my @result = ();
-
- unshift @Error::STACK, $clauses;
-
- my $wantarray = wantarray();
-
- do {
- local $Error::THROWN = undef;
- local $@ = undef;
-
- $ok = eval {
- if($wantarray) {
- @result = $try->();
- }
- elsif(defined $wantarray) {
- $result[0] = $try->();
- }
- else {
- $try->();
- }
- 1;
- };
-
- $err = defined($Error::THROWN) ? $Error::THROWN : $@
- unless $ok;
- };
-
- shift @Error::STACK;
-
- $err = run_clauses($clauses,$err,wantarray,@result)
- unless($ok);
-
- $clauses->{'finally'}->()
- if(defined($clauses->{'finally'}));
-
- if (defined($err))
- {
- if (blessed($err) && $err->can('throw'))
- {
- throw $err;
- }
- else
- {
- die $err;
- }
- }
-
- wantarray ? @result : $result[0];
-}
-
-# Each clause adds a sub to the list of clauses. The finally clause is
-# always the last, and the otherwise clause is always added just before
-# the finally clause.
-#
-# All clauses, except the finally clause, add a sub which takes one argument
-# this argument will be the error being thrown. The sub will return a code ref
-# if that clause can handle that error, otherwise undef is returned.
-#
-# The otherwise clause adds a sub which unconditionally returns the users
-# code reference, this is why it is forced to be last.
-#
-# The catch clause is defined in Error.pm, as the syntax causes it to
-# be called as a method
-
-sub with (&;$) {
- @_
-}
-
-sub finally (&) {
- my $code = shift;
- my $clauses = { 'finally' => $code };
- $clauses;
-}
-
-# The except clause is a block which returns a hashref or a list of
-# key-value pairs, where the keys are the classes and the values are subs.
-
-sub except (&;$) {
- my $code = shift;
- my $clauses = shift || {};
- my $catch = $clauses->{'catch'} ||= [];
-
- my $sub = sub {
- my $ref;
- my(@array) = $code->($_[0]);
- if(@array == 1 && ref($array[0])) {
- $ref = $array[0];
- $ref = [ %$ref ]
- if(UNIVERSAL::isa($ref,'HASH'));
- }
- else {
- $ref = \@array;
- }
- @$ref
- };
-
- unshift @{$catch}, undef, $sub;
-
- $clauses;
-}
-
-sub otherwise (&;$) {
- my $code = shift;
- my $clauses = shift || {};
-
- if(exists $clauses->{'otherwise'}) {
- require Carp;
- Carp::croak("Multiple otherwise clauses");
- }
-
- $clauses->{'otherwise'} = $code;
-
- $clauses;
-}
-
-1;
-__END__
-
-=head1 NAME
-
-Error - Error/exception handling in an OO-ish way
-
-=head1 SYNOPSIS
-
- use Error qw(:try);
-
- throw Error::Simple( "A simple error");
-
- sub xyz {
- ...
- record Error::Simple("A simple error")
- and return;
- }
-
- unlink($file) or throw Error::Simple("$file: $!",$!);
-
- try {
- do_some_stuff();
- die "error!" if $condition;
- throw Error::Simple -text => "Oops!" if $other_condition;
- }
- catch Error::IO with {
- my $E = shift;
- print STDERR "File ", $E->{'-file'}, " had a problem\n";
- }
- except {
- my $E = shift;
- my $general_handler=sub {send_message $E->{-description}};
- return {
- UserException1 => $general_handler,
- UserException2 => $general_handler
- };
- }
- otherwise {
- print STDERR "Well I don't know what to say\n";
- }
- finally {
- close_the_garage_door_already(); # Should be reliable
- }; # Don't forget the trailing ; or you might be surprised
-
-=head1 DESCRIPTION
-
-The C<Error> package provides two interfaces. Firstly C<Error> provides
-a procedural interface to exception handling. Secondly C<Error> is a
-base class for errors/exceptions that can either be thrown, for
-subsequent catch, or can simply be recorded.
-
-Errors in the class C<Error> should not be thrown directly, but the
-user should throw errors from a sub-class of C<Error>.
-
-=head1 PROCEDURAL INTERFACE
-
-C<Error> exports subroutines to perform exception handling. These will
-be exported if the C<:try> tag is used in the C<use> line.
-
-=over 4
-
-=item try BLOCK CLAUSES
-
-C<try> is the main subroutine called by the user. All other subroutines
-exported are clauses to the try subroutine.
-
-The BLOCK will be evaluated and, if no error is throw, try will return
-the result of the block.
-
-C<CLAUSES> are the subroutines below, which describe what to do in the
-event of an error being thrown within BLOCK.
-
-=item catch CLASS with BLOCK
-
-This clauses will cause all errors that satisfy C<$err-E<gt>isa(CLASS)>
-to be caught and handled by evaluating C<BLOCK>.
-
-C<BLOCK> will be passed two arguments. The first will be the error
-being thrown. The second is a reference to a scalar variable. If this
-variable is set by the catch block then, on return from the catch
-block, try will continue processing as if the catch block was never
-found.
-
-To propagate the error the catch block may call C<$err-E<gt>throw>
-
-If the scalar reference by the second argument is not set, and the
-error is not thrown. Then the current try block will return with the
-result from the catch block.
-
-=item except BLOCK
-
-When C<try> is looking for a handler, if an except clause is found
-C<BLOCK> is evaluated. The return value from this block should be a
-HASHREF or a list of key-value pairs, where the keys are class names
-and the values are CODE references for the handler of errors of that
-type.
-
-=item otherwise BLOCK
-
-Catch any error by executing the code in C<BLOCK>
-
-When evaluated C<BLOCK> will be passed one argument, which will be the
-error being processed.
-
-Only one otherwise block may be specified per try block
-
-=item finally BLOCK
-
-Execute the code in C<BLOCK> either after the code in the try block has
-successfully completed, or if the try block throws an error then
-C<BLOCK> will be executed after the handler has completed.
-
-If the handler throws an error then the error will be caught, the
-finally block will be executed and the error will be re-thrown.
-
-Only one finally block may be specified per try block
-
-=back
-
-=head1 CLASS INTERFACE
-
-=head2 CONSTRUCTORS
-
-The C<Error> object is implemented as a HASH. This HASH is initialized
-with the arguments that are passed to its constructor. The elements
-that are used by, or are retrievable by the C<Error> class are listed
-below, other classes may add to these.
-
- -file
- -line
- -text
- -value
- -object
-
-If C<-file> or C<-line> are not specified in the constructor arguments
-then these will be initialized with the file name and line number where
-the constructor was called from.
-
-If the error is associated with an object then the object should be
-passed as the C<-object> argument. This will allow the C<Error> package
-to associate the error with the object.
-
-The C<Error> package remembers the last error created, and also the
-last error associated with a package. This could either be the last
-error created by a sub in that package, or the last error which passed
-an object blessed into that package as the C<-object> argument.
-
-=over 4
-
-=item throw ( [ ARGS ] )
-
-Create a new C<Error> object and throw an error, which will be caught
-by a surrounding C<try> block, if there is one. Otherwise it will cause
-the program to exit.
-
-C<throw> may also be called on an existing error to re-throw it.
-
-=item with ( [ ARGS ] )
-
-Create a new C<Error> object and returns it. This is defined for
-syntactic sugar, eg
-
- die with Some::Error ( ... );
-
-=item record ( [ ARGS ] )
-
-Create a new C<Error> object and returns it. This is defined for
-syntactic sugar, eg
-
- record Some::Error ( ... )
- and return;
-
-=back
-
-=head2 STATIC METHODS
-
-=over 4
-
-=item prior ( [ PACKAGE ] )
-
-Return the last error created, or the last error associated with
-C<PACKAGE>
-
-=item flush ( [ PACKAGE ] )
-
-Flush the last error created, or the last error associated with
-C<PACKAGE>.It is necessary to clear the error stack before exiting the
-package or uncaught errors generated using C<record> will be reported.
-
- $Error->flush;
-
-=cut
-
-=back
-
-=head2 OBJECT METHODS
-
-=over 4
-
-=item stacktrace
-
-If the variable C<$Error::Debug> was non-zero when the error was
-created, then C<stacktrace> returns a string created by calling
-C<Carp::longmess>. If the variable was zero the C<stacktrace> returns
-the text of the error appended with the filename and line number of
-where the error was created, providing the text does not end with a
-newline.
-
-=item object
-
-The object this error was associated with
-
-=item file
-
-The file where the constructor of this error was called from
-
-=item line
-
-The line where the constructor of this error was called from
-
-=item text
-
-The text of the error
-
-=back
-
-=head2 OVERLOAD METHODS
-
-=over 4
-
-=item stringify
-
-A method that converts the object into a string. This method may simply
-return the same as the C<text> method, or it may append more
-information. For example the file name and line number.
-
-By default this method returns the C<-text> argument that was passed to
-the constructor, or the string C<"Died"> if none was given.
-
-=item value
-
-A method that will return a value that can be associated with the
-error. For example if an error was created due to the failure of a
-system call, then this may return the numeric value of C<$!> at the
-time.
-
-By default this method returns the C<-value> argument that was passed
-to the constructor.
-
-=back
-
-=head1 PRE-DEFINED ERROR CLASSES
-
-=over 4
-
-=item Error::Simple
-
-This class can be used to hold simple error strings and values. Its
-constructor takes two arguments. The first is a text value, the second
-is a numeric value. These values are what will be returned by the
-overload methods.
-
-If the text value ends with C<at file line 1> as $@ strings do, then
-this information will be used to set the C<-file> and C<-line> arguments
-of the error object.
-
-This class is used internally if an eval'd block die's with an error
-that is a plain string. (Unless C<$Error::ObjectifyCallback> is modified)
-
-=back
-
-=head1 $Error::ObjectifyCallback
-
-This variable holds a reference to a subroutine that converts errors that
-are plain strings to objects. It is used by Error.pm to convert textual
-errors to objects, and can be overridden by the user.
-
-It accepts a single argument which is a hash reference to named parameters.
-Currently the only named parameter passed is C<'text'> which is the text
-of the error, but others may be available in the future.
-
-For example the following code will cause Error.pm to throw objects of the
-class MyError::Bar by default:
-
- sub throw_MyError_Bar
- {
- my $args = shift;
- my $err = MyError::Bar->new();
- $err->{'MyBarText'} = $args->{'text'};
- return $err;
- }
-
- {
- local $Error::ObjectifyCallback = \&throw_MyError_Bar;
-
- # Error handling here.
- }
-
-=head1 KNOWN BUGS
-
-None, but that does not mean there are not any.
-
-=head1 AUTHORS
-
-Graham Barr <gbarr@pobox.com>
-
-The code that inspired me to write this was originally written by
-Peter Seibel <peter@weblogic.com> and adapted by Jesse Glick
-<jglick@sig.bsh.com>.
-
-=head1 MAINTAINER
-
-Shlomi Fish <shlomif@iglu.org.il>
-
-=head1 PAST MAINTAINERS
-
-Arun Kumar U <u_arunkumar@yahoo.com>
-
-=cut
{
int threads, i, work, offset;
struct thread_data data[MAX_PARALLEL];
+ uint64_t start = getnanotime();
if (!core_preload_index)
return;
if (pthread_join(p->pthread, NULL))
die("unable to join threaded lstat");
}
+ trace_performance_since(start, "preload index");
}
#endif
free(to_free);
}
+void sq_quote_buf_pretty(struct strbuf *dst, const char *src)
+{
+ static const char ok_punct[] = "+,-./:=@_^";
+ const char *p;
+
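+	/*
+	 * A character outside the alphanumerics and the ok_punct
+	 * whitelist forces full quoting of the whole string.
+	 */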
+ for (p = src; *p; p++) {
+ if (!isalpha(*p) && !isdigit(*p) && !strchr(ok_punct, *p)) {
+ sq_quote_buf(dst, src);
+ return;
+ }
+ }
+
+ /* if we get here, we did not need quoting */
+ strbuf_addstr(dst, src);
+}
+
void sq_quotef(struct strbuf *dst, const char *fmt, ...)
{
struct strbuf src = STRBUF_INIT;
strbuf_release(&src);
}
-void sq_quote_argv(struct strbuf *dst, const char** argv, size_t maxlen)
+void sq_quote_argv(struct strbuf *dst, const char **argv)
{
int i;
for (i = 0; argv[i]; ++i) {
strbuf_addch(dst, ' ');
sq_quote_buf(dst, argv[i]);
- if (maxlen && dst->len > maxlen)
- die("Too many or long arguments");
+ }
+}
+
+void sq_quote_argv_pretty(struct strbuf *dst, const char **argv)
+{
+ int i;
+
+ for (i = 0; argv[i]; i++) {
+ strbuf_addch(dst, ' ');
+ sq_quote_buf_pretty(dst, argv[i]);
}
}
*next = NULL;
return arg;
case '\\':
- c = *++src;
- if (need_bs_quote(c) && *++src == '\'') {
- *dst++ = c;
+ /*
+ * Allow backslashed characters outside of
+ * single-quotes only if they need escaping,
+ * and only if we resume the single-quoted part
+ * afterward.
+ */
+ if (need_bs_quote(src[1]) && src[2] == '\'') {
+ *dst++ = src[1];
+ src += 2;
continue;
}
/* Fallthrough */
*/
extern void sq_quote_buf(struct strbuf *, const char *src);
-extern void sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen);
+extern void sq_quote_argv(struct strbuf *, const char **argv);
extern void sq_quotef(struct strbuf *, const char *fmt, ...);
+/*
+ * These match their non-pretty variants, except that they avoid
+ * quoting when there are no exotic characters. These should only be used for
+ * human-readable output, as sq_dequote() is not smart enough to dequote it.
+ */
+void sq_quote_buf_pretty(struct strbuf *, const char *src);
+void sq_quote_argv_pretty(struct strbuf *, const char **argv);
+
/* This unwraps what sq_quote() produces in place, but returns
* NULL if the input does not look like what sq_quote would have
* produced.
break;
default:
die("unknown object type for %s: %s",
- oid_to_hex(oid), typename(type));
+ oid_to_hex(oid), type_name(type));
}
if (!obj)
replace_index_entry_in_base(istate, old, ce);
remove_name_hash(istate, old);
free(old);
+ ce->ce_flags &= ~CE_HASHED;
set_index_entry(istate, nr, ce);
ce->ce_flags |= CE_UPDATE_IN_BASE;
mark_fsmonitor_invalid(istate, ce);
void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
{
- struct cache_entry *old = istate->cache[nr], *new;
+ struct cache_entry *old_entry = istate->cache[nr], *new_entry;
int namelen = strlen(new_name);
- new = xmalloc(cache_entry_size(namelen));
- copy_cache_entry(new, old);
- new->ce_flags &= ~CE_HASHED;
- new->ce_namelen = namelen;
- new->index = 0;
- memcpy(new->name, new_name, namelen + 1);
+ new_entry = xmalloc(cache_entry_size(namelen));
+ copy_cache_entry(new_entry, old_entry);
+ new_entry->ce_flags &= ~CE_HASHED;
+ new_entry->ce_namelen = namelen;
+ new_entry->index = 0;
+ memcpy(new_entry->name, new_name, namelen + 1);
- cache_tree_invalidate_path(istate, old->name);
- untracked_cache_remove_from_index(istate, old->name);
+ cache_tree_invalidate_path(istate, old_entry->name);
+ untracked_cache_remove_from_index(istate, old_entry->name);
remove_index_entry_at(istate, nr);
- add_index_entry(istate, new, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
+ add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
}
void fill_stat_data(struct stat_data *sd, struct stat *st)
struct cache_entry *alias)
{
int len;
- struct cache_entry *new;
+ struct cache_entry *new_entry;
if (alias->ce_flags & CE_ADDED)
die("Will not add file alias '%s' ('%s' already exists in index)", ce->name, alias->name);
/* Ok, create the new entry using the name of the existing alias */
len = ce_namelen(alias);
- new = xcalloc(1, cache_entry_size(len));
- memcpy(new->name, alias->name, len);
- copy_cache_entry(new, ce);
+ new_entry = xcalloc(1, cache_entry_size(len));
+ memcpy(new_entry->name, alias->name, len);
+ copy_cache_entry(new_entry, ce);
save_or_free_index_entry(istate, ce);
- return new;
+ return new_entry;
}
void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
{
- unsigned char sha1[20];
- if (write_sha1_file("", 0, blob_type, sha1))
+ struct object_id oid;
+ if (write_object_file("", 0, blob_type, &oid))
die("cannot create an empty blob in the object database");
- hashcpy(ce->oid.hash, sha1);
+ oidcpy(&ce->oid, &oid);
}
int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
/* Add it in.. */
istate->cache_nr++;
if (istate->cache_nr > pos + 1)
- memmove(istate->cache + pos + 1,
- istate->cache + pos,
- (istate->cache_nr - pos - 1) * sizeof(ce));
+ MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos,
+ istate->cache_nr - pos - 1);
set_index_entry(istate, pos, ce);
istate->cache_changed |= CE_ENTRY_ADDED;
return 0;
size = ce_size(ce);
updated = xmalloc(size);
- memcpy(updated, ce, size);
+ copy_cache_entry(updated, ce);
+ memcpy(updated->name, ce->name, ce->ce_namelen + 1);
fill_stat_cache_info(updated, &st);
/*
* If ignore_valid is not set, we should leave CE_VALID bit
const char *typechange_fmt;
const char *added_fmt;
const char *unmerged_fmt;
+ uint64_t start = getnanotime();
modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n");
added_fmt = (in_porcelain ? "A\t%s\n" : "%s needs update\n");
unmerged_fmt = (in_porcelain ? "U\t%s\n" : "%s: needs merge\n");
for (i = 0; i < istate->cache_nr; i++) {
- struct cache_entry *ce, *new;
+ struct cache_entry *ce, *new_entry;
int cache_errno = 0;
int changed = 0;
int filtered = 0;
if (filtered)
continue;
- new = refresh_cache_ent(istate, ce, options, &cache_errno, &changed);
- if (new == ce)
+ new_entry = refresh_cache_ent(istate, ce, options, &cache_errno, &changed);
+ if (new_entry == ce)
continue;
- if (!new) {
+ if (!new_entry) {
const char *fmt;
if (really && cache_errno == EINVAL) {
continue;
}
- replace_index_entry(istate, i, new);
+ replace_index_entry(istate, i, new_entry);
}
+ trace_performance_since(start, "refresh index");
return has_errors;
}
static int verify_hdr(struct cache_header *hdr, unsigned long size)
{
- git_SHA_CTX c;
- unsigned char sha1[20];
+ git_hash_ctx c;
+ unsigned char hash[GIT_MAX_RAWSZ];
int hdr_version;
if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
if (!verify_index_checksum)
return 0;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, size - 20);
- git_SHA1_Final(sha1, &c);
- if (hashcmp(sha1, (unsigned char *)hdr + size - 20))
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
+ the_hash_algo->final_fn(hash, &c);
+ if (hashcmp(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
return error("bad index file sha1 signature");
return 0;
}
int read_index(struct index_state *istate)
{
- return read_index_from(istate, get_index_file());
+ return read_index_from(istate, get_index_file(), get_git_dir());
}
static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *ondisk,
die_errno("cannot stat the open index");
mmap_size = xsize_t(st.st_size);
- if (mmap_size < sizeof(struct cache_header) + 20)
+ if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
die("index file smaller than expected");
mmap = xmmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (verify_hdr(hdr, mmap_size) < 0)
goto unmap;
- hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - 20);
+ hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
istate->version = ntohl(hdr->hdr_version);
istate->cache_nr = ntohl(hdr->hdr_entries);
istate->cache_alloc = alloc_nr(istate->cache_nr);
istate->timestamp.sec = st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
- while (src_offset <= mmap_size - 20 - 8) {
+ while (src_offset <= mmap_size - the_hash_algo->rawsz - 8) {
/* After an array of active_nr index entries,
* there can be arbitrary number of extended
* sections, each of which is prefixed with
* This way, shared index can be removed if they have not been used
* for some time.
*/
-static void freshen_shared_index(char *base_sha1_hex, int warn)
+static void freshen_shared_index(const char *shared_index, int warn)
{
- char *shared_index = git_pathdup("sharedindex.%s", base_sha1_hex);
if (!check_and_freshen_file(shared_index, 1) && warn)
warning("could not freshen shared index '%s'", shared_index);
- free(shared_index);
}
-int read_index_from(struct index_state *istate, const char *path)
+int read_index_from(struct index_state *istate, const char *path,
+ const char *gitdir)
{
+ uint64_t start = getnanotime();
struct split_index *split_index;
int ret;
char *base_sha1_hex;
- const char *base_path;
+ char *base_path;
/* istate->initialized covers both .git/index and .git/sharedindex.xxx */
if (istate->initialized)
return istate->cache_nr;
ret = do_read_index(istate, path, 0);
+ trace_performance_since(start, "read cache %s", path);
split_index = istate->split_index;
if (!split_index || is_null_sha1(split_index->base_sha1)) {
split_index->base = xcalloc(1, sizeof(*split_index->base));
base_sha1_hex = sha1_to_hex(split_index->base_sha1);
- base_path = git_path("sharedindex.%s", base_sha1_hex);
+ base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_sha1_hex);
ret = do_read_index(split_index->base, base_path, 1);
if (hashcmp(split_index->base_sha1, split_index->base->sha1))
die("broken index, expect %s in %s, got %s",
base_sha1_hex, base_path,
sha1_to_hex(split_index->base->sha1));
- freshen_shared_index(base_sha1_hex, 0);
+ freshen_shared_index(base_path, 0);
merge_base_index(istate);
post_read_index_from(istate);
+ trace_performance_since(start, "read cache %s", base_path);
+ free(base_path);
return ret;
}
static unsigned char write_buffer[WRITE_BUFFER_SIZE];
static unsigned long write_buffer_len;
-static int ce_write_flush(git_SHA_CTX *context, int fd)
+static int ce_write_flush(git_hash_ctx *context, int fd)
{
unsigned int buffered = write_buffer_len;
if (buffered) {
- git_SHA1_Update(context, write_buffer, buffered);
+ the_hash_algo->update_fn(context, write_buffer, buffered);
if (write_in_full(fd, write_buffer, buffered) < 0)
return -1;
write_buffer_len = 0;
return 0;
}
-static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len)
+static int ce_write(git_hash_ctx *context, int fd, void *data, unsigned int len)
{
while (len) {
unsigned int buffered = write_buffer_len;
return 0;
}
-static int write_index_ext_header(git_SHA_CTX *context, int fd,
+static int write_index_ext_header(git_hash_ctx *context, int fd,
unsigned int ext, unsigned int sz)
{
ext = htonl(ext);
(ce_write(context, fd, &sz, 4) < 0)) ? -1 : 0;
}
-static int ce_flush(git_SHA_CTX *context, int fd, unsigned char *sha1)
+static int ce_flush(git_hash_ctx *context, int fd, unsigned char *hash)
{
unsigned int left = write_buffer_len;
if (left) {
write_buffer_len = 0;
- git_SHA1_Update(context, write_buffer, left);
+ the_hash_algo->update_fn(context, write_buffer, left);
}
- /* Flush first if not enough space for SHA1 signature */
- if (left + 20 > WRITE_BUFFER_SIZE) {
+ /* Flush first if not enough space for hash signature */
+ if (left + the_hash_algo->rawsz > WRITE_BUFFER_SIZE) {
if (write_in_full(fd, write_buffer, left) < 0)
return -1;
left = 0;
}
- /* Append the SHA1 signature at the end */
- git_SHA1_Final(write_buffer + left, context);
- hashcpy(sha1, write_buffer + left);
- left += 20;
+ /* Append the hash signature at the end */
+ the_hash_algo->final_fn(write_buffer + left, context);
+ hashcpy(hash, write_buffer + left);
+ left += the_hash_algo->rawsz;
return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0;
}
}
}
-static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce,
+static int ce_write_entry(git_hash_ctx *c, int fd, struct cache_entry *ce,
struct strbuf *previous_name, struct ondisk_cache_entry *ondisk)
{
int size;
int fd;
ssize_t n;
struct stat st;
- unsigned char sha1[20];
+ unsigned char hash[GIT_MAX_RAWSZ];
if (!istate->initialized)
return 0;
if (fstat(fd, &st))
goto out;
- if (st.st_size < sizeof(struct cache_header) + 20)
+ if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
goto out;
- n = pread_in_full(fd, sha1, 20, st.st_size - 20);
- if (n != 20)
+ n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz);
+ if (n != the_hash_algo->rawsz)
goto out;
- if (hashcmp(istate->sha1, sha1))
+ if (hashcmp(istate->sha1, hash))
goto out;
close(fd);
static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
int strip_extensions)
{
+ uint64_t start = getnanotime();
int newfd = tempfile->fd;
- git_SHA_CTX c;
+ git_hash_ctx c;
struct cache_header hdr;
int i, err = 0, removed, extended, hdr_version;
struct cache_entry **cache = istate->cache;
struct stat st;
struct ondisk_cache_entry_extended ondisk;
struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
- int drop_cache_tree = 0;
+ int drop_cache_tree = istate->drop_cache_tree;
for (i = removed = extended = 0; i < entries; i++) {
if (cache[i]->ce_flags & CE_REMOVE)
hdr.hdr_version = htonl(hdr_version);
hdr.hdr_entries = htonl(entries - removed);
- git_SHA1_Init(&c);
+ the_hash_algo->init_fn(&c);
if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
return -1;
return -1;
istate->timestamp.sec = (unsigned int)st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
+ trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
return 0;
}
}
static int write_shared_index(struct index_state *istate,
- struct lock_file *lock, unsigned flags)
+ struct tempfile **temp)
{
- struct tempfile *temp;
struct split_index *si = istate->split_index;
int ret;
- temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
- if (!temp) {
- hashclr(si->base_sha1);
- return do_write_locked_index(istate, lock, flags);
- }
move_cache_to_base_index(istate);
- ret = do_write_index(si->base, temp, 1);
- if (ret) {
- delete_tempfile(&temp);
+ ret = do_write_index(si->base, *temp, 1);
+ if (ret)
return ret;
- }
- ret = adjust_shared_perm(get_tempfile_path(temp));
+ ret = adjust_shared_perm(get_tempfile_path(*temp));
if (ret) {
- int save_errno = errno;
- error("cannot fix permission bits on %s", get_tempfile_path(temp));
- delete_tempfile(&temp);
- errno = save_errno;
+ error("cannot fix permission bits on %s", get_tempfile_path(*temp));
return ret;
}
- ret = rename_tempfile(&temp,
+ ret = rename_tempfile(temp,
git_path("sharedindex.%s", sha1_to_hex(si->base->sha1)));
if (!ret) {
hashcpy(si->base_sha1, si->base->sha1);
int new_shared_index, ret;
struct split_index *si = istate->split_index;
+ if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {
+ if (flags & COMMIT_LOCK)
+ rollback_lock_file(lock);
+ return 0;
+ }
+
if (istate->fsmonitor_last_update)
fill_fsmonitor_bitmap(istate);
new_shared_index = istate->cache_changed & SPLIT_INDEX_ORDERED;
if (new_shared_index) {
- ret = write_shared_index(istate, lock, flags);
+ struct tempfile *temp;
+ int saved_errno;
+
+ temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
+ if (!temp) {
+ hashclr(si->base_sha1);
+ ret = do_write_locked_index(istate, lock, flags);
+ goto out;
+ }
+ ret = write_shared_index(istate, &temp);
+
+ saved_errno = errno;
+ if (is_tempfile_active(temp))
+ delete_tempfile(&temp);
+ errno = saved_errno;
+
if (ret)
goto out;
}
ret = write_split_index(istate, lock, flags);
/* Freshen the shared index only if the split-index was written */
- if (!ret && !new_shared_index)
- freshen_shared_index(sha1_to_hex(si->base_sha1), 1);
+ if (!ret && !new_shared_index) {
+ const char *shared_index = git_path("sharedindex.%s",
+ sha1_to_hex(si->base_sha1));
+ freshen_shared_index(shared_index, 1);
+ }
out:
if (flags & COMMIT_LOCK)
static void align_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
{
- struct ref_formatting_stack *new;
+ struct ref_formatting_stack *new_stack;
push_stack_element(&state->stack);
- new = state->stack;
- new->at_end = end_align_handler;
- new->at_end_data = &atomv->atom->u.align;
+ new_stack = state->stack;
+ new_stack->at_end = end_align_handler;
+ new_stack->at_end_data = &atomv->atom->u.align;
}
static void if_then_else_handler(struct ref_formatting_stack **stack)
static void if_atom_handler(struct atom_value *atomv, struct ref_formatting_state *state)
{
- struct ref_formatting_stack *new;
+ struct ref_formatting_stack *new_stack;
struct if_then_else *if_then_else = xcalloc(sizeof(struct if_then_else), 1);
if_then_else->str = atomv->atom->u.if_then_else.str;
if_then_else->cmp_status = atomv->atom->u.if_then_else.cmp_status;
push_stack_element(&state->stack);
- new = state->stack;
- new->at_end = if_then_else_handler;
- new->at_end_data = if_then_else;
+ new_stack = state->stack;
+ new_stack->at_end = if_then_else_handler;
+ new_stack->at_end_data = if_then_else;
}
static int is_empty(const char *s)
if (deref)
name++;
if (!strcmp(name, "objecttype"))
- v->s = typename(obj->type);
+ v->s = type_name(obj->type);
else if (!strcmp(name, "objectsize")) {
v->value = sz;
v->s = xstrfmt("%lu", sz);
if (!strcmp(name, "tag"))
v->s = tag->tag;
else if (!strcmp(name, "type") && tag->tagged)
- v->s = typename(tag->tagged->type);
+ v->s = type_name(tag->tagged->type);
else if (!strcmp(name, "object") && tag->tagged)
v->s = xstrdup(oid_to_hex(&tag->tagged->oid));
}
if (atom->u.remote_ref.option == RR_REF)
*s = show_ref(&atom->u.remote_ref.refname, refname);
else if (atom->u.remote_ref.option == RR_TRACK) {
- if (stat_tracking_info(branch, &num_ours,
- &num_theirs, NULL)) {
+ if (stat_tracking_info(branch, &num_ours, &num_theirs,
+ NULL, AHEAD_BEHIND_FULL) < 0) {
*s = xstrdup(msgs.gone);
} else if (!num_ours && !num_theirs)
*s = "";
free((void *)to_free);
}
} else if (atom->u.remote_ref.option == RR_TRACKSHORT) {
- if (stat_tracking_info(branch, &num_ours,
- &num_theirs, NULL))
+ if (stat_tracking_info(branch, &num_ours, &num_theirs,
+ NULL, AHEAD_BEHIND_FULL) < 0)
return;
if (!num_ours && !num_theirs)
return show_ref(&atom->u.refname, ref->refname);
}
+static void get_object(struct ref_array_item *ref, const struct object_id *oid,
+ int deref, struct object **obj)
+{
+ int eaten;
+ unsigned long size;
+ void *buf = get_obj(oid, obj, &size, &eaten);
+ if (!buf)
+ die(_("missing object %s for %s"),
+ oid_to_hex(oid), ref->refname);
+ if (!*obj)
+ die(_("parse_object_buffer failed on %s for %s"),
+ oid_to_hex(oid), ref->refname);
+
+ grab_values(ref->value, deref, *obj, buf, size);
+ if (!eaten)
+ free(buf);
+}
+
/*
* Parse the object referred by ref, and grab needed value.
*/
static void populate_value(struct ref_array_item *ref)
{
- void *buf;
struct object *obj;
- int eaten, i;
- unsigned long size;
+ int i;
const struct object_id *tagged;
ref->value = xcalloc(used_atom_cnt, sizeof(struct atom_value));
for (i = 0; i < used_atom_cnt; i++) {
struct atom_value *v = &ref->value[i];
if (v->s == NULL)
- goto need_obj;
+ break;
}
- return;
-
- need_obj:
- buf = get_obj(&ref->objectname, &obj, &size, &eaten);
- if (!buf)
- die(_("missing object %s for %s"),
- oid_to_hex(&ref->objectname), ref->refname);
- if (!obj)
- die(_("parse_object_buffer failed on %s for %s"),
- oid_to_hex(&ref->objectname), ref->refname);
+ if (used_atom_cnt <= i)
+ return;
- grab_values(ref->value, 0, obj, buf, size);
- if (!eaten)
- free(buf);
+ get_object(ref, &ref->objectname, 0, &obj);
/*
* If there is no atom that wants to know about tagged
* is not consistent with what deref_tag() does
* which peels the onion to the core.
*/
- buf = get_obj(tagged, &obj, &size, &eaten);
- if (!buf)
- die(_("missing object %s for %s"),
- oid_to_hex(tagged), ref->refname);
- if (!obj)
- die(_("parse_object_buffer failed on %s for %s"),
- oid_to_hex(tagged), ref->refname);
- grab_values(ref->value, 1, obj, buf, size);
- if (!eaten)
- free(buf);
+ get_object(ref, tagged, 1, &obj);
}
/*
if (initial_ref_transaction_commit(packed_transaction, err)) {
ret = TRANSACTION_GENERIC_ERROR;
- goto cleanup;
}
+ packed_refs_unlock(refs->packed_ref_store);
cleanup:
if (packed_transaction)
ref_transaction_free(packed_transaction);
- packed_refs_unlock(refs->packed_ref_store);
transaction->state = REF_TRANSACTION_CLOSED;
string_list_clear(&affected_refnames, 0);
return ret;
int mmapped;
/*
- * The contents of the `packed-refs` file. If the file was
- * already sorted, this points at the mmapped contents of the
- * file. If not, this points at heap-allocated memory
- * containing the contents, sorted. If there were no contents
- * (e.g., because the file didn't exist), `buf` and `eof` are
- * both NULL.
+ * The contents of the `packed-refs` file:
+ *
+ * - buf -- a pointer to the start of the memory
+ * - start -- a pointer to the first byte of actual references
+ * (i.e., after the header line, if one is present)
+ * - eof -- a pointer just past the end of the reference
+ * contents
+ *
+ * If the `packed-refs` file was already sorted, `buf` points
+ * at the mmapped contents of the file. If not, it points at
+ * heap-allocated memory containing the contents, sorted. If
+ * there were no contents (e.g., because the file didn't
+ * exist), `buf`, `start`, and `eof` are all NULL.
*/
- char *buf, *eof;
-
- /* The size of the header line, if any; otherwise, 0: */
- size_t header_len;
+ char *buf, *start, *eof;
/*
* What is the peeled state of the `packed-refs` file that
} else {
free(snapshot->buf);
}
- snapshot->buf = snapshot->eof = NULL;
- snapshot->header_len = 0;
+ snapshot->buf = snapshot->start = snapshot->eof = NULL;
}
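
The new `start` member makes the post-header position explicit. A hedged
sketch of the invariant it maintains, mirroring the header-parsing hunk
further below (illustrative code, not part of the patch):

	if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
		/* skip the "# pack-refs with:" header line */
		char *eol = memchr(snapshot->buf, '\n',
				   snapshot->eof - snapshot->buf);
		snapshot->start = eol + 1;	/* first real record */
	} else {
		snapshot->start = snapshot->buf;
	}
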
/*
size_t len, i;
char *new_buffer, *dst;
- pos = snapshot->buf + snapshot->header_len;
+ pos = snapshot->start;
eof = snapshot->eof;
- len = eof - pos;
- if (!len)
+ if (pos == eof)
return;
+ len = eof - pos;
+
/*
* Initialize records based on a crude estimate of the number
* of references in the file (we'll grow it below if needed):
* place:
*/
clear_snapshot_buffer(snapshot);
- snapshot->buf = new_buffer;
+ snapshot->buf = snapshot->start = new_buffer;
snapshot->eof = new_buffer + len;
- snapshot->header_len = 0;
cleanup:
free(records);
*/
static void verify_buffer_safe(struct snapshot *snapshot)
{
- const char *buf = snapshot->buf + snapshot->header_len;
+ const char *start = snapshot->start;
const char *eof = snapshot->eof;
const char *last_line;
- if (buf == eof)
+ if (start == eof)
return;
- last_line = find_start_of_record(buf, eof - 1);
+ last_line = find_start_of_record(start, eof - 1);
if (*(eof - 1) != '\n' || eof - last_line < GIT_SHA1_HEXSZ + 2)
die_invalid_line(snapshot->refs->path,
last_line, eof - last_line);
}
+#define SMALL_FILE_SIZE (32*1024)
+
/*
* Depending on `mmap_strategy`, either mmap or read the contents of
* the `packed-refs` file into the snapshot. Return 1 if the file
- * existed and was read, or 0 if the file was absent. Die on errors.
+ * existed and was read, or 0 if the file was absent or empty. Die on
+ * errors.
*/
static int load_contents(struct snapshot *snapshot)
{
die_errno("couldn't stat %s", snapshot->refs->path);
size = xsize_t(st.st_size);
- switch (mmap_strategy) {
- case MMAP_NONE:
+ if (!size) {
+ return 0;
+ } else if (mmap_strategy == MMAP_NONE || size <= SMALL_FILE_SIZE) {
snapshot->buf = xmalloc(size);
bytes_read = read_in_full(fd, snapshot->buf, size);
if (bytes_read < 0 || bytes_read != size)
die_errno("couldn't read %s", snapshot->refs->path);
- snapshot->eof = snapshot->buf + size;
snapshot->mmapped = 0;
- break;
- case MMAP_TEMPORARY:
- case MMAP_OK:
+ } else {
snapshot->buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
- snapshot->eof = snapshot->buf + size;
snapshot->mmapped = 1;
- break;
}
close(fd);
+ snapshot->start = snapshot->buf;
+ snapshot->eof = snapshot->buf + size;
+
return 1;
}
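
One design note on the hunk above: files at or below SMALL_FILE_SIZE are
slurped with read(2) even when mmapping is permitted, since mapping tiny
files buys nothing. As a sketch, the decision reduces to (predicate name
hypothetical):

	static int want_read_not_mmap(size_t size)
	{
		return mmap_strategy == MMAP_NONE || size <= SMALL_FILE_SIZE;
	}
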
* `refname` starts. If `mustexist` is true and the reference doesn't
* exist, then return NULL. If `mustexist` is false and the reference
* doesn't exist, then return the point where that reference would be
- * inserted. In the latter mode, `refname` doesn't have to be a proper
- * reference name; for example, one could search for "refs/replace/"
- * to find the start of any replace references.
+ * inserted, or `snapshot->eof` (which might be NULL) if it would be
+ * inserted at the end of the file. In the latter mode, `refname`
+ * doesn't have to be a proper reference name; for example, one could
+ * search for "refs/replace/" to find the start of any replace
+ * references.
*
* The record is sought using a binary search, so `snapshot->buf` must
* be sorted.
* preceding records all have reference names that come
* *before* `refname`.
*/
- const char *lo = snapshot->buf + snapshot->header_len;
+ const char *lo = snapshot->start;
/*
* A pointer to a the first character of a record whose
*/
const char *hi = snapshot->eof;
- while (lo < hi) {
+ while (lo != hi) {
const char *mid, *rec;
int cmp;
/* If the file has a header line, process it: */
if (snapshot->buf < snapshot->eof && *snapshot->buf == '#') {
- struct strbuf tmp = STRBUF_INIT;
- char *p;
- const char *eol;
+ char *tmp, *p, *eol;
struct string_list traits = STRING_LIST_INIT_NODUP;
eol = memchr(snapshot->buf, '\n',
snapshot->buf,
snapshot->eof - snapshot->buf);
- strbuf_add(&tmp, snapshot->buf, eol - snapshot->buf);
+ tmp = xmemdupz(snapshot->buf, eol - snapshot->buf);
- if (!skip_prefix(tmp.buf, "# pack-refs with:", (const char **)&p))
+ if (!skip_prefix(tmp, "# pack-refs with:", (const char **)&p))
die_invalid_line(refs->path,
snapshot->buf,
snapshot->eof - snapshot->buf);
/* perhaps other traits later as well */
/* The "+ 1" is for the LF character. */
- snapshot->header_len = eol + 1 - snapshot->buf;
+ snapshot->start = eol + 1;
string_list_clear(&traits, 0);
- strbuf_release(&tmp);
+ free(tmp);
}
verify_buffer_safe(snapshot);
* We don't want to leave the file mmapped, so we are
* forced to make a copy now:
*/
- size_t size = snapshot->eof -
- (snapshot->buf + snapshot->header_len);
+ size_t size = snapshot->eof - snapshot->start;
char *buf_copy = xmalloc(size);
- memcpy(buf_copy, snapshot->buf + snapshot->header_len, size);
+ memcpy(buf_copy, snapshot->start, size);
clear_snapshot_buffer(snapshot);
- snapshot->buf = buf_copy;
+ snapshot->buf = snapshot->start = buf_copy;
snapshot->eof = buf_copy + size;
}
*/
snapshot = get_snapshot(refs);
- if (!snapshot->buf)
+ if (prefix && *prefix)
+ start = find_reference_location(snapshot, prefix, 0);
+ else
+ start = snapshot->start;
+
+ if (start == snapshot->eof)
return empty_ref_iterator_begin();
iter = xcalloc(1, sizeof(*iter));
iter->snapshot = snapshot;
acquire_snapshot(snapshot);
- if (prefix && *prefix)
- start = find_reference_location(snapshot, prefix, 0);
- else
- start = snapshot->buf + snapshot->header_len;
-
iter->pos = start;
iter->eof = snapshot->eof;
strbuf_init(&iter->refname_buf, 0);
return -1;
entry = dir->entries[entry_index];
- memmove(&dir->entries[entry_index],
- &dir->entries[entry_index + 1],
- (dir->nr - entry_index - 1) * sizeof(*dir->entries)
- );
+ MOVE_ARRAY(&dir->entries[entry_index],
+ &dir->entries[entry_index + 1], dir->nr - entry_index - 1);
dir->nr--;
if (dir->sorted > entry_index)
dir->sorted--;
#include "credential.h"
#include "sha1-array.h"
#include "send-pack.h"
+#include "quote.h"
static struct remote *remote;
/* always ends with a trailing slash */
char *deepen_since;
struct string_list deepen_not;
struct string_list push_options;
+ char *filter;
unsigned progress : 1,
check_self_contained_and_connected : 1,
cloning : 1,
thin : 1,
/* One of the SEND_PACK_PUSH_CERT_* constants. */
push_cert : 2,
- deepen_relative : 1;
+ deepen_relative : 1,
+ from_promisor : 1,
+ no_dependents : 1;
};
static struct options options;
static struct string_list cas_options = STRING_LIST_INIT_DUP;
return -1;
return 0;
} else if (!strcmp(name, "push-option")) {
- string_list_append(&options.push_options, value);
+ if (*value != '"')
+ string_list_append(&options.push_options, value);
+ else {
+ struct strbuf unquoted = STRBUF_INIT;
+ if (unquote_c_style(&unquoted, value, NULL) < 0)
+ die("invalid quoting in push-option value");
+ string_list_append_nodup(&options.push_options,
+ strbuf_detach(&unquoted, NULL));
+ }
return 0;
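
A hedged illustration of the rule above (the option value is made up):
a value that begins with a double quote must parse as a C-style quoted
string via unquote_c_style(); anything else is taken literally.

	struct strbuf unquoted = STRBUF_INIT;

	/* "ci.skip" would be appended verbatim; this one is unquoted */
	if (unquote_c_style(&unquoted, "\"hello\\nworld\"", NULL) < 0)
		die("invalid quoting in push-option value");
	/* unquoted.buf now holds "hello", a newline, then "world" */
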
#if LIBCURL_VERSION_NUM >= 0x070a08
return -1;
return 0;
#endif /* LIBCURL_VERSION_NUM >= 0x070a08 */
+ } else if (!strcmp(name, "from-promisor")) {
+ options.from_promisor = 1;
+ return 0;
+ } else if (!strcmp(name, "no-dependents")) {
+ options.no_dependents = 1;
+ return 0;
+ } else if (!strcmp(name, "filter")) {
+ options.filter = xstrdup(value);
+ return 0;
} else {
return 1 /* unsupported */;
}
* pkt-line matches our request.
*/
line = packet_read_line_buf(&last->buf, &last->len, NULL);
+ if (!line)
+ die("invalid server response; expected service, got flush packet");
strbuf_reset(&exp);
strbuf_addf(&exp, "# service=%s", service);
options.deepen_not.items[i].string);
if (options.deepen_relative && options.depth)
argv_array_push(&args, "--deepen-relative");
+ if (options.from_promisor)
+ argv_array_push(&args, "--from-promisor");
+ if (options.no_dependents)
+ argv_array_push(&args, "--no-dependents");
+ if (options.filter)
+ argv_array_pushf(&args, "--filter=%s", options.filter);
argv_array_push(&args, url.buf);
for (i = 0; i < nr_heads; i++) {
"refs/tags/*"
};
+/* See TAG_REFSPEC for the string version */
const struct refspec *tag_refspec = &s_tag_refspec;
struct counted_string {
remote->fetch_refspec[remote->fetch_refspec_nr++] = ref;
}
+void add_prune_tags_to_fetch_refspec(struct remote *remote)
+{
+ int nr = remote->fetch_refspec_nr;
+ int bufsize = nr + 1;
+ int size = sizeof(struct refspec);
+
+ remote->fetch = xrealloc(remote->fetch, size * bufsize);
+ memcpy(&remote->fetch[nr], tag_refspec, size);
+ add_fetch_refspec(remote, xstrdup(TAG_REFSPEC));
+}
+
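
A minimal caller sketch (the remote name is hypothetical); after the call
the remote's fetch refspecs end with TAG_REFSPEC, i.e.
"refs/tags/*:refs/tags/*":

	struct remote *r = remote_get("origin");

	add_prune_tags_to_fetch_refspec(r);
	/* r->fetch_refspec[r->fetch_refspec_nr - 1] is now TAG_REFSPEC */
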
static void add_url(struct remote *remote, const char *url)
{
ALLOC_GROW(remote->url, remote->url_nr + 1, remote->url_alloc);
ret = xcalloc(1, sizeof(struct remote));
ret->prune = -1; /* unspecified */
+ ret->prune_tags = -1; /* unspecified */
ALLOC_GROW(remotes, remotes_nr + 1, remotes_alloc);
remotes[remotes_nr++] = ret;
ret->name = xstrndup(name, len);
remote->skip_default_update = git_config_bool(key, value);
else if (!strcmp(subkey, "prune"))
remote->prune = git_config_bool(key, value);
+ else if (!strcmp(subkey, "prunetags"))
+ remote->prune_tags = git_config_bool(key, value);
else if (!strcmp(subkey, "url")) {
const char *v;
if (git_config_string(&v, key, value))
int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid)
{
struct object *o;
- struct commit *old, *new;
+ struct commit *old_commit, *new_commit;
struct commit_list *list, *used;
int found = 0;
/*
- * Both new and old must be commit-ish and new is descendant of
- * old. Otherwise we require --force.
+ * Both new_commit and old_commit must be commit-ish and new_commit
+ * must be a descendant of old_commit. Otherwise we require --force.
*/
o = deref_tag(parse_object(old_oid), NULL, 0);
if (!o || o->type != OBJ_COMMIT)
return 0;
- old = (struct commit *) o;
+ old_commit = (struct commit *) o;
o = deref_tag(parse_object(new_oid), NULL, 0);
if (!o || o->type != OBJ_COMMIT)
return 0;
- new = (struct commit *) o;
+ new_commit = (struct commit *) o;
- if (parse_commit(new) < 0)
+ if (parse_commit(new_commit) < 0)
return 0;
used = list = NULL;
- commit_list_insert(new, &list);
+ commit_list_insert(new_commit, &list);
while (list) {
- new = pop_most_recent_commit(&list, TMP_MARK);
- commit_list_insert(new, &used);
- if (new == old) {
+ new_commit = pop_most_recent_commit(&list, TMP_MARK);
+ commit_list_insert(new_commit, &used);
+ if (new_commit == old_commit) {
found = 1;
break;
}
}
/*
- * Compare a branch with its upstream, and save their differences (number
- * of commits) in *num_ours and *num_theirs. The name of the upstream branch
- * (or NULL if no upstream is defined) is returned via *upstream_name, if it
- * is not itself NULL.
+ * Lookup the upstream branch for the given branch and if present, optionally
+ * compute the commit ahead/behind values for the pair.
+ *
+ * If abf is AHEAD_BEHIND_FULL, compute the full ahead/behind and return the
+ * counts in *num_ours and *num_theirs. If abf is AHEAD_BEHIND_QUICK, skip
+ * the (potentially expensive) a/b computation (*num_ours and *num_theirs are
+ * set to zero).
+ *
+ * The name of the upstream branch (or NULL if no upstream is defined) is
+ * returned via *upstream_name, if it is not itself NULL.
*
* Returns -1 if num_ours and num_theirs could not be filled in (e.g., no
- * upstream defined, or ref does not exist), 0 otherwise.
+ * upstream defined, or ref does not exist). Returns 0 if the commits are
+ * identical. Returns 1 if commits are different.
*/
int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs,
- const char **upstream_name)
+ const char **upstream_name, enum ahead_behind_flags abf)
{
struct object_id oid;
struct commit *ours, *theirs;
if (!ours)
return -1;
+ *num_theirs = *num_ours = 0;
+
/* are we the same? */
- if (theirs == ours) {
- *num_theirs = *num_ours = 0;
+ if (theirs == ours)
return 0;
- }
+ if (abf == AHEAD_BEHIND_QUICK)
+ return 1;
+ if (abf != AHEAD_BEHIND_FULL)
+ BUG("stat_tracking_info: invalid abf '%d'", abf);
/* Run "rev-list --left-right ours...theirs" internally... */
argv_array_push(&argv, ""); /* ignored */
die("revision walk setup failed");
/* ... and count the commits on each side. */
- *num_ours = 0;
- *num_theirs = 0;
while (1) {
struct commit *c = get_revision(&revs);
if (!c)
clear_commit_marks(theirs, ALL_REV_FLAGS);
argv_array_clear(&argv);
- return 0;
+ return 1;
}
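
Putting the return-value contract above into a caller sketch (assuming a
`branch` that may or may not have an upstream configured):

	int ours, theirs;
	int sti = stat_tracking_info(branch, &ours, &theirs, NULL,
				     AHEAD_BEHIND_QUICK);

	if (sti < 0)
		; /* no upstream configured, or it no longer exists */
	else if (!sti)
		; /* branch and upstream point at the same commit */
	else
		; /* they differ; ours/theirs stay 0 in QUICK mode */
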
/*
* Return true when there is anything to report, otherwise false.
*/
-int format_tracking_info(struct branch *branch, struct strbuf *sb)
+int format_tracking_info(struct branch *branch, struct strbuf *sb,
+ enum ahead_behind_flags abf)
{
- int ours, theirs;
+ int ours, theirs, sti;
const char *full_base;
char *base;
int upstream_is_gone = 0;
- if (stat_tracking_info(branch, &ours, &theirs, &full_base) < 0) {
+ sti = stat_tracking_info(branch, &ours, &theirs, &full_base, abf);
+ if (sti < 0) {
if (!full_base)
return 0;
upstream_is_gone = 1;
if (advice_status_hints)
strbuf_addstr(sb,
_(" (use \"git branch --unset-upstream\" to fixup)\n"));
- } else if (!ours && !theirs) {
+ } else if (!sti) {
strbuf_addf(sb,
_("Your branch is up to date with '%s'.\n"),
base);
+ } else if (abf == AHEAD_BEHIND_QUICK) {
+ strbuf_addf(sb,
+ _("Your branch and '%s' refer to different commits.\n"),
+ base);
+ if (advice_status_hints)
+ strbuf_addf(sb, _(" (use \"%s\" for details)\n"),
+ "git status --ahead-behind");
} else if (!theirs) {
strbuf_addf(sb,
Q_("Your branch is ahead of '%s' by %d commit.\n",
int skip_default_update;
int mirror;
int prune;
+ int prune_tags;
const char *receivepack;
const char *uploadpack;
MATCH_REFS_FOLLOW_TAGS = (1 << 3)
};
+/* Flags for --ahead-behind option. */
+enum ahead_behind_flags {
+ AHEAD_BEHIND_UNSPECIFIED = -1,
+ AHEAD_BEHIND_QUICK = 0, /* just eq/neq reporting */
+ AHEAD_BEHIND_FULL = 1, /* traditional a/b reporting */
+};
+
/* Reporting of tracking info */
int stat_tracking_info(struct branch *branch, int *num_ours, int *num_theirs,
- const char **upstream_name);
-int format_tracking_info(struct branch *branch, struct strbuf *sb);
+ const char **upstream_name, enum ahead_behind_flags abf);
+int format_tracking_info(struct branch *branch, struct strbuf *sb,
+ enum ahead_behind_flags abf);
struct ref *get_local_heads(void);
/*
extern int is_empty_cas(const struct push_cas_option *);
void apply_push_cas(struct push_cas_option *, struct remote *, struct ref *);
+#define TAG_REFSPEC "refs/tags/*:refs/tags/*"
+
+void add_prune_tags_to_fetch_refspec(struct remote *remote);
+
#endif
ALLOC_GROW(replace_object, replace_object_nr + 1, replace_object_alloc);
replace_object_nr++;
if (pos < replace_object_nr)
- memmove(replace_object + pos + 1,
- replace_object + pos,
- (replace_object_nr - pos - 1) *
- sizeof(*replace_object));
+ MOVE_ARRAY(replace_object + pos + 1, replace_object + pos,
+ replace_object_nr - pos - 1);
replace_object[pos] = replace;
return 0;
}
if (!repo->index)
repo->index = xcalloc(1, sizeof(*repo->index));
- return read_index_from(repo->index, repo->index_file);
+ return read_index_from(repo->index, repo->index_file, repo->gitdir);
}
ALLOC_GROW(rerere_dir, rerere_dir_nr + 1, rerere_dir_alloc);
/* ... and add it in. */
rerere_dir_nr++;
- memmove(rerere_dir + pos + 1, rerere_dir + pos,
- (rerere_dir_nr - pos - 1) * sizeof(*rerere_dir));
+ MOVE_ARRAY(rerere_dir + pos + 1, rerere_dir + pos,
+ rerere_dir_nr - pos - 1);
rerere_dir[pos] = rr_dir;
scan_rerere_dir(rr_dir);
}
item->string);
}
- if (active_cache_changed) {
- if (write_locked_index(&the_index, &index_lock, COMMIT_LOCK))
- die("Unable to write new index file");
- } else
- rollback_lock_file(&index_lock);
+ if (write_locked_index(&the_index, &index_lock,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
+ die("Unable to write new index file");
}
static void remove_variant(struct rerere_id *id)
* it is popped next time around, we won't be trying
* to parse it and get an error.
*/
- if (!has_object_file(&commit->object.oid))
+ if (!commit->object.parsed &&
+ !has_object_file(&commit->object.oid))
commit->object.parsed = 1;
if (commit->object.flags & UNINTERESTING)
if (!object) {
if (revs->ignore_missing)
return object;
+ if (revs->exclude_promisor_objects && is_promisor_object(oid))
+ return NULL;
die("bad object %s", name);
}
object->flags |= flags;
for (parent = commit->parents; parent; parent = parent->next) {
struct commit *p = parent->item;
-
- if (parse_commit_gently(p, revs->ignore_missing_links) < 0)
+ int gently = revs->ignore_missing_links ||
+ revs->exclude_promisor_objects;
+ if (parse_commit_gently(p, gently) < 0) {
+ if (revs->exclude_promisor_objects &&
+ is_promisor_object(&p->object.oid)) {
+ if (revs->first_parent_only)
+ break;
+ continue;
+ }
return -1;
+ }
if (revs->show_source && !p->util)
p->util = commit->util;
p->object.flags |= left_flag;
return -1;
if (obj->flags & UNINTERESTING) {
mark_parents_uninteresting(commit);
- if (revs->show_all)
- p = &commit_list_insert(commit, p)->next;
slop = still_interesting(list, date, slop, &interesting_cache);
if (slop)
continue;
- /* If showing all, add the whole pending list to the end */
- if (revs->show_all)
- *p = list;
break;
}
if (revs->min_age != -1 && (commit->date > revs->min_age))
continue; /* current index already taken care of */
if (read_index_from(&istate,
- worktree_git_path(wt, "index")) > 0)
+ worktree_git_path(wt, "index"),
+ get_worktree_git_dir(wt)) > 0)
do_add_index_objects_to_pending(revs, &istate);
discard_index(&istate);
}
revs->dense = 1;
} else if (!strcmp(arg, "--sparse")) {
revs->dense = 0;
- } else if (!strcmp(arg, "--show-all")) {
- revs->show_all = 1;
} else if (!strcmp(arg, "--in-commit-order")) {
revs->tree_blobs_in_commit_order = 1;
} else if (!strcmp(arg, "--remove-empty")) {
revs->grep_filter.pattern_type_option = GREP_PATTERN_TYPE_ERE;
} else if (!strcmp(arg, "--regexp-ignore-case") || !strcmp(arg, "-i")) {
revs->grep_filter.ignore_case = 1;
- revs->diffopt.flags.pickaxe_ignore_case = 1;
+ revs->diffopt.pickaxe_opts |= DIFF_PICKAXE_IGNORE_CASE;
} else if (!strcmp(arg, "--fixed-strings") || !strcmp(arg, "-F")) {
revs->grep_filter.pattern_type_option = GREP_PATTERN_TYPE_FIXED;
} else if (!strcmp(arg, "--perl-regexp") || !strcmp(arg, "-P")) {
revs->limited = 1;
} else if (!strcmp(arg, "--ignore-missing")) {
revs->ignore_missing = 1;
+ } else if (!strcmp(arg, "--exclude-promisor-objects")) {
+ if (fetch_if_missing)
+ die("BUG: exclude_promisor_objects can only be used when fetch_if_missing is 0");
+ revs->exclude_promisor_objects = 1;
} else {
int opts = diff_opt_parse(&revs->diffopt, argv, argc, revs->prefix);
if (!opts)
revs->diff = 1;
/* Pickaxe, diff-filter and rename following need diffs */
- if (revs->diffopt.pickaxe ||
+ if ((revs->diffopt.pickaxe_opts & DIFF_PICKAXE_KINDS_MASK) ||
revs->diffopt.filter ||
revs->diffopt.flags.follow_renames)
revs->diff = 1;
+ if (revs->diffopt.objfind)
+ revs->simplify_history = 0;
+
if (revs->topo_order)
revs->limited = 1;
clear_object_flags(SEEN | ADDED | SHOWN);
}
+static int mark_uninteresting(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *unused)
+{
+ struct object *o = parse_object(oid);
+ o->flags |= UNINTERESTING | SEEN;
+ return 0;
+}
+
int prepare_revision_walk(struct rev_info *revs)
{
int i;
(revs->limited && limiting_can_increase_treesame(revs)))
revs->treesame.name = "treesame";
+ if (revs->exclude_promisor_objects) {
+ for_each_packed_object(mark_uninteresting, NULL,
+ FOR_EACH_OBJECT_PROMISOR_ONLY);
+ }
+
if (revs->no_walk != REVISION_WALK_NO_WALK_UNSORTED)
commit_list_sort_by_date(&revs->commits);
if (revs->no_walk)
return commit_ignore;
if (revs->unpacked && has_sha1_pack(commit->object.oid.hash))
return commit_ignore;
- if (revs->show_all)
- return commit_show;
if (commit->object.flags & UNINTERESTING)
return commit_ignore;
if (revs->min_age != -1 &&
enum commit_action action = get_commit_action(revs, commit);
if (action == commit_show &&
- !revs->show_all &&
revs->prune && revs->dense && want_ancestry(revs)) {
/*
* --full-diff on simplified parents is no good: it
unsigned int dense:1,
prune:1,
no_walk:2,
- show_all:1,
remove_empty_trees:1,
simplify_history:1,
topo_order:1,
ancestry_path:1,
first_parent_only:1,
line_level_traverse:1,
- tree_blobs_in_commit_order:1;
+ tree_blobs_in_commit_order:1,
+
+ /* for internal use only */
+ exclude_promisor_objects:1;
/* Diff flags */
unsigned int diff:1,
#include "thread-utils.h"
#include "strbuf.h"
#include "string-list.h"
+#include "quote.h"
void child_process_init(struct child_process *child)
{
return code;
}
+static void trace_add_env(struct strbuf *dst, const char *const *deltaenv)
+{
+ struct string_list envs = STRING_LIST_INIT_DUP;
+ const char *const *e;
+ int i;
+ int printed_unset = 0;
+
+ /* Last one wins, see run-command.c:prep_childenv() for context */
+ for (e = deltaenv; e && *e; e++) {
+ struct strbuf key = STRBUF_INIT;
+ char *equals = strchr(*e, '=');
+
+ if (equals) {
+ strbuf_add(&key, *e, equals - *e);
+ string_list_insert(&envs, key.buf)->util = equals + 1;
+ } else {
+ string_list_insert(&envs, *e)->util = NULL;
+ }
+ strbuf_release(&key);
+ }
+
+ /* "unset X Y...;" */
+ for (i = 0; i < envs.nr; i++) {
+ const char *var = envs.items[i].string;
+ const char *val = envs.items[i].util;
+
+ if (val || !getenv(var))
+ continue;
+
+ if (!printed_unset) {
+ strbuf_addstr(dst, " unset");
+ printed_unset = 1;
+ }
+ strbuf_addf(dst, " %s", var);
+ }
+ if (printed_unset)
+ strbuf_addch(dst, ';');
+
+ /* ... followed by "A=B C=D ..." */
+ for (i = 0; i < envs.nr; i++) {
+ const char *var = envs.items[i].string;
+ const char *val = envs.items[i].util;
+ const char *oldval;
+
+ if (!val)
+ continue;
+
+ oldval = getenv(var);
+ if (oldval && !strcmp(val, oldval))
+ continue;
+
+ strbuf_addf(dst, " %s=", var);
+ sq_quote_buf_pretty(dst, val);
+ }
+ string_list_clear(&envs, 0);
+}
+
+static void trace_run_command(const struct child_process *cp)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ if (!trace_want(&trace_default_key))
+ return;
+
+ strbuf_addf(&buf, "trace: run_command:");
+ if (cp->dir) {
+ strbuf_addstr(&buf, " cd ");
+ sq_quote_buf_pretty(&buf, cp->dir);
+ strbuf_addch(&buf, ';');
+ }
+ /*
+ * The caller is responsible for initializing cp->env from
+ * cp->env_array if needed. We only check one place.
+ */
+ if (cp->env)
+ trace_add_env(&buf, cp->env);
+ if (cp->git_cmd)
+ strbuf_addstr(&buf, " git");
+ sq_quote_argv_pretty(&buf, cp->argv);
+
+ trace_printf("%s", buf.buf);
+ strbuf_release(&buf);
+}
+
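
For illustration (all values hypothetical), a traced child process that
changes directory, unsets one inherited variable and overrides another
would be logged as a single line along the lines of:

	trace: run_command: cd /tmp; unset GIT_DIR; GIT_PAGER=cat git status
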
int start_command(struct child_process *cmd)
{
int need_in, need_out, need_err;
cmd->err = fderr[0];
}
- trace_argv_printf(cmd->argv, "trace: run_command:");
+ trace_run_command(cmd);
+
fflush(NULL);
#ifndef GIT_WINDOWS_NATIVE
static int receive_unpack_status(int in)
{
const char *line = packet_read_line(in, NULL);
+ if (!line)
+ return error(_("unexpected flush packet while reading remote unpack status"));
if (!skip_prefix(line, "unpack ", &line))
return error(_("unable to parse remote unpack status: %s"), line);
if (strcmp(line, "ok"))
#include "cache.h"
#include "config.h"
#include "lockfile.h"
-#include "sequencer.h"
#include "dir.h"
#include "object.h"
#include "commit.h"
+#include "sequencer.h"
#include "tag.h"
#include "run-command.h"
#include "exec_cmd.h"
#include "log-tree.h"
#include "wt-status.h"
#include "hashmap.h"
+#include "notes-utils.h"
+#include "sigchain.h"
#define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
const char sign_off_header[] = "Signed-off-by: ";
static const char cherry_picked_prefix[] = "(cherry picked from commit ";
+GIT_PATH_FUNC(git_path_commit_editmsg, "COMMIT_EDITMSG")
+
GIT_PATH_FUNC(git_path_seq_dir, "sequencer")
static GIT_PATH_FUNC(git_path_todo_file, "sequencer/todo")
static GIT_PATH_FUNC(rebase_path_strategy_opts, "rebase-merge/strategy_opts")
static GIT_PATH_FUNC(rebase_path_allow_rerere_autoupdate, "rebase-merge/allow_rerere_autoupdate")
+static int git_sequencer_config(const char *k, const char *v, void *cb)
+{
+ struct replay_opts *opts = cb;
+ int status;
+
+ if (!strcmp(k, "commit.cleanup")) {
+ const char *s;
+
+ status = git_config_string(&s, k, v);
+ if (status)
+ return status;
+
+ if (!strcmp(s, "verbatim"))
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_NONE;
+ else if (!strcmp(s, "whitespace"))
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SPACE;
+ else if (!strcmp(s, "strip"))
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_ALL;
+ else if (!strcmp(s, "scissors"))
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SPACE;
+ else
+ warning(_("invalid commit message cleanup mode '%s'"),
+ s);
+
+ return status;
+ }
+
+ if (!strcmp(k, "commit.gpgsign")) {
+ opts->gpg_sign = git_config_bool(k, v) ? xstrdup("") : NULL;
+ return 0;
+ }
+
+ status = git_gpg_config(k, v, NULL);
+ if (status)
+ return status;
+
+ return git_diff_basic_config(k, v, NULL);
+}
+
+void sequencer_init_config(struct replay_opts *opts)
+{
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_NONE;
+ git_config(git_sequencer_config, opts);
+}
+
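
As a hedged example of what the parser above accepts, a configuration such
as:

	[commit]
		cleanup = whitespace
		gpgsign = true

would set default_msg_cleanup to COMMIT_MSG_CLEANUP_SPACE and gpg_sign to
the empty string (meaning: sign with the default key). Note that
"scissors" currently also falls back to the whitespace behavior here.
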
static inline int is_rebase_i(const struct replay_opts *opts)
{
return opts->action == REPLAY_INTERACTIVE_REBASE;
static int write_message(const void *buf, size_t len, const char *filename,
int append_eol)
{
- static struct lock_file msg_file;
+ struct lock_file msg_file = LOCK_INIT;
int msg_fd = hold_lock_file_for_update(&msg_file, filename, 0);
if (msg_fd < 0)
rollback_lock_file(&msg_file);
return error_errno(_("could not write eol to '%s'"), filename);
}
- if (commit_lock_file(&msg_file) < 0) {
- rollback_lock_file(&msg_file);
- return error(_("failed to finalize '%s'."), filename);
- }
+ if (commit_lock_file(&msg_file) < 0)
+ return error(_("failed to finalize '%s'"), filename);
return 0;
}
struct tree *result, *next_tree, *base_tree, *head_tree;
int clean;
char **xopt;
- static struct lock_file index_lock;
+ struct lock_file index_lock = LOCK_INIT;
if (hold_locked_index(&index_lock, LOCK_REPORT_ON_ERROR) < 0)
return -1;
fputs(o.obuf.buf, stdout);
strbuf_release(&o.obuf);
diff_warn_rename_limit("merge.renamelimit", o.needed_rename_limit, 0);
- if (clean < 0)
+ if (clean < 0) {
+ rollback_lock_file(&index_lock);
return clean;
+ }
- if (active_cache_changed &&
- write_locked_index(&the_index, &index_lock, COMMIT_LOCK))
+ if (write_locked_index(&the_index, &index_lock,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED))
/*
* TRANSLATORS: %s will be "revert", "cherry-pick" or
* "rebase -i".
*/
return error(_("%s: Unable to write new index file"),
_(action_name(opts)));
- rollback_lock_file(&index_lock);
-
- if (opts->signoff)
- append_signoff(msgbuf, 0, 0);
if (!clean)
append_conflicts_hint(msgbuf);
return 0;
}
+static char *get_author(const char *message)
+{
+ size_t len;
+ const char *a;
+
+ a = find_commit_header(message, "author", &len);
+ if (a)
+ return xmemdupz(a, len);
+
+ return NULL;
+}
+
static const char staged_changes_advice[] =
N_("you have staged changes in your working tree\n"
"If these changes are meant to be squashed into the previous commit, run:\n"
argv_array_push(&cmd.args, "--amend");
if (opts->gpg_sign)
argv_array_pushf(&cmd.args, "-S%s", opts->gpg_sign);
- if (opts->signoff)
- argv_array_push(&cmd.args, "-s");
if (defmsg)
argv_array_pushl(&cmd.args, "-F", defmsg, NULL);
if ((flags & CLEANUP_MSG))
return run_command(&cmd);
}
+static int rest_is_empty(const struct strbuf *sb, int start)
+{
+ int i, eol;
+ const char *nl;
+
+ /* Check if the rest is just whitespace and Signed-off-by's. */
+ for (i = start; i < sb->len; i++) {
+ nl = memchr(sb->buf + i, '\n', sb->len - i);
+ if (nl)
+ eol = nl - sb->buf;
+ else
+ eol = sb->len;
+
+ if (strlen(sign_off_header) <= eol - i &&
+ starts_with(sb->buf + i, sign_off_header)) {
+ i = eol;
+ continue;
+ }
+ while (i < eol)
+ if (!isspace(sb->buf[i++]))
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Find out if the message in the strbuf contains only whitespace and
+ * Signed-off-by lines.
+ */
+int message_is_empty(const struct strbuf *sb,
+ enum commit_msg_cleanup_mode cleanup_mode)
+{
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_NONE && sb->len)
+ return 0;
+ return rest_is_empty(sb, 0);
+}
+
+/*
+ * See if the user edited the message in the editor or left what
+ * was in the template intact
+ */
+int template_untouched(const struct strbuf *sb, const char *template_file,
+ enum commit_msg_cleanup_mode cleanup_mode)
+{
+ struct strbuf tmpl = STRBUF_INIT;
+ const char *start;
+
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_NONE && sb->len)
+ return 0;
+
+ if (!template_file || strbuf_read_file(&tmpl, template_file, 0) <= 0)
+ return 0;
+
+ strbuf_stripspace(&tmpl, cleanup_mode == COMMIT_MSG_CLEANUP_ALL);
+ if (!skip_prefix(sb->buf, tmpl.buf, &start))
+ start = sb->buf;
+ strbuf_release(&tmpl);
+ return rest_is_empty(sb, start - sb->buf);
+}
+
+int update_head_with_reflog(const struct commit *old_head,
+ const struct object_id *new_head,
+ const char *action, const struct strbuf *msg,
+ struct strbuf *err)
+{
+ struct ref_transaction *transaction;
+ struct strbuf sb = STRBUF_INIT;
+ const char *nl;
+ int ret = 0;
+
+ if (action) {
+ strbuf_addstr(&sb, action);
+ strbuf_addstr(&sb, ": ");
+ }
+
+ nl = strchr(msg->buf, '\n');
+ if (nl) {
+ strbuf_add(&sb, msg->buf, nl + 1 - msg->buf);
+ } else {
+ strbuf_addbuf(&sb, msg);
+ strbuf_addch(&sb, '\n');
+ }
+
+ transaction = ref_transaction_begin(err);
+ if (!transaction ||
+ ref_transaction_update(transaction, "HEAD", new_head,
+ old_head ? &old_head->object.oid : &null_oid,
+ 0, sb.buf, err) ||
+ ref_transaction_commit(transaction, err)) {
+ ret = -1;
+ }
+ ref_transaction_free(transaction);
+ strbuf_release(&sb);
+
+ return ret;
+}
+
+static int run_rewrite_hook(const struct object_id *oldoid,
+ const struct object_id *newoid)
+{
+ struct child_process proc = CHILD_PROCESS_INIT;
+ const char *argv[3];
+ int code;
+ struct strbuf sb = STRBUF_INIT;
+
+ argv[0] = find_hook("post-rewrite");
+ if (!argv[0])
+ return 0;
+
+ argv[1] = "amend";
+ argv[2] = NULL;
+
+ proc.argv = argv;
+ proc.in = -1;
+ proc.stdout_to_stderr = 1;
+
+ code = start_command(&proc);
+ if (code)
+ return code;
+ strbuf_addf(&sb, "%s %s\n", oid_to_hex(oldoid), oid_to_hex(newoid));
+ sigchain_push(SIGPIPE, SIG_IGN);
+ write_in_full(proc.in, sb.buf, sb.len);
+ close(proc.in);
+ strbuf_release(&sb);
+ sigchain_pop(SIGPIPE);
+ return finish_command(&proc);
+}
+
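
Spelled out, the hook contract implemented above (placeholders, not real
object names): the hook is invoked as "post-rewrite amend" and receives
one line per rewritten commit on its standard input:

	<old-oid> SP <new-oid> LF
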
+void commit_post_rewrite(const struct commit *old_head,
+ const struct object_id *new_head)
+{
+ struct notes_rewrite_cfg *cfg;
+
+ cfg = init_copy_notes_for_rewrite("amend");
+ if (cfg) {
+ /* we are amending, so old_head is not NULL */
+ copy_note_for_rewrite(cfg, &old_head->object.oid, new_head);
+ finish_copy_notes_for_rewrite(cfg, "Notes added by 'git commit --amend'");
+ }
+ run_rewrite_hook(&old_head->object.oid, new_head);
+}
+
+static int run_prepare_commit_msg_hook(struct strbuf *msg, const char *commit)
+{
+ struct argv_array hook_env = ARGV_ARRAY_INIT;
+ int ret;
+ const char *name;
+
+ name = git_path_commit_editmsg();
+ if (write_message(msg->buf, msg->len, name, 0))
+ return -1;
+
+ argv_array_pushf(&hook_env, "GIT_INDEX_FILE=%s", get_index_file());
+ argv_array_push(&hook_env, "GIT_EDITOR=:");
+ if (commit)
+ ret = run_hook_le(hook_env.argv, "prepare-commit-msg", name,
+ "commit", commit, NULL);
+ else
+ ret = run_hook_le(hook_env.argv, "prepare-commit-msg", name,
+ "message", NULL);
+ if (ret)
+ ret = error(_("'prepare-commit-msg' hook failed"));
+ argv_array_clear(&hook_env);
+
+ return ret;
+}
+
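
The two invocation shapes above, spelled out (the message path is the
COMMIT_EDITMSG file; "HEAD" is the only commit argument used here):

	prepare-commit-msg <COMMIT_EDITMSG> commit HEAD	(amend case)
	prepare-commit-msg <COMMIT_EDITMSG> message	(otherwise)
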
+static const char implicit_ident_advice_noconfig[] =
+N_("Your name and email address were configured automatically based\n"
+"on your username and hostname. Please check that they are accurate.\n"
+"You can suppress this message by setting them explicitly. Run the\n"
+"following command and follow the instructions in your editor to edit\n"
+"your configuration file:\n"
+"\n"
+" git config --global --edit\n"
+"\n"
+"After doing this, you may fix the identity used for this commit with:\n"
+"\n"
+" git commit --amend --reset-author\n");
+
+static const char implicit_ident_advice_config[] =
+N_("Your name and email address were configured automatically based\n"
+"on your username and hostname. Please check that they are accurate.\n"
+"You can suppress this message by setting them explicitly:\n"
+"\n"
+" git config --global user.name \"Your Name\"\n"
+" git config --global user.email you@example.com\n"
+"\n"
+"After doing this, you may fix the identity used for this commit with:\n"
+"\n"
+" git commit --amend --reset-author\n");
+
+static const char *implicit_ident_advice(void)
+{
+ char *user_config = expand_user_path("~/.gitconfig", 0);
+ char *xdg_config = xdg_config_home("config");
+ int config_exists = file_exists(user_config) || file_exists(xdg_config);
+
+ free(user_config);
+ free(xdg_config);
+
+ if (config_exists)
+ return _(implicit_ident_advice_config);
+ else
+ return _(implicit_ident_advice_noconfig);
+
+}
+
+void print_commit_summary(const char *prefix, const struct object_id *oid,
+ unsigned int flags)
+{
+ struct rev_info rev;
+ struct commit *commit;
+ struct strbuf format = STRBUF_INIT;
+ const char *head;
+ struct pretty_print_context pctx = {0};
+ struct strbuf author_ident = STRBUF_INIT;
+ struct strbuf committer_ident = STRBUF_INIT;
+
+ commit = lookup_commit(oid);
+ if (!commit)
+ die(_("couldn't look up newly created commit"));
+ if (parse_commit(commit))
+ die(_("could not parse newly created commit"));
+
+ strbuf_addstr(&format, "format:%h] %s");
+
+ format_commit_message(commit, "%an <%ae>", &author_ident, &pctx);
+ format_commit_message(commit, "%cn <%ce>", &committer_ident, &pctx);
+ if (strbuf_cmp(&author_ident, &committer_ident)) {
+ strbuf_addstr(&format, "\n Author: ");
+ strbuf_addbuf_percentquote(&format, &author_ident);
+ }
+ if (flags & SUMMARY_SHOW_AUTHOR_DATE) {
+ struct strbuf date = STRBUF_INIT;
+
+ format_commit_message(commit, "%ad", &date, &pctx);
+ strbuf_addstr(&format, "\n Date: ");
+ strbuf_addbuf_percentquote(&format, &date);
+ strbuf_release(&date);
+ }
+ if (!committer_ident_sufficiently_given()) {
+ strbuf_addstr(&format, "\n Committer: ");
+ strbuf_addbuf_percentquote(&format, &committer_ident);
+ if (advice_implicit_identity) {
+ strbuf_addch(&format, '\n');
+ strbuf_addstr(&format, implicit_ident_advice());
+ }
+ }
+ strbuf_release(&author_ident);
+ strbuf_release(&committer_ident);
+
+ init_revisions(&rev, prefix);
+ setup_revisions(0, NULL, &rev, NULL);
+
+ rev.diff = 1;
+ rev.diffopt.output_format =
+ DIFF_FORMAT_SHORTSTAT | DIFF_FORMAT_SUMMARY;
+
+ rev.verbose_header = 1;
+ rev.show_root_diff = 1;
+ get_commit_format(format.buf, &rev);
+ rev.always_show_header = 0;
+ rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
+ rev.diffopt.break_opt = 0;
+ diff_setup_done(&rev.diffopt);
+
+ head = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
+ if (!head)
+ die_errno(_("unable to resolve HEAD after creating commit"));
+ if (!strcmp(head, "HEAD"))
+ head = _("detached HEAD");
+ else
+ skip_prefix(head, "refs/heads/", &head);
+ printf("[%s%s ", head, (flags & SUMMARY_INITIAL_COMMIT) ?
+ _(" (root-commit)") : "");
+
+ if (!log_tree_commit(&rev, commit)) {
+ rev.always_show_header = 1;
+ rev.use_terminator = 1;
+ log_tree_commit(&rev, commit);
+ }
+
+ strbuf_release(&format);
+}
+
+static int parse_head(struct commit **head)
+{
+ struct commit *current_head;
+ struct object_id oid;
+
+ if (get_oid("HEAD", &oid)) {
+ current_head = NULL;
+ } else {
+ current_head = lookup_commit_reference(&oid);
+ if (!current_head)
+ return error(_("could not parse HEAD"));
+ if (oidcmp(&oid, &current_head->object.oid)) {
+ warning(_("HEAD %s is not a commit!"),
+ oid_to_hex(&oid));
+ }
+ if (parse_commit(current_head))
+ return error(_("could not parse HEAD commit"));
+ }
+ *head = current_head;
+
+ return 0;
+}
+
+/*
+ * Try to commit without forking 'git commit'. In some cases we need
+ * to run 'git commit' to display an error message.
+ *
+ * Returns:
+ * -1 - error unable to commit
+ * 0 - success
+ * 1 - run 'git commit'
+ */
+static int try_to_commit(struct strbuf *msg, const char *author,
+ struct replay_opts *opts, unsigned int flags,
+ struct object_id *oid)
+{
+ struct object_id tree;
+ struct commit *current_head;
+ struct commit_list *parents = NULL;
+ struct commit_extra_header *extra = NULL;
+ struct strbuf err = STRBUF_INIT;
+ struct strbuf commit_msg = STRBUF_INIT;
+ char *amend_author = NULL;
+ const char *hook_commit = NULL;
+ enum commit_msg_cleanup_mode cleanup;
+ int res = 0;
+
+ if (parse_head(&current_head))
+ return -1;
+
+ if (flags & AMEND_MSG) {
+ const char *exclude_gpgsig[] = { "gpgsig", NULL };
+ const char *out_enc = get_commit_output_encoding();
+ const char *message = logmsg_reencode(current_head, NULL,
+ out_enc);
+
+ if (!msg) {
+ const char *orig_message = NULL;
+
+ find_commit_subject(message, &orig_message);
+ msg = &commit_msg;
+ strbuf_addstr(msg, orig_message);
+ hook_commit = "HEAD";
+ }
+ author = amend_author = get_author(message);
+ unuse_commit_buffer(current_head, message);
+ if (!author) {
+ res = error(_("unable to parse commit author"));
+ goto out;
+ }
+ parents = copy_commit_list(current_head->parents);
+ extra = read_commit_extra_headers(current_head, exclude_gpgsig);
+ } else if (current_head) {
+ commit_list_insert(current_head, &parents);
+ }
+
+ if (write_cache_as_tree(tree.hash, 0, NULL)) {
+ res = error(_("git write-tree failed to write a tree"));
+ goto out;
+ }
+
+ if (!(flags & ALLOW_EMPTY) && !oidcmp(current_head ?
+ &current_head->tree->object.oid :
+ &empty_tree_oid, &tree)) {
+ res = 1; /* run 'git commit' to display error message */
+ goto out;
+ }
+
+ if (find_hook("prepare-commit-msg")) {
+ res = run_prepare_commit_msg_hook(msg, hook_commit);
+ if (res)
+ goto out;
+ if (strbuf_read_file(&commit_msg, git_path_commit_editmsg(),
+ 2048) < 0) {
+ res = error_errno(_("unable to read commit message "
+ "from '%s'"),
+ git_path_commit_editmsg());
+ goto out;
+ }
+ msg = &commit_msg;
+ }
+
+ cleanup = (flags & CLEANUP_MSG) ? COMMIT_MSG_CLEANUP_ALL :
+ opts->default_msg_cleanup;
+
+ if (cleanup != COMMIT_MSG_CLEANUP_NONE)
+ strbuf_stripspace(msg, cleanup == COMMIT_MSG_CLEANUP_ALL);
+ if (!opts->allow_empty_message && message_is_empty(msg, cleanup)) {
+ res = 1; /* run 'git commit' to display error message */
+ goto out;
+ }
+
+ if (commit_tree_extended(msg->buf, msg->len, &tree, parents,
+ oid, author, opts->gpg_sign, extra)) {
+ res = error(_("failed to write commit object"));
+ goto out;
+ }
+
+ if (update_head_with_reflog(current_head, oid,
+ getenv("GIT_REFLOG_ACTION"), msg, &err)) {
+ res = error("%s", err.buf);
+ goto out;
+ }
+
+ if (flags & AMEND_MSG)
+ commit_post_rewrite(current_head, oid);
+
+out:
+ free_commit_extra_headers(extra);
+ strbuf_release(&err);
+ strbuf_release(&commit_msg);
+ free(amend_author);
+
+ return res;
+}
+
+static int do_commit(const char *msg_file, const char *author,
+ struct replay_opts *opts, unsigned int flags)
+{
+ int res = 1;
+
+ if (!(flags & EDIT_MSG) && !(flags & VERIFY_MSG)) {
+ struct object_id oid;
+ struct strbuf sb = STRBUF_INIT;
+
+ if (msg_file && strbuf_read_file(&sb, msg_file, 2048) < 0)
+ return error_errno(_("unable to read commit message "
+ "from '%s'"),
+ msg_file);
+
+ res = try_to_commit(msg_file ? &sb : NULL, author, opts, flags,
+ &oid);
+ strbuf_release(&sb);
+ if (!res) {
+ unlink(git_path_cherry_pick_head());
+ unlink(git_path_merge_msg());
+ if (!is_rebase_i(opts))
+ print_commit_summary(NULL, &oid,
+ SUMMARY_SHOW_AUTHOR_DATE);
+ return res;
+ }
+ }
+ if (res == 1)
+ return run_git_commit(msg_file, opts, flags);
+
+ return res;
+}
+
static int is_original_commit_empty(struct commit *commit)
{
const struct object_id *ptree_oid;
struct object_id head;
struct commit *base, *next, *parent;
const char *base_label, *next_label;
+ char *author = NULL;
struct commit_message msg = { NULL, NULL, NULL, NULL };
struct strbuf msgbuf = STRBUF_INIT;
int res, unborn = 0, allow;
strbuf_addstr(&msgbuf, oid_to_hex(&commit->object.oid));
strbuf_addstr(&msgbuf, ")\n");
}
+ if (!is_fixup(command))
+ author = get_author(msg.message);
}
if (command == TODO_REWORD)
}
}
+ if (opts->signoff)
+ append_signoff(&msgbuf, 0, 0);
+
if (is_rebase_i(opts) && write_author_script(msg.message) < 0)
res = -1;
else if (!opts->strategy || !strcmp(opts->strategy, "recursive") || command == TODO_REVERT) {
goto leave;
} else if (allow)
flags |= ALLOW_EMPTY;
- if (!opts->no_commit)
+ if (!opts->no_commit) {
fast_forward_edit:
- res = run_git_commit(msg_file, opts, flags);
+ if (author || command == TODO_REVERT || (flags & AMEND_MSG))
+ res = do_commit(msg_file, author, opts, flags);
+ else
+ res = error(_("unable to parse commit author"));
+ }
if (!res && final_fixup) {
unlink(rebase_path_fixup_msg());
leave:
free_message(commit, &msg);
+ free(author);
update_abort_safety_file();
return res;
static int read_and_refresh_cache(struct replay_opts *opts)
{
- static struct lock_file index_lock;
+ struct lock_file index_lock = LOCK_INIT;
int index_fd = hold_locked_index(&index_lock, 0);
if (read_index_preload(&the_index, NULL) < 0) {
rollback_lock_file(&index_lock);
_(action_name(opts)));
}
refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL);
- if (the_index.cache_changed && index_fd >= 0) {
- if (write_locked_index(&the_index, &index_lock, COMMIT_LOCK)) {
+ if (index_fd >= 0) {
+ if (write_locked_index(&the_index, &index_lock,
+ COMMIT_LOCK | SKIP_IF_UNCHANGED)) {
return error(_("git %s: failed to refresh the index"),
_(action_name(opts)));
}
}
- rollback_lock_file(&index_lock);
return 0;
}
return count;
}
+static ssize_t strbuf_read_file_or_whine(struct strbuf *sb, const char *path)
+{
+ int fd;
+ ssize_t len;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return error_errno(_("could not open '%s'"), path);
+ len = strbuf_read(sb, fd, 0);
+ close(fd);
+ if (len < 0)
+ return error(_("could not read '%s'."), path);
+ return len;
+}
+
static int read_populate_todo(struct todo_list *todo_list,
struct replay_opts *opts)
{
struct stat st;
const char *todo_file = get_todo_path(opts);
- int fd, res;
+ int res;
strbuf_reset(&todo_list->buf);
- fd = open(todo_file, O_RDONLY);
- if (fd < 0)
- return error_errno(_("could not open '%s'"), todo_file);
- if (strbuf_read(&todo_list->buf, fd, 0) < 0) {
- close(fd);
- return error(_("could not read '%s'."), todo_file);
- }
- close(fd);
+ if (strbuf_read_file_or_whine(&todo_list->buf, todo_file) < 0)
+ return -1;
res = stat(todo_file, &st);
if (res)
static int save_head(const char *head)
{
- static struct lock_file head_lock;
+ struct lock_file head_lock = LOCK_INIT;
struct strbuf buf = STRBUF_INIT;
int fd;
ssize_t written;
fd = hold_lock_file_for_update(&head_lock, git_path_head_file(), 0);
- if (fd < 0) {
- rollback_lock_file(&head_lock);
+ if (fd < 0)
return error_errno(_("could not lock HEAD"));
- }
strbuf_addf(&buf, "%s\n", head);
written = write_in_full(fd, buf.buf, buf.len);
strbuf_release(&buf);
return error_errno(_("could not write to '%s'"),
git_path_head_file());
}
- if (commit_lock_file(&head_lock) < 0) {
- rollback_lock_file(&head_lock);
- return error(_("failed to finalize '%s'."), git_path_head_file());
- }
+ if (commit_lock_file(&head_lock) < 0)
+ return error(_("failed to finalize '%s'"), git_path_head_file());
return 0;
}
static int save_todo(struct todo_list *todo_list, struct replay_opts *opts)
{
- static struct lock_file todo_lock;
+ struct lock_file todo_lock = LOCK_INIT;
const char *todo_path = get_todo_path(opts);
int next = todo_list->current, offset, fd;
todo_list->buf.len - offset) < 0)
return error_errno(_("could not write to '%s'"), todo_path);
if (commit_lock_file(&todo_lock) < 0)
- return error(_("failed to finalize '%s'."), todo_path);
+ return error(_("failed to finalize '%s'"), todo_path);
if (is_rebase_i(opts)) {
const char *done_path = rebase_path_done();
p = short_commit_name(commit);
if (write_message(p, strlen(p), rebase_path_stopped_sha(), 1) < 0)
return -1;
+ if (update_ref("rebase", "REBASE_HEAD", &commit->object.oid,
+ NULL, REF_NO_DEREF, UPDATE_REFS_MSG_ON_ERR))
+ res |= error(_("could not update %s"), "REBASE_HEAD");
strbuf_addf(&buf, "%s/patch", get_dir(opts));
memset(&log_tree_opt, 0, sizeof(log_tree_opt));
unlink(rebase_path_author_script());
unlink(rebase_path_stopped_sha());
unlink(rebase_path_amend());
+ delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
}
if (item->command <= TODO_SQUASH) {
if (is_rebase_i(opts))
if (!lookup_commit_reference_gently(&oid, 1)) {
enum object_type type = sha1_object_info(oid.hash, NULL);
return error(_("%s: can't cherry-pick a %s"),
- name, typename(type));
+ name, type_name(type));
}
} else
return error(_("%s: bad revision"), name);
struct strbuf todo_file = STRBUF_INIT;
struct todo_list todo_list = TODO_LIST_INIT;
struct strbuf missing = STRBUF_INIT;
- int advise_to_edit_todo = 0, res = 0, fd, i;
+ int advise_to_edit_todo = 0, res = 0, i;
strbuf_addstr(&todo_file, rebase_path_todo());
- fd = open(todo_file.buf, O_RDONLY);
- if (fd < 0) {
- res = error_errno(_("could not open '%s'"), todo_file.buf);
- goto leave_check;
- }
- if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
- close(fd);
- res = error(_("could not read '%s'."), todo_file.buf);
+ if (strbuf_read_file_or_whine(&todo_list.buf, todo_file.buf) < 0) {
+ res = -1;
goto leave_check;
}
- close(fd);
advise_to_edit_todo = res =
parse_insn_buffer(todo_list.buf.buf, &todo_list);
todo_list_release(&todo_list);
strbuf_addstr(&todo_file, ".backup");
- fd = open(todo_file.buf, O_RDONLY);
- if (fd < 0) {
- res = error_errno(_("could not open '%s'"), todo_file.buf);
- goto leave_check;
- }
- if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
- close(fd);
- res = error(_("could not read '%s'."), todo_file.buf);
+ if (strbuf_read_file_or_whine(&todo_list.buf, todo_file.buf) < 0) {
+ res = -1;
goto leave_check;
}
- close(fd);
strbuf_release(&todo_file);
res = !!parse_insn_buffer(todo_list.buf.buf, &todo_list);
}
strbuf_release(&buf);
- fd = open(todo_file, O_RDONLY);
- if (fd < 0) {
- return error_errno(_("could not open '%s'"), todo_file);
- }
- if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
- close(fd);
- return error(_("could not read '%s'."), todo_file);
- }
- close(fd);
+ if (strbuf_read_file_or_whine(&todo_list.buf, todo_file) < 0)
+ return -1;
if (parse_insn_buffer(todo_list.buf.buf, &todo_list) < 0) {
todo_list_release(&todo_list);
return -1;
const char *todo_file = rebase_path_todo();
struct todo_list todo_list = TODO_LIST_INIT;
struct hashmap subject2item;
- int res = 0, rearranged = 0, *next, *tail, fd, i;
+ int res = 0, rearranged = 0, *next, *tail, i;
char **subjects;
- fd = open(todo_file, O_RDONLY);
- if (fd < 0)
- return error_errno(_("could not open '%s'"), todo_file);
- if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
- close(fd);
- return error(_("could not read '%s'."), todo_file);
- }
- close(fd);
+ if (strbuf_read_file_or_whine(&todo_list.buf, todo_file) < 0)
+ return -1;
if (parse_insn_buffer(todo_list.buf.buf, &todo_list) < 0) {
todo_list_release(&todo_list);
return -1;
#ifndef SEQUENCER_H
#define SEQUENCER_H
+const char *git_path_commit_editmsg(void);
const char *git_path_seq_dir(void);
#define APPEND_SIGNOFF_DEDUP (1u << 0)
REPLAY_INTERACTIVE_REBASE
};
+enum commit_msg_cleanup_mode {
+ COMMIT_MSG_CLEANUP_SPACE,
+ COMMIT_MSG_CLEANUP_NONE,
+ COMMIT_MSG_CLEANUP_SCISSORS,
+ COMMIT_MSG_CLEANUP_ALL
+};
+
struct replay_opts {
enum replay_action action;
int mainline;
char *gpg_sign;
+ enum commit_msg_cleanup_mode default_msg_cleanup;
/* Merge strategy */
char *strategy;
};
#define REPLAY_OPTS_INIT { -1 }
+/* Call this to setup defaults before parsing command line options */
+void sequencer_init_config(struct replay_opts *opts);
int sequencer_pick_revisions(struct replay_opts *opts);
int sequencer_continue(struct replay_opts *opts);
int sequencer_rollback(struct replay_opts *opts);
void append_signoff(struct strbuf *msgbuf, int ignore_footer, unsigned flag);
void append_conflicts_hint(struct strbuf *msgbuf);
+int message_is_empty(const struct strbuf *sb,
+ enum commit_msg_cleanup_mode cleanup_mode);
+int template_untouched(const struct strbuf *sb, const char *template_file,
+ enum commit_msg_cleanup_mode cleanup_mode);
+int update_head_with_reflog(const struct commit *old_head,
+ const struct object_id *new_head,
+ const char *action, const struct strbuf *msg,
+ struct strbuf *err);
+void commit_post_rewrite(const struct commit *current_head,
+ const struct object_id *new_head);
+#define SUMMARY_INITIAL_COMMIT (1 << 0)
+#define SUMMARY_SHOW_AUTHOR_DATE (1 << 1)
+void print_commit_summary(const char *prefix, const struct object_id *oid,
+ unsigned int flags);
#endif
{
char *r = prefix_path_gently(prefix, len, NULL, path);
if (!r)
- die("'%s' is outside repository", path);
+ die(_("'%s' is outside repository"), path);
return r;
}
free(to_free);
return 0; /* file does not exist */
}
- die_errno("failed to stat '%s'", arg);
+ die_errno(_("failed to stat '%s'"), arg);
}
static void NORETURN die_verify_filename(const char *prefix,
int diagnose_misspelt_rev)
{
if (*arg == '-')
- die("option '%s' must come before non-option arguments", arg);
+ die(_("option '%s' must come before non-option arguments"), arg);
if (looks_like_pathspec(arg) || check_filename(prefix, arg))
return;
die_verify_filename(prefix, arg, diagnose_misspelt_rev);
return;
if (work_tree_config_is_bogus)
- die("unable to set up work tree using invalid config");
+ die(_("unable to set up work tree using invalid config"));
work_tree = get_git_work_tree();
git_dir = get_git_dir();
if (!is_absolute_path(git_dir))
git_dir = real_path(get_git_dir());
if (!work_tree || chdir(work_tree))
- die("This operation must be run in a work tree");
+ die(_("this operation must be run in a work tree"));
/*
* Make sure subsequent git processes find correct worktree
;
else if (!strcmp(ext, "preciousobjects"))
data->precious_objects = git_config_bool(var, value);
- else
+ else if (!strcmp(ext, "partialclone")) {
+ if (!value)
+ return config_error_nonbool(var);
+ data->partial_clone = xstrdup(value);
+ } else
string_list_append(&data->unknown_extensions, ext);
} else if (strcmp(var, "core.bare") == 0) {
data->is_bare = git_config_bool(var, value);
}
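For reference, a repository opts into the new extension with a v1 repository format, as the tests later in this series do:

	git -C repo config core.repositoryformatversion 1 &&
	git -C repo config extensions.partialclone origin

where the value ("origin" here) names the remote from which missing objects will be fetched.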
repository_format_precious_objects = candidate->precious_objects;
+ repository_format_partial_clone = candidate->partial_clone;
string_list_clear(&candidate->unknown_extensions, 0);
if (!has_common) {
if (candidate->is_bare != -1) {
/* non-fatal; follow return path */
break;
case READ_GITFILE_ERR_OPEN_FAILED:
- die_errno("Error opening '%s'", path);
+ die_errno(_("error opening '%s'"), path);
case READ_GITFILE_ERR_TOO_LARGE:
- die("Too large to be a .git file: '%s'", path);
+ die(_("too large to be a .git file: '%s'"), path);
case READ_GITFILE_ERR_READ_FAILED:
- die("Error reading %s", path);
+ die(_("error reading %s"), path);
case READ_GITFILE_ERR_INVALID_FORMAT:
- die("Invalid gitfile format: %s", path);
+ die(_("invalid gitfile format: %s"), path);
case READ_GITFILE_ERR_NO_PATH:
- die("No path in gitfile: %s", path);
+ die(_("no path in gitfile: %s"), path);
case READ_GITFILE_ERR_NOT_A_REPO:
- die("Not a git repository: %s", dir);
+ die(_("not a git repository: %s"), dir);
default:
die("BUG: unknown error code");
}
int offset;
if (PATH_MAX - 40 < strlen(gitdirenv))
- die("'$%s' too big", GIT_DIR_ENVIRONMENT);
+ die(_("'$%s' too big"), GIT_DIR_ENVIRONMENT);
gitfile = (char*)read_gitfile(gitdirenv);
if (gitfile) {
free(gitfile);
return NULL;
}
- die("Not a git repository: '%s'", gitdirenv);
+ die(_("not a git repository: '%s'"), gitdirenv);
}
if (check_repository_format_gently(gitdirenv, repo_fmt, nongit_ok)) {
else {
char *core_worktree;
if (chdir(gitdirenv))
- die_errno("Could not chdir to '%s'", gitdirenv);
+ die_errno(_("cannot chdir to '%s'"), gitdirenv);
if (chdir(git_work_tree_cfg))
- die_errno("Could not chdir to '%s'", git_work_tree_cfg);
+ die_errno(_("cannot chdir to '%s'"), git_work_tree_cfg);
core_worktree = xgetcwd();
if (chdir(cwd->buf))
- die_errno("Could not come back to cwd");
+ die_errno(_("cannot come back to cwd"));
set_git_work_tree(core_worktree);
free(core_worktree);
}
if (offset >= 0) { /* cwd inside worktree? */
set_git_dir(real_path(gitdirenv));
if (chdir(worktree))
- die_errno("Could not chdir to '%s'", worktree);
+ die_errno(_("cannot chdir to '%s'"), worktree);
strbuf_addch(cwd, '/');
free(gitfile);
return cwd->buf + offset;
if (offset != cwd->len && !is_absolute_path(gitdir))
gitdir = to_free = real_pathdup(gitdir, 1);
if (chdir(cwd->buf))
- die_errno("Could not come back to cwd");
+ die_errno(_("cannot come back to cwd"));
ret = setup_explicit_git_dir(gitdir, cwd, repo_fmt, nongit_ok);
free(to_free);
return ret;
if (is_bare_repository_cfg > 0) {
set_git_dir(offset == cwd->len ? gitdir : real_path(gitdir));
if (chdir(cwd->buf))
- die_errno("Could not come back to cwd");
+ die_errno(_("cannot come back to cwd"));
return NULL;
}
gitdir = offset == cwd->len ? "." : xmemdupz(cwd->buf, offset);
if (chdir(cwd->buf))
- die_errno("Could not come back to cwd");
+ die_errno(_("cannot come back to cwd"));
return setup_explicit_git_dir(gitdir, cwd, repo_fmt, nongit_ok);
}
inside_work_tree = 0;
if (offset != cwd->len) {
if (chdir(cwd->buf))
- die_errno("Cannot come back to cwd");
+ die_errno(_("cannot come back to cwd"));
root_len = offset_1st_component(cwd->buf);
strbuf_setlen(cwd, offset > root_len ? offset : root_len);
set_git_dir(cwd->buf);
static const char *setup_nongit(const char *cwd, int *nongit_ok)
{
if (!nongit_ok)
- die(_("Not a git repository (or any of the parent directories): %s"), DEFAULT_GIT_DIR_ENVIRONMENT);
+ die(_("not a git repository (or any of the parent directories): %s"), DEFAULT_GIT_DIR_ENVIRONMENT);
if (chdir(cwd))
- die_errno(_("Cannot come back to cwd"));
+ die_errno(_("cannot come back to cwd"));
*nongit_ok = 1;
return NULL;
}
{
struct stat buf;
if (stat(path, &buf)) {
- die_errno("failed to stat '%*s%s%s'",
+ die_errno(_("failed to stat '%*s%s%s'"),
prefix_len,
prefix ? prefix : "",
prefix ? "/" : "", path);
break;
case GIT_DIR_DISCOVERED:
if (dir.len < cwd.len && chdir(dir.buf))
- die(_("Cannot change to '%s'"), dir.buf);
+ die(_("cannot change to '%s'"), dir.buf);
prefix = setup_discovered_git_dir(gitdir.buf, &cwd, dir.len,
&repo_fmt, nongit_ok);
break;
case GIT_DIR_BARE:
if (dir.len < cwd.len && chdir(dir.buf))
- die(_("Cannot change to '%s'"), dir.buf);
+ die(_("cannot change to '%s'"), dir.buf);
prefix = setup_bare_git_dir(&cwd, dir.len, &repo_fmt, nongit_ok);
break;
case GIT_DIR_HIT_CEILING:
strbuf_release(&dir);
return NULL;
}
- die(_("Not a git repository (or any parent up to mount point %s)\n"
+ die(_("not a git repository (or any parent up to mount point %s)\n"
"Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set)."),
dir.buf);
default:
/* A filemode value was given: 0xxx */
if ((i & 0600) != 0600)
- die(_("Problem with core.sharedRepository filemode value "
+ die(_("problem with core.sharedRepository filemode value "
"(0%.3o).\nThe owner of files must always have "
"read and write permissions."), i);
while (fd != -1 && fd < 2)
fd = dup(fd);
if (fd == -1)
- die_errno("open /dev/null or dup failed");
+ die_errno(_("open /dev/null or dup failed"));
if (fd > 2)
close(fd);
}
case 0:
break;
case -1:
- die_errno("fork failed");
+ die_errno(_("fork failed"));
default:
exit(0);
}
if (setsid() == -1)
- die_errno("setsid failed");
+ die_errno(_("setsid failed"));
close(0);
close(1);
close(2);
} while (lo < hi);
return -lo-1;
}
+
+int bsearch_hash(const unsigned char *sha1, const uint32_t *fanout_nbo,
+ const unsigned char *table, size_t stride, uint32_t *result)
+{
+ uint32_t hi, lo;
+
+ hi = ntohl(fanout_nbo[*sha1]);
+ lo = ((*sha1 == 0x0) ? 0 : ntohl(fanout_nbo[*sha1 - 1]));
+
+ while (lo < hi) {
+ unsigned mi = lo + (hi - lo) / 2;
+ int cmp = hashcmp(table + mi * stride, sha1);
+
+ if (!cmp) {
+ if (result)
+ *result = mi;
+ return 1;
+ }
+ if (cmp > 0)
+ hi = mi;
+ else
+ lo = mi + 1;
+ }
+
+ if (result)
+ *result = lo;
+ return 0;
+}
void *table,
size_t nr,
sha1_access_fn fn);
+
+/*
+ * Searches for sha1 in table, using the given fanout table to determine the
+ * interval to search, then using binary search. Returns 1 if found, 0 if not.
+ *
+ * Takes the following parameters:
+ *
+ * - sha1: the hash to search for
+ * - fanout_nbo: a 256-element array of network-byte-order 32-bit integers; the
+ * integer at position i represents the number of elements in table whose
+ * first byte is less than or equal to i
+ * - table: a sorted list of hashes with optional extra information in between
+ * - stride: distance between two consecutive elements in table (should be
+ * GIT_MAX_RAWSZ or greater)
+ * - result: if not NULL, this function stores the element index of the
+ * position found (if the search is successful) or the index of the least
+ * element that is greater than sha1 (if the search is not successful)
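+ *
+ * For example, if *sha1 is 0xc0, the search is confined to the slice of
+ * table between indices ntohl(fanout_nbo[0xbf]) (inclusive) and
+ * ntohl(fanout_nbo[0xc0]) (exclusive).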
+ *
+ * This function does not verify the validity of the fanout table.
+ */
+int bsearch_hash(const unsigned char *sha1, const uint32_t *fanout_nbo,
+ const unsigned char *table, size_t stride, uint32_t *result);
#endif
#include "bulk-checkin.h"
#include "streaming.h"
#include "dir.h"
-#include "mru.h"
#include "list.h"
#include "mergesort.h"
#include "quote.h"
#include "packfile.h"
+#include "fetch-object.h"
const unsigned char null_sha1[GIT_MAX_RAWSZ];
const struct object_id null_oid;
EMPTY_BLOB_SHA1_BIN_LITERAL
};
-static void git_hash_sha1_init(void *ctx)
+static void git_hash_sha1_init(git_hash_ctx *ctx)
{
- git_SHA1_Init((git_SHA_CTX *)ctx);
+ git_SHA1_Init(&ctx->sha1);
}
-static void git_hash_sha1_update(void *ctx, const void *data, size_t len)
+static void git_hash_sha1_update(git_hash_ctx *ctx, const void *data, size_t len)
{
- git_SHA1_Update((git_SHA_CTX *)ctx, data, len);
+ git_SHA1_Update(&ctx->sha1, data, len);
}
-static void git_hash_sha1_final(unsigned char *hash, void *ctx)
+static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx)
{
- git_SHA1_Final(hash, (git_SHA_CTX *)ctx);
+ git_SHA1_Final(hash, &ctx->sha1);
}
-static void git_hash_unknown_init(void *ctx)
+static void git_hash_unknown_init(git_hash_ctx *ctx)
{
die("trying to init unknown hash");
}
-static void git_hash_unknown_update(void *ctx, const void *data, size_t len)
+static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
{
die("trying to update unknown hash");
}
-static void git_hash_unknown_final(unsigned char *hash, void *ctx)
+static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
{
die("trying to finalize unknown hash");
}
0x00000000,
0,
0,
- 0,
git_hash_unknown_init,
git_hash_unknown_update,
git_hash_unknown_final,
"sha-1",
/* "sha1", big-endian */
0x73686131,
- sizeof(git_SHA_CTX),
GIT_SHA1_RAWSZ,
GIT_SHA1_HEXSZ,
git_hash_sha1_init,
}
-static enum safe_crlf get_safe_crlf(unsigned flags)
+static int get_conv_flags(unsigned flags)
{
if (flags & HASH_RENORMALIZE)
- return SAFE_CRLF_RENORMALIZE;
+ return CONV_EOL_RENORMALIZE;
else if (flags & HASH_WRITE_OBJECT)
- return safe_crlf;
+ return global_conv_flags_eol;
else
- return SAFE_CRLF_FALSE;
+ return 0;
}
}
}
-const char *sha1_file_name(const unsigned char *sha1)
+void sha1_file_name(struct strbuf *buf, const unsigned char *sha1)
{
- static struct strbuf buf = STRBUF_INIT;
-
- strbuf_reset(&buf);
- strbuf_addf(&buf, "%s/", get_object_directory());
-
- fill_sha1_path(&buf, sha1);
- return buf.buf;
+ strbuf_addstr(buf, get_object_directory());
+ strbuf_addch(buf, '/');
+ fill_sha1_path(buf, sha1);
}
struct strbuf *alt_scratch_buf(struct alternate_object_database *alt)
static int check_and_freshen_local(const unsigned char *sha1, int freshen)
{
- return check_and_freshen_file(sha1_file_name(sha1), freshen);
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(&buf, sha1);
+
+ return check_and_freshen_file(buf.buf, freshen);
}
static int check_and_freshen_nonlocal(const unsigned char *sha1, int freshen)
int check_sha1_signature(const unsigned char *sha1, void *map,
unsigned long size, const char *type)
{
- unsigned char real_sha1[20];
+ struct object_id real_oid;
enum object_type obj_type;
struct git_istream *st;
- git_SHA_CTX c;
+ git_hash_ctx c;
char hdr[32];
int hdrlen;
if (map) {
- hash_sha1_file(map, size, type, real_sha1);
- return hashcmp(sha1, real_sha1) ? -1 : 0;
+ hash_object_file(map, size, type, &real_oid);
+ return hashcmp(sha1, real_oid.hash) ? -1 : 0;
}
st = open_istream(sha1, &obj_type, &size, NULL);
return -1;
/* Generate the header */
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(obj_type), size) + 1;
+ hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(obj_type), size) + 1;
/* Sha1.. */
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, hdrlen);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
for (;;) {
char buf[1024 * 16];
ssize_t readlen = read_istream(st, buf, sizeof(buf));
}
if (!readlen)
break;
- git_SHA1_Update(&c, buf, readlen);
+ the_hash_algo->update_fn(&c, buf, readlen);
}
- git_SHA1_Final(real_sha1, &c);
+ the_hash_algo->final_fn(real_oid.hash, &c);
close_istream(st);
- return hashcmp(sha1, real_sha1) ? -1 : 0;
+ return hashcmp(sha1, real_oid.hash) ? -1 : 0;
}
int git_open_cloexec(const char *name, int flags)
const char **path)
{
struct alternate_object_database *alt;
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(&buf, sha1);
+ *path = buf.buf;
- *path = sha1_file_name(sha1);
if (!lstat(*path, st))
return 0;
int fd;
struct alternate_object_database *alt;
int most_interesting_errno;
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(&buf, sha1);
+ *path = buf.buf;
- *path = sha1_file_name(sha1);
fd = git_open(*path);
if (fd >= 0)
return fd;
}
type = type_from_string_gently(type_buf, type_len, 1);
- if (oi->typename)
- strbuf_add(oi->typename, type_buf, type_len);
+ if (oi->type_name)
+ strbuf_add(oi->type_name, type_buf, type_len);
/*
* Set type to 0 if its an unknown object and
* we're obtaining the type using '--allow-unknown-type'
* return value implicitly indicates whether the
* object even exists.
*/
- if (!oi->typep && !oi->typename && !oi->sizep && !oi->contentp) {
+ if (!oi->typep && !oi->type_name && !oi->sizep && !oi->contentp) {
const char *path;
struct stat st;
if (stat_sha1_file(sha1, &st, &path) < 0)
return (status < 0) ? status : 0;
}
+int fetch_if_missing = 1;
+
int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, unsigned flags)
{
static struct object_info blank_oi = OBJECT_INFO_INIT;
const unsigned char *real = (flags & OBJECT_INFO_LOOKUP_REPLACE) ?
lookup_replace_object(sha1) :
sha1;
+ int already_retried = 0;
if (is_null_sha1(real))
return -1;
*(oi->disk_sizep) = 0;
if (oi->delta_base_sha1)
hashclr(oi->delta_base_sha1);
- if (oi->typename)
- strbuf_addstr(oi->typename, typename(co->type));
+ if (oi->type_name)
+ strbuf_addstr(oi->type_name, type_name(co->type));
if (oi->contentp)
*oi->contentp = xmemdupz(co->buf, co->size);
oi->whence = OI_CACHED;
}
}
- if (!find_pack_entry(real, &e)) {
+ while (1) {
+ if (find_pack_entry(real, &e))
+ break;
+
/* Most likely it's a loose object. */
if (!sha1_loose_object_info(real, oi, flags))
return 0;
/* Not a loose object; someone else may have just packed it. */
- if (flags & OBJECT_INFO_QUICK) {
- return -1;
- } else {
+ if (!(flags & OBJECT_INFO_QUICK)) {
reprepare_packed_git();
- if (!find_pack_entry(real, &e))
- return -1;
+ if (find_pack_entry(real, &e))
+ break;
}
+
+ /* Check if it is a missing object */
+ if (fetch_if_missing && repository_format_partial_clone &&
+ !already_retried) {
+ /*
+ * TODO Investigate having fetch_object() return
+ * TODO error/success and stopping here on failure.
+ */
+ fetch_object(repository_format_partial_clone, real);
+ already_retried = 1;
+ continue;
+ }
+
+ return -1;
}
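This retry is what lets a partial clone repair itself on demand: with extensions.partialclone set, reading a missing object triggers a fetch from the promisor remote, as exercised by the new test later in this series:

	git clone "file://$(pwd)/server" repo &&
	HASH=$(git -C repo rev-parse foo) &&
	rm -rf repo/.git/objects/* &&
	git -C repo config core.repositoryformatversion 1 &&
	git -C repo config extensions.partialclone origin &&
	git -C repo cat-file -p "$HASH"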
if (oi == &blank_oi)
* information below, so return early.
*/
return 0;
-
rtype = packed_object_info(e.p, e.offset, oi);
if (rtype < 0) {
mark_bad_packed_object(e.p, real);
return content;
}
-int pretend_sha1_file(void *buf, unsigned long len, enum object_type type,
- unsigned char *sha1)
+int pretend_object_file(void *buf, unsigned long len, enum object_type type,
+ struct object_id *oid)
{
struct cached_object *co;
- hash_sha1_file(buf, len, typename(type), sha1);
- if (has_sha1_file(sha1) || find_cached_object(sha1))
+ hash_object_file(buf, len, type_name(type), oid);
+ if (has_sha1_file(oid->hash) || find_cached_object(oid->hash))
return 0;
ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc);
co = &cached_objects[cached_object_nr++];
co->type = type;
co->buf = xmalloc(len);
memcpy(co->buf, buf, len);
- hashcpy(co->sha1, sha1);
+ hashcpy(co->sha1, oid->hash);
return 0;
}
}
}
-static void write_sha1_file_prepare(const void *buf, unsigned long len,
- const char *type, unsigned char *sha1,
- char *hdr, int *hdrlen)
+static void write_object_file_prepare(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ char *hdr, int *hdrlen)
{
- git_SHA_CTX c;
+ git_hash_ctx c;
/* Generate the header */
*hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1;
/* Sha1.. */
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, *hdrlen);
- git_SHA1_Update(&c, buf, len);
- git_SHA1_Final(sha1, &c);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, *hdrlen);
+ the_hash_algo->update_fn(&c, buf, len);
+ the_hash_algo->final_fn(oid->hash, &c);
}
/*
return 0;
}
-int hash_sha1_file(const void *buf, unsigned long len, const char *type,
- unsigned char *sha1)
+int hash_object_file(const void *buf, unsigned long len, const char *type,
+ struct object_id *oid)
{
char hdr[32];
int hdrlen = sizeof(hdr);
- write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
+ write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
return 0;
}
return fd;
}
-static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
- const void *buf, unsigned long len, time_t mtime)
+static int write_loose_object(const struct object_id *oid, char *hdr,
+ int hdrlen, const void *buf, unsigned long len,
+ time_t mtime)
{
int fd, ret;
unsigned char compressed[4096];
git_zstream stream;
- git_SHA_CTX c;
- unsigned char parano_sha1[20];
+ git_hash_ctx c;
+ struct object_id parano_oid;
static struct strbuf tmp_file = STRBUF_INIT;
- const char *filename = sha1_file_name(sha1);
+ static struct strbuf filename = STRBUF_INIT;
+
+ strbuf_reset(&filename);
+ sha1_file_name(&filename, oid->hash);
- fd = create_tmpfile(&tmp_file, filename);
+ fd = create_tmpfile(&tmp_file, filename.buf);
if (fd < 0) {
if (errno == EACCES)
return error("insufficient permission for adding an object to repository database %s", get_object_directory());
git_deflate_init(&stream, zlib_compression_level);
stream.next_out = compressed;
stream.avail_out = sizeof(compressed);
- git_SHA1_Init(&c);
+ the_hash_algo->init_fn(&c);
/* First header.. */
stream.next_in = (unsigned char *)hdr;
stream.avail_in = hdrlen;
while (git_deflate(&stream, 0) == Z_OK)
; /* nothing */
- git_SHA1_Update(&c, hdr, hdrlen);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
/* Then the data itself.. */
stream.next_in = (void *)buf;
do {
unsigned char *in0 = stream.next_in;
ret = git_deflate(&stream, Z_FINISH);
- git_SHA1_Update(&c, in0, stream.next_in - in0);
+ the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
die("unable to write sha1 file");
stream.next_out = compressed;
} while (ret == Z_OK);
if (ret != Z_STREAM_END)
- die("unable to deflate new object %s (%d)", sha1_to_hex(sha1), ret);
+ die("unable to deflate new object %s (%d)", oid_to_hex(oid),
+ ret);
ret = git_deflate_end_gently(&stream);
if (ret != Z_OK)
- die("deflateEnd on object %s failed (%d)", sha1_to_hex(sha1), ret);
- git_SHA1_Final(parano_sha1, &c);
- if (hashcmp(sha1, parano_sha1) != 0)
- die("confused by unstable object source data for %s", sha1_to_hex(sha1));
+ die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
+ ret);
+ the_hash_algo->final_fn(parano_oid.hash, &c);
+ if (oidcmp(oid, &parano_oid) != 0)
+ die("confused by unstable object source data for %s",
+ oid_to_hex(oid));
close_sha1_file(fd);
warning_errno("failed utime() on %s", tmp_file.buf);
}
- return finalize_object_file(tmp_file.buf, filename);
+ return finalize_object_file(tmp_file.buf, filename.buf);
}
static int freshen_loose_object(const unsigned char *sha1)
return 1;
}
-int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1)
+int write_object_file(const void *buf, unsigned long len, const char *type,
+ struct object_id *oid)
{
char hdr[32];
int hdrlen = sizeof(hdr);
/* Normally if we have it in the pack then we do not bother writing
* it out into .git/objects/??/?{38} file.
*/
- write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
- if (freshen_packed_object(sha1) || freshen_loose_object(sha1))
+ write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
+ if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
return 0;
- return write_loose_object(sha1, hdr, hdrlen, buf, len, 0);
+ return write_loose_object(oid, hdr, hdrlen, buf, len, 0);
}
-int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type,
- struct object_id *oid, unsigned flags)
+int hash_object_file_literally(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ unsigned flags)
{
char *header;
int hdrlen, status = 0;
/* type string, SP, %lu of the length plus NUL must fit this */
hdrlen = strlen(type) + 32;
header = xmalloc(hdrlen);
- write_sha1_file_prepare(buf, len, type, oid->hash, header, &hdrlen);
+ write_object_file_prepare(buf, len, type, oid, header, &hdrlen);
if (!(flags & HASH_WRITE_OBJECT))
goto cleanup;
if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
goto cleanup;
- status = write_loose_object(oid->hash, header, hdrlen, buf, len, 0);
+ status = write_loose_object(oid, header, hdrlen, buf, len, 0);
cleanup:
free(header);
return status;
}
-int force_object_loose(const unsigned char *sha1, time_t mtime)
+int force_object_loose(const struct object_id *oid, time_t mtime)
{
void *buf;
unsigned long len;
int hdrlen;
int ret;
- if (has_loose_object(sha1))
+ if (has_loose_object(oid->hash))
return 0;
- buf = read_object(sha1, &type, &len);
+ buf = read_object(oid->hash, &type, &len);
if (!buf)
- return error("cannot read sha1_file for %s", sha1_to_hex(sha1));
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1;
- ret = write_loose_object(sha1, hdr, hdrlen, buf, len, mtime);
+ return error("cannot read sha1_file for %s", oid_to_hex(oid));
+ hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
+ ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
free(buf);
return ret;
if ((type == OBJ_BLOB) && path) {
struct strbuf nbuf = STRBUF_INIT;
if (convert_to_git(&the_index, path, buf, size, &nbuf,
- get_safe_crlf(flags))) {
+ get_conv_flags(flags))) {
buf = strbuf_detach(&nbuf, &size);
re_allocated = 1;
}
}
if (write_object)
- ret = write_sha1_file(buf, size, typename(type), oid->hash);
+ ret = write_object_file(buf, size, type_name(type), oid);
else
- ret = hash_sha1_file(buf, size, typename(type), oid->hash);
+ ret = hash_object_file(buf, size, type_name(type), oid);
if (re_allocated)
free(buf);
return ret;
assert(would_convert_to_git_filter_fd(path));
convert_to_git_filter_fd(&the_index, path, fd, &sbuf,
- get_safe_crlf(flags));
+ get_conv_flags(flags));
if (write_object)
- ret = write_sha1_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB),
- oid->hash);
+ ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+ oid);
else
- ret = hash_sha1_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB),
- oid->hash);
+ ret = hash_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+ oid);
strbuf_release(&sbuf);
return ret;
}
if (strbuf_readlink(&sb, path, st->st_size))
return error_errno("readlink(\"%s\")", path);
if (!(flags & HASH_WRITE_OBJECT))
- hash_sha1_file(sb.buf, sb.len, blob_type, oid->hash);
- else if (write_sha1_file(sb.buf, sb.len, blob_type, oid->hash))
+ hash_object_file(sb.buf, sb.len, blob_type, oid);
+ else if (write_object_file(sb.buf, sb.len, blob_type, oid))
rc = error("%s: failed to insert into database", path);
strbuf_release(&sb);
break;
die("%s is not a valid object", sha1_to_hex(sha1));
if (type != expect)
die("%s is not a valid '%s' object", sha1_to_hex(sha1),
- typename(expect));
+ type_name(expect));
}
int for_each_file_in_obj_subdir(unsigned int subdir_nr,
const char *path,
const unsigned char *expected_sha1)
{
- git_SHA_CTX c;
+ git_hash_ctx c;
unsigned char real_sha1[GIT_MAX_RAWSZ];
unsigned char buf[4096];
unsigned long total_read;
int status = Z_OK;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, stream->total_out);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, stream->total_out);
/*
* We already read some bytes into hdr, but the ones up to the NUL
if (size - total_read < stream->avail_out)
stream->avail_out = size - total_read;
status = git_inflate(stream, Z_FINISH);
- git_SHA1_Update(&c, buf, stream->next_out - buf);
+ the_hash_algo->update_fn(&c, buf, stream->next_out - buf);
total_read += stream->next_out - buf;
}
git_inflate_end(stream);
return -1;
}
- git_SHA1_Final(real_sha1, &c);
+ the_hash_algo->final_fn(real_sha1, &c);
if (hashcmp(expected_sha1, real_sha1)) {
error("sha1 mismatch for %s (expected %s)", path,
sha1_to_hex(expected_sha1));
goto out;
}
if (check_sha1_signature(expected_sha1, *contents,
- *size, typename(*type))) {
+ *size, type_name(*type))) {
error("sha1 mismatch for %s (expected %s)", path,
sha1_to_hex(expected_sha1));
free(*contents);
advise(" %s %s%s",
find_unique_abbrev(oid->hash, DEFAULT_ABBREV),
- typename(type) ? typename(type) : "unknown type",
+ type_name(type) ? type_name(type) : "unknown type",
desc.buf);
strbuf_release(&desc);
/*
* first is now the position in the packfile where we would insert
* mad->hash if it does not exist (or the position of mad->hash if
- * it does exist). Hence, we consider a maximum of three objects
+ * it does exist). Hence, we consider a maximum of two objects
* nearby for the abbreviation length.
*/
mad->init_len = 0;
if (!match) {
- nth_packed_object_oid(&oid, p, first);
- extend_abbrev_len(&oid, mad);
+ if (nth_packed_object_oid(&oid, p, first))
+ extend_abbrev_len(&oid, mad);
} else if (first < num - 1) {
- nth_packed_object_oid(&oid, p, first + 1);
- extend_abbrev_len(&oid, mad);
+ if (nth_packed_object_oid(&oid, p, first + 1))
+ extend_abbrev_len(&oid, mad);
}
if (first > 0) {
- nth_packed_object_oid(&oid, p, first - 1);
- extend_abbrev_len(&oid, mad);
+ if (nth_packed_object_oid(&oid, p, first - 1))
+ extend_abbrev_len(&oid, mad);
}
mad->init_len = mad->cur_len;
}
if (name)
error("%.*s: expected %s type, but the object "
"dereferences to %s type",
- namelen, name, typename(expected_type),
- typename(o->type));
+ namelen, name, type_name(expected_type),
+ type_name(o->type));
return NULL;
}
}
/* Plumbing with collision-detecting SHA1 code */
-#ifdef DC_SHA1_SUBMODULE
-#include "sha1collisiondetection/lib/sha1.h"
-#elif defined(DC_SHA1_EXTERNAL)
+#ifdef DC_SHA1_EXTERNAL
#include <sha1dc/sha1.h>
+#elif defined(DC_SHA1_SUBMODULE)
+#include "sha1collisiondetection/lib/sha1.h"
#else
#include "sha1dc/sha1.h"
#endif
ALLOC_GROW(entries, nr_entries+1, nr_alloc);
entries[nr_entries++] = ce;
}
+ if (is_null_oid(&ce->oid))
+ istate->drop_cache_tree = 1;
}
}
}
void replace_index_entry_in_base(struct index_state *istate,
- struct cache_entry *old,
- struct cache_entry *new)
+ struct cache_entry *old_entry,
+ struct cache_entry *new_entry)
{
- if (old->index &&
+ if (old_entry->index &&
istate->split_index &&
istate->split_index->base &&
- old->index <= istate->split_index->base->cache_nr) {
- new->index = old->index;
- if (old != istate->split_index->base->cache[new->index - 1])
- free(istate->split_index->base->cache[new->index - 1]);
- istate->split_index->base->cache[new->index - 1] = new;
+ old_entry->index <= istate->split_index->base->cache_nr) {
+ new_entry->index = old_entry->index;
+ if (old_entry != istate->split_index->base->cache[new_entry->index - 1])
+ free(istate->split_index->base->cache[new_entry->index - 1]);
+ istate->split_index->base->cache[new_entry->index - 1] = new_entry;
}
}
void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce);
void replace_index_entry_in_base(struct index_state *istate,
struct cache_entry *old,
- struct cache_entry *new);
+ struct cache_entry *new_entry);
int read_link_extension(struct index_state *istate,
const void *data, unsigned long sz);
int write_link_extension(struct strbuf *sb,
strbuf_rtrim(sb);
strbuf_ltrim(sb);
}
+
void strbuf_rtrim(struct strbuf *sb)
{
while (sb->len > 0 && isspace((unsigned char)sb->buf[sb->len - 1]))
sb->buf[sb->len] = '\0';
}
+void strbuf_trim_trailing_dir_sep(struct strbuf *sb)
+{
+ while (sb->len > 0 && is_dir_sep((unsigned char)sb->buf[sb->len - 1]))
+ sb->len--;
+ sb->buf[sb->len] = '\0';
+}
+
void strbuf_ltrim(struct strbuf *sb)
{
char *b = sb->buf;
{
int fd;
ssize_t len;
+ int saved_errno;
fd = open(path, O_RDONLY);
if (fd < 0)
return -1;
len = strbuf_read(sb, fd, hint);
+ saved_errno = errno;
close(fd);
- if (len < 0)
+ if (len < 0) {
+ errno = saved_errno;
return -1;
+ }
return len;
}
extern void strbuf_rtrim(struct strbuf *);
extern void strbuf_ltrim(struct strbuf *);
+/* Strip trailing directory separators */
+extern void strbuf_trim_trailing_dir_sep(struct strbuf *);
+
/**
* Replace the contents of the strbuf with a reencoded form. Returns -1
* on error, 0 on success.
}
/*
- * Perform the version and capability negotiation as described in the "Long
- * Running Filter Process" section of the gitattributes documentation using the
+ * Perform the version and capability negotiation as described in the
+ * "Handshake" section of long-running-process-protocol.txt using the
* given requested versions and capabilities. The "versions" and "capabilities"
* parameters are arrays terminated by a 0 or blank struct.
*
/*
* submodule cache lookup structure
* There is one shared set of 'struct submodule' entries which can be
- * looked up by their sha1 blob id of the .gitmodule file and either
+ * looked up by their sha1 blob id of the .gitmodules file and either
* using path or name as key.
* for_path stores submodule entries with path as key
* for_name stores submodule entries with name as key
/*
* We iterate over the name hash here to be symmetric with the
* allocation of struct submodule entries. Each is allocated by
- * their .gitmodule blob sha1 and submodule name.
+ * their .gitmodules blob sha1 and submodule name.
*/
hashmap_iter_init(&cache->for_name, &iter);
while ((entry = hashmap_iter_next(&iter)))
struct object_id *one, struct object_id *two,
unsigned dirty_submodule)
{
- const struct object_id *old = the_hash_algo->empty_tree, *new = the_hash_algo->empty_tree;
+ const struct object_id *old_oid = the_hash_algo->empty_tree, *new_oid = the_hash_algo->empty_tree;
struct commit *left = NULL, *right = NULL;
struct commit_list *merge_bases = NULL;
struct child_process cp = CHILD_PROCESS_INIT;
goto done;
if (left)
- old = one;
+ old_oid = one;
if (right)
- new = two;
+ new_oid = two;
cp.git_cmd = 1;
cp.dir = path;
argv_array_pushf(&cp.args, "--dst-prefix=%s%s/",
o->b_prefix, path);
}
- argv_array_push(&cp.args, oid_to_hex(old));
+ argv_array_push(&cp.args, oid_to_hex(old_oid));
/*
* If the submodule has modified content, we will diff against the
* work tree, under the assumption that the user has asked for the
* diff format and wishes to see all differences, even those that
* have not yet been committed to the submodule.
*/
if (!(dirty_submodule & DIRTY_SUBMODULE_MODIFIED))
- argv_array_push(&cp.args, oid_to_hex(new));
+ argv_array_push(&cp.args, oid_to_hex(new_oid));
prepare_submodule_repo_env(&cp.env_array);
if (start_command(&cp))
return 0;
default:
die(_("submodule entry '%s' (%s) is a %s, not a commit"),
- cb->path, oid_to_hex(oid), typename(type));
+ cb->path, oid_to_hex(oid), type_name(type));
}
}
* pass NULL for old or new respectively.
*/
int submodule_move_head(const char *path,
- const char *old,
- const char *new,
+ const char *old_head,
+ const char *new_head,
unsigned flags)
{
int ret = 0;
else
error_code_ptr = NULL;
- if (old && !is_submodule_populated_gently(path, error_code_ptr))
+ if (old_head && !is_submodule_populated_gently(path, error_code_ptr))
return 0;
sub = submodule_from_path(&null_oid, path);
if (!sub)
die("BUG: could not get submodule information for '%s'", path);
- if (old && !(flags & SUBMODULE_MOVE_HEAD_FORCE)) {
+ if (old_head && !(flags & SUBMODULE_MOVE_HEAD_FORCE)) {
/* Check if the submodule has a dirty index. */
if (submodule_has_dirty_index(sub))
return error(_("submodule '%s' has dirty index"), path);
}
if (!(flags & SUBMODULE_MOVE_HEAD_DRY_RUN)) {
- if (old) {
+ if (old_head) {
if (!submodule_uses_gitfile(path))
absorb_git_dir_into_superproject("", path,
ABSORB_GITDIR_RECURSE_SUBMODULES);
submodule_reset_index(path);
}
- if (old && (flags & SUBMODULE_MOVE_HEAD_FORCE)) {
+ if (old_head && (flags & SUBMODULE_MOVE_HEAD_FORCE)) {
char *gitdir = xstrfmt("%s/modules/%s",
get_git_common_dir(), sub->name);
connect_work_tree_and_git_dir(path, gitdir);
argv_array_push(&cp.args, "-m");
if (!(flags & SUBMODULE_MOVE_HEAD_FORCE))
- argv_array_push(&cp.args, old ? old : EMPTY_TREE_SHA1_HEX);
+ argv_array_push(&cp.args, old_head ? old_head : EMPTY_TREE_SHA1_HEX);
- argv_array_push(&cp.args, new ? new : EMPTY_TREE_SHA1_HEX);
+ argv_array_push(&cp.args, new_head ? new_head : EMPTY_TREE_SHA1_HEX);
if (run_command(&cp)) {
ret = -1;
}
if (!(flags & SUBMODULE_MOVE_HEAD_DRY_RUN)) {
- if (new) {
+ if (new_head) {
child_process_init(&cp);
/* also set the HEAD accordingly */
cp.git_cmd = 1;
prepare_submodule_repo_env(&cp.env_array);
argv_array_pushl(&cp.args, "update-ref", "HEAD",
- "--no-deref", new, NULL);
+ "--no-deref", new_head, NULL);
if (run_command(&cp)) {
ret = -1;
#define SUBMODULE_MOVE_HEAD_FORCE (1<<1)
extern int submodule_move_head(const char *path,
const char *old,
- const char *new,
+ const char *new_head,
unsigned flags);
/*
-x::
Turn on shell tracing (i.e., `set -x`) during the tests
- themselves. Implies `--verbose`. Note that in non-bash shells,
- this can cause failures in some tests which redirect and test
- the output of shell functions. Use with caution.
+ themselves. Implies `--verbose`.
+ Ignored in test scripts that set the variable 'test_untraceable'
+ to a non-empty value, unless the test is run with a Bash version
+ supporting BASH_XTRACEFD, i.e. v4.1 or later.
-d::
--debug::
causing the next test to start in an unexpected directory. Do so
inside a subshell if necessary.
+ - save and verify the standard error of compound commands, i.e. group
+ commands, subshells, and shell functions (except test helper
+ functions like 'test_must_fail') like this:
+
+ ( cd dir && git cmd ) 2>error &&
+ test_cmp expect error
+
+ When running the test with '-x' tracing, then the trace of commands
+ executed in the compound command will be included in standard error
+ as well, quite possibly throwing off the subsequent checks examining
+ the output. Instead, save only the relevant git command's standard
+ error:
+
+ ( cd dir && git cmd 2>../error ) &&
+ test_cmp expect error
+
- Break the TAP output
The raw output from your test may be interpreted by a TAP harness. TAP
test_expect_code 1 git merge "merge msg" B master
'
- - test_must_fail <git-command>
+ - test_must_fail [<options>] <git-command>
Run a git command and ensure it fails in a controlled way. Use
this instead of "! <git-command>". When git-command dies due to a
treats it as just another expected failure, which would let such a
bug go unnoticed.
- - test_might_fail <git-command>
+ Accepts the following options:
+
+ ok=<signal-name>[,<...>]:
+ Don't treat an exit caused by the given signal as error.
+ Multiple signals can be specified as a comma separated list.
+ Currently recognized signal names are: sigpipe, success.
+ (Don't use 'success', use 'test_might_fail' instead.)
+
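+	For example, to tolerate a hypothetical command that may be
+	killed by SIGPIPE when its reader goes away:
+
+	    test_must_fail ok=sigpipe git some-command-writing-to-a-pipe
+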
+ - test_might_fail [<options>] <git-command>
Similar to test_must_fail, but tolerate success, too. Use this
instead of "<git-command> || :" to catch failures due to segv.
+ Accepts the same options as test_must_fail.
+
- test_cmp <expected> <actual>
Check whether the content of the <actual> file matches the
printf("no untracked cache\n");
return 0;
}
- printf("info/exclude %s\n", sha1_to_hex(uc->ss_info_exclude.sha1));
- printf("core.excludesfile %s\n", sha1_to_hex(uc->ss_excludes_file.sha1));
+ printf("info/exclude %s\n", oid_to_hex(&uc->ss_info_exclude.oid));
+ printf("core.excludesfile %s\n", oid_to_hex(&uc->ss_excludes_file.oid));
printf("exclude_per_dir %s\n", uc->exclude_per_dir);
printf("flags %08x\n", uc->dir_flags);
if (uc->root)
#include "git-compat-util.h"
#include "hashmap.h"
+#include "strbuf.h"
struct test_entry
{
return strcmp(e1->key, key ? key : e2->key);
}
-static struct test_entry *alloc_test_entry(int hash, char *key, int klen,
- char *value, int vlen)
+static struct test_entry *alloc_test_entry(unsigned int hash,
+ char *key, char *value)
{
- struct test_entry *entry = malloc(sizeof(struct test_entry) + klen
- + vlen + 2);
+ size_t klen = strlen(key);
+ size_t vlen = strlen(value);
+ struct test_entry *entry = xmalloc(st_add4(sizeof(*entry), klen, vlen, 2));
hashmap_entry_init(entry, hash);
memcpy(entry->key, key, klen + 1);
memcpy(entry->key + klen + 1, value, vlen + 1);
unsigned int *hashes;
unsigned int i, j;
- entries = malloc(TEST_SIZE * sizeof(struct test_entry *));
- hashes = malloc(TEST_SIZE * sizeof(int));
+ ALLOC_ARRAY(entries, TEST_SIZE);
+ ALLOC_ARRAY(hashes, TEST_SIZE);
for (i = 0; i < TEST_SIZE; i++) {
- snprintf(buf, sizeof(buf), "%i", i);
- entries[i] = alloc_test_entry(0, buf, strlen(buf), "", 0);
+ xsnprintf(buf, sizeof(buf), "%i", i);
+ entries[i] = alloc_test_entry(0, buf, "");
hashes[i] = hash(method, i, entries[i]->key);
}
*/
int cmd_main(int argc, const char **argv)
{
- char line[1024];
+ struct strbuf line = STRBUF_INIT;
struct hashmap map;
int icase;
hashmap_init(&map, test_entry_cmp, &icase, 0);
/* process commands from stdin */
- while (fgets(line, sizeof(line), stdin)) {
+ while (strbuf_getline(&line, stdin) != EOF) {
char *cmd, *p1 = NULL, *p2 = NULL;
- int l1 = 0, l2 = 0, hash = 0;
+ unsigned int hash = 0;
struct test_entry *entry;
/* break line into command and up to two parameters */
- cmd = strtok(line, DELIM);
+ cmd = strtok(line.buf, DELIM);
/* ignore empty lines */
if (!cmd || *cmd == '#')
continue;
p1 = strtok(NULL, DELIM);
if (p1) {
- l1 = strlen(p1);
hash = icase ? strihash(p1) : strhash(p1);
p2 = strtok(NULL, DELIM);
- if (p2)
- l2 = strlen(p2);
}
- if (!strcmp("hash", cmd) && l1) {
+ if (!strcmp("hash", cmd) && p1) {
/* print results of different hash functions */
- printf("%u %u %u %u\n", strhash(p1), memhash(p1, l1),
- strihash(p1), memihash(p1, l1));
+ printf("%u %u %u %u\n",
+ strhash(p1), memhash(p1, strlen(p1)),
+ strihash(p1), memihash(p1, strlen(p1)));
- } else if (!strcmp("add", cmd) && l1 && l2) {
+ } else if (!strcmp("add", cmd) && p1 && p2) {
/* create entry with key = p1, value = p2 */
- entry = alloc_test_entry(hash, p1, l1, p2, l2);
+ entry = alloc_test_entry(hash, p1, p2);
/* add to hashmap */
hashmap_add(&map, entry);
- } else if (!strcmp("put", cmd) && l1 && l2) {
+ } else if (!strcmp("put", cmd) && p1 && p2) {
/* create entry with key = p1, value = p2 */
- entry = alloc_test_entry(hash, p1, l1, p2, l2);
+ entry = alloc_test_entry(hash, p1, p2);
/* add / replace entry */
entry = hashmap_put(&map, entry);
puts(entry ? get_value(entry) : "NULL");
free(entry);
- } else if (!strcmp("get", cmd) && l1) {
+ } else if (!strcmp("get", cmd) && p1) {
/* lookup entry in hashmap */
entry = hashmap_get_from_hash(&map, hash, p1);
entry = hashmap_get_next(&map, entry);
}
- } else if (!strcmp("remove", cmd) && l1) {
+ } else if (!strcmp("remove", cmd) && p1) {
/* setup static key */
struct hashmap_entry key;
printf("%u %u\n", map.tablesize,
hashmap_get_size(&map));
- } else if (!strcmp("intern", cmd) && l1) {
+ } else if (!strcmp("intern", cmd) && p1) {
/* test that strintern works */
const char *i1 = strintern(p1);
else
printf("%s\n", i1);
- } else if (!strcmp("perfhashmap", cmd) && l1 && l2) {
+ } else if (!strcmp("perfhashmap", cmd) && p1 && p2) {
perf_hashmap(atoi(p1), atoi(p2));
}
}
+ strbuf_release(&line);
hashmap_free(&map, 1);
return 0;
}
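The rewritten helper still reads whitespace-separated commands from stdin; a sketch of a session (per the code above, "put" prints the value it replaced, or NULL, and "get" prints the stored value):

	$ printf "put foo bar\nget foo\n" | t/helper/test-hashmap
	NULL
	bar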
struct child_process proc = CHILD_PROCESS_INIT;
int jobs;
+ if (argc < 3)
+ return 1;
+ while (!strcmp(argv[1], "env")) {
+ if (!argv[2])
+ die("env specifier without a value");
+ argv_array_push(&proc.env_array, argv[2]);
+ argv += 2;
+ argc -= 2;
+ }
if (argc < 3)
return 1;
proc.argv = (const char **)argv + 2;
return !!wildmatch(argv[3], argv[2], WM_PATHNAME | WM_CASEFOLD);
else if (!strcmp(argv[1], "pathmatch"))
return !!wildmatch(argv[3], argv[2], 0);
+ else if (!strcmp(argv[1], "ipathmatch"))
+ return !!wildmatch(argv[3], argv[2], WM_CASEFOLD);
else
return 1;
}
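With the new "ipathmatch" mode, a case-folded pathname match can be probed directly; the helper takes the text and then the pattern, and exits 0 on a match, e.g.:

	t/helper/test-wildmatch ipathmatch FOO/BAR "foo/*"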
GIT_DAEMON_PID=
GIT_DAEMON_DOCUMENT_ROOT_PATH="$PWD"/repo
-GIT_DAEMON_URL=git://127.0.0.1:$LIB_GIT_DAEMON_PORT
+GIT_DAEMON_HOST_PORT=127.0.0.1:$LIB_GIT_DAEMON_PORT
+GIT_DAEMON_URL=git://$GIT_DAEMON_HOST_PORT
start_git_daemon() {
if test -n "$GIT_DAEMON_PID"
"$@" "$GIT_DAEMON_DOCUMENT_ROOT_PATH" \
>&3 2>git_daemon_output &
GIT_DAEMON_PID=$!
+ >daemon.log
{
- read line <&7
- echo >&4 "$line"
- cat <&7 >&4 &
- } 7<git_daemon_output &&
+ read -r line <&7
+ printf "%s\n" "$line"
+ printf >&4 "%s\n" "$line"
+ (
+ while read -r line <&7
+ do
+ printf "%s\n" "$line"
+ printf >&4 "%s\n" "$line"
+ done
+ ) &
+ } 7<git_daemon_output >>"$TRASH_DIRECTORY/daemon.log" &&
# Check expected output
if test x"$(expr "$line" : "\[[0-9]*\] \(.*\)")" != x"Ready to rumble"
GIT_DAEMON_PID=
rm -f git_daemon_output
}
+
+# A stripped-down version of a netcat client, that connects to a "host:port"
+# given in $1, sends its stdin followed by EOF, then dumps the response (until
+# EOF) to stdout.
+fake_nc() {
+ if ! test_declared_prereq FAKENC
+ then
+ echo >&4 "fake_nc: need to declare FAKENC prerequisite"
+ return 127
+ fi
+ perl -Mstrict -MIO::Socket::INET -e '
+ my $s = IO::Socket::INET->new(shift)
+ or die "unable to open socket: $!";
+ print $s <STDIN>;
+ $s->shutdown(1);
+ print <$s>;
+ ' "$@"
+}
+
+test_lazy_prereq FAKENC '
+ perl -MIO::Socket::INET -e "exit 0"
+'
echo >&4 "test_terminal: need to declare TTY prerequisite"
return 127
fi
- perl "$TEST_DIRECTORY"/test-terminal.perl "$@"
-}
+ perl "$TEST_DIRECTORY"/test-terminal.perl "$@" 2>&7
+} 7>&2 2>&4
test_lazy_prereq TTY '
test_have_prereq PERL &&
#!/usr/bin/perl
-use lib '../../perl/blib/lib';
+use lib '../../perl/build/lib';
use strict;
use warnings;
+use JSON;
use Git;
sub get_times {
return $out;
}
-my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests);
+my (@dirs, %dirnames, %dirabbrevs, %prefixes, @tests,
+ $codespeed, $subsection, $reponame);
while (scalar @ARGV) {
my $arg = $ARGV[0];
my $dir;
+ if ($arg eq "--codespeed") {
+ $codespeed = 1;
+ shift @ARGV;
+ next;
+ }
+ if ($arg eq "--subsection") {
+ shift @ARGV;
+ $subsection = $ARGV[0];
+ shift @ARGV;
+ if (! $subsection) {
+ die "empty subsection";
+ }
+ next;
+ }
+ if ($arg eq "--reponame") {
+ shift @ARGV;
+ $reponame = $ARGV[0];
+ shift @ARGV;
+ if (! $reponame) {
+ die "empty reponame";
+ }
+ next;
+ }
last if -f $arg or $arg eq "--";
if (! -d $arg) {
my $rev = Git::command_oneline(qw(rev-parse --verify), $arg);
}
my $resultsdir = "test-results";
-if ($ENV{GIT_PERF_SUBSECTION} ne "") {
- $resultsdir .= "/" . $ENV{GIT_PERF_SUBSECTION};
+
+if (! $subsection and
+ exists $ENV{GIT_PERF_SUBSECTION} and
+ $ENV{GIT_PERF_SUBSECTION} ne "") {
+ $subsection = $ENV{GIT_PERF_SUBSECTION};
+}
+
+if ($subsection) {
+ $resultsdir .= "/" . $subsection;
}
my @subtests;
return $line;
}
-my %descrs;
-my $descrlen = 4; # "Test"
-for my $t (@subtests) {
- $descrs{$t} = $shorttests{$t}.": ".read_descr("$resultsdir/$t.descr");
- $descrlen = length $descrs{$t} if length $descrs{$t}>$descrlen;
-}
-
sub have_duplicate {
my %seen;
for (@_) {
return 0;
}
-my %newdirabbrevs = %dirabbrevs;
-while (!have_duplicate(values %newdirabbrevs)) {
- %dirabbrevs = %newdirabbrevs;
- last if !have_slash(values %dirabbrevs);
- %newdirabbrevs = %dirabbrevs;
- for (values %newdirabbrevs) {
- s{^[^/]*/}{};
+sub print_default_results {
+ my %descrs;
+ my $descrlen = 4; # "Test"
+ for my $t (@subtests) {
+ $descrs{$t} = $shorttests{$t}.": ".read_descr("$resultsdir/$t.descr");
+ $descrlen = length $descrs{$t} if length $descrs{$t}>$descrlen;
}
-}
-my %times;
-my @colwidth = ((0)x@dirs);
-for my $i (0..$#dirs) {
- my $d = $dirs[$i];
- my $w = length (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d});
- $colwidth[$i] = $w if $w > $colwidth[$i];
-}
-for my $t (@subtests) {
- my $firstr;
+ my %newdirabbrevs = %dirabbrevs;
+ while (!have_duplicate(values %newdirabbrevs)) {
+ %dirabbrevs = %newdirabbrevs;
+ last if !have_slash(values %dirabbrevs);
+ %newdirabbrevs = %dirabbrevs;
+ for (values %newdirabbrevs) {
+ s{^[^/]*/}{};
+ }
+ }
+
+ my %times;
+ my @colwidth = ((0)x@dirs);
for my $i (0..$#dirs) {
my $d = $dirs[$i];
- $times{$prefixes{$d}.$t} = [get_times("$resultsdir/$prefixes{$d}$t.times")];
- my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}};
- my $w = length format_times($r,$u,$s,$firstr);
+ my $w = length (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d});
$colwidth[$i] = $w if $w > $colwidth[$i];
- $firstr = $r unless defined $firstr;
}
-}
-my $totalwidth = 3*@dirs+$descrlen;
-$totalwidth += $_ for (@colwidth);
-
-binmode STDOUT, ":utf8" or die "PANIC on binmode: $!";
+ for my $t (@subtests) {
+ my $firstr;
+ for my $i (0..$#dirs) {
+ my $d = $dirs[$i];
+ $times{$prefixes{$d}.$t} = [get_times("$resultsdir/$prefixes{$d}$t.times")];
+ my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}};
+ my $w = length format_times($r,$u,$s,$firstr);
+ $colwidth[$i] = $w if $w > $colwidth[$i];
+ $firstr = $r unless defined $firstr;
+ }
+ }
+ my $totalwidth = 3*@dirs+$descrlen;
+ $totalwidth += $_ for (@colwidth);
-printf "%-${descrlen}s", "Test";
-for my $i (0..$#dirs) {
- my $d = $dirs[$i];
- printf " %-$colwidth[$i]s", (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d});
-}
-print "\n";
-print "-"x$totalwidth, "\n";
-for my $t (@subtests) {
- printf "%-${descrlen}s", $descrs{$t};
- my $firstr;
+ printf "%-${descrlen}s", "Test";
for my $i (0..$#dirs) {
my $d = $dirs[$i];
- my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}};
- printf " %-$colwidth[$i]s", format_times($r,$u,$s,$firstr);
- $firstr = $r unless defined $firstr;
+ printf " %-$colwidth[$i]s", (exists $dirabbrevs{$d} ? $dirabbrevs{$d} : $dirnames{$d});
}
print "\n";
+ print "-"x$totalwidth, "\n";
+ for my $t (@subtests) {
+ printf "%-${descrlen}s", $descrs{$t};
+ my $firstr;
+ for my $i (0..$#dirs) {
+ my $d = $dirs[$i];
+ my ($r,$u,$s) = @{$times{$prefixes{$d}.$t}};
+ printf " %-$colwidth[$i]s", format_times($r,$u,$s,$firstr);
+ $firstr = $r unless defined $firstr;
+ }
+ print "\n";
+ }
+}
+
+sub print_codespeed_results {
+ my ($subsection) = @_;
+
+ my $project = "Git";
+
+ my $executable = `uname -s -m`;
+ chomp $executable;
+
+ if ($subsection) {
+ $executable .= ", " . $subsection;
+ }
+
+ my $environment;
+ if ($reponame) {
+ $environment = $reponame;
+ } elsif (exists $ENV{GIT_PERF_REPO_NAME} and $ENV{GIT_PERF_REPO_NAME} ne "") {
+ $environment = $ENV{GIT_PERF_REPO_NAME};
+ } elsif (exists $ENV{GIT_TEST_INSTALLED} and $ENV{GIT_TEST_INSTALLED} ne "") {
+ $environment = $ENV{GIT_TEST_INSTALLED};
+ $environment =~ s|/bin-wrappers$||;
+ } else {
+ $environment = `uname -r`;
+ chomp $environment;
+ }
+
+ my @data;
+
+ for my $t (@subtests) {
+ for my $d (@dirs) {
+ my $commitid = $prefixes{$d};
+ $commitid =~ s/^build_//;
+ $commitid =~ s/\.$//;
+ my ($result_value, $u, $s) = get_times("$resultsdir/$prefixes{$d}$t.times");
+
+ my %vals = (
+ "commitid" => $commitid,
+ "project" => $project,
+ "branch" => $dirnames{$d},
+ "executable" => $executable,
+ "benchmark" => $shorttests{$t} . " " . read_descr("$resultsdir/$t.descr"),
+ "environment" => $environment,
+ "result_value" => $result_value,
+ );
+ push @data, \%vals;
+ }
+ }
+
+ print to_json(\@data, {utf8 => 1, pretty => 1, canonical => 1}), "\n";
+}
+
+binmode STDOUT, ":utf8" or die "PANIC on binmode: $!";
+
+if ($codespeed) {
+ print_codespeed_results($subsection);
+} else {
+ print_default_results();
}
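A sketch of an invocation that prints the Codespeed JSON for two already-built revisions (the revisions and perf script are illustrative):

	./aggregate.perl --codespeed --reponame linux v2.16.0 v2.17.0 p0001-rev-list.sh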
env_var="$1"
conf_sec="$2"
conf_var="$3"
- # $4 can be set to a default value
+ conf_opts="$4" # optional
# Do nothing if the env variable is already set
eval "test -z \"\${$env_var+x}\"" || return
if test -n "$GIT_PERF_SUBSECTION"
then
var="$conf_sec.$GIT_PERF_SUBSECTION.$conf_var"
- conf_value=$(git config -f "$GIT_PERF_CONFIG_FILE" "$var") &&
+ conf_value=$(git config $conf_opts -f "$GIT_PERF_CONFIG_FILE" "$var") &&
eval "$env_var=\"$conf_value\"" && return
fi
var="$conf_sec.$conf_var"
- conf_value=$(git config -f "$GIT_PERF_CONFIG_FILE" "$var") &&
- eval "$env_var=\"$conf_value\"" && return
-
- test -n "${4+x}" && eval "$env_var=\"$4\""
+ conf_value=$(git config $conf_opts -f "$GIT_PERF_CONFIG_FILE" "$var") &&
+ eval "$env_var=\"$conf_value\""
}
run_subsection () {
- get_var_from_env_or_config "GIT_PERF_REPEAT_COUNT" "perf" "repeatCount" 3
+ get_var_from_env_or_config "GIT_PERF_REPEAT_COUNT" "perf" "repeatCount" "--int"
+ : ${GIT_PERF_REPEAT_COUNT:=3}
export GIT_PERF_REPEAT_COUNT
get_var_from_env_or_config "GIT_PERF_DIRS_OR_REVS" "perf" "dirsOrRevs"
get_var_from_env_or_config "GIT_PERF_MAKE_COMMAND" "perf" "makeCommand"
get_var_from_env_or_config "GIT_PERF_MAKE_OPTS" "perf" "makeOpts"
+ get_var_from_env_or_config "GIT_PERF_REPO_NAME" "perf" "repoName"
+ export GIT_PERF_REPO_NAME
+
GIT_PERF_AGGREGATING_LATER=t
export GIT_PERF_AGGREGATING_LATER
set -- . "$@"
fi
+ codespeed_opt=
+ test "$GIT_PERF_CODESPEED_OUTPUT" = "true" && codespeed_opt="--codespeed"
+
run_dirs "$@"
- ./aggregate.perl "$@"
+
+ if test -z "$GIT_PERF_SEND_TO_CODESPEED"
+ then
+ ./aggregate.perl $codespeed_opt "$@"
+ else
+ json_res_file="test-results/$GIT_PERF_SUBSECTION/aggregate.json"
+ ./aggregate.perl --codespeed "$@" | tee "$json_res_file"
+ send_data_url="$GIT_PERF_SEND_TO_CODESPEED/result/add/json/"
+ curl -v --request POST --data-urlencode "json=$(cat "$json_res_file")" "$send_data_url"
+ fi
}
+get_var_from_env_or_config "GIT_PERF_CODESPEED_OUTPUT" "perf" "codespeedOutput" "--bool"
+get_var_from_env_or_config "GIT_PERF_SEND_TO_CODESPEED" "perf" "sendToCodespeed"
+
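For reference, the new knobs can be set in the file named by $GIT_PERF_CONFIG_FILE; a sketch (the values and URL are illustrative):

	cat >"$GIT_PERF_CONFIG_FILE" <<-\EOF
	[perf]
		repeatCount = 10
		repoName = linux
		codespeedOutput = true
		sendToCodespeed = http://localhost:8000
	EOF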
cd "$(dirname $0)"
. ../../GIT-BUILD-OPTIONS
echo "$1" | sed -e 's|\(..\)|\1/|'
}
-objck() {
- p=$(objpath "$1")
- if test ! -f "$REAL/objects/$p"
- then
- echo "Object not found: $REAL/objects/$p"
- false
- fi
-}
-
test_expect_success 'initial setup' '
REAL="$(pwd)/.real" &&
mv .git "$REAL"
test_expect_success 'bad setup: invalid .git file format' '
echo "gitdir $REAL" >.git &&
- if git rev-parse 2>.err
- then
- echo "git rev-parse accepted an invalid .git file"
- false
- fi &&
- if ! grep "Invalid gitfile format" .err
- then
- echo "git rev-parse returned wrong error"
- false
- fi
+ test_must_fail git rev-parse 2>.err &&
+ test_i18ngrep "invalid gitfile format" .err
'
test_expect_success 'bad setup: invalid .git file path' '
echo "gitdir: $REAL.not" >.git &&
- if git rev-parse 2>.err
- then
- echo "git rev-parse accepted an invalid .git file path"
- false
- fi &&
- if ! grep "Not a git repository" .err
- then
- echo "git rev-parse returned wrong error"
- false
- fi
+ test_must_fail git rev-parse 2>.err &&
+ test_i18ngrep "not a git repository" .err
'
test_expect_success 'final setup + check rev-parse --git-dir' '
test_expect_success 'check hash-object' '
echo "foo" >bar &&
SHA=$(cat bar | git hash-object -w --stdin) &&
- objck $SHA
+ test_path_is_file "$REAL/objects/$(objpath $SHA)"
'
test_expect_success 'check cat-file' '
'
test_expect_success 'check update-index' '
- if test -f "$REAL/index"
- then
- echo "Hmm, $REAL/index exists?"
- false
- fi &&
+ test_path_is_missing "$REAL/index" &&
rm -f "$REAL/objects/$(objpath $SHA)" &&
git update-index --add bar &&
- if ! test -f "$REAL/index"
- then
- echo "$REAL/index not found"
- false
- fi &&
- objck $SHA
+ test_path_is_file "$REAL/index" &&
+ test_path_is_file "$REAL/objects/$(objpath $SHA)"
'
test_expect_success 'check write-tree' '
SHA=$(git write-tree) &&
- objck $SHA
+ test_path_is_file "$REAL/objects/$(objpath $SHA)"
'
test_expect_success 'check commit-tree' '
SHA=$(echo "commit bar" | git commit-tree $SHA) &&
- objck $SHA
+ test_path_is_file "$REAL/objects/$(objpath $SHA)"
'
test_expect_success 'check rev-list' '
cd .git &&
test_check_ignore "foo" 128
) &&
- stderr_contains "fatal: This operation must be run in a work tree"
+ stderr_contains "fatal: this operation must be run in a work tree"
'
############################################################################
echo "$response" | grep "^:: two"
'
+test_expect_success 'existing file and directory' '
+ test_when_finished "rm one" &&
+ test_when_finished "rmdir top-level-dir" &&
+ >one &&
+ mkdir top-level-dir &&
+ git check-ignore one top-level-dir >actual &&
+ grep one actual &&
+ grep top-level-dir actual
+'
+
+test_expect_success 'existing directory and file' '
+ test_when_finished "rm one" &&
+ test_when_finished "rmdir top-level-dir" &&
+ >one &&
+ mkdir top-level-dir &&
+ git check-ignore top-level-dir one >actual &&
+ grep one actual &&
+ grep top-level-dir actual
+'
+
############################################################################
#
# test whitespace handling
git merge topic
'
-
+test_expect_success CASE_INSENSITIVE_FS 'add directory (with different case)' '
+ git reset --hard initial &&
+ mkdir -p dir1/dir2 &&
+ echo >dir1/dir2/a &&
+ echo >dir1/dir2/b &&
+ git add dir1/dir2/a &&
+ git add dir1/DIR2/b &&
+ git ls-files >actual &&
+ cat >expected <<-\EOF &&
+ camelcase
+ dir1/dir2/a
+ dir1/dir2/b
+ EOF
+ test_cmp expected actual
+'
test_expect_failure CASE_INSENSITIVE_FS 'add (with different case)' '
git reset --hard initial &&
test_cmp expect actual
'
+test_trace () {
+ expect="$1"
+ shift
+ GIT_TRACE=1 test-run-command "$@" run-command true 2>&1 >/dev/null | \
+ sed 's/.* run_command: //' >actual &&
+ echo "$expect true" >expect &&
+ test_cmp expect actual
+}
+
+test_expect_success 'GIT_TRACE with environment variables' '
+ test_trace "abc=1 def=2" env abc=1 env def=2 &&
+ test_trace "abc=2" env abc env abc=1 env abc=2 &&
+ test_trace "abc=2" env abc env abc=2 &&
+ (
+ abc=1 && export abc &&
+ test_trace "def=1" env abc=1 env def=1
+ ) &&
+ (
+ abc=1 && export abc &&
+ test_trace "def=1" env abc env abc=1 env def=1
+ ) &&
+ test_trace "def=1" env non-exist env def=1 &&
+ test_trace "abc=2" env abc=1 env abc env abc=2 &&
+ (
+ abc=1 def=2 && export abc def &&
+ test_trace "unset abc def;" env abc env def
+ ) &&
+ (
+ abc=1 def=2 && export abc def &&
+ test_trace "unset def; abc=3" env abc env def env abc=3
+ ) &&
+ (
+ abc=1 && export abc &&
+ test_trace "unset abc;" env abc=2 env abc
+ )
+'
+
test_done
. ./lib-gettext.sh
-test_expect_success GETTEXT_POISON "sanity: \$GIT_INTERNAL_GETTEXT_SH_SCHEME is set (to $GIT_INTERNAL_GETTEXT_SH_SCHEME)" '
- test -n "$GIT_INTERNAL_GETTEXT_SH_SCHEME"
-'
-
test_expect_success GETTEXT_POISON 'sanity: $GIT_INTERNAL_GETTEXT_SH_SCHEME" is poison' '
test "$GIT_INTERNAL_GETTEXT_SH_SCHEME" = "poison"
'
--- /dev/null
+#!/bin/sh
+
+test_description='partial clone'
+
+. ./test-lib.sh
+
+delete_object () {
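+	# insert "/" after the first two hex digits to form the loose
+	# object path, e.g. "0123ab..." -> "01/23ab..."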
+ rm "$1/.git/objects/$(echo "$2" | sed -e 's|^..|&/|')"
+}
+
+pack_as_from_promisor () {
+ HASH=$(git -C repo pack-objects .git/objects/pack/pack) &&
+ >repo/.git/objects/pack/pack-$HASH.promisor &&
+ echo $HASH
+}
+
+promise_and_delete () {
+ HASH=$(git -C repo rev-parse "$1") &&
+ git -C repo tag -a -m message my_annotated_tag "$HASH" &&
+ git -C repo rev-parse my_annotated_tag | pack_as_from_promisor &&
+ # tag -d prints a message to stdout, so redirect it
+ git -C repo tag -d my_annotated_tag >/dev/null &&
+ delete_object repo "$HASH"
+}
+
+test_expect_success 'missing reflog object, but promised by a commit, passes fsck' '
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+ C=$(git -C repo commit-tree -m c -p $A HEAD^{tree}) &&
+
+ # Reference $A only from reflog, and delete it
+ git -C repo branch my_branch "$A" &&
+ git -C repo branch -f my_branch my_commit &&
+ delete_object repo "$A" &&
+
+ # State that we got $C, which refers to $A, from promisor
+ printf "%s\n" "$C" | pack_as_from_promisor &&
+
+ # Normally, it fails
+ test_must_fail git -C repo fsck &&
+
+ # But with the extension, it succeeds
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo fsck
+'
+
+test_expect_success 'missing reflog object, but promised by a tag, passes fsck' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+ git -C repo tag -a -m d my_tag_name $A &&
+ T=$(git -C repo rev-parse my_tag_name) &&
+ git -C repo tag -d my_tag_name &&
+
+ # Reference $A only from reflog, and delete it
+ git -C repo branch my_branch "$A" &&
+ git -C repo branch -f my_branch my_commit &&
+ delete_object repo "$A" &&
+
+ # State that we got $T, which refers to $A, from promisor
+ printf "%s\n" "$T" | pack_as_from_promisor &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo fsck
+'
+
+test_expect_success 'missing reflog object alone fails fsck, even with extension set' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+ B=$(git -C repo commit-tree -m b HEAD^{tree}) &&
+
+ # Reference $A only from reflog, and delete it
+ git -C repo branch my_branch "$A" &&
+ git -C repo branch -f my_branch my_commit &&
+ delete_object repo "$A" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ test_must_fail git -C repo fsck
+'
+
+test_expect_success 'missing ref object, but promised, passes fsck' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+
+ # Reference $A only from a ref
+ git -C repo branch my_branch "$A" &&
+ promise_and_delete "$A" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo fsck
+'
+
+test_expect_success 'missing object, but promised, passes fsck' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo 1 &&
+ test_commit -C repo 2 &&
+ test_commit -C repo 3 &&
+ git -C repo tag -a annotated_tag -m "annotated tag" &&
+
+ C=$(git -C repo rev-parse 1) &&
+ T=$(git -C repo rev-parse 2^{tree}) &&
+ B=$(git hash-object repo/3.t) &&
+ AT=$(git -C repo rev-parse annotated_tag) &&
+
+ promise_and_delete "$C" &&
+ promise_and_delete "$T" &&
+ promise_and_delete "$B" &&
+ promise_and_delete "$AT" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo fsck
+'
+
+test_expect_success 'missing CLI object, but promised, passes fsck' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ A=$(git -C repo commit-tree -m a HEAD^{tree}) &&
+ promise_and_delete "$A" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo fsck "$A"
+'
+
+test_expect_success 'fetching of missing objects' '
+ rm -rf repo &&
+ test_create_repo server &&
+ test_commit -C server foo &&
+ git -C server repack -a -d --write-bitmap-index &&
+
+ git clone "file://$(pwd)/server" repo &&
+ HASH=$(git -C repo rev-parse foo) &&
+ rm -rf repo/.git/objects/* &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "origin" &&
+ git -C repo cat-file -p "$HASH" &&
+
+ # Ensure that the .promisor file is written, and check that its
+ # associated packfile contains the object
+ ls repo/.git/objects/pack/pack-*.promisor >promisorlist &&
+ test_line_count = 1 promisorlist &&
+ IDX=$(cat promisorlist | sed "s/promisor$/idx/") &&
+ git verify-pack --verbose "$IDX" | grep "$HASH"
+'
+
+test_expect_success 'rev-list stops traversal at missing and promised commit' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo foo &&
+ test_commit -C repo bar &&
+
+ FOO=$(git -C repo rev-parse foo) &&
+ promise_and_delete "$FOO" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo rev-list --exclude-promisor-objects --objects bar >out &&
+ grep $(git -C repo rev-parse bar) out &&
+ ! grep $FOO out
+'
+
+test_expect_success 'rev-list stops traversal at missing and promised tree' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo foo &&
+ mkdir repo/a_dir &&
+ echo something >repo/a_dir/something &&
+ git -C repo add a_dir/something &&
+ git -C repo commit -m bar &&
+
+ # foo^{tree} (tree referenced from commit)
+ TREE=$(git -C repo rev-parse foo^{tree}) &&
+
+ # a tree referenced by HEAD^{tree} (tree referenced from tree)
+ TREE2=$(git -C repo ls-tree HEAD^{tree} | grep " tree " | head -1 | cut -b13-52) &&
+
+ promise_and_delete "$TREE" &&
+ promise_and_delete "$TREE2" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo rev-list --exclude-promisor-objects --objects HEAD >out &&
+ grep $(git -C repo rev-parse foo) out &&
+ ! grep $TREE out &&
+ grep $(git -C repo rev-parse HEAD) out &&
+ ! grep $TREE2 out
+'
+
+test_expect_success 'rev-list stops traversal at missing and promised blob' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ echo something >repo/something &&
+ git -C repo add something &&
+ git -C repo commit -m foo &&
+
+ BLOB=$(git -C repo hash-object -w something) &&
+ promise_and_delete "$BLOB" &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo rev-list --exclude-promisor-objects --objects HEAD >out &&
+ grep $(git -C repo rev-parse HEAD) out &&
+ ! grep $BLOB out
+'
+
+test_expect_success 'rev-list stops traversal at promisor commit, tree, and blob' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo foo &&
+ test_commit -C repo bar &&
+ test_commit -C repo baz &&
+
+ COMMIT=$(git -C repo rev-parse foo) &&
+ TREE=$(git -C repo rev-parse bar^{tree}) &&
+ BLOB=$(git hash-object repo/baz.t) &&
+ printf "%s\n%s\n%s\n" $COMMIT $TREE $BLOB | pack_as_from_promisor &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo rev-list --exclude-promisor-objects --objects HEAD >out &&
+ ! grep $COMMIT out &&
+ ! grep $TREE out &&
+ ! grep $BLOB out &&
+ grep $(git -C repo rev-parse bar) out # sanity check that some walking was done
+'
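+
+# Note: --exclude-promisor-objects both omits objects that sit in promisor
+# packs from the output and tolerates promised-but-missing objects instead
+# of aborting the traversal.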
+
+test_expect_success 'rev-list accepts missing and promised objects on command line' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo foo &&
+ test_commit -C repo bar &&
+ test_commit -C repo baz &&
+
+ COMMIT=$(git -C repo rev-parse foo) &&
+ TREE=$(git -C repo rev-parse bar^{tree}) &&
+ BLOB=$(git hash-object repo/baz.t) &&
+
+ promise_and_delete $COMMIT &&
+ promise_and_delete $TREE &&
+ promise_and_delete $BLOB &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo rev-list --exclude-promisor-objects --objects "$COMMIT" "$TREE" "$BLOB"
+'
+
+test_expect_success 'gc does not repack promisor objects' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ TREE_HASH=$(git -C repo rev-parse HEAD^{tree}) &&
+ HASH=$(printf "$TREE_HASH\n" | pack_as_from_promisor) &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo gc &&
+
+ # Ensure that the promisor packfile still exists, and remove it
+ test -e repo/.git/objects/pack/pack-$HASH.pack &&
+ rm repo/.git/objects/pack/pack-$HASH.* &&
+
+ # Ensure that the single other pack contains the commit, but not the tree
+ ls repo/.git/objects/pack/pack-*.pack >packlist &&
+ test_line_count = 1 packlist &&
+ git verify-pack repo/.git/objects/pack/pack-*.pack -v >out &&
+ grep "$(git -C repo rev-parse HEAD)" out &&
+ ! grep "$TREE_HASH" out
+'
+
+test_expect_success 'gc stops traversal when a missing but promised object is reached' '
+ rm -rf repo &&
+ test_create_repo repo &&
+ test_commit -C repo my_commit &&
+
+ TREE_HASH=$(git -C repo rev-parse HEAD^{tree}) &&
+ HASH=$(promise_and_delete $TREE_HASH) &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "arbitrary string" &&
+ git -C repo gc &&
+
+ # Ensure that the promisor packfile still exists, and remove it
+ test -e repo/.git/objects/pack/pack-$HASH.pack &&
+ rm repo/.git/objects/pack/pack-$HASH.* &&
+
+ # Ensure that the single other pack contains the commit, but not the tree
+ ls repo/.git/objects/pack/pack-*.pack >packlist &&
+ test_line_count = 1 packlist &&
+ git verify-pack repo/.git/objects/pack/pack-*.pack -v >out &&
+ grep "$(git -C repo rev-parse HEAD)" out &&
+ ! grep "$TREE_HASH" out
+'
+
+LIB_HTTPD_PORT=12345 # the default port, 410, is privileged and cannot be used as non-root
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'fetching of missing objects from an HTTP server' '
+ rm -rf repo &&
+ SERVER="$HTTPD_DOCUMENT_ROOT_PATH/server" &&
+ test_create_repo "$SERVER" &&
+ test_commit -C "$SERVER" foo &&
+ git -C "$SERVER" repack -a -d --write-bitmap-index &&
+
+ git clone $HTTPD_URL/smart/server repo &&
+ HASH=$(git -C repo rev-parse foo) &&
+ rm -rf repo/.git/objects/* &&
+
+ git -C repo config core.repositoryformatversion 1 &&
+ git -C repo config extensions.partialclone "origin" &&
+ git -C repo cat-file -p "$HASH" &&
+
+ # Ensure that the .promisor file is written, and check that its
+ # associated packfile contains the object
+ ls repo/.git/objects/pack/pack-*.promisor >promisorlist &&
+ test_line_count = 1 promisorlist &&
+ IDX=$(cat promisorlist | sed "s/promisor$/idx/") &&
+ git verify-pack --verbose "$IDX" | grep "$HASH"
+'
+
+stop_httpd
+
+test_done
GIT_CONFIG_PARAMETERS="" git -c x.one=1 config --list
'
+sq="'"
+test_expect_success 'detect bogus GIT_CONFIG_PARAMETERS' '
+ cat >expect <<-\EOF &&
+ env.one one
+ env.two two
+ EOF
+ GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq} ${sq}env.two=two${sq}" \
+ git config --get-regexp "env.*" >actual &&
+ test_cmp expect actual &&
+
+ cat >expect <<-EOF &&
+ env.one one${sq}
+ env.two two
+ EOF
+ GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq}\\$sq$sq$sq ${sq}env.two=two${sq}" \
+ git config --get-regexp "env.*" >actual &&
+ test_cmp expect actual &&
+
+ test_must_fail env \
+ GIT_CONFIG_PARAMETERS="${sq}env.one=one${sq}\\$sq ${sq}env.two=two${sq}" \
+ git config --get-regexp "env.*"
+'
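+
+# GIT_CONFIG_PARAMETERS holds space-separated, single-quoted key=value
+# entries. The second case above smuggles an escaped quote through into the
+# value ("one'"); the third is deliberately unbalanced quoting, which git
+# must reject.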
+
test_expect_success 'git config --edit works' '
git config -f tmp test.value no &&
echo test.value=yes >expect &&
test_expect_success 'relative path outside worktree' '
test_must_fail git rev-parse HEAD:../file.txt >output 2>error &&
test -z "$(cat output)" &&
- grep "outside repository" error
+ test_i18ngrep "outside repository" error
'
test_expect_success 'relative path when cwd is outside worktree' '
error_message () {
(cd clone &&
- test_must_fail git rev-parse --verify "$@")
+ test_must_fail git rev-parse --verify "$@" 2>../error)
}
test_expect_success '@{upstream} resolves to correct full name' '
cat >expect <<-EOF &&
fatal: no upstream configured for branch ${sq}non-tracking${sq}
EOF
- error_message non-tracking@{u} 2>actual &&
- test_i18ncmp expect actual
+ error_message non-tracking@{u} &&
+ test_i18ncmp expect error
'
test_expect_success '@{u} error message when no upstream' '
cat >expect <<-EOF &&
fatal: no such branch: ${sq}no-such-branch${sq}
EOF
- error_message no-such-branch@{u} 2>actual &&
- test_i18ncmp expect actual
+ error_message no-such-branch@{u} &&
+ test_i18ncmp expect error
'
test_expect_success '@{u} error message when not on a branch' '
cat >expect <<-EOF &&
fatal: upstream branch ${sq}refs/heads/side${sq} not stored as a remote-tracking branch
EOF
- error_message bad-upstream@{u} 2>actual &&
- test_i18ncmp expect actual
+ error_message bad-upstream@{u} &&
+ test_i18ncmp expect error
'
test_expect_success 'pull works when tracking a local branch' '
11. When the user's cwd is outside the worktree, cwd remains unchanged and
prefix is NULL.
"
+
+# This test relies heavily on the standard error of nested function calls.
+test_untraceable=UnfortunatelyYes
+
. ./test-lib.sh
here=$(pwd)
0642 -rw-r---w-
EOF
+test_expect_success POSIXPERM,SANITY 'graceful handling when splitting index is not allowed' '
+ test_create_repo ro &&
+ (
+ cd ro &&
+ test_commit initial &&
+ git update-index --split-index &&
+ test -f .git/sharedindex.*
+ ) &&
+ cp ro/.git/index new-index &&
+ test_when_finished "chmod u+w ro/.git" &&
+ chmod u-w ro/.git &&
+ GIT_INDEX_FILE="$(pwd)/new-index" git -C ro update-index --split-index &&
+ chmod u+w ro/.git &&
+ rm ro/.git/sharedindex.* &&
+ GIT_INDEX_FILE=new-index git ls-files >actual &&
+ echo initial.t >expected &&
+ test_cmp expected actual
+'
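+
+# The point of the test above: when the shared index cannot be written
+# because .git is read-only, update-index falls back to writing an
+# ordinary all-in-one index instead of failing.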
+
+test_expect_success 'writing split index with null sha1 does not write cache tree' '
+ git config core.splitIndex true &&
+ git config splitIndex.maxPercentChange 0 &&
+ git commit -m "commit" &&
+ {
+ git ls-tree HEAD &&
+ printf "160000 commit $_z40\\tbroken\\n"
+ } >broken-tree &&
+ echo "add broken entry" >msg &&
+
+ tree=$(git mktree <broken-tree) &&
+ test_tick &&
+ commit=$(git commit-tree $tree -p HEAD <msg) &&
+ git update-ref HEAD "$commit" &&
+ GIT_ALLOW_NULL_SHA1=1 git reset --hard &&
+ (test-dump-cache-tree >cache-tree.out || true) &&
+ test_line_count = 0 cache-tree.out
+'
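+
+# GIT_ALLOW_NULL_SHA1 lets a null object name into the index; recording a
+# cache tree over it would bake a corrupt tree into the index, so the test
+# insists that no cache tree is written at all.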
+
test_done
'
post_checkout_hook () {
- test_when_finished "rm -f .git/hooks/post-checkout" &&
- mkdir -p .git/hooks &&
- write_script .git/hooks/post-checkout <<-\EOF
- echo $* >hook.actual
+ gitdir=${1:-.git}
+ test_when_finished "rm -f $gitdir/hooks/post-checkout" &&
+ mkdir -p $gitdir/hooks &&
+ write_script $gitdir/hooks/post-checkout <<-\EOF
+ {
+ echo $*
+ git rev-parse --git-dir --show-toplevel
+ } >hook.actual
EOF
}
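+# The hook records both its arguments and the git-dir/top-level it sees in
+# hook.actual, so the tests can check the post-checkout parameters and that
+# the hook ran inside the new worktree.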
test_expect_success '"add" invokes post-checkout hook (branch)' '
post_checkout_hook &&
- printf "%s %s 1\n" $_z40 $(git rev-parse HEAD) >hook.expect &&
+ {
+ echo $_z40 $(git rev-parse HEAD) 1 &&
+ echo $(pwd)/.git/worktrees/gumby &&
+ echo $(pwd)/gumby
+ } >hook.expect &&
git worktree add gumby &&
- test_cmp hook.expect hook.actual
+ test_cmp hook.expect gumby/hook.actual
'
test_expect_success '"add" invokes post-checkout hook (detached)' '
post_checkout_hook &&
- printf "%s %s 1\n" $_z40 $(git rev-parse HEAD) >hook.expect &&
+ {
+ echo $_z40 $(git rev-parse HEAD) 1 &&
+ echo $(pwd)/.git/worktrees/grumpy &&
+ echo $(pwd)/grumpy
+ } >hook.expect &&
git worktree add --detach grumpy &&
- test_cmp hook.expect hook.actual
+ test_cmp hook.expect grumpy/hook.actual
'
test_expect_success '"add --no-checkout" suppresses post-checkout hook' '
post_checkout_hook &&
rm -f hook.actual &&
git worktree add --no-checkout gloopy &&
- test_path_is_missing hook.actual
+ test_path_is_missing gloopy/hook.actual
+'
+
+test_expect_success '"add" in other worktree invokes post-checkout hook' '
+ post_checkout_hook &&
+ {
+ echo $_z40 $(git rev-parse HEAD) 1 &&
+ echo $(pwd)/.git/worktrees/guppy &&
+ echo $(pwd)/guppy
+ } >hook.expect &&
+ git -C gloopy worktree add --detach ../guppy &&
+ test_cmp hook.expect guppy/hook.actual
+'
+
+test_expect_success '"add" in bare repo invokes post-checkout hook' '
+ rm -rf bare &&
+ git clone --bare . bare &&
+ {
+ echo $_z40 $(git --git-dir=bare rev-parse HEAD) 1 &&
+ echo $(pwd)/bare/worktrees/goozy &&
+ echo $(pwd)/goozy
+ } >hook.expect &&
+ post_checkout_hook bare &&
+ git -C bare worktree add --detach ../goozy &&
+ test_cmp hook.expect goozy/hook.actual
'
test_done
test_expect_success 'setup' '
test_commit init &&
git worktree add source &&
- git worktree list --porcelain | grep "^worktree" >actual &&
+ git worktree list --porcelain >out &&
+ grep "^worktree" out >actual &&
cat <<-EOF >expected &&
worktree $(pwd)
worktree $(pwd)/source
test_path_is_missing .git/worktrees/source/locked
'
+test_expect_success 'move non-worktree' '
+ mkdir abc &&
+ test_must_fail git worktree move abc def
+'
+
+test_expect_success 'move locked worktree' '
+ git worktree lock source &&
+ test_when_finished "git worktree unlock source" &&
+ test_must_fail git worktree move source destination
+'
+
+test_expect_success 'move worktree' '
+ toplevel="$(pwd)" &&
+ git worktree move source destination &&
+ test_path_is_missing source &&
+ git worktree list --porcelain >out &&
+ grep "^worktree.*/destination" out &&
+ ! grep "^worktree.*/source" out &&
+ git -C destination log --format=%s >actual2 &&
+ echo init >expected2 &&
+ test_cmp expected2 actual2
+'
+
+test_expect_success 'move main worktree' '
+ test_must_fail git worktree move . def
+'
+
+test_expect_success 'move worktree to another dir' '
+ mkdir some-dir &&
+ git worktree move destination some-dir &&
+ test_when_finished "git worktree move some-dir/destination destination" &&
+ test_path_is_missing destination &&
+ git worktree list --porcelain >out &&
+ grep "^worktree.*/some-dir/destination" out &&
+ git -C some-dir/destination log --format=%s >actual2 &&
+ echo init >expected2 &&
+ test_cmp expected2 actual2
+'
+
+test_expect_success 'remove main worktree' '
+ test_must_fail git worktree remove .
+'
+
+test_expect_success 'remove locked worktree' '
+ git worktree lock destination &&
+ test_when_finished "git worktree unlock destination" &&
+ test_must_fail git worktree remove destination
+'
+
+test_expect_success 'remove worktree with dirty tracked file' '
+ echo dirty >>destination/init.t &&
+ test_when_finished "git -C destination checkout init.t" &&
+ test_must_fail git worktree remove destination
+'
+
+test_expect_success 'remove worktree with untracked file' '
+ : >destination/untracked &&
+ test_must_fail git worktree remove destination
+'
+
+test_expect_success 'force remove worktree with untracked file' '
+ git worktree remove --force destination &&
+ test_path_is_missing destination
+'
+
+test_expect_success 'remove missing worktree' '
+ git worktree add to-be-gone &&
+ test -d .git/worktrees/to-be-gone &&
+ mv to-be-gone gone &&
+ git worktree remove to-be-gone &&
+ test_path_is_missing .git/worktrees/to-be-gone
+'
+
+test_expect_success 'NOT remove missing-but-locked worktree' '
+ git worktree add gone-but-locked &&
+ git worktree lock gone-but-locked &&
+ test -d .git/worktrees/gone-but-locked &&
+ mv gone-but-locked really-gone-now &&
+ test_must_fail git worktree remove gone-but-locked &&
+ test_path_is_dir .git/worktrees/gone-but-locked
+'
+
test_done
GIT_INDEX_FILE="$PWD/ours-has-rename-index" &&
export GIT_INDEX_FILE &&
mkdir "$GIT_WORK_TREE" &&
- git read-tree -i -m $c7 &&
- git update-index --ignore-missing --refresh &&
- git merge-recursive $c0 -- $c7 $c3 &&
- git ls-files -s >actual-files
- ) 2>actual-err &&
- >expected-err &&
+ git read-tree -i -m $c7 2>actual-err &&
+ test_must_be_empty actual-err &&
+ git update-index --ignore-missing --refresh 2>actual-err &&
+ test_must_be_empty actual-err &&
+ git merge-recursive $c0 -- $c7 $c3 2>actual-err &&
+ test_must_be_empty actual-err &&
+ git ls-files -s >actual-files 2>actual-err &&
+ test_must_be_empty actual-err
+ ) &&
cat >expected-files <<-EOF &&
100644 $o3 0 b/c
100644 $o0 0 c
100644 $o0 0 d/e
100644 $o0 0 e
EOF
- test_cmp expected-files actual-files &&
- test_cmp expected-err actual-err
+ test_cmp expected-files actual-files
'
test_expect_success 'merge-recursive w/ empty work tree - theirs has rename' '
GIT_INDEX_FILE="$PWD/theirs-has-rename-index" &&
export GIT_INDEX_FILE &&
mkdir "$GIT_WORK_TREE" &&
- git read-tree -i -m $c3 &&
- git update-index --ignore-missing --refresh &&
- git merge-recursive $c0 -- $c3 $c7 &&
- git ls-files -s >actual-files
- ) 2>actual-err &&
- >expected-err &&
+ git read-tree -i -m $c3 2>actual-err &&
+ test_must_be_empty actual-err &&
+ git update-index --ignore-missing --refresh 2>actual-err &&
+ test_must_be_empty actual-err &&
+ git merge-recursive $c0 -- $c3 $c7 2>actual-err &&
+ test_must_be_empty actual-err &&
+ git ls-files -s >actual-files 2>actual-err &&
+ test_must_be_empty actual-err
+ ) &&
cat >expected-files <<-EOF &&
100644 $o3 0 b/c
100644 $o0 0 c
100644 $o0 0 d/e
100644 $o0 0 e
EOF
- test_cmp expected-files actual-files &&
- test_cmp expected-err actual-err
+ test_cmp expected-files actual-files
'
test_expect_success 'merge removes empty directories' '
. ./test-lib.sh
-match() {
- if [ $1 = 1 ]; then
- test_expect_success "wildmatch: match '$3' '$4'" "
- test-wildmatch wildmatch '$3' '$4'
- "
- else
- test_expect_success "wildmatch: no match '$3' '$4'" "
- ! test-wildmatch wildmatch '$3' '$4'
- "
- fi
+should_create_test_file() {
+ file=$1
+
+ case $file in
+ # `touch .` will succeed but obviously not do what we intend
+ # here.
+ ".")
+ return 1
+ ;;
+ # We cannot create a file with an empty filename.
+ "")
+ return 1
+ ;;
+ # Tests checking that e.g. foo//bar is matched by foo/*/bar
+ # can't be verified via the filesystem, since there's no way
+ # to create a path containing a double slash.
+ *//*)
+ return 1
+ ;;
+ # When testing the difference between foo/bar and foo/bar/ we
+ # can't test the latter.
+ */)
+ return 1
+ ;;
+ # On Windows, \ in paths is silently converted to /, which
+ # would result in the "touch" below working, but the test
+ # itself failing. See 6fd1106aa4 ("t3700: Skip a test with
+ # backslashes in pathspec", 2009-03-13) for prior art and
+ # details.
+ *\\*)
+ if ! test_have_prereq BSLASHPSPEC
+ then
+ return 1
+ fi
+ # NOTE: The ;;& bash extension is not portable, so
+ # this test needs to be at the end of the pattern
+ # list.
+ #
+ # If we want to add more conditional returns we either
+ # need a new case statement, or turn this whole thing
+ # into a series of "if" tests.
+ ;;
+ esac
+
+ # On Windows proper (i.e. not Cygwin), many file names that
+ # Cygwin would emulate simply can't be created.
+ if test_have_prereq MINGW
+ then
+ case $file in
+ " ")
+ # Files called " " are forbidden on Windows
+ return 1
+ ;;
+ *\<*|*\>*|*:*|*\"*|*\|*|*\?*|*\**)
+ # Files with various special characters aren't
+ # allowed on Windows. Sourced from
+ # https://stackoverflow.com/a/31976060
+ return 1
+ ;;
+ esac
+ fi
+
+ return 0
}
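+
+# In short: should_create_test_file returns 0 when the test's "text" can
+# also exist as a real path on this filesystem, which lets the same match
+# table drive both test-wildmatch and "git ls-files" checks.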
-imatch() {
- if [ $1 = 1 ]; then
- test_expect_success "iwildmatch: match '$2' '$3'" "
- test-wildmatch iwildmatch '$2' '$3'
- "
- else
- test_expect_success "iwildmatch: no match '$2' '$3'" "
- ! test-wildmatch iwildmatch '$2' '$3'
- "
- fi
+match_with_function() {
+ text=$1
+ pattern=$2
+ match_expect=$3
+ match_function=$4
+
+ if test "$match_expect" = 1
+ then
+ test_expect_success "$match_function: match '$text' '$pattern'" "
+ test-wildmatch $match_function '$text' '$pattern'
+ "
+ elif test "$match_expect" = 0
+ then
+ test_expect_success "$match_function: no match '$text' '$pattern'" "
+ test_must_fail test-wildmatch $match_function '$text' '$pattern'
+ "
+ else
+ test_expect_success "PANIC: Test framework error. Unknown matches value $match_expect" 'false'
+ fi
+}
+
+match_with_ls_files() {
+ text=$1
+ pattern=$2
+ match_expect=$3
+ match_function=$4
+ ls_files_args=$5
+
+ match_stdout_stderr_cmp="
+ tr -d '\0' <actual.raw >actual &&
+ >expect.err &&
+ test_cmp expect.err actual.err &&
+ test_cmp expect actual"
+
+ if test "$match_expect" = 'E'
+ then
+ if test -e .git/created_test_file
+ then
+ test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match dies on '$pattern' '$text'" "
+ printf '%s' '$text' >expect &&
+ test_must_fail git$ls_files_args ls-files -z -- '$pattern'
+ "
+ else
+ test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match skip '$pattern' '$text'" 'false'
+ fi
+ elif test "$match_expect" = 1
+ then
+ if test -e .git/created_test_file
+ then
+ test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match '$pattern' '$text'" "
+ printf '%s' '$text' >expect &&
+ git$ls_files_args ls-files -z -- '$pattern' >actual.raw 2>actual.err &&
+ $match_stdout_stderr_cmp
+ "
+ else
+ test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): match skip '$pattern' '$text'" 'false'
+ fi
+ elif test "$match_expect" = 0
+ then
+ if test -e .git/created_test_file
+ then
+ test_expect_success EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): no match '$pattern' '$text'" "
+ >expect &&
+ git$ls_files_args ls-files -z -- '$pattern' >actual.raw 2>actual.err &&
+ $match_stdout_stderr_cmp
+ "
+ else
+ test_expect_failure EXPENSIVE_ON_WINDOWS "$match_function (via ls-files): no match skip '$pattern' '$text'" 'false'
+ fi
+ else
+ test_expect_success "PANIC: Test framework error. Unknown matches value $match_expect" 'false'
+ fi
}
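+
+# Expectation codes used throughout: 1 = must match, 0 = must not match,
+# and, for the ls-files variants only, E = "git ls-files" is expected to
+# error out on that pattern.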
-pathmatch() {
- if [ $1 = 1 ]; then
- test_expect_success "pathmatch: match '$2' '$3'" "
- test-wildmatch pathmatch '$2' '$3'
- "
- else
- test_expect_success "pathmatch: no match '$2' '$3'" "
- ! test-wildmatch pathmatch '$2' '$3'
- "
- fi
+match() {
+ if test "$#" = 6
+ then
+ # When test-wildmatch and git ls-files produce the same
+ # result.
+ match_glob=$1
+ match_file_glob=$match_glob
+ match_iglob=$2
+ match_file_iglob=$match_iglob
+ match_pathmatch=$3
+ match_file_pathmatch=$match_pathmatch
+ match_pathmatchi=$4
+ match_file_pathmatchi=$match_pathmatchi
+ text=$5
+ pattern=$6
+ elif test "$#" = 10
+ then
+ match_glob=$1
+ match_iglob=$2
+ match_pathmatch=$3
+ match_pathmatchi=$4
+ match_file_glob=$5
+ match_file_iglob=$6
+ match_file_pathmatch=$7
+ match_file_pathmatchi=$8
+ text=$9
+ pattern=${10}
+ fi
+
+ test_expect_success EXPENSIVE_ON_WINDOWS 'cleanup after previous file test' '
+ if test -e .git/created_test_file
+ then
+ git reset &&
+ git clean -df
+ fi
+ '
+
+ printf '%s' "$text" >.git/expected_test_file
+
+ test_expect_success EXPENSIVE_ON_WINDOWS "setup match file test for $text" '
+ file=$(cat .git/expected_test_file) &&
+ if should_create_test_file "$file"
+ then
+ dirs=${file%/*}
+ if test "$file" != "$dirs"
+ then
+ mkdir -p -- "$dirs" &&
+ touch -- "./$text"
+ else
+ touch -- "./$file"
+ fi &&
+ git add -A &&
+ printf "%s" "$file" >.git/created_test_file
+ elif test -e .git/created_test_file
+ then
+ rm .git/created_test_file
+ fi
+ '
+
+ # $1: Case sensitive glob match: test-wildmatch & ls-files
+ match_with_function "$text" "$pattern" $match_glob "wildmatch"
+ match_with_ls_files "$text" "$pattern" $match_file_glob "wildmatch" " --glob-pathspecs"
+
+ # $2: Case insensitive glob match: test-wildmatch & ls-files
+ match_with_function "$text" "$pattern" $match_iglob "iwildmatch"
+ match_with_ls_files "$text" "$pattern" $match_file_iglob "iwildmatch" " --glob-pathspecs --icase-pathspecs"
+
+ # $3: Case sensitive path match: test-wildmatch & ls-files
+ match_with_function "$text" "$pattern" $match_pathmatch "pathmatch"
+ match_with_ls_files "$text" "$pattern" $match_file_pathmatch "pathmatch" ""
+
+ # $4: Case insensitive path match: test-wildmatch & ls-files
+ match_with_function "$text" "$pattern" $match_pathmatchi "ipathmatch"
+ match_with_ls_files "$text" "$pattern" $match_file_pathmatchi "ipathmatch" " --icase-pathspecs"
}
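+
+# match takes either 6 arguments (<glob> <iglob> <pathmatch> <ipathmatch>
+# <text> <pattern>, when test-wildmatch and ls-files agree) or 10, where
+# the first four results are for test-wildmatch and the next four for
+# ls-files. E.g. "match 0 0 1 1 'foo/bar' 'foo?bar'" expects no wildmatch
+# in either case but a successful pathmatch.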
-# Basic wildmat features
-match 1 1 foo foo
-match 0 0 foo bar
-match 1 1 '' ""
-match 1 1 foo '???'
-match 0 0 foo '??'
-match 1 1 foo '*'
-match 1 1 foo 'f*'
-match 0 0 foo '*f'
-match 1 1 foo '*foo*'
-match 1 1 foobar '*ob*a*r*'
-match 1 1 aaaaaaabababab '*ab'
-match 1 1 'foo*' 'foo\*'
-match 0 0 foobar 'foo\*bar'
-match 1 1 'f\oo' 'f\\oo'
-match 1 1 ball '*[al]?'
-match 0 0 ten '[ten]'
-match 0 1 ten '**[!te]'
-match 0 0 ten '**[!ten]'
-match 1 1 ten 't[a-g]n'
-match 0 0 ten 't[!a-g]n'
-match 1 1 ton 't[!a-g]n'
-match 1 1 ton 't[^a-g]n'
-match 1 x 'a]b' 'a[]]b'
-match 1 x a-b 'a[]-]b'
-match 1 x 'a]b' 'a[]-]b'
-match 0 x aab 'a[]-]b'
-match 1 x aab 'a[]a-]b'
-match 1 1 ']' ']'
+# Basic wildmatch features
+match 1 1 1 1 foo foo
+match 0 0 0 0 foo bar
+match 1 1 1 1 '' ""
+match 1 1 1 1 foo '???'
+match 0 0 0 0 foo '??'
+match 1 1 1 1 foo '*'
+match 1 1 1 1 foo 'f*'
+match 0 0 0 0 foo '*f'
+match 1 1 1 1 foo '*foo*'
+match 1 1 1 1 foobar '*ob*a*r*'
+match 1 1 1 1 aaaaaaabababab '*ab'
+match 1 1 1 1 'foo*' 'foo\*'
+match 0 0 0 0 foobar 'foo\*bar'
+match 1 1 1 1 'f\oo' 'f\\oo'
+match 1 1 1 1 ball '*[al]?'
+match 0 0 0 0 ten '[ten]'
+match 0 0 1 1 ten '**[!te]'
+match 0 0 0 0 ten '**[!ten]'
+match 1 1 1 1 ten 't[a-g]n'
+match 0 0 0 0 ten 't[!a-g]n'
+match 1 1 1 1 ton 't[!a-g]n'
+match 1 1 1 1 ton 't[^a-g]n'
+match 1 1 1 1 'a]b' 'a[]]b'
+match 1 1 1 1 a-b 'a[]-]b'
+match 1 1 1 1 'a]b' 'a[]-]b'
+match 0 0 0 0 aab 'a[]-]b'
+match 1 1 1 1 aab 'a[]a-]b'
+match 1 1 1 1 ']' ']'
# Extended slash-matching features
-match 0 0 'foo/baz/bar' 'foo*bar'
-match 0 0 'foo/baz/bar' 'foo**bar'
-match 0 1 'foobazbar' 'foo**bar'
-match 1 1 'foo/baz/bar' 'foo/**/bar'
-match 1 0 'foo/baz/bar' 'foo/**/**/bar'
-match 1 0 'foo/b/a/z/bar' 'foo/**/bar'
-match 1 0 'foo/b/a/z/bar' 'foo/**/**/bar'
-match 1 0 'foo/bar' 'foo/**/bar'
-match 1 0 'foo/bar' 'foo/**/**/bar'
-match 0 0 'foo/bar' 'foo?bar'
-match 0 0 'foo/bar' 'foo[/]bar'
-match 0 0 'foo/bar' 'foo[^a-z]bar'
-match 0 0 'foo/bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
-match 1 1 'foo-bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
-match 1 0 'foo' '**/foo'
-match 1 x 'XXX/foo' '**/foo'
-match 1 0 'bar/baz/foo' '**/foo'
-match 0 0 'bar/baz/foo' '*/foo'
-match 0 0 'foo/bar/baz' '**/bar*'
-match 1 0 'deep/foo/bar/baz' '**/bar/*'
-match 0 0 'deep/foo/bar/baz/' '**/bar/*'
-match 1 0 'deep/foo/bar/baz/' '**/bar/**'
-match 0 0 'deep/foo/bar' '**/bar/*'
-match 1 0 'deep/foo/bar/' '**/bar/**'
-match 0 0 'foo/bar/baz' '**/bar**'
-match 1 0 'foo/bar/baz/x' '*/bar/**'
-match 0 0 'deep/foo/bar/baz/x' '*/bar/**'
-match 1 0 'deep/foo/bar/baz/x' '**/bar/*/*'
+match 0 0 1 1 'foo/baz/bar' 'foo*bar'
+match 0 0 1 1 'foo/baz/bar' 'foo**bar'
+match 0 0 1 1 'foobazbar' 'foo**bar'
+match 1 1 1 1 'foo/baz/bar' 'foo/**/bar'
+match 1 1 0 0 'foo/baz/bar' 'foo/**/**/bar'
+match 1 1 1 1 'foo/b/a/z/bar' 'foo/**/bar'
+match 1 1 1 1 'foo/b/a/z/bar' 'foo/**/**/bar'
+match 1 1 0 0 'foo/bar' 'foo/**/bar'
+match 1 1 0 0 'foo/bar' 'foo/**/**/bar'
+match 0 0 1 1 'foo/bar' 'foo?bar'
+match 0 0 1 1 'foo/bar' 'foo[/]bar'
+match 0 0 1 1 'foo/bar' 'foo[^a-z]bar'
+match 0 0 1 1 'foo/bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
+match 1 1 1 1 'foo-bar' 'f[^eiu][^eiu][^eiu][^eiu][^eiu]r'
+match 1 1 0 0 'foo' '**/foo'
+match 1 1 1 1 'XXX/foo' '**/foo'
+match 1 1 1 1 'bar/baz/foo' '**/foo'
+match 0 0 1 1 'bar/baz/foo' '*/foo'
+match 0 0 1 1 'foo/bar/baz' '**/bar*'
+match 1 1 1 1 'deep/foo/bar/baz' '**/bar/*'
+match 0 0 1 1 'deep/foo/bar/baz/' '**/bar/*'
+match 1 1 1 1 'deep/foo/bar/baz/' '**/bar/**'
+match 0 0 0 0 'deep/foo/bar' '**/bar/*'
+match 1 1 1 1 'deep/foo/bar/' '**/bar/**'
+match 0 0 1 1 'foo/bar/baz' '**/bar**'
+match 1 1 1 1 'foo/bar/baz/x' '*/bar/**'
+match 0 0 1 1 'deep/foo/bar/baz/x' '*/bar/**'
+match 1 1 1 1 'deep/foo/bar/baz/x' '**/bar/*/*'
# Various additional tests
-match 0 0 'acrt' 'a[c-c]st'
-match 1 1 'acrt' 'a[c-c]rt'
-match 0 0 ']' '[!]-]'
-match 1 x 'a' '[!]-]'
-match 0 0 '' '\'
-match 0 x '\' '\'
-match 0 x 'XXX/\' '*/\'
-match 1 x 'XXX/\' '*/\\'
-match 1 1 'foo' 'foo'
-match 1 1 '@foo' '@foo'
-match 0 0 'foo' '@foo'
-match 1 1 '[ab]' '\[ab]'
-match 1 1 '[ab]' '[[]ab]'
-match 1 x '[ab]' '[[:]ab]'
-match 0 x '[ab]' '[[::]ab]'
-match 1 x '[ab]' '[[:digit]ab]'
-match 1 x '[ab]' '[\[:]ab]'
-match 1 1 '?a?b' '\??\?b'
-match 1 1 'abc' '\a\b\c'
-match 0 0 'foo' ''
-match 1 0 'foo/bar/baz/to' '**/t[o]'
+match 0 0 0 0 'acrt' 'a[c-c]st'
+match 1 1 1 1 'acrt' 'a[c-c]rt'
+match 0 0 0 0 ']' '[!]-]'
+match 1 1 1 1 'a' '[!]-]'
+match 0 0 0 0 '' '\'
+match 0 0 0 0 \
+ 1 1 1 1 '\' '\'
+match 0 0 0 0 'XXX/\' '*/\'
+match 1 1 1 1 'XXX/\' '*/\\'
+match 1 1 1 1 'foo' 'foo'
+match 1 1 1 1 '@foo' '@foo'
+match 0 0 0 0 'foo' '@foo'
+match 1 1 1 1 '[ab]' '\[ab]'
+match 1 1 1 1 '[ab]' '[[]ab]'
+match 1 1 1 1 '[ab]' '[[:]ab]'
+match 0 0 0 0 '[ab]' '[[::]ab]'
+match 1 1 1 1 '[ab]' '[[:digit]ab]'
+match 1 1 1 1 '[ab]' '[\[:]ab]'
+match 1 1 1 1 '?a?b' '\??\?b'
+match 1 1 1 1 'abc' '\a\b\c'
+match 0 0 0 0 \
+ E E E E 'foo' ''
+match 1 1 1 1 'foo/bar/baz/to' '**/t[o]'
# Character class tests
-match 1 x 'a1B' '[[:alpha:]][[:digit:]][[:upper:]]'
-match 0 x 'a' '[[:digit:][:upper:][:space:]]'
-match 1 x 'A' '[[:digit:][:upper:][:space:]]'
-match 1 x '1' '[[:digit:][:upper:][:space:]]'
-match 0 x '1' '[[:digit:][:upper:][:spaci:]]'
-match 1 x ' ' '[[:digit:][:upper:][:space:]]'
-match 0 x '.' '[[:digit:][:upper:][:space:]]'
-match 1 x '.' '[[:digit:][:punct:][:space:]]'
-match 1 x '5' '[[:xdigit:]]'
-match 1 x 'f' '[[:xdigit:]]'
-match 1 x 'D' '[[:xdigit:]]'
-match 1 x '_' '[[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:graph:][:lower:][:print:][:punct:][:space:][:upper:][:xdigit:]]'
-match 1 x '.' '[^[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:lower:][:space:][:upper:][:xdigit:]]'
-match 1 x '5' '[a-c[:digit:]x-z]'
-match 1 x 'b' '[a-c[:digit:]x-z]'
-match 1 x 'y' '[a-c[:digit:]x-z]'
-match 0 x 'q' '[a-c[:digit:]x-z]'
-
-# Additional tests, including some malformed wildmats
-match 1 x ']' '[\\-^]'
-match 0 0 '[' '[\\-^]'
-match 1 x '-' '[\-_]'
-match 1 x ']' '[\]]'
-match 0 0 '\]' '[\]]'
-match 0 0 '\' '[\]]'
-match 0 0 'ab' 'a[]b'
-match 0 x 'a[]b' 'a[]b'
-match 0 x 'ab[' 'ab['
-match 0 0 'ab' '[!'
-match 0 0 'ab' '[-'
-match 1 1 '-' '[-]'
-match 0 0 '-' '[a-'
-match 0 0 '-' '[!a-'
-match 1 x '-' '[--A]'
-match 1 x '5' '[--A]'
-match 1 1 ' ' '[ --]'
-match 1 1 '$' '[ --]'
-match 1 1 '-' '[ --]'
-match 0 0 '0' '[ --]'
-match 1 x '-' '[---]'
-match 1 x '-' '[------]'
-match 0 0 'j' '[a-e-n]'
-match 1 x '-' '[a-e-n]'
-match 1 x 'a' '[!------]'
-match 0 0 '[' '[]-a]'
-match 1 x '^' '[]-a]'
-match 0 0 '^' '[!]-a]'
-match 1 x '[' '[!]-a]'
-match 1 1 '^' '[a^bc]'
-match 1 x '-b]' '[a-]b]'
-match 0 0 '\' '[\]'
-match 1 1 '\' '[\\]'
-match 0 0 '\' '[!\\]'
-match 1 1 'G' '[A-\\]'
-match 0 0 'aaabbb' 'b*a'
-match 0 0 'aabcaa' '*ba*'
-match 1 1 ',' '[,]'
-match 1 1 ',' '[\\,]'
-match 1 1 '\' '[\\,]'
-match 1 1 '-' '[,-.]'
-match 0 0 '+' '[,-.]'
-match 0 0 '-.]' '[,-.]'
-match 1 1 '2' '[\1-\3]'
-match 1 1 '3' '[\1-\3]'
-match 0 0 '4' '[\1-\3]'
-match 1 1 '\' '[[-\]]'
-match 1 1 '[' '[[-\]]'
-match 1 1 ']' '[[-\]]'
-match 0 0 '-' '[[-\]]'
+match 1 1 1 1 'a1B' '[[:alpha:]][[:digit:]][[:upper:]]'
+match 0 1 0 1 'a' '[[:digit:][:upper:][:space:]]'
+match 1 1 1 1 'A' '[[:digit:][:upper:][:space:]]'
+match 1 1 1 1 '1' '[[:digit:][:upper:][:space:]]'
+match 0 0 0 0 '1' '[[:digit:][:upper:][:spaci:]]'
+match 1 1 1 1 ' ' '[[:digit:][:upper:][:space:]]'
+match 0 0 0 0 '.' '[[:digit:][:upper:][:space:]]'
+match 1 1 1 1 '.' '[[:digit:][:punct:][:space:]]'
+match 1 1 1 1 '5' '[[:xdigit:]]'
+match 1 1 1 1 'f' '[[:xdigit:]]'
+match 1 1 1 1 'D' '[[:xdigit:]]'
+match 1 1 1 1 '_' '[[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:graph:][:lower:][:print:][:punct:][:space:][:upper:][:xdigit:]]'
+match 1 1 1 1 '.' '[^[:alnum:][:alpha:][:blank:][:cntrl:][:digit:][:lower:][:space:][:upper:][:xdigit:]]'
+match 1 1 1 1 '5' '[a-c[:digit:]x-z]'
+match 1 1 1 1 'b' '[a-c[:digit:]x-z]'
+match 1 1 1 1 'y' '[a-c[:digit:]x-z]'
+match 0 0 0 0 'q' '[a-c[:digit:]x-z]'
-# Test recursion and the abort code (use "wildtest -i" to see iteration counts)
-match 1 1 '-adobe-courier-bold-o-normal--12-120-75-75-m-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
-match 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-X-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
-match 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-/-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
-match 1 1 'XXX/adobe/courier/bold/o/normal//12/120/75/75/m/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
-match 0 0 'XXX/adobe/courier/bold/o/normal//12/120/75/75/X/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
-match 1 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txt' '**/*a*b*g*n*t'
-match 0 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txtz' '**/*a*b*g*n*t'
-match 0 x foo '*/*/*'
-match 0 x foo/bar '*/*/*'
-match 1 x foo/bba/arr '*/*/*'
-match 0 x foo/bb/aa/rr '*/*/*'
-match 1 x foo/bb/aa/rr '**/**/**'
-match 1 x abcXdefXghi '*X*i'
-match 0 x ab/cXd/efXg/hi '*X*i'
-match 1 x ab/cXd/efXg/hi '*/*X*/*/*i'
-match 1 x ab/cXd/efXg/hi '**/*X*/**/*i'
+# Additional tests, including some malformed wildmatch patterns
+match 1 1 1 1 ']' '[\\-^]'
+match 0 0 0 0 '[' '[\\-^]'
+match 1 1 1 1 '-' '[\-_]'
+match 1 1 1 1 ']' '[\]]'
+match 0 0 0 0 '\]' '[\]]'
+match 0 0 0 0 '\' '[\]]'
+match 0 0 0 0 'ab' 'a[]b'
+match 0 0 0 0 \
+ 1 1 1 1 'a[]b' 'a[]b'
+match 0 0 0 0 \
+ 1 1 1 1 'ab[' 'ab['
+match 0 0 0 0 'ab' '[!'
+match 0 0 0 0 'ab' '[-'
+match 1 1 1 1 '-' '[-]'
+match 0 0 0 0 '-' '[a-'
+match 0 0 0 0 '-' '[!a-'
+match 1 1 1 1 '-' '[--A]'
+match 1 1 1 1 '5' '[--A]'
+match 1 1 1 1 ' ' '[ --]'
+match 1 1 1 1 '$' '[ --]'
+match 1 1 1 1 '-' '[ --]'
+match 0 0 0 0 '0' '[ --]'
+match 1 1 1 1 '-' '[---]'
+match 1 1 1 1 '-' '[------]'
+match 0 0 0 0 'j' '[a-e-n]'
+match 1 1 1 1 '-' '[a-e-n]'
+match 1 1 1 1 'a' '[!------]'
+match 0 0 0 0 '[' '[]-a]'
+match 1 1 1 1 '^' '[]-a]'
+match 0 0 0 0 '^' '[!]-a]'
+match 1 1 1 1 '[' '[!]-a]'
+match 1 1 1 1 '^' '[a^bc]'
+match 1 1 1 1 '-b]' '[a-]b]'
+match 0 0 0 0 '\' '[\]'
+match 1 1 1 1 '\' '[\\]'
+match 0 0 0 0 '\' '[!\\]'
+match 1 1 1 1 'G' '[A-\\]'
+match 0 0 0 0 'aaabbb' 'b*a'
+match 0 0 0 0 'aabcaa' '*ba*'
+match 1 1 1 1 ',' '[,]'
+match 1 1 1 1 ',' '[\\,]'
+match 1 1 1 1 '\' '[\\,]'
+match 1 1 1 1 '-' '[,-.]'
+match 0 0 0 0 '+' '[,-.]'
+match 0 0 0 0 '-.]' '[,-.]'
+match 1 1 1 1 '2' '[\1-\3]'
+match 1 1 1 1 '3' '[\1-\3]'
+match 0 0 0 0 '4' '[\1-\3]'
+match 1 1 1 1 '\' '[[-\]]'
+match 1 1 1 1 '[' '[[-\]]'
+match 1 1 1 1 ']' '[[-\]]'
+match 0 0 0 0 '-' '[[-\]]'
-pathmatch 1 foo foo
-pathmatch 0 foo fo
-pathmatch 1 foo/bar foo/bar
-pathmatch 1 foo/bar 'foo/*'
-pathmatch 1 foo/bba/arr 'foo/*'
-pathmatch 1 foo/bba/arr 'foo/**'
-pathmatch 1 foo/bba/arr 'foo*'
-pathmatch 1 foo/bba/arr 'foo**'
-pathmatch 1 foo/bba/arr 'foo/*arr'
-pathmatch 1 foo/bba/arr 'foo/**arr'
-pathmatch 0 foo/bba/arr 'foo/*z'
-pathmatch 0 foo/bba/arr 'foo/**z'
-pathmatch 1 foo/bar 'foo?bar'
-pathmatch 1 foo/bar 'foo[/]bar'
-pathmatch 1 foo/bar 'foo[^a-z]bar'
-pathmatch 0 foo '*/*/*'
-pathmatch 0 foo/bar '*/*/*'
-pathmatch 1 foo/bba/arr '*/*/*'
-pathmatch 1 foo/bb/aa/rr '*/*/*'
-pathmatch 1 abcXdefXghi '*X*i'
-pathmatch 1 ab/cXd/efXg/hi '*/*X*/*/*i'
-pathmatch 1 ab/cXd/efXg/hi '*Xg*i'
+# Test recursion
+match 1 1 1 1 '-adobe-courier-bold-o-normal--12-120-75-75-m-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
+match 0 0 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-X-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
+match 0 0 0 0 '-adobe-courier-bold-o-normal--12-120-75-75-/-70-iso8859-1' '-*-*-*-*-*-*-12-*-*-*-m-*-*-*'
+match 1 1 1 1 'XXX/adobe/courier/bold/o/normal//12/120/75/75/m/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
+match 0 0 0 0 'XXX/adobe/courier/bold/o/normal//12/120/75/75/X/70/iso8859/1' 'XXX/*/*/*/*/*/*/12/*/*/*/m/*/*/*'
+match 1 1 1 1 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txt' '**/*a*b*g*n*t'
+match 0 0 0 0 'abcd/abcdefg/abcdefghijk/abcdefghijklmnop.txtz' '**/*a*b*g*n*t'
+match 0 0 0 0 foo '*/*/*'
+match 0 0 0 0 foo/bar '*/*/*'
+match 1 1 1 1 foo/bba/arr '*/*/*'
+match 0 0 1 1 foo/bb/aa/rr '*/*/*'
+match 1 1 1 1 foo/bb/aa/rr '**/**/**'
+match 1 1 1 1 abcXdefXghi '*X*i'
+match 0 0 1 1 ab/cXd/efXg/hi '*X*i'
+match 1 1 1 1 ab/cXd/efXg/hi '*/*X*/*/*i'
+match 1 1 1 1 ab/cXd/efXg/hi '**/*X*/**/*i'
-# Case-sensitivity features
-match 0 x 'a' '[A-Z]'
-match 1 x 'A' '[A-Z]'
-match 0 x 'A' '[a-z]'
-match 1 x 'a' '[a-z]'
-match 0 x 'a' '[[:upper:]]'
-match 1 x 'A' '[[:upper:]]'
-match 0 x 'A' '[[:lower:]]'
-match 1 x 'a' '[[:lower:]]'
-match 0 x 'A' '[B-Za]'
-match 1 x 'a' '[B-Za]'
-match 0 x 'A' '[B-a]'
-match 1 x 'a' '[B-a]'
-match 0 x 'z' '[Z-y]'
-match 1 x 'Z' '[Z-y]'
+# Extra pathmatch tests
+match 0 0 0 0 foo fo
+match 1 1 1 1 foo/bar foo/bar
+match 1 1 1 1 foo/bar 'foo/*'
+match 0 0 1 1 foo/bba/arr 'foo/*'
+match 1 1 1 1 foo/bba/arr 'foo/**'
+match 0 0 1 1 foo/bba/arr 'foo*'
+match 0 0 1 1 \
+ 1 1 1 1 foo/bba/arr 'foo**'
+match 0 0 1 1 foo/bba/arr 'foo/*arr'
+match 0 0 1 1 foo/bba/arr 'foo/**arr'
+match 0 0 0 0 foo/bba/arr 'foo/*z'
+match 0 0 0 0 foo/bba/arr 'foo/**z'
+match 0 0 1 1 foo/bar 'foo?bar'
+match 0 0 1 1 foo/bar 'foo[/]bar'
+match 0 0 1 1 foo/bar 'foo[^a-z]bar'
+match 0 0 1 1 ab/cXd/efXg/hi '*Xg*i'
-imatch 1 'a' '[A-Z]'
-imatch 1 'A' '[A-Z]'
-imatch 1 'A' '[a-z]'
-imatch 1 'a' '[a-z]'
-imatch 1 'a' '[[:upper:]]'
-imatch 1 'A' '[[:upper:]]'
-imatch 1 'A' '[[:lower:]]'
-imatch 1 'a' '[[:lower:]]'
-imatch 1 'A' '[B-Za]'
-imatch 1 'a' '[B-Za]'
-imatch 1 'A' '[B-a]'
-imatch 1 'a' '[B-a]'
-imatch 1 'z' '[Z-y]'
-imatch 1 'Z' '[Z-y]'
+# Extra case-sensitivity tests
+match 0 1 0 1 'a' '[A-Z]'
+match 1 1 1 1 'A' '[A-Z]'
+match 0 1 0 1 'A' '[a-z]'
+match 1 1 1 1 'a' '[a-z]'
+match 0 1 0 1 'a' '[[:upper:]]'
+match 1 1 1 1 'A' '[[:upper:]]'
+match 0 1 0 1 'A' '[[:lower:]]'
+match 1 1 1 1 'a' '[[:lower:]]'
+match 0 1 0 1 'A' '[B-Za]'
+match 1 1 1 1 'a' '[B-Za]'
+match 0 1 0 1 'A' '[B-a]'
+match 1 1 1 1 'a' '[B-a]'
+match 0 1 0 1 'z' '[Z-y]'
+match 1 1 1 1 'Z' '[Z-y]'
test_done
git branch -c -f o/q o/p
'
-test_expect_success 'git branch -c qq rr/qq should fail when r exists' '
+test_expect_success 'git branch -c qq rr/qq should fail when rr exists' '
git branch qq &&
git branch rr &&
test_must_fail git branch -c qq rr/qq
test_cmp From_.msg out
'
+test_expect_success 'rebase--am.sh and --show-current-patch' '
+ test_create_repo conflict-apply &&
+ (
+ cd conflict-apply &&
+ test_commit init &&
+ echo one >>init.t &&
+ git commit -a -m one &&
+ echo two >>init.t &&
+ git commit -a -m two &&
+ git tag two &&
+ test_must_fail git rebase --onto init HEAD^ &&
+ GIT_TRACE=1 git rebase --show-current-patch >/dev/null 2>stderr &&
+ grep "show.*$(git rev-parse two)" stderr
+ )
+'
+
+test_expect_success 'rebase--merge.sh and --show-current-patch' '
+ test_create_repo conflict-merge &&
+ (
+ cd conflict-merge &&
+ test_commit init &&
+ echo one >>init.t &&
+ git commit -a -m one &&
+ echo two >>init.t &&
+ git commit -a -m two &&
+ git tag two &&
+ test_must_fail git rebase --merge --onto init HEAD^ &&
+ git rebase --show-current-patch >actual.patch &&
+ GIT_TRACE=1 git rebase --show-current-patch >/dev/null 2>stderr &&
+ grep "show.*REBASE_HEAD" stderr &&
+ test "$(git rev-parse REBASE_HEAD)" = "$(git rev-parse two)"
+ )
+'
+
test_done
test 0 = $(grep -c "^[^#]" < .git/rebase-merge/git-rebase-todo)
'
+test_expect_success 'show conflicted patch' '
+ GIT_TRACE=1 git rebase --show-current-patch >/dev/null 2>stderr &&
+ grep "show.*REBASE_HEAD" stderr &&
+ # the stopped-sha file records an abbreviated object name, so expand it
+ stopped_sha1="$(git rev-parse $(cat ".git/rebase-merge/stopped-sha"))" &&
+ test "$(git rev-parse REBASE_HEAD)" = "$stopped_sha1"
+'
+
test_expect_success 'abort' '
git rebase --abort &&
test $(git rev-parse new-branch1) = $(git rev-parse HEAD) &&
git rebase -i $base &&
git cat-file commit HEAD | sed -e 1,/^\$/d > actual-squash-fixup &&
test_cmp expect-squash-fixup actual-squash-fixup &&
+ git cat-file commit HEAD@{2} |
+ grep "^# This is a combination of 3 commits\." &&
+ git cat-file commit HEAD@{3} |
+ grep "^# This is a combination of 2 commits\." &&
git checkout to-be-rebased &&
git branch -D squash-fixup
'
SQ="'"
test_expect_success 'rebase -i --gpg-sign=<key-id>' '
+ test_when_finished "test_might_fail git rebase --abort" &&
+ set_fake_editor &&
+ FAKE_LINES="edit 1" git rebase -i --gpg-sign="\"S I Gner\"" HEAD^ \
+ >out 2>err &&
+ test_i18ngrep "$SQ-S\"S I Gner\"$SQ" err
+'
+
+test_expect_success 'rebase -i --gpg-sign=<key-id> overrides commit.gpgSign' '
+ test_when_finished "test_might_fail git rebase --abort" &&
+ test_config commit.gpgsign true &&
set_fake_editor &&
FAKE_LINES="edit 1" git rebase -i --gpg-sign="\"S I Gner\"" HEAD^ \
>out 2>err &&
test_description='rebase should handle arbitrary git message'
. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-rebase.sh
cat >F <<\EOF
This is an example of a commit log message
test_tick &&
git commit -m "Initial commit" &&
git branch diff-in-message &&
+ git branch empty-message-merge &&
git checkout -b multi-line-subject &&
cat F >file2 &&
git cat-file commit HEAD | sed -e "1,/^\$/d" >G0 &&
+ git checkout empty-message-merge &&
+ echo file3 >file3 &&
+ git add file3 &&
+ git commit --allow-empty-message -m "" &&
+
git checkout master &&
echo One >file1 &&
test_cmp G G0
'
+test_expect_success 'rebase -m commit with empty message' '
+ test_must_fail git rebase -m master empty-message-merge &&
+ git rebase --abort &&
+ git rebase -m --allow-empty-message master empty-message-merge
+'
+
+test_expect_success 'rebase -i commit with empty message' '
+ git checkout diff-in-message &&
+ set_fake_editor &&
+ test_must_fail env FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \
+ git rebase -i HEAD^ &&
+ git rebase --abort &&
+ FAKE_COMMIT_MESSAGE=" " FAKE_LINES="reword 1" \
+ git rebase -i --allow-empty-message HEAD^
+'
+
test_done
>elif &&
git add elif &&
test_tick &&
- git commit -m second
+ git commit -m second &&
+ git checkout -b side2 &&
+ >afile &&
+ git add afile &&
+ test_tick &&
+ git commit -m third &&
+ echo hello >afile &&
+ test_tick &&
+ git commit -a -m fourth &&
+ git checkout -b side-merge &&
+ git reset --hard HEAD^^ &&
+ git merge --no-ff -m "A merge commit log message that has a long
+summary that spills over multiple lines.
+
+But otherwise with a sane description." side2 &&
+ git branch side-merge-original
'
test_expect_success rebase '
git cat-file commit side@{1} | sed -e "1,/^\$/d" >expect &&
test_cmp expect actual
+'
+test_expect_success rebasep '
+ git checkout side-merge &&
+ git rebase -p side &&
+ git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
+ git cat-file commit side-merge-original | sed -e "1,/^\$/d" >expect &&
+ test_cmp expect actual
'
test_done
test_cmp expect actual
'
-test_expect_success 'cherry-pick works with dirty renamed file' '
+test_expect_failure 'cherry-pick works with dirty renamed file' '
test_commit to-rename &&
git checkout -b unrelated &&
test_commit unrelated &&
test_tick &&
git commit -m renamed &&
echo modified >renamed &&
- git cherry-pick refs/heads/unrelated
+ test_must_fail git cherry-pick refs/heads/unrelated >out &&
+ test_i18ngrep "Refusing to lose dirty file at renamed" out &&
+ test $(git rev-parse :0:renamed) = $(git rev-parse HEAD^:to-rename.t) &&
+ grep -q "^modified$" renamed
'
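+
+# (test_expect_failure documents desired behavior that is currently broken:
+# cherry-pick should refuse to clobber the dirty "renamed" file, keep the
+# staged rename, and leave the local modification in place.)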
test_done
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-submodule-update.sh
-KNOWN_FAILURE_CHERRY_PICK_SEES_EMPTY_COMMIT=1
KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1
KNOWN_FAILURE_NOFF_MERGE_ATTEMPTS_TO_MERGE_REMOVED_SUBMODULE_FILES=1
test_submodule_switch "git cherry-pick"
git revert HEAD
}
-KNOWN_FAILURE_CHERRY_PICK_SEES_EMPTY_COMMIT=1
KNOWN_FAILURE_NOFF_MERGE_DOESNT_CREATE_EMPTY_SUBMODULE_DIR=1
test_submodule_switch "git_revert"
test_done
fi
+diff_cmp () {
+ for x
+ do
+ sed -e '/^index/s/[0-9a-f]*[1-9a-f][0-9a-f]*\.\./1234567../' \
+ -e '/^index/s/\.\.[0-9a-f]*[1-9a-f][0-9a-f]*/..9abcdef/' \
+ -e '/^index/s/ 00*\.\./ 0000000../' \
+ -e '/^index/s/\.\.00*$/..0000000/' \
+ -e '/^index/s/\.\.00* /..0000000 /' \
+ "$x" >"$x.filtered"
+ done
+ test_cmp "$1.filtered" "$2.filtered"
+}
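+
+# diff_cmp normalizes the blob names on "index" lines before comparing
+# (e.g. "index 180b47c..b6f2c08" becomes "index 1234567..9abcdef", with
+# all-zero names kept as zeroes), so the tests don't depend on the exact
+# object hashes produced.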
+
test_expect_success 'setup (initial)' '
echo content >file &&
git add file &&
'
test_expect_success 'setup expected' '
-cat >expected <<EOF
-new file mode 100644
-index 0000000..d95f3ad
---- /dev/null
-+++ b/file
-@@ -0,0 +1 @@
-+content
-EOF
+ cat >expected <<-\EOF
+ new file mode 100644
+ index 0000000..d95f3ad
+ --- /dev/null
+ +++ b/file
+ @@ -0,0 +1 @@
+ +content
+ EOF
'
test_expect_success 'diff works (initial)' '
(echo d; echo 1) | git add -i >output &&
sed -ne "/new file/,/content/p" <output >diff &&
- test_cmp expected diff
+ diff_cmp expected diff
'
test_expect_success 'revert works (initial)' '
git add file &&
'
test_expect_success 'setup expected' '
-cat >expected <<EOF
-index 180b47c..b6f2c08 100644
---- a/file
-+++ b/file
-@@ -1 +1,2 @@
- baseline
-+content
-EOF
+ cat >expected <<-\EOF
+ index 180b47c..b6f2c08 100644
+ --- a/file
+ +++ b/file
+ @@ -1 +1,2 @@
+ baseline
+ +content
+ EOF
'
test_expect_success 'diff works (commit)' '
(echo d; echo 1) | git add -i >output &&
sed -ne "/^index/,/content/p" <output >diff &&
- test_cmp expected diff
+ diff_cmp expected diff
'
test_expect_success 'revert works (commit)' '
git add file &&
test_expect_success 'setup expected' '
-cat >expected <<EOF
-EOF
-'
-
-test_expect_success 'setup fake editor' '
- >fake_editor.sh &&
- chmod a+x fake_editor.sh &&
- test_set_editor "$(pwd)/fake_editor.sh"
+ cat >expected <<-\EOF
+ EOF
'
test_expect_success 'dummy edit works' '
+ test_set_editor : &&
(echo e; echo a) | git add -p &&
git diff > diff &&
- test_cmp expected diff
+ diff_cmp expected diff
'
test_expect_success 'setup patch' '
-cat >patch <<EOF
-@@ -1,1 +1,4 @@
- this
-+patch
--does not
- apply
-EOF
+ cat >patch <<-\EOF
+ @@ -1,1 +1,4 @@
+ this
+ +patch
+ -does not
+ apply
+ EOF
'
test_expect_success 'setup fake editor' '
- echo "#!$SHELL_PATH" >fake_editor.sh &&
- cat >>fake_editor.sh <<\EOF &&
-mv -f "$1" oldpatch &&
-mv -f patch "$1"
-EOF
- chmod a+x fake_editor.sh &&
+ write_script "fake_editor.sh" <<-\EOF &&
+ mv -f "$1" oldpatch &&
+ mv -f patch "$1"
+ EOF
test_set_editor "$(pwd)/fake_editor.sh"
'
'
test_expect_success 'setup patch' '
-cat >patch <<EOF
-this patch
-is garbage
-EOF
+ cat >patch <<-\EOF
+ this patch
+ is garbage
+ EOF
'
test_expect_success 'garbage edit rejected' '
'
test_expect_success 'setup patch' '
-cat >patch <<EOF
-@@ -1,0 +1,0 @@
- baseline
-+content
-+newcontent
-+lines
-EOF
+ cat >patch <<-\EOF
+ @@ -1,0 +1,0 @@
+ baseline
+ +content
+ +newcontent
+ +lines
+ EOF
'
test_expect_success 'setup expected' '
-cat >expected <<EOF
-diff --git a/file b/file
-index b5dd6c9..f910ae9 100644
---- a/file
-+++ b/file
-@@ -1,4 +1,4 @@
- baseline
- content
--newcontent
-+more
- lines
-EOF
+ cat >expected <<-\EOF
+ diff --git a/file b/file
+ index b5dd6c9..f910ae9 100644
+ --- a/file
+ +++ b/file
+ @@ -1,4 +1,4 @@
+ baseline
+ content
+ -newcontent
+ +more
+ lines
+ EOF
'
test_expect_success 'real edit works' '
(echo e; echo n; echo d) | git add -p &&
git diff >output &&
- test_cmp expected output
+ diff_cmp expected output
'
test_expect_success 'skip files similarly as commit -a' '
git reset &&
git commit -am commit &&
git diff >expected &&
- test_cmp expected output &&
+ diff_cmp expected output &&
git reset --hard HEAD^
'
rm -f .gitignore
# Write the patch file with a new line at the top and bottom
test_expect_success 'setup patch' '
-cat >patch <<EOF
-index 180b47c..b6f2c08 100644
---- a/file
-+++ b/file
-@@ -1,2 +1,4 @@
-+firstline
- baseline
- content
-+lastline
-EOF
-'
-
-# Expected output, similar to the patch but w/ diff at the top
+ cat >patch <<-\EOF
+ index 180b47c..b6f2c08 100644
+ --- a/file
+ +++ b/file
+ @@ -1,2 +1,4 @@
+ +firstline
+ baseline
+ content
+ +lastline
+ \ No newline at end of file
+ EOF
+'
+
+# Expected output: like the patch, but with a "diff --git" line at the top
test_expect_success 'setup expected' '
-cat >expected <<EOF
-diff --git a/file b/file
-index b6f2c08..61b9053 100755
---- a/file
-+++ b/file
-@@ -1,2 +1,4 @@
-+firstline
- baseline
- content
-+lastline
-EOF
+ echo diff --git a/file b/file >expected &&
+ sed "/^index/s/ 100644/ 100755/" patch >>expected &&
+ cat >expected-output <<-\EOF
+ --- a/file
+ +++ b/file
+ @@ -1,2 +1,4 @@
+ +firstline
+ baseline
+ content
+ +lastline
+ \ No newline at end of file
+ @@ -1,2 +1,3 @@
+ +firstline
+ baseline
+ content
+ @@ -1,2 +2,3 @@
+ baseline
+ content
+ +lastline
+ \ No newline at end of file
+ EOF
'
# Test splitting the first patch, then adding both
-test_expect_success 'add first line works' '
+test_expect_success C_LOCALE_OUTPUT 'add first line works' '
git commit -am "clear local changes" &&
git apply patch &&
- (echo s; echo y; echo y) | git add -p file &&
- git diff --cached > diff &&
- test_cmp expected diff
+ printf "%s\n" s y y | git add -p file 2>error |
+ sed -n -e "s/^Stage this hunk[^@]*\(@@ .*\)/\1/" \
+ -e "/^[-+@ \\\\]"/p >output &&
+ test_must_be_empty error &&
+ git diff --cached >diff &&
+ diff_cmp expected diff &&
+ test_cmp expected-output output
'
test_expect_success 'setup expected' '
-cat >expected <<EOF
-diff --git a/non-empty b/non-empty
-deleted file mode 100644
-index d95f3ad..0000000
---- a/non-empty
-+++ /dev/null
-@@ -1 +0,0 @@
--content
-EOF
+ cat >expected <<-\EOF
+ diff --git a/non-empty b/non-empty
+ deleted file mode 100644
+ index d95f3ad..0000000
+ --- a/non-empty
+ +++ /dev/null
+ @@ -1 +0,0 @@
+ -content
+ EOF
'
test_expect_success 'deleting a non-empty file' '
rm non-empty &&
echo y | git add -p non-empty &&
git diff --cached >diff &&
- test_cmp expected diff
+ diff_cmp expected diff
'
test_expect_success 'setup expected' '
-cat >expected <<EOF
-diff --git a/empty b/empty
-deleted file mode 100644
-index e69de29..0000000
-EOF
+ cat >expected <<-\EOF
+ diff --git a/empty b/empty
+ deleted file mode 100644
+ index e69de29..0000000
+ EOF
'
test_expect_success 'deleting an empty file' '
rm empty &&
echo y | git add -p empty &&
git diff --cached >diff &&
- test_cmp expected diff
+ diff_cmp expected diff
'
test_expect_success 'split hunk setup' '
git reset --hard &&
- for i in 10 20 30 40 50 60
- do
- echo $i
- done >test &&
+ test_write_lines 10 20 30 40 50 60 >test &&
git add test &&
test_tick &&
git commit -m test &&
- for i in 10 15 20 21 22 23 24 30 40 50 60
- do
- echo $i
- done >test
+ test_write_lines 10 15 20 21 22 23 24 30 40 50 60 >test
'
test_expect_success 'split hunk "add -p (edit)"' '
'
test_expect_failure 'split hunk "add -p (no, yes, edit)"' '
- cat >test <<-\EOF &&
- 5
- 10
- 20
- 21
- 30
- 31
- 40
- 50
- 60
- EOF
+ test_write_lines 5 10 20 21 30 31 40 50 60 >test &&
git reset &&
# test sequence is s(plit), n(o), y(es), e(dit)
# q n q q is there to make sure we exit at the end.
+changed
EOF
git diff --cached >diff &&
- test_cmp expected diff
+ diff_cmp expected diff
'
test_expect_success TTY 'diffs can be colorized' '
grep "$(printf "\\033")" output
'
+test_expect_success TTY 'diffFilter filters diff' '
+ git reset --hard &&
+
+ echo content >test &&
+ test_config interactive.diffFilter "sed s/^/foo:/" &&
+ printf y | test_terminal git add -p >output 2>&1 &&
+
+ # avoid depending on the exact coloring or content of the prompts,
+ # and just make sure we saw our diff prefixed
+ grep foo:.*content output
+'
+
+test_expect_success TTY 'detect bogus diffFilter output' '
+ git reset --hard &&
+
+ echo content >test &&
+ test_config interactive.diffFilter "echo too-short" &&
+ printf y | test_must_fail test_terminal git add -p
+'
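+
+# A filter that adds or removes lines would desynchronize the displayed
+# output from the underlying hunks, so add -p compares line counts and
+# aborts on a mismatch rather than staging the wrong content.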
+
test_expect_success 'patch-mode via -i prompts for files' '
git reset --hard &&
echo test >expect &&
git diff --cached --name-only >actual &&
- test_cmp expect actual
+ diff_cmp expect actual
'
test_expect_success 'add -p handles globs' '
! grep dirty-otherwise output
'
+test_expect_success 'set up pathological context' '
+ git reset --hard &&
+ test_write_lines a a a a a a a a a a a >a &&
+ git add a &&
+ git commit -m a &&
+ test_write_lines c b a a a a a a a b a a a a >a &&
+ test_write_lines a a a a a a a b a a a a >expected-1 &&
+ test_write_lines b a a a a a a a b a a a a >expected-2 &&
+ # check editing can cope with missing header and deleted context lines
+ # as well as changes to other lines
+ test_write_lines +b " a" >patch
+'
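+
+# For reference, the edited "patch" written above is just the two lines
+#   +b
+#    a
+# i.e. a hunk body missing its @@ header and some context lines, which the
+# fake editor substitutes for the hunk being edited.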
+
+test_expect_success 'add -p works with pathological context lines' '
+ git reset &&
+ printf "%s\n" n y |
+ git add -p &&
+ git cat-file blob :a >actual &&
+ test_cmp expected-1 actual
+'
+
+test_expect_success 'add -p patch editing works with pathological context lines' '
+ git reset &&
+ # n q q below is in case edit fails
+ printf "%s\n" e y n q q |
+ git add -p &&
+ git cat-file blob :a >actual &&
+ test_cmp expected-2 actual
+'
+
test_done
git rm path1 &&
mkdir subdir &&
git mv another-path subdir/path1 &&
- git status | test_i18ngrep "renamed: .*path1 -> subdir/path1"'
+ git status >out &&
+ test_i18ngrep "renamed: .*path1 -> subdir/path1" out
+'
test_expect_success 'favour same basenames even with minor differences' '
git show HEAD:path1 | sed "s/15/16/" > subdir/path1 &&
- git status | test_i18ngrep "renamed: .*path1 -> subdir/path1"'
+ git status >out &&
+ test_i18ngrep "renamed: .*path1 -> subdir/path1" out
+'
test_expect_success 'two files with same basename and same content' '
git reset --hard &&
git add dir &&
git commit -m 2 &&
git mv dir other-dir &&
- git status | test_i18ngrep "renamed: .*dir/A/file -> other-dir/A/file"
+ git status >out &&
+ test_i18ngrep "renamed: .*dir/A/file -> other-dir/A/file" out
'
test_expect_success 'setup for many rename source candidates' '
diff --no-index --raw --abbrev=4 dir2 dir
:noellipses diff --no-index --raw --abbrev=4 dir2 dir
diff --no-index --raw --no-abbrev dir2 dir
+
+diff-tree --pretty --root --stat --compact-summary initial
+diff-tree --pretty -R --root --stat --compact-summary initial
+diff-tree --stat --compact-summary initial mode
+diff-tree -R --stat --compact-summary initial mode
EOF
test_expect_success 'log -S requires an argument' '
--- /dev/null
+$ git diff-tree --pretty --root --stat --compact-summary initial
+commit 444ac553ac7612cc88969031b02b3767fb8a353a
+Author: A U Thor <author@example.com>
+Date: Mon Jun 26 00:00:00 2006 +0000
+
+ Initial
+
+ dir/sub (new) | 2 ++
+ file0 (new) | 3 +++
+ file2 (new) | 3 +++
+ 3 files changed, 8 insertions(+)
+$
--- /dev/null
+$ git diff-tree --pretty -R --root --stat --compact-summary initial
+commit 444ac553ac7612cc88969031b02b3767fb8a353a
+Author: A U Thor <author@example.com>
+Date: Mon Jun 26 00:00:00 2006 +0000
+
+ Initial
+
+ dir/sub (gone) | 2 --
+ file0 (gone) | 3 ---
+ file2 (gone) | 3 ---
+ 3 files changed, 8 deletions(-)
+$
--- /dev/null
+$ git diff-tree --stat --compact-summary initial mode
+ file0 (mode +x) | 0
+ 1 file changed, 0 insertions(+), 0 deletions(-)
+$
--- /dev/null
+$ git diff-tree -R --stat --compact-summary initial mode
+ file0 (mode -x) | 0
+ 1 file changed, 0 insertions(+), 0 deletions(-)
+$
css
fortran
fountain
+ golang
html
java
matlab
--- /dev/null
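+// t4018 fixture convention: the test changes the "ChangeMe" line and
+// expects the hunk header to show the enclosing line marked "RIGHT"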
+type Test struct {
+ a Type
+}
+
+func (t *Test) RIGHT(a Type) (Type, error) {
+ t.a = a
+ return ChangeMe, nil
+}
--- /dev/null
+func RIGHT() {
+ a := 5
+ b := ChangeMe
+}
--- /dev/null
+type RIGHT interface {
+ a() Type
+ b() ChangeMe
+}
--- /dev/null
+func RIGHT(aVeryVeryVeryLongVariableName AVeryVeryVeryLongType,
+ anotherLongVariableName AnotherLongType) {
+ a := 5
+ b := ChangeMe
+}
--- /dev/null
+type RIGHT struct {
+ a Type
+ b ChangeMe
+}
git commit -m message "$name"
'
+cat >expect72 <<-'EOF'
+ ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 +
+EOF
+test_expect_success "format-patch: small change with long name gives more space to the name" '
+ git format-patch -1 --stdout >output &&
+ grep " | " output >actual &&
+ test_cmp expect72 actual
+'
+
while read cmd args
do
- cat >expect <<-'EOF'
+ cat >expect80 <<-'EOF'
...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 +
EOF
test_expect_success "$cmd: small change with long name gives more space to the name" '
git $cmd $args >output &&
grep " | " output >actual &&
- test_cmp expect actual
+ test_cmp expect80 actual
'
+done <<\EOF
+diff HEAD^ HEAD --stat
+show --stat
+log -1 --stat
+EOF
+while read cmd args
+do
cat >expect <<-'EOF'
...aaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1 +
EOF
git commit -m message abcd
'
-cat >expect80 <<'EOF'
- abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+cat >expect72 <<'EOF'
+ abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
EOF
-cat >expect80-graph <<'EOF'
-| abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+cat >expect72-graph <<'EOF'
+| abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
EOF
cat >expect200 <<'EOF'
abcd | 1000 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
test_cmp "$expect-graph" actual
'
done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
respects expect200 diff HEAD^ HEAD --stat
respects expect200 show --stat
respects expect200 log -1 --stat
test_cmp "$expect-graph" actual
'
done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
respects expect40 diff HEAD^ HEAD --stat
respects expect40 show --stat
respects expect40 log -1 --stat
test_cmp "$expect-graph" actual
'
done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
respects expect40 diff HEAD^ HEAD --stat
respects expect40 show --stat
respects expect40 log -1 --stat
log -1 --stat
EOF
-cat >expect80 <<'EOF'
- ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 ++++++++++++++++++++
+cat >expect72 <<'EOF'
+ ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++
EOF
-cat >expect80-graph <<'EOF'
-| ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 ++++++++++++++++++++
+cat >expect72-graph <<'EOF'
+| ...aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++
EOF
cat >expect200 <<'EOF'
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa | 1000 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
test_cmp "$expect-graph" actual
'
done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
respects expect200 diff HEAD^ HEAD --stat
respects expect200 show --stat
respects expect200 log -1 --stat
test_cmp "$expect-graph" actual
'
done <<\EOF
-ignores expect80 format-patch -1 --stdout
+ignores expect72 format-patch -1 --stdout
respects expect1 diff HEAD^ HEAD --stat
respects expect1 show --stat
respects expect1 log -1 --stat
--- /dev/null
+#!/bin/sh
+
+test_description='test finding specific blobs in the revision walking'
+. ./test-lib.sh
+
+test_expect_success 'setup ' '
+ git commit --allow-empty -m "empty initial commit" &&
+
+ echo "Hello, world!" >greeting &&
+ git add greeting &&
+ git commit -m "add the greeting blob" && # borrowed from Git from the Bottom Up
+ git tag -m "the blob" greeting $(git rev-parse HEAD:greeting) &&
+
+ echo asdf >unrelated &&
+ git add unrelated &&
+ git commit -m "unrelated history" &&
+
+ git revert HEAD^ &&
+
+ git commit --allow-empty -m "another unrelated commit"
+'
+
+test_expect_success 'find the greeting blob' '
+ cat >expect <<-EOF &&
+ Revert "add the greeting blob"
+ add the greeting blob
+ EOF
+
+ git log --format=%s --find-object=greeting^{blob} >actual &&
+
+ test_cmp expect actual
+'
+
+test_expect_success 'setup a tree' '
+ mkdir a &&
+ echo asdf >a/file &&
+ git add a/file &&
+ git commit -m "add a file in a subdirectory"
+'
+
+test_expect_success 'find a tree' '
+ cat >expect <<-EOF &&
+ add a file in a subdirectory
+ EOF
+
+ git log --format=%s -t --find-object=HEAD:a >actual &&
+
+ test_cmp expect actual
+'
+
+test_expect_success 'setup a submodule' '
+ test_create_repo sub &&
+ test_commit -C sub sub &&
+ git submodule add ./sub sub &&
+ git commit -a -m "add sub"
+'
+
+test_expect_success 'find a submodule' '
+ cat >expect <<-EOF &&
+ add sub
+ EOF
+
+ git log --format=%s --find-object=HEAD:sub >actual &&
+
+ test_cmp expect actual
+'
+
+test_done
test_cmp expected "post image.txt"
'
+cat >diff-from-svn <<\EOF
+Index: Makefile
+===================================================================
+diff --git a/branches/Makefile
+deleted file mode 100644
+--- a/branches/Makefile (revision 13)
++++ /dev/null (nonexistent)
+@@ +1 0,0 @@
+-
+EOF
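+# Subversion paths carry an extra leading component ("branches/" here),
+# so the test below applies with -p2 to strip it along with the usual
+# a/ prefix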
+
+test_expect_success 'apply handles a diff generated by Subversion' '
+ >Makefile &&
+ git apply -p2 diff-from-svn &&
+ test_path_is_missing Makefile
+'
+
test_done
test -d .git/rebase-apply
'
+test_expect_success 'am --show-current-patch' '
+ git am --show-current-patch >actual.patch &&
+ test_cmp .git/rebase-apply/0001 actual.patch
+'
+
test_expect_success 'am --skip works' '
echo goodbye >expected &&
git am --skip &&
git cat-file commit HEAD | grep "^$LONG$"
'
+test_expect_success 'am --quit keeps HEAD where it is' '
+ mkdir .git/rebase-apply &&
+ >.git/rebase-apply/last &&
+ >.git/rebase-apply/next &&
+ git rev-parse HEAD^ >.git/ORIG_HEAD &&
+ git rev-parse HEAD >expected &&
+ git am --quit &&
+ test_path_is_missing .git/rebase-apply &&
+ git rev-parse HEAD >actual &&
+ test_cmp expected actual
+'
+
test_done
test_expect_success "am$with3 --skip continue after failed am$with3" '
test_must_fail git am$with3 --skip >output &&
- test_i18ngrep "^Applying" output >output.applying &&
- test_i18ngrep "^Applying: 6$" output.applying &&
- test_i18ncmp file-2-expect file-2 &&
+ test_i18ngrep "^Applying: 6$" output &&
+ test_cmp file-2-expect file-2 &&
test ! -f .git/MERGE_RR
'
grep "^warning:.* expected .tagger. line" err
'
+test_expect_success 'index-pack --fsck-objects also warns upon missing tagger in tag' '
+ git index-pack --fsck-objects tag-test-${pack1}.pack 2>err &&
+ grep "^warning:.* expected .tagger. line" err
+'
+
test_done
test_expect_success 'test lonely missing ref' '
(
cd client &&
- test_must_fail git fetch-pack --no-progress .. refs/heads/xyzzy
- ) >/dev/null 2>error-m &&
+ test_must_fail git fetch-pack --no-progress .. refs/heads/xyzzy 2>../error-m
+ ) &&
test_i18ncmp expect-error error-m
'
test_expect_success 'test missing ref after existing' '
(
cd client &&
- test_must_fail git fetch-pack --no-progress .. refs/heads/A refs/heads/xyzzy
- ) >/dev/null 2>error-em &&
+ test_must_fail git fetch-pack --no-progress .. refs/heads/A refs/heads/xyzzy 2>../error-em
+ ) &&
test_i18ncmp expect-error error-em
'
test_expect_success 'test missing ref before existing' '
(
cd client &&
- test_must_fail git fetch-pack --no-progress .. refs/heads/xyzzy refs/heads/A
- ) >/dev/null 2>error-me &&
+ test_must_fail git fetch-pack --no-progress .. refs/heads/xyzzy refs/heads/A 2>../error-me
+ ) &&
test_i18ncmp expect-error error-me
'
)
'
+test_expect_success 'filtering by size' '
+ rm -rf server client &&
+ test_create_repo server &&
+ test_commit -C server one &&
+ test_config -C server uploadpack.allowfilter 1 &&
+
+ test_create_repo client &&
+ git -C client fetch-pack --filter=blob:limit=0 ../server HEAD &&
+
+ # Ensure that object is not inadvertently fetched
+ test_must_fail git -C client cat-file -e $(git hash-object server/one.t)
+'
+
+test_expect_success 'filtering by size has no effect if support for it is not advertised' '
+ rm -rf server client &&
+ test_create_repo server &&
+ test_commit -C server one &&
+
+ test_create_repo client &&
+ git -C client fetch-pack --filter=blob:limit=0 ../server HEAD 2> err &&
+
+ # Ensure that object is fetched
+ git -C client cat-file -e $(git hash-object server/one.t) &&
+
+ test_i18ngrep "filtering not recognized by server" err
+'
+
+fetch_filter_blob_limit_zero () {
+ SERVER="$1"
+ URL="$2"
+
+ rm -rf "$SERVER" client &&
+ test_create_repo "$SERVER" &&
+ test_commit -C "$SERVER" one &&
+ test_config -C "$SERVER" uploadpack.allowfilter 1 &&
+
+ git clone "$URL" client &&
+ test_config -C client extensions.partialclone origin &&
+
+ test_commit -C "$SERVER" two &&
+
+ git -C client fetch --filter=blob:limit=0 origin HEAD:somewhere &&
+
+ # Ensure that commit is fetched, but blob is not
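+ # (pointing extensions.partialclone at a bogus value prevents lazy
+ # fetching, so cat-file -e reflects only what is already local)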
+ test_config -C client extensions.partialclone "arbitrary string" &&
+ git -C client cat-file -e $(git -C "$SERVER" rev-parse two) &&
+ test_must_fail git -C client cat-file -e $(git hash-object "$SERVER/two.t")
+}
+
+test_expect_success 'fetch with --filter=blob:limit=0' '
+ fetch_filter_blob_limit_zero server server
+'
+
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'fetch with --filter=blob:limit=0 and HTTP' '
+ fetch_filter_blob_limit_zero "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server"
+'
+
+stop_httpd
+
+
test_done
(
cd descriptive &&
git fetch o 2>actual &&
- grep " -> refs/crazyheads/descriptive-branch$" actual |
- test_i18ngrep "new branch" &&
- grep " -> descriptive-tag$" actual |
- test_i18ngrep "new tag" &&
- grep " -> crazy$" actual |
- test_i18ngrep "new ref"
+ test_i18ngrep "new branch.* -> refs/crazyheads/descriptive-branch$" actual &&
+ test_i18ngrep "new tag.* -> descriptive-tag$" actual &&
+ test_i18ngrep "new ref.* -> crazy$" actual
) &&
git checkout master
'
set_config_tristate () {
# var=$1 val=$2
case "$2" in
- unset) test_unconfig "$1" ;;
- *) git config "$1" "$2" ;;
+ unset)
+ test_unconfig "$1"
+ ;;
+ *)
+ git config "$1" "$2"
+ key=$(echo $1 | sed -e 's/^remote\.origin/fetch/')
+ git_fetch_c="$git_fetch_c -c $key=$2"
+ ;;
esac
}
test_configured_prune () {
- fetch_prune=$1 remote_origin_prune=$2 cmdline=$3 expected=$4
+ test_configured_prune_type "$@" "name"
+ test_configured_prune_type "$@" "link"
+}
- test_expect_success "prune fetch.prune=$1 remote.origin.prune=$2${3:+ $3}; $4" '
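+# Run each prune test in two modes: "name" fetches via the configured
+# remote name, while "link" rewrites the command line to use the
+# remote's file:// URL (adding an explicit refs/tags/*:refs/tags/*
+# refspec when tag pruning is expected).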
+test_configured_prune_type () {
+ fetch_prune=$1
+ remote_origin_prune=$2
+ fetch_prune_tags=$3
+ remote_origin_prune_tags=$4
+ expected_branch=$5
+ expected_tag=$6
+ cmdline=$7
+ mode=$8
+
+ if test -z "$cmdline_setup"
+ then
+ test_expect_success 'setup cmdline_setup variable for subsequent test' '
+ remote_url="file://$(git -C one config remote.origin.url)" &&
+ remote_fetch="$(git -C one config remote.origin.fetch)" &&
+ cmdline_setup="\"$remote_url\" \"$remote_fetch\""
+ '
+ fi
+
+ if test "$mode" = 'link'
+ then
+ new_cmdline=""
+
+ if test "$cmdline" = ""
+ then
+ new_cmdline=$cmdline_setup
+ else
+ new_cmdline=$(printf "%s" "$cmdline" | perl -pe 's[origin(?!/)]["'"$remote_url"'"]g')
+ fi
+
+ if test "$fetch_prune_tags" = 'true' ||
+ test "$remote_origin_prune_tags" = 'true'
+ then
+ if ! printf '%s' "$cmdline\n" | grep -q refs/remotes/origin/
+ then
+ new_cmdline="$new_cmdline refs/tags/*:refs/tags/*"
+ fi
+ fi
+
+ cmdline="$new_cmdline"
+ fi
+
+ test_expect_success "$mode prune fetch.prune=$1 remote.origin.prune=$2 fetch.pruneTags=$3 remote.origin.pruneTags=$4${7:+ $7}; branch:$5 tag:$6" '
# make sure a newbranch is there in . and also in one
git branch -f newbranch &&
+ git tag -f newtag &&
(
cd one &&
test_unconfig fetch.prune &&
+ test_unconfig fetch.pruneTags &&
test_unconfig remote.origin.prune &&
- git fetch &&
- git rev-parse --verify refs/remotes/origin/newbranch
+ test_unconfig remote.origin.pruneTags &&
+ git fetch '"$cmdline_setup"' &&
+ git rev-parse --verify refs/remotes/origin/newbranch &&
+ git rev-parse --verify refs/tags/newtag
) &&
# now remove it
git branch -d newbranch &&
+ git tag -d newtag &&
# then test
(
cd one &&
+ git_fetch_c="" &&
set_config_tristate fetch.prune $fetch_prune &&
+ set_config_tristate fetch.pruneTags $fetch_prune_tags &&
set_config_tristate remote.origin.prune $remote_origin_prune &&
-
- git fetch $cmdline &&
- case "$expected" in
+ set_config_tristate remote.origin.pruneTags $remote_origin_prune_tags &&
+
+ if test "$mode" != "link"
+ then
+ git_fetch_c=""
+ fi &&
+ git$git_fetch_c fetch '"$cmdline"' &&
+ case "$expected_branch" in
pruned)
test_must_fail git rev-parse --verify refs/remotes/origin/newbranch
;;
kept)
git rev-parse --verify refs/remotes/origin/newbranch
;;
+ esac &&
+ case "$expected_tag" in
+ pruned)
+ test_must_fail git rev-parse --verify refs/tags/newtag
+ ;;
+ kept)
+ git rev-parse --verify refs/tags/newtag
+ ;;
esac
)
'
}
-test_configured_prune unset unset "" kept
-test_configured_prune unset unset "--no-prune" kept
-test_configured_prune unset unset "--prune" pruned
-
-test_configured_prune false unset "" kept
-test_configured_prune false unset "--no-prune" kept
-test_configured_prune false unset "--prune" pruned
-
-test_configured_prune true unset "" pruned
-test_configured_prune true unset "--prune" pruned
-test_configured_prune true unset "--no-prune" kept
-
-test_configured_prune unset false "" kept
-test_configured_prune unset false "--no-prune" kept
-test_configured_prune unset false "--prune" pruned
-
-test_configured_prune false false "" kept
-test_configured_prune false false "--no-prune" kept
-test_configured_prune false false "--prune" pruned
-
-test_configured_prune true false "" kept
-test_configured_prune true false "--prune" pruned
-test_configured_prune true false "--no-prune" kept
-
-test_configured_prune unset true "" pruned
-test_configured_prune unset true "--no-prune" kept
-test_configured_prune unset true "--prune" pruned
-
-test_configured_prune false true "" pruned
-test_configured_prune false true "--no-prune" kept
-test_configured_prune false true "--prune" pruned
-
-test_configured_prune true true "" pruned
-test_configured_prune true true "--prune" pruned
-test_configured_prune true true "--no-prune" kept
+# $1 config: fetch.prune
+# $2 config: remote.<name>.prune
+# $3 config: fetch.pruneTags
+# $4 config: remote.<name>.pruneTags
+# $5 expect: branch to be pruned?
+# $6 expect: tag to be pruned?
+# $7 git-fetch $cmdline:
+#
+# $1 $2 $3 $4 $5 $6 $7
+test_configured_prune unset unset unset unset kept kept ""
+test_configured_prune unset unset unset unset kept kept "--no-prune"
+test_configured_prune unset unset unset unset pruned kept "--prune"
+test_configured_prune unset unset unset unset kept pruned \
+ "--prune origin refs/tags/*:refs/tags/*"
+test_configured_prune unset unset unset unset pruned pruned \
+ "--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*"
+
+test_configured_prune false unset unset unset kept kept ""
+test_configured_prune false unset unset unset kept kept "--no-prune"
+test_configured_prune false unset unset unset pruned kept "--prune"
+
+test_configured_prune true unset unset unset pruned kept ""
+test_configured_prune true unset unset unset pruned kept "--prune"
+test_configured_prune true unset unset unset kept kept "--no-prune"
+
+test_configured_prune unset false unset unset kept kept ""
+test_configured_prune unset false unset unset kept kept "--no-prune"
+test_configured_prune unset false unset unset pruned kept "--prune"
+
+test_configured_prune false false unset unset kept kept ""
+test_configured_prune false false unset unset kept kept "--no-prune"
+test_configured_prune false false unset unset pruned kept "--prune"
+test_configured_prune false false unset unset kept pruned \
+ "--prune origin refs/tags/*:refs/tags/*"
+test_configured_prune false false unset unset pruned pruned \
+ "--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*"
+
+test_configured_prune true false unset unset kept kept ""
+test_configured_prune true false unset unset pruned kept "--prune"
+test_configured_prune true false unset unset kept kept "--no-prune"
+
+test_configured_prune unset true unset unset pruned kept ""
+test_configured_prune unset true unset unset kept kept "--no-prune"
+test_configured_prune unset true unset unset pruned kept "--prune"
+
+test_configured_prune false true unset unset pruned kept ""
+test_configured_prune false true unset unset kept kept "--no-prune"
+test_configured_prune false true unset unset pruned kept "--prune"
+
+test_configured_prune true true unset unset pruned kept ""
+test_configured_prune true true unset unset pruned kept "--prune"
+test_configured_prune true true unset unset kept kept "--no-prune"
+test_configured_prune true true unset unset kept pruned \
+ "--prune origin refs/tags/*:refs/tags/*"
+test_configured_prune true true unset unset pruned pruned \
+ "--prune origin refs/tags/*:refs/tags/* +refs/heads/*:refs/remotes/origin/*"
+
+# --prune-tags on its own does nothing, needs --prune as well, same
+# for fetch.pruneTags without fetch.prune
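+# (with --prune, --prune-tags behaves like supplying an explicit
+# refs/tags/*:refs/tags/* refspec, as the tests below demonstrate)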
+test_configured_prune unset unset unset unset kept kept "--prune-tags"
+test_configured_prune unset unset true unset kept kept ""
+test_configured_prune unset unset unset true kept kept ""
+
+# These will prune the tags
+test_configured_prune unset unset unset unset pruned pruned "--prune --prune-tags"
+test_configured_prune true unset true unset pruned pruned ""
+test_configured_prune unset true unset true pruned pruned ""
+
+# remote.<name>.pruneTags overrides fetch.pruneTags, just like
+# remote.<name>.prune overrides fetch.prune if set.
+test_configured_prune true unset true unset pruned pruned ""
+test_configured_prune false true false true pruned pruned ""
+test_configured_prune true false true false kept kept ""
+
+# When --prune-tags is supplied it's ignored if an explicit refspec is
+# given, same for the configuration options.
+test_configured_prune unset unset unset unset pruned kept \
+ "--prune --prune-tags origin +refs/heads/*:refs/remotes/origin/*"
+test_configured_prune unset unset true unset pruned kept \
+ "--prune origin +refs/heads/*:refs/remotes/origin/*"
+test_configured_prune unset unset unset true pruned kept \
+ "--prune origin +refs/heads/*:refs/remotes/origin/*"
+
+# Pruning also takes place if a file:// url replaces a named
+# remote. However, because there's no implicit
+# +refs/heads/*:refs/remotes/origin/* refspec and supplying it on the
+# command-line negates --prune-tags, the branches will not be pruned.
+test_configured_prune_type unset unset unset unset kept kept "origin --prune-tags" "name"
+test_configured_prune_type unset unset unset unset kept kept "origin --prune-tags" "link"
+test_configured_prune_type unset unset unset unset pruned pruned "origin --prune --prune-tags" "name"
+test_configured_prune_type unset unset unset unset kept pruned "origin --prune --prune-tags" "link"
+test_configured_prune_type unset unset unset unset pruned pruned "--prune --prune-tags origin" "name"
+test_configured_prune_type unset unset unset unset kept pruned "--prune --prune-tags origin" "link"
+test_configured_prune_type unset unset true unset pruned pruned "--prune origin" "name"
+test_configured_prune_type unset unset true unset kept pruned "--prune origin" "link"
+test_configured_prune_type unset unset unset true pruned pruned "--prune origin" "name"
+test_configured_prune_type unset unset unset true kept pruned "--prune origin" "link"
+test_configured_prune_type true unset true unset pruned pruned "origin" "name"
+test_configured_prune_type true unset true unset kept pruned "origin" "link"
+test_configured_prune_type unset true true unset pruned pruned "origin" "name"
+test_configured_prune_type unset true true unset kept pruned "origin" "link"
+test_configured_prune_type unset true unset true pruned pruned "origin" "name"
+test_configured_prune_type unset true unset true kept pruned "origin" "link"
+
+# When all remote.origin.fetch settings are deleted a --prune
+# --prune-tags still implicitly supplies refs/tags/*:refs/tags/* so
+# tags, but not tracking branches, will be deleted.
+test_expect_success 'remove remote.origin.fetch "one"' '
+ (
+ cd one &&
+ git config --unset-all remote.origin.fetch
+ )
+'
+test_configured_prune_type unset unset unset unset kept pruned "origin --prune --prune-tags" "name"
+test_configured_prune_type unset unset unset unset kept pruned "origin --prune --prune-tags" "link"
test_expect_success 'all boundary commits are excluded' '
test_commit base &&
add_upstream_commit &&
(
cd downstream &&
- GIT_TRACE=$(pwd)/../trace.out git fetch --recurse-submodules -j2 2>../actual.err
+ GIT_TRACE="$TRASH_DIRECTORY/trace.out" git fetch --recurse-submodules -j2 2>../actual.err
) &&
test_must_be_empty actual.out &&
test_i18ncmp expect.err actual.err &&
)
'
-test_expect_success "'fetch.recurseSubmodules=on-demand' works also without .gitmodule entry" '
+test_expect_success "'fetch.recurseSubmodules=on-demand' works also without .gitmodules entry" '
(
cd downstream &&
git fetch --recurse-submodules
)
}
-verify_stderr () {
- cat >expected &&
- # We're not interested in the error
- # "fatal: The remote end hung up unexpectedly":
- test_i18ngrep -E '^(fatal|warning):' <error | grep -v 'hung up' >actual | sort &&
- test_i18ncmp expected actual
-}
-
test_expect_success 'setup' '
git commit --allow-empty -m "Initial" &&
git branch branch1 &&
"+refs/heads/branch2:refs/remotes/origin/branch1" && (
cd ccc &&
test_must_fail git fetch origin 2>error &&
- verify_stderr <<-\EOF
- fatal: Cannot fetch both refs/heads/branch1 and refs/heads/branch2 to refs/remotes/origin/branch1
- EOF
+ test_i18ngrep "fatal: Cannot fetch both refs/heads/branch1 and refs/heads/branch2 to refs/remotes/origin/branch1" error
)
'
test_must_fail git fetch origin \
refs/heads/*:refs/remotes/origin/* \
refs/heads/branch2:refs/remotes/origin/branch1 2>error &&
- verify_stderr <<-\EOF
- fatal: Cannot fetch both refs/heads/branch1 and refs/heads/branch2 to refs/remotes/origin/branch1
- EOF
+ test_i18ngrep "fatal: Cannot fetch both refs/heads/branch1 and refs/heads/branch2 to refs/remotes/origin/branch1" error
)
'
git fetch origin \
refs/heads/branch1:refs/remotes/origin/branch2 \
refs/heads/branch2:refs/remotes/origin/branch1 2>error &&
- verify_stderr <<-\EOF
- warning: refs/remotes/origin/branch1 usually tracks refs/heads/branch1, not refs/heads/branch2
- warning: refs/remotes/origin/branch2 usually tracks refs/heads/branch2, not refs/heads/branch1
- EOF
+ test_i18ngrep "warning: refs/remotes/origin/branch1 usually tracks refs/heads/branch1, not refs/heads/branch2" error &&
+ test_i18ngrep "warning: refs/remotes/origin/branch2 usually tracks refs/heads/branch2, not refs/heads/branch1" error
)
'
test_commit no-progress &&
test_terminal git push --no-progress >output 2>&1 &&
test_i18ngrep "^To http" output &&
- test_i18ngrep ! "^Writing objects"
+ test_i18ngrep ! "^Writing objects" output
'
test_expect_success 'push --progress shows progress to non-tty' '
test_refs master HEAD@{1}
'
+test_expect_success 'push options keep quoted characters intact (direct)' '
+ mk_repo_pair &&
+ git -C upstream config receive.advertisePushOptions true &&
+ test_commit -C workbench one &&
+ git -C workbench push --push-option="\"embedded quotes\"" up master &&
+ echo "\"embedded quotes\"" >expect &&
+ test_cmp expect upstream/.git/hooks/pre-receive.push_options
+'
+
. "$TEST_DIRECTORY"/lib-httpd.sh
start_httpd
-test_expect_success 'push option denied properly by http server' '
+# set up http repository for fetching/pushing, with push options config
+# bool set to $1
+mk_http_pair () {
test_when_finished "rm -rf test_http_clone" &&
- test_when_finished "rm -rf \"$HTTPD_DOCUMENT_ROOT_PATH\"/upstream.git" &&
+ test_when_finished 'rm -rf "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git' &&
mk_repo_pair &&
- git -C upstream config receive.advertisePushOptions false &&
+ git -C upstream config receive.advertisePushOptions "$1" &&
git -C upstream config http.receivepack true &&
cp -R upstream/.git "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git &&
- git clone "$HTTPD_URL"/smart/upstream test_http_clone &&
+ git clone "$HTTPD_URL"/smart/upstream test_http_clone
+}
+
+test_expect_success 'push option denied properly by http server' '
+ mk_http_pair false &&
test_commit -C test_http_clone one &&
test_must_fail git -C test_http_clone push --push-option=asdf origin master 2>actual &&
test_i18ngrep "the receiving end does not support push options" actual &&
'
test_expect_success 'push options work properly across http' '
- test_when_finished "rm -rf test_http_clone" &&
- test_when_finished "rm -rf \"$HTTPD_DOCUMENT_ROOT_PATH\"/upstream.git" &&
- mk_repo_pair &&
- git -C upstream config receive.advertisePushOptions true &&
- git -C upstream config http.receivepack true &&
- cp -R upstream/.git "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git &&
- git clone "$HTTPD_URL"/smart/upstream test_http_clone &&
+ mk_http_pair true &&
test_commit -C test_http_clone one &&
git -C test_http_clone push origin master &&
test_cmp expect actual
'
+test_expect_success 'push options keep quoted characters intact (http)' '
+ mk_http_pair true &&
+
+ test_commit -C test_http_clone one &&
+ git -C test_http_clone push --push-option="\"embedded quotes\"" origin master &&
+ echo "\"embedded quotes\"" >expect &&
+ test_cmp expect "$HTTPD_DOCUMENT_ROOT_PATH"/upstream.git/hooks/pre-receive.push_options
+'
+
stop_httpd
test_done
submodule update sub
'
+test_expect_success 'GIT_REDACT_COOKIES redacts cookies' '
+ rm -rf clone &&
+ echo "Set-Cookie: Foo=1" >cookies &&
+ echo "Set-Cookie: Bar=2" >>cookies &&
+ GIT_TRACE_CURL=true GIT_REDACT_COOKIES=Bar,Baz \
+ git -c "http.cookieFile=$(pwd)/cookies" clone \
+ $HTTPD_URL/smart/repo.git clone 2>err &&
+ grep "Cookie:.*Foo=1" err &&
+ grep "Cookie:.*Bar=<redacted>" err &&
+ ! grep "Cookie:.*Bar=2" err
+'
+
+test_expect_success 'GIT_REDACT_COOKIES handles empty values' '
+ rm -rf clone &&
+ echo "Set-Cookie: Foo=" >cookies &&
+ GIT_TRACE_CURL=true GIT_REDACT_COOKIES=Foo \
+ git -c "http.cookieFile=$(pwd)/cookies" clone \
+ $HTTPD_URL/smart/repo.git clone 2>err &&
+ grep "Cookie:.*Foo=<redacted>" err
+'
+
+test_expect_success 'GIT_TRACE_CURL_NO_DATA prevents data from being traced' '
+ rm -rf clone &&
+ GIT_TRACE_CURL=true \
+ git clone $HTTPD_URL/smart/repo.git clone 2>err &&
+ grep "=> Send data" err &&
+
+ rm -rf clone &&
+ GIT_TRACE_CURL=true GIT_TRACE_CURL_NO_DATA=1 \
+ git clone $HTTPD_URL/smart/repo.git clone 2>err &&
+ ! grep "=> Send data" err
+'
+
stop_httpd
test_done
'
test_expect_success 'no-op fetch without "-v" is quiet' '
- (cd clone && git fetch) 2>stderr &&
+ (cd clone && git fetch 2>../stderr) &&
! test -s stderr
'
git init --bare "$repo" &&
git push "$repo" HEAD &&
>"$repo"/git-daemon-export-ok &&
- rm -rf tmp.git &&
GIT_OVERRIDE_VIRTUAL_HOST=localhost \
- git clone --bare "$GIT_DAEMON_URL/interp.git" tmp.git &&
- rm -rf tmp.git &&
+ git ls-remote "$GIT_DAEMON_URL/interp.git" &&
GIT_OVERRIDE_VIRTUAL_HOST=LOCALHOST \
- git clone --bare "$GIT_DAEMON_URL/interp.git" tmp.git
+ git ls-remote "$GIT_DAEMON_URL/interp.git"
'
test_expect_success 'hostname cannot break out of directory' '
- rm -rf tmp.git &&
repo="$GIT_DAEMON_DOCUMENT_ROOT_PATH/../escape.git" &&
git init --bare "$repo" &&
git push "$repo" HEAD &&
>"$repo"/git-daemon-export-ok &&
test_must_fail \
env GIT_OVERRIDE_VIRTUAL_HOST=.. \
- git clone --bare "$GIT_DAEMON_URL/escape.git" tmp.git
+ git ls-remote "$GIT_DAEMON_URL/escape.git"
+'
+
+test_expect_success 'daemon log records all attributes' '
+ cat >expect <<-\EOF &&
+ Extended attribute "host": localhost
+ Extended attribute "protocol": version=1
+ EOF
+ >daemon.log &&
+ GIT_OVERRIDE_VIRTUAL_HOST=localhost \
+ git -c protocol.version=1 \
+ ls-remote "$GIT_DAEMON_URL/interp.git" &&
+ grep -i extended.attribute daemon.log | cut -d" " -f2- >actual &&
+ test_cmp expect actual
+'
+
+test_expect_success FAKENC 'hostname interpolation works after LF-stripping' '
+ {
+ printf "git-upload-pack /interp.git\n\0host=localhost" | packetize
+ printf "0000"
+ } >input &&
+ fake_nc "$GIT_DAEMON_HOST_PORT" <input >output &&
+ depacketize <output >output.raw &&
+
+ # just pick out the value of master, which avoids any protocol
+ # particulars
+ perl -lne "print \$1 if m{^(\\S+) refs/heads/master}" <output.raw >actual &&
+ git -C "$repo" rev-parse master >expect &&
+ test_cmp expect actual
'
stop_git_daemon
)
'
+partial_clone () {
+ SERVER="$1" &&
+ URL="$2" &&
+
+ rm -rf "$SERVER" client &&
+ test_create_repo "$SERVER" &&
+ test_commit -C "$SERVER" one &&
+ HASH1=$(git hash-object "$SERVER/one.t") &&
+ git -C "$SERVER" revert HEAD &&
+ test_commit -C "$SERVER" two &&
+ HASH2=$(git hash-object "$SERVER/two.t") &&
+ test_config -C "$SERVER" uploadpack.allowfilter 1 &&
+ test_config -C "$SERVER" uploadpack.allowanysha1inwant 1 &&
+
+ git clone --filter=blob:limit=0 "$URL" client &&
+
+ git -C client fsck &&
+
+ # Ensure that unneeded blobs are not inadvertently fetched.
+ test_config -C client extensions.partialclone "not a remote" &&
+ test_must_fail git -C client cat-file -e "$HASH1" &&
+
+ # But this blob was fetched, because clone performs an initial checkout
+ git -C client cat-file -e "$HASH2"
+}
+
+test_expect_success 'partial clone' '
+ partial_clone server "file://$(pwd)/server"
+'
+
+test_expect_success 'partial clone: warn if server does not support object filtering' '
+ rm -rf server client &&
+ test_create_repo server &&
+ test_commit -C server one &&
+
+ git clone --filter=blob:limit=0 "file://$(pwd)/server" client 2> err &&
+
+ test_i18ngrep "filtering not recognized by server" err
+'
+
+test_expect_success 'batch missing blob request during checkout' '
+ rm -rf server client &&
+
+ test_create_repo server &&
+ echo a >server/a &&
+ echo b >server/b &&
+ git -C server add a b &&
+
+ git -C server commit -m x &&
+ echo aa >server/a &&
+ echo bb >server/b &&
+ git -C server add a b &&
+ git -C server commit -m x &&
+
+ test_config -C server uploadpack.allowfilter 1 &&
+ test_config -C server uploadpack.allowanysha1inwant 1 &&
+
+ git clone --filter=blob:limit=0 "file://$(pwd)/server" client &&
+
+ # Ensure that there is only one negotiation by checking that there is
+ # only "done" line sent. ("done" marks the end of negotiation.)
+ GIT_TRACE_PACKET="$(pwd)/trace" git -C client checkout HEAD^ &&
+ grep "git> done" trace >done_lines &&
+ test_line_count = 1 done_lines
+'
+
+test_expect_success 'batch missing blob request does not inadvertently try to fetch gitlinks' '
+ rm -rf server client &&
+
+ test_create_repo repo_for_submodule &&
+ test_commit -C repo_for_submodule x &&
+
+ test_create_repo server &&
+ echo a >server/a &&
+ echo b >server/b &&
+ git -C server add a b &&
+ git -C server commit -m x &&
+
+ echo aa >server/a &&
+ echo bb >server/b &&
+ # Also add a gitlink pointing to an arbitrary repository
+ git -C server submodule add "$(pwd)/repo_for_submodule" c &&
+ git -C server add a b c &&
+ git -C server commit -m x &&
+
+ test_config -C server uploadpack.allowfilter 1 &&
+ test_config -C server uploadpack.allowanysha1inwant 1 &&
+
+ # Make sure that it succeeds
+ git clone --filter=blob:limit=0 "file://$(pwd)/server" client
+'
+
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'partial clone using HTTP' '
+ partial_clone "$HTTPD_DOCUMENT_ROOT_PATH/server" "$HTTPD_URL/smart/server"
+'
+
+stop_httpd
+
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='git partial clone'
+
+. ./test-lib.sh
+
+# create a normal "src" repo where we can later create new commits.
+# expect_1.oids will contain a list of the OIDs of all blobs.
+test_expect_success 'setup normal src repo' '
+ echo "{print \$1}" >print_1.awk &&
+ echo "{print \$2}" >print_2.awk &&
+
+ git init src &&
+ for n in 1 2 3 4
+ do
+ echo "This is file: $n" > src/file.$n.txt
+ git -C src add file.$n.txt
+ git -C src commit -m "file $n"
+ git -C src ls-files -s file.$n.txt >>temp
+ done &&
+ awk -f print_2.awk <temp | sort >expect_1.oids &&
+ test_line_count = 4 expect_1.oids
+'
+
+# bare-clone "src" into "srv.bare" for use as our server.
+test_expect_success 'setup bare clone for server' '
+ git clone --bare "file://$(pwd)/src" srv.bare &&
+ git -C srv.bare config --local uploadpack.allowfilter 1 &&
+ git -C srv.bare config --local uploadpack.allowanysha1inwant 1
+'
+
+# do basic partial clone from "srv.bare"
+# confirm we are missing all of the known blobs.
+# confirm partial clone was registered in the local config.
+test_expect_success 'do partial clone 1' '
+ git clone --no-checkout --filter=blob:none "file://$(pwd)/srv.bare" pc1 &&
+ git -C pc1 rev-list HEAD --quiet --objects --missing=print \
+ | awk -f print_1.awk \
+ | sed "s/?//" \
+ | sort >observed.oids &&
+ test_cmp expect_1.oids observed.oids &&
+ test "$(git -C pc1 config --local core.repositoryformatversion)" = "1" &&
+ test "$(git -C pc1 config --local extensions.partialclone)" = "origin" &&
+ test "$(git -C pc1 config --local core.partialclonefilter)" = "blob:none"
+'
+
+# checkout master to force dynamic object fetch of blobs at HEAD.
+test_expect_success 'verify checkout with dynamic object fetch' '
+ git -C pc1 rev-list HEAD --quiet --objects --missing=print >observed &&
+ test_line_count = 4 observed &&
+ git -C pc1 checkout master &&
+ git -C pc1 rev-list HEAD --quiet --objects --missing=print >observed &&
+ test_line_count = 0 observed
+'
+
+# create new commits in "src" repo to establish a blame history on file.1.txt
+# and push to "srv.bare".
+test_expect_success 'push new commits to server' '
+ git -C src remote add srv "file://$(pwd)/srv.bare" &&
+ for x in a b c d e
+ do
+ echo "Mod file.1.txt $x" >>src/file.1.txt
+ git -C src add file.1.txt
+ git -C src commit -m "mod $x"
+ done &&
+ git -C src blame master -- file.1.txt >expect.blame &&
+ git -C src push -u srv master
+'
+
+# (partial) fetch in the partial clone repo from the promisor remote.
+# verify that fetch inherited the filter-spec from the config and DOES NOT
+# have the new blobs.
+test_expect_success 'partial fetch inherits filter settings' '
+ git -C pc1 fetch origin &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+ test_line_count = 5 observed
+'
+
+# force dynamic object fetch using diff.
+# we should only get 1 new blob (for the file in origin/master).
+test_expect_success 'verify diff causes dynamic object fetch' '
+ git -C pc1 diff master..origin/master -- file.1.txt &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+ test_line_count = 4 observed
+'
+
+# force full dynamic object fetch of the file's history using blame.
+# we should get the intermediate blobs for the file.
+test_expect_success 'verify blame causes dynamic object fetch' '
+ git -C pc1 blame origin/master -- file.1.txt >observed.blame &&
+ test_cmp expect.blame observed.blame &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+ test_line_count = 0 observed
+'
+
+# create new commits in "src" repo to establish a history on file.2.txt
+# and push to "srv.bare".
+test_expect_success 'push new commits to server for file.2.txt' '
+ for x in a b c d e f
+ do
+ echo "Mod file.2.txt $x" >>src/file.2.txt
+ git -C src add file.2.txt
+ git -C src commit -m "mod $x"
+ done &&
+ git -C src push -u srv master
+'
+
+# Do FULL fetch by disabling inherited filter-spec using --no-filter.
+# Verify we have all the new blobs.
+test_expect_success 'override inherited filter-spec using --no-filter' '
+ git -C pc1 fetch --no-filter origin &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print >observed &&
+ test_line_count = 0 observed
+'
+
+# create new commits in "src" repo to establish a history on file.3.txt
+# and push to "srv.bare".
+test_expect_success 'push new commits to server for file.3.txt' '
+ for x in a b c d e f
+ do
+ echo "Mod file.3.txt $x" >>src/file.3.txt
+ git -C src add file.3.txt
+ git -C src commit -m "mod $x"
+ done &&
+ git -C src push -u srv master
+'
+
+# Do a partial fetch and then try to manually fetch the missing objects.
+# This can be used as the basis of a pre-command hook to bulk-fetch objects,
+# perhaps combined with a command in dry-run mode.
+test_expect_success 'manual prefetch of missing objects' '
+ git -C pc1 fetch --filter=blob:none origin &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print \
+ | awk -f print_1.awk \
+ | sed "s/?//" \
+ | sort >observed.oids &&
+ test_line_count = 6 observed.oids &&
+ git -C pc1 fetch-pack --stdin "file://$(pwd)/srv.bare" <observed.oids &&
+ git -C pc1 rev-list master..origin/master --quiet --objects --missing=print \
+ | awk -f print_1.awk \
+ | sed "s/?//" \
+ | sort >observed.oids &&
+ test_line_count = 0 observed.oids
+'
+
+test_expect_success 'partial clone with transfer.fsckobjects=1 uses index-pack --fsck-objects' '
+ git init src &&
+ test_commit -C src x &&
+ test_config -C src uploadpack.allowfilter 1 &&
+ test_config -C src uploadpack.allowanysha1inwant 1 &&
+
+ GIT_TRACE="$(pwd)/trace" git -c transfer.fsckobjects=1 \
+ clone --filter="blob:none" "file://$(pwd)/src" dst &&
+ grep "git index-pack.*--fsck-objects" trace
+'
+
+test_done
test_must_fail env GIT_ALLOW_PROTOCOL=http:https \
GIT_SMART_HTTP=0 \
git clone "$HTTPD_URL/ftp-redir/repo.git" 2>stderr &&
- {
- test_i18ngrep "ftp.*disabled" stderr ||
- test_i18ngrep "your curl version is too old"
- }
+ test_i18ngrep -E "(ftp.*disabled|your curl version is too old)" stderr
'
test_expect_success 'curl limits redirects' '
+++ /dev/null
-#!/bin/sh
-
-test_description='--show-all --parents does not rewrite TREESAME commits'
-
-. ./test-lib.sh
-
-test_expect_success 'set up --show-all --parents test' '
- test_commit one foo.txt &&
- commit1=$(git rev-list -1 HEAD) &&
- test_commit two bar.txt &&
- commit2=$(git rev-list -1 HEAD) &&
- test_commit three foo.txt &&
- commit3=$(git rev-list -1 HEAD)
- '
-
-test_expect_success '--parents rewrites TREESAME parents correctly' '
- echo $commit3 $commit1 > expected &&
- echo $commit1 >> expected &&
- git rev-list --parents HEAD -- foo.txt > actual &&
- test_cmp expected actual
- '
-
-test_expect_success '--parents --show-all does not rewrites TREESAME parents' '
- echo $commit3 $commit2 > expected &&
- echo $commit2 $commit1 >> expected &&
- echo $commit1 >> expected &&
- git rev-list --parents --show-all HEAD -- foo.txt > actual &&
- test_cmp expected actual
- '
-
-test_done
rm -f A M N &&
git reset --hard &&
git checkout change+rename &&
- GIT_MERGE_VERBOSITY=3 git merge change | test_i18ngrep "^Skipped B" &&
+ GIT_MERGE_VERBOSITY=3 git merge change >out &&
+ test_i18ngrep "^Skipped B" out &&
git reset --hard HEAD^ &&
git checkout change &&
- GIT_MERGE_VERBOSITY=3 git merge change+rename | test_i18ngrep "^Skipped B"
+ GIT_MERGE_VERBOSITY=3 git merge change+rename >out &&
+ test_i18ngrep "^Skipped B" out
'
test_expect_success 'setup for rename + d/f conflicts' '
test_i18ncmp expect actual
'
+cat >expect <<\EOF
+## b1...origin/master [different]
+EOF
+
+test_expect_success 'status -s -b --no-ahead-behind (diverged from upstream)' '
+ (
+ cd test &&
+ git checkout b1 >/dev/null &&
+ git status -s -b --no-ahead-behind | head -1
+ ) >actual &&
+ test_i18ncmp expect actual
+'
+
+cat >expect <<\EOF
+On branch b1
+Your branch and 'origin/master' have diverged,
+and have 1 and 1 different commits each, respectively.
+EOF
+
+test_expect_success 'status --long --branch' '
+ (
+ cd test &&
+ git checkout b1 >/dev/null &&
+ git status --long -b | head -3
+ ) >actual &&
+ test_i18ncmp expect actual
+'
+
+cat >expect <<\EOF
+On branch b1
+Your branch and 'origin/master' refer to different commits.
+EOF
+
+test_expect_success 'status --long --branch --no-ahead-behind' '
+ (
+ cd test &&
+ git checkout b1 >/dev/null &&
+ git status --long -b --no-ahead-behind | head -2
+ ) >actual &&
+ test_i18ncmp expect actual
+'
+
cat >expect <<\EOF
## b5...brokenbase [gone]
EOF
check_describe tags/c --all c
check_describe heads/branch_A --all --match='branch_*' branch_A
+test_expect_success 'describe complains about tree object' '
+ test_must_fail git describe HEAD^{tree}
+'
+
+test_expect_success 'describe complains about missing object' '
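+ # $_z40 is the all-zero SHA-1, which names no object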
+ test_must_fail git describe $_z40
+'
+
test_done
test_when_finished "git reset --hard" &&
annote=$(git rev-parse annote) &&
- git merge --no-commit $annote &&
+ git merge --no-commit --no-ff $annote &&
{
cat <<-EOF
Merge tag '\''$annote'\''
for i in "--perl --shell" "-s --python" "--python --tcl" "--tcl --perl"; do
test_expect_success "more than one quoting style: $i" "
- git for-each-ref $i 2>&1 | (read line &&
- case \$line in
- \"error: more than one quoting style\"*) : happy;;
- *) false
- esac)
+ test_must_fail git for-each-ref $i 2>err &&
+ grep '^error: more than one quoting style' err
"
done
'git diff-tree -r -M --name-status HEAD^ HEAD | \
grep "^R100..*path1/COPYING..*path0/COPYING"'
+test_expect_success \
+ 'mv --dry-run does not move file' \
+ 'git mv -n path0/COPYING MOVED &&
+ test -f path0/COPYING &&
+ test ! -f MOVED'
+
test_expect_success \
'checking -k on non-existing file' \
'git mv -k idontexist path0'
test_cmp expect actual
'
+get_tag_header annotated-tag-edit $commit commit $time >expect
+echo "An edited message" >>expect
+test_expect_success 'set up editor' '
+ write_script fakeeditor <<-\EOF
+ sed -e "s/A message/An edited message/g" <"$1" >"$1-"
+ mv "$1-" "$1"
+ EOF
+'
+test_expect_success \
+ 'creating an annotated tag with -m message --edit should succeed' '
+ GIT_EDITOR=./fakeeditor git tag -m "A message" --edit annotated-tag-edit &&
+ get_tag_msg annotated-tag-edit >actual &&
+ test_cmp expect actual
+'
+
cat >msgfile <<EOF
Another message
in a file.
test_cmp expect actual
'
+get_tag_header file-annotated-tag-edit $commit commit $time >expect
+sed -e "s/Another message/Another edited message/g" msgfile >>expect
+test_expect_success 'set up editor' '
+ write_script fakeeditor <<-\EOF
+ sed -e "s/Another message/Another edited message/g" <"$1" >"$1-"
+ mv "$1-" "$1"
+ EOF
+'
+test_expect_success \
+ 'creating an annotated tag with -F messagefile --edit should succeed' '
+ GIT_EDITOR=./fakeeditor git tag -F msgfile --edit file-annotated-tag-edit &&
+ get_tag_msg file-annotated-tag-edit >actual &&
+ test_cmp expect actual
+'
+
cat >inputmsg <<EOF
A message from the
standard input
! test -e paginated.out
'
-test_expect_success TTY 'git config uses a pager if configured to' '
- rm -f paginated.out &&
- test_config pager.config true &&
- test_terminal git config --list &&
- test -e paginated.out
-'
-
test_expect_success TTY 'configuration can enable pager (from subdir)' '
rm -f paginated.out &&
mkdir -p subdir &&
! test -e paginated.out
'
+test_expect_success TTY 'git config ignores pager.config when setting' '
+ rm -f paginated.out &&
+ test_terminal git -c pager.config config foo.bar bar &&
+ ! test -e paginated.out
+'
+
+test_expect_success TTY 'git config --edit ignores pager.config' '
+ rm -f paginated.out editor.used &&
+ write_script editor <<-\EOF &&
+ touch editor.used
+ EOF
+ EDITOR=./editor test_terminal git -c pager.config config --edit &&
+ ! test -e paginated.out &&
+ test -e editor.used
+'
+
+test_expect_success TTY 'git config --get ignores pager.config' '
+ rm -f paginated.out &&
+ test_terminal git -c pager.config config --get foo.bar &&
+ ! test -e paginated.out
+'
+
+test_expect_success TTY 'git config --get-urlmatch defaults to paging' '
+ rm -f paginated.out &&
+ test_terminal git -c http."https://foo.com/".bar=foo \
+ config --get-urlmatch http https://foo.com &&
+ test -e paginated.out
+'
+
+test_expect_success TTY 'git config --get-all respects pager.config' '
+ rm -f paginated.out &&
+ test_terminal git -c pager.config=false config --get-all foo.bar &&
+ ! test -e paginated.out
+'
+
+test_expect_success TTY 'git config --list defaults to paging' '
+ rm -f paginated.out &&
+ test_terminal git config --list &&
+ test -e paginated.out
+'
+
# A colored commit log will begin with an appropriate ANSI escape
# for the first color; the text "commit" comes later.
colorful() {
# See <20160803174522.5571-1-pclouds@gmail.com> if you want to know
# more.
+GIT_FORCE_UNTRACKED_CACHE=true
+export GIT_FORCE_UNTRACKED_CACHE
+
sync_mtime () {
find . -type d -ls >/dev/null
}
sleep 1
}
+status_is_clean() {
+ >../status.expect &&
+ git status --porcelain >../status.actual &&
+ test_cmp ../status.expect ../status.actual
+}
+
test_lazy_prereq UNTRACKED_CACHE '
{ git update-index --test-untracked-cache; ret=$?; } &&
test $ret -ne 1
test_cmp ../before ../after
'
+test_expect_success 'teardown worktree' '
+ cd ..
+'
+
+test_expect_success SYMLINKS 'setup worktree for symlink test' '
+ git init worktree-symlink &&
+ cd worktree-symlink &&
+ git config core.untrackedCache true &&
+ mkdir one two &&
+ touch one/file two/file &&
+ git add one/file two/file &&
+ git commit -m"first commit" &&
+ git rm -rf one &&
+ ln -s two one &&
+ git add one &&
+ git commit -m"second commit"
+'
+
+test_expect_success SYMLINKS '"status" after symlink replacement should be clean with UC=true' '
+ git checkout HEAD~ &&
+ status_is_clean &&
+ status_is_clean &&
+ git checkout master &&
+ avoid_racy &&
+ status_is_clean &&
+ status_is_clean
+'
+
+test_expect_success SYMLINKS '"status" after symlink replacement should be clean with UC=false' '
+ git config core.untrackedCache false &&
+ git checkout HEAD~ &&
+ status_is_clean &&
+ status_is_clean &&
+ git checkout master &&
+ avoid_racy &&
+ status_is_clean &&
+ status_is_clean
+'
+
+test_expect_success 'setup worktree for non-symlink test' '
+ git init worktree-non-symlink &&
+ cd worktree-non-symlink &&
+ git config core.untrackedCache true &&
+ mkdir one two &&
+ touch one/file two/file &&
+ git add one/file two/file &&
+ git commit -m"first commit" &&
+ git rm -rf one &&
+ cp two/file one &&
+ git add one &&
+ git commit -m"second commit"
+'
+
+test_expect_success '"status" after file replacement should be clean with UC=true' '
+ git checkout HEAD~ &&
+ status_is_clean &&
+ status_is_clean &&
+ git checkout master &&
+ avoid_racy &&
+ status_is_clean &&
+ test-dump-untracked-cache >../actual &&
+ grep -F "recurse valid" ../actual >../actual.grep &&
+ cat >../expect.grep <<EOF &&
+/ 0000000000000000000000000000000000000000 recurse valid
+/two/ 0000000000000000000000000000000000000000 recurse valid
+EOF
+ status_is_clean &&
+ test_cmp ../expect.grep ../actual.grep
+'
+
+test_expect_success '"status" after file replacement should be clean with UC=false' '
+ git config core.untrackedCache false &&
+ git checkout HEAD~ &&
+ status_is_clean &&
+ status_is_clean &&
+ git checkout master &&
+ avoid_racy &&
+ status_is_clean &&
+ status_is_clean
+'
+
test_done
)
'
+test_expect_success 'verify --[no-]ahead-behind with V2 format' '
+ git checkout master &&
+ test_when_finished "rm -rf sub_repo" &&
+ git clone . sub_repo &&
+ (
+ ## Confirm local master tracks remote master.
+ cd sub_repo &&
+ HUF=$(git rev-parse HEAD) &&
+
+ # Confirm --no-ahead-behind reports traditional branch.ab with 0/0 for equal branches.
+ cat >expect <<-EOF &&
+ # branch.oid $HUF
+ # branch.head master
+ # branch.upstream origin/master
+ # branch.ab +0 -0
+ EOF
+
+ git status --no-ahead-behind --porcelain=v2 --branch --untracked-files=all >actual &&
+ test_cmp expect actual &&
+
+ # Confirm --ahead-behind reports traditional branch.ab with 0/0.
+ cat >expect <<-EOF &&
+ # branch.oid $HUF
+ # branch.head master
+ # branch.upstream origin/master
+ # branch.ab +0 -0
+ EOF
+
+ git status --ahead-behind --porcelain=v2 --branch --untracked-files=all >actual &&
+ test_cmp expect actual &&
+
+ ## Test non-equal ahead/behind.
+ echo xyz >file_xyz &&
+ git add file_xyz &&
+ git commit -m xyz &&
+
+ HUF=$(git rev-parse HEAD) &&
+
+ # Confirm --no-ahead-behind reports branch.ab with ?/? for non-equal branches.
+ cat >expect <<-EOF &&
+ # branch.oid $HUF
+ # branch.head master
+ # branch.upstream origin/master
+ # branch.ab +? -?
+ EOF
+
+ git status --no-ahead-behind --porcelain=v2 --branch --untracked-files=all >actual &&
+ test_cmp expect actual &&
+
+ # Confirm --ahead-behind reports traditional branch.ab with 1/0.
+ cat >expect <<-EOF &&
+ # branch.oid $HUF
+ # branch.head master
+ # branch.upstream origin/master
+ # branch.ab +1 -0
+ EOF
+
+ git status --ahead-behind --porcelain=v2 --branch --untracked-files=all >actual &&
+ test_cmp expect actual
+ )
+'
+
test_expect_success 'create and add submodule, submodule appears clean (A. S...)' '
git checkout master &&
git clone . sub_repo &&
test_description='Test submodules on detached working tree
This test verifies that "git submodule" initialization, update and addition works
-on detahced working trees
+on detached working trees
'
TEST_NO_CREATE_REPO=1
. ./test-lib.sh
+test_expect_success 'set up commits for rebasing' '
+ test_commit root &&
+ test_commit a a a &&
+ test_commit b b b &&
+ git checkout -b rebase-me root &&
+ test_commit rebase-a a aa &&
+ test_commit rebase-b b bb &&
+ for i in $(test_seq 1 13)
+ do
+ test_commit rebase-$i c $i
+ done &&
+ git checkout master &&
+
+ cat >rebase-todo <<-EOF
+ pick $(git rev-parse rebase-a)
+ pick $(git rev-parse rebase-b)
+ fixup $(git rev-parse rebase-1)
+ fixup $(git rev-parse rebase-2)
+ pick $(git rev-parse rebase-3)
+ fixup $(git rev-parse rebase-4)
+ squash $(git rev-parse rebase-5)
+ reword $(git rev-parse rebase-6)
+ squash $(git rev-parse rebase-7)
+ fixup $(git rev-parse rebase-8)
+ fixup $(git rev-parse rebase-9)
+ edit $(git rev-parse rebase-10)
+ squash $(git rev-parse rebase-11)
+ squash $(git rev-parse rebase-12)
+ edit $(git rev-parse rebase-13)
+ EOF
+'
+
test_expect_success 'with no hook' '
echo "foo" > file &&
echo "#!$SHELL_PATH" > "$HOOK"
cat >> "$HOOK" <<'EOF'
-if test "$2" = commit; then
- source=$(git rev-parse "$3")
+GIT_DIR=$(git rev-parse --git-dir)
+if test -d "$GIT_DIR/rebase-merge"
+then
+ rebasing=1
else
- source=${2-default}
+ rebasing=0
fi
-if test "$GIT_EDITOR" = :; then
- sed -e "1s/.*/$source (no editor)/" "$1" > msg.tmp
+
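+# format the just-finished todo entry as "[<cmd> <subject>]",
+# e.g. "[pick rebase-3]"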
+get_last_cmd () {
+ tail -n1 "$GIT_DIR/rebase-merge/done" | {
+ read cmd id _
+ git log --pretty="[$cmd %s]" -n1 $id
+ }
+}
+
+if test "$2" = commit
+then
+ if test $rebasing = 1
+ then
+ source="$3"
+ else
+ source=$(git rev-parse "$3")
+ fi
else
- sed -e "1s/.*/$source/" "$1" > msg.tmp
+ source=${2-default}
+fi
+test "$GIT_EDITOR" = : && source="$source (no editor)"
+
+if test $rebasing = 1
+then
+ echo "$source $(get_last_cmd)" >"$1"
+else
+ sed -e "1s/.*/$source/" "$1" >msg.tmp
+ mv msg.tmp "$1"
fi
-mv msg.tmp "$1"
exit 0
EOF
chmod +x "$HOOK"
test "$(git log -1 --pretty=format:%s)" = "merge"
'
+test_rebase () {
+ expect=$1 &&
+ mode=$2 &&
+ test_expect_$expect C_LOCALE_OUTPUT "with hook (rebase $mode)" '
+ test_when_finished "\
+ git rebase --abort
+ git checkout -f master
+ git branch -D tmp" &&
+ git checkout -b tmp rebase-me &&
+ GIT_SEQUENCE_EDITOR="cp rebase-todo" &&
+ GIT_EDITOR="\"$FAKE_EDITOR\"" &&
+ (
+ export GIT_SEQUENCE_EDITOR GIT_EDITOR &&
+ test_must_fail git rebase $mode b &&
+ echo x >a &&
+ git add a &&
+ test_must_fail git rebase --continue &&
+ echo x >b &&
+ git add b &&
+ git commit &&
+ git rebase --continue &&
+ echo y >a &&
+ git add a &&
+ git commit &&
+ git rebase --continue &&
+ echo y >b &&
+ git add b &&
+ git rebase --continue
+ ) &&
+ if test $mode = -p # reword amended after pick
+ then
+ n=18
+ else
+ n=17
+ fi &&
+ git log --pretty=%s -g -n$n HEAD@{1} >actual &&
+ test_cmp "$TEST_DIRECTORY/t7505/expected-rebase$mode" actual
+ '
+}
+
+test_rebase success -i
+test_rebase success -p
+
+test_expect_success 'with hook (cherry-pick)' '
+ test_when_finished "git checkout -f master" &&
+ git checkout -B other b &&
+ git cherry-pick rebase-1 &&
+ test "$(git log -1 --pretty=format:%s)" = "message (no editor)"
+'
+
+test_expect_success 'with hook and editor (cherry-pick)' '
+ test_when_finished "git checkout -f master" &&
+ git checkout -B other b &&
+ git cherry-pick -e rebase-1 &&
+ test "$(git log -1 --pretty=format:%s)" = merge
+'
+
cat > "$HOOK" <<'EOF'
#!/bin/sh
exit 1
'
+test_expect_success C_LOCALE_OUTPUT 'with failing hook (cherry-pick)' '
+ test_when_finished "git checkout -f master" &&
+ git checkout -B other b &&
+ test_must_fail git cherry-pick rebase-1 2>actual &&
+ test $(grep -c prepare-commit-msg actual) = 1
+'
+
test_done
--- /dev/null
+message [edit rebase-13]
+message (no editor) [edit rebase-13]
+message [squash rebase-12]
+message (no editor) [squash rebase-11]
+default [edit rebase-10]
+message (no editor) [edit rebase-10]
+message [fixup rebase-9]
+message (no editor) [fixup rebase-8]
+message (no editor) [squash rebase-7]
+message [reword rebase-6]
+message [squash rebase-5]
+message (no editor) [fixup rebase-4]
+message (no editor) [pick rebase-3]
+message (no editor) [fixup rebase-2]
+message (no editor) [fixup rebase-1]
+merge [pick rebase-b]
+message [pick rebase-a]
--- /dev/null
+message [edit rebase-13]
+message (no editor) [edit rebase-13]
+message [squash rebase-12]
+message (no editor) [squash rebase-11]
+default [edit rebase-10]
+message (no editor) [edit rebase-10]
+message [fixup rebase-9]
+message (no editor) [fixup rebase-8]
+message (no editor) [squash rebase-7]
+HEAD [reword rebase-6]
+message (no editor) [reword rebase-6]
+message [squash rebase-5]
+message (no editor) [fixup rebase-4]
+message (no editor) [pick rebase-3]
+message (no editor) [fixup rebase-2]
+message (no editor) [fixup rebase-1]
+merge [pick rebase-b]
+message [pick rebase-a]
test_cmp expect actual
'
+test_expect_success UNTRACKED_CACHE 'ignore .git changes when invalidating UNTR' '
+ test_create_repo dot-git &&
+ (
+ cd dot-git &&
+ mkdir -p .git/hooks &&
+ : >tracked &&
+ : >modified &&
+ mkdir dir1 &&
+ : >dir1/tracked &&
+ : >dir1/modified &&
+ mkdir dir2 &&
+ : >dir2/tracked &&
+ : >dir2/modified &&
+ write_integration_script &&
+ git config core.fsmonitor .git/hooks/fsmonitor-test &&
+ git update-index --untracked-cache &&
+ git update-index --fsmonitor &&
+ GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace-before" \
+ git status &&
+ test-dump-untracked-cache >../before
+ ) &&
+ cat >>dot-git/.git/hooks/fsmonitor-test <<-\EOF &&
+ printf ".git\0"
+ printf ".git/index\0"
+ printf "dir1/.git\0"
+ printf "dir1/.git/index\0"
+ EOF
+ (
+ cd dot-git &&
+ GIT_TRACE_UNTRACKED_STATS="$TRASH_DIRECTORY/trace-after" \
+ git status &&
+ test-dump-untracked-cache >../after
+ ) &&
+ grep "directory invalidation" trace-before >>before &&
+ grep "directory invalidation" trace-after >>after &&
+ # UNTR extension unchanged, dir invalidation count unchanged
+ test_cmp before after
+'
+
test_done
test_cmp expected actual
'
+test_expect_success 'merge annotated/signed tag w/o tracking' '
+ test_when_finished "rm -rf dst; git tag -d anno1" &&
+ git tag -a -m "anno c1" anno1 c1 &&
+ git init dst &&
+ git rev-parse c1 >dst/expect &&
+ (
+ # c0 fast-forwards to c1, but because this repository
+ # is not a "downstream" whose refs/tags follow along with
+ # tags from the "upstream", this pull defaults to --no-ff
+ cd dst &&
+ git pull .. c0 &&
+ git pull .. anno1 &&
+ git rev-parse HEAD^2 >actual &&
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'merge annotated/signed tag w/ tracking' '
+ test_when_finished "rm -rf dst; git tag -d anno1" &&
+ git tag -a -m "anno c1" anno1 c1 &&
+ git init dst &&
+ git rev-parse c1 >dst/expect &&
+ (
+ # c0 fast-forwards to c1, and because this repository
+ # is a "downstream" whose refs/tags follow along with
+ # tags from the "upstream", this pull defaults to --ff
+ cd dst &&
+ git remote add origin .. &&
+ git pull origin c0 &&
+ git fetch origin &&
+ git merge anno1 &&
+ git rev-parse HEAD >actual &&
+ test_cmp expect actual
+ )
+'
+
test_expect_success GPG 'merge --ff-only tag' '
git reset --hard c0 &&
git commit --allow-empty -m "A newer commit" &&
git tag -f -s -m "A newer commit" signed &&
git reset --hard c0 &&
- EDITOR=false git merge --no-edit signed &&
+ EDITOR=false git merge --no-edit --no-ff signed &&
git rev-parse signed^0 >expect &&
git rev-parse HEAD^2 >actual &&
test_cmp expect actual
git mv c1.c other.c &&
git commit -m rename &&
cp important other.c &&
- git merge c1a &&
+ test_must_fail git merge c1a >out &&
+ test_i18ngrep "Refusing to lose dirty file at other.c" out &&
+ test_path_is_file other.c~HEAD &&
+ test $(git hash-object other.c~HEAD) = $(git rev-parse c1a:c1.c) &&
test_cmp important other.c
'
+++ /dev/null
-#!/bin/sh
-
-test_description='compare address parsing with and without Mail::Address'
-. ./test-lib.sh
-
-if ! test_have_prereq PERL; then
- skip_all='skipping perl interface tests, perl not available'
- test_done
-fi
-
-perl -MTest::More -e 0 2>/dev/null || {
- skip_all="Perl Test::More unavailable, skipping test"
- test_done
-}
-
-perl -MMail::Address -e 0 2>/dev/null || {
- skip_all="Perl Mail::Address unavailable, skipping test"
- test_done
-}
-
-test_external_has_tap=1
-
-test_external_without_stderr \
- 'Perl address parsing function' \
- perl "$TEST_DIRECTORY"/t9000/test.pl
-
-test_done
+++ /dev/null
-#!/usr/bin/perl
-use lib (split(/:/, $ENV{GITPERLLIB}));
-
-use 5.008;
-use warnings;
-use strict;
-
-use Test::More qw(no_plan);
-use Mail::Address;
-
-BEGIN { use_ok('Git') }
-
-my @success_list = (q[Jane],
- q[jdoe@example.com],
- q[<jdoe@example.com>],
- q[Jane <jdoe@example.com>],
- q[Jane Doe <jdoe@example.com>],
- q["Jane" <jdoe@example.com>],
- q["Doe, Jane" <jdoe@example.com>],
- q["Jane@:;\>.,()<Doe" <jdoe@example.com>],
- q[Jane!#$%&'*+-/=?^_{|}~Doe' <jdoe@example.com>],
- q["<jdoe@example.com>"],
- q["Jane jdoe@example.com"],
- q[Jane Doe <jdoe @ example.com >],
- q[Jane Doe < jdoe@example.com >],
- q[Jane @ Doe @ Jane @ Doe],
- q["Jane, 'Doe'" <jdoe@example.com>],
- q['Doe, "Jane' <jdoe@example.com>],
- q["Jane" "Do"e <jdoe@example.com>],
- q["Jane' Doe" <jdoe@example.com>],
- q["Jane Doe <jdoe@example.com>" <jdoe@example.com>],
- q["Jane\" Doe" <jdoe@example.com>],
- q[Doe, jane <jdoe@example.com>],
- q["Jane Doe <jdoe@example.com>],
- q['Jane 'Doe' <jdoe@example.com>],
- q[Jane@:;\.,()<>Doe <jdoe@example.com>],
- q[Jane <jdoe@example.com> Doe],
- q[<jdoe@example.com> Jane Doe]);
-
-my @known_failure_list = (q[Jane\ Doe <jdoe@example.com>],
- q["Doe, Ja"ne <jdoe@example.com>],
- q["Doe, Katarina" Jane <jdoe@example.com>],
- q[Jane jdoe@example.com],
- q["Jane "Kat"a" ri"na" ",Doe" <jdoe@example.com>],
- q[Jane Doe],
- q[Jane "Doe <jdoe@example.com>"],
- q[\"Jane Doe <jdoe@example.com>],
- q[Jane\"\" Doe <jdoe@example.com>],
- q['Jane "Katarina\" \' Doe' <jdoe@example.com>]);
-
-foreach my $str (@success_list) {
- my @expected = map { $_->format } Mail::Address->parse("$str");
- my @actual = Git::parse_mailboxes("$str");
- is_deeply(\@expected, \@actual, qq[same output : $str]);
-}
-
-TODO: {
- local $TODO = "known breakage";
- foreach my $str (@known_failure_list) {
- my @expected = map { $_->format } Mail::Address->parse("$str");
- my @actual = Git::parse_mailboxes("$str");
- is_deeply(\@expected, \@actual, qq[same output : $str]);
- }
-}
-
-my $is_passing = eval { Test::More->is_passing };
-exit($is_passing ? 0 : 1) unless $@ =~ /Can't locate object method/;
# May be altered later in the test
PREREQ="PERL"
+replace_variable_fields () {
+ sed -e "s/^\(Date:\).*/\1 DATE-STRING/" \
+ -e "s/^\(Message-Id:\).*/\1 MESSAGE-ID-STRING/" \
+ -e "s/^\(X-Mailer:\).*/\1 X-MAILER-STRING/"
+}
+
test_expect_success $PREREQ 'prepare reference tree' '
echo "1A quick brown fox jumps over the" >file &&
echo "lazy dog" >>file &&
test_cmp expected-cc commandline1
'
+test_expect_success $PREREQ 'setup fake get_maintainer.pl script for cc trailer' "
+ write_script expected-cc-script.sh <<-EOF
+ echo 'One Person <one@example.com> (supporter:THIS (FOO/bar))'
+ echo 'Two Person <two@example.com> (maintainer:THIS THING)'
+ echo 'Third List <three@example.com> (moderated list:THIS THING (FOO/bar))'
+ echo '<four@example.com> (moderated list:FOR THING)'
+ echo 'five@example.com (open list:FOR THING (FOO/bar))'
+ echo 'six@example.com (open list)'
+ EOF
+"
+
+test_expect_success $PREREQ 'cc trailer with get_maintainer.pl output' '
+ clean_fake_sendmail &&
+ git send-email -1 --to=recipient@example.com \
+ --cc-cmd=./expected-cc-script.sh \
+ --smtp-server="$(pwd)/fake.sendmail" &&
+ test_cmp expected-cc commandline1
+'
+
test_expect_success $PREREQ 'setup expect' "
cat >expected-show-all-headers <<\EOF
0001-Second.patch
X-Mailer: X-MAILER-STRING
In-Reply-To: <unique-message-id@example.com>
References: <unique-message-id@example.com>
+Reply-To: Reply <reply@example.com>
Result: OK
EOF
--dry-run \
--suppress-cc=sob \
--from="Example <from@example.com>" \
+ --reply-to="Reply <reply@example.com>" \
--to=to@example.com \
--cc=cc@example.com \
--bcc=bcc@example.com \
--in-reply-to="<unique-message-id@example.com>" \
--smtp-server relay.example.com \
- $patches |
- sed -e "s/^\(Date:\).*/\1 DATE-STRING/" \
- -e "s/^\(Message-Id:\).*/\1 MESSAGE-ID-STRING/" \
- -e "s/^\(X-Mailer:\).*/\1 X-MAILER-STRING/" \
+ $patches | replace_variable_fields \
>actual-show-all-headers &&
test_cmp expected-show-all-headers actual-show-all-headers
'
EOF
"
-replace_variable_fields () {
- sed -e "s/^\(Date:\).*/\1 DATE-STRING/" \
- -e "s/^\(Message-Id:\).*/\1 MESSAGE-ID-STRING/" \
- -e "s/^\(X-Mailer:\).*/\1 X-MAILER-STRING/"
-}
-
test_suppression () {
git send-email \
--dry-run \
git push gitcvs.git >/dev/null &&
cd cvswork &&
GIT_CONFIG="$git_config" cvs update &&
- rm -f failures &&
for i in merge no-lf empty really-empty; do
- GIT_CONFIG="$git_config" cvs update -p "$i" >$i.out
- test_cmp $i.out ../$i >>failures 2>&1
- done &&
- test -z "$(cat failures)"
+ GIT_CONFIG="$git_config" cvs update -p "$i" >$i.out &&
+ test_cmp $i.out ../$i || return 1
+ done
'
cd "$WORKDIR"
'
test_expect_success 'cvs diff -r v1 -u' '
- ( cd cvswork && cvs -f diff -r v1 -u ) >cvsDiff.out 2>cvs.log &&
+ ( cd cvswork && cvs -f diff -r v1 -u >../cvsDiff.out 2>../cvs.log ) &&
test_must_be_empty cvsDiff.out &&
test_must_be_empty cvs.log
'
test_expect_success 'cvs diff -N -r v2 -u' '
- ( cd cvswork && ! cvs -f diff -N -r v2 -u ) >cvsDiff.out 2>cvs.log &&
+ ( cd cvswork && ! cvs -f diff -N -r v2 -u >../cvsDiff.out 2>../cvs.log ) &&
test_must_be_empty cvs.log &&
test -s cvsDiff.out &&
check_diff cvsDiff.out v2 v1 >check_diff.out 2>&1
'
test_expect_success 'cvs diff -N -r v2 -r v1.2' '
- ( cd cvswork && ! cvs -f diff -N -r v2 -r v1.2 -u ) >cvsDiff.out 2>cvs.log &&
+ ( cd cvswork && ! cvs -f diff -N -r v2 -r v1.2 -u >../cvsDiff.out 2>../cvs.log ) &&
test_must_be_empty cvs.log &&
test -s cvsDiff.out &&
check_diff cvsDiff.out v2 v1.2 >check_diff.out 2>&1
'
test_expect_success 'check [cvswork3] diff' '
- ( cd cvswork3 && ! cvs -f diff -N -u ) >"$WORKDIR/cvsDiff.out" 2>cvs.log &&
+ ( cd cvswork3 && ! cvs -f diff -N -u >"$WORKDIR/cvsDiff.out" 2>../cvs.log ) &&
test_must_be_empty cvs.log &&
test -s cvsDiff.out &&
test $(grep Index: cvsDiff.out | wc -l) = 3 &&
test_expect_success 'double dash "git checkout"' '
test_completion "git checkout --" <<-\EOF
--quiet Z
+ --detach Z
+ --track Z
+ --orphan=Z
--ours Z
--theirs Z
- --track Z
- --no-track Z
--merge Z
- --conflict=
- --orphan Z
+ --conflict=Z
--patch Z
- --detach Z
--ignore-skip-worktree-bits Z
+ --ignore-other-worktrees Z
--recurse-submodules Z
+ --progress Z
+ --no-track Z
--no-recurse-submodules Z
EOF
'
test_cmp expected "$actual"
'
-test_expect_success 'prompt - hide if pwd ignored - inside gitdir (stdout)' '
+test_expect_success 'prompt - hide if pwd ignored - inside gitdir' '
printf " (GIT_DIR!)" >expected &&
(
GIT_PS1_HIDE_IF_PWD_IGNORED=y &&
cd .git &&
- __git_ps1 >"$actual" 2>/dev/null
- ) &&
- test_cmp expected "$actual"
-'
-
-test_expect_success 'prompt - hide if pwd ignored - inside gitdir (stderr)' '
- printf "" >expected &&
- (
- GIT_PS1_HIDE_IF_PWD_IGNORED=y &&
- cd .git &&
- __git_ps1 >/dev/null 2>"$actual"
+ __git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
#
# Writing this as "! git checkout ../outerspace" is wrong, because
# the failure could be due to a segv. We want a controlled failure.
+#
+# Accepts the following options:
+#
+# ok=<signal-name>[,<...>]:
+#     Don't treat an exit caused by the given signal as an error.
+#     Multiple signals can be specified as a comma-separated list.
+# Currently recognized signal names are: sigpipe, success.
+# (Don't use 'success', use 'test_might_fail' instead.)
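+#
+# For example ("git some-command" stands in for a real command):
+#
+#   test_must_fail git checkout ../outerspace
+#   test_must_fail ok=sigpipe git some-command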
test_must_fail () {
case "$1" in
_test_ok=
;;
esac
- "$@"
+ "$@" 2>&7
exit_code=$?
if test $exit_code -eq 0 && ! list_contains "$_test_ok" success
then
- echo >&2 "test_must_fail: command succeeded: $*"
+ echo >&4 "test_must_fail: command succeeded: $*"
return 1
elif test_match_signal 13 $exit_code && list_contains "$_test_ok" sigpipe
then
return 0
elif test $exit_code -gt 129 && test $exit_code -le 192
then
- echo >&2 "test_must_fail: died by signal $(($exit_code - 128)): $*"
+ echo >&4 "test_must_fail: died by signal $(($exit_code - 128)): $*"
return 1
elif test $exit_code -eq 127
then
- echo >&2 "test_must_fail: command not found: $*"
+ echo >&4 "test_must_fail: command not found: $*"
return 1
elif test $exit_code -eq 126
then
- echo >&2 "test_must_fail: valgrind error: $*"
+ echo >&4 "test_must_fail: valgrind error: $*"
return 1
fi
return 0
-}
+} 7>&2 2>&4
# Similar to test_must_fail, but tolerates success, too. This is
# meant to be used in contexts like:
#
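#	test_expect_success 'some command works without configuration' '
#		test_might_fail git config --unset all.configuration &&
#		do something
#	'
#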
# Writing "git config --unset all.configuration || :" would be wrong,
# because we want to notice if it fails due to segv.
+#
+# Accepts the same options as test_must_fail.
test_might_fail () {
- test_must_fail ok=success "$@"
-}
+ test_must_fail ok=success "$@" 2>&7
+} 7>&2 2>&4
# Similar to test_must_fail and test_might_fail, but check that a
# given command exited with a given exit code. Meant to be used as:
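#
#	test_expect_success 'Merge with d/f conflicts' '
#		test_expect_code 1 git merge "merge msg" B master
#	'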
test_expect_code () {
want_code=$1
shift
- "$@"
+ "$@" 2>&7
exit_code=$?
if test $exit_code = $want_code
then
return 0
fi
- echo >&2 "test_expect_code: command exited with $exit_code, we wanted $want_code $*"
+ echo >&4 "test_expect_code: command exited with $exit_code, we wanted $want_code $*"
return 1
-}
+} 7>&2 2>&4
# test_cmp is a helper function to compare actual and expected output.
# You can use it like:
cmp "$@"
}
+# Use this instead of test_cmp to compare files that contain expected and
+# actual output from git commands that can be translated. When running
+# under GETTEXT_POISON this pretends that the command produced expected
+# results.
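+#
+# For example, "test_i18ncmp expect actual" replaces "test_cmp expect
+# actual" when the compared output may be translated.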
+test_i18ncmp () {
+ test -n "$GETTEXT_POISON" || test_cmp "$@"
+}
+
+# Use this instead of "grep expected-string actual" to see if the
+# output from a git command that can be translated either contains an
+# expected string, or does not contain an unwanted one. When running
+# under GETTEXT_POISON this pretends that the command produced expected
+# results.
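+#
+# For example (file names are illustrative):
+#
+#	test_i18ngrep "Refusing to lose dirty file" out
+#	test_i18ngrep ! "fatal" err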
+test_i18ngrep () {
+ eval "last_arg=\${$#}"
+
+ test -f "$last_arg" ||
+ error "bug in the test script: test_i18ngrep requires a file" \
+ "to read as the last parameter"
+
+ if test $# -lt 2 ||
+ { test "x!" = "x$1" && test $# -lt 3 ; }
+ then
+ error "bug in the test script: too few parameters to test_i18ngrep"
+ fi
+
+ if test -n "$GETTEXT_POISON"
+ then
+ # pretend success
+ return 0
+ fi
+
+ if test "x!" = "x$1"
+ then
+ shift
+ ! grep "$@" && return 0
+
+ echo >&4 "error: '! grep $@' did find a match in:"
+ else
+ grep "$@" && return 0
+
+ echo >&4 "error: 'grep $@' didn't find a match in:"
+ fi
+
+ if test -s "$last_arg"
+ then
+ cat >&4 "$last_arg"
+ else
+ echo >&4 "<File '$last_arg' is empty>"
+ fi
+
+ return 1
+}
+
# Call any command "$@" but be more verbose about its
# failure. This is handy for commands like "test" which do
# not output anything when they fail.
verbose () {
"$@" && return 0
- echo >&2 "command failed: $(git rev-parse --sq-quote "$@")"
+ echo >&4 "command failed: $(git rev-parse --sq-quote "$@")"
return 1
}
# otherwise.
test_must_be_empty () {
- if test -s "$1"
+ if ! test -f "$1"
+ then
+ echo "'$1' is missing"
+ return 1
+ elif test -s "$1"
then
echo "'$1' is not empty, it contains:"
cat "$1"
}
perl () {
- command "$PERL_PATH" "$@"
-}
+ command "$PERL_PATH" "$@" 2>&7
+} 7>&2 2>&4
# Is the value one of the various ways to spell a boolean true/false?
test_normalize_bool () {
shift
;;
*)
- "$@"
+ "$@" 2>&7
exit
;;
esac
done
)
-}
+} 7>&2 2>&4
# Returns true if the numeric exit code in "$2" represents the expected signal
# in "$1". Signals should be given numerically.
GIT_CEILING_DIRECTORIES=$(pwd) &&
export GIT_CEILING_DIRECTORIES &&
cd non-repo &&
- "$@"
+ "$@" 2>&7
)
+} 7>&2 2>&4
+
+# convert stdin to pktline representation; note that empty input becomes an
+# empty packet, not a flush packet (for that you can just print 0000 yourself).
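+#
+# For example, "printf hello | packetize" emits "0009hello": a 4-byte
+# hex length prefix (4 + 5) followed by the payload.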
+packetize() {
+ cat >packetize.tmp &&
+ len=$(wc -c <packetize.tmp) &&
+ printf '%04x%s' "$(($len + 4))" &&
+ cat packetize.tmp &&
+ rm -f packetize.tmp
+}
+
+# Parse the input as a series of pktlines, writing the result to stdout.
+# Sideband markers are removed automatically, and the output is routed to
+# stderr if appropriate.
+#
+# NUL bytes are converted to "\\0" for ease of parsing with text tools.
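+#
+# For example, "printf 0009hello | depacketize" emits "hello", and a
+# "0000" packet is printed as the line "FLUSH".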
+depacketize () {
+ perl -e '
+ while (read(STDIN, $len, 4) == 4) {
+ if ($len eq "0000") {
+ print "FLUSH\n";
+ } else {
+ read(STDIN, $buf, hex($len) - 4);
+ $buf =~ s/\0/\\0/g;
+ if ($buf =~ s/^[\x2\x3]//) {
+ print STDERR $buf;
+ } else {
+ $buf =~ s/^\x1//;
+ print $buf;
+ }
+ }
+ }
+ '
}
my @vars = grep(/^GIT_/ && !/^GIT_($ok)/o, @env);
print join("\n", @vars);
')
+unset XDG_CACHE_HOME
unset XDG_CONFIG_HOME
unset GITPERLLIB
GIT_AUTHOR_EMAIL=author@example.com
GIT_TEST_CHAIN_LINT=0
shift ;;
-x)
- trace=t
+ # Some test scripts can't be reliably traced with '-x',
+ # unless the test is run with a Bash version supporting
+ # BASH_XTRACEFD (introduced in Bash v4.1). Check whether
+ # this test is marked as such, and ignore '-x' if it
+ # isn't executed with a suitable Bash version.
+ if test -z "$test_untraceable" || {
+ test -n "$BASH_VERSION" && {
+ test ${BASH_VERSINFO[0]} -gt 4 || {
+ test ${BASH_VERSINFO[0]} -eq 4 &&
+ test ${BASH_VERSINFO[1]} -ge 1
+ }
+ }
+ }
+ then
+ trace=t
+ else
+ echo >&2 "warning: ignoring -x; '$0' is untraceable without BASH_XTRACEFD"
+ fi
shift ;;
--verbose-log)
verbose_log=t
fi
fi
-GITPERLLIB="$GIT_BUILD_DIR"/perl/blib/lib:"$GIT_BUILD_DIR"/perl/blib/arch/auto/Git
+GITPERLLIB="$GIT_BUILD_DIR"/perl/build/lib
export GITPERLLIB
test -d "$GIT_BUILD_DIR"/templates/blt || {
error "You haven't built things yet, have you?"
test_set_prereq C_LOCALE_OUTPUT
fi
-# Use this instead of test_cmp to compare files that contain expected and
-# actual output from git commands that can be translated. When running
-# under GETTEXT_POISON this pretends that the command produced expected
-# results.
-test_i18ncmp () {
- test -n "$GETTEXT_POISON" || test_cmp "$@"
-}
-
-# Use this instead of "grep expected-string actual" to see if the
-# output from a git command that can be translated either contains an
-# expected string, or does not contain an unwanted one. When running
-# under GETTEXT_POISON this pretends that the command produced expected
-# results.
-test_i18ngrep () {
- if test -n "$GETTEXT_POISON"
- then
- : # pretend success
- elif test "x!" = "x$1"
- then
- shift
- ! grep "$@"
- else
- grep "$@"
- fi
-}
-
test_lazy_prereq PIPE '
# test whether the filesystem supports FIFOs
test_have_prereq !MINGW,!CYGWIN &&
test -n "$GIT_TEST_LONG"
'
+test_lazy_prereq EXPENSIVE_ON_WINDOWS '
+ test_have_prereq EXPENSIVE || test_have_prereq !MINGW,!CYGWIN
+'
+
test_lazy_prereq USR_BIN_TIME '
test -x /usr/bin/time
'
name_to_report ?
name_to_report :
find_unique_abbrev(oid->hash, DEFAULT_ABBREV),
- typename(type));
+ type_name(type));
buf = read_sha1_file(oid->hash, &type, &size);
if (!buf)
return tempfile;
}
-struct tempfile *mks_tempfile_sm(const char *template, int suffixlen, int mode)
+struct tempfile *mks_tempfile_sm(const char *filename_template, int suffixlen, int mode)
{
struct tempfile *tempfile = new_tempfile();
- strbuf_add_absolute_path(&tempfile->filename, template);
+ strbuf_add_absolute_path(&tempfile->filename, filename_template);
tempfile->fd = git_mkstemps_mode(tempfile->filename.buf, suffixlen, mode);
if (tempfile->fd < 0) {
deactivate_tempfile(tempfile);
return tempfile;
}
-struct tempfile *mks_tempfile_tsm(const char *template, int suffixlen, int mode)
+struct tempfile *mks_tempfile_tsm(const char *filename_template, int suffixlen, int mode)
{
struct tempfile *tempfile = new_tempfile();
const char *tmpdir;
if (!tmpdir)
tmpdir = "/tmp";
- strbuf_addf(&tempfile->filename, "%s/%s", tmpdir, template);
+ strbuf_addf(&tempfile->filename, "%s/%s", tmpdir, filename_template);
tempfile->fd = git_mkstemps_mode(tempfile->filename.buf, suffixlen, mode);
if (tempfile->fd < 0) {
deactivate_tempfile(tempfile);
return tempfile;
}
-struct tempfile *xmks_tempfile_m(const char *template, int mode)
+struct tempfile *xmks_tempfile_m(const char *filename_template, int mode)
{
struct tempfile *tempfile;
struct strbuf full_template = STRBUF_INIT;
- strbuf_add_absolute_path(&full_template, template);
+ strbuf_add_absolute_path(&full_template, filename_template);
tempfile = mks_tempfile_m(full_template.buf, mode);
if (!tempfile)
die_errno("Unable to create temporary file '%s'",
*/
/* See "mks_tempfile functions" above. */
-extern struct tempfile *mks_tempfile_sm(const char *template,
+extern struct tempfile *mks_tempfile_sm(const char *filename_template,
int suffixlen, int mode);
/* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile_s(const char *template,
+static inline struct tempfile *mks_tempfile_s(const char *filename_template,
int suffixlen)
{
- return mks_tempfile_sm(template, suffixlen, 0600);
+ return mks_tempfile_sm(filename_template, suffixlen, 0600);
}
/* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile_m(const char *template, int mode)
+static inline struct tempfile *mks_tempfile_m(const char *filename_template, int mode)
{
- return mks_tempfile_sm(template, 0, mode);
+ return mks_tempfile_sm(filename_template, 0, mode);
}
/* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile(const char *template)
+static inline struct tempfile *mks_tempfile(const char *filename_template)
{
- return mks_tempfile_sm(template, 0, 0600);
+ return mks_tempfile_sm(filename_template, 0, 0600);
}
/* See "mks_tempfile functions" above. */
-extern struct tempfile *mks_tempfile_tsm(const char *template,
+extern struct tempfile *mks_tempfile_tsm(const char *filename_template,
int suffixlen, int mode);
/* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile_ts(const char *template,
+static inline struct tempfile *mks_tempfile_ts(const char *filename_template,
int suffixlen)
{
- return mks_tempfile_tsm(template, suffixlen, 0600);
+ return mks_tempfile_tsm(filename_template, suffixlen, 0600);
}
/* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile_tm(const char *template, int mode)
+static inline struct tempfile *mks_tempfile_tm(const char *filename_template, int mode)
{
- return mks_tempfile_tsm(template, 0, mode);
+ return mks_tempfile_tsm(filename_template, 0, mode);
}
/* See "mks_tempfile functions" above. */
-static inline struct tempfile *mks_tempfile_t(const char *template)
+static inline struct tempfile *mks_tempfile_t(const char *filename_template)
{
- return mks_tempfile_tsm(template, 0, 0600);
+ return mks_tempfile_tsm(filename_template, 0, 0600);
}
/* See "mks_tempfile functions" above. */
-extern struct tempfile *xmks_tempfile_m(const char *template, int mode);
+extern struct tempfile *xmks_tempfile_m(const char *filename_template, int mode);
/* See "mks_tempfile functions" above. */
-static inline struct tempfile *xmks_tempfile(const char *template)
+static inline struct tempfile *xmks_tempfile(const char *filename_template)
{
- return xmks_tempfile_m(template, 0600);
+ return xmks_tempfile_m(filename_template, 0600);
}
/*
{
strbuf_complete_line(buf);
trace_write(key, buf->buf, buf->len);
- strbuf_release(buf);
}
static void trace_vprintf_fl(const char *file, int line, struct trace_key *key,
strbuf_vaddf(&buf, format, ap);
print_trace_line(key, &buf);
+ strbuf_release(&buf);
}
static void trace_argv_vprintf_fl(const char *file, int line,
strbuf_vaddf(&buf, format, ap);
- sq_quote_argv(&buf, argv, 0);
+ sq_quote_argv_pretty(&buf, argv);
print_trace_line(&trace_default_key, &buf);
+ strbuf_release(&buf);
}
void trace_strbuf_fl(const char *file, int line, struct trace_key *key,
strbuf_addbuf(&buf, data);
print_trace_line(key, &buf);
+ strbuf_release(&buf);
}
static void trace_performance_vprintf_fl(const char *file, int line,
}
print_trace_line(&trace_perf_key, &buf);
+ strbuf_release(&buf);
}
#ifndef HAVE_VARIADIC_MACROS
atexit(print_command_performance_atexit);
strbuf_reset(&command_line);
- sq_quote_argv(&command_line, argv, 0);
+ sq_quote_argv_pretty(&command_line, argv);
command_start_time = getnanotime();
}
static struct trailer_item *trailer_from_arg(struct arg_item *arg_tok)
{
- struct trailer_item *new = xcalloc(sizeof(*new), 1);
- new->token = arg_tok->token;
- new->value = arg_tok->value;
+ struct trailer_item *new_item = xcalloc(sizeof(*new_item), 1);
+ new_item->token = arg_tok->token;
+ new_item->value = arg_tok->value;
arg_tok->token = arg_tok->value = NULL;
free_arg_item(arg_tok);
- return new;
+ return new_item;
}
static void add_arg_to_input_list(struct trailer_item *on_tok,
static struct trailer_item *add_trailer_item(struct list_head *head, char *tok,
char *val)
{
- struct trailer_item *new = xcalloc(sizeof(*new), 1);
- new->token = tok;
- new->value = val;
- list_add_tail(&new->list, head);
- return new;
+ struct trailer_item *new_item = xcalloc(sizeof(*new_item), 1);
+ new_item->token = tok;
+ new_item->value = val;
+ list_add_tail(&new_item->list, head);
+ return new_item;
}
static void add_arg_item(struct list_head *arg_head, char *tok, char *val,
const struct conf_info *conf,
const struct new_trailer_item *new_trailer_item)
{
- struct arg_item *new = xcalloc(sizeof(*new), 1);
- new->token = tok;
- new->value = val;
- duplicate_conf(&new->conf, conf);
+ struct arg_item *new_item = xcalloc(sizeof(*new_item), 1);
+ new_item->token = tok;
+ new_item->value = val;
+ duplicate_conf(&new_item->conf, conf);
if (new_trailer_item) {
if (new_trailer_item->where != WHERE_DEFAULT)
- new->conf.where = new_trailer_item->where;
+ new_item->conf.where = new_trailer_item->where;
if (new_trailer_item->if_exists != EXISTS_DEFAULT)
- new->conf.if_exists = new_trailer_item->if_exists;
+ new_item->conf.if_exists = new_trailer_item->if_exists;
if (new_trailer_item->if_missing != MISSING_DEFAULT)
- new->conf.if_missing = new_trailer_item->if_missing;
+ new_item->conf.if_missing = new_trailer_item->if_missing;
}
- list_add_tail(&new->list, arg_head);
+ list_add_tail(&new_item->list, arg_head);
}
static void process_command_line_args(struct list_head *arg_head,
static FILE *create_in_place_tempfile(const char *file)
{
struct stat st;
- struct strbuf template = STRBUF_INIT;
+ struct strbuf filename_template = STRBUF_INIT;
const char *tail;
FILE *outfile;
/* Create temporary file in the same directory as the original */
tail = strrchr(file, '/');
if (tail != NULL)
- strbuf_add(&template, file, tail - file + 1);
- strbuf_addstr(&template, "git-interpret-trailers-XXXXXX");
+ strbuf_add(&filename_template, file, tail - file + 1);
+ strbuf_addstr(&filename_template, "git-interpret-trailers-XXXXXX");
- trailers_tempfile = xmks_tempfile_m(template.buf, st.st_mode);
- strbuf_release(&template);
+ trailers_tempfile = xmks_tempfile_m(filename_template.buf, st.st_mode);
+ strbuf_release(&filename_template);
outfile = fdopen_tempfile(trailers_tempfile, "w");
if (!outfile)
die_errno(_("could not open temporary file"));
if (data->transport_options.update_shallow)
set_helper_option(transport, "update-shallow", "true");
+ if (data->transport_options.filter_options.choice)
+ set_helper_option(
+ transport, "filter",
+ data->transport_options.filter_options.filter_spec);
+
if (data->fetch)
return fetch_with_fetch(transport, nr_heads, to_fetch);
} else if (!strcmp(name, TRANS_OPT_DEEPEN_RELATIVE)) {
opts->deepen_relative = !!value;
return 0;
+ } else if (!strcmp(name, TRANS_OPT_FROM_PROMISOR)) {
+ opts->from_promisor = !!value;
+ return 0;
+ } else if (!strcmp(name, TRANS_OPT_NO_DEPENDENTS)) {
+ opts->no_dependents = !!value;
+ return 0;
+ } else if (!strcmp(name, TRANS_OPT_LIST_OBJECTS_FILTER)) {
+ parse_list_objects_filter(&opts->filter_options, value);
+ return 0;
}
return 1;
}
data->options.check_self_contained_and_connected;
args.cloning = transport->cloning;
args.update_shallow = data->options.update_shallow;
+ args.from_promisor = data->options.from_promisor;
+ args.no_dependents = data->options.no_dependents;
+ args.filter_options = data->options.filter_options;
if (!data->got_remote_heads) {
connect_setup(transport, 0);
#include "cache.h"
#include "run-command.h"
#include "remote.h"
+#include "list-objects-filter-options.h"
struct string_list;
unsigned self_contained_and_connected : 1;
unsigned update_shallow : 1;
unsigned deepen_relative : 1;
+ unsigned from_promisor : 1;
+ unsigned no_dependents : 1;
int depth;
const char *deepen_since;
const struct string_list *deepen_not;
const char *uploadpack;
const char *receivepack;
struct push_cas_option *cas;
+ struct list_objects_filter_options filter_options;
};
enum transport_family {
/* Send push certificates */
#define TRANS_OPT_PUSH_CERT "pushcert"
+/* Indicate that these objects are being fetched by a promisor */
+#define TRANS_OPT_FROM_PROMISOR "from-promisor"
+
+/*
+ * Indicate that only the objects wanted need to be fetched, not their
+ * dependents
+ */
+#define TRANS_OPT_NO_DEPENDENTS "no-dependents"
+
+/* Filter objects for partial clone and fetch */
+#define TRANS_OPT_LIST_OBJECTS_FILTER "filter"
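+
+/*
+ * These options are typically requested with transport_set_option();
+ * e.g. (the filter spec here is illustrative):
+ *
+ *	transport_set_option(transport, TRANS_OPT_LIST_OBJECTS_FILTER,
+ *			     "blob:none");
+ */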
+
/**
* Returns 0 if the option was used, non-zero otherwise. Prints a
* message to stderr if the option is not used.
#include "submodule.h"
#include "submodule-config.h"
#include "fsmonitor.h"
+#include "fetch-object.h"
/*
* Error messages expected by scripts out of plumbing commands such as
static struct cache_entry *dup_entry(const struct cache_entry *ce)
{
unsigned int size = ce_size(ce);
- struct cache_entry *new = xmalloc(size);
+ struct cache_entry *new_entry = xmalloc(size);
- memcpy(new, ce, size);
- return new;
+ memcpy(new_entry, ce, size);
+ return new_entry;
}
static void add_entry(struct unpack_trees_options *o,
load_gitmodules_file(index, &state);
enable_delayed_checkout(&state);
+ if (repository_format_partial_clone && o->update && !o->dry_run) {
+ /*
+ * Prefetch the objects that are to be checked out in the loop
+ * below.
+ */
+ struct oid_array to_fetch = OID_ARRAY_INIT;
+ int fetch_if_missing_store = fetch_if_missing;
+ fetch_if_missing = 0;
+ for (i = 0; i < index->cache_nr; i++) {
+ struct cache_entry *ce = index->cache[i];
+ if ((ce->ce_flags & CE_UPDATE) &&
+ !S_ISGITLINK(ce->ce_mode)) {
+ if (!has_object_file(&ce->oid))
+ oid_array_append(&to_fetch, &ce->oid);
+ }
+ }
+ if (to_fetch.nr)
+ fetch_objects(repository_format_partial_clone,
+ &to_fetch);
+ fetch_if_missing = fetch_if_missing_store;
+ }
for (i = 0; i < index->cache_nr; i++) {
struct cache_entry *ce = index->cache[i];
if (!ce)
return;
cache_tree_invalidate_path(o->src_index, ce->name);
- untracked_cache_invalidate_path(o->src_index, ce->name);
+ untracked_cache_invalidate_path(o->src_index, ce->name, 1);
}
/*
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
+#include "list-objects-filter.h"
+#include "list-objects-filter-options.h"
#include "run-command.h"
#include "connect.h"
#include "sigchain.h"
#include "argv-array.h"
#include "prio-queue.h"
#include "protocol.h"
+#include "quote.h"
static const char * const upload_pack_usage[] = {
N_("git upload-pack [<options>] <dir>"),
static int stateless_rpc;
static const char *pack_objects_hook;
+static int filter_capability_requested;
+static int filter_advertise;
+static struct list_objects_filter_options filter_options;
+
static void reset_timeout(void)
{
alarm(timeout);
argv_array_push(&pack_objects.args, "--delta-base-offset");
if (use_include_tag)
argv_array_push(&pack_objects.args, "--include-tag");
+ if (filter_options.filter_spec) {
+ if (pack_objects.use_shell) {
+ struct strbuf buf = STRBUF_INIT;
+ sq_quote_buf(&buf, filter_options.filter_spec);
+ argv_array_pushf(&pack_objects.args, "--filter=%s", buf.buf);
+ strbuf_release(&buf);
+ } else {
+ argv_array_pushf(&pack_objects.args, "--filter=%s",
+ filter_options.filter_spec);
+ }
+ }
pack_objects.in = -1;
pack_objects.out = -1;
deepen_rev_list = 1;
continue;
}
+ if (skip_prefix(line, "filter ", &arg)) {
+ if (!filter_capability_requested)
+ die("git upload-pack: filtering capability not negotiated");
+ parse_list_objects_filter(&filter_options, arg);
+ continue;
+ }
if (!skip_prefix(line, "want ", &arg) ||
get_oid_hex(arg, &oid_buf))
die("git upload-pack: protocol error, "
no_progress = 1;
if (parse_feature_request(features, "include-tag"))
use_include_tag = 1;
+ if (parse_feature_request(features, "filter"))
+ filter_capability_requested = 1;
o = parse_object(&oid_buf);
if (!o) {
struct strbuf symref_info = STRBUF_INIT;
format_symref_info(&symref_info, cb_data);
- packet_write_fmt(1, "%s %s%c%s%s%s%s%s agent=%s\n",
+ packet_write_fmt(1, "%s %s%c%s%s%s%s%s%s agent=%s\n",
oid_to_hex(oid), refname_nons,
0, capabilities,
(allow_unadvertised_object_request & ALLOW_TIP_SHA1) ?
" allow-reachable-sha1-in-want" : "",
stateless_rpc ? " no-done" : "",
symref_info.buf,
+ filter_advertise ? " filter" : "",
git_user_agent_sanitized());
strbuf_release(&symref_info);
} else {
} else if (current_config_scope() != CONFIG_SCOPE_REPO) {
if (!strcmp("uploadpack.packobjectshook", var))
return git_config_string(&pack_objects_hook, var, value);
+ } else if (!strcmp("uploadpack.allowfilter", var)) {
+ filter_advertise = git_config_bool(var, value);
}
return parse_hide_refs_config(var, value, "uploadpack");
}
"|//|\\*\\*|::|[/<>=]="),
IPATTERN("fountain", "^((\\.[^.]|(int|ext|est|int\\.?/ext|i/e)[. ]).*)$",
"[^ \t-]+"),
+PATTERNS("golang",
+ /* Functions */
+ "^[ \t]*(func[ \t]*.*(\\{[ \t]*)?)\n"
+ /* Structs and interfaces */
+ "^[ \t]*(type[ \t].*(struct|interface)[ \t]*(\\{[ \t]*)?)",
+ /* -- */
+ "[a-zA-Z_][a-zA-Z0-9_]*"
+ "|[-+0-9.eE]+i?|0[xX]?[0-9a-fA-F]+i?"
+ "|[-+*/<>%&^|=!:]=|--|\\+\\+|<<=?|>>=?|&\\^=?|&&|\\|\\||<-|\\.{3}"),
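+/*
+ * The golang patterns above pick up lines like "func (t *T) Name() {"
+ * and "type T struct {" as hunk headers.
+ */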
PATTERNS("html", "^[ \t]*(<[Hh][1-6]([ \t].*)?>.*)$",
"[^<>= \t]+"),
PATTERNS("java",
/* Keywords */
"!^[ \t]*(do|while|for|if|else|instanceof|new|return|switch|case|throw|catch|using)\n"
/* Methods and constructors */
- "^[ \t]*(((static|public|internal|private|protected|new|virtual|sealed|override|unsafe)[ \t]+)*[][<>@.~_[:alnum:]]+[ \t]+[<>@._[:alnum:]]+[ \t]*\\(.*\\))[ \t]*$\n"
+ "^[ \t]*(((static|public|internal|private|protected|new|virtual|sealed|override|unsafe|async)[ \t]+)*[][<>@.~_[:alnum:]]+[ \t]+[<>@._[:alnum:]]+[ \t]*\\(.*\\))[ \t]*$\n"
/* Properties */
"^[ \t]*(((static|public|internal|private|protected|new|virtual|sealed|override|unsafe)[ \t]+)*[][<>@.~_[:alnum:]]+[ \t]+[@._[:alnum:]]+)[ \t]*$\n"
/* Type definitions */
static void report_missing(const struct object *obj)
{
fprintf(stderr, "Cannot obtain needed %s %s\n",
- obj->type ? typename(obj->type): "object",
+ obj->type ? type_name(obj->type): "object",
oid_to_hex(&obj->oid));
if (!is_null_oid(¤t_commit_oid))
fprintf(stderr, "while processing commit %s.\n",
}
return error("Unable to determine requirements "
"of type %s for %s",
- typename(obj->type), oid_to_hex(&obj->oid));
+ type_name(obj->type), oid_to_hex(&obj->oid));
}
static int process(struct walker *walker, struct object *obj)
return wt->lock_reason;
}
+/* convenient wrapper to deal with NULL strbuf */
+static void strbuf_addf_gently(struct strbuf *buf, const char *fmt, ...)
+{
+ va_list params;
+
+ if (!buf)
+ return;
+
+ va_start(params, fmt);
+ strbuf_vaddf(buf, fmt, params);
+ va_end(params);
+}
+
+int validate_worktree(const struct worktree *wt, struct strbuf *errmsg,
+ unsigned flags)
+{
+ struct strbuf wt_path = STRBUF_INIT;
+ char *path = NULL;
+ int err, ret = -1;
+
+ strbuf_addf(&wt_path, "%s/.git", wt->path);
+
+ if (is_main_worktree(wt)) {
+ if (is_directory(wt_path.buf)) {
+ ret = 0;
+ goto done;
+ }
+ /*
+		 * A main worktree that uses a .git file to point to
+		 * the repository would make it impossible to know
+		 * where the actual worktree is when this function is
+		 * executed from another worktree, so a .git file is
+		 * not supported there for now.
+ */
+ strbuf_addf_gently(errmsg,
+ _("'%s' at main working tree is not the repository directory"),
+ wt_path.buf);
+ goto done;
+ }
+
+ /*
+ * Make sure "gitdir" file points to a real .git file and that
+ * file points back here.
+ */
+ if (!is_absolute_path(wt->path)) {
+ strbuf_addf_gently(errmsg,
+ _("'%s' file does not contain absolute path to the working tree location"),
+ git_common_path("worktrees/%s/gitdir", wt->id));
+ goto done;
+ }
+
+ if (flags & WT_VALIDATE_WORKTREE_MISSING_OK &&
+ !file_exists(wt->path)) {
+ ret = 0;
+ goto done;
+ }
+
+ if (!file_exists(wt_path.buf)) {
+ strbuf_addf_gently(errmsg, _("'%s' does not exist"), wt_path.buf);
+ goto done;
+ }
+
+ path = xstrdup_or_null(read_gitfile_gently(wt_path.buf, &err));
+ if (!path) {
+ strbuf_addf_gently(errmsg, _("'%s' is not a .git file, error code %d"),
+ wt_path.buf, err);
+ goto done;
+ }
+
+ ret = fspathcmp(path, real_path(git_common_path("worktrees/%s", wt->id)));
+
+ if (ret)
+ strbuf_addf_gently(errmsg, _("'%s' does not point back to '%s'"),
+ wt->path, git_common_path("worktrees/%s", wt->id));
+done:
+ free(path);
+ strbuf_release(&wt_path);
+ return ret;
+}
+
+void update_worktree_location(struct worktree *wt, const char *path_)
+{
+ struct strbuf path = STRBUF_INIT;
+
+ if (is_main_worktree(wt))
+ die("BUG: can't relocate main worktree");
+
+ strbuf_realpath(&path, path_, 1);
+ if (fspathcmp(wt->path, path.buf)) {
+ write_file(git_common_path("worktrees/%s/gitdir", wt->id),
+ "%s/.git", path.buf);
+ free(wt->path);
+ wt->path = strbuf_detach(&path, NULL);
+ }
+ strbuf_release(&path);
+}
+
int is_worktree_being_rebased(const struct worktree *wt,
const char *target)
{
#include "refs.h"
+struct strbuf;
+
struct worktree {
char *path;
char *id;
*/
extern const char *is_worktree_locked(struct worktree *wt);
+#define WT_VALIDATE_WORKTREE_MISSING_OK (1 << 0)
+
+/*
+ * Return zero if the worktree is in good condition. An error message
+ * is returned in "errmsg" if it is not NULL.
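+ *
+ * For example (error handling is illustrative):
+ *
+ *	struct strbuf errmsg = STRBUF_INIT;
+ *	if (validate_worktree(wt, &errmsg, 0))
+ *		die("%s", errmsg.buf);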
+ */
+extern int validate_worktree(const struct worktree *wt,
+ struct strbuf *errmsg,
+ unsigned flags);
+
+/*
+ * Update worktrees/xxx/gitdir with the new path.
+ */
+extern void update_worktree_location(struct worktree *wt,
+ const char *path_);
+
/*
* Free up the memory for worktree(s)
*/
GIT_TEMPLATE_DIR='@@BUILD_DIR@@/templates/blt'
export GIT_TEMPLATE_DIR
fi
-GITPERLLIB='@@BUILD_DIR@@/perl/blib/lib'"${GITPERLLIB:+:$GITPERLLIB}"
+GITPERLLIB='@@BUILD_DIR@@/perl/build/lib'"${GITPERLLIB:+:$GITPERLLIB}"
GIT_TEXTDOMAINDIR='@@BUILD_DIR@@/po/build/locale'
PATH='@@BUILD_DIR@@/bin-wrappers:'"$PATH"
return NULL;
}
-int xmkstemp(char *template)
+int xmkstemp(char *filename_template)
{
int fd;
char origtemplate[PATH_MAX];
- strlcpy(origtemplate, template, sizeof(origtemplate));
+ strlcpy(origtemplate, filename_template, sizeof(origtemplate));
- fd = mkstemp(template);
+ fd = mkstemp(filename_template);
if (fd < 0) {
int saved_errno = errno;
const char *nonrelative_template;
- if (strlen(template) != strlen(origtemplate))
- template = origtemplate;
+ if (strlen(filename_template) != strlen(origtemplate))
+ filename_template = origtemplate;
- nonrelative_template = absolute_path(template);
+ nonrelative_template = absolute_path(filename_template);
errno = saved_errno;
die_errno("Unable to create temporary file '%s'",
nonrelative_template);
static const int num_letters = 62;
uint64_t value;
struct timeval tv;
- char *template;
+ char *filename_template;
size_t len;
int fd, count;
*/
gettimeofday(&tv, NULL);
value = ((size_t)(tv.tv_usec << 16)) ^ tv.tv_sec ^ getpid();
- template = &pattern[len - 6 - suffix_len];
+ filename_template = &pattern[len - 6 - suffix_len];
for (count = 0; count < TMP_MAX; ++count) {
uint64_t v = value;
/* Fill in the random bits. */
- template[0] = letters[v % num_letters]; v /= num_letters;
- template[1] = letters[v % num_letters]; v /= num_letters;
- template[2] = letters[v % num_letters]; v /= num_letters;
- template[3] = letters[v % num_letters]; v /= num_letters;
- template[4] = letters[v % num_letters]; v /= num_letters;
- template[5] = letters[v % num_letters]; v /= num_letters;
+ filename_template[0] = letters[v % num_letters]; v /= num_letters;
+ filename_template[1] = letters[v % num_letters]; v /= num_letters;
+ filename_template[2] = letters[v % num_letters]; v /= num_letters;
+ filename_template[3] = letters[v % num_letters]; v /= num_letters;
+ filename_template[4] = letters[v % num_letters]; v /= num_letters;
+ filename_template[5] = letters[v % num_letters]; v /= num_letters;
fd = open(pattern, O_CREAT | O_EXCL | O_RDWR, mode);
if (fd >= 0)
return git_mkstemps_mode(pattern, 0, mode);
}
-int xmkstemp_mode(char *template, int mode)
+int xmkstemp_mode(char *filename_template, int mode)
{
int fd;
char origtemplate[PATH_MAX];
- strlcpy(origtemplate, template, sizeof(origtemplate));
+ strlcpy(origtemplate, filename_template, sizeof(origtemplate));
- fd = git_mkstemp_mode(template, mode);
+ fd = git_mkstemp_mode(filename_template, mode);
if (fd < 0) {
int saved_errno = errno;
const char *nonrelative_template;
- if (!template[0])
- template = origtemplate;
+ if (!filename_template[0])
+ filename_template = origtemplate;
- nonrelative_template = absolute_path(template);
+ nonrelative_template = absolute_path(filename_template);
errno = saved_errno;
die_errno("Unable to create temporary file '%s'",
nonrelative_template);
s->ignored.strdup_strings = 1;
s->show_branch = -1; /* unspecified */
s->show_stash = 0;
+ s->ahead_behind_flags = AHEAD_BEHIND_UNSPECIFIED;
s->display_comment_prefix = 0;
}
if (!skip_prefix(s->branch, "refs/heads/", &branch_name))
return;
branch = branch_get(branch_name);
- if (!format_tracking_info(branch, &sb))
+ if (!format_tracking_info(branch, &sb, s->ahead_behind_flags))
return;
i = 0;
const char *base;
char *short_base;
const char *branch_name;
- int num_ours, num_theirs;
+ int num_ours, num_theirs, sti;
int upstream_is_gone = 0;
color_fprintf(s->fp, color(WT_STATUS_HEADER, s), "## ");
color_fprintf(s->fp, branch_color_local, "%s", branch_name);
- if (stat_tracking_info(branch, &num_ours, &num_theirs, &base) < 0) {
+ sti = stat_tracking_info(branch, &num_ours, &num_theirs, &base,
+ s->ahead_behind_flags);
+ if (sti < 0) {
if (!base)
goto conclude;
color_fprintf(s->fp, branch_color_remote, "%s", short_base);
free(short_base);
- if (!upstream_is_gone && !num_ours && !num_theirs)
+ if (!upstream_is_gone && !sti)
goto conclude;
color_fprintf(s->fp, header_color, " [");
if (upstream_is_gone) {
color_fprintf(s->fp, header_color, LABEL(N_("gone")));
+ } else if (s->ahead_behind_flags == AHEAD_BEHIND_QUICK) {
+ color_fprintf(s->fp, header_color, LABEL(N_("different")));
} else if (!num_ours) {
color_fprintf(s->fp, header_color, LABEL(N_("behind ")));
color_fprintf(s->fp, branch_color_remote, "%d", num_theirs);
*
* <upstream> ::= the upstream branch name, when set.
*
- * <ahead> ::= integer ahead value, when upstream set
- * and the commit is present (not gone).
- *
- * <behind> ::= integer behind value, when upstream set
- * and commit is present.
+ * <ahead> ::= integer ahead value or '?'.
*
+ * <behind> ::= integer behind value or '?'.
*
* The end-of-line is defined by the -z flag.
*
* <eol> ::= NUL when -z,
* LF when NOT -z.
*
+ * When an upstream is set and present, the 'branch.ab' line will
+ * be printed with the ahead/behind counts for the branch and the
+ * upstream. When AHEAD_BEHIND_QUICK is requested and the branches
+ * are different, '?' will be substituted for the actual count.
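+ * For example, "# branch.ab +1 -2" gives exact counts, while
+ * "# branch.ab +? -?" is emitted under AHEAD_BEHIND_QUICK when the
+ * branches merely differ.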
*/
static void wt_porcelain_v2_print_tracking(struct wt_status *s)
{
/* Lookup stats on the upstream tracking branch, if set. */
branch = branch_get(branch_name);
base = NULL;
- ab_info = (stat_tracking_info(branch, &nr_ahead, &nr_behind, &base) == 0);
+ ab_info = stat_tracking_info(branch, &nr_ahead, &nr_behind,
+ &base, s->ahead_behind_flags);
if (base) {
base = shorten_unambiguous_ref(base, 0);
fprintf(s->fp, "# branch.upstream %s%c", base, eol);
free((char *)base);
- if (ab_info)
- fprintf(s->fp, "# branch.ab +%d -%d%c", nr_ahead, nr_behind, eol);
+ if (ab_info > 0) {
+ /* different */
+ if (nr_ahead || nr_behind)
+ fprintf(s->fp, "# branch.ab +%d -%d%c",
+ nr_ahead, nr_behind, eol);
+ else
+ fprintf(s->fp, "# branch.ab +? -?%c",
+ eol);
+ } else if (!ab_info) {
+ /* same */
+ fprintf(s->fp, "# branch.ab +0 -0%c", eol);
+ }
}
}
#include "string-list.h"
#include "color.h"
#include "pathspec.h"
+#include "remote.h"
struct worktree;
int show_branch;
int show_stash;
int hints;
+ enum ahead_behind_flags ahead_behind_flags;
enum wt_status_format status_format;
unsigned char sha1_commit[GIT_MAX_RAWSZ]; /* when not Initial */