cmds-*.txt
mergetools-*.txt
manpage-base-url.xsl
+SubmittingPatches.txt
API_DOCS = $(patsubst %.txt,%,$(filter-out technical/api-index-skel.txt technical/api-index.txt, $(wildcard technical/api-*.txt)))
SP_ARTICLES += $(API_DOCS)
+TECH_DOCS += SubmittingPatches
TECH_DOCS += technical/hash-function-transition
TECH_DOCS += technical/http-protocol
TECH_DOCS += technical/index-format
ASCIIDOC_CONF =
ASCIIDOC_HTML = xhtml5
ASCIIDOC_DOCBOOK = docbook45
+ASCIIDOC_EXTRA += -acompat-mode
ASCIIDOC_EXTRA += -I. -rasciidoctor-extensions
ASCIIDOC_EXTRA += -alitdd='&\#x2d;&\#x2d;'
DBLATEX_COMMON =
$(RM) *.pdf
$(RM) howto-index.txt howto/*.html doc.dep
$(RM) technical/*.html technical/api-index.txt
+ $(RM) SubmittingPatches.txt
$(RM) $(cmds_txt) $(mergetools_txt) *.made
$(RM) manpage-base-url.xsl
$(patsubst %,%.html,$(API_DOCS) technical/api-index $(TECH_DOCS)): %.html : %.txt asciidoc.conf
$(QUIET_ASCIIDOC)$(TXT_TO_HTML) $*.txt
+SubmittingPatches.txt: SubmittingPatches
+ $(QUIET_GEN) cp $< $@
+
XSLT = docbook.xsl
XSLTOPTS = --xinclude --stringparam html.stylesheet docbook-xsl.css
--- /dev/null
+Git v2.15.1 Release Notes
+=========================
+
+Fixes since v2.15
+-----------------
+
+ * TravisCI build updates.
+
+ * "auto" as a value for the columnar output configuration ought to
+ judge "is the output consumed by humans?" with the same criteria as
+ "auto" for coloured output configuration, i.e. either the standard
+ output stream is going to tty, or a pager is in use. We forgot the
+ latter, which has been fixed.
+
+ * The experimental "color moved lines differently in diff output"
+   feature was buggy around "ignore whitespace changes" edges, which
+ has been corrected.
+
+ * Instead of using custom line comparison and hashing functions to
+ implement "moved lines" coloring in the diff output, use the pair
+ of these functions from lower-layer xdiff/ code.
+
+ * Some codepaths did not check for errors when asking what branch the
+ HEAD points at, which have been fixed.
+
+ * "git commit", after making a commit, did not check for errors when
+   asking on what branch it made the commit, which has been corrected.
+
+ * "git status --ignored -u" did not stop at a working tree of a
+ separate project that is embedded in an ignored directory and
+ listed files in that other project, instead of just showing the
+ directory itself as ignored.
+
+ * A broken access to object databases in recent update to "git grep
+ --recurse-submodules" has been fixed.
+
+ * A recent regression in "git rebase -i" that broke execution of git
+ commands from subdirectories via "exec" insn has been fixed.
+
+ * "git check-ref-format --branch @{-1}" bit a "BUG()" when run
+ outside a repository for obvious reasons; clarify the documentation
+ and make sure we do not even try to expand the at-mark magic in
+ such a case, but still call the validation logic for branch names.
+
+ * Command line completion (in contrib/) update.
+
+ * Descriptions of blame.{showroot,blankboundary,showemail,date}
+ configuration variables have been added to "git config --help".
+
+ * After an error from lstat(), diff_populate_filespec() function
+ sometimes still went ahead and used invalid data in struct stat,
+ which has been fixed.
+
+ * UNC paths are also relevant in Cygwin builds and they are now
+ tested just like Mingw builds.
+
+ * Correct start-up sequence so that a repository could be placed
+ immediately under the root directory again (which was broken at
+ around Git 2.13).
+
+ * The credential helper for libsecret (in contrib/) has been improved
+ to allow possibly prompting the end user to unlock secrets that are
+ currently locked (otherwise the secrets may not be loaded).
+
+ * Updates from GfW project.
+
+ * "git rebase -i" recently started misbehaving when a submodule that
+ is configured with 'submodule.<name>.ignore' is dirty; this has
+ been corrected.
+
+ * Some error messages did not quote filenames shown in them, which have
+ been fixed.
+
+ * Building with NO_LIBPCRE1_JIT did not disable it, which has been fixed.
+
+ * We used to add an empty alternate object database to the system
+ that does not help anything; it has been corrected.
+
+
+Also contains various documentation updates and code clean-ups.
* "git stash save" has been deprecated in favour of "git stash push".
+ * The set of paths output from "git status --ignored" was tied
+ closely with its "--untracked=<mode>" option, but now it can be
+ controlled more flexibly. Most notably, a directory that is
+ ignored because it is listed to be ignored in the ignore/exclude
+ mechanism can be handled differently from a directory that ends up
+ to be ignored only because all files in it are ignored.
+
+ * The remote-helper for talking to MediaWiki has been updated to
+ truncate an overlong pagename so that ".mw" suffix can still be
+ added.
+
+ * The remote-helper for talking to MediaWiki has been updated to
+ work with mediawiki namespaces.
+
+ * The "--format=..." option of "git for-each-ref" learned to show
+ the name of the 'remote' repository and the ref at the remote side
+ that is affected for 'upstream' and 'push' via "%(push:remotename)"
+ and friends.
+
+ * Doc and message updates to teach users "bisect view" is a synonym
+ for "bisect visualize".
+
+ * "git bisect run" that did not specify any command to run used to go
+   ahead and treat all commits to be tested as 'good'. This has
+ been corrected by making the command error out.
+
+ * The SubmittingPatches document has been converted to produce an
+ HTML version via AsciiDoc/Asciidoctor.
+ (merge 049e64aa50 bc/submitting-patches-in-asciidoc later to maint).
+
+ * We learned to talk to watchman to speed up "git status" and other
+ operations that need to see which paths have been modified.
+
Performance, Internal Implementation, Development Support etc.
* Conversion from uchar[20] to struct object_id continues.
* Code cleanup.
- (merge 62a24c8923 rs/hex-to-bytes-cleanup later to maint).
* A single-word "unsigned flags" in the diff options is being split
into a structure with many bitfields.
- (merge 0d1e0e7801 bw/diff-opt-impl-to-bitfields later to maint).
* TravisCI build updates.
- (merge c2154953b8 sg/travis-fixes later to maint).
+ * Parts of a test to drive the long-running content filter interface
+   have been split into their own module, hopefully to eventually become
+ reusable.
+
+ * Drop (perhaps overly cautious) sanity check before using the index
+ read from the filesystem at runtime.
Also contains various documentation updates and code clean-ups.
"auto" for coloured output configuration, i.e. either the standard
output stream is going to tty, or a pager is in use. We forgot the
latter, which has been fixed.
- (merge 965ff23a43 kd/auto-col-with-pager-fix later to maint).
* The experimental "color moved lines differently in diff output"
  feature was buggy around "ignore whitespace changes" edges, which
has been corrected.
- (merge b66b507292 jk/diff-color-moved-fix later to maint).
* Instead of using custom line comparison and hashing functions to
implement "moved lines" coloring in the diff output, use the pair
of these functions from lower-layer xdiff/ code.
- (merge 01be97c2b2 sb/diff-color-moved-use-xdl-recmatch later to maint).
* Some codepaths did not check for errors when asking what branch the
HEAD points at, which have been fixed.
- (merge dbd2b55cb7 jk/misc-resolve-ref-unsafe-fixes later to maint).
* "git commit", after making a commit, did not check for errors when
  asking on what branch it made the commit, which has been corrected.
- (merge c26de08370 ao/check-resolve-ref-unsafe-result later to maint).
* "git status --ignored -u" did not stop at a working tree of a
separate project that is embedded in an ignored directory and
listed files in that other project, instead of just showing the
directory itself as ignored.
- (merge fadb4820c4 js/submodule-in-excluded later to maint).
* A broken access to object databases in recent update to "git grep
--recurse-submodules" has been fixed.
- (merge 9560e6245a bw/grep-recurse-submodules later to maint).
* A recent regression in "git rebase -i" that broke execution of git
commands from subdirectories via "exec" insn has been fixed.
- (merge 09d7b6c6fa jk/rebase-i-exec-gitdir-fix later to maint).
* A (possibly flakey) test fix.
- (merge cff48ccf2a jc/t5601-copy-workaround later to maint).
* "git check-ref-format --branch @{-1}" bit a "BUG()" when run
outside a repository for obvious reasons; clarify the documentation
and make sure we do not even try to expand the at-mark magic in
such a case, but still call the validation logic for branch names.
- (merge 89dd32aedc jc/check-ref-format-oor later to maint).
* "git fetch --recurse-submodules" now knows that submodules can be
moved around in the superproject in addition to getting updated,
and finds the ones that need to be fetched accordingly.
- (merge 4b4acedd61 hv/fetch-moved-submodules-on-demand later to maint).
* Command line completion (in contrib/) update.
- (merge 6357d9d004 tb/complete-checkout later to maint).
* Descriptions of blame.{showroot,blankboundary,showemail,date}
configuration variables have been added to "git config --help".
- (merge de0bc11d13 sb/blame-config-doc later to maint).
* After an error from lstat(), diff_populate_filespec() function
sometimes still went ahead and used invalid data in struct stat,
which has been fixed.
- (merge 10e0ca843d ao/diff-populate-filespec-lstat-errorpath-fix later to maint).
* UNC paths are also relevant in Cygwin builds and they are now
tested just like Mingw builds.
- (merge f21d60b429 ad/5580-unc-tests-on-cygwin later to maint).
* Correct start-up sequence so that a repository could be placed
immediately under the root directory again (which was broken at
around Git 2.13).
- (merge fa4d8c783d js/early-config later to maint).
* The credential helper for libsecret (in contrib/) has been improved
to allow possibly prompting the end user to unlock secrets that are
currently locked (otherwise the secrets may not be loaded).
- (merge 9c109e9bbc dk/libsecret-unlock-to-load-fix later to maint).
* MinGW updates.
- (merge 39bb86b4e5 js/mingw-full-version-in-resources later to maint).
- (merge 601e1e7897 js/wincred-empty-cred later to maint).
- (merge b2f55717c7 js/mingw-redirect-std-handles later to maint).
+
+ * Error checking in "git imap-send" for empty response has been
+ improved.
+ (merge 618ec81abb rs/imap-send-next-arg-fix later to maint).
+
+ * Recent update to the refs infrastructure implementation started
+ rewriting packed-refs file more often than before; this has been
+ optimized again for most trivial cases.
+ (merge 7c6bd25c7d mh/avoid-rewriting-packed-refs later to maint).
+
+ * Some error messages did not quote filenames shown in them, which have
+ been fixed.
+
+ * "git rebase -i" recently started misbehaving when a submodule that
+ is configured with 'submodule.<name>.ignore' is dirty; this has
+ been corrected.
+
+ * Building with NO_LIBPCRE1_JIT did not disable it, which has been fixed.
+
+ * We used to add an empty alternate object database to the system
+ that does not help anything; it has been corrected.
+
+ * Doc update around use of "format-patch --subject-prefix" etc.
+ (merge f6be7edcac ad/submitting-patches-title-decoration later to maint).
+
+ * A fix for an ancient bug in "git apply --ignore-space-change" codepath.
+ (merge 6ce15ce576 rs/apply-fuzzy-match-fix later to maint).
* Other minor doc, test and build updates and code cleanups.
- (merge bab76141da cn/diff-indent-no-longer-is-experimental later to maint).
- (merge 8684dde10d jm/relnotes-2.15-typofix later to maint).
- (merge cd3f8e2fc2 ks/mailmap later to maint).
+ (merge f4e45cb3eb ma/bisect-leakfix later to maint).
+ (merge 4da72644b7 ma/reduce-heads-leakfix later to maint).
+Submitting Patches
+==================
+
+== Guidelines
+
Here are some guidelines for people who want to contribute their code
to this software.
-(0) Decide what to base your work on.
+[[base-branch]]
+=== Decide what to base your work on.
In general, always base your work on the oldest branch that your
change is relevant to.
- - A bugfix should be based on 'maint' in general. If the bug is not
- present in 'maint', base it on 'master'. For a bug that's not yet
- in 'master', find the topic that introduces the regression, and
- base your work on the tip of the topic.
+* A bugfix should be based on `maint` in general. If the bug is not
+ present in `maint`, base it on `master`. For a bug that's not yet
+ in `master`, find the topic that introduces the regression, and
+ base your work on the tip of the topic.
- - A new feature should be based on 'master' in general. If the new
- feature depends on a topic that is in 'pu', but not in 'master',
- base your work on the tip of that topic.
+* A new feature should be based on `master` in general. If the new
+ feature depends on a topic that is in `pu`, but not in `master`,
+ base your work on the tip of that topic.
- - Corrections and enhancements to a topic not yet in 'master' should
- be based on the tip of that topic. If the topic has not been merged
- to 'next', it's alright to add a note to squash minor corrections
- into the series.
+* Corrections and enhancements to a topic not yet in `master` should
+ be based on the tip of that topic. If the topic has not been merged
+ to `next`, it's alright to add a note to squash minor corrections
+ into the series.
- - In the exceptional case that a new feature depends on several topics
- not in 'master', start working on 'next' or 'pu' privately and send
- out patches for discussion. Before the final merge, you may have to
- wait until some of the dependent topics graduate to 'master', and
- rebase your work.
+* In the exceptional case that a new feature depends on several topics
+ not in `master`, start working on `next` or `pu` privately and send
+ out patches for discussion. Before the final merge, you may have to
+ wait until some of the dependent topics graduate to `master`, and
+ rebase your work.
- - Some parts of the system have dedicated maintainers with their own
- repositories (see the section "Subsystems" below). Changes to
- these parts should be based on their trees.
+* Some parts of the system have dedicated maintainers with their own
+ repositories (see the section "Subsystems" below). Changes to
+ these parts should be based on their trees.
-To find the tip of a topic branch, run "git log --first-parent
-master..pu" and look for the merge commit. The second parent of this
+To find the tip of a topic branch, run `git log --first-parent
+master..pu` and look for the merge commit. The second parent of this
commit is the tip of the topic branch.
-(1) Make separate commits for logically separate changes.
+[[separate-commits]]
+=== Make separate commits for logically separate changes.
Unless your patch is really trivial, you should not be sending
out a patch that was generated between your working tree and
to have.
Make sure that you have tests for the bug you are fixing. See
-t/README for guidance.
+`t/README` for guidance.
+[[tests]]
When adding a new feature, make sure that you have new tests to show
the feature triggers the new behavior when it should, and to show the
feature does not trigger when it shouldn't. After any code change, make
more welcomed ("teh -> "the"), preferably submitted as independent
patches separate from other documentation changes.
+[[whitespace-check]]
Oh, another thing. We are picky about whitespaces. Make sure your
changes do not trigger errors with the sample pre-commit hook shipped
-in templates/hooks--pre-commit. To help ensure this does not happen,
-run "git diff --check" on your changes before you commit.
+in `templates/hooks--pre-commit`. To help ensure this does not happen,
+run `git diff --check` on your changes before you commit.
-
-(2) Describe your changes well.
+[[describe-changes]]
+=== Describe your changes well.
The first line of the commit message should be a short description (50
-characters is the soft limit, see DISCUSSION in git-commit(1)), and
-should skip the full stop. It is also conventional in most cases to
+characters is the soft limit, see DISCUSSION in linkgit:git-commit[1]),
+and should skip the full stop. It is also conventional in most cases to
prefix the first line with "area: " where the area is a filename or
identifier for the general area of the code being modified, e.g.
- . doc: clarify distinction between sign-off and pgp-signing
- . githooks.txt: improve the intro section
+* doc: clarify distinction between sign-off and pgp-signing
+* githooks.txt: improve the intro section
-If in doubt which identifier to use, run "git log --no-merges" on the
+If in doubt which identifier to use, run `git log --no-merges` on the
files you are modifying to see the current conventions.
+[[summary-section]]
It's customary to start the remainder of the first line after "area: "
with a lower-case letter. E.g. "doc: clarify...", not "doc:
Clarify...", or "githooks.txt: improve...", not "githooks.txt:
Improve...".
+[[meaningful-message]]
The body should provide a meaningful commit message, which:
- . explains the problem the change tries to solve, i.e. what is wrong
- with the current code without the change.
+. explains the problem the change tries to solve, i.e. what is wrong
+ with the current code without the change.
- . justifies the way the change solves the problem, i.e. why the
- result with the change is better.
+. justifies the way the change solves the problem, i.e. why the
+ result with the change is better.
- . alternate solutions considered but discarded, if any.
+. alternate solutions considered but discarded, if any.
+[[imperative-mood]]
Describe your changes in imperative mood, e.g. "make xyzzy do frotz"
instead of "[This patch] makes xyzzy do frotz" or "[I] changed xyzzy
to do frotz", as if you are giving orders to the codebase to change
without external resources. Instead of giving a URL to a mailing list
archive, summarize the relevant points of the discussion.
+[[commit-reference]]
If you want to reference a previous commit in the history of a stable
branch, use the format "abbreviated sha1 (subject, date)",
with the subject enclosed in a pair of double-quotes, like this:
- Commit f86a374 ("pack-bitmap.c: fix a memleak", 2015-03-30)
- noticed that ...
+....
+ Commit f86a374 ("pack-bitmap.c: fix a memleak", 2015-03-30)
+ noticed that ...
+....
The "Copy commit summary" command of gitk can be used to obtain this
-format, or this invocation of "git show":
+format, or this invocation of `git show`:
- git show -s --date=short --pretty='format:%h ("%s", %ad)' <commit>
+....
+ git show -s --date=short --pretty='format:%h ("%s", %ad)' <commit>
+....
-(3) Generate your patch using Git tools out of your commits.
+[[git-tools]]
+=== Generate your patch using Git tools out of your commits.
Git based diff tools generate unidiff which is the preferred format.
-You do not have to be afraid to use -M option to "git diff" or
-"git format-patch", if your patch involves file renames. The
+You do not have to be afraid to use `-M` option to `git diff` or
+`git format-patch`, if your patch involves file renames. The
receiving end can handle them just fine.
+[[review-patch]]
Please make sure your patch does not add commented out debugging code,
or include any extra files which do not relate to what your patch
is trying to achieve. Make sure to review
your patch after generating it, to ensure accuracy. Before
-sending out, please make sure it cleanly applies to the "master"
+sending out, please make sure it cleanly applies to the `master`
branch head. If you are preparing a work based on "next" branch,
that is fine, but please mark it as such.
-
-(4) Sending your patches.
+[[send-patches]]
+=== Sending your patches.
Learn to use format-patch and send-email if possible. These commands
are optimized for the workflow of sending patches, avoiding many ways
It is a common convention to prefix your subject line with
[PATCH]. This lets people easily distinguish patches from other
-e-mail discussions. Use of additional markers after PATCH and
-the closing bracket to mark the nature of the patch is also
-encouraged. E.g. [PATCH/RFC] is often used when the patch is
-not ready to be applied but it is for discussion, [PATCH v2],
-[PATCH v3] etc. are often seen when you are sending an update to
-what you have previously sent.
-
-"git format-patch" command follows the best current practice to
+e-mail discussions. Use of markers in addition to PATCH within
+the brackets to describe the nature of the patch is also
+encouraged. E.g. [RFC PATCH] (where RFC stands for "request for
+comments") is often used to indicate a patch needs further
+discussion before being accepted, [PATCH v2], [PATCH v3] etc.
+are often seen when you are sending an update to what you have
+previously sent.
+
+The `git format-patch` command follows the best current practice to
format the body of an e-mail message. At the beginning of the
patch should come your commit message, ending with the
Signed-off-by: lines, and a line that consists of three dashes,
you are forwarding a patch from somebody else, optionally, at
the beginning of the e-mail message just before the commit
message starts, you can put a "From: " line to name that person.
+To change the default "[PATCH]" in the subject to "[<text>]", use
+`git format-patch --subject-prefix=<text>`. As a shortcut, you
+can use `--rfc` instead of `--subject-prefix="RFC PATCH"`, or
+`-v <n>` instead of `--subject-prefix="PATCH v<n>"`.
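+
+For example, a second iteration of a series that is still meant for
+discussion rather than application could be prepared with (the branch
+name `my-topic` here is merely an illustration):
+
+....
+	git format-patch --rfc -v2 master..my-topic
+....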
You often want to add additional explanation about the patch,
other than the commit message itself. Place such "cover letter"
Git-notes and inserted automatically following the three-dash
line via `git format-patch --notes`.
+[[attachment]]
Do not attach the patch as a MIME attachment, compressed or not.
Do not let your e-mail client send quoted-printable. Do not let
your e-mail client send format=flowed which would destroy
Exception: If your mailer is mangling patches then someone may ask
you to re-send them using MIME, that is OK.
+[[pgp-signature]]
Do not PGP sign your patch. Most likely, your maintainer or other people on the
list would not have your PGP key and would not bother obtaining it anyway.
Your patch is not judged by who you are; a good patch from an unknown origin
If you really really really really want to do a PGP signed
patch, format it as "multipart/signed", not a text/plain message
-that starts with '-----BEGIN PGP SIGNED MESSAGE-----'. That is
+that starts with `-----BEGIN PGP SIGNED MESSAGE-----`. That is
not a text/plain, it's something else.
Send your patch with "To:" set to the mailing list, with "cc:" listing
people who are involved in the area you are touching (the output from
-"git blame $path" and "git shortlog --no-merges $path" would help to
++git blame _$path_+ and +git shortlog {litdd}no-merges _$path_+ would help to
identify them), to solicit comments and reviews.
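+
+With `git send-email`, that could look like the following (the Cc
+address and the patch file names are merely illustrations):
+
+....
+	git send-email --to=git@vger.kernel.org \
+		--cc=reviewer@example.com 000*.patch
+....
+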
+:1: footnote:[The current maintainer: gitster@pobox.com]
+:2: footnote:[The mailing list: git@vger.kernel.org]
+
After the list reached a consensus that it is a good idea to apply the
-patch, re-send it with "To:" set to the maintainer [*1*] and "cc:" the
-list [*2*] for inclusion.
+patch, re-send it with "To:" set to the maintainer{1} and "cc:" the
+list{2} for inclusion.
-Do not forget to add trailers such as "Acked-by:", "Reviewed-by:" and
-"Tested-by:" lines as necessary to credit people who helped your
+Do not forget to add trailers such as `Acked-by:`, `Reviewed-by:` and
+`Tested-by:` lines as necessary to credit people who helped your
patch.
- [Addresses]
- *1* The current maintainer: gitster@pobox.com
- *2* The mailing list: git@vger.kernel.org
-
-
-(5) Certify your work by adding your "Signed-off-by: " line
+[[sign-off]]
+=== Certify your work by adding your "Signed-off-by: " line
To improve tracking of who did what, we've borrowed the
"sign-off" procedure from the Linux kernel project on patches
the right to pass it on as an open-source patch. The rules are
pretty simple: if you can certify the below D-C-O:
- Developer's Certificate of Origin 1.1
-
- By making a contribution to this project, I certify that:
-
- (a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
- (b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
- (c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
- (d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
+[[dco]]
+.Developer's Certificate of Origin 1.1
+____
+By making a contribution to this project, I certify that:
+
+a. The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+b. The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+c. The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+d. I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+____
then you just add a line saying
- Signed-off-by: Random J Developer <random@developer.example.org>
+....
+ Signed-off-by: Random J Developer <random@developer.example.org>
+....
This line can be automatically added by Git if you run the git-commit
command with the -s option.
place an in-body "From: " line at the beginning to properly attribute
the change to its true author (see (2) above).
+[[real-name]]
Also notice that a real name is used in the Signed-off-by: line. Please
don't hide your real name.
+[[commit-trailers]]
If you like, you can put extra tags at the end:
-1. "Reported-by:" is used to credit someone who found the bug that
- the patch attempts to fix.
-2. "Acked-by:" says that the person who is more familiar with the area
- the patch attempts to modify liked the patch.
-3. "Reviewed-by:", unlike the other tags, can only be offered by the
- reviewer and means that she is completely satisfied that the patch
- is ready for application. It is usually offered only after a
- detailed review.
-4. "Tested-by:" is used to indicate that the person applied the patch
- and found it to have the desired effect.
+. `Reported-by:` is used to credit someone who found the bug that
+ the patch attempts to fix.
+. `Acked-by:` says that the person who is more familiar with the area
+ the patch attempts to modify liked the patch.
+. `Reviewed-by:`, unlike the other tags, can only be offered by the
+ reviewer and means that she is completely satisfied that the patch
+ is ready for application. It is usually offered only after a
+ detailed review.
+. `Tested-by:` is used to indicate that the person applied the patch
+ and found it to have the desired effect.
You can also create your own tag or use one that's in common usage
such as "Thanks-to:", "Based-on-patch-by:", or "Mentored-by:".
-------------------------------------------------
-Subsystems with dedicated maintainers
+== Subsystems with dedicated maintainers
Some parts of the system have dedicated maintainers with their own
repositories.
- - git-gui/ comes from git-gui project, maintained by Pat Thoyts:
+- 'git-gui/' comes from git-gui project, maintained by Pat Thoyts:
- git://repo.or.cz/git-gui.git
+ git://repo.or.cz/git-gui.git
- - gitk-git/ comes from Paul Mackerras's gitk project:
+- 'gitk-git/' comes from Paul Mackerras's gitk project:
- git://ozlabs.org/~paulus/gitk
+ git://ozlabs.org/~paulus/gitk
- - po/ comes from the localization coordinator, Jiang Xin:
+- 'po/' comes from the localization coordinator, Jiang Xin:
https://github.com/git-l10n/git-po/
Patches to these parts should be based on their trees.
-------------------------------------------------
-An ideal patch flow
+[[patch-flow]]
+== An ideal patch flow
Here is an ideal patch flow for this project the current maintainer
suggests to the contributors:
- (0) You come up with an itch. You code it up.
+. You come up with an itch. You code it up.
- (1) Send it to the list and cc people who may need to know about
- the change.
+. Send it to the list and cc people who may need to know about
+ the change.
++
+The people who may need to know are the ones whose code you
+are butchering. These people happen to be the ones who are
+most likely to be knowledgeable enough to help you, but
+they have no obligation to help you (i.e. you ask for help,
+don't demand). +git log -p {litdd} _$area_you_are_modifying_+ would
+help you find out who they are.
- The people who may need to know are the ones whose code you
- are butchering. These people happen to be the ones who are
- most likely to be knowledgeable enough to help you, but
- they have no obligation to help you (i.e. you ask for help,
- don't demand). "git log -p -- $area_you_are_modifying" would
- help you find out who they are.
+. You get comments and suggestions for improvements. You may
+ even get them in a "on top of your change" patch form.
- (2) You get comments and suggestions for improvements. You may
- even get them in a "on top of your change" patch form.
+. Polish, refine, and re-send to the list and the people who
+ spend their time to improve your patch. Go back to step (2).
- (3) Polish, refine, and re-send to the list and the people who
- spend their time to improve your patch. Go back to step (2).
+. The list forms consensus that the last round of your patch is
+ good. Send it to the maintainer and cc the list.
- (4) The list forms consensus that the last round of your patch is
- good. Send it to the maintainer and cc the list.
-
- (5) A topic branch is created with the patch and is merged to 'next',
- and cooked further and eventually graduates to 'master'.
+. A topic branch is created with the patch and is merged to `next`,
+ and cooked further and eventually graduates to `master`.
In any time between the (2)-(3) cycle, the maintainer may pick it up
-from the list and queue it to 'pu', in order to make it easier for
+from the list and queue it to `pu`, in order to make it easier for
people to play with it without having to pick up and apply the patch to
their trees themselves.
-------------------------------------------------
-Know the status of your patch after submission
+[[patch-status]]
+== Know the status of your patch after submission
* You can use Git itself to find out when your patch is merged in
- master. 'git pull --rebase' will automatically skip already-applied
+ master. `git pull --rebase` will automatically skip already-applied
patches, and will let you know. This works only if you rebase on top
of the branch in which your patch has been merged (i.e. it will not
tell you if your patch is merged in pu if you rebase on top of
entitled "What's cooking in git.git" and "What's in git.git" giving
the status of various proposed changes.
---------------------------------------------------
-GitHub-Travis CI hints
+[[travis]]
+== GitHub-Travis CI hints
With an account at GitHub (you can get one for free to work on open
source projects), you can use Travis CI to test your changes on Linux,
Follow these steps for the initial setup:
- (1) Fork https://github.com/git/git to your GitHub account.
- You can find detailed instructions how to fork here:
- https://help.github.com/articles/fork-a-repo/
+. Fork https://github.com/git/git to your GitHub account.
+ You can find detailed instructions how to fork here:
+ https://help.github.com/articles/fork-a-repo/
- (2) Open the Travis CI website: https://travis-ci.org
+. Open the Travis CI website: https://travis-ci.org
- (3) Press the "Sign in with GitHub" button.
+. Press the "Sign in with GitHub" button.
- (4) Grant Travis CI permissions to access your GitHub account.
- You can find more information about the required permissions here:
- https://docs.travis-ci.com/user/github-oauth-scopes
+. Grant Travis CI permissions to access your GitHub account.
+ You can find more information about the required permissions here:
+ https://docs.travis-ci.com/user/github-oauth-scopes
- (5) Open your Travis CI profile page: https://travis-ci.org/profile
+. Open your Travis CI profile page: https://travis-ci.org/profile
- (6) Enable Travis CI builds for your Git fork.
+. Enable Travis CI builds for your Git fork.
After the initial setup, Travis CI will run whenever you push new changes
to your fork of Git on GitHub. You can monitor the test state of all your
-branches here: https://travis-ci.org/<Your GitHub handle>/git/branches
+branches here: https://travis-ci.org/__<Your GitHub handle>__/git/branches
If a branch did not pass all test cases then it is marked with a red
cross. In that case you can click on the failing Travis CI job and
Fix the problem and push your fix to your Git fork. This will trigger
a new Travis CI build to ensure all tests pass.
-
-------------------------------------------------
-MUA specific hints
+[[mua]]
+== MUA specific hints
Some of the patches I receive or pick up from the list share common
patterns of breakage. Please make sure your MUA is set up
properly not to corrupt whitespaces.
-See the DISCUSSION section of git-format-patch(1) for hints on
+See the DISCUSSION section of linkgit:git-format-patch[1] for hints on
checking your patch by mailing it to yourself and applying with
-git-am(1).
+linkgit:git-am[1].
While you are at it, check the resulting commit log message from
a trial run of applying the patch. If what is in the resulting
commit message.
-Pine
-----
+=== Pine
(Johannes Schindelin)
+....
I don't know how many people still use pine, but for those poor
souls it may be good to mention that the quell-flowed-text is
needed for recent versions.
... the "no-strip-whitespace-before-send" option, too. AFAIK it
was introduced in 4.60.
+....
(Linus Torvalds)
+....
And 4.58 needs at least this.
----
diff-tree 8326dd8350be64ac7fc805f6563a1d61ad10d32c (from e886a61f76edf5410573e92e38ce22974f9c40f1)
Author: Linus Torvalds <torvalds@g5.osdl.org>
Date: Mon Aug 15 17:23:51 2005 -0700
+#endif
c |= COMP_EXIT;
break;
-
+....
(Daniel Barkalow)
+....
> A patch to SubmittingPatches, MUA specific help section for
> users of Pine 4.63 would be very much appreciated.
"no-strip-whitespace-before-send" option, unless the option you have is
"strip-whitespace-before-send", in which case you should avoid checking
it.
+....
+=== Thunderbird, KMail, GMail
-Thunderbird, KMail, GMail
--------------------------
-
-See the MUA-SPECIFIC HINTS section of git-format-patch(1).
+See the MUA-SPECIFIC HINTS section of linkgit:git-format-patch[1].
-Gnus
-----
+=== Gnus
-'|' in the *Summary* buffer can be used to pipe the current
+"|" in the `*Summary*` buffer can be used to pipe the current
message to an external program, and this is a handy way to drive
-"git am". However, if the message is MIME encoded, what is
+`git am`. However, if the message is MIME encoded, what is
piped into the program is the representation you see in your
-*Article* buffer after unwrapping MIME. This is often not what
+`*Article*` buffer after unwrapping MIME. This is often not what
you would want for two reasons. It tends to screw up non ASCII
characters (most notably in people's names), and also
-whitespaces (fatal in patches). Running 'C-u g' to display the
-message in raw form before using '|' to run the pipe can work
+whitespaces (fatal in patches). Running "C-u g" to display the
+message in raw form before using "|" to run the pipe can work
around this problem.
8.3 "short" names.
Defaults to `true` on Windows, and `false` elsewhere.
+core.fsmonitor::
+ If set, the value of this variable is used as a command which
+ will identify all files that may have changed since the
+ requested date/time. This information is used to speed up git by
+ avoiding unnecessary processing of files that have not changed.
+ See the "fsmonitor-watchman" section of linkgit:githooks[5].
+
core.trustctime::
If false, the ctime differences between the index and the
working tree are ignored; useful when the inode change time
--text::
Treat all files as text.
+--ignore-cr-at-eol::
+	Ignore carriage-return at the end of line when doing a comparison.
+
--ignore-space-at-eol::
Ignore changes in whitespace at EOL.
git bisect terms [--term-good | --term-bad]
git bisect skip [(<rev>|<range>)...]
git bisect reset [<commit>]
- git bisect visualize
+ git bisect (visualize|view)
git bisect replay <logfile>
git bisect log
git bisect run <cmd>...
Then, use `git bisect <term-old>` and `git bisect <term-new>` instead
of `git bisect good` and `git bisect bad` to mark commits.
-Bisect visualize
-~~~~~~~~~~~~~~~~
+Bisect visualize/view
+~~~~~~~~~~~~~~~~~~~~~
To see the currently remaining suspects in 'gitk', issue the following
-command during the bisection process:
+command during the bisection process (the subcommand `view` can be used
+as an alternative to `visualize`):
------------
$ git bisect visualize
------------
-`view` may also be used as a synonym for `visualize`.
-
If the `DISPLAY` environment variable is not set, 'git log' is used
instead. You can also give command-line options such as `-p` and
`--stat`.
------------
-$ git bisect view --stat
+$ git bisect visualize --stat
------------
Bisect log and bisect replay
(behind), "<>" (ahead and behind), or "=" (in sync). `:track`
also prints "[gone]" whenever unknown upstream ref is
encountered. Append `:track,nobracket` to show tracking
- information without brackets (i.e "ahead N, behind M"). Has
- no effect if the ref does not have tracking information
- associated with it. All the options apart from `nobracket`
- are mutually exclusive, but if used together the last option
- is selected.
+ information without brackets (i.e "ahead N, behind M").
++
+For any remote-tracking branch `%(upstream)`, `%(upstream:remotename)`
+and `%(upstream:remoteref)` refer to the name of the remote and the
+name of the tracked remote ref, respectively. In other words, the
+remote-tracking branch can be updated explicitly and individually by
+using the refspec `%(upstream:remoteref):%(upstream)` to fetch from
+`%(upstream:remotename)`.
++
+Has no effect if the ref does not have tracking information associated
+with it. All the options apart from `nobracket` are mutually exclusive,
+but if used together the last option is selected.
push::
The name of a local ref which represents the `@{push}`
location for the displayed ref. Respects `:short`, `:lstrip`,
- `:rstrip`, `:track`, and `:trackshort` options as `upstream`
- does. Produces an empty string if no `@{push}` ref is
- configured.
+ `:rstrip`, `:track`, `:trackshort`, `:remotename`, and `:remoteref`
+ options as `upstream` does. Produces an empty string if no `@{push}`
+ ref is configured.
HEAD::
'*' if HEAD matches current ref (the checked out branch), ' '
SYNOPSIS
--------
[verse]
-'git ls-files' [-z] [-t] [-v]
+'git ls-files' [-z] [-t] [-v] [-f]
(--[cached|deleted|others|ignored|stage|unmerged|killed|modified])*
(-[c|d|o|i|s|u|k|m])*
[--eol]
that are marked as 'assume unchanged' (see
linkgit:git-update-index[1]).
+-f::
+ Similar to `-t`, but use lowercase letters for files
+ that are marked as 'fsmonitor valid' (see
+ linkgit:git-update-index[1]).
+
--full-name::
When run from a subdirectory, the command usually
outputs paths relative to the current directory. This
`origin/master` may have been rewound and rebuilt, leading to a
history of this shape:
- o---B1
+ o---B2
/
- ---o---o---B2--o---o---o---B (origin/master)
+ ---o---o---B1--o---o---o---B (origin/master)
\
- B3
+ B0
\
- Derived (topic)
+ D0---D1---D (topic)
-where `origin/master` used to point at commits B3, B2, B1 and now it
+where `origin/master` used to point at commits B0, B1, B2 and now it
points at B, and your `topic` branch was started on top of it back
-when `origin/master` was at B3. This mode uses the reflog of
-`origin/master` to find B3 as the fork point, so that the `topic`
-can be rebased on top of the updated `origin/master` by:
+when `origin/master` was at B0, and you built three commits, D0, D1,
+and D, on top of it. Imagine that you now want to rebase the work
+you did on the topic on top of the updated origin/master.
+
+In such a case, `git merge-base origin/master topic` would return the
+parent of B0 in the above picture, but B0^..D is *not* the range of
+commits you would want to replay on top of B (it includes B0, which
+is not what you wrote; it is a commit the other side discarded when
+it moved its tip from B0 to B1).
+
+`git merge-base --fork-point origin/master topic` is designed to
+help in such a case. It takes not only B but also B0, B1, and B2
+(i.e. old tips of the remote-tracking branches your repository's
+reflog knows about) into account to see on which commit your topic
+branch was built and finds B0, allowing you to replay only the
+commits on your topic, excluding the commits the other side later
+discarded.
+
+Hence
$ fork_point=$(git merge-base --fork-point origin/master topic)
+
+will find B0, and
+
$ git rebase --onto origin/master $fork_point topic
+will replay D0, D1 and D on top of B to create a new history of this
+shape:
+
+ o---B2
+ /
+ ---o---o---B1--o---o---o---B (origin/master)
+ \ \
+ B0 D0'--D1'--D' (topic - updated)
+ \
+ D0---D1---D (topic - old)
+
+A caveat is that older reflog entries in your repository may be
+expired by `git gc`. If B0 no longer appears in the reflog of the
+remote-tracking branch `origin/master`, the `--fork-point` mode
+obviously cannot find it and fails, rather than giving a random and
+useless result (such as the parent of B0, which is what the same command
+without the `--fork-point` option gives).
+
+Also, the remote-tracking branch you use the `--fork-point` mode
+with must be the one from whose tip your topic forked. If you forked
+from an older commit than the tip, this mode would not find the fork
+point (imagine in the above sample history B0 did not exist,
+origin/master started at B1, moved to B2 and then B, and you forked
+your topic at origin/master^ when origin/master was B1; the shape of
+the history would be the same as above, without B0, and the parent
+of B1 is what `git merge-base origin/master topic` correctly finds,
+but the `--fork-point` mode will not, because it is not one of the
+commits that used to be at the tip of origin/master).
+
See also
--------
[--chmod=(+|-)x]
[--[no-]assume-unchanged]
[--[no-]skip-worktree]
+ [--[no-]fsmonitor-valid]
[--ignore-submodules]
[--[no-]split-index]
[--[no-|test-|force-]untracked-cache]
+ [--[no-]fsmonitor]
[--really-refresh] [--unresolve] [--again | -g]
[--info-only] [--index-info]
[-z] [--stdin] [--index-version <n>]
set and unset the "skip-worktree" bit for the paths. See
section "Skip-worktree bit" below for more information.
+--[no-]fsmonitor-valid::
+ When one of these flags is specified, the object name recorded
+ for the paths are not updated. Instead, these options
+ set and unset the "fsmonitor valid" bit for the paths. See
+ section "File System Monitor" below for more information.
+
-g::
--again::
Runs 'git update-index' itself on the paths whose index
`--untracked-cache` used to imply `--test-untracked-cache` but
this option would enable the extension unconditionally.
+--fsmonitor::
+--no-fsmonitor::
+	Enable or disable the file system monitor feature. These options
+ take effect whatever the value of the `core.fsmonitor`
+ configuration variable (see linkgit:git-config[1]). But a warning
+ is emitted when the change goes against the configured value, as
+ the configured value will take effect next time the index is
+ read and this will remove the intended effect of the option.
+
\--::
Do not interpret any more arguments as options.
are used, the untracked cache is immediately added to or removed from
the index.
+File System Monitor
+-------------------
+
+This feature is intended to speed up git operations for repos that have
+large working directories.
+
+It enables git to work together with a file system monitor (see the
+"fsmonitor-watchman" section of linkgit:githooks[5]) that can
+inform it as to what files have been modified. This enables git to avoid
+having to lstat() every file to find modified files.
+
+When used in conjunction with the untracked cache, it can further improve
+performance by avoiding the cost of scanning the entire working directory
+looking for new files.
+
+If you want to enable (or disable) this feature, it is easier to use
+the `core.fsmonitor` configuration variable (see
+linkgit:git-config[1]) than using the `--fsmonitor` option to
+`git update-index` in each repository, especially if you want to do so
+across all repositories you use, because you can set the configuration
+variable in your `$HOME/.gitconfig` just once and have it affect all
+repositories you touch.
+
+When the `core.fsmonitor` configuration variable is changed, the
+file system monitor is added to or removed from the index the next time
+a command reads the index. When `--[no-]fsmonitor` are used, the file
+system monitor is immediately added to or removed from the index.
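+
+As a minimal illustration, assuming a suitable hook has already been
+configured via `core.fsmonitor` (see above), the extension can be
+turned on in the current repository with:
+
+------------
+$ git update-index --fsmonitor
+------------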
+
Configuration
-------------
Unsetting the variable, or setting it to empty, "0" or
"false" (case insensitive) disables trace messages.
+`GIT_TRACE_FSMONITOR`::
+ Enables trace messages for the filesystem monitor extension.
+ See `GIT_TRACE` for available trace output options.
+
`GIT_TRACE_PACK_ACCESS`::
Enables trace messages for all accesses to any packs. For each
access, the pack file name and an offset in the pack is
non-zero status causes 'git send-email' to abort before sending any
e-mails.
+fsmonitor-watchman
+~~~~~~~~~~~~~~~~~~
+
+This hook is invoked when the configuration option core.fsmonitor is
+set to .git/hooks/fsmonitor-watchman. It takes two arguments, a version
+(currently 1) and the time in elapsed nanoseconds since midnight,
+January 1, 1970.
+
+The hook should output to stdout the list of all files in the working
+directory that may have changed since the requested time. The logic
+should be inclusive so that it does not miss any potential changes.
+The paths should be relative to the root of the working directory
+and be separated by a single NUL.
+
+It is OK to include files which have not actually changed. All changes
+including newly-created and deleted files should be included. When
+files are renamed, both the old and the new name should be included.
+
+Git will limit what files it checks for changes as well as which
+directories are checked for untracked files based on the path names
+given.
+
+An optimized way to tell git "all files have changed" is to return
+the filename '/'.
+
+The exit status determines whether git will use the data from the
+hook to limit its search. On error, it will fall back to verifying
+all files and folders.
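+
+A minimal sketch of such a hook is shown below; this is not the sample
+hook shipped with Git, and it simply claims that everything may have
+changed, which is always a safe, if unoptimized, answer:
+
+------------
+#!/bin/sh
+# $1 is the hook interface version (currently 1), $2 the time in
+# nanoseconds since midnight, January 1, 1970.
+if test "$1" != 1
+then
+	echo "unsupported fsmonitor hook version '$1'" >&2
+	exit 1
+fi
+# Report "all files may have changed" by printing "/" followed by a NUL.
+printf '/\0'
+------------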
GIT
---
ignore-space-change;;
ignore-all-space;;
ignore-space-at-eol;;
+ignore-cr-at-eol;;
Treats lines with the indicated type of whitespace change as
unchanged for the sake of a three-way merge. Whitespace
changes mixed with other changes to a line are not ignored.
- See also linkgit:git-diff[1] `-b`, `-w`, and
- `--ignore-space-at-eol`.
+ See also linkgit:git-diff[1] `-b`, `-w`,
+ `--ignore-space-at-eol`, and `--ignore-cr-at-eol`.
+
* If 'their' version only introduces whitespace changes to a line,
'our' version is used;
in the previous ewah bitmap.
- One NUL.
+
+== File System Monitor cache
+
+ The file system monitor cache tracks files for which the core.fsmonitor
+ hook has told us about changes. The signature for this extension is
+ { 'F', 'S', 'M', 'N' }.
+
+ The extension starts with
+
+ - 32-bit version number: the current supported version is 1.
+
+ - 64-bit time: the extension data reflects all changes through the given
+ time which is stored as the nanoseconds elapsed since midnight,
+ January 1, 1970.
+
+ - 32-bit bitmap size: the size of the CE_FSMONITOR_VALID bitmap.
+
+ - An ewah bitmap, the n-th bit indicates whether the n-th index entry
+ is not CE_FSMONITOR_VALID.
TEST_PROGRAMS_NEED_X += test-config
TEST_PROGRAMS_NEED_X += test-date
TEST_PROGRAMS_NEED_X += test-delta
+TEST_PROGRAMS_NEED_X += test-drop-caches
TEST_PROGRAMS_NEED_X += test-dump-cache-tree
+TEST_PROGRAMS_NEED_X += test-dump-fsmonitor
TEST_PROGRAMS_NEED_X += test-dump-split-index
TEST_PROGRAMS_NEED_X += test-dump-untracked-cache
TEST_PROGRAMS_NEED_X += test-fake-ssh
LIB_OBJS += exec_cmd.o
LIB_OBJS += fetch-pack.o
LIB_OBJS += fsck.o
+LIB_OBJS += fsmonitor.o
LIB_OBJS += gettext.o
LIB_OBJS += gpg-interface.o
LIB_OBJS += graph.o
static int fuzzy_matchlines(const char *s1, size_t n1,
const char *s2, size_t n2)
{
- const char *last1 = s1 + n1 - 1;
- const char *last2 = s2 + n2 - 1;
- int result = 0;
+ const char *end1 = s1 + n1;
+ const char *end2 = s2 + n2;
/* ignore line endings */
- while ((*last1 == '\r') || (*last1 == '\n'))
- last1--;
- while ((*last2 == '\r') || (*last2 == '\n'))
- last2--;
-
- /* skip leading whitespaces, if both begin with whitespace */
- if (s1 <= last1 && s2 <= last2 && isspace(*s1) && isspace(*s2)) {
- while (isspace(*s1) && (s1 <= last1))
- s1++;
- while (isspace(*s2) && (s2 <= last2))
- s2++;
- }
- /* early return if both lines are empty */
- if ((s1 > last1) && (s2 > last2))
- return 1;
- while (!result) {
- result = *s1++ - *s2++;
- /*
- * Skip whitespace inside. We check for whitespace on
- * both buffers because we don't want "a b" to match
- * "ab"
- */
- if (isspace(*s1) && isspace(*s2)) {
- while (isspace(*s1) && s1 <= last1)
+ while (s1 < end1 && (end1[-1] == '\r' || end1[-1] == '\n'))
+ end1--;
+ while (s2 < end2 && (end2[-1] == '\r' || end2[-1] == '\n'))
+ end2--;
+
+ while (s1 < end1 && s2 < end2) {
+ if (isspace(*s1)) {
+ /*
+ * Skip whitespace. We check on both buffers
+ * because we don't want "a b" to match "ab".
+ */
+ if (!isspace(*s2))
+ return 0;
+ while (s1 < end1 && isspace(*s1))
s1++;
- while (isspace(*s2) && s2 <= last2)
+ while (s2 < end2 && isspace(*s2))
s2++;
- }
- /*
- * If we reached the end on one side only,
- * lines don't match
- */
- if (
- ((s2 > last2) && (s1 <= last1)) ||
- ((s1 > last1) && (s2 <= last2)))
+ } else if (*s1++ != *s2++)
return 0;
- if ((s1 > last1) && (s2 > last2))
- break;
}
- return !result;
+ /* If we reached the end on one side only, lines don't match. */
+ return s1 == end1 && s2 == end2;
}
static void add_line_info(struct image *img, const char *bol, size_t len, unsigned flag)
add_name_decoration(DECORATION_NONE, buf.buf, obj);
p->item = array[i].commit;
- p = p->next;
+ if (i < cnt - 1)
+ p = p->next;
}
- if (p)
- p->next = NULL;
+ free_commit_list(p->next);
+ p->next = NULL;
strbuf_release(&buf);
free(array);
return list;
return best_bisection_sorted(list, nr);
}
-struct commit_list *find_bisection(struct commit_list *list,
- int *reaches, int *all,
- int find_all)
+void find_bisection(struct commit_list **commit_list, int *reaches,
+ int *all, int find_all)
{
int nr, on_list;
- struct commit_list *p, *best, *next, *last;
+ struct commit_list *list, *p, *best, *next, *last;
int *weights;
- show_list("bisection 2 entry", 0, 0, list);
+ show_list("bisection 2 entry", 0, 0, *commit_list);
/*
* Count the number of total and tree-changing items on the
* list, while reversing the list.
*/
- for (nr = on_list = 0, last = NULL, p = list;
+ for (nr = on_list = 0, last = NULL, p = *commit_list;
p;
p = next) {
unsigned flags = p->item->object.flags;
next = p->next;
- if (flags & UNINTERESTING)
+ if (flags & UNINTERESTING) {
+ free(p);
continue;
+ }
p->next = last;
last = p;
if (!(flags & TREESAME))
/* Do the real work of finding bisection commit. */
best = do_find_bisection(list, nr, weights, find_all);
if (best) {
- if (!find_all)
+ if (!find_all) {
+ list->item = best->item;
+ free_commit_list(list->next);
+ best = list;
best->next = NULL;
+ }
*reaches = weight(best);
}
free(weights);
- return best;
+ *commit_list = best;
}
static int register_ref(const char *refname, const struct object_id *oid,
bisect_common(&revs);
- revs.commits = find_bisection(revs.commits, &reaches, &all,
- !!skipped_revs.nr);
+ find_bisection(&revs.commits, &reaches, &all, !!skipped_revs.nr);
revs.commits = managed_skipped(revs.commits, &tried);
if (!revs.commits) {
struct string_list refs_for_removal = STRING_LIST_INIT_NODUP;
for_each_ref_in("refs/bisect", mark_for_removal, (void *) &refs_for_removal);
string_list_append(&refs_for_removal, xstrdup("BISECT_HEAD"));
- result = delete_refs("bisect: remove", &refs_for_removal, REF_NODEREF);
+ result = delete_refs("bisect: remove", &refs_for_removal, REF_NO_DEREF);
refs_for_removal.strdup_strings = 1;
string_list_clear(&refs_for_removal, 0);
unlink_or_warn(git_path_bisect_expected_rev());
#ifndef BISECT_H
#define BISECT_H
-extern struct commit_list *find_bisection(struct commit_list *list,
- int *reaches, int *all,
- int find_all);
+/*
+ * Find bisection. If something is found, `reaches` will be the number of
+ * commits that the best commit reaches. `all` will be the count of
+ * non-SAMETREE commits. If nothing is found, `list` will be NULL.
+ * Otherwise, it will be either all non-SAMETREE commits or the single
+ * best commit, as chosen by `find_all`.
+ */
+extern void find_bisection(struct commit_list **list, int *reaches, int *all,
+ int find_all);
extern struct commit_list *filter_skipped(struct commit_list *list,
struct commit_list **tried,
has_curr_head ? &curr_head : NULL, 0,
UPDATE_REFS_DIE_ON_ERR);
else if (curr_branch)
- delete_ref(NULL, curr_branch, NULL, REF_NODEREF);
+ delete_ref(NULL, curr_branch, NULL, REF_NO_DEREF);
free(curr_branch);
am_destroy(state);
return error(_("'%s' is not a valid term"), term);
if (one_of(term, "help", "start", "skip", "next", "reset",
- "visualize", "replay", "log", "run", "terms", NULL))
+ "visualize", "view", "replay", "log", "run", "terms", NULL))
return error(_("can't use the builtin command '%s' as a term"), term);
/*
}
if (delete_ref(NULL, name, is_null_oid(&oid) ? NULL : &oid,
- REF_NODEREF)) {
+ REF_NO_DEREF)) {
error(remote_branch
? _("Error deleting remote-tracking branch '%s'")
: _("Error deleting branch '%s'"),
/* Nothing to do. */
} else if (opts->force_detach || !new->path) { /* No longer on any branch. */
update_ref(msg.buf, "HEAD", &new->commit->object.oid, NULL,
- REF_NODEREF, UPDATE_REFS_DIE_ON_ERR);
+ REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR);
if (!opts->quiet) {
if (old->path &&
advice_detached_head && !opts->force_detach)
} else if (our) {
struct commit *c = lookup_commit_reference(&our->old_oid);
/* --branch specifies a non-branch (i.e. tags), detach HEAD */
- update_ref(msg, "HEAD", &c->object.oid, NULL, REF_NODEREF,
+ update_ref(msg, "HEAD", &c->object.oid, NULL, REF_NO_DEREF,
UPDATE_REFS_DIE_ON_ERR);
} else if (remote) {
/*
* HEAD points to a branch but we don't know which one.
* Detach HEAD in all these cases.
*/
- update_ref(msg, "HEAD", &remote->old_oid, NULL, REF_NODEREF,
+ update_ref(msg, "HEAD", &remote->old_oid, NULL, REF_NO_DEREF,
UPDATE_REFS_DIE_ON_ERR);
}
}
allow_fast_forward = 0;
}
if (allow_fast_forward)
- parents = reduce_heads(parents);
+ reduce_heads_replace(&parents);
} else {
if (!reflog_msg)
reflog_msg = (whence == FROM_CHERRY_PICK)
head_commit = lookup_commit(head);
if (head_commit)
commit_list_insert(head_commit, &parents);
- parents = reduce_heads(parents);
+ reduce_heads_replace(&parents);
while (parents) {
struct commit *cmit = pop_commit(&parents);
if (keep_cache_objects) {
verify_index_checksum = 1;
+ verify_ce_order = 1;
read_cache();
for (i = 0; i < active_nr; i++) {
unsigned int mode;
static int show_modified;
static int show_killed;
static int show_valid_bit;
+static int show_fsmonitor_bit;
static int line_terminator = '\n';
static int debug_mode;
static int show_eol;
{
static char alttag[4];
- if (tag && *tag && show_valid_bit && (ce->ce_flags & CE_VALID)) {
+ if (tag && *tag && ((show_valid_bit && (ce->ce_flags & CE_VALID)) ||
+ (show_fsmonitor_bit && (ce->ce_flags & CE_FSMONITOR_VALID)))) {
memcpy(alttag, tag, 3);
if (isalpha(tag[0])) {
N_("identify the file status with tags")),
OPT_BOOL('v', NULL, &show_valid_bit,
N_("use lowercase letters for 'assume unchanged' files")),
+ OPT_BOOL('f', NULL, &show_fsmonitor_bit,
+ N_("use lowercase letters for 'fsmonitor clean' files")),
OPT_BOOL('c', "cached", &show_cached,
N_("show cached files in the output (default)")),
OPT_BOOL('d', "deleted", &show_deleted,
for (i = 0; i < exclude_list.nr; i++) {
add_exclude(exclude_list.items[i].string, "", 0, el, --exclude_args);
}
- if (show_tag || show_valid_bit) {
+ if (show_tag || show_valid_bit || show_fsmonitor_bit) {
tag_cached = "H ";
tag_unmerged = "M ";
tag_removed = "R ";
static int show_merge_base(struct commit **rev, int rev_nr, int show_all)
{
- struct commit_list *result;
+ struct commit_list *result, *r;
result = get_merge_bases_many_dirty(rev[0], rev_nr - 1, rev + 1);
if (!result)
return 1;
- while (result) {
- printf("%s\n", oid_to_hex(&result->item->object.oid));
+ for (r = result; r; r = r->next) {
+ printf("%s\n", oid_to_hex(&r->item->object.oid));
if (!show_all)
- return 0;
- result = result->next;
+ break;
}
+ free_commit_list(result);
return 0;
}
static int handle_independent(int count, const char **args)
{
- struct commit_list *revs = NULL;
- struct commit_list *result;
+ struct commit_list *revs = NULL, *rev;
int i;
for (i = count - 1; i >= 0; i--)
commit_list_insert(get_commit_reference(args[i]), &revs);
- result = reduce_heads(revs);
- if (!result)
+ reduce_heads_replace(&revs);
+
+ if (!revs)
return 1;
- while (result) {
- printf("%s\n", oid_to_hex(&result->item->object.oid));
- result = result->next;
- }
+ for (rev = revs; rev; rev = rev->next)
+ printf("%s\n", oid_to_hex(&rev->item->object.oid));
+
+ free_commit_list(revs);
return 0;
}
static int handle_octopus(int count, const char **args, int show_all)
{
struct commit_list *revs = NULL;
- struct commit_list *result;
+ struct commit_list *result, *rev;
int i;
for (i = count - 1; i >= 0; i--)
commit_list_insert(get_commit_reference(args[i]), &revs);
- result = reduce_heads(get_octopus_merge_bases(revs));
+ result = get_octopus_merge_bases(revs);
+ free_commit_list(revs);
+ reduce_heads_replace(&result);
if (!result)
return 1;
- while (result) {
- printf("%s\n", oid_to_hex(&result->item->object.oid));
+ for (rev = result; rev; rev = rev->next) {
+ printf("%s\n", oid_to_hex(&rev->item->object.oid));
if (!show_all)
- return 0;
- result = result->next;
+ break;
}
+ free_commit_list(result);
return 0;
}
/* Find what parents to record by checking independent ones. */
parents = reduce_heads(remoteheads);
+ free_commit_list(remoteheads);
remoteheads = NULL;
remotes = &remoteheads;
if (delete_ref(NULL, "NOTES_MERGE_PARTIAL", NULL, 0))
ret += error(_("failed to delete ref NOTES_MERGE_PARTIAL"));
- if (delete_ref(NULL, "NOTES_MERGE_REF", NULL, REF_NODEREF))
+ if (delete_ref(NULL, "NOTES_MERGE_REF", NULL, REF_NO_DEREF))
ret += error(_("failed to delete ref NOTES_MERGE_REF"));
if (notes_merge_abort(o))
ret += error(_("failed to remove 'git notes merge' worktree"));
if (!is_null_oid(fork_point))
commit_list_insert(lookup_commit_reference(fork_point), &revs);
- result = reduce_heads(get_octopus_merge_bases(revs));
+ result = get_octopus_merge_bases(revs);
free_commit_list(revs);
+ reduce_heads_replace(&result);
+
if (!result)
return 1;
oidcpy(merge_base, &result->item->object.oid);
+ free_commit_list(result);
return 0;
}
read_ref_full(item->string, RESOLVE_REF_READING, &oid, &flag);
if (!(flag & REF_ISSYMREF))
continue;
- if (delete_ref(NULL, item->string, NULL, REF_NODEREF))
+ if (delete_ref(NULL, item->string, NULL, REF_NO_DEREF))
die(_("deleting '%s' failed"), item->string);
}
for (i = 0; i < remote_branches.nr; i++) {
strbuf_release(&buf);
if (!result)
- result = delete_refs("remote: remove", &branches, REF_NODEREF);
+ result = delete_refs("remote: remove", &branches, REF_NO_DEREF);
string_list_clear(&branches, 0);
if (skipped.nr) {
head_name = xstrdup(states.heads.items[0].string);
free_remote_ref_states(&states);
} else if (opt_d && !opt_a && argc == 1) {
- if (delete_ref(NULL, buf.buf, NULL, REF_NODEREF))
+ if (delete_ref(NULL, buf.buf, NULL, REF_NO_DEREF))
result |= error(_("Could not delete %s"), buf.buf);
} else
usage_with_options(builtin_remote_sethead_usage, options);
if (bisect_list) {
int reaches = reaches, all = all;
- revs.commits = find_bisection(revs.commits, &reaches, &all,
- bisect_find_all);
+ find_bisection(&revs.commits, &reaches, &all, bisect_find_all);
if (bisect_show_vars)
return show_bisect_vars(&info, reaches, all);
die("Cannot delete %s, not a symbolic ref", argv[0]);
if (!strcmp(argv[0], "HEAD"))
die("deleting '%s' is not allowed", argv[0]);
- return delete_ref(NULL, argv[0], NULL, REF_NODEREF);
+ return delete_ref(NULL, argv[0], NULL, REF_NO_DEREF);
}
switch (argc) {
#include "pathspec.h"
#include "dir.h"
#include "split-index.h"
+#include "fsmonitor.h"
/*
* Default to not allowing changes to the list of files. The
static int verbose;
static int mark_valid_only;
static int mark_skip_worktree_only;
+static int mark_fsmonitor_only;
#define MARK_FLAG 1
#define UNMARK_FLAG 2
static struct strbuf mtime_dir = STRBUF_INIT;
int namelen = strlen(path);
int pos = cache_name_pos(path, namelen);
if (0 <= pos) {
+ mark_fsmonitor_invalid(&the_index, active_cache[pos]);
if (mark)
active_cache[pos]->ce_flags |= flag;
else
die("Unable to mark file %s", path);
return;
}
+ if (mark_fsmonitor_only) {
+ if (mark_ce_flags(path, CE_FSMONITOR_VALID, mark_fsmonitor_only == MARK_FLAG))
+ die("Unable to mark file %s", path);
+ return;
+ }
if (force_remove) {
if (remove_file_from_cache(path))
struct refresh_params refresh_args = {0, &has_errors};
int lock_error = 0;
int split_index = -1;
+ int force_write = 0;
+ int fsmonitor = -1;
struct lock_file lock_file = LOCK_INIT;
struct parse_opt_ctx_t ctx;
strbuf_getline_fn getline_fn;
N_("test if the filesystem supports untracked cache"), UC_TEST),
OPT_SET_INT(0, "force-untracked-cache", &untracked_cache,
N_("enable untracked cache without testing the filesystem"), UC_FORCE),
+ OPT_SET_INT(0, "force-write-index", &force_write,
+ N_("write out the index even if is not flagged as changed"), 1),
+ OPT_BOOL(0, "fsmonitor", &fsmonitor,
+ N_("enable or disable file system monitor")),
+ {OPTION_SET_INT, 0, "fsmonitor-valid", &mark_fsmonitor_only, NULL,
+ N_("mark files as fsmonitor valid"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, MARK_FLAG},
+ {OPTION_SET_INT, 0, "no-fsmonitor-valid", &mark_fsmonitor_only, NULL,
+ N_("clear fsmonitor valid bit"),
+ PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, UNMARK_FLAG},
OPT_END()
};
die("BUG: bad untracked_cache value: %d", untracked_cache);
}
- if (active_cache_changed) {
+ if (fsmonitor > 0) {
+ if (git_config_get_fsmonitor() == 0)
+ warning(_("core.fsmonitor is unset; "
+ "set it if you really want to "
+ "enable fsmonitor"));
+ add_fsmonitor(&the_index);
+ report(_("fsmonitor enabled"));
+ } else if (!fsmonitor) {
+ if (git_config_get_fsmonitor() == 1)
+ warning(_("core.fsmonitor is set; "
+ "remove it if you really want to "
+ "disable fsmonitor"));
+ remove_fsmonitor(&the_index);
+ report(_("fsmonitor disabled"));
+ }
+
+ if (active_cache_changed || force_write) {
if (newfd < 0) {
if (refresh_args.flags & REFRESH_QUIET)
exit(128);
static const char *parse_cmd_option(struct strbuf *input, const char *next)
{
if (!strncmp(next, "no-deref", 8) && next[8] == line_termination)
- update_flags |= REF_NODEREF;
+ update_flags |= REF_NO_DEREF;
else
die("option unknown: %s", next);
return next + 8;
}
if (no_deref)
- flags = REF_NODEREF;
+ flags = REF_NO_DEREF;
if (delete)
/*
* For purposes of backwards compatibility, we treat
#define CE_ADDED (1 << 19)
#define CE_HASHED (1 << 20)
+#define CE_FSMONITOR_VALID (1 << 21)
#define CE_WT_REMOVE (1 << 22) /* remove in work directory */
#define CE_CONFLICTED (1 << 23)
#define CACHE_TREE_CHANGED (1 << 5)
#define SPLIT_INDEX_ORDERED (1 << 6)
#define UNTRACKED_CHANGED (1 << 7)
+#define FSMONITOR_CHANGED (1 << 8)
struct split_index;
struct untracked_cache;
struct hashmap dir_hash;
unsigned char sha1[20];
struct untracked_cache *untracked;
+ uint64_t fsmonitor_last_update;
+ struct ewah_bitmap *fsmonitor_dirty;
};
extern struct index_state the_index;
#define CE_MATCH_IGNORE_MISSING 0x08
/* enable stat refresh */
#define CE_MATCH_REFRESH 0x10
-extern int ie_match_stat(const struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
-extern int ie_modified(const struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
+/* don't refresh fsmonitor state or do stat comparison even if CE_FSMONITOR_VALID is true */
+#define CE_MATCH_IGNORE_FSMONITOR 0x20
+extern int ie_match_stat(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
+extern int ie_modified(struct index_state *, const struct cache_entry *, struct stat *, unsigned int);
#define HASH_WRITE_OBJECT 1
#define HASH_FORMAT_CHECK 2
extern void set_alternate_index_output(const char *);
extern int verify_index_checksum;
+extern int verify_ce_order;
/* Environment bits from configuration mechanism */
extern int trust_executable_bit;
extern int precomposed_unicode;
extern int protect_hfs;
extern int protect_ntfs;
+extern const char *core_fsmonitor;
/*
* Include broken refs in all ref iterations, which will
return result;
}
+void reduce_heads_replace(struct commit_list **heads)
+{
+ struct commit_list *result = reduce_heads(*heads);
+ free_commit_list(*heads);
+ *heads = result;
+}
+
static const char gpg_sig_header[] = "gpgsig";
static const int gpg_sig_header_len = sizeof(gpg_sig_header) - 1;
extern int run_add_interactive(const char *revision, const char *patch_mode,
const struct pathspec *pathspec);
-struct commit_list *reduce_heads(struct commit_list *heads);
+/*
+ * Takes a list of commits and returns a new list where those
+ * have been removed that can be reached from other commits in
+ * the list. It is useful for, e.g., reducing the commits
+ * randomly thrown at the git-merge command and removing
+ * redundant commits that the user shouldn't have given to it.
+ *
+ * This function destroys the STALE bit of the commit objects'
+ * flags.
+ */
+extern struct commit_list *reduce_heads(struct commit_list *heads);
+
+/*
+ * Like `reduce_heads()`, except it replaces the list. Use this
+ * instead of `foo = reduce_heads(foo);` to avoid memory leaks.
+ */
+extern void reduce_heads_replace(struct commit_list **heads);
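+/*
+ * Illustrative call pattern (names are hypothetical); the old list is
+ * freed internally, so only the reduced list remains for the caller to
+ * release:
+ *
+ *     struct commit_list *parents = NULL;
+ *     commit_list_insert(a, &parents);
+ *     commit_list_insert(b, &parents);
+ *     reduce_heads_replace(&parents);
+ *     ...
+ *     free_commit_list(parents);
+ */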
struct commit_extra_header {
struct commit_extra_header *next;
#define get_be16(p) ntohs(*(unsigned short *)(p))
#define get_be32(p) ntohl(*(unsigned int *)(p))
+#define get_be64(p) ntohll(*(uint64_t *)(p))
#define put_be32(p, v) do { *(unsigned int *)(p) = htonl(v); } while (0)
+#define put_be64(p, v) do { *(uint64_t *)(p) = htonll(v); } while (0)
#else
(uint32_t)p[3] << 0;
}
+static inline uint64_t get_be64(const void *ptr)
+{
+ const unsigned char *p = ptr;
+ return (uint64_t)get_be32(&p[0]) << 32 |
+ (uint64_t)get_be32(&p[4]) << 0;
+}
+
static inline void put_be32(void *ptr, uint32_t value)
{
unsigned char *p = ptr;
p[3] = value >> 0;
}
+static inline void put_be64(void *ptr, uint64_t value)
+{
+ unsigned char *p = ptr;
+ p[0] = value >> 56;
+ p[1] = value >> 48;
+ p[2] = value >> 40;
+ p[3] = value >> 32;
+ p[4] = value >> 24;
+ p[5] = value >> 16;
+ p[6] = value >> 8;
+ p[7] = value >> 0;
+}
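+/*
+ * Illustration: a value written with put_be64() reads back unchanged via
+ * get_be64() regardless of host byte order:
+ *
+ *     unsigned char buf[8];
+ *     put_be64(buf, 0x0123456789abcdefULL);
+ *     assert(get_be64(buf) == 0x0123456789abcdefULL);
+ */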
+
#endif
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- Boston, MA 02110-1301, USA. */
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
#include "git-compat-util.h"
#include <gettext.h>
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- Boston, MA 02110-1301, USA. */
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
/* Summary:
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation,
- Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
+ with this program; if not, see <http://www.gnu.org/licenses/>. */
/* Tell gcc not to warn about the (nfd < 0) tests, below. */
#if (__GNUC__ == 4 && 3 <= __GNUC_MINOR__) || 4 < __GNUC__
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation,
- Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
+ with this program; if not, see <http://www.gnu.org/licenses/>. */
#ifndef _GL_POLL_H
#define _GL_POLL_H
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301 USA. */
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
static reg_errcode_t re_compile_internal (regex_t *preg, const char * pattern,
size_t length, reg_syntax_t syntax);
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301 USA. */
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
#ifdef HAVE_CONFIG_H
#include "config.h"
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301 USA. */
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
#ifndef _REGEX_H
#define _REGEX_H 1
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301 USA. */
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
static void re_string_construct_common (const char *str, int len,
re_string_t *pstr,
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
#ifndef _REGEX_INTERNAL_H
#define _REGEX_INTERNAL_H 1
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- 02110-1301 USA. */
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
static reg_errcode_t match_ctx_init (re_match_context_t *cache, int eflags,
int n) internal_function;
return -1; /* default value */
}
+int git_config_get_fsmonitor(void)
+{
+ if (git_config_get_pathname("core.fsmonitor", &core_fsmonitor))
+ core_fsmonitor = getenv("GIT_FSMONITOR_TEST");
+
+ if (core_fsmonitor && !*core_fsmonitor)
+ core_fsmonitor = NULL;
+
+ if (core_fsmonitor)
+ return 1;
+
+ return 0;
+}
+
NORETURN
void git_die_config_linenr(const char *key, const char *filename, int linenr)
{
extern int git_config_get_untracked_cache(void);
extern int git_config_get_split_index(void);
extern int git_config_get_max_percent_split_change(void);
+extern int git_config_get_fsmonitor(void);
/* This dies if the configured or default date is in the future */
extern int git_config_get_expiry(const char *key, const char **output);
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software Foundation,
-# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# The latest version of this software can be obtained here:
#
--patch-with-stat --name-only --name-status --color
--no-color --color-words --no-renames --check
--full-index --binary --abbrev --diff-filter=
- --find-copies-harder
+ --find-copies-harder --ignore-cr-at-eol
--text --ignore-space-at-eol --ignore-space-change
--ignore-all-space --ignore-blank-lines --exit-code
--quiet --ext-diff --no-ext-diff
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
;; PURPOSE. See the GNU General Public License for more details.
;; You should have received a copy of the GNU General Public
-;; License along with this program; if not, write to the Free
-;; Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-;; MA 02111-1307 USA
+;; License along with this program; if not, see
+;; <http://www.gnu.org/licenses/>.
;; http://www.fsf.org/copyleft/gpl.html
;; PURPOSE. See the GNU General Public License for more details.
;;
;; You should have received a copy of the GNU General Public
-;; License along with this program; if not, write to the Free
-;; Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-;; MA 02111-1307 USA
+;; License along with this program; if not, see
+;; <http://www.gnu.org/licenses/>.
;;; Commentary:
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# ------------------------------------------------------------------------
# same as above, but case-insensitive; you can give
# arbitrary grep options
git jump grep -i foo_bar
+
+# use the silver searcher for git jump grep
+git config jump.grepCmd "ag --column"
--------------------------------------------------
The shell snippets to generate the quickfix lines will almost certainly
choke on filenames with exotic characters (like newlines).
+
+Contributing
+------------
+
+Bug fixes, bug reports, and feature requests should be discussed on the
+Git mailing list <git@vger.kernel.org>, and cc'd to the git-jump
+maintainer, Jeff King <peff@peff.net>.
merge: elements are merge conflicts. Arguments are ignored.
-grep: elements are grep hits. Arguments are given to grep.
+grep: elements are grep hits. Arguments are given to git grep or, if
+ configured, to the command in `jump.grepCmd`.
ws: elements are whitespace errors. Arguments are given to diff --check.
EOF
# but let's clean up extra whitespace, so they look better if the
# editor shows them to us in the status bar.
mode_grep() {
- git grep -n "$@" |
+ cmd=$(git config jump.grepCmd)
+ test -n "$cmd" || cmd="git grep -n"
+ $cmd "$@" |
perl -pe '
s/[ \t]+/ /g;
s/^ *//;
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
import os, os.path, sys
use 5.008;
use strict;
+use POSIX;
use Git;
BEGIN {
$filename =~ s/ /_/g;
# Decode forbidden characters encoded in clean_filename
$filename =~ s/_%_([0-9a-fA-F][0-9a-fA-F])/sprintf('%c', hex($1))/ge;
- return $filename;
+ return substr($filename, 0, NAME_MAX-length('.mw'));
}
sub connect_maybe {
my @tracked_categories = split(/[ \n]/, run_git("config --get-all remote.${remotename}.categories"));
chomp(@tracked_categories);
+# Just like @tracked_categories, but for MediaWiki namespaces.
+my @tracked_namespaces = split(/[ \n]/, run_git("config --get-all remote.${remotename}.namespaces"));
+for (@tracked_namespaces) { s/_/ /g; }
+chomp(@tracked_namespaces);
+
# Import media files on pull
my $import_media = run_git("config --get --bool remote.${remotename}.mediaimport");
chomp($import_media);
return;
}
+sub get_mw_tracked_namespaces {
+ my $pages = shift;
+ foreach my $local_namespace (sort @tracked_namespaces) {
+ my $namespace_id;
+ if ($local_namespace eq "(Main)") {
+ $namespace_id = 0;
+ } else {
+ $namespace_id = get_mw_namespace_id($local_namespace);
+ }
+ # virtual namespaces don't support allpages
+ next if !defined($namespace_id) || $namespace_id < 0;
+ my $mw_pages = $mediawiki->list( {
+ action => 'query',
+ list => 'allpages',
+ apnamespace => $namespace_id,
+ aplimit => 'max' } )
+ || die $mediawiki->{error}->{code} . ': '
+ . $mediawiki->{error}->{details} . "\n";
+ print {*STDERR} "$#{$mw_pages} found in namespace $local_namespace ($namespace_id)\n";
+ foreach my $page (@{$mw_pages}) {
+ $pages->{$page->{title}} = $page;
+ }
+ }
+ return;
+}
+
sub get_mw_all_pages {
my $pages = shift;
# No user-provided list, get the list of pages from the API.
$user_defined = 1;
get_mw_tracked_categories(\%pages);
}
+ if (@tracked_namespaces) {
+ $user_defined = 1;
+ get_mw_tracked_namespaces(\%pages);
+ }
if (!$user_defined) {
get_mw_all_pages(\%pages);
}
my $id;
if (!defined $ns) {
- print {*STDERR} "No such namespace ${name} on MediaWiki.\n";
+ my @namespaces = map { s/ /_/g; $_; } sort keys %namespace_id;
+ print {*STDERR} "No such namespace ${name} on MediaWiki, known namespaces: @namespaces\n";
$ns = {is_namespace => 0};
$namespace_id{$name} = $ns;
}
#include "refs.h"
#include "submodule.h"
#include "dir.h"
+#include "fsmonitor.h"
/*
* diff-files
if (!changed && !dirty_submodule) {
ce_mark_uptodate(ce);
+ mark_fsmonitor_valid(ce);
if (!revs->diffopt.flags.find_copies_harder)
continue;
}
* inside contents.
*/
- if (DIFF_XDL_TST(options, IGNORE_WHITESPACE) ||
- DIFF_XDL_TST(options, IGNORE_WHITESPACE_CHANGE) ||
- DIFF_XDL_TST(options, IGNORE_WHITESPACE_AT_EOL))
+ if ((options->xdl_opts & XDF_WHITESPACE_FLAGS))
options->flags.diff_from_contents = 1;
else
options->flags.diff_from_contents = 0;
DIFF_XDL_SET(options, IGNORE_WHITESPACE_CHANGE);
else if (!strcmp(arg, "--ignore-space-at-eol"))
DIFF_XDL_SET(options, IGNORE_WHITESPACE_AT_EOL);
+ else if (!strcmp(arg, "--ignore-cr-at-eol"))
+ DIFF_XDL_SET(options, IGNORE_CR_AT_EOL);
else if (!strcmp(arg, "--ignore-blank-lines"))
DIFF_XDL_SET(options, IGNORE_BLANK_LINES);
else if (!strcmp(arg, "--indent-heuristic"))
#include "utf8.h"
#include "varint.h"
#include "ewah/ewok.h"
+#include "fsmonitor.h"
/*
* Tells read_directory_recursive how a file or directory should be treated.
if (!untracked)
return 0;
- if (stat(path->len ? path->buf : ".", &st)) {
- invalidate_directory(dir->untracked, untracked);
- memset(&untracked->stat_data, 0, sizeof(untracked->stat_data));
- return 0;
- }
- if (!untracked->valid ||
- match_stat_data_racy(istate, &untracked->stat_data, &st)) {
- if (untracked->valid)
+ /*
+ * With fsmonitor, we can trust the untracked cache's valid field.
+ */
+ refresh_fsmonitor(istate);
+ if (!(dir->untracked->use_fsmonitor && untracked->valid)) {
+ if (stat(path->len ? path->buf : ".", &st)) {
invalidate_directory(dir->untracked, untracked);
- fill_stat_data(&untracked->stat_data, &st);
- return 0;
+ memset(&untracked->stat_data, 0, sizeof(untracked->stat_data));
+ return 0;
+ }
+ if (!untracked->valid ||
+ match_stat_data_racy(istate, &untracked->stat_data, &st)) {
+ if (untracked->valid)
+ invalidate_directory(dir->untracked, untracked);
+ fill_stat_data(&untracked->stat_data, &st);
+ return 0;
+ }
}
if (untracked->check_only != !!check_only) {
int gitignore_invalidated;
int dir_invalidated;
int dir_opened;
+ /* fsmonitor invalidation data */
+ unsigned int use_fsmonitor : 1;
};
struct dir_struct {
#include "streaming.h"
#include "submodule.h"
#include "progress.h"
+#include "fsmonitor.h"
static void create_directories(const char *path, int path_len,
const struct checkout *state)
ce->name);
fill_stat_cache_info(ce, &st);
ce->ce_flags |= CE_UPDATE_IN_BASE;
+ mark_fsmonitor_invalid(state->istate, ce);
state->istate->cache_changed |= CE_ENTRY_CHANGED;
}
delayed:
#define PROTECT_NTFS_DEFAULT 0
#endif
int protect_ntfs = PROTECT_NTFS_DEFAULT;
+const char *core_fsmonitor;
/*
* The character that begins a commented line in user-editable file
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "cache.h"
#include "ewok.h"
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "git-compat-util.h"
#include "ewok.h"
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "git-compat-util.h"
#include "ewok.h"
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "git-compat-util.h"
#include "ewok.h"
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __EWOK_BITMAP_H__
#define __EWOK_BITMAP_H__
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __EWOK_RLW_H__
#define __EWOK_RLW_H__
--- /dev/null
+#include "cache.h"
+#include "config.h"
+#include "dir.h"
+#include "ewah/ewok.h"
+#include "fsmonitor.h"
+#include "run-command.h"
+#include "strbuf.h"
+
+#define INDEX_EXTENSION_VERSION (1)
+#define HOOK_INTERFACE_VERSION (1)
+
+struct trace_key trace_fsmonitor = TRACE_KEY_INIT(FSMONITOR);
+
+static void fsmonitor_ewah_callback(size_t pos, void *is)
+{
+ struct index_state *istate = (struct index_state *)is;
+ struct cache_entry *ce = istate->cache[pos];
+
+ ce->ce_flags &= ~CE_FSMONITOR_VALID;
+}
+
+int read_fsmonitor_extension(struct index_state *istate, const void *data,
+ unsigned long sz)
+{
+ const char *index = data;
+ uint32_t hdr_version;
+ uint32_t ewah_size;
+ struct ewah_bitmap *fsmonitor_dirty;
+ int ret;
+
+ if (sz < sizeof(uint32_t) + sizeof(uint64_t) + sizeof(uint32_t))
+ return error("corrupt fsmonitor extension (too short)");
+
+ hdr_version = get_be32(index);
+ index += sizeof(uint32_t);
+ if (hdr_version != INDEX_EXTENSION_VERSION)
+ return error("bad fsmonitor version %d", hdr_version);
+
+ istate->fsmonitor_last_update = get_be64(index);
+ index += sizeof(uint64_t);
+
+ ewah_size = get_be32(index);
+ index += sizeof(uint32_t);
+
+ fsmonitor_dirty = ewah_new();
+ ret = ewah_read_mmap(fsmonitor_dirty, index, ewah_size);
+ if (ret != ewah_size) {
+ ewah_free(fsmonitor_dirty);
+ return error("failed to parse ewah bitmap reading fsmonitor index extension");
+ }
+ istate->fsmonitor_dirty = fsmonitor_dirty;
+
+ trace_printf_key(&trace_fsmonitor, "read fsmonitor extension successful");
+ return 0;
+}
+
+void fill_fsmonitor_bitmap(struct index_state *istate)
+{
+ int i;
+ istate->fsmonitor_dirty = ewah_new();
+ for (i = 0; i < istate->cache_nr; i++)
+ if (!(istate->cache[i]->ce_flags & CE_FSMONITOR_VALID))
+ ewah_set(istate->fsmonitor_dirty, i);
+}
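+/*
+ * Note: a set bit in fsmonitor_dirty marks an entry that is *not*
+ * CE_FSMONITOR_VALID, i.e. one that must be re-examined with lstat()
+ * the next time the index is read back in (see fsmonitor_ewah_callback
+ * above).
+ */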
+
+void write_fsmonitor_extension(struct strbuf *sb, struct index_state *istate)
+{
+ uint32_t hdr_version;
+ uint64_t tm;
+ uint32_t ewah_start;
+ uint32_t ewah_size = 0;
+ int fixup = 0;
+
+ put_be32(&hdr_version, INDEX_EXTENSION_VERSION);
+ strbuf_add(sb, &hdr_version, sizeof(uint32_t));
+
+ put_be64(&tm, istate->fsmonitor_last_update);
+ strbuf_add(sb, &tm, sizeof(uint64_t));
+ fixup = sb->len;
+ strbuf_add(sb, &ewah_size, sizeof(uint32_t)); /* we'll fix this up later */
+
+ ewah_start = sb->len;
+ ewah_serialize_strbuf(istate->fsmonitor_dirty, sb);
+ ewah_free(istate->fsmonitor_dirty);
+ istate->fsmonitor_dirty = NULL;
+
+ /* fix up size field */
+ put_be32(&ewah_size, sb->len - ewah_start);
+ memcpy(sb->buf + fixup, &ewah_size, sizeof(uint32_t));
+
+ trace_printf_key(&trace_fsmonitor, "write fsmonitor extension successful");
+}
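+/*
+ * For reference, the extension payload written above consists of a
+ * 32-bit version (currently 1), the 64-bit last-update timestamp, a
+ * 32-bit ewah bitmap size, and finally the serialized ewah bitmap of
+ * dirty entries; read_fsmonitor_extension() above parses the same
+ * layout.
+ */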
+
+/*
+ * Call the query-fsmonitor hook passing the time of the last saved results.
+ */
+static int query_fsmonitor(int version, uint64_t last_update, struct strbuf *query_result)
+{
+ struct child_process cp = CHILD_PROCESS_INIT;
+ char ver[64];
+ char date[64];
+ const char *argv[4];
+
+ if (!(argv[0] = core_fsmonitor))
+ return -1;
+
+ snprintf(ver, sizeof(ver), "%d", version);
+ snprintf(date, sizeof(date), "%" PRIuMAX, (uintmax_t)last_update);
+ argv[1] = ver;
+ argv[2] = date;
+ argv[3] = NULL;
+ cp.argv = argv;
+ cp.use_shell = 1;
+ cp.dir = get_git_work_tree();
+
+ return capture_command(&cp, query_result, 1024);
+}
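+/*
+ * In other words, the configured core.fsmonitor command is invoked with
+ * two arguments, the hook interface version ("1") and the timestamp of
+ * the last query, and its standard output is captured. It is expected
+ * to print either "/" (treat everything as potentially dirty) or a
+ * NUL-separated list of paths changed since that timestamp; see
+ * refresh_fsmonitor() below for how the result is consumed.
+ */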
+
+static void fsmonitor_refresh_callback(struct index_state *istate, const char *name)
+{
+ int pos = index_name_pos(istate, name, strlen(name));
+
+ if (pos >= 0) {
+ struct cache_entry *ce = istate->cache[pos];
+ ce->ce_flags &= ~CE_FSMONITOR_VALID;
+ }
+
+ /*
+ * Mark the untracked cache dirty even if it wasn't found in the index
+ * as it could be a new untracked file.
+ */
+ trace_printf_key(&trace_fsmonitor, "fsmonitor_refresh_callback '%s'", name);
+ untracked_cache_invalidate_path(istate, name);
+}
+
+void refresh_fsmonitor(struct index_state *istate)
+{
+ static int has_run_once = 0;
+ struct strbuf query_result = STRBUF_INIT;
+ int query_success = 0;
+ size_t bol; /* beginning of line */
+ uint64_t last_update;
+ char *buf;
+ int i;
+
+ if (!core_fsmonitor || has_run_once)
+ return;
+ has_run_once = 1;
+
+ trace_printf_key(&trace_fsmonitor, "refresh fsmonitor");
+ /*
+ * This could be racy so save the date/time now and query_fsmonitor
+ * should be inclusive to ensure we don't miss potential changes.
+ */
+ last_update = getnanotime();
+
+ /*
+ * If we have a last update time, call query_fsmonitor for the set of
+ * changes since that time, else assume everything is possibly dirty
+ * and check it all.
+ */
+ if (istate->fsmonitor_last_update) {
+ query_success = !query_fsmonitor(HOOK_INTERFACE_VERSION,
+ istate->fsmonitor_last_update, &query_result);
+ trace_performance_since(last_update, "fsmonitor process '%s'", core_fsmonitor);
+ trace_printf_key(&trace_fsmonitor, "fsmonitor process '%s' returned %s",
+ core_fsmonitor, query_success ? "success" : "failure");
+ }
+
+ /* an fsmonitor process can return '/' to indicate all entries are invalid */
+ if (query_success && query_result.buf[0] != '/') {
+ /* Mark all entries returned by the monitor as dirty */
+ buf = query_result.buf;
+ bol = 0;
+ for (i = 0; i < query_result.len; i++) {
+ if (buf[i] != '\0')
+ continue;
+ fsmonitor_refresh_callback(istate, buf + bol);
+ bol = i + 1;
+ }
+ if (bol < query_result.len)
+ fsmonitor_refresh_callback(istate, buf + bol);
+ } else {
+ /* Mark all entries invalid */
+ for (i = 0; i < istate->cache_nr; i++)
+ istate->cache[i]->ce_flags &= ~CE_FSMONITOR_VALID;
+
+ if (istate->untracked)
+ istate->untracked->use_fsmonitor = 0;
+ }
+ strbuf_release(&query_result);
+
+ /* Now that we've updated istate, save the last_update time */
+ istate->fsmonitor_last_update = last_update;
+}
+
+void add_fsmonitor(struct index_state *istate)
+{
+ int i;
+
+ if (!istate->fsmonitor_last_update) {
+ trace_printf_key(&trace_fsmonitor, "add fsmonitor");
+ istate->cache_changed |= FSMONITOR_CHANGED;
+ istate->fsmonitor_last_update = getnanotime();
+
+ /* reset the fsmonitor state */
+ for (i = 0; i < istate->cache_nr; i++)
+ istate->cache[i]->ce_flags &= ~CE_FSMONITOR_VALID;
+
+ /* reset the untracked cache */
+ if (istate->untracked) {
+ add_untracked_cache(istate);
+ istate->untracked->use_fsmonitor = 1;
+ }
+
+ /* Update the fsmonitor state */
+ refresh_fsmonitor(istate);
+ }
+}
+
+void remove_fsmonitor(struct index_state *istate)
+{
+ if (istate->fsmonitor_last_update) {
+ trace_printf_key(&trace_fsmonitor, "remove fsmonitor");
+ istate->cache_changed |= FSMONITOR_CHANGED;
+ istate->fsmonitor_last_update = 0;
+ }
+}
+
+void tweak_fsmonitor(struct index_state *istate)
+{
+ int i;
+ int fsmonitor_enabled = git_config_get_fsmonitor();
+
+ if (istate->fsmonitor_dirty) {
+ if (fsmonitor_enabled) {
+ /* Mark all entries valid */
+ for (i = 0; i < istate->cache_nr; i++) {
+ istate->cache[i]->ce_flags |= CE_FSMONITOR_VALID;
+ }
+
+ /* Mark all previously saved entries as dirty */
+ ewah_each_bit(istate->fsmonitor_dirty, fsmonitor_ewah_callback, istate);
+
+ /* Now mark the untracked cache for fsmonitor usage */
+ if (istate->untracked)
+ istate->untracked->use_fsmonitor = 1;
+ }
+
+ ewah_free(istate->fsmonitor_dirty);
+ istate->fsmonitor_dirty = NULL;
+ }
+
+ switch (fsmonitor_enabled) {
+ case -1: /* keep: do nothing */
+ break;
+ case 0: /* false */
+ remove_fsmonitor(istate);
+ break;
+ case 1: /* true */
+ add_fsmonitor(istate);
+ break;
+ default: /* unknown value: do nothing */
+ break;
+ }
+}
--- /dev/null
+#ifndef FSMONITOR_H
+#define FSMONITOR_H
+
+extern struct trace_key trace_fsmonitor;
+
+/*
+ * Read the fsmonitor index extension and (if configured) restore the
+ * CE_FSMONITOR_VALID state.
+ */
+extern int read_fsmonitor_extension(struct index_state *istate, const void *data, unsigned long sz);
+
+/*
+ * Fill the fsmonitor_dirty ewah bits with their state from the index,
+ * before it is split during writing.
+ */
+extern void fill_fsmonitor_bitmap(struct index_state *istate);
+
+/*
+ * Write the CE_FSMONITOR_VALID state into the fsmonitor index
+ * extension. Reads from the fsmonitor_dirty ewah in the index.
+ */
+extern void write_fsmonitor_extension(struct strbuf *sb, struct index_state *istate);
+
+/*
+ * Add/remove the fsmonitor index extension
+ */
+extern void add_fsmonitor(struct index_state *istate);
+extern void remove_fsmonitor(struct index_state *istate);
+
+/*
+ * Add/remove the fsmonitor index extension as necessary based on the current
+ * core.fsmonitor setting.
+ */
+extern void tweak_fsmonitor(struct index_state *istate);
+
+/*
+ * Run the configured fsmonitor integration script and clear the
+ * CE_FSMONITOR_VALID bit for any files returned as dirty. Also invalidate
+ * any corresponding untracked cache directory structures. Optimized to only
+ * run the first time it is called.
+ */
+extern void refresh_fsmonitor(struct index_state *istate);
+
+/*
+ * Set the given cache entry's CE_FSMONITOR_VALID bit. This should be
+ * called any time the cache entry has been updated to reflect the
+ * current state of the file on disk.
+ */
+static inline void mark_fsmonitor_valid(struct cache_entry *ce)
+{
+ if (core_fsmonitor) {
+ ce->ce_flags |= CE_FSMONITOR_VALID;
+ trace_printf_key(&trace_fsmonitor, "mark_fsmonitor_clean '%s'", ce->name);
+ }
+}
+
+/*
+ * Clear the given cache entry's CE_FSMONITOR_VALID bit and invalidate
+ * any corresponding untracked cache directory structures. This should
+ * be called any time git creates or modifies a file that should
+ * trigger an lstat() or invalidate the untracked cache for the
+ * corresponding directory.
+ */
+static inline void mark_fsmonitor_invalid(struct index_state *istate, struct cache_entry *ce)
+{
+ if (core_fsmonitor) {
+ ce->ce_flags &= ~CE_FSMONITOR_VALID;
+ untracked_cache_invalidate_path(istate, ce->name);
+ trace_printf_key(&trace_fsmonitor, "mark_fsmonitor_invalid '%s'", ce->name);
+ }
+}
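+/*
+ * Typical usage sketch: after lstat() shows a file matches the index,
+ * callers pair ce_mark_uptodate(ce) with mark_fsmonitor_valid(ce);
+ * conversely, whenever an entry is updated (e.g. together with setting
+ * CE_UPDATE_IN_BASE), mark_fsmonitor_invalid(istate, ce) is called so
+ * the path is re-examined on the next refresh.
+ */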
+
+#endif
#!/bin/sh
-USAGE='[help|start|bad|good|new|old|terms|skip|next|reset|visualize|replay|log|run]'
+USAGE='[help|start|bad|good|new|old|terms|skip|next|reset|visualize|view|replay|log|run]'
LONG_USAGE='git bisect help
print this long help message.
git bisect start [--term-{old,good}=<term> --term-{new,bad}=<term>]
find next bisection to test and check it out.
git bisect reset [<commit>]
finish bisection search and go back to commit.
-git bisect visualize
+git bisect (visualize|view)
show bisect status in gitk.
git bisect replay <logfile>
replay bisection log.
bisect_run () {
bisect_next_check fail
+ test -n "$*" || die "$(gettext "bisect run failed: no command provided.")"
+
while true
do
command="$@"
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA}]
+along with this program; if not, see <http://www.gnu.org/licenses/>.}]
######################################################################
##
git rebase--helper --shorten-ids
}
-# Add commands after a pick or after a squash/fixup serie
+# Add commands after a pick or after a squash/fixup series
# in the todo list.
add_exec_commands () {
{
if (!p->pcre1_regexp)
compile_regexp_failed(p, error);
- p->pcre1_extra_info = pcre_study(p->pcre1_regexp, PCRE_STUDY_JIT_COMPILE, &error);
+ p->pcre1_extra_info = pcre_study(p->pcre1_regexp, GIT_PCRE_STUDY_JIT_COMPILE, &error);
if (!p->pcre1_extra_info && error)
die("%s", error);
#if PCRE_MAJOR >= 8 && PCRE_MINOR >= 32
#ifndef NO_LIBPCRE1_JIT
#define GIT_PCRE1_USE_JIT
+#define GIT_PCRE_STUDY_JIT_COMPILE PCRE_STUDY_JIT_COMPILE
#endif
#endif
#endif
-#ifndef PCRE_STUDY_JIT_COMPILE
-#define PCRE_STUDY_JIT_COMPILE 0
+#ifndef GIT_PCRE_STUDY_JIT_COMPILE
+#define GIT_PCRE_STUDY_JIT_COMPILE 0
#endif
#if PCRE_MAJOR <= 8 && PCRE_MINOR < 20
typedef int pcre_jit_stack;
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "cache.h"
struct imap *imap = ctx->imap;
char *arg, *p;
- if (*s != '[')
+ if (!s || *s != '[')
return RESP_OK; /* no response code */
s++;
if (!(p = strchr(s, ']'))) {
}
*p++ = 0;
arg = next_arg(&s);
+ if (!arg) {
+ fprintf(stderr, "IMAP error: empty response code\n");
+ return RESP_BAD;
+ }
if (!strcmp("UIDVALIDITY", arg)) {
if (!(arg = next_arg(&s)) || !(ctx->uidvalidity = atoi(arg))) {
fprintf(stderr, "IMAP error: malformed UIDVALIDITY status\n");
{
struct imap *imap = ctx->imap;
struct imap_cmd *cmdp, **pcmdp;
- char *cmd, *arg, *arg1;
+ char *cmd;
+ const char *arg, *arg1;
int n, resp, resp2, tag;
for (;;) {
return RESP_BAD;
arg = next_arg(&cmd);
+ if (!arg) {
+ fprintf(stderr, "IMAP error: empty response\n");
+ return RESP_BAD;
+ }
if (*arg == '*') {
arg = next_arg(&cmd);
if (!arg) {
if (cmdp->cb.cont || cmdp->cb.data)
imap->literal_pending = 0;
arg = next_arg(&cmd);
+ if (!arg)
+ arg = "";
if (!strcmp("OK", arg))
resp = DRV_OK;
else {
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with this program; if not, see <http://www.gnu.org/licenses/>. */
/* Written August 1989 by Mike Haertel.
The author may be reached (Email) at the address mike@ai.mit.edu,
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
- 02110-1301, USA. */
+ along with this program; if not, see <http://www.gnu.org/licenses/>. */
/* Written August 1989 by Mike Haertel.
The author may be reached (Email) at the address mike@ai.mit.edu,
void init_merge_options(struct merge_options *o)
{
+ const char *merge_verbosity;
memset(o, 0, sizeof(struct merge_options));
o->verbosity = 2;
o->buffer_output = 1;
o->renormalize = 0;
o->detect_rename = 1;
merge_recursive_config(o);
- if (getenv("GIT_MERGE_VERBOSITY"))
- o->verbosity =
- strtol(getenv("GIT_MERGE_VERBOSITY"), NULL, 10);
+ merge_verbosity = getenv("GIT_MERGE_VERBOSITY");
+ if (merge_verbosity)
+ o->verbosity = strtol(merge_verbosity, NULL, 10);
if (o->verbosity >= 5)
o->buffer_output = 0;
strbuf_init(&o->obuf, 0);
DIFF_XDL_SET(o, IGNORE_WHITESPACE);
else if (!strcmp(s, "ignore-space-at-eol"))
DIFF_XDL_SET(o, IGNORE_WHITESPACE_AT_EOL);
+ else if (!strcmp(s, "ignore-cr-at-eol"))
+ DIFF_XDL_SET(o, IGNORE_CR_AT_EOL);
else if (!strcmp(s, "renormalize"))
o->renormalize = 1;
else if (!strcmp(s, "no-renormalize"))
--- /dev/null
+package Git::Packet;
+use 5.008;
+use strict;
+use warnings;
+BEGIN {
+ require Exporter;
+ if ($] < 5.008003) {
+ *import = \&Exporter::import;
+ } else {
+ # Exporter 5.57 which supports this invocation was
+ # released with perl 5.8.3
+ Exporter->import('import');
+ }
+}
+
+our @EXPORT = qw(
+ packet_compare_lists
+ packet_bin_read
+ packet_txt_read
+ packet_required_key_val_read
+ packet_bin_write
+ packet_txt_write
+ packet_flush
+ packet_initialize
+ packet_read_capabilities
+ packet_read_and_check_capabilities
+ packet_check_and_write_capabilities
+ );
+our @EXPORT_OK = @EXPORT;
+
+sub packet_compare_lists {
+ my ($expect, @result) = @_;
+ my $ix;
+ if (scalar @$expect != scalar @result) {
+ return undef;
+ }
+ for ($ix = 0; $ix < $#result; $ix++) {
+ if ($expect->[$ix] ne $result[$ix]) {
+ return undef;
+ }
+ }
+ return 1;
+}
+
+sub packet_bin_read {
+ my $buffer;
+ my $bytes_read = read STDIN, $buffer, 4;
+ if ( $bytes_read == 0 ) {
+ # EOF - Git stopped talking to us!
+ return ( -1, "" );
+ } elsif ( $bytes_read != 4 ) {
+ die "invalid packet: '$buffer'";
+ }
+ my $pkt_size = hex($buffer);
+ if ( $pkt_size == 0 ) {
+ return ( 1, "" );
+ } elsif ( $pkt_size > 4 ) {
+ my $content_size = $pkt_size - 4;
+ $bytes_read = read STDIN, $buffer, $content_size;
+ if ( $bytes_read != $content_size ) {
+ die "invalid packet ($content_size bytes expected; $bytes_read bytes read)";
+ }
+ return ( 0, $buffer );
+ } else {
+ die "invalid packet size: $pkt_size";
+ }
+}
+
+sub remove_final_lf_or_die {
+ my $buf = shift;
+ unless ( $buf =~ s/\n$// ) {
+ die "A non-binary line MUST be terminated by an LF.\n"
+ . "Received: '$buf'";
+ }
+ return $buf;
+}
+
+sub packet_txt_read {
+ my ( $res, $buf ) = packet_bin_read();
+ unless ( $res == -1 or $buf eq '' ) {
+ $buf = remove_final_lf_or_die($buf);
+ }
+ return ( $res, $buf );
+}
+
+sub packet_required_key_val_read {
+ my ( $key ) = @_;
+ my ( $res, $buf ) = packet_txt_read();
+ unless ( $res == -1 or ( $buf =~ s/^$key=// and $buf ne '' ) ) {
+ die "bad $key: '$buf'";
+ }
+ return ( $res, $buf );
+}
+
+sub packet_bin_write {
+ my $buf = shift;
+ print STDOUT sprintf( "%04x", length($buf) + 4 );
+ print STDOUT $buf;
+ STDOUT->flush();
+}
+
+sub packet_txt_write {
+ packet_bin_write( $_[0] . "\n" );
+}
+
+sub packet_flush {
+ print STDOUT sprintf( "%04x", 0 );
+ STDOUT->flush();
+}
+
+sub packet_initialize {
+ my ($name, $version) = @_;
+
+ packet_compare_lists([0, $name . "-client"], packet_txt_read()) ||
+ die "bad initialize";
+ packet_compare_lists([0, "version=" . $version], packet_txt_read()) ||
+ die "bad version";
+ packet_compare_lists([1, ""], packet_bin_read()) ||
+ die "bad version end";
+
+ packet_txt_write( $name . "-server" );
+ packet_txt_write( "version=" . $version );
+ packet_flush();
+}
+
+sub packet_read_capabilities {
+ my @cap;
+ while (1) {
+ my ( $res, $buf ) = packet_bin_read();
+ if ( $res == -1 ) {
+ die "unexpected EOF when reading capabilities";
+ }
+ return ( $res, @cap ) if ( $res != 0 );
+ $buf = remove_final_lf_or_die($buf);
+ unless ( $buf =~ s/capability=// ) {
+ die "bad capability buf: '$buf'";
+ }
+ push @cap, $buf;
+ }
+}
+
+# Read remote capabilities and check them against capabilities we require
+sub packet_read_and_check_capabilities {
+ my @required_caps = @_;
+ my ($res, @remote_caps) = packet_read_capabilities();
+ my %remote_caps = map { $_ => 1 } @remote_caps;
+ foreach (@required_caps) {
+ unless (exists($remote_caps{$_})) {
+ die "required '$_' capability not available from remote" ;
+ }
+ }
+ return %remote_caps;
+}
+
+# Check our capabilities we want to advertise against the remote ones
+# and then advertise our capabilities
+sub packet_check_and_write_capabilities {
+ my ($remote_caps, @our_caps) = @_;
+ foreach (@our_caps) {
+ unless (exists($remote_caps->{$_})) {
+ die "our capability '$_' is not available from remote"
+ }
+ packet_txt_write( "capability=" . $_ );
+ }
+ packet_flush();
+}
+
+1;
modules += Git
modules += Git/I18N
modules += Git/IndexInfo
+modules += Git/Packet
modules += Git/SVN
modules += Git/SVN/Memoize/YAML
modules += Git/SVN/Fetcher
#include "cache.h"
#include "pathspec.h"
#include "dir.h"
+#include "fsmonitor.h"
#ifdef NO_PTHREADS
static void preload_index(struct index_state *index,
continue;
if (ce_skip_worktree(ce))
continue;
+ if (ce->ce_flags & CE_FSMONITOR_VALID)
+ continue;
if (!ce_path_match(ce, &p->pathspec, NULL))
continue;
if (threaded_has_symlink_leading_path(&cache, ce->name, ce_namelen(ce)))
continue;
if (lstat(ce->name, &st))
continue;
- if (ie_match_stat(index, ce, &st, CE_MATCH_RACY_IS_DIRTY))
+ if (ie_match_stat(index, ce, &st, CE_MATCH_RACY_IS_DIRTY|CE_MATCH_IGNORE_FSMONITOR))
continue;
ce_mark_uptodate(ce);
+ mark_fsmonitor_valid(ce);
} while (--nr > 0);
cache_def_clear(&cache);
return NULL;
return;
threads = index->cache_nr / THREAD_COST;
+ if ((index->cache_nr > 1) && (threads < 2) && getenv("GIT_FORCE_PRELOAD_TEST"))
+ threads = 2;
if (threads < 2)
return;
if (threads > MAX_PARALLEL)
#include "varint.h"
#include "split-index.h"
#include "utf8.h"
+#include "fsmonitor.h"
/* Mask for the name length in ce_flags in the on-disk index */
#define CACHE_EXT_RESOLVE_UNDO 0x52455543 /* "REUC" */
#define CACHE_EXT_LINK 0x6c696e6b /* "link" */
#define CACHE_EXT_UNTRACKED 0x554E5452 /* "UNTR" */
+#define CACHE_EXT_FSMONITOR 0x46534D4E /* "FSMN" */
/* changes that can be kept in $GIT_DIR/index (basically all extensions) */
#define EXTMASK (RESOLVE_UNDO_CHANGED | CACHE_TREE_CHANGED | \
CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \
- SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED)
+ SPLIT_INDEX_ORDERED | UNTRACKED_CHANGED | FSMONITOR_CHANGED)
struct index_state the_index;
static const char *alternate_index_output;
free(old);
set_index_entry(istate, nr, ce);
ce->ce_flags |= CE_UPDATE_IN_BASE;
+ mark_fsmonitor_invalid(istate, ce);
istate->cache_changed |= CE_ENTRY_CHANGED;
}
if (assume_unchanged)
ce->ce_flags |= CE_VALID;
- if (S_ISREG(st->st_mode))
+ if (S_ISREG(st->st_mode)) {
ce_mark_uptodate(ce);
+ mark_fsmonitor_valid(ce);
+ }
}
static int ce_compare_data(const struct cache_entry *ce, struct stat *st)
return match_stat_data(sd, st);
}
-int ie_match_stat(const struct index_state *istate,
+int ie_match_stat(struct index_state *istate,
const struct cache_entry *ce, struct stat *st,
unsigned int options)
{
int ignore_valid = options & CE_MATCH_IGNORE_VALID;
int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
int assume_racy_is_modified = options & CE_MATCH_RACY_IS_DIRTY;
+ int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;
+ if (!ignore_fsmonitor)
+ refresh_fsmonitor(istate);
/*
* If it's marked as always valid in the index, it's
* valid whatever the checked-out copy says.
return 0;
if (!ignore_valid && (ce->ce_flags & CE_VALID))
return 0;
+ if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID))
+ return 0;
/*
* Intent-to-add entries have not been added, so the index entry
return changed;
}
-int ie_modified(const struct index_state *istate,
+int ie_modified(struct index_state *istate,
const struct cache_entry *ce,
struct stat *st, unsigned int options)
{
}
cache_tree_invalidate_path(istate, ce->name);
ce->ce_flags |= CE_UPDATE_IN_BASE;
+ mark_fsmonitor_invalid(istate, ce);
istate->cache_changed |= CE_ENTRY_CHANGED;
return 0;
int ignore_valid = options & CE_MATCH_IGNORE_VALID;
int ignore_skip_worktree = options & CE_MATCH_IGNORE_SKIP_WORKTREE;
int ignore_missing = options & CE_MATCH_IGNORE_MISSING;
+ int ignore_fsmonitor = options & CE_MATCH_IGNORE_FSMONITOR;
if (!refresh || ce_uptodate(ce))
return ce;
+ if (!ignore_fsmonitor)
+ refresh_fsmonitor(istate);
/*
* CE_VALID or CE_SKIP_WORKTREE means the user promised us
* that the change to the work tree does not matter and told
ce_mark_uptodate(ce);
return ce;
}
+ if (!ignore_fsmonitor && (ce->ce_flags & CE_FSMONITOR_VALID)) {
+ ce_mark_uptodate(ce);
+ return ce;
+ }
if (has_symlink_leading_path(ce->name, ce_namelen(ce))) {
if (ignore_missing)
* because CE_UPTODATE flag is in-core only;
* we are not going to write this change out.
*/
- if (!S_ISGITLINK(ce->ce_mode))
+ if (!S_ISGITLINK(ce->ce_mode)) {
ce_mark_uptodate(ce);
+ mark_fsmonitor_valid(ce);
+ }
return ce;
}
}
*/
ce->ce_flags &= ~CE_VALID;
ce->ce_flags |= CE_UPDATE_IN_BASE;
+ mark_fsmonitor_invalid(istate, ce);
istate->cache_changed |= CE_ENTRY_CHANGED;
}
if (quiet)
/* Allow fsck to force verification of the index checksum. */
int verify_index_checksum;
+/* Allow fsck to force verification of the cache entry order. */
+int verify_ce_order;
+
static int verify_hdr(struct cache_header *hdr, unsigned long size)
{
git_SHA_CTX c;
case CACHE_EXT_UNTRACKED:
istate->untracked = read_untracked_extension(data, sz);
break;
+ case CACHE_EXT_FSMONITOR:
+ read_fsmonitor_extension(istate, data, sz);
+ break;
default:
if (*ext < 'A' || 'Z' < *ext)
return error("index uses %.4s extension, which we do not understand",
{
unsigned int i;
+ if (!verify_ce_order)
+ return;
+
for (i = 1; i < istate->cache_nr; i++) {
struct cache_entry *ce = istate->cache[i - 1];
struct cache_entry *next_ce = istate->cache[i];
check_ce_order(istate);
tweak_untracked_cache(istate);
tweak_split_index(istate);
+ tweak_fsmonitor(istate);
}
/* remember to discard_cache() before reading a different cache! */
if (err)
return -1;
}
+ if (!strip_extensions && istate->fsmonitor_last_update) {
+ struct strbuf sb = STRBUF_INIT;
+
+ write_fsmonitor_extension(&sb, istate);
+ err = write_index_ext_header(&c, newfd, CACHE_EXT_FSMONITOR, sb.len) < 0
+ || ce_write(&c, newfd, sb.buf, sb.len) < 0;
+ strbuf_release(&sb);
+ if (err)
+ return -1;
+ }
if (ce_flush(&c, newfd, istate->sha1))
return -1;
int new_shared_index, ret;
struct split_index *si = istate->split_index;
+ if (istate->fsmonitor_last_update)
+ fill_fsmonitor_bitmap(istate);
+
if (!si || alternate_index_output ||
(istate->cache_changed & ~EXTMASK)) {
if (si)
char color[COLOR_MAXLEN];
struct align align;
struct {
- enum { RR_REF, RR_TRACK, RR_TRACKSHORT } option;
+ enum {
+ RR_REF, RR_TRACK, RR_TRACKSHORT, RR_REMOTE_NAME, RR_REMOTE_REF
+ } option;
struct refname_atom refname;
- unsigned int nobracket : 1;
+ unsigned int nobracket : 1, push : 1, push_remote : 1;
} remote_ref;
struct {
enum { C_BARE, C_BODY, C_BODY_DEP, C_LINES, C_SIG, C_SUB, C_TRAILERS } option;
struct string_list params = STRING_LIST_INIT_DUP;
int i;
+ if (!strcmp(atom->name, "push") || starts_with(atom->name, "push:"))
+ atom->u.remote_ref.push = 1;
+
if (!arg) {
atom->u.remote_ref.option = RR_REF;
refname_atom_parser_internal(&atom->u.remote_ref.refname,
atom->u.remote_ref.option = RR_TRACKSHORT;
else if (!strcmp(s, "nobracket"))
atom->u.remote_ref.nobracket = 1;
- else {
+ else if (!strcmp(s, "remotename")) {
+ atom->u.remote_ref.option = RR_REMOTE_NAME;
+ atom->u.remote_ref.push_remote = 1;
+ } else if (!strcmp(s, "remoteref")) {
+ atom->u.remote_ref.option = RR_REMOTE_REF;
+ atom->u.remote_ref.push_remote = 1;
+ } else {
atom->u.remote_ref.option = RR_REF;
refname_atom_parser_internal(&atom->u.remote_ref.refname,
arg, atom->name);
*s = ">";
else
*s = "<>";
+ } else if (atom->u.remote_ref.option == RR_REMOTE_NAME) {
+ int explicit;
+ const char *remote = atom->u.remote_ref.push ?
+ pushremote_for_branch(branch, &explicit) :
+ remote_for_branch(branch, &explicit);
+ if (explicit)
+ *s = xstrdup(remote);
+ else
+ *s = "";
+ } else if (atom->u.remote_ref.option == RR_REMOTE_REF) {
+ int explicit;
+ const char *merge;
+
+ merge = remote_ref_for_branch(branch, atom->u.remote_ref.push,
+ &explicit);
+ if (explicit)
+ *s = xstrdup(merge);
+ else
+ *s = "";
} else
die("BUG: unhandled RR_* enum");
}
if (refname)
fill_remote_ref_details(atom, refname, branch, &v->s);
continue;
- } else if (starts_with(name, "push")) {
+ } else if (atom->u.remote_ref.push) {
const char *branch_name;
if (!skip_prefix(ref->refname, "refs/heads/",
&branch_name))
continue;
branch = branch_get(branch_name);
- refname = branch_get_push(branch, NULL);
- if (!refname)
- continue;
+ if (atom->u.remote_ref.push_remote)
+ refname = NULL;
+ else {
+ refname = branch_get_push(branch, NULL);
+ if (!refname)
+ continue;
+ }
fill_remote_ref_details(atom, refname, branch, &v->s);
continue;
} else if (starts_with(name, "color:")) {
if (cb->cutoff_cnt)
*cb->cutoff_cnt = cb->reccnt - 1;
/*
- * we have not yet updated cb->[n|o]sha1 so they still
+ * we have not yet updated cb->[n|o]oid so they still
* hold the values for the previous record.
*/
if (!is_null_oid(&cb->ooid)) {
if (transaction->state != REF_TRANSACTION_OPEN)
die("BUG: update called for transaction that is not open");
- if ((flags & REF_ISPRUNING) && !(flags & REF_NODEREF))
- die("BUG: REF_ISPRUNING set without REF_NODEREF");
-
FLEX_ALLOC_STR(update, refname, refname);
ALLOC_GROW(transaction->updates, transaction->nr + 1, transaction->alloc);
transaction->updates[transaction->nr++] = update;
return -1;
}
- flags &= REF_TRANSACTION_UPDATE_ALLOWED_FLAGS;
+ if (flags & ~REF_TRANSACTION_UPDATE_ALLOWED_FLAGS)
+ BUG("illegal flags 0x%x passed to ref_transaction_update()", flags);
flags |= (new_oid ? REF_HAVE_NEW : 0) | (old_oid ? REF_HAVE_OLD : 0);
/**
* Resolve refname in the nested "gitlink" repository in the specified
* submodule (which must be non-NULL). If the resolution is
- * successful, return 0 and set sha1 to the name of the object;
+ * successful, return 0 and set oid to the name of the object;
* otherwise, return a non-zero value.
*/
int resolve_gitlink_ref(const char *submodule, const char *refname,
/*
* The signature for the callback function for the for_each_*()
- * functions below. The memory pointed to by the refname and sha1
+ * functions below. The memory pointed to by the refname and oid
* arguments is only guaranteed to be valid for the duration of a
* single callback invocation.
*/
*/
int refs_pack_refs(struct ref_store *refs, unsigned int flags);
-/*
- * Flags controlling ref_transaction_update(), ref_transaction_create(), etc.
- * REF_NODEREF: act on the ref directly, instead of dereferencing
- * symbolic references.
- *
- * Other flags are reserved for internal use.
- */
-#define REF_NODEREF 0x01
-#define REF_FORCE_CREATE_REFLOG 0x40
-
-/*
- * Flags that can be passed in to ref_transaction_update
- */
-#define REF_TRANSACTION_UPDATE_ALLOWED_FLAGS \
- REF_ISPRUNING | \
- REF_FORCE_CREATE_REFLOG | \
- REF_NODEREF
-
/*
* Setup reflog before using. Fill in err and return -1 on failure.
*/
/*
* Delete the specified reference. If old_oid is non-NULL, then
- * verify that the current value of the reference is old_sha1 before
+ * verify that the current value of the reference is old_oid before
* deleting it. If old_oid is NULL, delete the reference if it
* exists, regardless of its old value. It is an error for old_oid to
* be null_oid. msg and flags are passed through to
*
* refname -- the name of the reference to be affected.
*
- * new_sha1 -- the SHA-1 that should be set to be the new value of
- * the reference. Some functions allow this parameter to be
+ * new_oid -- the object ID that should be set to be the new value
+ * of the reference. Some functions allow this parameter to be
* NULL, meaning that the reference is not changed, or
- * null_sha1, meaning that the reference should be deleted. A
+ * null_oid, meaning that the reference should be deleted. A
* copy of this value is made in the transaction.
*
- * old_sha1 -- the SHA-1 value that the reference must have before
+ * old_oid -- the object ID that the reference must have before
* the update. Some functions allow this parameter to be NULL,
* meaning that the old value of the reference is not checked,
- * or null_sha1, meaning that the reference must not exist
+ * or null_oid, meaning that the reference must not exist
* before the update. A copy of this value is made in the
* transaction.
*
* flags -- flags affecting the update, passed to
- * update_ref_lock(). Can be REF_NODEREF, which means that
- * symbolic references should not be followed.
+ * update_ref_lock(). Possible flags: REF_NO_DEREF,
+ * REF_FORCE_CREATE_REFLOG. See those constants for more
+ * information.
*
* msg -- a message describing the change (for the reflog).
*
*/
/*
- * Add a reference update to transaction. new_oid is the value that
- * the reference should have after the update, or null_oid if it
- * should be deleted. If new_oid is NULL, then the reference is not
- * changed at all. old_oid is the value that the reference must have
- * before the update, or null_oid if it must not have existed
+ * The following flags can be passed to ref_transaction_update() etc.
+ * Internally, they are stored in `ref_update::flags`, along with some
+ * internal flags.
+ */
+
+/*
+ * Act on the ref directly; i.e., without dereferencing symbolic refs.
+ * If this flag is not specified, then symbolic references are
+ * dereferenced and the update is applied to the referent.
+ */
+#define REF_NO_DEREF (1 << 0)
+
+/*
+ * Force the creation of a reflog for this reference, even if it
+ * didn't previously have a reflog.
+ */
+#define REF_FORCE_CREATE_REFLOG (1 << 1)
+
+/*
+ * Bitmask of all of the flags that are allowed to be passed in to
+ * ref_transaction_update() and friends:
+ */
+#define REF_TRANSACTION_UPDATE_ALLOWED_FLAGS \
+ (REF_NO_DEREF | REF_FORCE_CREATE_REFLOG)
+
+/*
+ * Add a reference update to transaction. `new_oid` is the value that
+ * the reference should have after the update, or `null_oid` if it
+ * should be deleted. If `new_oid` is NULL, then the reference is not
+ * changed at all. `old_oid` is the value that the reference must have
+ * before the update, or `null_oid` if it must not have existed
* beforehand. The old value is checked after the lock is taken to
* prevent races. If the old value doesn't agree with old_oid, the
* whole transaction fails. If old_oid is NULL, then the previous
* It is a bug to call this function when there might be other
* processes accessing the repository or if there are existing
* references that might conflict with the ones being created. All
- * old_sha1 values must either be absent or NULL_SHA1.
+ * old_oid values must either be absent or null_oid.
*/
int initial_ref_transaction_commit(struct ref_transaction *transaction,
struct strbuf *err);
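
(Editorial illustration, not part of the patch: a minimal sketch of how a caller of the transaction API documented above might delete a reference without dereferencing symbolic refs, using the renamed REF_NO_DEREF flag. It assumes only the public refs.h entry points shown in this hunk.)

static int example_delete_ref_no_deref(const char *refname,
				       const struct object_id *old_oid)
{
	struct strbuf err = STRBUF_INIT;
	struct ref_transaction *transaction = ref_transaction_begin(&err);
	int ret = -1;

	if (transaction &&
	    !ref_transaction_delete(transaction, refname, old_oid,
				    REF_NO_DEREF, "example: delete", &err) &&
	    !ref_transaction_commit(transaction, &err))
		ret = 0;	/* success */
	else
		error("%s", err.buf);

	ref_transaction_free(transaction);
	strbuf_release(&err);
	return ret;
}
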
#include "../object.h"
#include "../dir.h"
+/*
+ * This backend uses the following flags in `ref_update::flags` for
+ * internal bookkeeping purposes. Their numerical values must not
+ * conflict with REF_NO_DEREF, REF_FORCE_CREATE_REFLOG, REF_HAVE_NEW,
+ * REF_HAVE_OLD, or REF_IS_PRUNING, which are also stored in
+ * `ref_update::flags`.
+ */
+
+/*
+ * Used as a flag in ref_update::flags when a loose ref is being
+ * pruned. This flag must only be used when REF_NO_DEREF is set.
+ */
+#define REF_IS_PRUNING (1 << 4)
+
+/*
+ * Flag passed to lock_ref_sha1_basic() telling it to tolerate broken
+ * refs (i.e., because the reference is about to be deleted anyway).
+ */
+#define REF_DELETING (1 << 5)
+
+/*
+ * Used as a flag in ref_update::flags when the lockfile needs to be
+ * committed.
+ */
+#define REF_NEEDS_COMMIT (1 << 6)
+
+/*
+ * Used as a flag in ref_update::flags when we want to log a ref
+ * update but not actually perform it. This is used when a symbolic
+ * ref update is split up.
+ */
+#define REF_LOG_ONLY (1 << 7)
+
+/*
+ * Used as a flag in ref_update::flags when the ref_update was via an
+ * update to HEAD.
+ */
+#define REF_UPDATE_VIA_HEAD (1 << 8)
+
+/*
+ * Used as a flag in ref_update::flags when the loose reference has
+ * been deleted.
+ */
+#define REF_DELETED_LOOSE (1 << 9)
+
struct ref_lock {
char *ref_name;
struct lock_file lk;
} else if (is_null_oid(&oid)) {
/*
* It is so astronomically unlikely
- * that NULL_SHA1 is the SHA-1 of an
+ * that null_oid is the OID of an
* actual object that we consider its
* appearance in a loose reference
* file to be repo corruption
* are passed to refs_verify_refname_available() for this check.
*
* If mustexist is not set and the reference is not found or is
- * broken, lock the reference anyway but clear sha1.
+ * broken, lock the reference anyway but clear old_oid.
*
* Return 0 on success. On failure, write an error message to err and
* return TRANSACTION_NAME_CONFLICT or TRANSACTION_GENERIC_ERROR.
{
struct ref_transaction *transaction;
struct strbuf err = STRBUF_INIT;
+ int ret = -1;
if (check_refname_format(r->name, 0))
return;
transaction = ref_store_transaction_begin(&refs->base, &err);
- if (!transaction ||
- ref_transaction_delete(transaction, r->name, &r->oid,
- REF_ISPRUNING | REF_NODEREF, NULL, &err) ||
- ref_transaction_commit(transaction, &err)) {
- ref_transaction_free(transaction);
+ if (!transaction)
+ goto cleanup;
+ ref_transaction_add_update(
+ transaction, r->name,
+ REF_NO_DEREF | REF_HAVE_NEW | REF_HAVE_OLD | REF_IS_PRUNING,
+ &null_oid, &r->oid, NULL);
+ if (ref_transaction_commit(transaction, &err))
+ goto cleanup;
+
+ ret = 0;
+
+cleanup:
+ if (ret)
error("%s", err.buf);
- strbuf_release(&err);
- return;
- }
- ref_transaction_free(transaction);
strbuf_release(&err);
+ ref_transaction_free(transaction);
+ return;
}
/*
*/
if (ref_transaction_update(transaction, iter->refname,
iter->oid, NULL,
- REF_NODEREF, NULL, &err))
+ REF_NO_DEREF, NULL, &err))
die("failure preparing to create packed reference %s: %s",
iter->refname, err.buf);
}
if (!copy && refs_delete_ref(&refs->base, logmsg, oldrefname,
- &orig_oid, REF_NODEREF)) {
+ &orig_oid, REF_NO_DEREF)) {
error("unable to delete old %s", oldrefname);
goto rollback;
}
RESOLVE_REF_READING | RESOLVE_REF_NO_RECURSE,
&oid, NULL) &&
refs_delete_ref(&refs->base, NULL, newrefname,
- NULL, REF_NODEREF)) {
+ NULL, REF_NO_DEREF)) {
if (errno == EISDIR) {
struct strbuf path = STRBUF_INIT;
int result;
logmoved = log;
lock = lock_ref_oid_basic(refs, newrefname, NULL, NULL, NULL,
- REF_NODEREF, NULL, &err);
+ REF_NO_DEREF, NULL, &err);
if (!lock) {
if (copy)
error("unable to copy '%s' to '%s': %s", oldrefname, newrefname, err.buf);
rollback:
lock = lock_ref_oid_basic(refs, oldrefname, NULL, NULL, NULL,
- REF_NODEREF, NULL, &err);
+ REF_NO_DEREF, NULL, &err);
if (!lock) {
error("unable to lock %s for rollback: %s", oldrefname, err.buf);
strbuf_release(&err);
}
/*
- * Write sha1 into the open lockfile, then close the lockfile. On
- * errors, rollback the lockfile, fill in *err and
- * return -1.
+ * Write oid into the open lockfile, then close the lockfile. On
+ * errors, rollback the lockfile, fill in *err and return -1.
*/
static int write_ref_to_lockfile(struct ref_lock *lock,
const struct object_id *oid, struct strbuf *err)
int ret;
lock = lock_ref_oid_basic(refs, refname, NULL,
- NULL, NULL, REF_NODEREF, NULL,
+ NULL, NULL, REF_NO_DEREF, NULL,
&err);
if (!lock) {
error("%s", err.buf);
struct ref_update *new_update;
if ((update->flags & REF_LOG_ONLY) ||
- (update->flags & REF_ISPRUNING) ||
+ (update->flags & REF_IS_PRUNING) ||
(update->flags & REF_UPDATE_VIA_HEAD))
return 0;
new_update = ref_transaction_add_update(
transaction, "HEAD",
- update->flags | REF_LOG_ONLY | REF_NODEREF,
+ update->flags | REF_LOG_ONLY | REF_NO_DEREF,
&update->new_oid, &update->old_oid,
update->msg);
/*
* update is for a symref that points at referent and doesn't have
- * REF_NODEREF set. Split it into two updates:
- * - The original update, but with REF_LOG_ONLY and REF_NODEREF set
+ * REF_NO_DEREF set. Split it into two updates:
+ * - The original update, but with REF_LOG_ONLY and REF_NO_DEREF set
* - A new, separate update for the referent reference
* Note that the new update will itself be subject to splitting when
* the iteration gets to it.
/*
* Change the symbolic ref update to log only. Also, it
- * doesn't need to check its old SHA-1 value, as that will be
+ * doesn't need to check its old OID value, as that will be
* done when new_update is processed.
*/
- update->flags |= REF_LOG_ONLY | REF_NODEREF;
+ update->flags |= REF_LOG_ONLY | REF_NO_DEREF;
update->flags &= ~REF_HAVE_OLD;
/*
* Prepare for carrying out update:
* - Lock the reference referred to by update.
* - Read the reference under lock.
- * - Check that its old SHA-1 value (if specified) is correct, and in
+ * - Check that its old OID value (if specified) is correct, and in
* any case record it in update->lock->old_oid for later use when
* writing the reflog.
- * - If it is a symref update without REF_NODEREF, split it up into a
+ * - If it is a symref update without REF_NO_DEREF, split it up into a
* REF_LOG_ONLY update of the symref and add a separate update for
* the referent to transaction.
* - If it is an update of head_ref, add a corresponding REF_LOG_ONLY
update->backend_data = lock;
if (update->type & REF_ISSYMREF) {
- if (update->flags & REF_NODEREF) {
+ if (update->flags & REF_NO_DEREF) {
/*
* We won't be reading the referent as part of
* the transaction, so we have to read it here
- * to record and possibly check old_sha1:
+ * to record and possibly check old_oid:
*/
if (refs_read_ref_full(&refs->base,
referent.buf, 0,
/*
* Create a new update for the reference this
* symref is pointing at. Also, we will record
- * and verify old_sha1 for this update as part
+ * and verify old_oid for this update as part
* of processing the split-off update, so we
* don't have to do it here.
*/
/*
* If this update is happening indirectly because of a
- * symref update, record the old SHA-1 in the parent
+ * symref update, record the old OID in the parent
* update:
*/
for (parent_update = update->parent_update;
* transaction. (If we end up splitting up any updates using
* split_symref_update() or split_head_update(), those
* functions will check that the new updates don't have the
- * same refname as any existing ones.)
+ * same refname as any existing ones.) Also fail if any of the
+ * updates use REF_IS_PRUNING without REF_NO_DEREF.
*/
for (i = 0; i < transaction->nr; i++) {
struct ref_update *update = transaction->updates[i];
struct string_list_item *item =
string_list_append(&affected_refnames, update->refname);
+ if ((update->flags & REF_IS_PRUNING) &&
+ !(update->flags & REF_NO_DEREF))
+ BUG("REF_IS_PRUNING set without REF_NO_DEREF");
+
/*
* We store a pointer to update in item->util, but at
* the moment we never use the value of this field
if (update->flags & REF_DELETING &&
!(update->flags & REF_LOG_ONLY) &&
- !(update->flags & REF_ISPRUNING)) {
+ !(update->flags & REF_IS_PRUNING)) {
/*
* This reference has to be deleted from
* packed-refs if it exists there.
ref_transaction_add_update(
packed_transaction, update->refname,
- update->flags & ~REF_HAVE_OLD,
- &update->new_oid, &update->old_oid,
+ REF_HAVE_NEW | REF_NO_DEREF,
+ &update->new_oid, NULL,
NULL);
}
}
goto cleanup;
}
backend_data->packed_refs_locked = 1;
- ret = ref_transaction_prepare(packed_transaction, err);
+
+ if (is_packed_transaction_needed(refs->packed_ref_store,
+ packed_transaction)) {
+ ret = ref_transaction_prepare(packed_transaction, err);
+ } else {
+ /*
+ * We can skip rewriting the `packed-refs`
+ * file. But we do need to leave it locked, so
+ * that somebody else doesn't pack a reference
+ * that we are trying to delete.
+ */
+ if (ref_transaction_abort(packed_transaction, err)) {
+ ret = TRANSACTION_GENERIC_ERROR;
+ goto cleanup;
+ }
+ backend_data->packed_transaction = NULL;
+ }
}
cleanup:
struct ref_update *update = transaction->updates[i];
if (update->flags & REF_DELETING &&
!(update->flags & REF_LOG_ONLY) &&
- !(update->flags & REF_ISPRUNING)) {
+ !(update->flags & REF_IS_PRUNING)) {
strbuf_reset(&sb);
files_reflog_path(refs, &sb, update->refname);
if (!unlink_or_warn(sb.buf))
* reference if --updateref was specified:
*/
lock = lock_ref_oid_basic(refs, refname, oid,
- NULL, NULL, REF_NODEREF,
+ NULL, NULL, REF_NO_DEREF,
&type, &err);
if (!lock) {
error("cannot lock ref '%s': %s", refname, err.buf);
/*
* This value is set in `base.flags` if the peeled value of the
* current reference is known. In that case, `peeled` contains the
- * correct peeled value for the reference, which might be `null_sha1`
+ * correct peeled value for the reference, which might be `null_oid`
* if the reference is not a tag or if it is broken.
*/
#define REF_KNOWS_PEELED 0x40
* by the failing call to `fprintf()`.
*/
static int write_packed_entry(FILE *fh, const char *refname,
- const unsigned char *sha1,
- const unsigned char *peeled)
+ const struct object_id *oid,
+ const struct object_id *peeled)
{
- if (fprintf(fh, "%s %s\n", sha1_to_hex(sha1), refname) < 0 ||
- (peeled && fprintf(fh, "^%s\n", sha1_to_hex(peeled)) < 0))
+ if (fprintf(fh, "%s %s\n", oid_to_hex(oid), refname) < 0 ||
+ (peeled && fprintf(fh, "^%s\n", oid_to_hex(peeled)) < 0))
return -1;
return 0;
int peel_error = ref_iterator_peel(iter, &peeled);
if (write_packed_entry(out, iter->refname,
- iter->oid->hash,
- peel_error ? NULL : peeled.hash))
+ iter->oid,
+ peel_error ? NULL : &peeled))
goto write_error;
if ((ok = ref_iterator_advance(iter)) != ITER_OK)
&peeled);
if (write_packed_entry(out, update->refname,
- update->new_oid.hash,
- peel_error ? NULL : peeled.hash))
+ &update->new_oid,
+ peel_error ? NULL : &peeled))
goto write_error;
i++;
return -1;
}
+int is_packed_transaction_needed(struct ref_store *ref_store,
+ struct ref_transaction *transaction)
+{
+ struct packed_ref_store *refs = packed_downcast(
+ ref_store,
+ REF_STORE_READ,
+ "is_packed_transaction_needed");
+ struct strbuf referent = STRBUF_INIT;
+ size_t i;
+ int ret;
+
+ if (!is_lock_file_locked(&refs->lock))
+ BUG("is_packed_transaction_needed() called while unlocked");
+
+ /*
+ * We're only going to bother returning false for the common,
+ * trivial case that references are only being deleted, their
+ * old values are not being checked, and the old `packed-refs`
+ * file doesn't contain any of those reference(s). This gives
+ * false positives for some other cases that could
+ * theoretically be optimized away:
+ *
+ * 1. It could be that the old value is being verified without
+ * setting a new value. In this case, we could verify the
+ * old value here and skip the update if it agrees. If it
+ * disagrees, we could either let the update go through
+ * (the actual commit would re-detect and report the
+ * problem), or come up with a way of reporting such an
+ * error to *our* caller.
+ *
+ * 2. It could be that a new value is being set, but that it
+ * is identical to the current packed value of the
+ * reference.
+ *
+ * Neither of these cases will come up in the current code,
+ * because the only caller of this function passes to it a
+ * transaction that only includes `delete` updates with no
+ * `old_id`. Even if that ever changes, false positives only
+ * cause an optimization to be missed; they do not affect
+ * correctness.
+ */
+
+ /*
+ * Start with the cheap checks that don't require old
+ * reference values to be read:
+ */
+ for (i = 0; i < transaction->nr; i++) {
+ struct ref_update *update = transaction->updates[i];
+
+ if (update->flags & REF_HAVE_OLD)
+ /* Have to check the old value -> needed. */
+ return 1;
+
+ if ((update->flags & REF_HAVE_NEW) && !is_null_oid(&update->new_oid))
+ /* Have to set a new value -> needed. */
+ return 1;
+ }
+
+ /*
+ * The transaction isn't checking any old values nor is it
+ * setting any nonzero new values, so it still might be able
+ * to be skipped. Now do the more expensive check: the update
+ * is needed if any of the updates is a delete, and the old
+ * `packed-refs` file contains a value for that reference.
+ */
+ ret = 0;
+ for (i = 0; i < transaction->nr; i++) {
+ struct ref_update *update = transaction->updates[i];
+ unsigned int type;
+ struct object_id oid;
+
+ if (!(update->flags & REF_HAVE_NEW))
+ /*
+ * This reference isn't being deleted -> not
+ * needed.
+ */
+ continue;
+
+ if (!refs_read_raw_ref(ref_store, update->refname,
+ &oid, &referent, &type) ||
+ errno != ENOENT) {
+ /*
+ * We have to actually delete that reference
+ * -> this transaction is needed.
+ */
+ ret = 1;
+ break;
+ }
+ }
+
+ strbuf_release(&referent);
+ return ret;
+}
+
struct packed_transaction_backend_data {
/* True iff the transaction owns the packed-refs lock. */
int own_lock;
void packed_refs_unlock(struct ref_store *ref_store);
int packed_refs_is_locked(struct ref_store *ref_store);
+/*
+ * Return true if `transaction` really needs to be carried out against
+ * the specified packed_ref_store, or false if it can be skipped
+ * (i.e., because it is an obvious NOOP). `ref_store` must be locked
+ * before calling this function.
+ */
+int is_packed_transaction_needed(struct ref_store *ref_store,
+ struct ref_transaction *transaction);
+
#endif /* REFS_PACKED_BACKEND_H */
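
(Editorial sketch, paraphrasing the files-backend hunk earlier in this series rather than introducing new API: the intended calling pattern for a caller that already holds the packed-refs lock. Here `refs->packed_ref_store`, `packed_transaction`, `ret` and `err` are assumed to be the caller's locked packed ref store, its sub-transaction, and the usual result/error variables.)

	if (is_packed_transaction_needed(refs->packed_ref_store,
					 packed_transaction)) {
		ret = ref_transaction_prepare(packed_transaction, err);
	} else {
		/*
		 * Nothing in packed-refs has to change; keep it locked so
		 * nobody packs a ref we are deleting, but skip the rewrite.
		 */
		if (ref_transaction_abort(packed_transaction, err))
			ret = TRANSACTION_GENERIC_ERROR;
		packed_transaction = NULL;
	}
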
/*
* Emit a warning and return true iff ref1 and ref2 have the same name
- * and the same sha1. Die if they have the same name but different
- * sha1s.
+ * and the same oid. Die if they have the same name but different
+ * oids.
*/
static int is_dup_ref(const struct ref_entry *ref1, const struct ref_entry *ref2)
{
*/
/*
- * Flag passed to lock_ref_sha1_basic() telling it to tolerate broken
- * refs (i.e., because the reference is about to be deleted anyway).
+ * The following flags can appear in `ref_update::flags`. Their
+ * numerical values must not conflict with those of REF_NO_DEREF and
+ * REF_FORCE_CREATE_REFLOG, which are also stored in
+ * `ref_update::flags`.
*/
-#define REF_DELETING 0x02
/*
- * Used as a flag in ref_update::flags when a loose ref is being
- * pruned. This flag must only be used when REF_NODEREF is set.
+ * The reference should be updated to new_oid.
*/
-#define REF_ISPRUNING 0x04
+#define REF_HAVE_NEW (1 << 2)
/*
- * Used as a flag in ref_update::flags when the reference should be
- * updated to new_sha1.
+ * The current reference's value should be checked to make sure that
+ * it agrees with old_oid.
*/
-#define REF_HAVE_NEW 0x08
-
-/*
- * Used as a flag in ref_update::flags when old_sha1 should be
- * checked.
- */
-#define REF_HAVE_OLD 0x10
-
-/*
- * Used as a flag in ref_update::flags when the lockfile needs to be
- * committed.
- */
-#define REF_NEEDS_COMMIT 0x20
-
-/*
- * 0x40 is REF_FORCE_CREATE_REFLOG, so skip it if you're adding a
- * value to ref_update::flags
- */
-
-/*
- * Used as a flag in ref_update::flags when we want to log a ref
- * update but not actually perform it. This is used when a symbolic
- * ref update is split up.
- */
-#define REF_LOG_ONLY 0x80
-
-/*
- * Internal flag, meaning that the containing ref_update was via an
- * update to HEAD.
- */
-#define REF_UPDATE_VIA_HEAD 0x100
-
-/*
- * Used as a flag in ref_update::flags when the loose reference has
- * been deleted.
- */
-#define REF_DELETED_LOOSE 0x200
+#define REF_HAVE_OLD (1 << 3)
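
(Editorial illustration of how these bits combine, mirroring the prune_ref() hunk earlier in this series: a checked deletion queued via ref_transaction_add_update(). `transaction`, `refname` and a `struct object_id old_oid` are assumed to be in scope; the files-backend-internal REF_IS_PRUNING bit is omitted.)

	ref_transaction_add_update(transaction, refname,
				   REF_NO_DEREF | REF_HAVE_NEW | REF_HAVE_OLD,
				   &null_oid,	/* new value: null_oid means "delete" */
				   &old_oid,	/* old value that must still match */
				   NULL);	/* no reflog message */
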
/*
* Return the length of time to retry acquiring a loose reference lock
* tag recursively until a non-tag is found. If successful, store the
* result to oid and return PEEL_PEELED. If the object is not a tag
* or is not valid, return PEEL_NON_TAG or PEEL_INVALID, respectively,
- * and leave sha1 unchanged.
+ * and leave oid unchanged.
*/
enum peel_status peel_object(const struct object_id *name, struct object_id *oid);
int copy_reflog_msg(char *buf, const char *msg);
/**
- * Information needed for a single ref update. Set new_sha1 to the new
- * value or to null_sha1 to delete the ref. To check the old value
- * while the ref is locked, set (flags & REF_HAVE_OLD) and set
- * old_sha1 to the old value, or to null_sha1 to ensure the ref does
- * not exist before update.
+ * Information needed for a single ref update. Set new_oid to the new
+ * value or to null_oid to delete the ref. To check the old value
+ * while the ref is locked, set (flags & REF_HAVE_OLD) and set old_oid
+ * to the old value, or to null_oid to ensure the ref does not exist
+ * before update.
*/
struct ref_update {
-
/*
- * If (flags & REF_HAVE_NEW), set the reference to this value:
+ * If (flags & REF_HAVE_NEW), set the reference to this value
+ * (or delete it, if `new_oid` is `null_oid`).
*/
struct object_id new_oid;
/*
* If (flags & REF_HAVE_OLD), check that the reference
- * previously had this value:
+ * previously had this value (or didn't previously exist, if
+ * `old_oid` is `null_oid`).
*/
struct object_id old_oid;
/*
- * One or more of REF_HAVE_NEW, REF_HAVE_OLD, REF_NODEREF,
- * REF_DELETING, REF_ISPRUNING, REF_LOG_ONLY,
- * REF_UPDATE_VIA_HEAD, REF_NEEDS_COMMIT, and
- * REF_DELETED_LOOSE:
+ * One or more of REF_NO_DEREF, REF_FORCE_CREATE_REFLOG,
+ * REF_HAVE_NEW, REF_HAVE_OLD, or backend-specific flags.
*/
unsigned int flags;
/*
* Add a ref_update with the specified properties to transaction, and
* return a pointer to the new object. This function does not verify
- * that refname is well-formed. new_sha1 and old_sha1 are only
+ * that refname is well-formed. new_oid and old_oid are only
* dereferenced if the REF_HAVE_NEW and REF_HAVE_OLD bits,
* respectively, are set in flags.
*/
return remote_for_branch(branch, explicit);
}
+const char *remote_ref_for_branch(struct branch *branch, int for_push,
+ int *explicit)
+{
+ if (branch) {
+ if (!for_push) {
+ if (branch->merge_nr) {
+ if (explicit)
+ *explicit = 1;
+ return branch->merge_name[0];
+ }
+ } else {
+ const char *dst, *remote_name =
+ pushremote_for_branch(branch, NULL);
+ struct remote *remote = remote_get(remote_name);
+
+ if (remote && remote->push_refspec_nr &&
+ (dst = apply_refspecs(remote->push,
+ remote->push_refspec_nr,
+ branch->refname))) {
+ if (explicit)
+ *explicit = 1;
+ return dst;
+ }
+ }
+ }
+ if (explicit)
+ *explicit = 0;
+ return "";
+}
+
static struct remote *remote_get_1(const char *name,
const char *(*get_default)(struct branch *, int *))
{
struct branch *branch_get(const char *name);
const char *remote_for_branch(struct branch *branch, int *explicit);
const char *pushremote_for_branch(struct branch *branch, int *explicit);
+const char *remote_ref_for_branch(struct branch *branch, int for_push,
+ int *explicit);
int branch_has_merge_config(struct branch *branch);
int branch_merge_matches(struct branch *, int n, const char *);
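
(Editorial illustration, not part of the patch: a minimal sketch of how a caller might use the new helper to report where the current branch is configured to push, assuming only the declarations above plus the usual git-internal headers.)

	struct branch *branch = branch_get(NULL);	/* the currently checked-out branch */
	int explicit;
	const char *ref = remote_ref_for_branch(branch, 1 /* for_push */, &explicit);

	if (explicit)
		printf("pushes to ref %s\n", ref);	/* e.g. taken from a push refspec */
	else
		printf("push destination ref not explicitly configured\n");
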
*/
if (command == TODO_PICK && !opts->no_commit && (res == 0 || res == 1) &&
update_ref(NULL, "CHERRY_PICK_HEAD", &commit->object.oid, NULL,
- REF_NODEREF, UPDATE_REFS_MSG_ON_ERR))
+ REF_NO_DEREF, UPDATE_REFS_MSG_ON_ERR))
res = -1;
if (command == TODO_REVERT && ((opts->no_commit && res == 0) || res == 1) &&
update_ref(NULL, "REVERT_HEAD", &commit->object.oid, NULL,
- REF_NODEREF, UPDATE_REFS_MSG_ON_ERR))
+ REF_NO_DEREF, UPDATE_REFS_MSG_ON_ERR))
res = -1;
if (res) {
msg = reflog_message(opts, "finish", "%s onto %s",
head_ref.buf, buf.buf);
if (update_ref(msg, head_ref.buf, &head, &orig,
- REF_NODEREF, UPDATE_REFS_MSG_ON_ERR)) {
+ REF_NO_DEREF, UPDATE_REFS_MSG_ON_ERR)) {
res = error(_("could not update %s"),
head_ref.buf);
goto cleanup_head_ref;
return res;
}
+static int rewrite_file(const char *path, const char *buf, size_t len)
+{
+ int rc = 0;
+ int fd = open(path, O_WRONLY | O_TRUNC);
+ if (fd < 0)
+ return error_errno(_("could not open '%s' for writing"), path);
+ if (write_in_full(fd, buf, len) < 0)
+ rc = error_errno(_("could not write to '%s'"), path);
+ if (close(fd) && !rc)
+ rc = error_errno(_("could not close '%s'"), path);
+ return rc;
+}
+
/* skip picking commits whose parents are unchanged */
int skip_unnecessary_picks(void)
{
}
close(fd);
- fd = open(rebase_path_todo(), O_WRONLY, 0666);
- if (fd < 0) {
- error_errno(_("could not open '%s' for writing"),
- rebase_path_todo());
- todo_list_release(&todo_list);
- return -1;
- }
- if (write_in_full(fd, todo_list.buf.buf + offset,
- todo_list.buf.len - offset) < 0) {
- error_errno(_("could not write to '%s'"),
- rebase_path_todo());
- close(fd);
- todo_list_release(&todo_list);
- return -1;
- }
- if (ftruncate(fd, todo_list.buf.len - offset) < 0) {
- error_errno(_("could not truncate '%s'"),
- rebase_path_todo());
+ if (rewrite_file(rebase_path_todo(), todo_list.buf.buf + offset,
+ todo_list.buf.len - offset) < 0) {
todo_list_release(&todo_list);
- close(fd);
return -1;
}
- close(fd);
todo_list.current = i;
if (is_fixup(peek_command(&todo_list, 0)))
}
}
- fd = open(todo_file, O_WRONLY);
- if (fd < 0)
- res = error_errno(_("could not open '%s'"), todo_file);
- else if (write(fd, buf.buf, buf.len) < 0)
- res = error_errno(_("could not write to '%s'"), todo_file);
- else if (ftruncate(fd, buf.len) < 0)
- res = error_errno(_("could not truncate '%s'"),
- todo_file);
- close(fd);
+ res = rewrite_file(todo_file, buf.buf, buf.len);
strbuf_release(&buf);
}
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software Foundation,
- Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
+ along with this program; if not, see <http://www.gnu.org/licenses/>. */
/* closeout.c - close standard output and standard error
Copyright (C) 1998-2007 Free Software Foundation, Inc.
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software Foundation,
- Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */
+ along with this program; if not, see <http://www.gnu.org/licenses/>. */
#include <errno.h>
#include <stdio.h>
struct strbuf objdirbuf = STRBUF_INIT;
struct strbuf entry = STRBUF_INIT;
+ if (!alt || !*alt)
+ return;
+
if (depth > 5) {
error("%s: ignoring alternate object stores, nesting too deep.",
relative_base);
return;
alt = getenv(ALTERNATE_DB_ENVIRONMENT);
- if (!alt) alt = "";
alt_odb_tail = &alt_odb_list;
link_alt_odb_entries(alt, PATH_SEP, NULL, 0);
if ((pos >= 0) && (pos < istate->cache_nr)) {
struct stat st;
if (lstat(GITMODULES_FILE, &st) == 0 &&
- ce_match_stat(istate->cache[pos], &st, 0) & DATA_CHANGED)
+ ce_match_stat(istate->cache[pos], &st, CE_MATCH_IGNORE_FSMONITOR) & DATA_CHANGED)
return 0;
}
/test-config
/test-date
/test-delta
+/test-drop-caches
/test-dump-cache-tree
+/test-dump-fsmonitor
/test-dump-split-index
/test-dump-untracked-cache
/test-fake-ssh
--- /dev/null
+#include "git-compat-util.h"
+
+#if defined(GIT_WINDOWS_NATIVE)
+
+static int cmd_sync(void)
+{
+ char Buffer[MAX_PATH];
+ DWORD dwRet;
+ char szVolumeAccessPath[] = "\\\\.\\X:";
+ HANDLE hVolWrite;
+ int success = 0;
+
+ dwRet = GetCurrentDirectory(MAX_PATH, Buffer);
+ if ((0 == dwRet) || (dwRet > MAX_PATH))
+ return error("Error getting current directory");
+
+ if ((Buffer[0] < 'A') || (Buffer[0] > 'Z'))
+ return error("Invalid drive letter '%c'", Buffer[0]);
+
+ szVolumeAccessPath[4] = Buffer[0];
+ hVolWrite = CreateFile(szVolumeAccessPath, GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
+ if (INVALID_HANDLE_VALUE == hVolWrite)
+ return error("Unable to open volume for writing, need admin access");
+
+ success = FlushFileBuffers(hVolWrite);
+ if (!success)
+ error("Unable to flush volume");
+
+ CloseHandle(hVolWrite);
+
+ return !success;
+}
+
+#define STATUS_SUCCESS (0x00000000L)
+#define STATUS_PRIVILEGE_NOT_HELD (0xC0000061L)
+
+typedef enum _SYSTEM_INFORMATION_CLASS {
+ SystemMemoryListInformation = 80,
+} SYSTEM_INFORMATION_CLASS;
+
+typedef enum _SYSTEM_MEMORY_LIST_COMMAND {
+ MemoryCaptureAccessedBits,
+ MemoryCaptureAndResetAccessedBits,
+ MemoryEmptyWorkingSets,
+ MemoryFlushModifiedList,
+ MemoryPurgeStandbyList,
+ MemoryPurgeLowPriorityStandbyList,
+ MemoryCommandMax
+} SYSTEM_MEMORY_LIST_COMMAND;
+
+static BOOL GetPrivilege(HANDLE TokenHandle, LPCSTR lpName, int flags)
+{
+ BOOL bResult;
+ DWORD dwBufferLength;
+ LUID luid;
+ TOKEN_PRIVILEGES tpPreviousState;
+ TOKEN_PRIVILEGES tpNewState;
+
+ dwBufferLength = 16;
+ bResult = LookupPrivilegeValueA(0, lpName, &luid);
+ if (bResult) {
+ tpNewState.PrivilegeCount = 1;
+ tpNewState.Privileges[0].Luid = luid;
+ tpNewState.Privileges[0].Attributes = 0;
+ bResult = AdjustTokenPrivileges(TokenHandle, 0, &tpNewState,
+ (DWORD)((LPBYTE)&(tpNewState.Privileges[1]) - (LPBYTE)&tpNewState),
+ &tpPreviousState, &dwBufferLength);
+ if (bResult) {
+ tpPreviousState.PrivilegeCount = 1;
+ tpPreviousState.Privileges[0].Luid = luid;
+ tpPreviousState.Privileges[0].Attributes = flags != 0 ? 2 : 0;
+ bResult = AdjustTokenPrivileges(TokenHandle, 0, &tpPreviousState,
+ dwBufferLength, 0, 0);
+ }
+ }
+ return bResult;
+}
+
+static int cmd_dropcaches(void)
+{
+ HANDLE hProcess = GetCurrentProcess();
+ HANDLE hToken;
+ HMODULE ntdll;
+ DWORD(WINAPI *NtSetSystemInformation)(INT, PVOID, ULONG);
+ SYSTEM_MEMORY_LIST_COMMAND command;
+ int status;
+
+ if (!OpenProcessToken(hProcess, TOKEN_QUERY | TOKEN_ADJUST_PRIVILEGES, &hToken))
+ return error("Can't open current process token");
+
+ if (!GetPrivilege(hToken, "SeProfileSingleProcessPrivilege", 1))
+ return error("Can't get SeProfileSingleProcessPrivilege");
+
+ CloseHandle(hToken);
+
+ ntdll = LoadLibrary("ntdll.dll");
+ if (!ntdll)
+ return error("Can't load ntdll.dll, wrong Windows version?");
+
+ NtSetSystemInformation =
+ (DWORD(WINAPI *)(INT, PVOID, ULONG))GetProcAddress(ntdll, "NtSetSystemInformation");
+ if (!NtSetSystemInformation)
+ return error("Can't get function addresses, wrong Windows version?");
+
+ command = MemoryPurgeStandbyList;
+ status = NtSetSystemInformation(
+ SystemMemoryListInformation,
+ &command,
+ sizeof(SYSTEM_MEMORY_LIST_COMMAND)
+ );
+ if (status == STATUS_PRIVILEGE_NOT_HELD)
+ error("Insufficient privileges to purge the standby list, need admin access");
+ else if (status != STATUS_SUCCESS)
+ error("Unable to execute the memory list command %d", status);
+
+ FreeLibrary(ntdll);
+
+ return status;
+}
+
+#elif defined(__linux__)
+
+static int cmd_sync(void)
+{
+ return system("sync");
+}
+
+static int cmd_dropcaches(void)
+{
+ return system("echo 3 | sudo tee /proc/sys/vm/drop_caches");
+}
+
+#elif defined(__APPLE__)
+
+static int cmd_sync(void)
+{
+ return system("sync");
+}
+
+static int cmd_dropcaches(void)
+{
+ return system("sudo purge");
+}
+
+#else
+
+static int cmd_sync(void)
+{
+ return 0;
+}
+
+static int cmd_dropcaches(void)
+{
+ return error("drop caches not implemented on this platform");
+}
+
+#endif
+
+int cmd_main(int argc, const char **argv)
+{
+ cmd_sync();
+ return cmd_dropcaches();
+}
--- /dev/null
+#include "cache.h"
+
+int cmd_main(int ac, const char **av)
+{
+ struct index_state *istate = &the_index;
+ int i;
+
+ setup_git_directory();
+ if (do_read_index(istate, get_index_file(), 0) < 0)
+ die("unable to read index file");
+ if (!istate->fsmonitor_last_update) {
+ printf("no fsmonitor\n");
+ return 0;
+ }
+ printf("fsmonitor last update %"PRIuMAX"\n", (uintmax_t)istate->fsmonitor_last_update);
+
+ for (i = 0; i < istate->cache_nr; i++)
+ printf((istate->cache[i]->ce_flags & CE_FSMONITOR_VALID) ? "+" : "-");
+
+ return 0;
+}
# to protect the history!
#
-# Test that submodule contents are currently not updated when switching
-# between commits that change a submodule.
-test_submodule_switch () {
+# Internal function; use test_submodule_switch() or
+# test_submodule_forced_switch() instead.
+test_submodule_switch_common() {
command="$1"
######################### Appearing submodule #########################
# Switching to a commit letting a submodule appear creates empty dir ...
test_submodule_content sub1 origin/add_sub1
)
'
- # ... and doesn't care if it already exists ...
+ # ... and doesn't care if it already exists.
test_expect_$RESULT "$command: added submodule leaves existing empty directory alone" '
prolog &&
reset_work_tree_to no_submodule &&
test_submodule_content sub1 origin/add_sub1
)
'
- # ... unless there is an untracked file in its place.
- test_expect_success "$command: added submodule doesn't remove untracked unignored file with same name" '
- prolog &&
- reset_work_tree_to no_submodule &&
- (
- cd submodule_update &&
- git branch -t add_sub1 origin/add_sub1 &&
- >sub1 &&
- test_must_fail $command add_sub1 &&
- test_superproject_content origin/no_submodule &&
- test_must_be_empty sub1
- )
- '
# Replacing a tracked file with a submodule produces an empty
# directory ...
test_expect_$RESULT "$command: replace tracked file with submodule creates empty directory" '
# submodule files with the newly checked out ones in the
# directory of the same name while it shouldn't.
RESULT="failure"
+ elif test "$KNOWN_FAILURE_FORCED_SWITCH_TESTS" = 1
+ then
+ # All existing tests that use test_submodule_forced_switch()
+ # require this.
+ RESULT="failure"
else
RESULT="success"
fi
test_submodule_content sub1 origin/modify_sub1
)
'
-
# Updating a submodule to an invalid sha1 doesn't update the
# submodule's work tree, subsequent update will fail
test_expect_$RESULT "$command: modified submodule does not update submodule work tree to invalid commit" '
'
}
-# Test that submodule contents are currently not updated when switching
-# between commits that change a submodule, but throwing away local changes in
-# the superproject is allowed.
-test_submodule_forced_switch () {
+# Declares and invokes several tests that, in various situations, check that
+# the provided transition function:
+#  - succeeds in updating the worktree and index of a superproject to a target
+#    commit, or fails atomically (depending on the test situation)
+#  - if it succeeds, the contents of submodule directories are unchanged
+#  - if it succeeds, once "git submodule update" is invoked, the contents of
+#    submodule directories are updated
+#
+# Use as follows:
+#
+# my_func () {
+# target=$1
+# # Do something here that updates the worktree and index to match target,
+# # but not any submodule directories.
+# }
+# test_submodule_switch "my_func"
+test_submodule_switch () {
command="$1"
- ######################### Appearing submodule #########################
- # Switching to a commit letting a submodule appear creates empty dir ...
- test_expect_success "$command: added submodule creates empty directory" '
- prolog &&
- reset_work_tree_to no_submodule &&
- (
- cd submodule_update &&
- git branch -t add_sub1 origin/add_sub1 &&
- $command add_sub1 &&
- test_superproject_content origin/add_sub1 &&
- test_dir_is_empty sub1 &&
- git submodule update --init --recursive &&
- test_submodule_content sub1 origin/add_sub1
- )
- '
- # ... and doesn't care if it already exists ...
- test_expect_success "$command: added submodule leaves existing empty directory alone" '
+ test_submodule_switch_common "$command"
+
+ # An empty directory does not prevent the creation of a submodule of
+ # the same name, but a file does.
+ test_expect_success "$command: added submodule doesn't remove untracked unignored file with same name" '
prolog &&
reset_work_tree_to no_submodule &&
(
cd submodule_update &&
git branch -t add_sub1 origin/add_sub1 &&
- mkdir sub1 &&
- $command add_sub1 &&
- test_superproject_content origin/add_sub1 &&
- test_dir_is_empty sub1 &&
- git submodule update --init --recursive &&
- test_submodule_content sub1 origin/add_sub1
+ >sub1 &&
+ test_must_fail $command add_sub1 &&
+ test_superproject_content origin/no_submodule &&
+ test_must_be_empty sub1
)
'
- # ... unless there is an untracked file in its place.
+}
+
+# Same as test_submodule_switch(), except that throwing away local changes in
+# the superproject is allowed.
+test_submodule_forced_switch () {
+ command="$1"
+ KNOWN_FAILURE_FORCED_SWITCH_TESTS=1
+ test_submodule_switch_common "$command"
+
+ # When forced, a file in the superproject does not prevent creating a
+ # submodule of the same name.
test_expect_success "$command: added submodule does remove untracked unignored file with same name when forced" '
prolog &&
reset_work_tree_to no_submodule &&
test_dir_is_empty sub1
)
'
- # Replacing a tracked file with a submodule produces an empty
- # directory ...
- test_expect_success "$command: replace tracked file with submodule creates empty directory" '
- prolog &&
- reset_work_tree_to replace_sub1_with_file &&
- (
- cd submodule_update &&
- git branch -t replace_file_with_sub1 origin/replace_file_with_sub1 &&
- $command replace_file_with_sub1 &&
- test_superproject_content origin/replace_file_with_sub1 &&
- test_dir_is_empty sub1 &&
- git submodule update --init --recursive &&
- test_submodule_content sub1 origin/replace_file_with_sub1
- )
- '
- # ... as does removing a directory with tracked files with a
- # submodule.
- test_expect_success "$command: replace directory with submodule" '
- prolog &&
- reset_work_tree_to replace_sub1_with_directory &&
- (
- cd submodule_update &&
- git branch -t replace_directory_with_sub1 origin/replace_directory_with_sub1 &&
- $command replace_directory_with_sub1 &&
- test_superproject_content origin/replace_directory_with_sub1 &&
- test_dir_is_empty sub1 &&
- git submodule update --init --recursive &&
- test_submodule_content sub1 origin/replace_directory_with_sub1
- )
- '
-
- ######################## Disappearing submodule #######################
- # Removing a submodule doesn't remove its work tree ...
- test_expect_success "$command: removed submodule leaves submodule directory and its contents in place" '
- prolog &&
- reset_work_tree_to add_sub1 &&
- (
- cd submodule_update &&
- git branch -t remove_sub1 origin/remove_sub1 &&
- $command remove_sub1 &&
- test_superproject_content origin/remove_sub1 &&
- test_submodule_content sub1 origin/add_sub1
- )
- '
- # ... especially when it contains a .git directory.
- test_expect_success "$command: removed submodule leaves submodule containing a .git directory alone" '
- prolog &&
- reset_work_tree_to add_sub1 &&
- (
- cd submodule_update &&
- git branch -t remove_sub1 origin/remove_sub1 &&
- replace_gitfile_with_git_dir sub1 &&
- $command remove_sub1 &&
- test_superproject_content origin/remove_sub1 &&
- test_git_directory_is_unchanged sub1 &&
- test_submodule_content sub1 origin/add_sub1
- )
- '
- # Replacing a submodule with files in a directory must fail as the
- # submodule work tree isn't removed ...
- test_expect_failure "$command: replace submodule with a directory must fail" '
- prolog &&
- reset_work_tree_to add_sub1 &&
- (
- cd submodule_update &&
- git branch -t replace_sub1_with_directory origin/replace_sub1_with_directory &&
- test_must_fail $command replace_sub1_with_directory &&
- test_superproject_content origin/add_sub1 &&
- test_submodule_content sub1 origin/add_sub1
- )
- '
- # ... especially when it contains a .git directory.
- test_expect_failure "$command: replace submodule containing a .git directory with a directory must fail" '
- prolog &&
- reset_work_tree_to add_sub1 &&
- (
- cd submodule_update &&
- git branch -t replace_sub1_with_directory origin/replace_sub1_with_directory &&
- replace_gitfile_with_git_dir sub1 &&
- test_must_fail $command replace_sub1_with_directory &&
- test_superproject_content origin/add_sub1 &&
- test_git_directory_is_unchanged sub1 &&
- test_submodule_content sub1 origin/add_sub1
- )
- '
- # Replacing it with a file must fail as it could throw away any local
- # work tree changes ...
- test_expect_failure "$command: replace submodule with a file must fail" '
- prolog &&
- reset_work_tree_to add_sub1 &&
- (
- cd submodule_update &&
- git branch -t replace_sub1_with_file origin/replace_sub1_with_file &&
- test_must_fail $command replace_sub1_with_file &&
- test_superproject_content origin/add_sub1 &&
- test_submodule_content sub1 origin/add_sub1
- )
- '
- # ... or even destroy unpushed parts of submodule history if that
- # still uses a .git directory.
- test_expect_failure "$command: replace submodule containing a .git directory with a file must fail" '
- prolog &&
- reset_work_tree_to add_sub1 &&
- (
- cd submodule_update &&
- git branch -t replace_sub1_with_file origin/replace_sub1_with_file &&
- replace_gitfile_with_git_dir sub1 &&
- test_must_fail $command replace_sub1_with_file &&
- test_superproject_content origin/add_sub1 &&
- test_git_directory_is_unchanged sub1 &&
- test_submodule_content sub1 origin/add_sub1
- )
- '
-
- ########################## Modified submodule #########################
- # Updating a submodule sha1 doesn't update the submodule's work tree
- test_expect_success "$command: modified submodule does not update submodule work tree" '
- prolog &&
- reset_work_tree_to add_sub1 &&
- (
- cd submodule_update &&
- git branch -t modify_sub1 origin/modify_sub1 &&
- $command modify_sub1 &&
- test_superproject_content origin/modify_sub1 &&
- test_submodule_content sub1 origin/add_sub1 &&
- git submodule update &&
- test_submodule_content sub1 origin/modify_sub1
- )
- '
- # Updating a submodule to an invalid sha1 doesn't update the
- # submodule's work tree, subsequent update will fail
- test_expect_success "$command: modified submodule does not update submodule work tree to invalid commit" '
- prolog &&
- reset_work_tree_to add_sub1 &&
- (
- cd submodule_update &&
- git branch -t invalid_sub1 origin/invalid_sub1 &&
- $command invalid_sub1 &&
- test_superproject_content origin/invalid_sub1 &&
- test_submodule_content sub1 origin/add_sub1 &&
- test_must_fail git submodule update &&
- test_submodule_content sub1 origin/add_sub1
- )
- '
- # Updating a submodule from an invalid sha1 doesn't update the
- # submodule's work tree, subsequent update will succeed
- test_expect_success "$command: modified submodule does not update submodule work tree from invalid commit" '
- prolog &&
- reset_work_tree_to invalid_sub1 &&
- (
- cd submodule_update &&
- git branch -t valid_sub1 origin/valid_sub1 &&
- $command valid_sub1 &&
- test_superproject_content origin/valid_sub1 &&
- test_dir_is_empty sub1 &&
- git submodule update --init --recursive &&
- test_submodule_content sub1 origin/valid_sub1
- )
- '
}
# Test that submodule contents are correctly updated when switching
--- /dev/null
+#!/bin/sh
+
+test_description="Test core.fsmonitor"
+
+. ./perf-lib.sh
+
+#
+# Performance test for the fsmonitor feature, which enables git to talk to a
+# file system change monitor and avoid having to scan the working directory
+# for new or modified files.
+#
+# By default, the performance test will utilize the Watchman file system
+# monitor if it is installed. If Watchman is not installed, it will use a
+# dummy integration script that does not report any new or modified files.
+# The dummy script has very little overhead, which makes the results optimistic.
+#
+# The performance test will also use the untracked cache feature if it is
+# available as fsmonitor uses it to speed up scanning for untracked files.
+#
+# There are 3 environment variables that can be used to alter the default
+# behavior of the performance test:
+#
+# GIT_PERF_7519_UNTRACKED_CACHE: used to configure core.untrackedCache
+# GIT_PERF_7519_SPLIT_INDEX: used to configure core.splitIndex
+# GIT_PERF_7519_FSMONITOR: used to configure core.fsMonitor
+#
+# The big win for using fsmonitor is the elimination of the need to scan the
+# working directory looking for changed and untracked files. If the file
+# information is all cached in RAM, the benefits are reduced.
+#
+# GIT_PERF_7519_DROP_CACHE: if set, the OS caches are dropped between tests
+#
+
+test_perf_large_repo
+test_checkout_worktree
+
+test_lazy_prereq UNTRACKED_CACHE '
+ { git update-index --test-untracked-cache; ret=$?; } &&
+ test $ret -ne 1
+'
+
+test_lazy_prereq WATCHMAN '
+ { command -v watchman >/dev/null 2>&1; ret=$?; } &&
+ test $ret -ne 1
+'
+
+if test_have_prereq WATCHMAN
+then
+ # Convert unix style paths to escaped Windows style paths for Watchman
+ case "$(uname -s)" in
+ MSYS_NT*)
+ GIT_WORK_TREE="$(cygpath -aw "$PWD" | sed 's,\\,/,g')"
+ ;;
+ *)
+ GIT_WORK_TREE="$PWD"
+ ;;
+ esac
+fi
+
+if test -n "$GIT_PERF_7519_DROP_CACHE"
+then
+ # When using GIT_PERF_7519_DROP_CACHE, GIT_PERF_REPEAT_COUNT must be 1 to
+ # generate valid results. Otherwise the caching that happens for the nth
+ # run will negate the validity of the comparisons.
+ if test "$GIT_PERF_REPEAT_COUNT" -ne 1
+ then
+ echo "warning: Setting GIT_PERF_REPEAT_COUNT=1" >&2
+ GIT_PERF_REPEAT_COUNT=1
+ fi
+fi
+
+test_expect_success "setup for fsmonitor" '
+ # set untrackedCache depending on the environment
+ if test -n "$GIT_PERF_7519_UNTRACKED_CACHE"
+ then
+ git config core.untrackedCache "$GIT_PERF_7519_UNTRACKED_CACHE"
+ else
+ if test_have_prereq UNTRACKED_CACHE
+ then
+ git config core.untrackedCache true
+ else
+ git config core.untrackedCache false
+ fi
+ fi &&
+
+ # set core.splitindex depending on the environment
+ if test -n "$GIT_PERF_7519_SPLIT_INDEX"
+ then
+ git config core.splitIndex "$GIT_PERF_7519_SPLIT_INDEX"
+ fi &&
+
+ # set INTEGRATION_SCRIPT depending on the environment
+ if test -n "$GIT_PERF_7519_FSMONITOR"
+ then
+ INTEGRATION_SCRIPT="$GIT_PERF_7519_FSMONITOR"
+ else
+ #
+ # Choose integration script based on existence of Watchman.
+ # If Watchman exists, watch the work tree and attempt a query.
+ # If everything succeeds, use Watchman integration script,
+ # else fall back to an empty integration script.
+ #
+ mkdir .git/hooks &&
+ if test_have_prereq WATCHMAN
+ then
+ INTEGRATION_SCRIPT=".git/hooks/fsmonitor-watchman" &&
+ cp "$TEST_DIRECTORY/../templates/hooks--fsmonitor-watchman.sample" "$INTEGRATION_SCRIPT" &&
+ watchman watch "$GIT_WORK_TREE" &&
+ watchman watch-list | grep -q -F "$GIT_WORK_TREE"
+ else
+ INTEGRATION_SCRIPT=".git/hooks/fsmonitor-empty" &&
+ write_script "$INTEGRATION_SCRIPT"<<-\EOF
+ EOF
+ fi
+ fi &&
+
+ git config core.fsmonitor "$INTEGRATION_SCRIPT" &&
+ git update-index --fsmonitor
+'
+
+if test -n "$GIT_PERF_7519_DROP_CACHE"; then
+ test-drop-caches
+fi
+
+test_perf "status (fsmonitor=$INTEGRATION_SCRIPT)" '
+ git status
+'
+
+if test -n "$GIT_PERF_7519_DROP_CACHE"; then
+ test-drop-caches
+fi
+
+test_perf "status -uno (fsmonitor=$INTEGRATION_SCRIPT)" '
+ git status -uno
+'
+
+if test -n "$GIT_PERF_7519_DROP_CACHE"; then
+ test-drop-caches
+fi
+
+test_perf "status -uall (fsmonitor=$INTEGRATION_SCRIPT)" '
+ git status -uall
+'
+
+test_expect_success "setup without fsmonitor" '
+ unset INTEGRATION_SCRIPT &&
+ git config --unset core.fsmonitor &&
+ git update-index --no-fsmonitor
+'
+
+if test -n "$GIT_PERF_7519_DROP_CACHE"; then
+ test-drop-caches
+fi
+
+test_perf "status (fsmonitor=$INTEGRATION_SCRIPT)" '
+ git status
+'
+
+if test -n "$GIT_PERF_7519_DROP_CACHE"; then
+ test-drop-caches
+fi
+
+test_perf "status -uno (fsmonitor=$INTEGRATION_SCRIPT)" '
+ git status -uno
+'
+
+if test -n "$GIT_PERF_7519_DROP_CACHE"; then
+ test-drop-caches
+fi
+
+test_perf "status -uall (fsmonitor=$INTEGRATION_SCRIPT)" '
+ git status -uall
+'
+
+if test_have_prereq WATCHMAN
+then
+ watchman watch-del "$GIT_WORK_TREE" >/dev/null 2>&1 &&
+
+ # Work around Watchman bug on Windows where it holds on to handles
+ # preventing the removal of the trash directory
+ watchman shutdown-server >/dev/null 2>&1
+fi
+
+test_done
# to the "list_available_blobs" response.
#
+use 5.008;
+use lib (split(/:/, $ENV{GITPERLLIB}));
use strict;
use warnings;
use IO::File;
+use Git::Packet;
my $MAX_PACKET_CONTENT_SIZE = 65516;
my $log_file = shift @ARGV;
return $str;
}
-sub packet_bin_read {
- my $buffer;
- my $bytes_read = read STDIN, $buffer, 4;
- if ( $bytes_read == 0 ) {
- # EOF - Git stopped talking to us!
- print $debug "STOP\n";
- exit();
- }
- elsif ( $bytes_read != 4 ) {
- die "invalid packet: '$buffer'";
- }
- my $pkt_size = hex($buffer);
- if ( $pkt_size == 0 ) {
- return ( 1, "" );
- }
- elsif ( $pkt_size > 4 ) {
- my $content_size = $pkt_size - 4;
- $bytes_read = read STDIN, $buffer, $content_size;
- if ( $bytes_read != $content_size ) {
- die "invalid packet ($content_size bytes expected; $bytes_read bytes read)";
- }
- return ( 0, $buffer );
- }
- else {
- die "invalid packet size: $pkt_size";
- }
-}
-
-sub packet_txt_read {
- my ( $res, $buf ) = packet_bin_read();
- unless ( $buf eq '' or $buf =~ s/\n$// ) {
- die "A non-binary line MUST be terminated by an LF.";
- }
- return ( $res, $buf );
-}
-
-sub packet_bin_write {
- my $buf = shift;
- print STDOUT sprintf( "%04x", length($buf) + 4 );
- print STDOUT $buf;
- STDOUT->flush();
-}
-
-sub packet_txt_write {
- packet_bin_write( $_[0] . "\n" );
-}
-
-sub packet_flush {
- print STDOUT sprintf( "%04x", 0 );
- STDOUT->flush();
-}
-
print $debug "START\n";
$debug->flush();
-( packet_txt_read() eq ( 0, "git-filter-client" ) ) || die "bad initialize";
-( packet_txt_read() eq ( 0, "version=2" ) ) || die "bad version";
-( packet_bin_read() eq ( 1, "" ) ) || die "bad version end";
+packet_initialize("git-filter", 2);
-packet_txt_write("git-filter-server");
-packet_txt_write("version=2");
-packet_flush();
+my %remote_caps = packet_read_and_check_capabilities("clean", "smudge", "delay");
+packet_check_and_write_capabilities(\%remote_caps, @capabilities);
-( packet_txt_read() eq ( 0, "capability=clean" ) ) || die "bad capability";
-( packet_txt_read() eq ( 0, "capability=smudge" ) ) || die "bad capability";
-( packet_txt_read() eq ( 0, "capability=delay" ) ) || die "bad capability";
-( packet_bin_read() eq ( 1, "" ) ) || die "bad capability end";
-
-foreach (@capabilities) {
- packet_txt_write( "capability=" . $_ );
-}
-packet_flush();
print $debug "init handshake complete\n";
$debug->flush();
while (1) {
- my ( $command ) = packet_txt_read() =~ /^command=(.+)$/;
+ my ( $res, $command ) = packet_required_key_val_read("command");
+ if ( $res == -1 ) {
+ print $debug "STOP\n";
+ exit();
+ }
print $debug "IN: $command";
$debug->flush();
if ( $command eq "list_available_blobs" ) {
# Flush
- packet_bin_read();
+ packet_compare_lists([1, ""], packet_bin_read()) ||
+ die "bad list_available_blobs end";
foreach my $pathname ( sort keys %DELAY ) {
if ( $DELAY{$pathname}{"requested"} >= 1 ) {
$debug->flush();
packet_txt_write("status=success");
packet_flush();
- }
- else {
- my ( $pathname ) = packet_txt_read() =~ /^pathname=(.+)$/;
+ } else {
+ my ( $res, $pathname ) = packet_required_key_val_read("pathname");
+ if ( $res == -1 ) {
+ die "unexpected EOF while expecting pathname";
+ }
print $debug " $pathname";
$debug->flush();
- if ( $pathname eq "" ) {
- die "bad pathname '$pathname'";
- }
-
# Read until flush
my ( $done, $buffer ) = packet_txt_read();
while ( $buffer ne '' ) {
( $done, $buffer ) = packet_txt_read();
}
+ if ( $done == -1 ) {
+ die "unexpected EOF after pathname '$pathname'";
+ }
my $input = "";
{
( $done, $buffer ) = packet_bin_read();
$input .= $buffer;
}
+ if ( $done == -1 ) {
+ die "unexpected EOF while reading input for '$pathname'";
+ }
print $debug " " . length($input) . " [OK] -- ";
$debug->flush();
}
my $output;
if ( exists $DELAY{$pathname} and exists $DELAY{$pathname}{"output"} ) {
$output = $DELAY{$pathname}{"output"}
- }
- elsif ( $pathname eq "error.r" or $pathname eq "abort.r" ) {
+ } elsif ( $pathname eq "error.r" or $pathname eq "abort.r" ) {
$output = "";
- }
- elsif ( $command eq "clean" and grep( /^clean$/, @capabilities ) ) {
+ } elsif ( $command eq "clean" and grep( /^clean$/, @capabilities ) ) {
$output = rot13($input);
- }
- elsif ( $command eq "smudge" and grep( /^smudge$/, @capabilities ) ) {
+ } elsif ( $command eq "smudge" and grep( /^smudge$/, @capabilities ) ) {
$output = rot13($input);
- }
- else {
+ } else {
die "bad command '$command'";
}
$debug->flush();
packet_txt_write("status=error");
packet_flush();
- }
- elsif ( $pathname eq "abort.r" ) {
+ } elsif ( $pathname eq "abort.r" ) {
print $debug "[ABORT]\n";
$debug->flush();
packet_txt_write("status=abort");
packet_flush();
- }
- elsif ( $command eq "smudge" and
+ } elsif ( $command eq "smudge" and
exists $DELAY{$pathname} and
- $DELAY{$pathname}{"requested"} == 1
- ) {
+ $DELAY{$pathname}{"requested"} == 1 ) {
print $debug "[DELAYED]\n";
$debug->flush();
packet_txt_write("status=delayed");
packet_flush();
$DELAY{$pathname}{"requested"} = 2;
$DELAY{$pathname}{"output"} = $output;
- }
- else {
+ } else {
packet_txt_write("status=success");
packet_flush();
print $debug ".";
if ( length($output) > $MAX_PACKET_CONTENT_SIZE ) {
$output = substr( $output, $MAX_PACKET_CONTENT_SIZE );
- }
- else {
+ } else {
$output = "";
}
}
--- /dev/null
+#!/bin/sh
+
+test_description='avoid rewriting packed-refs unnecessarily'
+
+. ./test-lib.sh
+
+# Add an identifying mark to the packed-refs file header line. This
+# shouldn't upset readers, and it should be omitted if the file is
+# ever rewritten.
+mark_packed_refs () {
+ sed -e "s/^\(#.*\)/\1 t1409 /" <.git/packed-refs >.git/packed-refs.new &&
+ mv .git/packed-refs.new .git/packed-refs
+}
+
+# Verify that the packed-refs file is still marked.
+check_packed_refs_marked () {
+ grep -q '^#.* t1409 ' .git/packed-refs
+}
+
+test_expect_success 'setup' '
+ git commit --allow-empty -m "Commit A" &&
+ A=$(git rev-parse HEAD) &&
+ git commit --allow-empty -m "Commit B" &&
+ B=$(git rev-parse HEAD) &&
+ git commit --allow-empty -m "Commit C" &&
+ C=$(git rev-parse HEAD)
+'
+
+test_expect_success 'do not create packed-refs file gratuitously' '
+ test_must_fail test -f .git/packed-refs &&
+ git update-ref refs/heads/foo $A &&
+ test_must_fail test -f .git/packed-refs &&
+ git update-ref refs/heads/foo $B &&
+ test_must_fail test -f .git/packed-refs &&
+ git update-ref refs/heads/foo $C $B &&
+ test_must_fail test -f .git/packed-refs &&
+ git update-ref -d refs/heads/foo &&
+ test_must_fail test -f .git/packed-refs
+'
+
+test_expect_success 'check that marking the packed-refs file works' '
+ git for-each-ref >expected &&
+ git pack-refs --all &&
+ mark_packed_refs &&
+ check_packed_refs_marked &&
+ git for-each-ref >actual &&
+ test_cmp expected actual &&
+ git pack-refs --all &&
+ test_must_fail check_packed_refs_marked &&
+ git for-each-ref >actual2 &&
+ test_cmp expected actual2
+'
+
+test_expect_success 'leave packed-refs untouched on update of packed' '
+ git update-ref refs/heads/packed-update $A &&
+ git pack-refs --all &&
+ mark_packed_refs &&
+ git update-ref refs/heads/packed-update $B &&
+ check_packed_refs_marked
+'
+
+test_expect_success 'leave packed-refs untouched on checked update of packed' '
+ git update-ref refs/heads/packed-checked-update $A &&
+ git pack-refs --all &&
+ mark_packed_refs &&
+ git update-ref refs/heads/packed-checked-update $B $A &&
+ check_packed_refs_marked
+'
+
+test_expect_success 'leave packed-refs untouched on verify of packed' '
+ git update-ref refs/heads/packed-verify $A &&
+ git pack-refs --all &&
+ mark_packed_refs &&
+ echo "verify refs/heads/packed-verify $A" | git update-ref --stdin &&
+ check_packed_refs_marked
+'
+
+test_expect_success 'touch packed-refs on delete of packed' '
+ git update-ref refs/heads/packed-delete $A &&
+ git pack-refs --all &&
+ mark_packed_refs &&
+ git update-ref -d refs/heads/packed-delete &&
+ test_must_fail check_packed_refs_marked
+'
+
+test_expect_success 'leave packed-refs untouched on update of loose' '
+ git pack-refs --all &&
+ git update-ref refs/heads/loose-update $A &&
+ mark_packed_refs &&
+ git update-ref refs/heads/loose-update $B &&
+ check_packed_refs_marked
+'
+
+test_expect_success 'leave packed-refs untouched on checked update of loose' '
+ git pack-refs --all &&
+ git update-ref refs/heads/loose-checked-update $A &&
+ mark_packed_refs &&
+ git update-ref refs/heads/loose-checked-update $B $A &&
+ check_packed_refs_marked
+'
+
+test_expect_success 'leave packed-refs untouched on verify of loose' '
+ git pack-refs --all &&
+ git update-ref refs/heads/loose-verify $A &&
+ mark_packed_refs &&
+ echo "verify refs/heads/loose-verify $A" | git update-ref --stdin &&
+ check_packed_refs_marked
+'
+
+test_expect_success 'leave packed-refs untouched on delete of loose' '
+ git pack-refs --all &&
+ git update-ref refs/heads/loose-delete $A &&
+ mark_packed_refs &&
+ git update-ref -d refs/heads/loose-delete &&
+ check_packed_refs_marked
+'
+
+test_done
# We need total control of index splitting here
sane_unset GIT_TEST_SPLIT_INDEX
+sane_unset GIT_FSMONITOR_TEST
test_expect_success 'enable split index' '
git config splitIndex.maxPercentChange 100 &&
test_submodule_switch "git_rebase_interactive"
+test_expect_success 'rebase interactive ignores modified submodules' '
+ test_when_finished "rm -rf super sub" &&
+ git init sub &&
+ git -C sub commit --allow-empty -m "Initial commit" &&
+ git init super &&
+ git -C super submodule add ../sub &&
+ git -C super config submodule.sub.ignore dirty &&
+ >super/foo &&
+ git -C super add foo &&
+ git -C super commit -m "Initial commit" &&
+ test_commit -C super a &&
+ test_commit -C super b &&
+ test_commit -C super/sub c &&
+ set_fake_editor &&
+ git -C super rebase -i HEAD^^
+'
+
test_done
git submodule update &&
git checkout -q HEAD^ &&
git checkout -q master 2>actual &&
- test_i18ngrep "^warning: unable to rmdir submod:" actual &&
+ test_i18ngrep "^warning: unable to rmdir '\''submod'\'':" actual &&
git status -s submod >actual &&
echo "?? submod/" >expected &&
test_cmp expected actual &&
git diff -w -b --ignore-space-at-eol >out &&
test_cmp expect out &&
+ git diff -w --ignore-cr-at-eol >out &&
+ test_cmp expect out &&
tr "Q_" "\015 " <<-\EOF >expect &&
diff --git a/x b/x
git diff -b --ignore-space-at-eol >out &&
test_cmp expect out &&
+ git diff -b --ignore-cr-at-eol >out &&
+ test_cmp expect out &&
+
tr "Q_" "\015 " <<-\EOF >expect &&
diff --git a/x b/x
index d99af23..22d9f73 100644
CR at end
EOF
git diff --ignore-space-at-eol >out &&
+ test_cmp expect out &&
+
+ git diff --ignore-space-at-eol --ignore-cr-at-eol >out &&
+ test_cmp expect out &&
+
+ tr "Q_" "\015 " <<-\EOF >expect &&
+ diff --git a/x b/x
+ index_d99af23..22d9f73 100644
+ --- a/x
+ +++ b/x
+ @@ -1,6 +1,6 @@
+ -whitespace at beginning
+ -whitespace change
+ -whitespace in the middle
+ -whitespace at end
+ +_ whitespace at beginning
+ +whitespace_ _change
+ +white space in the middle
+ +whitespace at end__
+ unchanged line
+ CR at end
+ EOF
+ git diff --ignore-cr-at-eol >out &&
test_cmp expect out
'
. ./test-lib.sh
test_expect_success 'setup' '
+ test_tick &&
echo 1 >a1 &&
git add a1 &&
tree=$(git write-tree) &&
file=$1 &&
sed "
s/$_x40/OBJECT_NAME/g
- s/$_x05/OBJID/g
+ s/$_x35/OBJID/g
s/^ \{6\}[CTa].*/ SUBJECT/g
s/^ \{8\}[^ ].*/ CONTINUATION/g
" <"$file" >"$file.fuzzy" &&
test_expect_success '--abbrev' '
sed s/SUBJECT/OBJID/ expect.template >expect &&
- git shortlog --format="%h" --abbrev=5 HEAD >log &&
+ git shortlog --format="%h" --abbrev=35 HEAD >log &&
fuzz log >log.predictable &&
test_cmp expect log.predictable
'
test_cmp expected actual
'
+test_expect_success ':remotename and :remoteref' '
+ git init remote-tests &&
+ (
+ cd remote-tests &&
+ test_commit initial &&
+ git remote add from fifth.coffee:blub &&
+ git config branch.master.remote from &&
+ git config branch.master.merge refs/heads/stable &&
+ git remote add to southridge.audio:repo &&
+ git config remote.to.push "refs/heads/*:refs/heads/pushed/*" &&
+ git config branch.master.pushRemote to &&
+ for pair in "%(upstream)=refs/remotes/from/stable" \
+ "%(upstream:remotename)=from" \
+ "%(upstream:remoteref)=refs/heads/stable" \
+ "%(push)=refs/remotes/to/pushed/master" \
+ "%(push:remotename)=to" \
+ "%(push:remoteref)=refs/heads/pushed/master"
+ do
+ echo "${pair#*=}" >expect &&
+ git for-each-ref --format="${pair%=*}" \
+ refs/heads/master >actual &&
+ test_cmp expect actual
+ done &&
+ git branch push-simple &&
+ git config branch.push-simple.pushRemote from &&
+ actual="$(git for-each-ref \
+ --format="%(push:remotename),%(push:remoteref)" \
+ refs/heads/push-simple)" &&
+ test from, = "$actual"
+ )
+'
+
test_done
git mv sub sub2 &&
git commit -m "moved sub to sub2" &&
git checkout -q HEAD^ 2>actual &&
- test_i18ngrep "^warning: unable to rmdir sub2:" actual &&
+ test_i18ngrep "^warning: unable to rmdir '\''sub2'\'':" actual &&
git status -s sub2 >actual &&
echo "?? sub2/" >expected &&
test_cmp expected actual &&
--- /dev/null
+#!/bin/sh
+
+test_description='git status with file system watcher'
+
+. ./test-lib.sh
+
+#
+# To run the entire git test suite using fsmonitor:
+#
+# copy t/t7519/fsmonitor-all to a location in your path and then set
+# GIT_FSMONITOR_TEST=fsmonitor-all and run your tests.
+#
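+# For example (an illustrative sketch; adjust the paths for your setup):
+#
+#   cp t/t7519/fsmonitor-all ~/bin/
+#   GIT_FSMONITOR_TEST=fsmonitor-all make test
+#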
+
+# Note: after "git reset --hard HEAD" no extensions other than 'TREE' exist.
+# "git update-index --fsmonitor" can be used to get the extension written
+# before testing the results.
+
+clean_repo () {
+ git reset --hard HEAD &&
+ git clean -fd
+}
+
+dirty_repo () {
+ : >untracked &&
+ : >dir1/untracked &&
+ : >dir2/untracked &&
+ echo 1 >modified &&
+ echo 2 >dir1/modified &&
+ echo 3 >dir2/modified &&
+ echo 4 >new &&
+ echo 5 >dir1/new &&
+ echo 6 >dir2/new
+}
+
+write_integration_script () {
+ write_script .git/hooks/fsmonitor-test<<-\EOF
+ if test "$#" -ne 2
+ then
+ echo "$0: exactly 2 arguments expected"
+ exit 2
+ fi
+ if test "$1" != 1
+ then
+ echo "Unsupported core.fsmonitor hook version." >&2
+ exit 1
+ fi
+ printf "untracked\0"
+ printf "dir1/untracked\0"
+ printf "dir2/untracked\0"
+ printf "modified\0"
+ printf "dir1/modified\0"
+ printf "dir2/modified\0"
+ printf "new\0"
+ printf "dir1/new\0"
+ printf "dir2/new\0"
+ EOF
+}
+
+test_lazy_prereq UNTRACKED_CACHE '
+ { git update-index --test-untracked-cache; ret=$?; } &&
+ test $ret -ne 1
+'
+
+test_expect_success 'setup' '
+ mkdir -p .git/hooks &&
+ : >tracked &&
+ : >modified &&
+ mkdir dir1 &&
+ : >dir1/tracked &&
+ : >dir1/modified &&
+ mkdir dir2 &&
+ : >dir2/tracked &&
+ : >dir2/modified &&
+ git -c core.fsmonitor= add . &&
+ git -c core.fsmonitor= commit -m initial &&
+ git config core.fsmonitor .git/hooks/fsmonitor-test &&
+ cat >.gitignore <<-\EOF
+ .gitignore
+ expect*
+ actual*
+ marker*
+ EOF
+'
+
+# test that the fsmonitor extension is off by default
+test_expect_success 'fsmonitor extension is off by default' '
+ test-dump-fsmonitor >actual &&
+ grep "^no fsmonitor" actual
+'
+
+# test that "update-index --fsmonitor" adds the fsmonitor extension
+test_expect_success 'update-index --fsmonitor" adds the fsmonitor extension' '
+ git update-index --fsmonitor &&
+ test-dump-fsmonitor >actual &&
+ grep "^fsmonitor last update" actual
+'
+
+# test that "update-index --no-fsmonitor" removes the fsmonitor extension
+test_expect_success 'update-index --no-fsmonitor" removes the fsmonitor extension' '
+ git update-index --no-fsmonitor &&
+ test-dump-fsmonitor >actual &&
+ grep "^no fsmonitor" actual
+'
+
+cat >expect <<EOF &&
+h dir1/modified
+H dir1/tracked
+h dir2/modified
+H dir2/tracked
+h modified
+H tracked
+EOF
+
+# test that "update-index --fsmonitor-valid" sets the fsmonitor valid bit
+test_expect_success '"update-index --fsmonitor-valid" sets the fsmonitor valid bit' '
+ git update-index --fsmonitor &&
+ git update-index --fsmonitor-valid dir1/modified &&
+ git update-index --fsmonitor-valid dir2/modified &&
+ git update-index --fsmonitor-valid modified &&
+ git ls-files -f >actual &&
+ test_cmp expect actual
+'
+
+cat >expect <<EOF &&
+H dir1/modified
+H dir1/tracked
+H dir2/modified
+H dir2/tracked
+H modified
+H tracked
+EOF
+
+# test that "update-index --no-fsmonitor-valid" clears the fsmonitor valid bit
+test_expect_success '"update-index --no-fsmonitor-valid" clears the fsmonitor valid bit' '
+ git update-index --no-fsmonitor-valid dir1/modified &&
+ git update-index --no-fsmonitor-valid dir2/modified &&
+ git update-index --no-fsmonitor-valid modified &&
+ git ls-files -f >actual &&
+ test_cmp expect actual
+'
+
+cat >expect <<EOF &&
+H dir1/modified
+H dir1/tracked
+H dir2/modified
+H dir2/tracked
+H modified
+H tracked
+EOF
+
+# test that all files returned by the script get flagged as invalid
+test_expect_success 'all files returned by integration script get flagged as invalid' '
+ write_integration_script &&
+ dirty_repo &&
+ git update-index --fsmonitor &&
+ git ls-files -f >actual &&
+ test_cmp expect actual
+'
+
+cat >expect <<EOF &&
+H dir1/modified
+h dir1/new
+H dir1/tracked
+H dir2/modified
+h dir2/new
+H dir2/tracked
+H modified
+h new
+H tracked
+EOF
+
+# test that newly added files are marked valid
+test_expect_success 'newly added files are marked valid' '
+ git add new &&
+ git add dir1/new &&
+ git add dir2/new &&
+ git ls-files -f >actual &&
+ test_cmp expect actual
+'
+
+cat >expect <<EOF &&
+H dir1/modified
+h dir1/new
+h dir1/tracked
+H dir2/modified
+h dir2/new
+h dir2/tracked
+H modified
+h new
+h tracked
+EOF
+
+# test that all unmodified files get marked valid
+test_expect_success 'all unmodified files get marked valid' '
+ # modified files result in update-index returning 1
+ test_must_fail git update-index --refresh --force-write-index &&
+ git ls-files -f >actual &&
+ test_cmp expect actual
+'
+
+cat >expect <<EOF &&
+H dir1/modified
+h dir1/tracked
+h dir2/modified
+h dir2/tracked
+h modified
+h tracked
+EOF
+
+# test that *only* files returned by the integration script get flagged as invalid
+test_expect_success '*only* files returned by the integration script get flagged as invalid' '
+ write_script .git/hooks/fsmonitor-test<<-\EOF &&
+ printf "dir1/modified\0"
+ EOF
+ clean_repo &&
+ git update-index --refresh --force-write-index &&
+ echo 1 >modified &&
+ echo 2 >dir1/modified &&
+ echo 3 >dir2/modified &&
+ test_must_fail git update-index --refresh --force-write-index &&
+ git ls-files -f >actual &&
+ test_cmp expect actual
+'
+
+# Ensure commands that call refresh_index() to move the index back in time
+# properly invalidate the fsmonitor cache
+test_expect_success 'refresh_index() invalidates fsmonitor cache' '
+ write_script .git/hooks/fsmonitor-test<<-\EOF &&
+ EOF
+ clean_repo &&
+ dirty_repo &&
+ git add . &&
+ git commit -m "to reset" &&
+ git reset HEAD~1 &&
+ git status >actual &&
+ git -c core.fsmonitor= status >expect &&
+ test_i18ncmp expect actual
+'
+
+# test fsmonitor with and without preloadIndex
+preload_values="false true"
+for preload_val in $preload_values
+do
+ test_expect_success "setup preloadIndex to $preload_val" '
+ git config core.preloadIndex $preload_val &&
+ if test $preload_val = true
+ then
+ GIT_FORCE_PRELOAD_TEST=$preload_val; export GIT_FORCE_PRELOAD_TEST
+ else
+ unset GIT_FORCE_PRELOAD_TEST
+ fi
+ '
+
+ # test fsmonitor with and without the untracked cache (if available)
+ uc_values="false"
+ test_have_prereq UNTRACKED_CACHE && uc_values="false true"
+ for uc_val in $uc_values
+ do
+ test_expect_success "setup untracked cache to $uc_val" '
+ git config core.untrackedcache $uc_val
+ '
+
+ # Status is well tested elsewhere so we'll just ensure that the results are
+ # the same when using core.fsmonitor.
+ test_expect_success 'compare status with and without fsmonitor' '
+ write_integration_script &&
+ clean_repo &&
+ dirty_repo &&
+ git add new &&
+ git add dir1/new &&
+ git add dir2/new &&
+ git status >actual &&
+ git -c core.fsmonitor= status >expect &&
+ test_i18ncmp expect actual
+ '
+
+ # Make sure it's actually skipping the check for modified and untracked
+ # (if enabled) files unless it is told about them.
+ test_expect_success "status doesn't detect unreported modifications" '
+ write_script .git/hooks/fsmonitor-test<<-\EOF &&
+ :>marker
+ EOF
+ clean_repo &&
+ git status &&
+ test_path_is_file marker &&
+ dirty_repo &&
+ rm -f marker &&
+ git status >actual &&
+ test_path_is_file marker &&
+ test_i18ngrep ! "Changes not staged for commit:" actual &&
+ if test $uc_val = true
+ then
+ test_i18ngrep ! "Untracked files:" actual
+ fi &&
+ if test $uc_val = false
+ then
+ test_i18ngrep "Untracked files:" actual
+ fi &&
+ rm -f marker
+ '
+ done
+done
+
+# test that splitting the index doesn't interfere
+test_expect_success 'splitting the index results in the same state' '
+ write_integration_script &&
+ dirty_repo &&
+ git update-index --fsmonitor &&
+ git ls-files -f >expect &&
+ test-dump-fsmonitor >&2 && echo &&
+ git update-index --fsmonitor --split-index &&
+ test-dump-fsmonitor >&2 && echo &&
+ git ls-files -f >actual &&
+ test_cmp expect actual
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+#
+# A test hook script to integrate with git to test fsmonitor.
+#
+# The hook is passed a version (currently 1) and a time in nanoseconds
+# formatted as a string and outputs to stdout all files that have been
+# modified since the given time. Paths must be relative to the root of
+# the working tree and separated by a single NUL.
+#
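+# For example, git would invoke the configured hook roughly as
+# (the timestamp below is only illustrative):
+#
+#   <hook> 1 123456789012345678
+#
+# and read the NUL-separated paths it prints on stdout.
+#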
+#echo "$0 $*" >&2
+
+if test "$#" -ne 2
+then
+ echo "$0: exactly 2 arguments expected" >&2
+ exit 2
+fi
+
+if test "$1" != 1
+then
+ echo "Unsupported core.fsmonitor hook version." >&2
+ exit 1
+fi
+
+echo "/"
--- /dev/null
+#!/bin/sh
+#
+# A test hook script to integrate with git to test fsmonitor.
+#
+# The hook is passed a version (currently 1) and a time in nanoseconds
+# formatted as a string and outputs to stdout all files that have been
+# modified since the given time. Paths must be relative to the root of
+# the working tree and separated by a single NUL.
+#
+#echo "$0 $*" >&2
+
+if test "$#" -ne 2
+then
+ echo "$0: exactly 2 arguments expected" >&2
+ exit 2
+fi
+
+if test "$1" != 1
+then
+ echo "Unsupported core.fsmonitor hook version." >&2
+ exit 1
+fi
--- /dev/null
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use IPC::Open2;
+
+# An example hook script to integrate Watchman
+# (https://facebook.github.io/watchman/) with git to speed up detecting
+# new and modified files.
+#
+# The hook is passed a version (currently 1) and a time in nanoseconds
+# formatted as a string and outputs to stdout all files that have been
+# modified since the given time. Paths must be relative to the root of
+# the working tree and separated by a single NUL.
+#
+# To enable this hook, rename this file to "query-watchman" and set
+# 'git config core.fsmonitor .git/hooks/query-watchman'
+#
+my ($version, $time) = @ARGV;
+#print STDERR "$0 $version $time\n";
+
+# Check the hook interface version
+
+if ($version == 1) {
+ # convert nanoseconds to seconds
+ $time = int $time / 1000000000;
+} else {
+ die "Unsupported query-fsmonitor hook version '$version'.\n" .
+ "Falling back to scanning...\n";
+}
+
+my $git_work_tree;
+if ($^O =~ 'msys' || $^O =~ 'cygwin') {
+ $git_work_tree = Win32::GetCwd();
+ $git_work_tree =~ tr/\\/\//;
+} else {
+ require Cwd;
+ $git_work_tree = Cwd::cwd();
+}
+
+my $retry = 1;
+
+launch_watchman();
+
+sub launch_watchman {
+
+ my $pid = open2(\*CHLD_OUT, \*CHLD_IN, 'watchman -j')
+ or die "open2() failed: $!\n" .
+ "Falling back to scanning...\n";
+
+ # In the query expression below we're asking for names of files that
+ # changed since $time but were not transient (i.e. created after
+ # $time but no longer exist).
+ #
+ # To accomplish this, we're using the "since" generator to use the
+ # recency index to select candidate nodes and "fields" to limit the
+ # output to file names only. Then we're using the "expression" term to
+ # further constrain the results.
+ #
+ # The category of transient files that we want to ignore will have a
+ # creation clock (cclock) newer than $time_t value and will also not
+ # currently exist.
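+ #
+ # Based on how the response is used below, we expect a JSON object
+ # back whose "files" member lists the changed paths and whose
+ # "error" member, if present, describes a failure.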
+
+ my $query = <<" END";
+ ["query", "$git_work_tree", {
+ "since": $time,
+ "fields": ["name"],
+ "expression": ["not", ["allof", ["since", $time, "cclock"], ["not", "exists"]]]
+ }]
+ END
+
+ open (my $fh, ">", ".git/watchman-query.json");
+ print $fh $query;
+ close $fh;
+
+ print CHLD_IN $query;
+ close CHLD_IN;
+ my $response = do {local $/; <CHLD_OUT>};
+
+ open ($fh, ">", ".git/watchman-response.json");
+ print $fh $response;
+ close $fh;
+
+ die "Watchman: command returned no output.\n" .
+ "Falling back to scanning...\n" if $response eq "";
+ die "Watchman: command returned invalid output: $response\n" .
+ "Falling back to scanning...\n" unless $response =~ /^\{/;
+
+ my $json_pkg;
+ eval {
+ require JSON::XS;
+ $json_pkg = "JSON::XS";
+ 1;
+ } or do {
+ require JSON::PP;
+ $json_pkg = "JSON::PP";
+ };
+
+ my $o = $json_pkg->new->utf8->decode($response);
+
+ if ($retry > 0 and $o->{error} and $o->{error} =~ m/unable to resolve root .* directory (.*) is not watched/) {
+ print STDERR "Adding '$git_work_tree' to watchman's watch list.\n";
+ $retry--;
+ qx/watchman watch "$git_work_tree"/;
+ die "Failed to make watchman watch '$git_work_tree'.\n" .
+ "Falling back to scanning...\n" if $? != 0;
+
+ # Watchman will always return all files on the first query so
+ # return the fast "everything is dirty" flag to git and do the
+ # Watchman query just to get it over with now so we won't pay
+ # the cost in git to look up each individual file.
+
+ open ($fh, ">", ".git/watchman-output.out");
+ print $fh "/\0";
+ close $fh;
+
+ print "/\0";
+ eval { launch_watchman() };
+ exit 0;
+ }
+
+ die "Watchman: $o->{error}.\n" .
+ "Falling back to scanning...\n" if $o->{error};
+
+ open ($fh, ">", ".git/watchman-output.out");
+ binmode $fh, ":utf8";
+ print $fh @{$o->{files}};
+ close $fh;
+
+ binmode STDOUT, ":utf8";
+ local $, = "\0";
+ print @{$o->{files}};
+}
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-# MA 02111-1307 USA
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
EOF
}
# Convenience
#
-# A regexp to match 5 and 40 hexdigits
+# A regexp to match 5, 35 and 40 hexdigits
_x05='[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]'
-_x40="$_x05$_x05$_x05$_x05$_x05$_x05$_x05$_x05"
+_x35="$_x05$_x05$_x05$_x05$_x05$_x05$_x05"
+_x40="$_x35$_x05"
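+# For example, a test can normalize object names in its output with
+# something like: sed -e "s/$_x40/OBJECT_NAME/g" -e "s/$_x35/OBJID/g"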
# Zero SHA-1
_z40=0000000000000000000000000000000000000000
# when case-folding filenames
u200c=$(printf '\342\200\214')
-export _x05 _x40 _z40 LF u200c EMPTY_TREE EMPTY_BLOB
+export _x05 _x35 _x40 _z40 LF u200c EMPTY_TREE EMPTY_BLOB
# Each test should start with something like this, after copyright notices:
#
--- /dev/null
+#!/usr/bin/perl
+
+use strict;
+use warnings;
+use IPC::Open2;
+
+# An example hook script to integrate Watchman
+# (https://facebook.github.io/watchman/) with git to speed up detecting
+# new and modified files.
+#
+# The hook is passed a version (currently 1) and a time in nanoseconds
+# formatted as a string and outputs to stdout all files that have been
+# modified since the given time. Paths must be relative to the root of
+# the working tree and separated by a single NUL.
+#
+# To enable this hook, rename this file to "query-watchman" and set
+# 'git config core.fsmonitor .git/hooks/query-watchman'
+#
+my ($version, $time) = @ARGV;
+
+# Check the hook interface version
+
+if ($version == 1) {
+ # convert nanoseconds to seconds
+ $time = int $time / 1000000000;
+} else {
+ die "Unsupported query-fsmonitor hook version '$version'.\n" .
+ "Falling back to scanning...\n";
+}
+
+my $git_work_tree;
+if ($^O =~ 'msys' || $^O =~ 'cygwin') {
+ $git_work_tree = Win32::GetCwd();
+ $git_work_tree =~ tr/\\/\//;
+} else {
+ require Cwd;
+ $git_work_tree = Cwd::cwd();
+}
+
+my $retry = 1;
+
+launch_watchman();
+
+sub launch_watchman {
+
+ my $pid = open2(\*CHLD_OUT, \*CHLD_IN, 'watchman -j --no-pretty')
+ or die "open2() failed: $!\n" .
+ "Falling back to scanning...\n";
+
+ # In the query expression below we're asking for names of files that
+ # changed since $time but were not transient (i.e. created after
+ # $time but no longer exist).
+ #
+ # To accomplish this, we're using the "since" generator to use the
+ # recency index to select candidate nodes and "fields" to limit the
+ # output to file names only. Then we're using the "expression" term to
+ # further constrain the results.
+ #
+ # The category of transient files that we want to ignore will have a
+ # creation clock (cclock) newer than $time_t value and will also not
+ # currently exist.
+
+ my $query = <<" END";
+ ["query", "$git_work_tree", {
+ "since": $time,
+ "fields": ["name"],
+ "expression": ["not", ["allof", ["since", $time, "cclock"], ["not", "exists"]]]
+ }]
+ END
+
+ print CHLD_IN $query;
+ close CHLD_IN;
+ my $response = do {local $/; <CHLD_OUT>};
+
+ die "Watchman: command returned no output.\n" .
+ "Falling back to scanning...\n" if $response eq "";
+ die "Watchman: command returned invalid output: $response\n" .
+ "Falling back to scanning...\n" unless $response =~ /^\{/;
+
+ my $json_pkg;
+ eval {
+ require JSON::XS;
+ $json_pkg = "JSON::XS";
+ 1;
+ } or do {
+ require JSON::PP;
+ $json_pkg = "JSON::PP";
+ };
+
+ my $o = $json_pkg->new->utf8->decode($response);
+
+ if ($retry > 0 and $o->{error} and $o->{error} =~ m/unable to resolve root .* directory (.*) is not watched/) {
+ print STDERR "Adding '$git_work_tree' to watchman's watch list.\n";
+ $retry--;
+ qx/watchman watch "$git_work_tree"/;
+ die "Failed to make watchman watch '$git_work_tree'.\n" .
+ "Falling back to scanning...\n" if $? != 0;
+
+ # Watchman will always return all files on the first query so
+ # return the fast "everything is dirty" flag to git and do the
+ # Watchman query just to get it over with now so we won't pay
+ # the cost in git to look up each individual file.
+ print "/\0";
+ eval { launch_watchman() };
+ exit 0;
+ }
+
+ die "Watchman: $o->{error}.\n" .
+ "Falling back to scanning...\n" if $o->{error};
+
+ binmode STDOUT, ":utf8";
+ local $, = "\0";
+ print @{$o->{files}};
+}
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "cache.h"
#include "dir.h"
#include "submodule.h"
#include "submodule-config.h"
+#include "fsmonitor.h"
/*
* Error messages expected by scripts out of plumbing commands such as
ce->ce_flags &= ~CE_SKIP_WORKTREE;
if (was_skip_worktree != ce_skip_worktree(ce)) {
ce->ce_flags |= CE_UPDATE_IN_BASE;
+ mark_fsmonitor_invalid(istate, ce);
istate->cache_changed |= CE_ENTRY_CHANGED;
}
if (!rc || errno == ENOENT)
return 0;
err = errno;
- warning_errno("unable to %s %s", op, file);
+ warning_errno("unable to %s '%s'", op, file);
errno = err;
return rc;
}
if (!rc || errno == ENOENT)
return 0;
- strbuf_addf(err, "unable to unlink %s: %s",
+ strbuf_addf(err, "unable to unlink '%s': %s",
file, strerror(errno));
return -1;
}
{
int fd = xopen(path, O_WRONLY | O_CREAT | O_TRUNC, 0666);
if (write_in_full(fd, buf, len) < 0)
- die_errno(_("could not write to %s"), path);
+ die_errno(_("could not write to '%s'"), path);
if (close(fd))
- die_errno(_("could not close %s"), path);
+ die_errno(_("could not close '%s'"), path);
}
void write_file(const char *path, const char *fmt, ...)
int result;
init_revisions(&rev_info, NULL);
- if (ignore_submodules)
+ if (ignore_submodules) {
rev_info.diffopt.flags.ignore_submodules = 1;
+ rev_info.diffopt.flags.override_submodule_config = 1;
+ }
rev_info.diffopt.flags.quick = 1;
diff_setup_done(&rev_info.diffopt);
result = run_diff_files(&rev_info, 0);
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
*
* Davide Libenzi <davidel@xmailserver.org>
*
extern "C" {
#endif /* #ifdef __cplusplus */
+/* xpparm_t.flags */
#define XDF_NEED_MINIMAL (1 << 0)
#define XDF_IGNORE_WHITESPACE (1 << 1)
#define XDF_IGNORE_WHITESPACE_CHANGE (1 << 2)
#define XDF_IGNORE_WHITESPACE_AT_EOL (1 << 3)
-#define XDF_WHITESPACE_FLAGS (XDF_IGNORE_WHITESPACE | XDF_IGNORE_WHITESPACE_CHANGE | XDF_IGNORE_WHITESPACE_AT_EOL)
+#define XDF_IGNORE_CR_AT_EOL (1 << 4)
+#define XDF_WHITESPACE_FLAGS (XDF_IGNORE_WHITESPACE | \
+ XDF_IGNORE_WHITESPACE_CHANGE | \
+ XDF_IGNORE_WHITESPACE_AT_EOL | \
+ XDF_IGNORE_CR_AT_EOL)
-#define XDF_PATIENCE_DIFF (1 << 5)
-#define XDF_HISTOGRAM_DIFF (1 << 6)
+#define XDF_IGNORE_BLANK_LINES (1 << 7)
+
+#define XDF_PATIENCE_DIFF (1 << 14)
+#define XDF_HISTOGRAM_DIFF (1 << 15)
#define XDF_DIFF_ALGORITHM_MASK (XDF_PATIENCE_DIFF | XDF_HISTOGRAM_DIFF)
#define XDF_DIFF_ALG(x) ((x) & XDF_DIFF_ALGORITHM_MASK)
-#define XDF_IGNORE_BLANK_LINES (1 << 7)
-
-#define XDF_INDENT_HEURISTIC (1 << 8)
+#define XDF_INDENT_HEURISTIC (1 << 23)
+/* xdemitconf_t.flags */
#define XDL_EMIT_FUNCNAMES (1 << 0)
#define XDL_EMIT_FUNCCONTEXT (1 << 2)
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
*
* Davide Libenzi <davidel@xmailserver.org>
*
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
*
* Davide Libenzi <davidel@xmailserver.org>
*
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
*
* Davide Libenzi <davidel@xmailserver.org>
*
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
*
* Davide Libenzi <davidel@xmailserver.org>
*
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
*
* Davide Libenzi <davidel@xmailserver.org>
*
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
*
* Davide Libenzi <davidel@xmailserver.org>
*
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
*
* Davide Libenzi <davidel@xmailserver.org>
*
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
*
* Davide Libenzi <davidel@xmailserver.org>
*
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
*
* Davide Libenzi <davidel@xmailserver.org>
*
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
*
* Davide Libenzi <davidel@xmailserver.org>
*
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
*
* Davide Libenzi <davidel@xmailserver.org>
*
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
*
* Davide Libenzi <davidel@xmailserver.org>
*
return (i == size);
}
+/*
+ * Have we eaten everything on the line, except for an optional
+ * CR at the very end?
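+ *
+ * For example, with l = "abc\r\n" and s = 5, both i == 4 and i == 3
+ * return 1; for an incomplete line (no trailing '\n'), a CR at
+ * position i is not treated as optional and 0 is returned.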
+ */
+static int ends_with_optional_cr(const char *l, long s, long i)
+{
+ int complete = s && l[s-1] == '\n';
+
+ if (complete)
+ s--;
+ if (s == i)
+ return 1;
+ /* do not ignore CR at the end of an incomplete line */
+ if (complete && s == i + 1 && l[i] == '\r')
+ return 1;
+ return 0;
+}
+
int xdl_recmatch(const char *l1, long s1, const char *l2, long s2, long flags)
{
int i1, i2;
/*
* -w matches everything that matches with -b, and -b in turn
- * matches everything that matches with --ignore-space-at-eol.
+ * matches everything that matches with --ignore-space-at-eol,
+ * which in turn matches everything that matches with --ignore-cr-at-eol.
*
* Each flavor of ignoring needs different logic to skip whitespaces
* while we have both sides to compare.
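+ * (The same nesting appears in the user-facing options: "git diff -w"
+ * ignores everything "git diff -b" ignores, which in turn covers
+ * "--ignore-space-at-eol" and "--ignore-cr-at-eol".)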
i1++;
i2++;
}
+ } else if (flags & XDF_IGNORE_CR_AT_EOL) {
+ /* Find the first difference and see how the line ends */
+ while (i1 < s1 && i2 < s2 && l1[i1] == l2[i2]) {
+ i1++;
+ i2++;
+ }
+ return (ends_with_optional_cr(l1, s1, i1) &&
+ ends_with_optional_cr(l2, s2, i2));
}
/*
char const *top, long flags) {
unsigned long ha = 5381;
char const *ptr = *data;
+ int cr_at_eol_only = (flags & XDF_WHITESPACE_FLAGS) == XDF_IGNORE_CR_AT_EOL;
for (; ptr < top && *ptr != '\n'; ptr++) {
- if (XDL_ISSPACE(*ptr)) {
+ if (cr_at_eol_only) {
+ /* do not ignore CR at the end of an incomplete line */
+ if (*ptr == '\r' &&
+ (ptr + 1 < top && ptr[1] == '\n'))
+ continue;
+ }
+ else if (XDL_ISSPACE(*ptr)) {
const char *ptr2 = ptr;
int at_eol;
while (ptr + 1 < top && XDL_ISSPACE(ptr[1])
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
*
* Davide Libenzi <davidel@xmailserver.org>
*