Merge branch 'ma/ref-filter-leakfix'
author Junio C Hamano <gitster@pobox.com>
Fri, 19 Jul 2019 18:30:23 +0000 (11:30 -0700)
committer Junio C Hamano <gitster@pobox.com>
Fri, 19 Jul 2019 18:30:23 +0000 (11:30 -0700)
Leakfix.

* ma/ref-filter-leakfix:
ref-filter: fix memory leak in `free_array_item()`

111 files changed:
Documentation/RelNotes/2.23.0.txt
Documentation/blame-options.txt
Documentation/config/advice.txt
Documentation/config/blame.txt
Documentation/git-blame.txt
Documentation/git-cherry-pick.txt
Documentation/git-clone.txt
Documentation/git-commit-graph.txt
Documentation/git-multi-pack-index.txt
Documentation/git-revert.txt
Documentation/git.txt
Documentation/rev-list-options.txt
Documentation/sequencer.txt
Documentation/technical/api-trace2.txt
Documentation/technical/commit-graph-format.txt
Documentation/technical/commit-graph.txt
Makefile
advice.c
advice.h
archive.c
blame.c
blame.h
builtin/blame.c
builtin/cat-file.c
builtin/commit-graph.c
builtin/commit.c
builtin/gc.c
builtin/grep.c
builtin/merge-tree.c
builtin/merge.c
builtin/multi-pack-index.c
builtin/pack-objects.c
builtin/rebase.c
builtin/receive-pack.c
builtin/repack.c
builtin/reset.c
builtin/revert.c
builtin/rm.c
builtin/update-index.c
cache.h
ci/install-dependencies.sh
ci/lib.sh
commit-graph.c
commit-graph.h
compat/mingw.c
compat/poll/poll.c
compat/win32/git.manifest [new file with mode: 0644]
compat/winansi.c
config.mak.uname
connected.c
contrib/completion/git-prompt.sh
fast-import.c
fsck.c
gettext.c
git.rc
line-log.c
match-trees.c
merge-recursive.c
midx.c
midx.h
notes.c
object-store.h
oidmap.c
oidset.c
oidset.h
packfile.c
packfile.h
progress.c
ref-filter.c
revision.c
sequencer.c
sequencer.h
sha1-file.c
sha1-name.c
shallow.c
strbuf.c
strbuf.h
t/helper/test-hashmap.c
t/helper/test-match-trees.c
t/helper/test-oidmap.c [new file with mode: 0644]
t/helper/test-tool.c
t/helper/test-tool.h
t/perf/p5600-clone-reference.sh [new file with mode: 0755]
t/t0011-hashmap.sh
t/t0016-oidmap.sh [new file with mode: 0755]
t/t3418-rebase-continue.sh
t/t3420-rebase-autostash.sh
t/t3510-cherry-pick-sequence.sh
t/t5318-commit-graph.sh
t/t5319-multi-pack-index.sh
t/t5324-split-commit-graph.sh [new file with mode: 0755]
t/t5504-fetch-receive-strict.sh
t/t5551-http-fetch-smart.sh
t/t5618-alternate-refs.sh [new file with mode: 0755]
t/t6300-for-each-ref.sh
t/t7060-wtstatus.sh
t/t7508-status.sh
t/t7512-status-help.sh
t/t7700-repack.sh
t/t7814-grep-recurse-submodules.sh
t/t8003-blame-corner-cases.sh
t/t8013-blame-ignore-revs.sh [new file with mode: 0755]
t/t8014-blame-ignore-fuzzy.sh [new file with mode: 0755]
t/t9903-bash-prompt.sh
transport.c
transport.h
tree-diff.c
tree-walk.c
tree-walk.h
unpack-trees.c
wt-status.c
diff --git a/Documentation/RelNotes/2.23.0.txt b/Documentation/RelNotes/2.23.0.txt
index a8c9029aa493938039f7b86bbfa22a189143f853..a63204ffe8a040479654c3e44db6c170feca2a58 100644 (file)
@@ -112,6 +112,8 @@ Performance, Internal Implementation, Development Support etc.
  * "git fetch" that grabs from a group of remotes learned to run the
    auto-gc only once at the very end.
 
+ * A handful of Windows build patches have been upstreamed.
+
 
 Fixes since v2.22
 -----------------
@@ -280,6 +282,17 @@ Fixes since v2.22
    "--recursive" option was in use.
    (merge 30db18b148 ms/submodule-foreach-fix later to maint).
 
+ * The configuration variable rebase.rescheduleFailedExec should be
+   effective only while running an interactive rebase and should not
+   affect anything when running a non-interactive one, which was not
+   the case.  This has been corrected.
+   (merge 906b63942a js/rebase-reschedule-applies-only-to-interactive later to maint).
+
+ * The "git clone" documentation refers to command line options in its
+   description in the short form; they have been replaced with long
+   forms to make them more recognisable.
+   (merge bfc8c84ed5 qn/clone-doc-use-long-form later to maint).
+
  * Other code cleanup, docfix, build fix, etc.
    (merge f547101b26 es/git-debugger-doc later to maint).
    (merge 7877ac3d7b js/bisect-helper-check-get-oid-return-value later to maint).
@@ -296,3 +309,6 @@ Fixes since v2.22
    (merge ed33bd8f30 js/t0001-case-insensitive later to maint).
    (merge dfa880e336 jw/gitweb-sample-update later to maint).
    (merge e532a90a9f sg/t5551-fetch-smart-error-is-translated later to maint).
+   (merge 8d45ad8c29 jt/t5551-test-chunked later to maint).
+   (merge 1a64e07d23 sg/git-C-empty-doc later to maint).
+   (merge 37a2e35395 sg/ci-brew-gcc-workaround later to maint).
diff --git a/Documentation/blame-options.txt b/Documentation/blame-options.txt
index dc41957afab25d08cf6fc530cde97b91bed8e06e..5d122db6e9e6863fcf1e69ebc14feb1393501e0b 100644 (file)
@@ -110,5 +110,24 @@ commit. And the default value is 40. If there are more than one
 `-C` options given, the <num> argument of the last `-C` will
 take effect.
 
+--ignore-rev <rev>::
+       Ignore changes made by the revision when assigning blame, as if the
+       change never happened.  Lines that were changed or added by an ignored
+       commit will be blamed on the previous commit that changed that line or
+       nearby lines.  This option may be specified multiple times to ignore
+       more than one revision.  If the `blame.markIgnoredLines` config option
+       is set, then lines that were changed by an ignored commit and attributed to
+       another commit will be marked with a `?` in the blame output.  If the
+       `blame.markUnblamableLines` config option is set, then those lines touched
+       by an ignored commit that we could not attribute to another revision are
+       marked with a `*`.
+
+--ignore-revs-file <file>::
+       Ignore revisions listed in `file`, which must be in the same format as an
+       `fsck.skipList`.  This option may be repeated, and these files will be
+       processed after any files specified with the `blame.ignoreRevsFile` config
+       option.  An empty file name, `""`, will clear the list of revs from
+       previously processed files.
+
 -h::
        Show help message.
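
A brief usage sketch of the two new blame options documented in the hunk above; the `$REFORMAT_COMMIT` variable and the `.git-blame-ignore-revs` file name are placeholders, not defaults:

```
# Blame a file while skipping one known reformatting commit (placeholder rev).
git blame --ignore-rev "$REFORMAT_COMMIT" Makefile

# Ignore every revision listed in a file, one unabbreviated object name per
# line (the same format as fsck.skipList). The option may be repeated.
git blame --ignore-revs-file .git-blame-ignore-revs Makefile
```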
diff --git a/Documentation/config/advice.txt b/Documentation/config/advice.txt
index ee85c536cec83c9097ae586da404580573448431..6aaa36020298f54f60cab1973b91641d684b9b18 100644 (file)
@@ -68,6 +68,8 @@ advice.*::
        resolveConflict::
                Advice shown by various commands when conflicts
                prevent the operation from being performed.
+       sequencerInUse::
+               Advice shown when a sequencer command is already in progress.
        implicitIdentity::
                Advice on how to set your identity configuration when
                your information is guessed from the system username and
diff --git a/Documentation/config/blame.txt b/Documentation/config/blame.txt
index 67b5c1d1e02a4458f5fefef2ed7e25df32343e2b..9468e8599c0c16d3bd54ec67ba65351be9de1a69 100644 (file)
@@ -19,3 +19,19 @@ blame.showEmail::
 blame.showRoot::
        Do not treat root commits as boundaries in linkgit:git-blame[1].
        This option defaults to false.
+
+blame.ignoreRevsFile::
+       Ignore revisions listed in the file, one unabbreviated object name per
+       line, in linkgit:git-blame[1].  Whitespace and comments beginning with
+       `#` are ignored.  This option may be repeated multiple times.  Empty
+       file names will reset the list of ignored revisions.  This option will
+       be handled before the command line option `--ignore-revs-file`.
+
+blame.markUnblamableLines::
+       Mark lines that were changed by an ignored revision that we could not
+       attribute to another commit with a '*' in the output of
+       linkgit:git-blame[1].
+
+blame.markIgnoredLines::
+       Mark lines that were changed by an ignored revision that we attributed to
+       another commit with a '?' in the output of linkgit:git-blame[1].
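
For reference, a short sketch of setting the three new configuration variables from the hunk above; the file path is an arbitrary example:

```
# Consulted before any --ignore-revs-file given on the command line.
git config blame.ignoreRevsFile .git-blame-ignore-revs

# Mark reattributed lines with '?' and unattributable lines with '*'.
git config blame.markIgnoredLines true
git config blame.markUnblamableLines true
```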
diff --git a/Documentation/git-blame.txt b/Documentation/git-blame.txt
index 16323eb80e3108794067c4dbfcbfe25e46938498..7e81541996359cf4b7a4abce35e8cae5c2ce29fb 100644 (file)
@@ -10,6 +10,7 @@ SYNOPSIS
 [verse]
 'git blame' [-c] [-b] [-l] [--root] [-t] [-f] [-n] [-s] [-e] [-p] [-w] [--incremental]
            [-L <range>] [-S <revs-file>] [-M] [-C] [-C] [-C] [--since=<date>]
+           [--ignore-rev <rev>] [--ignore-revs-file <file>]
            [--progress] [--abbrev=<n>] [<rev> | --contents <file> | --reverse <rev>..<rev>]
            [--] <file>
 
diff --git a/Documentation/git-cherry-pick.txt b/Documentation/git-cherry-pick.txt
index 754b16ce0c9da6a137c9f58de86eb44841ae64d6..83ce51aedfea54fd5150ef142aca24f8c1df95c9 100644 (file)
@@ -10,9 +10,7 @@ SYNOPSIS
 [verse]
 'git cherry-pick' [--edit] [-n] [-m parent-number] [-s] [-x] [--ff]
                  [-S[<keyid>]] <commit>...
-'git cherry-pick' --continue
-'git cherry-pick' --quit
-'git cherry-pick' --abort
+'git cherry-pick' (--continue | --skip | --abort | --quit)
 
 DESCRIPTION
 -----------
diff --git a/Documentation/git-clone.txt b/Documentation/git-clone.txt
index 5fc97f14de4debac113d490e5a8256cde67dae1b..34011c2940ad4b4d3115e3ddc41f18f8a00ecd01 100644 (file)
@@ -23,7 +23,7 @@ DESCRIPTION
 
 Clones a repository into a newly created directory, creates
 remote-tracking branches for each branch in the cloned repository
-(visible using `git branch -r`), and creates and checks out an
+(visible using `git branch --remotes`), and creates and checks out an
 initial branch that is forked from the cloned repository's
 currently active branch.
 
@@ -41,8 +41,8 @@ configuration variables.
 
 OPTIONS
 -------
---local::
 -l::
+--local::
        When the repository to clone from is on a local machine,
        this flag bypasses the normal "Git aware" transport
        mechanism and clones the repository by making a copy of
@@ -63,8 +63,8 @@ Git transport instead.
        directory instead of using hardlinks. This may be desirable
        if you are trying to make a back-up of your repository.
 
---shared::
 -s::
+--shared::
        When the repository to clone is on the local machine,
        instead of using hard links, automatically setup
        `.git/objects/info/alternates` to share the objects
@@ -81,13 +81,13 @@ which automatically call `git gc --auto`. (See linkgit:git-gc[1].)
 If these objects are removed and were referenced by the cloned repository,
 then the cloned repository will become corrupt.
 +
-Note that running `git repack` without the `-l` option in a repository
-cloned with `-s` will copy objects from the source repository into a pack
-in the cloned repository, removing the disk space savings of `clone -s`.
-It is safe, however, to run `git gc`, which uses the `-l` option by
+Note that running `git repack` without the `--local` option in a repository
+cloned with `--shared` will copy objects from the source repository into a pack
+in the cloned repository, removing the disk space savings of `clone --shared`.
+It is safe, however, to run `git gc`, which uses the `--local` option by
 default.
 +
-If you want to break the dependency of a repository cloned with `-s` on
+If you want to break the dependency of a repository cloned with `--shared` on
 its source repository, you can simply run `git repack -a` to copy all
 objects from the source repository into a pack in the cloned repository.
 
@@ -116,19 +116,19 @@ objects from the source repository into a pack in the cloned repository.
        same repository, and this option can be used to stop the
        borrowing.
 
---quiet::
 -q::
+--quiet::
        Operate quietly.  Progress is not reported to the standard
        error stream.
 
---verbose::
 -v::
+--verbose::
        Run verbosely. Does not affect the reporting of progress status
        to the standard error stream.
 
 --progress::
        Progress status is reported on the standard error stream
-       by default when it is attached to a terminal, unless -q
+       by default when it is attached to a terminal, unless `--quiet`
        is specified. This flag forces progress status even if the
        standard error stream is not directed to a terminal.
 
@@ -140,15 +140,15 @@ objects from the source repository into a pack in the cloned repository.
        When multiple `--server-option=<option>` are given, they are all
        sent to the other side in the order listed on the command line.
 
---no-checkout::
 -n::
+--no-checkout::
        No checkout of HEAD is performed after the clone is complete.
 
 --bare::
        Make a 'bare' Git repository.  That is, instead of
        creating `<directory>` and placing the administrative
        files in `<directory>/.git`, make the `<directory>`
-       itself the `$GIT_DIR`. This obviously implies the `-n`
+       itself the `$GIT_DIR`. This obviously implies the `--no-checkout`
        because there is nowhere to check out the working tree.
        Also the branch heads at the remote are copied directly
        to corresponding local branch heads, without mapping
@@ -164,13 +164,13 @@ objects from the source repository into a pack in the cloned repository.
        that all these refs are overwritten by a `git remote update` in the
        target repository.
 
---origin <name>::
 -o <name>::
+--origin <name>::
        Instead of using the remote name `origin` to keep track
        of the upstream repository, use `<name>`.
 
---branch <name>::
 -b <name>::
+--branch <name>::
        Instead of pointing the newly created HEAD to the branch pointed
        to by the cloned repository's HEAD, point to `<name>` branch
        instead. In a non-bare repository, this is the branch that will
@@ -178,8 +178,8 @@ objects from the source repository into a pack in the cloned repository.
        `--branch` can also take tags and detaches the HEAD at that commit
        in the resulting repository.
 
---upload-pack <upload-pack>::
 -u <upload-pack>::
+--upload-pack <upload-pack>::
        When given, and the repository to clone from is accessed
        via ssh, this specifies a non-default path for the command
        run on the other end.
@@ -188,8 +188,8 @@ objects from the source repository into a pack in the cloned repository.
        Specify the directory from which templates will be used;
        (See the "TEMPLATE DIRECTORY" section of linkgit:git-init[1].)
 
---config <key>=<value>::
 -c <key>=<value>::
+--config <key>=<value>::
        Set a configuration variable in the newly-created repository;
        this takes effect immediately after the repository is
        initialized, but before the remote history is fetched or any
diff --git a/Documentation/git-commit-graph.txt b/Documentation/git-commit-graph.txt
index 624470e198202a47e91e89274bb32b2bc3d441d6..eb5e7865f0ef787e1410a95702990e38567b68fe 100644 (file)
@@ -10,7 +10,7 @@ SYNOPSIS
 --------
 [verse]
 'git commit-graph read' [--object-dir <dir>]
-'git commit-graph verify' [--object-dir <dir>]
+'git commit-graph verify' [--object-dir <dir>] [--shallow]
 'git commit-graph write' <options> [--object-dir <dir>]
 
 
@@ -26,7 +26,7 @@ OPTIONS
        Use given directory for the location of packfiles and commit-graph
        file. This parameter exists to specify the location of an alternate
        that only has the objects directory, not a full `.git` directory. The
-       commit-graph file is expected to be at `<dir>/info/commit-graph` and
+       commit-graph file is expected to be in the `<dir>/info` directory and
        the packfiles are expected to be in `<dir>/pack`.
 
 
@@ -51,6 +51,25 @@ or `--stdin-packs`.)
 +
 With the `--append` option, include all commits that are present in the
 existing commit-graph file.
++
+With the `--split` option, write the commit-graph as a chain of multiple
+commit-graph files stored in `<dir>/info/commit-graphs`. The new commits
+not already in the commit-graph are added in a new "tip" file. This file
+is merged with the existing file if the following merge conditions are
+met:
++
+* If `--size-multiple=<X>` is not specified, let `X` equal 2. If the new
+tip file would have `N` commits and the previous tip has `M` commits, and
+`X` times `N` is greater than `M`, then instead merge the two files into a
+single file.
++
+* If `--max-commits=<M>` is specified with `M` a positive integer, and the
+new tip file would have more than `M` commits, then instead merge the new
+tip with the previous tip.
++
+Finally, if `--expire-time=<datetime>` is not specified, let `datetime`
+be the current time. After writing the split commit-graph, delete all
+unused commit-graph files whose modified times are older than `datetime`.
 
 'read'::
 
@@ -61,6 +80,9 @@ Used for debugging purposes.
 
 Read the commit-graph file and verify its contents against the object
 database. Used to check for corrupted data.
++
+With the `--shallow` option, only check the tip commit-graph file in
+a chain of split commit-graphs.
 
 
 EXAMPLES
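
A hedged sketch of the new `--split` and `--shallow` options described in the hunk above; the option values are illustrative, not defaults:

```
# Append new commits as a tip of the commit-graph chain, merging layers when
# the size-multiple or max-commits conditions are met, and expiring
# unreferenced graph files older than the given time.
git commit-graph write --reachable --split --size-multiple=4 \
	--max-commits=100000 --expire-time=2019-07-01

# Verify only the tip file of a split commit-graph chain.
git commit-graph verify --shallow
```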
diff --git a/Documentation/git-multi-pack-index.txt b/Documentation/git-multi-pack-index.txt
index f7778a2c85c1aaa1295dbf3b9e4f74f3683def23..233b2b786271cc695268d2f7c0139d02228bd3c2 100644 (file)
@@ -9,7 +9,7 @@ git-multi-pack-index - Write and verify multi-pack-indexes
 SYNOPSIS
 --------
 [verse]
-'git multi-pack-index' [--object-dir=<dir>] <verb>
+'git multi-pack-index' [--object-dir=<dir>] <subcommand>
 
 DESCRIPTION
 -----------
@@ -23,13 +23,35 @@ OPTIONS
        `<dir>/packs/multi-pack-index` for the current MIDX file, and
        `<dir>/packs` for the pack-files to index.
 
+The following subcommands are available:
+
 write::
-       When given as the verb, write a new MIDX file to
-       `<dir>/packs/multi-pack-index`.
+       Write a new MIDX file.
 
 verify::
-       When given as the verb, verify the contents of the MIDX file
-       at `<dir>/packs/multi-pack-index`.
+       Verify the contents of the MIDX file.
+
+expire::
+       Delete the pack-files that are tracked by the MIDX file, but
+       have no objects referenced by the MIDX. Rewrite the MIDX file
+       afterward to remove all references to these pack-files.
+
+repack::
+       Create a new pack-file containing objects in small pack-files
+       referenced by the multi-pack-index. If the size given by the
+       `--batch-size=<size>` argument is zero, then create a pack
+       containing all objects referenced by the multi-pack-index. For
+       a non-zero batch size, select the pack-files by examining packs
+       from oldest-to-newest, computing each pack's "expected size" by
+       counting the number of its objects referenced by the
+       multi-pack-index, dividing by the total number of objects in
+       the pack, and multiplying by the pack size. Packs whose expected
+       size is below the batch size are selected until the set of
+       selected packs has a total expected size of at least the batch
+       size. If the total size does not reach the batch size, then do
+       nothing. If a new pack-file is created, rewrite the
+       multi-pack-index to reference the new pack-file. A later run of
+       'git multi-pack-index expire' will delete the pack-files that
+       were part of this batch.
 
 
 EXAMPLES
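
A brief sketch of how the new `expire` and `repack` subcommands might be used together; the batch size is an arbitrary example:

```
# Roll objects from small packs referenced by the multi-pack-index into a
# new pack of roughly the given expected size, then rewrite the MIDX.
git multi-pack-index repack --batch-size=2g

# Drop packs that no longer have any objects referenced by the MIDX and
# rewrite the MIDX without them.
git multi-pack-index expire
```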
diff --git a/Documentation/git-revert.txt b/Documentation/git-revert.txt
index fae4d66547fb900d79d2bafad52cf0aba9a51158..9d22270757c9b5d402f680a3f5933989678cb6f0 100644 (file)
@@ -9,9 +9,7 @@ SYNOPSIS
 --------
 [verse]
 'git revert' [--[no-]edit] [-n] [-m parent-number] [-s] [-S[<keyid>]] <commit>...
-'git revert' --continue
-'git revert' --quit
-'git revert' --abort
+'git revert' (--continue | --skip | --abort | --quit)
 
 DESCRIPTION
 -----------
diff --git a/Documentation/git.txt b/Documentation/git.txt
index e095514ace357b8e76ab0d9ea58b2a6c0e3dcd5b..9b82564d1aa9c04c4b0299c1a82c4ba45a2c3415 100644 (file)
@@ -57,7 +57,8 @@ help ...`.
        Run as if git was started in '<path>' instead of the current working
        directory.  When multiple `-C` options are given, each subsequent
        non-absolute `-C <path>` is interpreted relative to the preceding `-C
-       <path>`.
+       <path>`.  If '<path>' is present but empty, e.g. `-C ""`, then the
+       current working directory is left unchanged.
 +
 This option affects options that expect path name like `--git-dir` and
 `--work-tree` in that their interpretations of the path names would be
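
A tiny sketch of the clarified `-C ""` behavior documented above: an empty path leaves the working directory unchanged, so both invocations run in the current directory:

```
# The empty -C argument is a no-op; these two commands are equivalent.
git -C "" status
git status
```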
diff --git a/Documentation/rev-list-options.txt b/Documentation/rev-list-options.txt
index 286fc163f14256f4f7ff4be7fba57cef963d2f75..bb1251c0364dc71880f6e63db7c6116ed859b90f 100644 (file)
@@ -182,6 +182,14 @@ explicitly.
        Pretend as if all objects mentioned by reflogs are listed on the
        command line as `<commit>`.
 
+--alternate-refs::
+       Pretend as if all objects mentioned as ref tips of alternate
+       repositories were listed on the command line. An alternate
+       repository is any repository whose object directory is specified
+       in `objects/info/alternates`.  The set of included objects may
+       be modified by `core.alternateRefsCommand`, etc. See
+       linkgit:git-config[1].
+
 --single-worktree::
        By default, all working trees will be examined by the
        following options when there are more than one (see
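
A hedged sketch of combining the new `--alternate-refs` option with other rev-list options; it assumes the repository borrows objects through an alternates file:

```
# List objects reachable from local refs but not from the ref tips of any
# alternate repository (i.e. what a borrowing clone would still need).
git rev-list --objects --all --not --alternate-refs
```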
diff --git a/Documentation/sequencer.txt b/Documentation/sequencer.txt
index 5a57c4a4077f0bba7bb0186932a166f2dc7666ce..3bceb564741158dfcebbfabf6f01a9b2f444494f 100644 (file)
@@ -3,6 +3,10 @@
        `.git/sequencer`.  Can be used to continue after resolving
        conflicts in a failed cherry-pick or revert.
 
+--skip::
+       Skip the current commit and continue with the rest of the
+       sequence.
+
 --quit::
        Forget about the current operation in progress.  Can be used
        to clear the sequencer state after a failed cherry-pick or
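
A minimal sketch of the new `--skip` action in a cherry-pick sequence; the commit range is a placeholder:

```
# Pick a range of commits; suppose one of them fails to apply cleanly.
git cherry-pick maint..topic

# Drop just the failing commit and continue with the rest of the sequence,
# instead of resolving by hand or aborting the whole operation.
git cherry-pick --skip
```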
diff --git a/Documentation/technical/api-trace2.txt b/Documentation/technical/api-trace2.txt
index f7ffe7d5998c6ec6f7ed4ec420c938d7f129c94f..71eb081fed25a7314a822087056a6c5b9a72d944 100644 (file)
@@ -668,7 +668,7 @@ completed.)
        "event":"signal",
        ...
        "t_abs":0.001227,  # elapsed time in seconds
-       "signal":13        # SIGTERM, SIGINT, etc.
+       "signo":13         # SIGTERM, SIGINT, etc.
 }
 ------------
 
diff --git a/Documentation/technical/commit-graph-format.txt b/Documentation/technical/commit-graph-format.txt
index 16452a0504c8fa5b9b1b62cb907b96315db43942..a4f17441aed30f14c036a4bed6a911c86cf31ce5 100644 (file)
@@ -44,8 +44,9 @@ HEADER:
 
   1-byte number (C) of "chunks"
 
-  1-byte (reserved for later use)
-     Current clients should ignore this value.
+  1-byte number (B) of base commit-graphs
+      We infer the length (H*B) of the Base Graphs chunk
+      from this value.
 
 CHUNK LOOKUP:
 
@@ -92,6 +93,12 @@ CHUNK DATA:
       positions for the parents until reaching a value with the most-significant
       bit on. The other bits correspond to the position of the last parent.
 
+  Base Graphs List (ID: {'B', 'A', 'S', 'E'}) [Optional]
+      This list of H-byte hashes describes a set of B commit-graph files that
+      form a commit-graph chain. The graph position for the ith commit in this
+      file's OID Lookup chunk is equal to i plus the number of commits in all
+      base graphs.  If B is non-zero, this chunk must exist.
+
 TRAILER:
 
        H-byte HASH-checksum of all of the above.
diff --git a/Documentation/technical/commit-graph.txt b/Documentation/technical/commit-graph.txt
index fb53341d5ee3116361d599b5aca2e5ea3a228589..729fbcb32f8793d06da3a985bb6d8a299b3a15dd 100644 (file)
@@ -127,6 +127,197 @@ Design Details
   helpful for these clones, anyway. The commit-graph will not be read or
   written when shallow commits are present.
 
+Commit Graphs Chains
+--------------------
+
+Typically, repos grow with near-constant velocity (commits per day). Over time,
+the number of commits added by a fetch operation is much smaller than the
+number of commits in the full history. By creating a "chain" of commit-graphs,
+we enable fast writes of new commit data without rewriting the entire commit
+history -- at least, most of the time.
+
+## File Layout
+
+A commit-graph chain uses multiple files, and we use a fixed naming convention
+to organize these files. Each commit-graph file has a name
+`$OBJDIR/info/commit-graphs/graph-{hash}.graph` where `{hash}` is the hex-
+valued hash stored in the footer of that file (which is a hash of the file's
+contents before that hash). For a chain of commit-graph files, a plain-text
+file at `$OBJDIR/info/commit-graphs/commit-graph-chain` contains the
+hashes for the files in order from "lowest" to "highest".
+
+For example, if the `commit-graph-chain` file contains the lines
+
+```
+       {hash0}
+       {hash1}
+       {hash2}
+```
+
+then the commit-graph chain looks like the following diagram:
+
+ +-----------------------+
+ |  graph-{hash2}.graph  |
+ +-----------------------+
+         |
+ +-----------------------+
+ |                       |
+ |  graph-{hash1}.graph  |
+ |                       |
+ +-----------------------+
+         |
+ +-----------------------+
+ |                       |
+ |                       |
+ |                       |
+ |  graph-{hash0}.graph  |
+ |                       |
+ |                       |
+ |                       |
+ +-----------------------+
+
+Let X0 be the number of commits in `graph-{hash0}.graph`, X1 be the number of
+commits in `graph-{hash1}.graph`, and X2 be the number of commits in
+`graph-{hash2}.graph`. If a commit appears in position i in `graph-{hash2}.graph`,
+then we interpret this as being the commit in position (X0 + X1 + i), and that
+will be used as its "graph position". The commits in `graph-{hash2}.graph` use these
+positions to refer to their parents, which may be in `graph-{hash1}.graph` or
+`graph-{hash0}.graph`. We can navigate to an arbitrary commit in position j by checking
+its containment in the intervals [0, X0), [X0, X0 + X1), [X0 + X1, X0 + X1 +
+X2).
+
+Each commit-graph file (except the base, `graph-{hash0}.graph`) contains data
+specifying the hashes of all files in the lower layers. In the above example,
+`graph-{hash1}.graph` contains `{hash0}` while `graph-{hash2}.graph` contains
+`{hash0}` and `{hash1}`.
+
+## Merging commit-graph files
+
+If we only added a new commit-graph file on every write, we would run into a
+linear search problem through many commit-graph files.  Instead, we use a merge
+strategy to decide when the stack should collapse some number of levels.
+
+The diagram below shows such a collapse. As a set of new commits are added, it
+is determined by the merge strategy that the files should collapse to
+`graph-{hash1}`. Thus, the new commits, the commits in `graph-{hash2}` and
+the commits in `graph-{hash1}` should be combined into a new `graph-{hash3}`
+file.
+
+                           +---------------------+
+                           |                     |
+                           |    (new commits)    |
+                           |                     |
+                           +---------------------+
+                           |                     |
+ +-----------------------+  +---------------------+
+ |  graph-{hash2}        |->|                     |
+ +-----------------------+  +---------------------+
+          |                 |                     |
+ +-----------------------+  +---------------------+
+ |                       |  |                     |
+ |  graph-{hash1}        |->|                     |
+ |                       |  |                     |
+ +-----------------------+  +---------------------+
+          |                  tmp_graphXXX
+ +-----------------------+
+ |                       |
+ |                       |
+ |                       |
+ |  graph-{hash0}        |
+ |                       |
+ |                       |
+ |                       |
+ +-----------------------+
+
+During this process, the commits to write are combined and sorted, and the
+result is written to a temporary file, all while holding a
+`commit-graph-chain.lock` lock-file.  When the file is flushed, we rename it
+to `graph-{hash3}`
+according to the computed `{hash3}`. Finally, we write the new chain data to
+`commit-graph-chain.lock`:
+
+```
+       {hash0}
+       {hash3}
+```
+
+We then close the lock-file.
+
+## Merge Strategy
+
+When writing a set of commits that do not exist in the commit-graph stack of
+height N, we default to creating a new file at level N + 1. We then decide to
+merge with the Nth level if one of two conditions holds:
+
+  1. `--size-multiple=<X>` is specified (otherwise X = 2), and the number of
+     commits in level N is less than X times the number of commits in level
+     N + 1.
+
+  2. `--max-commits=<C>` is specified with non-zero C, and the number of
+     commits in level N + 1 is more than C.
+
+This decision cascades down the levels: when we merge a level we create a new
+set of commits that is then compared to the next level.
+
+The first condition bounds the number of levels to be logarithmic in the total
+number of commits.  The second condition bounds the total number of commits in
+a `graph-{hashN}` file and not in the `commit-graph` file, preventing
+significant performance issues when the stack merges and another process only
+partially reads the previous stack.
+
+The merge strategy values (2 for the size multiple, 64,000 for the maximum
+number of commits) could be extracted into config settings for full
+flexibility.
+
+## Deleting graph-{hash} files
+
+After a new tip file is written, some `graph-{hash}` files may no longer
+be part of a chain. It is important to remove these files from disk, eventually.
+The main reason to delay removal is that another process could read the
+`commit-graph-chain` file before it is rewritten, but then look for the
+`graph-{hash}` files after they are deleted.
+
+To allow holding old split commit-graphs for a while after they are unreferenced,
+we update the modified times of the files when they become unreferenced. Then,
+we scan the `$OBJDIR/info/commit-graphs/` directory for `graph-{hash}`
+files whose modified times are older than a given expiry window. This window
+defaults to zero, but can be changed using command-line arguments or a config
+setting.
+
+## Chains across multiple object directories
+
+In a repo with alternates, we look for the `commit-graph-chain` file starting
+in the local object directory and then in each alternate. The first file that
+exists defines our chain. As we look for the `graph-{hash}` files for
+each `{hash}` in the chain file, we follow the same pattern for the host
+directories.
+
+This allows commit-graphs to be split across multiple forks in a fork network.
+The typical case is a large "base" repo with many smaller forks.
+
+As the base repo advances, it will likely update and merge its commit-graph
+chain more frequently than the forks. If a fork updates its commit-graph after
+the base repo, then it should "reparent" the commit-graph chain onto the new
+chain in the base repo. When reading each `graph-{hash}` file, we track
+the object directory containing it. During a write of a new commit-graph file,
+we check for any changes in the source object directory, read the
+`commit-graph-chain` file for that source, and create a new file based on
+those files. During this "reparent" operation, we must collapse all levels
+in the fork, as all of the files are invalid against the new base file.
+
+It is crucial to be careful when cleaning up "unreferenced" `graph-{hash}.graph`
+files in this scenario. It falls to the user to define the proper settings for
+their custom environment:
+
+ 1. When merging levels in the base repo, the unreferenced files may still be
+    referenced by chains from fork repos.
+
+ 2. The expiry time should be set to a length of time such that every fork has
+    time to recompute their commit-graph chain to "reparent" onto the new base
+    file(s).
+
+ 3. If the commit-graph chain is updated in the base, the fork will not have
+    access to the new chain until its chain is updated to reference those files.
+    (This may change in the future [5].)
+
 Related Links
 -------------
 [0] https://bugs.chromium.org/p/git/issues/detail?id=8
@@ -153,3 +344,7 @@ Related Links
 
 [4] https://public-inbox.org/git/20180108154822.54829-1-git@jeffhostetler.com/T/#u
     A patch to remove the ahead-behind calculation from 'status'.
+
+[5] https://public-inbox.org/git/f27db281-abad-5043-6d71-cbb083b1c877@gmail.com/
+    A discussion of a "two-dimensional graph position" that can allow reading
+    multiple commit-graph chains at the same time.
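
To make the "File Layout" section above concrete, a sketch of inspecting a split commit-graph chain on disk; the hash names are placeholders:

```
# The chain file lists the graph layers from lowest (base) to highest (tip).
cat .git/objects/info/commit-graphs/commit-graph-chain

# Each listed hash has a corresponding graph file in the same directory.
ls .git/objects/info/commit-graphs/
# commit-graph-chain  graph-{hash0}.graph  graph-{hash1}.graph  ...
```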
index 98a0588416c4f3f7a5b48ad72a6c2fd54ff94120..11ccea40716d9bbcef731f093e530a9b1aee0013 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -721,6 +721,7 @@ TEST_BUILTINS_OBJS += test-lazy-init-name-hash.o
 TEST_BUILTINS_OBJS += test-match-trees.o
 TEST_BUILTINS_OBJS += test-mergesort.o
 TEST_BUILTINS_OBJS += test-mktemp.o
+TEST_BUILTINS_OBJS += test-oidmap.o
 TEST_BUILTINS_OBJS += test-online-cpus.o
 TEST_BUILTINS_OBJS += test-parse-options.o
 TEST_BUILTINS_OBJS += test-path-utils.o
index 67de6dece4284a52fef27f3e7ef662d68eaa8109..3ee0ee2d8fbb04dabcfc0152dc741c7d1f28b220 100644 (file)
--- a/advice.c
+++ b/advice.c
@@ -17,6 +17,7 @@ int advice_status_ahead_behind_warning = 1;
 int advice_commit_before_merge = 1;
 int advice_reset_quiet_warning = 1;
 int advice_resolve_conflict = 1;
+int advice_sequencer_in_use = 1;
 int advice_implicit_identity = 1;
 int advice_detached_head = 1;
 int advice_set_upstream_failure = 1;
@@ -75,6 +76,7 @@ static struct {
        { "commitBeforeMerge", &advice_commit_before_merge },
        { "resetQuiet", &advice_reset_quiet_warning },
        { "resolveConflict", &advice_resolve_conflict },
+       { "sequencerInUse", &advice_sequencer_in_use },
        { "implicitIdentity", &advice_implicit_identity },
        { "detachedHead", &advice_detached_head },
        { "setupStreamFailure", &advice_set_upstream_failure },
index 940c4c253e25644cd85362418ea965dd0bb51a3d..d0154048431c773b9bf0ec450478a93d5ede06c0 100644 (file)
--- a/advice.h
+++ b/advice.h
@@ -17,6 +17,7 @@ extern int advice_status_ahead_behind_warning;
 extern int advice_commit_before_merge;
 extern int advice_reset_quiet_warning;
 extern int advice_resolve_conflict;
+extern int advice_sequencer_in_use;
 extern int advice_implicit_identity;
 extern int advice_detached_head;
 extern int advice_set_upstream_failure;
index 53141c1f0ee12b4ce14efef1e4026d7ba7d2b665..a8da0fcc4f0cc47e585b3b378109fbd84496ed15 100644 (file)
--- a/archive.c
+++ b/archive.c
@@ -418,7 +418,9 @@ static void parse_treeish_arg(const char **argv,
                unsigned short mode;
                int err;
 
-               err = get_tree_entry(&tree->object.oid, prefix, &tree_oid,
+               err = get_tree_entry(ar_args->repo,
+                                    &tree->object.oid,
+                                    prefix, &tree_oid,
                                     &mode);
                if (err || !S_ISDIR(mode))
                        die(_("current working directory is untracked"));
diff --git a/blame.c b/blame.c
index 145eaf2faf9cf56977da61572c93783ea702b0f9..36a2e7ef119d7bea691babe15f861a0600028196 100644 (file)
--- a/blame.c
+++ b/blame.c
@@ -101,7 +101,7 @@ static void verify_working_tree_path(struct repository *r,
                struct object_id blob_oid;
                unsigned short mode;
 
-               if (!get_tree_entry(commit_oid, path, &blob_oid, &mode) &&
+               if (!get_tree_entry(r, commit_oid, path, &blob_oid, &mode) &&
                    oid_object_info(r, &blob_oid, NULL) == OBJ_BLOB)
                        return;
        }
@@ -311,12 +311,707 @@ static int diff_hunks(mmfile_t *file_a, mmfile_t *file_b,
        return xdi_diff(file_a, file_b, &xpp, &xecfg, &ecb);
 }
 
+static const char *get_next_line(const char *start, const char *end)
+{
+       const char *nl = memchr(start, '\n', end - start);
+
+       return nl ? nl + 1 : end;
+}
+
+static int find_line_starts(int **line_starts, const char *buf,
+                           unsigned long len)
+{
+       const char *end = buf + len;
+       const char *p;
+       int *lineno;
+       int num = 0;
+
+       for (p = buf; p < end; p = get_next_line(p, end))
+               num++;
+
+       ALLOC_ARRAY(*line_starts, num + 1);
+       lineno = *line_starts;
+
+       for (p = buf; p < end; p = get_next_line(p, end))
+               *lineno++ = p - buf;
+
+       *lineno = len;
+
+       return num;
+}
+
+struct fingerprint_entry;
+
+/* A fingerprint is intended to loosely represent a string, such that two
+ * fingerprints can be quickly compared to give an indication of the similarity
+ * of the strings that they represent.
+ *
+ * A fingerprint is represented as a multiset of the lower-cased byte pairs in
+ * the string that it represents. Whitespace is added at each end of the
+ * string. Whitespace pairs are ignored. Whitespace is converted to '\0'.
+ * For example, the string "Darth   Radar" will be converted to the following
+ * fingerprint:
+ * {"\0d", "da", "da", "ar", "ar", "rt", "th", "h\0", "\0r", "ra", "ad", "r\0"}
+ *
+ * The similarity between two fingerprints is the size of the intersection of
+ * their multisets, including repeated elements. See fingerprint_similarity for
+ * examples.
+ *
+ * For ease of implementation, the fingerprint is implemented as a map
+ * of byte pairs to the count of that byte pair in the string, instead of
+ * allowing repeated elements in a set.
+ */
+struct fingerprint {
+       struct hashmap map;
+       /* As we know the maximum number of entries in advance, it's
+        * convenient to store the entries in a single array instead of having
+        * the hashmap manage the memory.
+        */
+       struct fingerprint_entry *entries;
+};
+
+/* A byte pair in a fingerprint. Stores the number of times the byte pair
+ * occurs in the string that the fingerprint represents.
+ */
+struct fingerprint_entry {
+       /* The hashmap entry - the hash represents the byte pair in its
+        * entirety so we don't need to store the byte pair separately.
+        */
+       struct hashmap_entry entry;
+       /* The number of times the byte pair occurs in the string that the
+        * fingerprint represents.
+        */
+       int count;
+};
+
+/* See `struct fingerprint` for an explanation of what a fingerprint is.
+ * \param result the fingerprint of the string is stored here. This must be
+ *              freed later using free_fingerprint.
+ * \param line_begin the start of the string
+ * \param line_end the end of the string
+ */
+static void get_fingerprint(struct fingerprint *result,
+                           const char *line_begin,
+                           const char *line_end)
+{
+       unsigned int hash, c0 = 0, c1;
+       const char *p;
+       int max_map_entry_count = 1 + line_end - line_begin;
+       struct fingerprint_entry *entry = xcalloc(max_map_entry_count,
+               sizeof(struct fingerprint_entry));
+       struct fingerprint_entry *found_entry;
+
+       hashmap_init(&result->map, NULL, NULL, max_map_entry_count);
+       result->entries = entry;
+       for (p = line_begin; p <= line_end; ++p, c0 = c1) {
+               /* Always terminate the string with whitespace.
+                * Normalise whitespace to 0, and normalise letters to
+                * lower case. This won't work for multibyte characters but at
+                * worst will match some unrelated characters.
+                */
+               if ((p == line_end) || isspace(*p))
+                       c1 = 0;
+               else
+                       c1 = tolower(*p);
+               hash = c0 | (c1 << 8);
+               /* Ignore whitespace pairs */
+               if (hash == 0)
+                       continue;
+               hashmap_entry_init(entry, hash);
+
+               found_entry = hashmap_get(&result->map, entry, NULL);
+               if (found_entry) {
+                       found_entry->count += 1;
+               } else {
+                       entry->count = 1;
+                       hashmap_add(&result->map, entry);
+                       ++entry;
+               }
+       }
+}
+
+static void free_fingerprint(struct fingerprint *f)
+{
+       hashmap_free(&f->map, 0);
+       free(f->entries);
+}
+
+/* Calculates the similarity between two fingerprints as the size of the
+ * intersection of their multisets, including repeated elements. See
+ * `struct fingerprint` for an explanation of the fingerprint representation.
+ * The similarity between "cat mat" and "father rather" is 2 because "at" is
+ * present twice in both strings while the similarity between "tim" and "mit"
+ * is 0.
+ */
+static int fingerprint_similarity(struct fingerprint *a, struct fingerprint *b)
+{
+       int intersection = 0;
+       struct hashmap_iter iter;
+       const struct fingerprint_entry *entry_a, *entry_b;
+
+       hashmap_iter_init(&b->map, &iter);
+
+       while ((entry_b = hashmap_iter_next(&iter))) {
+               if ((entry_a = hashmap_get(&a->map, entry_b, NULL))) {
+                       intersection += entry_a->count < entry_b->count ?
+                                       entry_a->count : entry_b->count;
+               }
+       }
+       return intersection;
+}
+
+/* Subtracts byte-pair elements in B from A, modifying A in place.
+ */
+static void fingerprint_subtract(struct fingerprint *a, struct fingerprint *b)
+{
+       struct hashmap_iter iter;
+       struct fingerprint_entry *entry_a;
+       const struct fingerprint_entry *entry_b;
+
+       hashmap_iter_init(&b->map, &iter);
+
+       while ((entry_b = hashmap_iter_next(&iter))) {
+               if ((entry_a = hashmap_get(&a->map, entry_b, NULL))) {
+                       if (entry_a->count <= entry_b->count)
+                               hashmap_remove(&a->map, entry_b, NULL);
+                       else
+                               entry_a->count -= entry_b->count;
+               }
+       }
+}
+
+/* Calculate fingerprints for a series of lines.
+ * Puts the fingerprints in the fingerprints array, which must have been
+ * preallocated to allow storing line_count elements.
+ */
+static void get_line_fingerprints(struct fingerprint *fingerprints,
+                                 const char *content, const int *line_starts,
+                                 long first_line, long line_count)
+{
+       int i;
+       const char *linestart, *lineend;
+
+       line_starts += first_line;
+       for (i = 0; i < line_count; ++i) {
+               linestart = content + line_starts[i];
+               lineend = content + line_starts[i + 1];
+               get_fingerprint(fingerprints + i, linestart, lineend);
+       }
+}
+
+static void free_line_fingerprints(struct fingerprint *fingerprints,
+                                  int nr_fingerprints)
+{
+       int i;
+
+       for (i = 0; i < nr_fingerprints; i++)
+               free_fingerprint(&fingerprints[i]);
+}
+
+/* This contains the data necessary to linearly map a line number in one half
+ * of a diff chunk to the line in the other half of the diff chunk that is
+ * closest in terms of its position as a fraction of the length of the chunk.
+ */
+struct line_number_mapping {
+       int destination_start, destination_length,
+               source_start, source_length;
+};
+
+/* Given a line number in one range, offset and scale it to map it onto the
+ * other range.
+ * Essentially this mapping is a simple linear equation but the calculation is
+ * more complicated to allow performing it with integer operations.
+ * Another complication is that if a line could map onto many lines in the
+ * destination range then we want to choose the line at the center of those
+ * possibilities.
+ * Example: if the chunk is 2 lines long in A and 10 lines long in B then the
+ * first 5 lines in B will map onto the first line in the A chunk, while the
+ * last 5 lines will all map onto the second line in the A chunk.
+ * Example: if the chunk is 10 lines long in A and 2 lines long in B then line
+ * 0 in B will map onto line 2 in A, and line 1 in B will map onto line 7 in A.
+ */
+static int map_line_number(int line_number,
+       const struct line_number_mapping *mapping)
+{
+       return ((line_number - mapping->source_start) * 2 + 1) *
+              mapping->destination_length /
+              (mapping->source_length * 2) +
+              mapping->destination_start;
+}
+
+/* Get a pointer to the element storing the similarity between a line in A
+ * and a line in B.
+ *
+ * The similarities are stored in a 2-dimensional array. Each "row" in the
+ * array contains the similarities for a line in B. The similarities stored in
+ * a row are the similarities between the line in B and the nearby lines in A.
+ * To keep the length of each row the same, it is padded out with values of -1
+ * where the search range extends beyond the lines in A.
+ * For example, if max_search_distance_a is 2 and the two sides of a diff chunk
+ * look like this:
+ * a | m
+ * b | n
+ * c | o
+ * d | p
+ * e | q
+ * Then the similarity array will contain:
+ * [-1, -1, am, bm, cm,
+ *  -1, an, bn, cn, dn,
+ *  ao, bo, co, do, eo,
+ *  bp, cp, dp, ep, -1,
+ *  cq, dq, eq, -1, -1]
+ * Where similarities are denoted either by -1 for invalid, or the
+ * concatenation of the two lines in the diff being compared.
+ *
+ * \param similarities array of similarities between lines in A and B
+ * \param line_a the index of the line in A, in the same frame of reference as
+ *     closest_line_a.
+ * \param local_line_b the index of the line in B, relative to the first line
+ *                    in B that similarities represents.
+ * \param closest_line_a the index of the line in A that is deemed to be
+ *                      closest to local_line_b. This must be in the same
+ *                      frame of reference as line_a. This value defines
+ *                      where similarities is centered for the line in B.
+ * \param max_search_distance_a maximum distance in lines from the closest line
+ *                             in A for other lines in A for which
+ *                             similarities may be calculated.
+ */
+static int *get_similarity(int *similarities,
+                          int line_a, int local_line_b,
+                          int closest_line_a, int max_search_distance_a)
+{
+       assert(abs(line_a - closest_line_a) <=
+              max_search_distance_a);
+       return similarities + line_a - closest_line_a +
+              max_search_distance_a +
+              local_line_b * (max_search_distance_a * 2 + 1);
+}
+
+#define CERTAIN_NOTHING_MATCHES -2
+#define CERTAINTY_NOT_CALCULATED -1
+
+/* Given a line in B, first calculate its similarities with nearby lines in A
+ * if not already calculated, then identify the most similar and second most
+ * similar lines. The "certainty" is calculated based on those two
+ * similarities.
+ *
+ * \param start_a the index of the first line of the chunk in A
+ * \param length_a the length in lines of the chunk in A
+ * \param local_line_b the index of the line in B, relative to the first line
+ *                    in the chunk.
+ * \param fingerprints_a array of fingerprints for the chunk in A
+ * \param fingerprints_b array of fingerprints for the chunk in B
+ * \param similarities 2-dimensional array of similarities between lines in A
+ *                    and B. See get_similarity() for more details.
+ * \param certainties array of values indicating how strongly a line in B is
+ *                   matched with some line in A.
+ * \param second_best_result array of absolute indices in A for the second
+ *                          closest match of a line in B.
+ * \param result array of absolute indices in A for the closest match of a line
+ *              in B.
+ * \param max_search_distance_a maximum distance in lines from the closest line
+ *                             in A for other lines in A for which
+ *                             similarities may be calculated.
+ * \param map_line_number_in_b_to_a parameter to map_line_number().
+ */
+static void find_best_line_matches(
+       int start_a,
+       int length_a,
+       int start_b,
+       int local_line_b,
+       struct fingerprint *fingerprints_a,
+       struct fingerprint *fingerprints_b,
+       int *similarities,
+       int *certainties,
+       int *second_best_result,
+       int *result,
+       const int max_search_distance_a,
+       const struct line_number_mapping *map_line_number_in_b_to_a)
+{
+
+       int i, search_start, search_end, closest_local_line_a, *similarity,
+               best_similarity = 0, second_best_similarity = 0,
+               best_similarity_index = 0, second_best_similarity_index = 0;
+
+       /* certainty has already been calculated so no need to redo the work */
+       if (certainties[local_line_b] != CERTAINTY_NOT_CALCULATED)
+               return;
+
+       closest_local_line_a = map_line_number(
+               local_line_b + start_b, map_line_number_in_b_to_a) - start_a;
+
+       search_start = closest_local_line_a - max_search_distance_a;
+       if (search_start < 0)
+               search_start = 0;
+
+       search_end = closest_local_line_a + max_search_distance_a + 1;
+       if (search_end > length_a)
+               search_end = length_a;
+
+       for (i = search_start; i < search_end; ++i) {
+               similarity = get_similarity(similarities,
+                                           i, local_line_b,
+                                           closest_local_line_a,
+                                           max_search_distance_a);
+               if (*similarity == -1) {
+                       /* This value will never exceed 10 but assert just in
+                        * case
+                        */
+                       assert(abs(i - closest_local_line_a) < 1000);
+                       /* scale the similarity by (1000 - distance from
+                        * closest line) to act as a tie break between lines
+                        * that otherwise are equally similar.
+                        */
+                       *similarity = fingerprint_similarity(
+                               fingerprints_b + local_line_b,
+                               fingerprints_a + i) *
+                               (1000 - abs(i - closest_local_line_a));
+               }
+               if (*similarity > best_similarity) {
+                       second_best_similarity = best_similarity;
+                       second_best_similarity_index = best_similarity_index;
+                       best_similarity = *similarity;
+                       best_similarity_index = i;
+               } else if (*similarity > second_best_similarity) {
+                       second_best_similarity = *similarity;
+                       second_best_similarity_index = i;
+               }
+       }
+
+       if (best_similarity == 0) {
+               /* this line definitely doesn't match with anything. Mark it
+                * with this special value so it doesn't get invalidated and
+                * won't be recalculated.
+                */
+               certainties[local_line_b] = CERTAIN_NOTHING_MATCHES;
+               result[local_line_b] = -1;
+       } else {
+               /* Calculate the certainty with which this line matches.
+                * If the line matches well with two lines then that reduces
+                * the certainty. However we still want to prioritise matching
+                * a line that matches very well with two lines over matching a
+                * line that matches poorly with one line, hence doubling
+                * best_similarity.
+                * This means that if we have
+                * line X that matches only one line with a score of 3,
+                * line Y that matches two lines equally with a score of 5,
+                * and line Z that matches only one line with a score of 2,
+                * then the lines in order of certainty are X, Y, Z.
+                */
+               certainties[local_line_b] = best_similarity * 2 -
+                       second_best_similarity;
+
+               /* We keep both the best and second best results to allow us to
+                * check at a later stage of the matching process whether the
+                * result needs to be invalidated.
+                */
+               result[local_line_b] = start_a + best_similarity_index;
+               second_best_result[local_line_b] =
+                       start_a + second_best_similarity_index;
+       }
+}
+
+/*
+ * This finds the line that we can match with the most confidence, and
+ * uses it as a partition. It then calls itself on the lines on either side of
+ * that partition. In this way we avoid lines appearing out of order, and
+ * retain a sensible line ordering.
+ * \param start_a index of the first line in A with which lines in B may be
+ *               compared.
+ * \param start_b index of the first line in B for which matching should be
+ *               done.
+ * \param length_a number of lines in A with which lines in B may be compared.
+ * \param length_b number of lines in B for which matching should be done.
+ * \param fingerprints_a mutable array of fingerprints in A. The first element
+ *                      corresponds to the line at start_a.
+ * \param fingerprints_b array of fingerprints in B. The first element
+ *                      corresponds to the line at start_b.
+ * \param similarities 2-dimensional array of similarities between lines in A
+ *                    and B. See get_similarity() for more details.
+ * \param certainties array of values indicating how strongly a line in B is
+ *                   matched with some line in A.
+ * \param second_best_result array of absolute indices in A for the second
+ *                          closest match of a line in B.
+ * \param result array of absolute indices in A for the closest match of a line
+ *              in B.
+ * \param max_search_distance_a maximum distance in lines from the closest line
+ *                           in A for other lines in A for which
+ *                           similarities may be calculated.
+ * \param max_search_distance_b an upper bound on the greatest possible
+ *                           distance between lines in B such that they will
+ *                              both be compared with the same line in A
+ *                           according to max_search_distance_a.
+ * \param map_line_number_in_b_to_a parameter to map_line_number().
+ */
+static void fuzzy_find_matching_lines_recurse(
+       int start_a, int start_b,
+       int length_a, int length_b,
+       struct fingerprint *fingerprints_a,
+       struct fingerprint *fingerprints_b,
+       int *similarities,
+       int *certainties,
+       int *second_best_result,
+       int *result,
+       int max_search_distance_a,
+       int max_search_distance_b,
+       const struct line_number_mapping *map_line_number_in_b_to_a)
+{
+       int i, invalidate_min, invalidate_max, offset_b,
+               second_half_start_a, second_half_start_b,
+               second_half_length_a, second_half_length_b,
+               most_certain_line_a, most_certain_local_line_b = -1,
+               most_certain_line_certainty = -1,
+               closest_local_line_a;
+
+       for (i = 0; i < length_b; ++i) {
+               find_best_line_matches(start_a,
+                                      length_a,
+                                      start_b,
+                                      i,
+                                      fingerprints_a,
+                                      fingerprints_b,
+                                      similarities,
+                                      certainties,
+                                      second_best_result,
+                                      result,
+                                      max_search_distance_a,
+                                      map_line_number_in_b_to_a);
+
+               if (certainties[i] > most_certain_line_certainty) {
+                       most_certain_line_certainty = certainties[i];
+                       most_certain_local_line_b = i;
+               }
+       }
+
+       /* No matches. */
+       if (most_certain_local_line_b == -1)
+               return;
+
+       most_certain_line_a = result[most_certain_local_line_b];
+
+       /*
+        * Subtract the most certain line's fingerprint in B from the matched
+        * fingerprint in A. This means that other lines in B can't also match
+        * the same parts of the line in A.
+        */
+       fingerprint_subtract(fingerprints_a + most_certain_line_a - start_a,
+                            fingerprints_b + most_certain_local_line_b);
+
+       /* Invalidate results that may be affected by the choice of most
+        * certain line.
+        */
+       invalidate_min = most_certain_local_line_b - max_search_distance_b;
+       invalidate_max = most_certain_local_line_b + max_search_distance_b + 1;
+       if (invalidate_min < 0)
+               invalidate_min = 0;
+       if (invalidate_max > length_b)
+               invalidate_max = length_b;
+
+       /* As the fingerprint in A has changed, discard previously calculated
+        * similarity values with that fingerprint.
+        */
+       for (i = invalidate_min; i < invalidate_max; ++i) {
+               closest_local_line_a = map_line_number(
+                       i + start_b, map_line_number_in_b_to_a) - start_a;
+
+               /* Check that the lines in A and B are close enough that there
+                * is a similarity value for them.
+                */
+               if (abs(most_certain_line_a - start_a - closest_local_line_a) >
+                       max_search_distance_a) {
+                       continue;
+               }
+
+               *get_similarity(similarities, most_certain_line_a - start_a,
+                               i, closest_local_line_a,
+                               max_search_distance_a) = -1;
+       }
+
+       /* More invalidating of results that may be affected by the choice of
+        * most certain line.
+        * Discard the matches for lines in B that are currently matched with a
+        * line in A such that their ordering contradicts the ordering imposed
+        * by the choice of most certain line.
+        */
+       for (i = most_certain_local_line_b - 1; i >= invalidate_min; --i) {
+               /* In this loop we discard results for lines in B that are
+                * before most-certain-line-B but are matched with a line in A
+                * that is after most-certain-line-A.
+                */
+               if (certainties[i] >= 0 &&
+                   (result[i] >= most_certain_line_a ||
+                    second_best_result[i] >= most_certain_line_a)) {
+                       certainties[i] = CERTAINTY_NOT_CALCULATED;
+               }
+       }
+       for (i = most_certain_local_line_b + 1; i < invalidate_max; ++i) {
+               /* In this loop we discard results for lines in B that are
+                * after most-certain-line-B but are matched with a line in A
+                * that is before most-certain-line-A.
+                */
+               if (certainties[i] >= 0 &&
+                   (result[i] <= most_certain_line_a ||
+                    second_best_result[i] <= most_certain_line_a)) {
+                       certainties[i] = CERTAINTY_NOT_CALCULATED;
+               }
+       }
+
+       /* Repeat the matching process for lines before the most certain line.
+        */
+       if (most_certain_local_line_b > 0) {
+               fuzzy_find_matching_lines_recurse(
+                       start_a, start_b,
+                       most_certain_line_a + 1 - start_a,
+                       most_certain_local_line_b,
+                       fingerprints_a, fingerprints_b, similarities,
+                       certainties, second_best_result, result,
+                       max_search_distance_a,
+                       max_search_distance_b,
+                       map_line_number_in_b_to_a);
+       }
+       /* Repeat the matching process for lines after the most certain line.
+        */
+       if (most_certain_local_line_b + 1 < length_b) {
+               second_half_start_a = most_certain_line_a;
+               offset_b = most_certain_local_line_b + 1;
+               second_half_start_b = start_b + offset_b;
+               second_half_length_a =
+                       length_a + start_a - second_half_start_a;
+               second_half_length_b =
+                       length_b + start_b - second_half_start_b;
+               fuzzy_find_matching_lines_recurse(
+                       second_half_start_a, second_half_start_b,
+                       second_half_length_a, second_half_length_b,
+                       fingerprints_a + second_half_start_a - start_a,
+                       fingerprints_b + offset_b,
+                       similarities +
+                               offset_b * (max_search_distance_a * 2 + 1),
+                       certainties + offset_b,
+                       second_best_result + offset_b, result + offset_b,
+                       max_search_distance_a,
+                       max_search_distance_b,
+                       map_line_number_in_b_to_a);
+       }
+}
+
+/* Find the lines in the parent line range that most closely match the lines in
+ * the target line range. This is accomplished by matching fingerprints in each
+ * blame_origin, and choosing the best matches that preserve the line ordering.
+ * See struct fingerprint for details of fingerprint matching, and
+ * fuzzy_find_matching_lines_recurse for details of preserving line ordering.
+ *
+ * The performance is believed to be O(n log n) in the typical case and O(n^2)
+ * in a pathological case, where n is the number of lines in the target range.
+ */
+static int *fuzzy_find_matching_lines(struct blame_origin *parent,
+                                     struct blame_origin *target,
+                                     int tlno, int parent_slno, int same,
+                                     int parent_len)
+{
+       /* We use the terminology "A" for the left hand side of the diff AKA
+        * parent, and "B" for the right hand side of the diff AKA target. */
+       int start_a = parent_slno;
+       int length_a = parent_len;
+       int start_b = tlno;
+       int length_b = same - tlno;
+
+       struct line_number_mapping map_line_number_in_b_to_a = {
+               start_a, length_a, start_b, length_b
+       };
+
+       struct fingerprint *fingerprints_a = parent->fingerprints;
+       struct fingerprint *fingerprints_b = target->fingerprints;
+
+       int i, *result, *second_best_result,
+               *certainties, *similarities, similarity_count;
+
+       /*
+        * max_search_distance_a means that given a line in B, compare it to
+        * the line in A that is closest to its position, and the lines in A
+        * that are no greater than max_search_distance_a lines away from the
+        * closest line in A.
+        *
+        * max_search_distance_b is an upper bound on the greatest possible
+        * distance between lines in B such that they will both be compared
+        * with the same line in A according to max_search_distance_a.
+        */
+       int max_search_distance_a = 10, max_search_distance_b;
+
+       if (length_a <= 0)
+               return NULL;
+
+       if (max_search_distance_a >= length_a)
+               max_search_distance_a = length_a ? length_a - 1 : 0;
+
+       max_search_distance_b = ((2 * max_search_distance_a + 1) * length_b
+                                - 1) / length_a;
+
+       result = xcalloc(length_b, sizeof(int));
+       second_best_result = xcalloc(length_b, sizeof(int));
+       certainties = xcalloc(length_b, sizeof(int));
+
+       /* See get_similarity() for details of similarities. */
+       similarity_count = length_b * (max_search_distance_a * 2 + 1);
+       similarities = xcalloc(similarity_count, sizeof(int));
+
+       for (i = 0; i < length_b; ++i) {
+               result[i] = -1;
+               second_best_result[i] = -1;
+               certainties[i] = CERTAINTY_NOT_CALCULATED;
+       }
+
+       for (i = 0; i < similarity_count; ++i)
+               similarities[i] = -1;
+
+       fuzzy_find_matching_lines_recurse(start_a, start_b,
+                                         length_a, length_b,
+                                         fingerprints_a + start_a,
+                                         fingerprints_b + start_b,
+                                         similarities,
+                                         certainties,
+                                         second_best_result,
+                                         result,
+                                         max_search_distance_a,
+                                         max_search_distance_b,
+                                         &map_line_number_in_b_to_a);
+
+       free(similarities);
+       free(certainties);
+       free(second_best_result);
+
+       return result;
+}
+
+static void fill_origin_fingerprints(struct blame_origin *o)
+{
+       int *line_starts;
+
+       if (o->fingerprints)
+               return;
+       o->num_lines = find_line_starts(&line_starts, o->file.ptr,
+                                       o->file.size);
+       o->fingerprints = xcalloc(o->num_lines, sizeof(struct fingerprint));
+       get_line_fingerprints(o->fingerprints, o->file.ptr, line_starts,
+                             0, o->num_lines);
+       free(line_starts);
+}
+
+static void drop_origin_fingerprints(struct blame_origin *o)
+{
+       if (o->fingerprints) {
+               free_line_fingerprints(o->fingerprints, o->num_lines);
+               o->num_lines = 0;
+               FREE_AND_NULL(o->fingerprints);
+       }
+}
+
 /*
  * Given an origin, prepare mmfile_t structure to be used by the
  * diff machinery
  */
 static void fill_origin_blob(struct diff_options *opt,
-                            struct blame_origin *o, mmfile_t *file, int *num_read_blob)
+                            struct blame_origin *o, mmfile_t *file,
+                            int *num_read_blob, int fill_fingerprints)
 {
        if (!o->file.ptr) {
                enum object_type type;
@@ -340,11 +1035,14 @@ static void fill_origin_blob(struct diff_options *opt,
        }
        else
                *file = o->file;
+       if (fill_fingerprints)
+               fill_origin_fingerprints(o);
 }
 
 static void drop_origin_blob(struct blame_origin *o)
 {
        FREE_AND_NULL(o->file.ptr);
+       drop_origin_fingerprints(o);
 }
 
 /*
@@ -480,7 +1178,9 @@ void blame_coalesce(struct blame_scoreboard *sb)
 
        for (ent = sb->ent; ent && (next = ent->next); ent = next) {
                if (ent->suspect == next->suspect &&
-                   ent->s_lno + ent->num_lines == next->s_lno) {
+                   ent->s_lno + ent->num_lines == next->s_lno &&
+                   ent->ignored == next->ignored &&
+                   ent->unblamable == next->unblamable) {
                        ent->num_lines += next->num_lines;
                        ent->next = next->next;
                        blame_origin_decref(next->suspect);
@@ -532,7 +1232,7 @@ static int fill_blob_sha1_and_mode(struct repository *r,
 {
        if (!is_null_oid(&origin->blob_oid))
                return 0;
-       if (get_tree_entry(&origin->commit->object.oid, origin->path, &origin->blob_oid, &origin->mode))
+       if (get_tree_entry(r, &origin->commit->object.oid, origin->path, &origin->blob_oid, &origin->mode))
                goto error_out;
        if (oid_object_info(r, &origin->blob_oid, NULL) != OBJ_BLOB)
                goto error_out;
@@ -730,8 +1430,14 @@ static void split_overlap(struct blame_entry *split,
                          struct blame_origin *parent)
 {
        int chunk_end_lno;
+       int i;
        memset(split, 0, sizeof(struct blame_entry [3]));
 
+       for (i = 0; i < 3; i++) {
+               split[i].ignored = e->ignored;
+               split[i].unblamable = e->unblamable;
+       }
+
        if (e->s_lno < tlno) {
                /* there is a pre-chunk part not blamed on parent */
                split[0].suspect = blame_origin_incref(e->suspect);
@@ -839,6 +1545,164 @@ static struct blame_entry *reverse_blame(struct blame_entry *head,
        return tail;
 }
 
+/*
+ * Splits a blame entry into two entries at 'len' lines.  The original 'e'
+ * consists of len lines, i.e. [e->lno, e->lno + len), and the second part,
+ * which is returned, consists of the remainder: [e->lno + len, e->lno +
+ * e->num_lines).  The caller needs to sort out the reference counting for the
+ * new entry's suspect.
+ */
+static struct blame_entry *split_blame_at(struct blame_entry *e, int len,
+                                         struct blame_origin *new_suspect)
+{
+       struct blame_entry *n = xcalloc(1, sizeof(struct blame_entry));
+
+       n->suspect = new_suspect;
+       n->ignored = e->ignored;
+       n->unblamable = e->unblamable;
+       n->lno = e->lno + len;
+       n->s_lno = e->s_lno + len;
+       n->num_lines = e->num_lines - len;
+       e->num_lines = len;
+       e->score = 0;
+       return n;
+}
+
+struct blame_line_tracker {
+       int is_parent;
+       int s_lno;
+};
+
+static int are_lines_adjacent(struct blame_line_tracker *first,
+                             struct blame_line_tracker *second)
+{
+       return first->is_parent == second->is_parent &&
+              first->s_lno + 1 == second->s_lno;
+}
+
+static int scan_parent_range(struct fingerprint *p_fps,
+                            struct fingerprint *t_fps, int t_idx,
+                            int from, int nr_lines)
+{
+       int sim, p_idx;
+       #define FINGERPRINT_FILE_THRESHOLD      10
+       int best_sim_val = FINGERPRINT_FILE_THRESHOLD;
+       int best_sim_idx = -1;
+
+       for (p_idx = from; p_idx < from + nr_lines; p_idx++) {
+               sim = fingerprint_similarity(&t_fps[t_idx], &p_fps[p_idx]);
+               if (sim < best_sim_val)
+                       continue;
+               /* Break ties with the closest-to-target line number */
+               if (sim == best_sim_val && best_sim_idx != -1 &&
+                   abs(best_sim_idx - t_idx) < abs(p_idx - t_idx))
+                       continue;
+               best_sim_val = sim;
+               best_sim_idx = p_idx;
+       }
+       return best_sim_idx;
+}
+
+/*
+ * The first pass checks the blame entry (from the target) against the parent's
+ * diff chunk.  If that fails for a line, the second pass tries to match that
+ * line to any part of the parent file.  That catches cases where a change was
+ * broken into two chunks by 'context.'
+ */
+static void guess_line_blames(struct blame_origin *parent,
+                             struct blame_origin *target,
+                             int tlno, int offset, int same, int parent_len,
+                             struct blame_line_tracker *line_blames)
+{
+       int i, best_idx, target_idx;
+       int parent_slno = tlno + offset;
+       int *fuzzy_matches;
+
+       fuzzy_matches = fuzzy_find_matching_lines(parent, target,
+                                                 tlno, parent_slno, same,
+                                                 parent_len);
+       for (i = 0; i < same - tlno; i++) {
+               target_idx = tlno + i;
+               if (fuzzy_matches && fuzzy_matches[i] >= 0) {
+                       best_idx = fuzzy_matches[i];
+               } else {
+                       best_idx = scan_parent_range(parent->fingerprints,
+                                                    target->fingerprints,
+                                                    target_idx, 0,
+                                                    parent->num_lines);
+               }
+               if (best_idx >= 0) {
+                       line_blames[i].is_parent = 1;
+                       line_blames[i].s_lno = best_idx;
+               } else {
+                       line_blames[i].is_parent = 0;
+                       line_blames[i].s_lno = target_idx;
+               }
+       }
+       free(fuzzy_matches);
+}
+
+/*
+ * This decides which parts of a blame entry go to the parent (added to the
+ * ignoredp list) and which stay with the target (added to the diffp list).  The
+ * actual decision was made in a separate heuristic function, and those answers
+ * for the lines in 'e' are in line_blames.  This consumes e, essentially
+ * putting it on a list.
+ *
+ * Note that the blame entries on the ignoredp list are not necessarily sorted
+ * with respect to the parent's line numbers yet.
+ */
+static void ignore_blame_entry(struct blame_entry *e,
+                              struct blame_origin *parent,
+                              struct blame_entry **diffp,
+                              struct blame_entry **ignoredp,
+                              struct blame_line_tracker *line_blames)
+{
+       int entry_len, nr_lines, i;
+
+       /*
+        * We carve new entries off the front of e.  Each entry comes from a
+        * contiguous chunk of lines: adjacent lines from the same origin
+        * (either the parent or the target).
+        */
+       entry_len = 1;
+       nr_lines = e->num_lines;        /* e changes in the loop */
+       for (i = 0; i < nr_lines; i++) {
+               struct blame_entry *next = NULL;
+
+               /*
+                * We are often adjacent to the next line - only split the blame
+                * entry when we have to.
+                */
+               if (i + 1 < nr_lines) {
+                       if (are_lines_adjacent(&line_blames[i],
+                                              &line_blames[i + 1])) {
+                               entry_len++;
+                               continue;
+                       }
+                       next = split_blame_at(e, entry_len,
+                                             blame_origin_incref(e->suspect));
+               }
+               if (line_blames[i].is_parent) {
+                       e->ignored = 1;
+                       blame_origin_decref(e->suspect);
+                       e->suspect = blame_origin_incref(parent);
+                       e->s_lno = line_blames[i - entry_len + 1].s_lno;
+                       e->next = *ignoredp;
+                       *ignoredp = e;
+               } else {
+                       e->unblamable = 1;
+                       /* e->s_lno is already in the target's address space. */
+                       e->next = *diffp;
+                       *diffp = e;
+               }
+               assert(e->num_lines == entry_len);
+               e = next;
+               entry_len = 1;
+       }
+       assert(!e);
+}
+
 /*
  * Process one hunk from the patch between the current suspect for
  * blame_entry e and its parent.  This first blames any unfinished
@@ -848,13 +1712,20 @@ static struct blame_entry *reverse_blame(struct blame_entry *head,
  * -C options may lead to overlapping/duplicate source line number
  * ranges, all we can rely on from sorting/merging is the order of the
  * first suspect line number.
+ *
+ * tlno: line number in the target where this chunk begins
+ * same: line number in the target where this chunk ends
+ * offset: add to tlno to get the chunk starting point in the parent
+ * parent_len: number of lines in the parent chunk
  */
 static void blame_chunk(struct blame_entry ***dstq, struct blame_entry ***srcq,
-                       int tlno, int offset, int same,
-                       struct blame_origin *parent)
+                       int tlno, int offset, int same, int parent_len,
+                       struct blame_origin *parent,
+                       struct blame_origin *target, int ignore_diffs)
 {
        struct blame_entry *e = **srcq;
-       struct blame_entry *samep = NULL, *diffp = NULL;
+       struct blame_entry *samep = NULL, *diffp = NULL, *ignoredp = NULL;
+       struct blame_line_tracker *line_blames = NULL;
 
        while (e && e->s_lno < tlno) {
                struct blame_entry *next = e->next;
@@ -865,14 +1736,9 @@ static void blame_chunk(struct blame_entry ***dstq, struct blame_entry ***srcq,
                 */
                if (e->s_lno + e->num_lines > tlno) {
                        /* Move second half to a new record */
-                       int len = tlno - e->s_lno;
-                       struct blame_entry *n = xcalloc(1, sizeof (struct blame_entry));
-                       n->suspect = e->suspect;
-                       n->lno = e->lno + len;
-                       n->s_lno = e->s_lno + len;
-                       n->num_lines = e->num_lines - len;
-                       e->num_lines = len;
-                       e->score = 0;
+                       struct blame_entry *n;
+
+                       n = split_blame_at(e, tlno - e->s_lno, e->suspect);
                        /* Push new record to diffp */
                        n->next = diffp;
                        diffp = n;
@@ -908,6 +1774,14 @@ static void blame_chunk(struct blame_entry ***dstq, struct blame_entry ***srcq,
         */
        samep = NULL;
        diffp = NULL;
+
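+       /*
+        * When ignoring this commit, decide up front which parent line (if
+        * any) each target line in [tlno, same) should be blamed on.
+        */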
+       if (ignore_diffs && same - tlno > 0) {
+               line_blames = xcalloc(same - tlno,
+                                     sizeof(struct blame_line_tracker));
+               guess_line_blames(parent, target, tlno, offset, same,
+                                 parent_len, line_blames);
+       }
+
        while (e && e->s_lno < same) {
                struct blame_entry *next = e->next;
 
@@ -919,22 +1793,37 @@ static void blame_chunk(struct blame_entry ***dstq, struct blame_entry ***srcq,
                         * Move second half to a new record to be
                         * processed by later chunks
                         */
-                       int len = same - e->s_lno;
-                       struct blame_entry *n = xcalloc(1, sizeof (struct blame_entry));
-                       n->suspect = blame_origin_incref(e->suspect);
-                       n->lno = e->lno + len;
-                       n->s_lno = e->s_lno + len;
-                       n->num_lines = e->num_lines - len;
-                       e->num_lines = len;
-                       e->score = 0;
+                       struct blame_entry *n;
+
+                       n = split_blame_at(e, same - e->s_lno,
+                                          blame_origin_incref(e->suspect));
                        /* Push new record to samep */
                        n->next = samep;
                        samep = n;
                }
-               e->next = diffp;
-               diffp = e;
+               if (ignore_diffs) {
+                       ignore_blame_entry(e, parent, &diffp, &ignoredp,
+                                          line_blames + e->s_lno - tlno);
+               } else {
+                       e->next = diffp;
+                       diffp = e;
+               }
                e = next;
        }
+       free(line_blames);
+       if (ignoredp) {
+               /*
+                * Note ignoredp is not sorted yet, and thus neither is dstq.
+                * That list must be sorted before we queue_blames().  We defer
+                * sorting until after all diff hunks are processed, so that
+                * guess_line_blames() can pick *any* line in the parent.  The
+                * slight drawback is that we end up sorting all blame entries
+                * passed to the parent, including those that are unrelated to
+                * changes made by the ignored commit.
+                */
+               **dstq = reverse_blame(ignoredp, **dstq);
+               *dstq = &ignoredp->next;
+       }
        **srcq = reverse_blame(diffp, reverse_blame(samep, e));
        /* Move across elements that are in the unblamable portion */
        if (diffp)
@@ -943,7 +1832,9 @@ static void blame_chunk(struct blame_entry ***dstq, struct blame_entry ***srcq,
 
 struct blame_chunk_cb_data {
        struct blame_origin *parent;
+       struct blame_origin *target;
        long offset;
+       int ignore_diffs;
        struct blame_entry **dstq;
        struct blame_entry **srcq;
 };
@@ -956,7 +1847,8 @@ static int blame_chunk_cb(long start_a, long count_a,
        if (start_a - start_b != d->offset)
                die("internal error in blame::blame_chunk_cb");
        blame_chunk(&d->dstq, &d->srcq, start_b, start_a - start_b,
-                   start_b + count_b, d->parent);
+                   start_b + count_b, count_a, d->parent, d->target,
+                   d->ignore_diffs);
        d->offset = start_a + count_a - (start_b + count_b);
        return 0;
 }
@@ -968,7 +1860,7 @@ static int blame_chunk_cb(long start_a, long count_a,
  */
 static void pass_blame_to_parent(struct blame_scoreboard *sb,
                                 struct blame_origin *target,
-                                struct blame_origin *parent)
+                                struct blame_origin *parent, int ignore_diffs)
 {
        mmfile_t file_p, file_o;
        struct blame_chunk_cb_data d;
@@ -978,11 +1870,15 @@ static void pass_blame_to_parent(struct blame_scoreboard *sb,
                return; /* nothing remains for this target */
 
        d.parent = parent;
+       d.target = target;
        d.offset = 0;
+       d.ignore_diffs = ignore_diffs;
        d.dstq = &newdest; d.srcq = &target->suspects;
 
-       fill_origin_blob(&sb->revs->diffopt, parent, &file_p, &sb->num_read_blob);
-       fill_origin_blob(&sb->revs->diffopt, target, &file_o, &sb->num_read_blob);
+       fill_origin_blob(&sb->revs->diffopt, parent, &file_p,
+                        &sb->num_read_blob, ignore_diffs);
+       fill_origin_blob(&sb->revs->diffopt, target, &file_o,
+                        &sb->num_read_blob, ignore_diffs);
        sb->num_get_patch++;
 
        if (diff_hunks(&file_p, &file_o, blame_chunk_cb, &d, sb->xdl_opts))
@@ -990,8 +1886,13 @@ static void pass_blame_to_parent(struct blame_scoreboard *sb,
                    oid_to_hex(&parent->commit->object.oid),
                    oid_to_hex(&target->commit->object.oid));
        /* The rest are the same as the parent */
-       blame_chunk(&d.dstq, &d.srcq, INT_MAX, d.offset, INT_MAX, parent);
+       blame_chunk(&d.dstq, &d.srcq, INT_MAX, d.offset, INT_MAX, 0,
+                   parent, target, 0);
        *d.dstq = NULL;
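+       /*
+        * Entries passed to the parent for ignored diffs were queued without
+        * sorting (see blame_chunk()); sort them before queue_blames().
+        */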
+       if (ignore_diffs)
+               newdest = llist_mergesort(newdest, get_next_blame,
+                                         set_next_blame,
+                                         compare_blame_suspect);
        queue_blames(sb, parent, newdest);
 
        return;
@@ -1188,7 +2089,8 @@ static void find_move_in_parent(struct blame_scoreboard *sb,
        if (!unblamed)
                return; /* nothing remains for this target */
 
-       fill_origin_blob(&sb->revs->diffopt, parent, &file_p, &sb->num_read_blob);
+       fill_origin_blob(&sb->revs->diffopt, parent, &file_p,
+                        &sb->num_read_blob, 0);
        if (!file_p.ptr)
                return;
 
@@ -1317,7 +2219,8 @@ static void find_copy_in_parent(struct blame_scoreboard *sb,
                        norigin = get_origin(parent, p->one->path);
                        oidcpy(&norigin->blob_oid, &p->one->oid);
                        norigin->mode = p->one->mode;
-                       fill_origin_blob(&sb->revs->diffopt, norigin, &file_p, &sb->num_read_blob);
+                       fill_origin_blob(&sb->revs->diffopt, norigin, &file_p,
+                                        &sb->num_read_blob, 0);
                        if (!file_p.ptr)
                                continue;
 
@@ -1495,11 +2398,34 @@ static void pass_blame(struct blame_scoreboard *sb, struct blame_origin *origin,
                        blame_origin_incref(porigin);
                        origin->previous = porigin;
                }
-               pass_blame_to_parent(sb, origin, porigin);
+               pass_blame_to_parent(sb, origin, porigin, 0);
                if (!origin->suspects)
                        goto finish;
        }
 
+       /*
+        * Pass remaining suspects for ignored commits to their parents.
+        */
+       if (oidset_contains(&sb->ignore_list, &commit->object.oid)) {
+               for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
+                    i < num_sg && sg;
+                    sg = sg->next, i++) {
+                       struct blame_origin *porigin = sg_origin[i];
+
+                       if (!porigin)
+                               continue;
+                       pass_blame_to_parent(sb, origin, porigin, 1);
+                       /*
+                        * Preemptively drop porigin so we can refresh the
+                        * fingerprints if we use the parent again, which can
+                        * occur if you ignore back-to-back commits.
+                        */
+                       drop_origin_blob(porigin);
+                       if (!origin->suspects)
+                               goto finish;
+               }
+       }
+
        /*
         * Optionally find moves in parents' files.
         */
@@ -1640,37 +2566,14 @@ void assign_blame(struct blame_scoreboard *sb, int opt)
        }
 }
 
-static const char *get_next_line(const char *start, const char *end)
-{
-       const char *nl = memchr(start, '\n', end - start);
-       return nl ? nl + 1 : end;
-}
-
 /*
  * To allow quick access to the contents of nth line in the
  * final image, prepare an index in the scoreboard.
  */
 static int prepare_lines(struct blame_scoreboard *sb)
 {
-       const char *buf = sb->final_buf;
-       unsigned long len = sb->final_buf_size;
-       const char *end = buf + len;
-       const char *p;
-       int *lineno;
-       int num = 0;
-
-       for (p = buf; p < end; p = get_next_line(p, end))
-               num++;
-
-       ALLOC_ARRAY(sb->lineno, num + 1);
-       lineno = sb->lineno;
-
-       for (p = buf; p < end; p = get_next_line(p, end))
-               *lineno++ = p - buf;
-
-       *lineno = len;
-
-       sb->num_lines = num;
+       sb->num_lines = find_line_starts(&sb->lineno, sb->final_buf,
+                                        sb->final_buf_size);
        return sb->num_lines;
 }
 
diff --git a/blame.h b/blame.h
index d62f80fa74c44011f8cdaea7a4baec3d8cae03e6..4a9e1270b036465c23fab5a0e536b9638ca3ce1b 100644 (file)
--- a/blame.h
+++ b/blame.h
@@ -51,6 +51,8 @@ struct blame_origin {
         */
        struct blame_entry *suspects;
        mmfile_t file;
+       int num_lines;
+       void *fingerprints;
        struct object_id blob_oid;
        unsigned short mode;
        /* guilty gets set when shipping any suspects to the final
@@ -92,6 +94,8 @@ struct blame_entry {
         * scanning the lines over and over.
         */
        unsigned score;
+       int ignored;
+       int unblamable;
 };
 
 /*
@@ -117,6 +121,8 @@ struct blame_scoreboard {
        /* linked list of blames */
        struct blame_entry *ent;
 
+       struct oidset ignore_list;
+
        /* look-up a line in the final buffer */
        int num_lines;
        int *lineno;
index 50e3d4a2656140b406975947d066283a2ef14460..b6534d4dea9ad81a34eaf099f7cf9a0a1e56f410 100644 (file)
@@ -53,6 +53,9 @@ static int no_whole_file_rename;
 static int show_progress;
 static char repeated_meta_color[COLOR_MAXLEN];
 static int coloring_mode;
+static struct string_list ignore_revs_file_list = STRING_LIST_INIT_NODUP;
+static int mark_unblamable_lines;
+static int mark_ignored_lines;
 
 static struct date_mode blame_date_mode = { DATE_ISO8601 };
 static size_t blame_date_width;
@@ -480,6 +483,14 @@ static void emit_other(struct blame_scoreboard *sb, struct blame_entry *ent, int
                        }
                }
 
+               if (mark_unblamable_lines && ent->unblamable) {
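+                       /*
+                        * Print the marker in place of one digit of the
+                        * abbreviated hash so the output width is unchanged.
+                        */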
+                       length--;
+                       putchar('*');
+               }
+               if (mark_ignored_lines && ent->ignored) {
+                       length--;
+                       putchar('?');
+               }
                printf("%.*s", length, hex);
                if (opt & OUTPUT_ANNOTATE_COMPAT) {
                        const char *name;
@@ -696,6 +707,24 @@ static int git_blame_config(const char *var, const char *value, void *cb)
                parse_date_format(value, &blame_date_mode);
                return 0;
        }
+       if (!strcmp(var, "blame.ignorerevsfile")) {
+               const char *str;
+               int ret;
+
+               ret = git_config_pathname(&str, var, value);
+               if (ret)
+                       return ret;
+               string_list_insert(&ignore_revs_file_list, str);
+               return 0;
+       }
+       if (!strcmp(var, "blame.markunblamablelines")) {
+               mark_unblamable_lines = git_config_bool(var, value);
+               return 0;
+       }
+       if (!strcmp(var, "blame.markignoredlines")) {
+               mark_ignored_lines = git_config_bool(var, value);
+               return 0;
+       }
        if (!strcmp(var, "color.blame.repeatedlines")) {
                if (color_parse_mem(value, strlen(value), repeated_meta_color))
                        warning(_("invalid color '%s' in color.blame.repeatedLines"),
@@ -775,6 +804,27 @@ static int is_a_rev(const char *name)
        return OBJ_NONE < oid_object_info(the_repository, &oid, NULL);
 }
 
+static void build_ignorelist(struct blame_scoreboard *sb,
+                            struct string_list *ignore_revs_file_list,
+                            struct string_list *ignore_rev_list)
+{
+       struct string_list_item *i;
+       struct object_id oid;
+
+       oidset_init(&sb->ignore_list, 0);
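+       /*
+        * An empty file name clears any ignore list collected so far,
+        * allowing earlier configuration to be reset.
+        */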
+       for_each_string_list_item(i, ignore_revs_file_list) {
+               if (!strcmp(i->string, ""))
+                       oidset_clear(&sb->ignore_list);
+               else
+                       oidset_parse_file(&sb->ignore_list, i->string);
+       }
+       for_each_string_list_item(i, ignore_rev_list) {
+               if (get_oid_committish(i->string, &oid))
+                       die(_("cannot find revision %s to ignore"), i->string);
+               oidset_insert(&sb->ignore_list, &oid);
+       }
+}
+
 int cmd_blame(int argc, const char **argv, const char *prefix)
 {
        struct rev_info revs;
@@ -786,6 +836,7 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
        struct progress_info pi = { NULL, 0 };
 
        struct string_list range_list = STRING_LIST_INIT_NODUP;
+       struct string_list ignore_rev_list = STRING_LIST_INIT_NODUP;
        int output_option = 0, opt = 0;
        int show_stats = 0;
        const char *revs_file = NULL;
@@ -807,6 +858,8 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
                OPT_BIT('s', NULL, &output_option, N_("Suppress author name and timestamp (Default: off)"), OUTPUT_NO_AUTHOR),
                OPT_BIT('e', "show-email", &output_option, N_("Show author email instead of name (Default: off)"), OUTPUT_SHOW_EMAIL),
                OPT_BIT('w', NULL, &xdl_opts, N_("Ignore whitespace differences"), XDF_IGNORE_WHITESPACE),
+               OPT_STRING_LIST(0, "ignore-rev", &ignore_rev_list, N_("rev"), N_("Ignore <rev> when blaming")),
+               OPT_STRING_LIST(0, "ignore-revs-file", &ignore_revs_file_list, N_("file"), N_("Ignore revisions from <file>")),
                OPT_BIT(0, "color-lines", &output_option, N_("color redundant metadata from previous line differently"), OUTPUT_COLOR_LINE),
                OPT_BIT(0, "color-by-age", &output_option, N_("color lines by age"), OUTPUT_SHOW_AGE_WITH_COLOR),
 
@@ -1012,6 +1065,9 @@ int cmd_blame(int argc, const char **argv, const char *prefix)
        sb.contents_from = contents_from;
        sb.reverse = reverse;
        sb.repo = the_repository;
+       build_ignorelist(&sb, &ignore_revs_file_list, &ignore_rev_list);
+       string_list_clear(&ignore_revs_file_list, 0);
+       string_list_clear(&ignore_rev_list, 0);
        setup_scoreboard(&sb, path, &o);
        lno = sb.num_lines;
 
index 0f092382e175cf7ebe43d2f53ef1c5c79c338568..995d47c85aad24a645786ed6480bd659c755997e 100644 (file)
@@ -172,7 +172,8 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
                         * fall-back to the usual case.
                         */
                }
-               buf = read_object_with_reference(&oid, exp_type, &size, NULL);
+               buf = read_object_with_reference(the_repository,
+                                                &oid, exp_type, &size, NULL);
                break;
 
        default:
index d8efa5bab276a816bef48e9cb3891ccc0cb81aad..38027b83d9d8329a1dc2e47b236a985e4ce71060 100644 (file)
@@ -5,17 +5,18 @@
 #include "parse-options.h"
 #include "repository.h"
 #include "commit-graph.h"
+#include "object-store.h"
 
 static char const * const builtin_commit_graph_usage[] = {
        N_("git commit-graph [--object-dir <objdir>]"),
        N_("git commit-graph read [--object-dir <objdir>]"),
-       N_("git commit-graph verify [--object-dir <objdir>]"),
-       N_("git commit-graph write [--object-dir <objdir>] [--append] [--reachable|--stdin-packs|--stdin-commits]"),
+       N_("git commit-graph verify [--object-dir <objdir>] [--shallow]"),
+       N_("git commit-graph write [--object-dir <objdir>] [--append|--split] [--reachable|--stdin-packs|--stdin-commits] <split options>"),
        NULL
 };
 
 static const char * const builtin_commit_graph_verify_usage[] = {
-       N_("git commit-graph verify [--object-dir <objdir>]"),
+       N_("git commit-graph verify [--object-dir <objdir>] [--shallow]"),
        NULL
 };
 
@@ -25,7 +26,7 @@ static const char * const builtin_commit_graph_read_usage[] = {
 };
 
 static const char * const builtin_commit_graph_write_usage[] = {
-       N_("git commit-graph write [--object-dir <objdir>] [--append] [--reachable|--stdin-packs|--stdin-commits]"),
+       N_("git commit-graph write [--object-dir <objdir>] [--append|--split] [--reachable|--stdin-packs|--stdin-commits] <split options>"),
        NULL
 };
 
@@ -35,9 +36,10 @@ static struct opts_commit_graph {
        int stdin_packs;
        int stdin_commits;
        int append;
+       int split;
+       int shallow;
 } opts;
 
-
 static int graph_verify(int argc, const char **argv)
 {
        struct commit_graph *graph = NULL;
@@ -45,11 +47,14 @@ static int graph_verify(int argc, const char **argv)
        int open_ok;
        int fd;
        struct stat st;
+       int flags = 0;
 
        static struct option builtin_commit_graph_verify_options[] = {
                OPT_STRING(0, "object-dir", &opts.obj_dir,
                           N_("dir"),
                           N_("The object directory to store the graph")),
+               OPT_BOOL(0, "shallow", &opts.shallow,
+                        N_("if the commit-graph is split, only verify the tip file")),
                OPT_END(),
        };
 
@@ -59,21 +64,27 @@ static int graph_verify(int argc, const char **argv)
 
        if (!opts.obj_dir)
                opts.obj_dir = get_object_directory();
+       if (opts.shallow)
+               flags |= COMMIT_GRAPH_VERIFY_SHALLOW;
 
        graph_name = get_commit_graph_filename(opts.obj_dir);
        open_ok = open_commit_graph(graph_name, &fd, &st);
-       if (!open_ok && errno == ENOENT)
-               return 0;
-       if (!open_ok)
+       if (!open_ok && errno != ENOENT)
                die_errno(_("Could not open commit-graph '%s'"), graph_name);
-       graph = load_commit_graph_one_fd_st(fd, &st);
+
        FREE_AND_NULL(graph_name);
 
+       if (open_ok)
+               graph = load_commit_graph_one_fd_st(fd, &st);
+       else
+               graph = read_commit_graph_one(the_repository, opts.obj_dir);
+
+       /* Return failure if open_ok predicted success */
        if (!graph)
-               return 1;
+               return !!open_ok;
 
        UNLEAK(graph);
-       return verify_commit_graph(the_repository, graph);
+       return verify_commit_graph(the_repository, graph, flags);
 }
 
 static int graph_read(int argc, const char **argv)
@@ -135,6 +146,7 @@ static int graph_read(int argc, const char **argv)
 }
 
 extern int read_replace_refs;
+static struct split_commit_graph_opts split_opts;
 
 static int graph_write(int argc, const char **argv)
 {
@@ -156,9 +168,21 @@ static int graph_write(int argc, const char **argv)
                        N_("start walk at commits listed by stdin")),
                OPT_BOOL(0, "append", &opts.append,
                        N_("include all commits already in the commit-graph file")),
+               OPT_BOOL(0, "split", &opts.split,
+                       N_("allow writing an incremental commit-graph file")),
+               OPT_INTEGER(0, "max-commits", &split_opts.max_commits,
+                       N_("maximum number of commits in a non-base split commit-graph")),
+               OPT_INTEGER(0, "size-multiple", &split_opts.size_multiple,
+                       N_("maximum ratio between two levels of a split commit-graph")),
+               OPT_EXPIRY_DATE(0, "expire-time", &split_opts.expire_time,
+                       N_("only expire files older than a given date-time")),
                OPT_END(),
        };
 
+       split_opts.size_multiple = 2;
+       split_opts.max_commits = 0;
+       split_opts.expire_time = 0;
+
        argc = parse_options(argc, argv, NULL,
                             builtin_commit_graph_write_options,
                             builtin_commit_graph_write_usage, 0);
@@ -169,11 +193,16 @@ static int graph_write(int argc, const char **argv)
                opts.obj_dir = get_object_directory();
        if (opts.append)
                flags |= COMMIT_GRAPH_APPEND;
+       if (opts.split)
+               flags |= COMMIT_GRAPH_SPLIT;
 
        read_replace_refs = 0;
 
-       if (opts.reachable)
-               return write_commit_graph_reachable(opts.obj_dir, flags);
+       if (opts.reachable) {
+               if (write_commit_graph_reachable(opts.obj_dir, flags, &split_opts))
+                       return 1;
+               return 0;
+       }
 
        string_list_init(&lines, 0);
        if (opts.stdin_packs || opts.stdin_commits) {
@@ -193,7 +222,8 @@ static int graph_write(int argc, const char **argv)
        if (write_commit_graph(opts.obj_dir,
                               pack_indexes,
                               commit_hex,
-                              flags))
+                              flags,
+                              &split_opts))
                result = 1;
 
        UNLEAK(lines);
index 3b561c2a75b5075ca688868ab965c7b96a87a9e8..ae7aaf6dc6835888e4fb55de8a135331ab05316e 100644 (file)
@@ -60,15 +60,18 @@ N_("The previous cherry-pick is now empty, possibly due to conflict resolution.\
 "\n");
 
 static const char empty_cherry_pick_advice_single[] =
-N_("Otherwise, please use 'git reset'\n");
+N_("Otherwise, please use 'git cherry-pick --skip'\n");
 
 static const char empty_cherry_pick_advice_multi[] =
-N_("If you wish to skip this commit, use:\n"
+N_("and then use:\n"
 "\n"
-"    git reset\n"
+"    git cherry-pick --continue\n"
 "\n"
-"Then \"git cherry-pick --continue\" will resume cherry-picking\n"
-"the remaining commits.\n");
+"to resume cherry-picking the remaining commits.\n"
+"If you wish to skip this commit, use:\n"
+"\n"
+"    git cherry-pick --skip\n"
+"\n");
 
 static const char *color_status_slots[] = {
        [WT_STATUS_HEADER]        = "header",
@@ -1687,7 +1690,7 @@ int cmd_commit(int argc, const char **argv, const char *prefix)
                      "not exceeded, and then \"git restore --staged :/\" to recover."));
 
        if (git_env_bool(GIT_TEST_COMMIT_GRAPH, 0) &&
-           write_commit_graph_reachable(get_object_directory(), 0))
+           write_commit_graph_reachable(get_object_directory(), 0, NULL))
                return 1;
 
        repo_rerere(the_repository, 0);
index be8e0bfcbe0a428f72c5762ff8a066baaefd2533..c18efadda53e54f0e80dbd16737e2d40f47fa16f 100644 (file)
@@ -687,7 +687,8 @@ int cmd_gc(int argc, const char **argv, const char *prefix)
 
        if (gc_write_commit_graph &&
            write_commit_graph_reachable(get_object_directory(),
-                                        !quiet && !daemonized ? COMMIT_GRAPH_PROGRESS : 0))
+                                        !quiet && !daemonized ? COMMIT_GRAPH_PROGRESS : 0,
+                                        NULL))
                return 1;
 
        if (auto_gc && too_many_loose_objects())
index 580fd38f41704b6d534c04f2dee1bf85eee67c3c..560051784ef7c222046d51ba8b8bf7f84223a26e 100644 (file)
@@ -458,7 +458,8 @@ static int grep_submodule(struct grep_opt *opt,
                object = parse_object_or_die(oid, oid_to_hex(oid));
 
                grep_read_lock();
-               data = read_object_with_reference(&object->oid, tree_type,
+               data = read_object_with_reference(&subrepo,
+                                                 &object->oid, tree_type,
                                                  &size, NULL);
                grep_read_unlock();
 
@@ -623,7 +624,8 @@ static int grep_object(struct grep_opt *opt, const struct pathspec *pathspec,
                int hit, len;
 
                grep_read_lock();
-               data = read_object_with_reference(&obj->oid, tree_type,
+               data = read_object_with_reference(opt->repo,
+                                                 &obj->oid, tree_type,
                                                  &size, NULL);
                grep_read_unlock();
 
index 34ca0258b12ae4e4a04c495c244fb9c18268e7d8..97b54caeb90085e0fb4a88898ac87c7a5cb45eed 100644 (file)
@@ -205,6 +205,7 @@ static void resolve(const struct traverse_info *info, struct name_entry *ours, s
 static void unresolved_directory(const struct traverse_info *info,
                                 struct name_entry n[3])
 {
+       struct repository *r = the_repository;
        char *newbase;
        struct name_entry *p;
        struct tree_desc t[3];
@@ -220,9 +221,9 @@ static void unresolved_directory(const struct traverse_info *info,
        newbase = traverse_path(info, p);
 
 #define ENTRY_OID(e) (((e)->mode && S_ISDIR((e)->mode)) ? &(e)->oid : NULL)
-       buf0 = fill_tree_descriptor(t + 0, ENTRY_OID(n + 0));
-       buf1 = fill_tree_descriptor(t + 1, ENTRY_OID(n + 1));
-       buf2 = fill_tree_descriptor(t + 2, ENTRY_OID(n + 2));
+       buf0 = fill_tree_descriptor(r, t + 0, ENTRY_OID(n + 0));
+       buf1 = fill_tree_descriptor(r, t + 1, ENTRY_OID(n + 1));
+       buf2 = fill_tree_descriptor(r, t + 2, ENTRY_OID(n + 2));
 #undef ENTRY_OID
 
        merge_trees(t, newbase);
@@ -351,14 +352,16 @@ static void merge_trees(struct tree_desc t[3], const char *base)
        traverse_trees(&the_index, 3, t, &info);
 }
 
-static void *get_tree_descriptor(struct tree_desc *desc, const char *rev)
+static void *get_tree_descriptor(struct repository *r,
+                                struct tree_desc *desc,
+                                const char *rev)
 {
        struct object_id oid;
        void *buf;
 
-       if (get_oid(rev, &oid))
+       if (repo_get_oid(r, rev, &oid))
                die("unknown rev %s", rev);
-       buf = fill_tree_descriptor(desc, &oid);
+       buf = fill_tree_descriptor(r, desc, &oid);
        if (!buf)
                die("%s is not a tree", rev);
        return buf;
@@ -366,15 +369,16 @@ static void *get_tree_descriptor(struct tree_desc *desc, const char *rev)
 
 int cmd_merge_tree(int argc, const char **argv, const char *prefix)
 {
+       struct repository *r = the_repository;
        struct tree_desc t[3];
        void *buf1, *buf2, *buf3;
 
        if (argc != 4)
                usage(merge_tree_usage);
 
-       buf1 = get_tree_descriptor(t+0, argv[1]);
-       buf2 = get_tree_descriptor(t+1, argv[2]);
-       buf3 = get_tree_descriptor(t+2, argv[3]);
+       buf1 = get_tree_descriptor(r, t+0, argv[1]);
+       buf2 = get_tree_descriptor(r, t+1, argv[2]);
+       buf3 = get_tree_descriptor(r, t+2, argv[3]);
        merge_trees(t, "");
        free(buf1);
        free(buf2);
index aad5a9504c8546db0adfd36c59cb1ad1918a56e9..e2ccbc44e204173b09f5ad4b704a31a9e8643bb6 100644 (file)
@@ -892,6 +892,7 @@ static int finish_automerge(struct commit *head,
        struct strbuf buf = STRBUF_INIT;
        struct object_id result_commit;
 
+       write_tree_trivial(result_tree);
        free_commit_list(common);
        parents = remoteheads;
        if (!head_subsumed || fast_forward == FF_NO)
@@ -1586,8 +1587,8 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
            save_state(&stash))
                oidclr(&stash);
 
-       for (i = 0; i < use_strategies_nr; i++) {
-               int ret;
+       for (i = 0; !merge_was_ok && i < use_strategies_nr; i++) {
+               int ret, cnt;
                if (i) {
                        printf(_("Rewinding the tree to pristine...\n"));
                        restore_state(&head_commit->object.oid, &stash);
@@ -1604,40 +1605,26 @@ int cmd_merge(int argc, const char **argv, const char *prefix)
                ret = try_merge_strategy(use_strategies[i]->name,
                                         common, remoteheads,
                                         head_commit);
-               if (!option_commit && !ret) {
-                       merge_was_ok = 1;
-                       /*
-                        * This is necessary here just to avoid writing
-                        * the tree, but later we will *not* exit with
-                        * status code 1 because merge_was_ok is set.
-                        */
-                       ret = 1;
-               }
-
-               if (ret) {
-                       /*
-                        * The backend exits with 1 when conflicts are
-                        * left to be resolved, with 2 when it does not
-                        * handle the given merge at all.
-                        */
-                       if (ret == 1) {
-                               int cnt = evaluate_result();
-
-                               if (best_cnt <= 0 || cnt <= best_cnt) {
-                                       best_strategy = use_strategies[i]->name;
-                                       best_cnt = cnt;
+               /*
+                * The backend exits with 1 when conflicts are
+                * left to be resolved, with 2 when it does not
+                * handle the given merge at all.
+                */
+               if (ret < 2) {
+                       if (!ret) {
+                               if (option_commit) {
+                                       /* Automerge succeeded. */
+                                       automerge_was_ok = 1;
+                                       break;
                                }
+                               merge_was_ok = 1;
+                       }
+                       cnt = evaluate_result();
+                       if (best_cnt <= 0 || cnt <= best_cnt) {
+                               best_strategy = use_strategies[i]->name;
+                               best_cnt = cnt;
                        }
-                       if (merge_was_ok)
-                               break;
-                       else
-                               continue;
                }
-
-               /* Automerge succeeded. */
-               write_tree_trivial(&result_tree);
-               automerge_was_ok = 1;
-               break;
        }
 
        /*
index 72dfd3dadc7bf8037d4bd11d24aabca6a56a5fa8..b1ea1a6aa17724915a529a882831640aec7ea7b8 100644 (file)
@@ -6,12 +6,13 @@
 #include "trace2.h"
 
 static char const * const builtin_multi_pack_index_usage[] = {
-       N_("git multi-pack-index [--object-dir=<dir>] (write|verify)"),
+       N_("git multi-pack-index [--object-dir=<dir>] (write|verify|expire|repack --batch-size=<size>)"),
        NULL
 };
 
 static struct opts_multi_pack_index {
        const char *object_dir;
+       unsigned long batch_size;
 } opts;
 
 int cmd_multi_pack_index(int argc, const char **argv,
@@ -20,6 +21,8 @@ int cmd_multi_pack_index(int argc, const char **argv,
        static struct option builtin_multi_pack_index_options[] = {
                OPT_FILENAME(0, "object-dir", &opts.object_dir,
                  N_("object directory containing set of packfile and pack-index pairs")),
+               OPT_MAGNITUDE(0, "batch-size", &opts.batch_size,
+                 N_("during repack, collect pack-files of smaller size into a batch that is larger than this size")),
                OPT_END(),
        };
 
@@ -43,10 +46,17 @@ int cmd_multi_pack_index(int argc, const char **argv,
 
        trace2_cmd_mode(argv[0]);
 
+       if (!strcmp(argv[0], "repack"))
+               return midx_repack(the_repository, opts.object_dir, (size_t)opts.batch_size);
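+       /* Only 'repack' uses --batch-size; reject it for other subcommands. */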
+       if (opts.batch_size)
+               die(_("--batch-size option is only for 'repack' subcommand"));
+
        if (!strcmp(argv[0], "write"))
                return write_midx_file(opts.object_dir);
        if (!strcmp(argv[0], "verify"))
                return verify_midx_file(the_repository, opts.object_dir);
+       if (!strcmp(argv[0], "expire"))
+               return expire_midx_packs(the_repository, opts.object_dir);
 
-       die(_("unrecognized verb: %s"), argv[0]);
+       die(_("unrecognized subcommand: %s"), argv[0]);
 }
index 000dc4b872b23d555d87f475511ca60d601bc4a1..267c562b1f81dad2c42e2392a76ff0a269d38f65 100644 (file)
@@ -1428,7 +1428,8 @@ static void add_preferred_base(struct object_id *oid)
        if (window <= num_preferred_base++)
                return;
 
-       data = read_object_with_reference(oid, tree_type, &size, &tree_oid);
+       data = read_object_with_reference(the_repository, oid,
+                                         tree_type, &size, &tree_oid);
        if (!data)
                return;
 
index 3c7d8b894a35157a9e1f72f5437608ec2a07c208..95d34223e93774edbede0c83d781587777d647f3 100644 (file)
@@ -850,13 +850,13 @@ static int reset_head(struct object_id *oid, const char *action,
                goto leave_reset_head;
        }
 
-       if (!reset_hard && !fill_tree_descriptor(&desc[nr++], &head_oid)) {
+       if (!reset_hard && !fill_tree_descriptor(the_repository, &desc[nr++], &head_oid)) {
                ret = error(_("failed to find tree of %s"),
                            oid_to_hex(&head_oid));
                goto leave_reset_head;
        }
 
-       if (!fill_tree_descriptor(&desc[nr++], oid)) {
+       if (!fill_tree_descriptor(the_repository, &desc[nr++], oid)) {
                ret = error(_("failed to find tree of %s"), oid_to_hex(oid));
                goto leave_reset_head;
        }
@@ -1389,6 +1389,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
        struct string_list strategy_options = STRING_LIST_INIT_NODUP;
        struct object_id squash_onto;
        char *squash_onto_name = NULL;
+       int reschedule_failed_exec = -1;
        struct option builtin_rebase_options[] = {
                OPT_STRING(0, "onto", &options.onto_name,
                           N_("revision"),
@@ -1481,7 +1482,7 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
                OPT_BOOL(0, "root", &options.root,
                         N_("rebase all reachable commits up to the root(s)")),
                OPT_BOOL(0, "reschedule-failed-exec",
-                        &options.reschedule_failed_exec,
+                        &reschedule_failed_exec,
                         N_("automatically re-schedule any `exec` that fails")),
                OPT_END(),
        };
@@ -1796,8 +1797,11 @@ int cmd_rebase(int argc, const char **argv, const char *prefix)
                break;
        }
 
-       if (options.reschedule_failed_exec && !is_interactive(&options))
-               die(_("%s requires an interactive rebase"), "--reschedule-failed-exec");
+       if (reschedule_failed_exec > 0 && !is_interactive(&options))
+               die(_("--reschedule-failed-exec requires "
+                     "--exec or --interactive"));
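+       /*
+        * Only override the config-derived value when the option was given
+        * explicitly on the command line.
+        */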
+       if (reschedule_failed_exec >= 0)
+               options.reschedule_failed_exec = reschedule_failed_exec;
 
        if (options.git_am_opts.argc) {
                /* all am options except -q are compatible only with --am */
index 610eadf5f092a651fe3d04b07528f40f6adddafe..dcf385511f07dcd8efb5079077823b9df9d0d850 100644 (file)
@@ -12,7 +12,6 @@
 #include "object.h"
 #include "remote.h"
 #include "connect.h"
-#include "transport.h"
 #include "string-list.h"
 #include "sha1-array.h"
 #include "connected.h"
index f834b5551b1ffe003b943c18cd35397e9196e730..30982ed2a2aa750b68f1613e8f14b049708b70c4 100644 (file)
@@ -89,6 +89,17 @@ static void remove_pack_on_signal(int signo)
        raise(signo);
 }
 
+static int has_pack_keep_file(void)
+{
+       struct packed_git *p;
+
+       for (p = get_all_packs(the_repository); p; p = p->next) {
+               if (p->pack_keep)
+                       return 1;
+       }
+       return 0;
+}
+
 /*
  * Adds all packs hex strings to the fname list, which do not
  * have a corresponding .keep file. These packs are not to
@@ -129,19 +140,9 @@ static void get_non_kept_pack_filenames(struct string_list *fname_list,
 
 static void remove_redundant_pack(const char *dir_name, const char *base_name)
 {
-       const char *exts[] = {".pack", ".idx", ".keep", ".bitmap", ".promisor"};
-       int i;
        struct strbuf buf = STRBUF_INIT;
-       size_t plen;
-
-       strbuf_addf(&buf, "%s/%s", dir_name, base_name);
-       plen = buf.len;
-
-       for (i = 0; i < ARRAY_SIZE(exts); i++) {
-               strbuf_setlen(&buf, plen);
-               strbuf_addstr(&buf, exts[i]);
-               unlink(buf.buf);
-       }
+       strbuf_addf(&buf, "%s/%s.pack", dir_name, base_name);
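+       /*
+        * unlink_pack_path() also removes the pack's .idx, .keep, .bitmap and
+        * .promisor files; the second argument forces deletion even if a
+        * .keep file exists.
+        */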
+       unlink_pack_path(buf.buf, 1);
        strbuf_release(&buf);
 }
 
@@ -343,9 +344,12 @@ int cmd_repack(int argc, const char **argv, const char *prefix)
            (unpack_unreachable || (pack_everything & LOOSEN_UNREACHABLE)))
                die(_("--keep-unreachable and -A are incompatible"));
 
-       if (write_bitmaps < 0)
+       if (write_bitmaps < 0) {
                write_bitmaps = (pack_everything & ALL_INTO_ONE) &&
-                                is_bare_repository();
+                                is_bare_repository() &&
+                                keep_pack_list.nr == 0 &&
+                                !has_pack_keep_file();
+       }
        if (pack_kept_objects < 0)
                pack_kept_objects = write_bitmaps;
 
index c2bb35a4b7048c94f79057ed3db4cbdd30a28504..fdd572168b51cc388a098008c3636ffa60856233 100644 (file)
@@ -79,13 +79,13 @@ static int reset_index(const struct object_id *oid, int reset_type, int quiet)
                struct object_id head_oid;
                if (get_oid("HEAD", &head_oid))
                        return error(_("You do not have a valid HEAD."));
-               if (!fill_tree_descriptor(desc + nr, &head_oid))
+               if (!fill_tree_descriptor(the_repository, desc + nr, &head_oid))
                        return error(_("Failed to find tree of HEAD."));
                nr++;
                opts.fn = twoway_merge;
        }
 
-       if (!fill_tree_descriptor(desc + nr, oid)) {
+       if (!fill_tree_descriptor(the_repository, desc + nr, oid)) {
                error(_("Failed to find tree of %s."), oid_to_hex(oid));
                goto out;
        }
index 4e71b2f2aa292ffc3187aac61001e7fd529255b3..f61cc5d82cf2697583b5851893ba4076f96643a5 100644 (file)
@@ -102,6 +102,7 @@ static int run_sequencer(int argc, const char **argv, struct replay_opts *opts)
                OPT_CMDMODE(0, "quit", &cmd, N_("end revert or cherry-pick sequence"), 'q'),
                OPT_CMDMODE(0, "continue", &cmd, N_("resume revert or cherry-pick sequence"), 'c'),
                OPT_CMDMODE(0, "abort", &cmd, N_("cancel revert or cherry-pick sequence"), 'a'),
+               OPT_CMDMODE(0, "skip", &cmd, N_("skip current commit and continue"), 's'),
                OPT_CLEANUP(&cleanup_arg),
                OPT_BOOL('n', "no-commit", &opts->no_commit, N_("don't automatically commit")),
                OPT_BOOL('e', "edit", &opts->edit, N_("edit the commit message")),
@@ -151,6 +152,8 @@ static int run_sequencer(int argc, const char **argv, struct replay_opts *opts)
                        this_operation = "--quit";
                else if (cmd == 'c')
                        this_operation = "--continue";
+               else if (cmd == 's')
+                       this_operation = "--skip";
                else {
                        assert(cmd == 'a');
                        this_operation = "--abort";
@@ -210,6 +213,8 @@ static int run_sequencer(int argc, const char **argv, struct replay_opts *opts)
                return sequencer_continue(the_repository, opts);
        if (cmd == 'a')
                return sequencer_rollback(the_repository, opts);
+       if (cmd == 's')
+               return sequencer_skip(the_repository, opts);
        return sequencer_pick_revisions(the_repository, opts);
 }
 
index be8edc6d1e1185fa0224b516c0e4f73ceb590aa3..2eacda42b4a8fff746e846891fce0107d80efbe1 100644 (file)
@@ -179,7 +179,7 @@ static int check_local_mod(struct object_id *head, int index_only)
                 * way as changed from the HEAD.
                 */
                if (no_head
-                    || get_tree_entry(head, name, &oid, &mode)
+                    || get_tree_entry(the_repository, head, name, &oid, &mode)
                     || ce->ce_mode != create_ce_mode(mode)
                     || !oideq(&ce->oid, &oid))
                        staged_changes = 1;
index 3f8cc6ccb47c2f8927e91076a8844d2cfd8a0bb2..dff2f4b837208deebeb34f4af687c18df7dbfe76 100644 (file)
@@ -601,7 +601,7 @@ static struct cache_entry *read_one_ent(const char *which,
        struct object_id oid;
        struct cache_entry *ce;
 
-       if (get_tree_entry(ent, path, &oid, &mode)) {
+       if (get_tree_entry(the_repository, ent, path, &oid, &mode)) {
                if (which)
                        error("%s: not in %s branch.", path, which);
                return NULL;
diff --git a/cache.h b/cache.h
index 3167585cabda5f91f4c501d9b5bf924b4f5a3e12..b1da1ab08faad3da19657a9a5dcf5f2592c2127c 100644 (file)
--- a/cache.h
+++ b/cache.h
@@ -1476,7 +1476,8 @@ int df_name_compare(const char *name1, int len1, int mode1, const char *name2, i
 int name_compare(const char *name1, size_t len1, const char *name2, size_t len2);
 int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2);
 
-void *read_object_with_reference(const struct object_id *oid,
+void *read_object_with_reference(struct repository *r,
+                                const struct object_id *oid,
                                 const char *required_type,
                                 unsigned long *size,
                                 struct object_id *oid_ret);
@@ -1762,8 +1763,8 @@ int add_files_to_cache(const char *prefix, const struct pathspec *pathspec, int
 extern int diff_auto_refresh_index;
 
 /* match-trees.c */
-void shift_tree(const struct object_id *, const struct object_id *, struct object_id *, int);
-void shift_tree_by(const struct object_id *, const struct object_id *, struct object_id *, const char *);
+void shift_tree(struct repository *, const struct object_id *, const struct object_id *, struct object_id *, int);
+void shift_tree_by(struct repository *, const struct object_id *, const struct object_id *, struct object_id *, const char *);
 
 /*
  * whitespace rules.
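
The declarations above continue the effort of passing the repository to work on explicitly instead of assuming the_repository. A rough sketch of what the updated read_object_with_reference() contract looks like to a caller; it mirrors the fast-import call sites further down, commit_type being the usual "commit" type-name constant and the returned buffer being the caller's to free:

	/* sketch: peel an arbitrary object (e.g. a tag) down to a commit buffer */
	static void *peel_to_commit_buf(struct repository *r, struct object_id *oid,
					unsigned long *size)
	{
		/* follows references until an object of the requested type is
		 * reached; returns NULL when the oid cannot be peeled to it */
		return read_object_with_reference(r, oid, commit_type, size, oid);
	}
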
index 7f6acdd803c33bc43200804ecec5d99f6d404224..8cc72503cb768573bc6d284eb57910ba6ca5a5cb 100755 (executable)
@@ -34,7 +34,7 @@ linux-clang|linux-gcc)
        popd
        ;;
 osx-clang|osx-gcc)
-       brew update >/dev/null
+       export HOMEBREW_NO_AUTO_UPDATE=1 HOMEBREW_NO_INSTALL_CLEANUP=1
        # Uncomment this if you want to run perf tests:
        # brew install gnu-time
        test -z "$BREW_INSTALL_PACKAGES" ||
index 288a5b3884ad825c99601ba5dc62ee81c62d7d64..0c7171a17354fd6dba5017e443b3d1b936f73e34 100755 (executable)
--- a/ci/lib.sh
+++ b/ci/lib.sh
@@ -163,8 +163,10 @@ linux-clang|linux-gcc)
        export GIT_TEST_HTTPD=YesPlease
 
        # The Linux build installs the defined dependency versions below.
-       # The OS X build installs the latest available versions. Keep that
-       # in mind when you encounter a broken OS X build!
+       # The OS X build installs much more recent versions, whichever
+       # were recorded in the Homebrew database upon creating the OS X
+       # image.
+       # Keep that in mind when you encounter a broken OS X build!
        export LINUX_P4_VERSION="16.2"
        export LINUX_GIT_LFS_VERSION="1.5.2"
 
index 8cc1d1d6c3aff0842c42ecda5cc545c9eb085802..b3c4de79b6da4502726dbec5e21b11734d76e28e 100644 (file)
@@ -22,6 +22,7 @@
 #define GRAPH_CHUNKID_OIDLOOKUP 0x4f49444c /* "OIDL" */
 #define GRAPH_CHUNKID_DATA 0x43444154 /* "CDAT" */
 #define GRAPH_CHUNKID_EXTRAEDGES 0x45444745 /* "EDGE" */
+#define GRAPH_CHUNKID_BASE 0x42415345 /* "BASE" */
 
 #define GRAPH_DATA_WIDTH (the_hash_algo->rawsz + 16)
 
 
 char *get_commit_graph_filename(const char *obj_dir)
 {
-       return xstrfmt("%s/info/commit-graph", obj_dir);
+       char *filename = xstrfmt("%s/info/commit-graph", obj_dir);
+       char *normalized = xmalloc(strlen(filename) + 1);
+       normalize_path_copy(normalized, filename);
+       free(filename);
+       return normalized;
+}
+
+static char *get_split_graph_filename(const char *obj_dir,
+                                     const char *oid_hex)
+{
+       char *filename = xstrfmt("%s/info/commit-graphs/graph-%s.graph",
+                                obj_dir,
+                                oid_hex);
+       char *normalized = xmalloc(strlen(filename) + 1);
+       normalize_path_copy(normalized, filename);
+       free(filename);
+       return normalized;
+}
+
+static char *get_chain_filename(const char *obj_dir)
+{
+       return xstrfmt("%s/info/commit-graphs/commit-graph-chain", obj_dir);
 }
 
 static uint8_t oid_version(void)
@@ -249,6 +271,12 @@ struct commit_graph *parse_commit_graph(void *graph_map, int fd,
                        else
                                graph->chunk_extra_edges = data + chunk_offset;
                        break;
+
+               case GRAPH_CHUNKID_BASE:
+                       if (graph->chunk_base_graphs)
+                               chunk_repeated = 1;
+                       else
+                               graph->chunk_base_graphs = data + chunk_offset;
                }
 
                if (chunk_repeated) {
@@ -267,6 +295,8 @@ struct commit_graph *parse_commit_graph(void *graph_map, int fd,
                last_chunk_offset = chunk_offset;
        }
 
+       hashcpy(graph->oid.hash, graph->data + graph->data_len - graph->hash_len);
+
        if (verify_commit_graph_lite(graph)) {
                free(graph);
                return NULL;
@@ -280,26 +310,151 @@ static struct commit_graph *load_commit_graph_one(const char *graph_file)
 
        struct stat st;
        int fd;
+       struct commit_graph *g;
        int open_ok = open_commit_graph(graph_file, &fd, &st);
 
        if (!open_ok)
                return NULL;
 
-       return load_commit_graph_one_fd_st(fd, &st);
+       g = load_commit_graph_one_fd_st(fd, &st);
+
+       if (g)
+               g->filename = xstrdup(graph_file);
+
+       return g;
+}
+
+static struct commit_graph *load_commit_graph_v1(struct repository *r, const char *obj_dir)
+{
+       char *graph_name = get_commit_graph_filename(obj_dir);
+       struct commit_graph *g = load_commit_graph_one(graph_name);
+       free(graph_name);
+
+       if (g)
+               g->obj_dir = obj_dir;
+
+       return g;
+}
+
+static int add_graph_to_chain(struct commit_graph *g,
+                             struct commit_graph *chain,
+                             struct object_id *oids,
+                             int n)
+{
+       struct commit_graph *cur_g = chain;
+
+       if (n && !g->chunk_base_graphs) {
+               warning(_("commit-graph has no base graphs chunk"));
+               return 0;
+       }
+
+       while (n) {
+               n--;
+
+               if (!cur_g ||
+                   !oideq(&oids[n], &cur_g->oid) ||
+                   !hasheq(oids[n].hash, g->chunk_base_graphs + g->hash_len * n)) {
+                       warning(_("commit-graph chain does not match"));
+                       return 0;
+               }
+
+               cur_g = cur_g->base_graph;
+       }
+
+       g->base_graph = chain;
+
+       if (chain)
+               g->num_commits_in_base = chain->num_commits + chain->num_commits_in_base;
+
+       return 1;
+}
+
+static struct commit_graph *load_commit_graph_chain(struct repository *r, const char *obj_dir)
+{
+       struct commit_graph *graph_chain = NULL;
+       struct strbuf line = STRBUF_INIT;
+       struct stat st;
+       struct object_id *oids;
+       int i = 0, valid = 1, count;
+       char *chain_name = get_chain_filename(obj_dir);
+       FILE *fp;
+       int stat_res;
+
+       fp = fopen(chain_name, "r");
+       stat_res = stat(chain_name, &st);
+       free(chain_name);
+
+       if (!fp ||
+           stat_res ||
+           st.st_size <= the_hash_algo->hexsz)
+               return NULL;
+
+       count = st.st_size / (the_hash_algo->hexsz + 1);
+       oids = xcalloc(count, sizeof(struct object_id));
+
+       prepare_alt_odb(r);
+
+       for (i = 0; i < count; i++) {
+               struct object_directory *odb;
+
+               if (strbuf_getline_lf(&line, fp) == EOF)
+                       break;
+
+               if (get_oid_hex(line.buf, &oids[i])) {
+                       warning(_("invalid commit-graph chain: line '%s' not a hash"),
+                               line.buf);
+                       valid = 0;
+                       break;
+               }
+
+               valid = 0;
+               for (odb = r->objects->odb; odb; odb = odb->next) {
+                       char *graph_name = get_split_graph_filename(odb->path, line.buf);
+                       struct commit_graph *g = load_commit_graph_one(graph_name);
+
+                       free(graph_name);
+
+                       if (g) {
+                               g->obj_dir = odb->path;
+
+                               if (add_graph_to_chain(g, graph_chain, oids, i)) {
+                                       graph_chain = g;
+                                       valid = 1;
+                               }
+
+                               break;
+                       }
+               }
+
+               if (!valid) {
+                       warning(_("unable to find all commit-graph files"));
+                       break;
+               }
+       }
+
+       free(oids);
+       fclose(fp);
+
+       return graph_chain;
+}
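
load_commit_graph_chain() reads a plain-text list of layer hashes, one per line, and resolves each of them through get_split_graph_filename() across all object directories. With made-up hashes purely for illustration, the resulting on-disk layout looks roughly like this (the first line of the chain file names the base-most layer, the last line the tip):

	objects/info/commit-graphs/commit-graph-chain      one hex hash per line
	objects/info/commit-graphs/graph-<hash1>.graph     base layer
	objects/info/commit-graphs/graph-<hash2>.graph     stacked on <hash1>
	objects/info/commit-graphs/graph-<hash3>.graph     tip layer, listed last

Each non-base layer also records its bases in its own BASE chunk; add_graph_to_chain() cross-checks those hashes against the chain read so far and rejects the layer with a warning when they disagree.
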
+
+struct commit_graph *read_commit_graph_one(struct repository *r, const char *obj_dir)
+{
+       struct commit_graph *g = load_commit_graph_v1(r, obj_dir);
+
+       if (!g)
+               g = load_commit_graph_chain(r, obj_dir);
+
+       return g;
 }
 
 static void prepare_commit_graph_one(struct repository *r, const char *obj_dir)
 {
-       char *graph_name;
 
        if (r->objects->commit_graph)
                return;
 
-       graph_name = get_commit_graph_filename(obj_dir);
-       r->objects->commit_graph =
-               load_commit_graph_one(graph_name);
-
-       FREE_AND_NULL(graph_name);
+       r->objects->commit_graph = read_commit_graph_one(r, obj_dir);
 }
 
 /*
@@ -361,9 +516,18 @@ int generation_numbers_enabled(struct repository *r)
        return !!first_generation;
 }
 
+static void close_commit_graph_one(struct commit_graph *g)
+{
+       if (!g)
+               return;
+
+       close_commit_graph_one(g->base_graph);
+       free_commit_graph(g);
+}
+
 void close_commit_graph(struct raw_object_store *o)
 {
-       free_commit_graph(o->commit_graph);
+       close_commit_graph_one(o->commit_graph);
        o->commit_graph = NULL;
 }
 
@@ -373,18 +537,38 @@ static int bsearch_graph(struct commit_graph *g, struct object_id *oid, uint32_t
                            g->chunk_oid_lookup, g->hash_len, pos);
 }
 
+static void load_oid_from_graph(struct commit_graph *g,
+                               uint32_t pos,
+                               struct object_id *oid)
+{
+       uint32_t lex_index;
+
+       while (g && pos < g->num_commits_in_base)
+               g = g->base_graph;
+
+       if (!g)
+               BUG("NULL commit-graph");
+
+       if (pos >= g->num_commits + g->num_commits_in_base)
+               die(_("invalid commit position. commit-graph is likely corrupt"));
+
+       lex_index = pos - g->num_commits_in_base;
+
+       hashcpy(oid->hash, g->chunk_oid_lookup + g->hash_len * lex_index);
+}
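
Commit positions are now global across the whole chain: a layer owns positions num_commits_in_base through num_commits_in_base + num_commits - 1, so the lookups above first walk down to the layer that owns the position and then subtract its base count. With invented numbers: a 1000-commit base layer (num_commits_in_base == 0) topped by a 50-commit layer (num_commits_in_base == 1000) resolves position 1023 in the tip layer at lex_index 1023 - 1000 = 23, resolves position 400 down in the base layer at lex_index 400, and reports anything at or above 1050 as the "commit-graph is likely corrupt" error.
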
+
 static struct commit_list **insert_parent_or_die(struct repository *r,
                                                 struct commit_graph *g,
-                                                uint64_t pos,
+                                                uint32_t pos,
                                                 struct commit_list **pptr)
 {
        struct commit *c;
        struct object_id oid;
 
-       if (pos >= g->num_commits)
-               die("invalid parent position %"PRIu64, pos);
+       if (pos >= g->num_commits + g->num_commits_in_base)
+               die("invalid parent position %"PRIu32, pos);
 
-       hashcpy(oid.hash, g->chunk_oid_lookup + g->hash_len * pos);
+       load_oid_from_graph(g, pos, &oid);
        c = lookup_commit(r, &oid);
        if (!c)
                die(_("could not find commit %s"), oid_to_hex(&oid));
@@ -394,7 +578,14 @@ static struct commit_list **insert_parent_or_die(struct repository *r,
 
 static void fill_commit_graph_info(struct commit *item, struct commit_graph *g, uint32_t pos)
 {
-       const unsigned char *commit_data = g->chunk_commit_data + GRAPH_DATA_WIDTH * pos;
+       const unsigned char *commit_data;
+       uint32_t lex_index;
+
+       while (pos < g->num_commits_in_base)
+               g = g->base_graph;
+
+       lex_index = pos - g->num_commits_in_base;
+       commit_data = g->chunk_commit_data + GRAPH_DATA_WIDTH * lex_index;
        item->graph_pos = pos;
        item->generation = get_be32(commit_data + g->hash_len + 8) >> 2;
 }
@@ -412,10 +603,25 @@ static int fill_commit_in_graph(struct repository *r,
        uint32_t *parent_data_ptr;
        uint64_t date_low, date_high;
        struct commit_list **pptr;
-       const unsigned char *commit_data = g->chunk_commit_data + (g->hash_len + 16) * pos;
+       const unsigned char *commit_data;
+       uint32_t lex_index;
 
-       item->object.parsed = 1;
+       while (pos < g->num_commits_in_base)
+               g = g->base_graph;
+
+       if (pos >= g->num_commits + g->num_commits_in_base)
+               die(_("invalid commit position. commit-graph is likely corrupt"));
+
+       /*
+        * Store the "full" position, but then use the
+        * "local" position for the rest of the calculation.
+        */
        item->graph_pos = pos;
+       lex_index = pos - g->num_commits_in_base;
+
+       commit_data = g->chunk_commit_data + (g->hash_len + 16) * lex_index;
+
+       item->object.parsed = 1;
 
        set_commit_tree(item, NULL);
 
@@ -459,7 +665,18 @@ static int find_commit_in_graph(struct commit *item, struct commit_graph *g, uin
                *pos = item->graph_pos;
                return 1;
        } else {
-               return bsearch_graph(g, &(item->object.oid), pos);
+               struct commit_graph *cur_g = g;
+               uint32_t lex_index;
+
+               while (cur_g && !bsearch_graph(cur_g, &(item->object.oid), &lex_index))
+                       cur_g = cur_g->base_graph;
+
+               if (cur_g) {
+                       *pos = lex_index + cur_g->num_commits_in_base;
+                       return 1;
+               }
+
+               return 0;
        }
 }
 
@@ -499,8 +716,13 @@ static struct tree *load_tree_for_commit(struct repository *r,
                                         struct commit *c)
 {
        struct object_id oid;
-       const unsigned char *commit_data = g->chunk_commit_data +
-                                          GRAPH_DATA_WIDTH * (c->graph_pos);
+       const unsigned char *commit_data;
+
+       while (c->graph_pos < g->num_commits_in_base)
+               g = g->base_graph;
+
+       commit_data = g->chunk_commit_data +
+                       GRAPH_DATA_WIDTH * (c->graph_pos - g->num_commits_in_base);
 
        hashcpy(oid.hash, commit_data);
        set_commit_tree(c, lookup_tree(r, &oid));
@@ -539,7 +761,7 @@ struct packed_oid_list {
 
 struct write_commit_graph_context {
        struct repository *r;
-       const char *obj_dir;
+       char *obj_dir;
        char *graph_name;
        struct packed_oid_list oids;
        struct packed_commit_list commits;
@@ -548,8 +770,21 @@ struct write_commit_graph_context {
        struct progress *progress;
        int progress_done;
        uint64_t progress_cnt;
+
+       char *base_graph_name;
+       int num_commit_graphs_before;
+       int num_commit_graphs_after;
+       char **commit_graph_filenames_before;
+       char **commit_graph_filenames_after;
+       char **commit_graph_hash_after;
+       uint32_t new_num_commits_in_base;
+       struct commit_graph *new_base_graph;
+
        unsigned append:1,
-                report_progress:1;
+                report_progress:1,
+                split:1;
+
+       const struct split_commit_graph_opts *split_opts;
 };
 
 static void write_graph_chunk_fanout(struct hashfile *f,
@@ -619,6 +854,16 @@ static void write_graph_chunk_data(struct hashfile *f, int hash_len,
                                              ctx->commits.nr,
                                              commit_to_sha1);
 
+                       if (edge_value >= 0)
+                               edge_value += ctx->new_num_commits_in_base;
+                       else {
+                               uint32_t pos;
+                               if (find_commit_in_graph(parent->item,
+                                                        ctx->new_base_graph,
+                                                        &pos))
+                                       edge_value = pos;
+                       }
+
                        if (edge_value < 0)
                                BUG("missing parent %s for commit %s",
                                    oid_to_hex(&parent->item->object.oid),
@@ -639,6 +884,17 @@ static void write_graph_chunk_data(struct hashfile *f, int hash_len,
                                              ctx->commits.list,
                                              ctx->commits.nr,
                                              commit_to_sha1);
+
+                       if (edge_value >= 0)
+                               edge_value += ctx->new_num_commits_in_base;
+                       else {
+                               uint32_t pos;
+                               if (find_commit_in_graph(parent->item,
+                                                        ctx->new_base_graph,
+                                                        &pos))
+                                       edge_value = pos;
+                       }
+
                        if (edge_value < 0)
                                BUG("missing parent %s for commit %s",
                                    oid_to_hex(&parent->item->object.oid),
@@ -696,6 +952,16 @@ static void write_graph_chunk_extra_edges(struct hashfile *f,
                                                  ctx->commits.nr,
                                                  commit_to_sha1);
 
+                       if (edge_value >= 0)
+                               edge_value += ctx->new_num_commits_in_base;
+                       else {
+                               uint32_t pos;
+                               if (find_commit_in_graph(parent->item,
+                                                        ctx->new_base_graph,
+                                                        &pos))
+                                       edge_value = pos;
+                       }
+
                        if (edge_value < 0)
                                BUG("missing parent %s for commit %s",
                                    oid_to_hex(&parent->item->object.oid),
@@ -710,7 +976,7 @@ static void write_graph_chunk_extra_edges(struct hashfile *f,
        }
 }
 
-static int commit_compare(const void *_a, const void *_b)
+static int oid_compare(const void *_a, const void *_b)
 {
        const struct object_id *a = (const struct object_id *)_a;
        const struct object_id *b = (const struct object_id *)_b;
@@ -787,7 +1053,13 @@ static void close_reachable(struct write_commit_graph_context *ctx)
                display_progress(ctx->progress, i + 1);
                commit = lookup_commit(ctx->r, &ctx->oids.list[i]);
 
-               if (commit && !parse_commit_no_graph(commit))
+               if (!commit)
+                       continue;
+               if (ctx->split) {
+                       if (!parse_commit(commit) &&
+                           commit->graph_pos == COMMIT_NOT_FROM_GRAPH)
+                               add_missing_parents(ctx, commit);
+               } else if (!parse_commit_no_graph(commit))
                        add_missing_parents(ctx, commit);
        }
        stop_progress(&ctx->progress);
@@ -861,14 +1133,15 @@ static int add_ref_to_list(const char *refname,
        return 0;
 }
 
-int write_commit_graph_reachable(const char *obj_dir, unsigned int flags)
+int write_commit_graph_reachable(const char *obj_dir, unsigned int flags,
+                                const struct split_commit_graph_opts *split_opts)
 {
        struct string_list list = STRING_LIST_INIT_DUP;
        int result;
 
        for_each_ref(add_ref_to_list, &list);
        result = write_commit_graph(obj_dir, NULL, &list,
-                                   flags);
+                                   flags, split_opts);
 
        string_list_clear(&list, 0);
        return result;
@@ -979,12 +1252,20 @@ static uint32_t count_distinct_commits(struct write_commit_graph_context *ctx)
                        _("Counting distinct commits in commit graph"),
                        ctx->oids.nr);
        display_progress(ctx->progress, 0); /* TODO: Measure QSORT() progress */
-       QSORT(ctx->oids.list, ctx->oids.nr, commit_compare);
+       QSORT(ctx->oids.list, ctx->oids.nr, oid_compare);
 
        for (i = 1; i < ctx->oids.nr; i++) {
                display_progress(ctx->progress, i + 1);
-               if (!oideq(&ctx->oids.list[i - 1], &ctx->oids.list[i]))
+               if (!oideq(&ctx->oids.list[i - 1], &ctx->oids.list[i])) {
+                       if (ctx->split) {
+                               struct commit *c = lookup_commit(ctx->r, &ctx->oids.list[i]);
+
+                               if (!c || c->graph_pos != COMMIT_NOT_FROM_GRAPH)
+                                       continue;
+                       }
+
                        count_distinct++;
+               }
        }
        stop_progress(&ctx->progress);
 
@@ -1007,7 +1288,13 @@ static void copy_oids_to_commits(struct write_commit_graph_context *ctx)
                if (i > 0 && oideq(&ctx->oids.list[i - 1], &ctx->oids.list[i]))
                        continue;
 
+               ALLOC_GROW(ctx->commits.list, ctx->commits.nr + 1, ctx->commits.alloc);
                ctx->commits.list[ctx->commits.nr] = lookup_commit(ctx->r, &ctx->oids.list[i]);
+
+               if (ctx->split &&
+                   ctx->commits.list[ctx->commits.nr]->graph_pos != COMMIT_NOT_FROM_GRAPH)
+                       continue;
+
                parse_commit_no_graph(ctx->commits.list[ctx->commits.nr]);
 
                for (parent = ctx->commits.list[ctx->commits.nr]->parents;
@@ -1022,18 +1309,56 @@ static void copy_oids_to_commits(struct write_commit_graph_context *ctx)
        stop_progress(&ctx->progress);
 }
 
+static int write_graph_chunk_base_1(struct hashfile *f,
+                                   struct commit_graph *g)
+{
+       int num = 0;
+
+       if (!g)
+               return 0;
+
+       num = write_graph_chunk_base_1(f, g->base_graph);
+       hashwrite(f, g->oid.hash, the_hash_algo->rawsz);
+       return num + 1;
+}
+
+static int write_graph_chunk_base(struct hashfile *f,
+                                 struct write_commit_graph_context *ctx)
+{
+       int num = write_graph_chunk_base_1(f, ctx->new_base_graph);
+
+       if (num != ctx->num_commit_graphs_after - 1) {
+               error(_("failed to write correct number of base graph ids"));
+               return -1;
+       }
+
+       return 0;
+}
+
 static int write_commit_graph_file(struct write_commit_graph_context *ctx)
 {
        uint32_t i;
+       int fd;
        struct hashfile *f;
        struct lock_file lk = LOCK_INIT;
-       uint32_t chunk_ids[5];
-       uint64_t chunk_offsets[5];
+       uint32_t chunk_ids[6];
+       uint64_t chunk_offsets[6];
        const unsigned hashsz = the_hash_algo->rawsz;
        struct strbuf progress_title = STRBUF_INIT;
-       int num_chunks = ctx->num_extra_edges ? 4 : 3;
+       int num_chunks = 3;
+       struct object_id file_hash;
+
+       if (ctx->split) {
+               struct strbuf tmp_file = STRBUF_INIT;
+
+               strbuf_addf(&tmp_file,
+                           "%s/info/commit-graphs/tmp_graph_XXXXXX",
+                           ctx->obj_dir);
+               ctx->graph_name = strbuf_detach(&tmp_file, NULL);
+       } else {
+               ctx->graph_name = get_commit_graph_filename(ctx->obj_dir);
+       }
 
-       ctx->graph_name = get_commit_graph_filename(ctx->obj_dir);
        if (safe_create_leading_directories(ctx->graph_name)) {
                UNLEAK(ctx->graph_name);
                error(_("unable to create leading directories of %s"),
@@ -1041,30 +1366,61 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
                return -1;
        }
 
-       hold_lock_file_for_update(&lk, ctx->graph_name, LOCK_DIE_ON_ERROR);
-       f = hashfd(lk.tempfile->fd, lk.tempfile->filename.buf);
+       if (ctx->split) {
+               char *lock_name = get_chain_filename(ctx->obj_dir);
 
-       hashwrite_be32(f, GRAPH_SIGNATURE);
+               hold_lock_file_for_update(&lk, lock_name, LOCK_DIE_ON_ERROR);
 
-       hashwrite_u8(f, GRAPH_VERSION);
-       hashwrite_u8(f, oid_version());
-       hashwrite_u8(f, num_chunks);
-       hashwrite_u8(f, 0); /* unused padding byte */
+               fd = git_mkstemp_mode(ctx->graph_name, 0444);
+               if (fd < 0) {
+                       error(_("unable to create '%s'"), ctx->graph_name);
+                       return -1;
+               }
+
+               f = hashfd(fd, ctx->graph_name);
+       } else {
+               hold_lock_file_for_update(&lk, ctx->graph_name, LOCK_DIE_ON_ERROR);
+               fd = lk.tempfile->fd;
+               f = hashfd(lk.tempfile->fd, lk.tempfile->filename.buf);
+       }
 
        chunk_ids[0] = GRAPH_CHUNKID_OIDFANOUT;
        chunk_ids[1] = GRAPH_CHUNKID_OIDLOOKUP;
        chunk_ids[2] = GRAPH_CHUNKID_DATA;
-       if (ctx->num_extra_edges)
-               chunk_ids[3] = GRAPH_CHUNKID_EXTRAEDGES;
-       else
-               chunk_ids[3] = 0;
-       chunk_ids[4] = 0;
+       if (ctx->num_extra_edges) {
+               chunk_ids[num_chunks] = GRAPH_CHUNKID_EXTRAEDGES;
+               num_chunks++;
+       }
+       if (ctx->num_commit_graphs_after > 1) {
+               chunk_ids[num_chunks] = GRAPH_CHUNKID_BASE;
+               num_chunks++;
+       }
+
+       chunk_ids[num_chunks] = 0;
 
        chunk_offsets[0] = 8 + (num_chunks + 1) * GRAPH_CHUNKLOOKUP_WIDTH;
        chunk_offsets[1] = chunk_offsets[0] + GRAPH_FANOUT_SIZE;
        chunk_offsets[2] = chunk_offsets[1] + hashsz * ctx->commits.nr;
        chunk_offsets[3] = chunk_offsets[2] + (hashsz + 16) * ctx->commits.nr;
-       chunk_offsets[4] = chunk_offsets[3] + 4 * ctx->num_extra_edges;
+
+       num_chunks = 3;
+       if (ctx->num_extra_edges) {
+               chunk_offsets[num_chunks + 1] = chunk_offsets[num_chunks] +
+                                               4 * ctx->num_extra_edges;
+               num_chunks++;
+       }
+       if (ctx->num_commit_graphs_after > 1) {
+               chunk_offsets[num_chunks + 1] = chunk_offsets[num_chunks] +
+                                               hashsz * (ctx->num_commit_graphs_after - 1);
+               num_chunks++;
+       }
+
+       hashwrite_be32(f, GRAPH_SIGNATURE);
+
+       hashwrite_u8(f, GRAPH_VERSION);
+       hashwrite_u8(f, oid_version());
+       hashwrite_u8(f, num_chunks);
+       hashwrite_u8(f, ctx->num_commit_graphs_after - 1);
 
        for (i = 0; i <= num_chunks; i++) {
                uint32_t chunk_write[3];
@@ -1090,23 +1446,316 @@ static int write_commit_graph_file(struct write_commit_graph_context *ctx)
        write_graph_chunk_data(f, hashsz, ctx);
        if (ctx->num_extra_edges)
                write_graph_chunk_extra_edges(f, ctx);
+       if (ctx->num_commit_graphs_after > 1 &&
+           write_graph_chunk_base(f, ctx)) {
+               return -1;
+       }
        stop_progress(&ctx->progress);
        strbuf_release(&progress_title);
 
+       if (ctx->split && ctx->base_graph_name && ctx->num_commit_graphs_after > 1) {
+               char *new_base_hash = xstrdup(oid_to_hex(&ctx->new_base_graph->oid));
+               char *new_base_name = get_split_graph_filename(ctx->new_base_graph->obj_dir, new_base_hash);
+
+               free(ctx->commit_graph_filenames_after[ctx->num_commit_graphs_after - 2]);
+               free(ctx->commit_graph_hash_after[ctx->num_commit_graphs_after - 2]);
+               ctx->commit_graph_filenames_after[ctx->num_commit_graphs_after - 2] = new_base_name;
+               ctx->commit_graph_hash_after[ctx->num_commit_graphs_after - 2] = new_base_hash;
+       }
+
        close_commit_graph(ctx->r->objects);
-       finalize_hashfile(f, NULL, CSUM_HASH_IN_STREAM | CSUM_FSYNC);
+       finalize_hashfile(f, file_hash.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC);
+
+       if (ctx->split) {
+               FILE *chainf = fdopen_lock_file(&lk, "w");
+               char *final_graph_name;
+               int result;
+
+               close(fd);
+
+               if (!chainf) {
+                       error(_("unable to open commit-graph chain file"));
+                       return -1;
+               }
+
+               if (ctx->base_graph_name) {
+                       const char *dest = ctx->commit_graph_filenames_after[
+                                               ctx->num_commit_graphs_after - 2];
+
+                       if (strcmp(ctx->base_graph_name, dest)) {
+                               result = rename(ctx->base_graph_name, dest);
+
+                               if (result) {
+                                       error(_("failed to rename base commit-graph file"));
+                                       return -1;
+                               }
+                       }
+               } else {
+                       char *graph_name = get_commit_graph_filename(ctx->obj_dir);
+                       unlink(graph_name);
+               }
+
+               ctx->commit_graph_hash_after[ctx->num_commit_graphs_after - 1] = xstrdup(oid_to_hex(&file_hash));
+               final_graph_name = get_split_graph_filename(ctx->obj_dir,
+                                       ctx->commit_graph_hash_after[ctx->num_commit_graphs_after - 1]);
+               ctx->commit_graph_filenames_after[ctx->num_commit_graphs_after - 1] = final_graph_name;
+
+               result = rename(ctx->graph_name, final_graph_name);
+
+               for (i = 0; i < ctx->num_commit_graphs_after; i++)
+                       fprintf(lk.tempfile->fp, "%s\n", ctx->commit_graph_hash_after[i]);
+
+               if (result) {
+                       error(_("failed to rename temporary commit-graph file"));
+                       return -1;
+               }
+       }
+
        commit_lock_file(&lk);
 
        return 0;
 }
 
+static void split_graph_merge_strategy(struct write_commit_graph_context *ctx)
+{
+       struct commit_graph *g = ctx->r->objects->commit_graph;
+       uint32_t num_commits = ctx->commits.nr;
+       uint32_t i;
+
+       int max_commits = 0;
+       int size_mult = 2;
+
+       if (ctx->split_opts) {
+               max_commits = ctx->split_opts->max_commits;
+               size_mult = ctx->split_opts->size_multiple;
+       }
+
+       g = ctx->r->objects->commit_graph;
+       ctx->num_commit_graphs_after = ctx->num_commit_graphs_before + 1;
+
+       while (g && (g->num_commits <= size_mult * num_commits ||
+                   (max_commits && num_commits > max_commits))) {
+               if (strcmp(g->obj_dir, ctx->obj_dir))
+                       break;
+
+               num_commits += g->num_commits;
+               g = g->base_graph;
+
+               ctx->num_commit_graphs_after--;
+       }
+
+       ctx->new_base_graph = g;
+
+       if (ctx->num_commit_graphs_after == 2) {
+               char *old_graph_name = get_commit_graph_filename(g->obj_dir);
+
+               if (!strcmp(g->filename, old_graph_name) &&
+                   strcmp(g->obj_dir, ctx->obj_dir)) {
+                       ctx->num_commit_graphs_after = 1;
+                       ctx->new_base_graph = NULL;
+               }
+
+               free(old_graph_name);
+       }
+
+       ALLOC_ARRAY(ctx->commit_graph_filenames_after, ctx->num_commit_graphs_after);
+       ALLOC_ARRAY(ctx->commit_graph_hash_after, ctx->num_commit_graphs_after);
+
+       for (i = 0; i < ctx->num_commit_graphs_after &&
+                   i < ctx->num_commit_graphs_before; i++)
+               ctx->commit_graph_filenames_after[i] = xstrdup(ctx->commit_graph_filenames_before[i]);
+
+       i = ctx->num_commit_graphs_before - 1;
+       g = ctx->r->objects->commit_graph;
+
+       while (g) {
+               if (i < ctx->num_commit_graphs_after)
+                       ctx->commit_graph_hash_after[i] = xstrdup(oid_to_hex(&g->oid));
+
+               i--;
+               g = g->base_graph;
+       }
+}
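
The loop above decides how many existing layers the new write should absorb: a layer is merged whenever it is not more than size_multiple times larger than what has been accumulated so far, or whenever max_commits is set and still exceeded; layers living in a different object directory (an alternate) are never touched, hence the strcmp() on obj_dir. Walking through invented numbers with the default size_multiple of 2 and no max_commits: writing 100 new commits on top of layers of 150, 450 and 10000 commits first absorbs the 150-commit layer (150 <= 2 * 100, running total 250), then the 450-commit layer (450 <= 2 * 250, running total 700), and stops at the 10000-commit layer (10000 > 2 * 700), which becomes new_base_graph; the result is a single new 700-commit layer stacked on the untouched 10000-commit base.
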
+
+static void merge_commit_graph(struct write_commit_graph_context *ctx,
+                              struct commit_graph *g)
+{
+       uint32_t i;
+       uint32_t offset = g->num_commits_in_base;
+
+       ALLOC_GROW(ctx->commits.list, ctx->commits.nr + g->num_commits, ctx->commits.alloc);
+
+       for (i = 0; i < g->num_commits; i++) {
+               struct object_id oid;
+               struct commit *result;
+
+               display_progress(ctx->progress, i + 1);
+
+               load_oid_from_graph(g, i + offset, &oid);
+
+               /* only add commits if they still exist in the repo */
+               result = lookup_commit_reference_gently(ctx->r, &oid, 1);
+
+               if (result) {
+                       ctx->commits.list[ctx->commits.nr] = result;
+                       ctx->commits.nr++;
+               }
+       }
+}
+
+static int commit_compare(const void *_a, const void *_b)
+{
+       const struct commit *a = *(const struct commit **)_a;
+       const struct commit *b = *(const struct commit **)_b;
+       return oidcmp(&a->object.oid, &b->object.oid);
+}
+
+static void sort_and_scan_merged_commits(struct write_commit_graph_context *ctx)
+{
+       uint32_t i, num_parents;
+       struct commit_list *parent;
+
+       if (ctx->report_progress)
+               ctx->progress = start_delayed_progress(
+                                       _("Scanning merged commits"),
+                                       ctx->commits.nr);
+
+       QSORT(ctx->commits.list, ctx->commits.nr, commit_compare);
+
+       ctx->num_extra_edges = 0;
+       for (i = 0; i < ctx->commits.nr; i++) {
+               display_progress(ctx->progress, i);
+
+               if (i && oideq(&ctx->commits.list[i - 1]->object.oid,
+                         &ctx->commits.list[i]->object.oid)) {
+                       die(_("unexpected duplicate commit id %s"),
+                           oid_to_hex(&ctx->commits.list[i]->object.oid));
+               } else {
+                       num_parents = 0;
+                       for (parent = ctx->commits.list[i]->parents; parent; parent = parent->next)
+                               num_parents++;
+
+                       if (num_parents > 2)
+                               ctx->num_extra_edges += num_parents - 2;
+               }
+       }
+
+       stop_progress(&ctx->progress);
+}
+
+static void merge_commit_graphs(struct write_commit_graph_context *ctx)
+{
+       struct commit_graph *g = ctx->r->objects->commit_graph;
+       uint32_t current_graph_number = ctx->num_commit_graphs_before;
+       struct strbuf progress_title = STRBUF_INIT;
+
+       while (g && current_graph_number >= ctx->num_commit_graphs_after) {
+               current_graph_number--;
+
+               if (ctx->report_progress) {
+                       strbuf_addstr(&progress_title, _("Merging commit-graph"));
+                       ctx->progress = start_delayed_progress(progress_title.buf, 0);
+               }
+
+               merge_commit_graph(ctx, g);
+               stop_progress(&ctx->progress);
+               strbuf_release(&progress_title);
+
+               g = g->base_graph;
+       }
+
+       if (g) {
+               ctx->new_base_graph = g;
+               ctx->new_num_commits_in_base = g->num_commits + g->num_commits_in_base;
+       }
+
+       if (ctx->new_base_graph)
+               ctx->base_graph_name = xstrdup(ctx->new_base_graph->filename);
+
+       sort_and_scan_merged_commits(ctx);
+}
+
+static void mark_commit_graphs(struct write_commit_graph_context *ctx)
+{
+       uint32_t i;
+       time_t now = time(NULL);
+
+       for (i = ctx->num_commit_graphs_after - 1; i < ctx->num_commit_graphs_before; i++) {
+               struct stat st;
+               struct utimbuf updated_time;
+
+               stat(ctx->commit_graph_filenames_before[i], &st);
+
+               updated_time.actime = st.st_atime;
+               updated_time.modtime = now;
+               utime(ctx->commit_graph_filenames_before[i], &updated_time);
+       }
+}
+
+static void expire_commit_graphs(struct write_commit_graph_context *ctx)
+{
+       struct strbuf path = STRBUF_INIT;
+       DIR *dir;
+       struct dirent *de;
+       size_t dirnamelen;
+       timestamp_t expire_time = time(NULL);
+
+       if (ctx->split_opts && ctx->split_opts->expire_time)
+               expire_time -= ctx->split_opts->expire_time;
+       if (!ctx->split) {
+               char *chain_file_name = get_chain_filename(ctx->obj_dir);
+               unlink(chain_file_name);
+               free(chain_file_name);
+               ctx->num_commit_graphs_after = 0;
+       }
+
+       strbuf_addstr(&path, ctx->obj_dir);
+       strbuf_addstr(&path, "/info/commit-graphs");
+       dir = opendir(path.buf);
+
+       if (!dir) {
+               strbuf_release(&path);
+               return;
+       }
+
+       strbuf_addch(&path, '/');
+       dirnamelen = path.len;
+       while ((de = readdir(dir)) != NULL) {
+               struct stat st;
+               uint32_t i, found = 0;
+
+               strbuf_setlen(&path, dirnamelen);
+               strbuf_addstr(&path, de->d_name);
+
+               stat(path.buf, &st);
+
+               if (st.st_mtime > expire_time)
+                       continue;
+               if (path.len < 6 || strcmp(path.buf + path.len - 6, ".graph"))
+                       continue;
+
+               for (i = 0; i < ctx->num_commit_graphs_after; i++) {
+                       if (!strcmp(ctx->commit_graph_filenames_after[i],
+                                   path.buf)) {
+                               found = 1;
+                               break;
+                       }
+               }
+
+               if (!found)
+                       unlink(path.buf);
+       }
+}
+
 int write_commit_graph(const char *obj_dir,
                       struct string_list *pack_indexes,
                       struct string_list *commit_hex,
-                      unsigned int flags)
+                      unsigned int flags,
+                      const struct split_commit_graph_opts *split_opts)
 {
        struct write_commit_graph_context *ctx;
        uint32_t i, count_distinct = 0;
+       size_t len;
        int res = 0;
 
        if (!commit_graph_compatible(the_repository))
@@ -1114,13 +1763,48 @@ int write_commit_graph(const char *obj_dir,
 
        ctx = xcalloc(1, sizeof(struct write_commit_graph_context));
        ctx->r = the_repository;
-       ctx->obj_dir = obj_dir;
+
+       /* normalize object dir with no trailing slash */
+       ctx->obj_dir = xmallocz(strlen(obj_dir) + 1);
+       normalize_path_copy(ctx->obj_dir, obj_dir);
+       len = strlen(ctx->obj_dir);
+       if (len && ctx->obj_dir[len - 1] == '/')
+               ctx->obj_dir[len - 1] = 0;
+
        ctx->append = flags & COMMIT_GRAPH_APPEND ? 1 : 0;
        ctx->report_progress = flags & COMMIT_GRAPH_PROGRESS ? 1 : 0;
+       ctx->split = flags & COMMIT_GRAPH_SPLIT ? 1 : 0;
+       ctx->split_opts = split_opts;
+
+       if (ctx->split) {
+               struct commit_graph *g;
+               prepare_commit_graph(ctx->r);
+
+               g = ctx->r->objects->commit_graph;
+
+               while (g) {
+                       ctx->num_commit_graphs_before++;
+                       g = g->base_graph;
+               }
+
+               if (ctx->num_commit_graphs_before) {
+                       ALLOC_ARRAY(ctx->commit_graph_filenames_before, ctx->num_commit_graphs_before);
+                       i = ctx->num_commit_graphs_before;
+                       g = ctx->r->objects->commit_graph;
+
+                       while (g) {
+                               ctx->commit_graph_filenames_before[--i] = xstrdup(g->filename);
+                               g = g->base_graph;
+                       }
+               }
+       }
 
        ctx->approx_nr_objects = approximate_object_count();
        ctx->oids.alloc = ctx->approx_nr_objects / 32;
 
+       if (ctx->split && split_opts && ctx->oids.alloc > split_opts->max_commits)
+               ctx->oids.alloc = split_opts->max_commits;
+
        if (ctx->append) {
                prepare_commit_graph_one(ctx->r, ctx->obj_dir);
                if (ctx->r->objects->commit_graph)
@@ -1171,14 +1855,45 @@ int write_commit_graph(const char *obj_dir,
                goto cleanup;
        }
 
+       if (!ctx->commits.nr)
+               goto cleanup;
+
+       if (ctx->split) {
+               split_graph_merge_strategy(ctx);
+
+               merge_commit_graphs(ctx);
+       } else
+               ctx->num_commit_graphs_after = 1;
+
        compute_generation_numbers(ctx);
 
        res = write_commit_graph_file(ctx);
 
+       if (ctx->split)
+               mark_commit_graphs(ctx);
+
+       expire_commit_graphs(ctx);
+
 cleanup:
        free(ctx->graph_name);
        free(ctx->commits.list);
        free(ctx->oids.list);
+       free(ctx->obj_dir);
+
+       if (ctx->commit_graph_filenames_after) {
+               for (i = 0; i < ctx->num_commit_graphs_after; i++) {
+                       free(ctx->commit_graph_filenames_after[i]);
+                       free(ctx->commit_graph_hash_after[i]);
+               }
+
+               for (i = 0; i < ctx->num_commit_graphs_before; i++)
+                       free(ctx->commit_graph_filenames_before[i]);
+
+               free(ctx->commit_graph_filenames_after);
+               free(ctx->commit_graph_filenames_before);
+               free(ctx->commit_graph_hash_after);
+       }
+
        free(ctx);
 
        return res;
@@ -1201,7 +1916,7 @@ static void graph_report(const char *fmt, ...)
 #define GENERATION_ZERO_EXISTS 1
 #define GENERATION_NUMBER_EXISTS 2
 
-int verify_commit_graph(struct repository *r, struct commit_graph *g)
+int verify_commit_graph(struct repository *r, struct commit_graph *g, int flags)
 {
        uint32_t i, cur_fanout_pos = 0;
        struct object_id prev_oid, cur_oid, checksum;
@@ -1209,6 +1924,7 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g)
        struct hashfile *f;
        int devnull;
        struct progress *progress = NULL;
+       int local_error = 0;
 
        if (!g) {
                graph_report("no commit-graph file loaded");
@@ -1303,6 +2019,9 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g)
                                break;
                        }
 
+                       /* parse parent in case it is in a base graph */
+                       parse_commit_in_graph_one(r, g, graph_parents->item);
+
                        if (!oideq(&graph_parents->item->object.oid, &odb_parents->item->object.oid))
                                graph_report(_("commit-graph parent for %s is %s != %s"),
                                             oid_to_hex(&cur_oid),
@@ -1354,7 +2073,12 @@ int verify_commit_graph(struct repository *r, struct commit_graph *g)
        }
        stop_progress(&progress);
 
-       return verify_commit_graph_error;
+       local_error = verify_commit_graph_error;
+
+       if (!(flags & COMMIT_GRAPH_VERIFY_SHALLOW) && g->base_graph)
+               local_error |= verify_commit_graph(r, g->base_graph, flags);
+
+       return local_error;
 }
 
 void free_commit_graph(struct commit_graph *g)
@@ -1366,5 +2090,6 @@ void free_commit_graph(struct commit_graph *g)
                g->data = NULL;
                close(g->graph_fd);
        }
+       free(g->filename);
        free(g);
 }
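
Summing up the format changes in this file: the previously unused padding byte in the header now carries the number of base graphs, a new BASE chunk lists their hashes, and the trailing file checksum doubles as the layer's own name in the chain file. For a split layer that also needs the extra-edges chunk, the front of the file would look roughly as follows; the 12-byte size of a chunk lookup entry follows from the three uint32 values written per entry above:

	4-byte signature, 1-byte version, 1-byte hash version,
	1-byte chunk count (here 5), 1-byte base-graph count        (8-byte header)
	(5 + 1) x 12-byte chunk lookup entries:
	    OIDF, OIDL, CDAT, EDGE, BASE, plus a terminating zero entry
	chunk payloads, then the trailing hash of the file
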
index 390c7f696104fbe772151b91cb04889d0f682401..df9a3b20e4abc7d388acab1cc85546aafa8345a3 100644 (file)
@@ -47,15 +47,21 @@ struct commit_graph {
        unsigned char num_chunks;
        uint32_t num_commits;
        struct object_id oid;
+       char *filename;
+       const char *obj_dir;
+
+       uint32_t num_commits_in_base;
+       struct commit_graph *base_graph;
 
        const uint32_t *chunk_oid_fanout;
        const unsigned char *chunk_oid_lookup;
        const unsigned char *chunk_commit_data;
        const unsigned char *chunk_extra_edges;
+       const unsigned char *chunk_base_graphs;
 };
 
 struct commit_graph *load_commit_graph_one_fd_st(int fd, struct stat *st);
-
+struct commit_graph *read_commit_graph_one(struct repository *r, const char *obj_dir);
 struct commit_graph *parse_commit_graph(void *graph_map, int fd,
                                        size_t graph_size);
 
@@ -67,6 +73,13 @@ int generation_numbers_enabled(struct repository *r);
 
 #define COMMIT_GRAPH_APPEND     (1 << 0)
 #define COMMIT_GRAPH_PROGRESS   (1 << 1)
+#define COMMIT_GRAPH_SPLIT      (1 << 2)
+
+struct split_commit_graph_opts {
+       int size_multiple;
+       int max_commits;
+       timestamp_t expire_time;
+};
 
 /*
  * The write_commit_graph* methods return zero on success
@@ -74,13 +87,17 @@ int generation_numbers_enabled(struct repository *r);
  * is not compatible with the commit-graph feature, then the
  * methods will return 0 without writing a commit-graph.
  */
-int write_commit_graph_reachable(const char *obj_dir, unsigned int flags);
+int write_commit_graph_reachable(const char *obj_dir, unsigned int flags,
+                                const struct split_commit_graph_opts *split_opts);
 int write_commit_graph(const char *obj_dir,
                       struct string_list *pack_indexes,
                       struct string_list *commit_hex,
-                      unsigned int flags);
+                      unsigned int flags,
+                      const struct split_commit_graph_opts *split_opts);
+
+#define COMMIT_GRAPH_VERIFY_SHALLOW    (1 << 0)
 
-int verify_commit_graph(struct repository *r, struct commit_graph *g);
+int verify_commit_graph(struct repository *r, struct commit_graph *g, int flags);
 
 void close_commit_graph(struct raw_object_store *);
 void free_commit_graph(struct commit_graph *);
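
A hedged sketch of how a caller might drive the entry points declared above; the helper used to obtain the object directory and the concrete numbers are illustrative, not taken from this merge:

	/* sketch: write (or extend) a split commit-graph chain for the local repo */
	static void write_split_graph(void)
	{
		struct split_commit_graph_opts opts = {
			2,	/* size_multiple: merge layers not at least twice as big */
			64000,	/* max_commits: arbitrary cap on a single new layer */
			0	/* expire_time: no extra grace period for replaced layers */
		};

		if (write_commit_graph_reachable(get_object_directory(),
						 COMMIT_GRAPH_SPLIT | COMMIT_GRAPH_PROGRESS,
						 &opts))
			die("commit-graph write failed");
	}

Verification walks the whole chain by default; passing COMMIT_GRAPH_VERIFY_SHALLOW to verify_commit_graph() restricts the check to the layer it is given.
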
index 433838391fd95d6c89279561df63176f31d77b27..d9913463be184fc508610c082f2ec1827898d12d 100644 (file)
@@ -1407,7 +1407,7 @@ static pid_t mingw_spawnve_fd(const char *cmd, const char **argv, char **deltaen
        do_unset_environment_variables();
 
        /* Determine whether or not we are associated to a console */
-       cons = CreateFile("CONOUT$", GENERIC_WRITE,
+       cons = CreateFileW(L"CONOUT$", GENERIC_WRITE,
                        FILE_SHARE_WRITE, NULL, OPEN_EXISTING,
                        FILE_ATTRIBUTE_NORMAL, NULL);
        if (cons == INVALID_HANDLE_VALUE) {
@@ -1949,13 +1949,19 @@ struct passwd *getpwuid(int uid)
        static unsigned initialized;
        static char user_name[100];
        static struct passwd *p;
+       wchar_t buf[100];
        DWORD len;
 
        if (initialized)
                return p;
 
-       len = sizeof(user_name);
-       if (!GetUserName(user_name, &len)) {
+       len = ARRAY_SIZE(buf);
+       if (!GetUserNameW(buf, &len)) {
+               initialized = 1;
+               return NULL;
+       }
+
+       if (xwcstoutf(user_name, buf, sizeof(user_name)) < 0) {
                initialized = 1;
                return NULL;
        }
@@ -2327,6 +2333,30 @@ static void setup_windows_environment(void)
        /* simulate TERM to enable auto-color (see color.c) */
        if (!getenv("TERM"))
                setenv("TERM", "cygwin", 1);
+
+       /* calculate HOME if not set */
+       if (!getenv("HOME")) {
+               /*
+                * try $HOMEDRIVE$HOMEPATH - the home share may be a network
+                * location, thus also check if the path exists (i.e. is not
+                * disconnected)
+                */
+               if ((tmp = getenv("HOMEDRIVE"))) {
+                       struct strbuf buf = STRBUF_INIT;
+                       strbuf_addstr(&buf, tmp);
+                       if ((tmp = getenv("HOMEPATH"))) {
+                               strbuf_addstr(&buf, tmp);
+                               if (is_directory(buf.buf))
+                                       setenv("HOME", buf.buf, 1);
+                               else
+                                       tmp = NULL; /* use $USERPROFILE */
+                       }
+                       strbuf_release(&buf);
+               }
+               /* use $USERPROFILE if the home share is not available */
+               if (!tmp && (tmp = getenv("USERPROFILE")))
+                       setenv("HOME", tmp, 1);
+       }
 }
 
 #if !defined(_MSC_VER)
index 8b07edb0feca434cce2ff7766c30d7529febf996..0e95dd493c949122b22036bf5d205c413d4a0421 100644 (file)
@@ -150,7 +150,7 @@ win32_compute_revents (HANDLE h, int *p_sought)
       if (!once_only)
        {
          NtQueryInformationFile = (PNtQueryInformationFile)(void (*)(void))
-           GetProcAddress (GetModuleHandle ("ntdll.dll"),
+           GetProcAddress (GetModuleHandleW (L"ntdll.dll"),
                            "NtQueryInformationFile");
          once_only = TRUE;
        }
diff --git a/compat/win32/git.manifest b/compat/win32/git.manifest
new file mode 100644 (file)
index 0000000..771e3cc
--- /dev/null
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+       <assemblyIdentity type="win32" name="Git" version="0.0.0.1" />
+       <trustInfo xmlns="urn:schemas-microsoft-com:asm.v2">
+               <security>
+                       <requestedPrivileges>
+                               <requestedExecutionLevel level="asInvoker" uiAccess="false" />
+                       </requestedPrivileges>
+               </security>
+       </trustInfo>
+       <compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
+               <application>
+                       <!-- Windows Vista -->
+                       <supportedOS Id="{e2011457-1546-43c5-a5fe-008deee3d3f0}"/>
+                       <!-- Windows 7 -->
+                       <supportedOS Id="{35138b9a-5d96-4fbd-8e2d-a2440225f93a}"/>
+                       <!-- Windows 8 -->
+                       <supportedOS Id="{4a2f28e3-53b9-4441-ba9c-d69d4a4a6e38}"/>
+                       <!-- Windows 8.1 -->
+                       <supportedOS Id="{1f676c76-80e1-4239-95bb-83d0f6d0da78}"/>
+                       <!-- Windows 10 -->
+                       <supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
+               </application>
+       </compatibility>
+</assembly>
index efc0abcdac43e1c126d6d8148b8a03392e4ae911..cacd82c833a615a9daebbadca36d08647111a677 100644 (file)
@@ -608,7 +608,7 @@ int winansi_isatty(int fd)
 void winansi_init(void)
 {
        int con1, con2;
-       char name[32];
+       wchar_t name[32];
 
        /* check if either stdout or stderr is a console output screen buffer */
        con1 = is_console(1);
@@ -628,13 +628,15 @@ void winansi_init(void)
        }
 
        /* create a named pipe to communicate with the console thread */
-       xsnprintf(name, sizeof(name), "\\\\.\\pipe\\winansi%lu", GetCurrentProcessId());
-       hwrite = CreateNamedPipe(name, PIPE_ACCESS_OUTBOUND,
+       if (swprintf(name, ARRAY_SIZE(name) - 1, L"\\\\.\\pipe\\winansi%lu",
+                    GetCurrentProcessId()) < 0)
+               die("Could not initialize winansi pipe name");
+       hwrite = CreateNamedPipeW(name, PIPE_ACCESS_OUTBOUND,
                PIPE_TYPE_BYTE | PIPE_WAIT, 1, BUFFER_SIZE, 0, 0, NULL);
        if (hwrite == INVALID_HANDLE_VALUE)
                die_lasterr("CreateNamedPipe failed");
 
-       hread = CreateFile(name, GENERIC_READ, 0, NULL, OPEN_EXISTING, 0, NULL);
+       hread = CreateFileW(name, GENERIC_READ, 0, NULL, OPEN_EXISTING, 0, NULL);
        if (hread == INVALID_HANDLE_VALUE)
                die_lasterr("CreateFile for named pipe failed");
 
index 3fde48c64da61c6964ea5584aa5c27ec5dcbc389..48a6723222dfb7c4cedb49d4523faa6c3003a803 100644 (file)
@@ -655,7 +655,8 @@ else
                        BASIC_LDFLAGS += -Wl,--large-address-aware
                endif
                CC = gcc
-               COMPAT_CFLAGS += -D__USE_MINGW_ANSI_STDIO=0 -DDETECT_MSYS_TTY
+               COMPAT_CFLAGS += -D__USE_MINGW_ANSI_STDIO=0 -DDETECT_MSYS_TTY \
+                       -fstack-protector-strong
                EXTLIBS += -lntdll
                INSTALL = /bin/install
                NO_R_TO_GCC_LINKER = YesPlease
index 1ab481fed69b33b48bd95c282d2c0faff897123c..cd9b324afa5a33be7eced6a420061905d52c211f 100644 (file)
@@ -80,6 +80,7 @@ int check_connected(oid_iterate_fn fn, void *cb_data,
                argv_array_push(&rev_list.args, "--all");
        }
        argv_array_push(&rev_list.args, "--quiet");
+       argv_array_push(&rev_list.args, "--alternate-refs");
        if (opt->progress)
                argv_array_pushf(&rev_list.args, "--progress=%s",
                                 _("Checking connectivity"));
index 983e419d2b7eda8f191a878a395179dcc24eb949..1d510cd47bef49d918704a45e9592e0ffcb7ee0f 100644 (file)
@@ -286,6 +286,37 @@ __git_eread ()
        test -r "$1" && IFS=$'\r\n' read "$2" <"$1"
 }
 
+# Check whether a cherry-pick or revert is in progress.  If the user has
+# committed a conflict resolution with 'git commit' in the middle of a
+# sequence of picks or reverts, CHERRY_PICK_HEAD/REVERT_HEAD will no longer
+# exist, so we have to read the todo file instead.
+__git_sequencer_status ()
+{
+       local todo
+       if test -f "$g/CHERRY_PICK_HEAD"
+       then
+               r="|CHERRY-PICKING"
+               return 0;
+       elif test -f "$g/REVERT_HEAD"
+       then
+               r="|REVERTING"
+               return 0;
+       elif __git_eread "$g/sequencer/todo" todo
+       then
+               case "$todo" in
+               p[\ \   ]|pick[\ \      ]*)
+                       r="|CHERRY-PICKING"
+                       return 0
+               ;;
+               revert[\ \      ]*)
+                       r="|REVERTING"
+                       return 0
+               ;;
+               esac
+       fi
+       return 1
+}
+
 # __git_ps1 accepts 0 or 1 arguments (i.e., format string)
 # when called from PS1 using command substitution
 # in this mode it prints text to add to bash PS1 prompt (includes branch name)
@@ -417,10 +448,8 @@ __git_ps1 ()
                        fi
                elif [ -f "$g/MERGE_HEAD" ]; then
                        r="|MERGING"
-               elif [ -f "$g/CHERRY_PICK_HEAD" ]; then
-                       r="|CHERRY-PICKING"
-               elif [ -f "$g/REVERT_HEAD" ]; then
-                       r="|REVERTING"
+               elif __git_sequencer_status; then
+                       :
                elif [ -f "$g/BISECT_LOG" ]; then
                        r="|BISECTING"
                fi
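
(Editorial aside, illustrating the change above rather than extending it: if the user runs 'git commit' to record a conflict resolution in the middle of 'git cherry-pick A..B', CHERRY_PICK_HEAD is removed but .git/sequencer/todo still begins with a "pick" line, so __git_sequencer_status keeps |CHERRY-PICKING in the prompt where the old per-file checks would have shown nothing.)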
index 6dfdd6801c720f3b693ca6b09f8b7b3cf0f0c764..b44d6a467ef17f3d2f541475ab1f4ce968504a5e 100644 (file)
@@ -2410,7 +2410,8 @@ static void note_change_n(const char *p, struct branch *b, unsigned char *old_fa
                oidcpy(&commit_oid, &commit_oe->idx.oid);
        } else if (!get_oid(p, &commit_oid)) {
                unsigned long size;
-               char *buf = read_object_with_reference(&commit_oid,
+               char *buf = read_object_with_reference(the_repository,
+                                                      &commit_oid,
                                                       commit_type, &size,
                                                       &commit_oid);
                if (!buf || size < the_hash_algo->hexsz + 6)
@@ -2482,7 +2483,8 @@ static void parse_from_existing(struct branch *b)
                unsigned long size;
                char *buf;
 
-               buf = read_object_with_reference(&b->oid, commit_type, &size,
+               buf = read_object_with_reference(the_repository,
+                                                &b->oid, commit_type, &size,
                                                 &b->oid);
                parse_from_commit(b, buf, size);
                free(buf);
@@ -2560,7 +2562,8 @@ static struct hash_list *parse_merge(unsigned int *count)
                        oidcpy(&n->oid, &oe->idx.oid);
                } else if (!get_oid(from, &n->oid)) {
                        unsigned long size;
-                       char *buf = read_object_with_reference(&n->oid,
+                       char *buf = read_object_with_reference(the_repository,
+                                                              &n->oid,
                                                               commit_type,
                                                               &size, &n->oid);
                        if (!buf || size < the_hash_algo->hexsz + 6)
diff --git a/fsck.c b/fsck.c
index 117c4a978f410047b6ca4b939ca6c294d6163dfc..cdb7d8db03017e36811e2bf3803ff0a4ca032a3f 100644 (file)
--- a/fsck.c
+++ b/fsck.c
@@ -181,41 +181,6 @@ static int fsck_msg_type(enum fsck_msg_id msg_id,
        return msg_type;
 }
 
-static void init_skiplist(struct fsck_options *options, const char *path)
-{
-       FILE *fp;
-       struct strbuf sb = STRBUF_INIT;
-       struct object_id oid;
-
-       fp = fopen(path, "r");
-       if (!fp)
-               die("Could not open skip list: %s", path);
-       while (!strbuf_getline(&sb, fp)) {
-               const char *p;
-               const char *hash;
-
-               /*
-                * Allow trailing comments, leading whitespace
-                * (including before commits), and empty or whitespace
-                * only lines.
-                */
-               hash = strchr(sb.buf, '#');
-               if (hash)
-                       strbuf_setlen(&sb, hash - sb.buf);
-               strbuf_trim(&sb);
-               if (!sb.len)
-                       continue;
-
-               if (parse_oid_hex(sb.buf, &oid, &p) || *p != '\0')
-                       die("Invalid SHA-1: %s", sb.buf);
-               oidset_insert(&options->skiplist, &oid);
-       }
-       if (ferror(fp))
-               die_errno("Could not read '%s'", path);
-       fclose(fp);
-       strbuf_release(&sb);
-}
-
 static int parse_msg_type(const char *str)
 {
        if (!strcmp(str, "error"))
@@ -284,7 +249,7 @@ void fsck_set_msg_types(struct fsck_options *options, const char *values)
                if (!strcmp(buf, "skiplist")) {
                        if (equal == len)
                                die("skiplist requires a path");
-                       init_skiplist(options, buf + equal + 1);
+                       oidset_parse_file(&options->skiplist, buf + equal + 1);
                        buf += len + 1;
                        continue;
                }
index d4021d690c07237edefb0cf2869eb6795d227a54..3f2aca5c3b16d39375fab9b6203c67ae16209023 100644 (file)
--- a/gettext.c
+++ b/gettext.c
 #ifndef NO_GETTEXT
 #      include <locale.h>
 #      include <libintl.h>
-#      ifdef HAVE_LIBCHARSET_H
+#      ifdef GIT_WINDOWS_NATIVE
+
+static const char *locale_charset(void)
+{
+       const char *env = getenv("LC_ALL"), *dot;
+
+       if (!env || !*env)
+               env = getenv("LC_CTYPE");
+       if (!env || !*env)
+               env = getenv("LANG");
+
+       if (!env)
+               return "UTF-8";
+
+       dot = strchr(env, '.');
+       return !dot ? env : dot + 1;
+}
+
+#      elif defined HAVE_LIBCHARSET_H
 #              include <libcharset.h>
 #      else
 #              include <langinfo.h>
diff --git a/git.rc b/git.rc
index 49002e0d541f1ab080efd8b9b4732f52db89e984..cc3fdc6cc6cb83b084eebe2ad49f3c78c981789b 100644 (file)
--- a/git.rc
+++ b/git.rc
@@ -20,3 +20,5 @@ BEGIN
     VALUE "Translation", 0x409, 1200
   END
 END
+
+1 RT_MANIFEST "compat/win32/git.manifest"
index 0a17b21187b6620c3c5c82e53271d56bc0c6deb8..3aff1849e7d5c7dba5c14b2fa34f7af64064a42d 100644 (file)
@@ -496,12 +496,13 @@ static struct commit *check_single_commit(struct rev_info *revs)
        return (struct commit *) commit;
 }
 
-static void fill_blob_sha1(struct commit *commit, struct diff_filespec *spec)
+static void fill_blob_sha1(struct repository *r, struct commit *commit,
+                          struct diff_filespec *spec)
 {
        unsigned short mode;
        struct object_id oid;
 
-       if (get_tree_entry(&commit->object.oid, spec->path, &oid, &mode))
+       if (get_tree_entry(r, &commit->object.oid, spec->path, &oid, &mode))
                die("There is no path %s in the commit", spec->path);
        fill_filespec(spec, &oid, 1, mode);
 
@@ -585,7 +586,7 @@ parse_lines(struct repository *r, struct commit *commit,
                                        name_part);
 
                spec = alloc_filespec(full_name);
-               fill_blob_sha1(commit, spec);
+               fill_blob_sha1(r, commit, spec);
                fill_line_ends(r, spec, &lines, &ends);
                cb_data.spec = spec;
                cb_data.lines = lines;
index 9d1ec8d6b01e13b20b34c836130cd9557fdd1d80..f6c194c1cca6bd64489b5dc1e90e5525da25d873 100644 (file)
@@ -248,7 +248,8 @@ static int splice_tree(const struct object_id *oid1, const char *prefix,
  * other hand, it could cover tree one and we might need to pick a
  * subtree of it.
  */
-void shift_tree(const struct object_id *hash1,
+void shift_tree(struct repository *r,
+               const struct object_id *hash1,
                const struct object_id *hash2,
                struct object_id *shifted,
                int depth_limit)
@@ -290,7 +291,7 @@ void shift_tree(const struct object_id *hash1,
                if (!*del_prefix)
                        return;
 
-               if (get_tree_entry(hash2, del_prefix, shifted, &mode))
+               if (get_tree_entry(r, hash2, del_prefix, shifted, &mode))
                        die("cannot find path %s in tree %s",
                            del_prefix, oid_to_hex(hash2));
                return;
@@ -307,7 +308,8 @@ void shift_tree(const struct object_id *hash1,
  * Unfortunately we cannot fundamentally tell which one to
  * be prefixed, as recursive merge can work in either direction.
  */
-void shift_tree_by(const struct object_id *hash1,
+void shift_tree_by(struct repository *r,
+                  const struct object_id *hash1,
                   const struct object_id *hash2,
                   struct object_id *shifted,
                   const char *shift_prefix)
@@ -317,12 +319,12 @@ void shift_tree_by(const struct object_id *hash1,
        unsigned candidate = 0;
 
        /* Can hash2 be a tree at shift_prefix in tree hash1? */
-       if (!get_tree_entry(hash1, shift_prefix, &sub1, &mode1) &&
+       if (!get_tree_entry(r, hash1, shift_prefix, &sub1, &mode1) &&
            S_ISDIR(mode1))
                candidate |= 1;
 
        /* Can hash1 be a tree at shift_prefix in tree hash2? */
-       if (!get_tree_entry(hash2, shift_prefix, &sub2, &mode2) &&
+       if (!get_tree_entry(r, hash2, shift_prefix, &sub2, &mode2) &&
            S_ISDIR(mode2))
                candidate |= 2;
 
index d2e380b7ed845e6fb2cd5616754a3f12a11c7b18..12300131fc12b15fbc9514e90ca45d76113d7267 100644 (file)
@@ -153,9 +153,9 @@ static struct tree *shift_tree_object(struct repository *repo,
        struct object_id shifted;
 
        if (!*subtree_shift) {
-               shift_tree(&one->object.oid, &two->object.oid, &shifted, 0);
+               shift_tree(repo, &one->object.oid, &two->object.oid, &shifted, 0);
        } else {
-               shift_tree_by(&one->object.oid, &two->object.oid, &shifted,
+               shift_tree_by(repo, &one->object.oid, &two->object.oid, &shifted,
                              subtree_shift);
        }
        if (oideq(&two->object.oid, &shifted))
@@ -465,17 +465,18 @@ static void get_files_dirs(struct merge_options *opt, struct tree *tree)
 {
        struct pathspec match_all;
        memset(&match_all, 0, sizeof(match_all));
-       read_tree_recursive(the_repository, tree, "", 0, 0,
+       read_tree_recursive(opt->repo, tree, "", 0, 0,
                            &match_all, save_files_dirs, opt);
 }
 
-static int get_tree_entry_if_blob(const struct object_id *tree,
+static int get_tree_entry_if_blob(struct repository *r,
+                                 const struct object_id *tree,
                                  const char *path,
                                  struct diff_filespec *dfs)
 {
        int ret;
 
-       ret = get_tree_entry(tree, path, &dfs->oid, &dfs->mode);
+       ret = get_tree_entry(r, tree, path, &dfs->oid, &dfs->mode);
        if (S_ISDIR(dfs->mode)) {
                oidcpy(&dfs->oid, &null_oid);
                dfs->mode = 0;
@@ -487,15 +488,16 @@ static int get_tree_entry_if_blob(const struct object_id *tree,
  * Returns an index_entry instance which doesn't have to correspond to
  * a real cache entry in Git's index.
  */
-static struct stage_data *insert_stage_data(const char *path,
+static struct stage_data *insert_stage_data(struct repository *r,
+               const char *path,
                struct tree *o, struct tree *a, struct tree *b,
                struct string_list *entries)
 {
        struct string_list_item *item;
        struct stage_data *e = xcalloc(1, sizeof(struct stage_data));
-       get_tree_entry_if_blob(&o->object.oid, path, &e->stages[1]);
-       get_tree_entry_if_blob(&a->object.oid, path, &e->stages[2]);
-       get_tree_entry_if_blob(&b->object.oid, path, &e->stages[3]);
+       get_tree_entry_if_blob(r, &o->object.oid, path, &e->stages[1]);
+       get_tree_entry_if_blob(r, &a->object.oid, path, &e->stages[2]);
+       get_tree_entry_if_blob(r, &b->object.oid, path, &e->stages[3]);
        item = string_list_insert(entries, path);
        item->util = e;
        return e;
@@ -1900,12 +1902,14 @@ static struct diff_queue_struct *get_diffpairs(struct merge_options *opt,
        return ret;
 }
 
-static int tree_has_path(struct tree *tree, const char *path)
+static int tree_has_path(struct repository *r, struct tree *tree,
+                        const char *path)
 {
        struct object_id hashy;
        unsigned short mode_o;
 
-       return !get_tree_entry(&tree->object.oid, path,
+       return !get_tree_entry(r,
+                              &tree->object.oid, path,
                               &hashy, &mode_o);
 }
 
@@ -2056,7 +2060,7 @@ static char *handle_path_level_conflicts(struct merge_options *opt,
         */
        if (collision_ent->reported_already) {
                clean = 0;
-       } else if (tree_has_path(tree, new_path)) {
+       } else if (tree_has_path(opt->repo, tree, new_path)) {
                collision_ent->reported_already = 1;
                strbuf_add_separated_string_list(&collision_paths, ", ",
                                                 &collision_ent->source_files);
@@ -2134,7 +2138,7 @@ static void handle_directory_level_conflicts(struct merge_options *opt,
                        string_list_append(&remove_from_merge,
                                           merge_ent->dir)->util = merge_ent;
                        strbuf_release(&merge_ent->new_dir);
-               } else if (tree_has_path(head, head_ent->dir)) {
+               } else if (tree_has_path(opt->repo, head, head_ent->dir)) {
                        /* 2. This wasn't a directory rename after all */
                        string_list_append(&remove_from_head,
                                           head_ent->dir)->util = head_ent;
@@ -2148,7 +2152,7 @@ static void handle_directory_level_conflicts(struct merge_options *opt,
        hashmap_iter_init(dir_re_merge, &iter);
        while ((merge_ent = hashmap_iter_next(&iter))) {
                head_ent = dir_rename_find_entry(dir_re_head, merge_ent->dir);
-               if (tree_has_path(merge, merge_ent->dir)) {
+               if (tree_has_path(opt->repo, merge, merge_ent->dir)) {
                        /* 2. This wasn't a directory rename after all */
                        string_list_append(&remove_from_merge,
                                           merge_ent->dir)->util = merge_ent;
@@ -2477,7 +2481,7 @@ static void apply_directory_rename_modifications(struct merge_options *opt,
                if (pair->status == 'R')
                        re->dst_entry->processed = 1;
 
-               re->dst_entry = insert_stage_data(new_path,
+               re->dst_entry = insert_stage_data(opt->repo, new_path,
                                                  o_tree, a_tree, b_tree,
                                                  entries);
                item = string_list_insert(entries, new_path);
@@ -2500,7 +2504,8 @@ static void apply_directory_rename_modifications(struct merge_options *opt,
         * the various handle_rename_*() functions update the index
         * explicitly rather than relying on unpack_trees() to have done it.
         */
-       get_tree_entry(&tree->object.oid,
+       get_tree_entry(opt->repo,
+                      &tree->object.oid,
                       pair->two->path,
                       &re->dst_entry->stages[stage].oid,
                       &re->dst_entry->stages[stage].mode);
@@ -2585,14 +2590,16 @@ static struct string_list *get_renames(struct merge_options *opt,
                re->dir_rename_original_dest = NULL;
                item = string_list_lookup(entries, re->pair->one->path);
                if (!item)
-                       re->src_entry = insert_stage_data(re->pair->one->path,
+                       re->src_entry = insert_stage_data(opt->repo,
+                                       re->pair->one->path,
                                        o_tree, a_tree, b_tree, entries);
                else
                        re->src_entry = item->util;
 
                item = string_list_lookup(entries, re->pair->two->path);
                if (!item)
-                       re->dst_entry = insert_stage_data(re->pair->two->path,
+                       re->dst_entry = insert_stage_data(opt->repo,
+                                       re->pair->two->path,
                                        o_tree, a_tree, b_tree, entries);
                else
                        re->dst_entry = item->util;
diff --git a/midx.c b/midx.c
index e7e1fe4d65ac3be54154e44ff07cd6122011405c..d6496444206aad68a88eb45583e452343c719764 100644 (file)
--- a/midx.c
+++ b/midx.c
@@ -9,6 +9,7 @@
 #include "midx.h"
 #include "progress.h"
 #include "trace2.h"
+#include "run-command.h"
 
 #define MIDX_SIGNATURE 0x4d494458 /* "MIDX" */
 #define MIDX_VERSION 1
@@ -34,6 +35,8 @@
 #define MIDX_CHUNK_LARGE_OFFSET_WIDTH (sizeof(uint64_t))
 #define MIDX_LARGE_OFFSET_NEEDED 0x80000000
 
+#define PACK_EXPIRED UINT_MAX
+
 static char *get_midx_filename(const char *object_dir)
 {
        return xstrfmt("%s/pack/multi-pack-index", object_dir);
@@ -427,13 +430,24 @@ static size_t write_midx_header(struct hashfile *f,
        return MIDX_HEADER_SIZE;
 }
 
+struct pack_info {
+       uint32_t orig_pack_int_id;
+       char *pack_name;
+       struct packed_git *p;
+       unsigned expired : 1;
+};
+
+static int pack_info_compare(const void *_a, const void *_b)
+{
+       struct pack_info *a = (struct pack_info *)_a;
+       struct pack_info *b = (struct pack_info *)_b;
+       return strcmp(a->pack_name, b->pack_name);
+}
+
 struct pack_list {
-       struct packed_git **list;
-       char **names;
+       struct pack_info *info;
        uint32_t nr;
-       uint32_t alloc_list;
-       uint32_t alloc_names;
-       size_t pack_name_concat_len;
+       uint32_t alloc;
        struct multi_pack_index *m;
 };
 
@@ -446,67 +460,33 @@ static void add_pack_to_midx(const char *full_path, size_t full_path_len,
                if (packs->m && midx_contains_pack(packs->m, file_name))
                        return;
 
-               ALLOC_GROW(packs->list, packs->nr + 1, packs->alloc_list);
-               ALLOC_GROW(packs->names, packs->nr + 1, packs->alloc_names);
+               ALLOC_GROW(packs->info, packs->nr + 1, packs->alloc);
 
-               packs->list[packs->nr] = add_packed_git(full_path,
-                                                       full_path_len,
-                                                       0);
+               packs->info[packs->nr].p = add_packed_git(full_path,
+                                                         full_path_len,
+                                                         0);
 
-               if (!packs->list[packs->nr]) {
+               if (!packs->info[packs->nr].p) {
                        warning(_("failed to add packfile '%s'"),
                                full_path);
                        return;
                }
 
-               if (open_pack_index(packs->list[packs->nr])) {
+               if (open_pack_index(packs->info[packs->nr].p)) {
                        warning(_("failed to open pack-index '%s'"),
                                full_path);
-                       close_pack(packs->list[packs->nr]);
-                       FREE_AND_NULL(packs->list[packs->nr]);
+                       close_pack(packs->info[packs->nr].p);
+                       FREE_AND_NULL(packs->info[packs->nr].p);
                        return;
                }
 
-               packs->names[packs->nr] = xstrdup(file_name);
-               packs->pack_name_concat_len += strlen(file_name) + 1;
+               packs->info[packs->nr].pack_name = xstrdup(file_name);
+               packs->info[packs->nr].orig_pack_int_id = packs->nr;
+               packs->info[packs->nr].expired = 0;
                packs->nr++;
        }
 }
 
-struct pack_pair {
-       uint32_t pack_int_id;
-       char *pack_name;
-};
-
-static int pack_pair_compare(const void *_a, const void *_b)
-{
-       struct pack_pair *a = (struct pack_pair *)_a;
-       struct pack_pair *b = (struct pack_pair *)_b;
-       return strcmp(a->pack_name, b->pack_name);
-}
-
-static void sort_packs_by_name(char **pack_names, uint32_t nr_packs, uint32_t *perm)
-{
-       uint32_t i;
-       struct pack_pair *pairs;
-
-       ALLOC_ARRAY(pairs, nr_packs);
-
-       for (i = 0; i < nr_packs; i++) {
-               pairs[i].pack_int_id = i;
-               pairs[i].pack_name = pack_names[i];
-       }
-
-       QSORT(pairs, nr_packs, pack_pair_compare);
-
-       for (i = 0; i < nr_packs; i++) {
-               pack_names[i] = pairs[i].pack_name;
-               perm[pairs[i].pack_int_id] = i;
-       }
-
-       free(pairs);
-}
-
 struct pack_midx_entry {
        struct object_id oid;
        uint32_t pack_int_id;
@@ -532,7 +512,6 @@ static int midx_oid_compare(const void *_a, const void *_b)
 }
 
 static int nth_midxed_pack_midx_entry(struct multi_pack_index *m,
-                                     uint32_t *pack_perm,
                                      struct pack_midx_entry *e,
                                      uint32_t pos)
 {
@@ -540,7 +519,7 @@ static int nth_midxed_pack_midx_entry(struct multi_pack_index *m,
                return 1;
 
        nth_midxed_object_oid(&e->oid, m, pos);
-       e->pack_int_id = pack_perm[nth_midxed_pack_int_id(m, pos)];
+       e->pack_int_id = nth_midxed_pack_int_id(m, pos);
        e->offset = nth_midxed_offset(m, pos);
 
        /* consider objects in midx to be from "old" packs */
@@ -574,8 +553,7 @@ static void fill_pack_entry(uint32_t pack_int_id,
  * of a packfile containing the object).
  */
 static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
-                                                 struct packed_git **p,
-                                                 uint32_t *perm,
+                                                 struct pack_info *info,
                                                  uint32_t nr_packs,
                                                  uint32_t *nr_objects)
 {
@@ -586,7 +564,7 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
        uint32_t start_pack = m ? m->num_packs : 0;
 
        for (cur_pack = start_pack; cur_pack < nr_packs; cur_pack++)
-               total_objects += p[cur_pack]->num_objects;
+               total_objects += info[cur_pack].p->num_objects;
 
        /*
         * As we de-duplicate by fanout value, we expect the fanout
@@ -611,7 +589,7 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
 
                        for (cur_object = start; cur_object < end; cur_object++) {
                                ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout);
-                               nth_midxed_pack_midx_entry(m, perm,
+                               nth_midxed_pack_midx_entry(m,
                                                           &entries_by_fanout[nr_fanout],
                                                           cur_object);
                                nr_fanout++;
@@ -622,12 +600,12 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
                        uint32_t start = 0, end;
 
                        if (cur_fanout)
-                               start = get_pack_fanout(p[cur_pack], cur_fanout - 1);
-                       end = get_pack_fanout(p[cur_pack], cur_fanout);
+                               start = get_pack_fanout(info[cur_pack].p, cur_fanout - 1);
+                       end = get_pack_fanout(info[cur_pack].p, cur_fanout);
 
                        for (cur_object = start; cur_object < end; cur_object++) {
                                ALLOC_GROW(entries_by_fanout, nr_fanout + 1, alloc_fanout);
-                               fill_pack_entry(perm[cur_pack], p[cur_pack], cur_object, &entries_by_fanout[nr_fanout]);
+                               fill_pack_entry(cur_pack, info[cur_pack].p, cur_object, &entries_by_fanout[nr_fanout]);
                                nr_fanout++;
                        }
                }
@@ -656,7 +634,7 @@ static struct pack_midx_entry *get_sorted_entries(struct multi_pack_index *m,
 }
 
 static size_t write_midx_pack_names(struct hashfile *f,
-                                   char **pack_names,
+                                   struct pack_info *info,
                                    uint32_t num_packs)
 {
        uint32_t i;
@@ -664,14 +642,18 @@ static size_t write_midx_pack_names(struct hashfile *f,
        size_t written = 0;
 
        for (i = 0; i < num_packs; i++) {
-               size_t writelen = strlen(pack_names[i]) + 1;
+               size_t writelen;
+
+               if (info[i].expired)
+                       continue;
 
-               if (i && strcmp(pack_names[i], pack_names[i - 1]) <= 0)
+               if (i && strcmp(info[i].pack_name, info[i - 1].pack_name) <= 0)
                        BUG("incorrect pack-file order: %s before %s",
-                           pack_names[i - 1],
-                           pack_names[i]);
+                           info[i - 1].pack_name,
+                           info[i].pack_name);
 
-               hashwrite(f, pack_names[i], writelen);
+               writelen = strlen(info[i].pack_name) + 1;
+               hashwrite(f, info[i].pack_name, writelen);
                written += writelen;
        }
 
@@ -742,6 +724,7 @@ static size_t write_midx_oid_lookup(struct hashfile *f, unsigned char hash_len,
 }
 
 static size_t write_midx_object_offsets(struct hashfile *f, int large_offset_needed,
+                                       uint32_t *perm,
                                        struct pack_midx_entry *objects, uint32_t nr_objects)
 {
        struct pack_midx_entry *list = objects;
@@ -751,7 +734,12 @@ static size_t write_midx_object_offsets(struct hashfile *f, int large_offset_nee
        for (i = 0; i < nr_objects; i++) {
                struct pack_midx_entry *obj = list++;
 
-               hashwrite_be32(f, obj->pack_int_id);
+               if (perm[obj->pack_int_id] == PACK_EXPIRED)
+                       BUG("object %s is in an expired pack with int-id %d",
+                           oid_to_hex(&obj->oid),
+                           obj->pack_int_id);
+
+               hashwrite_be32(f, perm[obj->pack_int_id]);
 
                if (large_offset_needed && obj->offset >> 31)
                        hashwrite_be32(f, MIDX_LARGE_OFFSET_NEEDED | nr_large_offset++);
@@ -797,7 +785,8 @@ static size_t write_midx_large_offsets(struct hashfile *f, uint32_t nr_large_off
        return written;
 }
 
-int write_midx_file(const char *object_dir)
+static int write_midx_internal(const char *object_dir, struct multi_pack_index *m,
+                              struct string_list *packs_to_drop)
 {
        unsigned char cur_chunk, num_chunks = 0;
        char *midx_name;
@@ -812,6 +801,9 @@ int write_midx_file(const char *object_dir)
        uint32_t nr_entries, num_large_offsets = 0;
        struct pack_midx_entry *entries = NULL;
        int large_offsets_needed = 0;
+       int pack_name_concat_len = 0;
+       int dropped_packs = 0;
+       int result = 0;
 
        midx_name = get_midx_filename(object_dir);
        if (safe_create_leading_directories(midx_name)) {
@@ -820,42 +812,34 @@ int write_midx_file(const char *object_dir)
                          midx_name);
        }
 
-       packs.m = load_multi_pack_index(object_dir, 1);
+       if (m)
+               packs.m = m;
+       else
+               packs.m = load_multi_pack_index(object_dir, 1);
 
        packs.nr = 0;
-       packs.alloc_list = packs.m ? packs.m->num_packs : 16;
-       packs.alloc_names = packs.alloc_list;
-       packs.list = NULL;
-       packs.names = NULL;
-       packs.pack_name_concat_len = 0;
-       ALLOC_ARRAY(packs.list, packs.alloc_list);
-       ALLOC_ARRAY(packs.names, packs.alloc_names);
+       packs.alloc = packs.m ? packs.m->num_packs : 16;
+       packs.info = NULL;
+       ALLOC_ARRAY(packs.info, packs.alloc);
 
        if (packs.m) {
                for (i = 0; i < packs.m->num_packs; i++) {
-                       ALLOC_GROW(packs.list, packs.nr + 1, packs.alloc_list);
-                       ALLOC_GROW(packs.names, packs.nr + 1, packs.alloc_names);
+                       ALLOC_GROW(packs.info, packs.nr + 1, packs.alloc);
 
-                       packs.list[packs.nr] = NULL;
-                       packs.names[packs.nr] = xstrdup(packs.m->pack_names[i]);
-                       packs.pack_name_concat_len += strlen(packs.names[packs.nr]) + 1;
+                       packs.info[packs.nr].orig_pack_int_id = i;
+                       packs.info[packs.nr].pack_name = xstrdup(packs.m->pack_names[i]);
+                       packs.info[packs.nr].p = NULL;
+                       packs.info[packs.nr].expired = 0;
                        packs.nr++;
                }
        }
 
        for_each_file_in_pack_dir(object_dir, add_pack_to_midx, &packs);
 
-       if (packs.m && packs.nr == packs.m->num_packs)
+       if (packs.m && packs.nr == packs.m->num_packs && !packs_to_drop)
                goto cleanup;
 
-       if (packs.pack_name_concat_len % MIDX_CHUNK_ALIGNMENT)
-               packs.pack_name_concat_len += MIDX_CHUNK_ALIGNMENT -
-                                             (packs.pack_name_concat_len % MIDX_CHUNK_ALIGNMENT);
-
-       ALLOC_ARRAY(pack_perm, packs.nr);
-       sort_packs_by_name(packs.names, packs.nr, pack_perm);
-
-       entries = get_sorted_entries(packs.m, packs.list, pack_perm, packs.nr, &nr_entries);
+       entries = get_sorted_entries(packs.m, packs.info, packs.nr, &nr_entries);
 
        for (i = 0; i < nr_entries; i++) {
                if (entries[i].offset > 0x7fffffff)
@@ -864,6 +848,61 @@ int write_midx_file(const char *object_dir)
                        large_offsets_needed = 1;
        }
 
+       QSORT(packs.info, packs.nr, pack_info_compare);
+
+       if (packs_to_drop && packs_to_drop->nr) {
+               int drop_index = 0;
+               int missing_drops = 0;
+
+               for (i = 0; i < packs.nr && drop_index < packs_to_drop->nr; i++) {
+                       int cmp = strcmp(packs.info[i].pack_name,
+                                        packs_to_drop->items[drop_index].string);
+
+                       if (!cmp) {
+                               drop_index++;
+                               packs.info[i].expired = 1;
+                       } else if (cmp > 0) {
+                               error(_("did not see pack-file %s to drop"),
+                                     packs_to_drop->items[drop_index].string);
+                               drop_index++;
+                               missing_drops++;
+                               i--;
+                       } else {
+                               packs.info[i].expired = 0;
+                       }
+               }
+
+               if (missing_drops) {
+                       result = 1;
+                       goto cleanup;
+               }
+       }
+
+       /*
+        * pack_perm stores a permutation between pack-int-ids from the
+        * previous multi-pack-index to the new one we are writing:
+        *
+        * pack_perm[old_id] = new_id
+        */
+       ALLOC_ARRAY(pack_perm, packs.nr);
+       for (i = 0; i < packs.nr; i++) {
+               if (packs.info[i].expired) {
+                       dropped_packs++;
+                       pack_perm[packs.info[i].orig_pack_int_id] = PACK_EXPIRED;
+               } else {
+                       pack_perm[packs.info[i].orig_pack_int_id] = i - dropped_packs;
+               }
+       }
+
+       for (i = 0; i < packs.nr; i++) {
+               if (!packs.info[i].expired)
+                       pack_name_concat_len += strlen(packs.info[i].pack_name) + 1;
+       }
+
+       if (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT)
+               pack_name_concat_len += MIDX_CHUNK_ALIGNMENT -
+                                       (pack_name_concat_len % MIDX_CHUNK_ALIGNMENT);
+
        hold_lock_file_for_update(&lk, midx_name, LOCK_DIE_ON_ERROR);
        f = hashfd(lk.tempfile->fd, lk.tempfile->filename.buf);
        FREE_AND_NULL(midx_name);
@@ -874,14 +913,14 @@ int write_midx_file(const char *object_dir)
        cur_chunk = 0;
        num_chunks = large_offsets_needed ? 5 : 4;
 
-       written = write_midx_header(f, num_chunks, packs.nr);
+       written = write_midx_header(f, num_chunks, packs.nr - dropped_packs);
 
        chunk_ids[cur_chunk] = MIDX_CHUNKID_PACKNAMES;
        chunk_offsets[cur_chunk] = written + (num_chunks + 1) * MIDX_CHUNKLOOKUP_WIDTH;
 
        cur_chunk++;
        chunk_ids[cur_chunk] = MIDX_CHUNKID_OIDFANOUT;
-       chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + packs.pack_name_concat_len;
+       chunk_offsets[cur_chunk] = chunk_offsets[cur_chunk - 1] + pack_name_concat_len;
 
        cur_chunk++;
        chunk_ids[cur_chunk] = MIDX_CHUNKID_OIDLOOKUP;
@@ -929,7 +968,7 @@ int write_midx_file(const char *object_dir)
 
                switch (chunk_ids[i]) {
                        case MIDX_CHUNKID_PACKNAMES:
-                               written += write_midx_pack_names(f, packs.names, packs.nr);
+                               written += write_midx_pack_names(f, packs.info, packs.nr);
                                break;
 
                        case MIDX_CHUNKID_OIDFANOUT:
@@ -941,7 +980,7 @@ int write_midx_file(const char *object_dir)
                                break;
 
                        case MIDX_CHUNKID_OBJECTOFFSETS:
-                               written += write_midx_object_offsets(f, large_offsets_needed, entries, nr_entries);
+                               written += write_midx_object_offsets(f, large_offsets_needed, pack_perm, entries, nr_entries);
                                break;
 
                        case MIDX_CHUNKID_LARGEOFFSETS:
@@ -964,19 +1003,23 @@ int write_midx_file(const char *object_dir)
 
 cleanup:
        for (i = 0; i < packs.nr; i++) {
-               if (packs.list[i]) {
-                       close_pack(packs.list[i]);
-                       free(packs.list[i]);
+               if (packs.info[i].p) {
+                       close_pack(packs.info[i].p);
+                       free(packs.info[i].p);
                }
-               free(packs.names[i]);
+               free(packs.info[i].pack_name);
        }
 
-       free(packs.list);
-       free(packs.names);
+       free(packs.info);
        free(entries);
        free(pack_perm);
        free(midx_name);
-       return 0;
+       return result;
+}
+
+int write_midx_file(const char *object_dir)
+{
+       return write_midx_internal(object_dir, NULL, NULL);
 }
 
 void clear_midx_file(struct repository *r)
@@ -1140,3 +1183,200 @@ int verify_midx_file(struct repository *r, const char *object_dir)
 
        return verify_midx_error;
 }
+
+int expire_midx_packs(struct repository *r, const char *object_dir)
+{
+       uint32_t i, *count, result = 0;
+       struct string_list packs_to_drop = STRING_LIST_INIT_DUP;
+       struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
+
+       if (!m)
+               return 0;
+
+       count = xcalloc(m->num_packs, sizeof(uint32_t));
+       for (i = 0; i < m->num_objects; i++) {
+               int pack_int_id = nth_midxed_pack_int_id(m, i);
+               count[pack_int_id]++;
+       }
+
+       for (i = 0; i < m->num_packs; i++) {
+               char *pack_name;
+
+               if (count[i])
+                       continue;
+
+               if (prepare_midx_pack(r, m, i))
+                       continue;
+
+               if (m->packs[i]->pack_keep)
+                       continue;
+
+               pack_name = xstrdup(m->packs[i]->pack_name);
+               close_pack(m->packs[i]);
+
+               string_list_insert(&packs_to_drop, m->pack_names[i]);
+               unlink_pack_path(pack_name, 0);
+               free(pack_name);
+       }
+
+       free(count);
+
+       if (packs_to_drop.nr)
+               result = write_midx_internal(object_dir, m, &packs_to_drop);
+
+       string_list_clear(&packs_to_drop, 0);
+       return result;
+}
+
+struct repack_info {
+       timestamp_t mtime;
+       uint32_t referenced_objects;
+       uint32_t pack_int_id;
+};
+
+static int compare_by_mtime(const void *a_, const void *b_)
+{
+       const struct repack_info *a, *b;
+
+       a = (const struct repack_info *)a_;
+       b = (const struct repack_info *)b_;
+
+       if (a->mtime < b->mtime)
+               return -1;
+       if (a->mtime > b->mtime)
+               return 1;
+       return 0;
+}
+
+static int fill_included_packs_all(struct multi_pack_index *m,
+                                  unsigned char *include_pack)
+{
+       uint32_t i;
+
+       for (i = 0; i < m->num_packs; i++)
+               include_pack[i] = 1;
+
+       return m->num_packs < 2;
+}
+
+static int fill_included_packs_batch(struct repository *r,
+                                    struct multi_pack_index *m,
+                                    unsigned char *include_pack,
+                                    size_t batch_size)
+{
+       uint32_t i, packs_to_repack;
+       size_t total_size;
+       struct repack_info *pack_info = xcalloc(m->num_packs, sizeof(struct repack_info));
+
+       for (i = 0; i < m->num_packs; i++) {
+               pack_info[i].pack_int_id = i;
+
+               if (prepare_midx_pack(r, m, i))
+                       continue;
+
+               pack_info[i].mtime = m->packs[i]->mtime;
+       }
+
+       for (i = 0; batch_size && i < m->num_objects; i++) {
+               uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);
+               pack_info[pack_int_id].referenced_objects++;
+       }
+
+       QSORT(pack_info, m->num_packs, compare_by_mtime);
+
+       total_size = 0;
+       packs_to_repack = 0;
+       for (i = 0; total_size < batch_size && i < m->num_packs; i++) {
+               int pack_int_id = pack_info[i].pack_int_id;
+               struct packed_git *p = m->packs[pack_int_id];
+               size_t expected_size;
+
+               if (!p)
+                       continue;
+               if (open_pack_index(p) || !p->num_objects)
+                       continue;
+
+               expected_size = (size_t)(p->pack_size
+                                        * pack_info[i].referenced_objects);
+               expected_size /= p->num_objects;
+
+               if (expected_size >= batch_size)
+                       continue;
+
+               packs_to_repack++;
+               total_size += expected_size;
+               include_pack[pack_int_id] = 1;
+       }
+
+       free(pack_info);
+
+       if (total_size < batch_size || packs_to_repack < 2)
+               return 1;
+
+       return 0;
+}
+
+int midx_repack(struct repository *r, const char *object_dir, size_t batch_size)
+{
+       int result = 0;
+       uint32_t i;
+       unsigned char *include_pack;
+       struct child_process cmd = CHILD_PROCESS_INIT;
+       struct strbuf base_name = STRBUF_INIT;
+       struct multi_pack_index *m = load_multi_pack_index(object_dir, 1);
+
+       if (!m)
+               return 0;
+
+       include_pack = xcalloc(m->num_packs, sizeof(unsigned char));
+
+       if (batch_size) {
+               if (fill_included_packs_batch(r, m, include_pack, batch_size))
+                       goto cleanup;
+       } else if (fill_included_packs_all(m, include_pack))
+               goto cleanup;
+
+       argv_array_push(&cmd.args, "pack-objects");
+
+       strbuf_addstr(&base_name, object_dir);
+       strbuf_addstr(&base_name, "/pack/pack");
+       argv_array_push(&cmd.args, base_name.buf);
+       strbuf_release(&base_name);
+
+       cmd.git_cmd = 1;
+       cmd.in = cmd.out = -1;
+
+       if (start_command(&cmd)) {
+               error(_("could not start pack-objects"));
+               result = 1;
+               goto cleanup;
+       }
+
+       for (i = 0; i < m->num_objects; i++) {
+               struct object_id oid;
+               uint32_t pack_int_id = nth_midxed_pack_int_id(m, i);
+
+               if (!include_pack[pack_int_id])
+                       continue;
+
+               nth_midxed_object_oid(&oid, m, i);
+               xwrite(cmd.in, oid_to_hex(&oid), the_hash_algo->hexsz);
+               xwrite(cmd.in, "\n", 1);
+       }
+       close(cmd.in);
+
+       if (finish_command(&cmd)) {
+               error(_("could not finish pack-objects"));
+               result = 1;
+               goto cleanup;
+       }
+
+       result = write_midx_internal(object_dir, m, NULL);
+       m = NULL;
+
+cleanup:
+       if (m)
+               close_midx(m);
+       free(include_pack);
+       return result;
+}
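
To make the new permutation concrete (an illustrative case, not taken from the patch): with three packs whose original int-ids 0, 1 and 2 happen to already be in name order, expiring the pack with int-id 1 gives pack_perm = {0, PACK_EXPIRED, 1}. write_midx_object_offsets() then translates every stored pack-int-id through this table and calls BUG() if an object still points at an expired pack.

For orientation, a minimal sketch of how the two new entry points defined above might be driven (illustration only; the real callers are the 'expire' and 'repack' subcommands of git multi-pack-index, and the 64 MiB batch size is made up). It assumes compilation inside git's own source tree:

	#include "cache.h"
	#include "repository.h"
	#include "midx.h"

	/*
	 * Drop packs that no longer contribute any object, then roll small
	 * packs into a new pack of roughly 64 MiB and rewrite the
	 * multi-pack-index.
	 */
	static int maintain_midx(struct repository *r)
	{
		const char *object_dir = get_object_directory();

		if (expire_midx_packs(r, object_dir))
			return -1;
		return midx_repack(r, object_dir, (size_t)64 * 1024 * 1024);
	}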
diff --git a/midx.h b/midx.h
index 3eb29731f2b1e8e96a116a683fd8baad1020a46b..f0ae656b5d767644d60ef7b101350ca29cde7585 100644 (file)
--- a/midx.h
+++ b/midx.h
@@ -50,6 +50,8 @@ int prepare_multi_pack_index_one(struct repository *r, const char *object_dir, i
 int write_midx_file(const char *object_dir);
 void clear_midx_file(struct repository *r);
 int verify_midx_file(struct repository *r, const char *object_dir);
+int expire_midx_packs(struct repository *r, const char *object_dir);
+int midx_repack(struct repository *r, const char *object_dir, size_t batch_size);
 
 void close_midx(struct multi_pack_index *m);
 
diff --git a/notes.c b/notes.c
index 532ec37865768d05a31606f495b4f0c1645ea757..75c028b3005a8af8f39e9bceb7ab6e4b3b5dff83 100644 (file)
--- a/notes.c
+++ b/notes.c
@@ -397,7 +397,7 @@ static void load_subtree(struct notes_tree *t, struct leaf_node *subtree,
        struct name_entry entry;
        const unsigned hashsz = the_hash_algo->rawsz;
 
-       buf = fill_tree_descriptor(&desc, &subtree->val_oid);
+       buf = fill_tree_descriptor(the_repository, &desc, &subtree->val_oid);
        if (!buf)
                die("Could not read %s for notes-index",
                     oid_to_hex(&subtree->val_oid));
@@ -1015,7 +1015,7 @@ void init_notes(struct notes_tree *t, const char *notes_ref,
                return;
        if (flags & NOTES_INIT_WRITABLE && read_ref(notes_ref, &object_oid))
                die("Cannot use notes ref %s", notes_ref);
-       if (get_tree_entry(&object_oid, "", &oid, &mode))
+       if (get_tree_entry(the_repository, &object_oid, "", &oid, &mode))
                die("Failed to read notes tree referenced by %s (%s)",
                    notes_ref, oid_to_hex(&object_oid));
 
index 49f56ab8d9608919808ecebe71b9deb30c53980b..7f7b3cdd806b756eefb86c7d116ba9a9a6c62558 100644 (file)
@@ -33,6 +33,8 @@ void prepare_alt_odb(struct repository *r);
 char *compute_alternate_path(const char *path, struct strbuf *err);
 typedef int alt_odb_fn(struct object_directory *, void *);
 int foreach_alt_odb(alt_odb_fn, void*);
+typedef void alternate_ref_fn(const struct object_id *oid, void *);
+void for_each_alternate_ref(alternate_ref_fn, void *);
 
 /*
  * Add the directory to the on-disk alternates file; the new entry will also
index b0841a0f5870bafdcaf3482ed725e43ddaf5d461..6d6e840d037657721a5aa975e2567c8415547d93 100644 (file)
--- a/oidmap.c
+++ b/oidmap.c
@@ -12,13 +12,6 @@ static int oidmap_neq(const void *hashmap_cmp_fn_data,
                      &((const struct oidmap_entry *) entry_or_key)->oid);
 }
 
-static int hash(const struct object_id *oid)
-{
-       int hash;
-       memcpy(&hash, oid->hash, sizeof(hash));
-       return hash;
-}
-
 void oidmap_init(struct oidmap *map, size_t initial_size)
 {
        hashmap_init(&map->map, oidmap_neq, NULL, initial_size);
@@ -36,7 +29,7 @@ void *oidmap_get(const struct oidmap *map, const struct object_id *key)
        if (!map->map.cmpfn)
                return NULL;
 
-       return hashmap_get_from_hash(&map->map, hash(key), key);
+       return hashmap_get_from_hash(&map->map, oidhash(key), key);
 }
 
 void *oidmap_remove(struct oidmap *map, const struct object_id *key)
@@ -46,7 +39,7 @@ void *oidmap_remove(struct oidmap *map, const struct object_id *key)
        if (!map->map.cmpfn)
                oidmap_init(map, 0);
 
-       hashmap_entry_init(&entry, hash(key));
+       hashmap_entry_init(&entry, oidhash(key));
        return hashmap_remove(&map->map, &entry, key);
 }
 
@@ -57,6 +50,6 @@ void *oidmap_put(struct oidmap *map, void *entry)
        if (!map->map.cmpfn)
                oidmap_init(map, 0);
 
-       hashmap_entry_init(&to_put->internal_entry, hash(&to_put->oid));
+       hashmap_entry_init(&to_put->internal_entry, oidhash(&to_put->oid));
        return hashmap_put(&map->map, to_put);
 }
index 8bdecb13de1e0d0f24bfccb27dd46cdf64b27cff..f63ce818f67766378485ab16075dd11e87c00ca0 100644 (file)
--- a/oidset.c
+++ b/oidset.c
@@ -35,3 +35,38 @@ void oidset_clear(struct oidset *set)
        kh_release_oid_set(&set->set);
        oidset_init(set, 0);
 }
+
+void oidset_parse_file(struct oidset *set, const char *path)
+{
+       FILE *fp;
+       struct strbuf sb = STRBUF_INIT;
+       struct object_id oid;
+
+       fp = fopen(path, "r");
+       if (!fp)
+               die("could not open object name list: %s", path);
+       while (!strbuf_getline(&sb, fp)) {
+               const char *p;
+               const char *name;
+
+               /*
+                * Allow trailing comments, leading whitespace
+                * (including before commits), and empty or whitespace
+                * only lines.
+                */
+               name = strchr(sb.buf, '#');
+               if (name)
+                       strbuf_setlen(&sb, name - sb.buf);
+               strbuf_trim(&sb);
+               if (!sb.len)
+                       continue;
+
+               if (parse_oid_hex(sb.buf, &oid, &p) || *p != '\0')
+                       die("invalid object name: %s", sb.buf);
+               oidset_insert(set, &oid);
+       }
+       if (ferror(fp))
+               die_errno("Could not read '%s'", path);
+       fclose(fp);
+       strbuf_release(&sb);
+}
index 505fad578bec1691fde9f28342d4c476c60dd50c..5346563b0bccb602b8de745d1308793e8995a5e5 100644 (file)
--- a/oidset.h
+++ b/oidset.h
@@ -61,6 +61,14 @@ int oidset_remove(struct oidset *set, const struct object_id *oid);
  */
 void oidset_clear(struct oidset *set);
 
+/**
+ * Add the contents of the file 'path' to an initialized oidset.  Each line is
+ * an unabbreviated object name.  Comments begin with '#', and trailing comments
+ * are allowed.  Leading whitespace and empty or white-space only lines are
+ * ignored.
+ */
+void oidset_parse_file(struct oidset *set, const char *path);
+
 struct oidset_iter {
        kh_oid_set_t *set;
        khiter_t iter;
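
As an aside for readers following the refactor: the helper that replaces fsck's private init_skiplist() is generic, so any caller holding a file of object names can reuse it. A minimal sketch (assumptions: built inside git's own tree, and the file name is hypothetical):

	#include "cache.h"
	#include "oidset.h"

	/* Load a skipList-style file and report whether 'oid' is listed in it. */
	static int oid_is_skipped(const struct object_id *oid)
	{
		struct oidset skip = OIDSET_INIT;
		int found;

		/* dies if the file is unreadable or contains a malformed line */
		oidset_parse_file(&skip, ".git/skip-oids.txt");
		found = oidset_contains(&skip, oid);
		oidset_clear(&skip);
		return found;
	}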
index c0d83fdfed973de8574224e46fb0afd4be0a98c9..fc43a6c52c75a32548c20bbc4a5aa7d0cc3ddd0d 100644 (file)
@@ -355,6 +355,34 @@ void close_object_store(struct raw_object_store *o)
        close_commit_graph(o);
 }
 
+void unlink_pack_path(const char *pack_name, int force_delete)
+{
+       static const char *exts[] = {".pack", ".idx", ".keep", ".bitmap", ".promisor"};
+       int i;
+       struct strbuf buf = STRBUF_INIT;
+       size_t plen;
+
+       strbuf_addstr(&buf, pack_name);
+       strip_suffix_mem(buf.buf, &buf.len, ".pack");
+       plen = buf.len;
+
+       if (!force_delete) {
+               strbuf_addstr(&buf, ".keep");
+               if (!access(buf.buf, F_OK)) {
+                       strbuf_release(&buf);
+                       return;
+               }
+       }
+
+       for (i = 0; i < ARRAY_SIZE(exts); i++) {
+               strbuf_setlen(&buf, plen);
+               strbuf_addstr(&buf, exts[i]);
+               unlink(buf.buf);
+       }
+
+       strbuf_release(&buf);
+}
+
 /*
  * The LRU pack is the one with the oldest MRU window, preferring packs
  * with no used windows, or the oldest mtime if it has no windows allocated.
index 81e868d55a9b1f1aeaafa4925a72ae5c53af86e9..3e98910bdd191f45d3dd86ff0360f40060944705 100644 (file)
@@ -95,6 +95,13 @@ void unuse_pack(struct pack_window **);
 void clear_delta_base_cache(void);
 struct packed_git *add_packed_git(const char *path, size_t path_len, int local);
 
+/*
+ * Unlink the .pack and associated extension files.
+ * Does not unlink if 'force_delete' is false and the pack-file is
+ * marked as ".keep".
+ */
+extern void unlink_pack_path(const char *pack_name, int force_delete);
+
 /*
  * Make sure that a pointer access into an mmap'd index file is within bounds,
  * and can provide at least 8 bytes of data.
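
A small usage sketch for the newly exported helper (illustrative only; the path is hypothetical, and the real caller added by this series is expire_midx_packs() in midx.c). It unlinks the .pack together with its .idx, .keep, .bitmap and .promisor siblings, unless force_delete is 0 and a .keep file is present:

	#include "cache.h"
	#include "packfile.h"

	static void drop_pack_unless_kept(const char *pack_path)
	{
		/* no-op if a .keep file marks the pack as precious */
		unlink_pack_path(pack_path, 0);
	}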
index 095dcd0ddff2132a4dcfd59486ee4a5ddfa4684f..277db8afa26decf0d95a44663f477257213a48f6 100644 (file)
@@ -144,8 +144,7 @@ static void throughput_string(struct strbuf *buf, uint64_t total,
        strbuf_addstr(buf, ", ");
        strbuf_humanise_bytes(buf, total);
        strbuf_addstr(buf, " | ");
-       strbuf_humanise_bytes(buf, rate * 1024);
-       strbuf_addstr(buf, "/s");
+       strbuf_humanise_rate(buf, rate * 1024);
 }
 
 void display_throughput(struct progress *progress, uint64_t total)
index 1c1a2af880b60bbb7d47c74c1d233b45577cf288..f27cfc8c3e358fa27d7aec78c0b3c44c816402b3 100644 (file)
@@ -22,6 +22,7 @@
 #include "commit-reach.h"
 #include "worktree.h"
 #include "hashmap.h"
+#include "argv-array.h"
 
 static struct ref_msg {
        const char *gone;
@@ -1863,21 +1864,62 @@ static int filter_pattern_match(struct ref_filter *filter, const char *refname)
        return match_pattern(filter, refname);
 }
 
-/*
- * Find the longest prefix of pattern we can pass to
- * `for_each_fullref_in()`, namely the part of pattern preceding the
- * first glob character. (Note that `for_each_fullref_in()` is
- * perfectly happy working with a prefix that doesn't end at a
- * pathname component boundary.)
- */
-static void find_longest_prefix(struct strbuf *out, const char *pattern)
+static int qsort_strcmp(const void *va, const void *vb)
+{
+       const char *a = *(const char **)va;
+       const char *b = *(const char **)vb;
+
+       return strcmp(a, b);
+}
+
+static void find_longest_prefixes_1(struct string_list *out,
+                                 struct strbuf *prefix,
+                                 const char **patterns, size_t nr)
 {
-       const char *p;
+       size_t i;
+
+       for (i = 0; i < nr; i++) {
+               char c = patterns[i][prefix->len];
+               if (!c || is_glob_special(c)) {
+                       string_list_append(out, prefix->buf);
+                       return;
+               }
+       }
+
+       i = 0;
+       while (i < nr) {
+               size_t end;
+
+               /*
+                * Set "end" to the index of the element _after_ the last one
+                * in our group.
+                */
+               for (end = i + 1; end < nr; end++) {
+                       if (patterns[i][prefix->len] != patterns[end][prefix->len])
+                               break;
+               }
 
-       for (p = pattern; *p && !is_glob_special(*p); p++)
-               ;
+               strbuf_addch(prefix, patterns[i][prefix->len]);
+               find_longest_prefixes_1(out, prefix, patterns + i, end - i);
+               strbuf_setlen(prefix, prefix->len - 1);
 
-       strbuf_add(out, pattern, p - pattern);
+               i = end;
+       }
+}
+
+static void find_longest_prefixes(struct string_list *out,
+                                 const char **patterns)
+{
+       struct argv_array sorted = ARGV_ARRAY_INIT;
+       struct strbuf prefix = STRBUF_INIT;
+
+       argv_array_pushv(&sorted, patterns);
+       QSORT(sorted.argv, sorted.argc, qsort_strcmp);
+
+       find_longest_prefixes_1(out, &prefix, sorted.argv, sorted.argc);
+
+       argv_array_clear(&sorted);
+       strbuf_release(&prefix);
 }
 
 /*
@@ -1890,7 +1932,8 @@ static int for_each_fullref_in_pattern(struct ref_filter *filter,
                                       void *cb_data,
                                       int broken)
 {
-       struct strbuf prefix = STRBUF_INIT;
+       struct string_list prefixes = STRING_LIST_INIT_DUP;
+       struct string_list_item *prefix;
        int ret;
 
        if (!filter->match_as_path) {
@@ -1916,21 +1959,15 @@ static int for_each_fullref_in_pattern(struct ref_filter *filter,
                return for_each_fullref_in("", cb, cb_data, broken);
        }
 
-       if (filter->name_patterns[1]) {
-               /*
-                * multiple patterns; in theory this could still work as long
-                * as the patterns are disjoint. We'd just make multiple calls
-                * to for_each_ref(). But if they're not disjoint, we'd end up
-                * reporting the same ref multiple times. So let's punt on that
-                * for now.
-                */
-               return for_each_fullref_in("", cb, cb_data, broken);
-       }
+       find_longest_prefixes(&prefixes, filter->name_patterns);
 
-       find_longest_prefix(&prefix, filter->name_patterns[0]);
+       for_each_string_list_item(prefix, &prefixes) {
+               ret = for_each_fullref_in(prefix->string, cb, cb_data, broken);
+               if (ret)
+                       break;
+       }
 
-       ret = for_each_fullref_in(prefix.buf, cb, cb_data, broken);
-       strbuf_release(&prefix);
+       string_list_clear(&prefixes, 0);
        return ret;
 }
 
index 621feb9df716400f32d016e1d36fc368b6a884fb..07412297f0248aae886eeb77c3a1cab13c93039c 100644 (file)
@@ -1554,6 +1554,32 @@ void add_index_objects_to_pending(struct rev_info *revs, unsigned int flags)
        free_worktrees(worktrees);
 }
 
+struct add_alternate_refs_data {
+       struct rev_info *revs;
+       unsigned int flags;
+};
+
+static void add_one_alternate_ref(const struct object_id *oid,
+                                 void *vdata)
+{
+       const char *name = ".alternate";
+       struct add_alternate_refs_data *data = vdata;
+       struct object *obj;
+
+       obj = get_reference(data->revs, name, oid, data->flags);
+       add_rev_cmdline(data->revs, obj, name, REV_CMD_REV, data->flags);
+       add_pending_object(data->revs, obj, name);
+}
+
+static void add_alternate_refs_to_pending(struct rev_info *revs,
+                                         unsigned int flags)
+{
+       struct add_alternate_refs_data data;
+       data.revs = revs;
+       data.flags = flags;
+       for_each_alternate_ref(add_one_alternate_ref, &data);
+}
+
 static int add_parents_only(struct rev_info *revs, const char *arg_, int flags,
                            int exclude_parent)
 {
@@ -1956,6 +1982,7 @@ static int handle_revision_opt(struct rev_info *revs, int argc, const char **arg
            !strcmp(arg, "--no-walk") || !strcmp(arg, "--do-walk") ||
            !strcmp(arg, "--bisect") || starts_with(arg, "--glob=") ||
            !strcmp(arg, "--indexed-objects") ||
+           !strcmp(arg, "--alternate-refs") ||
            starts_with(arg, "--exclude=") ||
            starts_with(arg, "--branches=") || starts_with(arg, "--tags=") ||
            starts_with(arg, "--remotes=") || starts_with(arg, "--no-walk="))
@@ -2442,6 +2469,8 @@ static int handle_revision_pseudo_opt(const char *submodule,
                add_reflogs_to_pending(revs, *flags);
        } else if (!strcmp(arg, "--indexed-objects")) {
                add_index_objects_to_pending(revs, *flags);
+       } else if (!strcmp(arg, "--alternate-refs")) {
+               add_alternate_refs_to_pending(revs, *flags);
        } else if (!strcmp(arg, "--not")) {
                *flags ^= UNINTERESTING | BOTTOM;
        } else if (!strcmp(arg, "--no-walk")) {
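
(A hedged note on intent, inferred from the connected.c hunk earlier in this patch rather than stated here: appending --alternate-refs to the rev-list invocation in check_connected() adds the refs advertised by the alternate object store as additional tips, so a clone that borrows objects from a local reference repository does not have to re-walk history that the alternate already guarantees to be connected.)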
index cf262701e8e7df668b2c659bfb4806f0c7e90a9e..34ebf8ed94ad7d8df6773337d31ff7d9c2c84c4a 100644 (file)
@@ -2079,6 +2079,18 @@ const char *todo_item_get_arg(struct todo_list *todo_list,
        return todo_list->buf.buf + item->arg_offset;
 }
 
+static int is_command(enum todo_command command, const char **bol)
+{
+       const char *str = todo_command_info[command].str;
+       const char nick = todo_command_info[command].c;
+       const char *p = *bol + 1;
+
+       return skip_prefix(*bol, str, bol) ||
+               ((nick && **bol == nick) &&
+                (*p == ' ' || *p == '\t' || *p == '\n' || *p == '\r' || !*p) &&
+                (*bol = p));
+}
+
 static int parse_insn_line(struct repository *r, struct todo_item *item,
                           const char *buf, const char *bol, char *eol)
 {
@@ -2100,12 +2112,7 @@ static int parse_insn_line(struct repository *r, struct todo_item *item,
        }
 
        for (i = 0; i < TODO_COMMENT; i++)
-               if (skip_prefix(bol, todo_command_info[i].str, &bol)) {
-                       item->command = i;
-                       break;
-               } else if ((bol + 1 == eol || bol[1] == ' ') &&
-                          *bol == todo_command_info[i].c) {
-                       bol++;
+               if (is_command(i, &bol)) {
                        item->command = i;
                        break;
                }
@@ -2173,34 +2180,26 @@ static int parse_insn_line(struct repository *r, struct todo_item *item,
 
 int sequencer_get_last_command(struct repository *r, enum replay_action *action)
 {
-       struct todo_item item;
-       char *eol;
-       const char *todo_file;
+       const char *todo_file, *bol;
        struct strbuf buf = STRBUF_INIT;
-       int ret = -1;
+       int ret = 0;
 
        todo_file = git_path_todo_file();
        if (strbuf_read_file(&buf, todo_file, 0) < 0) {
-               if (errno == ENOENT)
+               if (errno == ENOENT || errno == ENOTDIR)
                        return -1;
                else
                        return error_errno("unable to open '%s'", todo_file);
        }
-       eol = strchrnul(buf.buf, '\n');
-       if (buf.buf != eol && eol[-1] == '\r')
-               eol--; /* strip Carriage Return */
-       if (parse_insn_line(r, &item, buf.buf, buf.buf, eol))
-               goto fail;
-       if (item.command == TODO_PICK)
+       bol = buf.buf + strspn(buf.buf, " \t\r\n");
+       if (is_command(TODO_PICK, &bol) && (*bol == ' ' || *bol == '\t'))
                *action = REPLAY_PICK;
-       else if (item.command == TODO_REVERT)
+       else if (is_command(TODO_REVERT, &bol) &&
+                (*bol == ' ' || *bol == '\t'))
                *action = REPLAY_REVERT;
        else
-               goto fail;
-
-       ret = 0;
+               ret = -1;
 
- fail:
        strbuf_release(&buf);
 
        return ret;
@@ -2655,15 +2654,41 @@ static int walk_revs_populate_todo(struct todo_list *todo_list,
        return 0;
 }
 
-static int create_seq_dir(void)
+static int create_seq_dir(struct repository *r)
 {
-       if (file_exists(git_path_seq_dir())) {
-               error(_("a cherry-pick or revert is already in progress"));
-               advise(_("try \"git cherry-pick (--continue | --quit | --abort)\""));
+       enum replay_action action;
+       const char *in_progress_error = NULL;
+       const char *in_progress_advice = NULL;
+       unsigned int advise_skip = file_exists(git_path_revert_head(r)) ||
+                               file_exists(git_path_cherry_pick_head(r));
+
+       if (!sequencer_get_last_command(r, &action)) {
+               switch (action) {
+               case REPLAY_REVERT:
+                       in_progress_error = _("revert is already in progress");
+                       in_progress_advice =
+                       _("try \"git revert (--continue | %s--abort | --quit)\"");
+                       break;
+               case REPLAY_PICK:
+                       in_progress_error = _("cherry-pick is already in progress");
+                       in_progress_advice =
+                       _("try \"git cherry-pick (--continue | %s--abort | --quit)\"");
+                       break;
+               default:
+                       BUG("unexpected action in create_seq_dir");
+               }
+       }
+       if (in_progress_error) {
+               error("%s", in_progress_error);
+               if (advice_sequencer_in_use)
+                       advise(in_progress_advice,
+                               advise_skip ? "--skip | " : "");
                return -1;
-       } else if (mkdir(git_path_seq_dir(), 0777) < 0)
+       }
+       if (mkdir(git_path_seq_dir(), 0777) < 0)
                return error_errno(_("could not create sequencer directory '%s'"),
                                   git_path_seq_dir());
+
        return 0;
 }
 
@@ -2714,15 +2739,20 @@ static int rollback_is_safe(void)
        return oideq(&actual_head, &expected_head);
 }
 
-static int reset_for_rollback(const struct object_id *oid)
+static int reset_merge(const struct object_id *oid)
 {
-       const char *argv[4];    /* reset --merge <arg> + NULL */
+       int ret;
+       struct argv_array argv = ARGV_ARRAY_INIT;
 
-       argv[0] = "reset";
-       argv[1] = "--merge";
-       argv[2] = oid_to_hex(oid);
-       argv[3] = NULL;
-       return run_command_v_opt(argv, RUN_GIT_CMD);
+       argv_array_pushl(&argv, "reset", "--merge", NULL);
+
+       if (!is_null_oid(oid))
+               argv_array_push(&argv, oid_to_hex(oid));
+
+       ret = run_command_v_opt(argv.argv, RUN_GIT_CMD);
+       argv_array_clear(&argv);
+
+       return ret;
 }
 
 static int rollback_single_pick(struct repository *r)
@@ -2736,7 +2766,16 @@ static int rollback_single_pick(struct repository *r)
                return error(_("cannot resolve HEAD"));
        if (is_null_oid(&head_oid))
                return error(_("cannot abort from a branch yet to be born"));
-       return reset_for_rollback(&head_oid);
+       return reset_merge(&head_oid);
+}
+
+static int skip_single_pick(void)
+{
+       struct object_id head;
+
+       if (read_ref_full("HEAD", 0, &head, NULL))
+               return error(_("cannot resolve HEAD"));
+       return reset_merge(&head);
 }
 
 int sequencer_rollback(struct repository *r, struct replay_opts *opts)
@@ -2779,7 +2818,7 @@ int sequencer_rollback(struct repository *r, struct replay_opts *opts)
                warning(_("You seem to have moved HEAD. "
                          "Not rewinding, check your HEAD!"));
        } else
-       if (reset_for_rollback(&oid))
+       if (reset_merge(&oid))
                goto fail;
        strbuf_release(&buf);
        return sequencer_remove_state(opts);
@@ -2788,6 +2827,70 @@ int sequencer_rollback(struct repository *r, struct replay_opts *opts)
        return -1;
 }
 
+int sequencer_skip(struct repository *r, struct replay_opts *opts)
+{
+       enum replay_action action = -1;
+       sequencer_get_last_command(r, &action);
+
+       /*
+        * Check whether the subcommand requested to skip the commit is actually
+        * in progress and that it's safe to skip the commit.
+        *
+        * opts->action tells us which subcommand requested to skip the commit.
+        * If the corresponding .git/<ACTION>_HEAD exists, we know that the
+        * action is in progress and we can skip the commit.
+        *
+        * Otherwise we check that the last instruction was related to the
+        * particular subcommand we're trying to execute and barf if that's not
+        * the case.
+        *
+        * Finally we check that the rollback is "safe", i.e., whether HEAD has
+        * moved. If it has, it doesn't make sense to "reset the merge" and
+        * "skip the commit" as the user already handled this by committing. But
+        * we don't want to barf here; instead, we give advice on how to proceed. We
+        * only need to check that when .git/<ACTION>_HEAD doesn't exist because
+        * it gets removed when the user commits, so if it still exists we're
+        * sure the user can't have committed before.
+        */
+       switch (opts->action) {
+       case REPLAY_REVERT:
+               if (!file_exists(git_path_revert_head(r))) {
+                       if (action != REPLAY_REVERT)
+                               return error(_("no revert in progress"));
+                       if (!rollback_is_safe())
+                               goto give_advice;
+               }
+               break;
+       case REPLAY_PICK:
+               if (!file_exists(git_path_cherry_pick_head(r))) {
+                       if (action != REPLAY_PICK)
+                               return error(_("no cherry-pick in progress"));
+                       if (!rollback_is_safe())
+                               goto give_advice;
+               }
+               break;
+       default:
+               BUG("unexpected action in sequencer_skip");
+       }
+
+       if (skip_single_pick())
+               return error(_("failed to skip the commit"));
+       if (!is_directory(git_path_seq_dir()))
+               return 0;
+
+       return sequencer_continue(r, opts);
+
+give_advice:
+       error(_("there is nothing to skip"));
+
+       if (advice_resolve_conflict) {
+               advise(_("have you committed already?\n"
+                        "try \"git %s --continue\""),
+                        action == REPLAY_REVERT ? "revert" : "cherry-pick");
+       }
+       return -1;
+}
+
 static int save_todo(struct todo_list *todo_list, struct replay_opts *opts)
 {
        struct lock_file todo_lock = LOCK_INIT;
@@ -3199,7 +3302,7 @@ static int do_reset(struct repository *r,
                return error_resolve_conflict(_(action_name(opts)));
        }
 
-       if (!fill_tree_descriptor(&desc, &oid)) {
+       if (!fill_tree_descriptor(r, &desc, &oid)) {
                error(_("failed to find tree of %s"), oid_to_hex(&oid));
                rollback_lock_file(&lock);
                free((void *)desc.buffer);
@@ -3738,7 +3841,7 @@ static int pick_commits(struct repository *r,
                        unlink(rebase_path_author_script());
                        unlink(rebase_path_stopped_sha());
                        unlink(rebase_path_amend());
-                       unlink(git_path_merge_head(the_repository));
+                       unlink(git_path_merge_head(r));
                        delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
 
                        if (item->command == TODO_BREAK) {
@@ -4123,7 +4226,7 @@ static int commit_staged_changes(struct repository *r,
                           opts, flags))
                return error(_("could not commit staged changes."));
        unlink(rebase_path_amend());
-       unlink(git_path_merge_head(the_repository));
+       unlink(git_path_merge_head(r));
        if (final_fixup) {
                unlink(rebase_path_fixup_msg());
                unlink(rebase_path_squash_msg());
@@ -4258,7 +4361,7 @@ int sequencer_pick_revisions(struct repository *r,
         */
 
        if (walk_revs_populate_todo(&todo_list, opts) ||
-                       create_seq_dir() < 0)
+                       create_seq_dir(r) < 0)
                return -1;
        if (get_oid("HEAD", &oid) && (opts->action == REPLAY_REVERT))
                return error(_("can't revert as initial commit"));
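The new is_command() helper accepts either the full command word or its one-letter nickname followed by whitespace (or end of line). Below is a standalone sketch of that matching logic, using a simplified stand-in for todo_command_info[] and skip_prefix(); the sample todo lines are made up for illustration.

	#include <stdio.h>
	#include <string.h>

	struct cmd_info { const char *str; char c; };
	static const struct cmd_info cmds[] = { { "pick", 'p' }, { "revert", 0 } };

	/* simplified stand-in for Git's skip_prefix() */
	static int skip_prefix(const char *str, const char *prefix, const char **out)
	{
		size_t len = strlen(prefix);
		if (strncmp(str, prefix, len))
			return 0;
		*out = str + len;
		return 1;
	}

	static int is_command(int command, const char **bol)
	{
		const char *str = cmds[command].str;
		const char nick = cmds[command].c;
		const char *p = *bol + 1;

		return skip_prefix(*bol, str, bol) ||
			((nick && **bol == nick) &&
			 (*p == ' ' || *p == '\t' || *p == '\n' || *p == '\r' || !*p) &&
			 (*bol = p));
	}

	int main(void)
	{
		const char *lines[] = { "pick 1234abcd subject",
					"p 1234abcd subject",
					"pi 1234abcd subject" };
		size_t i;

		for (i = 0; i < sizeof(lines) / sizeof(*lines); i++) {
			const char *bol = lines[i];
			printf("%-24s -> %s\n", lines[i],
			       is_command(0, &bol) ? "recognized as pick"
						   : "not recognized");
		}
		return 0;
	}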
index 3d0b68c34ce1fed23899415660127c999fa999c6..6704acbb9c93a55cb7ec69d2e045d67850bc4049 100644 (file)
@@ -129,6 +129,7 @@ int sequencer_pick_revisions(struct repository *repo,
                             struct replay_opts *opts);
 int sequencer_continue(struct repository *repo, struct replay_opts *opts);
 int sequencer_rollback(struct repository *repo, struct replay_opts *opts);
+int sequencer_skip(struct repository *repo, struct replay_opts *opts);
 int sequencer_remove_state(struct replay_opts *opts);
 
 #define TODO_LIST_KEEP_EMPTY (1U << 0)
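sequencer.h gains the sequencer_skip() entry point next to the existing continue/rollback ones. A hypothetical sketch of how a front-end might dispatch to it (the real wiring lives in the cherry-pick/revert builtins, not in this hunk); the `cmd` character is an assumed, already-parsed subcommand flag.

	#include "cache.h"
	#include "sequencer.h"

	static int run_sequencer_subcommand(struct repository *r, char cmd,
					    struct replay_opts *opts)
	{
		if (cmd == 's')		/* --skip */
			return sequencer_skip(r, opts);
		if (cmd == 'c')		/* --continue */
			return sequencer_continue(r, opts);
		if (cmd == 'a')		/* --abort */
			return sequencer_rollback(r, opts);
		if (cmd == 'q')		/* --quit */
			return sequencer_remove_state(opts);
		return -1;
	}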
index 888b6024d5de050753765e37b65ea0521d91f9fd..84fd02f107602411f3ceb460e4fe5b41c212bdec 100644 (file)
@@ -743,6 +743,103 @@ char *compute_alternate_path(const char *path, struct strbuf *err)
        return ref_git;
 }
 
+static void fill_alternate_refs_command(struct child_process *cmd,
+                                       const char *repo_path)
+{
+       const char *value;
+
+       if (!git_config_get_value("core.alternateRefsCommand", &value)) {
+               cmd->use_shell = 1;
+
+               argv_array_push(&cmd->args, value);
+               argv_array_push(&cmd->args, repo_path);
+       } else {
+               cmd->git_cmd = 1;
+
+               argv_array_pushf(&cmd->args, "--git-dir=%s", repo_path);
+               argv_array_push(&cmd->args, "for-each-ref");
+               argv_array_push(&cmd->args, "--format=%(objectname)");
+
+               if (!git_config_get_value("core.alternateRefsPrefixes", &value)) {
+                       argv_array_push(&cmd->args, "--");
+                       argv_array_split(&cmd->args, value);
+               }
+       }
+
+       cmd->env = local_repo_env;
+       cmd->out = -1;
+}
+
+static void read_alternate_refs(const char *path,
+                               alternate_ref_fn *cb,
+                               void *data)
+{
+       struct child_process cmd = CHILD_PROCESS_INIT;
+       struct strbuf line = STRBUF_INIT;
+       FILE *fh;
+
+       fill_alternate_refs_command(&cmd, path);
+
+       if (start_command(&cmd))
+               return;
+
+       fh = xfdopen(cmd.out, "r");
+       while (strbuf_getline_lf(&line, fh) != EOF) {
+               struct object_id oid;
+               const char *p;
+
+               if (parse_oid_hex(line.buf, &oid, &p) || *p) {
+                       warning(_("invalid line while parsing alternate refs: %s"),
+                               line.buf);
+                       break;
+               }
+
+               cb(&oid, data);
+       }
+
+       fclose(fh);
+       finish_command(&cmd);
+}
+
+struct alternate_refs_data {
+       alternate_ref_fn *fn;
+       void *data;
+};
+
+static int refs_from_alternate_cb(struct object_directory *e,
+                                 void *data)
+{
+       struct strbuf path = STRBUF_INIT;
+       size_t base_len;
+       struct alternate_refs_data *cb = data;
+
+       if (!strbuf_realpath(&path, e->path, 0))
+               goto out;
+       if (!strbuf_strip_suffix(&path, "/objects"))
+               goto out;
+       base_len = path.len;
+
+       /* Is this a git repository with refs? */
+       strbuf_addstr(&path, "/refs");
+       if (!is_directory(path.buf))
+               goto out;
+       strbuf_setlen(&path, base_len);
+
+       read_alternate_refs(path.buf, cb->fn, cb->data);
+
+out:
+       strbuf_release(&path);
+       return 0;
+}
+
+void for_each_alternate_ref(alternate_ref_fn fn, void *data)
+{
+       struct alternate_refs_data cb;
+       cb.fn = fn;
+       cb.data = data;
+       foreach_alt_odb(refs_from_alternate_cb, &cb);
+}
+
 int foreach_alt_odb(alt_odb_fn fn, void *cb)
 {
        struct object_directory *ent;
@@ -1505,7 +1602,8 @@ void *read_object_file_extended(struct repository *r,
        return NULL;
 }
 
-void *read_object_with_reference(const struct object_id *oid,
+void *read_object_with_reference(struct repository *r,
+                                const struct object_id *oid,
                                 const char *required_type_name,
                                 unsigned long *size,
                                 struct object_id *actual_oid_return)
@@ -1521,7 +1619,7 @@ void *read_object_with_reference(const struct object_id *oid,
                int ref_length = -1;
                const char *ref_type = NULL;
 
-               buffer = read_object_file(&actual_oid, &type, &isize);
+               buffer = repo_read_object_file(r, &actual_oid, &type, &isize);
                if (!buffer)
                        return NULL;
                if (type == required_type) {
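for_each_alternate_ref() now runs `git for-each-ref` (or the core.alternateRefsCommand hook) in each alternate and feeds the reported tips to a callback; the same callback shape is what the new --alternate-refs handling in revision.c relies on. A minimal sketch of a caller, assuming the declaration is exported via object-store.h after this move; count_tip() is made up for illustration.

	#include "cache.h"
	#include "object-store.h"

	static void count_tip(const struct object_id *oid, void *data)
	{
		unsigned long *count = data;
		(*count)++;
	}

	static unsigned long count_alternate_tips(void)
	{
		unsigned long count = 0;

		for_each_alternate_ref(count_tip, &count);
		return count;
	}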
index 49855ad24f1bc81aa8924fc927e0e32c7a62bf0a..2989e27b717abdabd6623299d7737e9a51641990 100644 (file)
@@ -478,7 +478,7 @@ static enum get_oid_result get_short_oid(struct repository *r,
         * or migrated from loose to packed.
         */
        if (status == MISSING_OBJECT) {
-               reprepare_packed_git(the_repository);
+               reprepare_packed_git(r);
                find_short_object_filename(&ds);
                find_short_packed_object(&ds);
                status = finish_object_disambiguation(&ds, oid);
@@ -1389,9 +1389,7 @@ int repo_get_oid_mb(struct repository *r,
        two = lookup_commit_reference_gently(r, &oid_tmp, 0);
        if (!two)
                return -1;
-       if (r != the_repository)
-               BUG("sorry get_merge_bases() can't take struct repository yet");
-       mbs = get_merge_bases(one, two);
+       mbs = repo_get_merge_bases(r, one, two);
        if (!mbs || mbs->next)
                st = -1;
        else {
@@ -1677,7 +1675,8 @@ int repo_get_oid_blob(struct repository *r,
 }
 
 /* Must be called only when object_name:filename doesn't exist. */
-static void diagnose_invalid_oid_path(const char *prefix,
+static void diagnose_invalid_oid_path(struct repository *r,
+                                     const char *prefix,
                                      const char *filename,
                                      const struct object_id *tree_oid,
                                      const char *object_name,
@@ -1695,7 +1694,7 @@ static void diagnose_invalid_oid_path(const char *prefix,
        if (is_missing_file_error(errno)) {
                char *fullname = xstrfmt("%s%s", prefix, filename);
 
-               if (!get_tree_entry(tree_oid, fullname, &oid, &mode)) {
+               if (!get_tree_entry(r, tree_oid, fullname, &oid, &mode)) {
                        die("Path '%s' exists, but not '%s'.\n"
                            "Did you mean '%.*s:%s' aka '%.*s:./%s'?",
                            fullname,
@@ -1889,23 +1888,15 @@ static enum get_oid_result get_oid_with_context_1(struct repository *repo,
                        new_filename = resolve_relative_path(repo, filename);
                        if (new_filename)
                                filename = new_filename;
-                       /*
-                        * NEEDSWORK: Eventually get_tree_entry*() should
-                        * learn to take struct repository directly and we
-                        * would not need to inject submodule odb to the
-                        * in-core odb.
-                        */
-                       if (repo != the_repository)
-                               add_to_alternates_memory(repo->objects->odb->path);
                        if (flags & GET_OID_FOLLOW_SYMLINKS) {
-                               ret = get_tree_entry_follow_symlinks(&tree_oid,
+                               ret = get_tree_entry_follow_symlinks(repo, &tree_oid,
                                        filename, oid, &oc->symlink_path,
                                        &oc->mode);
                        } else {
-                               ret = get_tree_entry(&tree_oid, filename, oid,
+                               ret = get_tree_entry(repo, &tree_oid, filename, oid,
                                                     &oc->mode);
                                if (ret && only_to_die) {
-                                       diagnose_invalid_oid_path(prefix,
+                                       diagnose_invalid_oid_path(repo, prefix,
                                                                   filename,
                                                                   &tree_oid,
                                                                   name, len);
index ce45297940d417e3454b08d3f0c29f5cc6d93658..5fa2b15d3705439b0088ac0fa0800c81ffad7a41 100644 (file)
--- a/shallow.c
+++ b/shallow.c
@@ -248,7 +248,8 @@ static void check_shallow_file_for_update(struct repository *r)
        if (r->parsed_objects->is_shallow == -1)
                BUG("shallow must be initialized by now");
 
-       if (!stat_validity_check(r->parsed_objects->shallow_stat, git_path_shallow(the_repository)))
+       if (!stat_validity_check(r->parsed_objects->shallow_stat,
+                                git_path_shallow(r)))
                die("shallow file has changed since we read it");
 }
 
index 0e18b259ce51fc73f82f7b77032221b33411c25f..d30f916858883aa312bd824a53516cb099a2e922 100644 (file)
--- a/strbuf.c
+++ b/strbuf.c
@@ -811,25 +811,57 @@ void strbuf_addstr_urlencode(struct strbuf *sb, const char *s,
        strbuf_add_urlencode(sb, s, strlen(s), reserved);
 }
 
-void strbuf_humanise_bytes(struct strbuf *buf, off_t bytes)
+static void strbuf_humanise(struct strbuf *buf, off_t bytes,
+                                int humanise_rate)
 {
        if (bytes > 1 << 30) {
-               strbuf_addf(buf, "%u.%2.2u GiB",
+               strbuf_addf(buf,
+                               humanise_rate == 0 ?
+                                       /* TRANSLATORS: IEC 80000-13:2008 gibibyte */
+                                       _("%u.%2.2u GiB") :
+                                       /* TRANSLATORS: IEC 80000-13:2008 gibibyte/second */
+                                       _("%u.%2.2u GiB/s"),
                            (unsigned)(bytes >> 30),
                            (unsigned)(bytes & ((1 << 30) - 1)) / 10737419);
        } else if (bytes > 1 << 20) {
                unsigned x = bytes + 5243;  /* for rounding */
-               strbuf_addf(buf, "%u.%2.2u MiB",
+               strbuf_addf(buf,
+                               humanise_rate == 0 ?
+                                       /* TRANSLATORS: IEC 80000-13:2008 mebibyte */
+                                       _("%u.%2.2u MiB") :
+                                       /* TRANSLATORS: IEC 80000-13:2008 mebibyte/second */
+                                       _("%u.%2.2u MiB/s"),
                            x >> 20, ((x & ((1 << 20) - 1)) * 100) >> 20);
        } else if (bytes > 1 << 10) {
                unsigned x = bytes + 5;  /* for rounding */
-               strbuf_addf(buf, "%u.%2.2u KiB",
+               strbuf_addf(buf,
+                               humanise_rate == 0 ?
+                                       /* TRANSLATORS: IEC 80000-13:2008 kibibyte */
+                                       _("%u.%2.2u KiB") :
+                                       /* TRANSLATORS: IEC 80000-13:2008 kibibyte/second */
+                                       _("%u.%2.2u KiB/s"),
                            x >> 10, ((x & ((1 << 10) - 1)) * 100) >> 10);
        } else {
-               strbuf_addf(buf, "%u bytes", (unsigned)bytes);
+               strbuf_addf(buf,
+                               humanise_rate == 0 ?
+                                       /* TRANSLATORS: IEC 80000-13:2008 byte */
+                                       Q_("%u byte", "%u bytes", (unsigned)bytes) :
+                                       /* TRANSLATORS: IEC 80000-13:2008 byte/second */
+                                       Q_("%u byte/s", "%u bytes/s", (unsigned)bytes),
+                               (unsigned)bytes);
        }
 }
 
+void strbuf_humanise_bytes(struct strbuf *buf, off_t bytes)
+{
+       strbuf_humanise(buf, bytes, 0);
+}
+
+void strbuf_humanise_rate(struct strbuf *buf, off_t bytes)
+{
+       strbuf_humanise(buf, bytes, 1);
+}
+
 void strbuf_add_absolute_path(struct strbuf *sb, const char *path)
 {
        if (!*path)
index c8d98dfb95b8d81b316e887fa031dab0b2165784..f62278a0be59be4c6cff17f0a0adcc6361e93e82 100644 (file)
--- a/strbuf.h
+++ b/strbuf.h
@@ -372,6 +372,12 @@ void strbuf_addbuf_percentquote(struct strbuf *dst, const struct strbuf *src);
  */
 void strbuf_humanise_bytes(struct strbuf *buf, off_t bytes);
 
+/**
+ * Append the given byte rate as a human-readable string (e.g. 12.23 KiB/s,
+ * 3.50 MiB/s).
+ */
+void strbuf_humanise_rate(struct strbuf *buf, off_t bytes);
+
 /**
  * Add a formatted string to the buffer.
  */
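A minimal sketch of the new helper next to the existing strbuf_humanise_bytes(); show_progress_line() and its argument values are illustrative only.

	#include "cache.h"
	#include "strbuf.h"

	static void show_progress_line(off_t total, off_t bytes_per_sec)
	{
		struct strbuf size = STRBUF_INIT;
		struct strbuf rate = STRBUF_INIT;

		strbuf_humanise_bytes(&size, total);        /* e.g. "3.00 MiB" */
		strbuf_humanise_rate(&rate, bytes_per_sec); /* e.g. "512.00 KiB/s" */
		fprintf(stderr, "Received %s at %s\n", size.buf, rate.buf);

		strbuf_release(&size);
		strbuf_release(&rate);
	}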
index 23d2b172fe708f711a15613e906637cd948324ef..aaf17b0ddf9e8ddd1270f613f8c71841c6644eb9 100644 (file)
@@ -173,14 +173,7 @@ int cmd__hashmap(int argc, const char **argv)
                        p2 = strtok(NULL, DELIM);
                }
 
-               if (!strcmp("hash", cmd) && p1) {
-
-                       /* print results of different hash functions */
-                       printf("%u %u %u %u\n",
-                              strhash(p1), memhash(p1, strlen(p1)),
-                              strihash(p1), memihash(p1, strlen(p1)));
-
-               } else if (!strcmp("add", cmd) && p1 && p2) {
+               if (!strcmp("add", cmd) && p1 && p2) {
 
                        /* create entry with key = p1, value = p2 */
                        entry = alloc_test_entry(hash, p1, p2);
index 96857f26ac8540cf22e74aed72bbd30bc8147f00..b9fd427571e6265dbc3be8b6e93acbb6eba61c56 100644 (file)
@@ -20,7 +20,7 @@ int cmd__match_trees(int ac, const char **av)
        if (!two)
                die("not a tree-ish %s", av[2]);
 
-       shift_tree(&one->object.oid, &two->object.oid, &shifted, -1);
+       shift_tree(the_repository, &one->object.oid, &two->object.oid, &shifted, -1);
        printf("shifted: %s\n", oid_to_hex(&shifted));
 
        exit(0);
diff --git a/t/helper/test-oidmap.c b/t/helper/test-oidmap.c
new file mode 100644 (file)
index 0000000..0acf999
--- /dev/null
@@ -0,0 +1,112 @@
+#include "test-tool.h"
+#include "cache.h"
+#include "oidmap.h"
+#include "strbuf.h"
+
+/* key is an oid and value is a name (could be a refname for example) */
+struct test_entry {
+       struct oidmap_entry entry;
+       char name[FLEX_ARRAY];
+};
+
+#define DELIM " \t\r\n"
+
+/*
+ * Read stdin line by line and print result of commands to stdout:
+ *
+ * put oidkey namevalue -> NULL / old namevalue
+ * get oidkey -> NULL / namevalue
+ * remove oidkey -> NULL / old namevalue
+ * iterate -> oidkey1 namevalue1\noidkey2 namevalue2\n...
+ *
+ */
+int cmd__oidmap(int argc, const char **argv)
+{
+       struct strbuf line = STRBUF_INIT;
+       struct oidmap map = OIDMAP_INIT;
+
+       setup_git_directory();
+
+       /* init oidmap */
+       oidmap_init(&map, 0);
+
+       /* process commands from stdin */
+       while (strbuf_getline(&line, stdin) != EOF) {
+               char *cmd, *p1 = NULL, *p2 = NULL;
+               struct test_entry *entry;
+               struct object_id oid;
+
+               /* break line into command and up to two parameters */
+               cmd = strtok(line.buf, DELIM);
+               /* ignore empty lines */
+               if (!cmd || *cmd == '#')
+                       continue;
+
+               p1 = strtok(NULL, DELIM);
+               if (p1)
+                       p2 = strtok(NULL, DELIM);
+
+               if (!strcmp("put", cmd) && p1 && p2) {
+
+                       if (get_oid(p1, &oid)) {
+                               printf("Unknown oid: %s\n", p1);
+                               continue;
+                       }
+
+                       /* create entry with oid_key = p1, name_value = p2 */
+                       FLEX_ALLOC_STR(entry, name, p2);
+                       oidcpy(&entry->entry.oid, &oid);
+
+                       /* add / replace entry */
+                       entry = oidmap_put(&map, entry);
+
+                       /* print and free replaced entry, if any */
+                       puts(entry ? entry->name : "NULL");
+                       free(entry);
+
+               } else if (!strcmp("get", cmd) && p1) {
+
+                       if (get_oid(p1, &oid)) {
+                               printf("Unknown oid: %s\n", p1);
+                               continue;
+                       }
+
+                       /* lookup entry in oidmap */
+                       entry = oidmap_get(&map, &oid);
+
+                       /* print result */
+                       puts(entry ? entry->name : "NULL");
+
+               } else if (!strcmp("remove", cmd) && p1) {
+
+                       if (get_oid(p1, &oid)) {
+                               printf("Unknown oid: %s\n", p1);
+                               continue;
+                       }
+
+                       /* remove entry from oidmap */
+                       entry = oidmap_remove(&map, &oid);
+
+                       /* print result and free entry */
+                       puts(entry ? entry->name : "NULL");
+                       free(entry);
+
+               } else if (!strcmp("iterate", cmd)) {
+
+                       struct oidmap_iter iter;
+                       oidmap_iter_init(&map, &iter);
+                       while ((entry = oidmap_iter_next(&iter)))
+                               printf("%s %s\n", oid_to_hex(&entry->entry.oid), entry->name);
+
+               } else {
+
+                       printf("Unknown command %s\n", cmd);
+
+               }
+       }
+
+       strbuf_release(&line);
+       oidmap_free(&map, 1);
+       return 0;
+}
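The helper above drives the oidmap API from the command line; here is a minimal sketch of the same API used directly, with a hypothetical entry type carrying an int payload. The map is assumed to have been set up with OIDMAP_INIT or oidmap_init().

	#include "cache.h"
	#include "oidmap.h"

	struct counted_oid {
		struct oidmap_entry entry; /* must be the first member */
		int count;
	};

	static void bump(struct oidmap *map, const struct object_id *oid)
	{
		struct counted_oid *e = oidmap_get(map, oid);

		if (!e) {
			e = xcalloc(1, sizeof(*e));
			oidcpy(&e->entry.oid, oid);
			oidmap_put(map, e);
		}
		e->count++;
	}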
index 087a8c0cc9da64d7bc276c3870b2d0faba4c2627..1eac25233f7ce62ecb00b2d1e3d06d1423c3581f 100644 (file)
@@ -35,6 +35,7 @@ static struct test_cmd cmds[] = {
        { "match-trees", cmd__match_trees },
        { "mergesort", cmd__mergesort },
        { "mktemp", cmd__mktemp },
+       { "oidmap", cmd__oidmap },
        { "online-cpus", cmd__online_cpus },
        { "parse-options", cmd__parse_options },
        { "path-utils", cmd__path_utils },
index 7e703f3038ae433c7d8b4ef5af51d9781d6bfffb..c7a46dc320e93b3bb5aef5f7fce3697c4558f814 100644 (file)
@@ -25,6 +25,7 @@ int cmd__lazy_init_name_hash(int argc, const char **argv);
 int cmd__match_trees(int argc, const char **argv);
 int cmd__mergesort(int argc, const char **argv);
 int cmd__mktemp(int argc, const char **argv);
+int cmd__oidmap(int argc, const char **argv);
 int cmd__online_cpus(int argc, const char **argv);
 int cmd__parse_options(int argc, const char **argv);
 int cmd__path_utils(int argc, const char **argv);
diff --git a/t/perf/p5600-clone-reference.sh b/t/perf/p5600-clone-reference.sh
new file mode 100755 (executable)
index 0000000..68fed66
--- /dev/null
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+test_description='speed of clone --reference'
+. ./perf-lib.sh
+
+test_perf_default_repo
+
+test_expect_success 'create shareable repository' '
+       git clone --bare . shared.git
+'
+
+test_expect_success 'advance base repository' '
+       # Do not use test_commit here; its test_tick will
+       # use some ancient hard-coded date. The resulting clock
+       # skew will cause pack-objects to traverse in a very
+       # sub-optimal order, skewing the results.
+       echo content >new-file-that-does-not-exist &&
+       git add new-file-that-does-not-exist &&
+       git commit -m "new commit"
+'
+
+test_perf 'clone --reference' '
+       rm -rf dst.git &&
+       git clone --no-local --bare --reference shared.git . dst.git
+'
+
+test_done
index 3f1f505e8937f391666a1b7e6d9b972a5f146974..9c96b3e3b10a99c20a1e6f5d4d0349c4fbcb0a10 100755 (executable)
@@ -9,15 +9,6 @@ test_hashmap() {
        test_cmp expect actual
 }
 
-test_expect_success 'hash functions' '
-
-test_hashmap "hash key1" "2215982743 2215982743 116372151 116372151" &&
-test_hashmap "hash key2" "2215982740 2215982740 116372148 116372148" &&
-test_hashmap "hash fooBarFrotz" "1383912807 1383912807 3189766727 3189766727" &&
-test_hashmap "hash foobarfrotz" "2862305959 2862305959 3189766727 3189766727"
-
-'
-
 test_expect_success 'put' '
 
 test_hashmap "put key1 value1
diff --git a/t/t0016-oidmap.sh b/t/t0016-oidmap.sh
new file mode 100755 (executable)
index 0000000..bbe719e
--- /dev/null
@@ -0,0 +1,102 @@
+#!/bin/sh
+
+test_description='test oidmap'
+. ./test-lib.sh
+
+# This purposefully is very similar to t0011-hashmap.sh
+
+test_oidmap () {
+       echo "$1" | test-tool oidmap $3 >actual &&
+       echo "$2" >expect &&
+       test_cmp expect actual
+}
+
+
+test_expect_success 'setup' '
+
+       test_commit one &&
+       test_commit two &&
+       test_commit three &&
+       test_commit four
+
+'
+
+test_expect_success 'put' '
+
+test_oidmap "put one 1
+put two 2
+put invalidOid 4
+put three 3" "NULL
+NULL
+Unknown oid: invalidOid
+NULL"
+
+'
+
+test_expect_success 'replace' '
+
+test_oidmap "put one 1
+put two 2
+put three 3
+put invalidOid 4
+put two deux
+put one un" "NULL
+NULL
+NULL
+Unknown oid: invalidOid
+2
+1"
+
+'
+
+test_expect_success 'get' '
+
+test_oidmap "put one 1
+put two 2
+put three 3
+get two
+get four
+get invalidOid
+get one" "NULL
+NULL
+NULL
+2
+NULL
+Unknown oid: invalidOid
+1"
+
+'
+
+test_expect_success 'remove' '
+
+test_oidmap "put one 1
+put two 2
+put three 3
+remove one
+remove two
+remove invalidOid
+remove four" "NULL
+NULL
+NULL
+1
+2
+Unknown oid: invalidOid
+NULL"
+
+'
+
+test_expect_success 'iterate' '
+
+test_oidmap "put one 1
+put two 2
+put three 3
+iterate" "NULL
+NULL
+NULL
+$(git rev-parse two) 2
+$(git rev-parse one) 1
+$(git rev-parse three) 3"
+
+'
+
+test_done
index bdaa511bb0ae1082568e83781252f667b39cda8a..4eff14dae53223fb432ff5d9147543850a2c9ad5 100755 (executable)
@@ -265,4 +265,12 @@ test_expect_success '--reschedule-failed-exec' '
        test_i18ngrep "has been rescheduled" err
 '
 
+test_expect_success 'rebase.reschedulefailedexec only affects `rebase -i`' '
+       test_config rebase.reschedulefailedexec true &&
+       test_must_fail git rebase -x false HEAD^ &&
+       grep "^exec false" .git/rebase-merge/git-rebase-todo &&
+       git rebase --abort &&
+       git rebase HEAD^
+'
+
 test_done
index 9186e90127712feaf92fe94e5bdfce02528fcf5f..b8f4d034672378065a5fe08df6b8210fa3c71721 100755 (executable)
@@ -30,7 +30,8 @@ test_expect_success setup '
        echo conflicting-change >file2 &&
        git add . &&
        test_tick &&
-       git commit -m "related commit"
+       git commit -m "related commit" &&
+       remove_progress_re="$(printf "s/.*\\r//")"
 '
 
 create_expected_success_am () {
@@ -48,8 +49,8 @@ create_expected_success_interactive () {
        q_to_cr >expected <<-EOF
        $(grep "^Created autostash: [0-9a-f][0-9a-f]*\$" actual)
        HEAD is now at $(git rev-parse --short feature-branch) third commit
-       Rebasing (1/2)QRebasing (2/2)QApplied autostash.
-       Q                                                                                QSuccessfully rebased and updated refs/heads/rebased-feature-branch.
+       Applied autostash.
+       Successfully rebased and updated refs/heads/rebased-feature-branch.
        EOF
 }
 
@@ -67,13 +68,13 @@ create_expected_failure_am () {
 }
 
 create_expected_failure_interactive () {
-       q_to_cr >expected <<-EOF
+       cat >expected <<-EOF
        $(grep "^Created autostash: [0-9a-f][0-9a-f]*\$" actual)
        HEAD is now at $(git rev-parse --short feature-branch) third commit
-       Rebasing (1/2)QRebasing (2/2)QApplying autostash resulted in conflicts.
+       Applying autostash resulted in conflicts.
        Your changes are safe in the stash.
        You can run "git stash pop" or "git stash drop" at any time.
-       Q                                                                                QSuccessfully rebased and updated refs/heads/rebased-feature-branch.
+       Successfully rebased and updated refs/heads/rebased-feature-branch.
        EOF
 }
 
@@ -109,7 +110,8 @@ testrebase () {
                        suffix=interactive
                fi &&
                create_expected_success_$suffix &&
-               test_i18ncmp expected actual
+               sed "$remove_progress_re" <actual >actual2 &&
+               test_i18ncmp expected actual2
        '
 
        test_expect_success "rebase$type: dirty index, non-conflicting rebase" '
@@ -209,7 +211,8 @@ testrebase () {
                        suffix=interactive
                fi &&
                create_expected_failure_$suffix &&
-               test_i18ncmp expected actual
+               sed "$remove_progress_re" <actual >actual2 &&
+               test_i18ncmp expected actual2
        '
 }
 
index 941d5026da2adc857fa332f899ea3594876a550c..793bcc7fe3246e8fc375b8678bb99f7bc875a48c 100755 (executable)
@@ -93,6 +93,128 @@ test_expect_success 'cherry-pick cleans up sequencer state upon success' '
        test_path_is_missing .git/sequencer
 '
 
+test_expect_success 'cherry-pick --skip requires cherry-pick in progress' '
+       pristine_detach initial &&
+       test_must_fail git cherry-pick --skip
+'
+
+test_expect_success 'revert --skip requires revert in progress' '
+       pristine_detach initial &&
+       test_must_fail git revert --skip
+'
+
+test_expect_success 'cherry-pick --skip to skip commit' '
+       pristine_detach initial &&
+       test_must_fail git cherry-pick anotherpick &&
+       test_must_fail git revert --skip &&
+       git cherry-pick --skip &&
+       test_cmp_rev initial HEAD &&
+       test_path_is_missing .git/CHERRY_PICK_HEAD
+'
+
+test_expect_success 'revert --skip to skip commit' '
+       pristine_detach anotherpick &&
+       test_must_fail git revert anotherpick~1 &&
+       test_must_fail git cherry-pick --skip &&
+       git revert --skip &&
+       test_cmp_rev anotherpick HEAD
+'
+
+test_expect_success 'skip "empty" commit' '
+       pristine_detach picked &&
+       test_commit dummy foo d &&
+       test_must_fail git cherry-pick anotherpick &&
+       git cherry-pick --skip &&
+       test_cmp_rev dummy HEAD
+'
+
+test_expect_success 'skip a commit and check if rest of sequence is correct' '
+       pristine_detach initial &&
+       echo e >expect &&
+       cat >expect.log <<-EOF &&
+       OBJID
+       :100644 100644 OBJID OBJID M    foo
+       OBJID
+       :100644 100644 OBJID OBJID M    foo
+       OBJID
+       :100644 100644 OBJID OBJID M    unrelated
+       OBJID
+       :000000 100644 OBJID OBJID A    foo
+       :000000 100644 OBJID OBJID A    unrelated
+       EOF
+       test_must_fail git cherry-pick base..yetanotherpick &&
+       test_must_fail git cherry-pick --skip &&
+       echo d >foo &&
+       git add foo &&
+       git cherry-pick --continue &&
+       {
+               git rev-list HEAD |
+               git diff-tree --root --stdin |
+               sed "s/$OID_REGEX/OBJID/g"
+       } >actual.log &&
+       test_cmp expect foo &&
+       test_cmp expect.log actual.log
+'
+
+test_expect_success 'check advice when we move HEAD by committing' '
+       pristine_detach initial &&
+       cat >expect <<-EOF &&
+       error: there is nothing to skip
+       hint: have you committed already?
+       hint: try "git cherry-pick --continue"
+       fatal: cherry-pick failed
+       EOF
+       test_must_fail git cherry-pick base..yetanotherpick &&
+       echo c >foo &&
+       git commit -a &&
+       test_path_is_missing .git/CHERRY_PICK_HEAD &&
+       test_must_fail git cherry-pick --skip 2>advice &&
+       test_i18ncmp expect advice
+'
+
+test_expect_success 'selectively advise --skip while launching another sequence' '
+       pristine_detach initial &&
+       cat >expect <<-EOF &&
+       error: cherry-pick is already in progress
+       hint: try "git cherry-pick (--continue | --skip | --abort | --quit)"
+       fatal: cherry-pick failed
+       EOF
+       test_must_fail git cherry-pick picked..yetanotherpick &&
+       test_must_fail git cherry-pick picked..yetanotherpick 2>advice &&
+       test_i18ncmp expect advice &&
+       cat >expect <<-EOF &&
+       error: cherry-pick is already in progress
+       hint: try "git cherry-pick (--continue | --abort | --quit)"
+       fatal: cherry-pick failed
+       EOF
+       git reset --merge &&
+       test_must_fail git cherry-pick picked..yetanotherpick 2>advice &&
+       test_i18ncmp expect advice
+'
+
+test_expect_success 'allow skipping commit but not abort for a new history' '
+       pristine_detach initial &&
+       cat >expect <<-EOF &&
+       error: cannot abort from a branch yet to be born
+       fatal: cherry-pick failed
+       EOF
+       git checkout --orphan new_disconnected &&
+       git reset --hard &&
+       test_must_fail git cherry-pick anotherpick &&
+       test_must_fail git cherry-pick --abort 2>advice &&
+       git cherry-pick --skip &&
+       test_i18ncmp expect advice
+'
+
+test_expect_success 'allow skipping stopped cherry-pick because of untracked file modifications' '
+       pristine_detach initial &&
+       git rm --cached unrelated &&
+       git commit -m "untrack unrelated" &&
+       test_must_fail git cherry-pick initial base &&
+       test_path_is_missing .git/CHERRY_PICK_HEAD &&
+       git cherry-pick --skip
+'
+
 test_expect_success '--quit does not complain when no cherry-pick is in progress' '
        pristine_detach initial &&
        git cherry-pick --quit
index 5267c4be20e709bb1632c5b9786ffdb418bcb5c9..22cb9d66430410f726e821e906fa79587f43c3e8 100755 (executable)
@@ -20,7 +20,7 @@ test_expect_success 'verify graph with no graph file' '
 test_expect_success 'write graph with no packs' '
        cd "$TRASH_DIRECTORY/full" &&
        git commit-graph write --object-dir . &&
-       test_path_is_file info/commit-graph
+       test_path_is_missing info/commit-graph
 '
 
 test_expect_success 'close with correct error on bad input' '
index 1ebf19ec3cd559dbf9ae7205a225d6e49368a8d2..c72ca0439993bb25b3d0e25fa2ce5d399b49a2b7 100755 (executable)
@@ -363,4 +363,188 @@ test_expect_success 'verify incorrect 64-bit offset' '
                "incorrect object offset"
 '
 
+test_expect_success 'setup expire tests' '
+       mkdir dup &&
+       (
+               cd dup &&
+               git init &&
+               test-tool genrandom "data" 4096 >large_file.txt &&
+               git update-index --add large_file.txt &&
+               for i in $(test_seq 1 20)
+               do
+                       test_commit $i
+               done &&
+               git branch A HEAD &&
+               git branch B HEAD~8 &&
+               git branch C HEAD~13 &&
+               git branch D HEAD~16 &&
+               git branch E HEAD~18 &&
+               git pack-objects --revs .git/objects/pack/pack-A <<-EOF &&
+               refs/heads/A
+               ^refs/heads/B
+               EOF
+               git pack-objects --revs .git/objects/pack/pack-B <<-EOF &&
+               refs/heads/B
+               ^refs/heads/C
+               EOF
+               git pack-objects --revs .git/objects/pack/pack-C <<-EOF &&
+               refs/heads/C
+               ^refs/heads/D
+               EOF
+               git pack-objects --revs .git/objects/pack/pack-D <<-EOF &&
+               refs/heads/D
+               ^refs/heads/E
+               EOF
+               git pack-objects --revs .git/objects/pack/pack-E <<-EOF &&
+               refs/heads/E
+               EOF
+               git multi-pack-index write &&
+               cp -r .git/objects/pack .git/objects/pack-backup
+       )
+'
+
+test_expect_success 'expire does not remove any packs' '
+       (
+               cd dup &&
+               ls .git/objects/pack >expect &&
+               git multi-pack-index expire &&
+               ls .git/objects/pack >actual &&
+               test_cmp expect actual
+       )
+'
+
+test_expect_success 'expire removes unreferenced packs' '
+       (
+               cd dup &&
+               git pack-objects --revs .git/objects/pack/pack-combined <<-EOF &&
+               refs/heads/A
+               ^refs/heads/C
+               EOF
+               git multi-pack-index write &&
+               ls .git/objects/pack | grep -v -e pack-[AB] >expect &&
+               git multi-pack-index expire &&
+               ls .git/objects/pack >actual &&
+               test_cmp expect actual &&
+               ls .git/objects/pack/ | grep idx >expect-idx &&
+               test-tool read-midx .git/objects | grep idx >actual-midx &&
+               test_cmp expect-idx actual-midx &&
+               git multi-pack-index verify &&
+               git fsck
+       )
+'
+
+test_expect_success 'repack with minimum size does not alter existing packs' '
+       (
+               cd dup &&
+               rm -rf .git/objects/pack &&
+               mv .git/objects/pack-backup .git/objects/pack &&
+               touch -m -t 201901010000 .git/objects/pack/pack-D* &&
+               touch -m -t 201901010001 .git/objects/pack/pack-C* &&
+               touch -m -t 201901010002 .git/objects/pack/pack-B* &&
+               touch -m -t 201901010003 .git/objects/pack/pack-A* &&
+               ls .git/objects/pack >expect &&
+               MINSIZE=$(test-tool path-utils file-size .git/objects/pack/*pack | sort -n | head -n 1) &&
+               git multi-pack-index repack --batch-size=$MINSIZE &&
+               ls .git/objects/pack >actual &&
+               test_cmp expect actual
+       )
+'
+
+test_expect_success 'repack creates a new pack' '
+       (
+               cd dup &&
+               ls .git/objects/pack/*idx >idx-list &&
+               test_line_count = 5 idx-list &&
+               THIRD_SMALLEST_SIZE=$(test-tool path-utils file-size .git/objects/pack/*pack | sort -n | head -n 3 | tail -n 1) &&
+               BATCH_SIZE=$(($THIRD_SMALLEST_SIZE + 1)) &&
+               git multi-pack-index repack --batch-size=$BATCH_SIZE &&
+               ls .git/objects/pack/*idx >idx-list &&
+               test_line_count = 6 idx-list &&
+               test-tool read-midx .git/objects | grep idx >midx-list &&
+               test_line_count = 6 midx-list
+       )
+'
+
+test_expect_success 'expire removes repacked packs' '
+       (
+               cd dup &&
+               ls -al .git/objects/pack/*pack &&
+               ls -S .git/objects/pack/*pack | head -n 4 >expect &&
+               git multi-pack-index expire &&
+               ls -S .git/objects/pack/*pack >actual &&
+               test_cmp expect actual &&
+               test-tool read-midx .git/objects | grep idx >midx-list &&
+               test_line_count = 4 midx-list
+       )
+'
+
+test_expect_success 'expire works when adding new packs' '
+       (
+               cd dup &&
+               git pack-objects --revs .git/objects/pack/pack-combined <<-EOF &&
+               refs/heads/A
+               ^refs/heads/B
+               EOF
+               git pack-objects --revs .git/objects/pack/pack-combined <<-EOF &&
+               refs/heads/B
+               ^refs/heads/C
+               EOF
+               git pack-objects --revs .git/objects/pack/pack-combined <<-EOF &&
+               refs/heads/C
+               ^refs/heads/D
+               EOF
+               git multi-pack-index write &&
+               git pack-objects --revs .git/objects/pack/a-pack <<-EOF &&
+               refs/heads/D
+               ^refs/heads/E
+               EOF
+               git multi-pack-index write &&
+               git pack-objects --revs .git/objects/pack/z-pack <<-EOF &&
+               refs/heads/E
+               EOF
+               git multi-pack-index expire &&
+               ls .git/objects/pack/ | grep idx >expect &&
+               test-tool read-midx .git/objects | grep idx >actual &&
+               test_cmp expect actual &&
+               git multi-pack-index verify
+       )
+'
+
+test_expect_success 'expire respects .keep files' '
+       (
+               cd dup &&
+               git pack-objects --revs .git/objects/pack/pack-all <<-EOF &&
+               refs/heads/A
+               EOF
+               git multi-pack-index write &&
+               PACKA=$(ls .git/objects/pack/a-pack*\.pack | sed s/\.pack\$//) &&
+               touch $PACKA.keep &&
+               git multi-pack-index expire &&
+               ls -S .git/objects/pack/a-pack* | grep $PACKA >a-pack-files &&
+               test_line_count = 3 a-pack-files &&
+               test-tool read-midx .git/objects | grep idx >midx-list &&
+               test_line_count = 2 midx-list
+       )
+'
+
+test_expect_success 'repack --batch-size=0 repacks everything' '
+       (
+               cd dup &&
+               rm .git/objects/pack/*.keep &&
+               ls .git/objects/pack/*idx >idx-list &&
+               test_line_count = 2 idx-list &&
+               git multi-pack-index repack --batch-size=0 &&
+               ls .git/objects/pack/*idx >idx-list &&
+               test_line_count = 3 idx-list &&
+               test-tool read-midx .git/objects | grep idx >midx-list &&
+               test_line_count = 3 midx-list &&
+               git multi-pack-index expire &&
+               ls -al .git/objects/pack/*idx >idx-list &&
+               test_line_count = 1 idx-list &&
+               git multi-pack-index repack --batch-size=0 &&
+               ls -al .git/objects/pack/*idx >new-idx-list &&
+               test_cmp idx-list new-idx-list
+       )
+'
+
 test_done
diff --git a/t/t5324-split-commit-graph.sh b/t/t5324-split-commit-graph.sh
new file mode 100755 (executable)
index 0000000..03f45a1
--- /dev/null
@@ -0,0 +1,343 @@
+#!/bin/sh
+
+test_description='split commit graph'
+. ./test-lib.sh
+
+GIT_TEST_COMMIT_GRAPH=0
+
+test_expect_success 'setup repo' '
+       git init &&
+       git config core.commitGraph true &&
+       infodir=".git/objects/info" &&
+       graphdir="$infodir/commit-graphs" &&
+       test_oid_init
+'
+
+graph_read_expect() {
+       NUM_BASE=0
+       if test ! -z $2
+       then
+               NUM_BASE=$2
+       fi
+       cat >expect <<- EOF
+       header: 43475048 1 1 3 $NUM_BASE
+       num_commits: $1
+       chunks: oid_fanout oid_lookup commit_metadata
+       EOF
+       git commit-graph read >output &&
+       test_cmp expect output
+}
+
+test_expect_success 'create commits and write commit-graph' '
+       for i in $(test_seq 3)
+       do
+               test_commit $i &&
+               git branch commits/$i || return 1
+       done &&
+       git commit-graph write --reachable &&
+       test_path_is_file $infodir/commit-graph &&
+       graph_read_expect 3
+'
+
+graph_git_two_modes() {
+       git -c core.commitGraph=true $1 >output &&
+       git -c core.commitGraph=false $1 >expect &&
+       test_cmp expect output
+}
+
+graph_git_behavior() {
+       MSG=$1
+       BRANCH=$2
+       COMPARE=$3
+       test_expect_success "check normal git operations: $MSG" '
+               graph_git_two_modes "log --oneline $BRANCH" &&
+               graph_git_two_modes "log --topo-order $BRANCH" &&
+               graph_git_two_modes "log --graph $COMPARE..$BRANCH" &&
+               graph_git_two_modes "branch -vv" &&
+               graph_git_two_modes "merge-base -a $BRANCH $COMPARE"
+       '
+}
+
+graph_git_behavior 'graph exists' commits/3 commits/1
+
+verify_chain_files_exist() {
+       for hash in $(cat $1/commit-graph-chain)
+       do
+               test_path_is_file $1/graph-$hash.graph || return 1
+       done
+}
+
+test_expect_success 'add more commits, and write a new base graph' '
+       git reset --hard commits/1 &&
+       for i in $(test_seq 4 5)
+       do
+               test_commit $i &&
+               git branch commits/$i || return 1
+       done &&
+       git reset --hard commits/2 &&
+       for i in $(test_seq 6 10)
+       do
+               test_commit $i &&
+               git branch commits/$i || return 1
+       done &&
+       git reset --hard commits/2 &&
+       git merge commits/4 &&
+       git branch merge/1 &&
+       git reset --hard commits/4 &&
+       git merge commits/6 &&
+       git branch merge/2 &&
+       git commit-graph write --reachable &&
+       graph_read_expect 12
+'
+
+test_expect_success 'fork and fail to base a chain on a commit-graph file' '
+       test_when_finished rm -rf fork &&
+       git clone . fork &&
+       (
+               cd fork &&
+               rm .git/objects/info/commit-graph &&
+               echo "$(pwd)/../.git/objects" >.git/objects/info/alternates &&
+               test_commit new-commit &&
+               git commit-graph write --reachable --split &&
+               test_path_is_file $graphdir/commit-graph-chain &&
+               test_line_count = 1 $graphdir/commit-graph-chain &&
+               verify_chain_files_exist $graphdir
+       )
+'
+
+test_expect_success 'add three more commits, write a tip graph' '
+       git reset --hard commits/3 &&
+       git merge merge/1 &&
+       git merge commits/5 &&
+       git merge merge/2 &&
+       git branch merge/3 &&
+       git commit-graph write --reachable --split &&
+       test_path_is_missing $infodir/commit-graph &&
+       test_path_is_file $graphdir/commit-graph-chain &&
+       ls $graphdir/graph-*.graph >graph-files &&
+       test_line_count = 2 graph-files &&
+       verify_chain_files_exist $graphdir
+'
+
+graph_git_behavior 'split commit-graph: merge 3 vs 2' merge/3 merge/2
+
+test_expect_success 'add one commit, write a tip graph' '
+       test_commit 11 &&
+       git branch commits/11 &&
+       git commit-graph write --reachable --split &&
+       test_path_is_missing $infodir/commit-graph &&
+       test_path_is_file $graphdir/commit-graph-chain &&
+       ls $graphdir/graph-*.graph >graph-files &&
+       test_line_count = 3 graph-files &&
+       verify_chain_files_exist $graphdir
+'
+
+graph_git_behavior 'three-layer commit-graph: commit 11 vs 6' commits/11 commits/6
+
+test_expect_success 'add one commit, write a merged graph' '
+       test_commit 12 &&
+       git branch commits/12 &&
+       git commit-graph write --reachable --split &&
+       test_path_is_file $graphdir/commit-graph-chain &&
+       test_line_count = 2 $graphdir/commit-graph-chain &&
+       ls $graphdir/graph-*.graph >graph-files &&
+       test_line_count = 2 graph-files &&
+       verify_chain_files_exist $graphdir
+'
+
+graph_git_behavior 'merged commit-graph: commit 12 vs 6' commits/12 commits/6
+
+test_expect_success 'create fork and chain across alternate' '
+       git clone . fork &&
+       (
+               cd fork &&
+               git config core.commitGraph true &&
+               rm -rf $graphdir &&
+               echo "$(pwd)/../.git/objects" >.git/objects/info/alternates &&
+               test_commit 13 &&
+               git branch commits/13 &&
+               git commit-graph write --reachable --split &&
+               test_path_is_file $graphdir/commit-graph-chain &&
+               test_line_count = 3 $graphdir/commit-graph-chain &&
+               ls $graphdir/graph-*.graph >graph-files &&
+               test_line_count = 1 graph-files &&
+               git -c core.commitGraph=true  rev-list HEAD >expect &&
+               git -c core.commitGraph=false rev-list HEAD >actual &&
+               test_cmp expect actual &&
+               test_commit 14 &&
+               git commit-graph write --reachable --split --object-dir=.git/objects/ &&
+               test_line_count = 3 $graphdir/commit-graph-chain &&
+               ls $graphdir/graph-*.graph >graph-files &&
+               test_line_count = 1 graph-files
+       )
+'
+
+graph_git_behavior 'alternate: commit 13 vs 6' commits/13 commits/6
+
+test_expect_success 'test merge strategy constants' '
+       git clone . merge-2 &&
+       (
+               cd merge-2 &&
+               git config core.commitGraph true &&
+               test_line_count = 2 $graphdir/commit-graph-chain &&
+               test_commit 14 &&
+               git commit-graph write --reachable --split --size-multiple=2 &&
+               test_line_count = 3 $graphdir/commit-graph-chain
+
+       ) &&
+       git clone . merge-10 &&
+       (
+               cd merge-10 &&
+               git config core.commitGraph true &&
+               test_line_count = 2 $graphdir/commit-graph-chain &&
+               test_commit 14 &&
+               git commit-graph write --reachable --split --size-multiple=10 &&
+               test_line_count = 1 $graphdir/commit-graph-chain &&
+               ls $graphdir/graph-*.graph >graph-files &&
+               test_line_count = 1 graph-files
+       ) &&
+       git clone . merge-10-expire &&
+       (
+               cd merge-10-expire &&
+               git config core.commitGraph true &&
+               test_line_count = 2 $graphdir/commit-graph-chain &&
+               test_commit 15 &&
+               git commit-graph write --reachable --split --size-multiple=10 --expire-time=1980-01-01 &&
+               test_line_count = 1 $graphdir/commit-graph-chain &&
+               ls $graphdir/graph-*.graph >graph-files &&
+               test_line_count = 3 graph-files
+       ) &&
+       git clone --no-hardlinks . max-commits &&
+       (
+               cd max-commits &&
+               git config core.commitGraph true &&
+               test_line_count = 2 $graphdir/commit-graph-chain &&
+               test_commit 16 &&
+               test_commit 17 &&
+               git commit-graph write --reachable --split --max-commits=1 &&
+               test_line_count = 1 $graphdir/commit-graph-chain &&
+               ls $graphdir/graph-*.graph >graph-files &&
+               test_line_count = 1 graph-files
+       )
+'
+
+test_expect_success 'remove commit-graph-chain file after flattening' '
+       git clone . flatten &&
+       (
+               cd flatten &&
+               test_line_count = 2 $graphdir/commit-graph-chain &&
+               git commit-graph write --reachable &&
+               test_path_is_missing $graphdir/commit-graph-chain &&
+               ls $graphdir >graph-files &&
+               test_line_count = 0 graph-files
+       )
+'
+
+corrupt_file() {
+       file=$1
+       pos=$2
+       data="${3:-\0}"
+       chmod a+w "$file" &&
+       printf "$data" | dd of="$file" bs=1 seek="$pos" conv=notrunc
+}
+
+test_expect_success 'verify hashes along chain, even in shallow' '
+       git clone --no-hardlinks . verify &&
+       (
+               cd verify &&
+               git commit-graph verify &&
+               base_file=$graphdir/graph-$(head -n 1 $graphdir/commit-graph-chain).graph &&
+               corrupt_file "$base_file" 1760 "\01" &&
+               test_must_fail git commit-graph verify --shallow 2>test_err &&
+               grep -v "^+" test_err >err &&
+               test_i18ngrep "incorrect checksum" err
+       )
+'
+
+test_expect_success 'verify --shallow does not check base contents' '
+       git clone --no-hardlinks . verify-shallow &&
+       (
+               cd verify-shallow &&
+               git commit-graph verify &&
+               base_file=$graphdir/graph-$(head -n 1 $graphdir/commit-graph-chain).graph &&
+               corrupt_file "$base_file" 1000 "\01" &&
+               git commit-graph verify --shallow &&
+               test_must_fail git commit-graph verify 2>test_err &&
+               grep -v "^+" test_err >err &&
+               test_i18ngrep "incorrect checksum" err
+       )
+'
+
+test_expect_success 'warn on base graph chunk incorrect' '
+       git clone --no-hardlinks . base-chunk &&
+       (
+               cd base-chunk &&
+               git commit-graph verify &&
+               base_file=$graphdir/graph-$(tail -n 1 $graphdir/commit-graph-chain).graph &&
+               corrupt_file "$base_file" 1376 "\01" &&
+               git commit-graph verify --shallow 2>test_err &&
+               grep -v "^+" test_err >err &&
+               test_i18ngrep "commit-graph chain does not match" err
+       )
+'
+
+test_expect_success 'verify after commit-graph-chain corruption' '
+       git clone --no-hardlinks . verify-chain &&
+       (
+               cd verify-chain &&
+               corrupt_file "$graphdir/commit-graph-chain" 60 "G" &&
+               git commit-graph verify 2>test_err &&
+               grep -v "^+" test_err >err &&
+               test_i18ngrep "invalid commit-graph chain" err &&
+               corrupt_file "$graphdir/commit-graph-chain" 60 "A" &&
+               git commit-graph verify 2>test_err &&
+               grep -v "^+" test_err >err &&
+               test_i18ngrep "unable to find all commit-graph files" err
+       )
+'
+
+test_expect_success 'verify across alternates' '
+       git clone --no-hardlinks . verify-alt &&
+       (
+               cd verify-alt &&
+               rm -rf $graphdir &&
+               altdir="$(pwd)/../.git/objects" &&
+               echo "$altdir" >.git/objects/info/alternates &&
+               git commit-graph verify --object-dir="$altdir/" &&
+               test_commit extra &&
+               git commit-graph write --reachable --split &&
+               tip_file=$graphdir/graph-$(tail -n 1 $graphdir/commit-graph-chain).graph &&
+               corrupt_file "$tip_file" 100 "\01" &&
+               test_must_fail git commit-graph verify --shallow 2>test_err &&
+               grep -v "^+" test_err >err &&
+               test_i18ngrep "commit-graph has incorrect fanout value" err
+       )
+'
+
+test_expect_success 'add octopus merge' '
+       git reset --hard commits/10 &&
+       git merge commits/3 commits/4 &&
+       git branch merge/octopus &&
+       git commit-graph write --reachable --split &&
+       git commit-graph verify &&
+       test_line_count = 3 $graphdir/commit-graph-chain
+'
+
+graph_git_behavior 'graph exists' merge/octopus commits/12
+
+test_expect_success 'split across alternate where alternate is not split' '
+       git commit-graph write --reachable &&
+       test_path_is_file .git/objects/info/commit-graph &&
+       cp .git/objects/info/commit-graph . &&
+       git clone --no-hardlinks . alt-split &&
+       (
+               cd alt-split &&
+               echo "$(pwd)"/../.git/objects >.git/objects/info/alternates &&
+               test_commit 18 &&
+               git commit-graph write --reachable --split &&
+               test_line_count = 1 $graphdir/commit-graph-chain
+       ) &&
+       test_cmp commit-graph .git/objects/info/commit-graph
+'
+
+test_done
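
The corrupt_file() helper above patches a single byte in place with dd so that "git commit-graph verify" has something to report. A minimal standalone sketch of the same trick, assuming a configured git identity; the throwaway repository name is illustrative, and offset 100 (which for a small graph lands in the fanout chunk, as the alternates test above also exploits) is an assumption rather than a documented constant:

    #!/bin/sh
    # Sketch only: flip one byte of a freshly written commit-graph and let
    # "git commit-graph verify" report the damage.
    git init corrupt-demo && cd corrupt-demo &&
    git commit --allow-empty -m one &&
    git commit-graph write --reachable &&
    graph=.git/objects/info/commit-graph &&
    chmod a+w "$graph" &&
    # conv=notrunc patches the byte in place instead of truncating the file
    printf '\01' | dd of="$graph" bs=1 seek=100 conv=notrunc &&
    if git commit-graph verify
    then
            echo "verify unexpectedly passed"
    else
            echo "verify detected the corruption"
    fi
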
index 7bc706873c5b2341f6a0922cf2e4f79d34eee97c..fdfe179b11885be7fdc49ed3732d0dfe5d3537bc 100755 (executable)
@@ -164,9 +164,9 @@ test_expect_success 'fsck with unsorted skipList' '
 test_expect_success 'fsck with invalid or bogus skipList input' '
        git -c fsck.skipList=/dev/null -c fsck.missingEmail=ignore fsck &&
        test_must_fail git -c fsck.skipList=does-not-exist -c fsck.missingEmail=ignore fsck 2>err &&
-       test_i18ngrep "Could not open skip list: does-not-exist" err &&
+       test_i18ngrep "could not open.*: does-not-exist" err &&
        test_must_fail git -c fsck.skipList=.git/config -c fsck.missingEmail=ignore fsck 2>err &&
-       test_i18ngrep "Invalid SHA-1: \[core\]" err
+       test_i18ngrep "invalid object name: \[core\]" err
 '
 
 test_expect_success 'fsck with other accepted skipList input (comments & empty lines)' '
@@ -193,7 +193,7 @@ test_expect_success 'fsck no garbage output from comments & empty lines errors'
 test_expect_success 'fsck with invalid abbreviated skipList input' '
        echo $commit | test_copy_bytes 20 >SKIP.abbreviated &&
        test_must_fail git -c fsck.skipList=SKIP.abbreviated fsck 2>err-abbreviated &&
-       test_i18ngrep "^fatal: Invalid SHA-1: " err-abbreviated
+       test_i18ngrep "^fatal: invalid object name: " err-abbreviated
 '
 
 test_expect_success 'fsck with exhaustive accepted skipList input (various types of comments etc.)' '
@@ -226,10 +226,10 @@ test_expect_success 'push with receive.fsck.skipList' '
        test_must_fail git push --porcelain dst bogus &&
        git --git-dir=dst/.git config receive.fsck.skipList does-not-exist &&
        test_must_fail git push --porcelain dst bogus 2>err &&
-       test_i18ngrep "Could not open skip list: does-not-exist" err &&
+       test_i18ngrep "could not open.*: does-not-exist" err &&
        git --git-dir=dst/.git config receive.fsck.skipList config &&
        test_must_fail git push --porcelain dst bogus 2>err &&
-       test_i18ngrep "Invalid SHA-1: \[core\]" err &&
+       test_i18ngrep "invalid object name: \[core\]" err &&
 
        git --git-dir=dst/.git config receive.fsck.skipList SKIP &&
        git push --porcelain dst bogus
@@ -255,10 +255,10 @@ test_expect_success 'fetch with fetch.fsck.skipList' '
        test_must_fail git --git-dir=dst/.git fetch "file://$(pwd)" $refspec &&
        git --git-dir=dst/.git config fetch.fsck.skipList does-not-exist &&
        test_must_fail git --git-dir=dst/.git fetch "file://$(pwd)" $refspec 2>err &&
-       test_i18ngrep "Could not open skip list: does-not-exist" err &&
+       test_i18ngrep "could not open.*: does-not-exist" err &&
        git --git-dir=dst/.git config fetch.fsck.skipList dst/.git/config &&
        test_must_fail git --git-dir=dst/.git fetch "file://$(pwd)" $refspec 2>err &&
-       test_i18ngrep "Invalid SHA-1: \[core\]" err &&
+       test_i18ngrep "invalid object name: \[core\]" err &&
 
        git --git-dir=dst/.git config fetch.fsck.skipList dst/.git/SKIP &&
        git --git-dir=dst/.git fetch "file://$(pwd)" $refspec
index cb11f13d6dd72a0101850d8fbaedc6f722bb8377..e38e54386795a0e05003e803dc79301ed1cd52d9 100755 (executable)
@@ -301,11 +301,10 @@ test_expect_success CMDLINE_LIMIT \
        )
 '
 
-test_expect_success 'large fetch-pack requests can be split across POSTs' '
+test_expect_success 'large fetch-pack requests can be sent using chunked encoding' '
        GIT_TRACE_CURL=true git -c http.postbuffer=65536 \
                clone --bare "$HTTPD_URL/smart/repo.git" split.git 2>err &&
-       grep "^=> Send header: POST" err >posts &&
-       test_line_count = 2 posts
+       grep "^=> Send header: Transfer-Encoding: chunked" err
 '
 
 test_expect_success 'test allowreachablesha1inwant' '
diff --git a/t/t5618-alternate-refs.sh b/t/t5618-alternate-refs.sh
new file mode 100755 (executable)
index 0000000..3353216
--- /dev/null
@@ -0,0 +1,60 @@
+#!/bin/sh
+
+test_description='test handling of --alternate-refs traversal'
+. ./test-lib.sh
+
+# Avoid test_commit because we want a specific and known set of refs:
+#
+#  base -- one
+#      \      \
+#       two -- merged
+#
+# where "one" and "two" are on separate refs, and "merged" is available only in
+# the dependent child repository.
+test_expect_success 'set up local refs' '
+       git checkout -b one &&
+       test_tick &&
+       git commit --allow-empty -m base &&
+       test_tick &&
+       git commit --allow-empty -m one &&
+       git checkout -b two HEAD^ &&
+       test_tick &&
+       git commit --allow-empty -m two
+'
+
+# We'll enter the child repository after it's set up since that's where
+# all of the subsequent tests will want to run (and it's easy to forget a
+# "-C child" and get nonsense results).
+test_expect_success 'set up shared clone' '
+       git clone -s . child &&
+       cd child &&
+       git merge origin/one
+'
+
+test_expect_success 'rev-list --alternate-refs' '
+       git rev-list --remotes=origin >expect &&
+       git rev-list --alternate-refs >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'rev-list --not --alternate-refs' '
+       git rev-parse HEAD >expect &&
+       git rev-list HEAD --not --alternate-refs >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'limiting with alternateRefsPrefixes' '
+       test_config core.alternateRefsPrefixes refs/heads/one &&
+       git rev-list origin/one >expect &&
+       git rev-list --alternate-refs >actual &&
+       test_cmp expect actual
+'
+
+test_expect_success 'log --source shows .alternate marker' '
+       git log --oneline --source --remotes=origin >expect.orig &&
+       sed "s/origin.* /.alternate /" <expect.orig >expect &&
+       git log --oneline --source --alternate-refs >actual &&
+       test_cmp expect actual
+'
+
+test_done
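
A minimal sketch, outside the test harness, of the traversal these tests exercise: "clone -s" points the child's objects/info/alternates at the parent, and --alternate-refs walks the refs advertised by that alternate rather than the child's own refs. Repository names are illustrative and a configured git identity is assumed.

    #!/bin/sh
    # Sketch: list only history reachable from the alternate's refs.
    git init parent &&
    git -C parent commit --allow-empty -m base &&
    git clone -s parent child &&
    git -C child commit --allow-empty -m child-only &&
    # Shows only the commit reachable from the parent's refs ("base"),
    # not the commit that exists solely in the child ("child-only").
    git -C child rev-list --alternate-refs
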
index d9235217fcc72912574c80cea684aa17c8b736d2..ab69aa176d14bf2d8dd88be1d7cbae0560609be8 100755 (executable)
@@ -345,6 +345,32 @@ test_expect_success 'Verify descending sort' '
        test_cmp expected actual
 '
 
+cat >expected <<\EOF
+refs/tags/testtag
+refs/tags/testtag-2
+EOF
+
+test_expect_success 'exercise patterns with prefixes' '
+       git tag testtag-2 &&
+       test_when_finished "git tag -d testtag-2" &&
+       git for-each-ref --format="%(refname)" \
+               refs/tags/testtag refs/tags/testtag-2 >actual &&
+       test_cmp expected actual
+'
+
+cat >expected <<\EOF
+refs/tags/testtag
+refs/tags/testtag-2
+EOF
+
+test_expect_success 'exercise glob patterns with prefixes' '
+       git tag testtag-2 &&
+       test_when_finished "git tag -d testtag-2" &&
+       git for-each-ref --format="%(refname)" \
+               refs/tags/testtag "refs/tags/testtag-*" >actual &&
+       test_cmp expected actual
+'
+
 cat >expected <<\EOF
 'refs/heads/master'
 'refs/remotes/origin/master'
index 53cf42fac19c83f2da7de1cd5b6c4ca0cf6e5ccf..d5218743e963bd7e68788946c686e0a45c7d8a2f 100755 (executable)
@@ -38,7 +38,6 @@ You have unmerged paths.
 
 Unmerged paths:
   (use "git add/rm <file>..." as appropriate to mark resolution)
-
        deleted by us:   foo
 
 no changes added to commit (use "git add" and/or "git commit -a")
@@ -143,7 +142,6 @@ You have unmerged paths.
 
 Unmerged paths:
   (use "git add/rm <file>..." as appropriate to mark resolution)
-
        both added:      conflict.txt
        deleted by them: main.txt
 
@@ -177,7 +175,6 @@ You have unmerged paths.
 
 Unmerged paths:
   (use "git add/rm <file>..." as appropriate to mark resolution)
-
        both deleted:    main.txt
        added by them:   sub_master.txt
        added by us:     sub_second.txt
@@ -201,12 +198,10 @@ You have unmerged paths.
   (use "git merge --abort" to abort the merge)
 
 Changes to be committed:
-
        new file:   sub_master.txt
 
 Unmerged paths:
   (use "git rm <file>..." to mark resolution)
-
        both deleted:    main.txt
 
 Untracked files not listed (use -u option to show untracked files)
index 681bc314b483d61c145b450c195da7449bd413cc..4e676cdce8d621c86b4f758bf9535efb17735f46 100755 (executable)
@@ -95,18 +95,15 @@ test_expect_success 'status --column' '
 #
 # Changes to be committed:
 #   (use "git restore --staged <file>..." to unstage)
-#
 #      new file:   dir2/added
 #
 # Changes not staged for commit:
 #   (use "git add <file>..." to update what will be committed)
 #   (use "git restore <file>..." to discard changes in working directory)
-#
 #      modified:   dir1/modified
 #
 # Untracked files:
 #   (use "git add <file>..." to include in what will be committed)
-#
 #      dir1/untracked dir2/untracked
 #      dir2/modified  untracked
 #
@@ -129,18 +126,15 @@ cat >expect <<\EOF
 #
 # Changes to be committed:
 #   (use "git restore --staged <file>..." to unstage)
-#
 #      new file:   dir2/added
 #
 # Changes not staged for commit:
 #   (use "git add <file>..." to update what will be committed)
 #   (use "git restore <file>..." to discard changes in working directory)
-#
 #      modified:   dir1/modified
 #
 # Untracked files:
 #   (use "git add <file>..." to include in what will be committed)
-#
 #      dir1/untracked
 #      dir2/modified
 #      dir2/untracked
@@ -279,23 +273,19 @@ and have 1 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        new file:   dir2/added
 
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   dir1/modified
 
 Untracked files:
   (use "git add <file>..." to include in what will be committed)
-
        dir2/modified
 
 Ignored files:
   (use "git add -f <file>..." to include in what will be committed)
-
        .gitignore
        dir1/untracked
        dir2/untracked
@@ -348,18 +338,15 @@ and have 1 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        new file:   dir2/added
 
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   dir1/modified
 
 Ignored files:
   (use "git add -f <file>..." to include in what will be committed)
-
        .gitignore
        dir1/untracked
        dir2/modified
@@ -421,13 +408,11 @@ and have 1 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        new file:   dir2/added
 
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   dir1/modified
 
 Untracked files not listed (use -u option to show untracked files)
@@ -485,18 +470,15 @@ and have 1 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        new file:   dir2/added
 
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   dir1/modified
 
 Untracked files:
   (use "git add <file>..." to include in what will be committed)
-
        dir1/untracked
        dir2/modified
        dir2/untracked
@@ -543,18 +525,15 @@ and have 1 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        new file:   dir2/added
 
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   dir1/modified
 
 Untracked files:
   (use "git add <file>..." to include in what will be committed)
-
        dir1/untracked
        dir2/modified
        dir2/untracked
@@ -606,18 +585,15 @@ and have 1 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        new file:   ../dir2/added
 
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   modified
 
 Untracked files:
   (use "git add <file>..." to include in what will be committed)
-
        untracked
        ../dir2/modified
        ../dir2/untracked
@@ -677,18 +653,15 @@ and have 1 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        <GREEN>new file:   dir2/added<RESET>
 
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        <RED>modified:   dir1/modified<RESET>
 
 Untracked files:
   (use "git add <file>..." to include in what will be committed)
-
        <BLUE>dir1/untracked<RESET>
        <BLUE>dir2/modified<RESET>
        <BLUE>dir2/untracked<RESET>
@@ -803,18 +776,15 @@ and have 1 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        new file:   dir2/added
 
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   dir1/modified
 
 Untracked files:
   (use "git add <file>..." to include in what will be committed)
-
        dir1/untracked
        dir2/modified
        dir2/untracked
@@ -853,12 +823,10 @@ and have 1 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        modified:   dir1/modified
 
 Untracked files:
   (use "git add <file>..." to include in what will be committed)
-
        dir1/untracked
        dir2/
        untracked
@@ -897,19 +865,16 @@ and have 1 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        new file:   dir2/added
        new file:   sm
 
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   dir1/modified
 
 Untracked files:
   (use "git add <file>..." to include in what will be committed)
-
        dir1/untracked
        dir2/modified
        dir2/untracked
@@ -957,14 +922,12 @@ and have 1 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        new file:   dir2/added
        new file:   sm
 
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   dir1/modified
 
 Submodule changes to be committed:
@@ -974,7 +937,6 @@ Submodule changes to be committed:
 
 Untracked files:
   (use "git add <file>..." to include in what will be committed)
-
        dir1/untracked
        dir2/modified
        dir2/untracked
@@ -1020,12 +982,10 @@ and have 2 and 2 different commits each, respectively.
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   dir1/modified
 
 Untracked files:
   (use "git add <file>..." to include in what will be committed)
-
        dir1/untracked
        dir2/modified
        dir2/untracked
@@ -1069,14 +1029,12 @@ and have 2 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --source=HEAD^1 --staged <file>..." to unstage)
-
        new file:   dir2/added
        new file:   sm
 
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   dir1/modified
 
 Submodule changes to be committed:
@@ -1086,7 +1044,6 @@ Submodule changes to be committed:
 
 Untracked files:
   (use "git add <file>..." to include in what will be committed)
-
        dir1/untracked
        dir2/modified
        dir2/untracked
@@ -1124,13 +1081,11 @@ and have 2 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        modified:   sm
 
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   dir1/modified
 
 Submodule changes to be committed:
@@ -1140,7 +1095,6 @@ Submodule changes to be committed:
 
 Untracked files:
   (use "git add <file>..." to include in what will be committed)
-
        .gitmodules
        dir1/untracked
        dir2/modified
@@ -1236,14 +1190,12 @@ and have 2 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        modified:   sm
 
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
   (commit or discard the untracked or modified content in submodules)
-
        modified:   dir1/modified
        modified:   sm (modified content)
 
@@ -1254,7 +1206,6 @@ Submodule changes to be committed:
 
 Untracked files:
   (use "git add <file>..." to include in what will be committed)
-
        .gitmodules
        dir1/untracked
        dir2/modified
@@ -1296,13 +1247,11 @@ and have 2 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        modified:   sm
 
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   dir1/modified
        modified:   sm (new commits)
 
@@ -1318,7 +1267,6 @@ Submodules changed but not updated:
 
 Untracked files:
   (use "git add <file>..." to include in what will be committed)
-
        .gitmodules
        dir1/untracked
        dir2/modified
@@ -1380,13 +1328,11 @@ cat > expect << EOF
 ;
 ; Changes to be committed:
 ;   (use "git restore --staged <file>..." to unstage)
-;
 ;      modified:   sm
 ;
 ; Changes not staged for commit:
 ;   (use "git add <file>..." to update what will be committed)
 ;   (use "git restore <file>..." to discard changes in working directory)
-;
 ;      modified:   dir1/modified
 ;      modified:   sm (new commits)
 ;
@@ -1402,7 +1348,6 @@ cat > expect << EOF
 ;
 ; Untracked files:
 ;   (use "git add <file>..." to include in what will be committed)
-;
 ;      .gitmodules
 ;      dir1/untracked
 ;      dir2/modified
@@ -1432,12 +1377,10 @@ and have 2 and 2 different commits each, respectively.
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   dir1/modified
 
 Untracked files:
   (use "git add <file>..." to include in what will be committed)
-
        .gitmodules
        dir1/untracked
        dir2/modified
@@ -1459,18 +1402,15 @@ and have 2 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        modified:   sm
 
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   dir1/modified
 
 Untracked files:
   (use "git add <file>..." to include in what will be committed)
-
        .gitmodules
        dir1/untracked
        dir2/modified
@@ -1582,13 +1522,11 @@ and have 2 and 2 different commits each, respectively.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        modified:   sm
 
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   dir1/modified
 
 Untracked files not listed (use -u option to show untracked files)
index b9f5d73423cfec8e61e10b6b42e7f8dc237a37b2..e01c285cbf8fdaa511240858f9f43165d149bc7e 100755 (executable)
@@ -33,7 +33,6 @@ You have unmerged paths.
 
 Unmerged paths:
   (use "git add <file>..." to mark resolution)
-
        both modified:   main.txt
 
 no changes added to commit (use "git add" and/or "git commit -a")
@@ -54,7 +53,6 @@ All conflicts fixed but you are still merging.
   (use "git commit" to conclude merge)
 
 Changes to be committed:
-
        modified:   main.txt
 
 Untracked files not listed (use -u option to show untracked files)
@@ -87,7 +85,6 @@ You are currently rebasing branch '\''rebase_conflicts'\'' on '\''$ONTO'\''.
 Unmerged paths:
   (use "git restore --staged <file>..." to unstage)
   (use "git add <file>..." to mark resolution)
-
        both modified:   main.txt
 
 no changes added to commit (use "git add" and/or "git commit -a")
@@ -111,7 +108,6 @@ You are currently rebasing branch '\''rebase_conflicts'\'' on '\''$ONTO'\''.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        modified:   main.txt
 
 Untracked files not listed (use -u option to show untracked files)
@@ -150,7 +146,6 @@ You are currently rebasing branch '\''rebase_i_conflicts_second'\'' on '\''$ONTO
 Unmerged paths:
   (use "git restore --staged <file>..." to unstage)
   (use "git add <file>..." to mark resolution)
-
        both modified:   main.txt
 
 no changes added to commit (use "git add" and/or "git commit -a")
@@ -177,7 +172,6 @@ You are currently rebasing branch '\''rebase_i_conflicts_second'\'' on '\''$ONTO
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        modified:   main.txt
 
 Untracked files not listed (use -u option to show untracked files)
@@ -247,7 +241,6 @@ You are currently splitting a commit while rebasing branch '\''split_commit'\''
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   main.txt
 
 no changes added to commit (use "git add" and/or "git commit -a")
@@ -355,7 +348,6 @@ You are currently splitting a commit while rebasing branch '\''several_edits'\''
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   main.txt
 
 no changes added to commit (use "git add" and/or "git commit -a")
@@ -454,7 +446,6 @@ You are currently splitting a commit while rebasing branch '\''several_edits'\''
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   main.txt
 
 no changes added to commit (use "git add" and/or "git commit -a")
@@ -558,7 +549,6 @@ You are currently splitting a commit while rebasing branch '\''several_edits'\''
 Changes not staged for commit:
   (use "git add <file>..." to update what will be committed)
   (use "git restore <file>..." to discard changes in working directory)
-
        modified:   main.txt
 
 no changes added to commit (use "git add" and/or "git commit -a")
@@ -747,7 +737,6 @@ You are currently cherry-picking commit $TO_CHERRY_PICK.
 
 Unmerged paths:
   (use "git add <file>..." to mark resolution)
-
        both modified:   main.txt
 
 no changes added to commit (use "git add" and/or "git commit -a")
@@ -771,7 +760,6 @@ You are currently cherry-picking commit $TO_CHERRY_PICK.
   (use "git cherry-pick --abort" to cancel the cherry-pick operation)
 
 Changes to be committed:
-
        modified:   main.txt
 
 Untracked files not listed (use -u option to show untracked files)
@@ -798,6 +786,22 @@ EOF
        test_i18ncmp expected actual
 '
 
+test_expect_success 'status shows cherry-pick with invalid oid' '
+       mkdir .git/sequencer &&
+       test_write_lines "pick invalid-oid" >.git/sequencer/todo &&
+       git status --untracked-files=no >actual 2>err &&
+       git cherry-pick --quit &&
+       test_must_be_empty err &&
+       test_i18ncmp expected actual
+'
+
+test_expect_success 'status does not show error if .git/sequencer is a file' '
+       test_when_finished "rm .git/sequencer" &&
+       test_write_lines hello >.git/sequencer &&
+       git status --untracked-files=no 2>err &&
+       test_must_be_empty err
+'
+
 test_expect_success 'status showing detached at and from a tag' '
        test_commit atag tagging &&
        git checkout atag &&
@@ -836,7 +840,6 @@ You are currently reverting commit $TO_REVERT.
 Unmerged paths:
   (use "git restore --staged <file>..." to unstage)
   (use "git add <file>..." to mark resolution)
-
        both modified:   to-revert.txt
 
 no changes added to commit (use "git add" and/or "git commit -a")
@@ -856,7 +859,6 @@ You are currently reverting commit $TO_REVERT.
 
 Changes to be committed:
   (use "git restore --staged <file>..." to unstage)
-
        modified:   to-revert.txt
 
 Untracked files not listed (use -u option to show untracked files)
index 86d05160a3589cefd7ad21dbf64c655658f26342..0e9af832c9790f3e329fc59d61b8bc177c151b48 100755 (executable)
@@ -239,4 +239,14 @@ test_expect_success 'bitmaps can be disabled on bare repos' '
        test -z "$bitmap"
 '
 
+test_expect_success 'no bitmaps created if .keep files present' '
+       pack=$(ls bare.git/objects/pack/*.pack) &&
+       test_path_is_file "$pack" &&
+       keep=${pack%.pack}.keep &&
+       >"$keep" &&
+       git -C bare.git repack -ad &&
+       find bare.git/objects/pack/ -type f -name "*.bitmap" >actual &&
+       test_must_be_empty actual
+'
+
 test_done
index 134a694516c924c5930b9c227da45e896297a338..a11366b4cee44da8206852f52fe749816443b34f 100755 (executable)
@@ -14,12 +14,14 @@ test_expect_success 'setup directory structure and submodule' '
        echo "(3|4)" >b/b &&
        git add a b &&
        git commit -m "add a and b" &&
+       test_tick &&
        git init submodule &&
        echo "(1|2)d(3|4)" >submodule/a &&
        git -C submodule add a &&
        git -C submodule commit -m "add a" &&
        git submodule add ./submodule &&
-       git commit -m "added submodule"
+       git commit -m "added submodule" &&
+       test_tick
 '
 
 test_expect_success 'grep correctly finds patterns in a submodule' '
@@ -65,11 +67,14 @@ test_expect_success 'grep and nested submodules' '
        echo "(1|2)d(3|4)" >submodule/sub/a &&
        git -C submodule/sub add a &&
        git -C submodule/sub commit -m "add a" &&
+       test_tick &&
        git -C submodule submodule add ./sub &&
        git -C submodule add sub &&
        git -C submodule commit -m "added sub" &&
+       test_tick &&
        git add submodule &&
        git commit -m "updated submodule" &&
+       test_tick &&
 
        cat >expect <<-\EOF &&
        a:(1|2)d(3|4)
@@ -179,15 +184,18 @@ test_expect_success !MINGW 'grep recurse submodule colon in name' '
        echo "(1|2)d(3|4)" >"parent/fi:le" &&
        git -C parent add "fi:le" &&
        git -C parent commit -m "add fi:le" &&
+       test_tick &&
 
        git init "su:b" &&
        test_when_finished "rm -rf su:b" &&
        echo "(1|2)d(3|4)" >"su:b/fi:le" &&
        git -C "su:b" add "fi:le" &&
        git -C "su:b" commit -m "add fi:le" &&
+       test_tick &&
 
        git -C parent submodule add "../su:b" "su:b" &&
        git -C parent commit -m "add submodule" &&
+       test_tick &&
 
        cat >expect <<-\EOF &&
        fi:le:(1|2)d(3|4)
@@ -210,15 +218,18 @@ test_expect_success 'grep history with moved submoules' '
        echo "(1|2)d(3|4)" >parent/file &&
        git -C parent add file &&
        git -C parent commit -m "add file" &&
+       test_tick &&
 
        git init sub &&
        test_when_finished "rm -rf sub" &&
        echo "(1|2)d(3|4)" >sub/file &&
        git -C sub add file &&
        git -C sub commit -m "add file" &&
+       test_tick &&
 
        git -C parent submodule add ../sub dir/sub &&
        git -C parent commit -m "add submodule" &&
+       test_tick &&
 
        cat >expect <<-\EOF &&
        dir/sub/file:(1|2)d(3|4)
@@ -229,6 +240,7 @@ test_expect_success 'grep history with moved submoules' '
 
        git -C parent mv dir/sub sub-moved &&
        git -C parent commit -m "moved submodule" &&
+       test_tick &&
 
        cat >expect <<-\EOF &&
        file:(1|2)d(3|4)
@@ -251,6 +263,7 @@ test_expect_success 'grep using relative path' '
        echo "(1|2)d(3|4)" >sub/file &&
        git -C sub add file &&
        git -C sub commit -m "add file" &&
+       test_tick &&
 
        git init parent &&
        echo "(1|2)d(3|4)" >parent/file &&
@@ -260,6 +273,7 @@ test_expect_success 'grep using relative path' '
        git -C parent add src/file2 &&
        git -C parent submodule add ../sub &&
        git -C parent commit -m "add files and submodule" &&
+       test_tick &&
 
        # From top works
        cat >expect <<-\EOF &&
@@ -293,6 +307,7 @@ test_expect_success 'grep from a subdir' '
        echo "(1|2)d(3|4)" >sub/file &&
        git -C sub add file &&
        git -C sub commit -m "add file" &&
+       test_tick &&
 
        git init parent &&
        mkdir parent/src &&
@@ -301,6 +316,7 @@ test_expect_success 'grep from a subdir' '
        git -C parent submodule add ../sub src/sub &&
        git -C parent submodule add ../sub sub &&
        git -C parent commit -m "add files and submodules" &&
+       test_tick &&
 
        # Verify grep from root works
        cat >expect <<-\EOF &&
index c92a47b6d5b11ab537ce9892ca21feddc19db7d9..1c5fb1d1f8c9cd9062ae44c6069fc530259762d4 100755 (executable)
@@ -275,4 +275,40 @@ test_expect_success 'blame file with CRLF core.autocrlf=true' '
        grep "A U Thor" actual
 '
 
+# Tests the splitting and merging of blame entries in blame_coalesce().
+# The output of blame is the same, regardless of whether blame_coalesce() runs
+# or not, so we'd likely only notice a problem if blame crashes or assigns
+# blame to the "splitting" commit ('SPLIT' below).
+test_expect_success 'blame coalesce' '
+       cat >giraffe <<-\EOF &&
+       ABC
+       DEF
+       EOF
+       git add giraffe &&
+       git commit -m "original file" &&
+       oid=$(git rev-parse HEAD) &&
+
+       cat >giraffe <<-\EOF &&
+       ABC
+       SPLIT
+       DEF
+       EOF
+       git add giraffe &&
+       git commit -m "interior SPLIT line" &&
+
+       cat >giraffe <<-\EOF &&
+       ABC
+       DEF
+       EOF
+       git add giraffe &&
+       git commit -m "same contents as original" &&
+
+       cat >expect <<-EOF &&
+       $oid 1) ABC
+       $oid 2) DEF
+       EOF
+       git -c core.abbrev=40 blame -s giraffe >actual &&
+       test_cmp expect actual
+'
+
 test_done
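
The coalesce scenario above is easy to reproduce by hand. A rough standalone sketch, assuming a configured git identity (repository and file names are illustrative):

    #!/bin/sh
    # Insert a line, then restore the original content; both surviving lines
    # should still blame to the first commit, which is exactly the case
    # blame_coalesce() has to merge back into a single blame entry.
    git init coalesce-demo && cd coalesce-demo &&
    printf 'ABC\nDEF\n' >giraffe &&
    git add giraffe && git commit -m original &&
    printf 'ABC\nSPLIT\nDEF\n' >giraffe &&
    git commit -am "insert SPLIT" &&
    printf 'ABC\nDEF\n' >giraffe &&
    git commit -am "back to original" &&
    git blame -s giraffe    # both lines should point at "original"
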
diff --git a/t/t8013-blame-ignore-revs.sh b/t/t8013-blame-ignore-revs.sh
new file mode 100755 (executable)
index 0000000..36dc31e
--- /dev/null
@@ -0,0 +1,274 @@
+#!/bin/sh
+
+test_description='ignore revisions when blaming'
+. ./test-lib.sh
+
+# Creates:
+#      A--B--X
+# A added line 1 and B added line 2.  X makes changes to those lines.  Sanity
+# check that X is blamed for both lines.
+test_expect_success setup '
+       test_commit A file line1 &&
+
+       echo line2 >>file &&
+       git add file &&
+       test_tick &&
+       git commit -m B &&
+       git tag B &&
+
+       test_write_lines line-one line-two >file &&
+       git add file &&
+       test_tick &&
+       git commit -m X &&
+       git tag X &&
+
+       git blame --line-porcelain file >blame_raw &&
+
+       grep -E "^[0-9a-f]+ [0-9]+ 1" blame_raw | sed -e "s/ .*//" >actual &&
+       git rev-parse X >expect &&
+       test_cmp expect actual &&
+
+       grep -E "^[0-9a-f]+ [0-9]+ 2" blame_raw | sed -e "s/ .*//" >actual &&
+       git rev-parse X >expect &&
+       test_cmp expect actual
+       '
+
+# Ignore X, make sure A is blamed for line 1 and B for line 2.
+test_expect_success ignore_rev_changing_lines '
+       git blame --line-porcelain --ignore-rev X file >blame_raw &&
+
+       grep -E "^[0-9a-f]+ [0-9]+ 1" blame_raw | sed -e "s/ .*//" >actual &&
+       git rev-parse A >expect &&
+       test_cmp expect actual &&
+
+       grep -E "^[0-9a-f]+ [0-9]+ 2" blame_raw | sed -e "s/ .*//" >actual &&
+       git rev-parse B >expect &&
+       test_cmp expect actual
+       '
+
+# For ignored revs that have added 'unblamable' lines, attribute those to the
+# ignored commit.
+#      A--B--X--Y
+# Where Y changes lines 1 and 2, and adds lines 3 and 4.  The added lines ought
+# to have nothing in common with "line-one" or "line-two", to keep any
+# heuristics from matching them with any lines in the parent.
+test_expect_success ignore_rev_adding_unblamable_lines '
+       test_write_lines line-one-change line-two-changed y3 y4 >file &&
+       git add file &&
+       test_tick &&
+       git commit -m Y &&
+       git tag Y &&
+
+       git rev-parse Y >expect &&
+       git blame --line-porcelain file --ignore-rev Y >blame_raw &&
+
+       grep -E "^[0-9a-f]+ [0-9]+ 3" blame_raw | sed -e "s/ .*//" >actual &&
+       test_cmp expect actual &&
+
+       grep -E "^[0-9a-f]+ [0-9]+ 4" blame_raw | sed -e "s/ .*//" >actual &&
+       test_cmp expect actual
+       '
+
+# Ignore X and Y, both in separate files.  Lines 1 == A, 2 == B.
+test_expect_success ignore_revs_from_files '
+       git rev-parse X >ignore_x &&
+       git rev-parse Y >ignore_y &&
+       git blame --line-porcelain file --ignore-revs-file ignore_x --ignore-revs-file ignore_y >blame_raw &&
+
+       grep -E "^[0-9a-f]+ [0-9]+ 1" blame_raw | sed -e "s/ .*//" >actual &&
+       git rev-parse A >expect &&
+       test_cmp expect actual &&
+
+       grep -E "^[0-9a-f]+ [0-9]+ 2" blame_raw | sed -e "s/ .*//" >actual &&
+       git rev-parse B >expect &&
+       test_cmp expect actual
+       '
+
+# Ignore X from the config option, Y from a file.
+test_expect_success ignore_revs_from_configs_and_files '
+       git config --add blame.ignoreRevsFile ignore_x &&
+       git blame --line-porcelain file --ignore-revs-file ignore_y >blame_raw &&
+
+       grep -E "^[0-9a-f]+ [0-9]+ 1" blame_raw | sed -e "s/ .*//" >actual &&
+       git rev-parse A >expect &&
+       test_cmp expect actual &&
+
+       grep -E "^[0-9a-f]+ [0-9]+ 2" blame_raw | sed -e "s/ .*//" >actual &&
+       git rev-parse B >expect &&
+       test_cmp expect actual
+       '
+
+# Override blame.ignoreRevsFile (ignore_x) with an empty string.  X should be
+# blamed now for lines 1 and 2, since we are no longer ignoring X.
+test_expect_success override_ignore_revs_file '
+       git blame --line-porcelain file --ignore-revs-file "" --ignore-revs-file ignore_y >blame_raw &&
+       git rev-parse X >expect &&
+
+       grep -E "^[0-9a-f]+ [0-9]+ 1" blame_raw | sed -e "s/ .*//" >actual &&
+       test_cmp expect actual &&
+
+       grep -E "^[0-9a-f]+ [0-9]+ 2" blame_raw | sed -e "s/ .*//" >actual &&
+       test_cmp expect actual
+       '
+test_expect_success bad_files_and_revs '
+       test_must_fail git blame file --ignore-rev NOREV 2>err &&
+       test_i18ngrep "cannot find revision NOREV to ignore" err &&
+
+       test_must_fail git blame file --ignore-revs-file NOFILE 2>err &&
+       test_i18ngrep "could not open.*: NOFILE" err &&
+
+       echo NOREV >ignore_norev &&
+       test_must_fail git blame file --ignore-revs-file ignore_norev 2>err &&
+       test_i18ngrep "invalid object name: NOREV" err
+       '
+
+# For ignored revs that have added 'unblamable' lines, mark those lines with a
+# '*'
+#      A--B--X--Y
+# Lines 3 and 4 are from Y and unblamable.  This was set up in
+# ignore_rev_adding_unblamable_lines.
+test_expect_success mark_unblamable_lines '
+       git config --add blame.markUnblamableLines true &&
+
+       git blame --ignore-rev Y file >blame_raw &&
+       echo "*" >expect &&
+
+       sed -n "3p" blame_raw | cut -c1 >actual &&
+       test_cmp expect actual &&
+
+       sed -n "4p" blame_raw | cut -c1 >actual &&
+       test_cmp expect actual
+       '
+
+# Commit Z will touch the first two lines.  Y touched all four.
+#      A--B--X--Y--Z
+# The blame output when ignoring Z should be:
+# ?Y ... 1)
+# ?Y ... 2)
+# Y  ... 3)
+# Y  ... 4)
+# We're checking only the first character
+test_expect_success mark_ignored_lines '
+       git config --add blame.markIgnoredLines true &&
+
+       test_write_lines line-one-Z line-two-Z y3 y4 >file &&
+       git add file &&
+       test_tick &&
+       git commit -m Z &&
+       git tag Z &&
+
+       git blame --ignore-rev Z file >blame_raw &&
+       echo "?" >expect &&
+
+       sed -n "1p" blame_raw | cut -c1 >actual &&
+       test_cmp expect actual &&
+
+       sed -n "2p" blame_raw | cut -c1 >actual &&
+       test_cmp expect actual &&
+
+       sed -n "3p" blame_raw | cut -c1 >actual &&
+       ! test_cmp expect actual &&
+
+       sed -n "4p" blame_raw | cut -c1 >actual &&
+       ! test_cmp expect actual
+       '
+
+# For ignored revs that added 'unblamable' lines, where more recent commits
+# changed the blamable lines, mark the unblamable lines with a
+# '*'
+#      A--B--X--Y--Z
+# Lines 3 and 4 are from Y and unblamable, as set up in
+# ignore_rev_adding_unblamable_lines.  Z changed lines 1 and 2.
+test_expect_success mark_unblamable_lines_intermediate '
+       git config --add blame.markUnblamableLines true &&
+
+       git blame --ignore-rev Y file >blame_raw 2>stderr &&
+       echo "*" >expect &&
+
+       sed -n "3p" blame_raw | cut -c1 >actual &&
+       test_cmp expect actual &&
+
+       sed -n "4p" blame_raw | cut -c1 >actual &&
+       test_cmp expect actual
+       '
+
+# The heuristic called by guess_line_blames() tries to find the size of a
+# blame_entry 'e' in the parent's address space.  Those calculations need to
+# check for negative or zero values for when a blame entry is completely outside
+# the window of the parent's version of a file.
+#
+# This happens when one commit adds several lines (commit B below).  A later
+# commit (C) changes one line in the middle of B's change.  Commit C gets blamed
+# for its change, and that breaks up B's change into multiple blame entries.
+# When processing B, one of the blame_entries is outside A's window (which was
+# zero - it had no lines added on its side of the diff).
+#
+# A--B--C, ignore B to test the ignore heuristic's boundary checks.
+test_expect_success ignored_chunk_negative_parent_size '
+       rm -rf .git/ &&
+       git init &&
+
+       test_write_lines L1 L2 L7 L8 L9 >file &&
+       git add file &&
+       test_tick &&
+       git commit -m A &&
+       git tag A &&
+
+       test_write_lines L1 L2 L3 L4 L5 L6 L7 L8 L9 >file &&
+       git add file &&
+       test_tick &&
+       git commit -m B &&
+       git tag B &&
+
+       test_write_lines L1 L2 L3 L4 xxx L6 L7 L8 L9 >file &&
+       git add file &&
+       test_tick &&
+       git commit -m C &&
+       git tag C &&
+
+       git blame file --ignore-rev B >blame_raw
+       '
+
+# Resetting the repo and creating:
+#
+# A--B--M
+#  \   /
+#   C-+
+#
+# 'A' creates a file.  B changes line 1, and C changes line 9.  M merges.
+test_expect_success ignore_merge '
+       rm -rf .git/ &&
+       git init &&
+
+       test_write_lines L1 L2 L3 L4 L5 L6 L7 L8 L9 >file &&
+       git add file &&
+       test_tick &&
+       git commit -m A &&
+       git tag A &&
+
+       test_write_lines BB L2 L3 L4 L5 L6 L7 L8 L9 >file &&
+       git add file &&
+       test_tick &&
+       git commit -m B &&
+       git tag B &&
+
+       git reset --hard A &&
+       test_write_lines L1 L2 L3 L4 L5 L6 L7 L8 CC >file &&
+       git add file &&
+       test_tick &&
+       git commit -m C &&
+       git tag C &&
+
+       test_merge M B &&
+       git blame --line-porcelain file --ignore-rev M >blame_raw &&
+
+       grep -E "^[0-9a-f]+ [0-9]+ 1" blame_raw | sed -e "s/ .*//" >actual &&
+       git rev-parse B >expect &&
+       test_cmp expect actual &&
+
+       grep -E "^[0-9a-f]+ [0-9]+ 9" blame_raw | sed -e "s/ .*//" >actual &&
+       git rev-parse C >expect &&
+       test_cmp expect actual
+       '
+
+test_done
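
Outside the test suite, the option under test is driven either per invocation or through configuration. A minimal sketch of both forms, mirroring the A--B--X history built above; the repository name and ignore-file name are illustrative, and a configured git identity is assumed.

    #!/bin/sh
    # Sketch: X rewrites both lines; ignoring X pushes blame back to A and B.
    git init ignore-demo && cd ignore-demo &&
    echo line1 >file && git add file && git commit -m A &&
    echo line2 >>file && git commit -am B &&
    printf 'line-one\nline-two\n' >file && git commit -am X &&
    git rev-parse HEAD >.git-blame-ignore-revs &&
    # One-off on the command line ...
    git blame --ignore-rev "$(cat .git-blame-ignore-revs)" file &&
    # ... or persistently via configuration, as the ignoreRevsFile tests exercise.
    git config blame.ignoreRevsFile .git-blame-ignore-revs &&
    git blame file
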
diff --git a/t/t8014-blame-ignore-fuzzy.sh b/t/t8014-blame-ignore-fuzzy.sh
new file mode 100755 (executable)
index 0000000..6e61882
--- /dev/null
@@ -0,0 +1,437 @@
+#!/bin/sh
+
+test_description='git blame ignore fuzzy heuristic'
+. ./test-lib.sh
+
+pick_author='s/^[0-9a-f^]* *(\([^ ]*\) .*/\1/'
+
+# Each test is composed of 4 variables:
+# titleN - the test name
+# aN - the initial content
+# bN - the final content
+# expectedN - the line numbers from aN that we expect git blame
+#             on bN to identify, or "Final" if bN itself should
+#             be identified as the origin of that line.
+
+# We start at test 2 because setup will show as test 1
+title2="Regression test for partially overlapping search ranges"
+cat <<EOF >a2
+1
+2
+3
+abcdef
+5
+6
+7
+ijkl
+9
+10
+11
+pqrs
+13
+14
+15
+wxyz
+17
+18
+19
+EOF
+cat <<EOF >b2
+abcde
+ijk
+pqr
+wxy
+EOF
+cat <<EOF >expected2
+4
+8
+12
+16
+EOF
+
+title3="Combine 3 lines into 2"
+cat <<EOF >a3
+if ((maxgrow==0) ||
+       ( single_line_field && (field->dcols < maxgrow)) ||
+       (!single_line_field && (field->drows < maxgrow)))
+EOF
+cat <<EOF >b3
+if ((maxgrow == 0) || (single_line_field && (field->dcols < maxgrow)) ||
+       (!single_line_field && (field->drows < maxgrow))) {
+EOF
+cat <<EOF >expected3
+2
+3
+EOF
+
+title4="Add curly brackets"
+cat <<EOF >a4
+       if (rows) *rows = field->rows;
+       if (cols) *cols = field->cols;
+       if (frow) *frow = field->frow;
+       if (fcol) *fcol = field->fcol;
+EOF
+cat <<EOF >b4
+       if (rows) {
+               *rows = field->rows;
+       }
+       if (cols) {
+               *cols = field->cols;
+       }
+       if (frow) {
+               *frow = field->frow;
+       }
+       if (fcol) {
+               *fcol = field->fcol;
+       }
+EOF
+cat <<EOF >expected4
+1
+1
+Final
+2
+2
+Final
+3
+3
+Final
+4
+4
+Final
+EOF
+
+
+title5="Combine many lines and change case"
+cat <<EOF >a5
+for(row=0,pBuffer=field->buf;
+       row<height;
+       row++,pBuffer+=width )
+{
+       if ((len = (int)( After_End_Of_Data( pBuffer, width ) - pBuffer )) > 0)
+       {
+               wmove( win, row, 0 );
+               waddnstr( win, pBuffer, len );
+EOF
+cat <<EOF >b5
+for (Row = 0, PBuffer = field->buf; Row < Height; Row++, PBuffer += Width) {
+       if ((Len = (int)(afterEndOfData(PBuffer, Width) - PBuffer)) > 0) {
+               wmove(win, Row, 0);
+               waddnstr(win, PBuffer, Len);
+EOF
+cat <<EOF >expected5
+1
+5
+7
+8
+EOF
+
+title6="Rename and combine lines"
+cat <<EOF >a6
+bool need_visual_update = ((form != (FORM *)0)      &&
+       (form->status & _POSTED) &&
+       (form->current==field));
+
+if (need_visual_update)
+       Synchronize_Buffer(form);
+
+if (single_line_field)
+{
+       growth = field->cols * amount;
+       if (field->maxgrow)
+               growth = Minimum(field->maxgrow - field->dcols,growth);
+       field->dcols += growth;
+       if (field->dcols == field->maxgrow)
+EOF
+cat <<EOF >b6
+bool NeedVisualUpdate = ((Form != (FORM *)0) && (Form->status & _POSTED) &&
+       (Form->current == field));
+
+if (NeedVisualUpdate) {
+       synchronizeBuffer(Form);
+}
+
+if (SingleLineField) {
+       Growth = field->cols * amount;
+       if (field->maxgrow) {
+               Growth = Minimum(field->maxgrow - field->dcols, Growth);
+       }
+       field->dcols += Growth;
+       if (field->dcols == field->maxgrow) {
+EOF
+cat <<EOF >expected6
+1
+3
+4
+5
+6
+Final
+7
+8
+10
+11
+12
+Final
+13
+14
+EOF
+
+# Both lines match identically so position must be used to tie-break.
+title7="Same line twice"
+cat <<EOF >a7
+abc
+abc
+EOF
+cat <<EOF >b7
+abcd
+abcd
+EOF
+cat <<EOF >expected7
+1
+2
+EOF
+
+title8="Enforce line order"
+cat <<EOF >a8
+abcdef
+ghijkl
+ab
+EOF
+cat <<EOF >b8
+ghijk
+abcd
+EOF
+cat <<EOF >expected8
+2
+3
+EOF
+
+title9="Expand lines and rename variables"
+cat <<EOF >a9
+int myFunction(int ArgumentOne, Thing *ArgTwo, Blah XuglyBug) {
+       Squiggle FabulousResult = squargle(ArgumentOne, *ArgTwo,
+               XuglyBug) + EwwwGlobalWithAReallyLongNameYepTooLong;
+       return FabulousResult * 42;
+}
+EOF
+cat <<EOF >b9
+int myFunction(int argument_one, Thing *arg_asdfgh,
+       Blah xugly_bug) {
+       Squiggle fabulous_result = squargle(argument_one,
+               *arg_asdfgh, xugly_bug)
+               + g_ewww_global_with_a_really_long_name_yep_too_long;
+       return fabulous_result * 42;
+}
+EOF
+cat <<EOF >expected9
+1
+1
+2
+3
+3
+4
+5
+EOF
+
+title10="Two close matches versus one less close match"
+cat <<EOF >a10
+abcdef
+abcdef
+ghijkl
+EOF
+cat <<EOF >b10
+gh
+abcdefx
+EOF
+cat <<EOF >expected10
+Final
+2
+EOF
+
+# The first line of b matches best with the last line of a, but the overall
+# match is better if we match it with the first line of a.
+title11="Piggy in the middle"
+cat <<EOF >a11
+abcdefg
+ijklmn
+abcdefgh
+EOF
+cat <<EOF >b11
+abcdefghx
+ijklm
+EOF
+cat <<EOF >expected11
+1
+2
+EOF
+
+title12="No trailing newline"
+printf "abc\ndef" >a12
+printf "abx\nstu" >b12
+cat <<EOF >expected12
+1
+Final
+EOF
+
+title13="Reorder includes"
+cat <<EOF >a13
+#include "c.h"
+#include "b.h"
+#include "a.h"
+#include "e.h"
+#include "d.h"
+EOF
+cat <<EOF >b13
+#include "a.h"
+#include "b.h"
+#include "c.h"
+#include "d.h"
+#include "e.h"
+EOF
+cat <<EOF >expected13
+3
+2
+1
+5
+4
+EOF
+
+last_test=13
+
+test_expect_success setup '
+       for i in $(test_seq 2 $last_test)
+       do
+               # Append each line in a separate commit to make it easy to
+               # check which original line the blame output relates to.
+
+               line_count=0 &&
+               while IFS= read line
+               do
+                       line_count=$((line_count+1)) &&
+                       echo "$line" >>"$i" &&
+                       git add "$i" &&
+                       test_tick &&
+                       GIT_AUTHOR_NAME="$line_count" git commit -m "$line_count"
+               done <"a$i"
+       done &&
+
+       for i in $(test_seq 2 $last_test)
+       do
+               # Overwrite the files with the final content.
+               cp b$i $i &&
+               git add $i
+       done &&
+       test_tick &&
+
+       # Commit the final content all at once so it can all be
+       # referred to with the same commit ID.
+       GIT_AUTHOR_NAME=Final git commit -m Final &&
+
+       IGNOREME=$(git rev-parse HEAD)
+'
+
+for i in $(test_seq 2 $last_test); do
+       eval title="\$title$i"
+       test_expect_success "$title" \
+       "git blame -M9 --ignore-rev $IGNOREME $i >output &&
+       sed -e \"$pick_author\" output >actual &&
+       test_cmp expected$i actual"
+done
+
+# This used to trigger a null pointer dereference when the chunk callback was
+# called with a zero-length parent chunk and there were no more suspects.
+test_expect_success 'Diff chunks with no suspects' '
+       test_write_lines xy1 A B C xy1 >file &&
+       git add file &&
+       test_tick &&
+       GIT_AUTHOR_NAME=1 git commit -m 1 &&
+
+       test_write_lines xy2 A B xy2 C xy2 >file &&
+       git add file &&
+       test_tick &&
+       GIT_AUTHOR_NAME=2 git commit -m 2 &&
+       REV_2=$(git rev-parse HEAD) &&
+
+       test_write_lines xy3 A >file &&
+       git add file &&
+       test_tick &&
+       GIT_AUTHOR_NAME=3 git commit -m 3 &&
+       REV_3=$(git rev-parse HEAD) &&
+
+       test_write_lines 1 1 >expected &&
+
+       git blame --ignore-rev $REV_2 --ignore-rev $REV_3 file >output &&
+       sed -e "$pick_author" output >actual &&
+
+       test_cmp expected actual
+       '
+
+test_expect_success 'position matching' '
+       test_write_lines abc def >file2 &&
+       git add file2 &&
+       test_tick &&
+       GIT_AUTHOR_NAME=1 git commit -m 1 &&
+
+       test_write_lines abc def abc def >file2 &&
+       git add file2 &&
+       test_tick &&
+       GIT_AUTHOR_NAME=2 git commit -m 2 &&
+
+       test_write_lines abcx defx abcx defx >file2 &&
+       git add file2 &&
+       test_tick &&
+       GIT_AUTHOR_NAME=3 git commit -m 3 &&
+       REV_3=$(git rev-parse HEAD) &&
+
+       test_write_lines abcy defy abcx defx >file2 &&
+       git add file2 &&
+       test_tick &&
+       GIT_AUTHOR_NAME=4 git commit -m 4 &&
+       REV_4=$(git rev-parse HEAD) &&
+
+       test_write_lines 1 1 2 2 >expected &&
+
+       git blame --ignore-rev $REV_3 --ignore-rev $REV_4 file2 >output &&
+       sed -e "$pick_author" output >actual &&
+
+       test_cmp expected actual
+       '
+
+# This fails if each blame entry is processed independently instead of
+# processing each diff change in full.
+test_expect_success 'preserve order' '
+       test_write_lines bcde >file3 &&
+       git add file3 &&
+       test_tick &&
+       GIT_AUTHOR_NAME=1 git commit -m 1 &&
+
+       test_write_lines bcde fghij >file3 &&
+       git add file3 &&
+       test_tick &&
+       GIT_AUTHOR_NAME=2 git commit -m 2 &&
+
+       test_write_lines bcde fghij abcd >file3 &&
+       git add file3 &&
+       test_tick &&
+       GIT_AUTHOR_NAME=3 git commit -m 3 &&
+
+       test_write_lines abcdx fghijx bcdex >file3 &&
+       git add file3 &&
+       test_tick &&
+       GIT_AUTHOR_NAME=4 git commit -m 4 &&
+       REV_4=$(git rev-parse HEAD) &&
+
+       test_write_lines abcdx fghijy bcdex >file3 &&
+       git add file3 &&
+       test_tick &&
+       GIT_AUTHOR_NAME=5 git commit -m 5 &&
+       REV_5=$(git rev-parse HEAD) &&
+
+       test_write_lines 1 2 3 >expected &&
+
+       git blame --ignore-rev $REV_4 --ignore-rev $REV_5 file3 >output &&
+       sed -e "$pick_author" output >actual &&
+
+       test_cmp expected actual
+       '
+
+test_done
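
The setup above relies on one trick worth calling out: every line of the "before" content is committed separately with the author name set to its line number, so the author column of later blame output reads directly as a mapping back to the original lines. A rough sketch of that idea on its own, assuming user.name and user.email are already configured (the demo repository and file contents are illustrative):

    #!/bin/sh
    # One commit per line, author name == line number, so "git blame" output
    # can be read as "which original line does this come from".
    git init fuzzy-demo && cd fuzzy-demo &&
    n=0
    for line in alpha beta gamma
    do
            n=$((n+1))
            printf '%s\n' "$line" >>file
            git add file
            GIT_AUTHOR_NAME="$n" git commit -m "line $n"
    done
    git blame file
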
index 5cadedb2a9bc6eb66b9ead3ccae037e62a655f2c..88bc733ad6910ae0a2b11bb5e239f010cb2cdb72 100755 (executable)
@@ -211,8 +211,24 @@ test_expect_success 'prompt - merge' '
 
 test_expect_success 'prompt - cherry-pick' '
        printf " (master|CHERRY-PICKING)" >expected &&
-       test_must_fail git cherry-pick b1 &&
-       test_when_finished "git reset --hard" &&
+       test_must_fail git cherry-pick b1 b1^ &&
+       test_when_finished "git cherry-pick --abort" &&
+       __git_ps1 >"$actual" &&
+       test_cmp expected "$actual" &&
+       git reset --merge &&
+       test_must_fail git rev-parse CHERRY_PICK_HEAD &&
+       __git_ps1 >"$actual" &&
+       test_cmp expected "$actual"
+'
+
+test_expect_success 'prompt - revert' '
+       printf " (master|REVERTING)" >expected &&
+       test_must_fail git revert b1^ b1 &&
+       test_when_finished "git revert --abort" &&
+       __git_ps1 >"$actual" &&
+       test_cmp expected "$actual" &&
+       git reset --merge &&
+       test_must_fail git rev-parse REVERT_HEAD &&
        __git_ps1 >"$actual" &&
        test_cmp expected "$actual"
 '
index f1fcd2c4b006dc2ece2019ac91f73a2f42bbf6bd..2def5a0c356bb1cff473aaefb2d17f0b6e494e0e 100644 (file)
@@ -1380,100 +1380,3 @@ char *transport_anonymize_url(const char *url)
 literal_copy:
        return xstrdup(url);
 }
-
-static void fill_alternate_refs_command(struct child_process *cmd,
-                                       const char *repo_path)
-{
-       const char *value;
-
-       if (!git_config_get_value("core.alternateRefsCommand", &value)) {
-               cmd->use_shell = 1;
-
-               argv_array_push(&cmd->args, value);
-               argv_array_push(&cmd->args, repo_path);
-       } else {
-               cmd->git_cmd = 1;
-
-               argv_array_pushf(&cmd->args, "--git-dir=%s", repo_path);
-               argv_array_push(&cmd->args, "for-each-ref");
-               argv_array_push(&cmd->args, "--format=%(objectname)");
-
-               if (!git_config_get_value("core.alternateRefsPrefixes", &value)) {
-                       argv_array_push(&cmd->args, "--");
-                       argv_array_split(&cmd->args, value);
-               }
-       }
-
-       cmd->env = local_repo_env;
-       cmd->out = -1;
-}
-
-static void read_alternate_refs(const char *path,
-                               alternate_ref_fn *cb,
-                               void *data)
-{
-       struct child_process cmd = CHILD_PROCESS_INIT;
-       struct strbuf line = STRBUF_INIT;
-       FILE *fh;
-
-       fill_alternate_refs_command(&cmd, path);
-
-       if (start_command(&cmd))
-               return;
-
-       fh = xfdopen(cmd.out, "r");
-       while (strbuf_getline_lf(&line, fh) != EOF) {
-               struct object_id oid;
-               const char *p;
-
-               if (parse_oid_hex(line.buf, &oid, &p) || *p) {
-                       warning(_("invalid line while parsing alternate refs: %s"),
-                               line.buf);
-                       break;
-               }
-
-               cb(&oid, data);
-       }
-
-       fclose(fh);
-       finish_command(&cmd);
-}
-
-struct alternate_refs_data {
-       alternate_ref_fn *fn;
-       void *data;
-};
-
-static int refs_from_alternate_cb(struct object_directory *e,
-                                 void *data)
-{
-       struct strbuf path = STRBUF_INIT;
-       size_t base_len;
-       struct alternate_refs_data *cb = data;
-
-       if (!strbuf_realpath(&path, e->path, 0))
-               goto out;
-       if (!strbuf_strip_suffix(&path, "/objects"))
-               goto out;
-       base_len = path.len;
-
-       /* Is this a git repository with refs? */
-       strbuf_addstr(&path, "/refs");
-       if (!is_directory(path.buf))
-               goto out;
-       strbuf_setlen(&path, base_len);
-
-       read_alternate_refs(path.buf, cb->fn, cb->data);
-
-out:
-       strbuf_release(&path);
-       return 0;
-}
-
-void for_each_alternate_ref(alternate_ref_fn fn, void *data)
-{
-       struct alternate_refs_data cb;
-       cb.fn = fn;
-       cb.data = data;
-       foreach_alt_odb(refs_from_alternate_cb, &cb);
-}
diff --git a/transport.h b/transport.h
index 06e06d3d8937bc553ef42cd895ee7132c6e5639c..0b5f7806f625d888175e1c633ec2b59d8d07803b 100644 (file)
@@ -262,6 +262,4 @@ int transport_refs_pushed(struct ref *ref);
 void transport_print_push_status(const char *dest, struct ref *refs,
                  int verbose, int porcelain, unsigned int *reject_reasons);
 
-typedef void alternate_ref_fn(const struct object_id *oid, void *);
-void for_each_alternate_ref(alternate_ref_fn, void *);
 #endif
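
The typedef and declaration removed above go away together with the implementation dropped from transport.c; they are presumably relocated alongside the rest of the object-store code rather than deleted outright. Below is a minimal sketch of a caller, assuming the prototype itself is unchanged apart from its new home; count_alternate_refs() and its callback are hypothetical helpers, not part of this merge.

/*
 * Sketch only: assumes for_each_alternate_ref() and its callback type
 * keep the signature shown above and are now declared with the
 * object-store code rather than in transport.h.
 */
#include "cache.h"
#include "object-store.h"

static void count_one_alternate_ref(const struct object_id *oid, void *data)
{
	unsigned int *count = data;	/* caller-provided counter */
	(*count)++;
}

static unsigned int count_alternate_refs(void)
{
	unsigned int count = 0;
	for_each_alternate_ref(count_one_alternate_ref, &count);
	return count;
}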
diff --git a/tree-diff.c b/tree-diff.c
index f1f641eb6a64abc2dbb6f0ef70f7375101874812..33ded7f8b3e71069f7a75bf7524443f3d81b7532 100644 (file)
@@ -422,8 +422,8 @@ static struct combine_diff_path *ll_diff_tree_paths(
         *   diff_tree_oid(parent, commit) )
         */
        for (i = 0; i < nparent; ++i)
-               tptree[i] = fill_tree_descriptor(&tp[i], parents_oid[i]);
-       ttree = fill_tree_descriptor(&t, oid);
+               tptree[i] = fill_tree_descriptor(opt->repo, &tp[i], parents_oid[i]);
+       ttree = fill_tree_descriptor(opt->repo, &t, oid);
 
        /* Enable recursion indefinitely */
        opt->pathspec.recursive = opt->flags.recursive;
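
fill_tree_descriptor() now takes the repository explicitly instead of relying on the default object store, as the hunk above shows. A minimal sketch of the new calling convention follows, assuming the usual pattern of freeing the buffer the function returns once the descriptor is no longer needed; walk_one_tree() is a hypothetical helper, not part of this merge.

#include "cache.h"
#include "tree-walk.h"

/* Sketch only: list the entries of a single tree in repository r. */
static void walk_one_tree(struct repository *r, const struct object_id *oid)
{
	struct tree_desc desc;
	struct name_entry entry;
	void *buf = fill_tree_descriptor(r, &desc, oid);	/* dies if the tree cannot be read */

	while (tree_entry(&desc, &entry))
		printf("%06o %s\n", entry.mode, entry.path);

	free(buf);
}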
diff --git a/tree-walk.c b/tree-walk.c
index ec32a47b2e7664365f771f3955747794001d3f28..c20b62f49e4709253d71e18eea0a2472809d8e9d 100644 (file)
@@ -81,13 +81,15 @@ int init_tree_desc_gently(struct tree_desc *desc, const void *buffer, unsigned l
        return result;
 }
 
-void *fill_tree_descriptor(struct tree_desc *desc, const struct object_id *oid)
+void *fill_tree_descriptor(struct repository *r,
+                          struct tree_desc *desc,
+                          const struct object_id *oid)
 {
        unsigned long size = 0;
        void *buf = NULL;
 
        if (oid) {
-               buf = read_object_with_reference(oid, tree_type, &size, NULL);
+               buf = read_object_with_reference(r, oid, tree_type, &size, NULL);
                if (!buf)
                        die("unable to read tree %s", oid_to_hex(oid));
        }
@@ -500,7 +502,9 @@ struct dir_state {
        struct object_id oid;
 };
 
-static int find_tree_entry(struct tree_desc *t, const char *name, struct object_id *result, unsigned short *mode)
+static int find_tree_entry(struct repository *r, struct tree_desc *t,
+                          const char *name, struct object_id *result,
+                          unsigned short *mode)
 {
        int namelen = strlen(name);
        while (t->size) {
@@ -530,19 +534,23 @@ static int find_tree_entry(struct tree_desc *t, const char *name, struct object_
                        oidcpy(result, &oid);
                        return 0;
                }
-               return get_tree_entry(&oid, name + entrylen, result, mode);
+               return get_tree_entry(r, &oid, name + entrylen, result, mode);
        }
        return -1;
 }
 
-int get_tree_entry(const struct object_id *tree_oid, const char *name, struct object_id *oid, unsigned short *mode)
+int get_tree_entry(struct repository *r,
+                  const struct object_id *tree_oid,
+                  const char *name,
+                  struct object_id *oid,
+                  unsigned short *mode)
 {
        int retval;
        void *tree;
        unsigned long size;
        struct object_id root;
 
-       tree = read_object_with_reference(tree_oid, tree_type, &size, &root);
+       tree = read_object_with_reference(r, tree_oid, tree_type, &size, &root);
        if (!tree)
                return -1;
 
@@ -557,7 +565,7 @@ int get_tree_entry(const struct object_id *tree_oid, const char *name, struct ob
        } else {
                struct tree_desc t;
                init_tree_desc(&t, tree, size);
-               retval = find_tree_entry(&t, name, oid, mode);
+               retval = find_tree_entry(r, &t, name, oid, mode);
        }
        free(tree);
        return retval;
@@ -585,7 +593,10 @@ int get_tree_entry(const struct object_id *tree_oid, const char *name, struct ob
  * See the code for enum get_oid_result for a description of
  * the return values.
  */
-enum get_oid_result get_tree_entry_follow_symlinks(struct object_id *tree_oid, const char *name, struct object_id *result, struct strbuf *result_path, unsigned short *mode)
+enum get_oid_result get_tree_entry_follow_symlinks(struct repository *r,
+               struct object_id *tree_oid, const char *name,
+               struct object_id *result, struct strbuf *result_path,
+               unsigned short *mode)
 {
        int retval = MISSING_OBJECT;
        struct dir_state *parents = NULL;
@@ -609,7 +620,8 @@ enum get_oid_result get_tree_entry_follow_symlinks(struct object_id *tree_oid, c
                        void *tree;
                        struct object_id root;
                        unsigned long size;
-                       tree = read_object_with_reference(&current_tree_oid,
+                       tree = read_object_with_reference(r,
+                                                         &current_tree_oid,
                                                          tree_type, &size,
                                                          &root);
                        if (!tree)
@@ -678,7 +690,7 @@ enum get_oid_result get_tree_entry_follow_symlinks(struct object_id *tree_oid, c
                }
 
                /* Look up the first (or only) path component in the tree. */
-               find_result = find_tree_entry(&t, namebuf.buf,
+               find_result = find_tree_entry(r, &t, namebuf.buf,
                                              &current_tree_oid, mode);
                if (find_result) {
                        goto done;
@@ -722,7 +734,8 @@ enum get_oid_result get_tree_entry_follow_symlinks(struct object_id *tree_oid, c
                         */
                        retval = DANGLING_SYMLINK;
 
-                       contents = read_object_file(&current_tree_oid, &type,
+                       contents = repo_read_object_file(r,
+                                                   &current_tree_oid, &type,
                                                    &link_len);
 
                        if (!contents)
diff --git a/tree-walk.h b/tree-walk.h
index 161e2400f443460fc6606e97d4e356c1f0b478ed..2a5db29e8f196f535c75fbf84dcae8f45ca5c4a3 100644 (file)
@@ -45,13 +45,15 @@ int init_tree_desc_gently(struct tree_desc *desc, const void *buf, unsigned long
 int tree_entry(struct tree_desc *, struct name_entry *);
 int tree_entry_gently(struct tree_desc *, struct name_entry *);
 
-void *fill_tree_descriptor(struct tree_desc *desc, const struct object_id *oid);
+void *fill_tree_descriptor(struct repository *r,
+                          struct tree_desc *desc,
+                          const struct object_id *oid);
 
 struct traverse_info;
 typedef int (*traverse_callback_t)(int n, unsigned long mask, unsigned long dirmask, struct name_entry *entry, struct traverse_info *);
 int traverse_trees(struct index_state *istate, int n, struct tree_desc *t, struct traverse_info *info);
 
-enum get_oid_result get_tree_entry_follow_symlinks(struct object_id *tree_oid, const char *name, struct object_id *result, struct strbuf *result_path, unsigned short *mode);
+enum get_oid_result get_tree_entry_follow_symlinks(struct repository *r, struct object_id *tree_oid, const char *name, struct object_id *result, struct strbuf *result_path, unsigned short *mode);
 
 struct traverse_info {
        const char *traverse_path;
@@ -66,7 +68,7 @@ struct traverse_info {
        int show_all_errors;
 };
 
-int get_tree_entry(const struct object_id *, const char *, struct object_id *, unsigned short *);
+int get_tree_entry(struct repository *, const struct object_id *, const char *, struct object_id *, unsigned short *);
 char *make_traverse_path(char *path, const struct traverse_info *info, const struct name_entry *n);
 void setup_traverse_info(struct traverse_info *info, const char *base);
 
diff --git a/unpack-trees.c b/unpack-trees.c
index dab713203e15a83e452119d0140bb4c28c8a7bf0..62276d4fef5f67e7147377f16e24e30abca0906a 100644 (file)
@@ -840,7 +840,7 @@ static int traverse_trees_recursive(int n, unsigned long dirmask,
                        const struct object_id *oid = NULL;
                        if (dirmask & 1)
                                oid = &names[i].oid;
-                       buf[nr_buf++] = fill_tree_descriptor(t + i, oid);
+                       buf[nr_buf++] = fill_tree_descriptor(the_repository, t + i, oid);
                }
        }
 
diff --git a/wt-status.c b/wt-status.c
index 7d776f8a9756e9defa3d2b7a2c15217524204e07..9f6c65a5809754717f8c51f809eae78f435bcd12 100644 (file)
@@ -202,7 +202,6 @@ static void wt_longstatus_print_unmerged_header(struct wt_status *s)
        } else {
                status_printf_ln(s, c, _("  (use \"git add/rm <file>...\" as appropriate to mark resolution)"));
        }
-       status_printf_ln(s, c, "%s", "");
 }
 
 static void wt_longstatus_print_cached_header(struct wt_status *s)
@@ -224,7 +223,6 @@ static void wt_longstatus_print_cached_header(struct wt_status *s)
                                         s->reference);
        } else
                status_printf_ln(s, c, _("  (use \"git rm --cached <file>...\" to unstage)"));
-       status_printf_ln(s, c, "%s", "");
 }
 
 static void wt_longstatus_print_dirty_header(struct wt_status *s,
@@ -243,7 +241,6 @@ static void wt_longstatus_print_dirty_header(struct wt_status *s,
        status_printf_ln(s, c, _("  (use \"git restore <file>...\" to discard changes in working directory)"));
        if (has_dirty_submodules)
                status_printf_ln(s, c, _("  (commit or discard the untracked or modified content in submodules)"));
-       status_printf_ln(s, c, "%s", "");
 }
 
 static void wt_longstatus_print_other_header(struct wt_status *s,
@@ -255,7 +252,6 @@ static void wt_longstatus_print_other_header(struct wt_status *s,
        if (!s->hints)
                return;
        status_printf_ln(s, c, _("  (use \"git %s <file>...\" to include in what will be committed)"), how);
-       status_printf_ln(s, c, "%s", "");
 }
 
 static void wt_longstatus_print_trailer(struct wt_status *s)