+GIT-BUILD-OPTIONS
GIT-CFLAGS
GIT-GUI-VARS
GIT-VERSION-FILE
- We do not write the noiseword "function" in front of shell
functions.
+ - As to the use of grep, stick to a subset of BRE (namely, no \{m,n\},
+ [::], [==], nor [..]) for portability.
+
+ - We do not use \{m,n\};
+
+ - We do not use -E;
+
+ - We do not use ? nor + (which are \{0,1\} and \{1,\}
+ respectively in BRE) but that goes without saying as these
+ are ERE elements not BRE (note that \? and \+ are not even part
+ of BRE -- making them accessible from BRE is a GNU extension).
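+
+   For example, to match "one or more digits" portably, write
+   "[0-9][0-9]*" instead of the ERE "[0-9]+" (or the GNU
+   extension "[0-9]\+"):
+
+	grep '[0-9][0-9]*' file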
+
For C programs:
- We use tabs to indent, and interpret tabs as taking up to
--- /dev/null
+GIT v1.5.4.4 Release Notes
+==========================
+
+Fixes since v1.5.4.3
+--------------------
+
+ * Building and installing with an overtight umask such as 077 made
+ installed templates unreadable by others, while the rest of the install
+   is done in a way that is friendly to umask 022.
+
+ * "git cvsexportcommit -w $cvsdir" misbehaved when GIT_DIR is set to a
+ relative directory.
+
+ * "git http-push" had an invalid memory access that could lead it to
+ segfault.
+
+ * When "git rebase -i" gave control back to the user for a commit that is
+ marked to be edited, it just said "modify it with commit --amend",
+ without saying what to do to continue after modifying it. Give an
+ explicit instruction to run "rebase --continue" to be more helpful.
+
+ * "git send-email" in 1.5.4.3 issued a bogus empty In-Reply-To: header.
+
+ * "git bisect" showed mysterious "won't bisect on seeked tree" error message.
+ This was leftover from Cogito days to prevent "bisect" starting from a
+ cg-seeked state. We still keep the Cogito safety, but running "git bisect
+ start" when another bisect was in effect will clean up and start over.
+
+ * "git push" with an explicit PATH to receive-pack did not quite work if
+ receive-pack was not on usual PATH. We earlier fixed the same issue
+ with "git fetch" and upload-pack, but somehow forgot to do so in the
+ other direction.
+
+ * git-gui's info dialog was not displayed correctly when the user tried
+ to commit nothing (i.e. without staging anything).
+
+ * "git revert" did not properly fail when attempting to run with a
+ dirty index.
+
+ * "git merge --no-commit --no-ff <other>" incorrectly made commits.
+
+ * "git merge --squash --no-ff <other>", which is a nonsense combination
+ of options, was not rejected.
+
+ * "git ls-remote" and "git remote show" against an empty repository
+ failed, instead of just giving an empty result (regression).
+
+ * "git fast-import" did not handle a renamed path whose name needs to be
+ quoted, due to a bug in unquote_c_style() function.
+
+ * "git cvsexportcommit" was confused when multiple files with the same
+ basename needed to be pushed out in the same commit.
+
+ * "git daemon" did not send early errors to syslog.
+
+ * "git log --merge" did not work well with --left-right option.
+
+ * "git svn" promprted for client cert password every time it accessed the
+ server.
+
+ * The reset command in "git fast-import" data stream was documented to
+ end with an optional LF, but it actually required one.
+
+ * "git svn dcommit/rebase" did not honor --rewrite-root option.
+
+Also included are a handful of documentation updates.
Updates since v1.5.4
--------------------
+(subsystems)
+
+ * Comes with git-gui 0.9.3
+
(performance)
* On platforms with suboptimal qsort(3) implementation, there
* You can be warned when core.autocrlf conversion is applied in
such a way that results in an irreversible conversion.
+ * A catch-all "color.ui" configuration variable can be used to
+ enable coloring of all color-capable commands, instead of
+ individual ones such as "color.status" and "color.branch".
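+   For example, to turn this on for all commands that can use color:
+
+     $ git config --global color.ui auto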
+
+ * The commands refused to take absolute pathnames where they
+ require pathnames relative to the work tree or the current
+ subdirectory. They now can take absolute pathnames in such a
+ case as long as the pathnames do not refer outside of the
+ work tree. E.g. "git add $(pwd)/foo" now works.
+
+ * Error messages used to be sent to stderr, only to get hidden when
+   $PAGER was in use.  They are now sent to stdout along with the
+   command output to be shown in the $PAGER.
+
* A pattern "foo/" in .gitignore file now matches a directory
"foo". Pattern "foo" also matches as before.
+ * bash completion's prompt helper function can talk about
+ operation in-progress (e.g. merge, rebase, etc.).
+
+ * Configuration variables "url.<usethis>.insteadof = <otherurl>" can be
+   used to tell "git-fetch" and "git-push" to use a different URL than
+   the one given on the command line.
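+   For example (the host name is purely illustrative), with
+
+     [url "git://git.example.org/"]
+        insteadOf = work:
+
+   "git fetch work:repo.git" fetches from git://git.example.org/repo.git.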
+
+ * "git push <somewhere> HEAD" and "git push <somewhere> +HEAD" works as
+ expected; they push the current branch (and only the current branch).
+ In addition, HEAD can be written as the value of "remote.<there>.push"
+ configuration variable.
+
+ * "git add -i" behaves better even before you make an initial commit.
+
+ * "git am" refused to run from a subdirectory without a good reason.
+
+ * After "git apply --whitespace=fix" fixes whitespace errors in a patch,
+ a line before the fix can appear as a context or preimage line in a
+ later patch, causing the patch not to apply. The command now knows to
+ see through whitespace fixes done to context lines to successfully
+ apply such a patch series.
+
+ * "git branch" (and "git checkout -b") to branch from a local branch can
+ optionally set "branch.<name>.merge" to mark the new branch to build on
+ the other local branch, when "branch.autosetupmerge" is set to
+ "always". By default, this does not happen when branching from a local
+ branch.
+
+ * "git checkout" to switch to a branch that has "branch.<name>.merge" set
+ (i.e. marked to build on another branch) reports how much the branch
+ and the other branch diverged.
+
+ * When "git checkout" has to update a lot of paths, it used to be silent
+ for 4 seconds before it showed any progress report. It is now a bit
+   more impatient and starts showing a progress report earlier.
+
+ * "git commit" learned a new hook "prepare-commit-msg" that can
+ inspect what is going to be committed and prepare the commit
+ log message template to be edited.
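+   A minimal sketch of such a hook (.git/hooks/prepare-commit-msg),
+   appending a note to the message file it is given as its first
+   argument:
+
+     #!/bin/sh
+     echo "# Committing on $(git symbolic-ref HEAD)" >>"$1"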
+
+ * "git cvsimport" can now take more than one -M options.
+
* "git describe" learned to limit the tags to be used for
naming with --match option.
* "git describe --contains" now barfs when the named commit
cannot be described.
- * bash completion's prompt helper function can talk about
- operation in-progress (e.g. merge, rebase, etc.).
+ * "git describe --exact-match" describes only commits that are tagged.
- * "git commit" learned a new hook "prepare-commit-msg" that can
- inspect what is going to be committed and prepare the commit
- log message template to be edited.
+ * "git describe --long" describes a tagged commit as $tag-0-$sha1,
+ instead of just showing the exact tagname.
+
+ * "git describe" warns when using a tag whose name and path contradict
+ with each other.
+
+ * "git diff" learned "--relative" option to limit and output paths
+ relative to the current directory when working in a subdirectory.
+
+ * "git diff" learned "--dirstat" option to show birds-eye-summary of
+ changes more concisely than "--diffstat".
+
+ * "git format-patch" learned --cover-letter option to generate a cover
+ letter template.
+
+ * "git gc" learned --quiet option.
+
+ * "git grep" now knows "--name-only" is a synonym for the "-l" option.
+
+ * "git help <alias>" now reports "'git <alias>' is alias to <what>",
+ instead of saying "No manual entry for git-<alias>".
+
+ * "git log --grep=<what>" learned "--fixed-strings" option to look for
+ <what> without treating it as a regular expression.
* "git gui" learned an auto-spell checking.
* "git send-email" learned an easier way to suppress CC
recipients.
+ * When the configuration variable "pack.threads" is set to 0, "git
+   repack" auto-detects the number of CPUs and uses that many threads.
+
* Various "git cvsimport", "git cvsexportcommit", "git svn" and
"git p4" improvements.
* It is now easier to write test scripts that records known
breakages.
+ * "git checkout" is rewritten in C.
+
+ * Two conflict hunks that are separated by a very short span of common
+ lines are now coalesced into one larger hunk, to make the result easier
+ to read.
+
+ * Run-command API's use of file descriptors is documented more clearly
+   and is more consistent now.
+
Fixes since v1.5.4
------------------
All of the fixes in v1.5.4 maintenance series are included in
this release, unless otherwise noted.
+ * "git-http-push" did not allow deletion of remote ref with the usual
+ "push <remote> :<branch>" syntax.
+
+ * "git-rebase --abort" did not go back to the right location if
+ "git-reset" was run during the "git-rebase" session.
---
exec >/var/tmp/1
-O=v1.5.4
-O=v1.5.4.2-122-g7cb97da
+O=v1.5.4.3-428-g6b48990
echo O=`git describe refs/heads/master`
git shortlog --no-merges $O..refs/heads/master ^refs/heads/maint
-
`.patch`. Use this variable to change that suffix (make sure to
include the dot if you want it).
+format.pretty::
+	The default pretty format for log/show/whatchanged commands.
+	See linkgit:git-log[1], linkgit:git-show[1],
+	linkgit:git-whatchanged[1].
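++
+For example (illustrative):
++
+------------
+$ git config format.pretty oneline
+------------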
+
gc.aggressiveWindow::
The window size parameter used in the delta compression
algorithm used by 'git gc --aggressive'. This defaults
merge.tool::
Controls which merge resolution program is used by
- linkgit:git-mergetool[1]. Valid values are: "kdiff3", "tkdiff",
- "meld", "xxdiff", "emerge", "vimdiff", "gvimdiff", and "opendiff".
+ linkgit:git-mergetool[1]. Valid built-in values are: "kdiff3",
+ "tkdiff", "meld", "xxdiff", "emerge", "vimdiff", "gvimdiff", and
+	"opendiff". Any other value is treated as a custom merge tool
+	and there must be a corresponding mergetool.<tool>.cmd option.
merge.verbosity::
Controls the amount of output shown by the recursive merge
Override the path for the given tool. This is useful in case
your tool is not in the PATH.
+mergetool.<tool>.cmd::
+ Specify the command to invoke the specified merge tool. The
+ specified command is evaluated in shell with the following
+ variables available: 'BASE' is the name of a temporary file
+ containing the common base of the files to be merged, if available;
+ 'LOCAL' is the name of a temporary file containing the contents of
+ the file on the current branch; 'REMOTE' is the name of a temporary
+ file containing the contents of the file from the branch being
+ merged; 'MERGED' contains the name of the file to which the merge
+ tool should write the results of a successful merge.
+
+mergetool.<tool>.trustExitCode::
+ For a custom merge command, specify whether the exit code of
+ the merge command can be used to determine whether the merge was
+ successful. If this is not set to true then the merge target file
+	timestamp is checked and the merge is assumed to have been successful
+ if the file has been updated, otherwise the user is prompted to
+ indicate the success of the merge.
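++
+A minimal sketch (the tool name "mymerge" and its command line are
+hypothetical; the backslashes keep the inner quotes as part of the
+configured command):
++
+------------
+[merge]
+	tool = mymerge
+[mergetool "mymerge"]
+	cmd = mymerge \"$BASE\" \"$LOCAL\" \"$REMOTE\" -o \"$MERGED\"
+	trustExitCode = true
+------------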
+
+mergetool.keepBackup::
+ After performing a merge, the original file with conflict markers
+ can be saved as a file with a `.orig` extension. If this variable
+ is set to `false` then this file is not preserved. Defaults to
+ `true` (i.e. keep the backup files).
+
pack.window::
The size of the window used by linkgit:git-pack-objects[1] when no
window size is given on the command line. Defaults to 10.
remote.<name>.receivepack::
The default program to execute on the remote side when pushing. See
- option \--exec of linkgit:git-push[1].
+ option \--receive-pack of linkgit:git-push[1].
remote.<name>.uploadpack::
The default program to execute on the remote side when fetching. See
- option \--exec of linkgit:git-fetch-pack[1].
+ option \--upload-pack of linkgit:git-fetch-pack[1].
remote.<name>.tagopt::
- Setting this value to --no-tags disables automatic tag following when fetching
- from remote <name>
+ Setting this value to \--no-tags disables automatic tag following when
+ fetching from remote <name>
remotes.<group>::
The list of remotes which are fetched by "git remote update
SYNOPSIS
--------
[verse]
-'git-am' [--signoff] [--dotest=<dir>] [--keep] [--utf8 | --no-utf8]
+'git-am' [--signoff] [--keep] [--utf8 | --no-utf8]
[--3way] [--interactive] [--binary]
[--whitespace=<option>] [-C<n>] [-p<n>]
<mbox>|<Maildir>...
Add `Signed-off-by:` line to the commit message, using
the committer identity of yourself.
--d=<dir>, --dotest=<dir>::
- Instead of `.dotest` directory, use <dir> as a working
- area to store extracted patches.
-
-k, --keep::
Pass `-k` flag to `git-mailinfo` (see linkgit:git-mailinfo[1]).
default is not to do `-x` so this option is a no-op.
-m parent-number|--mainline parent-number::
- Usually you cannot revert a merge because you do not know which
+ Usually you cannot cherry-pick a merge because you do not know which
side of the merge should be considered the mainline. This
option specifies the parent number (starting from 1) of
the mainline and allows cherry-pick to replay the change
-m::
Attempt to detect merges based on the commit message. This option
- will enable default regexes that try to capture the name source
+ will enable default regexes that try to capture the source
branch name from the commit message.
-M <regex>::
Attempt to detect merges based on the commit message with a custom
regex. It can be used with '-m' to enable the default regexes
as well. You must escape forward slashes.
++
+The regex must capture the source branch name in $1.
++
+This option can be used several times to provide several detection regexes.
-S <regex>::
Skip paths matching the regex.
being employed to standard error. The tag name will still
be printed to standard out.
+--long::
+ Always output the long format (the tag, the number of commits
+ and the abbreviated commit name) even when it matches a tag.
+ This is useful when you want to see parts of the commit object name
+ in "describe" output, even when the commit in question happens to be
+ a tagged version. Instead of just emitting the tag name, it will
+	describe such a commit as v1.2-0-gdeadbeef (0th commit since tag v1.2
+ that points at object deadbeef....).
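++
+For example (object names are illustrative):
++
+------------
+$ git describe v1.2
+v1.2
+$ git describe --long v1.2
+v1.2-0-gdeadbee
+------------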
+
--match <pattern>::
Only consider tags matching the given pattern (can be used to avoid
leaking private tags made from the repository).
SYNOPSIS
--------
-'git-fetch-pack' [--all] [--quiet|-q] [--keep|-k] [--thin] [--upload-pack=<git-upload-pack>] [--depth=<n>] [--no-progress] [-v] [<host>:]<directory> [<refs>...]
+'git-fetch-pack' [--all] [--quiet|-q] [--keep|-k] [--thin] [--include-tag] [--upload-pack=<git-upload-pack>] [--depth=<n>] [--no-progress] [-v] [<host>:]<directory> [<refs>...]
DESCRIPTION
-----------
Spend extra cycles to minimize the number of objects to be sent.
Use it on slower connection.
+\--include-tag::
+	If the remote side supports it, annotated tag objects will
+ be downloaded on the same connection as the other objects if
+ the object the tag references is downloaded. The caller must
+ otherwise determine the tags this option made available.
+
\--upload-pack=<git-upload-pack>::
Use this to specify the path to 'git-upload-pack' on the
remote side, if is not found on your $PATH.
SYNOPSIS
--------
-'git-gc' [--prune] [--aggressive] [--auto]
+'git-gc' [--prune] [--aggressive] [--auto] [--quiet]
DESCRIPTION
-----------
`git-repack`. Setting `gc.autopacklimit` to 0 disables
automatic consolidation of packs.
+--quiet::
+ Suppress all progress reports.
+
Configuration
-------------
-l | --files-with-matches | --name-only | -L | --files-without-match::
Instead of showing every matched line, show only the
names of files that contain (or do not contain) matches.
- For better compatability with git-diff, --name-only is a
+ For better compatibility with git-diff, --name-only is a
synonym for --files-with-matches.
-c | --count::
to force the version for the generated pack index, and to force
64-bit index entries on objects located above the given offset.
+--strict::
+	Die if the pack contains broken objects or links.
+
Note
----
---------------
A merge is always between the current `HEAD` and one or more
-remote branch heads, and the index file must exactly match the
+commits (usually a branch head or a tag), and the index file must
+exactly match the
tree of `HEAD` commit (i.e. the contents of the last commit) when
it happens. In other words, `git-diff --cached HEAD` must
report no changes.
DESCRIPTION
-----------
-Use 'git mergetool' to run one of several merge utilities to resolve
+Use `git mergetool` to run one of several merge utilities to resolve
merge conflicts. It is typically run after linkgit:git-merge[1].
If one or more <file> parameters are given, the merge tool program will
be run to resolve differences on each file. If no <file> names are
-specified, 'git mergetool' will run the merge tool program on every file
+specified, `git mergetool` will run the merge tool program on every file
with merge conflicts.
OPTIONS
Valid merge tools are:
kdiff3, tkdiff, meld, xxdiff, emerge, vimdiff, gvimdiff, ecmerge, and opendiff
+
-If a merge resolution program is not specified, 'git mergetool'
-will use the configuration variable merge.tool. If the
-configuration variable merge.tool is not set, 'git mergetool'
+If a merge resolution program is not specified, `git mergetool`
+will use the configuration variable `merge.tool`. If the
+configuration variable `merge.tool` is not set, `git mergetool`
will pick a suitable default.
+
You can explicitly provide a full path to the tool by setting the
-configuration variable mergetool.<tool>.path. For example, you
+configuration variable `mergetool.<tool>.path`. For example, you
can configure the absolute path to kdiff3 by setting
-mergetool.kdiff3.path. Otherwise, 'git mergetool' assumes the tool
-is available in PATH.
+`mergetool.kdiff3.path`. Otherwise, `git mergetool` assumes the
+tool is available in PATH.
++
+Instead of running one of the known merge tool programs,
+`git mergetool` can be customized to run an alternative program
+by specifying the command line to invoke in a configuration
+variable `mergetool.<tool>.cmd`.
++
+When `git mergetool` is invoked with this tool (either through the
+`-t` or `--tool` option or the `merge.tool` configuration
+variable) the configured command line will be invoked with `$BASE`
+set to the name of a temporary file containing the common base for
+the merge, if available; `$LOCAL` set to the name of a temporary
+file containing the contents of the file on the current branch;
+`$REMOTE` set to the name of a temporary file containing the
+contents of the file to be merged, and `$MERGED` set to the name
+of the file to which the merge tool should write the result of the
+merge resolution.
++
+If the custom merge tool correctly indicates the success of a
+merge resolution with its exit code then the configuration
+variable `mergetool.<tool>.trustExitCode` can be set to `true`.
+Otherwise, `git mergetool` will prompt the user to indicate the
+success of the resolution after the custom tool has exited.
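++
+For example, a hypothetical custom tool named "mymerge" could be
+wired up like this:
++
+------------
+$ git config mergetool.mymerge.cmd \
+	'mymerge "$BASE" "$LOCAL" "$REMOTE" -o "$MERGED"'
+$ git config mergetool.mymerge.trustExitCode true
+$ git mergetool --tool=mymerge
+------------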
Author
------
as if all refs under `$GIT_DIR/refs` are specified to be
included.
+--include-tag::
+ Include unasked-for annotated tags if the object they
+ reference was included in the resulting packfile. This
+ can be useful to send new tags to native git clients.
+
--window=[N], --depth=[N]::
These two options affect how the objects contained in
the pack are stored using delta compression. The
<repository> to pull from the local repository -- this is useful
when merging local branches into the current branch.
+Also note that options meant for `git-pull` itself and the underlying
+`git-merge` must be given before the options meant for `git-fetch`.
OPTIONS
-------
--------
[verse]
'git-rebase' [-i | --interactive] [-v | --verbose] [-m | --merge]
+ [-s <strategy> | --strategy=<strategy>]
[-C<n>] [ --whitespace=<option>] [-p | --preserve-merges]
[--onto <newbase>] <upstream> [<branch>]
'git-rebase' --continue | --skip | --abort
reject the rebase if it isn't appropriate. Please see the template
pre-rebase hook script for an example.
-You must be in the top directory of your project to start (or continue)
-a rebase. Upon completion, <branch> will be the current branch.
+Upon completion, <branch> will be the current branch.
INTERACTIVE MODE
----------------
git reflog expire [--dry-run] [--stale-fix] [--verbose]
[--expire=<time>] [--expire-unreachable=<time>] [--all] <refs>...
+git reflog delete ref@\{specifier\}...
+
git reflog [show] [log-options] [<ref>]
Reflog is a mechanism to record when the tip of branches are
point to one week ago", and so on. See linkgit:git-rev-parse[1] for
more details.
+To delete single entries from the reflog, use the subcommand "delete"
+and specify the _exact_ entry (e.g. ``git reflog delete master@\{2\}'').
+
OPTIONS
-------
--all::
Instead of listing <refs> explicitly, prune all refs.
+--updateref::
+ Update the ref with the sha1 of the top reflog entry (i.e.
+ <ref>@\{0\}) after expiring or deleting.
+
+--rewrite::
+ While expiring or deleting, adjust each reflog entry to ensure
+ that the `old` sha1 field points to the `new` sha1 field of the
+ previous entry.
+
--verbose::
Print extra information on screen.
[ \--full-history ]
[ \--not ]
[ \--all ]
+ [ \--branches ]
+ [ \--tags ]
+ [ \--remotes ]
[ \--stdin ]
[ \--quiet ]
[ \--topo-order ]
Each line of options has this format:
------------
-<opt_spec><arg_spec>? SP+ help LF
+<opt_spec><flags>* SP+ help LF
------------
`<opt_spec>`::
is necessary. `h,help`, `dry-run` and `f` are all three correct
`<opt_spec>`.
-`<arg_spec>`::
- an `<arg_spec>` tells the option parser if the option has an argument
- (`=`), an optional one (`?` though its use is discouraged) or none
- (no `<arg_spec>` in that case).
+`<flags>`::
+	`<flags>` consist of `*`, `=`, `?` or `!`.
+ * Use `=` if the option takes an argument.
+
+ * Use `?` to mean that the option is optional (though its use is discouraged).
+
+ * Use `*` to mean that this option should not be listed in the usage
+ generated for the `-h` argument. It's shown for `--help-all` as
+ documented in linkgit:gitcli[5].
+
+ * Use `!` to not make the corresponding negated long option available.
The remainder of the line, after stripping the spaces, is used
as the help associated to the option.
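+
+For example, a caller's specification could look like this (the command
+name and options are purely illustrative):
+
+------------
+OPTS_SPEC="\
+some-script [options] <args>...
+--
+h,help    show the help
+foo=      pass an argument to the frobnicator
+q,quiet*  be quiet (not shown in the short usage)
+"
+eval "$(echo "$OPTS_SPEC" | git rev-parse --parseopt -- "$@" || echo exit $?)"
+------------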
SYNOPSIS
--------
[verse]
-'git-stash' (list | show [<stash>] | apply [<stash>] | clear)
+'git-stash' (list | show [<stash>] | apply [<stash>] | clear | drop [<stash>] | pop [<stash>])
'git-stash' [save [<message>]]
DESCRIPTION
Remove all the stashed states. Note that those states will then
be subject to pruning, and may be difficult or impossible to recover.
+drop [<stash>]::
+
+	Remove a single stashed state from the stash list. When no `<stash>`
+	is given, it removes the latest one, i.e. `stash@\{0}`.
+
+pop [<stash>]::
+
+	Remove a single stashed state from the stash list and apply it on top
+ of the current working tree state. When no `<stash>` is given,
+ `stash@\{0}` is assumed. See also `apply`.
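++
+For example:
++
+------------
+$ git stash pop              # apply the latest stash, then drop it
+$ git stash drop stash@{1}   # discard an older stash entry
+------------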
+
DISCUSSION
----------
--------
add::
Add the given repository as a submodule at the given path
- to the changeset to be committed next. In particular, the
- repository is cloned at the specified path, added to the
+	to the changeset to be committed next. If the path is a valid
+	repository within the project, it is added as is. Otherwise, the
+	repository is cloned at the specified path, and the path is added to the
changeset and registered in .gitmodules. If no path is
specified, the path is deduced from the repository specification.
If the repository url begins with ./ or ../, it is stored as
-------------
When specifying the -v option the format used is:
- SHA1 type size offset-in-packfile
+	SHA1 type size size-in-packfile offset-in-packfile
for objects that are not deltified in the pack, and
- SHA1 type size offset-in-packfile depth base-SHA1
+ SHA1 type size size-in-packfile offset-in-packfile depth base-SHA1
for objects that are deltified.
Show git internal diff output, but for the whole tree,
not just the top level.
---pretty=<format>::
- Controls the output format for the commit logs.
- <format> can be one of 'raw', 'medium', 'short', 'full',
- and 'oneline'.
-
-m::
By default, differences for merge commits are not shown.
With this flag, show differences to that commit from all
However, it is not very useful in general, although it
*is* useful on a file-by-file basis.
+include::pretty-options.txt[]
+
+include::pretty-formats.txt[]
+
Examples
--------
git-whatchanged -p v2.6.12.. include/scsi drivers/scsi::
branch of the `git.git` repository.
Documentation for older releases are available here:
-* link:v1.5.4.3/git.html[documentation for release 1.5.4.3]
+* link:v1.5.4.4/git.html[documentation for release 1.5.4.4]
* release notes for
+ link:RelNotes-1.5.4.4.txt[1.5.4.4],
link:RelNotes-1.5.4.3.txt[1.5.4.3],
link:RelNotes-1.5.4.2.txt[1.5.4.2],
link:RelNotes-1.5.4.1.txt[1.5.4.1],
where '<format>' can be one of 'oneline', 'short', 'medium',
'full', 'fuller', 'email', 'raw' and 'format:<string>'.
When omitted, the format defaults to 'medium'.
++
+Note: you can specify the default pretty format in the repository
+configuration (see linkgit:git-config[1]).
--abbrev-commit::
Instead of showing the full 40-byte hexadecimal commit object
Show commits older than a specific date.
+ifdef::git-rev-list[]
--max-age='timestamp', --min-age='timestamp'::
Limit the commits output to specified time range.
+endif::git-rev-list[]
--author='pattern', --committer='pattern'::
* Once you finish feeding the pairs of files, call `diffcore_std()`.
This will tell the diffcore library to go ahead and do its work.
-* Calling `diffcore_flush()` will produce the output.
+* Calling `diff_flush()` will produce the output.
Data structures
`finish_async`::
- Wait for the completeion of an asynchronous function that was
+ Wait for the completion of an asynchronous function that was
started with start_async().
.no_stdin, .no_stdout, .no_stderr: The respective channel is
redirected to /dev/null.
- .stdout_to_stderr: stdout of the child is redirected to the
- parent's stderr (i.e. *not* to what .err or
- .no_stderr specify).
+ .stdout_to_stderr: stdout of the child is redirected to its
+ stderr. This happens after stderr is itself redirected.
+ So stdout will follow stderr to wherever it is
+ redirected.
To modify the environment of the sub-process, specify an array of
string pointers (NULL terminated) in .env:
. If the string is of the form "VAR=value", i.e. it contains '='
the variable is added to the child process's environment.
-. If the string does not contain '=', it names an environement
- variable that will be removed from the child process's envionment.
+. If the string does not contain '=', it names an environment
+ variable that will be removed from the child process's environment.
To specify a new initial working directory for the sub-process,
specify it in the .dir member.
# Define V=1 to have a more verbose compile.
#
+# Define SNPRINTF_RETURNS_BOGUS if you are on a system where snprintf()
+# or vsnprintf() returns -1 instead of the number of characters which would
+# have been written to the final string if enough space had been available.
+#
# Define FREAD_READS_DIRECTORIES if your are on a system which succeeds
# when attempting to read from an fopen'ed directory.
#
run-command.h strbuf.h tag.h tree.h git-compat-util.h revision.h \
tree-walk.h log-tree.h dir.h path-list.h unpack-trees.h builtin.h \
utf8.h reflog-walk.h patch-ids.h attr.h decorate.h progress.h \
- mailmap.h remote.h parse-options.h transport.h diffcore.h hash.h
+ mailmap.h remote.h parse-options.h transport.h diffcore.h hash.h ll-merge.h fsck.h \
+ pack-revindex.h
DIFF_OBJS = \
diff.o diff-lib.o diffcore-break.o diffcore-order.o \
patch-ids.o \
object.o pack-check.o pack-write.o patch-delta.o path.o pkt-line.o \
sideband.o reachable.o reflog-walk.o \
- quote.o read-cache.o refs.o run-command.o dir.o object-refs.o \
+ quote.o read-cache.o refs.o run-command.o dir.o \
server-info.o setup.o sha1_file.o sha1_name.o strbuf.o \
tag.o tree.o usage.o config.o environment.o ctype.o copy.o \
revision.o pager.o tree-walk.o xdiff-interface.o \
color.o wt-status.o archive-zip.o archive-tar.o shallow.o utf8.o \
convert.o attr.o decorate.o progress.o mailmap.o symlinks.o remote.o \
transport.o bundle.o walker.o parse-options.o ws.o archive.o branch.o \
- alias.o
+ ll-merge.o alias.o fsck.o pack-revindex.o
BUILTIN_OBJS = \
builtin-add.o \
NO_MEMMEM = YesPlease
BASIC_CFLAGS += -I/usr/local/include
BASIC_LDFLAGS += -L/usr/local/lib
+ DIR_HAS_BSD_GROUP_SEMANTICS = YesPlease
endif
ifeq ($(uname_S),OpenBSD)
NO_STRCASESTR = YesPlease
ifdef NO_C99_FORMAT
BASIC_CFLAGS += -DNO_C99_FORMAT
endif
+ifdef SNPRINTF_RETURNS_BOGUS
+ COMPAT_CFLAGS += -DSNPRINTF_RETURNS_BOGUS
+ COMPAT_OBJS += compat/snprintf.o
+endif
ifdef FREAD_READS_DIRECTORIES
COMPAT_CFLAGS += -DFREAD_READS_DIRECTORIES
COMPAT_OBJS += compat/fopen.o
EXTLIBS += -lpthread
LIB_OBJS += thread-utils.o
endif
+ifdef DIR_HAS_BSD_GROUP_SEMANTICS
+ COMPAT_CFLAGS += -DDIR_HAS_BSD_GROUP_SEMANTICS
+endif
ifeq ($(TCLTK_PATH),)
NO_TCLTK=NoThanks
### Build rules
-all:: $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS)
+all:: $(ALL_PROGRAMS) $(BUILT_INS) $(OTHER_PROGRAMS) GIT-BUILD-OPTIONS
ifneq (,$X)
$(foreach p,$(patsubst %$X,%,$(filter %$X,$(ALL_PROGRAMS) $(BUILT_INS) git$X)), $(RM) '$p';)
endif
echo "$$FLAGS" >GIT-CFLAGS; \
fi
+GIT-BUILD-OPTIONS: .FORCE-GIT-BUILD-OPTIONS
+ @echo SHELL_PATH=\''$(SHELL_PATH_SQ)'\' >$@
+
### Detect Tck/Tk interpreter path changes
ifndef NO_TCLTK
TRACK_VARS = $(subst ','\'',-DTCLTK_PATH='$(TCLTK_PATH_SQ)')
$(MAKE) -C gitk-git clean
$(MAKE) -C git-gui clean
endif
- $(RM) GIT-VERSION-FILE GIT-CFLAGS GIT-GUI-VARS
+ $(RM) GIT-VERSION-FILE GIT-CFLAGS GIT-GUI-VARS GIT-BUILD-OPTIONS
.PHONY: all install clean strip
.PHONY: .FORCE-GIT-VERSION-FILE TAGS tags cscope .FORCE-GIT-CFLAGS
+.PHONY: .FORCE-GIT-BUILD-OPTIONS
### Check documentation
#
goto finish;
}
- if (*argv) {
- /* Was there an invalid path? */
- if (pathspec) {
- int num;
- for (num = 0; pathspec[num]; num++)
- ; /* just counting */
- if (argc != num)
- exit(1); /* error message already given */
- } else
- exit(1); /* error message already given */
- }
-
fill_directory(&dir, pathspec, ignored_too);
if (show_only) {
static int read_tree_some(struct tree *tree, const char **pathspec)
{
- int newfd;
- struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
- newfd = hold_locked_index(lock_file, 1);
- read_cache();
-
read_tree_recursive(tree, "", 0, 0, pathspec, update_some);
- if (write_cache(newfd, active_cache, active_nr) ||
- commit_locked_index(lock_file))
- die("unable to write new index file");
-
/* update the index with the given tree's info
* for all args, expanding wildcards, and exit
* with any non-zero return code.
return 0;
}
-static int checkout_paths(const char **pathspec)
+static int checkout_paths(struct tree *source_tree, const char **pathspec)
{
int pos;
struct checkout state;
int flag;
struct commit *head;
+ int newfd;
+ struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
+
+ newfd = hold_locked_index(lock_file, 1);
+ read_cache();
+
+ if (source_tree)
+ read_tree_some(source_tree, pathspec);
+
for (pos = 0; pathspec[pos]; pos++)
;
ps_matched = xcalloc(1, pos);
}
}
+ if (write_cache(newfd, active_cache, active_nr) ||
+ commit_locked_index(lock_file))
+ die("unable to write new index file");
+
resolve_ref("HEAD", rev, 0, &flag);
head = lookup_commit_reference_gently(rev, 1);
{
struct unpack_trees_options opts;
struct tree_desc tree_desc;
+
memset(&opts, 0, sizeof(opts));
opts.head_idx = -1;
opts.update = 1;
opts.merge = 1;
opts.fn = oneway_merge;
opts.verbose_update = !quiet;
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
parse_tree(tree);
init_tree_desc(&tree_desc, tree->buffer, tree->size);
if (unpack_trees(1, &tree_desc, &opts))
{
struct unpack_trees_options opts;
struct tree_desc tree_desc;
+
memset(&opts, 0, sizeof(opts));
opts.head_idx = -1;
opts.skip_unmerged = 1;
opts.merge = 1;
opts.fn = oneway_merge;
opts.verbose_update = !quiet;
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
parse_tree(tree);
init_tree_desc(&tree_desc, tree->buffer, tree->size);
if (unpack_trees(1, &tree_desc, &opts))
struct tree_desc trees[2];
struct tree *tree;
struct unpack_trees_options topts;
+
memset(&topts, 0, sizeof(topts));
topts.head_idx = -1;
+ topts.src_index = &the_index;
+ topts.dst_index = &the_index;
refresh_cache(REFRESH_QUIET);
return post_checkout_hook(old.commit, new->commit, 1);
}
-static int git_checkout_config(const char *var, const char *value)
-{
- return git_default_config(var, value);
-}
-
int cmd_checkout(int argc, const char **argv, const char *prefix)
{
struct checkout_opts opts;
memset(&opts, 0, sizeof(opts));
memset(&new, 0, sizeof(new));
- git_config(git_checkout_config);
+ git_config(git_default_config);
opts.track = git_branch_track;
if (argc) {
const char **pathspec = get_pathspec(prefix, argv);
+
+ if (!pathspec)
+ die("invalid path specification");
+
/* Checkout paths */
if (opts.new_branch || opts.force || opts.merge) {
if (argc == 1) {
}
}
- if (source_tree)
- read_tree_some(source_tree, pathspec);
- else
- read_cache();
- return checkout_paths(pathspec);
+ return checkout_paths(source_tree, pathspec);
}
if (new.name && !new.commit) {
#include "cache.h"
#include "dir.h"
#include "parse-options.h"
+#include "quote.h"
static int force = -1; /* unset */
struct dir_struct dir;
const char *path, *base;
static const char **pathspec;
- int prefix_offset = 0;
+ struct strbuf buf;
+ const char *qname;
char *seen = NULL;
struct option options[] = {
OPT__QUIET(&quiet),
argc = parse_options(argc, argv, options, builtin_clean_usage, 0);
+ strbuf_init(&buf, 0);
memset(&dir, 0, sizeof(dir));
if (ignored_only)
dir.show_ignored = 1;
if (!ignored)
setup_standard_excludes(&dir);
- if (prefix)
- prefix_offset = strlen(prefix);
pathspec = get_pathspec(prefix, argv);
read_cache();
if (S_ISDIR(st.st_mode)) {
strbuf_addstr(&directory, ent->name);
+ qname = quote_path_relative(directory.buf, directory.len, &buf, prefix);
if (show_only && (remove_directories || matches)) {
- printf("Would remove %s\n",
- directory.buf + prefix_offset);
+ printf("Would remove %s\n", qname);
} else if (remove_directories || matches) {
if (!quiet)
- printf("Removing %s\n",
- directory.buf + prefix_offset);
+ printf("Removing %s\n", qname);
if (remove_dir_recursively(&directory, 0) != 0) {
- warning("failed to remove '%s'",
- directory.buf + prefix_offset);
+ warning("failed to remove '%s'", qname);
errors++;
}
} else if (show_only) {
- printf("Would not remove %s\n",
- directory.buf + prefix_offset);
+ printf("Would not remove %s\n", qname);
} else {
- printf("Not removing %s\n",
- directory.buf + prefix_offset);
+ printf("Not removing %s\n", qname);
}
strbuf_reset(&directory);
} else {
if (pathspec && !matches)
continue;
+ qname = quote_path_relative(ent->name, -1, &buf, prefix);
if (show_only) {
- printf("Would remove %s\n",
- ent->name + prefix_offset);
+ printf("Would remove %s\n", qname);
continue;
} else if (!quiet) {
- printf("Removing %s\n",
- ent->name + prefix_offset);
+ printf("Removing %s\n", qname);
}
if (unlink(ent->name) != 0) {
- warning("failed to remove '%s'", ent->name);
+ warning("failed to remove '%s'", qname);
errors++;
}
}
opts.head_idx = 1;
opts.index_only = 1;
opts.merge = 1;
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
opts.fn = oneway_merge;
tree = parse_tree_indirect(head_sha1);
static int debug; /* Display lots of verbose info */
static int all; /* Default to annotated tags only */
static int tags; /* But allow any tags if --tags is specified */
+static int longformat;
static int abbrev = DEFAULT_ABBREV;
static int max_candidates = 10;
const char *pattern = NULL;
+static int always;
struct commit_name {
+ struct tag *tag;
int prio; /* annotated tag = 2, tag = 1, head = 0 */
+ unsigned char sha1[20];
char path[FLEX_ARRAY]; /* more */
};
static const char *prio_names[] = {
static void add_to_known_names(const char *path,
struct commit *commit,
- int prio)
+ int prio,
+ const unsigned char *sha1)
{
struct commit_name *e = commit->util;
if (!e || e->prio < prio) {
size_t len = strlen(path)+1;
free(e);
e = xmalloc(sizeof(struct commit_name) + len);
+ e->tag = NULL;
e->prio = prio;
+ hashcpy(e->sha1, sha1);
memcpy(e->path, path, len);
commit->util = e;
}
if (!tags && prio < 2)
return 0;
}
- add_to_known_names(all ? path + 5 : path + 10, commit, prio);
+ add_to_known_names(all ? path + 5 : path + 10, commit, prio, sha1);
return 0;
}
return seen_commits;
}
+static void display_name(struct commit_name *n)
+{
+ if (n->prio == 2 && !n->tag) {
+ n->tag = lookup_tag(n->sha1);
+ if (!n->tag || parse_tag(n->tag) || !n->tag->tag)
+ die("annotated tag %s not available", n->path);
+ if (strcmp(n->tag->tag, n->path))
+ warning("tag '%s' is really '%s' here", n->tag->tag, n->path);
+ }
+
+ if (n->tag)
+ printf("%s", n->tag->tag);
+ else
+ printf("%s", n->path);
+}
+
+static void show_suffix(int depth, const unsigned char *sha1)
+{
+ printf("-%d-g%s", depth, find_unique_abbrev(sha1, abbrev));
+}
+
static void describe(const char *arg, int last_one)
{
unsigned char sha1[20];
n = cmit->util;
if (n) {
- printf("%s\n", n->path);
+ /*
+ * Exact match to an existing ref.
+ */
+ display_name(n);
+ if (longformat)
+ show_suffix(0, n->tag->tagged->sha1);
+ printf("\n");
return;
}
}
}
- if (!match_cnt)
- die("cannot describe '%s'", sha1_to_hex(cmit->object.sha1));
+ if (!match_cnt) {
+ const unsigned char *sha1 = cmit->object.sha1;
+ if (always) {
+ printf("%s\n", find_unique_abbrev(sha1, abbrev));
+ return;
+ }
+ die("cannot describe '%s'", sha1_to_hex(sha1));
+ }
qsort(all_matches, match_cnt, sizeof(all_matches[0]), compare_pt);
sha1_to_hex(gave_up_on->object.sha1));
}
}
- if (abbrev == 0)
- printf("%s\n", all_matches[0].name->path );
- else
- printf("%s-%d-g%s\n", all_matches[0].name->path,
- all_matches[0].depth,
- find_unique_abbrev(cmit->object.sha1, abbrev));
+
+ display_name(all_matches[0].name);
+ if (abbrev)
+ show_suffix(all_matches[0].depth, cmit->object.sha1);
+ printf("\n");
if (!last_one)
clear_commit_marks(cmit, -1);
OPT_BOOLEAN(0, "debug", &debug, "debug search strategy on stderr"),
OPT_BOOLEAN(0, "all", &all, "use any ref in .git/refs"),
OPT_BOOLEAN(0, "tags", &tags, "use any tag in .git/refs/tags"),
+ OPT_BOOLEAN(0, "long", &longformat, "always use long format"),
OPT__ABBREV(&abbrev),
OPT_SET_INT(0, "exact-match", &max_candidates,
"only output exact matches", 0),
"consider <n> most recent tags (default: 10)"),
OPT_STRING(0, "match", &pattern, "pattern",
"only consider tags matching <pattern>"),
+ OPT_BOOLEAN(0, "always", &always,
+ "show abbreviated commit object as fallback"),
OPT_END(),
};
save_commit_buffer = 0;
+ if (longformat && abbrev == 0)
+ die("--long is incompatible with --abbrev=0");
+
if (contains) {
- const char **args = xmalloc((6 + argc) * sizeof(char*));
+ const char **args = xmalloc((7 + argc) * sizeof(char*));
int i = 0;
args[i++] = "name-rev";
args[i++] = "--name-only";
args[i++] = "--no-undefined";
+ if (always)
+ args[i++] = "--always";
if (!all) {
args[i++] = "--tags";
if (pattern) {
};
static const char fetch_pack_usage[] =
-"git-fetch-pack [--all] [--quiet|-q] [--keep|-k] [--thin] [--upload-pack=<git-upload-pack>] [--depth=<n>] [--no-progress] [-v] [<host>:]<directory> [<refs>...]";
+"git-fetch-pack [--all] [--quiet|-q] [--keep|-k] [--thin] [--include-tag] [--upload-pack=<git-upload-pack>] [--depth=<n>] [--no-progress] [-v] [<host>:]<directory> [<refs>...]";
#define COMPLETE (1U << 0)
#define COMMON (1U << 1)
commit->object.flags |= mark;
if (!(commit->object.parsed))
- parse_commit(commit);
+ if (parse_commit(commit))
+ return;
insert_by_date(commit, &rev_list);
if (!ancestors_only && !(o->flags & POPPED))
non_common_revs--;
if (!o->parsed && !dont_parse)
- parse_commit(commit);
+ if (parse_commit(commit))
+ return;
for (parents = commit->parents;
parents;
while (commit == NULL) {
unsigned int mark;
- struct commit_list* parents;
+ struct commit_list *parents = NULL;
if (rev_list == NULL || non_common_revs == 0)
return NULL;
commit = rev_list->item;
if (!(commit->object.parsed))
- parse_commit(commit);
+ if (!parse_commit(commit))
+ parents = commit->parents;
+
commit->object.flags |= POPPED;
if (!(commit->object.flags & COMMON))
non_common_revs--;
- parents = commit->parents;
-
if (commit->object.flags & COMMON) {
/* do not send "have", and ignore ancestors */
commit = NULL;
}
if (!fetching)
- packet_write(fd[1], "want %s%s%s%s%s%s%s\n",
+ packet_write(fd[1], "want %s%s%s%s%s%s%s%s\n",
sha1_to_hex(remote),
(multi_ack ? " multi_ack" : ""),
(use_sideband == 2 ? " side-band-64k" : ""),
(use_sideband == 1 ? " side-band" : ""),
(args.use_thin_pack ? " thin-pack" : ""),
(args.no_progress ? " no-progress" : ""),
+ (args.include_tag ? " include-tag" : ""),
" ofs-delta");
else
packet_write(fd[1], "want %s\n", sha1_to_hex(remote));
if (!lookup_object(sha1))
die("object not found: %s", line);
/* make sure that it is parsed as shallow */
- parse_object(sha1);
+ if (!parse_object(sha1))
+ die("error in object: %s", line);
if (unregister_shallow(sha1))
die("no shallow found: %s", line);
continue;
int retval;
unsigned long cutoff = 0;
- track_object_refs = 0;
save_commit_buffer = 0;
for (ref = *refs; ref; ref = ref->next) {
args.use_thin_pack = 1;
continue;
}
+ if (!strcmp("--include-tag", arg)) {
+ args.include_tag = 1;
+ continue;
+ }
if (!strcmp("--all", arg)) {
args.fetch_all = 1;
continue;
}
}
+static void find_non_local_tags(struct transport *transport,
+ struct ref **head,
+ struct ref ***tail);
+
static struct ref *get_ref_map(struct transport *transport,
struct refspec *refs, int ref_count, int tags,
int *autotags)
if (!ref_map)
die("Couldn't find remote ref HEAD");
ref_map->merge = 1;
+ tail = &ref_map->next;
}
}
+ if (tags == TAGS_DEFAULT && *autotags)
+ find_non_local_tags(transport, &ref_map, &tail);
ref_remove_duplicates(ref_map);
return ref_map;
return 0;
}
-static struct ref *find_non_local_tags(struct transport *transport,
- struct ref *fetch_map)
+static int will_fetch(struct ref **head, const unsigned char *sha1)
+{
+ struct ref *rm = *head;
+ while (rm) {
+ if (!hashcmp(rm->old_sha1, sha1))
+ return 1;
+ rm = rm->next;
+ }
+ return 0;
+}
+
+static void find_non_local_tags(struct transport *transport,
+ struct ref **head,
+ struct ref ***tail)
{
- static struct path_list existing_refs = { NULL, 0, 0, 0 };
+ struct path_list existing_refs = { NULL, 0, 0, 0 };
struct path_list new_refs = { NULL, 0, 0, 1 };
char *ref_name;
int ref_name_len;
const unsigned char *ref_sha1;
const struct ref *tag_ref;
struct ref *rm = NULL;
- struct ref *ref_map = NULL;
- struct ref **tail = &ref_map;
const struct ref *ref;
for_each_ref(add_existing, &existing_refs);
if (!path_list_has_path(&existing_refs, ref_name) &&
!path_list_has_path(&new_refs, ref_name) &&
- has_sha1_file(ref->old_sha1)) {
+ (has_sha1_file(ref->old_sha1) ||
+ will_fetch(head, ref->old_sha1))) {
path_list_insert(ref_name, &new_refs);
rm = alloc_ref(strlen(ref_name) + 1);
strcpy(rm->peer_ref->name, ref_name);
hashcpy(rm->old_sha1, ref_sha1);
- *tail = rm;
- tail = &rm->next;
+ **tail = rm;
+ *tail = &rm->next;
}
free(ref_name);
}
-
- return ref_map;
+ path_list_clear(&existing_refs, 0);
+ path_list_clear(&new_refs, 0);
}
static int do_fetch(struct transport *transport,
struct refspec *refs, int ref_count)
{
- struct ref *ref_map, *fetch_map;
+ struct ref *ref_map;
struct ref *rm;
int autotags = (transport->remote->fetch_tags == 1);
if (transport->remote->fetch_tags == 2 && tags != TAGS_UNSET)
read_ref(rm->peer_ref->name, rm->peer_ref->old_sha1);
}
+ if (tags == TAGS_DEFAULT && autotags)
+ transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, "1");
if (fetch_refs(transport, ref_map)) {
free_refs(ref_map);
return 1;
}
-
- fetch_map = ref_map;
+ free_refs(ref_map);
/* if neither --no-tags nor --tags was specified, do automated tag
* following ... */
if (tags == TAGS_DEFAULT && autotags) {
- ref_map = find_non_local_tags(transport, fetch_map);
+ struct ref **tail = &ref_map;
+ ref_map = NULL;
+ find_non_local_tags(transport, &ref_map, &tail);
if (ref_map) {
+ transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, NULL);
transport_set_option(transport, TRANS_OPT_DEPTH, "0");
fetch_refs(transport, ref_map);
}
free_refs(ref_map);
}
- free_refs(fetch_map);
-
transport_disconnect(transport);
return 0;
#include "pack.h"
#include "cache-tree.h"
#include "tree-walk.h"
+#include "fsck.h"
#include "parse-options.h"
#define REACHABLE 0x0001
return -1;
}
-static int objwarning(struct object *obj, const char *err, ...)
+static int fsck_error_func(struct object *obj, int type, const char *err, ...)
{
va_list params;
va_start(params, err);
- objreport(obj, "warning", err, params);
+ objreport(obj, (type == FSCK_WARN) ? "warning" : "error", err, params);
va_end(params);
- return -1;
+ return (type == FSCK_WARN) ? 0 : 1;
+}
+
+static int mark_object(struct object *obj, int type, void *data)
+{
+ struct tree *tree = NULL;
+ struct object *parent = data;
+ int result;
+
+ if (!obj) {
+ printf("broken link from %7s %s\n",
+ typename(parent->type), sha1_to_hex(parent->sha1));
+ printf("broken link from %7s %s\n",
+ (type == OBJ_ANY ? "unknown" : typename(type)), "unknown");
+ errors_found |= ERROR_REACHABLE;
+ return 1;
+ }
+
+ if (type != OBJ_ANY && obj->type != type)
+ objerror(parent, "wrong object type in link");
+
+ if (obj->flags & REACHABLE)
+ return 0;
+ obj->flags |= REACHABLE;
+ if (!obj->parsed) {
+ if (parent && !has_sha1_file(obj->sha1)) {
+ printf("broken link from %7s %s\n",
+ typename(parent->type), sha1_to_hex(parent->sha1));
+ printf(" to %7s %s\n",
+ typename(obj->type), sha1_to_hex(obj->sha1));
+ errors_found |= ERROR_REACHABLE;
+ }
+ return 1;
+ }
+
+ if (obj->type == OBJ_TREE) {
+ obj->parsed = 0;
+ tree = (struct tree *)obj;
+ if (parse_tree(tree) < 0)
+ return 1; /* error already displayed */
+ }
+ result = fsck_walk(obj, mark_object, obj);
+ if (tree) {
+ free(tree->buffer);
+ tree->buffer = NULL;
+ }
+ if (result < 0)
+ result = 1;
+
+ return result;
+}
+
+static void mark_object_reachable(struct object *obj)
+{
+ mark_object(obj, OBJ_ANY, 0);
+}
+
+static int mark_used(struct object *obj, int type, void *data)
+{
+ if (!obj)
+ return 1;
+ obj->used = 1;
+ return 0;
}
/*
*/
static void check_reachable_object(struct object *obj)
{
- const struct object_refs *refs;
-
/*
* We obviously want the object to be parsed,
* except if it was in a pack-file and we didn't
errors_found |= ERROR_REACHABLE;
return;
}
-
- /*
- * Check that everything that we try to reference is also good.
- */
- refs = lookup_object_refs(obj);
- if (refs) {
- unsigned j;
- for (j = 0; j < refs->count; j++) {
- struct object *ref = refs->ref[j];
- if (ref->parsed ||
- (has_sha1_file(ref->sha1)))
- continue;
- printf("broken link from %7s %s\n",
- typename(obj->type), sha1_to_hex(obj->sha1));
- printf(" to %7s %s\n",
- typename(ref->type), sha1_to_hex(ref->sha1));
- errors_found |= ERROR_REACHABLE;
- }
- }
}
/*
}
}
-/*
- * The entries in a tree are ordered in the _path_ order,
- * which means that a directory entry is ordered by adding
- * a slash to the end of it.
- *
- * So a directory called "a" is ordered _after_ a file
- * called "a.c", because "a/" sorts after "a.c".
- */
-#define TREE_UNORDERED (-1)
-#define TREE_HAS_DUPS (-2)
-
-static int verify_ordered(unsigned mode1, const char *name1, unsigned mode2, const char *name2)
+static int fsck_sha1(const unsigned char *sha1)
{
- int len1 = strlen(name1);
- int len2 = strlen(name2);
- int len = len1 < len2 ? len1 : len2;
- unsigned char c1, c2;
- int cmp;
-
- cmp = memcmp(name1, name2, len);
- if (cmp < 0)
+ struct object *obj = parse_object(sha1);
+ if (!obj) {
+ errors_found |= ERROR_OBJECT;
+ return error("%s: object corrupt or missing",
+ sha1_to_hex(sha1));
+ }
+ if (obj->flags & SEEN)
return 0;
- if (cmp > 0)
- return TREE_UNORDERED;
-
- /*
- * Ok, the first <len> characters are the same.
- * Now we need to order the next one, but turn
- * a '\0' into a '/' for a directory entry.
- */
- c1 = name1[len];
- c2 = name2[len];
- if (!c1 && !c2)
- /*
- * git-write-tree used to write out a nonsense tree that has
- * entries with the same name, one blob and one tree. Make
- * sure we do not have duplicate entries.
- */
- return TREE_HAS_DUPS;
- if (!c1 && S_ISDIR(mode1))
- c1 = '/';
- if (!c2 && S_ISDIR(mode2))
- c2 = '/';
- return c1 < c2 ? 0 : TREE_UNORDERED;
-}
-
-static int fsck_tree(struct tree *item)
-{
- int retval;
- int has_full_path = 0;
- int has_empty_name = 0;
- int has_zero_pad = 0;
- int has_bad_modes = 0;
- int has_dup_entries = 0;
- int not_properly_sorted = 0;
- struct tree_desc desc;
- unsigned o_mode;
- const char *o_name;
- const unsigned char *o_sha1;
+ obj->flags |= SEEN;
if (verbose)
- fprintf(stderr, "Checking tree %s\n",
- sha1_to_hex(item->object.sha1));
-
- init_tree_desc(&desc, item->buffer, item->size);
-
- o_mode = 0;
- o_name = NULL;
- o_sha1 = NULL;
- while (desc.size) {
- unsigned mode;
- const char *name;
- const unsigned char *sha1;
-
- sha1 = tree_entry_extract(&desc, &name, &mode);
-
- if (strchr(name, '/'))
- has_full_path = 1;
- if (!*name)
- has_empty_name = 1;
- has_zero_pad |= *(char *)desc.buffer == '0';
- update_tree_entry(&desc);
-
- switch (mode) {
- /*
- * Standard modes..
- */
- case S_IFREG | 0755:
- case S_IFREG | 0644:
- case S_IFLNK:
- case S_IFDIR:
- case S_IFGITLINK:
- break;
- /*
- * This is nonstandard, but we had a few of these
- * early on when we honored the full set of mode
- * bits..
- */
- case S_IFREG | 0664:
- if (!check_strict)
- break;
- default:
- has_bad_modes = 1;
- }
+ fprintf(stderr, "Checking %s %s\n",
+ typename(obj->type), sha1_to_hex(obj->sha1));
- if (o_name) {
- switch (verify_ordered(o_mode, o_name, mode, name)) {
- case TREE_UNORDERED:
- not_properly_sorted = 1;
- break;
- case TREE_HAS_DUPS:
- has_dup_entries = 1;
- break;
- default:
- break;
- }
- }
+ if (fsck_walk(obj, mark_used, 0))
+ objerror(obj, "broken links");
+ if (fsck_object(obj, check_strict, fsck_error_func))
+ return -1;
- o_mode = mode;
- o_name = name;
- o_sha1 = sha1;
- }
- free(item->buffer);
- item->buffer = NULL;
+ if (obj->type == OBJ_TREE) {
+ struct tree *item = (struct tree *) obj;
- retval = 0;
- if (has_full_path) {
- objwarning(&item->object, "contains full pathnames");
+ free(item->buffer);
+ item->buffer = NULL;
}
- if (has_empty_name) {
- objwarning(&item->object, "contains empty pathname");
- }
- if (has_zero_pad) {
- objwarning(&item->object, "contains zero-padded file modes");
- }
- if (has_bad_modes) {
- objwarning(&item->object, "contains bad file modes");
- }
- if (has_dup_entries) {
- retval = objerror(&item->object, "contains duplicate file entries");
- }
- if (not_properly_sorted) {
- retval = objerror(&item->object, "not properly sorted");
- }
- return retval;
-}
-static int fsck_commit(struct commit *commit)
-{
- char *buffer = commit->buffer;
- unsigned char tree_sha1[20], sha1[20];
+ if (obj->type == OBJ_COMMIT) {
+ struct commit *commit = (struct commit *) obj;
- if (verbose)
- fprintf(stderr, "Checking commit %s\n",
- sha1_to_hex(commit->object.sha1));
-
- if (!commit->date)
- return objerror(&commit->object, "invalid author/committer line");
-
- if (memcmp(buffer, "tree ", 5))
- return objerror(&commit->object, "invalid format - expected 'tree' line");
- if (get_sha1_hex(buffer+5, tree_sha1) || buffer[45] != '\n')
- return objerror(&commit->object, "invalid 'tree' line format - bad sha1");
- buffer += 46;
- while (!memcmp(buffer, "parent ", 7)) {
- if (get_sha1_hex(buffer+7, sha1) || buffer[47] != '\n')
- return objerror(&commit->object, "invalid 'parent' line format - bad sha1");
- buffer += 48;
- }
- if (memcmp(buffer, "author ", 7))
- return objerror(&commit->object, "invalid format - expected 'author' line");
- free(commit->buffer);
- commit->buffer = NULL;
- if (!commit->tree)
- return objerror(&commit->object, "could not load commit's tree %s", tree_sha1);
- if (!commit->parents && show_root)
- printf("root %s\n", sha1_to_hex(commit->object.sha1));
- return 0;
-}
+ free(commit->buffer);
+ commit->buffer = NULL;
-static int fsck_tag(struct tag *tag)
-{
- struct object *tagged = tag->tagged;
+ if (!commit->parents && show_root)
+ printf("root %s\n", sha1_to_hex(commit->object.sha1));
+ }
- if (verbose)
- fprintf(stderr, "Checking tag %s\n",
- sha1_to_hex(tag->object.sha1));
+ if (obj->type == OBJ_TAG) {
+ struct tag *tag = (struct tag *) obj;
- if (!tagged) {
- return objerror(&tag->object, "could not load tagged object");
+ if (show_tags && tag->tagged) {
+ printf("tagged %s %s", typename(tag->tagged->type), sha1_to_hex(tag->tagged->sha1));
+ printf(" (%s) in %s\n", tag->tag, sha1_to_hex(tag->object.sha1));
+ }
}
- if (!show_tags)
- return 0;
- printf("tagged %s %s", typename(tagged->type), sha1_to_hex(tagged->sha1));
- printf(" (%s) in %s\n", tag->tag, sha1_to_hex(tag->object.sha1));
return 0;
}
-static int fsck_sha1(const unsigned char *sha1)
-{
- struct object *obj = parse_object(sha1);
- if (!obj) {
- errors_found |= ERROR_OBJECT;
- return error("%s: object corrupt or missing",
- sha1_to_hex(sha1));
- }
- if (obj->flags & SEEN)
- return 0;
- obj->flags |= SEEN;
- if (obj->type == OBJ_BLOB)
- return 0;
- if (obj->type == OBJ_TREE)
- return fsck_tree((struct tree *) obj);
- if (obj->type == OBJ_COMMIT)
- return fsck_commit((struct commit *) obj);
- if (obj->type == OBJ_TAG)
- return fsck_tag((struct tag *) obj);
-
- /* By now, parse_object() would've returned NULL instead. */
- return objerror(obj, "unknown type '%d' (internal fsck error)",
- obj->type);
-}
-
/*
* This is the sorting chunk size: make it reasonably
* big so that we can sort well..
obj = lookup_object(osha1);
if (obj) {
obj->used = 1;
- mark_reachable(obj, REACHABLE);
+ mark_object_reachable(obj);
}
}
obj = lookup_object(nsha1);
if (obj) {
obj->used = 1;
- mark_reachable(obj, REACHABLE);
+ mark_object_reachable(obj);
}
return 0;
}
error("%s: not a commit", refname);
default_refs++;
obj->used = 1;
- mark_reachable(obj, REACHABLE);
+ mark_object_reachable(obj);
return 0;
}
sha1_to_hex(it->sha1));
return 1;
}
- mark_reachable(obj, REACHABLE);
+ mark_object_reachable(obj);
obj->used = 1;
if (obj->type != OBJ_TREE)
err |= objerror(obj, "non-tree in cache-tree");
{
int i, heads;
- track_object_refs = 1;
errors_found = 0;
argc = parse_options(argc, argv, fsck_opts, fsck_usage, 0);
continue;
obj->used = 1;
- mark_reachable(obj, REACHABLE);
+ mark_object_reachable(obj);
heads++;
continue;
}
continue;
obj = &blob->object;
obj->used = 1;
- mark_reachable(obj, REACHABLE);
+ mark_object_reachable(obj);
}
if (active_cache_tree)
fsck_cache_tree(active_cache_tree);
int prune = 0;
int aggressive = 0;
int auto_gc = 0;
+ int quiet = 0;
char buf[80];
struct option builtin_gc_options[] = {
OPT_BOOLEAN(0, "prune", &prune, "prune unreferenced objects"),
OPT_BOOLEAN(0, "aggressive", &aggressive, "be more thorough (increased runtime)"),
OPT_BOOLEAN(0, "auto", &auto_gc, "enable auto-gc mode"),
+ OPT_BOOLEAN('q', "quiet", &quiet, "suppress progress reports"),
OPT_END()
};
append_option(argv_repack, buf, MAX_ADD);
}
}
+ if (quiet)
+ append_option(argv_repack, "-q", MAX_ADD);
if (auto_gc) {
/*
url = rewritten_url;
}
- walker = get_http_walker(url);
+ walker = get_http_walker(url, NULL);
walker->get_tree = get_tree;
walker->get_history = get_history;
walker->get_all = get_all;
static int default_show_root = 1;
static const char *fmt_patch_subject_prefix = "PATCH";
+static const char *fmt_pretty;
static void add_name_decoration(const char *prefix, const char *name, struct object *obj)
{
rev->abbrev = DEFAULT_ABBREV;
rev->commit_format = CMIT_FMT_DEFAULT;
+ if (fmt_pretty)
+ rev->commit_format = get_commit_format(fmt_pretty);
rev->verbose_header = 1;
DIFF_OPT_SET(&rev->diffopt, RECURSIVE);
rev->show_root_diff = default_show_root;
static int git_log_config(const char *var, const char *value)
{
+ if (!strcmp(var, "format.pretty"))
+ return git_config_string(&fmt_pretty, var, value);
if (!strcmp(var, "format.subjectprefix")) {
if (!value)
config_error_nonbool(var);
int nr, struct commit **list, struct commit *head)
{
const char *committer;
- const char *origin_sha1, *head_sha1;
- const char *argv[7];
+ char *head_sha1;
const char *subject_start = NULL;
const char *body = "*** SUBJECT HERE ***\n\n*** BLURB HERE ***\n";
const char *msg;
struct strbuf sb;
int i;
const char *encoding = "utf-8";
+ struct diff_options opts;
if (rev->commit_format != CMIT_FMT_EMAIL)
die("Cover letter needs email format");
strbuf_release(&sb);
shortlog_init(&log);
+ log.wrap_lines = 1;
+ log.wrap = 72;
+ log.in1 = 2;
+ log.in2 = 4;
for (i = 0; i < nr; i++)
shortlog_add_commit(&log, list[i]);
if (!origin)
return;
- origin_sha1 = sha1_to_hex(origin->object.sha1);
+ memcpy(&opts, &rev->diffopt, sizeof(opts));
+ opts.output_format = DIFF_FORMAT_SUMMARY | DIFF_FORMAT_DIFFSTAT;
- argv[0] = "diff";
- argv[1] = "--stat";
- argv[2] = "--summary";
- argv[3] = head_sha1;
- argv[4] = "--not";
- argv[5] = origin_sha1;
- argv[6] = "--";
- argv[7] = NULL;
- fflush(stdout);
- run_command_v_opt(argv, RUN_GIT_CMD);
+ diff_setup_done(&opts);
+
+ diff_tree_sha1(origin->tree->object.sha1,
+ head->tree->object.sha1,
+ "", &opts);
+ diffcore_std(&opts);
+ diff_flush(&opts);
- fflush(stdout);
printf("\n");
}
rev.boundary = 1;
while ((commit = get_revision(&rev)) != NULL) {
if (commit->object.flags & BOUNDARY) {
- fprintf(stderr, "Boundary %s\n", sha1_to_hex(commit->object.sha1));
boundary_count++;
origin = (boundary_count == 1) ? commit : NULL;
continue;
pathspec = get_pathspec(prefix, argv + i);
/* Verify that the pathspec matches the prefix */
- if (pathspec) {
- if (argc != i) {
- int cnt;
- for (cnt = 0; pathspec[cnt]; cnt++)
- ;
- if (cnt != (argc - i))
- exit(1); /* error message already given */
- }
+ if (pathspec)
prefix = verify_pathspec(prefix);
- } else if (argc != i)
- exit(1); /* error message already given */
/* Treat unmatching pathspec elements as errors */
if (pathspec && error_unmatch) {
transport_set_option(transport, TRANS_OPT_UPLOADPACK, uploadpack);
ref = transport_get_remote_refs(transport);
- transport_disconnect(transport);
-
- if (!ref)
+ if (transport_disconnect(transport))
return 1;
-
for ( ; ref; ref = ref->next) {
if (!check_ref_type(ref, flags))
continue;
#include "tree-walk.h"
#include "diff.h"
#include "diffcore.h"
-#include "run-command.h"
#include "tag.h"
#include "unpack-trees.h"
#include "path-list.h"
#include "xdiff-interface.h"
+#include "ll-merge.h"
#include "interpolate.h"
#include "attr.h"
#include "merge-recursive.h"
opts.merge = 1;
opts.head_idx = 2;
opts.fn = threeway_merge;
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
init_tree_desc_from_tree(t+0, common);
init_tree_desc_from_tree(t+1, head);
mm->size = size;
}
-/*
- * Customizable low-level merge drivers support.
- */
-
-struct ll_merge_driver;
-typedef int (*ll_merge_fn)(const struct ll_merge_driver *,
- const char *path,
- mmfile_t *orig,
- mmfile_t *src1, const char *name1,
- mmfile_t *src2, const char *name2,
- mmbuffer_t *result);
-
-struct ll_merge_driver {
- const char *name;
- const char *description;
- ll_merge_fn fn;
- const char *recursive;
- struct ll_merge_driver *next;
- char *cmdline;
-};
-
-/*
- * Built-in low-levels
- */
-static int ll_binary_merge(const struct ll_merge_driver *drv_unused,
- const char *path_unused,
- mmfile_t *orig,
- mmfile_t *src1, const char *name1,
- mmfile_t *src2, const char *name2,
- mmbuffer_t *result)
-{
- /*
- * The tentative merge result is "ours" for the final round,
- * or common ancestor for an internal merge. Still return
- * "conflicted merge" status.
- */
- mmfile_t *stolen = index_only ? orig : src1;
-
- result->ptr = stolen->ptr;
- result->size = stolen->size;
- stolen->ptr = NULL;
- return 1;
-}
-
-static int ll_xdl_merge(const struct ll_merge_driver *drv_unused,
- const char *path_unused,
- mmfile_t *orig,
- mmfile_t *src1, const char *name1,
- mmfile_t *src2, const char *name2,
- mmbuffer_t *result)
-{
- xpparam_t xpp;
-
- if (buffer_is_binary(orig->ptr, orig->size) ||
- buffer_is_binary(src1->ptr, src1->size) ||
- buffer_is_binary(src2->ptr, src2->size)) {
- warning("Cannot merge binary files: %s vs. %s\n",
- name1, name2);
- return ll_binary_merge(drv_unused, path_unused,
- orig, src1, name1,
- src2, name2,
- result);
- }
-
- memset(&xpp, 0, sizeof(xpp));
- return xdl_merge(orig,
- src1, name1,
- src2, name2,
- &xpp, XDL_MERGE_ZEALOUS,
- result);
-}
-
-static int ll_union_merge(const struct ll_merge_driver *drv_unused,
- const char *path_unused,
- mmfile_t *orig,
- mmfile_t *src1, const char *name1,
- mmfile_t *src2, const char *name2,
- mmbuffer_t *result)
-{
- char *src, *dst;
- long size;
- const int marker_size = 7;
-
- int status = ll_xdl_merge(drv_unused, path_unused,
- orig, src1, NULL, src2, NULL, result);
- if (status <= 0)
- return status;
- size = result->size;
- src = dst = result->ptr;
- while (size) {
- char ch;
- if ((marker_size < size) &&
- (*src == '<' || *src == '=' || *src == '>')) {
- int i;
- ch = *src;
- for (i = 0; i < marker_size; i++)
- if (src[i] != ch)
- goto not_a_marker;
- if (src[marker_size] != '\n')
- goto not_a_marker;
- src += marker_size + 1;
- size -= marker_size + 1;
- continue;
- }
- not_a_marker:
- do {
- ch = *src++;
- *dst++ = ch;
- size--;
- } while (ch != '\n' && size);
- }
- result->size = dst - result->ptr;
- return 0;
-}
-
-#define LL_BINARY_MERGE 0
-#define LL_TEXT_MERGE 1
-#define LL_UNION_MERGE 2
-static struct ll_merge_driver ll_merge_drv[] = {
- { "binary", "built-in binary merge", ll_binary_merge },
- { "text", "built-in 3-way text merge", ll_xdl_merge },
- { "union", "built-in union merge", ll_union_merge },
-};
-
-static void create_temp(mmfile_t *src, char *path)
-{
- int fd;
-
- strcpy(path, ".merge_file_XXXXXX");
- fd = xmkstemp(path);
- if (write_in_full(fd, src->ptr, src->size) != src->size)
- die("unable to write temp-file");
- close(fd);
-}
-
-/*
- * User defined low-level merge driver support.
- */
-static int ll_ext_merge(const struct ll_merge_driver *fn,
- const char *path,
- mmfile_t *orig,
- mmfile_t *src1, const char *name1,
- mmfile_t *src2, const char *name2,
- mmbuffer_t *result)
-{
- char temp[3][50];
- char cmdbuf[2048];
- struct interp table[] = {
- { "%O" },
- { "%A" },
- { "%B" },
- };
- struct child_process child;
- const char *args[20];
- int status, fd, i;
- struct stat st;
-
- if (fn->cmdline == NULL)
- die("custom merge driver %s lacks command line.", fn->name);
-
- result->ptr = NULL;
- result->size = 0;
- create_temp(orig, temp[0]);
- create_temp(src1, temp[1]);
- create_temp(src2, temp[2]);
-
- interp_set_entry(table, 0, temp[0]);
- interp_set_entry(table, 1, temp[1]);
- interp_set_entry(table, 2, temp[2]);
-
- output(1, "merging %s using %s", path,
- fn->description ? fn->description : fn->name);
-
- interpolate(cmdbuf, sizeof(cmdbuf), fn->cmdline, table, 3);
-
- memset(&child, 0, sizeof(child));
- child.argv = args;
- args[0] = "sh";
- args[1] = "-c";
- args[2] = cmdbuf;
- args[3] = NULL;
-
- status = run_command(&child);
- if (status < -ERR_RUN_COMMAND_FORK)
- ; /* failure in run-command */
- else
- status = -status;
- fd = open(temp[1], O_RDONLY);
- if (fd < 0)
- goto bad;
- if (fstat(fd, &st))
- goto close_bad;
- result->size = st.st_size;
- result->ptr = xmalloc(result->size + 1);
- if (read_in_full(fd, result->ptr, result->size) != result->size) {
- free(result->ptr);
- result->ptr = NULL;
- result->size = 0;
- }
- close_bad:
- close(fd);
- bad:
- for (i = 0; i < 3; i++)
- unlink(temp[i]);
- return status;
-}
-
-/*
- * merge.default and merge.driver configuration items
- */
-static struct ll_merge_driver *ll_user_merge, **ll_user_merge_tail;
-static const char *default_ll_merge;
-
-static int read_merge_config(const char *var, const char *value)
-{
- struct ll_merge_driver *fn;
- const char *ep, *name;
- int namelen;
-
- if (!strcmp(var, "merge.default")) {
- if (!value)
- return config_error_nonbool(var);
- default_ll_merge = strdup(value);
- return 0;
- }
-
- /*
- * We are not interested in anything but "merge.<name>.variable";
- * especially, we do not want to look at variables such as
- * "merge.summary", "merge.tool", and "merge.verbosity".
- */
- if (prefixcmp(var, "merge.") || (ep = strrchr(var, '.')) == var + 5)
- return 0;
-
- /*
- * Find existing one as we might be processing merge.<name>.var2
- * after seeing merge.<name>.var1.
- */
- name = var + 6;
- namelen = ep - name;
- for (fn = ll_user_merge; fn; fn = fn->next)
- if (!strncmp(fn->name, name, namelen) && !fn->name[namelen])
- break;
- if (!fn) {
- fn = xcalloc(1, sizeof(struct ll_merge_driver));
- fn->name = xmemdupz(name, namelen);
- fn->fn = ll_ext_merge;
- *ll_user_merge_tail = fn;
- ll_user_merge_tail = &(fn->next);
- }
-
- ep++;
-
- if (!strcmp("name", ep)) {
- if (!value)
- return config_error_nonbool(var);
- fn->description = strdup(value);
- return 0;
- }
-
- if (!strcmp("driver", ep)) {
- if (!value)
- return config_error_nonbool(var);
- /*
- * merge.<name>.driver specifies the command line:
- *
- * command-line
- *
- * The command-line will be interpolated with the following
- * tokens and is given to the shell:
- *
- * %O - temporary file name for the merge base.
- * %A - temporary file name for our version.
- * %B - temporary file name for the other branches' version.
- *
- * The external merge driver should write the results in the
- * file named by %A, and signal that it has done with zero exit
- * status.
- */
- fn->cmdline = strdup(value);
- return 0;
- }
-
- if (!strcmp("recursive", ep)) {
- if (!value)
- return config_error_nonbool(var);
- fn->recursive = strdup(value);
- return 0;
- }
-
- return 0;
-}
-
-static void initialize_ll_merge(void)
-{
- if (ll_user_merge_tail)
- return;
- ll_user_merge_tail = &ll_user_merge;
- git_config(read_merge_config);
-}
-
-static const struct ll_merge_driver *find_ll_merge_driver(const char *merge_attr)
-{
- struct ll_merge_driver *fn;
- const char *name;
- int i;
-
- initialize_ll_merge();
-
- if (ATTR_TRUE(merge_attr))
- return &ll_merge_drv[LL_TEXT_MERGE];
- else if (ATTR_FALSE(merge_attr))
- return &ll_merge_drv[LL_BINARY_MERGE];
- else if (ATTR_UNSET(merge_attr)) {
- if (!default_ll_merge)
- return &ll_merge_drv[LL_TEXT_MERGE];
- else
- name = default_ll_merge;
- }
- else
- name = merge_attr;
-
- for (fn = ll_user_merge; fn; fn = fn->next)
- if (!strcmp(fn->name, name))
- return fn;
-
- for (i = 0; i < ARRAY_SIZE(ll_merge_drv); i++)
- if (!strcmp(ll_merge_drv[i].name, name))
- return &ll_merge_drv[i];
-
- /* default to the 3-way */
- return &ll_merge_drv[LL_TEXT_MERGE];
-}
-
-static const char *git_path_check_merge(const char *path)
-{
- static struct git_attr_check attr_merge_check;
-
- if (!attr_merge_check.attr)
- attr_merge_check.attr = git_attr("merge", 5);
-
- if (git_checkattr(path, 1, &attr_merge_check))
- return NULL;
- return attr_merge_check.value;
-}
-
-static int ll_merge(mmbuffer_t *result_buf,
- struct diff_filespec *o,
- struct diff_filespec *a,
- struct diff_filespec *b,
- const char *branch1,
- const char *branch2)
+static int merge_3way(mmbuffer_t *result_buf,
+ struct diff_filespec *o,
+ struct diff_filespec *a,
+ struct diff_filespec *b,
+ const char *branch1,
+ const char *branch2)
{
mmfile_t orig, src1, src2;
char *name1, *name2;
int merge_status;
- const char *ll_driver_name;
- const struct ll_merge_driver *driver;
name1 = xstrdup(mkpath("%s:%s", branch1, a->path));
name2 = xstrdup(mkpath("%s:%s", branch2, b->path));
fill_mm(a->sha1, &src1);
fill_mm(b->sha1, &src2);
- ll_driver_name = git_path_check_merge(a->path);
- driver = find_ll_merge_driver(ll_driver_name);
-
- if (index_only && driver->recursive)
- driver = find_ll_merge_driver(driver->recursive);
- merge_status = driver->fn(driver, a->path,
- &orig, &src1, name1, &src2, name2,
- result_buf);
+ merge_status = ll_merge(result_buf, a->path, &orig,
+ &src1, name1, &src2, name2,
+ index_only);
free(name1);
free(name2);
mmbuffer_t result_buf;
int merge_status;
- merge_status = ll_merge(&result_buf, o, a, b,
- branch1, branch2);
+ merge_status = merge_3way(&result_buf, o, a, b,
+ branch1, branch2);
if ((merge_status < 0) || !result_buf.ptr)
die("Failed to execute internal merge");
int count, int base_name)
{
int i;
- int len = prefix ? strlen(prefix) : 0;
const char **result = xmalloc((count + 1) * sizeof(const char *));
memcpy(result, pathspec, count * sizeof(const char *));
result[count] = NULL;
if (last_slash)
result[i] = last_slash + 1;
}
- result[i] = prefix_path(prefix, len, result[i]);
- if (!result[i])
- exit(1); /* error already given */
}
- return result;
+ return get_pathspec(prefix, result);
}
static void show_list(const char *label, struct path_list *list)
}
/* returns a static buffer */
-static const char *get_rev_name(struct object *o)
+static const char *get_rev_name(const struct object *o)
{
static char buffer[1024];
struct rev_name *n;
}
}
+static void show_name(const struct object *obj,
+ const char *caller_name,
+ int always, int allow_undefined, int name_only)
+{
+ const char *name;
+ const unsigned char *sha1 = obj->sha1;
+
+ if (!name_only)
+ printf("%s ", caller_name ? caller_name : sha1_to_hex(sha1));
+ name = get_rev_name(obj);
+ if (name)
+ printf("%s\n", name);
+ else if (allow_undefined)
+ printf("undefined\n");
+ else if (always)
+ printf("%s\n", find_unique_abbrev(sha1, DEFAULT_ABBREV));
+ else
+ die("cannot describe '%s'", sha1_to_hex(sha1));
+}
+
static char const * const name_rev_usage[] = {
"git-name-rev [options] ( --all | --stdin | <commit>... )",
NULL
int cmd_name_rev(int argc, const char **argv, const char *prefix)
{
struct object_array revs = { 0, 0, NULL };
- int all = 0, transform_stdin = 0, allow_undefined = 1;
+ int all = 0, transform_stdin = 0, allow_undefined = 1, always = 0;
struct name_ref_data data = { 0, 0, NULL };
struct option opts[] = {
OPT_BOOLEAN(0, "name-only", &data.name_only, "print only names (no SHA-1)"),
OPT_BOOLEAN(0, "all", &all, "list all commits reachable from all refs"),
OPT_BOOLEAN(0, "stdin", &transform_stdin, "read from stdin"),
OPT_BOOLEAN(0, "undefined", &allow_undefined, "allow to print `undefined` names"),
+ OPT_BOOLEAN(0, "always", &always,
+ "show abbreviated commit object as fallback"),
OPT_END(),
};
int i, max;
max = get_max_object_index();
- for (i = 0; i < max; i++) {
- struct object * obj = get_indexed_object(i);
- const char *name;
- if (!obj)
- continue;
- if (!data.name_only)
- printf("%s ", sha1_to_hex(obj->sha1));
- name = get_rev_name(obj);
- if (name)
- printf("%s\n", name);
- else if (allow_undefined)
- printf("undefined\n");
- else
- die("cannot describe '%s'", sha1_to_hex(obj->sha1));
- }
+ for (i = 0; i < max; i++)
+ show_name(get_indexed_object(i), NULL,
+ always, allow_undefined, data.name_only);
} else {
int i;
- for (i = 0; i < revs.nr; i++) {
- const char *name;
- if (!data.name_only)
- printf("%s ", revs.objects[i].name);
- name = get_rev_name(revs.objects[i].item);
- if (name)
- printf("%s\n", name);
- else if (allow_undefined)
- printf("undefined\n");
- else
- die("cannot describe '%s'", sha1_to_hex(revs.objects[i].item->sha1));
- }
+ for (i = 0; i < revs.nr; i++)
+ show_name(revs.objects[i].item, revs.objects[i].name,
+ always, allow_undefined, data.name_only);
}
return 0;
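
A brief sketch of the new --always fallback (the object name below is hypothetical): with --no-undefined alone an unnameable commit makes name-rev die, while adding --always prints an abbreviated object name instead.

    $ git name-rev --name-only --no-undefined 33db5f4d            # may die: cannot describe
    $ git name-rev --name-only --no-undefined --always 33db5f4d   # falls back to an abbreviated SHA-1
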
#include "tree.h"
#include "delta.h"
#include "pack.h"
+#include "pack-revindex.h"
#include "csum-file.h"
#include "tree-walk.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "progress.h"
+#include "refs.h"
#ifdef THREADED_DELTA_SEARCH
#include "thread-utils.h"
[--window=N] [--window-memory=N] [--depth=N] \n\
[--no-reuse-delta] [--no-reuse-object] [--delta-base-offset] \n\
[--threads=N] [--non-empty] [--revs [--unpacked | --all]*] [--reflog] \n\
- [--stdout | base-name] [--keep-unreachable] [<ref-list | <object-list]";
+ [--stdout | base-name] [--include-tag] [--keep-unreachable] \n\
+ [<ref-list | <object-list]";
struct object_entry {
struct pack_idx_entry idx;
static uint32_t nr_objects, nr_alloc, nr_result, nr_written;
static int non_empty;
-static int no_reuse_delta, no_reuse_object, keep_unreachable;
+static int no_reuse_delta, no_reuse_object, keep_unreachable, include_tag;
static int local;
static int incremental;
static int allow_ofs_delta;
static int *object_ix;
static int object_ix_hashsz;
-/*
- * Pack index for existing packs give us easy access to the offsets into
- * corresponding pack file where each object's data starts, but the entries
- * do not store the size of the compressed representation (uncompressed
- * size is easily available by examining the pack entry header). It is
- * also rather expensive to find the sha1 for an object given its offset.
- *
- * We build a hashtable of existing packs (pack_revindex), and keep reverse
- * index here -- pack index file is sorted by object name mapping to offset;
- * this pack_revindex[].revindex array is a list of offset/index_nr pairs
- * ordered by offset, so if you know the offset of an object, next offset
- * is where its packed representation ends and the index_nr can be used to
- * get the object sha1 from the main index.
- */
-struct revindex_entry {
- off_t offset;
- unsigned int nr;
-};
-struct pack_revindex {
- struct packed_git *p;
- struct revindex_entry *revindex;
-};
-static struct pack_revindex *pack_revindex;
-static int pack_revindex_hashsz;
-
/*
* stats
*/
static uint32_t written, written_delta;
static uint32_t reused, reused_delta;
-static int pack_revindex_ix(struct packed_git *p)
-{
- unsigned long ui = (unsigned long)p;
- int i;
-
- ui = ui ^ (ui >> 16); /* defeat structure alignment */
- i = (int)(ui % pack_revindex_hashsz);
- while (pack_revindex[i].p) {
- if (pack_revindex[i].p == p)
- return i;
- if (++i == pack_revindex_hashsz)
- i = 0;
- }
- return -1 - i;
-}
-
-static void prepare_pack_ix(void)
-{
- int num;
- struct packed_git *p;
- for (num = 0, p = packed_git; p; p = p->next)
- num++;
- if (!num)
- return;
- pack_revindex_hashsz = num * 11;
- pack_revindex = xcalloc(sizeof(*pack_revindex), pack_revindex_hashsz);
- for (p = packed_git; p; p = p->next) {
- num = pack_revindex_ix(p);
- num = - 1 - num;
- pack_revindex[num].p = p;
- }
- /* revindex elements are lazily initialized */
-}
-
-static int cmp_offset(const void *a_, const void *b_)
-{
- const struct revindex_entry *a = a_;
- const struct revindex_entry *b = b_;
- return (a->offset < b->offset) ? -1 : (a->offset > b->offset) ? 1 : 0;
-}
-
-/*
- * Ordered list of offsets of objects in the pack.
- */
-static void prepare_pack_revindex(struct pack_revindex *rix)
-{
- struct packed_git *p = rix->p;
- int num_ent = p->num_objects;
- int i;
- const char *index = p->index_data;
-
- rix->revindex = xmalloc(sizeof(*rix->revindex) * (num_ent + 1));
- index += 4 * 256;
-
- if (p->index_version > 1) {
- const uint32_t *off_32 =
- (uint32_t *)(index + 8 + p->num_objects * (20 + 4));
- const uint32_t *off_64 = off_32 + p->num_objects;
- for (i = 0; i < num_ent; i++) {
- uint32_t off = ntohl(*off_32++);
- if (!(off & 0x80000000)) {
- rix->revindex[i].offset = off;
- } else {
- rix->revindex[i].offset =
- ((uint64_t)ntohl(*off_64++)) << 32;
- rix->revindex[i].offset |=
- ntohl(*off_64++);
- }
- rix->revindex[i].nr = i;
- }
- } else {
- for (i = 0; i < num_ent; i++) {
- uint32_t hl = *((uint32_t *)(index + 24 * i));
- rix->revindex[i].offset = ntohl(hl);
- rix->revindex[i].nr = i;
- }
- }
-
- /* This knows the pack format -- the 20-byte trailer
- * follows immediately after the last object data.
- */
- rix->revindex[num_ent].offset = p->pack_size - 20;
- rix->revindex[num_ent].nr = -1;
- qsort(rix->revindex, num_ent, sizeof(*rix->revindex), cmp_offset);
-}
-
-static struct revindex_entry * find_packed_object(struct packed_git *p,
- off_t ofs)
-{
- int num;
- int lo, hi;
- struct pack_revindex *rix;
- struct revindex_entry *revindex;
- num = pack_revindex_ix(p);
- if (num < 0)
- die("internal error: pack revindex uninitialized");
- rix = &pack_revindex[num];
- if (!rix->revindex)
- prepare_pack_revindex(rix);
- revindex = rix->revindex;
- lo = 0;
- hi = p->num_objects + 1;
- do {
- int mi = (lo + hi) / 2;
- if (revindex[mi].offset == ofs) {
- return revindex + mi;
- }
- else if (ofs < revindex[mi].offset)
- hi = mi;
- else
- lo = mi + 1;
- } while (lo < hi);
- die("internal error: pack revindex corrupt");
-}
-
-static const unsigned char *find_packed_object_name(struct packed_git *p,
- off_t ofs)
-{
- struct revindex_entry *entry = find_packed_object(p, ofs);
- return nth_packed_object_sha1(p, entry->nr);
-}
static void *delta_against(void *buf, unsigned long size, struct object_entry *entry)
{
}
hdrlen = encode_header(obj_type, entry->size, header);
offset = entry->in_pack_offset;
- revidx = find_packed_object(p, offset);
+ revidx = find_pack_revindex(p, offset);
datalen = revidx[1].offset - offset;
if (!pack_to_stdout && p->index_version > 1 &&
check_pack_crc(p, &w_curs, offset, datalen, revidx->nr))
die("delta base offset out of bound for %s",
sha1_to_hex(entry->idx.sha1));
ofs = entry->in_pack_offset - ofs;
- if (!no_reuse_delta && !entry->preferred_base)
- base_ref = find_packed_object_name(p, ofs);
+ if (!no_reuse_delta && !entry->preferred_base) {
+ struct revindex_entry *revidx;
+ revidx = find_pack_revindex(p, ofs);
+ base_ref = nth_packed_object_sha1(p, revidx->nr);
+ }
entry->in_pack_header_size = used + used_0;
break;
}
sorted_by_offset[i] = objects + i;
qsort(sorted_by_offset, nr_objects, sizeof(*sorted_by_offset), pack_offset_sort);
- prepare_pack_ix();
+ init_pack_revindex();
+
for (i = 0; i < nr_objects; i++)
check_object(sorted_by_offset[i]);
+
free(sorted_by_offset);
}
#define ll_find_deltas(l, s, w, d, p) find_deltas(l, &s, w, d, p)
#endif
+static int add_ref_tag(const char *path, const unsigned char *sha1, int flag, void *cb_data)
+{
+ unsigned char peeled[20];
+
+ if (!prefixcmp(path, "refs/tags/") && /* is a tag? */
+ !peel_ref(path, peeled) && /* peelable? */
+ !is_null_sha1(peeled) && /* annotated tag? */
+ locate_object_entry(peeled)) /* object packed? */
+ add_object_entry(sha1, OBJ_TAG, NULL, 0);
+ return 0;
+}
+
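
Roughly, the helper above lets pack-objects add annotated tags whose targets are already in the pack. A sketch of exercising the new option directly (normally it is driven by the fetch/upload-pack machinery; the ref name is illustrative):

    $ echo master | git pack-objects --revs --include-tag mypack
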
static void prepare_pack(int window, int depth)
{
struct object_entry **delta_list;
init_revisions(&revs, NULL);
save_commit_buffer = 0;
- track_object_refs = 0;
setup_revisions(ac, av, &revs, NULL);
while (fgets(line, sizeof(line), stdin) != NULL) {
keep_unreachable = 1;
continue;
}
+ if (!strcmp("--include-tag", arg)) {
+ include_tag = 1;
+ continue;
+ }
if (!strcmp("--unpacked", arg) ||
!prefixcmp(arg, "--unpacked=") ||
!strcmp("--reflog", arg) ||
rp_av[rp_ac] = NULL;
get_object_list(rp_ac, rp_av);
}
+ if (include_tag && nr_result)
+ for_each_ref(add_ref_tag, NULL);
stop_progress(&progress_state);
if (non_empty && !nr_result)
memset(&opts, 0, sizeof(opts));
opts.head_idx = -1;
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
git_config(git_default_config);
if ((opts.dir && !opts.update))
die("--exclude-per-directory is meaningless unless -u");
- if (opts.prefix) {
- int pfxlen = strlen(opts.prefix);
- int pos;
- if (opts.prefix[pfxlen-1] != '/')
- die("prefix must end with /");
- if (stage != 2)
- die("binding merge takes only one tree");
- pos = cache_name_pos(opts.prefix, pfxlen);
- if (0 <= pos)
- die("corrupt index file");
- pos = -pos-1;
- if (pos < active_nr &&
- !strncmp(active_cache[pos]->name, opts.prefix, pfxlen))
- die("subdirectory '%s' already exists.", opts.prefix);
- pos = cache_name_pos(opts.prefix, pfxlen-1);
- if (0 <= pos)
- die("file '%.*s' already exists.",
- pfxlen-1, opts.prefix);
- opts.pos = -1 - pos;
- }
-
if (opts.merge) {
if (stage < 2)
die("just how do you expect me to merge %d trees?", stage-1);
static const char reflog_expire_usage[] =
"git-reflog (show|expire) [--verbose] [--dry-run] [--stale-fix] [--expire=<time>] [--expire-unreachable=<time>] [--all] <refs>...";
+static const char reflog_delete_usage[] =
+"git-reflog delete [--verbose] [--dry-run] [--rewrite] [--updateref] <refs>...";
static unsigned long default_reflog_expire;
static unsigned long default_reflog_expire_unreachable;
struct rev_info revs;
int dry_run;
int stalefix;
+ int rewrite;
+ int updateref;
int verbose;
unsigned long expire_total;
unsigned long expire_unreachable;
+ int recno;
};
struct expire_reflog_cb {
const char *ref;
struct commit *ref_commit;
struct cmd_reflog_expire_cb *cmd;
+ unsigned char last_kept_sha1[20];
};
struct collected_reflog {
if (timestamp < cb->cmd->expire_total)
goto prune;
+ if (cb->cmd->rewrite)
+ osha1 = cb->last_kept_sha1;
+
old = new = NULL;
if (cb->cmd->stalefix &&
(!keep_entry(&old, osha1) || !keep_entry(&new, nsha1)))
goto prune;
}
+ if (cb->cmd->recno && --(cb->cmd->recno) == 0)
+ goto prune;
+
if (cb->newlog) {
char sign = (tz < 0) ? '-' : '+';
int zone = (tz < 0) ? (-tz) : tz;
sha1_to_hex(osha1), sha1_to_hex(nsha1),
email, timestamp, sign, zone,
message);
+ hashcpy(cb->last_kept_sha1, nsha1);
}
if (cb->cmd->verbose)
printf("keep %s", message);
status |= error("%s: %s", strerror(errno),
newlog_path);
unlink(newlog_path);
+ } else if (cmd->updateref &&
+ (write_in_full(lock->lock_fd,
+ sha1_to_hex(cb.last_kept_sha1), 40) != 40 ||
+ write_in_full(lock->lock_fd, "\n", 1) != 1 ||
+ close_ref(lock) < 0)) {
+ status |= error("Couldn't write %s",
+ lock->lk->filename);
+ unlink(newlog_path);
} else if (rename(newlog_path, log_file)) {
status |= error("cannot rename %s to %s",
newlog_path, log_file);
unlink(newlog_path);
+ } else if (cmd->updateref && commit_ref(lock)) {
+ status |= error("Couldn't set %s", lock->ref_name);
}
}
free(newlog_path);
cb.expire_unreachable = approxidate(arg + 21);
else if (!strcmp(arg, "--stale-fix"))
cb.stalefix = 1;
+ else if (!strcmp(arg, "--rewrite"))
+ cb.rewrite = 1;
+ else if (!strcmp(arg, "--updateref"))
+ cb.updateref = 1;
else if (!strcmp(arg, "--all"))
do_all = 1;
else if (!strcmp(arg, "--verbose"))
return status;
}
+static int count_reflog_ent(unsigned char *osha1, unsigned char *nsha1,
+ const char *email, unsigned long timestamp, int tz,
+ const char *message, void *cb_data)
+{
+ struct cmd_reflog_expire_cb *cb = cb_data;
+ if (!cb->expire_total || timestamp < cb->expire_total)
+ cb->recno++;
+ return 0;
+}
+
+static int cmd_reflog_delete(int argc, const char **argv, const char *prefix)
+{
+ struct cmd_reflog_expire_cb cb;
+ int i, status = 0;
+
+ memset(&cb, 0, sizeof(cb));
+
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (!strcmp(arg, "--dry-run") || !strcmp(arg, "-n"))
+ cb.dry_run = 1;
+ else if (!strcmp(arg, "--rewrite"))
+ cb.rewrite = 1;
+ else if (!strcmp(arg, "--updateref"))
+ cb.updateref = 1;
+ else if (!strcmp(arg, "--verbose"))
+ cb.verbose = 1;
+ else if (!strcmp(arg, "--")) {
+ i++;
+ break;
+ }
+ else if (arg[0] == '-')
+ usage(reflog_delete_usage);
+ else
+ break;
+ }
+
+ if (argc - i < 1)
+ return error("Nothing to delete?");
+
+ for ( ; i < argc; i++) {
+ const char *spec = strstr(argv[i], "@{");
+ unsigned char sha1[20];
+ char *ep, *ref;
+ int recno;
+
+ if (!spec) {
+ status |= error("Not a reflog: %s", argv[i]);
+ continue;
+ }
+
+ if (!dwim_ref(argv[i], spec - argv[i], sha1, &ref)) {
+ status |= error("%s points nowhere!", argv[i]);
+ continue;
+ }
+
+ recno = strtoul(spec + 2, &ep, 10);
+ if (*ep == '}') {
+ cb.recno = -recno;
+ for_each_reflog_ent(ref, count_reflog_ent, &cb);
+ } else {
+ cb.expire_total = approxidate(spec + 2);
+ for_each_reflog_ent(ref, count_reflog_ent, &cb);
+ cb.expire_total = 0;
+ }
+
+ status |= expire_reflog(ref, sha1, 0, &cb);
+ free(ref);
+ }
+ return status;
+}
+
/*
* main "reflog"
*/
if (!strcmp(argv[1], "expire"))
return cmd_reflog_expire(argc - 1, argv + 1, prefix);
+ if (!strcmp(argv[1], "delete"))
+ return cmd_reflog_delete(argc - 1, argv + 1, prefix);
+
/* Not a recognized reflog command..*/
usage(reflog_usage);
}
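
A short usage sketch of the new "git reflog delete" subcommand added above (ref selectors use the usual @{n} and @{date} reflog syntax):

    $ git reflog delete master@{2}            # drop a single entry
    $ git reflog delete --rewrite HEAD@{1}    # also adjust the old SHA-1 of the neighbouring entry
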
#include "diffcore.h"
#include "tree.h"
#include "branch.h"
+#include "parse-options.h"
-static const char builtin_reset_usage[] =
-"git-reset [--mixed | --soft | --hard] [-q] [<commit-ish>] [ [--] <paths>...]";
+static const char * const git_reset_usage[] = {
+ "git-reset [--mixed | --soft | --hard] [-q] [<commit>]",
+ "git-reset [--mixed] <commit> [--] <paths>...",
+ NULL
+};
static char *args_to_str(const char **argv)
{
static void print_new_head_line(struct commit *commit)
{
- const char *hex, *dots = "...", *body;
+ const char *hex, *body;
hex = find_unique_abbrev(commit->object.sha1, DEFAULT_ABBREV);
- if (!hex) {
- hex = sha1_to_hex(commit->object.sha1);
- dots = "";
- }
- printf("HEAD is now at %s%s", hex, dots);
+ printf("HEAD is now at %s", hex);
body = strstr(commit->buffer, "\n\n");
if (body) {
const char *eol;
int cmd_reset(int argc, const char **argv, const char *prefix)
{
- int i = 1, reset_type = NONE, update_ref_status = 0, quiet = 0;
+ int i = 0, reset_type = NONE, update_ref_status = 0, quiet = 0;
const char *rev = "HEAD";
unsigned char sha1[20], *orig = NULL, sha1_orig[20],
*old_orig = NULL, sha1_old_orig[20];
struct commit *commit;
char *reflog_action, msg[1024];
+ const struct option options[] = {
+ OPT_SET_INT(0, "mixed", &reset_type,
+ "reset HEAD and index", MIXED),
+ OPT_SET_INT(0, "soft", &reset_type, "reset only HEAD", SOFT),
+ OPT_SET_INT(0, "hard", &reset_type,
+ "reset HEAD, index and working tree", HARD),
+ OPT_BOOLEAN('q', NULL, &quiet,
+ "disable showing new HEAD in hard reset"),
+ OPT_END()
+ };
git_config(git_default_config);
+ argc = parse_options(argc, argv, options, git_reset_usage,
+ PARSE_OPT_KEEP_DASHDASH);
reflog_action = args_to_str(argv);
setenv("GIT_REFLOG_ACTION", reflog_action, 0);
- while (i < argc) {
- if (!strcmp(argv[i], "--mixed")) {
- reset_type = MIXED;
- i++;
- }
- else if (!strcmp(argv[i], "--soft")) {
- reset_type = SOFT;
- i++;
- }
- else if (!strcmp(argv[i], "--hard")) {
- reset_type = HARD;
- i++;
- }
- else if (!strcmp(argv[i], "-q")) {
- quiet = 1;
- i++;
- }
- else
- break;
- }
-
- if (i < argc && argv[i][0] != '-')
+ if (i < argc && strcmp(argv[i], "--"))
rev = argv[i++];
if (get_sha1(rev, sha1))
if (i < argc && !strcmp(argv[i], "--"))
i++;
- else if (i < argc && argv[i][0] == '-')
- usage(builtin_reset_usage);
/* git reset tree [--] paths... can be used to
* load chosen paths from the tree into the index without
" --no-merges\n"
" --remove-empty\n"
" --all\n"
+" --branches\n"
+" --tags\n"
+" --remotes\n"
" --stdin\n"
" --quiet\n"
" ordering output:\n"
usage(rev_list_usage);
save_commit_buffer = revs.verbose_header || revs.grep_filter;
- track_object_refs = 0;
if (bisect_list)
revs.limited = 1;
o->type = OPTION_CALLBACK;
o->help = xstrdup(skipspaces(s));
o->value = &parsed;
+ o->flags = PARSE_OPT_NOARG;
o->callback = &parseopt_dump;
- switch (s[-1]) {
- case '=':
- s--;
- break;
- case '?':
- o->flags = PARSE_OPT_OPTARG;
- s--;
- break;
- default:
- o->flags = PARSE_OPT_NOARG;
- break;
+ while (s > sb.buf && strchr("*=?!", s[-1])) {
+ switch (*--s) {
+ case '=':
+ o->flags &= ~PARSE_OPT_NOARG;
+ break;
+ case '?':
+ o->flags &= ~PARSE_OPT_NOARG;
+ o->flags |= PARSE_OPT_OPTARG;
+ break;
+ case '!':
+ o->flags |= PARSE_OPT_NONEG;
+ break;
+ case '*':
+ o->flags |= PARSE_OPT_HIDDEN;
+ break;
+ }
}
if (s - sb.buf == 1) /* short option only */
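
The loop above recognizes trailing flag characters in a --parseopt option spec: '=' means the option takes an argument, '?' makes the argument optional, '!' disables the --no- form, and '*' hides the option from the default usage. A sketch of a spec using them, as it would be fed to "git rev-parse --parseopt" on stdin (option names are illustrative):

    some-script [options]
    --
    d,depth=  depth to descend to
    q,quiet?  optionally takes an argument
    force!    cannot be negated
    debug*    hidden from the short usage
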
#include "utf8.h"
#include "parse-options.h"
#include "cache-tree.h"
+#include "diff.h"
+#include "revision.h"
/*
* This implements the builtins revert and cherry-pick.
return helpbuf;
}
+static int index_is_dirty(void)
+{
+ struct rev_info rev;
+ init_revisions(&rev, NULL);
+ setup_revisions(0, NULL, &rev, "HEAD");
+ DIFF_OPT_SET(&rev.diffopt, QUIET);
+ DIFF_OPT_SET(&rev.diffopt, EXIT_WITH_STATUS);
+ run_diff_index(&rev, 1);
+ return !!DIFF_OPT_TST(&rev.diffopt, HAS_CHANGES);
+}
+
static int revert_or_cherry_pick(int argc, const char **argv)
{
unsigned char head[20];
if (write_cache_as_tree(head, 0, NULL))
die ("Your index file is unmerged.");
} else {
- struct wt_status s;
-
if (get_sha1("HEAD", head))
die ("You do not have a valid HEAD");
- wt_status_prepare(&s);
- if (s.commitable)
+ if (read_cache() < 0)
+ die("could not read the index");
+ if (index_is_dirty())
die ("Dirty index: cannot %s", me);
discard_cache();
}
static const char *status_abbrev(unsigned char sha1[20])
{
- const char *abbrev;
- abbrev = find_unique_abbrev(sha1, DEFAULT_ABBREV);
- return abbrev ? abbrev : sha1_to_hex(sha1);
+ return find_unique_abbrev(sha1, DEFAULT_ABBREV);
}
static void print_ok_ref_status(struct ref *ref)
else
free(buffer);
+ /* Skip any leading whitespace, including any blank lines. */
+ while (*oneline && isspace(*oneline))
+ oneline++;
eol = strchr(oneline, '\n');
if (!eol)
eol = oneline + strlen(oneline);
- while (*oneline && isspace(*oneline) && *oneline != '\n')
- oneline++;
if (!prefixcmp(oneline, "[PATCH")) {
char *eob = strchr(oneline, ']');
if (eob && (!eol || eob < eol))
size_t len = strlen(editor);
int i = 0;
const char *args[6];
+ struct strbuf arg0;
+ strbuf_init(&arg0, 0);
if (strcspn(editor, "$ \t'") != len) {
/* there are specials */
+ strbuf_addf(&arg0, "%s \"$@\"", editor);
args[i++] = "sh";
args[i++] = "-c";
- args[i++] = "$0 \"$@\"";
+ args[i++] = arg0.buf;
}
args[i++] = editor;
args[i++] = path;
if (run_command_v_opt_cd_env(args, 0, NULL, env))
die("There was a problem with the editor %s.", editor);
+ strbuf_release(&arg0);
}
if (!buffer)
#include "tag.h"
#include "tree.h"
#include "progress.h"
+#include "decorate.h"
static int dry_run, quiet, recover, has_errors;
static const char unpack_usage[] = "git-unpack-objects [-n] [-q] [-r] < pack-file";
static off_t consumed_bytes;
static SHA_CTX ctx;
+struct obj_buffer {
+ char *buffer;
+ unsigned long size;
+};
+
+static struct decoration obj_decorate;
+
+static struct obj_buffer *lookup_object_buffer(struct object *base)
+{
+ return lookup_decoration(&obj_decorate, base);
+}
+
/*
* Make sure at least "min" bytes are available in the buffer, and
* return the pointer to the buffer.
void *delta_data, *base;
unsigned long base_size;
unsigned char base_sha1[20];
+ struct object *obj;
if (type == OBJ_REF_DELTA) {
hashcpy(base_sha1, fill(20));
}
}
+ obj = lookup_object(base_sha1);
+ if (obj) {
+ struct obj_buffer *obj_buf = lookup_object_buffer(obj);
+ if (obj_buf) {
+ resolve_delta(nr, obj->type, obj_buf->buffer, obj_buf->size, delta_data, delta_size);
+ return;
+ }
+ }
+
base = read_sha1_file(base_sha1, &type, &base_size);
if (!base) {
error("failed to read delta-pack base object %s",
if (!pack)
return error("packfile %s not found.", arg);
+ install_packed_git(pack);
err = verify_pack(pack, verbose);
- free(pack);
return err;
}
/* 5 for future expansion */
OBJ_OFS_DELTA = 6,
OBJ_REF_DELTA = 7,
+ OBJ_ANY,
OBJ_MAX,
};
/* Initialize and use the cache information */
extern int read_index(struct index_state *);
extern int read_index_from(struct index_state *, const char *path);
-extern int write_index(struct index_state *, int newfd);
+extern int write_index(const struct index_state *, int newfd);
extern int discard_index(struct index_state *);
-extern int unmerged_index(struct index_state *);
+extern int unmerged_index(const struct index_state *);
extern int verify_path(const char *path);
extern int index_name_exists(struct index_state *istate, const char *name, int namelen);
-extern int index_name_pos(struct index_state *, const char *name, int namelen);
+extern int index_name_pos(const struct index_state *, const char *name, int namelen);
#define ADD_CACHE_OK_TO_ADD 1 /* Ok to add */
#define ADD_CACHE_OK_TO_REPLACE 2 /* Ok to replace file/directory */
#define ADD_CACHE_SKIP_DFCHECK 4 /* Ok to skip DF conflict checks */
#define CE_MATCH_IGNORE_VALID 01
/* do not check the contents but report dirty on racily-clean entries */
#define CE_MATCH_RACY_IS_DIRTY 02
-extern int ie_match_stat(struct index_state *, struct cache_entry *, struct stat *, unsigned int);
-extern int ie_modified(struct index_state *, struct cache_entry *, struct stat *, unsigned int);
+extern int ie_match_stat(const struct index_state *, struct cache_entry *, struct stat *, unsigned int);
+extern int ie_modified(const struct index_state *, struct cache_entry *, struct stat *, unsigned int);
extern int ce_path_match(const struct cache_entry *ce, const char **pathspec);
extern int index_fd(unsigned char *sha1, int fd, struct stat *st, int write_object, enum object_type type, const char *path);
extern int validate_headref(const char *ref);
extern int base_name_compare(const char *name1, int len1, int mode1, const char *name2, int len2, int mode2);
+extern int df_name_compare(const char *name1, int len1, int mode1, const char *name2, int len2, int mode2);
extern int cache_name_compare(const char *name1, int len1, const char *name2, int len2);
extern void *read_object_with_reference(const unsigned char *sha1,
unsigned long *size,
unsigned char *sha1_ret);
+extern struct object *peel_to_type(const char *name, int namelen,
+ struct object *o, enum object_type);
+
enum date_mode {
DATE_NORMAL = 0,
DATE_RELATIVE,
commit_graft_prepared = 1;
}
-static struct commit_graft *lookup_commit_graft(const unsigned char *sha1)
+struct commit_graft *lookup_commit_graft(const unsigned char *sha1)
{
int pos;
prepare_commit_graft();
}
item->date = parse_commit_date(bufptr, tail);
- if (track_object_refs) {
- unsigned i = 0;
- struct commit_list *p;
- struct object_refs *refs = alloc_object_refs(n_refs);
- if (item->tree)
- refs->ref[i++] = &item->tree->object;
- for (p = item->parents; p; p = p->next)
- refs->ref[i++] = &p->item->object;
- set_object_refs(&item->object, refs);
- }
-
return 0;
}
struct commit_graft *read_graft_line(char *buf, int len);
int register_commit_graft(struct commit_graft *, int);
int read_graft_file(const char *graft_file);
+struct commit_graft *lookup_commit_graft(const unsigned char *sha1);
extern struct commit_list *get_merge_bases(struct commit *rev1, struct commit *rev2, int cleanup);
--- /dev/null
+#include "../git-compat-util.h"
+
+#undef vsnprintf
+int git_vsnprintf(char *str, size_t maxsize, const char *format, va_list ap)
+{
+ char *s;
+ int ret;
+
+ ret = vsnprintf(str, maxsize, format, ap);
+ if (ret != -1)
+ return ret;
+
+ s = NULL;
+ if (maxsize < 128)
+ maxsize = 128;
+
+ while (ret == -1) {
+ maxsize *= 4;
+ str = realloc(s, maxsize);
+ if (! str)
+ break;
+ s = str;
+ ret = vsnprintf(str, maxsize, format, ap);
+ }
+ free(s);
+ return ret;
+}
+
+int git_snprintf(char *str, size_t maxsize, const char *format, ...)
+{
+ va_list ap;
+ int ret;
+
+ va_start(ap, format);
+ ret = git_vsnprintf(str, maxsize, format, ap);
+ va_end(ap);
+
+ return ret;
+}
+
NO_ICONV=@NO_ICONV@
OLD_ICONV=@OLD_ICONV@
NO_DEFLATE_BOUND=@NO_DEFLATE_BOUND@
+SNPRINTF_RETURNS_BOGUS=@SNPRINTF_RETURNS_BOGUS@
NO_C99_FORMAT=
fi
AC_SUBST(NO_C99_FORMAT)
+#
+# Define SNPRINTF_RETURNS_BOGUS if you are on a system where snprintf()
+# or vsnprintf() returns -1 instead of the number of characters that would
+# have been written to the final string if enough space had been available.
+AC_CACHE_CHECK([whether snprintf() and/or vsnprintf() return a bogus value],
+ [ac_cv_snprintf_returns_bogus],
+[
+AC_RUN_IFELSE(
+ [AC_LANG_PROGRAM([AC_INCLUDES_DEFAULT
+ #include "stdarg.h"
+
+ int test_vsnprintf(char *str, size_t maxsize, const char *format, ...)
+ {
+ int ret;
+ va_list ap;
+ va_start(ap, format);
+ ret = vsnprintf(str, maxsize, format, ap);
+ va_end(ap);
+ return ret;
+ }],
+ [[char buf[6];
+ if (test_vsnprintf(buf, 3, "%s", "12345") != 5
+ || strcmp(buf, "12")) return 1;
+ if (snprintf(buf, 3, "%s", "12345") != 5
+ || strcmp(buf, "12")) return 1]])],
+ [ac_cv_snprintf_returns_bogus=no],
+ [ac_cv_snprintf_returns_bogus=yes])
+])
+if test $ac_cv_snprintf_returns_bogus = yes; then
+ SNPRINTF_RETURNS_BOGUS=UnfortunatelyYes
+else
+ SNPRINTF_RETURNS_BOGUS=
+fi
+AC_SUBST(SNPRINTF_RETURNS_BOGUS)
## Checks for library functions.
local b
if [ -d "$g/../.dotest" ]
then
- r="|AM/REBASE"
+ if test -f "$g/../.dotest/rebasing"
+ then
+ r="|REBASE"
+ elif test -f "$g/../.dotest/applying"
+ then
+ r="|AM"
+ else
+ r="|AM/REBASE"
+ fi
b="$(git symbolic-ref HEAD 2>/dev/null)"
elif [ -f "$g/.dotest-merge/interactive" ]
then
r="|REBASE-i"
- b="$(cat $g/.dotest-merge/head-name)"
+ b="$(cat "$g/.dotest-merge/head-name")"
elif [ -d "$g/.dotest-merge" ]
then
r="|REBASE-m"
- b="$(cat $g/.dotest-merge/head-name)"
+ b="$(cat "$g/.dotest-merge/head-name")"
elif [ -f "$g/MERGE_HEAD" ]
then
r="|MERGING"
b="$(git symbolic-ref HEAD 2>/dev/null)"
else
- if [ -f $g/BISECT_LOG ]
+ if [ -f "$g/BISECT_LOG" ]
then
r="|BISECTING"
fi
then
if ! b="$(git describe --exact-match HEAD 2>/dev/null)"
then
- b="$(cut -c1-7 $g/HEAD)..."
+ b="$(cut -c1-7 "$g/HEAD")..."
fi
fi
fi
if [ $# -gt 2 ]; then
cur="$3"
fi
- for c in $1; do
- case "$c$4" in
- --*=*) all="$all$c$4$s" ;;
- *.) all="$all$c$4$s" ;;
- *) all="$all$c$4 $s" ;;
- esac
- done
+ case "$cur" in
+ --*=)
+ COMPREPLY=()
+ return
+ ;;
+ *)
+ for c in $1; do
+ case "$c$4" in
+ --*=*) all="$all$c$4$s" ;;
+ *.) all="$all$c$4$s" ;;
+ *) all="$all$c$4 $s" ;;
+ esac
+ done
+ ;;
+ esac
IFS=$s
COMPREPLY=($(compgen -P "$2" -W "$all" -- "$cur"))
return
show-index) : plumbing;;
ssh-*) : transport;;
stripspace) : plumbing;;
- svn) : import export;;
symbolic-ref) : plumbing;;
tar-tree) : deprecated;;
unpack-file) : plumbing;;
done
}
+__git_find_subcommand ()
+{
+ local word subcommand c=1
+
+ while [ $c -lt $COMP_CWORD ]; do
+ word="${COMP_WORDS[c]}"
+ for subcommand in $1; do
+ if [ "$subcommand" = "$word" ]; then
+ echo "$subcommand"
+ return
+ fi
+ done
+ c=$((++c))
+ done
+}
+
__git_whitespacelist="nowarn warn error error-all strip"
_git_am ()
_git_bisect ()
{
- local i c=1 command
- while [ $c -lt $COMP_CWORD ]; do
- i="${COMP_WORDS[c]}"
- case "$i" in
- start|bad|good|reset|visualize|replay|log)
- command="$i"
- break
- ;;
- esac
- c=$((++c))
- done
-
- if [ $c -eq $COMP_CWORD -a -z "$command" ]; then
- __gitcomp "start bad good reset visualize replay log"
+ local subcommands="start bad good reset visualize replay log"
+ local subcommand="$(__git_find_subcommand "$subcommands")"
+ if [ -z "$subcommand" ]; then
+ __gitcomp "$subcommands"
return
fi
- case "$command" in
+ case "$subcommand" in
bad|good|reset)
__gitcomp "$(__git_refs)"
;;
_git_branch ()
{
- __gitcomp "$(__git_refs)"
+ local i c=1 only_local_ref="n" has_r="n"
+
+ while [ $c -lt $COMP_CWORD ]; do
+ i="${COMP_WORDS[c]}"
+ case "$i" in
+ -d|-m) only_local_ref="y" ;;
+ -r) has_r="y" ;;
+ esac
+ c=$((++c))
+ done
+
+ case "${COMP_WORDS[COMP_CWORD]}" in
+ --*=*) COMPREPLY=() ;;
+ --*)
+ __gitcomp "
+ --color --no-color --verbose --abbrev= --no-abbrev
+ --track --no-track
+ "
+ ;;
+ *)
+ if [ $only_local_ref = "y" -a $has_r = "n" ]; then
+ __gitcomp "$(__git_heads)"
+ else
+ __gitcomp "$(__git_refs)"
+ fi
+ ;;
+ esac
}
_git_bundle ()
--in-reply-to=
--full-index --binary
--not --all
+ --cover-letter
"
return
;;
_git_rebase ()
{
- local cur="${COMP_WORDS[COMP_CWORD]}"
- if [ -d .dotest ] || [ -d .git/.dotest-merge ]; then
+ local cur="${COMP_WORDS[COMP_CWORD]}" dir="$(__gitdir)"
+ if [ -d .dotest ] || [ -d "$dir"/.dotest-merge ]; then
__gitcomp "--continue --skip --abort"
return
fi
core.sharedRepository
core.warnAmbiguousRefs
core.compression
- core.legacyHeaders
core.packedGitWindowSize
core.packedGitLimit
clean.requireForce
_git_remote ()
{
- local i c=1 command
- while [ $c -lt $COMP_CWORD ]; do
- i="${COMP_WORDS[c]}"
- case "$i" in
- add|rm|show|prune|update) command="$i"; break ;;
- esac
- c=$((++c))
- done
-
- if [ $c -eq $COMP_CWORD -a -z "$command" ]; then
- __gitcomp "add rm show prune update"
+ local subcommands="add rm show prune update"
+ local subcommand="$(__git_find_subcommand "$subcommands")"
+ if [ -z "$subcommand" ]; then
return
fi
- case "$command" in
+ case "$subcommand" in
rm|show|prune)
__gitcomp "$(__git_remotes)"
;;
_git_stash ()
{
- __gitcomp 'list show apply clear'
+ local subcommands='save list show apply clear drop pop create'
+ if [ -z "$(__git_find_subcommand "$subcommands")" ]; then
+ __gitcomp "$subcommands"
+ fi
}
_git_submodule ()
{
- local i c=1 command
- while [ $c -lt $COMP_CWORD ]; do
- i="${COMP_WORDS[c]}"
- case "$i" in
- add|status|init|update) command="$i"; break ;;
- esac
- c=$((++c))
- done
-
- if [ $c -eq $COMP_CWORD -a -z "$command" ]; then
+ local subcommands="add status init update"
+ if [ -z "$(__git_find_subcommand "$subcommands")" ]; then
local cur="${COMP_WORDS[COMP_CWORD]}"
case "$cur" in
--*)
__gitcomp "--quiet --cached"
;;
*)
- __gitcomp "add status init update"
+ __gitcomp "$subcommands"
;;
esac
return
fi
}
+_git_svn ()
+{
+ local subcommands="
+ init fetch clone rebase dcommit log find-rev
+ set-tree commit-diff info create-ignore propget
+ proplist show-ignore show-externals
+ "
+ local subcommand="$(__git_find_subcommand "$subcommands")"
+ if [ -z "$subcommand" ]; then
+ __gitcomp "$subcommands"
+ else
+ local remote_opts="--username= --config-dir= --no-auth-cache"
+ local fc_opts="
+ --follow-parent --authors-file= --repack=
+ --no-metadata --use-svm-props --use-svnsync-props
+ --log-window-size= --no-checkout --quiet
+ --repack-flags --user-log-author $remote_opts
+ "
+ local init_opts="
+ --template= --shared= --trunk= --tags=
+ --branches= --stdlayout --minimize-url
+ --no-metadata --use-svm-props --use-svnsync-props
+ --rewrite-root= $remote_opts
+ "
+ local cmt_opts="
+ --edit --rmdir --find-copies-harder --copy-similarity=
+ "
+
+ local cur="${COMP_WORDS[COMP_CWORD]}"
+ case "$subcommand,$cur" in
+ fetch,--*)
+ __gitcomp "--revision= --fetch-all $fc_opts"
+ ;;
+ clone,--*)
+ __gitcomp "--revision= $fc_opts $init_opts"
+ ;;
+ init,--*)
+ __gitcomp "$init_opts"
+ ;;
+ dcommit,--*)
+ __gitcomp "
+ --merge --strategy= --verbose --dry-run
+ --fetch-all --no-rebase $cmt_opts $fc_opts
+ "
+ ;;
+ set-tree,--*)
+ __gitcomp "--stdin $cmt_opts $fc_opts"
+ ;;
+ create-ignore,--*|propget,--*|proplist,--*|show-ignore,--*|\
+ show-externals,--*)
+ __gitcomp "--revision="
+ ;;
+ log,--*)
+ __gitcomp "
+ --limit= --revision= --verbose --incremental
+ --oneline --show-commit --non-recursive
+ --authors-file=
+ "
+ ;;
+ rebase,--*)
+ __gitcomp "
+ --merge --verbose --strategy= --local
+ --fetch-all $fc_opts
+ "
+ ;;
+ commit-diff,--*)
+ __gitcomp "--message= --file= --revision= $cmt_opts"
+ ;;
+ info,--*)
+ __gitcomp "--url"
+ ;;
+ *)
+ COMPREPLY=()
+ ;;
+ esac
+ fi
+}
+
_git_tag ()
{
local i c=1 f=0
c=$((++c))
done
- if [ $c -eq $COMP_CWORD -a -z "$command" ]; then
+ if [ -z "$command" ]; then
case "${COMP_WORDS[COMP_CWORD]}" in
--*=*) COMPREPLY=() ;;
--*) __gitcomp "
+ --paginate
--no-pager
--git-dir=
--bare
--version
--exec-path
+ --work-tree=
+ --help
"
;;
*) __gitcomp "$(__git_commands) $(__git_aliases)" ;;
show-branch) _git_log ;;
stash) _git_stash ;;
submodule) _git_submodule ;;
+ svn) _git_svn ;;
tag) _git_tag ;;
whatchanged) _git_log ;;
*) COMPREPLY=() ;;
complete -o default -o nospace -F _git_show git-show
complete -o default -o nospace -F _git_stash git-stash
complete -o default -o nospace -F _git_submodule git-submodule
+complete -o default -o nospace -F _git_svn git-svn
complete -o default -o nospace -F _git_log git-show-branch
complete -o default -o nospace -F _git_tag git-tag
complete -o default -o nospace -F _git_log git-whatchanged
(let (author-name author-email subject date msg)
(with-temp-buffer
(let ((coding-system (git-get-logoutput-coding-system)))
- (git-call-process-env t nil "log" "-1" commit)
+ (git-call-process-env t nil "log" "-1" "--pretty=medium" commit)
(goto-char (point-min))
(when (re-search-forward "^Author: *\\(.*\\) <\\(.*\\)>$" nil t)
(setq author-name (match-string 1))
(with-current-buffer buffer
(when (and list-buffers-directory
(string-equal fulldir (expand-file-name list-buffers-directory))
- (string-match "\\*git-status\\*$" (buffer-name buffer)))
+ (eq major-mode 'git-status-mode))
(setq found buffer))))
(setq list (cdr list)))
found))
sub add_remote_config {
my ($hash, $name, $what, $value) = @_;
if ($what eq 'url') {
- if (exists $hash->{$name}{'URL'}) {
- print STDERR "Warning: more than one remote.$name.url\n";
+ # Having more than one is Ok -- it is used for push.
+ if (! exists $hash->{$name}{'URL'}) {
+ $hash->{$name}{'URL'} = $value;
}
- $hash->{$name}{'URL'} = $value;
}
elsif ($what eq 'fetch') {
$hash->{$name}{'FETCH'} ||= [];
## Should move this out, doesn't use SELF.
def readP4Files(self, files):
+ filesForCommit = []
+ filesToRead = []
+
for f in files:
+ includeFile = True
for val in self.clientSpecDirs:
if f['path'].startswith(val[0]):
- if val[1] > 0:
- f['include'] = True
- else:
- f['include'] = False
+ if val[1] <= 0:
+ includeFile = False
break
- files = [f for f in files
- if f['action'] != 'delete' and
- (f.has_key('include') == False or f['include'] == True)]
+ if includeFile:
+ filesForCommit.append(f)
+ if f['action'] != 'delete':
+ filesToRead.append(f)
- if not files:
- return []
+ filedata = []
+ if len(filesToRead) > 0:
+ filedata = p4CmdList('-x - print',
+ stdin='\n'.join(['%s#%s' % (f['path'], f['rev'])
+ for f in filesToRead]),
+ stdin_mode='w+')
- filedata = p4CmdList('-x - print',
- stdin='\n'.join(['%s#%s' % (f['path'], f['rev'])
- for f in files]),
- stdin_mode='w+')
- if "p4ExitCode" in filedata[0]:
- die("Problems executing p4. Error: [%d]."
- % (filedata[0]['p4ExitCode']));
+ if "p4ExitCode" in filedata[0]:
+ die("Problems executing p4. Error: [%d]."
+ % (filedata[0]['p4ExitCode']));
j = 0;
contents = {}
contents[stat['depotFile']] = text
- for f in files:
- assert not f.has_key('data')
- f['data'] = contents[f['path']]
- return files
+ for f in filesForCommit:
+ path = f['path']
+ if contents.has_key(path):
+ f['data'] = contents[path]
+
+ return filesForCommit
def commit(self, details, files, branch, branchPrefixes, parent = ""):
epoch = details["time"]
echo ""
if [ "$newrev_type" = "commit" ]; then
echo $LOGBEGIN
- git show --no-color --root -s $newrev
+ git show --no-color --root -s --pretty=medium $newrev
echo $LOGEND
else
# What can we do here? The tag marks an object that is not
my @depths;
while (<STDIN>) {
- my ($sha1, $type, $size, $offset, $depth, $parent) = split(/\s+/, $_);
+ my ($sha1, $type, $size, $space, $offset, $depth, $parent) = split(/\s+/, $_);
next unless ($sha1 =~ /^[0-9a-f]{40}$/);
$depths{$sha1} = $depth || 0;
push(@depths, $depth || 0);
usage(daemon_usage);
}
+ if (log_syslog) {
+ openlog("git-daemon", 0, LOG_DAEMON);
+ set_die_routine(daemon_die);
+ }
+
if (inetd_mode && (group_name || user_name))
die("--user and --group are incompatible with --inetd");
}
}
- if (log_syslog) {
- openlog("git-daemon", 0, LOG_DAEMON);
- set_die_routine(daemon_die);
- }
-
if (strict_paths && (!ok_paths || !*ok_paths))
die("option --strict-paths requires a whitelist");
+ if (base_path) {
+ struct stat st;
+
+ if (stat(base_path, &st) || !S_ISDIR(st.st_mode))
+ die("base-path '%s' does not exist or "
+ "is not a directory", base_path);
+ }
+
if (inetd_mode) {
struct sockaddr_storage ss;
struct sockaddr *peer = (struct sockaddr *)&ss;
static void diff_index_show_file(struct rev_info *revs,
const char *prefix,
struct cache_entry *ce,
- unsigned char *sha1, unsigned int mode)
+ const unsigned char *sha1, unsigned int mode)
{
diff_addremove(&revs->diffopt, prefix[0], mode,
sha1, ce->name, NULL);
}
static int get_stat_data(struct cache_entry *ce,
- unsigned char **sha1p,
+ const unsigned char **sha1p,
unsigned int *modep,
int cached, int match_missing)
{
- unsigned char *sha1 = ce->sha1;
+ const unsigned char *sha1 = ce->sha1;
unsigned int mode = ce->ce_mode;
if (!cached) {
- static unsigned char no_sha1[20];
int changed;
struct stat st;
if (lstat(ce->name, &st) < 0) {
changed = ce_match_stat(ce, &st, 0);
if (changed) {
mode = ce_mode_from_stat(ce, st.st_mode);
- sha1 = no_sha1;
+ sha1 = null_sha1;
}
}
struct cache_entry *new,
int cached, int match_missing)
{
- unsigned char *sha1;
+ const unsigned char *sha1;
unsigned int mode;
/* New file in the index: it might actually be different in
int cached, int match_missing)
{
unsigned int mode, oldmode;
- unsigned char *sha1;
+ const unsigned char *sha1;
if (get_stat_data(new, &sha1, &mode, cached, match_missing) < 0) {
if (report_missing)
*/
static void do_oneway_diff(struct unpack_trees_options *o,
struct cache_entry *idx,
- struct cache_entry *tree,
- int idx_pos, int idx_nr)
+ struct cache_entry *tree)
{
struct rev_info *revs = o->unpack_data;
int match_missing, cached;
show_modified(revs, tree, idx, 1, cached, match_missing);
}
-/*
- * Count how many index entries go with the first one
- */
-static inline int count_skip(const struct cache_entry *src, int pos)
+static inline void skip_same_name(struct cache_entry *ce, struct unpack_trees_options *o)
{
- int skip = 1;
-
- /* We can only have multiple entries if the first one is not stage-0 */
- if (ce_stage(src)) {
- struct cache_entry **p = active_cache + pos;
- int namelen = ce_namelen(src);
-
- for (;;) {
- const struct cache_entry *ce;
- pos++;
- if (pos >= active_nr)
- break;
- ce = *++p;
- if (ce_namelen(ce) != namelen)
- break;
- if (memcmp(ce->name, src->name, namelen))
- break;
- skip++;
- }
+ int len = ce_namelen(ce);
+ const struct index_state *index = o->src_index;
+
+ while (o->pos < index->cache_nr) {
+ struct cache_entry *next = index->cache[o->pos];
+ if (len != ce_namelen(next))
+ break;
+ if (memcmp(ce->name, next->name, len))
+ break;
+ o->pos++;
}
- return skip;
}
/*
* the fairly complex unpack_trees() semantic requirements, including
* the skipping, the path matching, the type conflict cases etc.
*/
-static int oneway_diff(struct cache_entry **src,
- struct unpack_trees_options *o,
- int index_pos)
+static int oneway_diff(struct cache_entry **src, struct unpack_trees_options *o)
{
- int skip = 0;
struct cache_entry *idx = src[0];
struct cache_entry *tree = src[1];
struct rev_info *revs = o->unpack_data;
- if (index_pos >= 0)
- skip = count_skip(idx, index_pos);
+ if (idx && ce_stage(idx))
+ skip_same_name(idx, o);
/*
* Unpack-trees generates a DF/conflict entry if
tree = NULL;
if (ce_path_match(idx ? idx : tree, revs->prune_data))
- do_oneway_diff(o, idx, tree, index_pos, skip);
+ do_oneway_diff(o, idx, tree);
- return skip;
+ return 0;
}
int run_diff_index(struct rev_info *revs, int cached)
opts.merge = 1;
opts.fn = oneway_diff;
opts.unpack_data = revs;
+ opts.src_index = &the_index;
+ opts.dst_index = NULL;
init_tree_desc(&t, tree->buffer, tree->size);
if (unpack_trees(1, &t, &opts))
opts.merge = 1;
opts.fn = oneway_diff;
opts.unpack_data = &revs;
+ opts.src_index = &the_index;
+ opts.dst_index = &the_index;
init_tree_desc(&t, tree->buffer, tree->size);
if (unpack_trees(1, &t, &opts))
return sha1_to_hex(sha1);
abbrev = find_unique_abbrev(sha1, len);
- if (!abbrev)
- return sha1_to_hex(sha1);
abblen = strlen(abbrev);
if (abblen < 37) {
static char hex[41];
static int diff_filespec_is_identical(struct diff_filespec *one,
struct diff_filespec *two)
{
- if (S_ISGITLINK(one->mode)) {
- diff_fill_sha1_info(one);
- diff_fill_sha1_info(two);
- return !hashcmp(one->sha1, two->sha1);
- }
+ if (S_ISGITLINK(one->mode))
+ return 0;
if (diff_populate_filespec(one, 0))
return 0;
if (diff_populate_filespec(two, 0))
*/
if (rename_limit <= 0 || rename_limit > 32767)
rename_limit = 32767;
- if (num_create > rename_limit && num_src > rename_limit)
- goto cleanup;
- if (num_create * num_src > rename_limit * rename_limit)
+ if ((num_create > rename_limit && num_src > rename_limit) ||
+ (num_create * num_src > rename_limit * rename_limit)) {
+ warning("too many files, skipping inexact rename detection");
goto cleanup;
+ }
mx = xmalloc(sizeof(*mx) * num_create * num_src);
for (dst_cnt = i = 0; i < rename_dst_nr; i++) {
else
b = new_branch(sp);
read_next_command();
- if (!cmd_from(b) && command_buf.len > 0)
+ cmd_from(b);
+ if (command_buf.len > 0)
unread_command_buf = 1;
}
{
unsigned int i, show_stats = 1;
+ setup_git_directory();
git_config(git_pack_config);
if (!pack_compression_seen && core_compression_seen)
pack_compression_level = core_compression_level;
use_thin_pack:1,
fetch_all:1,
verbose:1,
- no_progress:1;
+ no_progress:1,
+ include_tag:1;
};
struct ref *fetch_pack(struct fetch_pack_args *args,
--- /dev/null
+#include "cache.h"
+#include "object.h"
+#include "blob.h"
+#include "tree.h"
+#include "tree-walk.h"
+#include "commit.h"
+#include "tag.h"
+#include "fsck.h"
+
+static int fsck_walk_tree(struct tree *tree, fsck_walk_func walk, void *data)
+{
+ struct tree_desc desc;
+ struct name_entry entry;
+ int res = 0;
+
+ if (parse_tree(tree))
+ return -1;
+
+ init_tree_desc(&desc, tree->buffer, tree->size);
+ while (tree_entry(&desc, &entry)) {
+ int result;
+
+ if (S_ISGITLINK(entry.mode))
+ continue;
+ if (S_ISDIR(entry.mode))
+ result = walk(&lookup_tree(entry.sha1)->object, OBJ_TREE, data);
+ else if (S_ISREG(entry.mode) || S_ISLNK(entry.mode))
+ result = walk(&lookup_blob(entry.sha1)->object, OBJ_BLOB, data);
+ else {
+ result = error("in tree %s: entry %s has bad mode %.6o\n",
+ sha1_to_hex(tree->object.sha1), entry.path, entry.mode);
+ }
+ if (result < 0)
+ return result;
+ if (!res)
+ res = result;
+ }
+ return res;
+}
+
+static int fsck_walk_commit(struct commit *commit, fsck_walk_func walk, void *data)
+{
+ struct commit_list *parents;
+ int res;
+ int result;
+
+ if (parse_commit(commit))
+ return -1;
+
+ result = walk((struct object *)commit->tree, OBJ_TREE, data);
+ if (result < 0)
+ return result;
+ res = result;
+
+ parents = commit->parents;
+ while (parents) {
+ result = walk((struct object *)parents->item, OBJ_COMMIT, data);
+ if (result < 0)
+ return result;
+ if (!res)
+ res = result;
+ parents = parents->next;
+ }
+ return res;
+}
+
+static int fsck_walk_tag(struct tag *tag, fsck_walk_func walk, void *data)
+{
+ if (parse_tag(tag))
+ return -1;
+ return walk(tag->tagged, OBJ_ANY, data);
+}
+
+int fsck_walk(struct object *obj, fsck_walk_func walk, void *data)
+{
+ if (!obj)
+ return -1;
+ switch (obj->type) {
+ case OBJ_BLOB:
+ return 0;
+ case OBJ_TREE:
+ return fsck_walk_tree((struct tree *)obj, walk, data);
+ case OBJ_COMMIT:
+ return fsck_walk_commit((struct commit *)obj, walk, data);
+ case OBJ_TAG:
+ return fsck_walk_tag((struct tag *)obj, walk, data);
+ default:
+ error("Unknown object type for %s", sha1_to_hex(obj->sha1));
+ return -1;
+ }
+}
+
+/*
+ * The entries in a tree are ordered in the _path_ order,
+ * which means that a directory entry is ordered by adding
+ * a slash to the end of it.
+ *
+ * So a directory called "a" is ordered _after_ a file
+ * called "a.c", because "a/" sorts after "a.c".
+ */
+#define TREE_UNORDERED (-1)
+#define TREE_HAS_DUPS (-2)
+
+static int verify_ordered(unsigned mode1, const char *name1, unsigned mode2, const char *name2)
+{
+ int len1 = strlen(name1);
+ int len2 = strlen(name2);
+ int len = len1 < len2 ? len1 : len2;
+ unsigned char c1, c2;
+ int cmp;
+
+ cmp = memcmp(name1, name2, len);
+ if (cmp < 0)
+ return 0;
+ if (cmp > 0)
+ return TREE_UNORDERED;
+
+ /*
+ * Ok, the first <len> characters are the same.
+ * Now we need to order the next one, but turn
+ * a '\0' into a '/' for a directory entry.
+ */
+ c1 = name1[len];
+ c2 = name2[len];
+ if (!c1 && !c2)
+ /*
+ * git-write-tree used to write out a nonsense tree that has
+ * entries with the same name, one blob and one tree. Make
+ * sure we do not have duplicate entries.
+ */
+ return TREE_HAS_DUPS;
+ if (!c1 && S_ISDIR(mode1))
+ c1 = '/';
+ if (!c2 && S_ISDIR(mode2))
+ c2 = '/';
+ return c1 < c2 ? 0 : TREE_UNORDERED;
+}
+
+static int fsck_tree(struct tree *item, int strict, fsck_error error_func)
+{
+ int retval;
+ int has_full_path = 0;
+ int has_empty_name = 0;
+ int has_zero_pad = 0;
+ int has_bad_modes = 0;
+ int has_dup_entries = 0;
+ int not_properly_sorted = 0;
+ struct tree_desc desc;
+ unsigned o_mode;
+ const char *o_name;
+ const unsigned char *o_sha1;
+
+ init_tree_desc(&desc, item->buffer, item->size);
+
+ o_mode = 0;
+ o_name = NULL;
+ o_sha1 = NULL;
+
+ while (desc.size) {
+ unsigned mode;
+ const char *name;
+ const unsigned char *sha1;
+
+ sha1 = tree_entry_extract(&desc, &name, &mode);
+
+ if (strchr(name, '/'))
+ has_full_path = 1;
+ if (!*name)
+ has_empty_name = 1;
+ has_zero_pad |= *(char *)desc.buffer == '0';
+ update_tree_entry(&desc);
+
+ switch (mode) {
+ /*
+ * Standard modes..
+ */
+ case S_IFREG | 0755:
+ case S_IFREG | 0644:
+ case S_IFLNK:
+ case S_IFDIR:
+ case S_IFGITLINK:
+ break;
+ /*
+ * This is nonstandard, but we had a few of these
+ * early on when we honored the full set of mode
+ * bits..
+ */
+ case S_IFREG | 0664:
+ if (!strict)
+ break;
+ default:
+ has_bad_modes = 1;
+ }
+
+ if (o_name) {
+ switch (verify_ordered(o_mode, o_name, mode, name)) {
+ case TREE_UNORDERED:
+ not_properly_sorted = 1;
+ break;
+ case TREE_HAS_DUPS:
+ has_dup_entries = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ o_mode = mode;
+ o_name = name;
+ o_sha1 = sha1;
+ }
+
+ retval = 0;
+ if (has_full_path)
+ retval += error_func(&item->object, FSCK_WARN, "contains full pathnames");
+ if (has_empty_name)
+ retval += error_func(&item->object, FSCK_WARN, "contains empty pathname");
+ if (has_zero_pad)
+ retval += error_func(&item->object, FSCK_WARN, "contains zero-padded file modes");
+ if (has_bad_modes)
+ retval += error_func(&item->object, FSCK_WARN, "contains bad file modes");
+ if (has_dup_entries)
+ retval += error_func(&item->object, FSCK_ERROR, "contains duplicate file entries");
+ if (not_properly_sorted)
+ retval += error_func(&item->object, FSCK_ERROR, "not properly sorted");
+ return retval;
+}
+
+static int fsck_commit(struct commit *commit, fsck_error error_func)
+{
+ char *buffer = commit->buffer;
+ unsigned char tree_sha1[20], sha1[20];
+ struct commit_graft *graft;
+ int parents = 0;
+
+ if (!commit->date)
+ return error_func(&commit->object, FSCK_ERROR, "invalid author/committer line");
+
+ if (memcmp(buffer, "tree ", 5))
+ return error_func(&commit->object, FSCK_ERROR, "invalid format - expected 'tree' line");
+ if (get_sha1_hex(buffer+5, tree_sha1) || buffer[45] != '\n')
+ return error_func(&commit->object, FSCK_ERROR, "invalid 'tree' line format - bad sha1");
+ buffer += 46;
+ while (!memcmp(buffer, "parent ", 7)) {
+ if (get_sha1_hex(buffer+7, sha1) || buffer[47] != '\n')
+ return error_func(&commit->object, FSCK_ERROR, "invalid 'parent' line format - bad sha1");
+ buffer += 48;
+ parents++;
+ }
+ graft = lookup_commit_graft(commit->object.sha1);
+ if (graft) {
+ struct commit_list *p = commit->parents;
+ parents = 0;
+ while (p) {
+ p = p->next;
+ parents++;
+ }
+ if (graft->nr_parent == -1 && !parents)
+ ; /* shallow commit */
+ else if (graft->nr_parent != parents)
+ return error_func(&commit->object, FSCK_ERROR, "graft objects missing");
+ } else {
+ struct commit_list *p = commit->parents;
+ while (p && parents) {
+ p = p->next;
+ parents--;
+ }
+ if (p || parents)
+ return error_func(&commit->object, FSCK_ERROR, "parent objects missing");
+ }
+ if (memcmp(buffer, "author ", 7))
+ return error_func(&commit->object, FSCK_ERROR, "invalid format - expected 'author' line");
+ if (!commit->tree)
+ return error_func(&commit->object, FSCK_ERROR, "could not load commit's tree %s", sha1_to_hex(tree_sha1));
+
+ return 0;
+}
+
+static int fsck_tag(struct tag *tag, fsck_error error_func)
+{
+ struct object *tagged = tag->tagged;
+
+ if (!tagged)
+ return error_func(&tag->object, FSCK_ERROR, "could not load tagged object");
+ return 0;
+}
+
+int fsck_object(struct object *obj, int strict, fsck_error error_func)
+{
+ if (!obj)
+ return error_func(obj, FSCK_ERROR, "no valid object to fsck");
+
+ if (obj->type == OBJ_BLOB)
+ return 0;
+ if (obj->type == OBJ_TREE)
+ return fsck_tree((struct tree *) obj, strict, error_func);
+ if (obj->type == OBJ_COMMIT)
+ return fsck_commit((struct commit *) obj, error_func);
+ if (obj->type == OBJ_TAG)
+ return fsck_tag((struct tag *) obj, error_func);
+
+ return error_func(obj, FSCK_ERROR, "unknown type '%d' (internal fsck error)",
+ obj->type);
+}
+
+int fsck_error_function(struct object *obj, int type, const char *fmt, ...)
+{
+ va_list ap;
+ int len;
+ struct strbuf sb;
+
+ strbuf_init(&sb, 0);
+ strbuf_addf(&sb, "object %s:", obj->sha1?sha1_to_hex(obj->sha1):"(null)");
+
+ va_start(ap, fmt);
+ len = vsnprintf(sb.buf + sb.len, strbuf_avail(&sb), fmt, ap);
+ va_end(ap);
+
+ if (len < 0)
+ len = 0;
+ if (len >= strbuf_avail(&sb)) {
+ strbuf_grow(&sb, len + 2);
+ va_start(ap, fmt);
+ len = vsnprintf(sb.buf + sb.len, strbuf_avail(&sb), fmt, ap);
+ va_end(ap);
+ if (len >= strbuf_avail(&sb))
+ die("this should not happen, your snprintf is broken");
+ }
+
+ error(sb.buf);
+ strbuf_release(&sb);
+ return 1;
+}
--- /dev/null
+#ifndef GIT_FSCK_H
+#define GIT_FSCK_H
+
+#define FSCK_ERROR 1
+#define FSCK_WARN 2
+
+/*
+ * callback function for fsck_walk
+ * type is the expected type of the object or OBJ_ANY
+ * the return value is:
+ * 0 everything OK
+ * <0 error signaled and abort
+ * >0 error signaled and do not abort
+ */
+typedef int (*fsck_walk_func)(struct object *obj, int type, void *data);
+
+/* callback for fsck_object, type is FSCK_ERROR or FSCK_WARN */
+typedef int (*fsck_error)(struct object *obj, int type, const char *err, ...);
+
+int fsck_error_function(struct object *obj, int type, const char *fmt, ...);
+
+/* descend in all linked child objects
+ * the return value is:
+ * -1 error in processing the object
+ * <0	return value of the callback, which led to an abort
+ * >0	return value of the first signaled error >0 (when no other error occurred)
+ * 0 everything OK
+ */
+int fsck_walk(struct object *obj, fsck_walk_func walk, void *data);
+int fsck_object(struct object *obj, int strict, fsck_error error_func);
+
+#endif
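The fsck_object()/fsck_walk() pair declared here is consumed later in this series by git-index-pack's new --strict mode, which validates every object and its links while indexing. A minimal command-line sketch (the pack file name is a placeholder):

	# refuse to index a pack containing a malformed object or a broken link
	git index-pack --strict pack-1234.pack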
#
# Copyright (c) 2005, 2006 Junio C Hamano
+SUBDIRECTORY_OK=Yes
OPTIONS_KEEPDASHDASH=
OPTIONS_SPEC="\
git-am [options] <mbox>|<Maildir>...
git-am [options] --resolved
git-am [options] --skip
--
-d,dotest= use <dir> and not .dotest
+d,dotest= (removed -- do not use)
i,interactive run interactively
b,binary pass --allow-binary-replacement to git-apply
3,3way allow fall back on 3way merging if needed
p= pass it through git-apply
resolvemsg= override error message when patch failure occurs
r,resolved to be used after a patch failure
-skip skip the current patch"
+skip skip the current patch
+rebasing (internal use for git-rebase)"
. git-sh-setup
+prefix=$(git rev-parse --show-prefix)
set_reflog_action am
require_work_tree
+cd_to_toplevel
git var GIT_COMMITTER_IDENT >/dev/null || exit
then
cmdline="$cmdline -3"
fi
- if test '.dotest' != "$dotest"
- then
- cmdline="$cmdline -d=$dotest"
- fi
echo "When you have resolved this problem run \"$cmdline --resolved\"."
echo "If you would prefer to skip this patch, instead run \"$cmdline --skip\"."
}
prec=4
-dotest=.dotest sign= utf8=t keep= skip= interactive= resolved= binary=
+dotest=".dotest"
+sign= utf8=t keep= skip= interactive= resolved= binary= rebasing=
resolvemsg= resume=
git_apply_opt=
resolved=t ;;
--skip)
skip=t ;;
+ --rebasing)
+ rebasing=t threeway=t keep=t binary=t ;;
-d|--dotest)
- shift; dotest=$1;;
+ die "-d option is no longer supported. Do not use."
+ ;;
--resolvemsg)
shift; resolvemsg=$1 ;;
--whitespace)
0,)
# No file input but without resume parameters; catch
# user error to feed us a patch from standard input
- # when there is already .dotest. This is somewhat
+ # when there is already $dotest. This is somewhat
# unreliable -- stdin could be /dev/null for example
# and the caller did not intend to feed us a patch but
# wanted to continue unattended.
# Start afresh.
mkdir -p "$dotest" || exit
+ if test -n "$prefix" && test $# != 0
+ then
+ first=t
+ for arg
+ do
+ test -n "$first" && {
+ set x
+ first=
+ }
+ case "$arg" in
+ /*)
+ set "$@" "$arg" ;;
+ *)
+ set "$@" "$prefix$arg" ;;
+ esac
+ done
+ shift
+ fi
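Together with SUBDIRECTORY_OK and cd_to_toplevel above, this loop rewrites relative mbox arguments against the original prefix, so git-am can now be started from a subdirectory; a small sketch (paths are invented):

	# the patch path is resolved relative to the subdirectory we started in
	cd Documentation
	git am ../patches/0001-doc-typo.patch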
git mailsplit -d"$prec" -o"$dotest" -b -- "$@" > "$dotest/last" || {
rm -fr "$dotest"
exit 1
echo "$utf8" >"$dotest/utf8"
echo "$keep" >"$dotest/keep"
echo 1 >"$dotest/next"
+ if test -n "$rebasing"
+ then
+ : >"$dotest/rebasing"
+ else
+ : >"$dotest/applying"
+ fi
fi
case "$resolved" in
then
local=yes
fi
+elif test -f "$repo"
+then
+ case "$repo" in /*) ;; *) repo="$PWD/$repo" ;; esac
+fi
+
+# Decide the directory name of the new repository
+if test -n "$2"
+then
+ dir="$2"
+else
+ # Derive one from the repository name
+ # Try using "humanish" part of source repo if user didn't specify one
+ if test -f "$repo"
+ then
+ # Cloning from a bundle
+ dir=$(echo "$repo" | sed -e 's|/*\.bundle$||' -e 's|.*/||g')
+ else
+ dir=$(echo "$repo" |
+ sed -e 's|/$||' -e 's|:*/*\.git$||' -e 's|.*[/:]||g')
+ fi
fi
-dir="$2"
-# Try using "humanish" part of source repo if user didn't specify one
-[ -z "$dir" ] && dir=$(echo "$repo" | sed -e 's|/$||' -e 's|:*/*\.git$||' -e 's|.*[/:]||g')
[ -e "$dir" ] && die "destination directory '$dir' already exists."
[ yes = "$bare" ] && unset GIT_WORK_TREE
[ -n "$GIT_WORK_TREE" ] && [ -e "$GIT_WORK_TREE" ] &&
fi
;;
*)
- case "$upload_pack" in
- '') git-fetch-pack --all -k $quiet $depth $no_progress "$repo";;
- *) git-fetch-pack --all -k $quiet "$upload_pack" $depth $no_progress "$repo" ;;
- esac >"$GIT_DIR/CLONE_HEAD" ||
+ if [ -f "$repo" ] ; then
+ git bundle unbundle "$repo" > "$GIT_DIR/CLONE_HEAD" ||
+ die "unbundle from '$repo' failed."
+ else
+ case "$upload_pack" in
+ '') git-fetch-pack --all -k $quiet $depth $no_progress "$repo";;
+ *) git-fetch-pack --all -k \
+ $quiet "$upload_pack" $depth $no_progress "$repo" ;;
+ esac >"$GIT_DIR/CLONE_HEAD" ||
die "fetch-pack from '$repo' failed."
+ fi
;;
esac
;;
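The new branch above lets git-clone take a bundle file as its source and unbundle it instead of running fetch-pack; a sketch with made-up paths:

	# create a bundle of all refs, then clone from it without network access
	git bundle create /tmp/project.bundle --all
	git clone /tmp/project.bundle project-copy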
extern FILE *git_fopen(const char*, const char*);
#endif
+#ifdef SNPRINTF_RETURNS_BOGUS
+#define snprintf git_snprintf
+extern int git_snprintf(char *str, size_t maxsize,
+ const char *format, ...);
+#define vsnprintf git_vsnprintf
+extern int git_vsnprintf(char *str, size_t maxsize,
+ const char *format, va_list ap);
+#endif
+
#ifdef __GLIBC_PREREQ
#if __GLIBC_PREREQ(2, 1)
#define HAVE_STRCHRNUL
#define qsort git_qsort
#endif
+#ifndef DIR_HAS_BSD_GROUP_SEMANTICS
+# define FORCE_DIR_SET_GID S_ISGID
+#else
+# define FORCE_DIR_SET_GID 0
+#endif
+
#endif
use strict;
use warnings;
-use Getopt::Std;
+use Getopt::Long;
use File::Spec;
use File::Temp qw(tempfile tmpnam);
use File::Path qw(mkpath);
$SIG{'PIPE'}="IGNORE";
$ENV{'TZ'}="UTC";
-our ($opt_h,$opt_o,$opt_v,$opt_k,$opt_u,$opt_d,$opt_p,$opt_C,$opt_z,$opt_i,$opt_P, $opt_s,$opt_m,$opt_M,$opt_A,$opt_S,$opt_L, $opt_a, $opt_r);
+our ($opt_h,$opt_o,$opt_v,$opt_k,$opt_u,$opt_d,$opt_p,$opt_C,$opt_z,$opt_i,$opt_P, $opt_s,$opt_m,@opt_M,$opt_A,$opt_S,$opt_L, $opt_a, $opt_r);
my (%conv_author_name, %conv_author_email);
sub usage(;$) {
my $opts = "haivmkuo:d:p:r:C:z:s:M:P:A:S:L:";
read_repo_config($opts);
-getopts($opts) or usage();
+Getopt::Long::Configure( 'no_ignore_case', 'bundling' );
+
+# turn the Getopt::Std specification into a Getopt::Long one,
+# with support for multiple -M options
+GetOptions( map { s/:/=s/; /M/ ? "$_\@" : $_ } split( /(?!:)/, $opts ) )
+ or usage();
usage if $opt_h;
if (@ARGV == 0) {
our @mergerx = ();
if ($opt_m) {
- @mergerx = ( qr/\b(?:from|of|merge|merging|merged) (\w+)/i );
+ @mergerx = ( qr/\b(?:from|of|merge|merging|merged) ([-\w]+)/i );
}
-if ($opt_M) {
- push (@mergerx, qr/$opt_M/);
+if (@opt_M) {
+ push (@mergerx, map { qr/$_/ } @opt_M);
}
# Remember UTC of our starting time
if ($base) {
my @merged;
# print "want to log between $base $parent \n";
- open(GITLOG, '-|', 'git-log', "$base..$parent")
+ open(GITLOG, '-|', 'git-log', '--pretty=medium', "$base..$parent")
or die "Cannot call git-log: $!";
my $mergedhash;
while (<GITLOG>) {
git read-tree -i -m $commit
;;
*)
- git read-tree -i -m $commit:"$filter_subdir"
+ # The commit may not have the subdirectory at all
+ err=$(git read-tree -i -m $commit:"$filter_subdir" 2>&1) || {
+ if ! git rev-parse --verify $commit:"$filter_subdir" 2>/dev/null
+ then
+ rm -f "$GIT_INDEX_FILE"
+ else
+ echo >&2 "$err"
+ false
+ fi
+ }
esac || die "Could not initialize the index"
GIT_COMMIT=$commit
ifeq ($(shell $(MSGFMT) >/dev/null 2>&1 || echo $$?),127)
MSGFMT := $(TCL_PATH) po/po2msg.sh
endif
+ ifeq (msgfmt,$(MSGFMT))
+	ifeq ($(shell $(MSGFMT) --tcl -l C -d . /dev/null 2>/dev/null || echo $$?),1)
+ MSGFMT := $(TCL_PATH) po/po2msg.sh
+ endif
+ endif
endif
msgsdir = $(gg_libdir)/msgs
append v "Tcl version $tcl_patchLevel"
append v ", Tk version $tk_patchLevel"
}
- if {[info exists ui_comm_spell]} {
+ if {[info exists ui_comm_spell]
+ && [$ui_comm_spell version] ne {}} {
append v "\n"
append v [$ui_comm_spell version]
}
append title " ([reponame])"
}
tk_messageBox \
- -parent $parent \
+ -parent [_error_parent] \
-icon info \
-type ok \
-title $title \
-# git-gui spellchecking support through aspell
+# git-gui spellchecking support through ispell/aspell
# Copyright (C) 2008 Shawn Pearce
class spellcheck {
-field s_fd {} ; # pipe to aspell
-field s_version ; # aspell version string
-field s_lang ; # current language code
+field s_fd {} ; # pipe to ispell/aspell
+field s_version {} ; # ispell/aspell version string
+field s_lang {} ; # current language code
+field s_prog aspell; # are we actually old ispell?
+field s_failed 0 ; # is $s_prog bogus and not working?
field w_text ; # text widget we are spelling
field w_menu ; # context menu for the widget
field s_menuidx 0 ; # last index of insertion into $w_menu
-field s_i ; # timer registration for _run callbacks
+field s_i {} ; # timer registration for _run callbacks
field s_clear 0 ; # did we erase misspelled tags yet?
field s_seen [list] ; # lines last seen from $w_text in _run
field s_checked [list] ; # lines already checked
-field s_pending [list] ; # [$line $data] sent to aspell
+field s_pending [list] ; # [$line $data] sent to ispell/aspell
field s_suggest ; # array, list of suggestions, keyed by misspelling
constructor init {pipe_fd ui_text ui_menu} {
set w_text $ui_text
set w_menu $ui_menu
+ array unset s_suggest
+ bind_button3 $w_text [cb _popup_suggest %X %Y @%x,%y]
_connect $this $pipe_fd
return $this
}
-translation lf
if {[gets $pipe_fd s_version] <= 0} {
- close $pipe_fd
- error [mc "Not connected to aspell"]
+ if {[catch {close $pipe_fd} err]} {
+
+ # Eh? Is this actually ispell choking on aspell options?
+ #
+ if {$s_prog eq {aspell}
+ && [regexp -nocase {^Usage: } $err]
+ && ![catch {
+ set pipe_fd [open [list | $s_prog -v] r]
+ gets $pipe_fd s_version
+ close $pipe_fd
+ }]
+ && $s_version ne {}} {
+ if {{@(#) } eq [string range $s_version 0 4]} {
+ set s_version [string range $s_version 5 end]
+ }
+ set s_failed 1
+ error_popup [strcat \
+ [mc "Unsupported spell checker"] \
+ ":\n\n$s_version"]
+ set s_version {}
+ return
+ }
+
+ regsub -nocase {^Error: } $err {} err
+ if {$s_fd eq {}} {
+ error_popup [strcat [mc "Spell checking is unavailable"] ":\n\n$err"]
+ } else {
+ error_popup [strcat \
+ [mc "Invalid spell checking configuration"] \
+ ":\n\n$err\n\n" \
+ [mc "Reverting dictionary to %s." $s_lang]]
+ }
+ } else {
+ error_popup [mc "Spell checker silently failed on startup"]
+ }
+ return
}
+
if {{@(#) } ne [string range $s_version 0 4]} {
- close $pipe_fd
- error [strcat [mc "Unrecognized aspell version"] ": $s_version"]
+ catch {close $pipe_fd}
+ error_popup [strcat [mc "Unrecognized spell checker"] ":\n\n$s_version"]
+ return
}
set s_version [string range $s_version 5 end]
+ regexp \
+ {International Ispell Version .* \(but really (Aspell .*?)\)$} \
+ $s_version _junk s_version
puts $pipe_fd ! ; # enable terse mode
puts $pipe_fd {$$cr master} ; # fetch the language
$w_text tag conf misspelled \
-foreground red \
-underline 1
- bind_button3 $w_text [cb _popup_suggest %X %Y @%x,%y]
array unset s_suggest
set s_seen [list]
}
method lang {{n {}}} {
- if {$n ne {} && $s_lang ne $n} {
+ if {$n ne {} && $s_lang ne $n && !$s_failed} {
set spell_cmd [list |]
lappend spell_cmd aspell
lappend spell_cmd --master=$n
}
method version {} {
- return "$s_version, $s_lang"
+ if {$s_version ne {}} {
+ return "$s_version, $s_lang"
+ }
+ return {}
}
method stop {} {
fconfigure $s_fd -block 1
if {[eof $s_fd]} {
if {![catch {close $s_fd} err]} {
- set err [mc "unexpected eof from aspell"]
+ set err [mc "Unexpected EOF from spell checker"]
}
catch {after cancel $s_i}
$w_text tag remove misspelled 1.0 end
- error_popup [strcat "Spell Checker Failed" "\n\n" $err]
+ error_popup [strcat [mc "Spell Checker Failed"] "\n\n" $err]
return
}
fconfigure $s_fd -block 0
msgstr ""
"Project-Id-Version: git-gui\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2008-02-02 10:14+0100\n"
-"PO-Revision-Date: 2008-02-02 10:18+0100\n"
+"POT-Creation-Date: 2008-02-16 21:24+0100\n"
+"PO-Revision-Date: 2008-02-16 21:52+0100\n"
"Last-Translator: Christian Stimming <stimming@tuhh.de>\n"
"Language-Team: German\n"
"MIME-Version: 1.0\n"
#: lib/branch_delete.tcl:52
msgid "Delete Only If Merged Into"
-msgstr "Nur löschen, wenn darin zusammengeführt"
+msgstr "Nur löschen, wenn zusammengeführt nach"
#: lib/branch_delete.tcl:54
msgid "Always (Do not perform merge test.)"
#: lib/commit.tcl:221
msgid "Calling pre-commit hook..."
-msgstr ""
+msgstr "Aufrufen der Vor-Eintragen-Kontrolle..."
#: lib/commit.tcl:236
msgid "Commit declined by pre-commit hook."
-msgstr ""
+msgstr "Eintragen abgelehnt durch Vor-Eintragen-Kontrolle (»pre-commit hook«)."
#: lib/commit.tcl:259
msgid "Calling commit-msg hook..."
-msgstr ""
+msgstr "Aufrufen der Versionsbeschreibungs-Kontrolle..."
#: lib/commit.tcl:274
msgid "Commit declined by commit-msg hook."
-msgstr ""
+msgstr "Eintragen abgelehnt durch Versionsbeschreibungs-Kontrolle (»commit-message hook«)."
#: lib/commit.tcl:287
msgid "Committing changes..."
#: lib/database.tcl:48
msgid "Packed objects waiting for pruning"
-msgstr "Komprimierte Objekte, die zum Entfernen vorgesehen sind"
+msgstr "Komprimierte Objekte, die zum Aufräumen vorgesehen sind"
#: lib/database.tcl:49
msgid "Garbage files"
#: lib/merge.tcl:119
#, tcl-format
-msgid "Merging %s and %s"
-msgstr "Zusammenführen von %s und %s"
+msgid "Merging %s and %s..."
+msgstr "Zusammenführen von %s und %s..."
-#: lib/merge.tcl:131
+#: lib/merge.tcl:130
msgid "Merge completed successfully."
msgstr "Zusammenführen erfolgreich abgeschlossen."
#: lib/merge.tcl:158
#, tcl-format
msgid "Merge Into %s"
-msgstr "Zusammenführen in %s"
+msgstr "Zusammenführen in »%s«"
#: lib/merge.tcl:177
msgid "Revision To Merge"
#: lib/option.tcl:111
msgid "Prune Tracking Branches During Fetch"
-msgstr "Übernahmezweige entfernen während Anforderung"
+msgstr "Übernahmezweige aufräumen während Anforderung"
#: lib/option.tcl:112
msgid "Match Tracking Branches"
msgid "New Branch Name Template"
msgstr "Namensvorschlag für neue Zweige"
-#: lib/option.tcl:176
+#: lib/option.tcl:191
+msgid "Spelling Dictionary:"
+msgstr "Wörterbuch Rechtschreibprüfung:"
+
+#: lib/option.tcl:215
msgid "Change Font"
msgstr "Schriftart ändern"
#: lib/remote_branch_delete.tcl:29 lib/remote_branch_delete.tcl:34
msgid "Delete Remote Branch"
-msgstr "Zweig aus anderem Projektarchiv löschen"
+msgstr "Zweig in anderem Projektarchiv löschen"
#: lib/remote_branch_delete.tcl:47
msgid "From Repository"
-msgstr "Von Projektarchiv"
+msgstr "In Projektarchiv"
#: lib/remote_branch_delete.tcl:50 lib/transport.tcl:123
msgid "Remote:"
#: lib/remote_branch_delete.tcl:66 lib/transport.tcl:138
msgid "Arbitrary URL:"
-msgstr "Kommunikation mit URL:"
+msgstr "Archiv-URL:"
#: lib/remote_branch_delete.tcl:84
msgid "Branches"
#: lib/remote_branch_delete.tcl:109
msgid "Delete Only If"
-msgstr "Löschen, falls"
+msgstr "Nur löschen, wenn"
#: lib/remote_branch_delete.tcl:111
msgid "Merged Into:"
-msgstr "Zusammenführen mit:"
+msgstr "Zusammengeführt mit:"
#: lib/remote_branch_delete.tcl:119
msgid "Always (Do not perform merge checks)"
#: lib/remote.tcl:165
msgid "Prune from"
-msgstr "Entfernen von"
+msgstr "Aufräumen von"
#: lib/remote.tcl:170
msgid "Fetch from"
msgid "Cannot write icon:"
msgstr "Fehler beim Erstellen des Icons:"
+#: lib/spellcheck.tcl:37
+msgid "Not connected to aspell"
+msgstr "Keine Verbindung zu »aspell«"
+
+#: lib/spellcheck.tcl:41
+msgid "Unrecognized aspell version"
+msgstr "Unbekannte Version von »aspell«"
+
+#: lib/spellcheck.tcl:135
+msgid "No Suggestions"
+msgstr "Keine Vorschläge"
+
+#: lib/spellcheck.tcl:336
+msgid "Unexpected EOF from aspell"
+msgstr "Unerwartetes EOF von »aspell«"
+
+#: lib/spellcheck.tcl:340
+msgid "Spell Checker Failed"
+msgstr "Rechtschreibprüfung fehlgeschlagen"
+
#: lib/status_bar.tcl:83
#, tcl-format
msgid "%s ... %*i of %*i %s (%3i%%)"
#: lib/transport.tcl:18
#, tcl-format
msgid "remote prune %s"
-msgstr "Entfernen von »%s« aus anderem Archiv"
+msgstr "Aufräumen von »%s«"
#: lib/transport.tcl:19
#, tcl-format
msgid "Pruning tracking branches deleted from %s"
-msgstr "Übernahmezweige entfernen, die in »%s« gelöscht wurden"
+msgstr "Übernahmezweige aufräumen und entfernen, die in »%s« gelöscht wurden"
#: lib/transport.tcl:25 lib/transport.tcl:71
#, tcl-format
#: lib/transport.tcl:103
msgid "Source Branches"
-msgstr "Herkunftszweige"
+msgstr "Lokale Zweige"
#: lib/transport.tcl:120
msgid "Destination Repository"
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2008-02-02 10:14+0100\n"
+"POT-Creation-Date: 2008-02-16 21:24+0100\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
#: lib/merge.tcl:119
#, tcl-format
-msgid "Merging %s and %s"
+msgid "Merging %s and %s..."
msgstr ""
-#: lib/merge.tcl:131
+#: lib/merge.tcl:130
msgid "Merge completed successfully."
msgstr ""
msgid "New Branch Name Template"
msgstr ""
-#: lib/option.tcl:176
+#: lib/option.tcl:191
+msgid "Spelling Dictionary:"
+msgstr ""
+
+#: lib/option.tcl:215
msgid "Change Font"
msgstr ""
msgid "Cannot write icon:"
msgstr ""
+#: lib/spellcheck.tcl:37
+msgid "Not connected to aspell"
+msgstr ""
+
+#: lib/spellcheck.tcl:41
+msgid "Unrecognized aspell version"
+msgstr ""
+
+#: lib/spellcheck.tcl:135
+msgid "No Suggestions"
+msgstr ""
+
+#: lib/spellcheck.tcl:336
+msgid "Unexpected EOF from aspell"
+msgstr ""
+
+#: lib/spellcheck.tcl:340
+msgid "Spell Checker Failed"
+msgstr ""
+
#: lib/status_bar.tcl:83
#, tcl-format
msgid "%s ... %*i of %*i %s (%3i%%)"
msgstr ""
"Project-Id-Version: git-gui glossary\n"
"POT-Creation-Date: 2008-01-07 21:20+0100\n"
-"PO-Revision-Date: 2008-01-15 20:32+0100\n"
+"PO-Revision-Date: 2008-02-16 21:48+0100\n"
"Last-Translator: Christian Stimming <stimming@tuhh.de>\n"
"Language-Team: German \n"
"MIME-Version: 1.0\n"
#. "Deletes all stale tracking branches under <name>. These stale branches have already been removed from the remote repository referenced by <name>, but are still locally available in 'remotes/<name>'."
msgid "prune"
-msgstr "entfernen"
+msgstr "aufräumen (entfernen?)"
#. "Pulling a branch means to fetch it and merge it."
msgid "pull"
allow_fast_forward=t
allow_trivial_merge=t
+squash= no_commit=
dropsave() {
rm -f -- "$GIT_DIR/MERGE_HEAD" "$GIT_DIR/MERGE_MSG" \
squash_message () {
echo Squashed commit of the following:
echo
- git log --no-merges ^"$head" $remoteheads
+ git log --no-merges --pretty=medium ^"$head" $remoteheads
}
finish () {
--summary)
show_diffstat=t ;;
--squash)
- allow_fast_forward=t squash=t no_commit=t ;;
+ test "$allow_fast_forward" = t ||
+ die "You cannot combine --squash with --no-ff."
+ squash=t no_commit=t ;;
--no-squash)
- allow_fast_forward=t squash= no_commit= ;;
+ squash= no_commit= ;;
--commit)
- allow_fast_forward=t squash= no_commit= ;;
+ no_commit= ;;
--no-commit)
- allow_fast_forward=t squash= no_commit=t ;;
+ no_commit=t ;;
--ff)
- allow_fast_forward=t squash= no_commit= ;;
+ allow_fast_forward=t ;;
--no-ff)
- allow_fast_forward=false squash= no_commit= ;;
+ test "$squash" != t ||
+ die "You cannot combine --squash with --no-ff."
+ allow_fast_forward=f ;;
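With this option parsing, --no-commit and --no-ff no longer reset each other's state, and the nonsensical --squash --no-ff combination is rejected in either order. For illustration (the branch name is hypothetical):

	git merge --no-ff --no-commit topic   # stops before committing, even where it could fast-forward
	git merge --squash --no-ff topic      # now dies instead of silently misbehaving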
-s|--strategy)
shift
case " $all_strategies " in
cleanup_temp_files () {
if test "$1" = --save-backup ; then
- mv -- "$BACKUP" "$path.orig"
+ mv -- "$BACKUP" "$MERGED.orig"
rm -f -- "$LOCAL" "$REMOTE" "$BASE"
else
rm -f -- "$LOCAL" "$REMOTE" "$BASE" "$BACKUP"
read ans
case "$ans" in
[lL]*)
- git checkout-index -f --stage=2 -- "$path"
- git add -- "$path"
+ git checkout-index -f --stage=2 -- "$MERGED"
+ git add -- "$MERGED"
cleanup_temp_files --save-backup
return
;;
[rR]*)
- git checkout-index -f --stage=3 -- "$path"
- git add -- "$path"
+ git checkout-index -f --stage=3 -- "$MERGED"
+ git add -- "$MERGED"
cleanup_temp_files --save-backup
return
;;
read ans
case "$ans" in
[mMcC]*)
- git add -- "$path"
+ git add -- "$MERGED"
cleanup_temp_files --save-backup
return
;;
[dD]*)
- git rm -- "$path" > /dev/null
+ git rm -- "$MERGED" > /dev/null
cleanup_temp_files
return
;;
}
check_unchanged () {
- if test "$path" -nt "$BACKUP" ; then
+ if test "$MERGED" -nt "$BACKUP" ; then
status=0;
else
while true; do
- echo "$path seems unchanged."
+ echo "$MERGED seems unchanged."
printf "Was the merge successful? [y/n] "
read answer < /dev/tty
case "$answer" in
fi
}
-save_backup () {
- if test "$status" -eq 0; then
- mv -- "$BACKUP" "$path.orig"
- fi
-}
-
-remove_backup () {
- if test "$status" -eq 0; then
- rm "$BACKUP"
- fi
-}
-
merge_file () {
- path="$1"
+ MERGED="$1"
- f=`git ls-files -u -- "$path"`
+ f=`git ls-files -u -- "$MERGED"`
if test -z "$f" ; then
- if test ! -f "$path" ; then
- echo "$path: file not found"
+ if test ! -f "$MERGED" ; then
+ echo "$MERGED: file not found"
else
- echo "$path: file does not need merging"
+ echo "$MERGED: file does not need merging"
fi
exit 1
fi
- ext="$$$(expr "$path" : '.*\(\.[^/]*\)$')"
- BACKUP="$path.BACKUP.$ext"
- LOCAL="$path.LOCAL.$ext"
- REMOTE="$path.REMOTE.$ext"
- BASE="$path.BASE.$ext"
+ ext="$$$(expr "$MERGED" : '.*\(\.[^/]*\)$')"
+ BACKUP="$MERGED.BACKUP.$ext"
+ LOCAL="$MERGED.LOCAL.$ext"
+ REMOTE="$MERGED.REMOTE.$ext"
+ BASE="$MERGED.BASE.$ext"
- mv -- "$path" "$BACKUP"
- cp -- "$BACKUP" "$path"
+ mv -- "$MERGED" "$BACKUP"
+ cp -- "$BACKUP" "$MERGED"
- base_mode=`git ls-files -u -- "$path" | awk '{if ($3==1) print $1;}'`
- local_mode=`git ls-files -u -- "$path" | awk '{if ($3==2) print $1;}'`
- remote_mode=`git ls-files -u -- "$path" | awk '{if ($3==3) print $1;}'`
+ base_mode=`git ls-files -u -- "$MERGED" | awk '{if ($3==1) print $1;}'`
+ local_mode=`git ls-files -u -- "$MERGED" | awk '{if ($3==2) print $1;}'`
+ remote_mode=`git ls-files -u -- "$MERGED" | awk '{if ($3==3) print $1;}'`
- base_present && git cat-file blob ":1:$prefix$path" >"$BASE" 2>/dev/null
- local_present && git cat-file blob ":2:$prefix$path" >"$LOCAL" 2>/dev/null
- remote_present && git cat-file blob ":3:$prefix$path" >"$REMOTE" 2>/dev/null
+ base_present && git cat-file blob ":1:$prefix$MERGED" >"$BASE" 2>/dev/null
+ local_present && git cat-file blob ":2:$prefix$MERGED" >"$LOCAL" 2>/dev/null
+ remote_present && git cat-file blob ":3:$prefix$MERGED" >"$REMOTE" 2>/dev/null
if test -z "$local_mode" -o -z "$remote_mode"; then
- echo "Deleted merge conflict for '$path':"
+ echo "Deleted merge conflict for '$MERGED':"
describe_file "$local_mode" "local" "$LOCAL"
describe_file "$remote_mode" "remote" "$REMOTE"
resolve_deleted_merge
fi
if is_symlink "$local_mode" || is_symlink "$remote_mode"; then
- echo "Symbolic link merge conflict for '$path':"
+ echo "Symbolic link merge conflict for '$MERGED':"
describe_file "$local_mode" "local" "$LOCAL"
describe_file "$remote_mode" "remote" "$REMOTE"
resolve_symlink_merge
return
fi
- echo "Normal merge conflict for '$path':"
+ echo "Normal merge conflict for '$MERGED':"
describe_file "$local_mode" "local" "$LOCAL"
describe_file "$remote_mode" "remote" "$REMOTE"
printf "Hit return to start merge resolution tool (%s): " "$merge_tool"
case "$merge_tool" in
kdiff3)
if base_present ; then
- ("$merge_tool_path" --auto --L1 "$path (Base)" --L2 "$path (Local)" --L3 "$path (Remote)" \
- -o "$path" -- "$BASE" "$LOCAL" "$REMOTE" > /dev/null 2>&1)
+ ("$merge_tool_path" --auto --L1 "$MERGED (Base)" --L2 "$MERGED (Local)" --L3 "$MERGED (Remote)" \
+ -o "$MERGED" -- "$BASE" "$LOCAL" "$REMOTE" > /dev/null 2>&1)
else
- ("$merge_tool_path" --auto --L1 "$path (Local)" --L2 "$path (Remote)" \
- -o "$path" -- "$LOCAL" "$REMOTE" > /dev/null 2>&1)
+ ("$merge_tool_path" --auto --L1 "$MERGED (Local)" --L2 "$MERGED (Remote)" \
+ -o "$MERGED" -- "$LOCAL" "$REMOTE" > /dev/null 2>&1)
fi
status=$?
- remove_backup
;;
tkdiff)
if base_present ; then
- "$merge_tool_path" -a "$BASE" -o "$path" -- "$LOCAL" "$REMOTE"
+ "$merge_tool_path" -a "$BASE" -o "$MERGED" -- "$LOCAL" "$REMOTE"
else
- "$merge_tool_path" -o "$path" -- "$LOCAL" "$REMOTE"
+ "$merge_tool_path" -o "$MERGED" -- "$LOCAL" "$REMOTE"
fi
status=$?
- save_backup
;;
meld|vimdiff)
touch "$BACKUP"
- "$merge_tool_path" -- "$LOCAL" "$path" "$REMOTE"
+ "$merge_tool_path" -- "$LOCAL" "$MERGED" "$REMOTE"
check_unchanged
- save_backup
;;
gvimdiff)
- touch "$BACKUP"
- "$merge_tool_path" -f -- "$LOCAL" "$path" "$REMOTE"
- check_unchanged
- save_backup
- ;;
+ touch "$BACKUP"
+ "$merge_tool_path" -f -- "$LOCAL" "$MERGED" "$REMOTE"
+ check_unchanged
+ ;;
xxdiff)
touch "$BACKUP"
if base_present ; then
-R 'Accel.SaveAsMerged: "Ctrl-S"' \
-R 'Accel.Search: "Ctrl+F"' \
-R 'Accel.SearchForward: "Ctrl-G"' \
- --merged-file "$path" -- "$LOCAL" "$BASE" "$REMOTE"
+ --merged-file "$MERGED" -- "$LOCAL" "$BASE" "$REMOTE"
else
"$merge_tool_path" -X --show-merged-pane \
-R 'Accel.SaveAsMerged: "Ctrl-S"' \
-R 'Accel.Search: "Ctrl+F"' \
-R 'Accel.SearchForward: "Ctrl-G"' \
- --merged-file "$path" -- "$LOCAL" "$REMOTE"
+ --merged-file "$MERGED" -- "$LOCAL" "$REMOTE"
fi
check_unchanged
- save_backup
;;
opendiff)
touch "$BACKUP"
if base_present; then
- "$merge_tool_path" "$LOCAL" "$REMOTE" -ancestor "$BASE" -merge "$path" | cat
+ "$merge_tool_path" "$LOCAL" "$REMOTE" -ancestor "$BASE" -merge "$MERGED" | cat
else
- "$merge_tool_path" "$LOCAL" "$REMOTE" -merge "$path" | cat
+ "$merge_tool_path" "$LOCAL" "$REMOTE" -merge "$MERGED" | cat
fi
check_unchanged
- save_backup
;;
ecmerge)
touch "$BACKUP"
if base_present; then
- "$merge_tool_path" "$BASE" "$LOCAL" "$REMOTE" --mode=merge3 --to="$path"
+ "$merge_tool_path" "$BASE" "$LOCAL" "$REMOTE" --mode=merge3 --to="$MERGED"
else
- "$merge_tool_path" "$LOCAL" "$REMOTE" --mode=merge2 --to="$path"
+ "$merge_tool_path" "$LOCAL" "$REMOTE" --mode=merge2 --to="$MERGED"
fi
check_unchanged
- save_backup
;;
emerge)
if base_present ; then
- "$merge_tool_path" -f emerge-files-with-ancestor-command "$LOCAL" "$REMOTE" "$BASE" "$(basename "$path")"
+ "$merge_tool_path" -f emerge-files-with-ancestor-command "$LOCAL" "$REMOTE" "$BASE" "$(basename "$MERGED")"
else
- "$merge_tool_path" -f emerge-files-command "$LOCAL" "$REMOTE" "$(basename "$path")"
+ "$merge_tool_path" -f emerge-files-command "$LOCAL" "$REMOTE" "$(basename "$MERGED")"
fi
status=$?
- save_backup
+ ;;
+ *)
+ if test -n "$merge_tool_cmd"; then
+ if test "$merge_tool_trust_exit_code" = "false"; then
+ touch "$BACKUP"
+ ( eval $merge_tool_cmd )
+ check_unchanged
+ else
+ ( eval $merge_tool_cmd )
+ status=$?
+ fi
+ fi
;;
esac
if test "$status" -ne 0; then
- echo "merge of $path failed" 1>&2
- mv -- "$BACKUP" "$path"
+ echo "merge of $MERGED failed" 1>&2
+ mv -- "$BACKUP" "$MERGED"
exit 1
fi
- git add -- "$path"
+
+ if test "$merge_keep_backup" = "true"; then
+ mv -- "$BACKUP" "$MERGED.orig"
+ else
+ rm -- "$BACKUP"
+ fi
+
+ git add -- "$MERGED"
cleanup_temp_files
}
shift
done
+valid_custom_tool()
+{
+ merge_tool_cmd="$(git config mergetool.$1.cmd)"
+ test -n "$merge_tool_cmd"
+}
+
valid_tool() {
case "$1" in
kdiff3 | tkdiff | xxdiff | meld | opendiff | emerge | vimdiff | gvimdiff | ecmerge)
;; # happy
*)
- return 1
+ if ! valid_custom_tool "$1"; then
+ return 1
+ fi
;;
esac
}
init_merge_tool_path "$merge_tool"
- if ! type "$merge_tool_path" > /dev/null 2>&1; then
+ merge_keep_backup="$(git config --bool merge.keepBackup || echo true)"
+
+ if test -z "$merge_tool_cmd" && ! type "$merge_tool_path" > /dev/null 2>&1; then
echo "The merge tool $merge_tool is not available as '$merge_tool_path'"
exit 1
fi
+
+ if ! test -z "$merge_tool_cmd"; then
+ merge_tool_trust_exit_code="$(git config --bool mergetool.$merge_tool.trustExitCode || echo false)"
+ fi
fi
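The fallback arm and valid_custom_tool() above let any command configured as mergetool.<tool>.cmd act as a merge tool; $BASE, $LOCAL, $REMOTE and $MERGED are the paths merge_file() sets up, and trustExitCode decides whether the command's exit status is believed. A configuration sketch (the tool name and command are invented):

	git config mergetool.mymeld.cmd 'meld "$LOCAL" "$MERGED" "$REMOTE"'
	git config mergetool.mymeld.trustExitCode false
	git config merge.tool mymeld
	git mergetool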
git rebase --abort instead.
Note that if <branch> is not specified on the command line, the
-currently checked out branch is used. You must be in the top
-directory of your project to start (or continue) a rebase.
+currently checked out branch is used.
Example: git-rebase master~1 topic
if test -d "$dotest"
then
move_to_original_branch
- rm -r "$dotest"
elif test -d .dotest
then
dotest=.dotest
move_to_original_branch
- rm -r .dotest
else
die "No rebase in progress?"
fi
- git reset --hard ORIG_HEAD
+ git reset --hard $(cat $dotest/orig-head)
+ rm -r "$dotest"
exit
;;
--onto)
if test -z "$do_merge"
then
git format-patch -k --stdout --full-index --ignore-if-in-upstream "$upstream"..ORIG_HEAD |
- git am $git_am_opt --binary -3 -k --resolvemsg="$RESOLVEMSG" &&
+ git am $git_am_opt --rebasing --resolvemsg="$RESOLVEMSG" &&
move_to_original_branch
ret=$?
test 0 != $ret -a -d .dotest &&
# If explicit old-style ones are specified, they trump --suppress-cc.
$suppress_cc{'self'} = $suppress_from if defined $suppress_from;
-$suppress_cc{'sob'} = $signed_off_cc if defined $signed_off_cc;
+$suppress_cc{'sob'} = !$signed_off_cc if defined $signed_off_cc;
# Debugging, print out the suppressions.
if (0) {
$message .= $_;
if (/^(Signed-off-by|Cc): (.*)$/i) {
next if ($suppress_cc{'sob'});
+ chomp;
my $c = $2;
chomp $c;
next if ($c eq $sender and $suppress_cc{'self'});
#!/bin/sh
# Copyright (c) 2007, Nanako Shiraishi
-USAGE='[ | save | list | show | apply | clear | create ]'
+USAGE='[ | save | list | show | apply | clear | drop | pop | create ]'
SUBDIRECTORY_OK=Yes
OPTIONS_SPEC=
fi
}
+drop_stash () {
+ have_stash || die 'No stash entries to drop'
+
+ if test $# = 0
+ then
+ set x "$ref_stash@{0}"
+ shift
+ fi
+ # Verify supplied argument looks like a stash entry
+ s=$(git rev-parse --revs-only --no-flags "$@") &&
+ git rev-parse --verify "$s:" > /dev/null 2>&1 &&
+ git rev-parse --verify "$s^1:" > /dev/null 2>&1 &&
+ git rev-parse --verify "$s^2:" > /dev/null 2>&1 ||
+ die "$*: not a valid stashed state"
+
+ git reflog delete --updateref --rewrite "$@" &&
+ echo "Dropped $* ($s)" || die "$*: Could not drop stash entry"
+
+ # clear_stash if we just dropped the last stash entry
+ git rev-parse --verify "$ref_stash@{0}" > /dev/null 2>&1 || clear_stash
+}
+
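drop_stash() above backs the two new subcommands wired into the dispatch below; typical use, with stash@{1} standing in for any reflog entry:

	git stash                 # save and revert local changes
	git stash pop             # re-apply the newest entry, then drop it on success
	git stash drop stash@{1}  # discard a specific entry without applying it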
# Main command set
case "$1" in
list)
fi
create_stash "$*" && echo "$w_commit"
;;
+drop)
+ shift
+ drop_stash "$@"
+ ;;
+pop)
+ shift
+ if apply_stash "$@"
+ then
+ test -z "$unstash_index" || shift
+ drop_stash "$@"
+ fi
+ ;;
*)
if test $# -eq 0
then
usage
fi
- case "$repo" in
- ./*|../*)
- # dereference source url relative to parent's url
- realrepo="$(resolve_relative_url $repo)" ;;
- *)
- # Turn the source into an absolute path if
- # it is local
- if base=$(get_repo_base "$repo"); then
- repo="$base"
- fi
- realrepo=$repo
- ;;
- esac
-
# Guess path from repo if not specified or strip trailing slashes
if test -z "$path"; then
path=$(echo "$repo" | sed -e 's|/*$||' -e 's|:*/*\.git$||' -e 's|.*[/:]||g')
path=$(echo "$path" | sed -e 's|/*$||')
fi
- test -e "$path" &&
- die "'$path' already exists"
-
git ls-files --error-unmatch "$path" > /dev/null 2>&1 &&
die "'$path' already exists in the index"
- module_clone "$path" "$realrepo" || exit
- (unset GIT_DIR; cd "$path" && git checkout -q ${branch:+-b "$branch" "origin/$branch"}) ||
- die "Unable to checkout submodule '$path'"
+ # perhaps the path exists and is already a git repo, else clone it
+ if test -e "$path"
+ then
+ if test -d "$path/.git" &&
+ test "$(unset GIT_DIR; cd $path; git rev-parse --git-dir)" = ".git"
+ then
+ echo "Adding existing repo at '$path' to the index"
+ else
+ die "'$path' already exists and is not a valid git repo"
+ fi
+ else
+ case "$repo" in
+ ./*|../*)
+ # dereference source url relative to parent's url
+ realrepo="$(resolve_relative_url $repo)" ;;
+ *)
+ # Turn the source into an absolute path if
+ # it is local
+ if base=$(get_repo_base "$repo"); then
+ repo="$base"
+ fi
+ realrepo=$repo
+ ;;
+ esac
+
+ module_clone "$path" "$realrepo" || exit
+ (unset GIT_DIR; cd "$path" && git checkout -q ${branch:+-b "$branch" "origin/$branch"}) ||
+ die "Unable to checkout submodule '$path'"
+ fi
+
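The branch above means "git submodule add" now accepts a path that already holds a checked-out repository and simply registers it, instead of insisting on cloning into a fresh directory; a sketch with invented names:

	# lib/frotz was cloned by hand earlier; record it as a submodule
	git submodule add git://example.com/frotz.git lib/frotz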
git add "$path" ||
die "Failed to add submodule '$path'"
do
name=$(module_name "$path") || exit
url=$(git config submodule."$name".url)
- if test -z "url" || ! test -d "$path"/.git
+ if test -z "$url" || ! test -d "$path"/.git
then
say "-$sha1 $path"
continue;
}
sub cmd_find_rev {
- my $revision_or_hash = shift;
+ my $revision_or_hash = shift or die "SVN or git revision required ",
+ "as a command-line argument\n";
my $result;
if ($revision_or_hash =~ /^r\d+$/) {
my $head = shift;
$remotes->{$repo_id}->{$_});
}
my $p = $path;
+ my $rwr = rewrite_root({repo_id => $repo_id});
unless (defined $p) {
$p = $full_url;
- $p =~ s#^\Q$u\E(?:/|$)## or next;
+ my $z = $u;
+ if ($rwr) {
+ $z = $rwr;
+ }
+ $p =~ s#^\Q$z\E(?:/|$)## or next;
}
foreach my $f (keys %$fetch) {
next if $f ne $p;
}
}
+our $search_use_regexp = $cgi->param('sr');
+
our $searchtext = $cgi->param('s');
our $search_regexp;
if (defined $searchtext) {
if (length($searchtext) < 2) {
die_error(undef, "At least two characters are required for search parameter");
}
- $search_regexp = quotemeta $searchtext;
+ $search_regexp = $search_use_regexp ? $searchtext : quotemeta $searchtext;
}
# now read PATH_INFO and use it as alternative to parameters
searchtype => "st",
snapshot_format => "sf",
extra_options => "opt",
+ search_use_regexp => "sr",
);
my %mapping = @mapping;
}
sub parse_commits {
- my ($commit_id, $maxcount, $skip, $arg, $filename) = @_;
+ my ($commit_id, $maxcount, $skip, $filename, @args) = @_;
my @cos;
$maxcount ||= 1;
open my $fd, "-|", git_cmd(), "rev-list",
"--header",
- ($arg ? ($arg) : ()),
+ @args,
("--max-count=" . $maxcount),
("--skip=" . $skip),
@extra_options,
$cgi->sup($cgi->a({-href => href(action=>"search_help")}, "?")) .
" search:\n",
$cgi->textfield(-name => "s", -value => $searchtext) . "\n" .
+ "<span title=\"Extended regular expression\">" .
+ $cgi->checkbox(-name => 'sr', -value => 1, -label => 're',
+ -checked => $search_use_regexp) .
+ "</span>" .
"</div>" .
$cgi->end_form() . "\n";
}
chop_and_escape_str($co{'title'}, 50) . "<br/>");
my $comment = $co{'comment'};
foreach my $line (@$comment) {
- if ($line =~ m/^(.*)($search_regexp)(.*)$/i) {
+ if ($line =~ m/^(.*?)($search_regexp)(.*)$/i) {
my ($lead, $match, $trail) = ($1, $2, $3);
$match = chop_str($match, 70, 5, 'center');
my $contextlen = int((80 - length($match))/2);
$ftype = git_get_type($hash);
}
- my @commitlist = parse_commits($hash_base, 101, (100 * $page), "--full-history", $file_name);
+ my @commitlist = parse_commits($hash_base, 101, (100 * $page), $file_name, "--full-history");
my $paging_nav = '';
if ($page > 0) {
} elsif ($searchtype eq 'committer') {
$greptype = "--committer=";
}
- $greptype .= $search_regexp;
- my @commitlist = parse_commits($hash, 101, (100 * $page), $greptype);
+ $greptype .= $searchtext;
+ my @commitlist = parse_commits($hash, 101, (100 * $page), undef,
+ $greptype, '--regexp-ignore-case',
+ $search_use_regexp ? '--extended-regexp' : '--fixed-strings');
my $paging_nav = '';
if ($page > 0) {
$paging_nav .=
$cgi->a({-href => href(action=>"search", hash=>$hash,
- searchtext=>$searchtext, searchtype=>$searchtype)},
+ searchtext=>$searchtext,
+ searchtype=>$searchtype)},
"first");
$paging_nav .= " ⋅ " .
$cgi->a({-href => href(-replay=>1, page=>$page-1),
print "<table class=\"pickaxe search\">\n";
my $alternate = 1;
$/ = "\n";
- my $git_command = git_cmd_str();
- my $searchqtext = $searchtext;
- $searchqtext =~ s/'/'\\''/;
- open my $fd, "-|", "$git_command rev-list $hash | " .
- "$git_command diff-tree -r --stdin -S\'$searchqtext\'";
+ open my $fd, '-|', git_cmd(), '--no-pager', 'log', @diff_opts,
+ '--pretty=format:%H', '--no-abbrev', '--raw', "-S$searchtext",
+ ($search_use_regexp ? '--pickaxe-regex' : ());
undef %co;
my @files;
while (my $line = <$fd>) {
- if (%co && $line =~ m/^:([0-7]{6}) ([0-7]{6}) ([0-9a-fA-F]{40}) ([0-9a-fA-F]{40}) (.)\t(.*)$/) {
- my %set;
- $set{'file'} = $6;
- $set{'from_id'} = $3;
- $set{'to_id'} = $4;
- $set{'id'} = $set{'to_id'};
- if ($set{'id'} =~ m/0{40}/) {
- $set{'id'} = $set{'from_id'};
- }
- if ($set{'id'} =~ m/0{40}/) {
- next;
- }
- push @files, \%set;
- } elsif ($line =~ m/^([0-9a-fA-F]{40})$/){
+ chomp $line;
+ next unless $line;
+
+ my %set = parse_difftree_raw_line($line);
+ if (defined $set{'commit'}) {
+ # finish previous commit
if (%co) {
- if ($alternate) {
- print "<tr class=\"dark\">\n";
- } else {
- print "<tr class=\"light\">\n";
- }
- $alternate ^= 1;
- my $author = chop_and_escape_str($co{'author_name'}, 15, 5);
- print "<td title=\"$co{'age_string_age'}\"><i>$co{'age_string_date'}</i></td>\n" .
- "<td><i>" . $author . "</i></td>\n" .
- "<td>" .
- $cgi->a({-href => href(action=>"commit", hash=>$co{'id'}),
- -class => "list subject"},
- chop_and_escape_str($co{'title'}, 50) . "<br/>");
- while (my $setref = shift @files) {
- my %set = %$setref;
- print $cgi->a({-href => href(action=>"blob", hash_base=>$co{'id'},
- hash=>$set{'id'}, file_name=>$set{'file'}),
- -class => "list"},
- "<span class=\"match\">" . esc_path($set{'file'}) . "</span>") .
- "<br/>\n";
- }
print "</td>\n" .
"<td class=\"link\">" .
$cgi->a({-href => href(action=>"commit", hash=>$co{'id'})}, "commit") .
print "</td>\n" .
"</tr>\n";
}
- %co = parse_commit($1);
+
+ if ($alternate) {
+ print "<tr class=\"dark\">\n";
+ } else {
+ print "<tr class=\"light\">\n";
+ }
+ $alternate ^= 1;
+ %co = parse_commit($set{'commit'});
+ my $author = chop_and_escape_str($co{'author_name'}, 15, 5);
+ print "<td title=\"$co{'age_string_age'}\"><i>$co{'age_string_date'}</i></td>\n" .
+ "<td><i>$author</i></td>\n" .
+ "<td>" .
+ $cgi->a({-href => href(action=>"commit", hash=>$co{'id'}),
+ -class => "list subject"},
+ chop_and_escape_str($co{'title'}, 50) . "<br/>");
+ } elsif (defined $set{'to_id'}) {
+ next if ($set{'to_id'} =~ m/^0{40}$/);
+
+ print $cgi->a({-href => href(action=>"blob", hash_base=>$co{'id'},
+ hash=>$set{'to_id'}, file_name=>$set{'to_file'}),
+ -class => "list"},
+ "<span class=\"match\">" . esc_path($set{'file'}) . "</span>") .
+ "<br/>\n";
}
}
close $fd;
+ # finish last commit (warning: repetition!)
+ if (%co) {
+ print "</td>\n" .
+ "<td class=\"link\">" .
+ $cgi->a({-href => href(action=>"commit", hash=>$co{'id'})}, "commit") .
+ " | " .
+ $cgi->a({-href => href(action=>"tree", hash=>$co{'tree'}, hash_base=>$co{'id'})}, "tree");
+ print "</td>\n" .
+ "</tr>\n";
+ }
+
print "</table>\n";
}
my $alternate = 1;
my $matches = 0;
$/ = "\n";
- open my $fd, "-|", git_cmd(), 'grep', '-n', '-i', '-E', $searchtext, $co{'tree'};
+ open my $fd, "-|", git_cmd(), 'grep', '-n',
+ $search_use_regexp ? ('-E', '-i') : '-F',
+ $searchtext, $co{'tree'};
my $lastfile = '';
while (my $line = <$fd>) {
chomp $line;
print "<div class=\"binary\">Binary file</div>\n";
} else {
$ltext = untabify($ltext);
- if ($ltext =~ m/^(.*)($searchtext)(.*)$/i) {
+ if ($ltext =~ m/^(.*)($search_regexp)(.*)$/i) {
$ltext = esc_html($1, -nbsp=>1);
$ltext .= '<span class="match">';
$ltext .= esc_html($2, -nbsp=>1);
git_header_html();
git_print_page_nav('','', $hash,$hash,$hash);
print <<EOT;
+<p><strong>Pattern</strong> is by default a plain string that is matched literally (ignoring
+case, except in the case of pickaxe). However, when you check the <em>re</em> checkbox,
+the pattern entered is interpreted as a POSIX extended
+<a href="http://en.wikipedia.org/wiki/Regular_expression">regular expression</a>, also matched
+case-insensitively.</p>
<dl>
<dt><b>commit</b></dt>
-<dd>The commit messages and authorship information will be scanned for the given string.</dd>
+<dd>The commit messages and authorship information will be scanned for the given pattern.</dd>
EOT
my ($have_grep) = gitweb_check_feature('grep');
if ($have_grep) {
print <<EOT;
<dt><b>grep</b></dt>
<dd>All files in the currently selected tree (HEAD unless you are explicitly browsing
- a different one) are searched for the given
-<a href="http://en.wikipedia.org/wiki/Regular_expression">regular expression</a>
-(POSIX extended) and the matches are listed. On large
-trees, this search can take a while and put some strain on the server, so please use it with
-some consideration.</dd>
+ a different one) are searched for the given pattern. On large trees, this search can take
+a while and put some strain on the server, so please use it with some consideration. Note that
+due to a git-grep peculiarity, matches are currently case-sensitive when regexp mode is
+turned off.</dd>
EOT
}
print <<EOT;
<dt><b>author</b></dt>
-<dd>Name and e-mail of the change author and date of birth of the patch will be scanned for the given string.</dd>
+<dd>Name and e-mail of the change author and date of birth of the patch will be scanned for the given pattern.</dd>
<dt><b>committer</b></dt>
-<dd>Name and e-mail of the committer and date of commit will be scanned for the given string.</dd>
+<dd>Name and e-mail of the committer and date of commit will be scanned for the given pattern.</dd>
EOT
my ($have_pickaxe) = gitweb_check_feature('pickaxe');
if ($have_pickaxe) {
<dt><b>pickaxe</b></dt>
<dd>All commits that caused the string to appear or disappear from any file (changes that
added, removed or "modified" the string) will be listed. This search can take a while and
-takes a lot of strain on the server, so please use it wisely.</dd>
+can put a lot of strain on the server, so please use it wisely. Note that this search is
+case sensitive, since you may be interested even in changes that merely alter the case.</dd>
EOT
}
print "</dl>\n";
# log/feed of current (HEAD) branch, log of given branch, history of file/directory
my $head = $hash || 'HEAD';
- my @commitlist = parse_commits($head, 150, 0, undef, $file_name);
+ my @commitlist = parse_commits($head, 150, 0, $file_name);
my %latest_commit;
my %latest_date;
* the existing entry, or the empty slot if none existed. The caller
* can then look at the (*ptr) to see whether it existed or not.
*/
-static struct hash_table_entry *lookup_hash_entry(unsigned int hash, struct hash_table *table)
+static struct hash_table_entry *lookup_hash_entry(unsigned int hash, const struct hash_table *table)
{
unsigned int size = table->size, nr = hash % size;
struct hash_table_entry *array = table->array;
free(old_array);
}
-void *lookup_hash(unsigned int hash, struct hash_table *table)
+void *lookup_hash(unsigned int hash, const struct hash_table *table)
{
if (!table->array)
return NULL;
return insert_hash_entry(hash, ptr, table);
}
-int for_each_hash(struct hash_table *table, int (*fn)(void *))
+int for_each_hash(const struct hash_table *table, int (*fn)(void *))
{
int sum = 0;
unsigned int i;
struct hash_table_entry *array;
};
-extern void *lookup_hash(unsigned int hash, struct hash_table *table);
+extern void *lookup_hash(unsigned int hash, const struct hash_table *table);
extern void **insert_hash(unsigned int hash, void *ptr, struct hash_table *table);
-extern int for_each_hash(struct hash_table *table, int (*fn)(void *));
+extern int for_each_hash(const struct hash_table *table, int (*fn)(void *));
extern void free_hash(struct hash_table *table);
static inline void init_hash(struct hash_table *table)
/* Send delete request */
fprintf(stderr, "Removing remote branch '%s'\n", remote_ref->name);
+ if (dry_run)
+ return 0;
url = xmalloc(strlen(remote->url) + strlen(remote_ref->name) + 1);
sprintf(url, "%s%s", remote->url, remote_ref->name);
slot = get_active_slot();
memset(remote_dir_exists, -1, 256);
- http_init();
+ http_init(NULL);
no_pragma_header = curl_slist_append(no_pragma_header, "Pragma:");
if (!ref->peer_ref)
continue;
+
+ if (is_zero_sha1(ref->peer_ref->new_sha1)) {
+ if (delete_remote_branch(ref->name, 1) == -1) {
+ error("Could not remove %s", ref->name);
+ rc = -4;
+ }
+ new_refs++;
+ continue;
+ }
+
if (!hashcmp(ref->old_sha1, ref->peer_ref->new_sha1)) {
if (push_verbosely || 1)
fprintf(stderr, "'%s': up-to-date\n", ref->name);
}
}
hashcpy(ref->new_sha1, ref->peer_ref->new_sha1);
- if (is_zero_sha1(ref->new_sha1)) {
- error("cannot happen anymore");
- rc = -3;
- continue;
- }
new_refs++;
strcpy(old_hex, sha1_to_hex(ref->old_sha1));
new_hex = sha1_to_hex(ref->new_sha1);
curl_slist_free_all(data->no_pragma_header);
}
-struct walker *get_http_walker(const char *url)
+struct walker *get_http_walker(const char *url, struct remote *remote)
{
char *s;
struct walker_data *data = xmalloc(sizeof(struct walker_data));
struct walker *walker = xmalloc(sizeof(struct walker));
- http_init();
+ http_init(remote);
data->no_pragma_header = curl_slist_append(NULL, "Pragma:");
return result;
}
-void http_init(void)
+void http_init(struct remote *remote)
{
char *low_speed_limit;
char *low_speed_time;
curl_global_init(CURL_GLOBAL_ALL);
+ if (remote && remote->http_proxy)
+ curl_http_proxy = xstrdup(remote->http_proxy);
+
pragma_header = curl_slist_append(pragma_header, "Pragma: no-cache");
#ifdef USE_CURL_MULTI
void http_cleanup(void)
{
struct active_request_slot *slot = active_queue_head;
-#ifdef USE_CURL_MULTI
- char *wait_url;
-#endif
while (slot != NULL) {
struct active_request_slot *next = slot->next;
+ if (slot->curl != NULL) {
#ifdef USE_CURL_MULTI
- if (slot->in_use) {
- curl_easy_getinfo(slot->curl,
- CURLINFO_EFFECTIVE_URL,
- &wait_url);
- fprintf(stderr, "Waiting for %s\n", wait_url);
- run_active_slot(slot);
- }
+ curl_multi_remove_handle(curlm, slot->curl);
#endif
- if (slot->curl != NULL)
curl_easy_cleanup(slot->curl);
+ }
free(slot);
slot = next;
}
curl_slist_free_all(pragma_header);
pragma_header = NULL;
+
+ if (curl_http_proxy) {
+ free(curl_http_proxy);
+ curl_http_proxy = NULL;
+ }
}
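http_init() now receives the remote so that a per-remote proxy can be picked up and later freed in http_cleanup(). Assuming the value comes from the remote.<name>.proxy configuration (as the remote->http_proxy field suggests), enabling it would look like:

	# route only this remote's HTTP traffic through a proxy (example host)
	git config remote.origin.proxy http://proxy.example.com:8080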
struct active_request_slot *get_active_slot(void)
#include <curl/easy.h>
#include "strbuf.h"
+#include "remote.h"
/*
* We detect based on the cURL version if multi-transfer is
extern void step_active_slots(void);
#endif
-extern void http_init(void);
+extern void http_init(struct remote *remote);
extern void http_cleanup(void);
extern int data_received;
static const char co_env[] = "GIT_COMMITTER_NAME";
static const char *env_hint =
"\n"
-"*** Your name cannot be determined from your system services (gecos).\n"
+"*** Please tell me who you are.\n"
"\n"
"Run\n"
"\n"
#include "tag.h"
#include "tree.h"
#include "progress.h"
+#include "fsck.h"
static const char index_pack_usage[] =
-"git-index-pack [-v] [-o <index-file>] [{ ---keep | --keep=<msg> }] { <pack-file> | --stdin [--fix-thin] [<pack-file>] }";
+"git-index-pack [-v] [-o <index-file>] [{ ---keep | --keep=<msg> }] [--strict] { <pack-file> | --stdin [--fix-thin] [<pack-file>] }";
struct object_entry
{
*/
#define UNION_BASE_SZ 20
+#define FLAG_LINK (1u<<20)
+#define FLAG_CHECKED (1u<<21)
+
struct delta_entry
{
union delta_base base;
static int nr_resolved_deltas;
static int from_stdin;
+static int strict;
static int verbose;
static struct progress *progress;
static uint32_t input_crc32;
static int input_fd, output_fd, pack_fd;
+static int mark_link(struct object *obj, int type, void *data)
+{
+ if (!obj)
+ return -1;
+
+ if (type != OBJ_ANY && obj->type != type)
+ die("object type mismatch at %s", sha1_to_hex(obj->sha1));
+
+ obj->flags |= FLAG_LINK;
+ return 0;
+}
+
+/* The content of each linked object must have been checked
+ or it must be already present in the object database */
+static void check_object(struct object *obj)
+{
+ if (!obj)
+ return;
+
+ if (!(obj->flags & FLAG_LINK))
+ return;
+
+ if (!(obj->flags & FLAG_CHECKED)) {
+ unsigned long size;
+ int type = sha1_object_info(obj->sha1, &size);
+ if (type != obj->type || type <= 0)
+ die("object of unexpected type");
+ obj->flags |= FLAG_CHECKED;
+ return;
+ }
+}
+
+static void check_objects(void)
+{
+ unsigned i, max;
+
+ max = get_max_object_index();
+ for (i = 0; i < max; i++)
+ check_object(get_indexed_object(i));
+}
+
+
/* Discard current buffer used content. */
static void flush(void)
{
die("SHA1 COLLISION FOUND WITH %s !", sha1_to_hex(sha1));
free(has_data);
}
+ if (strict) {
+ if (type == OBJ_BLOB) {
+ struct blob *blob = lookup_blob(sha1);
+ if (blob)
+ blob->object.flags |= FLAG_CHECKED;
+ else
+ die("invalid blob object %s", sha1_to_hex(sha1));
+ } else {
+ struct object *obj;
+ int eaten;
+ void *buf = (void *) data;
+
+ /*
+ * we do not need to free the memory here, as the
+ * buf is deleted by the caller.
+ */
+ obj = parse_object_buffer(sha1, type, size, buf, &eaten);
+ if (!obj)
+ die("invalid %s", typename(type));
+ if (fsck_object(obj, 1, fsck_error_function))
+ die("Error in object");
+ if (fsck_walk(obj, mark_link, 0))
+ die("Not all child objects of %s are reachable", sha1_to_hex(obj->sha1));
+
+ if (obj->type == OBJ_TREE) {
+ struct tree *item = (struct tree *) obj;
+ item->buffer = NULL;
+ }
+ if (obj->type == OBJ_COMMIT) {
+ struct commit *commit = (struct commit *) obj;
+ commit->buffer = NULL;
+ }
+ obj->flags |= FLAG_CHECKED;
+ }
+ }
}
static void resolve_delta(struct object_entry *delta_obj, void *base_data,
from_stdin = 1;
} else if (!strcmp(arg, "--fix-thin")) {
fix_thin_pack = 1;
+ } else if (!strcmp(arg, "--strict")) {
+ strict = 1;
} else if (!strcmp(arg, "--keep")) {
keep_msg = "";
} else if (!prefixcmp(arg, "--keep=")) {
nr_deltas - nr_resolved_deltas);
}
free(deltas);
+ if (strict)
+ check_objects();
idx_objects = xmalloc((nr_objects) * sizeof(struct pack_idx_entry *));
for (i = 0; i < nr_objects; i++)
--- /dev/null
+/*
+ * Low level 3-way in-core file merge.
+ *
+ * Copyright (c) 2007 Junio C Hamano
+ */
+
+#include "cache.h"
+#include "attr.h"
+#include "xdiff-interface.h"
+#include "run-command.h"
+#include "interpolate.h"
+#include "ll-merge.h"
+
+struct ll_merge_driver;
+
+typedef int (*ll_merge_fn)(const struct ll_merge_driver *,
+ mmbuffer_t *result,
+ const char *path,
+ mmfile_t *orig,
+ mmfile_t *src1, const char *name1,
+ mmfile_t *src2, const char *name2,
+ int virtual_ancestor);
+
+struct ll_merge_driver {
+ const char *name;
+ const char *description;
+ ll_merge_fn fn;
+ const char *recursive;
+ struct ll_merge_driver *next;
+ char *cmdline;
+};
+
+/*
+ * Built-in low-levels
+ */
+static int ll_binary_merge(const struct ll_merge_driver *drv_unused,
+ mmbuffer_t *result,
+ const char *path_unused,
+ mmfile_t *orig,
+ mmfile_t *src1, const char *name1,
+ mmfile_t *src2, const char *name2,
+ int virtual_ancestor)
+{
+ /*
+ * The tentative merge result is "ours" for the final round,
+ * or common ancestor for an internal merge. Still return
+ * "conflicted merge" status.
+ */
+ mmfile_t *stolen = virtual_ancestor ? orig : src1;
+
+ result->ptr = stolen->ptr;
+ result->size = stolen->size;
+ stolen->ptr = NULL;
+ return 1;
+}
+
+static int ll_xdl_merge(const struct ll_merge_driver *drv_unused,
+ mmbuffer_t *result,
+ const char *path_unused,
+ mmfile_t *orig,
+ mmfile_t *src1, const char *name1,
+ mmfile_t *src2, const char *name2,
+ int virtual_ancestor)
+{
+ xpparam_t xpp;
+
+ if (buffer_is_binary(orig->ptr, orig->size) ||
+ buffer_is_binary(src1->ptr, src1->size) ||
+ buffer_is_binary(src2->ptr, src2->size)) {
+ warning("Cannot merge binary files: %s vs. %s\n",
+ name1, name2);
+ return ll_binary_merge(drv_unused, result,
+ path_unused,
+ orig, src1, name1,
+ src2, name2,
+ virtual_ancestor);
+ }
+
+ memset(&xpp, 0, sizeof(xpp));
+ return xdl_merge(orig,
+ src1, name1,
+ src2, name2,
+ &xpp, XDL_MERGE_ZEALOUS,
+ result);
+}
+
+static int ll_union_merge(const struct ll_merge_driver *drv_unused,
+ mmbuffer_t *result,
+ const char *path_unused,
+ mmfile_t *orig,
+ mmfile_t *src1, const char *name1,
+ mmfile_t *src2, const char *name2,
+ int virtual_ancestor)
+{
+ char *src, *dst;
+ long size;
+ const int marker_size = 7;
+
+ int status = ll_xdl_merge(drv_unused, result, path_unused,
+ orig, src1, NULL, src2, NULL,
+ virtual_ancestor);
+ if (status <= 0)
+ return status;
+ size = result->size;
+ src = dst = result->ptr;
+ while (size) {
+ char ch;
+ if ((marker_size < size) &&
+ (*src == '<' || *src == '=' || *src == '>')) {
+ int i;
+ ch = *src;
+ for (i = 0; i < marker_size; i++)
+ if (src[i] != ch)
+ goto not_a_marker;
+ if (src[marker_size] != '\n')
+ goto not_a_marker;
+ src += marker_size + 1;
+ size -= marker_size + 1;
+ continue;
+ }
+ not_a_marker:
+ do {
+ ch = *src++;
+ *dst++ = ch;
+ size--;
+ } while (ch != '\n' && size);
+ }
+ result->size = dst - result->ptr;
+ return 0;
+}
+
+#define LL_BINARY_MERGE 0
+#define LL_TEXT_MERGE 1
+#define LL_UNION_MERGE 2
+static struct ll_merge_driver ll_merge_drv[] = {
+ { "binary", "built-in binary merge", ll_binary_merge },
+ { "text", "built-in 3-way text merge", ll_xdl_merge },
+ { "union", "built-in union merge", ll_union_merge },
+};
+
+static void create_temp(mmfile_t *src, char *path)
+{
+ int fd;
+
+ strcpy(path, ".merge_file_XXXXXX");
+ fd = xmkstemp(path);
+ if (write_in_full(fd, src->ptr, src->size) != src->size)
+ die("unable to write temp-file");
+ close(fd);
+}
+
+/*
+ * User defined low-level merge driver support.
+ */
+static int ll_ext_merge(const struct ll_merge_driver *fn,
+ mmbuffer_t *result,
+ const char *path,
+ mmfile_t *orig,
+ mmfile_t *src1, const char *name1,
+ mmfile_t *src2, const char *name2,
+ int virtual_ancestor)
+{
+ char temp[3][50];
+ char cmdbuf[2048];
+ struct interp table[] = {
+ { "%O" },
+ { "%A" },
+ { "%B" },
+ };
+ struct child_process child;
+ const char *args[20];
+ int status, fd, i;
+ struct stat st;
+
+ if (fn->cmdline == NULL)
+ die("custom merge driver %s lacks command line.", fn->name);
+
+ result->ptr = NULL;
+ result->size = 0;
+ create_temp(orig, temp[0]);
+ create_temp(src1, temp[1]);
+ create_temp(src2, temp[2]);
+
+ interp_set_entry(table, 0, temp[0]);
+ interp_set_entry(table, 1, temp[1]);
+ interp_set_entry(table, 2, temp[2]);
+
+ interpolate(cmdbuf, sizeof(cmdbuf), fn->cmdline, table, 3);
+
+ memset(&child, 0, sizeof(child));
+ child.argv = args;
+ args[0] = "sh";
+ args[1] = "-c";
+ args[2] = cmdbuf;
+ args[3] = NULL;
+
+ status = run_command(&child);
+ if (status < -ERR_RUN_COMMAND_FORK)
+ ; /* failure in run-command */
+ else
+ status = -status;
+ fd = open(temp[1], O_RDONLY);
+ if (fd < 0)
+ goto bad;
+ if (fstat(fd, &st))
+ goto close_bad;
+ result->size = st.st_size;
+ result->ptr = xmalloc(result->size + 1);
+ if (read_in_full(fd, result->ptr, result->size) != result->size) {
+ free(result->ptr);
+ result->ptr = NULL;
+ result->size = 0;
+ }
+ close_bad:
+ close(fd);
+ bad:
+ for (i = 0; i < 3; i++)
+ unlink(temp[i]);
+ return status;
+}
+
+/*
+ * merge.default and merge.driver configuration items
+ */
+static struct ll_merge_driver *ll_user_merge, **ll_user_merge_tail;
+static const char *default_ll_merge;
+
+static int read_merge_config(const char *var, const char *value)
+{
+ struct ll_merge_driver *fn;
+ const char *ep, *name;
+ int namelen;
+
+ if (!strcmp(var, "merge.default")) {
+ if (value)
+ default_ll_merge = strdup(value);
+ return 0;
+ }
+
+ /*
+ * We are not interested in anything but "merge.<name>.variable";
+ * especially, we do not want to look at variables such as
+ * "merge.summary", "merge.tool", and "merge.verbosity".
+ */
+ if (prefixcmp(var, "merge.") || (ep = strrchr(var, '.')) == var + 5)
+ return 0;
+
+ /*
+ * Find existing one as we might be processing merge.<name>.var2
+ * after seeing merge.<name>.var1.
+ */
+ name = var + 6;
+ namelen = ep - name;
+ for (fn = ll_user_merge; fn; fn = fn->next)
+ if (!strncmp(fn->name, name, namelen) && !fn->name[namelen])
+ break;
+ if (!fn) {
+ fn = xcalloc(1, sizeof(struct ll_merge_driver));
+ fn->name = xmemdupz(name, namelen);
+ fn->fn = ll_ext_merge;
+ *ll_user_merge_tail = fn;
+ ll_user_merge_tail = &(fn->next);
+ }
+
+ ep++;
+
+ if (!strcmp("name", ep)) {
+ if (!value)
+ return error("%s: lacks value", var);
+ fn->description = strdup(value);
+ return 0;
+ }
+
+ if (!strcmp("driver", ep)) {
+ if (!value)
+ return error("%s: lacks value", var);
+ /*
+ * merge.<name>.driver specifies the command line:
+ *
+ * command-line
+ *
+ * The command-line will be interpolated with the following
+ * tokens and is given to the shell:
+ *
+ * %O - temporary file name for the merge base.
+ * %A - temporary file name for our version.
+	 * %B - temporary file name for the other branch's version.
+	 *
+	 * The external merge driver should write the result to the
+	 * file named by %A, and signal that it is done with a zero
+	 * exit status.
+ */
+ fn->cmdline = strdup(value);
+ return 0;
+ }
+
+ if (!strcmp("recursive", ep)) {
+ if (!value)
+ return error("%s: lacks value", var);
+ fn->recursive = strdup(value);
+ return 0;
+ }
+
+ return 0;
+}
+
+static void initialize_ll_merge(void)
+{
+ if (ll_user_merge_tail)
+ return;
+ ll_user_merge_tail = &ll_user_merge;
+ git_config(read_merge_config);
+}
+
+static const struct ll_merge_driver *find_ll_merge_driver(const char *merge_attr)
+{
+ struct ll_merge_driver *fn;
+ const char *name;
+ int i;
+
+ initialize_ll_merge();
+
+ if (ATTR_TRUE(merge_attr))
+ return &ll_merge_drv[LL_TEXT_MERGE];
+ else if (ATTR_FALSE(merge_attr))
+ return &ll_merge_drv[LL_BINARY_MERGE];
+ else if (ATTR_UNSET(merge_attr)) {
+ if (!default_ll_merge)
+ return &ll_merge_drv[LL_TEXT_MERGE];
+ else
+ name = default_ll_merge;
+ }
+ else
+ name = merge_attr;
+
+ for (fn = ll_user_merge; fn; fn = fn->next)
+ if (!strcmp(fn->name, name))
+ return fn;
+
+ for (i = 0; i < ARRAY_SIZE(ll_merge_drv); i++)
+ if (!strcmp(ll_merge_drv[i].name, name))
+ return &ll_merge_drv[i];
+
+ /* default to the 3-way */
+ return &ll_merge_drv[LL_TEXT_MERGE];
+}
+
+static const char *git_path_check_merge(const char *path)
+{
+ static struct git_attr_check attr_merge_check;
+
+ if (!attr_merge_check.attr)
+ attr_merge_check.attr = git_attr("merge", 5);
+
+ if (git_checkattr(path, 1, &attr_merge_check))
+ return NULL;
+ return attr_merge_check.value;
+}
+
+int ll_merge(mmbuffer_t *result_buf,
+ const char *path,
+ mmfile_t *ancestor,
+ mmfile_t *ours, const char *our_label,
+ mmfile_t *theirs, const char *their_label,
+ int virtual_ancestor)
+{
+ const char *ll_driver_name;
+ const struct ll_merge_driver *driver;
+
+ ll_driver_name = git_path_check_merge(path);
+ driver = find_ll_merge_driver(ll_driver_name);
+
+ if (virtual_ancestor && driver->recursive)
+ driver = find_ll_merge_driver(driver->recursive);
+ return driver->fn(driver, result_buf, path,
+ ancestor,
+ ours, our_label,
+ theirs, their_label, virtual_ancestor);
+}
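ll-merge.c above dispatches between the built-in drivers and user-defined ones configured through merge.<name>.driver. As an illustrative sketch only (the driver name "filfre" and its command line are made up), a custom driver is declared in the configuration and selected per path with the merge attribute:

	# %O, %A and %B are replaced with temporary files holding the merge
	# base, our version, and the other branch's version
	git config merge.filfre.name "feel-free merge driver"
	git config merge.filfre.driver "filfre %O %A %B"
	git config merge.filfre.recursive binary

	# use the driver for *.mft paths
	echo "*.mft merge=filfre" >>.gitattributes

The command must leave its result in the file named by %A and exit with status 0 to signal a clean merge; merge.<name>.recursive names the driver to use for internal merges between common ancestors.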
--- /dev/null
+/*
+ * Low level 3-way in-core file merge.
+ */
+
+#ifndef LL_MERGE_H
+#define LL_MERGE_H
+
+int ll_merge(mmbuffer_t *result_buf,
+ const char *path,
+ mmfile_t *ancestor,
+ mmfile_t *ours, const char *our_label,
+ mmfile_t *theirs, const char *their_label,
+ int virtual_ancestor);
+
+#endif
return res;
}
-static void resolve(const char *base, struct name_entry *branch1, struct name_entry *result)
+static char *traverse_path(const struct traverse_info *info, const struct name_entry *n)
+{
+ char *path = xmalloc(traverse_path_len(info, n) + 1);
+ return make_traverse_path(path, info, n);
+}
+
+static void resolve(const struct traverse_info *info, struct name_entry *branch1, struct name_entry *result)
{
struct merge_list *orig, *final;
const char *path;
if (!branch1)
return;
- path = xstrdup(mkpath("%s%s", base, result->path));
+ path = traverse_path(info, result);
orig = create_entry(2, branch1->mode, branch1->sha1, path);
final = create_entry(0, result->mode, result->sha1, path);
add_merge_entry(final);
}
-static int unresolved_directory(const char *base, struct name_entry n[3])
+static int unresolved_directory(const struct traverse_info *info, struct name_entry n[3])
{
- int baselen, pathlen;
char *newbase;
struct name_entry *p;
struct tree_desc t[3];
}
if (!S_ISDIR(p->mode))
return 0;
- baselen = strlen(base);
- pathlen = tree_entry_len(p->path, p->sha1);
- newbase = xmalloc(baselen + pathlen + 2);
- memcpy(newbase, base, baselen);
- memcpy(newbase + baselen, p->path, pathlen);
- memcpy(newbase + baselen + pathlen, "/", 2);
-
+ newbase = traverse_path(info, p);
buf0 = fill_tree_descriptor(t+0, n[0].sha1);
buf1 = fill_tree_descriptor(t+1, n[1].sha1);
buf2 = fill_tree_descriptor(t+2, n[2].sha1);
}
-static struct merge_list *link_entry(unsigned stage, const char *base, struct name_entry *n, struct merge_list *entry)
+static struct merge_list *link_entry(unsigned stage, const struct traverse_info *info, struct name_entry *n, struct merge_list *entry)
{
const char *path;
struct merge_list *link;
if (entry)
path = entry->path;
else
- path = xstrdup(mkpath("%s%s", base, n->path));
+ path = traverse_path(info, n);
link = create_entry(stage, n->mode, n->sha1, path);
link->link = entry;
return link;
}
-static void unresolved(const char *base, struct name_entry n[3])
+static void unresolved(const struct traverse_info *info, struct name_entry n[3])
{
struct merge_list *entry = NULL;
- if (unresolved_directory(base, n))
+ if (unresolved_directory(info, n))
return;
/*
* list has the stages in order - link_entry adds new
* links at the front.
*/
- entry = link_entry(3, base, n + 2, entry);
- entry = link_entry(2, base, n + 1, entry);
- entry = link_entry(1, base, n + 0, entry);
+ entry = link_entry(3, info, n + 2, entry);
+ entry = link_entry(2, info, n + 1, entry);
+ entry = link_entry(1, info, n + 0, entry);
add_merge_entry(entry);
}
* The successful merge rules are the same as for the three-way merge
* in git-read-tree.
*/
-static void threeway_callback(int n, unsigned long mask, struct name_entry *entry, const char *base)
+static int threeway_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *entry, struct traverse_info *info)
{
/* Same in both? */
if (same_entry(entry+1, entry+2)) {
if (entry[0].sha1) {
- resolve(base, NULL, entry+1);
- return;
+ resolve(info, NULL, entry+1);
+ return mask;
}
}
if (same_entry(entry+0, entry+1)) {
if (entry[2].sha1 && !S_ISDIR(entry[2].mode)) {
- resolve(base, entry+1, entry+2);
- return;
+ resolve(info, entry+1, entry+2);
+ return mask;
}
}
if (same_entry(entry+0, entry+2)) {
if (entry[1].sha1 && !S_ISDIR(entry[1].mode)) {
- resolve(base, NULL, entry+1);
- return;
+ resolve(info, NULL, entry+1);
+ return mask;
}
}
- unresolved(base, entry);
+ unresolved(info, entry);
+ return mask;
}
static void merge_trees(struct tree_desc t[3], const char *base)
{
- traverse_trees(3, t, base, threeway_callback);
+ struct traverse_info info;
+
+ setup_traverse_info(&info, base);
+ info.fn = threeway_callback;
+ traverse_trees(3, t, &info);
}
static void *get_tree_descriptor(struct tree_desc *desc, const char *rev)
+++ /dev/null
-#include "cache.h"
-#include "object.h"
-#include "decorate.h"
-
-int track_object_refs = 0;
-
-static struct decoration ref_decorate;
-
-struct object_refs *lookup_object_refs(struct object *base)
-{
- return lookup_decoration(&ref_decorate, base);
-}
-
-static void add_object_refs(struct object *obj, struct object_refs *refs)
-{
- if (add_decoration(&ref_decorate, obj, refs))
- die("object %s tried to add refs twice!", sha1_to_hex(obj->sha1));
-}
-
-struct object_refs *alloc_object_refs(unsigned count)
-{
- struct object_refs *refs;
- size_t size = sizeof(*refs) + count*sizeof(struct object *);
-
- refs = xcalloc(1, size);
- refs->count = count;
- return refs;
-}
-
-static int compare_object_pointers(const void *a, const void *b)
-{
- const struct object * const *pa = a;
- const struct object * const *pb = b;
- if (*pa == *pb)
- return 0;
- else if (*pa < *pb)
- return -1;
- else
- return 1;
-}
-
-void set_object_refs(struct object *obj, struct object_refs *refs)
-{
- unsigned int i, j;
-
- /* Do not install empty list of references */
- if (refs->count < 1) {
- free(refs);
- return;
- }
-
- /* Sort the list and filter out duplicates */
- qsort(refs->ref, refs->count, sizeof(refs->ref[0]),
- compare_object_pointers);
- for (i = j = 1; i < refs->count; i++) {
- if (refs->ref[i] != refs->ref[i - 1])
- refs->ref[j++] = refs->ref[i];
- }
- if (j < refs->count) {
- /* Duplicates were found - reallocate list */
- size_t size = sizeof(*refs) + j*sizeof(struct object *);
- refs->count = j;
- refs = xrealloc(refs, size);
- }
-
- for (i = 0; i < refs->count; i++)
- refs->ref[i]->used = 1;
- add_object_refs(obj, refs);
-}
-
-void mark_reachable(struct object *obj, unsigned int mask)
-{
- const struct object_refs *refs;
-
- if (!track_object_refs)
- die("cannot do reachability with object refs turned off");
- /* If we've been here already, don't bother */
- if (obj->flags & mask)
- return;
- obj->flags |= mask;
- refs = lookup_object_refs(obj);
- if (refs) {
- unsigned i;
- for (i = 0; i < refs->count; i++)
- mark_reachable(refs->ref[i], mask);
- }
-}
unsigned char sha1[20];
};
-extern int track_object_refs;
-
extern const char *typename(unsigned int type);
extern int type_from_string(const char *str);
extern unsigned int get_max_object_index(void);
extern struct object *get_indexed_object(unsigned int);
-extern struct object_refs *lookup_object_refs(struct object *);
/** Internal only **/
struct object *lookup_object(const unsigned char *sha1);
/** Returns the object, with potentially excess memory allocated. **/
struct object *lookup_unknown_object(const unsigned char *sha1);
-struct object_refs *alloc_object_refs(unsigned count);
-void set_object_refs(struct object *obj, struct object_refs *refs);
-
-void mark_reachable(struct object *obj, unsigned int mask);
-
struct object_list *object_list_insert(struct object *item,
struct object_list **list_p);
#include "cache.h"
#include "pack.h"
+#include "pack-revindex.h"
struct idx_entry
{
static void show_pack_info(struct packed_git *p)
{
uint32_t nr_objects, i, chain_histogram[MAX_CHAIN+1];
+
nr_objects = p->num_objects;
memset(chain_histogram, 0, sizeof(chain_histogram));
+ init_pack_revindex();
for (i = 0; i < nr_objects; i++) {
const unsigned char *sha1;
base_sha1);
printf("%s ", sha1_to_hex(sha1));
if (!delta_chain_length)
- printf("%-6s %lu %"PRIuMAX"\n",
- type, size, (uintmax_t)offset);
+ printf("%-6s %lu %lu %"PRIuMAX"\n",
+ type, size, store_size, (uintmax_t)offset);
else {
- printf("%-6s %lu %"PRIuMAX" %u %s\n",
- type, size, (uintmax_t)offset,
+ printf("%-6s %lu %lu %"PRIuMAX" %u %s\n",
+ type, size, store_size, (uintmax_t)offset,
delta_chain_length, sha1_to_hex(base_sha1));
if (delta_chain_length <= MAX_CHAIN)
chain_histogram[delta_chain_length]++;
--- /dev/null
+#include "cache.h"
+#include "pack-revindex.h"
+
+/*
+ * The pack index for an existing pack gives us easy access to the offset
+ * into the corresponding pack file where each object's data starts, but
+ * the entries do not store the size of the compressed representation
+ * (the uncompressed size is easily available by examining the pack entry
+ * header).  It is also rather expensive to find the sha1 for an object
+ * given its offset.
+ *
+ * We build a hashtable of existing packs (pack_revindex), and keep a
+ * reverse index here -- the pack index file is sorted by object name and
+ * maps each name to an offset; this pack_revindex[].revindex array is a
+ * list of offset/index_nr pairs ordered by offset, so if you know the
+ * offset of an object, the next offset is where its packed representation
+ * ends, and index_nr can be used to look up the object's sha1 in the
+ * main index.
+ */
+
+struct pack_revindex {
+ struct packed_git *p;
+ struct revindex_entry *revindex;
+};
+
+static struct pack_revindex *pack_revindex;
+static int pack_revindex_hashsz;
+
+static int pack_revindex_ix(struct packed_git *p)
+{
+ unsigned long ui = (unsigned long)p;
+ int i;
+
+ ui = ui ^ (ui >> 16); /* defeat structure alignment */
+ i = (int)(ui % pack_revindex_hashsz);
+ while (pack_revindex[i].p) {
+ if (pack_revindex[i].p == p)
+ return i;
+ if (++i == pack_revindex_hashsz)
+ i = 0;
+ }
+ return -1 - i;
+}
+
+void init_pack_revindex(void)
+{
+ int num;
+ struct packed_git *p;
+
+ for (num = 0, p = packed_git; p; p = p->next)
+ num++;
+ if (!num)
+ return;
+ pack_revindex_hashsz = num * 11;
+ pack_revindex = xcalloc(sizeof(*pack_revindex), pack_revindex_hashsz);
+ for (p = packed_git; p; p = p->next) {
+ num = pack_revindex_ix(p);
+		num = -1 - num;
+ pack_revindex[num].p = p;
+ }
+ /* revindex elements are lazily initialized */
+}
+
+static int cmp_offset(const void *a_, const void *b_)
+{
+ const struct revindex_entry *a = a_;
+ const struct revindex_entry *b = b_;
+ return (a->offset < b->offset) ? -1 : (a->offset > b->offset) ? 1 : 0;
+}
+
+/*
+ * Ordered list of offsets of objects in the pack.
+ */
+static void create_pack_revindex(struct pack_revindex *rix)
+{
+ struct packed_git *p = rix->p;
+ int num_ent = p->num_objects;
+ int i;
+ const char *index = p->index_data;
+
+ rix->revindex = xmalloc(sizeof(*rix->revindex) * (num_ent + 1));
+ index += 4 * 256;
+
+ if (p->index_version > 1) {
+ const uint32_t *off_32 =
+ (uint32_t *)(index + 8 + p->num_objects * (20 + 4));
+ const uint32_t *off_64 = off_32 + p->num_objects;
+ for (i = 0; i < num_ent; i++) {
+ uint32_t off = ntohl(*off_32++);
+ if (!(off & 0x80000000)) {
+ rix->revindex[i].offset = off;
+ } else {
+ rix->revindex[i].offset =
+ ((uint64_t)ntohl(*off_64++)) << 32;
+ rix->revindex[i].offset |=
+ ntohl(*off_64++);
+ }
+ rix->revindex[i].nr = i;
+ }
+ } else {
+ for (i = 0; i < num_ent; i++) {
+ uint32_t hl = *((uint32_t *)(index + 24 * i));
+ rix->revindex[i].offset = ntohl(hl);
+ rix->revindex[i].nr = i;
+ }
+ }
+
+ /* This knows the pack format -- the 20-byte trailer
+ * follows immediately after the last object data.
+ */
+ rix->revindex[num_ent].offset = p->pack_size - 20;
+ rix->revindex[num_ent].nr = -1;
+ qsort(rix->revindex, num_ent, sizeof(*rix->revindex), cmp_offset);
+}
+
+struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs)
+{
+ int num;
+ int lo, hi;
+ struct pack_revindex *rix;
+ struct revindex_entry *revindex;
+
+ num = pack_revindex_ix(p);
+ if (num < 0)
+ die("internal error: pack revindex uninitialized");
+
+ rix = &pack_revindex[num];
+ if (!rix->revindex)
+ create_pack_revindex(rix);
+ revindex = rix->revindex;
+
+ lo = 0;
+ hi = p->num_objects + 1;
+ do {
+ int mi = (lo + hi) / 2;
+ if (revindex[mi].offset == ofs) {
+ return revindex + mi;
+ } else if (ofs < revindex[mi].offset)
+ hi = mi;
+ else
+ lo = mi + 1;
+ } while (lo < hi);
+ die("internal error: pack revindex corrupt");
+}
--- /dev/null
+#ifndef PACK_REVINDEX_H
+#define PACK_REVINDEX_H
+
+struct revindex_entry {
+ off_t offset;
+ unsigned int nr;
+};
+
+void init_pack_revindex(void);
+struct revindex_entry *find_pack_revindex(struct packed_git *p, off_t ofs);
+
+#endif
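With the reverse index, the on-disk size of an object is simply the difference between its own offset and the next entry's offset, which is what the builtin-verify-pack change earlier uses to fill in its new size-in-pack column. A rough way to see the result (pack name hypothetical):

	# columns: sha1, type, size, size-in-pack, offset[, depth, base-sha1]
	git verify-pack -v .git/objects/pack/pack-1234567890abcdef.idx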
struct optparse_t {
const char **argv;
- int argc;
+ const char **out;
+ int argc, cpidx;
const char *opt;
};
continue;
rest = skip_prefix(arg, options->long_name);
+ if (options->type == OPTION_ARGUMENT) {
+ if (!rest)
+ continue;
+ if (*rest == '=')
+ return opterror(options, "takes no value", flags);
+ if (*rest)
+ continue;
+ p->out[p->cpidx++] = arg - 2;
+ return 0;
+ }
if (!rest) {
/* abbreviated? */
if (!strncmp(options->long_name, arg, arg_end - arg)) {
int parse_options(int argc, const char **argv, const struct option *options,
const char * const usagestr[], int flags)
{
- struct optparse_t args = { argv + 1, argc - 1, NULL };
- int j = 0;
+ struct optparse_t args = { argv + 1, argv, argc - 1, 0, NULL };
for (; args.argc; args.argc--, args.argv++) {
const char *arg = args.argv[0];
if (*arg != '-' || !arg[1]) {
if (flags & PARSE_OPT_STOP_AT_NON_OPTION)
break;
- argv[j++] = args.argv[0];
+ args.out[args.cpidx++] = args.argv[0];
continue;
}
usage_with_options(usagestr, options);
}
- memmove(argv + j, args.argv, args.argc * sizeof(*argv));
- argv[j + args.argc] = NULL;
- return j + args.argc;
+ memmove(args.out + args.cpidx, args.argv, args.argc * sizeof(*args.out));
+ args.out[args.cpidx + args.argc] = NULL;
+ return args.cpidx + args.argc;
}
#define USAGE_OPTS_WIDTH 24
pos += fprintf(stderr, "--%s", opts->long_name);
switch (opts->type) {
+ case OPTION_ARGUMENT:
+ break;
case OPTION_INTEGER:
if (opts->flags & PARSE_OPT_OPTARG)
pos += fprintf(stderr, " [<n>]");
enum parse_opt_type {
/* special types */
OPTION_END,
+ OPTION_ARGUMENT,
OPTION_GROUP,
/* options with no arguments */
OPTION_BIT,
};
#define OPT_END() { OPTION_END }
+#define OPT_ARGUMENT(l, h) { OPTION_ARGUMENT, 0, (l), NULL, NULL, (h) }
#define OPT_GROUP(h) { OPTION_GROUP, 0, NULL, NULL, NULL, (h) }
#define OPT_BIT(s, l, v, h, b) { OPTION_BIT, (s), (l), (v), NULL, (h), 0, NULL, (b) }
#define OPT_BOOLEAN(s, l, v, h) { OPTION_BOOLEAN, (s), (l), (v), NULL, (h) }
? (S_IXGRP|S_IXOTH)
: 0));
if (S_ISDIR(mode))
- mode |= S_ISGID;
+ mode |= FORCE_DIR_SET_GID;
if ((mode & st.st_mode) != mode && chmod(path, mode) < 0)
return -2;
return 0;
if (last_slash) {
*last_slash = '\0';
last_elem = xstrdup(last_slash + 1);
- } else
+ } else {
last_elem = xstrdup(buf);
+ *buf = '\0';
+ }
}
if (*buf) {
fputc(terminator, fp);
}
+/* quote path as relative to the given prefix */
+char *quote_path_relative(const char *in, int len,
+ struct strbuf *out, const char *prefix)
+{
+ int needquote;
+
+ if (len < 0)
+ len = strlen(in);
+
+ /* "../" prefix itself does not need quoting, but "in" might. */
+ needquote = next_quote_pos(in, len) < len;
+ strbuf_setlen(out, 0);
+ strbuf_grow(out, len);
+
+ if (needquote)
+ strbuf_addch(out, '"');
+ if (prefix) {
+ int off = 0;
+ while (prefix[off] && off < len && prefix[off] == in[off])
+ if (prefix[off] == '/') {
+ prefix += off + 1;
+ in += off + 1;
+ len -= off + 1;
+ off = 0;
+ } else
+ off++;
+
+ for (; *prefix; prefix++)
+ if (*prefix == '/')
+ strbuf_addstr(out, "../");
+ }
+
+	quote_c_style_counted(in, len, out, NULL, 1);
+
+ if (needquote)
+ strbuf_addch(out, '"');
+ if (!out->len)
+ strbuf_addstr(out, "./");
+
+ return out->buf;
+}
+
/*
* C-style name unquoting.
*
switch (*quoted++) {
case '"':
if (endp)
- *endp = quoted + 1;
+ *endp = quoted;
return 0;
case '\\':
break;
extern void write_name_quotedpfx(const char *pfx, size_t pfxlen,
const char *name, FILE *, int terminator);
+/* quote path as relative to the given prefix */
+char *quote_path_relative(const char *in, int len,
+ struct strbuf *out, const char *prefix);
+
/* quoting as a string literal for other languages */
extern void perl_quote_print(FILE *stream, const char *src);
extern void python_quote_print(FILE *stream, const char *src);
return changed;
}
-static int is_racy_timestamp(struct index_state *istate, struct cache_entry *ce)
+static int is_racy_timestamp(const struct index_state *istate, struct cache_entry *ce)
{
return (istate->timestamp &&
((unsigned int)istate->timestamp) <= ce->ce_mtime);
}
-int ie_match_stat(struct index_state *istate,
+int ie_match_stat(const struct index_state *istate,
struct cache_entry *ce, struct stat *st,
unsigned int options)
{
return changed;
}
-int ie_modified(struct index_state *istate,
+int ie_modified(const struct index_state *istate,
struct cache_entry *ce, struct stat *st, unsigned int options)
{
int changed, changed_fs;
return (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0;
}
+/*
+ * df_name_compare() is identical to base_name_compare(), except it
+ * compares conflicting directory/file entries as equal. Note that
+ * while a directory name compares as equal to a regular file, they
+ * then individually compare _differently_ to a filename that has
+ * a dot after the basename (because '\0' < '.' < '/').
+ *
+ * This is used by routines that want to traverse the git namespace
+ * but then handle conflicting entries together when possible.
+ */
+int df_name_compare(const char *name1, int len1, int mode1,
+ const char *name2, int len2, int mode2)
+{
+ int len = len1 < len2 ? len1 : len2, cmp;
+ unsigned char c1, c2;
+
+ cmp = memcmp(name1, name2, len);
+ if (cmp)
+ return cmp;
+ /* Directories and files compare equal (same length, same name) */
+ if (len1 == len2)
+ return 0;
+ c1 = name1[len];
+ if (!c1 && S_ISDIR(mode1))
+ c1 = '/';
+ c2 = name2[len];
+ if (!c2 && S_ISDIR(mode2))
+ c2 = '/';
+ if (c1 == '/' && !c2)
+ return 0;
+ if (c2 == '/' && !c1)
+ return 0;
+ return c1 - c2;
+}
+
int cache_name_compare(const char *name1, int flags1, const char *name2, int flags2)
{
int len1 = flags1 & CE_NAMEMASK;
return 0;
}
-int index_name_pos(struct index_state *istate, const char *name, int namelen)
+int index_name_pos(const struct index_state *istate, const char *name, int namelen)
{
int first, last;
return 0;
}
-int unmerged_index(struct index_state *istate)
+int unmerged_index(const struct index_state *istate)
{
int i;
for (i = 0; i < istate->cache_nr; i++) {
return ce_write(c, fd, ondisk, size);
}
-int write_index(struct index_state *istate, int newfd)
+int write_index(const struct index_state *istate, int newfd)
{
SHA_CTX c;
struct cache_header hdr;
if (!dir)
usage(receive_pack_usage);
+ setup_path(NULL);
+
if (!enter_repo(dir, 0))
die("'%s': unable to chdir or not a git archive", dir);
return 1;
}
-static int close_ref(struct ref_lock *lock)
+int close_ref(struct ref_lock *lock)
{
if (close_lock_file(lock->lk))
return -1;
return 0;
}
-static int commit_ref(struct ref_lock *lock)
+int commit_ref(struct ref_lock *lock)
{
if (commit_lock_file(lock->lk))
return -1;
#define REF_NODEREF 0x01
extern struct ref_lock *lock_any_ref_for_update(const char *ref, const unsigned char *old_sha1, int flags);
+/** Close the file descriptor owned by a lock and return the status */
+extern int close_ref(struct ref_lock *lock);
+
+/** Close and commit the ref locked by the lock */
+extern int commit_ref(struct ref_lock *lock);
+
/** Release any lock taken but not written. **/
extern void unlock_ref(struct ref_lock *lock);
return 0;
}
-static void handle_all(struct rev_info *revs, unsigned flags)
+static void handle_refs(struct rev_info *revs, unsigned flags,
+ int (*for_each)(each_ref_fn, void *))
{
struct all_refs_cb cb;
cb.all_revs = revs;
cb.all_flags = flags;
- for_each_ref(handle_one_ref, &cb);
+ for_each(handle_one_ref, &cb);
}
static void handle_one_reflog_commit(unsigned char *sha1, void *cb_data)
add_pending_object(revs, &head->object, "HEAD");
add_pending_object(revs, &other->object, "MERGE_HEAD");
bases = get_merge_bases(head, other, 1);
- while (bases) {
- struct commit *it = bases->item;
- struct commit_list *n = bases->next;
- free(bases);
- bases = n;
- it->object.flags |= UNINTERESTING;
- add_pending_object(revs, &it->object, "(merge-base)");
- }
+ add_pending_commit_list(revs, bases, UNINTERESTING);
+ free_commit_list(bases);
+ head->object.flags |= SYMMETRIC_LEFT;
if (!active_nr)
read_cache();
i++;
}
revs->prune_data = prune;
+ revs->limited = 1;
}
int handle_revision_arg(const char *arg, struct rev_info *revs,
continue;
}
if (!strcmp(arg, "--all")) {
- handle_all(revs, flags);
+ handle_refs(revs, flags, for_each_ref);
+ continue;
+ }
+ if (!strcmp(arg, "--branches")) {
+ handle_refs(revs, flags, for_each_branch_ref);
+ continue;
+ }
+ if (!strcmp(arg, "--tags")) {
+ handle_refs(revs, flags, for_each_tag_ref);
+ continue;
+ }
+ if (!strcmp(arg, "--remotes")) {
+ handle_refs(revs, flags, for_each_remote_ref);
continue;
}
if (!strcmp(arg, "--first-parent")) {
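The new --branches, --tags and --remotes options behave like --all but limit the starting points to refs/heads, refs/tags and refs/remotes respectively, and since they take the current flags they can follow --not like other revision arguments. For example:

	# commits reachable from local branches but from no remote-tracking ref
	git rev-list --branches --not --remotes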
close(cmd->in);
}
+ if (cmd->no_stderr)
+ dup_devnull(2);
+ else if (need_err) {
+ dup2(fderr[1], 2);
+ close_pair(fderr);
+ }
+
if (cmd->no_stdout)
dup_devnull(1);
else if (cmd->stdout_to_stderr)
close(cmd->out);
}
- if (cmd->no_stderr)
- dup_devnull(2);
- else if (need_err) {
- dup2(fderr[1], 2);
- close_pair(fderr);
- }
-
if (cmd->dir && chdir(cmd->dir))
die("exec %s: cd to %s failed (%s)", cmd->argv[0],
cmd->dir, strerror(errno));
const char *p = prefix_path(prefix, prefixlen, *src);
if (p)
*(dst++) = p;
+ else
+ exit(128); /* error message already given */
src++;
}
*dst = NULL;
#include "tag.h"
#include "tree.h"
#include "refs.h"
+#include "pack-revindex.h"
#ifndef O_NOATIME
#if defined(__linux__) && (defined(__i386__) || defined(__PPC__))
unsigned long dummy;
unsigned char *next_sha1;
enum object_type type;
+ struct revindex_entry *revidx;
*delta_chain_length = 0;
curpos = obj_offset;
type = unpack_object_header(p, &w_curs, &curpos, size);
+ revidx = find_pack_revindex(p, obj_offset);
+ *store_size = revidx[1].offset - obj_offset;
+
for (;;) {
switch (type) {
default:
case OBJ_TREE:
case OBJ_BLOB:
case OBJ_TAG:
- *store_size = 0; /* notyet */
unuse_pack(&w_curs);
return typename(type);
case OBJ_OFS_DELTA:
obj_offset = get_delta_base(p, &w_curs, &curpos, type, obj_offset);
if (*delta_chain_length == 0) {
- /* TODO: find base_sha1 as pointed by curpos */
- hashclr(base_sha1);
+ revidx = find_pack_revindex(p, obj_offset);
+ hashcpy(base_sha1, nth_packed_object_sha1(p, revidx->nr));
}
break;
case OBJ_REF_DELTA:
const char *find_unique_abbrev(const unsigned char *sha1, int len)
{
- int status, is_null;
+ int status, exists;
static char hex[41];
- is_null = is_null_sha1(sha1);
+ exists = has_sha1_file(sha1);
memcpy(hex, sha1_to_hex(sha1), 40);
if (len == 40 || !len)
return hex;
while (len < 40) {
unsigned char sha1_ret[20];
status = get_short_sha1(hex, len, sha1_ret, 1);
- if (!status ||
- (is_null && status != SHORT_NAME_AMBIGUOUS)) {
+ if (exists
+ ? !status
+ : status == SHORT_NAME_NOT_FOUND) {
hex[len] = 0;
return hex;
}
- if (status != SHORT_NAME_AMBIGUOUS)
- return NULL;
len++;
}
- return NULL;
+ return hex;
}
static int ambiguous_path(const char *path, int len)
return 0;
}
+struct object *peel_to_type(const char *name, int namelen,
+ struct object *o, enum object_type expected_type)
+{
+ if (name && !namelen)
+ namelen = strlen(name);
+ if (!o) {
+ unsigned char sha1[20];
+ if (get_sha1_1(name, namelen, sha1))
+ return NULL;
+ o = parse_object(sha1);
+ }
+ while (1) {
+ if (!o || (!o->parsed && !parse_object(o->sha1)))
+ return NULL;
+ if (o->type == expected_type)
+ return o;
+ if (o->type == OBJ_TAG)
+ o = ((struct tag*) o)->tagged;
+ else if (o->type == OBJ_COMMIT)
+ o = &(((struct commit *) o)->tree->object);
+ else {
+ if (name)
+ error("%.*s: expected %s type, but the object "
+ "dereferences to %s type",
+ namelen, name, typename(expected_type),
+ typename(o->type));
+ return NULL;
+ }
+ }
+}
+
static int peel_onion(const char *name, int len, unsigned char *sha1)
{
unsigned char outer[20];
hashcpy(sha1, o->sha1);
}
else {
- /* At this point, the syntax look correct, so
+ /*
+	 * At this point, the syntax looks correct, so
* if we do not get the needed object, we should
* barf.
*/
-
- while (1) {
- if (!o || (!o->parsed && !parse_object(o->sha1)))
- return -1;
- if (o->type == expected_type) {
- hashcpy(sha1, o->sha1);
- return 0;
- }
- if (o->type == OBJ_TAG)
- o = ((struct tag*) o)->tagged;
- else if (o->type == OBJ_COMMIT)
- o = &(((struct commit *) o)->tree->object);
- else
- return error("%.*s: expected %s type, but the object dereferences to %s type",
- len, name, typename(expected_type),
- typename(o->type));
- if (!o)
- return -1;
- if (!o->parsed)
- if (!parse_object(o->sha1))
- return -1;
+ o = peel_to_type(name, len, o, expected_type);
+ if (o) {
+ hashcpy(sha1, o->sha1);
+ return 0;
}
+ return -1;
}
return 0;
}
--- /dev/null
+#!/bin/sh
+#
+# Copyright (c) 2008 Clemens Buchacher <drizzd@aon.at>
+#
+
+if test -z "$GIT_TEST_HTTPD"
+then
+ say "skipping test, network testing disabled by default"
+ say "(define GIT_TEST_HTTPD to enable)"
+ test_done
+ exit
+fi
+
+LIB_HTTPD_PATH=${LIB_HTTPD_PATH-'/usr/sbin/apache2'}
+LIB_HTTPD_PORT=${LIB_HTTPD_PORT-'8111'}
+
+TEST_PATH="$PWD"/../lib-httpd
+HTTPD_ROOT_PATH="$PWD"/httpd
+HTTPD_DOCUMENT_ROOT_PATH=$HTTPD_ROOT_PATH/www
+
+if ! test -x "$LIB_HTTPD_PATH"
+then
+ say "skipping test, no web server found at '$LIB_HTTPD_PATH'"
+ test_done
+ exit
+fi
+
+HTTPD_VERSION=`$LIB_HTTPD_PATH -v | \
+ sed -n 's/^Server version: Apache\/\([0-9]*\)\..*$/\1/p; q'`
+
+if test -n "$HTTPD_VERSION"
+then
+ if test -z "$LIB_HTTPD_MODULE_PATH"
+ then
+ if ! test $HTTPD_VERSION -ge 2
+ then
+ say "skipping test, at least Apache version 2 is required"
+ test_done
+ exit
+ fi
+
+ LIB_HTTPD_MODULE_PATH='/usr/lib/apache2/modules'
+ fi
+else
+ error "Could not identify web server at '$LIB_HTTPD_PATH'"
+fi
+
+HTTPD_PARA="-d $HTTPD_ROOT_PATH -f $TEST_PATH/apache.conf"
+
+prepare_httpd() {
+ mkdir -p $HTTPD_DOCUMENT_ROOT_PATH
+
+ ln -s $LIB_HTTPD_MODULE_PATH $HTTPD_ROOT_PATH/modules
+
+ if test -n "$LIB_HTTPD_SSL"
+ then
+ HTTPD_URL=https://127.0.0.1:$LIB_HTTPD_PORT
+
+ RANDFILE_PATH="$HTTPD_ROOT_PATH"/.rnd openssl req \
+ -config $TEST_PATH/ssl.cnf \
+ -new -x509 -nodes \
+ -out $HTTPD_ROOT_PATH/httpd.pem \
+ -keyout $HTTPD_ROOT_PATH/httpd.pem
+ export GIT_SSL_NO_VERIFY=t
+ HTTPD_PARA="$HTTPD_PARA -DSSL"
+ else
+ HTTPD_URL=http://127.0.0.1:$LIB_HTTPD_PORT
+ fi
+
+ if test -n "$LIB_HTTPD_DAV" -o -n "$LIB_HTTPD_SVN"
+ then
+ HTTPD_PARA="$HTTPD_PARA -DDAV"
+
+ if test -n "$LIB_HTTPD_SVN"
+ then
+ HTTPD_PARA="$HTTPD_PARA -DSVN"
+ rawsvnrepo="$HTTPD_ROOT_PATH/svnrepo"
+ svnrepo="http://127.0.0.1:$LIB_HTTPD_PORT/svn"
+ fi
+ fi
+}
+
+start_httpd() {
+ prepare_httpd
+
+ trap 'stop_httpd; die' exit
+
+ "$LIB_HTTPD_PATH" $HTTPD_PARA \
+ -c "Listen 127.0.0.1:$LIB_HTTPD_PORT" -k start
+}
+
+stop_httpd() {
+ trap 'die' exit
+
+ "$LIB_HTTPD_PATH" $HTTPD_PARA -k stop
+}
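lib-httpd.sh is not a test by itself; HTTP tests set the knobs they need and then source it, roughly like this (t5540-http-push.sh below does the same, with error handling around start_httpd):

	LIB_HTTPD_DAV=t
	. ../lib-httpd.sh
	start_httpd
	# ... run tests against $HTTPD_URL and $HTTPD_DOCUMENT_ROOT_PATH ...
	stop_httpd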
--- /dev/null
+PidFile httpd.pid
+DocumentRoot www
+ErrorLog error.log
+
+<IfDefine SSL>
+LoadModule ssl_module modules/mod_ssl.so
+
+SSLCertificateFile httpd.pem
+SSLCertificateKeyFile httpd.pem
+SSLRandomSeed startup file:/dev/urandom 512
+SSLRandomSeed connect file:/dev/urandom 512
+SSLSessionCache none
+SSLMutex file:ssl_mutex
+SSLEngine On
+</IfDefine>
+
+<IfDefine DAV>
+ LoadModule dav_module modules/mod_dav.so
+ LoadModule dav_fs_module modules/mod_dav_fs.so
+
+ DAVLockDB DAVLock
+ <Location />
+ Dav on
+ </Location>
+</IfDefine>
+
+<IfDefine SVN>
+ LoadModule dav_svn_module modules/mod_dav_svn.so
+
+ <Location /svn>
+ DAV svn
+ SVNPath svnrepo
+ </Location>
+</IfDefine>
--- /dev/null
+RANDFILE = $ENV::RANDFILE_PATH
+
+[ req ]
+default_bits = 1024
+distinguished_name = req_distinguished_name
+prompt = no
+[ req_distinguished_name ]
+commonName = 127.0.0.1
test "$dir" = "$(test-absolute-path $dir2)" &&
file="$dir"/index &&
test "$file" = "$(test-absolute-path $dir2/index)" &&
+ basename=blub &&
+ test "$dir/$basename" = $(cd .git && test-absolute-path $basename) &&
ln -s ../first/file .git/syml &&
sym="$(cd first; pwd -P)"/file &&
test "$sym" = "$(test-absolute-path $dir2/syml)"
. ./test-lib.sh
cat <<\EOF >rot13.sh
-tr '[a-zA-Z]' '[n-za-mN-ZA-M]'
+tr \
+ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' \
+ 'nopqrstuvwxyzabcdefghijklmNOPQRSTUVWXYZABCDEFGHIJKLM'
EOF
chmod +x rot13.sh
--st <st> get another string (pervert ordering)
-o <str> get another string
+magic arguments
+ --quux means --quux
+
EOF
test_expect_success 'test help' '
git diff expect.err output.err
'
+cat > expect <<EOF
+boolean: 0
+integer: 0
+string: (not set)
+arg 00: --quux
+EOF
+
+test_expect_success 'keep some options as arguments' '
+ test-parse-options --quux > output 2> output.err &&
+ test ! -s output.err &&
+ git diff expect output
+'
+
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='read-tree -u --reset'
+
+. ./test-lib.sh
+
+# two-tree test
+
+test_expect_success 'setup' '
+ git init &&
+ mkdir df &&
+ echo content >df/file &&
+ git add df/file &&
+ git commit -m one &&
+ git ls-files >expect &&
+ rm -rf df &&
+ echo content >df &&
+ git add df &&
+ echo content >new &&
+ git add new &&
+ git commit -m two
+'
+
+test_expect_success 'reset should work' '
+ git read-tree -u --reset HEAD^ &&
+ git ls-files >actual &&
+ diff -u expect actual
+'
+
+test_done
'
+test_expect_success 'delete' '
+ echo 1 > C &&
+ test_tick &&
+ git commit -m rat C &&
+
+ echo 2 > C &&
+ test_tick &&
+ git commit -m ox C &&
+
+ echo 3 > C &&
+ test_tick &&
+ git commit -m tiger C &&
+
+ test 5 = $(git reflog | wc -l) &&
+
+ git reflog delete master@{1} &&
+ git reflog show master > output &&
+ test 4 = $(wc -l < output) &&
+ ! grep ox < output &&
+
+ git reflog delete master@{07.04.2005.15:15:00.-0700} &&
+ git reflog show master > output &&
+ test 3 = $(wc -l < output) &&
+ ! grep dragon < output
+
+'
+
test_expect_success 'prune --expire' '
before=$(git count-objects | sed "s/ .*//") &&
'
test_expect_success 'relative path outside tree should fail' \
- '! git checkout HEAD -- ../../Makefile'
+ 'test_must_fail git checkout HEAD -- ../../Makefile'
test_expect_success 'incorrect relative path to file should fail (1)' \
- '! git checkout HEAD -- ../file0'
+ 'test_must_fail git checkout HEAD -- ../file0'
test_expect_success 'incorrect relative path should fail (2)' \
- '( cd dir1 && ! git checkout HEAD -- ./file0 )'
+ '( cd dir1 && test_must_fail git checkout HEAD -- ./file0 )'
test_expect_success 'incorrect relative path should fail (3)' \
- '( cd dir1 && ! git checkout HEAD -- ../../file0 )'
+ '( cd dir1 && test_must_fail git checkout HEAD -- ../../file0 )'
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='checkout should leave clean stat info'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+
+ echo hello >world &&
+ git update-index --add world &&
+ git commit -m initial &&
+ git branch side &&
+ echo goodbye >world &&
+ git update-index --add world &&
+ git commit -m second
+
+'
+
+test_expect_success 'branch switching' '
+
+ git reset --hard &&
+ test "$(git diff-files --raw)" = "" &&
+
+ git checkout master &&
+ test "$(git diff-files --raw)" = "" &&
+
+ git checkout side &&
+ test "$(git diff-files --raw)" = "" &&
+
+ git checkout master &&
+ test "$(git diff-files --raw)" = ""
+
+'
+
+test_expect_success 'path checkout' '
+
+ git reset --hard &&
+ test "$(git diff-files --raw)" = "" &&
+
+ git checkout master world &&
+ test "$(git diff-files --raw)" = "" &&
+
+ git checkout side world &&
+ test "$(git diff-files --raw)" = "" &&
+
+ git checkout master world &&
+ test "$(git diff-files --raw)" = ""
+
+'
+
+test_done
+
# having 1.txt and path3
test_expect_success \
'ls-tree filter odd names' \
- 'git ls-tree $tree 1.txt /1.txt //1.txt path3/1.txt /path3/1.txt //path3//1.txt path3 /path3/ path3// >current &&
+ 'git ls-tree $tree 1.txt ./1.txt .//1.txt path3/1.txt path3/./1.txt path3 path3// >current &&
cat >expected <<\EOF &&
100644 blob X 1.txt
100644 blob X path3/1.txt
git tag I
'
-echo "#!$SHELL" >fake-editor
+echo "#!$SHELL_PATH" >fake-editor.sh
cat >> fake-editor.sh <<\EOF
case "$1" in
*/COMMIT_EDITMSG)
--- /dev/null
+#!/bin/sh
+
+test_description='git rebase --abort tests'
+
+. ./test-lib.sh
+
+test_expect_success setup '
+ echo a > a &&
+ git add a &&
+ git commit -m a &&
+ git branch to-rebase &&
+
+ echo b > a &&
+ git commit -a -m b &&
+ echo c > a &&
+ git commit -a -m c &&
+
+ git checkout to-rebase &&
+ echo d > a &&
+ git commit -a -m "merge should fail on this" &&
+ echo e > a &&
+ git commit -a -m "merge should fail on this, too" &&
+ git branch pre-rebase
+'
+
+testrebase() {
+ type=$1
+ dotest=$2
+
+ test_expect_success "rebase$type --abort" '
+ # Clean up the state from the previous one
+		git reset --hard pre-rebase &&
+ test_must_fail git rebase'"$type"' master &&
+ test -d '$dotest' &&
+ git rebase --abort &&
+ test $(git rev-parse to-rebase) = $(git rev-parse pre-rebase) &&
+ test ! -d '$dotest'
+ '
+
+ test_expect_success "rebase$type --abort after --skip" '
+ # Clean up the state from the previous one
+		git reset --hard pre-rebase &&
+ test_must_fail git rebase'"$type"' master &&
+ test -d '$dotest' &&
+ test_must_fail git rebase --skip &&
+ test $(git rev-parse HEAD) = $(git rev-parse master) &&
+		git rebase --abort &&
+ test $(git rev-parse to-rebase) = $(git rev-parse pre-rebase) &&
+ test ! -d '$dotest'
+ '
+
+ test_expect_success "rebase$type --abort after --continue" '
+ # Clean up the state from the previous one
+		git reset --hard pre-rebase &&
+ test_must_fail git rebase'"$type"' master &&
+ test -d '$dotest' &&
+ echo c > a &&
+ echo d >> a &&
+ git add a &&
+ test_must_fail git rebase --continue &&
+ test $(git rev-parse HEAD) != $(git rev-parse master) &&
+ git rebase --abort &&
+ test $(git rev-parse to-rebase) = $(git rev-parse pre-rebase) &&
+ test ! -d '$dotest'
+ '
+}
+
+testrebase "" .dotest
+testrebase " --merge" .git/.dotest-merge
+
+test_done
'
+test_expect_success 'revert forbidden on dirty working tree' '
+
+ echo content >extra_file &&
+ git add extra_file &&
+ test_must_fail git revert HEAD 2>errors &&
+ grep "Dirty index" errors
+
+'
+
test_done
test_expect_success 'apply needs clean working directory' '
echo 4 > other-file &&
git add other-file &&
- echo 5 > other-file
- ! git stash apply
+ echo 5 > other-file &&
+ test_must_fail git stash apply
'
test_expect_success 'apply stashed changes' '
git reset --hard HEAD &&
mkdir subdir &&
cd subdir &&
- git stash apply
+ git stash apply &&
+ cd ..
+'
+
+test_expect_success 'drop top stash' '
+ git reset --hard &&
+ git stash list > stashlist1 &&
+ echo 7 > file &&
+ git stash &&
+ git stash drop &&
+ git stash list > stashlist2 &&
+ diff stashlist1 stashlist2 &&
+ git stash apply &&
+ test 3 = $(cat file) &&
+ test 1 = $(git show :file) &&
+ test 1 = $(git show HEAD:file)
+'
+
+test_expect_success 'drop middle stash' '
+ git reset --hard &&
+ echo 8 > file &&
+ git stash &&
+ echo 9 > file &&
+ git stash &&
+ git stash drop stash@{1} &&
+ test 2 = $(git stash list | wc -l) &&
+ git stash apply &&
+ test 9 = $(cat file) &&
+ test 1 = $(git show :file) &&
+ test 1 = $(git show HEAD:file) &&
+ git reset --hard &&
+ git stash drop &&
+ git stash apply &&
+ test 3 = $(cat file) &&
+ test 1 = $(git show :file) &&
+ test 1 = $(git show HEAD:file)
+'
+
+test_expect_success 'stash pop' '
+ git reset --hard &&
+ git stash pop &&
+ test 3 = $(cat file) &&
+ test 1 = $(git show :file) &&
+ test 1 = $(git show HEAD:file) &&
+ test 0 = $(git stash list | wc -l)
'
test_done
*** BLURB HERE ***
A U Thor (2):
- Second
- Third
+ Second
+ Third
dir/sub | 4 ++++
file0 | 3 +++
for i in patches/0002-* patches/0003-*
do
grep "References: $FIRST_MID" $i &&
- grep "In-Reply-To: $FIRST_MID" $i
+ grep "In-Reply-To: $FIRST_MID" $i || break
done
'
for i in patches/*
do
grep "References: $FIRST_MID" $i &&
- grep "In-Reply-To: $FIRST_MID" $i
+ grep "In-Reply-To: $FIRST_MID" $i || break
done
'
for i in patches/0001-* patches/0002-* patches/0003-*
do
grep "References: $FIRST_MID" $i &&
- grep "In-Reply-To: $FIRST_MID" $i
+ grep "In-Reply-To: $FIRST_MID" $i || break
done
'
for i in patches/*
do
grep "References: $FIRST_MID" $i &&
- grep "In-Reply-To: $FIRST_MID" $i
+ grep "In-Reply-To: $FIRST_MID" $i || break
done
'
ls patches/0004-This-is-an-excessively-long-subject-line-for-a-messa.patch
'
+test_expect_success 'cover-letter inherits diff options' '
+
+ git mv file foo &&
+ git commit -m foo &&
+ git format-patch --cover-letter -1 &&
+ ! grep "file => foo .* 0 *$" 0000-cover-letter.patch &&
+ git format-patch --cover-letter -1 -M &&
+ grep "file => foo .* 0 *$" 0000-cover-letter.patch
+
+'
+
+cat > expect << EOF
+ This is an excessively long subject line for a message due to the
+ habit some projects have of not having a short, one-line subject at
+ the start of the commit message, but rather sticking a whole
+ paragraph right at the start as the only thing in the commit
+ message. It had better not become the filename for the patch.
+ foo
+
+EOF
+
+test_expect_success 'shortlog of cover-letter wraps overly-long onelines' '
+
+ git format-patch --cover-letter -2 &&
+ sed -e "1,/A U Thor/d" -e "/^$/q" < 0000-cover-letter.patch > output &&
+ git diff expect output
+
+'
+
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='difference in submodules'
+
+. ./test-lib.sh
+. ../diff-lib.sh
+
+_z40=0000000000000000000000000000000000000000
+test_expect_success setup '
+ test_tick &&
+ test_create_repo sub &&
+ (
+ cd sub &&
+ echo hello >world &&
+ git add world &&
+ git commit -m submodule
+ ) &&
+
+ test_tick &&
+ echo frotz >nitfol &&
+ git add nitfol sub &&
+ git commit -m superproject &&
+
+ (
+ cd sub &&
+ echo goodbye >world &&
+ git add world &&
+ git commit -m "submodule #2"
+ ) &&
+
+ set x $(
+ cd sub &&
+ git rev-list HEAD
+ ) &&
+ echo ":160000 160000 $3 $_z40 M sub" >expect
+'
+
+test_expect_success 'git diff --raw HEAD' '
+ git diff --raw --abbrev=40 HEAD >actual &&
+ diff -u expect actual
+'
+
+test_expect_success 'git diff-index --raw HEAD' '
+ git diff-index --raw HEAD >actual.index &&
+ diff -u expect actual.index
+'
+
+test_expect_success 'git diff-files --raw' '
+ git diff-files --raw >actual.files &&
+ diff -u expect actual.files
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description='git am running from a subdirectory'
+
+. ./test-lib.sh
+
+test_expect_success setup '
+ echo hello >world &&
+ git add world &&
+ test_tick &&
+ git commit -m initial &&
+ git tag initial &&
+ echo goodbye >world &&
+ git add world &&
+ test_tick &&
+ git commit -m second &&
+ git format-patch --stdout HEAD^ >patchfile &&
+ : >expect
+'
+
+test_expect_success 'am regularly from stdin' '
+ git checkout initial &&
+ git am <patchfile &&
+ git diff master >actual &&
+ diff -u expect actual
+'
+
+test_expect_success 'am regularly from file' '
+ git checkout initial &&
+ git am patchfile &&
+ git diff master >actual &&
+ diff -u expect actual
+'
+
+test_expect_success 'am regularly from stdin in subdirectory' '
+ rm -fr subdir &&
+ git checkout initial &&
+ (
+ mkdir -p subdir &&
+ cd subdir &&
+ git am <../patchfile
+ ) &&
+	git diff master >actual &&
+ diff -u expect actual
+'
+
+test_expect_success 'am regularly from file in subdirectory' '
+ rm -fr subdir &&
+ git checkout initial &&
+ (
+ mkdir -p subdir &&
+ cd subdir &&
+ git am ../patchfile
+ ) &&
+ git diff master >actual &&
+ diff -u expect actual
+'
+
+test_expect_success 'am regularly from file in subdirectory with full path' '
+ rm -fr subdir &&
+ git checkout initial &&
+ P=$(pwd) &&
+ (
+ mkdir -p subdir &&
+ cd subdir &&
+ git am "$P/patchfile"
+ ) &&
+ git diff master >actual &&
+ diff -u expect actual
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description='git-pack-object --include-tag'
+. ./test-lib.sh
+
+TRASH=`pwd`
+
+test_expect_success setup '
+ echo c >d &&
+ git update-index --add d &&
+ tree=`git write-tree` &&
+ commit=`git commit-tree $tree </dev/null` &&
+ echo "object $commit" >sig &&
+ echo "type commit" >>sig &&
+ echo "tag mytag" >>sig &&
+ echo "tagger $(git var GIT_COMMITTER_IDENT)" >>sig &&
+ echo >>sig &&
+ echo "our test tag" >>sig &&
+ tag=`git mktag <sig` &&
+ rm d sig &&
+ git update-ref refs/tags/mytag $tag && {
+ echo $tree &&
+ echo $commit &&
+ git ls-tree $tree | sed -e "s/.* \\([0-9a-f]*\\) .*/\\1/"
+ } >obj-list
+'
+
+rm -rf clone.git
+test_expect_success 'pack without --include-tag' '
+ packname_1=$(git pack-objects \
+ --window=0 \
+ test-1 <obj-list)
+'
+
+test_expect_success 'unpack objects' '
+ (
+ GIT_DIR=clone.git &&
+ export GIT_DIR &&
+ git init &&
+ git unpack-objects -n <test-1-${packname_1}.pack &&
+ git unpack-objects <test-1-${packname_1}.pack
+ )
+'
+
+test_expect_success 'check unpacked result (have commit, no tag)' '
+ git rev-list --objects $commit >list.expect &&
+ (
+ GIT_DIR=clone.git &&
+ export GIT_DIR &&
+ test_must_fail git cat-file -e $tag &&
+ git rev-list --objects $commit
+ ) >list.actual &&
+ git diff list.expect list.actual
+'
+
+rm -rf clone.git
+test_expect_success 'pack with --include-tag' '
+ packname_1=$(git pack-objects \
+ --window=0 \
+ --include-tag \
+ test-2 <obj-list)
+'
+
+test_expect_success 'unpack objects' '
+ (
+ GIT_DIR=clone.git &&
+ export GIT_DIR &&
+ git init &&
+ git unpack-objects -n <test-2-${packname_1}.pack &&
+ git unpack-objects <test-2-${packname_1}.pack
+ )
+'
+
+test_expect_success 'check unpacked result (have commit, have tag)' '
+ git rev-list --objects mytag >list.expect &&
+ (
+ GIT_DIR=clone.git &&
+ export GIT_DIR &&
+ git rev-list --objects $tag
+ ) >list.actual &&
+ git diff list.expect list.actual
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description='test automatic tag following'
+
+. ./test-lib.sh
+
+# End state of the repository:
+#
+# T - tag1 S - tag2
+# / /
+# L - A ------ O ------ B
+# \ \ \
+# \ C - origin/cat \
+# origin/master master
+
+test_expect_success setup '
+ test_tick &&
+ echo ichi >file &&
+ git add file &&
+ git commit -m L &&
+ L=$(git rev-parse --verify HEAD) &&
+
+ (
+ mkdir cloned &&
+ cd cloned &&
+ git init-db &&
+ git remote add -f origin ..
+ ) &&
+
+ test_tick &&
+ echo A >file &&
+ git add file &&
+ git commit -m A &&
+ A=$(git rev-parse --verify HEAD)
+'
+
+U=UPLOAD_LOG
+
+cat - <<EOF >expect
+#S
+want $A
+#E
+EOF
+test_expect_success 'fetch A (new commit : 1 connection)' '
+ rm -f $U
+	rm -f $U &&
+ cd cloned &&
+ GIT_DEBUG_SEND_PACK=3 git fetch 3>../$U &&
+ test $A = $(git rev-parse --verify origin/master)
+ ) &&
+ test -s $U &&
+ cut -d" " -f1,2 $U >actual &&
+ git diff expect actual
+'
+
+test_expect_success "create tag T on A, create C on branch cat" '
+ git tag -a -m tag1 tag1 $A &&
+ T=$(git rev-parse --verify tag1) &&
+
+ git checkout -b cat &&
+ echo C >file &&
+ git add file &&
+ git commit -m C &&
+ C=$(git rev-parse --verify HEAD) &&
+ git checkout master
+'
+
+cat - <<EOF >expect
+#S
+want $C
+want $T
+#E
+EOF
+test_expect_success 'fetch C, T (new branch, tag : 1 connection)' '
+ rm -f $U
+	rm -f $U &&
+ cd cloned &&
+ GIT_DEBUG_SEND_PACK=3 git fetch 3>../$U &&
+ test $C = $(git rev-parse --verify origin/cat) &&
+ test $T = $(git rev-parse --verify tag1) &&
+ test $A = $(git rev-parse --verify tag1^0)
+ ) &&
+ test -s $U &&
+ cut -d" " -f1,2 $U >actual &&
+ git diff expect actual
+'
+
+test_expect_success "create commits O, B, tag S on B" '
+ test_tick &&
+ echo O >file &&
+ git add file &&
+ git commit -m O &&
+
+ test_tick &&
+ echo B >file &&
+ git add file &&
+ git commit -m B &&
+ B=$(git rev-parse --verify HEAD) &&
+
+ git tag -a -m tag2 tag2 $B &&
+ S=$(git rev-parse --verify tag2)
+'
+
+cat - <<EOF >expect
+#S
+want $B
+want $S
+#E
+EOF
+test_expect_success 'fetch B, S (commit and tag : 1 connection)' '
+ rm -f $U
+	rm -f $U &&
+ cd cloned &&
+ GIT_DEBUG_SEND_PACK=3 git fetch 3>../$U &&
+ test $B = $(git rev-parse --verify origin/master) &&
+ test $B = $(git rev-parse --verify tag2^0) &&
+ test $S = $(git rev-parse --verify tag2)
+ ) &&
+ test -s $U &&
+ cut -d" " -f1,2 $U >actual &&
+ git diff expect actual
+'
+
+cat - <<EOF >expect
+#S
+want $B
+want $S
+#E
+EOF
+test_expect_success 'new clone fetch master and tags' '
+	git branch -D cat &&
+	rm -f $U &&
+ (
+ mkdir clone2 &&
+ cd clone2 &&
+ git init &&
+ git remote add origin .. &&
+ GIT_DEBUG_SEND_PACK=3 git fetch 3>../$U &&
+ test $B = $(git rev-parse --verify origin/master) &&
+ test $S = $(git rev-parse --verify tag2) &&
+ test $B = $(git rev-parse --verify tag2^0) &&
+ test $T = $(git rev-parse --verify tag1) &&
+ test $A = $(git rev-parse --verify tag1^0)
+ ) &&
+ test -s $U &&
+ cut -d" " -f1,2 $U >actual &&
+ git diff expect actual
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+#
+# Copyright (c) 2008 Clemens Buchacher <drizzd@aon.at>
+#
+
+test_description='test http-push
+
+This test runs various sanity checks on http-push.'
+
+. ./test-lib.sh
+
+ROOT_PATH="$PWD"
+LIB_HTTPD_DAV=t
+
+. ../lib-httpd.sh
+
+if ! start_httpd >&3 2>&4
+then
+ say "skipping test, web server setup failed"
+ test_done
+ exit
+fi
+
+test_expect_success 'setup remote repository' '
+ cd "$ROOT_PATH" &&
+ mkdir test_repo &&
+ cd test_repo &&
+ git init &&
+ : >path1 &&
+ git add path1 &&
+ test_tick &&
+ git commit -m initial &&
+ cd - &&
+ git clone --bare test_repo test_repo.git &&
+ cd test_repo.git &&
+ git --bare update-server-info &&
+ chmod +x hooks/post-update &&
+ cd - &&
+ mv test_repo.git $HTTPD_DOCUMENT_ROOT_PATH
+'
+
+test_expect_success 'clone remote repository' '
+ cd "$ROOT_PATH" &&
+ git clone $HTTPD_URL/test_repo.git test_repo_clone
+'
+
+test_expect_success 'push to remote repository' '
+ cd "$ROOT_PATH"/test_repo_clone &&
+ : >path2 &&
+ git add path2 &&
+ test_tick &&
+ git commit -m path2 &&
+ git push
+'
+
+test_expect_success 'create and delete remote branch' '
+ cd "$ROOT_PATH"/test_repo_clone &&
+ git checkout -b dev &&
+ : >path3 &&
+ git add path3 &&
+ test_tick &&
+ git commit -m dev &&
+ git push origin dev &&
+ git fetch &&
+ git push origin :dev &&
+ git branch -d -r origin/dev &&
+ git fetch &&
+ ! git show-ref --verify refs/remotes/origin/dev
+'
+
+stop_httpd
+
+test_done
git clone --bare . x &&
test "$(GIT_CONFIG=a.git/config git config --bool core.bare)" = true &&
test "$(GIT_CONFIG=x/config git config --bool core.bare)" = true
+ git bundle create b1.bundle --all HEAD &&
+ git bundle create b2.bundle --all &&
+ mkdir dir &&
+	cp b1.bundle dir/b3 &&
+ cp b1.bundle b4
'
test_expect_success 'local clone without .git suffix' '
git fetch &&
test ! -e .git/refs/remotes/origin/HEAD'
+test_expect_success 'bundle clone without .bundle suffix' '
+ cd "$D" &&
+ git clone dir/b3 &&
+ cd b3 &&
+ git fetch
+'
+
+test_expect_success 'bundle clone with .bundle suffix' '
+ cd "$D" &&
+ git clone b1.bundle &&
+ cd b1 &&
+ git fetch
+'
+
+test_expect_success 'bundle clone from b4' '
+ cd "$D" &&
+ git clone b4 bdl &&
+ cd bdl &&
+ git fetch
+'
+
+test_expect_success 'bundle clone from b4.bundle that does not exist' '
+ cd "$D" &&
+ if git clone b4.bundle bb
+ then
+ echo "Oops, should have failed"
+ false
+ else
+ echo happy
+ fi
+'
+
+test_expect_success 'bundle clone with nonexistent HEAD' '
+ cd "$D" &&
+ git clone b2.bundle b2 &&
+ cd b2 &&
+	git fetch &&
+ test ! -e .git/refs/heads/master
+'
+
test_done
test_expect_success "virtual trees were processed" "git diff expect out"
-git reset --hard
test_expect_success 'refuse to merge binary files' '
+ git reset --hard &&
printf "\0" > binary-file &&
git add binary-file &&
git commit -m binary &&
'
+test_expect_success 'setup' '
+ mkdir git-gui &&
+ cd git-gui &&
+ git init &&
+ echo git-gui > git-gui.sh &&
+ o1=$(git hash-object git-gui.sh) &&
+ git add git-gui.sh &&
+ git commit -m "initial git-gui" &&
+ cd .. &&
+ mkdir git &&
+ cd git &&
+ git init &&
+ echo git >git.c &&
+ o2=$(git hash-object git.c) &&
+ git add git.c &&
+ git commit -m "initial git"
+'
+
+test_expect_success 'initial merge' '
+ git remote add -f gui ../git-gui &&
+ git merge -s ours --no-commit gui/master &&
+ git read-tree --prefix=git-gui/ -u gui/master &&
+ git commit -m "Merge git-gui as our subdirectory" &&
+ git ls-files -s >actual &&
+ (
+ echo "100644 $o1 0 git-gui/git-gui.sh"
+ echo "100644 $o2 0 git.c"
+ ) >expected &&
+ git diff -u expected actual
+'
+
+test_expect_success 'merge update' '
+ cd ../git-gui &&
+ echo git-gui2 > git-gui.sh &&
+ o3=$(git hash-object git-gui.sh) &&
+ git add git-gui.sh &&
+ git commit -m "update git-gui" &&
+ cd ../git &&
+ git pull -s subtree gui master &&
+ git ls-files -s >actual &&
+ (
+ echo "100644 $o3 0 git-gui/git-gui.sh"
+ echo "100644 $o2 0 git.c"
+ ) >expected &&
+ git diff -u expected actual
+'
+
test_done
check_describe () {
expect="$1"
shift
- R=$(git describe "$@") &&
+ R=$(git describe "$@" 2>err.actual)
+ S=$?
+ cat err.actual >&3
test_expect_success "describe $*" '
+ test $S = 0 &&
case "$R" in
$expect) echo happy ;;
*) echo "Oops - $R is not $expect";
check_describe A-* --tags HEAD^^2
check_describe B --tags HEAD^^2^
+check_describe B-0-* --long HEAD^^2^
+check_describe A-3-* --long HEAD^^2
+
+test_expect_success 'rename tag A to Q locally' '
+ mv .git/refs/tags/A .git/refs/tags/Q
+'
+cat - >err.expect <<EOF
+warning: tag 'A' is really 'Q' here
+EOF
+check_describe A-* HEAD
+test_expect_success 'warning was displayed for Q' '
+ git diff err.expect err.actual
+'
+test_expect_success 'rename tag Q back to A' '
+ mv .git/refs/tags/Q .git/refs/tags/A
+'
+
+test_expect_success 'pack tag refs' 'git pack-refs'
+check_describe A-* HEAD
+
test_done
'
+test_expect_success 'Subdirectory filter with disappearing trees' '
+ git reset --hard &&
+ git checkout master &&
+
+ mkdir foo &&
+ touch foo/bar &&
+ git add foo &&
+ test_tick &&
+ git commit -m "Adding foo" &&
+
+ git rm -r foo &&
+ test_tick &&
+ git commit -m "Removing foo" &&
+
+ mkdir foo &&
+ touch foo/bar &&
+ git add foo &&
+ test_tick &&
+ git commit -m "Re-adding foo" &&
+
+ git filter-branch -f --subdirectory-filter foo &&
+ test $(git rev-list master | wc -l) = 3
+'
+
test_done
'
done
+test_expect_success 'editor with a space' '
+
+ if echo "echo space > \"\$1\"" > "e space.sh"
+ then
+ chmod a+x "e space.sh" &&
+ GIT_EDITOR="./e\ space.sh" git commit --amend &&
+ test space = "$(git show -s --pretty=format:%s)"
+ else
+ say "Skipping; FS does not support spaces in filenames"
+ fi
+
+'
+
+unset GIT_EDITOR
+test_expect_success 'core.editor with a space' '
+
+ if test -f "e space.sh"
+ then
+ git config core.editor \"./e\ space.sh\" &&
+ git commit --amend &&
+ test space = "$(git show -s --pretty=format:%s)"
+ else
+ say "Skipping; FS does not support spaces in filenames"
+ fi
+
+'
+
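(Editorial note on the two tests above, not part of the patch: the editor
command is handed to a shell, so a path containing a space must survive as a
single word -- the GIT_EDITOR form escapes the space with a backslash
(./e\ space.sh), while the core.editor form stores literal double quotes
around the path ("./e space.sh") so the later evaluation keeps it together.)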
TERM="$OLD_TERM"
test_done
test_expect_success 'add a directory outside the work tree' '(
cd tester &&
d1="$(cd .. ; pwd)" &&
- git add "$d1"
+ test_must_fail git add "$d1"
)'
+
test_expect_success 'add a file outside the work tree, nasty case 1' '(
cd tester &&
f="$(pwd)x" &&
echo "$f" &&
touch "$f" &&
- git add "$f"
+ test_must_fail git add "$f"
)'
test_expect_success 'add a file outside the work tree, nasty case 2' '(
f="$(pwd | sed "s/.$//")x" &&
echo "$f" &&
touch "$f" &&
- git add "$f"
+ test_must_fail git add "$f"
)'
test_done
test -f build/lib.so
'
+
+test_expect_success 'git-clean with relative prefix' '
+
+ mkdir -p build docs &&
+ touch a.out src/part3.c docs/manual.txt obj.o build/lib.so &&
+ would_clean=$(
+ cd docs &&
+ git clean -n ../src |
+ sed -n -e "s|^Would remove ||p"
+ ) &&
+ test "$would_clean" = ../src/part3.c || {
+		echo "Oops <$would_clean>"
+ false
+ }
+'
+
+test_expect_success 'git-clean with absolute path' '
+
+ mkdir -p build docs &&
+ touch a.out src/part3.c docs/manual.txt obj.o build/lib.so &&
+ would_clean=$(
+ cd docs &&
+ git clean -n $(pwd)/../src |
+ sed -n -e "s|^Would remove ||p"
+ ) &&
+ test "$would_clean" = ../src/part3.c || {
+		echo "Oops <$would_clean>"
+ false
+ }
+'
+
+test_expect_success 'git-clean with out of work tree relative path' '
+
+ mkdir -p build docs &&
+ touch a.out src/part3.c docs/manual.txt obj.o build/lib.so &&
+ (
+ cd docs &&
+ test_must_fail git clean -n ../..
+ )
+'
+
+test_expect_success 'git-clean with out of work tree absolute path' '
+
+ mkdir -p build docs &&
+ touch a.out src/part3.c docs/manual.txt obj.o build/lib.so &&
+ dd=$(cd .. && pwd) &&
+ (
+ cd docs &&
+ test_must_fail git clean -n $dd
+ )
+'
+
test_expect_success 'git-clean -d with prefix and path' '
mkdir -p build docs src/feature &&
mkdir foo &&
touch foo/bar &&
+ exec <foo/bar &&
chmod 0 foo &&
- ! git clean -f -d
+ test_must_fail git clean -f -d
'
chmod 755 foo
test_expect_success 'merge c0 with c1 (no-ff)' '
git reset --hard c0 &&
+ git config branch.master.mergeoptions "" &&
test_tick &&
git merge --no-ff c1 &&
verify_merge file result.1 &&
test_debug 'gitk --all'
+test_expect_success 'combining --squash and --no-ff is refused' '
+ test_must_fail git merge --squash --no-ff c1 &&
+ test_must_fail git merge --no-ff --squash c1
+'
+
test_expect_success 'merge c0 with c1 (ff overrides no-ff)' '
git reset --hard c0 &&
git config branch.master.mergeoptions "--no-ff" &&
--- /dev/null
+#!/bin/sh
+#
+# Copyright (c) 2008 Charles Bailey
+#
+
+test_description='git-mergetool
+
+Testing basic merge tool invocation'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ echo master >file1 &&
+ git add file1 &&
+ git commit -m "added file1" &&
+ git checkout -b branch1 master &&
+ echo branch1 change >file1 &&
+ echo branch1 newfile >file2 &&
+ git add file1 file2 &&
+ git commit -m "branch1 changes" &&
+ git checkout -b branch2 master &&
+ echo branch2 change >file1 &&
+ echo branch2 newfile >file2 &&
+ git add file1 file2 &&
+ git commit -m "branch2 changes" &&
+ git checkout master &&
+ echo master updated >file1 &&
+ echo master new >file2 &&
+ git add file1 file2 &&
+ git commit -m "master updates"
+'
+
+test_expect_success 'custom mergetool' '
+ git config merge.tool mytool &&
+ git config mergetool.mytool.cmd "cat \"\$REMOTE\" >\"\$MERGED\"" &&
+ git config mergetool.mytool.trustExitCode true &&
+ git checkout branch1 &&
+ ! git merge master >/dev/null 2>&1 &&
+ ( yes "" | git mergetool file1>/dev/null 2>&1 ) &&
+ ( yes "" | git mergetool file2>/dev/null 2>&1 ) &&
+ test "$(cat file1)" = "master updated" &&
+ test "$(cat file2)" = "master new" &&
+ git commit -m "branch1 resolved with mergetool"
+'
+
+test_done
COMMIT
reset refs/tags/O3-2nd
from :5
+reset refs/tags/O3-3rd
+from :5
INPUT_END
cat >expect <<INPUT_END
# Copyright (c) 2005 Junio C Hamano
#
+# Keep the original TERM for say_color
+ORIGINAL_TERM=$TERM
+
# For repeatability, reset the environment to known value.
LANG=C
LC_ALL=C
PAGER=cat
TZ=UTC
-export LANG LC_ALL PAGER TZ
+TERM=dumb
+export LANG LC_ALL PAGER TERM TZ
EDITOR=:
VISUAL=:
unset GIT_EDITOR
# This test checks if command xyzzy does the right thing...
# '
# . ./test-lib.sh
-
-[ "x$TERM" != "xdumb" ] &&
- [ -t 1 ] &&
- tput bold >/dev/null 2>&1 &&
- tput setaf 1 >/dev/null 2>&1 &&
- tput sgr0 >/dev/null 2>&1 &&
+[ "x$ORIGINAL_TERM" != "xdumb" ] && (
+ TERM=$ORIGINAL_TERM &&
+ export TERM &&
+ [ -t 1 ] &&
+ tput bold >/dev/null 2>&1 &&
+ tput setaf 1 >/dev/null 2>&1 &&
+ tput sgr0 >/dev/null 2>&1
+ ) &&
color=t
while test "$#" -ne 0
-q|--q|--qu|--qui|--quie|--quiet)
quiet=t; shift ;;
--no-color)
- color=; shift ;;
+ color=; shift ;;
--no-python)
# noop now...
shift ;;
if test -n "$color"; then
say_color () {
+ (
+ TERM=$ORIGINAL_TERM
+ export TERM
case "$1" in
error) tput bold; tput setaf 1;; # bold red
skip) tput bold; tput setaf 2;; # bold green
shift
echo "* $*"
tput sgr0
+ )
}
else
say_color() {
test_fixed=0
test_broken=0
-trap 'echo >&5 "FATAL: Unexpected exit with code $?"; exit 1' exit
+die () {
+ echo >&5 "FATAL: Unexpected exit with code $?"
+ exit 1
+}
+
+trap 'die' exit
test_tick () {
if test -z "${test_tick+set}"
echo >&3 ""
}
+# This is not among top-level (test_expect_success | test_expect_failure)
+# but is a prefix that can be used in the test script, like:
+#
+# test_expect_success 'complain and die' '
+# do something &&
+# do something else &&
+# test_must_fail git checkout ../outerspace
+# '
+#
+# Writing this as "! git checkout ../outerspace" is wrong, because
+# the failure could be due to a segv. We want a controlled failure.
+
+test_must_fail () {
+ "$@"
+ test $? -gt 0 -a $? -le 128
+}
+
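Editorial note (a minimal sketch, not part of the patch): the exit-status
range above is what distinguishes a controlled failure from a crash -- a
command that dies on a signal such as SIGSEGV exits with 128 plus the signal
number, which test_must_fail rejects, while an ordinary failure exits with a
status between 1 and 128:

	# illustration only; statuses as a POSIX shell reports them
	sh -c 'exit 1'        ; echo $?   # 1   -> accepted by test_must_fail
	sh -c 'kill -SEGV $$' ; echo $?   # 139 -> rejected by test_must_fail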
# Most tests can use the created repository, but some may need to create more.
# Usage: test_create_repo <directory>
test_create_repo () {
exit 1
fi
+. ../GIT-BUILD-OPTIONS
+
# Test repository
test=trash
rm -fr "$test"
item->tagged = NULL;
}
- if (item->tagged && track_object_refs) {
- struct object_refs *refs = alloc_object_refs(1);
- refs->ref[0] = item->tagged;
- set_object_refs(&item->object, refs);
- }
-
return 0;
}
case "$$boilerplate" in *~) continue ;; esac && \
dst=`echo "$$boilerplate" | sed -e 's|^this|.|;s|--|/|g'` && \
dir=`expr "$$dst" : '\(.*\)/'` && \
- mkdir -p blt/$$dir && \
+ $(INSTALL) -d -m 755 blt/$$dir && \
case "$$boilerplate" in \
*--) ;; \
- *) cp $$boilerplate blt/$$dst ;; \
+ *) cp -p $$boilerplate blt/$$dst ;; \
esac || exit; \
done && \
date >$@
install: all
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(template_dir_SQ)'
(cd blt && $(TAR) cf - .) | \
- (cd '$(DESTDIR_SQ)$(template_dir_SQ)' && $(TAR) xf -)
+ (cd '$(DESTDIR_SQ)$(template_dir_SQ)' && umask 022 && $(TAR) xf -)
OPT_STRING(0, "string2", &string, "str", "get another string"),
OPT_STRING(0, "st", &string, "st", "get another string (pervert ordering)"),
OPT_STRING('o', NULL, &string, "str", "get another string"),
+ OPT_GROUP("magic arguments"),
+ OPT_ARGUMENT("quux", "means --quux"),
OPT_END(),
};
int i;
struct ref *last_ref = NULL;
if (!transport->data)
- transport->data = get_http_walker(transport->url);
+ transport->data = get_http_walker(transport->url,
+ transport->remote);
refs_url = xmalloc(strlen(transport->url) + 11);
sprintf(refs_url, "%s/info/refs", transport->url);
curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer);
curl_easy_setopt(slot->curl, CURLOPT_URL, refs_url);
curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, NULL);
- if (transport->remote->http_proxy)
- curl_easy_setopt(slot->curl, CURLOPT_PROXY,
- transport->remote->http_proxy);
if (start_active_slot(slot)) {
run_active_slot(slot);
int nr_objs, struct ref **to_fetch)
{
if (!transport->data)
- transport->data = get_http_walker(transport->url);
+ transport->data = get_http_walker(transport->url,
+ transport->remote);
return fetch_objs_via_walker(transport, nr_objs, to_fetch);
}
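/*
 * Editorial note (an inference from this diff, not part of the patch):
 * get_http_walker() now takes the remote in addition to the URL, and the
 * explicit CURLOPT_PROXY setup removed in the hunk above goes away with
 * it -- presumably the walker applies remote->http_proxy itself.
 */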
struct git_transport_data {
unsigned thin : 1;
unsigned keep : 1;
+ unsigned followtags : 1;
int depth;
struct child_process *conn;
int fd[2];
} else if (!strcmp(name, TRANS_OPT_THIN)) {
data->thin = !!value;
return 0;
+ } else if (!strcmp(name, TRANS_OPT_FOLLOWTAGS)) {
+ data->followtags = !!value;
+ return 0;
} else if (!strcmp(name, TRANS_OPT_KEEP)) {
data->keep = !!value;
return 0;
char *dest = xstrdup(transport->url);
struct fetch_pack_args args;
int i;
+ struct ref *refs_tmp = NULL;
memset(&args, 0, sizeof(args));
args.uploadpack = data->uploadpack;
args.keep_pack = data->keep;
args.lock_pack = 1;
args.use_thin_pack = data->thin;
+ args.include_tag = data->followtags;
args.verbose = transport->verbose > 0;
args.depth = data->depth;
for (i = 0; i < nr_heads; i++)
origh[i] = heads[i] = xstrdup(to_fetch[i]->name);
- refs = transport_get_remote_refs(transport);
if (!data->conn) {
- struct ref *refs_tmp;
connect_setup(transport);
get_remote_heads(data->fd[0], &refs_tmp, 0, NULL, 0);
- free_refs(refs_tmp);
}
- refs = fetch_pack(&args, data->fd, data->conn, transport->remote_refs,
+ refs = fetch_pack(&args, data->fd, data->conn,
+ refs_tmp ? refs_tmp : transport->remote_refs,
dest, nr_heads, heads, &transport->pack_lockfile);
close(data->fd[0]);
close(data->fd[1]);
refs = NULL;
data->conn = NULL;
+ free_refs(refs_tmp);
+
for (i = 0; i < nr_heads; i++)
free(origh[i]);
free(origh);
/* Limit the depth of the fetch if not null */
#define TRANS_OPT_DEPTH "depth"
+/* Aggressively fetch annotated tags if possible */
+#define TRANS_OPT_FOLLOWTAGS "followtags"
+
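/*
 * Editorial sketch (not part of the patch): a caller that wants tags
 * fetched along with what they point at would enable this with something
 * like
 *
 *	transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, "1");
 *
 * The git-native transport treats any non-NULL value as true
 * (data->followtags = !!value) and copies it into args.include_tag in the
 * fetch-over-pack hunk above; the matching "include-tag" wire capability
 * is handled in the upload-pack changes elsewhere in this patch.
 */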
/**
* Returns 0 if the option was used, non-zero otherwise. Prints a
* message to stderr if the option is not used.
static int entry_compare(struct name_entry *a, struct name_entry *b)
{
- return base_name_compare(
+ return df_name_compare(
a->path, tree_entry_len(a->path, a->sha1), a->mode,
b->path, tree_entry_len(b->path, b->sha1), b->mode);
}
return 1;
}
-void traverse_trees(int n, struct tree_desc *t, const char *base, traverse_callback_t callback)
+void setup_traverse_info(struct traverse_info *info, const char *base)
{
+ int pathlen = strlen(base);
+ static struct traverse_info dummy;
+
+ memset(info, 0, sizeof(*info));
+ if (pathlen && base[pathlen-1] == '/')
+ pathlen--;
+ info->pathlen = pathlen ? pathlen + 1 : 0;
+ info->name.path = base;
+ info->name.sha1 = (void *)(base + pathlen + 1);
+ if (pathlen)
+ info->prev = &dummy;
+}
+
+char *make_traverse_path(char *path, const struct traverse_info *info, const struct name_entry *n)
+{
+ int len = tree_entry_len(n->path, n->sha1);
+ int pathlen = info->pathlen;
+
+ path[pathlen + len] = 0;
+ for (;;) {
+ memcpy(path + pathlen, n->path, len);
+ if (!pathlen)
+ break;
+ path[--pathlen] = '/';
+ n = &info->name;
+ len = tree_entry_len(n->path, n->sha1);
+ info = info->prev;
+ pathlen -= len;
+ }
+ return path;
+}
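/*
 * Editorial note (not part of the patch): make_traverse_path() fills the
 * caller-provided buffer back to front, following info->prev, so a chain
 * representing "a" -> "b" with n naming "c" yields "a/b/c".  Callers
 * compute the required length with traverse_path_len() from tree-walk.h,
 * plus one byte for the terminating NUL.
 */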
+
+int traverse_trees(int n, struct tree_desc *t, struct traverse_info *info)
+{
+ int ret = 0;
struct name_entry *entry = xmalloc(n*sizeof(*entry));
for (;;) {
unsigned long mask = 0;
+ unsigned long dirmask = 0;
int i, last;
last = -1;
mask = 0;
}
mask |= 1ul << i;
+ if (S_ISDIR(entry[i].mode))
+ dirmask |= 1ul << i;
last = i;
}
if (!mask)
break;
+ dirmask &= mask;
/*
- * Update the tree entries we've walked, and clear
- * all the unused name-entries.
+ * Clear all the unused name-entries.
*/
for (i = 0; i < n; i++) {
- if (mask & (1ul << i)) {
- update_tree_entry(t+i);
+ if (mask & (1ul << i))
continue;
- }
entry_clear(entry + i);
}
- callback(n, mask, entry, base);
+ ret = info->fn(n, mask, dirmask, entry, info);
+ if (ret < 0)
+ break;
+ if (ret)
+ mask &= ret;
+ ret = 0;
+ for (i = 0; i < n; i++) {
+ if (mask & (1ul << i))
+ update_tree_entry(t + i);
+ }
}
free(entry);
+ return ret;
}
static int find_tree_entry(struct tree_desc *t, const char *name, unsigned char *result, unsigned *mode)
void *fill_tree_descriptor(struct tree_desc *desc, const unsigned char *sha1);
-typedef void (*traverse_callback_t)(int n, unsigned long mask, struct name_entry *entry, const char *base);
-
-void traverse_trees(int n, struct tree_desc *t, const char *base, traverse_callback_t callback);
+struct traverse_info;
+typedef int (*traverse_callback_t)(int n, unsigned long mask, unsigned long dirmask, struct name_entry *entry, struct traverse_info *);
+int traverse_trees(int n, struct tree_desc *t, struct traverse_info *info);
+
+struct traverse_info {
+ struct traverse_info *prev;
+ struct name_entry name;
+ int pathlen;
+
+ unsigned long conflicts;
+ traverse_callback_t fn;
+ void *data;
+};
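/*
 * Editorial note (not part of the patch): traverse_trees() interprets the
 * callback's return value as follows -- a negative value aborts the walk
 * and is propagated to the caller; a positive value is ANDed with the
 * current entry mask to restrict which tree descriptors are advanced
 * (zero leaves the mask as is).  unpack_callback() in unpack-trees.c
 * returns the full entry mask on success.
 */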
int get_tree_entry(const unsigned char *, const char *, unsigned char *, unsigned *);
+extern char *make_traverse_path(char *path, const struct traverse_info *info, const struct name_entry *n);
+extern void setup_traverse_info(struct traverse_info *info, const char *base);
+
+static inline int traverse_path_len(const struct traverse_info *info, const struct name_entry *n)
+{
+ return info->pathlen + tree_entry_len(n->path, n->sha1);
+}
#endif
return (struct tree *) obj;
}
-/*
- * NOTE! Tree refs to external git repositories
- * (ie gitlinks) do not count as real references.
- *
- * You don't have to have those repositories
- * available at all, much less have the objects
- * accessible from the current repository.
- */
-static void track_tree_refs(struct tree *item)
-{
- int n_refs = 0, i;
- struct object_refs *refs;
- struct tree_desc desc;
- struct name_entry entry;
-
- /* Count how many entries there are.. */
- init_tree_desc(&desc, item->buffer, item->size);
- while (tree_entry(&desc, &entry)) {
- if (S_ISGITLINK(entry.mode))
- continue;
- n_refs++;
- }
-
- /* Allocate object refs and walk it again.. */
- i = 0;
- refs = alloc_object_refs(n_refs);
- init_tree_desc(&desc, item->buffer, item->size);
- while (tree_entry(&desc, &entry)) {
- struct object *obj;
-
- if (S_ISGITLINK(entry.mode))
- continue;
- if (S_ISDIR(entry.mode))
- obj = &lookup_tree(entry.sha1)->object;
- else if (S_ISREG(entry.mode) || S_ISLNK(entry.mode))
- obj = &lookup_blob(entry.sha1)->object;
- else {
- warning("in tree %s: entry %s has bad mode %.6o\n",
- sha1_to_hex(item->object.sha1), entry.path, entry.mode);
- obj = lookup_unknown_object(entry.sha1);
- }
- refs->ref[i++] = obj;
- }
- set_object_refs(&item->object, refs);
-}
-
int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
{
if (item->object.parsed)
item->buffer = buffer;
item->size = size;
- if (track_object_refs)
- track_tree_refs(item);
return 0;
}
+#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "dir.h"
#include "tree.h"
#include "progress.h"
#include "refs.h"
-#define DBRT_DEBUG 1
-
-struct tree_entry_list {
- struct tree_entry_list *next;
- unsigned int mode;
- const char *name;
- const unsigned char *sha1;
-};
-
-static struct tree_entry_list *create_tree_entry_list(struct tree_desc *desc)
-{
- struct name_entry one;
- struct tree_entry_list *ret = NULL;
- struct tree_entry_list **list_p = &ret;
-
- while (tree_entry(desc, &one)) {
- struct tree_entry_list *entry;
-
- entry = xmalloc(sizeof(struct tree_entry_list));
- entry->name = one.path;
- entry->sha1 = one.sha1;
- entry->mode = one.mode;
- entry->next = NULL;
-
- *list_p = entry;
- list_p = &entry->next;
- }
- return ret;
-}
-
-static int entcmp(const char *name1, int dir1, const char *name2, int dir2)
+static void add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
+ unsigned int set, unsigned int clear)
{
- int len1 = strlen(name1);
- int len2 = strlen(name2);
- int len = len1 < len2 ? len1 : len2;
- int ret = memcmp(name1, name2, len);
- unsigned char c1, c2;
- if (ret)
- return ret;
- c1 = name1[len];
- c2 = name2[len];
- if (!c1 && dir1)
- c1 = '/';
- if (!c2 && dir2)
- c2 = '/';
- ret = (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0;
- if (c1 && c2 && !ret)
- ret = len1 - len2;
- return ret;
-}
-
-static inline void remove_entry(int remove)
-{
- if (remove >= 0)
- remove_cache_entry_at(remove);
-}
-
-static int unpack_trees_rec(struct tree_entry_list **posns, int len,
- const char *base, struct unpack_trees_options *o,
- struct tree_entry_list *df_conflict_list)
-{
- int remove;
- int baselen = strlen(base);
- int src_size = len + 1;
- int retval = 0;
-
- do {
- int i;
- const char *first;
- int firstdir = 0;
- int pathlen;
- unsigned ce_size;
- struct tree_entry_list **subposns;
- struct cache_entry **src;
- int any_files = 0;
- int any_dirs = 0;
- char *cache_name;
- int ce_stage;
- int skip_entry = 0;
-
- /* Find the first name in the input. */
-
- first = NULL;
- cache_name = NULL;
-
- /* Check the cache */
- if (o->merge && o->pos < active_nr) {
- /* This is a bit tricky: */
- /* If the index has a subdirectory (with
- * contents) as the first name, it'll get a
- * filename like "foo/bar". But that's after
- * "foo", so the entry in trees will get
- * handled first, at which point we'll go into
- * "foo", and deal with "bar" from the index,
- * because the base will be "foo/". The only
- * way we can actually have "foo/bar" first of
- * all the things is if the trees don't
- * contain "foo" at all, in which case we'll
- * handle "foo/bar" without going into the
- * directory, but that's fine (and will return
- * an error anyway, with the added unknown
- * file case.
- */
-
- cache_name = active_cache[o->pos]->name;
- if (strlen(cache_name) > baselen &&
- !memcmp(cache_name, base, baselen)) {
- cache_name += baselen;
- first = cache_name;
- } else {
- cache_name = NULL;
- }
- }
-
-#if DBRT_DEBUG > 1
- if (first)
- fprintf(stderr, "index %s\n", first);
-#endif
- for (i = 0; i < len; i++) {
- if (!posns[i] || posns[i] == df_conflict_list)
- continue;
-#if DBRT_DEBUG > 1
- fprintf(stderr, "%d %s\n", i + 1, posns[i]->name);
-#endif
- if (!first || entcmp(first, firstdir,
- posns[i]->name,
- S_ISDIR(posns[i]->mode)) > 0) {
- first = posns[i]->name;
- firstdir = S_ISDIR(posns[i]->mode);
- }
- }
- /* No name means we're done */
- if (!first)
- goto leave_directory;
-
- pathlen = strlen(first);
- ce_size = cache_entry_size(baselen + pathlen);
-
- src = xcalloc(src_size, sizeof(struct cache_entry *));
-
- subposns = xcalloc(len, sizeof(struct tree_list_entry *));
-
- remove = -1;
- if (cache_name && !strcmp(cache_name, first)) {
- any_files = 1;
- src[0] = active_cache[o->pos];
- remove = o->pos;
- if (o->skip_unmerged && ce_stage(src[0]))
- skip_entry = 1;
- }
-
- for (i = 0; i < len; i++) {
- struct cache_entry *ce;
-
- if (!posns[i] ||
- (posns[i] != df_conflict_list &&
- strcmp(first, posns[i]->name))) {
- continue;
- }
-
- if (posns[i] == df_conflict_list) {
- src[i + o->merge] = o->df_conflict_entry;
- continue;
- }
+ unsigned int size = ce_size(ce);
+ struct cache_entry *new = xmalloc(size);
- if (S_ISDIR(posns[i]->mode)) {
- struct tree *tree = lookup_tree(posns[i]->sha1);
- struct tree_desc t;
- any_dirs = 1;
- parse_tree(tree);
- init_tree_desc(&t, tree->buffer, tree->size);
- subposns[i] = create_tree_entry_list(&t);
- posns[i] = posns[i]->next;
- src[i + o->merge] = o->df_conflict_entry;
- continue;
- }
-
- if (skip_entry) {
- subposns[i] = df_conflict_list;
- posns[i] = posns[i]->next;
- continue;
- }
-
- if (!o->merge)
- ce_stage = 0;
- else if (i + 1 < o->head_idx)
- ce_stage = 1;
- else if (i + 1 > o->head_idx)
- ce_stage = 3;
- else
- ce_stage = 2;
-
- ce = xcalloc(1, ce_size);
- ce->ce_mode = create_ce_mode(posns[i]->mode);
- ce->ce_flags = create_ce_flags(baselen + pathlen,
- ce_stage);
- memcpy(ce->name, base, baselen);
- memcpy(ce->name + baselen, first, pathlen + 1);
-
- any_files = 1;
-
- hashcpy(ce->sha1, posns[i]->sha1);
- src[i + o->merge] = ce;
- subposns[i] = df_conflict_list;
- posns[i] = posns[i]->next;
- }
- if (any_files) {
- if (skip_entry) {
- o->pos++;
- while (o->pos < active_nr &&
- !strcmp(active_cache[o->pos]->name,
- src[0]->name))
- o->pos++;
- } else if (o->merge) {
- int ret;
-
-#if DBRT_DEBUG > 1
- fprintf(stderr, "%s:\n", first);
- for (i = 0; i < src_size; i++) {
- fprintf(stderr, " %d ", i);
- if (src[i])
- fprintf(stderr, "%06x %s\n", src[i]->ce_mode, sha1_to_hex(src[i]->sha1));
- else
- fprintf(stderr, "\n");
- }
-#endif
- ret = o->fn(src, o, remove);
- if (ret < 0)
- return ret;
+ clear |= CE_HASHED | CE_UNHASHED;
-#if DBRT_DEBUG > 1
- fprintf(stderr, "Added %d entries\n", ret);
-#endif
- o->pos += ret;
- } else {
- remove_entry(remove);
- for (i = 0; i < src_size; i++) {
- if (src[i]) {
- add_cache_entry(src[i], ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
- }
- }
- }
- }
- if (any_dirs) {
- char *newbase = xmalloc(baselen + 2 + pathlen);
- memcpy(newbase, base, baselen);
- memcpy(newbase + baselen, first, pathlen);
- newbase[baselen + pathlen] = '/';
- newbase[baselen + pathlen + 1] = '\0';
- if (unpack_trees_rec(subposns, len, newbase, o,
- df_conflict_list)) {
- retval = -1;
- goto leave_directory;
- }
- free(newbase);
- }
- free(subposns);
- free(src);
- } while (1);
-
- leave_directory:
- return retval;
+ memcpy(new, ce, size);
+ new->next = NULL;
+ new->ce_flags = (new->ce_flags & ~clear) | set;
+ add_index_entry(&o->result, new, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE|ADD_CACHE_SKIP_DFCHECK);
}
/* Unlink the last component and attempt to remove leading
unsigned cnt = 0, total = 0;
struct progress *progress = NULL;
char last_symlink[PATH_MAX];
+ struct index_state *index = &o->result;
int i;
if (o->update && o->verbose_update) {
- for (total = cnt = 0; cnt < active_nr; cnt++) {
- struct cache_entry *ce = active_cache[cnt];
+ for (total = cnt = 0; cnt < index->cache_nr; cnt++) {
+ struct cache_entry *ce = index->cache[cnt];
if (ce->ce_flags & (CE_UPDATE | CE_REMOVE))
total++;
}
}
*last_symlink = '\0';
- for (i = 0; i < active_nr; i++) {
- struct cache_entry *ce = active_cache[i];
+ for (i = 0; i < index->cache_nr; i++) {
+ struct cache_entry *ce = index->cache[i];
if (ce->ce_flags & (CE_UPDATE | CE_REMOVE))
display_progress(progress, ++cnt);
if (ce->ce_flags & CE_REMOVE) {
if (o->update)
unlink_entry(ce->name, last_symlink);
- remove_cache_entry_at(i);
+ remove_index_entry_at(&o->result, i);
i--;
continue;
}
stop_progress(&progress);
}
-int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
+static inline int call_unpack_fn(struct cache_entry **src, struct unpack_trees_options *o)
+{
+ int ret = o->fn(src, o);
+ if (ret > 0)
+ ret = 0;
+ return ret;
+}
+
+static int unpack_index_entry(struct cache_entry *ce, struct unpack_trees_options *o)
+{
+ struct cache_entry *src[5] = { ce, };
+
+ o->pos++;
+ if (ce_stage(ce)) {
+ if (o->skip_unmerged) {
+ add_entry(o, ce, 0, 0);
+ return 0;
+ }
+ }
+ return call_unpack_fn(src, o);
+}
+
+int traverse_trees_recursive(int n, unsigned long dirmask, unsigned long df_conflicts, struct name_entry *names, struct traverse_info *info)
{
- struct tree_entry_list **posns;
int i;
- struct tree_entry_list df_conflict_list;
+ struct tree_desc t[3];
+ struct traverse_info newinfo;
+ struct name_entry *p;
+
+ p = names;
+ while (!p->mode)
+ p++;
+
+ newinfo = *info;
+ newinfo.prev = info;
+ newinfo.name = *p;
+ newinfo.pathlen += tree_entry_len(p->path, p->sha1) + 1;
+ newinfo.conflicts |= df_conflicts;
+
+ for (i = 0; i < n; i++, dirmask >>= 1) {
+ const unsigned char *sha1 = NULL;
+ if (dirmask & 1)
+ sha1 = names[i].sha1;
+ fill_tree_descriptor(t+i, sha1);
+ }
+ return traverse_trees(n, t, &newinfo);
+}
+
+/*
+ * Compare the traverse-path to the cache entry without actually
+ * having to generate the textual representation of the traverse
+ * path.
+ *
+ * NOTE! This *only* compares up to the size of the traverse path
+ * itself - the caller needs to do the final check for the cache
+ * entry having more data at the end!
+ */
+static int do_compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
+{
+ int len, pathlen, ce_len;
+ const char *ce_name;
+
+ if (info->prev) {
+ int cmp = do_compare_entry(ce, info->prev, &info->name);
+ if (cmp)
+ return cmp;
+ }
+ pathlen = info->pathlen;
+ ce_len = ce_namelen(ce);
+
+ /* If ce_len < pathlen then we must have previously hit "name == directory" entry */
+ if (ce_len < pathlen)
+ return -1;
+
+ ce_len -= pathlen;
+ ce_name = ce->name + pathlen;
+
+ len = tree_entry_len(n->path, n->sha1);
+ return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
+}
+
+static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
+{
+ int cmp = do_compare_entry(ce, info, n);
+ if (cmp)
+ return cmp;
+
+ /*
+ * Even if the beginning compared identically, the ce should
+ * compare as bigger than a directory leading up to it!
+ */
+ return ce_namelen(ce) > traverse_path_len(info, n);
+}
+
+static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage)
+{
+ int len = traverse_path_len(info, n);
+ struct cache_entry *ce = xcalloc(1, cache_entry_size(len));
+
+ ce->ce_mode = create_ce_mode(n->mode);
+ ce->ce_flags = create_ce_flags(len, stage);
+ hashcpy(ce->sha1, n->sha1);
+ make_traverse_path(ce->name, info, n);
+
+ return ce;
+}
+
+static int unpack_nondirectories(int n, unsigned long mask, unsigned long dirmask, struct cache_entry *src[5],
+ const struct name_entry *names, const struct traverse_info *info)
+{
+ int i;
+ struct unpack_trees_options *o = info->data;
+ unsigned long conflicts;
+
+ /* Do we have *only* directories? Nothing to do */
+ if (mask == dirmask && !src[0])
+ return 0;
+
+ conflicts = info->conflicts;
+ if (o->merge)
+ conflicts >>= 1;
+ conflicts |= dirmask;
+
+ /*
+ * Ok, we've filled in up to any potential index entry in src[0],
+ * now do the rest.
+ */
+ for (i = 0; i < n; i++) {
+ int stage;
+ unsigned int bit = 1ul << i;
+ if (conflicts & bit) {
+ src[i + o->merge] = o->df_conflict_entry;
+ continue;
+ }
+ if (!(mask & bit))
+ continue;
+ if (!o->merge)
+ stage = 0;
+ else if (i + 1 < o->head_idx)
+ stage = 1;
+ else if (i + 1 > o->head_idx)
+ stage = 3;
+ else
+ stage = 2;
+ src[i + o->merge] = create_ce_entry(info, names + i, stage);
+ }
+
+ if (o->merge)
+ return call_unpack_fn(src, o);
+
+ n += o->merge;
+ for (i = 0; i < n; i++)
+ add_entry(o, src[i], 0, 0);
+ return 0;
+}
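/*
 * Editorial note (not part of the patch): "conflicts" carries one bit per
 * tree being walked.  unpack_callback() seeds it with the trees that had
 * a non-directory entry where another tree had a directory
 * (mask & ~dirmask) and, when the index participates (o->merge), shifts
 * it left by one so bit 0 can record that the index had an entry at this
 * path.  unpack_nondirectories() undoes that shift (conflicts >>= 1)
 * before using the per-tree bits to decide which src[] slots get the d/f
 * conflict placeholder entry.
 */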
+
+static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
+{
+ struct cache_entry *src[5] = { NULL, };
+ struct unpack_trees_options *o = info->data;
+ const struct name_entry *p = names;
+
+ /* Find first entry with a real name (we could use "mask" too) */
+ while (!p->mode)
+ p++;
+
+ /* Are we supposed to look at the index too? */
+ if (o->merge) {
+ while (o->pos < o->src_index->cache_nr) {
+ struct cache_entry *ce = o->src_index->cache[o->pos];
+ int cmp = compare_entry(ce, info, p);
+ if (cmp < 0) {
+ if (unpack_index_entry(ce, o) < 0)
+ return -1;
+ continue;
+ }
+ if (!cmp) {
+ o->pos++;
+ if (ce_stage(ce)) {
+ /*
+ * If we skip unmerged index entries, we'll skip this
+ * entry *and* the tree entries associated with it!
+ */
+ if (o->skip_unmerged) {
+ add_entry(o, ce, 0, 0);
+ return mask;
+ }
+ }
+ src[0] = ce;
+ }
+ break;
+ }
+ }
+
+ if (unpack_nondirectories(n, mask, dirmask, src, names, info) < 0)
+ return -1;
+
+ /* Now handle any directories.. */
+ if (dirmask) {
+ unsigned long conflicts = mask & ~dirmask;
+ if (o->merge) {
+ conflicts <<= 1;
+ if (src[0])
+ conflicts |= 1;
+ }
+ if (traverse_trees_recursive(n, dirmask, conflicts,
+ names, info) < 0)
+ return -1;
+ return mask;
+ }
+
+ return mask;
+}
+
+static int unpack_failed(struct unpack_trees_options *o, const char *message)
+{
+ discard_index(&o->result);
+ if (!o->gently) {
+ if (message)
+ return error(message);
+ return -1;
+ }
+ return -1;
+}
+
+int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
+{
static struct cache_entry *dfc;
- memset(&df_conflict_list, 0, sizeof(df_conflict_list));
- df_conflict_list.next = &df_conflict_list;
+ if (len > 4)
+ die("unpack_trees takes at most four trees");
memset(&state, 0, sizeof(state));
state.base_dir = "";
state.force = 1;
state.quiet = 1;
state.refresh_cache = 1;
+ memset(&o->result, 0, sizeof(o->result));
o->merge_size = len;
if (!dfc)
o->df_conflict_entry = dfc;
if (len) {
- posns = xmalloc(len * sizeof(struct tree_entry_list *));
- for (i = 0; i < len; i++)
- posns[i] = create_tree_entry_list(t+i);
-
- if (unpack_trees_rec(posns, len, o->prefix ? o->prefix : "",
- o, &df_conflict_list)) {
- if (o->gently) {
- discard_cache();
- read_cache();
- }
- return -1;
- }
+ const char *prefix = o->prefix ? o->prefix : "";
+ struct traverse_info info;
+
+ setup_traverse_info(&info, prefix);
+ info.fn = unpack_callback;
+ info.data = o;
+
+ if (traverse_trees(len, t, &info) < 0)
+ return unpack_failed(o, NULL);
}
- if (o->trivial_merges_only && o->nontrivial_merge) {
- if (o->gently) {
- discard_cache();
- read_cache();
+ /* Any left-over entries in the index? */
+ if (o->merge) {
+ while (o->pos < o->src_index->cache_nr) {
+ struct cache_entry *ce = o->src_index->cache[o->pos];
+ if (unpack_index_entry(ce, o) < 0)
+ return unpack_failed(o, NULL);
}
- return o->gently ? -1 :
- error("Merge requires file-level merging");
}
+ if (o->trivial_merges_only && o->nontrivial_merge)
+ return unpack_failed(o, "Merge requires file-level merging");
+
+ o->src_index = NULL;
check_updates(o);
+ if (o->dst_index)
+ *o->dst_index = o->result;
return 0;
}
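/*
 * Editorial note (not part of the patch): with this rewrite unpack_trees()
 * no longer edits the active cache in place.  It reads existing entries
 * from o->src_index, builds the outcome in o->result (entries are copied
 * in via add_entry()), lets check_updates() walk o->result to carry out
 * the CE_UPDATE/CE_REMOVE work-tree updates, and on success hands the
 * result to the caller through *o->dst_index.
 */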
return 0;
if (!lstat(ce->name, &st)) {
- unsigned changed = ce_match_stat(ce, &st, CE_MATCH_IGNORE_VALID);
+ unsigned changed = ie_match_stat(o->src_index, ce, &st, CE_MATCH_IGNORE_VALID);
if (!changed)
return 0;
/*
error("Entry '%s' not uptodate. Cannot merge.", ce->name);
}
-static void invalidate_ce_path(struct cache_entry *ce)
+static void invalidate_ce_path(struct cache_entry *ce, struct unpack_trees_options *o)
{
if (ce)
- cache_tree_invalidate_path(active_cache_tree, ce->name);
+ cache_tree_invalidate_path(o->src_index->cache_tree, ce->name);
}
/*
* in that directory.
*/
namelen = strlen(ce->name);
- pos = cache_name_pos(ce->name, namelen);
+ pos = index_name_pos(o->src_index, ce->name, namelen);
if (0 <= pos)
return cnt; /* we have it as nondirectory */
pos = -pos - 1;
- for (i = pos; i < active_nr; i++) {
- struct cache_entry *ce = active_cache[i];
+ for (i = pos; i < o->src_index->cache_nr; i++) {
+ struct cache_entry *ce = o->src_index->cache[i];
int len = ce_namelen(ce);
if (len < namelen ||
strncmp(ce->name, ce->name, namelen) ||
if (!ce_stage(ce)) {
if (verify_uptodate(ce, o))
return -1;
- ce->ce_flags |= CE_REMOVE;
+ add_entry(o, ce, CE_REMOVE, 0);
}
cnt++;
}
* delete this path, which is in a subdirectory that
* is being replaced with a blob.
*/
- cnt = cache_name_pos(ce->name, strlen(ce->name));
+ cnt = index_name_pos(&o->result, ce->name, strlen(ce->name));
if (0 <= cnt) {
- struct cache_entry *ce = active_cache[cnt];
+ struct cache_entry *ce = o->result.cache[cnt];
if (ce->ce_flags & CE_REMOVE)
return 0;
}
static int merged_entry(struct cache_entry *merge, struct cache_entry *old,
struct unpack_trees_options *o)
{
- merge->ce_flags |= CE_UPDATE;
if (old) {
/*
* See if we can re-use the old CE directly?
} else {
if (verify_uptodate(old, o))
return -1;
- invalidate_ce_path(old);
+ invalidate_ce_path(old, o);
}
}
else {
if (verify_absent(merge, "overwritten", o))
return -1;
- invalidate_ce_path(merge);
+ invalidate_ce_path(merge, o);
}
- merge->ce_flags &= ~CE_STAGEMASK;
- add_cache_entry(merge, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
+ add_entry(o, merge, CE_UPDATE, CE_STAGEMASK);
return 1;
}
static int deleted_entry(struct cache_entry *ce, struct cache_entry *old,
struct unpack_trees_options *o)
{
- if (old) {
- if (verify_uptodate(old, o))
- return -1;
- } else
+ /* Did it exist in the index? */
+ if (!old) {
if (verify_absent(ce, "removed", o))
return -1;
- ce->ce_flags |= CE_REMOVE;
- add_cache_entry(ce, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
- invalidate_ce_path(ce);
+ return 0;
+ }
+ if (verify_uptodate(old, o))
+ return -1;
+ add_entry(o, ce, CE_REMOVE, 0);
+ invalidate_ce_path(ce, o);
return 1;
}
static int keep_entry(struct cache_entry *ce, struct unpack_trees_options *o)
{
- add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
+ add_entry(o, ce, 0, 0);
return 1;
}
}
#endif
-int threeway_merge(struct cache_entry **stages,
- struct unpack_trees_options *o,
- int remove)
+int threeway_merge(struct cache_entry **stages, struct unpack_trees_options *o)
{
struct cache_entry *index;
struct cache_entry *head;
}
/* #1 */
- if (!head && !remote && any_anc_missing) {
- remove_entry(remove);
+ if (!head && !remote && any_anc_missing)
return 0;
- }
/* Under the new "aggressive" rule, we resolve mostly trivial
* cases that we historically had git-merge-one-file resolve.
if ((head_deleted && remote_deleted) ||
(head_deleted && remote && remote_match) ||
(remote_deleted && head && head_match)) {
- remove_entry(remove);
if (index)
return deleted_entry(index, index, o);
- else if (ce && !head_deleted) {
+ if (ce && !head_deleted) {
if (verify_absent(ce, "removed", o))
return -1;
}
return -1;
}
- remove_entry(remove);
o->nontrivial_merge = 1;
/* #2, #3, #4, #6, #7, #9, #10, #11. */
* "carry forward" rule, please see <Documentation/git-read-tree.txt>.
*
*/
-int twoway_merge(struct cache_entry **src,
- struct unpack_trees_options *o,
- int remove)
+int twoway_merge(struct cache_entry **src, struct unpack_trees_options *o)
{
struct cache_entry *current = src[0];
struct cache_entry *oldtree = src[1];
}
else if (oldtree && !newtree && same(current, oldtree)) {
/* 10 or 11 */
- remove_entry(remove);
return deleted_entry(oldtree, current, o);
}
else if (oldtree && newtree &&
}
else {
/* all other failures */
- remove_entry(remove);
if (oldtree)
return o->gently ? -1 : reject_merge(oldtree);
if (current)
}
else if (newtree)
return merged_entry(newtree, current, o);
- remove_entry(remove);
return deleted_entry(oldtree, current, o);
}
* stage0 does not have anything there.
*/
int bind_merge(struct cache_entry **src,
- struct unpack_trees_options *o,
- int remove)
+ struct unpack_trees_options *o)
{
struct cache_entry *old = src[0];
struct cache_entry *a = src[1];
o->merge_size);
if (a && old)
return o->gently ? -1 :
- error("Entry '%s' overlaps. Cannot bind.", a->name);
+ error("Entry '%s' overlaps with '%s'. Cannot bind.", a->name, old->name);
if (!a)
return keep_entry(old, o);
else
* The rule is:
* - take the stat information from stage0, take the data from stage1
*/
-int oneway_merge(struct cache_entry **src,
- struct unpack_trees_options *o,
- int remove)
+int oneway_merge(struct cache_entry **src, struct unpack_trees_options *o)
{
struct cache_entry *old = src[0];
struct cache_entry *a = src[1];
return error("Cannot do a oneway merge of %d trees",
o->merge_size);
- if (!a) {
- remove_entry(remove);
+ if (!a)
return deleted_entry(old, old, o);
- }
+
if (old && same(old, a)) {
+ int update = 0;
if (o->reset) {
struct stat st;
if (lstat(old->name, &st) ||
- ce_match_stat(old, &st, CE_MATCH_IGNORE_VALID))
- old->ce_flags |= CE_UPDATE;
+ ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID))
+ update |= CE_UPDATE;
}
- return keep_entry(old, o);
+ add_entry(o, old, update, 0);
+ return 0;
}
return merged_entry(a, old, o);
}
struct unpack_trees_options;
typedef int (*merge_fn_t)(struct cache_entry **src,
- struct unpack_trees_options *options,
- int remove);
+ struct unpack_trees_options *options);
struct unpack_trees_options {
int reset;
struct cache_entry *df_conflict_entry;
void *unpack_data;
+
+ struct index_state *dst_index;
+ const struct index_state *src_index;
+ struct index_state result;
};
extern int unpack_trees(unsigned n, struct tree_desc *t,
struct unpack_trees_options *options);
-int threeway_merge(struct cache_entry **stages, struct unpack_trees_options *o, int);
-int twoway_merge(struct cache_entry **src, struct unpack_trees_options *o, int);
-int bind_merge(struct cache_entry **src, struct unpack_trees_options *o, int);
-int oneway_merge(struct cache_entry **src, struct unpack_trees_options *o, int);
+int threeway_merge(struct cache_entry **stages, struct unpack_trees_options *o);
+int twoway_merge(struct cache_entry **src, struct unpack_trees_options *o);
+int bind_merge(struct cache_entry **src, struct unpack_trees_options *o);
+int oneway_merge(struct cache_entry **src, struct unpack_trees_options *o);
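/*
 * Editorial note (not part of the patch): merge_fn_t lost its trailing
 * "remove" argument because the merge functions no longer delete entries
 * from the active cache in place; deletions and additions are now
 * recorded into o->result via add_entry() (see deleted_entry() and
 * merged_entry() in unpack-trees.c).
 */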
#endif
static unsigned long oldest_have;
static int multi_ack, nr_our_refs;
-static int use_thin_pack, use_ofs_delta, no_progress;
+static int use_thin_pack, use_ofs_delta, use_include_tag;
+static int no_progress;
static struct object_array have_obj;
static struct object_array want_obj;
static unsigned int timeout;
* otherwise maximum packet size (up to 65520 bytes).
*/
static int use_sideband;
+static int debug_fd;
static void reset_timeout(void)
{
argv[arg++] = "--progress";
if (use_ofs_delta)
argv[arg++] = "--delta-base-offset";
+ if (use_include_tag)
+ argv[arg++] = "--include-tag";
argv[arg++] = NULL;
memset(&pack_objects, 0, sizeof(pack_objects));
char hex[41], last_hex[41];
int len;
- track_object_refs = 0;
save_commit_buffer = 0;
for(;;) {
static char line[1000];
int len, depth = 0;
+ if (debug_fd)
+ write_in_full(debug_fd, "#S\n", 3);
for (;;) {
struct object *o;
unsigned char sha1_buf[20];
reset_timeout();
if (!len)
break;
+ if (debug_fd)
+ write_in_full(debug_fd, line, len);
if (!prefixcmp(line, "shallow ")) {
unsigned char sha1[20];
use_sideband = DEFAULT_PACKET_MAX;
if (strstr(line+45, "no-progress"))
no_progress = 1;
+ if (strstr(line+45, "include-tag"))
+ use_include_tag = 1;
/* We have sent all our refs already, and the other end
* should have chosen out of them; otherwise they are
add_object_array(o, NULL, &want_obj);
}
}
+ if (debug_fd)
+ write_in_full(debug_fd, "#E\n", 3);
if (depth == 0 && shallows.nr == 0)
return;
if (depth > 0) {
static int send_ref(const char *refname, const unsigned char *sha1, int flag, void *cb_data)
{
static const char *capabilities = "multi_ack thin-pack side-band"
- " side-band-64k ofs-delta shallow no-progress";
+ " side-band-64k ofs-delta shallow no-progress"
+ " include-tag";
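/*
 * Editorial sketch (not part of the patch): once "include-tag" is
 * advertised here, a client that wants annotated tags packed along with
 * the objects they point at appends the capability to its first "want"
 * line, e.g.
 *
 *	want <40-hex object name> side-band-64k thin-pack include-tag
 *
 * The request-parsing hunk above spots it with
 * strstr(line+45, "include-tag"), and the pack-generation hunk then
 * passes --include-tag on to pack-objects.
 */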
struct object *o = parse_object(sha1);
if (!o)
die("'%s': unable to chdir or not a git archive", dir);
if (is_repository_shallow())
die("attempt to fetch/clone from a shallow repository");
+ if (getenv("GIT_DEBUG_SEND_PACK"))
+ debug_fd = atoi(getenv("GIT_DEBUG_SEND_PACK"));
upload_pack();
return 0;
}
int i;
save_commit_buffer = 0;
- track_object_refs = 0;
for (i = 0; i < targets; i++) {
if (!write_ref || !write_ref[i])
#ifndef WALKER_H
#define WALKER_H
+#include "remote.h"
+
struct walker {
void *data;
int (*fetch_ref)(struct walker *, char *ref, unsigned char *sha1);
void walker_free(struct walker *walker);
-struct walker *get_http_walker(const char *url);
+struct walker *get_http_walker(const char *url, struct remote *remote);
#endif /* WALKER_H */
#include "diff.h"
#include "revision.h"
#include "diffcore.h"
+#include "quote.h"
int wt_status_relative_paths = 1;
int wt_status_use_color = -1;
color_fprintf_ln(s->fp, color(WT_STATUS_HEADER), "#");
}
-static char *quote_path(const char *in, int len,
- struct strbuf *out, const char *prefix)
-{
- if (len < 0)
- len = strlen(in);
-
- strbuf_grow(out, len);
- strbuf_setlen(out, 0);
- if (prefix) {
- int off = 0;
- while (prefix[off] && off < len && prefix[off] == in[off])
- if (prefix[off] == '/') {
- prefix += off + 1;
- in += off + 1;
- len -= off + 1;
- off = 0;
- } else
- off++;
-
- for (; *prefix; prefix++)
- if (*prefix == '/')
- strbuf_addstr(out, "../");
- }
-
- for ( ; len > 0; in++, len--) {
- int ch = *in;
-
- switch (ch) {
- case '\n':
- strbuf_addstr(out, "\\n");
- break;
- case '\r':
- strbuf_addstr(out, "\\r");
- break;
- default:
- strbuf_addch(out, ch);
- continue;
- }
- }
-
- if (!out->len)
- strbuf_addstr(out, "./");
-
- return out->buf;
-}
+#define quote_path quote_path_relative
static void wt_status_print_filepair(struct wt_status *s,
int t, struct diff_filepair *p)