git-ssh-upload
git-status
git-stripspace
+git-submodule
git-svn
git-svnimport
git-symbolic-ref
Aneesh Kumar K.V <aneesh.kumar@gmail.com>
Chris Shoemaker <c.shoemaker@cox.net>
+Dana L. How <danahow@gmail.com>
+Dana L. How <how@deathvalley.cswitch.com>
Daniel Barkalow <barkalow@iabervon.org>
David Kågedal <davidk@lysator.liu.se>
Fredrik Kuivinen <freku045@student.liu.se>
Jon Seymour <jon@blackcubes.dyndns.org>
Karl Hasselström <kha@treskal.com>
Kent Engstrom <kent@lysator.liu.se>
-Lars Doelle <lars.doelle@on-line.de>
Lars Doelle <lars.doelle@on-line ! de>
+Lars Doelle <lars.doelle@on-line.de>
Lukas Sandström <lukass@etek.chalmers.se>
Martin Langhoff <martin@catalyst.net.nz>
Michele Ballabio <barra_cuda@katamail.com>
Shawn O. Pearce <spearce@spearce.org>
Theodore Ts'o <tytso@mit.edu>
Tony Luck <tony.luck@intel.com>
-Uwe Kleine-König <zeisberg@informatik.uni-freiburg.de>
Uwe Kleine-König <Uwe_Zeisberger@digi.com>
-Uwe Kleine-König <uzeisberger@io.fsforth.de>
Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
+Uwe Kleine-König <uzeisberger@io.fsforth.de>
+Uwe Kleine-König <zeisberg@informatik.uni-freiburg.de>
Ville Skyttä <scop@xemacs.org>
YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
anonymous <linux@horizon.com>
anonymous <linux@horizon.net>
-Dana L. How <how@deathvalley.cswitch.com>
$(filter-out $(addsuffix .txt, $(ARTICLES) $(SP_ARTICLES)), \
$(wildcard git-*.txt)) \
gitk.txt
-MAN5_TXT=gitattributes.txt
+MAN5_TXT=gitattributes.txt gitignore.txt
MAN7_TXT=git.txt
DOC_HTML=$(patsubst %.txt,%.html,$(MAN1_TXT) $(MAN5_TXT) $(MAN7_TXT))
%.html : %.txt
rm -f $@+ $@
$(ASCIIDOC) -b xhtml11 -d manpage -f asciidoc.conf \
- $(ASCIIDOC_EXTRA) -o - $< | \
- sed -e 's/@@GIT_VERSION@@/$(GIT_VERSION)/g' >$@+
+ $(ASCIIDOC_EXTRA) -agit_version=$(GIT_VERSION) -o $@+ $<
mv $@+ $@
%.1 %.5 %.7 : %.xml
%.xml : %.txt
rm -f $@+ $@
$(ASCIIDOC) -b docbook -d manpage -f asciidoc.conf \
- $(ASCIIDOC_EXTRA) -o - $< | \
- sed -e 's/@@GIT_VERSION@@/$(GIT_VERSION)/g' >$@+
+ $(ASCIIDOC_EXTRA) -agit_version=$(GIT_VERSION) -o $@+ $<
mv $@+ $@
user-manual.xml: user-manual.txt user-manual.conf
<refentrytitle>{mantitle}</refentrytitle>
<manvolnum>{manvolnum}</manvolnum>
<refmiscinfo class="source">Git</refmiscinfo>
-<refmiscinfo class="version">@@GIT_VERSION@@</refmiscinfo>
+<refmiscinfo class="version">{git_version}</refmiscinfo>
<refmiscinfo class="manual">Git Manual</refmiscinfo>
</refmeta>
<refnamediv>
git-ssh-upload synchingrepositories
git-status mainporcelain
git-stripspace purehelpers
+git-submodule mainporcelain
git-svn foreignscminterface
git-svnimport foreignscminterface
git-symbolic-ref plumbingmanipulators
core.excludeFile::
In addition to '.gitignore' (per-directory) and
'.git/info/exclude', git looks into this file for patterns
- of files which are not meant to be tracked.
+ of files which are not meant to be tracked. See
+ gitlink:gitignore[5].
alias.*::
Command aliases for the gitlink:git[1] command wrapper - e.g.
slowest. If not set, defaults to core.compression. If that is
not set, defaults to -1.
+pack.deltaCacheSize::
+	The maximum memory in bytes used for caching deltas in
+ gitlink:git-pack-objects[1].
+ A value of 0 means no limit. Defaults to 0.
+
+pack.deltaCacheLimit::
+	The maximum size of a delta that is cached in
+ gitlink:git-pack-objects[1]. Defaults to 1000.
+
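+A minimal illustration of setting these with gitlink:git-config[1] (the
+values shown are arbitrary examples, not recommendations):
+
+------
+$ git-config pack.deltaCacheSize 268435456
+$ git-config pack.deltaCacheLimit 1000
+------
+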
pull.octopus::
The default merge strategy to use when pulling multiple branches
at once.
[--3way] [--interactive] [--binary]
[--whitespace=<option>] [-C<n>] [-p<n>]
<mbox>|<Maildir>...
-
'git-am' [--skip | --resolved]
DESCRIPTION
lines, a POSIX regexp `value_regex` needs to be given. Only the
existing values that match the regexp are updated or unset. If
you want to handle the lines that do *not* match the regex, just
-prepend a single exclamation mark in front (see EXAMPLES).
+prepend a single exclamation mark in front (see also <<EXAMPLES>>).
The type specifier can be either '--int' or '--bool', which will make
'git-config' ensure that the variable(s) are of the given type and
. the section or key is invalid,
. you try to unset an option which does not exist,
. you try to unset/set an option for which multiple lines match, or
-. you use --global option without $HOME being properly set.
+. you use '--global' option without $HOME being properly set.
OPTIONS
Like --get-all, but interprets the name as a regular expression.
--global::
- Use global ~/.gitconfig file rather than the repository .git/config.
+ For writing options: write to global ~/.gitconfig file rather than
+ the repository .git/config.
++
+For reading options: read only from global ~/.gitconfig rather than
+from all available files.
++
+See also <<FILES>>.
--system::
- Use system-wide $(prefix)/etc/gitconfig rather than the repository
- .git/config.
+ For writing options: write to system-wide $(prefix)/etc/gitconfig
+ rather than the repository .git/config.
++
+For reading options: read only from system-wide $(prefix)/etc/gitconfig
+rather than from all available files.
++
+See also <<FILES>>.
--remove-section::
Remove the given section from the configuration file.
by 1024, 1048576, or 1073741824 prior to output.
+[[FILES]]
+FILES
+-----
+
+There are three files where git-config will search for configuration
+options:
+
+.git/config::
+	Repository-specific configuration file. (The filename is
+ of course relative to the repository root, not the working
+ directory.)
+
+~/.gitconfig::
+ User-specific configuration file. Also called "global"
+ configuration file.
+
+$(prefix)/etc/gitconfig::
+ System-wide configuration file.
+
+If no further options are given, all reading options will read all of these
+files that are available. If the global or the system-wide configuration
+file is not available, it will be ignored. If the repository configuration
+file is not available or readable, git-config will exit with a non-zero
+error code. However, in neither case will an error message be issued.
+
+By default, all writing options write to the repository-specific
+configuration file. Note that this also affects options like '--replace-all'
+and '--unset'. *git-config will only ever change one file at a time*.
+
+You can override these rules either by command line options or by environment
+variables. The '--global' and the '--system' options will limit the file used
+to the global or system-wide file respectively. The GIT_CONFIG environment
+variable has a similar effect, but you can specify any filename you want.
+
+The GIT_CONFIG_LOCAL environment variable, on the other hand, only changes
+the file used in place of the repository configuration file. The global and
+the system-wide configuration files will still be read. (For writing options
+this will obviously result in the same behavior as using GIT_CONFIG.)
+
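+As an illustration of these rules (the option name below is only an
+example):
+
+------
+$ git-config --global core.pager less   # writes to ~/.gitconfig only
+$ git-config core.pager                 # reads all available files
+$ git-config --global core.pager        # reads only ~/.gitconfig
+------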
+
ENVIRONMENT
-----------
GIT_CONFIG::
Take the configuration from the given file instead of .git/config.
- Using the "--global" option forces this to ~/.gitconfig.
+ Using the "--global" option forces this to ~/.gitconfig. Using the
+ "--system" option forces this to $(prefix)/etc/gitconfig.
GIT_CONFIG_LOCAL::
- Currently the same as $GIT_CONFIG; when Git will support global
- configuration files, this will cause it to take the configuration
- from the global configuration file in addition to the given file.
+	Take the configuration from the given file instead of .git/config.
+ Still read the global and the system-wide configuration files, though.
+See also <<FILES>>.
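+
+For example, with an arbitrary file name:
+
+------
+$ GIT_CONFIG=/tmp/my.config git-config user.name
+$ GIT_CONFIG_LOCAL=/tmp/my.config git-config user.name
+------
+
+The first command reads only /tmp/my.config; the second also consults the
+global and the system-wide configuration files.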
-EXAMPLE
--------
+
+[[EXAMPLES]]
+EXAMPLES
+--------
Given a .git/config like this:
SYNOPSIS
--------
-'git-cvsexportcommit' [-h] [-v] [-c] [-P] [-p] [-a] [-d cvsroot] [-f] [-m msgprefix] [PARENTCOMMIT] COMMITID
+'git-cvsexportcommit' [-h] [-u] [-v] [-c] [-P] [-p] [-a] [-d cvsroot] [-f] [-m msgprefix] [PARENTCOMMIT] COMMITID
DESCRIPTION
Prepend the commit message with the provided prefix.
Useful for patch series and the like.
+-u::
+	Update affected files from the CVS repository before attempting export.
+
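+For instance, to update the CVS checkout first and then export and commit a
+single change (the commit id is a placeholder):
+
+------
+$ git-cvsexportcommit -u -c <commit-sha1>
+------
+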
-v::
Verbose.
cvspserver stream tcp nowait nobody git-cvsserver pserver
------
-Note: In some cases, you need to pass the 'pserver' argument twice for
-git-cvsserver to see it. So the line would look like
+Note: Some inetd servers let you specify the name of the executable
+independently of the value of argv[0] (i.e. the name the program assumes
+it was executed with). In this case the correct line in /etc/inetd.conf
+looks like
------
- cvspserver stream tcp nowait nobody git-cvsserver pserver pserver
+ cvspserver stream tcp nowait nobody /usr/bin/git-cvsserver git-cvsserver pserver
------
No special setup is needed for SSH access, other than having GIT tools
in the PATH. If you have clients that do not accept the CVS_SERVER
-env variable, you can rename git-cvsserver to cvs.
+environment variable, you can rename git-cvsserver to cvs.
+
+Note: Newer cvs versions (>= 1.12.11) also support specifying
+CVS_SERVER directly in CVSROOT like
+
+------
+cvs -d ":ext;CVS_SERVER=git-cvsserver:user@server/path/repo.git" co <HEAD_name>
+------
+This has the advantage that it will be saved in your 'CVS/Root' files and
+you don't need to worry about always setting the correct environment
+variable.
--
2. For each repo that you want accessible from CVS you need to edit config in
the repo and add the following section.
SSH, the users of course also need write access to the git repository itself.
[[configaccessmethod]]
-All configuration variables can also be overriden for a specific method of
+All configuration variables can also be overridden for a specific method of
access. Valid method names are "ext" (for SSH access) and "pserver". The
following example configuration would disable pserver access while still
allowing access over SSH.
git-cvsserver uses one database per git head (i.e. CVS module) to
store information about the repository for faster access. The
-database doesn't contain any persitent data and can be completly
+database doesn't contain any persistent data and can be completely
regenerated from the git repository at any time. The database
needs to be updated (i.e. written to) after every commit.
--aggressive::
Usually 'git-gc' runs very quickly while providing good disk
- space utilization and performance. This option will cause
- git-gc to more aggressive optimize the repository at the expense
+ space utilization and performance. This option will cause
+ git-gc to more aggressively optimize the repository at the expense
of taking much more time. The effects of this optimization are
- persistent, so this option only needs to be sporadically; every
+ persistent, so this option only needs to be used occasionally; every
few hundred changesets or so.
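+
+A possible invocation for such an occasional, more thorough optimization:
+
+------
+$ git-gc --aggressive
+------
+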
Configuration
detailed information on unmerged paths.
For an unmerged path, instead of recording a single mode/SHA1 pair,
-the dircache records up to three such pairs; one from tree O in stage
+the index records up to three such pairs; one from tree O in stage
1, A in stage 2, and B in stage 3. This information can be used by
the user (or the porcelain) to see what should eventually be recorded at the
path. (see git-read-tree for more information on state)
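+
+For instance, during a conflicted merge the three stages of a path might be
+listed like this (object names shortened for illustration):
+
+------
+$ git-ls-files --unmerged
+100644 263414f... 1	hello.c
+100644 06fa6a2... 2	hello.c
+100644 cc44c73... 3	hello.c
+------
+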
'git-ls-files' can use a list of "exclude patterns" when
traversing the directory tree and finding files to show when the
-flags --others or --ignored are specified.
+flags --others or --ignored are specified. gitlink:gitignore[5]
+specifies the format of exclude patterns.
-These exclude patterns come from these places:
+These exclude patterns come from these places, in order:
- 1. command line flag --exclude=<pattern> specifies a single
- pattern.
+ 1. The command line flag --exclude=<pattern> specifies a
+ single pattern. Patterns are ordered in the same order
+ they appear in the command line.
- 2. command line flag --exclude-from=<file> specifies a list of
- patterns stored in a file.
+ 2. The command line flag --exclude-from=<file> specifies a
+ file containing a list of patterns. Patterns are ordered
+ in the same order they appear in the file.
3. command line flag --exclude-per-directory=<name> specifies
a name of the file in each directory 'git-ls-files'
- examines, and if exists, its contents are used as an
- additional list of patterns.
-
-An exclude pattern file used by (2) and (3) contains one pattern
-per line. A line that starts with a '#' can be used as comment
-for readability.
-
-There are three lists of patterns that are in effect at a given
-time. They are built and ordered in the following way:
-
- * --exclude=<pattern> from the command line; patterns are
- ordered in the same order as they appear on the command line.
-
- * lines read from --exclude-from=<file>; patterns are ordered
- in the same order as they appear in the file.
-
- * When --exclude-per-directory=<name> is specified, upon
- entering a directory that has such a file, its contents are
- appended at the end of the current "list of patterns". They
- are popped off when leaving the directory.
-
-Each pattern in the pattern list specifies "a match pattern" and
-optionally the fate; either a file that matches the pattern is
-considered excluded or included. A filename is matched against
-the patterns in the three lists; the --exclude-from list is
-checked first, then the --exclude-per-directory list, and then
-finally the --exclude list. The last match determines its fate.
-If there is no match in the three lists, the fate is "included".
+ examines, normally `.gitignore`. Files in deeper
+ directories take precedence. Patterns are ordered in the
+ same order they appear in the files.
A pattern specified on the command line with --exclude or read
from the file specified with --exclude-from is relative to the
by --exclude-per-directory is relative to the directory that the
pattern file appears in.
-An exclude pattern is of the following format:
-
- - an optional prefix '!' which means that the fate this pattern
- specifies is "include", not the usual "exclude"; the
- remainder of the pattern string is interpreted according to
- the following rules.
-
- - if it does not contain a slash '/', it is a shell glob
- pattern and used to match against the filename without
- leading directories.
-
- - otherwise, it is a shell glob pattern, suitable for
- consumption by fnmatch(3) with FNM_PATHNAME flag. I.e. a
- slash in the pattern must match a slash in the pathname.
- "Documentation/\*.html" matches "Documentation/git.html" but
- not "ppc/ppc.html". As a natural exception, "/*.c" matches
- "cat-file.c" but not "mozilla-sha1/sha1.c".
-
-An example:
-
---------------------------------------------------------------
- $ cat .git/info/exclude
- # ignore objects and archives, anywhere in the tree.
- *.[oa]
- $ cat Documentation/.gitignore
- # ignore generated html files,
- *.html
- # except foo.html which is maintained by hand
- !foo.html
- $ git-ls-files --ignored \
- --exclude='Documentation/*.[0-9]' \
- --exclude-from=.git/info/exclude \
- --exclude-per-directory=.gitignore
---------------------------------------------------------------
-
-Another example:
-
---------------------------------------------------------------
- $ cat .gitignore
- vmlinux*
- $ ls arch/foo/kernel/vm*
- arch/foo/kernel/vmlinux.lds.S
- $ echo '!/vmlinux*' >arch/foo/kernel/.gitignore
---------------------------------------------------------------
-
-The second .gitignore keeps `arch/foo/kernel/vmlinux.lds.S` file
-from getting ignored.
-
-
See Also
--------
-gitlink:git-read-tree[1]
+gitlink:git-read-tree[1], gitlink:gitignore[5]
Author
Documentation
--------------
-Documentation by David Greaves, Junio C Hamano and the git-list <git@vger.kernel.org>.
+Documentation by David Greaves, Junio C Hamano, Josh Triplett, and the git-list <git@vger.kernel.org>.
GIT
---
SYNOPSIS
--------
[verse]
-'git-merge' [-n] [--no-commit] [--squash] [-s <strategy>]...
+'git-merge' [-n] [--summary] [--no-commit] [--squash] [-s <strategy>]...
[-m <msg>] <remote> <remote>...
DESCRIPTION
times to get to the necessary object.
The default value for --window is 10 and --depth is 50.
+--max-pack-size=<n>::
+ Maximum size of each output packfile, expressed in MiB.
+ If specified, multiple packfiles may be created.
+ The default is unlimited.
+
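+For example, to split the output into packfiles of at most 512 MiB each
+(the size limit and base name are arbitrary):
+
+------
+$ git-rev-list --objects HEAD | git-pack-objects --max-pack-size=512 mypack
+------
+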
--incremental::
This flag causes an object already in a pack ignored
even if it appears in the standard input.
See Also
--------
-gitlink:git-write-tree[1]; gitlink:git-ls-files[1]
+gitlink:git-write-tree[1]; gitlink:git-ls-files[1];
+gitlink:gitignore[5]
Author
to be applied that many times to get to the necessary object.
The default value for --window is 10 and --depth is 50.
+--max-pack-size=<n>::
+ Maximum size of each output packfile, expressed in MiB.
+ If specified, multiple packfiles may be created.
+ The default is unlimited.
+
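+For example, a full repack limited to packfiles of at most 512 MiB each
+(the size is arbitrary):
+
+------
+$ git-repack -a -d --max-pack-size=512
+------
+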
Configuration
-------------
compatibility) and `color.status.<slot>` configuration variables
to colorize its output.
-As for gitlink:git-add[1], the configuration variable
-'core.excludesfile' can indicate a path to a file containing patterns
-of file names to exclude, in addition to patterns given in
-'info/exclude' and '.gitignore'.
-
+See Also
+--------
+gitlink:gitignore[5]
Author
------
--- /dev/null
+git-submodule(1)
+================
+
+NAME
+----
+git-submodule - Initialize, update or inspect submodules
+
+
+SYNOPSIS
+--------
+'git-submodule' [--quiet] [--cached] [status|init|update] [--] [<path>...]
+
+
+COMMANDS
+--------
+status::
+ Show the status of the submodules. This will print the SHA-1 of the
+ currently checked out commit for each submodule, along with the
+ submodule path and the output of gitlink:git-describe[1] for the
+ SHA-1. Each SHA-1 will be prefixed with `-` if the submodule is not
+ initialized and `+` if the currently checked out submodule commit
+ does not match the SHA-1 found in the index of the containing
+ repository. This command is the default command for git-submodule.
+
+init::
+ Initialize the submodules, i.e. clone the git repositories specified
+	in the .gitmodules file and check out the submodule commits specified
+	in the index of the containing repository. This will leave the
+	submodules with a detached HEAD.
+
+update::
+	Update the initialized submodules, i.e. check out the submodule commits
+	specified in the index of the containing repository. This will leave
+	the submodules with a detached HEAD.
+
+
+OPTIONS
+-------
+-q, --quiet::
+ Only print error messages.
+
+--cached::
+ Display the SHA-1 stored in the index, not the SHA-1 of the currently
+ checked out submodule commit. This option is only valid for the
+ status command.
+
+<path>::
+	Path to submodule(s). When specified, this will restrict the command
+ to only operate on the submodules found at the specified paths.
+
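+A sketch of typical use, inspecting and then initializing the submodules of
+a freshly cloned superproject (submodule paths come from the .gitmodules
+file):
+
+------
+$ git-submodule status
+$ git-submodule init
+$ git-submodule update
+------
+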
+FILES
+-----
+When cloning submodules, a .gitmodules file in the top-level directory
+of the containing repository is used to find the URL of each submodule.
+This file should be formatted in the same way as $GIT_DIR/config. The key
+to each submodule URL is "module.$path.url".
+
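+A minimal example, assuming a submodule checked out at path "lib/frotz"
+(the URL and path are placeholders):
+
+------
+$ cat .gitmodules
+[module "lib/frotz"]
+	url = git://example.com/frotz.git
+------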
+
+AUTHOR
+------
+Written by Lars Hjemli <hjemli@gmail.com>
+
+GIT
+---
+Part of the gitlink:git[7] suite
--- /dev/null
+gitignore(5)
+============
+
+NAME
+----
+gitignore - Specifies intentionally untracked files to ignore
+
+SYNOPSIS
+--------
+$GIT_DIR/info/exclude, .gitignore
+
+DESCRIPTION
+-----------
+
+A `gitignore` file specifies intentionally untracked files that
+git should ignore. Each line in a `gitignore` file specifies a
+pattern.
+
+When deciding whether to ignore a path, git normally checks
+`gitignore` patterns from multiple sources, with the following
+order of precedence:
+
+ * Patterns read from the file specified by the configuration
+ variable 'core.excludesfile'.
+
+ * Patterns read from `$GIT_DIR/info/exclude`.
+
+ * Patterns read from a `.gitignore` file in the same directory
+ as the path, or in any parent directory, ordered from the
+ deepest such file to a file in the root of the repository.
+ These patterns match relative to the location of the
+ `.gitignore` file. A project normally includes such
+ `.gitignore` files in its repository, containing patterns for
+ files generated as part of the project build.
+
+The underlying git plumbing tools, such as
+gitlink:git-ls-files[1] and gitlink:git-read-tree[1], read
+`gitignore` patterns specified by command-line options, or from
+files specified by command-line options. Higher-level git
+tools, such as gitlink:git-status[1] and gitlink:git-add[1],
+use patterns from the sources specified above.
+
+Patterns have the following format:
+
+ - A blank line matches no files, so it can serve as a separator
+ for readability.
+
+ - A line starting with # serves as a comment.
+
+ - An optional prefix '!' which negates the pattern; any
+ matching file excluded by a previous pattern will become
+ included again.
+
+ - If the pattern does not contain a slash '/', git treats it as
+ a shell glob pattern and checks for a match against the
+ pathname without leading directories.
+
+ - Otherwise, git treats the pattern as a shell glob suitable
+ for consumption by fnmatch(3) with the FNM_PATHNAME flag:
+ wildcards in the pattern will not match a / in the pathname.
+ For example, "Documentation/\*.html" matches
+ "Documentation/git.html" but not
+ "Documentation/ppc/ppc.html". A leading slash matches the
+ beginning of the pathname; for example, "/*.c" matches
+ "cat-file.c" but not "mozilla-sha1/sha1.c".
+
+An example:
+
+--------------------------------------------------------------
+ $ git-status
+ [...]
+ # Untracked files:
+ [...]
+ # Documentation/foo.html
+ # Documentation/gitignore.html
+ # file.o
+ # lib.a
+ # src/internal.o
+ [...]
+ $ cat .git/info/exclude
+ # ignore objects and archives, anywhere in the tree.
+ *.[oa]
+ $ cat Documentation/.gitignore
+ # ignore generated html files,
+ *.html
+ # except foo.html which is maintained by hand
+ !foo.html
+ $ git-status
+ [...]
+ # Untracked files:
+ [...]
+ # Documentation/foo.html
+ [...]
+--------------------------------------------------------------
+
+Another example:
+
+--------------------------------------------------------------
+ $ cat .gitignore
+ vmlinux*
+ $ ls arch/foo/kernel/vm*
+ arch/foo/kernel/vmlinux.lds.S
+ $ echo '!/vmlinux*' >arch/foo/kernel/.gitignore
+--------------------------------------------------------------
+
+The second .gitignore prevents git from ignoring
+`arch/foo/kernel/vmlinux.lds.S`.
+
+Documentation
+-------------
+Documentation by David Greaves, Junio C Hamano, Josh Triplett,
+Frank Lichtenheld, and the git-list <git@vger.kernel.org>.
+
+GIT
+---
+Part of the gitlink:git[7] suite
A bare repository is normally an appropriately
named <<def_directory,directory>> with a `.git` suffix that does not
have a locally checked-out copy of any of the files under
- <<def_revision,revision>> control. That is, all of the `git`
+ revision control. That is, all of the `git`
administrative and control files that would normally be present in the
hidden `.git` sub-directory are directly present in the
`repository.git` directory instead,
[[def_chain]]chain::
A list of objects, where each <<def_object,object>> in the list contains
a reference to its successor (for example, the successor of a
- <<def_commit,commit>> could be one of its parents).
+ <<def_commit,commit>> could be one of its <<def_parent,parents>>).
[[def_changeset]]changeset::
BitKeeper/cvsps speak for "<<def_commit,commit>>". Since git does not
[[def_commit_object]]commit object::
An <<def_object,object>> which contains the information about a
- particular <<def_revision,revision>>, such as parents, committer,
+ particular <<def_revision,revision>>, such as <<def_parent,parents>>, committer,
author, date and the <<def_tree_object,tree object>> which corresponds
to the top <<def_directory,directory>> of the stored
- <<def_revision,revision>>.
+ revision.
[[def_core_git]]core git::
Fundamental data structures and utilities of git. Exposes only limited
[[def_detached_HEAD]]detached HEAD::
Normally the <<def_HEAD,HEAD>> stores the name of a
- <<def_branch,branch>>. However, git also allows you to check
- out an arbitrary commit that isn't necessarily the tip of any
+ <<def_branch,branch>>. However, git also allows you to <<def_checkout,check out>>
+ an arbitrary <<def_commit,commit>> that isn't necessarily the tip of any
particular branch. In this case HEAD is said to be "detached".
[[def_dircache]]dircache::
- You are *waaaaay* behind.
+ You are *waaaaay* behind. See <<def_index,index>>.
[[def_directory]]directory::
The list you get with "ls" :-)
[[def_dirty]]dirty::
A <<def_working_tree,working tree>> is said to be "dirty" if
- it contains modifications which have not been committed to the current
+ it contains modifications which have not been <<def_commit,committed>> to the current
<<def_branch,branch>>.
[[def_ent]]ent::
`http://en.wikipedia.org/wiki/Ent_(Middle-earth)` for an in-depth
explanation. Avoid this term, not to confuse people.
+[[def_evil_merge]]evil merge::
+ An evil merge is a <<def_merge,merge>> that introduces changes that
+ do not appear in any <<def_parent,parent>>.
+
[[def_fast_forward]]fast forward::
A fast-forward is a special type of <<def_merge,merge>> where you have a
<<def_revision,revision>> and you are "merging" another
[[def_grafts]]grafts::
Grafts enables two otherwise different lines of development to be joined
together by recording fake ancestry information for commits. This way
- you can make git pretend the set of parents a <<def_commit,commit>> has
+ you can make git pretend the set of <<def_parent,parents>> a <<def_commit,commit>> has
is different from what was recorded when the commit was
created. Configured via the `.git/info/grafts` file.
In git's context, synonym to <<def_object_name,object name>>.
[[def_head]]head::
- A named reference to the <<def_commit,commit>> at the tip of a
+ A <<def_ref,named reference>> to the <<def_commit,commit>> at the tip of a
<<def_branch,branch>>. Heads are stored in
`$GIT_DIR/refs/heads/`, except when using packed refs. (See
gitlink:git-pack-refs[1].)
[[def_HEAD]]HEAD::
- The current branch. In more detail: Your <<def_working_tree,
+ The current <<def_branch,branch>>. In more detail: Your <<def_working_tree,
working tree>> is normally derived from the state of the tree
referred to by HEAD. HEAD is a reference to one of the
<<def_head,heads>> in your repository, except when using a
checking. Typically, the hooks allow for a command to be pre-verified
and potentially aborted, and allow for a post-notification after the
operation is done. The hook scripts are found in the
- `$GIT_DIR/hooks/` <<def_directory,directory>>, and are enabled by simply
+ `$GIT_DIR/hooks/` directory, and are enabled by simply
making them executable.
[[def_index]]index::
A collection of files with stat information, whose contents are stored
- as objects. The index is a stored version of your working
- <<def_tree,tree>>. Truth be told, it can also contain a second, and even
- a third version of a <<def_working_tree,working tree>>, which are used
- when merging.
+ as objects. The index is a stored version of your
+ <<def_working_tree,working tree>>. Truth be told, it can also contain a second, and even
+ a third version of a working tree, which are used
+ when <<def_merge,merging>>.
[[def_index_entry]]index entry::
The information regarding a particular file, stored in the
describing the type of an <<def_object,object>>.
[[def_octopus]]octopus::
- To <<def_merge,merge>> more than two branches. Also denotes an
+ To <<def_merge,merge>> more than two <<def_branch,branches>>. Also denotes an
intelligent predator.
[[def_origin]]origin::
The default upstream <<def_repository,repository>>. Most projects have
at least one upstream project which they track. By default
'origin' is used for that purpose. New upstream updates
- will be fetched into remote tracking branches named
+ will be fetched into remote <<def_tracking_branch,tracking branches>> named
origin/name-of-upstream-branch, which you can see using
- "git <<def_branch,branch>> -r".
+ "`git branch -r`".
[[def_pack]]pack::
A set of objects which have been compressed into one file (to save space
`$GIT_DIR/refs/`.
[[def_refspec]]refspec::
- A <<def_refspec,refspec>> is used by <<def_fetch,fetch>> and
+ A "refspec" is used by <<def_fetch,fetch>> and
<<def_push,push>> to describe the mapping between remote
<<def_ref,ref>> and local ref. They are combined with a colon in
the format <src>:<dst>, preceded by an optional plus sign, +.
gitlink:git-push[1]
[[def_repository]]repository::
- A collection of refs together with an
+ A collection of <<def_ref,refs>> together with an
<<def_object_database,object database>> containing all objects
which are <<def_reachable,reachable>> from the refs, possibly
- accompanied by meta data from one or more porcelains. A
- repository can share an object database with other repositories.
+ accompanied by meta data from one or more <<def_porcelain,porcelains>>. A
+ repository can share an object database with other repositories
+ via <<def_alternate_object_database,alternates mechanism>>.
[[def_resolve]]resolve::
The action of fixing up manually what a failed automatic
Synonym for <<def_object_name,object name>>.
[[def_shallow_repository]]shallow repository::
- A shallow repository has an incomplete
- history some of whose commits have parents cauterized away (in other
+ A shallow <<def_repository,repository>> has an incomplete
+	history, some of whose <<def_commit,commits>> have <<def_parent,parents>> cauterized away (in other
words, git is told to pretend that these commits do not have the
parents, even though they are recorded in the <<def_commit_object,commit
object>>). This is sometimes useful when you are interested only in the
command.
[[def_tag]]tag::
- A <<def_ref,ref>> pointing to a tag or
+ A <<def_ref,ref>> pointing to a <<def_tag_object,tag>> or
<<def_commit_object,commit object>>. In contrast to a <<def_head,head>>,
a tag is not changed by a <<def_commit,commit>>. Tags (not
<<def_tag_object,tag objects>>) are stored in `$GIT_DIR/refs/tags/`. A
An <<def_object,object>> containing a <<def_ref,ref>> pointing to
another object, which can contain a message just like a
<<def_commit_object,commit object>>. It can also contain a (PGP)
- signature, in which case it is called a "signed <<def_tag_object,tag
- object>>".
+ signature, in which case it is called a "signed tag object".
[[def_topic_branch]]topic branch::
A regular git <<def_branch,branch>> that is used by a developer to
[[def_tree]]tree::
Either a <<def_working_tree,working tree>>, or a <<def_tree_object,tree
- object>> together with the dependent blob and tree objects
+ object>> together with the dependent <<def_blob_object,blob>> and tree objects
(i.e. a stored representation of a working tree).
[[def_tree_object]]tree object::
+--summary::
+ Show a diffstat at the end of the merge. The diffstat is also
+ controlled by the configuration option merge.diffstat.
+
-n, \--no-summary::
Do not show diffstat at the end of the merge.
exclude pattern list. `.gitignore` is the per-directory
ignore file. `git status`, `git add`, `git rm` and `git
clean` look at it but the core git commands do not look
- at it. See also: gitlink:git-ls-files[1] `--exclude-from`
- and `--exclude-per-directory`.
+ at it. See also: gitlink:gitignore[5].
remotes::
Stores shorthands to be used to give URL and default
commits reachable from some head but not from any tag in the repository:
-------------------------------------------------
-$ gitk ($ git show-ref --heads ) --not $( git show-ref --tags )
+$ gitk $( git show-ref --heads ) --not $( git show-ref --tags )
-------------------------------------------------
(See gitlink:git-rev-parse[1] for explanations of commit-selecting
Git therefore provides "exclude patterns" for telling git which files to
actively ignore. Exclude patterns are thoroughly explained in the
-"Exclude Patterns" section of the gitlink:git-ls-files[1] manual page,
-but the heart of the concept is simply a list of files which git should
-ignore. Entries in the list may contain globs to specify multiple files,
-or may be prefixed by "`!`" to explicitly include (un-ignore) a previously
-excluded (ignored) file (i.e. later exclude patterns override earlier ones).
-The following example should illustrate such patterns:
+gitlink:gitignore[5] manual page, but the heart of the concept is simply
+a list of files which git should ignore. Entries in the list may contain
+globs to specify multiple files, or may be prefixed by "`!`" to
+explicitly include (un-ignore) a previously excluded (ignored) file
+(i.e. later exclude patterns override earlier ones). The following
+example should illustrate such patterns:
-------------------------------------------------
# Lines starting with '#' are considered comments.
git-am.sh \
git-merge.sh git-merge-stupid.sh git-merge-octopus.sh \
git-merge-resolve.sh git-merge-ours.sh \
- git-lost-found.sh git-quiltimport.sh
+ git-lost-found.sh git-quiltimport.sh git-submodule.sh
SCRIPT_PERL = \
git-add--interactive.perl \
# ... and all the rest that could be moved out of bindir to gitexecdir
PROGRAMS = \
- git-convert-objects$X git-fetch-pack$X git-fsck$X \
+ git-convert-objects$X git-fetch-pack$X \
git-hash-object$X git-index-pack$X git-local-fetch$X \
git-fast-import$X \
git-merge-base$X \
git-show-index$X git-ssh-fetch$X \
git-ssh-upload$X git-unpack-file$X \
git-update-server-info$X \
- git-upload-pack$X git-verify-pack$X \
+ git-upload-pack$X \
git-pack-redundant$X git-var$X \
git-merge-tree$X git-imap-send$X \
git-merge-recursive$X \
diff.h object.h pack.h pkt-line.h quote.h refs.h list-objects.h sideband.h \
run-command.h strbuf.h tag.h tree.h git-compat-util.h revision.h \
tree-walk.h log-tree.h dir.h path-list.h unpack-trees.h builtin.h \
- utf8.h reflog-walk.h patch-ids.h attr.h decorate.h progress.h mailmap.h
+ utf8.h reflog-walk.h patch-ids.h attr.h decorate.h progress.h \
+ mailmap.h remote.h
DIFF_OBJS = \
diff.o diff-lib.o diffcore-break.o diffcore-order.o \
write_or_die.o trace.o list-objects.o grep.o match-trees.o \
alloc.o merge-file.o path-list.o help.o unpack-trees.o $(DIFF_OBJS) \
color.o wt-status.o archive-zip.o archive-tar.o shallow.o utf8.o \
- convert.o attr.o decorate.o progress.o mailmap.o symlinks.o
+ convert.o attr.o decorate.o progress.o mailmap.o symlinks.o remote.o
BUILTIN_OBJS = \
builtin-add.o \
### Testing rules
-TEST_PROGRAMS = test-chmtime$X test-genrandom$X
+TEST_PROGRAMS = test-chmtime$X test-genrandom$X test-date$X test-delta$X test-sha1$X test-match-trees$X
all:: $(TEST_PROGRAMS)
test: all
$(MAKE) -C t/ all
-test-date$X: test-date.c date.o ctype.o
- $(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) test-date.c date.o ctype.o
+test-date$X: date.o ctype.o
-test-delta$X: test-delta.o diff-delta.o patch-delta.o $(GITLIBS)
- $(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
+test-delta$X: diff-delta.o patch-delta.o
-test-dump-cache-tree$X: dump-cache-tree.o $(GITLIBS)
- $(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
-
-test-sha1$X: test-sha1.o $(GITLIBS)
- $(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
-
-test-match-trees$X: test-match-trees.o $(GITLIBS)
- $(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
-
-test-chmtime$X: test-chmtime.c
- $(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $<
-
-test-genrandom$X: test-genrandom.c
- $(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $<
+test-%$X: test-%.o $(GITLIBS)
+ $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
check-sha1:: test-sha1$X
./test-sha1.sh
*/
if (0x03030303 < acc ||
0xffffffff - de < (acc *= 85))
- error("invalid base85 sequence %.5s", buffer-5);
+ return error("invalid base85 sequence %.5s", buffer-5);
acc += de;
say1(" %08x", acc);
for (p = packed_git; p; p = p->next) {
if (!p->pack_local)
continue;
+ if (open_pack_index(p))
+ continue;
packed += p->num_objects;
num_pack++;
}
verify_pack(p, 0);
for (p = packed_git; p; p = p->next) {
- uint32_t i, num = p->num_objects;
+ uint32_t i, num;
+ if (open_pack_index(p))
+ continue;
+ num = p->num_objects;
for (i = 0; i < num; i++)
fsck_sha1(nth_packed_object_sha1(p, i));
}
error("cannot split patches from stdin");
return 1;
}
- num += ret;
+ num += (ret - nr);
+ nr = ret;
continue;
}
error("cannot split patches from %s", arg);
return 1;
}
- num += ret;
+ num += (ret - nr);
+ nr = ret;
}
printf("%d\n", num);
#include "builtin.h"
#include "cache.h"
+#include "attr.h"
#include "object.h"
#include "blob.h"
#include "commit.h"
#include "progress.h"
static const char pack_usage[] = "\
-git-pack-objects [{ -q | --progress | --all-progress }] \n\
+git-pack-objects [{ -q | --progress | --all-progress }] [--max-pack-size=N] \n\
[--local] [--incremental] [--window=N] [--depth=N] \n\
[--no-reuse-delta] [--no-reuse-object] [--delta-base-offset] \n\
[--non-empty] [--revs [--unpacked | --all]*] [--reflog] \n\
struct object_entry *delta_sibling; /* other deltified objects who
* uses the same base as me
*/
+ void *delta_data; /* cached delta (uncompressed) */
unsigned long delta_size; /* delta data size (uncompressed) */
enum object_type type;
enum object_type in_pack_type; /* could be delta */
unsigned char in_pack_header_size;
unsigned char preferred_base; /* we do not pack this, but is available
- * to be used as the base objectto delta
+ * to be used as the base object to delta
* objects against.
*/
+ unsigned char no_try_delta;
};
/*
* nice "minimum seek" order.
*/
static struct object_entry *objects;
-static uint32_t nr_objects, nr_alloc, nr_result;
+static struct object_entry **written_list;
+static uint32_t nr_objects, nr_alloc, nr_result, nr_written;
static int non_empty;
static int no_reuse_delta, no_reuse_object;
static int allow_ofs_delta;
static const char *pack_tmp_name, *idx_tmp_name;
static char tmpname[PATH_MAX];
+static const char *base_name;
static unsigned char pack_file_sha1[20];
static int progress = 1;
static int window = 10;
+static uint32_t pack_size_limit;
static int depth = 50;
static int pack_to_stdout;
static int num_preferred_base;
static int pack_compression_level = Z_DEFAULT_COMPRESSION;
static int pack_compression_seen;
+static unsigned long delta_cache_size = 0;
+static unsigned long max_delta_cache_size = 0;
+static unsigned long cache_max_small_delta_size = 1000;
+
/*
* The object names in objects array are hashed with this hashtable,
* to help looking up the entry by object name.
}
static unsigned long write_object(struct sha1file *f,
- struct object_entry *entry)
+ struct object_entry *entry,
+ off_t write_offset)
{
unsigned long size;
enum object_type type;
void *buf;
unsigned char header[10];
+ unsigned char dheader[10];
unsigned hdrlen;
off_t datalen;
enum object_type obj_type;
int to_reuse = 0;
+ /* write limit if limited packsize and not first object */
+ unsigned long limit = pack_size_limit && nr_written ?
+ pack_size_limit - write_offset : 0;
+ /* no if no delta */
+ int usable_delta = !entry->delta ? 0 :
+ /* yes if unlimited packfile */
+ !pack_size_limit ? 1 :
+ /* no if base written to previous pack */
+ entry->delta->offset == (off_t)-1 ? 0 :
+ /* otherwise double-check written to this
+ * pack, like we do below
+ */
+ entry->delta->offset ? 1 : 0;
if (!pack_to_stdout)
crc32_begin(f);
else if (!entry->in_pack)
to_reuse = 0; /* can't reuse what we don't have */
else if (obj_type == OBJ_REF_DELTA || obj_type == OBJ_OFS_DELTA)
- to_reuse = 1; /* check_object() decided it for us */
+ /* check_object() decided it for us ... */
+ to_reuse = usable_delta;
+ /* ... but pack split may override that */
else if (obj_type != entry->in_pack_type)
to_reuse = 0; /* pack has delta which is unusable */
else if (entry->delta)
*/
if (!to_reuse) {
- buf = read_sha1_file(entry->sha1, &type, &size);
- if (!buf)
- die("unable to read %s", sha1_to_hex(entry->sha1));
- if (size != entry->size)
- die("object %s size inconsistency (%lu vs %lu)",
- sha1_to_hex(entry->sha1), size, entry->size);
- if (entry->delta) {
+ z_stream stream;
+ unsigned long maxsize;
+ void *out;
+ if (!usable_delta) {
+ buf = read_sha1_file(entry->sha1, &obj_type, &size);
+ if (!buf)
+ die("unable to read %s", sha1_to_hex(entry->sha1));
+ } else if (entry->delta_data) {
+ size = entry->delta_size;
+ buf = entry->delta_data;
+ entry->delta_data = NULL;
+ obj_type = (allow_ofs_delta && entry->delta->offset) ?
+ OBJ_OFS_DELTA : OBJ_REF_DELTA;
+ } else {
+ buf = read_sha1_file(entry->sha1, &type, &size);
+ if (!buf)
+ die("unable to read %s", sha1_to_hex(entry->sha1));
buf = delta_against(buf, size, entry);
size = entry->delta_size;
obj_type = (allow_ofs_delta && entry->delta->offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
}
+ /* compress the data to store and put compressed length in datalen */
+ memset(&stream, 0, sizeof(stream));
+ deflateInit(&stream, pack_compression_level);
+ maxsize = deflateBound(&stream, size);
+ out = xmalloc(maxsize);
+ /* Compress it */
+ stream.next_in = buf;
+ stream.avail_in = size;
+ stream.next_out = out;
+ stream.avail_out = maxsize;
+ while (deflate(&stream, Z_FINISH) == Z_OK)
+ /* nothing */;
+	deflateEnd(&stream);
+	datalen = stream.total_out;
/*
* The object header is a byte of 'type' followed by zero or
* more bytes of length.
*/
hdrlen = encode_header(obj_type, size, header);
- sha1write(f, header, hdrlen);
if (obj_type == OBJ_OFS_DELTA) {
/*
* base from this object's position in the pack.
*/
off_t ofs = entry->offset - entry->delta->offset;
- unsigned pos = sizeof(header) - 1;
- header[pos] = ofs & 127;
+ unsigned pos = sizeof(dheader) - 1;
+ dheader[pos] = ofs & 127;
while (ofs >>= 7)
- header[--pos] = 128 | (--ofs & 127);
- sha1write(f, header + pos, sizeof(header) - pos);
- hdrlen += sizeof(header) - pos;
+ dheader[--pos] = 128 | (--ofs & 127);
+ if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
+ free(out);
+ free(buf);
+ return 0;
+ }
+ sha1write(f, header, hdrlen);
+ sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hdrlen += sizeof(dheader) - pos;
} else if (obj_type == OBJ_REF_DELTA) {
/*
* Deltas with a base reference contain
* an additional 20 bytes for the base sha1.
*/
+ if (limit && hdrlen + 20 + datalen + 20 >= limit) {
+ free(out);
+ free(buf);
+ return 0;
+ }
+ sha1write(f, header, hdrlen);
sha1write(f, entry->delta->sha1, 20);
hdrlen += 20;
+ } else {
+ if (limit && hdrlen + datalen + 20 >= limit) {
+ free(out);
+ free(buf);
+ return 0;
+ }
+ sha1write(f, header, hdrlen);
}
- datalen = sha1write_compressed(f, buf, size, pack_compression_level);
+ sha1write(f, out, datalen);
+ free(out);
free(buf);
}
else {
reused_delta++;
}
hdrlen = encode_header(obj_type, entry->size, header);
- sha1write(f, header, hdrlen);
- if (obj_type == OBJ_OFS_DELTA) {
- off_t ofs = entry->offset - entry->delta->offset;
- unsigned pos = sizeof(header) - 1;
- header[pos] = ofs & 127;
- while (ofs >>= 7)
- header[--pos] = 128 | (--ofs & 127);
- sha1write(f, header + pos, sizeof(header) - pos);
- hdrlen += sizeof(header) - pos;
- } else if (obj_type == OBJ_REF_DELTA) {
- sha1write(f, entry->delta->sha1, 20);
- hdrlen += 20;
- }
-
offset = entry->in_pack_offset;
revidx = find_packed_object(p, offset);
datalen = revidx[1].offset - offset;
die("bad packed object CRC for %s", sha1_to_hex(entry->sha1));
offset += entry->in_pack_header_size;
datalen -= entry->in_pack_header_size;
+ if (obj_type == OBJ_OFS_DELTA) {
+ off_t ofs = entry->offset - entry->delta->offset;
+ unsigned pos = sizeof(dheader) - 1;
+ dheader[pos] = ofs & 127;
+ while (ofs >>= 7)
+ dheader[--pos] = 128 | (--ofs & 127);
+ if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit)
+ return 0;
+ sha1write(f, header, hdrlen);
+ sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hdrlen += sizeof(dheader) - pos;
+ } else if (obj_type == OBJ_REF_DELTA) {
+ if (limit && hdrlen + 20 + datalen + 20 >= limit)
+ return 0;
+ sha1write(f, header, hdrlen);
+ sha1write(f, entry->delta->sha1, 20);
+ hdrlen += 20;
+ } else {
+ if (limit && hdrlen + datalen + 20 >= limit)
+ return 0;
+ sha1write(f, header, hdrlen);
+ }
+
if (!pack_to_stdout && p->index_version == 1 &&
check_pack_inflate(p, &w_curs, offset, datalen, entry->size))
die("corrupt packed object for %s", sha1_to_hex(entry->sha1));
unuse_pack(&w_curs);
reused++;
}
- if (entry->delta)
+ if (usable_delta)
written_delta++;
written++;
if (!pack_to_stdout)
return offset;
/* if we are deltified, write out base object first. */
- if (e->delta)
+ if (e->delta) {
offset = write_one(f, e->delta, offset);
+ if (!offset)
+ return 0;
+ }
e->offset = offset;
- size = write_object(f, e);
+ size = write_object(f, e, offset);
+ if (!size) {
+ e->offset = 0;
+ return 0;
+ }
+ written_list[nr_written++] = e;
/* make sure off_t is sufficiently large not to wrap */
if (offset > offset + size)
return mkstemp(tmpname);
}
-static off_t write_pack_file(void)
+/* forward declarations for write_pack_file */
+static void write_index_file(off_t last_obj_offset, unsigned char *sha1);
+static int adjust_perm(const char *path, mode_t mode);
+
+static void write_pack_file(void)
{
- uint32_t i;
+ uint32_t i = 0, j;
struct sha1file *f;
- off_t offset, last_obj_offset = 0;
+ off_t offset, offset_one, last_obj_offset = 0;
struct pack_header hdr;
- int do_progress = progress;
-
- if (pack_to_stdout) {
- f = sha1fd(1, "<stdout>");
- do_progress >>= 1;
- } else {
- int fd = open_object_dir_tmp("tmp_pack_XXXXXX");
- if (fd < 0)
- die("unable to create %s: %s\n", tmpname, strerror(errno));
- pack_tmp_name = xstrdup(tmpname);
- f = sha1fd(fd, pack_tmp_name);
- }
+ int do_progress = progress >> pack_to_stdout;
+ uint32_t nr_remaining = nr_result;
if (do_progress)
start_progress(&progress_state, "Writing %u objects...", "", nr_result);
+ written_list = xmalloc(nr_objects * sizeof(struct object_entry *));
- hdr.hdr_signature = htonl(PACK_SIGNATURE);
- hdr.hdr_version = htonl(PACK_VERSION);
- hdr.hdr_entries = htonl(nr_result);
- sha1write(f, &hdr, sizeof(hdr));
- offset = sizeof(hdr);
- if (!nr_result)
- goto done;
- for (i = 0; i < nr_objects; i++) {
- last_obj_offset = offset;
- offset = write_one(f, objects + i, offset);
- if (do_progress)
- display_progress(&progress_state, written);
- }
+ do {
+ if (pack_to_stdout) {
+ f = sha1fd(1, "<stdout>");
+ } else {
+ int fd = open_object_dir_tmp("tmp_pack_XXXXXX");
+ if (fd < 0)
+ die("unable to create %s: %s\n", tmpname, strerror(errno));
+ pack_tmp_name = xstrdup(tmpname);
+ f = sha1fd(fd, pack_tmp_name);
+ }
+
+ hdr.hdr_signature = htonl(PACK_SIGNATURE);
+ hdr.hdr_version = htonl(PACK_VERSION);
+ hdr.hdr_entries = htonl(nr_remaining);
+ sha1write(f, &hdr, sizeof(hdr));
+ offset = sizeof(hdr);
+ nr_written = 0;
+ for (; i < nr_objects; i++) {
+ last_obj_offset = offset;
+ offset_one = write_one(f, objects + i, offset);
+ if (!offset_one)
+ break;
+ offset = offset_one;
+ if (do_progress)
+ display_progress(&progress_state, written);
+ }
+
+ /*
+ * Did we write the wrong # entries in the header?
+ * If so, rewrite it like in fast-import
+ */
+ if (pack_to_stdout || nr_written == nr_remaining) {
+ sha1close(f, pack_file_sha1, 1);
+ } else {
+ sha1close(f, pack_file_sha1, 0);
+ fixup_pack_header_footer(f->fd, pack_file_sha1, pack_tmp_name, nr_written);
+ close(f->fd);
+ }
+
+ if (!pack_to_stdout) {
+ unsigned char object_list_sha1[20];
+ mode_t mode = umask(0);
+
+ umask(mode);
+ mode = 0444 & ~mode;
+
+ write_index_file(last_obj_offset, object_list_sha1);
+ snprintf(tmpname, sizeof(tmpname), "%s-%s.pack",
+ base_name, sha1_to_hex(object_list_sha1));
+ if (adjust_perm(pack_tmp_name, mode))
+ die("unable to make temporary pack file readable: %s",
+ strerror(errno));
+ if (rename(pack_tmp_name, tmpname))
+ die("unable to rename temporary pack file: %s",
+ strerror(errno));
+ snprintf(tmpname, sizeof(tmpname), "%s-%s.idx",
+ base_name, sha1_to_hex(object_list_sha1));
+ if (adjust_perm(idx_tmp_name, mode))
+ die("unable to make temporary index file readable: %s",
+ strerror(errno));
+ if (rename(idx_tmp_name, tmpname))
+ die("unable to rename temporary index file: %s",
+ strerror(errno));
+ puts(sha1_to_hex(object_list_sha1));
+ }
+
+ /* mark written objects as written to previous pack */
+ for (j = 0; j < nr_written; j++) {
+ written_list[j]->offset = (off_t)-1;
+ }
+ nr_remaining -= nr_written;
+ } while (nr_remaining && i < nr_objects);
+
+ free(written_list);
if (do_progress)
stop_progress(&progress_state);
- done:
if (written != nr_result)
die("wrote %u objects while expecting %u", written, nr_result);
- sha1close(f, pack_file_sha1, 1);
-
- return last_obj_offset;
+ /*
+ * We have scanned through [0 ... i). Since we have written
+ * the correct number of objects, the remaining [i ... nr_objects)
+ * items must be either already written (due to out-of-order delta base)
+ * or a preferred base. Count those which are neither and complain if any.
+ */
+ for (j = 0; i < nr_objects; i++) {
+ struct object_entry *e = objects + i;
+ j += !e->offset && !e->preferred_base;
+ }
+ if (j)
+ die("wrote %u objects as expected but %u unwritten", written, j);
}
static int sha1_sort(const void *_a, const void *_b)
idx_tmp_name = xstrdup(tmpname);
f = sha1fd(fd, idx_tmp_name);
- if (nr_result) {
- uint32_t j = 0;
- sorted_by_sha =
- xcalloc(nr_result, sizeof(struct object_entry *));
- for (i = 0; i < nr_objects; i++)
- if (!objects[i].preferred_base)
- sorted_by_sha[j++] = objects + i;
- if (j != nr_result)
- die("listed %u objects while expecting %u", j, nr_result);
- qsort(sorted_by_sha, nr_result, sizeof(*sorted_by_sha), sha1_sort);
+ if (nr_written) {
+ sorted_by_sha = written_list;
+ qsort(sorted_by_sha, nr_written, sizeof(*sorted_by_sha), sha1_sort);
list = sorted_by_sha;
- last = sorted_by_sha + nr_result;
+ last = sorted_by_sha + nr_written;
} else
sorted_by_sha = list = last = NULL;
/* Write the actual SHA1 entries. */
list = sorted_by_sha;
- for (i = 0; i < nr_result; i++) {
+ for (i = 0; i < nr_written; i++) {
struct object_entry *entry = *list++;
if (index_version < 2) {
uint32_t offset = htonl(entry->offset);
/* write the crc32 table */
list = sorted_by_sha;
- for (i = 0; i < nr_objects; i++) {
+ for (i = 0; i < nr_written; i++) {
struct object_entry *entry = *list++;
uint32_t crc32_val = htonl(entry->crc32);
sha1write(f, &crc32_val, 4);
/* write the 32-bit offset table */
list = sorted_by_sha;
- for (i = 0; i < nr_objects; i++) {
+ for (i = 0; i < nr_written; i++) {
struct object_entry *entry = *list++;
uint32_t offset = (entry->offset <= index_off32_limit) ?
entry->offset : (0x80000000 | nr_large_offset++);
sha1write(f, pack_file_sha1, 20);
sha1close(f, NULL, 1);
- free(sorted_by_sha);
SHA1_Final(sha1, &ctx);
}
unsigned char c;
unsigned hash = 0;
+ if (!name)
+ return 0;
+
/*
* This effectively just creates a sortable number from the
* last sixteen non-whitespace characters. Last characters
return hash;
}
+static void setup_delta_attr_check(struct git_attr_check *check)
+{
+ static struct git_attr *attr_delta;
+
+ if (!attr_delta)
+ attr_delta = git_attr("delta", 5);
+
+ check[0].attr = attr_delta;
+}
+
+static int no_try_delta(const char *path)
+{
+ struct git_attr_check check[1];
+
+ setup_delta_attr_check(check);
+ if (git_checkattr(path, ARRAY_SIZE(check), check))
+ return 0;
+ if (ATTR_FALSE(check->value))
+ return 1;
+ return 0;
+}
+
static int add_object_entry(const unsigned char *sha1, enum object_type type,
- unsigned hash, int exclude)
+ const char *name, int exclude)
{
struct object_entry *entry;
struct packed_git *p, *found_pack = NULL;
off_t found_offset = 0;
int ix;
+ unsigned hash = name_hash(name);
ix = nr_objects ? locate_object_entry_hash(sha1) : -1;
if (ix >= 0) {
if (progress)
display_progress(&progress_state, nr_objects);
+ if (name && no_try_delta(name))
+ entry->no_try_delta = 1;
+
return 1;
}
if (cmp < 0)
return;
if (name[cmplen] != '/') {
- unsigned hash = name_hash(fullname);
add_object_entry(entry.sha1,
S_ISDIR(entry.mode) ? OBJ_TREE : OBJ_BLOB,
- hash, 1);
+ fullname, 1);
return;
}
if (S_ISDIR(entry.mode)) {
return 0;
}
-static void add_preferred_base_object(const char *name, unsigned hash)
+static void add_preferred_base_object(const char *name)
{
struct pbase_tree *it;
int cmplen;
+ unsigned hash = name_hash(name);
if (!num_preferred_base || check_pbase_path(hash))
return;
cmplen = name_cmp_len(name);
for (it = pbase_tree; it; it = it->next) {
if (cmplen == 0) {
- add_object_entry(it->pcache.sha1, OBJ_TREE, 0, 1);
+ add_object_entry(it->pcache.sha1, OBJ_TREE, NULL, 1);
}
else {
struct tree_desc tree;
struct delta_index *index;
};
+static int delta_cacheable(struct unpacked *trg, struct unpacked *src,
+ unsigned long src_size, unsigned long trg_size,
+ unsigned long delta_size)
+{
+ if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)
+ return 0;
+
+ if (delta_size < cache_max_small_delta_size)
+ return 1;
+
+ /* cache delta, if objects are large enough compared to delta size */
+ if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))
+ return 1;
+
+ return 0;
+}
+
/*
* We search for deltas _backwards_ in a list sorted by type and
* by size, so that we see progressively smaller and smaller files.
}
if (!src->index) {
src->index = create_delta_index(src->data, src_size);
- if (!src->index)
- die("out of memory");
+ if (!src->index) {
+ static int warned = 0;
+ if (!warned++)
+ warning("suboptimal pack - out of memory");
+ return 0;
+ }
}
delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
if (!delta_buf)
return 0;
+ if (trg_entry->delta_data) {
+ delta_cache_size -= trg_entry->delta_size;
+ free(trg_entry->delta_data);
+ }
+	trg_entry->delta_data = NULL;
trg_entry->delta = src_entry;
trg_entry->delta_size = delta_size;
trg_entry->depth = src_entry->depth + 1;
- free(delta_buf);
+
+ if (delta_cacheable(src, trg, src_size, trg_size, delta_size)) {
+ trg_entry->delta_data = xrealloc(delta_buf, delta_size);
+ delta_cache_size += trg_entry->delta_size;
+ } else
+ free(delta_buf);
return 1;
}
if (entry->size < 50)
continue;
+
+ if (entry->no_try_delta)
+ continue;
+
free_delta_index(n->index);
n->index = NULL;
free(n->data);
pack_compression_seen = 1;
return 0;
}
+ if (!strcmp(k, "pack.deltacachesize")) {
+ max_delta_cache_size = git_config_int(k, v);
+ return 0;
+ }
+ if (!strcmp(k, "pack.deltacachelimit")) {
+ cache_max_small_delta_size = git_config_int(k, v);
+ return 0;
+ }
return git_default_config(k, v);
}
{
char line[40 + 1 + PATH_MAX + 2];
unsigned char sha1[20];
- unsigned hash;
for (;;) {
if (!fgets(line, sizeof(line), stdin)) {
if (get_sha1_hex(line, sha1))
die("expected sha1, got garbage:\n %s", line);
- hash = name_hash(line+41);
- add_preferred_base_object(line+41, hash);
- add_object_entry(sha1, 0, hash, 0);
+ add_preferred_base_object(line+41);
+ add_object_entry(sha1, 0, line+41, 0);
}
}
static void show_commit(struct commit *commit)
{
- add_object_entry(commit->object.sha1, OBJ_COMMIT, 0, 0);
+ add_object_entry(commit->object.sha1, OBJ_COMMIT, NULL, 0);
}
static void show_object(struct object_array_entry *p)
{
- unsigned hash = name_hash(p->name);
- add_preferred_base_object(p->name, hash);
- add_object_entry(p->item->sha1, p->item->type, hash, 0);
+ add_preferred_base_object(p->name);
+ add_object_entry(p->item->sha1, p->item->type, p->name, 0);
}
static void show_edge(struct commit *commit)
int use_internal_rev_list = 0;
int thin = 0;
uint32_t i;
- off_t last_obj_offset;
- const char *base_name = NULL;
const char **rp_av;
int rp_ac_alloc = 64;
int rp_ac;
pack_compression_level = level;
continue;
}
+ if (!prefixcmp(arg, "--max-pack-size=")) {
+ char *end;
+ pack_size_limit = strtoul(arg+16, &end, 0) * 1024 * 1024;
+ if (!arg[16] || *end)
+ usage(pack_usage);
+ continue;
+ }
if (!prefixcmp(arg, "--window=")) {
char *end;
window = strtoul(arg+9, &end, 0);
if (pack_to_stdout != !base_name)
usage(pack_usage);
+ if (pack_to_stdout && pack_size_limit)
+ die("--max-pack-size cannot be used to build a pack for transfer.");
+
if (!pack_to_stdout && thin)
die("--thin cannot be used to build an indexable pack.");
fprintf(stderr, "Result has %u objects.\n", nr_result);
if (nr_result)
prepare_pack(window, depth);
- last_obj_offset = write_pack_file();
- if (!pack_to_stdout) {
- unsigned char object_list_sha1[20];
- mode_t mode = umask(0);
-
- umask(mode);
- mode = 0444 & ~mode;
-
- write_index_file(last_obj_offset, object_list_sha1);
- snprintf(tmpname, sizeof(tmpname), "%s-%s.pack",
- base_name, sha1_to_hex(object_list_sha1));
- if (adjust_perm(pack_tmp_name, mode))
- die("unable to make temporary pack file readable: %s",
- strerror(errno));
- if (rename(pack_tmp_name, tmpname))
- die("unable to rename temporary pack file: %s",
- strerror(errno));
- snprintf(tmpname, sizeof(tmpname), "%s-%s.idx",
- base_name, sha1_to_hex(object_list_sha1));
- if (adjust_perm(idx_tmp_name, mode))
- die("unable to make temporary index file readable: %s",
- strerror(errno));
- if (rename(idx_tmp_name, tmpname))
- die("unable to rename temporary index file: %s",
- strerror(errno));
- puts(sha1_to_hex(object_list_sha1));
- }
+ write_pack_file();
if (progress)
fprintf(stderr, "Total %u (delta %u), reused %u (delta %u)\n",
written, written_delta, reused, reused_delta);
char name[FLEX_ARRAY];
};
+#define PACK_REFS_PRUNE 0x0001
+#define PACK_REFS_ALL 0x0002
+
struct pack_refs_cb_data {
- int prune;
- int all;
+ unsigned int flags;
struct ref_to_prune *ref_to_prune;
FILE *refs_file;
};
is_tag_ref = !prefixcmp(path, "refs/tags/");
/* ALWAYS pack refs that were already packed or are tags */
- if (!cb->all && !is_tag_ref && !(flags & REF_ISPACKED))
+ if (!(cb->flags & PACK_REFS_ALL) && !is_tag_ref && !(flags & REF_ISPACKED))
return 0;
fprintf(cb->refs_file, "%s %s\n", sha1_to_hex(sha1), path);
}
}
- if (cb->prune && !do_not_prune(flags)) {
+ if ((cb->flags & PACK_REFS_PRUNE) && !do_not_prune(flags)) {
int namelen = strlen(path) + 1;
struct ref_to_prune *n = xcalloc(1, sizeof(*n) + namelen);
hashcpy(n->sha1, sha1);
static struct lock_file packed;
-int cmd_pack_refs(int argc, const char **argv, const char *prefix)
+static int pack_refs(unsigned int flags)
{
- int fd, i;
+ int fd;
struct pack_refs_cb_data cbdata;
memset(&cbdata, 0, sizeof(cbdata));
+ cbdata.flags = flags;
+
+ fd = hold_lock_file_for_update(&packed, git_path("packed-refs"), 1);
+ cbdata.refs_file = fdopen(fd, "w");
+ if (!cbdata.refs_file)
+ die("unable to create ref-pack file structure (%s)",
+ strerror(errno));
+
+ /* perhaps other traits later as well */
+ fprintf(cbdata.refs_file, "# pack-refs with: peeled \n");
+
+ for_each_ref(handle_one_ref, &cbdata);
+ if (fflush(cbdata.refs_file) || fsync(fd) || fclose(cbdata.refs_file))
+ die("failed to write ref-pack file (%s)", strerror(errno));
+ if (commit_lock_file(&packed) < 0)
+ die("unable to overwrite old ref-pack file (%s)", strerror(errno));
+ if (cbdata.flags & PACK_REFS_PRUNE)
+ prune_refs(cbdata.ref_to_prune);
+ return 0;
+}
- cbdata.prune = 1;
+int cmd_pack_refs(int argc, const char **argv, const char *prefix)
+{
+ int i;
+ unsigned int flags;
+
+ flags = PACK_REFS_PRUNE;
for (i = 1; i < argc; i++) {
const char *arg = argv[i];
if (!strcmp(arg, "--prune")) {
- cbdata.prune = 1; /* now the default */
+ flags |= PACK_REFS_PRUNE; /* now the default */
continue;
}
if (!strcmp(arg, "--no-prune")) {
- cbdata.prune = 0;
+ flags &= ~PACK_REFS_PRUNE;
continue;
}
if (!strcmp(arg, "--all")) {
- cbdata.all = 1;
+ flags |= PACK_REFS_ALL;
continue;
}
/* perhaps other parameters later... */
if (i != argc)
usage(builtin_pack_refs_usage);
- fd = hold_lock_file_for_update(&packed, git_path("packed-refs"), 1);
- cbdata.refs_file = fdopen(fd, "w");
- if (!cbdata.refs_file)
- die("unable to create ref-pack file structure (%s)",
- strerror(errno));
-
- /* perhaps other traits later as well */
- fprintf(cbdata.refs_file, "# pack-refs with: peeled \n");
-
- for_each_ref(handle_one_ref, &cbdata);
- fflush(cbdata.refs_file);
- fsync(fd);
- fclose(cbdata.refs_file);
- if (commit_lock_file(&packed) < 0)
- die("unable to overwrite old ref-pack file (%s)", strerror(errno));
- if (cbdata.prune)
- prune_refs(cbdata.ref_to_prune);
- return 0;
+ return pack_refs(flags);
}
#include "refs.h"
#include "run-command.h"
#include "builtin.h"
-
-#define MAX_URI (16)
+#include "remote.h"
static const char push_usage[] = "git-push [--all] [--tags] [--receive-pack=<git-receive-pack>] [--repo=all] [-f | --force] [-v] [<repository> <refspec>...]";
-static int all, tags, force, thin = 1, verbose;
+static int all, force, thin = 1, verbose;
static const char *receivepack;
-#define BUF_SIZE (2084)
-static char buffer[BUF_SIZE];
-
static const char **refspec;
static int refspec_nr;
refspec_nr = nr;
}
-static int expand_one_ref(const char *ref, const unsigned char *sha1, int flag, void *cb_data)
-{
- /* Ignore the "refs/" at the beginning of the refname */
- ref += 5;
-
- if (!prefixcmp(ref, "tags/"))
- add_refspec(xstrdup(ref));
- return 0;
-}
-
-static void expand_refspecs(void)
-{
- if (all) {
- if (refspec_nr)
- die("cannot mix '--all' and a refspec");
-
- /*
- * No need to expand "--all" - we'll just use
- * the "--all" flag to send-pack
- */
- return;
- }
- if (!tags)
- return;
- for_each_ref(expand_one_ref, NULL);
-}
-
-struct wildcard_cb {
- const char *from_prefix;
- int from_prefix_len;
- const char *to_prefix;
- int to_prefix_len;
- int force;
-};
-
-static int expand_wildcard_ref(const char *ref, const unsigned char *sha1, int flag, void *cb_data)
-{
- struct wildcard_cb *cb = cb_data;
- int len = strlen(ref);
- char *expanded, *newref;
-
- if (len < cb->from_prefix_len ||
- memcmp(cb->from_prefix, ref, cb->from_prefix_len))
- return 0;
- expanded = xmalloc(len * 2 + cb->force +
- (cb->to_prefix_len - cb->from_prefix_len) + 2);
- newref = expanded + cb->force;
- if (cb->force)
- expanded[0] = '+';
- memcpy(newref, ref, len);
- newref[len] = ':';
- memcpy(newref + len + 1, cb->to_prefix, cb->to_prefix_len);
- strcpy(newref + len + 1 + cb->to_prefix_len,
- ref + cb->from_prefix_len);
- add_refspec(expanded);
- return 0;
-}
-
-static int wildcard_ref(const char *ref)
-{
- int len;
- const char *colon;
- struct wildcard_cb cb;
-
- memset(&cb, 0, sizeof(cb));
- if (ref[0] == '+') {
- cb.force = 1;
- ref++;
- }
- len = strlen(ref);
- colon = strchr(ref, ':');
- if (! (colon && ref < colon &&
- colon[-2] == '/' && colon[-1] == '*' &&
- /* "<mine>/<asterisk>:<yours>/<asterisk>" is at least 7 bytes */
- 7 <= len &&
- ref[len-2] == '/' && ref[len-1] == '*') )
- return 0 ;
- cb.from_prefix = ref;
- cb.from_prefix_len = colon - ref - 1;
- cb.to_prefix = colon + 1;
- cb.to_prefix_len = len - (colon - ref) - 2;
- for_each_ref(expand_wildcard_ref, &cb);
- return 1;
-}
-
static void set_refspecs(const char **refs, int nr)
{
- if (nr) {
- int i;
- for (i = 0; i < nr; i++) {
- const char *ref = refs[i];
- if (!strcmp("tag", ref)) {
- char *tag;
- int len;
- if (nr <= ++i)
- die("tag shorthand without <tag>");
- len = strlen(refs[i]) + 11;
- tag = xmalloc(len);
- strcpy(tag, "refs/tags/");
- strcat(tag, refs[i]);
- ref = tag;
- }
- else if (wildcard_ref(ref))
- continue;
- add_refspec(ref);
- }
- }
- expand_refspecs();
-}
-
-static int get_remotes_uri(const char *repo, const char *uri[MAX_URI])
-{
- int n = 0;
- FILE *f = fopen(git_path("remotes/%s", repo), "r");
- int has_explicit_refspec = refspec_nr || all || tags;
-
- if (!f)
- return -1;
- while (fgets(buffer, BUF_SIZE, f)) {
- int is_refspec;
- char *s, *p;
-
- if (!prefixcmp(buffer, "URL:")) {
- is_refspec = 0;
- s = buffer + 4;
- } else if (!prefixcmp(buffer, "Push:")) {
- is_refspec = 1;
- s = buffer + 5;
- } else
- continue;
-
- /* Remove whitespace at the head.. */
- while (isspace(*s))
- s++;
- if (!*s)
- continue;
-
- /* ..and at the end */
- p = s + strlen(s);
- while (isspace(p[-1]))
- *--p = 0;
-
- if (!is_refspec) {
- if (n < MAX_URI)
- uri[n++] = xstrdup(s);
- else
- error("more than %d URL's specified, ignoring the rest", MAX_URI);
- }
- else if (is_refspec && !has_explicit_refspec) {
- if (!wildcard_ref(s))
- add_refspec(xstrdup(s));
- }
- }
- fclose(f);
- if (!n)
- die("remote '%s' has no URL", repo);
- return n;
-}
-
-static const char **config_uri;
-static const char *config_repo;
-static int config_repo_len;
-static int config_current_uri;
-static int config_get_refspecs;
-static int config_get_receivepack;
-
-static int get_remote_config(const char* key, const char* value)
-{
- if (!prefixcmp(key, "remote.") &&
- !strncmp(key + 7, config_repo, config_repo_len)) {
- if (!strcmp(key + 7 + config_repo_len, ".url")) {
- if (config_current_uri < MAX_URI)
- config_uri[config_current_uri++] = xstrdup(value);
- else
- error("more than %d URL's specified, ignoring the rest", MAX_URI);
- }
- else if (config_get_refspecs &&
- !strcmp(key + 7 + config_repo_len, ".push")) {
- if (!wildcard_ref(value))
- add_refspec(xstrdup(value));
- }
- else if (config_get_receivepack &&
- !strcmp(key + 7 + config_repo_len, ".receivepack")) {
- if (!receivepack) {
- char *rp = xmalloc(strlen(value) + 16);
- sprintf(rp, "--receive-pack=%s", value);
- receivepack = rp;
- } else
- error("more than one receivepack given, using the first");
- }
- }
- return 0;
-}
-
-static int get_config_remotes_uri(const char *repo, const char *uri[MAX_URI])
-{
- config_repo_len = strlen(repo);
- config_repo = repo;
- config_current_uri = 0;
- config_uri = uri;
- config_get_refspecs = !(refspec_nr || all || tags);
- config_get_receivepack = (receivepack == NULL);
-
- git_config(get_remote_config);
- return config_current_uri;
-}
-
-static int get_branches_uri(const char *repo, const char *uri[MAX_URI])
-{
- const char *slash = strchr(repo, '/');
- int n = slash ? slash - repo : 1000;
- FILE *f = fopen(git_path("branches/%.*s", n, repo), "r");
- char *s, *p;
- int len;
-
- if (!f)
- return 0;
- s = fgets(buffer, BUF_SIZE, f);
- fclose(f);
- if (!s)
- return 0;
- while (isspace(*s))
- s++;
- if (!*s)
- return 0;
- p = s + strlen(s);
- while (isspace(p[-1]))
- *--p = 0;
- len = p - s;
- if (slash)
- len += strlen(slash);
- p = xmalloc(len + 1);
- strcpy(p, s);
- if (slash)
- strcat(p, slash);
- uri[0] = p;
- return 1;
-}
-
-/*
- * Read remotes and branches file, fill the push target URI
- * list. If there is no command line refspecs, read Push: lines
- * to set up the *refspec list as well.
- * return the number of push target URIs
- */
-static int read_config(const char *repo, const char *uri[MAX_URI])
-{
- int n;
-
- if (*repo != '/') {
- n = get_remotes_uri(repo, uri);
- if (n > 0)
- return n;
-
- n = get_config_remotes_uri(repo, uri);
- if (n > 0)
- return n;
-
- n = get_branches_uri(repo, uri);
- if (n > 0)
- return n;
+ int i;
+ for (i = 0; i < nr; i++) {
+ const char *ref = refs[i];
+ if (!strcmp("tag", ref)) {
+ char *tag;
+ int len;
+ if (nr <= ++i)
+ die("tag shorthand without <tag>");
+ len = strlen(refs[i]) + 11;
+ tag = xmalloc(len);
+ strcpy(tag, "refs/tags/");
+ strcat(tag, refs[i]);
+ ref = tag;
+ }
+ add_refspec(ref);
}
-
- uri[0] = repo;
- return 1;
}
static int do_push(const char *repo)
{
- const char *uri[MAX_URI];
- int i, n, errs;
+ int i, errs;
int common_argc;
const char **argv;
int argc;
+ struct remote *remote = remote_get(repo);
- n = read_config(repo, uri);
- if (n <= 0)
+ if (!remote)
die("bad repository '%s'", repo);
+ if (remote->receivepack) {
+ char *rp = xmalloc(strlen(remote->receivepack) + 16);
+ sprintf(rp, "--receive-pack=%s", remote->receivepack);
+ receivepack = rp;
+ }
+ if (!refspec && !all && remote->push_refspec_nr) {
+ refspec = remote->push_refspec;
+ refspec_nr = remote->push_refspec_nr;
+ }
+
argv = xmalloc((refspec_nr + 10) * sizeof(char *));
argv[0] = "dummy-send-pack";
argc = 1;
common_argc = argc;
errs = 0;
- for (i = 0; i < n; i++) {
+ for (i = 0; i < remote->uri_nr; i++) {
int err;
int dest_argc = common_argc;
int dest_refspec_nr = refspec_nr;
const char **dest_refspec = refspec;
- const char *dest = uri[i];
+ const char *dest = remote->uri[i];
const char *sender = "send-pack";
if (!prefixcmp(dest, "http://") ||
!prefixcmp(dest, "https://"))
sender = "http-push";
- else if (thin)
- argv[dest_argc++] = "--thin";
+ else {
+ char *rem = xmalloc(strlen(remote->name) + 10);
+ sprintf(rem, "--remote=%s", remote->name);
+ argv[dest_argc++] = rem;
+ if (thin)
+ argv[dest_argc++] = "--thin";
+ }
argv[0] = sender;
argv[dest_argc++] = dest;
while (dest_refspec_nr--)
if (!err)
continue;
- error("failed to push to '%s'", uri[i]);
+ error("failed to push to '%s'", remote->uri[i]);
switch (err) {
case -ERR_RUN_COMMAND_FORK:
error("unable to fork for %s", sender);
int cmd_push(int argc, const char **argv, const char *prefix)
{
int i;
- const char *repo = "origin"; /* default repository */
+ const char *repo = NULL; /* default repository */
for (i = 1; i < argc; i++) {
const char *arg = argv[i];
continue;
}
if (!strcmp(arg, "--tags")) {
- tags = 1;
+ add_refspec("refs/tags/*");
continue;
}
if (!strcmp(arg, "--force") || !strcmp(arg, "-f")) {
usage(push_usage);
}
set_refspecs(argv + i, argc - i);
+ if (all && refspec)
+ usage(push_usage);
+
return do_push(repo);
}
extern int has_pack_file(const unsigned char *sha1);
extern int has_pack_index(const unsigned char *sha1);
-extern signed char hexval_table[256];
-static inline unsigned int hexval(unsigned int c)
+extern const signed char hexval_table[256];
+static inline unsigned int hexval(unsigned char c)
{
return hexval_table[c];
}
extern pid_t git_connect(int fd[2], char *url, const char *prog, int flags);
extern int finish_connect(pid_t pid);
extern int path_match(const char *path, int nr, char **match);
-extern int match_refs(struct ref *src, struct ref *dst, struct ref ***dst_tail,
- int nr_refspec, char **refspec, int all);
extern int get_ack(int fd, unsigned char *result_sha1);
extern struct ref **get_remote_heads(int in, struct ref **list, int nr_match, char **match, unsigned int flags);
extern int server_supports(const char *feature);
struct packed_git *packs);
extern void pack_report(void);
+extern int open_pack_index(struct packed_git *);
extern unsigned char* use_pack(struct packed_git *, struct pack_window **, off_t, unsigned int *);
extern void unuse_pack(struct pack_window **);
extern struct packed_git *add_packed_git(const char *, int, int);
-extern const unsigned char *nth_packed_object_sha1(const struct packed_git *, uint32_t);
+extern const unsigned char *nth_packed_object_sha1(struct packed_git *, uint32_t);
extern off_t find_pack_entry_one(const unsigned char *, struct packed_git *);
extern void *unpack_entry(struct packed_git *, off_t, enum object_type *, unsigned long *);
extern unsigned long unpack_object_header_gently(const unsigned char *buf, unsigned long len, enum object_type *type, unsigned long *sizep);
bp += i;
for (i = 0; i < len; i++) {
unsigned ch = line[i] & 0xFF;
- if (is_rfc2047_special(ch)) {
+ /*
+ * We encode ' ' using '=20' even though rfc2047
+ * allows using '_' for readability. Unfortunately,
+ * many programs do not understand this and just
+ * leave the underscore in place.
+ */
+ if (is_rfc2047_special(ch) || ch == ' ') {
sprintf(bp, "=%02X", ch);
bp += 3;
}
- else if (ch == ' ')
- *bp++ = '_';
else
*bp++ = ch;
}
#include "quote.h"
#include "refs.h"
#include "run-command.h"
+#include "remote.h"
static char *server_capabilities;
return 0;
}
-struct refspec {
- char *src;
- char *dst;
- char force;
-};
-
-/*
- * A:B means fast forward remote B with local A.
- * +A:B means overwrite remote B with local A.
- * +A is a shorthand for +A:A.
- * A is a shorthand for A:A.
- * :B means delete remote B.
- */
-static struct refspec *parse_ref_spec(int nr_refspec, char **refspec)
-{
- int i;
- struct refspec *rs = xcalloc(sizeof(*rs), (nr_refspec + 1));
- for (i = 0; i < nr_refspec; i++) {
- char *sp, *dp, *ep;
- sp = refspec[i];
- if (*sp == '+') {
- rs[i].force = 1;
- sp++;
- }
- ep = strchr(sp, ':');
- if (ep) {
- dp = ep + 1;
- *ep = 0;
- }
- else
- dp = sp;
- rs[i].src = sp;
- rs[i].dst = dp;
- }
- rs[nr_refspec].src = rs[nr_refspec].dst = NULL;
- return rs;
-}
-
-static int count_refspec_match(const char *pattern,
- struct ref *refs,
- struct ref **matched_ref)
-{
- int patlen = strlen(pattern);
- struct ref *matched_weak = NULL;
- struct ref *matched = NULL;
- int weak_match = 0;
- int match = 0;
-
- for (weak_match = match = 0; refs; refs = refs->next) {
- char *name = refs->name;
- int namelen = strlen(name);
- int weak_match;
-
- if (namelen < patlen ||
- memcmp(name + namelen - patlen, pattern, patlen))
- continue;
- if (namelen != patlen && name[namelen - patlen - 1] != '/')
- continue;
-
- /* A match is "weak" if it is with refs outside
- * heads or tags, and did not specify the pattern
- * in full (e.g. "refs/remotes/origin/master") or at
- * least from the toplevel (e.g. "remotes/origin/master");
- * otherwise "git push $URL master" would result in
- * ambiguity between remotes/origin/master and heads/master
- * at the remote site.
- */
- if (namelen != patlen &&
- patlen != namelen - 5 &&
- prefixcmp(name, "refs/heads/") &&
- prefixcmp(name, "refs/tags/")) {
- /* We want to catch the case where only weak
- * matches are found and there are multiple
- * matches, and where more than one strong
- * matches are found, as ambiguous. One
- * strong match with zero or more weak matches
- * are acceptable as a unique match.
- */
- matched_weak = refs;
- weak_match++;
- }
- else {
- matched = refs;
- match++;
- }
- }
- if (!matched) {
- *matched_ref = matched_weak;
- return weak_match;
- }
- else {
- *matched_ref = matched;
- return match;
- }
-}
-
-static void link_dst_tail(struct ref *ref, struct ref ***tail)
-{
- **tail = ref;
- *tail = &ref->next;
- **tail = NULL;
-}
-
-static struct ref *try_explicit_object_name(const char *name)
-{
- unsigned char sha1[20];
- struct ref *ref;
- int len;
-
- if (!*name) {
- ref = xcalloc(1, sizeof(*ref) + 20);
- strcpy(ref->name, "(delete)");
- hashclr(ref->new_sha1);
- return ref;
- }
- if (get_sha1(name, sha1))
- return NULL;
- len = strlen(name) + 1;
- ref = xcalloc(1, sizeof(*ref) + len);
- memcpy(ref->name, name, len);
- hashcpy(ref->new_sha1, sha1);
- return ref;
-}
-
-static int match_explicit_refs(struct ref *src, struct ref *dst,
- struct ref ***dst_tail, struct refspec *rs)
-{
- int i, errs;
- for (i = errs = 0; rs[i].src; i++) {
- struct ref *matched_src, *matched_dst;
-
- matched_src = matched_dst = NULL;
- switch (count_refspec_match(rs[i].src, src, &matched_src)) {
- case 1:
- break;
- case 0:
- /* The source could be in the get_sha1() format
- * not a reference name. :refs/other is a
- * way to delete 'other' ref at the remote end.
- */
- matched_src = try_explicit_object_name(rs[i].src);
- if (matched_src)
- break;
- errs = 1;
- error("src refspec %s does not match any.",
- rs[i].src);
- break;
- default:
- errs = 1;
- error("src refspec %s matches more than one.",
- rs[i].src);
- break;
- }
- switch (count_refspec_match(rs[i].dst, dst, &matched_dst)) {
- case 1:
- break;
- case 0:
- if (!memcmp(rs[i].dst, "refs/", 5)) {
- int len = strlen(rs[i].dst) + 1;
- matched_dst = xcalloc(1, sizeof(*dst) + len);
- memcpy(matched_dst->name, rs[i].dst, len);
- link_dst_tail(matched_dst, dst_tail);
- }
- else if (!strcmp(rs[i].src, rs[i].dst) &&
- matched_src) {
- /* pushing "master:master" when
- * remote does not have master yet.
- */
- int len = strlen(matched_src->name) + 1;
- matched_dst = xcalloc(1, sizeof(*dst) + len);
- memcpy(matched_dst->name, matched_src->name,
- len);
- link_dst_tail(matched_dst, dst_tail);
- }
- else {
- errs = 1;
- error("dst refspec %s does not match any "
- "existing ref on the remote and does "
- "not start with refs/.", rs[i].dst);
- }
- break;
- default:
- errs = 1;
- error("dst refspec %s matches more than one.",
- rs[i].dst);
- break;
- }
- if (errs)
- continue;
- if (matched_dst->peer_ref) {
- errs = 1;
- error("dst ref %s receives from more than one src.",
- matched_dst->name);
- }
- else {
- matched_dst->peer_ref = matched_src;
- matched_dst->force = rs[i].force;
- }
- }
- return -errs;
-}
-
-static struct ref *find_ref_by_name(struct ref *list, const char *name)
-{
- for ( ; list; list = list->next)
- if (!strcmp(list->name, name))
- return list;
- return NULL;
-}
-
-int match_refs(struct ref *src, struct ref *dst, struct ref ***dst_tail,
- int nr_refspec, char **refspec, int all)
-{
- struct refspec *rs = parse_ref_spec(nr_refspec, refspec);
-
- if (nr_refspec)
- return match_explicit_refs(src, dst, dst_tail, rs);
-
- /* pick the remainder */
- for ( ; src; src = src->next) {
- struct ref *dst_peer;
- if (src->peer_ref)
- continue;
- dst_peer = find_ref_by_name(dst, src->name);
- if ((dst_peer && dst_peer->peer_ref) || (!dst_peer && !all))
- continue;
- if (!dst_peer) {
- /* Create a new one and link it */
- int len = strlen(src->name) + 1;
- dst_peer = xcalloc(1, sizeof(*dst_peer) + len);
- memcpy(dst_peer->name, src->name, len);
- hashcpy(dst_peer->new_sha1, src->new_sha1);
- link_dst_tail(dst_peer, dst_tail);
- }
- dst_peer->peer_ref = src;
- }
- return 0;
-}
-
enum protocol {
PROTO_LOCAL = 1,
PROTO_SSH,
#ifndef NO_IPV6
+static const char *ai_name(const struct addrinfo *ai)
+{
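+ /* format the address in "ai" into a static, printable buffer */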
+ static char addr[INET6_ADDRSTRLEN];
+ if ( AF_INET == ai->ai_family ) {
+ struct sockaddr_in *in;
+ in = (struct sockaddr_in *)ai->ai_addr;
+ inet_ntop(ai->ai_family, &in->sin_addr, addr, sizeof(addr));
+ } else if ( AF_INET6 == ai->ai_family ) {
+ struct sockaddr_in6 *in;
+ in = (struct sockaddr_in6 *)ai->ai_addr;
+ inet_ntop(ai->ai_family, &in->sin6_addr, addr, sizeof(addr));
+ } else {
+ strcpy(addr, "(unknown)");
+ }
+ return addr;
+}
+
/*
* Returns a connected socket() fd, or else die()s.
*/
const char *port = STR(DEFAULT_GIT_PORT);
struct addrinfo hints, *ai0, *ai;
int gai;
+ int cnt = 0;
if (host[0] == '[') {
end = strchr(host + 1, ']');
}
if (connect(sockfd, ai->ai_addr, ai->ai_addrlen) < 0) {
saved_errno = errno;
+ fprintf(stderr, "%s[%d: %s]: net=%s, errno=%s\n",
+ host,
+ cnt,
+ ai_name(ai),
+ hstrerror(h_errno),
+ strerror(saved_errno));
close(sockfd);
sockfd = -1;
continue;
}
+ if (flags & CONNECT_VERBOSE)
+ fprintf(stderr, "%s ", ai_name(ai));
break;
}
struct sockaddr_in sa;
char **ap;
unsigned int nport;
+ int cnt;
if (host[0] == '[') {
end = strchr(host + 1, ']');
if (flags & CONNECT_VERBOSE)
fprintf(stderr, "done.\nConnecting to %s (port %s) ... ", host, port);
- for (ap = he->h_addr_list; *ap; ap++) {
+ for (cnt = 0, ap = he->h_addr_list; *ap; ap++, cnt++) {
sockfd = socket(he->h_addrtype, SOCK_STREAM, 0);
if (sockfd < 0) {
saved_errno = errno;
if (connect(sockfd, (struct sockaddr *)&sa, sizeof sa) < 0) {
saved_errno = errno;
+ fprintf(stderr, "%s[%d: %s]: net=%s, errno=%s\n",
+ host,
+ cnt,
+ inet_ntoa(*(struct in_addr *)&sa.sin_addr),
+ hstrerror(h_errno),
+ strerror(saved_errno));
close(sockfd);
sockfd = -1;
continue;
}
+ if (flags & CONNECT_VERBOSE)
+ fprintf(stderr, "%s ",
+ inet_ntoa(*(struct in_addr *)&sa.sin_addr));
break;
}
branch=$3
# want to make sure that what is pointed to has a .git directory ...
-test -d "$orig_git/.git" || die "\"$orig_git\" is not a git repository!"
+git_dir=$(cd "$orig_git" 2>/dev/null &&
+ git rev-parse --git-dir 2>/dev/null) ||
+ die "\"$orig_git\" is not a git repository!"
# don't link to a workdir
-if test -L "$orig_git/.git/config"
+if test -L "$git_dir/config"
then
die "\"$orig_git\" is a working directory only, please specify" \
"a complete repository."
fi
# make sure the links use full paths
-orig_git=$(cd "$orig_git"; pwd)
+git_dir=$(cd "$git_dir"; pwd)
# create the workdir
mkdir -p "$new_workdir/.git" || die "unable to create \"$new_workdir\"!"
mkdir -p "$(dirname "$new_workdir/.git/$x")"
;;
esac
- ln -s "$orig_git/.git/$x" "$new_workdir/.git/$x"
+ ln -s "$git_dir/$x" "$new_workdir/.git/$x"
done
# now setup the workdir
cd "$new_workdir"
# copy the HEAD from the original repository as a default branch
-cp "$orig_git/.git/HEAD" .git/HEAD
+cp "$git_dir/HEAD" .git/HEAD
# checkout the branch (either the same as HEAD from the original repository, or
# the one that was asked for)
git checkout -f $branch
for (dst = buf; size; size--) {
const char *cp;
+ /* Fetch next source character, move the pointer on */
char ch = *src++;
+ /* Copy the current character to the destination */
*dst++ = ch;
+ /* If the current character is not "$", or there are fewer than
+ * three bytes remaining, or the two bytes following this one are
+ * not "Id", then simply move on to the next character */
if ((ch != '$') || (size < 3) || memcmp("Id", src, 2))
continue;
+ /*
+ * Here when
+ * - There are more than 2 bytes remaining
+ * - The current three bytes are "$Id"
+ * with
+ * - ch == "$"
+ * - src[0] == "I"
+ */
+ /*
+ * It's possible that an expanded Id has crept its way into the
+ * repository; we cope with that by stripping the expansion out
+ */
if (src[2] == ':') {
+ /* Expanded keywords have "$Id:" at the front */
+
/* discard up to but not including the closing $ */
unsigned long rem = size - 3;
+ /* Point at first byte after the ":" */
cp = src + 3;
+ /*
+ * Throw away characters until either
+ * - we reach a "$"
+ * - we run out of bytes (rem == 0)
+ */
do {
- ch = *cp++;
+ ch = *cp;
if (ch == '$')
break;
+ cp++;
rem--;
} while (rem);
+ /* If the above finished because it ran out of characters, then
+ * this is an incomplete keyword, so don't run the expansion */
if (!rem)
continue;
- size -= (cp - src);
} else if (src[2] == '$')
cp = src + 2;
else
+ /* Anything other than "$Id:XXX$" or "$Id$" means we skip the
+ * expansion */
continue;
+ /* cp is now pointing at the last $ of the keyword */
+
memcpy(dst, "Id: ", 4);
dst += 4;
memcpy(dst, sha1_to_hex(sha1), 40);
dst += 40;
*dst++ = ' ';
+
+ /* Adjust for the characters we've discarded */
size -= (cp - src);
src = cp;
+
+ /* Copy the final "$" */
*dst++ = *src++;
size--;
}
}
}
-int sha1close(struct sha1file *f, unsigned char *result, int update)
+int sha1close(struct sha1file *f, unsigned char *result, int final)
{
unsigned offset = f->offset;
if (offset) {
SHA1_Update(&f->ctx, f->buffer, offset);
sha1flush(f, offset);
+ f->offset = 0;
}
+ if (!final)
+ return 0; /* only want to flush (no checksum write, no close) */
SHA1_Final(f->buffer, &f->ctx);
if (result)
hashcpy(result, f->buffer);
- if (update)
- sha1flush(f, 20);
+ sha1flush(f, 20);
if (close(f->fd))
die("%s: sha1 file error on close (%s)", f->name, strerror(errno));
free(f);
/*
* diff-delta.c: generate a delta between two buffers
*
- * Many parts of this file have been lifted from LibXDiff version 0.10.
- * http://www.xmailserver.org/xdiff-lib.html
+ * This code was greatly inspired by parts of LibXDiff from Davide Libenzi
+ * http://www.xmailserver.org/xdiff-lib.html
*
- * LibXDiff was written by Davide Libenzi <davidel@xmailserver.org>
- * Copyright (C) 2003 Davide Libenzi
+ * Rewritten for GIT by Nicolas Pitre <nico@cam.org>, (C) 2005-2007
*
- * Many mods for GIT usage by Nicolas Pitre <nico@cam.org>, (C) 2005.
- *
- * This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Use of this within git automatically means that the LGPL
- * licensing gets turned into GPLv2 within this project.
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
*/
#include "git-compat-util.h"
const void *trg_buf, unsigned long trg_size,
unsigned long *delta_size, unsigned long max_size)
{
- unsigned int i, outpos, outsize, val;
+ unsigned int i, outpos, outsize, moff, msize, val;
int inscnt;
const unsigned char *ref_data, *ref_top, *data, *top;
unsigned char *out;
}
inscnt = i;
+ moff = 0;
+ msize = 0;
while (data < top) {
- unsigned int moff = 0, msize = 0;
- struct index_entry *entry;
- val ^= U[data[-RABIN_WINDOW]];
- val = ((val << 8) | *data) ^ T[val >> RABIN_SHIFT];
- i = val & index->hash_mask;
- for (entry = index->hash[i]; entry; entry = entry->next) {
- const unsigned char *ref = entry->ptr;
- const unsigned char *src = data;
- unsigned int ref_size = ref_top - ref;
- if (entry->val != val)
- continue;
- if (ref_size > top - src)
- ref_size = top - src;
- if (ref_size > 0x10000)
- ref_size = 0x10000;
- if (ref_size <= msize)
- break;
- while (ref_size-- && *src++ == *ref)
- ref++;
- if (msize < ref - entry->ptr) {
- /* this is our best match so far */
- msize = ref - entry->ptr;
- moff = entry->ptr - ref_data;
+ if (msize < 4096) {
+ struct index_entry *entry;
+ val ^= U[data[-RABIN_WINDOW]];
+ val = ((val << 8) | *data) ^ T[val >> RABIN_SHIFT];
+ i = val & index->hash_mask;
+ for (entry = index->hash[i]; entry; entry = entry->next) {
+ const unsigned char *ref = entry->ptr;
+ const unsigned char *src = data;
+ unsigned int ref_size = ref_top - ref;
+ if (entry->val != val)
+ continue;
+ if (ref_size > top - src)
+ ref_size = top - src;
+ if (ref_size <= msize)
+ break;
+ while (ref_size-- && *src++ == *ref)
+ ref++;
+ if (msize < ref - entry->ptr) {
+ /* this is our best match so far */
+ msize = ref - entry->ptr;
+ moff = entry->ptr - ref_data;
+ if (msize >= 4096) /* good enough */
+ break;
+ }
}
}
out[outpos - inscnt - 1] = inscnt;
inscnt = 0;
}
+ msize = 0;
} else {
+ unsigned int left;
unsigned char *op;
- if (msize >= RABIN_WINDOW) {
- const unsigned char *sk;
- sk = data + msize - RABIN_WINDOW;
- val = 0;
- for (i = 0; i < RABIN_WINDOW; i++)
- val = ((val << 8) | *sk++) ^ T[val >> RABIN_SHIFT];
- } else {
- const unsigned char *sk = data + 1;
- for (i = 1; i < msize; i++) {
- val ^= U[sk[-RABIN_WINDOW]];
- val = ((val << 8) | *sk++) ^ T[val >> RABIN_SHIFT];
- }
- }
-
if (inscnt) {
while (moff && ref_data[moff-1] == data[-1]) {
- if (msize == 0x10000)
- break;
/* we can match one byte back */
msize++;
moff--;
inscnt = 0;
}
- data += msize;
+ /* A copy op is currently limited to 64KB (pack v2) */
+ left = (msize < 0x10000) ? 0 : (msize - 0x10000);
+ msize -= left;
+
op = out + outpos++;
i = 0x80;
- if (moff & 0xff) { out[outpos++] = moff; i |= 0x01; }
- moff >>= 8;
- if (moff & 0xff) { out[outpos++] = moff; i |= 0x02; }
- moff >>= 8;
- if (moff & 0xff) { out[outpos++] = moff; i |= 0x04; }
- moff >>= 8;
- if (moff & 0xff) { out[outpos++] = moff; i |= 0x08; }
+ if (moff & 0x000000ff)
+ out[outpos++] = moff >> 0, i |= 0x01;
+ if (moff & 0x0000ff00)
+ out[outpos++] = moff >> 8, i |= 0x02;
+ if (moff & 0x00ff0000)
+ out[outpos++] = moff >> 16, i |= 0x04;
+ if (moff & 0xff000000)
+ out[outpos++] = moff >> 24, i |= 0x08;
- if (msize & 0xff) { out[outpos++] = msize; i |= 0x10; }
- msize >>= 8;
- if (msize & 0xff) { out[outpos++] = msize; i |= 0x20; }
+ if (msize & 0x00ff)
+ out[outpos++] = msize >> 0, i |= 0x10;
+ if (msize & 0xff00)
+ out[outpos++] = msize >> 8, i |= 0x20;
*op = i;
+
+ data += msize;
+ moff += msize;
+ msize = left;
+
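+ /* recompute the rolling hash over the window preceding the new position */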
+ if (msize < 4096) {
+ int j;
+ val = 0;
+ for (j = -RABIN_WINDOW; j < 0; j++)
+ val = ((val << 8) | data[j])
+ ^ T[val >> RABIN_SHIFT];
+ }
}
if (outpos >= outsize - MAX_OP_SIZE) {
outsize = max_size + MAX_OP_SIZE + 1;
if (max_size && outpos > max_size)
break;
- out = xrealloc(out, outsize);
+ out = realloc(out, outsize);
if (!out) {
free(tmp);
return NULL;
}
/*
- * Given a name and sha1 pair, if the dircache tells us the file in
+ * Given a name and sha1 pair, if the index tells us the file in
* the work tree has that object contents, return true, so that
* prepare_temp_file() does not have to inflate and extract.
*/
stop_here_user_resolve () {
if [ -n "$resolvemsg" ]; then
- echo "$resolvemsg"
+ printf '%s\n' "$resolvemsg"
stop_here $1
fi
cmdline=$(basename $0)
git_apply_opt="$git_apply_opt $1"; shift ;;
--resolvemsg=*)
- resolvemsg=$(echo "$1" | sed -e "s/^--resolvemsg=//"); shift ;;
+ resolvemsg=${1#--resolvemsg=}; shift ;;
--)
shift; break ;;
ADD_SIGNOFF=
fi
{
- echo "$SUBJECT"
+ printf '%s\n' "$SUBJECT"
if test -s "$dotest/msg-clean"
then
echo
fi
echo
- echo "Applying '$SUBJECT'"
+ printf 'Applying %s\n' "$SUBJECT"
echo
case "$resolved" in
rm -f "$TMP_INDEX"
fi || exit
- echo "$commit_only" |
+ printf '%s\n' "$commit_only" |
GIT_INDEX_FILE="$TMP_INDEX" \
git-update-index --add --remove --stdin &&
save_index &&
- echo "$commit_only" |
+ printf '%s\n' "$commit_only" |
(
GIT_INDEX_FILE="$NEXT_INDEX"
export GIT_INDEX_FILE
if test "$log_message" != ''
then
- echo "$log_message"
+ printf '%s\n' "$log_message"
elif test "$logfile" != ""
then
if test "$logfile" = -
echo "#"
echo "# It looks like you may be committing a MERGE."
echo "# If this is not correct, please remove the file"
- echo "# $GIT_DIR/MERGE_HEAD"
+ printf '%s\n' "# $GIT_DIR/MERGE_HEAD"
echo "# and try again"
echo "#"
fi >>"$GIT_DIR"/COMMIT_EDITMSG
die "GIT_DIR is not defined or is unreadable";
}
-our ($opt_h, $opt_P, $opt_p, $opt_v, $opt_c, $opt_f, $opt_a, $opt_m, $opt_d);
+our ($opt_h, $opt_P, $opt_p, $opt_v, $opt_c, $opt_f, $opt_a, $opt_m, $opt_d, $opt_u);
-getopts('hPpvcfam:d:');
+getopts('uhPpvcfam:d:');
$opt_h && usage();
my %cvsstat;
if (@canstatusfiles) {
+ if ($opt_u) {
+ my @updated = safe_pipe_capture(@cvs, 'update', @canstatusfiles);
+ print @updated;
+ }
my @cvsoutput;
@cvsoutput= safe_pipe_capture(@cvs, 'status', @canstatusfiles);
my $matchcount = 0;
if (@ARGV && $ARGV[0] eq 'pserver') {
$state->{method} = 'pserver';
my $line = <STDIN>; chomp $line;
- unless( $line eq 'BEGIN AUTH REQUEST') {
+ unless( $line =~ /^BEGIN (AUTH|VERIFICATION) REQUEST$/) {
die "E Do not understand $line - expecting BEGIN AUTH REQUEST\n";
}
+ my $request = $1;
$line = <STDIN>; chomp $line;
req_Root('root', $line) # reuse Root
or die "E Invalid root $line \n";
}
$line = <STDIN>; chomp $line; # validate the password?
$line = <STDIN>; chomp $line;
- unless ($line eq 'END AUTH REQUEST') {
- die "E Do not understand $line -- expecting END AUTH REQUEST\n";
+ unless ($line eq "END $request REQUEST") {
+ die "E Do not understand $line -- expecting END $request REQUEST\n";
}
print "I LOVE YOU\n";
+ exit if $request eq 'VERIFICATION'; # cvs login
# and now back to our regular programme...
}
quiet=--quiet
;;
-v|--verbose)
- verbose=Yes
+ verbose="$verbose"Yes
;;
-k|--k|--ke|--kee|--keep)
keep='-k -k'
echo "$ls_remote_result" | \
git-fetch--tool pick-rref "$rref" "-"
else
+ flags=
+ case $verbose in
+ YesYes*)
+ flags="-v"
+ ;;
+ esac
git-fetch-pack --thin $exec $keep $shallow_depth \
- $quiet $no_progress "$remote" $rref ||
+ $quiet $no_progress $flags "$remote" $rref ||
echo failed "$remote"
fi
fi
endif
ifndef sharedir
- sharedir := $(dir $(gitexecdir))/share
+ sharedir := $(dir $(gitexecdir))share
endif
ifndef INSTALL
libdir ?= $(sharedir)/git-gui/lib
libdir_SQ = $(subst ','\'',$(libdir))
+exedir = $(dir $(gitexecdir))share/git-gui/lib
+exedir_SQ = $(subst ','\'',$(exedir))
+
$(patsubst %.sh,%,$(SCRIPT_SH)) : % : %.sh
$(QUIET_GEN)rm -f $@ $@+ && \
+ if test '$(exedir_SQ)' = '$(libdir_SQ)'; then \
+ GITGUI_RELATIVE=1; \
+ fi && \
sed -e '1s|#!.*/sh|#!$(SHELL_PATH_SQ)|' \
-e 's|^exec wish "$$0"|exec $(subst |,'\|',$(TCLTK_PATH_SQ)) "$$0"|' \
-e 's/@@GITGUI_VERSION@@/$(GITGUI_VERSION)/g' \
- -e 's|@@GITGUI_LIBDIR@@|$(libdir_SQ)|' \
+ -e 's|@@GITGUI_RELATIVE@@|'$$GITGUI_RELATIVE'|' \
+ -e $$GITGUI_RELATIVE's|@@GITGUI_LIBDIR@@|$(libdir_SQ)|' \
$@.sh >$@+ && \
chmod +x $@+ && \
mv $@+ $@
$(subst ','\'',SHELL_PATH='$(SHELL_PATH_SQ)') \
$(subst ','\'',TCL_PATH='$(TCL_PATH_SQ)') \
$(subst ','\'',TCLTK_PATH='$(TCLTK_PATH_SQ)') \
+ $(subst ','\'',gitexecdir='$(gitexecdir_SQ)') \
$(subst ','\'',libdir='$(libdir_SQ)') \
#end TRACK_VARS
## configure our library
set oguilib {@@GITGUI_LIBDIR@@}
-if {[string match @@* $oguilib]} {
+set oguirel {@@GITGUI_RELATIVE@@}
+if {$oguirel eq {1}} {
+ set oguilib [file dirname [file dirname [file normalize $argv0]]]
+ set oguilib [file join $oguilib share git-gui lib]
+} elseif {[string match @@* $oguirel]} {
set oguilib [file join [file dirname [file normalize $argv0]] lib]
}
set idx [file join $oguilib tclIndex]
} else {
set auto_path [concat [list $oguilib] $auto_path]
}
-unset -nocomplain fd idx
+unset -nocomplain oguilib oguirel idx fd
if {![catch {set _verbose $env(GITGUI_VERBOSE)}]} {
unset _verbose
}
unset class
-if {[is_Windows]} {
- set M1B Control
- set M1T Ctrl
-} elseif {[is_MacOSX]} {
+if {[is_MacOSX]} {
set M1B M1
set M1T Cmd
} else {
- set M1B M1
- set M1T M1
+ set M1B Control
+ set M1T Ctrl
}
proc apply_config {} {
# Copyright (c) 2005 Junio C Hamano
#
-USAGE='[-n] [--no-commit] [--squash] [-s <strategy>] [-m=<merge-message>] <commit>+'
+USAGE='[-n] [--summary] [--no-commit] [--squash] [-s <strategy>] [-m=<merge-message>] <commit>+'
SUBDIRECTORY_OK=Yes
. git-sh-setup
'')
;;
?*)
- case "$no_summary" in
- '')
+ if test "$show_diffstat" = t
+ then
# We want color (if set), but no pager
GIT_PAGER='' git-diff --stat --summary -M "$head" "$1"
- ;;
- esac
+ fi
;;
esac
}
case "$1" in
-n|--n|--no|--no-|--no-s|--no-su|--no-sum|--no-summ|\
--no-summa|--no-summar|--no-summary)
- no_summary=t ;;
+ show_diffstat=false ;;
+ --summary)
+ show_diffstat=t ;;
--sq|--squ|--squa|--squas|--squash)
squash=t no_commit=t ;;
--no-c|--no-co|--no-com|--no-comm|--no-commi|--no-commit)
shift
done
+if test -z "$show_diffstat"; then
+ test "$(git-config --bool merge.diffstat)" = false && show_diffstat=false
+ test -z "$show_diffstat" && show_diffstat=t
+fi
+
# This could be traditional "merge <msg> HEAD <commit>..." and the
# way we can tell it is to see if the second token is HEAD, but some
# people might have misused the interface and used a committish that
then
echo "Wonderful."
result_commit=$(
- echo "$merge_msg" |
+ printf '%s\n' "$merge_msg" |
git-commit-tree $result_tree -p HEAD -p "$1"
) || exit
finish "$result_commit" "In-index merge"
if test '' != "$result_tree"
then
parents=$(git-show-branch --independent "$head" "$@" | sed -e 's/^/-p /')
- result_commit=$(echo "$merge_msg" | git-commit-tree $result_tree $parents) || exit
+ result_commit=$(printf '%s\n' "$merge_msg" | git-commit-tree $result_tree $parents) || exit
finish "$result_commit" "Merge made by $wt_strategy."
dropsave
exit 0
do
echo $remote
done >"$GIT_DIR/MERGE_HEAD"
- echo "$merge_msg" >"$GIT_DIR/MERGE_MSG"
+ printf '%s\n' "$merge_msg" >"$GIT_DIR/MERGE_MSG"
fi
if test "$merge_was_ok" = t
-n|--n|--no|--no-|--no-s|--no-su|--no-sum|--no-summ|\
--no-summa|--no-summar|--no-summary)
no_summary=-n ;;
+ --summary)
+ no_summary=$1
+ ;;
--no-c|--no-co|--no-com|--no-comm|--no-commi|--no-commit)
no_commit=--no-commit ;;
--sq|--squ|--squa|--squas|--squash)
if test -n "$unmerged"
then
echo "You still have unmerged paths in your index"
- echo "did you forget update-index?"
+ echo "did you forget to use git add?"
die "$RESOLVEMSG"
fi
--continue)
git-diff-files --quiet || {
echo "You must edit all merge conflicts and then"
- echo "mark them as resolved using git update-index"
+ echo "mark them as resolved using git add"
exit 1
}
if test -d "$dotest"
# Copyright (c) 2005 Linus Torvalds
#
-USAGE='[-a] [-d] [-f] [-l] [-n] [-q] [--window=N] [--depth=N]'
+USAGE='[-a] [-d] [-f] [-l] [-n] [-q] [--max-pack-size=N] [--window=N] [--depth=N]'
SUBDIRECTORY_OK='Yes'
. git-sh-setup
-q) quiet=-q ;;
-f) no_reuse=--no-reuse-object ;;
-l) local=--local ;;
+ --max-pack-size=*) extra="$extra $1" ;;
--window=*) extra="$extra $1" ;;
--depth=*) extra="$extra $1" ;;
*) usage ;;
esac
PACKDIR="$GIT_OBJECT_DIRECTORY/pack"
-PACKTMP="$GIT_DIR/.tmp-$$-pack"
+PACKTMP="$GIT_OBJECT_DIRECTORY/.tmp-$$-pack"
rm -f "$PACKTMP"-*
trap 'rm -f "$PACKTMP"-*' 0 1 2 3 15
esac
args="$args $local $quiet $no_reuse$extra"
-name=$(git-pack-objects --non-empty --all --reflog $args </dev/null "$PACKTMP") ||
+names=$(git-pack-objects --non-empty --all --reflog $args </dev/null "$PACKTMP") ||
exit 1
-if [ -z "$name" ]; then
+if [ -z "$names" ]; then
echo Nothing new to pack.
-else
+fi
+for name in $names ; do
+ fullbases="$fullbases pack-$name"
chmod a-w "$PACKTMP-$name.pack"
chmod a-w "$PACKTMP-$name.idx"
if test "$quiet" != '-q'; then
exit 1
}
rm -f "$PACKDIR/old-pack-$name.pack" "$PACKDIR/old-pack-$name.idx"
-fi
+done
if test "$remove_redundant" = t
then
( cd "$PACKDIR" &&
for e in $existing
do
- case "$e" in
- pack-$name) ;;
+ case " $fullbases " in
+ *" $e "*) ;;
*) rm -f "$e.pack" "$e.idx" "$e.keep" ;;
esac
done
--- /dev/null
+#!/bin/sh
+#
+# git-submodule.sh: init, update or list git submodules
+#
+# Copyright (c) 2007 Lars Hjemli
+
+USAGE='[--quiet] [--cached] [status|init|update] [--] [<path>...]'
+. git-sh-setup
+require_work_tree
+
+init=
+update=
+status=
+quiet=
+cached=
+
+#
+# print stuff on stdout unless -q was specified
+#
+say()
+{
+ if test -z "$quiet"
+ then
+ echo "$@"
+ fi
+}
+
+#
+# Run clone + checkout on missing submodules
+#
+# $@ = requested paths (default to all)
+#
+modules_init()
+{
+ git ls-files --stage -- "$@" | grep -e '^160000 ' |
+ while read mode sha1 stage path
+ do
+ # Skip submodule paths that already contain a .git directory.
+ # This will also trigger if $path is a symlink to a git
+ # repository
+ test -d "$path"/.git && continue
+
+ # If there already is a directory at the submodule path,
+ # expect it to be empty (since that is the default checkout
+ # action) and try to remove it.
+ # Note: if $path is a symlink to a directory the test will
+ # succeed but the rmdir will fail. We might want to fix this.
+ if test -d "$path"
+ then
+ rmdir "$path" 2>/dev/null ||
+ die "Directory '$path' exist, but is neither empty nor a git repository"
+ fi
+
+ test -e "$path" &&
+ die "A file already exist at path '$path'"
+
+ url=$(GIT_CONFIG=.gitmodules git-config module."$path".url)
+ test -z "$url" &&
+ die "No url found for submodule '$path' in .gitmodules"
+
+ # MAYBE FIXME: this would be the place to check GIT_CONFIG
+ # for a preferred url for this submodule, possibly like this:
+ #
+ # modname=$(GIT_CONFIG=.gitmodules git-config module."$path".name)
+ # alturl=$(git-config module."$modname".url)
+ #
+ # This would let the versioned .gitmodules file use the submodule
+ # path as key, while the unversioned GIT_CONFIG would use the
+ # logical modulename (if present) as key. But this would need
+ # another fallback mechanism if the module wasn't named.
+
+ git-clone -n "$url" "$path" ||
+ die "Clone of submodule '$path' failed"
+
+ (unset GIT_DIR && cd "$path" && git-checkout -q "$sha1") ||
+ die "Checkout of submodule '$path' failed"
+
+ say "Submodule '$path' initialized"
+ done
+}
+
+#
+# Checkout correct revision of each initialized submodule
+#
+# $@ = requested paths (default to all)
+#
+modules_update()
+{
+ git ls-files --stage -- "$@" | grep -e '^160000 ' |
+ while read mode sha1 stage path
+ do
+ if ! test -d "$path"/.git
+ then
+ # Only mention uninitialized submodules when their
+ # paths have been explicitly specified
+ test "$#" != "0" &&
+ say "Submodule '$path' not initialized"
+ continue;
+ fi
+ subsha1=$(unset GIT_DIR && cd "$path" &&
+ git-rev-parse --verify HEAD) ||
+ die "Unable to find current revision of submodule '$path'"
+
+ if test "$subsha1" != "$sha1"
+ then
+ (unset GIT_DIR && cd "$path" && git-fetch &&
+ git-checkout -q "$sha1") ||
+ die "Unable to checkout '$sha1' in submodule '$path'"
+
+ say "Submodule '$path': checked out '$sha1'"
+ fi
+ done
+}
+
+#
+# List all registered submodules, prefixed with:
+# - submodule not initialized
+# + different revision checked out
+#
+# If --cached was specified the revision in the index will be printed
+# instead of the currently checked out revision.
+#
+# $@ = requested paths (default to all)
+#
+modules_list()
+{
+ git ls-files --stage -- "$@" | grep -e '^160000 ' |
+ while read mode sha1 stage path
+ do
+ if ! test -d "$path"/.git
+ then
+ say "-$sha1 $path"
+ continue;
+ fi
+ revname=$(unset GIT_DIR && cd "$path" && git-describe $sha1)
+ if git diff-files --quiet -- "$path"
+ then
+ say " $sha1 $path ($revname)"
+ else
+ if test -z "$cached"
+ then
+ sha1=$(unset GIT_DIR && cd "$path" && git-rev-parse --verify HEAD)
+ revname=$(unset GIT_DIR && cd "$path" && git-describe $sha1)
+ fi
+ say "+$sha1 $path ($revname)"
+ fi
+ done
+}
+
+while case "$#" in 0) break ;; esac
+do
+ case "$1" in
+ init)
+ init=1
+ ;;
+ update)
+ update=1
+ ;;
+ status)
+ status=1
+ ;;
+ -q|--quiet)
+ quiet=1
+ ;;
+ --cached)
+ cached=1
+ ;;
+ --)
+ break
+ ;;
+ -*)
+ usage
+ ;;
+ *)
+ break
+ ;;
+ esac
+ shift
+done
+
+case "$init,$update,$status,$cached" in
+1,,,)
+ modules_init "$@"
+ ;;
+,1,,)
+ modules_update "$@"
+ ;;
+,,*,*)
+ modules_list "$@"
+ ;;
+*)
+ usage
+ ;;
+esac
my $hash;
my $path = $self->git_path($fb->{path});
if (my $fh = $fb->{fh}) {
- seek($fh, 0, 0) or croak $!;
- my $md5 = Digest::MD5->new;
- $md5->addfile($fh);
- my $got = $md5->hexdigest;
- die "Checksum mismatch: $path\n",
- "expected: $exp\n got: $got\n" if ($got ne $exp);
+ if (defined $exp) {
+ seek($fh, 0, 0) or croak $!;
+ my $md5 = Digest::MD5->new;
+ $md5->addfile($fh);
+ my $got = $md5->hexdigest;
+ if ($got ne $exp) {
+ die "Checksum mismatch: $path\n",
+ "expected: $exp\n got: $got\n";
+ }
+ }
sysseek($fh, 0, 0) or croak $!;
if ($fb->{mode_b} == 120000) {
sysread($fh, my $buf, 5) == 5 or croak $!;
echo "#" ) > "$GIT_DIR"/TAG_EDITMSG
${VISUAL:-${EDITOR:-vi}} "$GIT_DIR"/TAG_EDITMSG || exit
else
- echo "$message" >"$GIT_DIR"/TAG_EDITMSG
+ printf '%s\n' "$message" >"$GIT_DIR"/TAG_EDITMSG
fi
grep -v '^#' <"$GIT_DIR"/TAG_EDITMSG |
#include "diff.h"
#include "revision.h"
#include "exec_cmd.h"
+#include "remote.h"
#include <expat.h>
}
-#define MAX_CHAIN 40
+#define MAX_CHAIN 50
static void show_pack_info(struct packed_git *p)
{
- uint32_t nr_objects, i, chain_histogram[MAX_CHAIN];
-
+ uint32_t nr_objects, i, chain_histogram[MAX_CHAIN+1];
nr_objects = p->num_objects;
memset(chain_histogram, 0, sizeof(chain_histogram));
printf("%-6s %lu %"PRIuMAX" %u %s\n",
type, size, (uintmax_t)offset,
delta_chain_length, sha1_to_hex(base_sha1));
- if (delta_chain_length < MAX_CHAIN)
+ if (delta_chain_length <= MAX_CHAIN)
chain_histogram[delta_chain_length]++;
else
chain_histogram[0]++;
}
}
- for (i = 0; i < MAX_CHAIN; i++) {
+ for (i = 0; i <= MAX_CHAIN; i++) {
if (!chain_histogram[i])
continue;
- printf("chain length %s %d: %d object%s\n",
- i ? "=" : ">=",
- i ? i : MAX_CHAIN,
- chain_histogram[i],
- 1 < chain_histogram[i] ? "s" : "");
+ printf("chain length = %d: %d object%s\n", i,
+ chain_histogram[i], chain_histogram[i] > 1 ? "s" : "");
}
+ if (chain_histogram[0])
+ printf("chain length > %d: %d object%s\n", MAX_CHAIN,
+ chain_histogram[0], chain_histogram[0] > 1 ? "s" : "");
}
int verify_pack(struct packed_git *p, int verbose)
{
- off_t index_size = p->index_size;
- const unsigned char *index_base = p->index_data;
+ off_t index_size;
+ const unsigned char *index_base;
SHA_CTX ctx;
unsigned char sha1[20];
int ret;
+ if (open_pack_index(p))
+ return error("packfile %s index not opened", p->pack_name);
+ index_size = p->index_size;
+ index_base = p->index_data;
+
ret = 0;
/* Verify SHA1 sum of the index file */
SHA1_Init(&ctx);
l.pack = p;
llist_init(&l.all_objects);
+ if (open_pack_index(p))
+ return NULL;
+
base = p->index_data;
base += 256 * 4 + ((p->index_version < 2) ? 4 : 8);
step = (p->index_version < 2) ? 24 : 20;
static inline int bad_ref_char(int ch)
{
- return (((unsigned) ch) <= ' ' ||
- ch == '~' || ch == '^' || ch == ':' ||
- /* 2.13 Pattern Matching Notation */
- ch == '?' || ch == '*' || ch == '[');
+ if (((unsigned) ch) <= ' ' ||
+ ch == '~' || ch == '^' || ch == ':')
+ return 1;
+ /* 2.13 Pattern Matching Notation */
+ if (ch == '?' || ch == '[') /* Unsupported */
+ return 1;
+ if (ch == '*') /* Supported at the end */
+ return 2;
+ return 0;
}
int check_ref_format(const char *ref)
{
- int ch, level;
+ int ch, level, bad_type;
const char *cp = ref;
level = 0;
return -1; /* should not end with slashes */
/* we are at the beginning of the path component */
- if (ch == '.' || bad_ref_char(ch))
+ if (ch == '.')
return -1;
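+ /* "*" is only allowed as the very last character; report it as -3 (pattern) */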
+ bad_type = bad_ref_char(ch);
+ if (bad_type) {
+ return (bad_type == 2 && !*cp) ? -3 : -1;
+ }
/* scan the rest of the path component */
while ((ch = *cp++) != 0) {
- if (bad_ref_char(ch))
- return -1;
+ bad_type = bad_ref_char(ch);
+ if (bad_type) {
+ return (bad_type == 2 && !*cp) ? -3 : -1;
+ }
if (ch == '/')
break;
if (ch == '.' && *cp == '.')
--- /dev/null
+#include "cache.h"
+#include "remote.h"
+#include "refs.h"
+
+static struct remote **remotes;
+static int allocated_remotes;
+
+#define BUF_SIZE (2048)
+static char buffer[BUF_SIZE];
+
+static void add_push_refspec(struct remote *remote, const char *ref)
+{
+ int nr = remote->push_refspec_nr + 1;
+ remote->push_refspec =
+ xrealloc(remote->push_refspec, nr * sizeof(char *));
+ remote->push_refspec[nr-1] = ref;
+ remote->push_refspec_nr = nr;
+}
+
+static void add_fetch_refspec(struct remote *remote, const char *ref)
+{
+ int nr = remote->fetch_refspec_nr + 1;
+ remote->fetch_refspec =
+ xrealloc(remote->fetch_refspec, nr * sizeof(char *));
+ remote->fetch_refspec[nr-1] = ref;
+ remote->fetch_refspec_nr = nr;
+}
+
+static void add_uri(struct remote *remote, const char *uri)
+{
+ int nr = remote->uri_nr + 1;
+ remote->uri =
+ xrealloc(remote->uri, nr * sizeof(char *));
+ remote->uri[nr-1] = uri;
+ remote->uri_nr = nr;
+}
+
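+/*
+ * Return the remote with this name if one has been seen already,
+ * otherwise allocate (or reuse) an empty slot for it.
+ */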
+static struct remote *make_remote(const char *name, int len)
+{
+ int i, empty = -1;
+
+ for (i = 0; i < allocated_remotes; i++) {
+ if (!remotes[i]) {
+ if (empty < 0)
+ empty = i;
+ } else {
+ if (len ? (!strncmp(name, remotes[i]->name, len) &&
+ !remotes[i]->name[len]) :
+ !strcmp(name, remotes[i]->name))
+ return remotes[i];
+ }
+ }
+
+ if (empty < 0) {
+ empty = allocated_remotes;
+ allocated_remotes += allocated_remotes ? allocated_remotes : 1;
+ remotes = xrealloc(remotes,
+ sizeof(*remotes) * allocated_remotes);
+ memset(remotes + empty, 0,
+ (allocated_remotes - empty) * sizeof(*remotes));
+ }
+ remotes[empty] = xcalloc(1, sizeof(struct remote));
+ if (len)
+ remotes[empty]->name = xstrndup(name, len);
+ else
+ remotes[empty]->name = xstrdup(name);
+ return remotes[empty];
+}
+
+static void read_remotes_file(struct remote *remote)
+{
+ FILE *f = fopen(git_path("remotes/%s", remote->name), "r");
+
+ if (!f)
+ return;
+ while (fgets(buffer, BUF_SIZE, f)) {
+ int value_list;
+ char *s, *p;
+
+ if (!prefixcmp(buffer, "URL:")) {
+ value_list = 0;
+ s = buffer + 4;
+ } else if (!prefixcmp(buffer, "Push:")) {
+ value_list = 1;
+ s = buffer + 5;
+ } else if (!prefixcmp(buffer, "Pull:")) {
+ value_list = 2;
+ s = buffer + 5;
+ } else
+ continue;
+
+ while (isspace(*s))
+ s++;
+ if (!*s)
+ continue;
+
+ p = s + strlen(s);
+ while (isspace(p[-1]))
+ *--p = 0;
+
+ switch (value_list) {
+ case 0:
+ add_uri(remote, xstrdup(s));
+ break;
+ case 1:
+ add_push_refspec(remote, xstrdup(s));
+ break;
+ case 2:
+ add_fetch_refspec(remote, xstrdup(s));
+ break;
+ }
+ }
+ fclose(f);
+}
+
+static void read_branches_file(struct remote *remote)
+{
+ const char *slash = strchr(remote->name, '/');
+ int n = slash ? slash - remote->name : 1000;
+ FILE *f = fopen(git_path("branches/%.*s", n, remote->name), "r");
+ char *s, *p;
+ int len;
+
+ if (!f)
+ return;
+ s = fgets(buffer, BUF_SIZE, f);
+ fclose(f);
+ if (!s)
+ return;
+ while (isspace(*s))
+ s++;
+ if (!*s)
+ return;
+ p = s + strlen(s);
+ while (isspace(p[-1]))
+ *--p = 0;
+ len = p - s;
+ if (slash)
+ len += strlen(slash);
+ p = xmalloc(len + 1);
+ strcpy(p, s);
+ if (slash)
+ strcat(p, slash);
+ add_uri(remote, p);
+}
+
+static char *default_remote_name = NULL;
+static const char *current_branch = NULL;
+static int current_branch_len = 0;
+
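+/*
+ * Config callback: remember branch.<current>.remote as the default
+ * remote name and collect the remote.<name>.* settings.
+ */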
+static int handle_config(const char *key, const char *value)
+{
+ const char *name;
+ const char *subkey;
+ struct remote *remote;
+ if (!prefixcmp(key, "branch.") && current_branch &&
+ !strncmp(key + 7, current_branch, current_branch_len) &&
+ !strcmp(key + 7 + current_branch_len, ".remote")) {
+ free(default_remote_name);
+ default_remote_name = xstrdup(value);
+ }
+ if (prefixcmp(key, "remote."))
+ return 0;
+ name = key + 7;
+ subkey = strrchr(name, '.');
+ if (!subkey)
+ return error("Config with no key for remote %s", name);
+ if (*subkey == '/') {
+ warning("Config remote shorthand cannot begin with '/': %s", name);
+ return 0;
+ }
+ remote = make_remote(name, subkey - name);
+ if (!value) {
+ /* if we ever have a boolean variable, e.g. "remote.*.disabled"
+ * [remote "frotz"]
+ * disabled
+ * is a valid way to set it to true; we get NULL in value so
+ * we need to handle it here.
+ *
+ * if (!strcmp(subkey, ".disabled")) {
+ * val = git_config_bool(key, value);
+ * return 0;
+ * } else
+ *
+ */
+ return 0; /* ignore unknown booleans */
+ }
+ if (!strcmp(subkey, ".url")) {
+ add_uri(remote, xstrdup(value));
+ } else if (!strcmp(subkey, ".push")) {
+ add_push_refspec(remote, xstrdup(value));
+ } else if (!strcmp(subkey, ".fetch")) {
+ add_fetch_refspec(remote, xstrdup(value));
+ } else if (!strcmp(subkey, ".receivepack")) {
+ if (!remote->receivepack)
+ remote->receivepack = xstrdup(value);
+ else
+ error("more than one receivepack given, using the first");
+ }
+ return 0;
+}
+
+static void read_config(void)
+{
+ unsigned char sha1[20];
+ const char *head_ref;
+ int flag;
+ if (default_remote_name) /* did this already */
+ return;
+ default_remote_name = xstrdup("origin");
+ current_branch = NULL;
+ head_ref = resolve_ref("HEAD", sha1, 0, &flag);
+ if (head_ref && (flag & REF_ISSYMREF) &&
+ !prefixcmp(head_ref, "refs/heads/")) {
+ current_branch = head_ref + strlen("refs/heads/");
+ current_branch_len = strlen(current_branch);
+ }
+ git_config(handle_config);
+}
+
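+/*
+ * A:B means fast forward remote B with local A.
+ * +A:B means overwrite remote B with local A.
+ * +A is a shorthand for +A:A.
+ * A is a shorthand for A:A.
+ * :B means delete remote B.
+ * A "*" makes the refspec a pattern (e.g. "refs/heads/*:refs/remotes/origin/*").
+ */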
+static struct refspec *parse_ref_spec(int nr_refspec, const char **refspec)
+{
+ int i;
+ struct refspec *rs = xcalloc(sizeof(*rs), nr_refspec);
+ for (i = 0; i < nr_refspec; i++) {
+ const char *sp, *ep, *gp;
+ sp = refspec[i];
+ if (*sp == '+') {
+ rs[i].force = 1;
+ sp++;
+ }
+ gp = strchr(sp, '*');
+ ep = strchr(sp, ':');
+ if (gp && ep && gp > ep)
+ gp = NULL;
+ if (ep) {
+ if (ep[1]) {
+ const char *glob = strchr(ep + 1, '*');
+ if (!glob)
+ gp = NULL;
+ if (gp)
+ rs[i].dst = xstrndup(ep + 1,
+ glob - ep - 1);
+ else
+ rs[i].dst = xstrdup(ep + 1);
+ }
+ } else {
+ ep = sp + strlen(sp);
+ }
+ if (gp) {
+ rs[i].pattern = 1;
+ ep = gp;
+ }
+ rs[i].src = xstrndup(sp, ep - sp);
+ }
+ return rs;
+}
+
+struct remote *remote_get(const char *name)
+{
+ struct remote *ret;
+
+ read_config();
+ if (!name)
+ name = default_remote_name;
+ ret = make_remote(name, 0);
+ if (name[0] != '/') {
+ if (!ret->uri)
+ read_remotes_file(ret);
+ if (!ret->uri)
+ read_branches_file(ret);
+ }
+ if (!ret->uri)
+ add_uri(ret, name);
+ if (!ret->uri)
+ return NULL;
+ ret->fetch = parse_ref_spec(ret->fetch_refspec_nr, ret->fetch_refspec);
+ ret->push = parse_ref_spec(ret->push_refspec_nr, ret->push_refspec);
+ return ret;
+}
+
+int remote_has_uri(struct remote *remote, const char *uri)
+{
+ int i;
+ for (i = 0; i < remote->uri_nr; i++) {
+ if (!strcmp(remote->uri[i], uri))
+ return 1;
+ }
+ return 0;
+}
+
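+/*
+ * Map refspec->src through the remote's fetch refspecs and fill in
+ * refspec->dst (and ->force) with the corresponding tracking ref.
+ * Returns 0 on a match, -1 if no fetch refspec applies.
+ */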
+int remote_find_tracking(struct remote *remote, struct refspec *refspec)
+{
+ int i;
+ for (i = 0; i < remote->fetch_refspec_nr; i++) {
+ struct refspec *fetch = &remote->fetch[i];
+ if (!fetch->dst)
+ continue;
+ if (fetch->pattern) {
+ if (!prefixcmp(refspec->src, fetch->src)) {
+ refspec->dst =
+ xmalloc(strlen(fetch->dst) +
+ strlen(refspec->src) -
+ strlen(fetch->src) + 1);
+ strcpy(refspec->dst, fetch->dst);
+ strcpy(refspec->dst + strlen(fetch->dst),
+ refspec->src + strlen(fetch->src));
+ refspec->force = fetch->force;
+ return 0;
+ }
+ } else {
+ if (!strcmp(refspec->src, fetch->src)) {
+ refspec->dst = xstrdup(fetch->dst);
+ refspec->force = fetch->force;
+ return 0;
+ }
+ }
+ }
+ refspec->dst = NULL;
+ return -1;
+}
+
+static int count_refspec_match(const char *pattern,
+ struct ref *refs,
+ struct ref **matched_ref)
+{
+ int patlen = strlen(pattern);
+ struct ref *matched_weak = NULL;
+ struct ref *matched = NULL;
+ int weak_match = 0;
+ int match = 0;
+
+ for (weak_match = match = 0; refs; refs = refs->next) {
+ char *name = refs->name;
+ int namelen = strlen(name);
+ int weak_match;
+
+ if (namelen < patlen ||
+ memcmp(name + namelen - patlen, pattern, patlen))
+ continue;
+ if (namelen != patlen && name[namelen - patlen - 1] != '/')
+ continue;
+
+ /* A match is "weak" if it is with refs outside
+ * heads or tags, and did not specify the pattern
+ * in full (e.g. "refs/remotes/origin/master") or at
+ * least from the toplevel (e.g. "remotes/origin/master");
+ * otherwise "git push $URL master" would result in
+ * ambiguity between remotes/origin/master and heads/master
+ * at the remote site.
+ */
+ if (namelen != patlen &&
+ patlen != namelen - 5 &&
+ prefixcmp(name, "refs/heads/") &&
+ prefixcmp(name, "refs/tags/")) {
+ /* We want to catch the case where only weak
+ * matches are found and there are multiple
+ * matches, and where more than one strong
+ * matches are found, as ambiguous. One
+ * strong match with zero or more weak matches
+ * are acceptable as a unique match.
+ */
+ matched_weak = refs;
+ weak_match++;
+ }
+ else {
+ matched = refs;
+ match++;
+ }
+ }
+ if (!matched) {
+ *matched_ref = matched_weak;
+ return weak_match;
+ }
+ else {
+ *matched_ref = matched;
+ return match;
+ }
+}
+
+static void link_dst_tail(struct ref *ref, struct ref ***tail)
+{
+ **tail = ref;
+ *tail = &ref->next;
+ **tail = NULL;
+}
+
+static struct ref *try_explicit_object_name(const char *name)
+{
+ unsigned char sha1[20];
+ struct ref *ref;
+ int len;
+
+ if (!*name) {
+ ref = xcalloc(1, sizeof(*ref) + 20);
+ strcpy(ref->name, "(delete)");
+ hashclr(ref->new_sha1);
+ return ref;
+ }
+ if (get_sha1(name, sha1))
+ return NULL;
+ len = strlen(name) + 1;
+ ref = xcalloc(1, sizeof(*ref) + len);
+ memcpy(ref->name, name, len);
+ hashcpy(ref->new_sha1, sha1);
+ return ref;
+}
+
+static int match_explicit_refs(struct ref *src, struct ref *dst,
+ struct ref ***dst_tail, struct refspec *rs,
+ int rs_nr)
+{
+ int i, errs;
+ for (i = errs = 0; i < rs_nr; i++) {
+ struct ref *matched_src, *matched_dst;
+
+ const char *dst_value = rs[i].dst;
+
+ if (rs[i].pattern)
+ continue;
+
+ if (dst_value == NULL)
+ dst_value = rs[i].src;
+
+ matched_src = matched_dst = NULL;
+ switch (count_refspec_match(rs[i].src, src, &matched_src)) {
+ case 1:
+ break;
+ case 0:
+			/* The source could be in the get_sha1() format,
+			 * not a reference name. ":refs/other" is a
+			 * way to delete the 'other' ref at the remote end.
+			 */
+ matched_src = try_explicit_object_name(rs[i].src);
+ if (matched_src)
+ break;
+ errs = 1;
+ error("src refspec %s does not match any.",
+ rs[i].src);
+ break;
+ default:
+ errs = 1;
+ error("src refspec %s matches more than one.",
+ rs[i].src);
+ break;
+ }
+ switch (count_refspec_match(dst_value, dst, &matched_dst)) {
+ case 1:
+ break;
+ case 0:
+ if (!memcmp(dst_value, "refs/", 5)) {
+ int len = strlen(dst_value) + 1;
+ matched_dst = xcalloc(1, sizeof(*dst) + len);
+ memcpy(matched_dst->name, dst_value, len);
+ link_dst_tail(matched_dst, dst_tail);
+ }
+ else if (!strcmp(rs[i].src, dst_value) &&
+ matched_src) {
+ /* pushing "master:master" when
+ * remote does not have master yet.
+ */
+ int len = strlen(matched_src->name) + 1;
+ matched_dst = xcalloc(1, sizeof(*dst) + len);
+ memcpy(matched_dst->name, matched_src->name,
+ len);
+ link_dst_tail(matched_dst, dst_tail);
+ }
+ else {
+ errs = 1;
+ error("dst refspec %s does not match any "
+ "existing ref on the remote and does "
+ "not start with refs/.", dst_value);
+ }
+ break;
+ default:
+ errs = 1;
+ error("dst refspec %s matches more than one.",
+ dst_value);
+ break;
+ }
+ if (errs)
+ continue;
+ if (matched_dst->peer_ref) {
+ errs = 1;
+ error("dst ref %s receives from more than one src.",
+ matched_dst->name);
+ }
+ else {
+ matched_dst->peer_ref = matched_src;
+ matched_dst->force = rs[i].force;
+ }
+ }
+ return -errs;
+}
+
+static struct ref *find_ref_by_name(struct ref *list, const char *name)
+{
+ for ( ; list; list = list->next)
+ if (!strcmp(list->name, name))
+ return list;
+ return NULL;
+}
+
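+/*
+ * With no refspecs given everything matches; otherwise a source ref
+ * matches only if it falls under one of the pattern refspecs.
+ */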
+static int check_pattern_match(struct refspec *rs, int rs_nr, struct ref *src)
+{
+ int i;
+ if (!rs_nr)
+ return 1;
+ for (i = 0; i < rs_nr; i++) {
+ if (rs[i].pattern && !prefixcmp(src->name, rs[i].src))
+ return 1;
+ }
+ return 0;
+}
+
+int match_refs(struct ref *src, struct ref *dst, struct ref ***dst_tail,
+ int nr_refspec, char **refspec, int all)
+{
+ struct refspec *rs =
+ parse_ref_spec(nr_refspec, (const char **) refspec);
+
+ if (match_explicit_refs(src, dst, dst_tail, rs, nr_refspec))
+ return -1;
+
+ /* pick the remainder */
+ for ( ; src; src = src->next) {
+ struct ref *dst_peer;
+ if (src->peer_ref)
+ continue;
+ if (!check_pattern_match(rs, nr_refspec, src))
+ continue;
+
+ dst_peer = find_ref_by_name(dst, src->name);
+ if (dst_peer && dst_peer->peer_ref)
+ /* We're already sending something to this ref. */
+ continue;
+ if (!dst_peer && !nr_refspec && !all)
+ /* Remote doesn't have it, and we have no
+ * explicit pattern, and we don't have
+ * --all. */
+ continue;
+ if (!dst_peer) {
+ /* Create a new one and link it */
+ int len = strlen(src->name) + 1;
+ dst_peer = xcalloc(1, sizeof(*dst_peer) + len);
+ memcpy(dst_peer->name, src->name, len);
+ hashcpy(dst_peer->new_sha1, src->new_sha1);
+ link_dst_tail(dst_peer, dst_tail);
+ }
+ dst_peer->peer_ref = src;
+ }
+ return 0;
+}
--- /dev/null
+#ifndef REMOTE_H
+#define REMOTE_H
+
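+/*
+ * A configured remote: its URIs and its fetch and push refspecs,
+ * kept both as the raw strings from the configuration and in
+ * parsed form.
+ */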
+struct remote {
+ const char *name;
+
+ const char **uri;
+ int uri_nr;
+
+ const char **push_refspec;
+ struct refspec *push;
+ int push_refspec_nr;
+
+ const char **fetch_refspec;
+ struct refspec *fetch;
+ int fetch_refspec_nr;
+
+ const char *receivepack;
+};
+
+struct remote *remote_get(const char *name);
+
+int remote_has_uri(struct remote *remote, const char *uri);
+
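+/*
+ * A single parsed refspec.  "force" is set for a leading '+' (allow
+ * a non-fast-forward update); "pattern" marks a spec whose src is a
+ * prefix to be matched rather than a full ref name.
+ */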
+struct refspec {
+ unsigned force : 1;
+ unsigned pattern : 1;
+
+ const char *src;
+ char *dst;
+};
+
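+/*
+ * Pair up the refs to be pushed (src) with the refs the remote side
+ * reported (dst) according to the given refspecs; destination refs
+ * that have to be created are appended through dst_tail.
+ */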
+int match_refs(struct ref *src, struct ref *dst, struct ref ***dst_tail,
+ int nr_refspec, char **refspec, int all);
+
+/*
+ * For the given remote, reads the refspec's src and sets the other fields.
+ */
+int remote_find_tracking(struct remote *remote, struct refspec *refspec);
+
+#endif
close(cmd->out);
}
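+		/*
+		 * Optionally switch the child's working directory and
+		 * adjust its environment before the command is exec'ed.
+		 */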
+ if (cmd->dir && chdir(cmd->dir))
+ die("exec %s: cd to %s failed (%s)", cmd->argv[0],
+ cmd->dir, strerror(errno));
+ if (cmd->env) {
+ for (; *cmd->env; cmd->env++) {
+ if (strchr(*cmd->env, '='))
+ putenv((char*)*cmd->env);
+ else
+ unsetenv(*cmd->env);
+ }
+ }
if (cmd->git_cmd) {
execv_git_cmd(cmd->argv);
} else {
return finish_command(cmd);
}
+static void prepare_run_command_v_opt(struct child_process *cmd,
+ const char **argv,
+ int opt)
+{
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->argv = argv;
+ cmd->no_stdin = opt & RUN_COMMAND_NO_STDIN ? 1 : 0;
+ cmd->git_cmd = opt & RUN_GIT_CMD ? 1 : 0;
+ cmd->stdout_to_stderr = opt & RUN_COMMAND_STDOUT_TO_STDERR ? 1 : 0;
+}
+
int run_command_v_opt(const char **argv, int opt)
{
struct child_process cmd;
- memset(&cmd, 0, sizeof(cmd));
- cmd.argv = argv;
- cmd.no_stdin = opt & RUN_COMMAND_NO_STDIN ? 1 : 0;
- cmd.git_cmd = opt & RUN_GIT_CMD ? 1 : 0;
- cmd.stdout_to_stderr = opt & RUN_COMMAND_STDOUT_TO_STDERR ? 1 : 0;
+ prepare_run_command_v_opt(&cmd, argv, opt);
+ return run_command(&cmd);
+}
+
+int run_command_v_opt_cd(const char **argv, int opt, const char *dir)
+{
+ struct child_process cmd;
+ prepare_run_command_v_opt(&cmd, argv, opt);
+ cmd.dir = dir;
+ return run_command(&cmd);
+}
+
+int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env)
+{
+ struct child_process cmd;
+ prepare_run_command_v_opt(&cmd, argv, opt);
+ cmd.dir = dir;
+ cmd.env = env;
return run_command(&cmd);
}
pid_t pid;
int in;
int out;
+ const char *dir;
+ const char *const *env;
unsigned close_in:1;
unsigned close_out:1;
unsigned no_stdin:1;
#define RUN_GIT_CMD 2 /*If this is to be git sub-command */
#define RUN_COMMAND_STDOUT_TO_STDERR 4
int run_command_v_opt(const char **argv, int opt);
+int run_command_v_opt_cd(const char **argv, int opt, const char *dir);
+
+/*
+ * env (the environment) is to be formatted like environ: "VAR=VALUE".
+ * To unset an environment variable, use just "VAR".
+ */
+int run_command_v_opt_cd_env(const char **argv, int opt, const char *dir, const char *const *env);
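+/*
+ * For illustration only (a hypothetical caller): run "git status" in
+ * another working directory with GIT_DIR removed from the environment.
+ *
+ *	const char *argv[] = { "status", NULL };
+ *	const char *env[] = { "GIT_DIR", NULL };
+ *	run_command_v_opt_cd_env(argv, RUN_GIT_CMD, "../other-worktree", env);
+ */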
#endif
#include "refs.h"
#include "pkt-line.h"
#include "run-command.h"
+#include "remote.h"
static const char send_pack_usage[] =
"git-send-pack [--all] [--force] [--receive-pack=<git-receive-pack>] [--verbose] [--thin] [<host>:]<directory> [<ref>...]\n"
return ret;
}
-static int send_pack(int in, int out, int nr_refspec, char **refspec)
+static int send_pack(int in, int out, struct remote *remote, int nr_refspec, char **refspec)
{
struct ref *ref;
int new_refs;
new_refs = 0;
for (ref = remote_refs; ref; ref = ref->next) {
char old_hex[60], *new_hex;
- int delete_ref;
+ int will_delete_ref;
if (!ref->peer_ref)
continue;
- delete_ref = is_null_sha1(ref->peer_ref->new_sha1);
- if (delete_ref && !allow_deleting_refs) {
+
+ will_delete_ref = is_null_sha1(ref->peer_ref->new_sha1);
+ if (will_delete_ref && !allow_deleting_refs) {
error("remote does not support deleting refs");
ret = -2;
continue;
}
- if (!delete_ref &&
+ if (!will_delete_ref &&
!hashcmp(ref->old_sha1, ref->peer_ref->new_sha1)) {
if (verbose)
fprintf(stderr, "'%s': up-to-date\n", ref->name);
*/
if (!force_update &&
- !delete_ref &&
+ !will_delete_ref &&
!is_null_sha1(ref->old_sha1) &&
!ref->force) {
if (!has_sha1_file(ref->old_sha1) ||
}
}
hashcpy(ref->new_sha1, ref->peer_ref->new_sha1);
- if (!delete_ref)
+ if (!will_delete_ref)
new_refs++;
strcpy(old_hex, sha1_to_hex(ref->old_sha1));
new_hex = sha1_to_hex(ref->new_sha1);
else
packet_write(out, "%s %s %s",
old_hex, new_hex, ref->name);
- if (delete_ref)
+ if (will_delete_ref)
fprintf(stderr, "deleting '%s'\n", ref->name);
else {
fprintf(stderr, "updating '%s'", ref->name);
fprintf(stderr, "\n from %s\n to %s\n",
old_hex, new_hex);
}
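+		/*
+		 * When the configured remote is known, keep its local
+		 * tracking ref in sync with what was just pushed (or
+		 * delete it if the remote ref was deleted).
+		 */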
+ if (remote) {
+ struct refspec rs;
+ rs.src = ref->name;
+ remote_find_tracking(remote, &rs);
+ if (rs.dst) {
+ struct ref_lock *lock;
+ fprintf(stderr, " Also local %s\n", rs.dst);
+ if (will_delete_ref) {
+ if (delete_ref(rs.dst, NULL)) {
+ error("Failed to delete");
+ }
+ } else {
+ lock = lock_any_ref_for_update(rs.dst, NULL, 0);
+ if (!lock)
+ error("Failed to lock");
+ else
+ write_ref_sha1(lock, ref->new_sha1,
+ "update by push");
+ }
+ free(rs.dst);
+ }
+ }
}
packet_flush(out);
case -2: /* ok but a single level -- that is fine for
* a match pattern.
*/
+ case -3: /* ok but ends with a pattern-match character */
continue;
}
die("remote part of refspec is not a valid name in %s",
char **heads = NULL;
int fd[2], ret;
pid_t pid;
+ char *remote_name = NULL;
+ struct remote *remote = NULL;
setup_git_directory();
git_config(git_default_config);
receivepack = arg + 7;
continue;
}
+ if (!prefixcmp(arg, "--remote=")) {
+ remote_name = arg + 9;
+ continue;
+ }
if (!strcmp(arg, "--all")) {
send_all = 1;
continue;
usage(send_pack_usage);
verify_remote_names(nr_heads, heads);
+ if (remote_name) {
+ remote = remote_get(remote_name);
+ if (!remote_has_uri(remote, dest)) {
+ die("Destination %s is not a uri for %s",
+ dest, remote_name);
+ }
+ }
+
pid = git_connect(fd, dest, receivepack, verbose ? CONNECT_VERBOSE : 0);
if (pid < 0)
return 1;
- ret = send_pack(fd[0], fd[1], nr_heads, heads);
+ ret = send_pack(fd[0], fd[1], remote, nr_heads, heads);
close(fd[0]);
close(fd[1]);
ret |= finish_connect(pid);
static unsigned int sha1_file_open_flag = O_NOATIME;
-signed char hexval_table[256] = {
+const signed char hexval_table[256] = {
-1, -1, -1, -1, -1, -1, -1, -1, /* 00-07 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 08-0f */
-1, -1, -1, -1, -1, -1, -1, -1, /* 10-17 */
{
const char *alt;
+ if (alt_odb_tail)
+ return;
+
alt = getenv(ALTERNATE_DB_ENVIRONMENT);
if (!alt) alt = "";
- if (alt_odb_tail)
- return;
alt_odb_tail = &alt_odb_list;
link_alt_odb_entries(alt, alt + strlen(alt), ':', NULL, 0);
return 0;
}
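+/*
+ * Read a pack's .idx file on demand, so that packfiles can be
+ * registered without mapping their indexes up front.
+ */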
+int open_pack_index(struct packed_git *p)
+{
+ char *idx_name;
+ int ret;
+
+ if (p->index_data)
+ return 0;
+
+ idx_name = xstrdup(p->pack_name);
+ strcpy(idx_name + strlen(idx_name) - strlen(".pack"), ".idx");
+ ret = check_packed_git_idx(idx_name, p);
+ free(idx_name);
+ return ret;
+}
+
static void scan_windows(struct packed_git *p,
struct packed_git **lru_p,
struct pack_window **lru_w,
unsigned char *idx_sha1;
long fd_flag;
+ if (!p->index_data && open_pack_index(p))
+ return error("packfile %s index unavailable", p->pack_name);
+
p->pack_fd = open(p->pack_name, O_RDONLY);
if (p->pack_fd < 0 || fstat(p->pack_fd, &st))
return -1;
return NULL;
memcpy(p->pack_name, path, path_len);
strcpy(p->pack_name + path_len, ".pack");
- if (stat(p->pack_name, &st) || !S_ISREG(st.st_mode) ||
- check_packed_git_idx(path, p)) {
+ if (stat(p->pack_name, &st) || !S_ISREG(st.st_mode)) {
free(p);
return NULL;
}
/* ok, it looks sane as far as we can check without
* actually mapping the pack file.
*/
+ p->index_version = 0;
+ p->index_data = NULL;
+ p->index_size = 0;
+ p->num_objects = 0;
p->pack_size = st.st_size;
p->next = NULL;
p->windows = NULL;
return data;
}
-const unsigned char *nth_packed_object_sha1(const struct packed_git *p,
+const unsigned char *nth_packed_object_sha1(struct packed_git *p,
uint32_t n)
{
const unsigned char *index = p->index_data;
+ if (!index) {
+ if (open_pack_index(p))
+ return NULL;
+ index = p->index_data;
+ }
if (n >= p->num_objects)
return NULL;
index += 4 * 256;
const unsigned char *index = p->index_data;
unsigned hi, lo;
+ if (!index) {
+ if (open_pack_index(p))
+ return 0;
+ level1_ofs = p->index_data;
+ index = p->index_data;
+ }
if (p->index_version > 1) {
level1_ofs += 2;
index += 8;
static int find_pack_entry(const unsigned char *sha1, struct pack_entry *e, const char **ignore_packed)
{
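+	/*
+	 * The pack that satisfied the last lookup is remembered and
+	 * tried first, since consecutive lookups tend to hit the
+	 * same packfile.
+	 */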
+ static struct packed_git *last_found = (void *)1;
struct packed_git *p;
off_t offset;
prepare_packed_git();
+ if (!packed_git)
+ return 0;
+ p = (last_found == (void *)1) ? packed_git : last_found;
- for (p = packed_git; p; p = p->next) {
+ do {
if (ignore_packed) {
const char **ig;
for (ig = ignore_packed; *ig; ig++)
if (!matches_pack_name(p, *ig))
break;
if (*ig)
- continue;
+ goto next;
}
+
offset = find_pack_entry_one(sha1, p);
if (offset) {
/*
*/
if (p->pack_fd == -1 && open_packed_git(p)) {
error("packfile %s cannot be accessed", p->pack_name);
- continue;
+ goto next;
}
e->offset = offset;
e->p = p;
hashcpy(e->sha1, sha1);
+ last_found = p;
return 1;
}
- }
+
+ next:
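+		/*
+		 * Advance: after last_found comes the head of the list,
+		 * and last_found itself is skipped when reached again.
+		 */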
+ if (p == last_found)
+ p = packed_git;
+ else
+ p = p->next;
+ if (p == last_found)
+ p = p->next;
+ } while (p);
return 0;
}
prepare_packed_git();
for (p = packed_git; p && found < 2; p = p->next) {
- uint32_t num = p->num_objects;
- uint32_t first = 0, last = num;
+ uint32_t num, last;
+ uint32_t first = 0;
+ open_pack_index(p);
+ num = p->num_objects;
+ last = num;
while (first < last) {
uint32_t mid = (first + last) / 2;
const unsigned char *now;
int has_unpacked, has_packed;
unsigned char unpacked_sha1[20], packed_sha1[20];
+ prepare_alt_odb();
has_unpacked = find_short_object_filename(len, canonical, unpacked_sha1);
has_packed = find_short_packed_object(len, res, packed_sha1);
if (!has_unpacked && !has_packed)
const char *cp;
*mode = S_IFINVALID;
- prepare_alt_odb();
ret = get_sha1_1(name, namelen, sha1);
if (!ret)
return ret;
test "z$id" = "z$embedded"
'
+# If an expanded ident ever gets into the repository, we want to make sure that
+# it is collapsed before being expanded again on checkout
+test_expect_success expanded_in_repo '
+ {
+ echo "File with expanded keywords"
+ echo "\$Id\$"
+ echo "\$Id:\$"
+ echo "\$Id: 0000000000000000000000000000000000000000 \$"
+ echo "\$Id: NoSpaceAtEnd\$"
+ echo "\$Id:NoSpaceAtFront \$"
+ echo "\$Id:NoSpaceAtEitherEnd\$"
+ echo "\$Id: NoTerminatingSymbol"
+ } > expanded-keywords &&
+
+ {
+ echo "File with expanded keywords"
+ echo "\$Id: 4f21723e7b15065df7de95bd46c8ba6fb1818f4c \$"
+ echo "\$Id: 4f21723e7b15065df7de95bd46c8ba6fb1818f4c \$"
+ echo "\$Id: 4f21723e7b15065df7de95bd46c8ba6fb1818f4c \$"
+ echo "\$Id: 4f21723e7b15065df7de95bd46c8ba6fb1818f4c \$"
+ echo "\$Id: 4f21723e7b15065df7de95bd46c8ba6fb1818f4c \$"
+ echo "\$Id: 4f21723e7b15065df7de95bd46c8ba6fb1818f4c \$"
+ echo "\$Id: NoTerminatingSymbol"
+ } > expected-output &&
+
+ git add expanded-keywords &&
+ git commit -m "File with keywords expanded" &&
+
+ echo "expanded-keywords ident" >> .gitattributes &&
+
+ rm -f expanded-keywords &&
+ git checkout -- expanded-keywords &&
+ cat expanded-keywords &&
+ cmp expanded-keywords expected-output
+'
+
test_done
while test "$i" -le $cnt
do
git format-patch --encoding=UTF-8 --stdout HEAD~$i..HEAD~$j |
- grep "^From: =?UTF-8?q?=C3=81=C3=A9=C3=AD_=C3=B3=C3=BA?=" &&
+ grep "^From: =?UTF-8?q?=C3=81=C3=A9=C3=AD=20=C3=B3=C3=BA?=" &&
git-cat-file commit HEAD~$j |
case "$header" in
8859)
git format-patch --stdout master..HEAD^ >out-l1 &&
git format-patch --stdout HEAD^ >out-l2 &&
grep "^Content-Type: text/plain; charset=ISO-8859-1" out-l1 &&
- grep "^From: =?ISO-8859-1?q?=C1=E9=ED_=F3=FA?=" out-l1 &&
+ grep "^From: =?ISO-8859-1?q?=C1=E9=ED=20=F3=FA?=" out-l1 &&
grep "^Content-Type: text/plain; charset=ISO-8859-1" out-l2 &&
- grep "^From: =?ISO-8859-1?q?=C1=E9=ED_=F3=FA?=" out-l2
+ grep "^From: =?ISO-8859-1?q?=C1=E9=ED=20=F3=FA?=" out-l2
'
test_expect_success 'format-patch output (UTF-8)' '
git format-patch --stdout master..HEAD^ >out-u1 &&
git format-patch --stdout HEAD^ >out-u2 &&
grep "^Content-Type: text/plain; charset=UTF-8" out-u1 &&
- grep "^From: =?UTF-8?q?=C3=81=C3=A9=C3=AD_=C3=B3=C3=BA?=" out-u1 &&
+ grep "^From: =?UTF-8?q?=C3=81=C3=A9=C3=AD=20=C3=B3=C3=BA?=" out-u1 &&
grep "^Content-Type: text/plain; charset=UTF-8" out-u2 &&
- grep "^From: =?UTF-8?q?=C3=81=C3=A9=C3=AD_=C3=B3=C3=BA?=" out-u2
+ grep "^From: =?UTF-8?q?=C3=81=C3=A9=C3=AD=20=C3=B3=C3=BA?=" out-u2
'
test_expect_success 'rebase (U/U)' '
for i in 1 2 5 6 A B C 7 8 9 10; do echo "$i"; done >file &&
git update-index file &&
- git commit -m "Side change #1" &&
+ git commit -m "Side changes #1" &&
for i in D E F; do echo "$i"; done >>file &&
git update-index file &&
- git commit -m "Side change #2" &&
+ git commit -m "Side changes #2" &&
git tag C2 &&
for i in 5 6 1 2 3 A 4 B C 7 8 9 10 D E F; do echo "$i"; done >file &&
git update-index file &&
- git commit -m "Side change #3" &&
+ git commit -m "Side changes #3 with \\n backslash-n in it." &&
git checkout master &&
git diff-tree -p C2 | git apply --index &&
test $cnt = 2
'
+test_expect_success 'commit did not screw up the log message' '
+
+ git cat-file commit side | grep "^Side .* with .* backslash-n"
+
+'
+
+test_expect_success 'format-patch did not screw up the log message' '
+
+ grep "^Subject: .*Side changes #3 with .* backslash-n" patch0 &&
+ grep "^Subject: .*Side changes #3 with .* backslash-n" patch1
+
+'
+
+test_expect_success 'replay did not screw up the log message' '
+
+ git cat-file commit rebuild-1 | grep "^Side .* with .* backslash-n"
+
+'
+
test_done
test_expect_failure '--verify start2^1' 'git-rev-parse --verify start2^1'
test_expect_success '--verify start2^0' 'git-rev-parse --verify start2^0'
+test_expect_success 'repack for next test' 'git repack -a -d'
+test_expect_success 'short SHA-1 works' '
+ start=`git rev-parse --verify start` &&
+ echo $start &&
+ abbrv=`echo $start | sed s/.\$//` &&
+ echo $abbrv &&
+ abbrv=`git rev-parse --verify $abbrv` &&
+ echo $abbrv &&
+ test $start = $abbrv'
+
test_done
--- /dev/null
+#!/bin/sh
+#
+# Copyright (c) 2007 Lars Hjemli
+#
+
+test_description='Basic porcelain support for submodules
+
+This test tries to verify basic sanity of the init, update and status
+subcommands of git-submodule.
+'
+
+. ./test-lib.sh
+
+#
+# Test setup:
+# -create a repository in directory lib
+# -add a couple of files
+# -add directory lib to 'superproject', which creates a DIRLINK entry
+# -add a couple of regular files to enable testing of submodule filtering
+# -mv lib .subrepo
+# -add an entry to .gitmodules for path 'lib'
+#
+test_expect_success 'Prepare submodule testing' '
+ mkdir lib &&
+ cd lib &&
+ git-init &&
+ echo a >a &&
+ git-add a &&
+ git-commit -m "submodule commit 1" &&
+ git-tag -a -m "rev-1" rev-1 &&
+ rev1=$(git-rev-parse HEAD) &&
+ if test -z "$rev1"
+ then
+ echo "[OOPS] submodule git-rev-parse returned nothing"
+ false
+ fi &&
+ cd .. &&
+ echo a >a &&
+ echo z >z &&
+ git-add a lib z &&
+ git-commit -m "super commit 1" &&
+ mv lib .subrepo &&
+ GIT_CONFIG=.gitmodules git-config module.lib.url ./.subrepo
+'
+
+test_expect_success 'status should only print one line' '
+ lines=$(git-submodule status | wc -l) &&
+ test $lines = 1
+'
+
+test_expect_success 'status should initially be "missing"' '
+ git-submodule status | grep "^-$rev1"
+'
+
+test_expect_success 'init should fail when path is used by a file' '
+ echo "hello" >lib &&
+ if git-submodule init
+ then
+ echo "[OOPS] init should have failed"
+ false
+ elif test -f lib && test "$(cat lib)" != "hello"
+ then
+ echo "[OOPS] init failed but lib file was molested"
+ false
+ else
+ rm lib
+ fi
+'
+
+test_expect_success 'init should fail when path is used by a nonempty directory' '
+ mkdir lib &&
+ echo "hello" >lib/a &&
+ if git-submodule init
+ then
+ echo "[OOPS] init should have failed"
+ false
+ elif test "$(cat lib/a)" != "hello"
+ then
+ echo "[OOPS] init failed but lib/a was molested"
+ false
+ else
+ rm lib/a
+ fi
+'
+
+test_expect_success 'init should work when path is an empty dir' '
+ rm -rf lib &&
+ mkdir lib &&
+ git-submodule init &&
+ head=$(cd lib && git-rev-parse HEAD) &&
+ if test -z "$head"
+ then
+ echo "[OOPS] Failed to obtain submodule head"
+ false
+ elif test "$head" != "$rev1"
+ then
+ echo "[OOPS] Submodule head is $head but should have been $rev1"
+ false
+ fi
+'
+
+test_expect_success 'status should be "up-to-date" after init' '
+ git-submodule status | grep "^ $rev1"
+'
+
+test_expect_success 'status should be "modified" after submodule commit' '
+ cd lib &&
+ echo b >b &&
+ git-add b &&
+ git-commit -m "submodule commit 2" &&
+ rev2=$(git-rev-parse HEAD) &&
+ cd .. &&
+ if test -z "$rev2"
+ then
+ echo "[OOPS] submodule git-rev-parse returned nothing"
+ false
+ fi &&
+ git-submodule status | grep "^+$rev2"
+'
+
+test_expect_success 'the --cached sha1 should be rev1' '
+ git-submodule --cached status | grep "^+$rev1"
+'
+
+test_expect_success 'update should checkout rev1' '
+ git-submodule update &&
+ head=$(cd lib && git-rev-parse HEAD) &&
+ if test -z "$head"
+ then
+ echo "[OOPS] submodule git-rev-parse returned nothing"
+ false
+ elif test "$head" != "$rev1"
+ then
+ echo "[OOPS] init did not checkout correct head"
+ false
+ fi
+'
+
+test_expect_success 'status should be "up-to-date" after update' '
+ git-submodule status | grep "^ $rev1"
+'
+
+test_done
--- /dev/null
+test_description='test that git handles an svn repository with missing md5sums'
+
+. ./lib-git-svn.sh
+
+# Loading a node from an svn dumpfile without a Text-Content-Length
+# field causes svn to neglect to store or report an md5sum (it would
+# have calculated one had Text-Content-Length: 0 been given).  This
+# showed up in a repository created with cvs2svn.
+
+cat > dumpfile.svn <<EOF
+SVN-fs-dump-format-version: 1
+
+Revision-number: 1
+Prop-content-length: 98
+Content-length: 98
+
+K 7
+svn:log
+V 0
+
+K 10
+svn:author
+V 4
+test
+K 8
+svn:date
+V 27
+2007-05-06T12:37:01.153339Z
+PROPS-END
+
+Node-path: md5less-file
+Node-kind: file
+Node-action: add
+Prop-content-length: 10
+Content-length: 10
+
+PROPS-END
+
+EOF
+
+test_expect_success 'load svn dumpfile' "svnadmin load $rawsvnrepo < dumpfile.svn"
+
+test_expect_success 'initialize git-svn' "git-svn init $svnrepo"
+test_expect_success 'fetch revisions from svn' 'git-svn fetch'
+test_done
END AUTH REQUEST
EOF
+cat >login-anonymous <<EOF
+BEGIN VERIFICATION REQUEST
+$SERVERDIR
+anonymous
+
+END VERIFICATION REQUEST
+EOF
+
+cat >login-git <<EOF
+BEGIN VERIFICATION REQUEST
+$SERVERDIR
+git
+
+END VERIFICATION REQUEST
+EOF
+
test_expect_success 'pserver authentication' \
'cat request-anonymous | git-cvsserver pserver >log 2>&1 &&
tail -n1 log | grep -q "^I LOVE YOU$"'
fi &&
tail -n1 log | grep -q "^I HATE YOU$"'
+test_expect_success 'pserver authentication (login)' \
+ 'cat login-anonymous | git-cvsserver pserver >log 2>&1 &&
+ tail -n1 log | grep -q "^I LOVE YOU$"'
+
+test_expect_success 'pserver authentication failure (login/non-anonymous user)' \
+ 'if cat login-git | git-cvsserver pserver >log 2>&1
+ then
+ false
+ else
+ true
+ fi &&
+ tail -n1 log | grep -q "^I HATE YOU$"'
+
#--------------
# CONFIG TESTS
git commit -q -m "Merge test (merge)" &&
git push gitcvs.git >/dev/null &&
cd cvswork &&
+ sleep 1 && touch merge &&
GIT_CONFIG="$git_config" cvs -Q update &&
diff -q merge ../expected'
git commit -q -m "Merge test (no-op)" &&
git push gitcvs.git >/dev/null &&
cd cvswork &&
+ sleep 1 && touch merge &&
GIT_CONFIG="$git_config" cvs -Q update &&
diff -q merge ../merge'
if (!obj->type)
obj->type = OBJ_TAG;
if (obj->type != OBJ_TAG) {
- error("Object %s is a %s, not a tree",
+ error("Object %s is a %s, not a tag",
sha1_to_hex(sha1), typename(obj->type));
return NULL;
}