/git-rm
/git-send-email
/git-send-pack
+/git-serve
/git-sh-i18n
/git-sh-i18n--envsubst
/git-sh-setup
TECH_DOCS += technical/pack-protocol
TECH_DOCS += technical/protocol-capabilities
TECH_DOCS += technical/protocol-common
+TECH_DOCS += technical/protocol-v2
TECH_DOCS += technical/racy-git
TECH_DOCS += technical/send-pack-pipeline
TECH_DOCS += technical/shallow
* When built with more recent cURL, GIT_SSL_VERSION can now specify
"tlsv1.3" as its value.
+ * "git gui" learned that "~/.ssh/id_ecdsa.pub" and
+ "~/.ssh/id_ed25519.pub" are also possible SSH key files.
+ (merge 2e2f0288ef bb/git-gui-ssh-key-files later to maint).
+
+ * "git gui" performs commit upon CTRL/CMD+ENTER but the
+ CTRL/CMD+KP_ENTER (i.e. enter key on the numpad) did not have the
+ same key binding. It now does.
+ (merge 28a1d94a06 bp/git-gui-bind-kp-enter later to maint).
+
+ * "git gui" has been taught to work with old versions of tk (like
+ 8.5.7) that do not support "ttk::style theme use" as a way to query
+ the current theme.
+ (merge 4891961105 cb/git-gui-ttk-style later to maint).
+
+ * "git rebase" has learned to honor "--signoff" option when using
+ backends other than "am" (but not "--preserve-merges").
+
+ * "git branch --list" during an interrupted "rebase -i" now lets
+ users distinguish the case where a detached HEAD is being rebased
+ and a normal branch is being rebased.
+
+ * "git mergetools" learned talking to guiffy.
+
Performance, Internal Implementation, Development Support etc.
* Small test-helper programs have been consolidated into a single
binary.
+ * API clean-up around ref-filter code.
+
+ * Shell completion (in contrib) that gives a list of paths has been
+ optimized somewhat.
+
+ * The index file is updated to record the fsmonitor section after a
+ full scan was made, to avoid wasting the effort that has already
+   been spent.
+
+ * Performance measuring framework in t/perf learned to help bisecting
+ performance regressions.
+
+ * Some multi-word source filenames are being renamed to separate
+ words with dashes instead of underscores.
+
+ * A reusable "memory pool" implementation has been extracted from
+ fast-import.c, which in turn has become the first user of the
+ mem-pool API.
+
Also contains various documentation updates and code clean-ups.
fixed.
(merge a0d51e8d0e eb/cred-helper-ignore-sigpipe later to maint).
+ * "git rebase --keep-empty" still removed an empty commit if the
+ other side contained an empty commit (due to the "does an
+ equivalent patch exist already?" check), which has been corrected.
+ (merge 3d946165e1 pw/rebase-keep-empty-fixes later to maint).
+
+ * Some codepaths, including the refs API, get and keep relative
+   paths that go out of sync when the process does chdir(2). The
+ chdir-notify API is introduced to let these codepaths adjust these
+ cached paths to the new current directory.
+ (merge fb9c2d2703 jk/relative-directory-fix later to maint).
+
+ * "cd sub/dir && git commit ../path" ought to record the changes to
+   the file "sub/path", but this had regressed a long time ago.
+ (merge 86238e07ef bw/commit-partial-from-subdirectory-fix later to maint).
+
+ * The recently introduced "--log-destination" option of "git daemon"
+   did not work well when the daemon was run in "--inetd" mode.
+ (merge e67d906d73 lw/daemon-log-destination later to maint).
+
+ * Small fix to the autoconf build procedure.
+ (merge 249482daf0 es/fread-reads-dir-autoconf-fix later to maint).
+
+ * Fix an unexploitable (because the oversized contents are not under
+ attacker's control) buffer overflow.
+ (merge d8579accfa bp/fsmonitor-bufsize-fix later to maint).
+
* Other minor doc, test and build updates and code cleanups.
(merge 248f66ed8e nd/trace-with-env later to maint).
(merge 14ced5562c ys/bisect-object-id-missing-conversion-fix later to maint).
(merge 5988eb631a ab/doc-hash-brokenness later to maint).
(merge a4d4e32a70 pk/test-avoid-pipe-hiding-exit-status later to maint).
+ (merge 05e293c1ac jk/flockfile-stdio later to maint).
+ (merge e9184b0789 jk/t5561-missing-curl later to maint).
+ (merge b1801b85a3 nd/worktree-move later to maint).
+ (merge bbd374dd20 ak/bisect-doc-typofix later to maint).
+ (merge 4855f06fb3 mn/send-email-credential-doc later to maint).
+ (merge 8523b1e355 en/doc-typoes later to maint).
+ (merge 43b44ccfe7 js/t5404-path-fix later to maint).
+ (merge decf711fc1 ps/test-chmtime-get later to maint).
+ (merge 22d11a6e8e es/worktree-docs later to maint).
+ (merge 92a5dbbc22 tg/use-git-contacts later to maint).
not a text/plain, it's something else.
Send your patch with "To:" set to the mailing list, with "cc:" listing
-people who are involved in the area you are touching (the output from
-`git blame $path` and `git shortlog --no-merges $path` would help to
+people who are involved in the area you are touching (the `git
+contacts` command in `contrib/contacts/` can help to
identify them), to solicit comments and reviews.
:1: footnote:[The current maintainer: gitster@pobox.com]
--
+
Patterns have the same syntax and semantics as patterns used for
-fnmantch(3) without the FNM_PATHNAME flag, except a pathname also
+fnmatch(3) without the FNM_PATHNAME flag, except a pathname also
matches a pattern if removing any number of the final pathname
components matches the pattern. For example, the pattern "`foo*bar`"
matches "`fooasdfbar`" and "`foo/bar/baz/asdf`" but not "`foobarx`".
Treat all files as text.
--ignore-cr-at-eol::
- Ignore carrige-return at the end of line when doing a comparison.
+ Ignore carriage-return at the end of line when doing a comparison.
--ignore-space-at-eol::
Ignore changes in whitespace at EOL.
git bisect terms
------------------------------------------------
-You can get just the old (respectively new) term with `git bisect term
---term-old` or `git bisect term --term-good`.
+You can get just the old (respectively new) term with `git bisect terms
+--term-old` or `git bisect terms --term-good`.
If you would like to use your own terms instead of "bad"/"good" or
"new"/"old", you can choose any names you like (except existing bisect
infinite even if there is an ancestor-chain that long.
--shallow-since=<date>::
- Deepen or shorten the history of a shallow'repository to
+ Deepen or shorten the history of a shallow repository to
include all reachable commits after <date>.
--shallow-exclude=<revision>::
stripping with positive <N>, or it becomes the full refname if
stripping with negative <N>. Neither is an error.
+
-`strip` can be used as a synomym to `lstrip`.
+`strip` can be used as a synonym to `lstrip`.
objecttype::
The type of the object (`blob`, `tree`, `commit`, `tag`).
DESCRIPTION
-----------
Reads standard input in non-recursive `ls-tree` output format, and creates
-a tree object. The order of the tree entries is normalised by mktree so
+a tree object. The order of the tree entries is normalized by mktree so
pre-sorting the input is not required. The object name of the tree object
built is written to the standard output.
Incompatible with the --interactive option.
--signoff::
- This flag is passed to 'git am' to sign off all the rebased
- commits (see linkgit:git-am[1]). Incompatible with the
- --interactive option.
+ Add a Signed-off-by: trailer to all the rebased commits. Note
+ that if `--interactive` is given then only commits marked to be
+ picked, edited or reworded will have the trailer added. Incompatible
+ with the `--preserve-merges` option.
-i::
--interactive::
--batch-size=<num>::
	Some email servers (e.g. smtp.163.com) limit the number of emails to be
- sent per session (connection) and this will lead to a faliure when
+ sent per session (connection) and this will lead to a failure when
sending many messages. With this option, send-email will disconnect after
sending $<num> messages and wait for a few seconds (see --relogin-delay)
and reconnect, to work around such a limit. You may want to
If you have multifactor authentication setup on your gmail account, you will
need to generate an app-specific password for use with 'git send-email'. Visit
-https://security.google.com/settings/security/apppasswords to setup an
-app-specific password. Once setup, you can store it with the credentials
-helper:
-
- $ git credential fill
- protocol=smtp
- host=smtp.gmail.com
- username=youname@gmail.com
- password=app-password
-
+https://security.google.com/settings/security/apppasswords to create it.
Once your commits are ready to be sent to the mailing list, run the
following commands:
$ edit outgoing/0000-*
$ git send-email outgoing/*
+The first time you run it, you will be prompted for your credentials. Enter the
+app-specific or your regular password as appropriate. If you have a credential
+helper configured (see linkgit:git-credential[1]), the password will be saved in
+the credential store so you won't have to type it the next time.
+
Note: the following perl modules are required
Net::SMTP::SSL, MIME::Base64 and Authen::SASL
- 'matching' - Shows ignored files and directories matching an
ignore pattern.
+
-When 'matching' mode is specified, paths that explicity match an
+When 'matching' mode is specified, paths that explicitly match an
ignored pattern are shown. If a directory matches an ignore pattern,
then it is shown, but not paths contained in the ignored directory. If
a directory does not match an ignore pattern, but all contents are
-A<filename>::
--authors-file=<filename>::
- Syntax is compatible with the file used by 'git cvsimport':
+ Syntax is compatible with the file used by 'git cvsimport' but
+ an empty email address can be supplied with '<>':
+
------------------------------------------------------------------------
loginname = Joe User <user@example.com>
If this option is specified, for each SVN committer name that
does not exist in the authors file, the given file is executed
with the committer name as the first argument. The program is
- expected to return a single line of the form "Name <email>",
- which will be treated as if included in the authors file.
+ expected to return a single line of the form "Name <email>" or
+ "Name <>", which will be treated as if included in the authors
+ file.
++
+For historical reasons, a relative 'filename' is first searched for
+relative to the current directory for 'init' and 'clone', and relative
+to the root of the working tree for 'fetch'. If 'filename' is
+not found, it is searched for like any other command in '$PATH'.
+
[verse]
config key: svn.authorsProg
tree is associated with the repository. This new working tree is called a
"linked working tree" as opposed to the "main working tree" prepared by "git
init" or "git clone". A repository has one main working tree (if it's not a
-bare repository) and zero or more linked working trees.
+bare repository) and zero or more linked working trees. When you are done
+with a linked working tree, remove it with `git worktree remove`.
-When you are done with a linked working tree you can simply delete it.
-The working tree's administrative files in the repository (see
-"DETAILS" below) will eventually be removed automatically (see
+If a working tree is deleted without using `git worktree remove`, then
+its associated administrative files, which reside in the repository
+(see "DETAILS" below), will eventually be removed automatically (see
`gc.worktreePruneExpire` in linkgit:git-config[1]), or you can run
`git worktree prune` in the main or any linked working tree to
clean up any stale administrative files.
By default, `add` refuses to create a new working tree when
`<commit-ish>` is a branch name and is already checked out by
another working tree and `remove` refuses to remove an unclean
- working tree. This option overrides that safeguard.
+ working tree. This option overrides these safeguards.
-b <new-branch>::
-B <new-branch>::
details on a single line with columns. For example:
------------
-S git worktree list
+$ git worktree list
/path/to/bare-source (bare)
/path/to/linked-worktree abcd1234 [master]
/path/to/other-linked-worktree 1234abc (detached HEAD)
example:
------------
-S git worktree list --porcelain
+$ git worktree list --porcelain
worktree /path/to/bare-source
bare
# ... hack hack hack ...
$ git commit -a -m 'emergency fix for boss'
$ popd
-$ rm -rf ../temp
-$ git worktree prune
+$ git worktree remove ../temp
------------
BUGS
+
Supported commands: 'connect'.
+'stateless-connect'::
+ Experimental; for internal use only.
+ Can attempt to connect to a remote server for communication
+ using git's wire-protocol version 2. See the documentation
+ for the stateless-connect command for more information.
++
+Supported commands: 'stateless-connect'.
+
'push'::
Can discover remote refs and push local commits and the
history leading up to them to new or existing remote refs.
+
Supported commands: 'connect'.
+'stateless-connect'::
+ Experimental; for internal use only.
+ Can attempt to connect to a remote server for communication
+ using git's wire-protocol version 2. See the documentation
+ for the stateless-connect command for more information.
++
+Supported commands: 'stateless-connect'.
+
'fetch'::
Can discover remote refs and transfer objects reachable from
them to the local object store.
+
Supported if the helper has the "connect" capability.
+'stateless-connect' <service>::
+ Experimental; for internal use only.
+ Connects to the given remote service for communication using
+ git's wire-protocol version 2. Valid replies to this command
+ are empty line (connection established), 'fallback' (no smart
+ transport support, fall back to dumb transports) and just
+ exiting with error message printed (can't connect, don't bother
+ trying to fall back). After line feed terminating the positive
+ (empty) response, the output of the service starts. Messages
+ (both request and response) must consist of zero or more
+ PKT-LINEs, terminating in a flush packet. The client must not
+ expect the server to store any state in between request-response
+ pairs. After the connection ends, the remote helper exits.
++
+Supported if the helper has the "stateless-connect" capability.
+
If a fatal error occurs, the program writes the error message to
stderr and exits. The caller should expect that a suitable error
message has been printed if the child closes the connection without
not be returned even if all of its contents are ignored. In
this case, the contents are returned as individual entries.
+
-If this is set, files and directories that explicity match an ignore
+If this is set, files and directories that explicitly match an ignore
pattern are reported. Implicitly ignored directories (directories that
do not match an ignore pattern, but whose contents are all ignored)
are not reported, instead all of the contents are reported.
object access API
=================
-Talk about <sha1_file.c> and <object.h> family, things like
+Talk about <sha1-file.c> and <object.h> family, things like
* read_sha1_file()
* read_object_with_reference()
--- /dev/null
+ Git Wire Protocol, Version 2
+==============================
+
+This document presents a specification for a version 2 of Git's wire
+protocol. Protocol v2 will improve upon v1 in the following ways:
+
+ * Instead of multiple service names, multiple commands will be
+ supported by a single service
+ * Easily extendable as capabilities are moved into their own section
+ of the protocol, no longer being hidden behind a NUL byte and
+ limited by the size of a pkt-line
+ * Separate out other information hidden behind NUL bytes (e.g. agent
+ string as a capability and symrefs can be requested using 'ls-refs')
+ * Reference advertisement will be omitted unless explicitly requested
+ * ls-refs command to explicitly request some refs
+ * Designed with http and stateless-rpc in mind. With clear flush
+ semantics the http remote helper can simply act as a proxy
+
+In protocol v2, communication is command oriented. When first contacting a
+server, a list of capabilities will be advertised. Some of these capabilities
+will be commands which a client can request be executed. Once a command
+has completed, a client can reuse the connection and request that other
+commands be executed.
+
+ Packet-Line Framing
+---------------------
+
+All communication is done using packet-line framing, just as in v1. See
+`Documentation/technical/pack-protocol.txt` and
+`Documentation/technical/protocol-common.txt` for more information.
+
+In protocol v2 these special packets will have the following semantics:
+
+ * '0000' Flush Packet (flush-pkt) - indicates the end of a message
+ * '0001' Delimiter Packet (delim-pkt) - separates sections of a message
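
    For illustration, here is a minimal, standalone C sketch of the framing
    itself (Git's real implementation lives in pkt-line.c): an ordinary
    pkt-line is a 4-hex-digit length that counts the four length bytes plus
    the payload, while flush-pkt and delim-pkt are the literal four-byte
    strings "0000" and "0001".

    #include <stdio.h>
    #include <string.h>

    /* Illustrative pkt-line writer; not Git's own code. */
    static void pkt_line(FILE *out, const char *payload)
    {
        /* the 4 hex digits count themselves plus the payload */
        fprintf(out, "%04zx%s", strlen(payload) + 4, payload);
    }

    int main(void)
    {
        pkt_line(stdout, "version 2\n");         /* emits "000eversion 2\n" */
        pkt_line(stdout, "agent=git/1.8.3.1\n"); /* an example capability   */
        fputs("0000", stdout);                   /* flush-pkt ends the message;
                                                    a delim-pkt would be "0001" */
        return 0;
    }
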
+
+ Initial Client Request
+------------------------
+
+In general a client can request to speak protocol v2 by sending
+`version=2` through the respective side-channel for the transport being
+used which inevitably sets `GIT_PROTOCOL`. More information can be
+found in `pack-protocol.txt` and `http-protocol.txt`. In all cases the
+response from the server is the capability advertisement.
+
+ Git Transport
+~~~~~~~~~~~~~~~
+
+When using the git:// transport, you can request to use protocol v2 by
+sending "version=2" as an extra parameter:
+
+ 003egit-upload-pack /project.git\0host=myserver.com\0\0version=2\0
+
+ SSH and File Transport
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+When using either the ssh:// or file:// transport, the GIT_PROTOCOL
+environment variable must be set explicitly to include "version=2".
+
+ HTTP Transport
+~~~~~~~~~~~~~~~~
+
+When using the http:// or https:// transport a client makes a "smart"
+info/refs request as described in `http-protocol.txt` and requests that
+v2 be used by supplying "version=2" in the `Git-Protocol` header.
+
+ C: Git-Protocol: version=2
+ C:
+ C: GET $GIT_URL/info/refs?service=git-upload-pack HTTP/1.0
+
+A v2 server would reply:
+
+ S: 200 OK
+ S: <Some headers>
+ S: ...
+ S:
+ S: 000eversion 2\n
+ S: <capability-advertisement>
+
+Subsequent requests are then made directly to the service
+`$GIT_URL/git-upload-pack`. (This works the same for git-receive-pack).
+
+ Capability Advertisement
+--------------------------
+
+A server which decides to communicate (based on a request from a client)
+using protocol version 2, notifies the client by sending a version string
+in its initial response followed by an advertisement of its capabilities.
+Each capability is a key with an optional value. Clients must ignore all
+unknown keys. Semantics of unknown values are left to the definition of
+each key. Some capabilities will describe commands which can be requested
+to be executed by the client.
+
+ capability-advertisement = protocol-version
+ capability-list
+ flush-pkt
+
+ protocol-version = PKT-LINE("version 2" LF)
+ capability-list = *capability
+ capability = PKT-LINE(key[=value] LF)
+
+ key = 1*(ALPHA | DIGIT | "-_")
+ value = 1*(ALPHA | DIGIT | " -_.,?\/{}[]()<>!@#$%^&*+=:;")
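
    A capability line, once its pkt-line header and trailing LF are stripped,
    is just a key with an optional '='-separated value. A minimal C sketch of
    that split (the function name and sample values are illustrative only):

    #include <stdio.h>
    #include <string.h>

    /* Illustrative only: split "key" or "key=value" capability payloads. */
    static void parse_capability(const char *line)
    {
        const char *eq = strchr(line, '=');

        if (eq)
            printf("key=%.*s value=%s\n", (int)(eq - line), line, eq + 1);
        else
            printf("key=%s (no value)\n", line);
    }

    int main(void)
    {
        parse_capability("agent=git/1.8.3.1");
        parse_capability("ls-refs");
        parse_capability("fetch=shallow");
        return 0;
    }
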
+
+ Command Request
+-----------------
+
+After receiving the capability advertisement, a client can then issue a
+request to select the command it wants with any particular capabilities
+or arguments. There is then an optional section where the client can
+provide any command specific parameters or queries. Only a single
+command can be requested at a time.
+
+ request = empty-request | command-request
+ empty-request = flush-pkt
+ command-request = command
+ capability-list
+ [command-args]
+ flush-pkt
+ command = PKT-LINE("command=" key LF)
+ command-args = delim-pkt
+ *command-specific-arg
+
+ command-specific-args are packet line framed arguments defined by
+ each individual command.
+
+The server will then check to ensure that the client's request is
+comprised of a valid command as well as valid capabilities which were
+advertised. If the request is valid the server will then execute the
+command. A server MUST wait till it has received the client's entire
+request before issuing a response. The format of the response is
+determined by the command being executed, but in all cases a flush-pkt
+indicates the end of the response.
+
+When a command has finished, and the client has received the entire
+response from the server, a client can either request that another
+command be executed or can terminate the connection. A client may
+optionally send an empty request consisting of just a flush-pkt to
+indicate that no more requests will be made.
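
    Putting the grammar together, a client that wants to run the `ls-refs`
    command (described below) could frame its request as in the following
    standalone C sketch; the pkt_line() helper and the argument values are
    illustrative, not Git's own code.

    #include <stdio.h>
    #include <string.h>

    /* Illustrative framing of a protocol v2 "ls-refs" request. */
    static void pkt_line(FILE *out, const char *payload)
    {
        fprintf(out, "%04zx%s", strlen(payload) + 4, payload);
    }

    int main(void)
    {
        pkt_line(stdout, "command=ls-refs\n");        /* command          */
        pkt_line(stdout, "agent=git/1.8.3.1\n");      /* capability-list  */
        fputs("0001", stdout);                        /* delim-pkt        */
        pkt_line(stdout, "peel\n");                   /* command-args ... */
        pkt_line(stdout, "ref-prefix refs/heads/\n");
        fputs("0000", stdout);                        /* flush-pkt        */
        return 0;
    }
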
+
+ Capabilities
+--------------
+
+There are two different types of capabilities: normal capabilities,
+which can be used to convey information or alter the behavior of a
+request, and commands, which are the core actions that a client wants to
+perform (fetch, push, etc).
+
+Protocol version 2 is stateless by default. This means that all commands
+must only last a single round and be stateless from the perspective of the
+server side, unless the client has requested a capability indicating that
+state should be maintained by the server. Clients MUST NOT require state
+management on the server side in order to function correctly. This
+permits simple round-robin load-balancing on the server side, without
+needing to worry about state management.
+
+ agent
+~~~~~~~
+
+The server can advertise the `agent` capability with a value `X` (in the
+form `agent=X`) to notify the client that the server is running version
+`X`. The client may optionally send its own agent string by including
+the `agent` capability with a value `Y` (in the form `agent=Y`) in its
+request to the server (but it MUST NOT do so if the server did not
+advertise the agent capability). The `X` and `Y` strings may contain any
+printable ASCII characters except space (i.e., the byte range 32 < x <
+127), and are typically of the form "package/version" (e.g.,
+"git/1.8.3.1"). The agent strings are purely informative for statistics
+and debugging purposes, and MUST NOT be used to programmatically assume
+the presence or absence of particular features.
+
+ ls-refs
+~~~~~~~~~
+
+`ls-refs` is the command used to request a reference advertisement in v2.
+Unlike the current reference advertisement, ls-refs takes in arguments
+which can be used to limit the refs sent from the server.
+
+Additional features not supported in the base command will be advertised
+as the value of the command in the capability advertisement in the form
+of a space separated list of features: "<command>=<feature 1> <feature 2>"
+
+ls-refs takes in the following arguments:
+
+ symrefs
+	In addition to the object pointed at by it, show the underlying
+	ref pointed at by it when showing a symbolic ref.
+ peel
+ Show peeled tags.
+ ref-prefix <prefix>
+ When specified, only references having a prefix matching one of
+ the provided prefixes are displayed.
+
+The output of ls-refs is as follows:
+
+ output = *ref
+ flush-pkt
+ ref = PKT-LINE(obj-id SP refname *(SP ref-attribute) LF)
+ ref-attribute = (symref | peeled)
+ symref = "symref-target:" symref-target
+ peeled = "peeled:" obj-id
+
+ fetch
+~~~~~~~
+
+`fetch` is the command used to fetch a packfile in v2. It can be looked
+at as a modified version of the v1 fetch where the ref-advertisement is
+stripped out (since the `ls-refs` command fills that role) and the
+message format is tweaked to eliminate redundancies and permit easy
+addition of future extensions.
+
+Additional features not supported in the base command will be advertised
+as the value of the command in the capability advertisement in the form
+of a space separated list of features: "<command>=<feature 1> <feature 2>"
+
+A `fetch` request can take the following arguments:
+
+ want <oid>
+ Indicates to the server an object which the client wants to
+ retrieve. Wants can be anything and are not limited to
+ advertised objects.
+
+ have <oid>
+ Indicates to the server an object which the client has locally.
+ This allows the server to make a packfile which only contains
+ the objects that the client needs. Multiple 'have' lines can be
+ supplied.
+
+ done
+ Indicates to the server that negotiation should terminate (or
+ not even begin if performing a clone) and that the server should
+ use the information supplied in the request to construct the
+ packfile.
+
+ thin-pack
+ Request that a thin pack be sent, which is a pack with deltas
+ which reference base objects not contained within the pack (but
+ are known to exist at the receiving end). This can reduce the
+ network traffic significantly, but it requires the receiving end
+ to know how to "thicken" these packs by adding the missing bases
+ to the pack.
+
+ no-progress
+ Request that progress information that would normally be sent on
+ side-band channel 2, during the packfile transfer, should not be
+ sent. However, the side-band channel 3 is still used for error
+ responses.
+
+ include-tag
+ Request that annotated tags should be sent if the objects they
+ point to are being sent.
+
+ ofs-delta
+ Indicate that the client understands PACKv2 with delta referring
+ to its base by position in pack rather than by an oid. That is,
+	it can read OBJ_OFS_DELTA (aka type 6) in a packfile.
+
+If the 'shallow' feature is advertised, the following arguments can be
+included in the client's request, as well as the potential addition of the
+'shallow-info' section in the server's response as explained below.
+
+ shallow <oid>
+ A client must notify the server of all commits for which it only
+ has shallow copies (meaning that it doesn't have the parents of
+ a commit) by supplying a 'shallow <oid>' line for each such
+ object so that the server is aware of the limitations of the
+	client's history, i.e. that the client may not have all objects
+	reachable from such commits.
+
+ deepen <depth>
+ Requests that the fetch/clone should be shallow having a commit
+ depth of <depth> relative to the remote side.
+
+ deepen-relative
+ Requests that the semantics of the "deepen" command be changed
+ to indicate that the depth requested is relative to the client's
+ current shallow boundary, instead of relative to the requested
+ commits.
+
+ deepen-since <timestamp>
+ Requests that the shallow clone/fetch should be cut at a
+ specific time, instead of depth. Internally it's equivalent to
+ doing "git rev-list --max-age=<timestamp>". Cannot be used with
+ "deepen".
+
+ deepen-not <rev>
+ Requests that the shallow clone/fetch should be cut at a
+ specific revision specified by '<rev>', instead of a depth.
+	Internally it's equivalent to doing "git rev-list --not <rev>".
+ Cannot be used with "deepen", but can be used with
+ "deepen-since".
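
    For illustration, a minimal `fetch` request that wants one object,
    reports one `have`, and ends negotiation immediately could be framed as
    in the following standalone C sketch (the object ids are placeholders and
    the pkt_line() helper is not Git's own code):

    #include <stdio.h>
    #include <string.h>

    /* Illustrative framing of a minimal protocol v2 "fetch" request. */
    static void pkt_line(FILE *out, const char *payload)
    {
        fprintf(out, "%04zx%s", strlen(payload) + 4, payload);
    }

    int main(void)
    {
        pkt_line(stdout, "command=fetch\n");
        fputs("0001", stdout);                 /* delim-pkt, empty capability-list */
        pkt_line(stdout, "thin-pack\n");
        pkt_line(stdout, "ofs-delta\n");
        /* placeholder object ids, not real ones */
        pkt_line(stdout, "want aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n");
        pkt_line(stdout, "have bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n");
        pkt_line(stdout, "done\n");
        fputs("0000", stdout);                 /* flush-pkt ends the request */
        return 0;
    }
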
+
+The response of `fetch` is broken into a number of sections separated by
+delimiter packets (0001), with each section beginning with its section
+header.
+
+ output = *section
+ section = (acknowledgments | shallow-info | packfile)
+ (flush-pkt | delim-pkt)
+
+ acknowledgments = PKT-LINE("acknowledgments" LF)
+ (nak | *ack)
+ (ready)
+ ready = PKT-LINE("ready" LF)
+ nak = PKT-LINE("NAK" LF)
+ ack = PKT-LINE("ACK" SP obj-id LF)
+
+ shallow-info = PKT-LINE("shallow-info" LF)
+ *PKT-LINE((shallow | unshallow) LF)
+ shallow = "shallow" SP obj-id
+ unshallow = "unshallow" SP obj-id
+
+ packfile = PKT-LINE("packfile" LF)
+ *PKT-LINE(%x01-03 *%x00-ff)
+
+ acknowledgments section
+ * If the client determines that it is finished with negotiations
+	  by sending a "done" line, the acknowledgments section MUST be
+ omitted from the server's response.
+
+ * Always begins with the section header "acknowledgments"
+
+ * The server will respond with "NAK" if none of the object ids sent
+ as have lines were common.
+
+ * The server will respond with "ACK obj-id" for all of the
+ object ids sent as have lines which are common.
+
+ * A response cannot have both "ACK" lines as well as a "NAK"
+ line.
+
+ * The server will respond with a "ready" line indicating that
+ the server has found an acceptable common base and is ready to
+ make and send a packfile (which will be found in the packfile
+ section of the same response)
+
+ * If the server has found a suitable cut point and has decided
+ to send a "ready" line, then the server can decide to (as an
+ optimization) omit any "ACK" lines it would have sent during
+ its response. This is because the server will have already
+ determined the objects it plans to send to the client and no
+ further negotiation is needed.
+
+ shallow-info section
+ * If the client has requested a shallow fetch/clone, a shallow
+	  client requests a fetch, or the server is shallow, then the
+ server's response may include a shallow-info section. The
+ shallow-info section will be included if (due to one of the
+ above conditions) the server needs to inform the client of any
+	  shallow boundaries or adjustments to the client's already
+ existing shallow boundaries.
+
+ * Always begins with the section header "shallow-info"
+
+ * If a positive depth is requested, the server will compute the
+ set of commits which are no deeper than the desired depth.
+
+ * The server sends a "shallow obj-id" line for each commit whose
+ parents will not be sent in the following packfile.
+
+ * The server sends an "unshallow obj-id" line for each commit
+ which the client has indicated is shallow, but is no longer
+ shallow as a result of the fetch (due to its parents being
+ sent in the following packfile).
+
+ * The server MUST NOT send any "unshallow" lines for anything
+ which the client has not indicated was shallow as a part of
+ its request.
+
+ * This section is only included if a packfile section is also
+ included in the response.
+
+ packfile section
+ * This section is only included if the client has sent 'want'
+ lines in its request and either requested that no more
+ negotiation be done by sending 'done' or if the server has
+ decided it has found a sufficient cut point to produce a
+ packfile.
+
+ * Always begins with the section header "packfile"
+
+ * The transmission of the packfile begins immediately after the
+ section header
+
+ * The data transfer of the packfile is always multiplexed, using
+ the same semantics of the 'side-band-64k' capability from
+ protocol version 1. This means that each packet, during the
+ packfile data stream, is made up of a leading 4-byte pkt-line
+ length (typical of the pkt-line format), followed by a 1-byte
+ stream code, followed by the actual data.
+
+ The stream code can be one of:
+ 1 - pack data
+ 2 - progress messages
+ 3 - fatal error message just before stream aborts
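
    As an illustration of the multiplexing described above, the following
    standalone C sketch demultiplexes a packfile section read from stdin:
    pack data goes to stdout, progress and error messages go to stderr (the
    helper names are made up; this is not Git's sideband code).

    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Illustrative only: each pkt-line payload starts with a one-byte
     * stream code: 1 = pack data, 2 = progress, 3 = fatal error.
     */
    static int read_pkt(FILE *in, unsigned char *buf, size_t bufsz)
    {
        char hdr[5] = { 0 };
        unsigned long len;

        if (fread(hdr, 1, 4, in) != 4)
            return -1;
        len = strtoul(hdr, NULL, 16);
        if (len < 4)          /* flush-pkt (0000) or delim-pkt (0001) */
            return 0;
        len -= 4;             /* payload length */
        if (len > bufsz || fread(buf, 1, len, in) != len)
            return -1;
        return (int)len;
    }

    int main(void)
    {
        unsigned char buf[65520];
        int len;

        while ((len = read_pkt(stdin, buf, sizeof(buf))) > 0) {
            switch (buf[0]) {
            case 1: fwrite(buf + 1, 1, len - 1, stdout); break;   /* pack data */
            case 2: fwrite(buf + 1, 1, len - 1, stderr); break;   /* progress  */
            case 3: fwrite(buf + 1, 1, len - 1, stderr); exit(1); /* error     */
            }
        }
        return 0;
    }
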
PROGRAM_OBJS += sh-i18n--envsubst.o
PROGRAM_OBJS += shell.o
PROGRAM_OBJS += show-index.o
-PROGRAM_OBJS += upload-pack.o
PROGRAM_OBJS += remote-testsvn.o
# Binary suffix, set to .exe for Windows builds
TEST_PROGRAMS_NEED_X += test-fake-ssh
TEST_PROGRAMS_NEED_X += test-line-buffer
TEST_PROGRAMS_NEED_X += test-parse-options
+TEST_PROGRAMS_NEED_X += test-pkt-line
TEST_PROGRAMS_NEED_X += test-svn-fe
TEST_PROGRAMS_NEED_X += test-tool
LIB_OBJS += bulk-checkin.o
LIB_OBJS += bundle.o
LIB_OBJS += cache-tree.o
+LIB_OBJS += chdir-notify.o
LIB_OBJS += checkout.o
LIB_OBJS += color.o
LIB_OBJS += column.o
LIB_OBJS += ewah/ewah_bitmap.o
LIB_OBJS += ewah/ewah_io.o
LIB_OBJS += ewah/ewah_rlw.o
-LIB_OBJS += exec_cmd.o
+LIB_OBJS += exec-cmd.o
LIB_OBJS += fetch-object.o
LIB_OBJS += fetch-pack.o
LIB_OBJS += fsck.o
LIB_OBJS += ll-merge.o
LIB_OBJS += lockfile.o
LIB_OBJS += log-tree.o
+LIB_OBJS += ls-refs.o
LIB_OBJS += mailinfo.o
LIB_OBJS += mailmap.o
LIB_OBJS += match-trees.o
+LIB_OBJS += mem-pool.o
LIB_OBJS += merge.o
LIB_OBJS += merge-blobs.o
LIB_OBJS += merge-recursive.o
LIB_OBJS += refs/ref-cache.o
LIB_OBJS += ref-filter.o
LIB_OBJS += remote.o
-LIB_OBJS += replace_object.o
+LIB_OBJS += replace-object.o
LIB_OBJS += repository.o
LIB_OBJS += rerere.o
LIB_OBJS += resolve-undo.o
LIB_OBJS += run-command.o
LIB_OBJS += send-pack.o
LIB_OBJS += sequencer.o
+LIB_OBJS += serve.o
LIB_OBJS += server-info.o
LIB_OBJS += setup.o
LIB_OBJS += sha1-array.o
LIB_OBJS += sha1-lookup.o
-LIB_OBJS += sha1_file.o
-LIB_OBJS += sha1_name.o
+LIB_OBJS += sha1-file.o
+LIB_OBJS += sha1-name.o
LIB_OBJS += shallow.o
LIB_OBJS += sideband.o
LIB_OBJS += sigchain.o
LIB_OBJS += tree.o
LIB_OBJS += tree-walk.o
LIB_OBJS += unpack-trees.o
+LIB_OBJS += upload-pack.o
LIB_OBJS += url.o
LIB_OBJS += urlmatch.o
LIB_OBJS += usage.o
LIB_OBJS += wildmatch.o
LIB_OBJS += worktree.o
LIB_OBJS += wrapper.o
-LIB_OBJS += write_or_die.o
+LIB_OBJS += write-or-die.o
LIB_OBJS += ws.o
LIB_OBJS += wt-status.o
LIB_OBJS += xdiff-interface.o
BUILTIN_OBJS += builtin/revert.o
BUILTIN_OBJS += builtin/rm.o
BUILTIN_OBJS += builtin/send-pack.o
+BUILTIN_OBJS += builtin/serve.o
BUILTIN_OBJS += builtin/shortlog.o
BUILTIN_OBJS += builtin/show-branch.o
BUILTIN_OBJS += builtin/show-ref.o
BUILTIN_OBJS += builtin/update-ref.o
BUILTIN_OBJS += builtin/update-server-info.o
BUILTIN_OBJS += builtin/upload-archive.o
+BUILTIN_OBJS += builtin/upload-pack.o
BUILTIN_OBJS += builtin/var.o
BUILTIN_OBJS += builtin/verify-commit.o
BUILTIN_OBJS += builtin/verify-pack.o
$< >$@+ && \
mv $@+ $@
+.PHONY: perllibdir
+perllibdir:
+ @echo '$(perllibdir_SQ)'
+
.PHONY: gitweb
gitweb:
$(QUIET_SUBDIR0)gitweb $(QUIET_SUBDIR1) all
$(OBJECTS): $(LIB_H)
endif
-exec_cmd.sp exec_cmd.s exec_cmd.o: GIT-PREFIX
-exec_cmd.sp exec_cmd.s exec_cmd.o: EXTRA_CPPFLAGS = \
+exec-cmd.sp exec-cmd.s exec-cmd.o: GIT-PREFIX
+exec-cmd.sp exec-cmd.s exec-cmd.o: EXTRA_CPPFLAGS = \
'-DGIT_EXEC_PATH="$(gitexecdir_SQ)"' \
'-DGIT_LOCALE_PATH="$(localedir_relative_SQ)"' \
'-DBINDIR="$(bindir_relative_SQ)"' \
#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "config.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "attr.h"
#include "dir.h"
#include "utf8.h"
extern int cmd_revert(int argc, const char **argv, const char *prefix);
extern int cmd_rm(int argc, const char **argv, const char *prefix);
extern int cmd_send_pack(int argc, const char **argv, const char *prefix);
+extern int cmd_serve(int argc, const char **argv, const char *prefix);
extern int cmd_shortlog(int argc, const char **argv, const char *prefix);
extern int cmd_show(int argc, const char **argv, const char *prefix);
extern int cmd_show_branch(int argc, const char **argv, const char *prefix);
extern int cmd_update_server_info(int argc, const char **argv, const char *prefix);
extern int cmd_upload_archive(int argc, const char **argv, const char *prefix);
extern int cmd_upload_archive_writer(int argc, const char **argv, const char *prefix);
+extern int cmd_upload_pack(int argc, const char **argv, const char *prefix);
extern int cmd_var(int argc, const char **argv, const char *prefix);
extern int cmd_verify_commit(int argc, const char **argv, const char *prefix);
extern int cmd_verify_tag(int argc, const char **argv, const char *prefix);
#include "lockfile.h"
#include "dir.h"
#include "pathspec.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "cache-tree.h"
#include "run-command.h"
#include "parse-options.h"
#include "cache.h"
#include "config.h"
#include "builtin.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "parse-options.h"
#include "dir.h"
#include "run-command.h"
if (transport->smart_options && !deepen && !filter_options.choice)
transport->smart_options->check_self_contained_and_connected = 1;
- refs = transport_get_remote_refs(transport);
+ refs = transport_get_remote_refs(transport, NULL);
if (refs) {
mapped_refs = wanted_peer_refs(refs, refspec);
if (with_tree) {
char *max_prefix = common_prefix(pattern);
- overlay_tree_on_index(&the_index, with_tree,
- max_prefix ? max_prefix : prefix);
+ overlay_tree_on_index(&the_index, with_tree, max_prefix);
free(max_prefix);
}
#include "blob.h"
#include "refs.h"
#include "builtin.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "parse-options.h"
#include "revision.h"
#include "diff.h"
#include "config.h"
#include "builtin.h"
#include "run-command.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "parse-options.h"
#include "argv-array.h"
#include "strbuf.h"
#include "remote.h"
#include "connect.h"
#include "sha1-array.h"
+#include "protocol.h"
static const char fetch_pack_usage[] =
"git fetch-pack [--all] [--stdin] [--quiet | -q] [--keep | -k] [--thin] "
struct fetch_pack_args args;
struct oid_array shallow = OID_ARRAY_INIT;
struct string_list deepen_not = STRING_LIST_INIT_DUP;
+ struct packet_reader reader;
fetch_if_missing = 0;
if (!conn)
return args.diag_url ? 0 : 1;
}
- get_remote_heads(fd[0], NULL, 0, &ref, 0, NULL, &shallow);
+
+ packet_reader_init(&reader, fd[0], NULL, 0,
+ PACKET_READ_CHOMP_NEWLINE |
+ PACKET_READ_GENTLE_ON_EOF);
+
+ switch (discover_version(&reader)) {
+ case protocol_v2:
+ die("support for protocol v2 not implemented yet");
+ case protocol_v1:
+ case protocol_v0:
+ get_remote_heads(&reader, &ref, 0, NULL, &shallow);
+ break;
+ case protocol_unknown_version:
+ BUG("unknown protocol version");
+ }
ref = fetch_pack(&args, fd, conn, ref, dest, sought, nr_sought,
- &shallow, pack_lockfile_ptr);
+ &shallow, pack_lockfile_ptr, protocol_v0);
if (pack_lockfile) {
printf("lock %s\n", pack_lockfile);
fflush(stdout);
struct string_list_item *item = NULL;
for_each_ref(add_existing, &existing_refs);
- for (ref = transport_get_remote_refs(transport); ref; ref = ref->next) {
+ for (ref = transport_get_remote_refs(transport, NULL); ref; ref = ref->next) {
if (!starts_with(ref->name, "refs/tags/"))
continue;
struct ref *rm;
struct ref *ref_map = NULL;
struct ref **tail = &ref_map;
+ struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
/* opportunistically-updated references: */
struct ref *orefs = NULL, **oref_tail = &orefs;
- const struct ref *remote_refs = transport_get_remote_refs(transport);
+ const struct ref *remote_refs;
+
+ for (i = 0; i < refspec_count; i++) {
+ if (!refspecs[i].exact_sha1) {
+ const char *glob = strchr(refspecs[i].src, '*');
+ if (glob)
+ argv_array_pushf(&ref_prefixes, "%.*s",
+ (int)(glob - refspecs[i].src),
+ refspecs[i].src);
+ else
+ expand_ref_prefix(&ref_prefixes, refspecs[i].src);
+ }
+ }
+
+ remote_refs = transport_get_remote_refs(transport, &ref_prefixes);
+
+ argv_array_clear(&ref_prefixes);
if (refspec_count) {
struct refspec *fetch_refspec;
#include "blob.h"
#include "quote.h"
#include "parse-options.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
/*
* This is to create corrupt objects for debugging and as such it
#include "cache.h"
#include "config.h"
#include "builtin.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "parse-options.h"
#include "run-command.h"
#include "column.h"
#include "tree.h"
#include "progress.h"
#include "fsck.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "streaming.h"
#include "thread-utils.h"
#include "packfile.h"
/*
* Get rid of the idx file as we do not need it anymore.
* NEEDSWORK: extract this bit from free_pack_by_name() in
- * sha1_file.c, perhaps? It shouldn't matter very much as we
+ * sha1-file.c, perhaps? It shouldn't matter very much as we
* know we haven't installed this pack (hence we never have
* read anything from it).
*/
#include "config.h"
#include "refs.h"
#include "builtin.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "parse-options.h"
#ifndef DEFAULT_GIT_TEMPLATE_DIR
#include "cache.h"
#include "transport.h"
#include "remote.h"
+#include "refs.h"
static const char * const ls_remote_usage[] = {
N_("git ls-remote [--heads] [--tags] [--refs] [--upload-pack=<exec>]\n"
int show_symref_target = 0;
const char *uploadpack = NULL;
const char **pattern = NULL;
+ struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
struct remote *remote;
struct transport *transport;
if (argc > 1) {
int i;
pattern = xcalloc(argc, sizeof(const char *));
- for (i = 1; i < argc; i++)
+ for (i = 1; i < argc; i++) {
+ const char *glob;
pattern[i - 1] = xstrfmt("*/%s", argv[i]);
+
+ glob = strchr(argv[i], '*');
+ if (glob)
+ argv_array_pushf(&ref_prefixes, "%.*s",
+ (int)(glob - argv[i]), argv[i]);
+ else
+ expand_ref_prefix(&ref_prefixes, argv[i]);
+ }
}
remote = remote_get(dest);
if (uploadpack != NULL)
transport_set_option(transport, TRANS_OPT_UPLOADPACK, uploadpack);
- ref = transport_get_remote_refs(transport);
+ ref = transport_get_remote_refs(transport, &ref_prefixes);
if (transport_disconnect(transport))
return 1;
#include "tree-walk.h"
#include "xdiff-interface.h"
#include "blob.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "merge-blobs.h"
static const char merge_tree_usage[] = "git merge-tree <base-tree> <branch1> <branch2>";
#include "blob.h"
#include "pretty.h"
#include "refs.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "run-command.h"
#include "parse-options.h"
#include "string-list.h"
#include "config.h"
#include "builtin.h"
#include "parse-options.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "run-command.h"
#include "sha1-array.h"
#include "remote.h"
#include "pkt-line.h"
#include "sideband.h"
#include "run-command.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "commit.h"
#include "object.h"
#include "remote.h"
unpack_limit = receive_unpack_limit;
switch (determine_protocol_version_server()) {
+ case protocol_v2:
+ /*
+ * push support for protocol v2 has not been implemented yet,
+ * so ignore the request to use v2 and fallback to using v0.
+ */
+ break;
case protocol_v1:
/*
* v1 is just the original protocol with a version string,
if (query) {
transport = transport_get(states->remote, states->remote->url_nr > 0 ?
states->remote->url[0] : NULL);
- remote_refs = transport_get_remote_refs(transport);
+ remote_refs = transport_get_remote_refs(transport, NULL);
transport_disconnect(transport);
states->queried = 1;
#include "sha1-array.h"
#include "gpg-interface.h"
#include "gettext.h"
+#include "protocol.h"
static const char * const send_pack_usage[] = {
N_("git send-pack [--all | --mirror] [--dry-run] [--force] "
int progress = -1;
int from_stdin = 0;
struct push_cas_option cas = {0};
+ struct packet_reader reader;
struct option options[] = {
OPT__VERBOSITY(&verbose),
args.verbose ? CONNECT_VERBOSE : 0);
}
- get_remote_heads(fd[0], NULL, 0, &remote_refs, REF_NORMAL,
- &extra_have, &shallow);
+ packet_reader_init(&reader, fd[0], NULL, 0,
+ PACKET_READ_CHOMP_NEWLINE |
+ PACKET_READ_GENTLE_ON_EOF);
+
+ switch (discover_version(&reader)) {
+ case protocol_v2:
+ die("support for protocol v2 not implemented yet");
+ break;
+ case protocol_v1:
+ case protocol_v0:
+ get_remote_heads(&reader, &remote_refs, REF_NORMAL,
+ &extra_have, &shallow);
+ break;
+ case protocol_unknown_version:
+ BUG("unknown protocol version");
+ }
transport_verify_remote_names(nr_refspecs, refspecs);
--- /dev/null
+#include "cache.h"
+#include "builtin.h"
+#include "parse-options.h"
+#include "serve.h"
+
+static char const * const serve_usage[] = {
+ N_("git serve [<options>]"),
+ NULL
+};
+
+int cmd_serve(int argc, const char **argv, const char *prefix)
+{
+ struct serve_options opts = SERVE_OPTIONS_INIT;
+
+ struct option options[] = {
+ OPT_BOOL(0, "stateless-rpc", &opts.stateless_rpc,
+ N_("quit after a single request/response exchange")),
+ OPT_BOOL(0, "advertise-capabilities", &opts.advertise_capabilities,
+ N_("exit immediately after advertising capabilities")),
+ OPT_END()
+ };
+
+ /* ignore all unknown cmdline switches for now */
+ argc = parse_options(argc, argv, prefix, options, serve_usage,
+ PARSE_OPT_KEEP_DASHDASH |
+ PARSE_OPT_KEEP_UNKNOWN);
+ serve(&opts);
+
+ return 0;
+}
return -1;
if (format->format)
- pretty_print_ref(name, oid->hash, format);
+ pretty_print_ref(name, oid, format);
return 0;
}
--- /dev/null
+#include "cache.h"
+#include "builtin.h"
+#include "exec-cmd.h"
+#include "pkt-line.h"
+#include "parse-options.h"
+#include "protocol.h"
+#include "upload-pack.h"
+#include "serve.h"
+
+static const char * const upload_pack_usage[] = {
+ N_("git upload-pack [<options>] <dir>"),
+ NULL
+};
+
+int cmd_upload_pack(int argc, const char **argv, const char *prefix)
+{
+ const char *dir;
+ int strict = 0;
+ struct upload_pack_options opts = { 0 };
+ struct serve_options serve_opts = SERVE_OPTIONS_INIT;
+ struct option options[] = {
+ OPT_BOOL(0, "stateless-rpc", &opts.stateless_rpc,
+ N_("quit after a single request/response exchange")),
+ OPT_BOOL(0, "advertise-refs", &opts.advertise_refs,
+ N_("exit immediately after initial ref advertisement")),
+ OPT_BOOL(0, "strict", &strict,
+ N_("do not try <directory>/.git/ if <directory> is no Git directory")),
+ OPT_INTEGER(0, "timeout", &opts.timeout,
+ N_("interrupt transfer after <n> seconds of inactivity")),
+ OPT_END()
+ };
+
+ packet_trace_identity("upload-pack");
+ check_replace_refs = 0;
+
+ argc = parse_options(argc, argv, NULL, options, upload_pack_usage, 0);
+
+ if (argc != 1)
+ usage_with_options(upload_pack_usage, options);
+
+ if (opts.timeout)
+ opts.daemon_mode = 1;
+
+ setup_path();
+
+ dir = argv[0];
+
+ if (!enter_repo(dir, strict))
+ die("'%s' does not appear to be a git repository", dir);
+
+ switch (determine_protocol_version_server()) {
+ case protocol_v2:
+ serve_opts.advertise_capabilities = opts.advertise_refs;
+ serve_opts.stateless_rpc = opts.stateless_rpc;
+ serve(&serve_opts);
+ break;
+ case protocol_v1:
+ /*
+ * v1 is just the original protocol with a version string,
+ * so just fall through after writing the version string.
+ */
+ if (opts.advertise_refs || !opts.stateless_rpc)
+ packet_write_fmt(1, "version 1\n");
+
+ /* fallthrough */
+ case protocol_v0:
+ upload_pack(&opts);
+ break;
+ case protocol_unknown_version:
+ BUG("unknown protocol version");
+ }
+
+ return 0;
+}
}
if (format.format)
- pretty_print_ref(name, oid.hash, &format);
+ pretty_print_ref(name, &oid, &format);
}
return had_error;
}
extern char *get_object_directory(void);
extern char *get_index_file(void);
extern char *get_graft_file(void);
-extern int set_git_dir(const char *path);
+extern void set_git_dir(const char *path);
extern int get_common_dir_noenv(struct strbuf *sb, const char *gitdir);
extern int get_common_dir(struct strbuf *sb, const char *gitdir);
extern const char *get_git_namespace(void);
--- /dev/null
+#include "cache.h"
+#include "chdir-notify.h"
+#include "list.h"
+#include "strbuf.h"
+
+struct chdir_notify_entry {
+ const char *name;
+ chdir_notify_callback cb;
+ void *data;
+ struct list_head list;
+};
+static LIST_HEAD(chdir_notify_entries);
+
+void chdir_notify_register(const char *name,
+ chdir_notify_callback cb,
+ void *data)
+{
+ struct chdir_notify_entry *e = xmalloc(sizeof(*e));
+ e->name = name;
+ e->cb = cb;
+ e->data = data;
+ list_add_tail(&e->list, &chdir_notify_entries);
+}
+
+static void reparent_cb(const char *name,
+ const char *old_cwd,
+ const char *new_cwd,
+ void *data)
+{
+ char **path = data;
+ char *tmp = *path;
+
+ if (!tmp)
+ return;
+
+ *path = reparent_relative_path(old_cwd, new_cwd, tmp);
+ free(tmp);
+
+ if (name) {
+ trace_printf_key(&trace_setup_key,
+ "setup: reparent %s to '%s'",
+ name, *path);
+ }
+}
+
+void chdir_notify_reparent(const char *name, char **path)
+{
+ chdir_notify_register(name, reparent_cb, path);
+}
+
+int chdir_notify(const char *new_cwd)
+{
+ struct strbuf old_cwd = STRBUF_INIT;
+ struct list_head *pos;
+
+ if (strbuf_getcwd(&old_cwd) < 0)
+ return -1;
+ if (chdir(new_cwd) < 0) {
+ int saved_errno = errno;
+ strbuf_release(&old_cwd);
+ errno = saved_errno;
+ return -1;
+ }
+
+ trace_printf_key(&trace_setup_key,
+ "setup: chdir from '%s' to '%s'",
+ old_cwd.buf, new_cwd);
+
+ list_for_each(pos, &chdir_notify_entries) {
+ struct chdir_notify_entry *e =
+ list_entry(pos, struct chdir_notify_entry, list);
+ e->cb(e->name, old_cwd.buf, new_cwd, e->data);
+ }
+
+ strbuf_release(&old_cwd);
+ return 0;
+}
+
+char *reparent_relative_path(const char *old_cwd,
+ const char *new_cwd,
+ const char *path)
+{
+ char *ret, *full;
+
+ if (is_absolute_path(path))
+ return xstrdup(path);
+
+ full = xstrfmt("%s/%s", old_cwd, path);
+ ret = xstrdup(remove_leading_path(full, new_cwd));
+ free(full);
+
+ return ret;
+}
--- /dev/null
+#ifndef CHDIR_NOTIFY_H
+#define CHDIR_NOTIFY_H
+
+/*
+ * An API to let code "subscribe" to changes to the current working directory.
+ * The general idea is that some code asks to be notified when the working
+ * directory changes, and other code that calls chdir uses a special wrapper
+ * that notifies everyone.
+ */
+
+/*
+ * Callers who need to know about changes can do:
+ *
+ * void foo(const char *old_path, const char *new_path, void *data)
+ * {
+ * warning("switched from %s to %s!", old_path, new_path);
+ * }
+ * ...
+ * chdir_notify_register("description", foo, data);
+ *
+ * In practice most callers will want to move a relative path to the new root;
+ * they can use the reparent_relative_path() helper for that. If that's all
+ * you're doing, you can also use the convenience function:
+ *
+ * chdir_notify_reparent("description", &my_path);
+ *
+ * Whenever a chdir event occurs, that will update my_path (if it's relative)
+ * to adjust for the new cwd by freeing any existing string and allocating a
+ * new one.
+ *
+ * Registered functions are called in the order in which they were added. Note
+ * that there's currently no way to remove a function, so make sure that the
+ * data parameter remains valid for the rest of the program.
+ *
+ * The "name" argument is used only for printing trace output from
+ * $GIT_TRACE_SETUP. It may be NULL, but if non-NULL should point to
+ * storage which lasts as long as the registration is active.
+ */
+typedef void (*chdir_notify_callback)(const char *name,
+ const char *old_cwd,
+ const char *new_cwd,
+ void *data);
+void chdir_notify_register(const char *name, chdir_notify_callback cb, void *data);
+void chdir_notify_reparent(const char *name, char **path);
+
+/*
+ *
+ * Callers that want to chdir:
+ *
+ * chdir_notify(new_path);
+ *
+ * to switch to the new path and notify any callbacks.
+ *
+ * Note that you don't need to chdir_notify() if you're just temporarily moving
+ * to a directory and back, as long as you don't call any subscribed code in
+ * between (but it should be safe to do so if you're unsure).
+ */
+int chdir_notify(const char *new_cwd);
+
+/*
+ * Reparent a relative path from old_root to new_root. For example:
+ *
+ * reparent_relative_path("/a", "/a/b", "b/rel");
+ *
+ * would return the (newly allocated) string "rel". Note that we may return an
+ * absolute path in some cases (e.g., if the resulting path is not inside
+ * new_cwd).
+ */
+char *reparent_relative_path(const char *old_cwd,
+ const char *new_cwd,
+ const char *path);
+
+#endif /* CHDIR_NOTIFY_H */
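
    A minimal usage sketch, assuming the surrounding Git source tree (the
    subsystem name and variable below are hypothetical): a module that caches
    a possibly relative path registers it once, and chdir_notify() keeps it
    valid afterwards.

    /*
     * Hypothetical subsystem inside the Git tree; the name "frobnicate"
     * and the cached_path variable are made up for illustration.
     */
    #include "cache.h"
    #include "chdir-notify.h"

    static char *cached_path;

    void frobnicate_init(const char *path)
    {
        cached_path = xstrdup(path);
        /* keep cached_path in sync with any later chdir_notify() call */
        chdir_notify_reparent("frobnicate cached_path", &cached_path);
    }
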
#include "cache.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "attr.h"
/*
#include "config.h"
#include "repository.h"
#include "lockfile.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "strbuf.h"
#include "quote.h"
#include "hashmap.h"
void *data)
{
struct config_source top;
+ int ret;
top.u.file = f;
top.origin_type = origin_type;
top.do_ungetc = config_file_ungetc;
top.do_ftell = config_file_ftell;
- return do_config_from(&top, fn, data);
+ flockfile(f);
+ ret = do_config_from(&top, fn, data);
+ funlockfile(f);
+ return ret;
}
static int git_config_from_stdin(config_fn_t fn, void *data)
f = fopen_or_warn(filename, "r");
if (f) {
- flockfile(f);
ret = do_config_from_file(fn, CONFIG_ORIGIN_FILE, filename, filename, f, data);
- funlockfile(f);
fclose(f);
}
return ret;
[AC_LANG_PROGRAM([AC_INCLUDES_DEFAULT],
[[
FILE *f = fopen(".", "r");
- return f)]])],
+ return f != NULL;]])],
[ac_cv_fread_reads_directories=no],
[ac_cv_fread_reads_directories=yes])
])
#include "sha1-array.h"
#include "transport.h"
#include "strbuf.h"
+#include "version.h"
#include "protocol.h"
-static char *server_capabilities;
+static char *server_capabilities_v1;
+static struct argv_array server_capabilities_v2 = ARGV_ARRAY_INIT;
static const char *parse_feature_value(const char *, const char *, int *);
static int check_ref(const char *name, unsigned int flags)
static void die_initial_contact(int unexpected)
{
+ /*
+ * A hang-up after seeing some response from the other end
+ * means that it is unexpected, as we know the other end is
+ * willing to talk to us. A hang-up before seeing any
+ * response does not necessarily mean an ACL problem, though.
+ */
if (unexpected)
die(_("The remote end hung up upon initial contact"));
else
"and the repository exists."));
}
+/* Checks if the server supports the capability 'c' */
+int server_supports_v2(const char *c, int die_on_error)
+{
+ int i;
+
+ for (i = 0; i < server_capabilities_v2.argc; i++) {
+ const char *out;
+ if (skip_prefix(server_capabilities_v2.argv[i], c, &out) &&
+ (!*out || *out == '='))
+ return 1;
+ }
+
+ if (die_on_error)
+ die("server doesn't support '%s'", c);
+
+ return 0;
+}
+
+int server_supports_feature(const char *c, const char *feature,
+ int die_on_error)
+{
+ int i;
+
+ for (i = 0; i < server_capabilities_v2.argc; i++) {
+ const char *out;
+ if (skip_prefix(server_capabilities_v2.argv[i], c, &out) &&
+ (!*out || *(out++) == '=')) {
+ if (parse_feature_request(out, feature))
+ return 1;
+ else
+ break;
+ }
+ }
+
+ if (die_on_error)
+ die("server doesn't support feature '%s'", feature);
+
+ return 0;
+}
+
+static void process_capabilities_v2(struct packet_reader *reader)
+{
+ while (packet_reader_read(reader) == PACKET_READ_NORMAL)
+ argv_array_push(&server_capabilities_v2, reader->line);
+
+ if (reader->status != PACKET_READ_FLUSH)
+ die("expected flush after capabilities");
+}
+
+enum protocol_version discover_version(struct packet_reader *reader)
+{
+ enum protocol_version version = protocol_unknown_version;
+
+ /*
+ * Peek the first line of the server's response to
+ * determine the protocol version the server is speaking.
+ */
+ switch (packet_reader_peek(reader)) {
+ case PACKET_READ_EOF:
+ die_initial_contact(0);
+ case PACKET_READ_FLUSH:
+ case PACKET_READ_DELIM:
+ version = protocol_v0;
+ break;
+ case PACKET_READ_NORMAL:
+ version = determine_protocol_version_client(reader->line);
+ break;
+ }
+
+ switch (version) {
+ case protocol_v2:
+ process_capabilities_v2(reader);
+ break;
+ case protocol_v1:
+ /* Read the peeked version line */
+ packet_reader_read(reader);
+ break;
+ case protocol_v0:
+ break;
+ case protocol_unknown_version:
+ BUG("unknown protocol version");
+ }
+
+ return version;
+}
+
static void parse_one_symref_info(struct string_list *symref, const char *val, int len)
{
char *sym, *target;
static void annotate_refs_with_symref_info(struct ref *ref)
{
struct string_list symref = STRING_LIST_INIT_DUP;
- const char *feature_list = server_capabilities;
+ const char *feature_list = server_capabilities_v1;
while (feature_list) {
int len;
string_list_clear(&symref, 0);
}
-/*
- * Read one line of a server's ref advertisement into packet_buffer.
- */
-static int read_remote_ref(int in, char **src_buf, size_t *src_len,
- int *responded)
+static void process_capabilities(const char *line, int *len)
{
- int len = packet_read(in, src_buf, src_len,
- packet_buffer, sizeof(packet_buffer),
- PACKET_READ_GENTLE_ON_EOF |
- PACKET_READ_CHOMP_NEWLINE);
- const char *arg;
- if (len < 0)
- die_initial_contact(*responded);
- if (len > 4 && skip_prefix(packet_buffer, "ERR ", &arg))
- die("remote error: %s", arg);
-
- *responded = 1;
-
- return len;
-}
-
-#define EXPECTING_PROTOCOL_VERSION 0
-#define EXPECTING_FIRST_REF 1
-#define EXPECTING_REF 2
-#define EXPECTING_SHALLOW 3
-
-/* Returns 1 if packet_buffer is a protocol version pkt-line, 0 otherwise. */
-static int process_protocol_version(void)
-{
- switch (determine_protocol_version_client(packet_buffer)) {
- case protocol_v1:
- return 1;
- case protocol_v0:
- return 0;
- default:
- die("server is speaking an unknown protocol");
- }
-}
-
-static void process_capabilities(int *len)
-{
- int nul_location = strlen(packet_buffer);
+ int nul_location = strlen(line);
if (nul_location == *len)
return;
- server_capabilities = xstrdup(packet_buffer + nul_location + 1);
+ server_capabilities_v1 = xstrdup(line + nul_location + 1);
*len = nul_location;
}
-static int process_dummy_ref(void)
+static int process_dummy_ref(const char *line)
{
struct object_id oid;
const char *name;
- if (parse_oid_hex(packet_buffer, &oid, &name))
+ if (parse_oid_hex(line, &oid, &name))
return 0;
if (*name != ' ')
return 0;
return !oidcmp(&null_oid, &oid) && !strcmp(name, "capabilities^{}");
}
-static void check_no_capabilities(int len)
+static void check_no_capabilities(const char *line, int len)
{
- if (strlen(packet_buffer) != len)
+ if (strlen(line) != len)
warning("Ignoring capabilities after first line '%s'",
- packet_buffer + strlen(packet_buffer));
+ line + strlen(line));
}
-static int process_ref(int len, struct ref ***list, unsigned int flags,
- struct oid_array *extra_have)
+static int process_ref(const char *line, int len, struct ref ***list,
+ unsigned int flags, struct oid_array *extra_have)
{
struct object_id old_oid;
const char *name;
- if (parse_oid_hex(packet_buffer, &old_oid, &name))
+ if (parse_oid_hex(line, &old_oid, &name))
return 0;
if (*name != ' ')
return 0;
**list = ref;
*list = &ref->next;
}
- check_no_capabilities(len);
+ check_no_capabilities(line, len);
return 1;
}
-static int process_shallow(int len, struct oid_array *shallow_points)
+static int process_shallow(const char *line, int len,
+ struct oid_array *shallow_points)
{
const char *arg;
struct object_id old_oid;
- if (!skip_prefix(packet_buffer, "shallow ", &arg))
+ if (!skip_prefix(line, "shallow ", &arg))
return 0;
if (get_oid_hex(arg, &old_oid))
if (!shallow_points)
die("repository on the other end cannot be shallow");
oid_array_append(shallow_points, &old_oid);
- check_no_capabilities(len);
+ check_no_capabilities(line, len);
return 1;
}
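+
+/*
+ * States used while parsing a v0/v1 ref advertisement: the first pkt-line
+ * carries a ref plus the capability list, later lines are more refs, and
+ * any "shallow" lines come last; a flush packet ends the listing.
+ */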
+enum get_remote_heads_state {
+ EXPECTING_FIRST_REF = 0,
+ EXPECTING_REF,
+ EXPECTING_SHALLOW,
+ EXPECTING_DONE,
+};
+
/*
* Read all the refs from the other end
*/
-struct ref **get_remote_heads(int in, char *src_buf, size_t src_len,
+struct ref **get_remote_heads(struct packet_reader *reader,
struct ref **list, unsigned int flags,
struct oid_array *extra_have,
struct oid_array *shallow_points)
{
struct ref **orig_list = list;
-
- /*
- * A hang-up after seeing some response from the other end
- * means that it is unexpected, as we know the other end is
- * willing to talk to us. A hang-up before seeing any
- * response does not necessarily mean an ACL problem, though.
- */
- int responded = 0;
- int len;
- int state = EXPECTING_PROTOCOL_VERSION;
+ int len = 0;
+ enum get_remote_heads_state state = EXPECTING_FIRST_REF;
+ const char *arg;
*list = NULL;
- while ((len = read_remote_ref(in, &src_buf, &src_len, &responded))) {
+ while (state != EXPECTING_DONE) {
+ switch (packet_reader_read(reader)) {
+ case PACKET_READ_EOF:
+ die_initial_contact(1);
+ case PACKET_READ_NORMAL:
+ len = reader->pktlen;
+ if (len > 4 && skip_prefix(reader->line, "ERR ", &arg))
+ die("remote error: %s", arg);
+ break;
+ case PACKET_READ_FLUSH:
+ state = EXPECTING_DONE;
+ break;
+ case PACKET_READ_DELIM:
+ die("invalid packet");
+ }
+
switch (state) {
- case EXPECTING_PROTOCOL_VERSION:
- if (process_protocol_version()) {
- state = EXPECTING_FIRST_REF;
- break;
- }
- state = EXPECTING_FIRST_REF;
- /* fallthrough */
case EXPECTING_FIRST_REF:
- process_capabilities(&len);
- if (process_dummy_ref()) {
+ process_capabilities(reader->line, &len);
+ if (process_dummy_ref(reader->line)) {
state = EXPECTING_SHALLOW;
break;
}
state = EXPECTING_REF;
/* fallthrough */
case EXPECTING_REF:
- if (process_ref(len, &list, flags, extra_have))
+ if (process_ref(reader->line, len, &list, flags, extra_have))
break;
state = EXPECTING_SHALLOW;
/* fallthrough */
case EXPECTING_SHALLOW:
- if (process_shallow(len, shallow_points))
+ if (process_shallow(reader->line, len, shallow_points))
break;
- die("protocol error: unexpected '%s'", packet_buffer);
- default:
- die("unexpected state %d", state);
+ die("protocol error: unexpected '%s'", reader->line);
+ case EXPECTING_DONE:
+ break;
}
}
return list;
}
+/* Returns 1 when a valid ref has been added to `list`, 0 otherwise */
+static int process_ref_v2(const char *line, struct ref ***list)
+{
+ int ret = 1;
+ int i = 0;
+ struct object_id old_oid;
+ struct ref *ref;
+ struct string_list line_sections = STRING_LIST_INIT_DUP;
+ const char *end;
+
+ /*
+	 * Ref lines have a number of fields which are space-delimited. The
+ * first field is the OID of the ref. The second field is the ref
+ * name. Subsequent fields (symref-target and peeled) are optional and
+ * don't have a particular order.
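+	 *
+	 * Illustrative lines (values hypothetical) might look like:
+	 *
+	 *   <oid> HEAD symref-target:refs/heads/master
+	 *   <oid> refs/tags/v1.0 peeled:<oid>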
+ */
+ if (string_list_split(&line_sections, line, ' ', -1) < 2) {
+ ret = 0;
+ goto out;
+ }
+
+ if (parse_oid_hex(line_sections.items[i++].string, &old_oid, &end) ||
+ *end) {
+ ret = 0;
+ goto out;
+ }
+
+ ref = alloc_ref(line_sections.items[i++].string);
+
+ oidcpy(&ref->old_oid, &old_oid);
+ **list = ref;
+ *list = &ref->next;
+
+ for (; i < line_sections.nr; i++) {
+ const char *arg = line_sections.items[i].string;
+ if (skip_prefix(arg, "symref-target:", &arg))
+ ref->symref = xstrdup(arg);
+
+ if (skip_prefix(arg, "peeled:", &arg)) {
+ struct object_id peeled_oid;
+ char *peeled_name;
+ struct ref *peeled;
+ if (parse_oid_hex(arg, &peeled_oid, &end) || *end) {
+ ret = 0;
+ goto out;
+ }
+
+ peeled_name = xstrfmt("%s^{}", ref->name);
+ peeled = alloc_ref(peeled_name);
+
+ oidcpy(&peeled->old_oid, &peeled_oid);
+ **list = peeled;
+ *list = &peeled->next;
+
+ free(peeled_name);
+ }
+ }
+
+out:
+ string_list_clear(&line_sections, 0);
+ return ret;
+}
+
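+/*
+ * Query the server's refs with the protocol v2 "ls-refs" command.  The
+ * request written to `fd_out` looks roughly like this (pkt-line framing
+ * shown as 0001/0000, prefix values hypothetical; "peel" is omitted when
+ * pushing):
+ *
+ *   command=ls-refs
+ *   agent=git/...
+ *   0001
+ *   peel
+ *   symrefs
+ *   ref-prefix refs/heads/
+ *   0000
+ *
+ * Each normal response line is handed to process_ref_v2() above.
+ */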
+struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
+ struct ref **list, int for_push,
+ const struct argv_array *ref_prefixes)
+{
+ int i;
+ *list = NULL;
+
+ if (server_supports_v2("ls-refs", 1))
+ packet_write_fmt(fd_out, "command=ls-refs\n");
+
+ if (server_supports_v2("agent", 0))
+ packet_write_fmt(fd_out, "agent=%s", git_user_agent_sanitized());
+
+ packet_delim(fd_out);
+ /* When pushing we don't want to request the peeled tags */
+ if (!for_push)
+ packet_write_fmt(fd_out, "peel\n");
+ packet_write_fmt(fd_out, "symrefs\n");
+ for (i = 0; ref_prefixes && i < ref_prefixes->argc; i++) {
+ packet_write_fmt(fd_out, "ref-prefix %s\n",
+ ref_prefixes->argv[i]);
+ }
+ packet_flush(fd_out);
+
+ /* Process response from server */
+ while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+ if (!process_ref_v2(reader->line, &list))
+ die("invalid ls-refs response: %s", reader->line);
+ }
+
+ if (reader->status != PACKET_READ_FLUSH)
+ die("expected flush after ref listing");
+
+ return list;
+}
+
static const char *parse_feature_value(const char *feature_list, const char *feature, int *lenp)
{
int len;
const char *server_feature_value(const char *feature, int *len)
{
- return parse_feature_value(server_capabilities, feature, len);
+ return parse_feature_value(server_capabilities_v1, feature, len);
}
int server_supports(const char *feature)
*/
static struct child_process *git_connect_git(int fd[2], char *hostandport,
const char *path, const char *prog,
+ enum protocol_version version,
int flags)
{
struct child_process *conn;
target_host, 0);
/* If using a new version put that stuff here after a second null byte */
- if (get_protocol_version_config() > 0) {
+ if (version > 0) {
strbuf_addch(&request, '\0');
strbuf_addf(&request, "version=%d%c",
- get_protocol_version_config(), '\0');
+ version, '\0');
}
packet_write(fd[1], request.buf, request.len);
*/
static void push_ssh_options(struct argv_array *args, struct argv_array *env,
enum ssh_variant variant, const char *port,
- int flags)
+ enum protocol_version version, int flags)
{
if (variant == VARIANT_SSH &&
- get_protocol_version_config() > 0) {
+ version > 0) {
argv_array_push(args, "-o");
argv_array_push(args, "SendEnv=" GIT_PROTOCOL_ENVIRONMENT);
argv_array_pushf(env, GIT_PROTOCOL_ENVIRONMENT "=version=%d",
- get_protocol_version_config());
+ version);
}
if (flags & CONNECT_IPV4) {
/* Prepare a child_process for use by Git's SSH-tunneled transport. */
static void fill_ssh_args(struct child_process *conn, const char *ssh_host,
- const char *port, int flags)
+ const char *port, enum protocol_version version,
+ int flags)
{
const char *ssh;
enum ssh_variant variant;
argv_array_push(&detect.args, ssh);
argv_array_push(&detect.args, "-G");
push_ssh_options(&detect.args, &detect.env_array,
- VARIANT_SSH, port, flags);
+ VARIANT_SSH, port, version, flags);
argv_array_push(&detect.args, ssh_host);
variant = run_command(&detect) ? VARIANT_SIMPLE : VARIANT_SSH;
}
argv_array_push(&conn->args, ssh);
- push_ssh_options(&conn->args, &conn->env_array, variant, port, flags);
+ push_ssh_options(&conn->args, &conn->env_array, variant, port, version, flags);
argv_array_push(&conn->args, ssh_host);
}
char *hostandport, *path;
struct child_process *conn;
enum protocol protocol;
+ enum protocol_version version = get_protocol_version_config();
+
+ /*
+ * NEEDSWORK: If we are trying to use protocol v2 and we are planning
+ * to perform a push, then fallback to v0 since the client doesn't know
+ * how to push yet using v2.
+ */
+ if (version == protocol_v2 && !strcmp("git-receive-pack", prog))
+ version = protocol_v0;
/* Without this we cannot rely on waitpid() to tell
* what happened to our children.
printf("Diag: path=%s\n", path ? path : "NULL");
conn = NULL;
} else if (protocol == PROTO_GIT) {
- conn = git_connect_git(fd, hostandport, path, prog, flags);
+ conn = git_connect_git(fd, hostandport, path, prog, version, flags);
} else {
struct strbuf cmd = STRBUF_INIT;
const char *const *var;
strbuf_release(&cmd);
return NULL;
}
- fill_ssh_args(conn, ssh_host, port, flags);
+ fill_ssh_args(conn, ssh_host, port, version, flags);
} else {
transport_check_allowed("file");
- if (get_protocol_version_config() > 0) {
+ if (version > 0) {
argv_array_pushf(&conn->env_array, GIT_PROTOCOL_ENVIRONMENT "=version=%d",
- get_protocol_version_config());
+ version);
}
}
argv_array_push(&conn->args, cmd.buf);
extern const char *server_feature_value(const char *feature, int *len_ret);
extern int url_is_local_not_ssh(const char *url);
+struct packet_reader;
+extern enum protocol_version discover_version(struct packet_reader *reader);
+
+extern int server_supports_v2(const char *c, int die_on_error);
+extern int server_supports_feature(const char *c, const char *feature,
+ int die_on_error);
+
#endif
local root="${2-.}" file
__git_ls_files_helper "$root" "$1" |
- while read -r file; do
- case "$file" in
- ?*/*) echo "${file%%/*}" ;;
- *) echo "$file" ;;
- esac
- done | sort | uniq
+ cut -f1 -d/ | sort | uniq
}
# Lists branches from the local repository.
INSTALL = install
SCRIPT_PERL_FULL=$(patsubst %,$(HERE)/%,$(SCRIPT_PERL))
-INSTLIBDIR=$(shell $(MAKE) -C $(GIT_ROOT_DIR)/perl \
- -s --no-print-directory instlibdir)
+INSTLIBDIR=$(shell $(MAKE) -C $(GIT_ROOT_DIR)/ \
+ -s --no-print-directory prefix=$(prefix) \
+ perllibdir=$(perllibdir) perllibdir)
DESTDIR_SQ = $(subst ','\'',$(DESTDIR))
INSTLIBDIR_SQ = $(subst ','\'',$(INSTLIBDIR))
TL;DR: Run update_unicode.sh after the publication of a new Unicode
-standard and commit the resulting unicode_widths.h file.
+standard and commit the resulting unicode-widths.h file.
The long version
================
-The Git source code ships the file unicode_widths.h which contains
+The Git source code ships the file unicode-widths.h which contains
tables of zero and double width Unicode code points, respectively.
These tables are generated using update_unicode.sh in this directory.
update_unicode.sh itself uses a third-party tool, uniset, to query two
On each run, update_unicode.sh checks whether more recent Unicode data
files are available from the Unicode consortium, and rebuilds the header
-unicode_widths.h with the new data. The new header can then be
+unicode-widths.h with the new data. The new header can then be
committed.
#Cf Format a format control character
#
cd "$(dirname "$0")"
-UNICODEWIDTH_H=$(git rev-parse --show-toplevel)/unicode_width.h
+UNICODEWIDTH_H=$(git rev-parse --show-toplevel)/unicode-width.h
wget -N http://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt \
http://www.unicode.org/Public/UCD/latest/ucd/EastAsianWidth.txt &&
die("base-path '%s' does not exist or is not a directory",
base_path);
- if (inetd_mode) {
+ if (log_destination != LOG_DESTINATION_STDERR) {
if (!freopen("/dev/null", "w", stderr))
die_errno("failed to redirect stderr to /dev/null");
}
#include "commit.h"
#include "argv-array.h"
#include "object-store.h"
+#include "chdir-notify.h"
int trust_executable_bit = 1;
int trust_ctime = 1;
return the_repository->graft_file;
}
-int set_git_dir(const char *path)
+static void set_git_dir_1(const char *path)
{
if (setenv(GIT_DIR_ENVIRONMENT, path, 1))
- return error("Could not set GIT_DIR to '%s'", path);
+ die("could not set GIT_DIR to '%s'", path);
setup_git_env(path);
- return 0;
+}
+
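+/*
+ * chdir_notify callback: once the process has changed its working directory,
+ * a relative $GIT_DIR recorded earlier no longer points at the repository,
+ * so re-root it against the new cwd before re-exporting it.
+ */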
+static void update_relative_gitdir(const char *name,
+ const char *old_cwd,
+ const char *new_cwd,
+ void *data)
+{
+ char *path = reparent_relative_path(old_cwd, new_cwd, get_git_dir());
+ trace_printf_key(&trace_setup_key,
+ "setup: move $GIT_DIR to '%s'",
+ path);
+ set_git_dir_1(path);
+ free(path);
+}
+
+void set_git_dir(const char *path)
+{
+ set_git_dir_1(path);
+ if (!is_absolute_path(path))
+ chdir_notify_register(NULL, update_relative_gitdir, NULL);
}
const char *get_log_output_encoding(void)
--- /dev/null
+#include "cache.h"
+#include "exec-cmd.h"
+#include "quote.h"
+#include "argv-array.h"
+
+#if defined(RUNTIME_PREFIX)
+
+#if defined(HAVE_NS_GET_EXECUTABLE_PATH)
+#include <mach-o/dyld.h>
+#endif
+
+#if defined(HAVE_BSD_KERN_PROC_SYSCTL)
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#endif
+
+#endif /* RUNTIME_PREFIX */
+
+#define MAX_ARGS 32
+
+static const char *system_prefix(void);
+
+#ifdef RUNTIME_PREFIX
+
+/**
+ * When using a runtime prefix, Git dynamically resolves paths relative to its
+ * executable.
+ *
+ * The method for determining the path of the executable is highly
+ * platform-specific.
+ */
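+
+/*
+ * For example (hypothetical layout): if the running binary is
+ * /home/user/git-install/bin/git, stripping the "bin" (BINDIR) suffix from
+ * its directory yields the runtime prefix /home/user/git-install, and
+ * system_path("share/git-core/templates") then resolves beneath that prefix.
+ */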
+
+/**
+ * Directory containing the current Git executable. Resolved on startup by
+ * 'git_resolve_executable_dir'.
+ */
+static const char *executable_dirname;
+
+static const char *system_prefix(void)
+{
+ static const char *prefix;
+
+ assert(executable_dirname);
+ assert(is_absolute_path(executable_dirname));
+
+ if (!prefix &&
+ !(prefix = strip_path_suffix(executable_dirname, GIT_EXEC_PATH)) &&
+ !(prefix = strip_path_suffix(executable_dirname, BINDIR)) &&
+ !(prefix = strip_path_suffix(executable_dirname, "git"))) {
+ prefix = PREFIX;
+ trace_printf("RUNTIME_PREFIX requested, "
+ "but prefix computation failed. "
+ "Using static fallback '%s'.\n", prefix);
+ }
+ return prefix;
+}
+
+/*
+ * Resolves the executable path from argv[0], only if it is absolute.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int git_get_exec_path_from_argv0(struct strbuf *buf, const char *argv0)
+{
+ const char *slash;
+
+ if (!argv0 || !*argv0)
+ return -1;
+
+ slash = find_last_dir_sep(argv0);
+ if (slash) {
+ trace_printf("trace: resolved executable path from argv0: %s\n",
+ argv0);
+ strbuf_add_absolute_path(buf, argv0);
+ return 0;
+ }
+ return -1;
+}
+
+#ifdef PROCFS_EXECUTABLE_PATH
+/*
+ * Resolves the executable path by examining a procfs symlink.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int git_get_exec_path_procfs(struct strbuf *buf)
+{
+ if (strbuf_realpath(buf, PROCFS_EXECUTABLE_PATH, 0)) {
+ trace_printf(
+ "trace: resolved executable path from procfs: %s\n",
+ buf->buf);
+ return 0;
+ }
+ return -1;
+}
+#endif /* PROCFS_EXECUTABLE_PATH */
+
+#ifdef HAVE_BSD_KERN_PROC_SYSCTL
+/*
+ * Resolves the executable path using KERN_PROC_PATHNAME BSD sysctl.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int git_get_exec_path_bsd_sysctl(struct strbuf *buf)
+{
+ int mib[4];
+ char path[MAXPATHLEN];
+ size_t cb = sizeof(path);
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_PATHNAME;
+ mib[3] = -1;
+ if (!sysctl(mib, 4, path, &cb, NULL, 0)) {
+ trace_printf(
+ "trace: resolved executable path from sysctl: %s\n",
+ path);
+ strbuf_addstr(buf, path);
+ return 0;
+ }
+ return -1;
+}
+#endif /* HAVE_BSD_KERN_PROC_SYSCTL */
+
+#ifdef HAVE_NS_GET_EXECUTABLE_PATH
+/*
+ * Resolves the executable path by querying the Darwin application stack.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int git_get_exec_path_darwin(struct strbuf *buf)
+{
+ char path[PATH_MAX];
+ uint32_t size = sizeof(path);
+ if (!_NSGetExecutablePath(path, &size)) {
+ trace_printf(
+ "trace: resolved executable path from Darwin stack: %s\n",
+ path);
+ strbuf_addstr(buf, path);
+ return 0;
+ }
+ return -1;
+}
+#endif /* HAVE_NS_GET_EXECUTABLE_PATH */
+
+#ifdef HAVE_WPGMPTR
+/*
+ * Resolves the executable path by using the global variable _wpgmptr.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int git_get_exec_path_wpgmptr(struct strbuf *buf)
+{
+ int len = wcslen(_wpgmptr) * 3 + 1;
+ strbuf_grow(buf, len);
+ len = xwcstoutf(buf->buf, _wpgmptr, len);
+ if (len < 0)
+ return -1;
+ buf->len += len;
+ return 0;
+}
+#endif /* HAVE_WPGMPTR */
+
+/*
+ * Resolves the absolute path of the current executable.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int git_get_exec_path(struct strbuf *buf, const char *argv0)
+{
+ /*
+ * Identifying the executable path is operating system specific.
+ * Selectively employ all available methods in order of preference,
+ * preferring highly-available authoritative methods over
+ * selectively-available or non-authoritative methods.
+ *
+ * All cases fall back on resolving against argv[0] if there isn't a
+ * better functional method. However, note that argv[0] can be
+	 * user-supplied on many operating systems, and is not authoritative
+ * in those cases.
+ *
+ * Each of these functions returns 0 on success, so evaluation will stop
+ * after the first successful method.
+ */
+ if (
+#ifdef HAVE_BSD_KERN_PROC_SYSCTL
+ git_get_exec_path_bsd_sysctl(buf) &&
+#endif /* HAVE_BSD_KERN_PROC_SYSCTL */
+
+#ifdef HAVE_NS_GET_EXECUTABLE_PATH
+ git_get_exec_path_darwin(buf) &&
+#endif /* HAVE_NS_GET_EXECUTABLE_PATH */
+
+#ifdef PROCFS_EXECUTABLE_PATH
+ git_get_exec_path_procfs(buf) &&
+#endif /* PROCFS_EXECUTABLE_PATH */
+
+#ifdef HAVE_WPGMPTR
+ git_get_exec_path_wpgmptr(buf) &&
+#endif /* HAVE_WPGMPTR */
+
+ git_get_exec_path_from_argv0(buf, argv0)) {
+ return -1;
+ }
+
+ if (strbuf_normalize_path(buf)) {
+ trace_printf("trace: could not normalize path: %s\n", buf->buf);
+ return -1;
+ }
+
+ return 0;
+}
+
+void git_resolve_executable_dir(const char *argv0)
+{
+ struct strbuf buf = STRBUF_INIT;
+ char *resolved;
+ const char *slash;
+
+ if (git_get_exec_path(&buf, argv0)) {
+ trace_printf(
+ "trace: could not determine executable path from: %s\n",
+ argv0);
+ strbuf_release(&buf);
+ return;
+ }
+
+ resolved = strbuf_detach(&buf, NULL);
+ slash = find_last_dir_sep(resolved);
+ if (slash)
+ resolved[slash - resolved] = '\0';
+
+ executable_dirname = resolved;
+ trace_printf("trace: resolved executable dir: %s\n",
+ executable_dirname);
+}
+
+#else
+
+/*
+ * When not using a runtime prefix, Git uses a hard-coded path.
+ */
+static const char *system_prefix(void)
+{
+ return PREFIX;
+}
+
+/*
+ * This is called during initialization, but no work needs to be done here when
+ * runtime prefix is not being used.
+ */
+void git_resolve_executable_dir(const char *argv0)
+{
+}
+
+#endif /* RUNTIME_PREFIX */
+
+char *system_path(const char *path)
+{
+ struct strbuf d = STRBUF_INIT;
+
+ if (is_absolute_path(path))
+ return xstrdup(path);
+
+ strbuf_addf(&d, "%s/%s", system_prefix(), path);
+ return strbuf_detach(&d, NULL);
+}
+
+static const char *exec_path_value;
+
+void git_set_exec_path(const char *exec_path)
+{
+ exec_path_value = exec_path;
+ /*
+ * Propagate this setting to external programs.
+ */
+ setenv(EXEC_PATH_ENVIRONMENT, exec_path, 1);
+}
+
+/* Returns the highest-priority location to look for git programs. */
+const char *git_exec_path(void)
+{
+ if (!exec_path_value) {
+ const char *env = getenv(EXEC_PATH_ENVIRONMENT);
+ if (env && *env)
+ exec_path_value = xstrdup(env);
+ else
+ exec_path_value = system_path(GIT_EXEC_PATH);
+ }
+ return exec_path_value;
+}
+
+static void add_path(struct strbuf *out, const char *path)
+{
+ if (path && *path) {
+ strbuf_add_absolute_path(out, path);
+ strbuf_addch(out, PATH_SEP);
+ }
+}
+
+void setup_path(void)
+{
+ const char *exec_path = git_exec_path();
+ const char *old_path = getenv("PATH");
+ struct strbuf new_path = STRBUF_INIT;
+
+ git_set_exec_path(exec_path);
+ add_path(&new_path, exec_path);
+
+ if (old_path)
+ strbuf_addstr(&new_path, old_path);
+ else
+ strbuf_addstr(&new_path, _PATH_DEFPATH);
+
+ setenv("PATH", new_path.buf, 1);
+
+ strbuf_release(&new_path);
+}
+
+const char **prepare_git_cmd(struct argv_array *out, const char **argv)
+{
+ argv_array_push(out, "git");
+ argv_array_pushv(out, argv);
+ return out->argv;
+}
+
+int execv_git_cmd(const char **argv)
+{
+ struct argv_array nargv = ARGV_ARRAY_INIT;
+
+ prepare_git_cmd(&nargv, argv);
+ trace_argv_printf(nargv.argv, "trace: exec:");
+
+ /* execvp() can only ever return if it fails */
+ sane_execvp("git", (char **)nargv.argv);
+
+ trace_printf("trace: exec failed: %s\n", strerror(errno));
+
+ argv_array_clear(&nargv);
+ return -1;
+}
+
+int execl_git_cmd(const char *cmd, ...)
+{
+ int argc;
+ const char *argv[MAX_ARGS + 1];
+ const char *arg;
+ va_list param;
+
+ va_start(param, cmd);
+ argv[0] = cmd;
+ argc = 1;
+ while (argc < MAX_ARGS) {
+ arg = argv[argc++] = va_arg(param, char *);
+ if (!arg)
+ break;
+ }
+ va_end(param);
+ if (MAX_ARGS <= argc)
+ return error("too many args to run %s", cmd);
+
+ argv[argc] = NULL;
+ return execv_git_cmd(argv);
+}
--- /dev/null
+#ifndef GIT_EXEC_CMD_H
+#define GIT_EXEC_CMD_H
+
+struct argv_array;
+
+extern void git_set_exec_path(const char *exec_path);
+extern void git_resolve_executable_dir(const char *path);
+extern const char *git_exec_path(void);
+extern void setup_path(void);
+extern const char **prepare_git_cmd(struct argv_array *out, const char **argv);
+extern int execv_git_cmd(const char **argv); /* NULL terminated */
+LAST_ARG_MUST_BE_NULL
+extern int execl_git_cmd(const char *cmd, ...);
+extern char *system_path(const char *path);
+
+#endif /* GIT_EXEC_CMD_H */
+++ /dev/null
-#include "cache.h"
-#include "exec_cmd.h"
-#include "quote.h"
-#include "argv-array.h"
-
-#if defined(RUNTIME_PREFIX)
-
-#if defined(HAVE_NS_GET_EXECUTABLE_PATH)
-#include <mach-o/dyld.h>
-#endif
-
-#if defined(HAVE_BSD_KERN_PROC_SYSCTL)
-#include <sys/param.h>
-#include <sys/types.h>
-#include <sys/sysctl.h>
-#endif
-
-#endif /* RUNTIME_PREFIX */
-
-#define MAX_ARGS 32
-
-static const char *system_prefix(void);
-
-#ifdef RUNTIME_PREFIX
-
-/**
- * When using a runtime prefix, Git dynamically resolves paths relative to its
- * executable.
- *
- * The method for determining the path of the executable is highly
- * platform-specific.
- */
-
-/**
- * Path to the current Git executable. Resolved on startup by
- * 'git_resolve_executable_dir'.
- */
-static const char *executable_dirname;
-
-static const char *system_prefix(void)
-{
- static const char *prefix;
-
- assert(executable_dirname);
- assert(is_absolute_path(executable_dirname));
-
- if (!prefix &&
- !(prefix = strip_path_suffix(executable_dirname, GIT_EXEC_PATH)) &&
- !(prefix = strip_path_suffix(executable_dirname, BINDIR)) &&
- !(prefix = strip_path_suffix(executable_dirname, "git"))) {
- prefix = PREFIX;
- trace_printf("RUNTIME_PREFIX requested, "
- "but prefix computation failed. "
- "Using static fallback '%s'.\n", prefix);
- }
- return prefix;
-}
-
-/*
- * Resolves the executable path from argv[0], only if it is absolute.
- *
- * Returns 0 on success, -1 on failure.
- */
-static int git_get_exec_path_from_argv0(struct strbuf *buf, const char *argv0)
-{
- const char *slash;
-
- if (!argv0 || !*argv0)
- return -1;
-
- slash = find_last_dir_sep(argv0);
- if (slash) {
- trace_printf("trace: resolved executable path from argv0: %s\n",
- argv0);
- strbuf_add_absolute_path(buf, argv0);
- return 0;
- }
- return -1;
-}
-
-#ifdef PROCFS_EXECUTABLE_PATH
-/*
- * Resolves the executable path by examining a procfs symlink.
- *
- * Returns 0 on success, -1 on failure.
- */
-static int git_get_exec_path_procfs(struct strbuf *buf)
-{
- if (strbuf_realpath(buf, PROCFS_EXECUTABLE_PATH, 0)) {
- trace_printf(
- "trace: resolved executable path from procfs: %s\n",
- buf->buf);
- return 0;
- }
- return -1;
-}
-#endif /* PROCFS_EXECUTABLE_PATH */
-
-#ifdef HAVE_BSD_KERN_PROC_SYSCTL
-/*
- * Resolves the executable path using KERN_PROC_PATHNAME BSD sysctl.
- *
- * Returns 0 on success, -1 on failure.
- */
-static int git_get_exec_path_bsd_sysctl(struct strbuf *buf)
-{
- int mib[4];
- char path[MAXPATHLEN];
- size_t cb = sizeof(path);
-
- mib[0] = CTL_KERN;
- mib[1] = KERN_PROC;
- mib[2] = KERN_PROC_PATHNAME;
- mib[3] = -1;
- if (!sysctl(mib, 4, path, &cb, NULL, 0)) {
- trace_printf(
- "trace: resolved executable path from sysctl: %s\n",
- path);
- strbuf_addstr(buf, path);
- return 0;
- }
- return -1;
-}
-#endif /* HAVE_BSD_KERN_PROC_SYSCTL */
-
-#ifdef HAVE_NS_GET_EXECUTABLE_PATH
-/*
- * Resolves the executable path by querying Darwin application stack.
- *
- * Returns 0 on success, -1 on failure.
- */
-static int git_get_exec_path_darwin(struct strbuf *buf)
-{
- char path[PATH_MAX];
- uint32_t size = sizeof(path);
- if (!_NSGetExecutablePath(path, &size)) {
- trace_printf(
- "trace: resolved executable path from Darwin stack: %s\n",
- path);
- strbuf_addstr(buf, path);
- return 0;
- }
- return -1;
-}
-#endif /* HAVE_NS_GET_EXECUTABLE_PATH */
-
-#ifdef HAVE_WPGMPTR
-/*
- * Resolves the executable path by using the global variable _wpgmptr.
- *
- * Returns 0 on success, -1 on failure.
- */
-static int git_get_exec_path_wpgmptr(struct strbuf *buf)
-{
- int len = wcslen(_wpgmptr) * 3 + 1;
- strbuf_grow(buf, len);
- len = xwcstoutf(buf->buf, _wpgmptr, len);
- if (len < 0)
- return -1;
- buf->len += len;
- return 0;
-}
-#endif /* HAVE_WPGMPTR */
-
-/*
- * Resolves the absolute path of the current executable.
- *
- * Returns 0 on success, -1 on failure.
- */
-static int git_get_exec_path(struct strbuf *buf, const char *argv0)
-{
- /*
- * Identifying the executable path is operating system specific.
- * Selectively employ all available methods in order of preference,
- * preferring highly-available authoritative methods over
- * selectively-available or non-authoritative methods.
- *
- * All cases fall back on resolving against argv[0] if there isn't a
- * better functional method. However, note that argv[0] can be
- * used-supplied on many operating systems, and is not authoritative
- * in those cases.
- *
- * Each of these functions returns 0 on success, so evaluation will stop
- * after the first successful method.
- */
- if (
-#ifdef HAVE_BSD_KERN_PROC_SYSCTL
- git_get_exec_path_bsd_sysctl(buf) &&
-#endif /* HAVE_BSD_KERN_PROC_SYSCTL */
-
-#ifdef HAVE_NS_GET_EXECUTABLE_PATH
- git_get_exec_path_darwin(buf) &&
-#endif /* HAVE_NS_GET_EXECUTABLE_PATH */
-
-#ifdef PROCFS_EXECUTABLE_PATH
- git_get_exec_path_procfs(buf) &&
-#endif /* PROCFS_EXECUTABLE_PATH */
-
-#ifdef HAVE_WPGMPTR
- git_get_exec_path_wpgmptr(buf) &&
-#endif /* HAVE_WPGMPTR */
-
- git_get_exec_path_from_argv0(buf, argv0)) {
- return -1;
- }
-
- if (strbuf_normalize_path(buf)) {
- trace_printf("trace: could not normalize path: %s\n", buf->buf);
- return -1;
- }
-
- return 0;
-}
-
-void git_resolve_executable_dir(const char *argv0)
-{
- struct strbuf buf = STRBUF_INIT;
- char *resolved;
- const char *slash;
-
- if (git_get_exec_path(&buf, argv0)) {
- trace_printf(
- "trace: could not determine executable path from: %s\n",
- argv0);
- strbuf_release(&buf);
- return;
- }
-
- resolved = strbuf_detach(&buf, NULL);
- slash = find_last_dir_sep(resolved);
- if (slash)
- resolved[slash - resolved] = '\0';
-
- executable_dirname = resolved;
- trace_printf("trace: resolved executable dir: %s\n",
- executable_dirname);
-}
-
-#else
-
-/*
- * When not using a runtime prefix, Git uses a hard-coded path.
- */
-static const char *system_prefix(void)
-{
- return PREFIX;
-}
-
-/*
- * This is called during initialization, but No work needs to be done here when
- * runtime prefix is not being used.
- */
-void git_resolve_executable_dir(const char *argv0)
-{
-}
-
-#endif /* RUNTIME_PREFIX */
-
-char *system_path(const char *path)
-{
- struct strbuf d = STRBUF_INIT;
-
- if (is_absolute_path(path))
- return xstrdup(path);
-
- strbuf_addf(&d, "%s/%s", system_prefix(), path);
- return strbuf_detach(&d, NULL);
-}
-
-static const char *exec_path_value;
-
-void git_set_exec_path(const char *exec_path)
-{
- exec_path_value = exec_path;
- /*
- * Propagate this setting to external programs.
- */
- setenv(EXEC_PATH_ENVIRONMENT, exec_path, 1);
-}
-
-/* Returns the highest-priority location to look for git programs. */
-const char *git_exec_path(void)
-{
- if (!exec_path_value) {
- const char *env = getenv(EXEC_PATH_ENVIRONMENT);
- if (env && *env)
- exec_path_value = xstrdup(env);
- else
- exec_path_value = system_path(GIT_EXEC_PATH);
- }
- return exec_path_value;
-}
-
-static void add_path(struct strbuf *out, const char *path)
-{
- if (path && *path) {
- strbuf_add_absolute_path(out, path);
- strbuf_addch(out, PATH_SEP);
- }
-}
-
-void setup_path(void)
-{
- const char *exec_path = git_exec_path();
- const char *old_path = getenv("PATH");
- struct strbuf new_path = STRBUF_INIT;
-
- git_set_exec_path(exec_path);
- add_path(&new_path, exec_path);
-
- if (old_path)
- strbuf_addstr(&new_path, old_path);
- else
- strbuf_addstr(&new_path, _PATH_DEFPATH);
-
- setenv("PATH", new_path.buf, 1);
-
- strbuf_release(&new_path);
-}
-
-const char **prepare_git_cmd(struct argv_array *out, const char **argv)
-{
- argv_array_push(out, "git");
- argv_array_pushv(out, argv);
- return out->argv;
-}
-
-int execv_git_cmd(const char **argv)
-{
- struct argv_array nargv = ARGV_ARRAY_INIT;
-
- prepare_git_cmd(&nargv, argv);
- trace_argv_printf(nargv.argv, "trace: exec:");
-
- /* execvp() can only ever return if it fails */
- sane_execvp("git", (char **)nargv.argv);
-
- trace_printf("trace: exec failed: %s\n", strerror(errno));
-
- argv_array_clear(&nargv);
- return -1;
-}
-
-int execl_git_cmd(const char *cmd, ...)
-{
- int argc;
- const char *argv[MAX_ARGS + 1];
- const char *arg;
- va_list param;
-
- va_start(param, cmd);
- argv[0] = cmd;
- argc = 1;
- while (argc < MAX_ARGS) {
- arg = argv[argc++] = va_arg(param, char *);
- if (!arg)
- break;
- }
- va_end(param);
- if (MAX_ARGS <= argc)
- return error("too many args to run %s", cmd);
-
- argv[argc] = NULL;
- return execv_git_cmd(argv);
-}
+++ /dev/null
-#ifndef GIT_EXEC_CMD_H
-#define GIT_EXEC_CMD_H
-
-struct argv_array;
-
-extern void git_set_exec_path(const char *exec_path);
-extern void git_resolve_executable_dir(const char *path);
-extern const char *git_exec_path(void);
-extern void setup_path(void);
-extern const char **prepare_git_cmd(struct argv_array *out, const char **argv);
-extern int execv_git_cmd(const char **argv); /* NULL terminated */
-LAST_ARG_MUST_BE_NULL
-extern int execl_git_cmd(const char *cmd, ...);
-extern char *system_path(const char *path);
-
-#endif /* GIT_EXEC_CMD_H */
#include "run-command.h"
#include "packfile.h"
#include "object-store.h"
+#include "mem-pool.h"
#define PACK_ID_BITS 16
#define MAX_PACK_ID ((1<<PACK_ID_BITS)-1)
unsigned no_swap : 1;
};
-struct mem_pool {
- struct mem_pool *next_pool;
- char *next_free;
- char *end;
- uintmax_t space[FLEX_ARRAY]; /* more */
-};
-
struct atom_str {
struct atom_str *next_atom;
unsigned short str_len;
static const char **global_argv;
/* Memory pools */
-static size_t mem_pool_alloc = 2*1024*1024 - sizeof(struct mem_pool);
-static size_t total_allocd;
-static struct mem_pool *mem_pool;
+static struct mem_pool fi_mem_pool = {NULL, 2*1024*1024 -
+ sizeof(struct mp_block), 0 };
/* Atom management */
static unsigned int atom_table_sz = 4451;
static void *avail_tree_entry;
static unsigned int avail_tree_table_sz = 100;
static struct avail_tree_content **avail_tree_table;
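+/* bytes of tree-entry storage allocated outside fi_mem_pool, for reporting */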
+static size_t tree_entry_allocd;
static struct strbuf old_tree = STRBUF_INIT;
static struct strbuf new_tree = STRBUF_INIT;
return r;
}
-static void *pool_alloc(size_t len)
-{
- struct mem_pool *p;
- void *r;
-
- /* round up to a 'uintmax_t' alignment */
- if (len & (sizeof(uintmax_t) - 1))
- len += sizeof(uintmax_t) - (len & (sizeof(uintmax_t) - 1));
-
- for (p = mem_pool; p; p = p->next_pool)
- if ((p->end - p->next_free >= len))
- break;
-
- if (!p) {
- if (len >= (mem_pool_alloc/2)) {
- total_allocd += len;
- return xmalloc(len);
- }
- total_allocd += sizeof(struct mem_pool) + mem_pool_alloc;
- p = xmalloc(st_add(sizeof(struct mem_pool), mem_pool_alloc));
- p->next_pool = mem_pool;
- p->next_free = (char *) p->space;
- p->end = p->next_free + mem_pool_alloc;
- mem_pool = p;
- }
-
- r = p->next_free;
- p->next_free += len;
- return r;
-}
-
-static void *pool_calloc(size_t count, size_t size)
-{
- size_t len = count * size;
- void *r = pool_alloc(len);
- memset(r, 0, len);
- return r;
-}
-
static char *pool_strdup(const char *s)
{
size_t len = strlen(s) + 1;
- char *r = pool_alloc(len);
+ char *r = mem_pool_alloc(&fi_mem_pool, len);
memcpy(r, s, len);
return r;
}
{
struct mark_set *s = marks;
while ((idnum >> s->shift) >= 1024) {
- s = pool_calloc(1, sizeof(struct mark_set));
+ s = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
s->shift = marks->shift + 10;
s->data.sets[0] = marks;
marks = s;
uintmax_t i = idnum >> s->shift;
idnum -= i << s->shift;
if (!s->data.sets[i]) {
- s->data.sets[i] = pool_calloc(1, sizeof(struct mark_set));
+ s->data.sets[i] = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
s->data.sets[i]->shift = s->shift - 10;
}
s = s->data.sets[i];
if (c->str_len == len && !strncmp(s, c->str_dat, len))
return c;
- c = pool_alloc(sizeof(struct atom_str) + len + 1);
+ c = mem_pool_alloc(&fi_mem_pool, sizeof(struct atom_str) + len + 1);
c->str_len = len;
memcpy(c->str_dat, s, len);
c->str_dat[len] = 0;
if (check_refname_format(name, REFNAME_ALLOW_ONELEVEL))
die("Branch name doesn't conform to GIT standards: %s", name);
- b = pool_calloc(1, sizeof(struct branch));
+ b = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct branch));
b->name = pool_strdup(name);
b->table_next_branch = branch_table[hc];
b->branch_tree.versions[0].mode = S_IFDIR;
avail_tree_table[hc] = f->next_avail;
} else {
cnt = cnt & 7 ? ((cnt / 8) + 1) * 8 : cnt;
- f = pool_alloc(sizeof(*t) + sizeof(t->entries[0]) * cnt);
+ f = mem_pool_alloc(&fi_mem_pool, sizeof(*t) + sizeof(t->entries[0]) * cnt);
f->entry_capacity = cnt;
}
if (!avail_tree_entry) {
unsigned int n = tree_entry_alloc;
- total_allocd += n * sizeof(struct tree_entry);
+ tree_entry_allocd += n * sizeof(struct tree_entry);
ALLOC_ARRAY(e, n);
avail_tree_entry = e;
while (n-- > 1) {
enum object_type type;
const char *v;
- t = pool_alloc(sizeof(struct tag));
+ t = mem_pool_alloc(&fi_mem_pool, sizeof(struct tag));
memset(t, 0, sizeof(struct tag));
t->name = pool_strdup(arg);
if (last_tag)
atom_table = xcalloc(atom_table_sz, sizeof(struct atom_str*));
branch_table = xcalloc(branch_table_sz, sizeof(struct branch*));
avail_tree_table = xcalloc(avail_tree_table_sz, sizeof(struct avail_tree_content*));
- marks = pool_calloc(1, sizeof(struct mark_set));
+ marks = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
global_argc = argc;
global_argv = argv;
- rc_free = pool_alloc(cmd_save * sizeof(*rc_free));
+ rc_free = mem_pool_alloc(&fi_mem_pool, cmd_save * sizeof(*rc_free));
for (i = 0; i < (cmd_save - 1); i++)
rc_free[i].next = &rc_free[i + 1];
rc_free[cmd_save - 1].next = NULL;
fprintf(stderr, "Total branches: %10lu (%10lu loads )\n", branch_count, branch_load_count);
fprintf(stderr, " marks: %10" PRIuMAX " (%10" PRIuMAX " unique )\n", (((uintmax_t)1) << marks->shift) * 1024, marks_set_count);
fprintf(stderr, " atoms: %10u\n", atom_cnt);
- fprintf(stderr, "Memory total: %10" PRIuMAX " KiB\n", (total_allocd + alloc_count*sizeof(struct object_entry))/1024);
- fprintf(stderr, " pools: %10lu KiB\n", (unsigned long)(total_allocd/1024));
+ fprintf(stderr, "Memory total: %10" PRIuMAX " KiB\n", (tree_entry_allocd + fi_mem_pool.pool_alloc + alloc_count*sizeof(struct object_entry))/1024);
+ fprintf(stderr, " pools: %10lu KiB\n", (unsigned long)((tree_entry_allocd + fi_mem_pool.pool_alloc) /1024));
fprintf(stderr, " objects: %10" PRIuMAX " KiB\n", (alloc_count*sizeof(struct object_entry))/1024);
fprintf(stderr, "---------------------------------------------------------------------\n");
pack_report();
#include "pkt-line.h"
#include "commit.h"
#include "tag.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "pack.h"
#include "sideband.h"
#include "fetch-pack.h"
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384
-static int next_flush(struct fetch_pack_args *args, int count)
+static int next_flush(int stateless_rpc, int count)
{
- if (args->stateless_rpc) {
+ if (stateless_rpc) {
if (count < LARGE_FLUSH)
count <<= 1;
else
send_request(args, fd[1], &req_buf);
strbuf_setlen(&req_buf, state_len);
flushes++;
- flush_at = next_flush(args, count);
+ flush_at = next_flush(args->stateless_rpc, count);
/*
* We keep one window "ahead" of the other side, and
return ref;
}
+static void add_shallow_requests(struct strbuf *req_buf,
+ const struct fetch_pack_args *args)
+{
+ if (is_repository_shallow())
+ write_shallow_commits(req_buf, 1, NULL);
+ if (args->depth > 0)
+ packet_buf_write(req_buf, "deepen %d", args->depth);
+ if (args->deepen_since) {
+ timestamp_t max_age = approxidate(args->deepen_since);
+ packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
+ }
+ if (args->deepen_not) {
+ int i;
+ for (i = 0; i < args->deepen_not->nr; i++) {
+ struct string_list_item *s = args->deepen_not->items + i;
+ packet_buf_write(req_buf, "deepen-not %s", s->string);
+ }
+ }
+}
+
+static void add_wants(const struct ref *wants, struct strbuf *req_buf)
+{
+ for ( ; wants ; wants = wants->next) {
+ const struct object_id *remote = &wants->old_oid;
+ const char *remote_hex;
+ struct object *o;
+
+ /*
+ * If that object is complete (i.e. it is an ancestor of a
+ * local ref), we tell them we have it but do not have to
+ * tell them about its ancestors, which they already know
+ * about.
+ *
+ * We use lookup_object here because we are only
+ * interested in the case we *know* the object is
+ * reachable and we have already scanned it.
+ */
+ if (((o = lookup_object(remote->hash)) != NULL) &&
+ (o->flags & COMPLETE)) {
+ continue;
+ }
+
+ remote_hex = oid_to_hex(remote);
+ packet_buf_write(req_buf, "want %s\n", remote_hex);
+ }
+}
+
+static void add_common(struct strbuf *req_buf, struct oidset *common)
+{
+ struct oidset_iter iter;
+ const struct object_id *oid;
+ oidset_iter_init(common, &iter);
+
+ while ((oid = oidset_iter_next(&iter))) {
+ packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
+ }
+}
+
+static int add_haves(struct strbuf *req_buf, int *haves_to_send, int *in_vain)
+{
+ int ret = 0;
+ int haves_added = 0;
+ const struct object_id *oid;
+
+ while ((oid = get_rev())) {
+ packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
+ if (++haves_added >= *haves_to_send)
+ break;
+ }
+
+ *in_vain += haves_added;
+ if (!haves_added || *in_vain >= MAX_IN_VAIN) {
+ /* Send Done */
+ packet_buf_write(req_buf, "done\n");
+ ret = 1;
+ }
+
+ /* Increase haves to send on next round */
+ *haves_to_send = next_flush(1, *haves_to_send);
+
+ return ret;
+}
+
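+/*
+ * Write one round of a protocol v2 "fetch" request to `fd_out`.  A request
+ * looks roughly like this (pkt-line framing shown as 0001/0000, values
+ * hypothetical and depending on the arguments):
+ *
+ *   command=fetch
+ *   agent=git/...
+ *   0001
+ *   thin-pack
+ *   ofs-delta
+ *   want <oid>
+ *   have <oid>
+ *   done
+ *   0000
+ *
+ * Returns 1 once "done" has been sent (a packfile is expected next),
+ * 0 if more negotiation rounds are needed.
+ */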
+static int send_fetch_request(int fd_out, const struct fetch_pack_args *args,
+ const struct ref *wants, struct oidset *common,
+ int *haves_to_send, int *in_vain)
+{
+ int ret = 0;
+ struct strbuf req_buf = STRBUF_INIT;
+
+ if (server_supports_v2("fetch", 1))
+ packet_buf_write(&req_buf, "command=fetch");
+ if (server_supports_v2("agent", 0))
+ packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
+
+ packet_buf_delim(&req_buf);
+ if (args->use_thin_pack)
+ packet_buf_write(&req_buf, "thin-pack");
+ if (args->no_progress)
+ packet_buf_write(&req_buf, "no-progress");
+ if (args->include_tag)
+ packet_buf_write(&req_buf, "include-tag");
+ if (prefer_ofs_delta)
+ packet_buf_write(&req_buf, "ofs-delta");
+
+ /* Add shallow-info and deepen request */
+ if (server_supports_feature("fetch", "shallow", 0))
+ add_shallow_requests(&req_buf, args);
+ else if (is_repository_shallow() || args->deepen)
+ die(_("Server does not support shallow requests"));
+
+ /* add wants */
+ add_wants(wants, &req_buf);
+
+ /* Add all of the common commits we've found in previous rounds */
+ add_common(&req_buf, common);
+
+ /* Add initial haves */
+ ret = add_haves(&req_buf, haves_to_send, in_vain);
+
+ /* Send request */
+ packet_buf_flush(&req_buf);
+ write_or_die(fd_out, req_buf.buf, req_buf.len);
+
+ strbuf_release(&req_buf);
+ return ret;
+}
+
+/*
+ * Processes a section header in a server's response and checks if it matches
+ * `section`. If the value of `peek` is 1, the header line will be peeked (and
+ * not consumed); if 0, the line will be consumed and the function will die if
+ * the section header doesn't match what was expected.
+ */
+static int process_section_header(struct packet_reader *reader,
+ const char *section, int peek)
+{
+ int ret;
+
+ if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
+ die("error reading section header '%s'", section);
+
+ ret = !strcmp(reader->line, section);
+
+ if (!peek) {
+ if (!ret)
+ die("expected '%s', received '%s'",
+ section, reader->line);
+ packet_reader_read(reader);
+ }
+
+ return ret;
+}
+
+static int process_acks(struct packet_reader *reader, struct oidset *common)
+{
+	/* whether we have received an ACK or a "ready" line so far */
+ int received_ready = 0;
+ int received_ack = 0;
+
+ process_section_header(reader, "acknowledgments", 0);
+ while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+ const char *arg;
+
+ if (!strcmp(reader->line, "NAK"))
+ continue;
+
+ if (skip_prefix(reader->line, "ACK ", &arg)) {
+ struct object_id oid;
+ if (!get_oid_hex(arg, &oid)) {
+ struct commit *commit;
+ oidset_insert(common, &oid);
+ commit = lookup_commit(&oid);
+ mark_common(commit, 0, 1);
+ }
+ continue;
+ }
+
+ if (!strcmp(reader->line, "ready")) {
+ clear_prio_queue(&rev_list);
+ received_ready = 1;
+ continue;
+ }
+
+ die("unexpected acknowledgment line: '%s'", reader->line);
+ }
+
+ if (reader->status != PACKET_READ_FLUSH &&
+ reader->status != PACKET_READ_DELIM)
+ die("error processing acks: %d", reader->status);
+
+ /* return 0 if no common, 1 if there are common, or 2 if ready */
+ return received_ready ? 2 : (received_ack ? 1 : 0);
+}
+
+static void receive_shallow_info(struct fetch_pack_args *args,
+ struct packet_reader *reader)
+{
+ process_section_header(reader, "shallow-info", 0);
+ while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+ const char *arg;
+ struct object_id oid;
+
+ if (skip_prefix(reader->line, "shallow ", &arg)) {
+ if (get_oid_hex(arg, &oid))
+ die(_("invalid shallow line: %s"), reader->line);
+ register_shallow(&oid);
+ continue;
+ }
+ if (skip_prefix(reader->line, "unshallow ", &arg)) {
+ if (get_oid_hex(arg, &oid))
+ die(_("invalid unshallow line: %s"), reader->line);
+ if (!lookup_object(oid.hash))
+ die(_("object not found: %s"), reader->line);
+ /* make sure that it is parsed as shallow */
+ if (!parse_object(&oid))
+ die(_("error in object: %s"), reader->line);
+ if (unregister_shallow(&oid))
+ die(_("no shallow found: %s"), reader->line);
+ continue;
+ }
+ die(_("expected shallow/unshallow, got %s"), reader->line);
+ }
+
+ if (reader->status != PACKET_READ_FLUSH &&
+ reader->status != PACKET_READ_DELIM)
+ die("error processing shallow info: %d", reader->status);
+
+ setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
+ args->deepen = 1;
+}
+
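+/*
+ * States driving do_fetch_pack_v2() below.  The normal flow is
+ * CHECK_LOCAL -> SEND_REQUEST <-> PROCESS_ACKS -> GET_PACK -> DONE,
+ * looping between SEND_REQUEST and PROCESS_ACKS until either the server
+ * says it is "ready" or we give up negotiating and send "done".
+ */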
+enum fetch_state {
+ FETCH_CHECK_LOCAL = 0,
+ FETCH_SEND_REQUEST,
+ FETCH_PROCESS_ACKS,
+ FETCH_GET_PACK,
+ FETCH_DONE,
+};
+
+static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
+ int fd[2],
+ const struct ref *orig_ref,
+ struct ref **sought, int nr_sought,
+ char **pack_lockfile)
+{
+ struct ref *ref = copy_ref_list(orig_ref);
+ enum fetch_state state = FETCH_CHECK_LOCAL;
+ struct oidset common = OIDSET_INIT;
+ struct packet_reader reader;
+ int in_vain = 0;
+ int haves_to_send = INITIAL_FLUSH;
+ packet_reader_init(&reader, fd[0], NULL, 0,
+ PACKET_READ_CHOMP_NEWLINE);
+
+ while (state != FETCH_DONE) {
+ switch (state) {
+ case FETCH_CHECK_LOCAL:
+ sort_ref_list(&ref, ref_compare_name);
+ QSORT(sought, nr_sought, cmp_ref_by_name);
+
+ /* v2 supports these by default */
+ allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
+ use_sideband = 2;
+ if (args->depth > 0 || args->deepen_since || args->deepen_not)
+ args->deepen = 1;
+
+ if (marked)
+ for_each_ref(clear_marks, NULL);
+ marked = 1;
+
+ for_each_ref(rev_list_insert_ref_oid, NULL);
+ for_each_cached_alternate(insert_one_alternate_object);
+
+ /* Filter 'ref' by 'sought' and those that aren't local */
+ if (everything_local(args, &ref, sought, nr_sought))
+ state = FETCH_DONE;
+ else
+ state = FETCH_SEND_REQUEST;
+ break;
+ case FETCH_SEND_REQUEST:
+ if (send_fetch_request(fd[1], args, ref, &common,
+ &haves_to_send, &in_vain))
+ state = FETCH_GET_PACK;
+ else
+ state = FETCH_PROCESS_ACKS;
+ break;
+ case FETCH_PROCESS_ACKS:
+ /* Process ACKs/NAKs */
+ switch (process_acks(&reader, &common)) {
+ case 2:
+ state = FETCH_GET_PACK;
+ break;
+ case 1:
+ in_vain = 0;
+ /* fallthrough */
+ default:
+ state = FETCH_SEND_REQUEST;
+ break;
+ }
+ break;
+ case FETCH_GET_PACK:
+ /* Check for shallow-info section */
+ if (process_section_header(&reader, "shallow-info", 1))
+ receive_shallow_info(args, &reader);
+
+ /* get the pack */
+ process_section_header(&reader, "packfile", 0);
+ if (get_pack(args, fd, pack_lockfile))
+ die(_("git fetch-pack: fetch failed."));
+
+ state = FETCH_DONE;
+ break;
+ case FETCH_DONE:
+ continue;
+ }
+ }
+
+ oidset_clear(&common);
+ return ref;
+}
+
static void fetch_pack_config(void)
{
git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
const char *dest,
struct ref **sought, int nr_sought,
struct oid_array *shallow,
- char **pack_lockfile)
+ char **pack_lockfile,
+ enum protocol_version version)
{
struct ref *ref_cpy;
struct shallow_info si;
die(_("no matching remote head"));
}
prepare_shallow_info(&si, shallow);
- ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
- &si, pack_lockfile);
+ if (version == protocol_v2)
+ ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
+ pack_lockfile);
+ else
+ ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
+ &si, pack_lockfile);
reprepare_packed_git(the_repository);
update_shallow(args, sought, nr_sought, &si);
clear_shallow_info(&si);
#include "string-list.h"
#include "run-command.h"
+#include "protocol.h"
#include "list-objects-filter-options.h"
struct oid_array;
struct ref **sought,
int nr_sought,
struct oid_array *shallow,
- char **pack_lockfile);
+ char **pack_lockfile,
+ enum protocol_version version);
/*
* Print an appropriate error message for each sought ref that wasn't
if (!(argv[0] = core_fsmonitor))
return -1;
- snprintf(ver, sizeof(version), "%d", version);
+ snprintf(ver, sizeof(ver), "%d", version);
snprintf(date, sizeof(date), "%" PRIuMAX, (uintmax_t)last_update);
argv[1] = ver;
argv[2] = date;
for (i = 0; i < istate->cache_nr; i++)
istate->cache[i]->ce_flags &= ~CE_FSMONITOR_VALID;
+ /* If we're going to check every file, ensure we save the results */
+ istate->cache_changed |= FSMONITOR_CHANGED;
+
if (istate->untracked)
istate->untracked->use_fsmonitor = 0;
}
*/
#include "cache.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "gettext.h"
#include "strbuf.h"
#include "utf8.h"
bind . <$M1B-Key-plus> {show_more_context;break}
bind . <$M1B-Key-KP_Add> {show_more_context;break}
bind . <$M1B-Key-Return> do_commit
+bind . <$M1B-Key-KP_Enter> do_commit
foreach i [list $ui_index $ui_workdir] {
bind $i <Button-1> { toggle_or_diff click %W %x %y; break }
bind $i <$M1B-Button-1> { add_one_to_selection %W %x %y; break }
# Copyright (C) 2006, 2007 Shawn Pearce
proc find_ssh_key {} {
- foreach name {~/.ssh/id_dsa.pub ~/.ssh/id_rsa.pub ~/.ssh/identity.pub} {
+ foreach name {
+ ~/.ssh/id_dsa.pub ~/.ssh/id_ecdsa.pub ~/.ssh/id_ed25519.pub
+ ~/.ssh/id_rsa.pub ~/.ssh/identity.pub
+ } {
if {[file exists $name]} {
set fh [open $name r]
set cont [read $fh]
# Functions for supporting the use of themed Tk widgets in git-gui.
# Copyright (C) 2009 Pat Thoyts <patthoyts@users.sourceforge.net>
+proc ttk_get_current_theme {} {
+ # Handle either current Tk or older versions of 8.5
+ if {[catch {set theme [ttk::style theme use]}]} {
+ set theme $::ttk::currentTheme
+ }
+ return $theme
+}
+
proc InitTheme {} {
# Create a color label style (bg can be overridden by widget option)
ttk::style layout Color.TLabel {
}
}
- # Handle either current Tk or older versions of 8.5
- if {[catch {set theme [ttk::style theme use]}]} {
- set theme $::ttk::currentTheme
- }
+ set theme [ttk_get_current_theme]
if {[lsearch -exact {default alt classic clam} $theme] != -1} {
# Simple override of standard ttk::entry to change the field
proc ttext {w args} {
global use_ttk
if {$use_ttk} {
- switch -- [ttk::style theme use] {
+ switch -- [ttk_get_current_theme] {
"vista" - "xpnative" {
lappend args -highlightthickness 0 -borderwidth 0
}
fi
ret=0
-if test -n "$keep_empty"
-then
- # we have to do this the hard way. git format-patch completely squashes
- # empty commits and even if it didn't the format doesn't really lend
- # itself well to recording empty patches. fortunately, cherry-pick
- # makes this easy
- git cherry-pick ${gpg_sign_opt:+"$gpg_sign_opt"} --allow-empty \
- $allow_rerere_autoupdate --right-only "$revisions" \
- $allow_empty_message \
- ${restrict_revision+^$restrict_revision}
- ret=$?
-else
- rm -f "$GIT_DIR/rebased-patches"
+rm -f "$GIT_DIR/rebased-patches"
- git format-patch -k --stdout --full-index --cherry-pick --right-only \
- --src-prefix=a/ --dst-prefix=b/ --no-renames --no-cover-letter \
- --pretty=mboxrd \
- $git_format_patch_opt \
- "$revisions" ${restrict_revision+^$restrict_revision} \
- >"$GIT_DIR/rebased-patches"
- ret=$?
+git format-patch -k --stdout --full-index --cherry-pick --right-only \
+ --src-prefix=a/ --dst-prefix=b/ --no-renames --no-cover-letter \
+ --pretty=mboxrd \
+ $git_format_patch_opt \
+ "$revisions" ${restrict_revision+^$restrict_revision} \
+ >"$GIT_DIR/rebased-patches"
+ret=$?
- if test 0 != $ret
- then
- rm -f "$GIT_DIR/rebased-patches"
- case "$head_name" in
- refs/heads/*)
- git checkout -q "$head_name"
- ;;
- *)
- git checkout -q "$orig_head"
- ;;
- esac
+if test 0 != $ret
+then
+ rm -f "$GIT_DIR/rebased-patches"
+ case "$head_name" in
+ refs/heads/*)
+ git checkout -q "$head_name"
+ ;;
+ *)
+ git checkout -q "$orig_head"
+ ;;
+ esac
- cat >&2 <<-EOF
+ cat >&2 <<-EOF
- git encountered an error while preparing the patches to replay
- these revisions:
+ git encountered an error while preparing the patches to replay
+ these revisions:
- $revisions
+ $revisions
- As a result, git cannot rebase them.
- EOF
- return $ret
- fi
+ As a result, git cannot rebase them.
+ EOF
+ return $ret
+fi
- git am $git_am_opt --rebasing --resolvemsg="$resolvemsg" \
- --patch-format=mboxrd \
- $allow_rerere_autoupdate \
- ${gpg_sign_opt:+"$gpg_sign_opt"} <"$GIT_DIR/rebased-patches"
- ret=$?
+git am $git_am_opt --rebasing --resolvemsg="$resolvemsg" \
+ --patch-format=mboxrd \
+ $allow_rerere_autoupdate \
+ ${gpg_sign_opt:+"$gpg_sign_opt"} <"$GIT_DIR/rebased-patches"
+ret=$?
- rm -f "$GIT_DIR/rebased-patches"
-fi
+rm -f "$GIT_DIR/rebased-patches"
if test 0 != $ret
then
pick_one_preserving_merges "$@" && return
output eval git cherry-pick $allow_rerere_autoupdate $allow_empty_message \
${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")} \
- "$strategy_args" $empty_args $ff "$@"
+ $signoff "$strategy_args" $empty_args $ff "$@"
# If cherry-pick dies it leaves the to-be-picked commit unrecorded. Reschedule
# previous task so this commit is not lost.
# resolve before manually running git commit --amend then git
# rebase --continue.
git commit --allow-empty --allow-empty-message --amend \
- --no-post-rewrite -n -q -C $sha1 &&
+ --no-post-rewrite -n -q -C $sha1 $signoff &&
pick_one -n $sha1 &&
git commit --allow-empty --allow-empty-message \
- --amend --no-post-rewrite -n -q -C $sha1 \
+ --amend --no-post-rewrite -n -q -C $sha1 $signoff \
${gpg_sign_opt:+"$gpg_sign_opt"} ||
die_with_patch $sha1 "$(eval_gettext "Could not apply \$sha1... \$rest")"
else
cmt=$(cat "$state_dir/current")
if ! git diff-index --quiet --ignore-submodules HEAD --
then
- if ! git commit ${gpg_sign_opt:+"$gpg_sign_opt"} $allow_empty_message \
+ if ! git commit ${gpg_sign_opt:+"$gpg_sign_opt"} $signoff $allow_empty_message \
--no-verify -C "$cmt"
then
echo "Commit failed, please do not call \"git commit\""
You can instead skip this commit: run "git rebase --skip".
To abort and get back to the state before "git rebase", run "git rebase --abort".')
"
+squash_onto=
unset onto
unset restrict_revision
cmd=
autosquash=
keep_empty=
allow_empty_message=
+signoff=
test "$(git config --bool rebase.autosquash)" = "true" && autosquash=t
case "$(git config --bool commit.gpgsign)" in
true) gpg_sign_opt=-S ;;
allow_rerere_autoupdate="$(cat "$state_dir"/allow_rerere_autoupdate)"
test -f "$state_dir"/gpg_sign_opt &&
gpg_sign_opt="$(cat "$state_dir"/gpg_sign_opt)"
+ test -f "$state_dir"/signoff && {
+ signoff="$(cat "$state_dir"/signoff)"
+ force_rebase=t
+ }
}
write_basic_state () {
test -n "$allow_rerere_autoupdate" && echo "$allow_rerere_autoupdate" > \
"$state_dir"/allow_rerere_autoupdate
test -n "$gpg_sign_opt" && echo "$gpg_sign_opt" > "$state_dir"/gpg_sign_opt
+ test -n "$signoff" && echo "$signoff" >"$state_dir"/signoff
}
output () {
--allow-empty-message)
allow_empty_message=--allow-empty-message
;;
+ --no-keep-empty)
+ keep_empty=
+ ;;
--preserve-merges)
preserve_merges=t
test -z "$interactive_rebase" && interactive_rebase=implied
--ignore-whitespace)
git_am_opt="$git_am_opt $1"
;;
- --committer-date-is-author-date|--ignore-date|--signoff|--no-signoff)
+ --signoff)
+ signoff=--signoff
+ ;;
+ --no-signoff)
+ signoff=
+ ;;
+ --committer-date-is-author-date|--ignore-date)
git_am_opt="$git_am_opt $1"
force_rebase=t
;;
test -z "$interactive_rebase" && interactive_rebase=implied
fi
+if test -n "$keep_empty"
+then
+ test -z "$interactive_rebase" && interactive_rebase=implied
+fi
+
if test -n "$interactive_rebase"
then
type=interactive
git_format_patch_opt="$git_format_patch_opt --progress"
fi
+if test -n "$signoff"
+then
+ test -n "$preserve_merges" &&
+ die "$(gettext "error: cannot combine '--signoff' with '--preserve-merges'")"
+ git_am_opt="$git_am_opt $signoff"
+ force_rebase=t
+fi
+
if test -z "$rebase_root"
then
case "$#" in
usage(1) unless defined $cmd;
load_authors() if $_authors;
if (defined $_authors_prog) {
- $_authors_prog = "'" . File::Spec->rel2abs($_authors_prog) . "'";
+ my $abs_file = File::Spec->rel2abs($_authors_prog);
+ $_authors_prog = "'" . $abs_file . "'" if -x $abs_file;
}
unless ($cmd =~ /^(?:clone|init|multi-init|commit-diff)$/) {
#include "builtin.h"
#include "config.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "help.h"
#include "run-command.h"
{ "revert", cmd_revert, RUN_SETUP | NEED_WORK_TREE },
{ "rm", cmd_rm, RUN_SETUP },
{ "send-pack", cmd_send_pack, RUN_SETUP },
+ { "serve", cmd_serve, RUN_SETUP },
{ "shortlog", cmd_shortlog, RUN_SETUP_GENTLY | USE_PAGER },
{ "show", cmd_show, RUN_SETUP },
{ "show-branch", cmd_show_branch, RUN_SETUP },
{ "update-server-info", cmd_update_server_info, RUN_SETUP },
{ "upload-archive", cmd_upload_archive, NO_PARSEOPT },
{ "upload-archive--writer", cmd_upload_archive_writer, NO_PARSEOPT },
+ { "upload-pack", cmd_upload_pack },
{ "var", cmd_var, RUN_SETUP_GENTLY | NO_PARSEOPT },
{ "verify-commit", cmd_verify_commit, RUN_SETUP },
{ "verify-pack", cmd_verify_pack },
#include "cache.h"
#include "config.h"
#include "builtin.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "run-command.h"
#include "levenshtein.h"
#include "help.h"
#include "pkt-line.h"
#include "object.h"
#include "tag.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "run-command.h"
#include "string-list.h"
#include "url.h"
#include "argv-array.h"
#include "packfile.h"
#include "object-store.h"
+#include "protocol.h"
static const char content_type[] = "Content-Type";
static const char content_length[] = "Content-Length";
hdr_str(hdr, content_type, buf.buf);
end_headers(hdr);
- packet_write_fmt(1, "# service=git-%s\n", svc->name);
- packet_flush(1);
+
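+	/*
+	 * The "# service=git-..." prelude is only part of the v0/v1
+	 * smart-HTTP response; a v2 capability advertisement begins
+	 * immediately, so skip the prelude when the client asked for v2.
+	 */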
+ if (determine_protocol_version_server() != protocol_v2) {
+ packet_write_fmt(1, "# service=git-%s\n", svc->name);
+ packet_flush(1);
+ }
argv[0] = svc->name;
run_service(argv, 0);
#include "cache.h"
#include "config.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "http.h"
#include "walker.h"
#include "refs.h"
#include "diff.h"
#include "revision.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "remote.h"
#include "list-objects.h"
#include "sigchain.h"
*var = val;
}
-static void protocol_http_header(void)
-{
- if (get_protocol_version_config() > 0) {
- struct strbuf protocol_header = STRBUF_INIT;
-
- strbuf_addf(&protocol_header, GIT_PROTOCOL_HEADER ": version=%d",
- get_protocol_version_config());
-
-
- extra_http_headers = curl_slist_append(extra_http_headers,
- protocol_header.buf);
- strbuf_release(&protocol_header);
- }
-}
-
void http_init(struct remote *remote, const char *url, int proactive_auth)
{
char *low_speed_limit;
if (remote)
var_override(&http_proxy_authmethod, remote->http_proxy_authmethod);
- protocol_http_header();
-
pragma_header = curl_slist_append(http_copy_default_headers(),
"Pragma: no-cache");
no_pragma_header = curl_slist_append(http_copy_default_headers(),
headers = curl_slist_append(headers, buf.buf);
+ /* Add additional headers here */
+ if (options && options->extra_headers) {
+ const struct string_list_item *item;
+ for_each_string_list_item(item, options->extra_headers) {
+ headers = curl_slist_append(headers, item->string);
+ }
+ }
+
curl_easy_setopt(slot->curl, CURLOPT_URL, url);
curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, headers);
curl_easy_setopt(slot->curl, CURLOPT_ENCODING, "gzip");
* for details.
*/
struct strbuf *base_url;
+
+ /*
+ * If not NULL, contains additional HTTP headers to be sent with the
+ * request. The strings in the list must not be freed until after the
+ * request has completed.
+ */
+ struct string_list *extra_headers;
};
/* Return values for http_get_*() */
#include "cache.h"
#include "config.h"
#include "credential.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "run-command.h"
#include "parse-options.h"
#ifdef NO_OPENSSL
* in the traversal (until we mark it SEEN). This is a way to
* let us silently de-dup calls to show() in the caller. This
* is subtly different from the "revision.h:SHOWN" and the
- * "sha1_name.c:ONELINE_SEEN" bits. And also different from
+ * "sha1-name.c:ONELINE_SEEN" bits. And also different from
* the non-de-dup usage in pack-bitmap.c
*/
#define FILTER_SHOWN_BUT_REVISIT (1<<21)
--- /dev/null
+#include "cache.h"
+#include "repository.h"
+#include "refs.h"
+#include "remote.h"
+#include "argv-array.h"
+#include "ls-refs.h"
+#include "pkt-line.h"
+
+/*
+ * Check if one of the prefixes is a prefix of the ref.
+ * If no prefixes were provided, all refs match.
+ */
+static int ref_match(const struct argv_array *prefixes, const char *refname)
+{
+ int i;
+
+ if (!prefixes->argc)
+ return 1; /* no restriction */
+
+ for (i = 0; i < prefixes->argc; i++) {
+ const char *prefix = prefixes->argv[i];
+
+ if (starts_with(refname, prefix))
+ return 1;
+ }
+
+ return 0;
+}
+
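+/* Options parsed from an ls-refs request, passed to each send_ref() callback. */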
+struct ls_refs_data {
+ unsigned peel;
+ unsigned symrefs;
+ struct argv_array prefixes;
+};
+
+static int send_ref(const char *refname, const struct object_id *oid,
+ int flag, void *cb_data)
+{
+ struct ls_refs_data *data = cb_data;
+ const char *refname_nons = strip_namespace(refname);
+ struct strbuf refline = STRBUF_INIT;
+
+ if (!ref_match(&data->prefixes, refname))
+ return 0;
+
+ strbuf_addf(&refline, "%s %s", oid_to_hex(oid), refname_nons);
+ if (data->symrefs && flag & REF_ISSYMREF) {
+ struct object_id unused;
+ const char *symref_target = resolve_ref_unsafe(refname, 0,
+ &unused,
+ &flag);
+
+ if (!symref_target)
+ die("'%s' is a symref but it is not?", refname);
+
+ strbuf_addf(&refline, " symref-target:%s", symref_target);
+ }
+
+ if (data->peel) {
+ struct object_id peeled;
+ if (!peel_ref(refname, &peeled))
+ strbuf_addf(&refline, " peeled:%s", oid_to_hex(&peeled));
+ }
+
+ strbuf_addch(&refline, '\n');
+ packet_write(1, refline.buf, refline.len);
+
+ strbuf_release(&refline);
+ return 0;
+}
+
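+/*
+ * Handle an ls-refs request: parse the "peel", "symrefs" and "ref-prefix"
+ * arguments up to the flush packet, then emit one pkt-line per matching ref
+ * followed by a flush.
+ */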
+int ls_refs(struct repository *r, struct argv_array *keys,
+ struct packet_reader *request)
+{
+ struct ls_refs_data data;
+
+ memset(&data, 0, sizeof(data));
+
+ while (packet_reader_read(request) != PACKET_READ_FLUSH) {
+ const char *arg = request->line;
+ const char *out;
+
+ if (!strcmp("peel", arg))
+ data.peel = 1;
+ else if (!strcmp("symrefs", arg))
+ data.symrefs = 1;
+ else if (skip_prefix(arg, "ref-prefix ", &out))
+ argv_array_push(&data.prefixes, out);
+ }
+
+ head_ref_namespaced(send_ref, &data);
+ for_each_namespaced_ref(send_ref, &data);
+ packet_flush(1);
+ argv_array_clear(&data.prefixes);
+ return 0;
+}
--- /dev/null
+#ifndef LS_REFS_H
+#define LS_REFS_H
+
+struct repository;
+struct argv_array;
+struct packet_reader;
+extern int ls_refs(struct repository *r, struct argv_array *keys,
+ struct packet_reader *request);
+
+#endif /* LS_REFS_H */
--- /dev/null
+/*
+ * Memory Pool implementation logic.
+ */
+
+#include "cache.h"
+#include "mem-pool.h"
+
+static struct mp_block *mem_pool_alloc_block(struct mem_pool *mem_pool, size_t block_alloc)
+{
+ struct mp_block *p;
+
+ mem_pool->pool_alloc += sizeof(struct mp_block) + block_alloc;
+ p = xmalloc(st_add(sizeof(struct mp_block), block_alloc));
+ p->next_block = mem_pool->mp_block;
+ p->next_free = (char *)p->space;
+ p->end = p->next_free + block_alloc;
+ mem_pool->mp_block = p;
+
+ return p;
+}
+
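+/*
+ * Serve 'len' bytes from the first block with enough room; requests of at
+ * least half the block size bypass the pool and go straight to xmalloc().
+ */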
+void *mem_pool_alloc(struct mem_pool *mem_pool, size_t len)
+{
+ struct mp_block *p;
+ void *r;
+
+ /* round up to a 'uintmax_t' alignment */
+ if (len & (sizeof(uintmax_t) - 1))
+ len += sizeof(uintmax_t) - (len & (sizeof(uintmax_t) - 1));
+
+ for (p = mem_pool->mp_block; p; p = p->next_block)
+ if (p->end - p->next_free >= len)
+ break;
+
+ if (!p) {
+ if (len >= (mem_pool->block_alloc / 2)) {
+ mem_pool->pool_alloc += len;
+ return xmalloc(len);
+ }
+
+ p = mem_pool_alloc_block(mem_pool, mem_pool->block_alloc);
+ }
+
+ r = p->next_free;
+ p->next_free += len;
+ return r;
+}
+
+void *mem_pool_calloc(struct mem_pool *mem_pool, size_t count, size_t size)
+{
+ size_t len = st_mult(count, size);
+ void *r = mem_pool_alloc(mem_pool, len);
+ memset(r, 0, len);
+ return r;
+}
--- /dev/null
+#ifndef MEM_POOL_H
+#define MEM_POOL_H
+
+struct mp_block {
+ struct mp_block *next_block;
+ char *next_free;
+ char *end;
+ uintmax_t space[FLEX_ARRAY]; /* more */
+};
+
+struct mem_pool {
+ struct mp_block *mp_block;
+
+ /*
+ * The amount of available memory to grow the pool by.
+ * This size does not include the overhead for the mp_block.
+ */
+ size_t block_alloc;
+
+ /* The total amount of memory allocated by the pool. */
+ size_t pool_alloc;
+};
+
+/*
+ * Alloc memory from the mem_pool.
+ */
+void *mem_pool_alloc(struct mem_pool *pool, size_t len);
+
+/*
+ * Allocate and zero memory from the memory pool.
+ */
+void *mem_pool_calloc(struct mem_pool *pool, size_t count, size_t size);
+
+#endif
--- /dev/null
+diff_cmd () {
+ "$merge_tool_path" "$LOCAL" "$REMOTE"
+}
+
+merge_cmd () {
+ if $base_present
+ then
+ "$merge_tool_path" -s "$LOCAL" \
+ "$REMOTE" "$BASE" "$MERGED"
+ else
+ "$merge_tool_path" -m "$LOCAL" \
+ "$REMOTE" "$MERGED"
+ fi
+}
+
+exit_code_trustable () {
+ true
+}
* bundle.c: 16
* http-push.c: 16-----19
* commit.c: 16-----19
- * sha1_name.c: 20
+ * sha1-name.c: 20
* list-objects-filter.c: 21
* builtin/fsck.c: 0--3
* builtin/index-pack.c: 2021
}
if ($author =~ /^\s*(.+?)\s*<(.*)>\s*$/) {
my ($name, $email) = ($1, $2);
- $email = undef if length $2 == 0;
return [$name, $email];
} else {
die "Author: $orig_author: $::_authors_prog returned "
remove_username($full_url);
$log_entry{metadata} = "$full_url\@$r $uuid";
$log_entry{svm_revision} = $r;
- $email ||= "$author\@$uuid";
- $commit_email ||= "$author\@$uuid";
+ $email = "$author\@$uuid" unless defined $email;
+ $commit_email = "$author\@$uuid" unless defined $commit_email;
} elsif ($self->use_svnsync_props) {
my $full_url = canonicalize_url(
add_path_to_url( $self->svnsync->{url}, $self->path )
remove_username($full_url);
my $uuid = $self->svnsync->{uuid};
$log_entry{metadata} = "$full_url\@$rev $uuid";
- $email ||= "$author\@$uuid";
- $commit_email ||= "$author\@$uuid";
+ $email = "$author\@$uuid" unless defined $email;
+ $commit_email = "$author\@$uuid" unless defined $commit_email;
} else {
my $url = $self->metadata_url;
remove_username($url);
my $uuid = $self->rewrite_uuid || $self->ra->get_uuid;
$log_entry{metadata} = "$url\@$rev " . $uuid;
- $email ||= "$author\@" . $uuid;
- $commit_email ||= "$author\@" . $uuid;
+ $email = "$author\@$uuid" unless defined $email;
+ $commit_email = "$author\@$uuid" unless defined $commit_email;
}
$log_entry{name} = $name;
$log_entry{email} = $email;
write_or_die(fd, "0000", 4);
}
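+/* Write a delim packet ("0001"), used by protocol v2 to separate sections of a request. */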
+void packet_delim(int fd)
+{
+ packet_trace("0001", 4, 1);
+ write_or_die(fd, "0001", 4);
+}
+
int packet_flush_gently(int fd)
{
packet_trace("0000", 4, 1);
strbuf_add(buf, "0000", 4);
}
+void packet_buf_delim(struct strbuf *buf)
+{
+ packet_trace("0001", 4, 1);
+ strbuf_add(buf, "0001", 4);
+}
+
static void set_packet_header(char *buf, const int size)
{
static char hexchar[] = "0123456789abcdef";
va_end(args);
}
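+/*
+ * Append a pkt-line to 'buf': reserve a four-byte length placeholder, add the
+ * data, then overwrite the placeholder with the real packet length.
+ */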
+void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len)
+{
+ size_t orig_len, n;
+
+ orig_len = buf->len;
+ strbuf_addstr(buf, "0000");
+ strbuf_add(buf, data, len);
+ n = buf->len - orig_len;
+
+ if (n > LARGE_PACKET_MAX)
+ die("protocol error: impossibly long line");
+
+ set_packet_header(&buf->buf[orig_len], n);
+ packet_trace(data, len, 1);
+}
+
int write_packetized_from_fd(int fd_in, int fd_out)
{
static char buf[LARGE_PACKET_DATA_MAX];
return (val < 0) ? val : (val << 8) | hex2chr(linelen + 2);
}
-int packet_read(int fd, char **src_buf, size_t *src_len,
- char *buffer, unsigned size, int options)
+enum packet_read_status packet_read_with_status(int fd, char **src_buffer,
+ size_t *src_len, char *buffer,
+ unsigned size, int *pktlen,
+ int options)
{
- int len, ret;
+ int len;
char linelen[4];
- ret = get_packet_data(fd, src_buf, src_len, linelen, 4, options);
- if (ret < 0)
- return ret;
+ if (get_packet_data(fd, src_buffer, src_len, linelen, 4, options) < 0) {
+ *pktlen = -1;
+ return PACKET_READ_EOF;
+ }
+
len = packet_length(linelen);
- if (len < 0)
+
+ if (len < 0) {
die("protocol error: bad line length character: %.4s", linelen);
- if (!len) {
+ } else if (!len) {
packet_trace("0000", 4, 0);
- return 0;
+ *pktlen = 0;
+ return PACKET_READ_FLUSH;
+ } else if (len == 1) {
+ packet_trace("0001", 4, 0);
+ *pktlen = 0;
+ return PACKET_READ_DELIM;
+ } else if (len < 4) {
+ die("protocol error: bad line length %d", len);
}
+
len -= 4;
- if (len >= size)
+ if ((unsigned)len >= size)
die("protocol error: bad line length %d", len);
- ret = get_packet_data(fd, src_buf, src_len, buffer, len, options);
- if (ret < 0)
- return ret;
+
+ if (get_packet_data(fd, src_buffer, src_len, buffer, len, options) < 0) {
+ *pktlen = -1;
+ return PACKET_READ_EOF;
+ }
if ((options & PACKET_READ_CHOMP_NEWLINE) &&
len && buffer[len-1] == '\n')
buffer[len] = 0;
packet_trace(buffer, len, 0);
- return len;
+ *pktlen = len;
+ return PACKET_READ_NORMAL;
+}
+
+int packet_read(int fd, char **src_buffer, size_t *src_len,
+ char *buffer, unsigned size, int options)
+{
+ int pktlen = -1;
+
+ packet_read_with_status(fd, src_buffer, src_len, buffer, size,
+ &pktlen, options);
+
+ return pktlen;
}
static char *packet_read_line_generic(int fd,
}
return sb_out->len - orig_len;
}
+
+/* Packet Reader Functions */
+void packet_reader_init(struct packet_reader *reader, int fd,
+ char *src_buffer, size_t src_len,
+ int options)
+{
+ memset(reader, 0, sizeof(*reader));
+
+ reader->fd = fd;
+ reader->src_buffer = src_buffer;
+ reader->src_len = src_len;
+ reader->buffer = packet_buffer;
+ reader->buffer_size = sizeof(packet_buffer);
+ reader->options = options;
+}
+
+enum packet_read_status packet_reader_read(struct packet_reader *reader)
+{
+ if (reader->line_peeked) {
+ reader->line_peeked = 0;
+ return reader->status;
+ }
+
+ reader->status = packet_read_with_status(reader->fd,
+ &reader->src_buffer,
+ &reader->src_len,
+ reader->buffer,
+ reader->buffer_size,
+ &reader->pktlen,
+ reader->options);
+
+ if (reader->status == PACKET_READ_NORMAL)
+ reader->line = reader->buffer;
+ else
+ reader->line = NULL;
+
+ return reader->status;
+}
+
+enum packet_read_status packet_reader_peek(struct packet_reader *reader)
+{
+ /* Only allow peeking a single line */
+ if (reader->line_peeked)
+ return reader->status;
+
+ /* Peek a line by reading it and setting peeked flag */
+ packet_reader_read(reader);
+ reader->line_peeked = 1;
+ return reader->status;
+}
* side can't, we stay with pure read/write interfaces.
*/
void packet_flush(int fd);
+void packet_delim(int fd);
void packet_write_fmt(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
void packet_buf_flush(struct strbuf *buf);
+void packet_buf_delim(struct strbuf *buf);
void packet_write(int fd_out, const char *buf, size_t size);
void packet_buf_write(struct strbuf *buf, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
+void packet_buf_write_len(struct strbuf *buf, const char *data, size_t len);
int packet_flush_gently(int fd);
int packet_write_fmt_gently(int fd, const char *fmt, ...) __attribute__((format (printf, 2, 3)));
int write_packetized_from_fd(int fd_in, int fd_out);
int packet_read(int fd, char **src_buffer, size_t *src_len, char
*buffer, unsigned size, int options);
+/*
+ * Read a packetized line into a buffer like the 'packet_read()' function but
+ * returns an 'enum packet_read_status' which indicates the status of the read.
+ * The number of bytes read will be assigned to *pktlen if the status of the
+ * read was 'PACKET_READ_NORMAL'.
+ */
+enum packet_read_status {
+ PACKET_READ_EOF,
+ PACKET_READ_NORMAL,
+ PACKET_READ_FLUSH,
+ PACKET_READ_DELIM,
+};
+enum packet_read_status packet_read_with_status(int fd, char **src_buffer,
+ size_t *src_len, char *buffer,
+ unsigned size, int *pktlen,
+ int options);
+
/*
* Convenience wrapper for packet_read that is not gentle, and sets the
* CHOMP_NEWLINE option. The return value is NULL for a flush packet,
*/
ssize_t read_packetized_to_strbuf(int fd_in, struct strbuf *sb_out);
+struct packet_reader {
+ /* source file descriptor */
+ int fd;
+
+ /* source buffer and its size */
+ char *src_buffer;
+ size_t src_len;
+
+ /* buffer that pkt-lines are read into and its size */
+ char *buffer;
+ unsigned buffer_size;
+
+ /* options to be used during reads */
+ int options;
+
+ /* status of the last read */
+ enum packet_read_status status;
+
+ /* length of data read during the last read */
+ int pktlen;
+
+ /* the last line read */
+ const char *line;
+
+ /* indicates if a line has been peeked */
+ int line_peeked;
+};
+
+/*
+ * Initialize a 'struct packet_reader' object which is an
+ * abstraction around the 'packet_read_with_status()' function.
+ */
+extern void packet_reader_init(struct packet_reader *reader, int fd,
+ char *src_buffer, size_t src_len,
+ int options);
+
+/*
+ * Perform a packet read and return the status of the read.
+ * The values of 'pktlen' and 'line' are updated based on the status of the
+ * read as follows:
+ *
+ * PACKET_READ_EOF: 'pktlen' is set to '-1' and 'line' is set to NULL
+ * PACKET_READ_NORMAL: 'pktlen' is set to the number of bytes read
+ *                     'line' is set to point at the read line
+ * PACKET_READ_DELIM: 'pktlen' is set to '0' and 'line' is set to NULL
+ * PACKET_READ_FLUSH: 'pktlen' is set to '0' and 'line' is set to NULL
+ */
+extern enum packet_read_status packet_reader_read(struct packet_reader *reader);
+
+/*
+ * Peek the next packet line without consuming it and return the status.
+ * The next call to 'packet_reader_read()' will perform a read of the same line
+ * that was peeked, consuming the line.
+ *
+ * Peeking multiple times without calling 'packet_reader_read()' will return
+ * the same result.
+ */
+extern enum packet_read_status packet_reader_peek(struct packet_reader *reader);
+
#define DEFAULT_PACKET_MAX 1000
#define LARGE_PACKET_MAX 65520
#define LARGE_PACKET_DATA_MAX (LARGE_PACKET_MAX - 4)
return protocol_v0;
else if (!strcmp(value, "1"))
return protocol_v1;
+ else if (!strcmp(value, "2"))
+ return protocol_v2;
else
return protocol_unknown_version;
}
protocol_unknown_version = -1,
protocol_v0 = 0,
protocol_v1 = 1,
+ protocol_v2 = 2,
};
/*
memset(&state, 0, sizeof(state));
wt_status_get_state(&state, 1);
if (state.rebase_in_progress ||
- state.rebase_interactive_in_progress)
- strbuf_addf(&desc, _("(no branch, rebasing %s)"),
- state.branch);
- else if (state.bisect_in_progress)
+ state.rebase_interactive_in_progress) {
+ if (state.branch)
+ strbuf_addf(&desc, _("(no branch, rebasing %s)"),
+ state.branch);
+ else
+ strbuf_addf(&desc, _("(no branch, rebasing detached HEAD %s)"),
+ state.detached_from);
+ } else if (state.bisect_in_progress)
strbuf_addf(&desc, _("(no branch, bisect started on %s)"),
state.branch);
else if (state.detached_from) {
return NULL;
}
-/* Allocate space for a new ref_array_item and copy the objectname and flag to it */
+/*
+ * Allocate space for a new ref_array_item and copy the name and oid to it.
+ *
+ * Callers can then fill in other struct members at their leisure.
+ */
static struct ref_array_item *new_ref_array_item(const char *refname,
- const unsigned char *objectname,
- int flag)
+ const struct object_id *oid)
{
struct ref_array_item *ref;
+
FLEX_ALLOC_STR(ref, refname, refname);
- hashcpy(ref->objectname.hash, objectname);
- ref->flag = flag;
+ oidcpy(&ref->objectname, oid);
+
+ return ref;
+}
+
+struct ref_array_item *ref_array_push(struct ref_array *array,
+ const char *refname,
+ const struct object_id *oid)
+{
+ struct ref_array_item *ref = new_ref_array_item(refname, oid);
+
+ ALLOC_GROW(array->items, array->nr + 1, array->alloc);
+ array->items[array->nr++] = ref;
return ref;
}
* to do its job and the resulting list may yet to be pruned
* by maxcount logic.
*/
- ref = new_ref_array_item(refname, oid->hash, flag);
+ ref = ref_array_push(ref_cbdata->array, refname, oid);
ref->commit = commit;
-
- REALLOC_ARRAY(ref_cbdata->array->items, ref_cbdata->array->nr + 1);
- ref_cbdata->array->items[ref_cbdata->array->nr++] = ref;
+ ref->flag = flag;
ref->kind = kind;
+
return 0;
}
putchar('\n');
}
-void pretty_print_ref(const char *name, const unsigned char *sha1,
+void pretty_print_ref(const char *name, const struct object_id *oid,
const struct ref_format *format)
{
struct ref_array_item *ref_item;
- ref_item = new_ref_array_item(name, sha1, 0);
+ ref_item = new_ref_array_item(name, oid);
ref_item->kind = ref_kind_from_refname(name);
show_ref_array_item(ref_item, format);
free_array_item(ref_item);
* Print a single ref, outside of any ref-filter. Note that the
* name must be a fully qualified refname.
*/
-void pretty_print_ref(const char *name, const unsigned char *sha1,
+void pretty_print_ref(const char *name, const struct object_id *oid,
const struct ref_format *format);
+/*
+ * Push a single ref onto the array; this can be used to construct your own
+ * ref_array without using filter_refs().
+ */
+struct ref_array_item *ref_array_push(struct ref_array *array,
+ const char *refname,
+ const struct object_id *oid);
+
#endif /* REF_FILTER_H */
#include "tag.h"
#include "submodule.h"
#include "worktree.h"
+#include "argv-array.h"
/*
* List of all available backends
return 0;
}
+/*
+ * Given a 'prefix' expand it by the rules in 'ref_rev_parse_rules' and add
+ * the results to 'prefixes'
+ */
+void expand_ref_prefix(struct argv_array *prefixes, const char *prefix)
+{
+ const char **p;
+ int len = strlen(prefix);
+
+ for (p = ref_rev_parse_rules; *p; p++)
+ argv_array_pushf(prefixes, *p, len, prefix);
+}
+
/*
* *string and *len will only be substituted, and *string returned (for
* later free()ing) if the string passed in is a magic short-hand form
*/
int refname_match(const char *abbrev_name, const char *full_name);
+/*
+ * Given a 'prefix' expand it by the rules in 'ref_rev_parse_rules' and add
+ * the results to 'prefixes'
+ */
+struct argv_array;
+void expand_ref_prefix(struct argv_array *prefixes, const char *prefix);
+
int expand_ref(const char *str, int len, struct object_id *oid, char **ref);
int dwim_ref(const char *str, int len, struct object_id *oid, char **ref);
int dwim_log(const char *str, int len, struct object_id *oid, char **ref);
#include "../lockfile.h"
#include "../object.h"
#include "../dir.h"
+#include "../chdir-notify.h"
/*
* This backend uses the following flags in `ref_update::flags` for
refs->packed_ref_store = packed_ref_store_create(sb.buf, flags);
strbuf_release(&sb);
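+ /*
+ * Register the cached (possibly relative) paths so they are adjusted
+ * if the process later changes its working directory.
+ */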
+ chdir_notify_reparent("files-backend $GIT_DIR",
+ &refs->gitdir);
+ chdir_notify_reparent("files-backend $GIT_COMMONDIR",
+ &refs->gitcommondir);
+
return ref_store;
}
#include "packed-backend.h"
#include "../iterator.h"
#include "../lockfile.h"
+#include "../chdir-notify.h"
enum mmap_strategy {
/*
refs->store_flags = store_flags;
refs->path = xstrdup(path);
+ chdir_notify_reparent("packed-refs", &refs->path);
+
return ref_store;
}
#include "cache.h"
#include "config.h"
#include "remote.h"
+#include "connect.h"
#include "strbuf.h"
#include "walker.h"
#include "http.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "run-command.h"
#include "pkt-line.h"
#include "string-list.h"
#include "credential.h"
#include "sha1-array.h"
#include "send-pack.h"
+#include "protocol.h"
#include "quote.h"
static struct remote *remote;
}
struct discovery {
- const char *service;
+ char *service;
char *buf_alloc;
char *buf;
size_t len;
struct ref *refs;
struct oid_array shallow;
+ enum protocol_version version;
unsigned proto_git : 1;
};
static struct discovery *last_discovery;
static struct ref *parse_git_refs(struct discovery *heads, int for_push)
{
struct ref *list = NULL;
- get_remote_heads(-1, heads->buf, heads->len, &list,
- for_push ? REF_NORMAL : 0, NULL, &heads->shallow);
+ struct packet_reader reader;
+
+ packet_reader_init(&reader, -1, heads->buf, heads->len,
+ PACKET_READ_CHOMP_NEWLINE |
+ PACKET_READ_GENTLE_ON_EOF);
+
+ heads->version = discover_version(&reader);
+ switch (heads->version) {
+ case protocol_v2:
+ /*
+ * Do nothing. This isn't a list of refs but rather a
+ * capability advertisement. Client would have run
+ * 'stateless-connect' so we'll dump this capability listing
+ * and let them request the refs themselves.
+ */
+ break;
+ case protocol_v1:
+ case protocol_v0:
+ get_remote_heads(&reader, &list, for_push ? REF_NORMAL : 0,
+ NULL, &heads->shallow);
+ break;
+ case protocol_unknown_version:
+ BUG("unknown protocol version");
+ }
+
return list;
}
free(d->shallow.oid);
free(d->buf_alloc);
free_refs(d->refs);
+ free(d->service);
free(d);
}
}
return 0;
}
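+/*
+ * Format a "Git-Protocol: version=N" header for the requested protocol
+ * version; returns 1 if a header was written, 0 for protocol v0.
+ */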
+static int get_protocol_http_header(enum protocol_version version,
+ struct strbuf *header)
+{
+ if (version > 0) {
+ strbuf_addf(header, GIT_PROTOCOL_HEADER ": version=%d",
+ version);
+
+ return 1;
+ }
+
+ return 0;
+}
+
static struct discovery *discover_refs(const char *service, int for_push)
{
struct strbuf exp = STRBUF_INIT;
struct strbuf buffer = STRBUF_INIT;
struct strbuf refs_url = STRBUF_INIT;
struct strbuf effective_url = STRBUF_INIT;
+ struct strbuf protocol_header = STRBUF_INIT;
+ struct string_list extra_headers = STRING_LIST_INIT_DUP;
struct discovery *last = last_discovery;
int http_ret, maybe_smart = 0;
struct http_get_options http_options;
+ enum protocol_version version = get_protocol_version_config();
if (last && !strcmp(service, last->service))
return last;
strbuf_addf(&refs_url, "service=%s", service);
}
+ /*
+ * NEEDSWORK: If we are trying to use protocol v2 and we are planning
+ * to perform a push, then fallback to v0 since the client doesn't know
+ * how to push yet using v2.
+ */
+ if (version == protocol_v2 && !strcmp("git-receive-pack", service))
+ version = protocol_v0;
+
+ /* Add the extra Git-Protocol header */
+ if (get_protocol_http_header(version, &protocol_header))
+ string_list_append(&extra_headers, protocol_header.buf);
+
memset(&http_options, 0, sizeof(http_options));
http_options.content_type = &type;
http_options.charset = &charset;
http_options.effective_url = &effective_url;
http_options.base_url = &url;
+ http_options.extra_headers = &extra_headers;
http_options.initial_request = 1;
http_options.no_cache = 1;
http_options.keep_error = 1;
warning(_("redirecting to %s"), url.buf);
last= xcalloc(1, sizeof(*last_discovery));
- last->service = service;
+ last->service = xstrdup(service);
last->buf_alloc = strbuf_detach(&buffer, &last->len);
last->buf = last->buf_alloc;
;
last->proto_git = 1;
+ } else if (maybe_smart &&
+ last->len > 5 && starts_with(last->buf + 4, "version 2")) {
+ last->proto_git = 1;
}
if (last->proto_git)
strbuf_release(&charset);
strbuf_release(&effective_url);
strbuf_release(&buffer);
+ strbuf_release(&protocol_header);
+ string_list_clear(&extra_headers, 0);
last_discovery = last;
return last;
}
char *service_url;
char *hdr_content_type;
char *hdr_accept;
+ char *protocol_header;
char *buf;
size_t alloc;
size_t len;
headers = curl_slist_append(headers, needs_100_continue ?
"Expect: 100-continue" : "Expect:");
+ /* Add the extra Git-Protocol header */
+ if (rpc->protocol_header)
+ headers = curl_slist_append(headers, rpc->protocol_header);
+
retry:
slot = get_active_slot();
strbuf_addf(&buf, "Accept: application/x-%s-result", svc);
rpc->hdr_accept = strbuf_detach(&buf, NULL);
+ if (get_protocol_http_header(heads->version, &buf))
+ rpc->protocol_header = strbuf_detach(&buf, NULL);
+ else
+ rpc->protocol_header = NULL;
+
while (!err) {
int n = packet_read(rpc->out, NULL, NULL, rpc->buf, rpc->alloc, 0);
if (!n)
free(rpc->service_url);
free(rpc->hdr_content_type);
free(rpc->hdr_accept);
+ free(rpc->protocol_header);
free(rpc->buf);
strbuf_release(&buf);
return err;
free(specs);
}
+/*
+ * Used to represent the state of a connection to an HTTP server when
+ * communicating using git's wire-protocol version 2.
+ */
+struct proxy_state {
+ char *service_name;
+ char *service_url;
+ struct curl_slist *headers;
+ struct strbuf request_buffer;
+ int in;
+ int out;
+ struct packet_reader reader;
+ size_t pos;
+ int seen_flush;
+};
+
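+/* Set up the service URL, request headers and packet reader for a v2 proxy session. */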
+static void proxy_state_init(struct proxy_state *p, const char *service_name,
+ enum protocol_version version)
+{
+ struct strbuf buf = STRBUF_INIT;
+
+ memset(p, 0, sizeof(*p));
+ p->service_name = xstrdup(service_name);
+
+ p->in = 0;
+ p->out = 1;
+ strbuf_init(&p->request_buffer, 0);
+
+ strbuf_addf(&buf, "%s%s", url.buf, p->service_name);
+ p->service_url = strbuf_detach(&buf, NULL);
+
+ p->headers = http_copy_default_headers();
+
+ strbuf_addf(&buf, "Content-Type: application/x-%s-request", p->service_name);
+ p->headers = curl_slist_append(p->headers, buf.buf);
+ strbuf_reset(&buf);
+
+ strbuf_addf(&buf, "Accept: application/x-%s-result", p->service_name);
+ p->headers = curl_slist_append(p->headers, buf.buf);
+ strbuf_reset(&buf);
+
+ p->headers = curl_slist_append(p->headers, "Transfer-Encoding: chunked");
+
+ /* Add the Git-Protocol header */
+ if (get_protocol_http_header(version, &buf))
+ p->headers = curl_slist_append(p->headers, buf.buf);
+
+ packet_reader_init(&p->reader, p->in, NULL, 0,
+ PACKET_READ_GENTLE_ON_EOF);
+
+ strbuf_release(&buf);
+}
+
+static void proxy_state_clear(struct proxy_state *p)
+{
+ free(p->service_name);
+ free(p->service_url);
+ curl_slist_free_all(p->headers);
+ strbuf_release(&p->request_buffer);
+}
+
+/*
+ * CURLOPT_READFUNCTION callback function.
+ * Attempts to copy over a single packet-line at a time into the
+ * curl provided buffer.
+ */
+static size_t proxy_in(char *buffer, size_t eltsize,
+ size_t nmemb, void *userdata)
+{
+ size_t max;
+ struct proxy_state *p = userdata;
+ size_t avail = p->request_buffer.len - p->pos;
+
+ if (eltsize != 1)
+ BUG("curl read callback called with size = %"PRIuMAX" != 1",
+ (uintmax_t)eltsize);
+ max = nmemb;
+
+ if (!avail) {
+ if (p->seen_flush) {
+ p->seen_flush = 0;
+ return 0;
+ }
+
+ strbuf_reset(&p->request_buffer);
+ switch (packet_reader_read(&p->reader)) {
+ case PACKET_READ_EOF:
+ die("unexpected EOF when reading from parent process");
+ case PACKET_READ_NORMAL:
+ packet_buf_write_len(&p->request_buffer, p->reader.line,
+ p->reader.pktlen);
+ break;
+ case PACKET_READ_DELIM:
+ packet_buf_delim(&p->request_buffer);
+ break;
+ case PACKET_READ_FLUSH:
+ packet_buf_flush(&p->request_buffer);
+ p->seen_flush = 1;
+ break;
+ }
+ p->pos = 0;
+ avail = p->request_buffer.len;
+ }
+
+ if (max < avail)
+ avail = max;
+ memcpy(buffer, p->request_buffer.buf + p->pos, avail);
+ p->pos += avail;
+ return avail;
+}
+
+static size_t proxy_out(char *buffer, size_t eltsize,
+ size_t nmemb, void *userdata)
+{
+ size_t size;
+ struct proxy_state *p = userdata;
+
+ if (eltsize != 1)
+ BUG("curl write callback called with size = %"PRIuMAX" != 1",
+ (uintmax_t)eltsize);
+ size = nmemb;
+
+ write_or_die(p->out, buffer, size);
+ return size;
+}
+
+/* Issues a request to the HTTP server configured in `p` */
+static int proxy_request(struct proxy_state *p)
+{
+ struct active_request_slot *slot;
+
+ slot = get_active_slot();
+
+ curl_easy_setopt(slot->curl, CURLOPT_NOBODY, 0);
+ curl_easy_setopt(slot->curl, CURLOPT_POST, 1);
+ curl_easy_setopt(slot->curl, CURLOPT_URL, p->service_url);
+ curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, p->headers);
+
+ /* Setup function to read request from client */
+ curl_easy_setopt(slot->curl, CURLOPT_READFUNCTION, proxy_in);
+ curl_easy_setopt(slot->curl, CURLOPT_READDATA, p);
+
+ /* Setup function to write server response to client */
+ curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, proxy_out);
+ curl_easy_setopt(slot->curl, CURLOPT_WRITEDATA, p);
+
+ if (run_slot(slot, NULL) != HTTP_OK)
+ return -1;
+
+ return 0;
+}
+
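+/*
+ * Proxy a protocol v2 session over HTTP: relay each request read from the
+ * local client as a POST to the server and write the response back, until
+ * the client hangs up.
+ */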
+static int stateless_connect(const char *service_name)
+{
+ struct discovery *discover;
+ struct proxy_state p;
+
+ /*
+ * Run the info/refs request and see if the server supports protocol
+ * v2. If and only if the server supports v2 can we successfully
+ * establish a stateless connection, otherwise we need to tell the
+ * client to fallback to using other transport helper functions to
+ * complete their request.
+ */
+ discover = discover_refs(service_name, 0);
+ if (discover->version != protocol_v2) {
+ printf("fallback\n");
+ fflush(stdout);
+ return -1;
+ } else {
+ /* Stateless Connection established */
+ printf("\n");
+ fflush(stdout);
+ }
+
+ proxy_state_init(&p, service_name, discover->version);
+
+ /*
+ * Dump the capability listing that we got from the server earlier
+ * during the info/refs request.
+ */
+ write_or_die(p.out, discover->buf, discover->len);
+
+ /* Peek the next packet line. Until we see EOF keep sending POSTs */
+ while (packet_reader_peek(&p.reader) != PACKET_READ_EOF) {
+ if (proxy_request(&p)) {
+ /* proxy_request() failed; stop issuing requests. */
+ break;
+ }
+ }
+
+ proxy_state_clear(&p);
+ return 0;
+}
+
int cmd_main(int argc, const char **argv)
{
struct strbuf buf = STRBUF_INIT;
fflush(stdout);
} else if (!strcmp(buf.buf, "capabilities")) {
+ printf("stateless-connect\n");
printf("fetch\n");
printf("option\n");
printf("push\n");
printf("check-connectivity\n");
printf("\n");
fflush(stdout);
+ } else if (skip_prefix(buf.buf, "stateless-connect ", &arg)) {
+ if (!stateless_connect(arg))
+ break;
} else {
error("remote-curl: unknown command '%s' from git", buf.buf);
return 1;
#include "remote.h"
#include "strbuf.h"
#include "url.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "run-command.h"
#include "vcs-svn/svndump.h"
#include "notes.h"
void free_refs(struct ref *ref);
struct oid_array;
-extern struct ref **get_remote_heads(int in, char *src_buf, size_t src_len,
+struct packet_reader;
+struct argv_array;
+extern struct ref **get_remote_heads(struct packet_reader *reader,
struct ref **list, unsigned int flags,
struct oid_array *extra_have,
- struct oid_array *shallow);
+ struct oid_array *shallow_points);
+
+/* Used for protocol v2 in order to retrieve refs from a remote */
+extern struct ref **get_remote_refs(int fd_out, struct packet_reader *reader,
+ struct ref **list, int for_push,
+ const struct argv_array *ref_prefixes);
int resolve_remote_symref(struct ref *ref, struct ref *list);
int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid);
--- /dev/null
+#include "cache.h"
+#include "sha1-lookup.h"
+#include "refs.h"
+#include "commit.h"
+
+/*
+ * An array of replacements. The array is kept sorted by the original
+ * sha1.
+ */
+static struct replace_object {
+ struct object_id original;
+ struct object_id replacement;
+} **replace_object;
+
+static int replace_object_alloc, replace_object_nr;
+
+static const unsigned char *replace_sha1_access(size_t index, void *table)
+{
+ struct replace_object **replace = table;
+ return replace[index]->original.hash;
+}
+
+static int replace_object_pos(const unsigned char *sha1)
+{
+ return sha1_pos(sha1, replace_object, replace_object_nr,
+ replace_sha1_access);
+}
+
+static int register_replace_object(struct replace_object *replace,
+ int ignore_dups)
+{
+ int pos = replace_object_pos(replace->original.hash);
+
+ if (0 <= pos) {
+ if (ignore_dups)
+ free(replace);
+ else {
+ free(replace_object[pos]);
+ replace_object[pos] = replace;
+ }
+ return 1;
+ }
+ pos = -pos - 1;
+ ALLOC_GROW(replace_object, replace_object_nr + 1, replace_object_alloc);
+ replace_object_nr++;
+ if (pos < replace_object_nr)
+ MOVE_ARRAY(replace_object + pos + 1, replace_object + pos,
+ replace_object_nr - pos - 1);
+ replace_object[pos] = replace;
+ return 0;
+}
+
+static int register_replace_ref(const char *refname,
+ const struct object_id *oid,
+ int flag, void *cb_data)
+{
+ /* Get sha1 from refname */
+ const char *slash = strrchr(refname, '/');
+ const char *hash = slash ? slash + 1 : refname;
+ struct replace_object *repl_obj = xmalloc(sizeof(*repl_obj));
+
+ if (get_oid_hex(hash, &repl_obj->original)) {
+ free(repl_obj);
+ warning("bad replace ref name: %s", refname);
+ return 0;
+ }
+
+ /* Copy sha1 from the read ref */
+ oidcpy(&repl_obj->replacement, oid);
+
+ /* Register new object */
+ if (register_replace_object(repl_obj, 1))
+ die("duplicate replace ref: %s", refname);
+
+ return 0;
+}
+
+static void prepare_replace_object(void)
+{
+ static int replace_object_prepared;
+
+ if (replace_object_prepared)
+ return;
+
+ for_each_replace_ref(register_replace_ref, NULL);
+ replace_object_prepared = 1;
+ if (!replace_object_nr)
+ check_replace_refs = 0;
+}
+
+/* We allow "recursive" replacement. Only within reason, though */
+#define MAXREPLACEDEPTH 5
+
+/*
+ * If a replacement for object oid has been set up, return the
+ * replacement object's name (replaced recursively, if necessary).
+ * The return value is either oid or a pointer to a
+ * permanently-allocated value. This function always respects replace
+ * references, regardless of the value of check_replace_refs.
+ */
+const struct object_id *do_lookup_replace_object(const struct object_id *oid)
+{
+ int pos, depth = MAXREPLACEDEPTH;
+ const struct object_id *cur = oid;
+
+ prepare_replace_object();
+
+ /* Try to recursively replace the object */
+ do {
+ if (--depth < 0)
+ die("replace depth too high for object %s",
+ oid_to_hex(oid));
+
+ pos = replace_object_pos(cur->hash);
+ if (0 <= pos)
+ cur = &replace_object[pos]->replacement;
+ } while (0 <= pos);
+
+ return cur;
+}
+++ /dev/null
-#include "cache.h"
-#include "sha1-lookup.h"
-#include "refs.h"
-#include "commit.h"
-
-/*
- * An array of replacements. The array is kept sorted by the original
- * sha1.
- */
-static struct replace_object {
- struct object_id original;
- struct object_id replacement;
-} **replace_object;
-
-static int replace_object_alloc, replace_object_nr;
-
-static const unsigned char *replace_sha1_access(size_t index, void *table)
-{
- struct replace_object **replace = table;
- return replace[index]->original.hash;
-}
-
-static int replace_object_pos(const unsigned char *sha1)
-{
- return sha1_pos(sha1, replace_object, replace_object_nr,
- replace_sha1_access);
-}
-
-static int register_replace_object(struct replace_object *replace,
- int ignore_dups)
-{
- int pos = replace_object_pos(replace->original.hash);
-
- if (0 <= pos) {
- if (ignore_dups)
- free(replace);
- else {
- free(replace_object[pos]);
- replace_object[pos] = replace;
- }
- return 1;
- }
- pos = -pos - 1;
- ALLOC_GROW(replace_object, replace_object_nr + 1, replace_object_alloc);
- replace_object_nr++;
- if (pos < replace_object_nr)
- MOVE_ARRAY(replace_object + pos + 1, replace_object + pos,
- replace_object_nr - pos - 1);
- replace_object[pos] = replace;
- return 0;
-}
-
-static int register_replace_ref(const char *refname,
- const struct object_id *oid,
- int flag, void *cb_data)
-{
- /* Get sha1 from refname */
- const char *slash = strrchr(refname, '/');
- const char *hash = slash ? slash + 1 : refname;
- struct replace_object *repl_obj = xmalloc(sizeof(*repl_obj));
-
- if (get_oid_hex(hash, &repl_obj->original)) {
- free(repl_obj);
- warning("bad replace ref name: %s", refname);
- return 0;
- }
-
- /* Copy sha1 from the read ref */
- oidcpy(&repl_obj->replacement, oid);
-
- /* Register new object */
- if (register_replace_object(repl_obj, 1))
- die("duplicate replace ref: %s", refname);
-
- return 0;
-}
-
-static void prepare_replace_object(void)
-{
- static int replace_object_prepared;
-
- if (replace_object_prepared)
- return;
-
- for_each_replace_ref(register_replace_ref, NULL);
- replace_object_prepared = 1;
- if (!replace_object_nr)
- check_replace_refs = 0;
-}
-
-/* We allow "recursive" replacement. Only within reason, though */
-#define MAXREPLACEDEPTH 5
-
-/*
- * If a replacement for object oid has been set up, return the
- * replacement object's name (replaced recursively, if necessary).
- * The return value is either oid or a pointer to a
- * permanently-allocated value. This function always respects replace
- * references, regardless of the value of check_replace_refs.
- */
-const struct object_id *do_lookup_replace_object(const struct object_id *oid)
-{
- int pos, depth = MAXREPLACEDEPTH;
- const struct object_id *cur = oid;
-
- prepare_replace_object();
-
- /* Try to recursively replace the object */
- do {
- if (--depth < 0)
- die("replace depth too high for object %s",
- oid_to_hex(oid));
-
- pos = replace_object_pos(cur->hash);
- if (0 <= pos)
- cur = &replace_object[pos]->replacement;
- } while (0 <= pos);
-
- return cur;
-}
#include "cache.h"
#include "run-command.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "sigchain.h"
#include "argv-array.h"
#include "thread-utils.h"
#include "sequencer.h"
#include "tag.h"
#include "run-command.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "utf8.h"
#include "cache-tree.h"
#include "diff.h"
static GIT_PATH_FUNC(rebase_path_gpg_sign_opt, "rebase-merge/gpg_sign_opt")
static GIT_PATH_FUNC(rebase_path_orig_head, "rebase-merge/orig-head")
static GIT_PATH_FUNC(rebase_path_verbose, "rebase-merge/verbose")
+static GIT_PATH_FUNC(rebase_path_signoff, "rebase-merge/signoff")
static GIT_PATH_FUNC(rebase_path_head_name, "rebase-merge/head-name")
static GIT_PATH_FUNC(rebase_path_onto, "rebase-merge/onto")
static GIT_PATH_FUNC(rebase_path_autostash, "rebase-merge/autostash")
}
}
- if (opts->signoff)
+ if (opts->signoff && !is_fixup(command))
append_signoff(&msgbuf, 0, 0);
if (is_rebase_i(opts) && write_author_script(msg.message) < 0)
if (file_exists(rebase_path_verbose()))
opts->verbose = 1;
+ if (file_exists(rebase_path_signoff())) {
+ opts->allow_ff = 0;
+ opts->signoff = 1;
+ }
+
read_strategy_opts(opts, &buf);
strbuf_release(&buf);
init_revisions(&revs, NULL);
revs.verbose_header = 1;
revs.max_parents = 1;
- revs.cherry_pick = 1;
+ revs.cherry_mark = 1;
revs.limited = 1;
revs.reverse = 1;
revs.right_only = 1;
return error(_("make_script: error preparing revisions"));
while ((commit = get_revision(&revs))) {
+ int is_empty = is_original_commit_empty(commit);
+
+ if (!is_empty && (commit->object.flags & PATCHSAME))
+ continue;
strbuf_reset(&buf);
- if (!keep_empty && is_original_commit_empty(commit))
+ if (!keep_empty && is_empty)
strbuf_addf(&buf, "%c ", comment_line_char);
strbuf_addf(&buf, "%s %s ", insn,
oid_to_hex(&commit->object.oid));
--- /dev/null
+#include "cache.h"
+#include "repository.h"
+#include "config.h"
+#include "pkt-line.h"
+#include "version.h"
+#include "argv-array.h"
+#include "ls-refs.h"
+#include "serve.h"
+#include "upload-pack.h"
+
+static int always_advertise(struct repository *r,
+ struct strbuf *value)
+{
+ return 1;
+}
+
+static int agent_advertise(struct repository *r,
+ struct strbuf *value)
+{
+ if (value)
+ strbuf_addstr(value, git_user_agent_sanitized());
+ return 1;
+}
+
+struct protocol_capability {
+ /*
+ * The name of the capability. The server uses this name when
+ * advertising this capability, and the client uses this name to
+ * specify this capability.
+ */
+ const char *name;
+
+ /*
+ * Function queried to see if a capability should be advertised.
+ * Optionally a value can be specified by adding it to 'value'.
+ * If a value is added to 'value', the server will advertise this
+ * capability as "<name>=<value>" instead of "<name>".
+ */
+ int (*advertise)(struct repository *r, struct strbuf *value);
+
+ /*
+ * Function called when a client requests the capability as a command.
+ * The function will be provided the capabilities requested via 'keys'
+ * as well as a struct packet_reader 'request' which the command should
+ * use to read the command specific part of the request. Every command
+ * MUST read until a flush packet is seen before sending a response.
+ *
+ * This field should be NULL for capabilities which are not commands.
+ */
+ int (*command)(struct repository *r,
+ struct argv_array *keys,
+ struct packet_reader *request);
+};
+
+static struct protocol_capability capabilities[] = {
+ { "agent", agent_advertise, NULL },
+ { "ls-refs", always_advertise, ls_refs },
+ { "fetch", upload_pack_advertise, upload_pack_v2 },
+};
+
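+/*
+ * Write each advertisable capability as its own pkt-line, either "<name>" or
+ * "<name>=<value>", terminated by a flush packet.
+ */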
+static void advertise_capabilities(void)
+{
+ struct strbuf capability = STRBUF_INIT;
+ struct strbuf value = STRBUF_INIT;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(capabilities); i++) {
+ struct protocol_capability *c = &capabilities[i];
+
+ if (c->advertise(the_repository, &value)) {
+ strbuf_addstr(&capability, c->name);
+
+ if (value.len) {
+ strbuf_addch(&capability, '=');
+ strbuf_addbuf(&capability, &value);
+ }
+
+ strbuf_addch(&capability, '\n');
+ packet_write(1, capability.buf, capability.len);
+ }
+
+ strbuf_reset(&capability);
+ strbuf_reset(&value);
+ }
+
+ packet_flush(1);
+ strbuf_release(&capability);
+ strbuf_release(&value);
+}
+
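+/* Look up a capability by name; both "<name>" and "<name>=<value>" forms match. */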
+static struct protocol_capability *get_capability(const char *key)
+{
+ int i;
+
+ if (!key)
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(capabilities); i++) {
+ struct protocol_capability *c = &capabilities[i];
+ const char *out;
+ if (skip_prefix(key, c->name, &out) && (!*out || *out == '='))
+ return c;
+ }
+
+ return NULL;
+}
+
+static int is_valid_capability(const char *key)
+{
+ const struct protocol_capability *c = get_capability(key);
+
+ return c && c->advertise(the_repository, NULL);
+}
+
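+/*
+ * If 'key' is a "command=<name>" request, validate it and record the
+ * capability in *command; die on an unknown command or a duplicate request.
+ */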
+static int is_command(const char *key, struct protocol_capability **command)
+{
+ const char *out;
+
+ if (skip_prefix(key, "command=", &out)) {
+ struct protocol_capability *cmd = get_capability(out);
+
+ if (*command)
+ die("command '%s' requested after already requesting command '%s'",
+ out, (*command)->name);
+ if (!cmd || !cmd->advertise(the_repository, NULL) || !cmd->command)
+ die("invalid command '%s'", out);
+
+ *command = cmd;
+ return 1;
+ }
+
+ return 0;
+}
+
+int has_capability(const struct argv_array *keys, const char *capability,
+ const char **value)
+{
+ int i;
+ for (i = 0; i < keys->argc; i++) {
+ const char *out;
+ if (skip_prefix(keys->argv[i], capability, &out) &&
+ (!*out || *out == '=')) {
+ if (value) {
+ if (*out == '=')
+ out++;
+ *value = out;
+ }
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+enum request_state {
+ PROCESS_REQUEST_KEYS,
+ PROCESS_REQUEST_DONE,
+};
+
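+/*
+ * Read a single request (capability keys and an optional command, terminated
+ * by a delim or flush packet) and dispatch it. Returns 1 if the client has
+ * terminated the connection.
+ */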
+static int process_request(void)
+{
+ enum request_state state = PROCESS_REQUEST_KEYS;
+ struct packet_reader reader;
+ struct argv_array keys = ARGV_ARRAY_INIT;
+ struct protocol_capability *command = NULL;
+
+ packet_reader_init(&reader, 0, NULL, 0,
+ PACKET_READ_CHOMP_NEWLINE |
+ PACKET_READ_GENTLE_ON_EOF);
+
+ /*
+ * Check to see if the client closed their end before sending another
+ * request. If so we can terminate the connection.
+ */
+ if (packet_reader_peek(&reader) == PACKET_READ_EOF)
+ return 1;
+ reader.options = PACKET_READ_CHOMP_NEWLINE;
+
+ while (state != PROCESS_REQUEST_DONE) {
+ switch (packet_reader_peek(&reader)) {
+ case PACKET_READ_EOF:
+ BUG("Should have already died when seeing EOF");
+ case PACKET_READ_NORMAL:
+ /* collect request; a sequence of keys and values */
+ if (is_command(reader.line, &command) ||
+ is_valid_capability(reader.line))
+ argv_array_push(&keys, reader.line);
+ else
+ die("unknown capability '%s'", reader.line);
+
+ /* Consume the peeked line */
+ packet_reader_read(&reader);
+ break;
+ case PACKET_READ_FLUSH:
+ /*
+ * If no command and no keys were given then the client
+ * wanted to terminate the connection.
+ */
+ if (!keys.argc)
+ return 1;
+
+ /*
+ * The flush packet isn't consumed here like it is in
+ * the other parts of this switch statement. This is
+ * so that the command can read the flush packet and
+ * see the end of the request in the same way it would
+ * if command specific arguments were provided after a
+ * delim packet.
+ */
+ state = PROCESS_REQUEST_DONE;
+ break;
+ case PACKET_READ_DELIM:
+ /* Consume the peeked line */
+ packet_reader_read(&reader);
+
+ state = PROCESS_REQUEST_DONE;
+ break;
+ }
+ }
+
+ if (!command)
+ die("no command requested");
+
+ command->command(the_repository, &keys, &reader);
+
+ argv_array_clear(&keys);
+ return 0;
+}
+
+/* Main serve loop for protocol version 2 */
+void serve(struct serve_options *options)
+{
+ if (options->advertise_capabilities || !options->stateless_rpc) {
+ /* serve by default supports v2 */
+ packet_write_fmt(1, "version 2\n");
+
+ advertise_capabilities();
+ /*
+ * If only the list of capabilities was requested exit
+ * immediately after advertising capabilities
+ */
+ if (options->advertise_capabilities)
+ return;
+ }
+
+ /*
+ * If stateless-rpc was requested then exit after
+ * a single request/response exchange
+ */
+ if (options->stateless_rpc) {
+ process_request();
+ } else {
+ for (;;)
+ if (process_request())
+ break;
+ }
+}
--- /dev/null
+#ifndef SERVE_H
+#define SERVE_H
+
+struct argv_array;
+extern int has_capability(const struct argv_array *keys, const char *capability,
+ const char **value);
+
+struct serve_options {
+ unsigned advertise_capabilities;
+ unsigned stateless_rpc;
+};
+#define SERVE_OPTIONS_INIT { 0 }
+extern void serve(struct serve_options *options);
+
+#endif /* SERVE_H */
#include "config.h"
#include "dir.h"
#include "string-list.h"
+#include "chdir-notify.h"
static int inside_git_dir = -1;
static int inside_work_tree = -1;
void setup_work_tree(void)
{
- const char *work_tree, *git_dir;
+ const char *work_tree;
static int initialized = 0;
if (initialized)
die(_("unable to set up work tree using invalid config"));
work_tree = get_git_work_tree();
- git_dir = get_git_dir();
- if (!is_absolute_path(git_dir))
- git_dir = real_path(get_git_dir());
- if (!work_tree || chdir(work_tree))
+ if (!work_tree || chdir_notify(work_tree))
die(_("this operation must be run in a work tree"));
/*
if (getenv(GIT_WORK_TREE_ENVIRONMENT))
setenv(GIT_WORK_TREE_ENVIRONMENT, ".", 1);
- set_git_dir(remove_leading_path(git_dir, work_tree));
initialized = 1;
}
--- /dev/null
+/*
+ * GIT - The information manager from hell
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ *
+ * This handles basic git sha1 object files - packing, unpacking,
+ * creation etc.
+ */
+#include "cache.h"
+#include "config.h"
+#include "string-list.h"
+#include "lockfile.h"
+#include "delta.h"
+#include "pack.h"
+#include "blob.h"
+#include "commit.h"
+#include "run-command.h"
+#include "tag.h"
+#include "tree.h"
+#include "tree-walk.h"
+#include "refs.h"
+#include "pack-revindex.h"
+#include "sha1-lookup.h"
+#include "bulk-checkin.h"
+#include "repository.h"
+#include "streaming.h"
+#include "dir.h"
+#include "list.h"
+#include "mergesort.h"
+#include "quote.h"
+#include "packfile.h"
+#include "fetch-object.h"
+#include "object-store.h"
+
+/* The maximum size for an object header. */
+#define MAX_HEADER_LEN 32
+
+const unsigned char null_sha1[GIT_MAX_RAWSZ];
+const struct object_id null_oid;
+const struct object_id empty_tree_oid = {
+ EMPTY_TREE_SHA1_BIN_LITERAL
+};
+const struct object_id empty_blob_oid = {
+ EMPTY_BLOB_SHA1_BIN_LITERAL
+};
+
+static void git_hash_sha1_init(git_hash_ctx *ctx)
+{
+ git_SHA1_Init(&ctx->sha1);
+}
+
+static void git_hash_sha1_update(git_hash_ctx *ctx, const void *data, size_t len)
+{
+ git_SHA1_Update(&ctx->sha1, data, len);
+}
+
+static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx)
+{
+ git_SHA1_Final(hash, &ctx->sha1);
+}
+
+static void git_hash_unknown_init(git_hash_ctx *ctx)
+{
+ die("trying to init unknown hash");
+}
+
+static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
+{
+ die("trying to update unknown hash");
+}
+
+static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
+{
+ die("trying to finalize unknown hash");
+}
+
+const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
+ {
+ NULL,
+ 0x00000000,
+ 0,
+ 0,
+ git_hash_unknown_init,
+ git_hash_unknown_update,
+ git_hash_unknown_final,
+ NULL,
+ NULL,
+ },
+ {
+ "sha-1",
+ /* "sha1", big-endian */
+ 0x73686131,
+ GIT_SHA1_RAWSZ,
+ GIT_SHA1_HEXSZ,
+ git_hash_sha1_init,
+ git_hash_sha1_update,
+ git_hash_sha1_final,
+ &empty_tree_oid,
+ &empty_blob_oid,
+ },
+};
+
+/*
+ * This is meant to hold a *small* number of objects that you would
+ * want read_sha1_file() to be able to return, but yet you do not want
+ * to write them into the object store (e.g. a browse-only
+ * application).
+ */
+static struct cached_object {
+ unsigned char sha1[20];
+ enum object_type type;
+ void *buf;
+ unsigned long size;
+} *cached_objects;
+static int cached_object_nr, cached_object_alloc;
+
+static struct cached_object empty_tree = {
+ EMPTY_TREE_SHA1_BIN_LITERAL,
+ OBJ_TREE,
+ "",
+ 0
+};
+
+static struct cached_object *find_cached_object(const unsigned char *sha1)
+{
+ int i;
+ struct cached_object *co = cached_objects;
+
+ for (i = 0; i < cached_object_nr; i++, co++) {
+ if (!hashcmp(co->sha1, sha1))
+ return co;
+ }
+ if (!hashcmp(sha1, empty_tree.sha1))
+ return &empty_tree;
+ return NULL;
+}
+
+
+static int get_conv_flags(unsigned flags)
+{
+ if (flags & HASH_RENORMALIZE)
+ return CONV_EOL_RENORMALIZE;
+ else if (flags & HASH_WRITE_OBJECT)
+ return global_conv_flags_eol;
+ else
+ return 0;
+}
+
+
+int mkdir_in_gitdir(const char *path)
+{
+ if (mkdir(path, 0777)) {
+ int saved_errno = errno;
+ struct stat st;
+ struct strbuf sb = STRBUF_INIT;
+
+ if (errno != EEXIST)
+ return -1;
+ /*
+ * Are we looking at a path in a symlinked worktree
+ * whose original repository does not yet have it?
+ * e.g. .git/rr-cache pointing at its original
+ * repository in which the user hasn't performed any
+ * conflict resolution yet?
+ */
+ if (lstat(path, &st) || !S_ISLNK(st.st_mode) ||
+ strbuf_readlink(&sb, path, st.st_size) ||
+ !is_absolute_path(sb.buf) ||
+ mkdir(sb.buf, 0777)) {
+ strbuf_release(&sb);
+ errno = saved_errno;
+ return -1;
+ }
+ strbuf_release(&sb);
+ }
+ return adjust_shared_perm(path);
+}
+
+enum scld_error safe_create_leading_directories(char *path)
+{
+ char *next_component = path + offset_1st_component(path);
+ enum scld_error ret = SCLD_OK;
+
+ while (ret == SCLD_OK && next_component) {
+ struct stat st;
+ char *slash = next_component, slash_character;
+
+ while (*slash && !is_dir_sep(*slash))
+ slash++;
+
+ if (!*slash)
+ break;
+
+ next_component = slash + 1;
+ while (is_dir_sep(*next_component))
+ next_component++;
+ if (!*next_component)
+ break;
+
+ slash_character = *slash;
+ *slash = '\0';
+ if (!stat(path, &st)) {
+ /* path exists */
+ if (!S_ISDIR(st.st_mode)) {
+ errno = ENOTDIR;
+ ret = SCLD_EXISTS;
+ }
+ } else if (mkdir(path, 0777)) {
+ if (errno == EEXIST &&
+ !stat(path, &st) && S_ISDIR(st.st_mode))
+ ; /* somebody created it since we checked */
+ else if (errno == ENOENT)
+ /*
+ * Either mkdir() failed because
+ * somebody just pruned the containing
+ * directory, or stat() failed because
+ * the file that was in our way was
+ * just removed. Either way, inform
+ * the caller that it might be worth
+ * trying again:
+ */
+ ret = SCLD_VANISHED;
+ else
+ ret = SCLD_FAILED;
+ } else if (adjust_shared_perm(path)) {
+ ret = SCLD_PERMS;
+ }
+ *slash = slash_character;
+ }
+ return ret;
+}
+
+enum scld_error safe_create_leading_directories_const(const char *path)
+{
+ int save_errno;
+ /* path points to cache entries, so xstrdup before messing with it */
+ char *buf = xstrdup(path);
+ enum scld_error result = safe_create_leading_directories(buf);
+
+ save_errno = errno;
+ free(buf);
+ errno = save_errno;
+ return result;
+}
+
+int raceproof_create_file(const char *path, create_file_fn fn, void *cb)
+{
+ /*
+ * The number of times we will try to remove empty directories
+ * in the way of path. This is only 1 because if another
+ * process is racily creating directories that conflict with
+ * us, we don't want to fight against them.
+ */
+ int remove_directories_remaining = 1;
+
+ /*
+ * The number of times that we will try to create the
+ * directories containing path. We are willing to attempt this
+ * more than once, because another process could be trying to
+ * clean up empty directories at the same time as we are
+ * trying to create them.
+ */
+ int create_directories_remaining = 3;
+
+ /* A scratch copy of path, filled lazily if we need it: */
+ struct strbuf path_copy = STRBUF_INIT;
+
+ int ret, save_errno;
+
+ /* Sanity check: */
+ assert(*path);
+
+retry_fn:
+ ret = fn(path, cb);
+ save_errno = errno;
+ if (!ret)
+ goto out;
+
+ if (errno == EISDIR && remove_directories_remaining-- > 0) {
+ /*
+ * A directory is in the way. Maybe it is empty; try
+ * to remove it:
+ */
+ if (!path_copy.len)
+ strbuf_addstr(&path_copy, path);
+
+ if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY))
+ goto retry_fn;
+ } else if (errno == ENOENT && create_directories_remaining-- > 0) {
+ /*
+ * Maybe the containing directory didn't exist, or
+ * maybe it was just deleted by a process that is
+ * racing with us to clean up empty directories. Try
+ * to create it:
+ */
+ enum scld_error scld_result;
+
+ if (!path_copy.len)
+ strbuf_addstr(&path_copy, path);
+
+ do {
+ scld_result = safe_create_leading_directories(path_copy.buf);
+ if (scld_result == SCLD_OK)
+ goto retry_fn;
+ } while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0);
+ }
+
+out:
+ strbuf_release(&path_copy);
+ errno = save_errno;
+ return ret;
+}
+
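+/*
+ * Append the loose-object path for 'sha1': two hex digits, a slash, then the
+ * remaining thirty-eight hex digits (e.g. "ab/cdef...").
+ */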
+static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1)
+{
+ int i;
+ for (i = 0; i < 20; i++) {
+ static char hex[] = "0123456789abcdef";
+ unsigned int val = sha1[i];
+ strbuf_addch(buf, hex[val >> 4]);
+ strbuf_addch(buf, hex[val & 0xf]);
+ if (!i)
+ strbuf_addch(buf, '/');
+ }
+}
+
+void sha1_file_name(struct repository *r, struct strbuf *buf, const unsigned char *sha1)
+{
+ strbuf_addstr(buf, r->objects->objectdir);
+ strbuf_addch(buf, '/');
+ fill_sha1_path(buf, sha1);
+}
+
+struct strbuf *alt_scratch_buf(struct alternate_object_database *alt)
+{
+ strbuf_setlen(&alt->scratch, alt->base_len);
+ return &alt->scratch;
+}
+
+static const char *alt_sha1_path(struct alternate_object_database *alt,
+ const unsigned char *sha1)
+{
+ struct strbuf *buf = alt_scratch_buf(alt);
+ fill_sha1_path(buf, sha1);
+ return buf->buf;
+}
+
+/*
+ * Return non-zero iff the path is usable as an alternate object database.
+ */
+static int alt_odb_usable(struct raw_object_store *o,
+ struct strbuf *path,
+ const char *normalized_objdir)
+{
+ struct alternate_object_database *alt;
+
+ /* Detect cases where alternate disappeared */
+ if (!is_directory(path->buf)) {
+ error("object directory %s does not exist; "
+ "check .git/objects/info/alternates.",
+ path->buf);
+ return 0;
+ }
+
+ /*
+ * Prevent the common mistake of listing the same
+ * thing twice, or object directory itself.
+ */
+ for (alt = o->alt_odb_list; alt; alt = alt->next) {
+ if (!fspathcmp(path->buf, alt->path))
+ return 0;
+ }
+ if (!fspathcmp(path->buf, normalized_objdir))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Prepare alternate object database registry.
+ *
+ * The variable alt_odb_list points at the list of struct
+ * alternate_object_database. The elements on this list come from
+ * non-empty elements from colon separated ALTERNATE_DB_ENVIRONMENT
+ * environment variable, and $GIT_OBJECT_DIRECTORY/info/alternates,
+ * whose contents is similar to that environment variable but can be
+ * LF separated. Its base points at a statically allocated buffer that
+ * contains "/the/directory/corresponding/to/.git/objects/...", while
+ * its name points just after the slash at the end of ".git/objects/"
+ * in the example above, and has enough space to hold 40-byte hex
+ * SHA1, an extra slash for the first level indirection, and the
+ * terminating NUL.
+ */
+static void read_info_alternates(struct repository *r,
+ const char *relative_base,
+ int depth);
+static int link_alt_odb_entry(struct repository *r, const char *entry,
+ const char *relative_base, int depth, const char *normalized_objdir)
+{
+ struct alternate_object_database *ent;
+ struct strbuf pathbuf = STRBUF_INIT;
+
+ if (!is_absolute_path(entry) && relative_base) {
+ strbuf_realpath(&pathbuf, relative_base, 1);
+ strbuf_addch(&pathbuf, '/');
+ }
+ strbuf_addstr(&pathbuf, entry);
+
+ if (strbuf_normalize_path(&pathbuf) < 0 && relative_base) {
+ error("unable to normalize alternate object path: %s",
+ pathbuf.buf);
+ strbuf_release(&pathbuf);
+ return -1;
+ }
+
+ /*
+ * The trailing slash after the directory name is given by
+ * this function at the end. Remove duplicates.
+ */
+ while (pathbuf.len && pathbuf.buf[pathbuf.len - 1] == '/')
+ strbuf_setlen(&pathbuf, pathbuf.len - 1);
+
+ if (!alt_odb_usable(r->objects, &pathbuf, normalized_objdir)) {
+ strbuf_release(&pathbuf);
+ return -1;
+ }
+
+ ent = alloc_alt_odb(pathbuf.buf);
+
+ /* add the alternate entry */
+ *r->objects->alt_odb_tail = ent;
+ r->objects->alt_odb_tail = &(ent->next);
+ ent->next = NULL;
+
+ /* recursively add alternates */
+ read_info_alternates(r, pathbuf.buf, depth + 1);
+
+ strbuf_release(&pathbuf);
+ return 0;
+}
+
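+/*
+ * Parse one entry from an alternates list.  An entry may be a
+ * comment starting with '#', a C-style quoted path, or a plain
+ * path; entries are terminated by "sep" (PATH_SEP in the
+ * environment variable, '\n' in an info/alternates file).  The
+ * parsed path, if any, is left in "out", and a pointer just past
+ * the separator (or to the end of the string) is returned.
+ */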
+static const char *parse_alt_odb_entry(const char *string,
+ int sep,
+ struct strbuf *out)
+{
+ const char *end;
+
+ strbuf_reset(out);
+
+ if (*string == '#') {
+ /* comment; consume up to next separator */
+ end = strchrnul(string, sep);
+ } else if (*string == '"' && !unquote_c_style(out, string, &end)) {
+ /*
+ * quoted path; unquote_c_style has copied the
+ * data for us and set "end". Broken quoting (e.g.,
+ * an entry that doesn't end with a quote) falls
+ * back to the unquoted case below.
+ */
+ } else {
+ /* normal, unquoted path */
+ end = strchrnul(string, sep);
+ strbuf_add(out, string, end - string);
+ }
+
+ if (*end)
+ end++;
+ return end;
+}
+
+static void link_alt_odb_entries(struct repository *r, const char *alt,
+ int sep, const char *relative_base, int depth)
+{
+ struct strbuf objdirbuf = STRBUF_INIT;
+ struct strbuf entry = STRBUF_INIT;
+
+ if (!alt || !*alt)
+ return;
+
+ if (depth > 5) {
+ error("%s: ignoring alternate object stores, nesting too deep.",
+ relative_base);
+ return;
+ }
+
+ strbuf_add_absolute_path(&objdirbuf, r->objects->objectdir);
+ if (strbuf_normalize_path(&objdirbuf) < 0)
+ die("unable to normalize object directory: %s",
+ objdirbuf.buf);
+
+ while (*alt) {
+ alt = parse_alt_odb_entry(alt, sep, &entry);
+ if (!entry.len)
+ continue;
+ link_alt_odb_entry(r, entry.buf,
+ relative_base, depth, objdirbuf.buf);
+ }
+ strbuf_release(&entry);
+ strbuf_release(&objdirbuf);
+}
+
+static void read_info_alternates(struct repository *r,
+ const char *relative_base,
+ int depth)
+{
+ char *path;
+ struct strbuf buf = STRBUF_INIT;
+
+ path = xstrfmt("%s/info/alternates", relative_base);
+ if (strbuf_read_file(&buf, path, 1024) < 0) {
+ warn_on_fopen_errors(path);
+ free(path);
+ return;
+ }
+
+ link_alt_odb_entries(r, buf.buf, '\n', relative_base, depth);
+ strbuf_release(&buf);
+ free(path);
+}
+
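+/*
+ * Allocate an alternate entry for "dir" and prime its scratch
+ * buffer with "dir/", so that alt_sha1_path() only needs to append
+ * the object name.
+ */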
+struct alternate_object_database *alloc_alt_odb(const char *dir)
+{
+ struct alternate_object_database *ent;
+
+ FLEX_ALLOC_STR(ent, path, dir);
+ strbuf_init(&ent->scratch, 0);
+ strbuf_addf(&ent->scratch, "%s/", dir);
+ ent->base_len = ent->scratch.len;
+
+ return ent;
+}
+
+void add_to_alternates_file(const char *reference)
+{
+ struct lock_file lock = LOCK_INIT;
+ char *alts = git_pathdup("objects/info/alternates");
+ FILE *in, *out;
+ int found = 0;
+
+ hold_lock_file_for_update(&lock, alts, LOCK_DIE_ON_ERROR);
+ out = fdopen_lock_file(&lock, "w");
+ if (!out)
+ die_errno("unable to fdopen alternates lockfile");
+
+ in = fopen(alts, "r");
+ if (in) {
+ struct strbuf line = STRBUF_INIT;
+
+ while (strbuf_getline(&line, in) != EOF) {
+ if (!strcmp(reference, line.buf)) {
+ found = 1;
+ break;
+ }
+ fprintf_or_die(out, "%s\n", line.buf);
+ }
+
+ strbuf_release(&line);
+ fclose(in);
+ }
+ else if (errno != ENOENT)
+ die_errno("unable to read alternates file");
+
+ if (found) {
+ rollback_lock_file(&lock);
+ } else {
+ fprintf_or_die(out, "%s\n", reference);
+ if (commit_lock_file(&lock))
+ die_errno("unable to move new alternates file into place");
+ if (the_repository->objects->alt_odb_tail)
+ link_alt_odb_entries(the_repository, reference,
+ '\n', NULL, 0);
+ }
+ free(alts);
+}
+
+void add_to_alternates_memory(const char *reference)
+{
+ /*
+ * Make sure alternates are initialized, or else our entry may be
+ * overwritten when they are.
+ */
+ prepare_alt_odb(the_repository);
+
+ link_alt_odb_entries(the_repository, reference,
+ '\n', NULL, 0);
+}
+
+/*
+ * Compute the exact path of an alternate and return it. In case of
+ * error, NULL is returned and a human-readable message is added to
+ * `err`. `path` may be relative and should point to $GITDIR.
+ * `err` must not be NULL.
+ */
+char *compute_alternate_path(const char *path, struct strbuf *err)
+{
+ char *ref_git = NULL;
+ const char *repo, *ref_git_s;
+ int seen_error = 0;
+
+ ref_git_s = real_path_if_valid(path);
+ if (!ref_git_s) {
+ seen_error = 1;
+ strbuf_addf(err, _("path '%s' does not exist"), path);
+ goto out;
+ } else
+ /*
+ * Beware: read_gitfile(), real_path() and mkpath()
+ * return static buffer
+ */
+ ref_git = xstrdup(ref_git_s);
+
+ repo = read_gitfile(ref_git);
+ if (!repo)
+ repo = read_gitfile(mkpath("%s/.git", ref_git));
+ if (repo) {
+ free(ref_git);
+ ref_git = xstrdup(repo);
+ }
+
+ if (!repo && is_directory(mkpath("%s/.git/objects", ref_git))) {
+ char *ref_git_git = mkpathdup("%s/.git", ref_git);
+ free(ref_git);
+ ref_git = ref_git_git;
+ } else if (!is_directory(mkpath("%s/objects", ref_git))) {
+ struct strbuf sb = STRBUF_INIT;
+ seen_error = 1;
+ if (get_common_dir(&sb, ref_git)) {
+ strbuf_addf(err,
+ _("reference repository '%s' as a linked "
+ "checkout is not supported yet."),
+ path);
+ goto out;
+ }
+
+ strbuf_addf(err, _("reference repository '%s' is not a "
+ "local repository."), path);
+ goto out;
+ }
+
+ if (!access(mkpath("%s/shallow", ref_git), F_OK)) {
+ strbuf_addf(err, _("reference repository '%s' is shallow"),
+ path);
+ seen_error = 1;
+ goto out;
+ }
+
+ if (!access(mkpath("%s/info/grafts", ref_git), F_OK)) {
+ strbuf_addf(err,
+ _("reference repository '%s' is grafted"),
+ path);
+ seen_error = 1;
+ goto out;
+ }
+
+out:
+ if (seen_error) {
+ FREE_AND_NULL(ref_git);
+ }
+
+ return ref_git;
+}
+
+int foreach_alt_odb(alt_odb_fn fn, void *cb)
+{
+ struct alternate_object_database *ent;
+ int r = 0;
+
+ prepare_alt_odb(the_repository);
+ for (ent = the_repository->objects->alt_odb_list; ent; ent = ent->next) {
+ r = fn(ent, cb);
+ if (r)
+ break;
+ }
+ return r;
+}
+
+void prepare_alt_odb(struct repository *r)
+{
+ if (r->objects->alt_odb_tail)
+ return;
+
+ r->objects->alt_odb_tail = &r->objects->alt_odb_list;
+ link_alt_odb_entries(r, r->objects->alternate_db, PATH_SEP, NULL, 0);
+
+ read_info_alternates(r, r->objects->objectdir, 0);
+}
+
+/* Returns 1 if we have successfully freshened the file, 0 otherwise. */
+static int freshen_file(const char *fn)
+{
+ struct utimbuf t;
+ t.actime = t.modtime = time(NULL);
+ return !utime(fn, &t);
+}
+
+/*
+ * All of the check_and_freshen functions return 1 if the file exists and was
+ * freshened (if freshening was requested), 0 otherwise. If they return
+ * 0, you should not assume that it is safe to skip a write of the object (it
+ * either does not exist on disk, or has a stale mtime and may be subject to
+ * pruning).
+ */
+int check_and_freshen_file(const char *fn, int freshen)
+{
+ if (access(fn, F_OK))
+ return 0;
+ if (freshen && !freshen_file(fn))
+ return 0;
+ return 1;
+}
+
+static int check_and_freshen_local(const unsigned char *sha1, int freshen)
+{
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(the_repository, &buf, sha1);
+
+ return check_and_freshen_file(buf.buf, freshen);
+}
+
+static int check_and_freshen_nonlocal(const unsigned char *sha1, int freshen)
+{
+ struct alternate_object_database *alt;
+ prepare_alt_odb(the_repository);
+ for (alt = the_repository->objects->alt_odb_list; alt; alt = alt->next) {
+ const char *path = alt_sha1_path(alt, sha1);
+ if (check_and_freshen_file(path, freshen))
+ return 1;
+ }
+ return 0;
+}
+
+static int check_and_freshen(const unsigned char *sha1, int freshen)
+{
+ return check_and_freshen_local(sha1, freshen) ||
+ check_and_freshen_nonlocal(sha1, freshen);
+}
+
+int has_loose_object_nonlocal(const unsigned char *sha1)
+{
+ return check_and_freshen_nonlocal(sha1, 0);
+}
+
+static int has_loose_object(const unsigned char *sha1)
+{
+ return check_and_freshen(sha1, 0);
+}
+
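+/*
+ * Die if a single mmap request exceeds the limit given by the
+ * GIT_MMAP_LIMIT environment variable (unlimited when unset).
+ */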
+static void mmap_limit_check(size_t length)
+{
+ static size_t limit = 0;
+ if (!limit) {
+ limit = git_env_ulong("GIT_MMAP_LIMIT", 0);
+ if (!limit)
+ limit = SIZE_MAX;
+ }
+ if (length > limit)
+ die("attempting to mmap %"PRIuMAX" over limit %"PRIuMAX,
+ (uintmax_t)length, (uintmax_t)limit);
+}
+
+void *xmmap_gently(void *start, size_t length,
+ int prot, int flags, int fd, off_t offset)
+{
+ void *ret;
+
+ mmap_limit_check(length);
+ ret = mmap(start, length, prot, flags, fd, offset);
+ if (ret == MAP_FAILED) {
+ if (!length)
+ return NULL;
+ release_pack_memory(length);
+ ret = mmap(start, length, prot, flags, fd, offset);
+ }
+ return ret;
+}
+
+void *xmmap(void *start, size_t length,
+ int prot, int flags, int fd, off_t offset)
+{
+ void *ret = xmmap_gently(start, length, prot, flags, fd, offset);
+ if (ret == MAP_FAILED)
+ die_errno("mmap failed");
+ return ret;
+}
+
+/*
+ * With the in-core object data in "map", rehash it to make sure the
+ * object name actually matches "sha1" to detect object corruption.
+ * With "map" == NULL, try reading the object named with "sha1" using
+ * the streaming interface and rehash it to do the same.
+ */
+int check_object_signature(const struct object_id *oid, void *map,
+ unsigned long size, const char *type)
+{
+ struct object_id real_oid;
+ enum object_type obj_type;
+ struct git_istream *st;
+ git_hash_ctx c;
+ char hdr[MAX_HEADER_LEN];
+ int hdrlen;
+
+ if (map) {
+ hash_object_file(map, size, type, &real_oid);
+ return oidcmp(oid, &real_oid) ? -1 : 0;
+ }
+
+ st = open_istream(oid, &obj_type, &size, NULL);
+ if (!st)
+ return -1;
+
+ /* Generate the header */
+ hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(obj_type), size) + 1;
+
+ /* Sha1.. */
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
+ for (;;) {
+ char buf[1024 * 16];
+ ssize_t readlen = read_istream(st, buf, sizeof(buf));
+
+ if (readlen < 0) {
+ close_istream(st);
+ return -1;
+ }
+ if (!readlen)
+ break;
+ the_hash_algo->update_fn(&c, buf, readlen);
+ }
+ the_hash_algo->final_fn(real_oid.hash, &c);
+ close_istream(st);
+ return oidcmp(oid, &real_oid) ? -1 : 0;
+}
+
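+/*
+ * Open "name" with O_CLOEXEC when the kernel supports it; otherwise
+ * fall back to a plain open() and try to set FD_CLOEXEC via fcntl(2).
+ */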
+int git_open_cloexec(const char *name, int flags)
+{
+ int fd;
+ static int o_cloexec = O_CLOEXEC;
+
+ fd = open(name, flags | o_cloexec);
+ if ((o_cloexec & O_CLOEXEC) && fd < 0 && errno == EINVAL) {
+ /* Try again w/o O_CLOEXEC: the kernel might not support it */
+ o_cloexec &= ~O_CLOEXEC;
+ fd = open(name, flags | o_cloexec);
+ }
+
+#if defined(F_GETFD) && defined(F_SETFD) && defined(FD_CLOEXEC)
+ {
+ static int fd_cloexec = FD_CLOEXEC;
+
+ if (!o_cloexec && 0 <= fd && fd_cloexec) {
+ /* Opened w/o O_CLOEXEC? try with fcntl(2) to add it */
+ int flags = fcntl(fd, F_GETFD);
+ if (fcntl(fd, F_SETFD, flags | fd_cloexec))
+ fd_cloexec = 0;
+ }
+ }
+#endif
+ return fd;
+}
+
+/*
+ * Find "sha1" as a loose object in the local repository or in an alternate.
+ * Returns 0 on success, negative on failure.
+ *
+ * The "path" out-parameter will give the path of the object we found (if any).
+ * Note that it may point to static storage and is only valid until another
+ * call to sha1_file_name(), etc.
+ */
+static int stat_sha1_file(struct repository *r, const unsigned char *sha1,
+ struct stat *st, const char **path)
+{
+ struct alternate_object_database *alt;
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(r, &buf, sha1);
+ *path = buf.buf;
+
+ if (!lstat(*path, st))
+ return 0;
+
+ prepare_alt_odb(r);
+ errno = ENOENT;
+ for (alt = r->objects->alt_odb_list; alt; alt = alt->next) {
+ *path = alt_sha1_path(alt, sha1);
+ if (!lstat(*path, st))
+ return 0;
+ }
+
+ return -1;
+}
+
+/*
+ * Like stat_sha1_file(), but actually open the object and return the
+ * descriptor. See the caveats on the "path" parameter above.
+ */
+static int open_sha1_file(struct repository *r,
+ const unsigned char *sha1, const char **path)
+{
+ int fd;
+ struct alternate_object_database *alt;
+ int most_interesting_errno;
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(r, &buf, sha1);
+ *path = buf.buf;
+
+ fd = git_open(*path);
+ if (fd >= 0)
+ return fd;
+ most_interesting_errno = errno;
+
+ prepare_alt_odb(r);
+ for (alt = r->objects->alt_odb_list; alt; alt = alt->next) {
+ *path = alt_sha1_path(alt, sha1);
+ fd = git_open(*path);
+ if (fd >= 0)
+ return fd;
+ if (most_interesting_errno == ENOENT)
+ most_interesting_errno = errno;
+ }
+ errno = most_interesting_errno;
+ return -1;
+}
+
+/*
+ * Map the loose object at "path" if it is not NULL, or the path found by
+ * searching for a loose object named "sha1".
+ */
+static void *map_sha1_file_1(struct repository *r, const char *path,
+ const unsigned char *sha1, unsigned long *size)
+{
+ void *map;
+ int fd;
+
+ if (path)
+ fd = git_open(path);
+ else
+ fd = open_sha1_file(r, sha1, &path);
+ map = NULL;
+ if (fd >= 0) {
+ struct stat st;
+
+ if (!fstat(fd, &st)) {
+ *size = xsize_t(st.st_size);
+ if (!*size) {
+ /* mmap() is forbidden on empty files */
+				error("object file %s is empty", path);
+				close(fd);
+				return NULL;
+ }
+ map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
+ }
+ close(fd);
+ }
+ return map;
+}
+
+void *map_sha1_file(struct repository *r,
+ const unsigned char *sha1, unsigned long *size)
+{
+ return map_sha1_file_1(r, NULL, sha1, size);
+}
+
+static int unpack_sha1_short_header(git_zstream *stream,
+ unsigned char *map, unsigned long mapsize,
+ void *buffer, unsigned long bufsiz)
+{
+ /* Get the data stream */
+ memset(stream, 0, sizeof(*stream));
+ stream->next_in = map;
+ stream->avail_in = mapsize;
+ stream->next_out = buffer;
+ stream->avail_out = bufsiz;
+
+ git_inflate_init(stream);
+ return git_inflate(stream, 0);
+}
+
+int unpack_sha1_header(git_zstream *stream,
+ unsigned char *map, unsigned long mapsize,
+ void *buffer, unsigned long bufsiz)
+{
+ int status = unpack_sha1_short_header(stream, map, mapsize,
+ buffer, bufsiz);
+
+ if (status < Z_OK)
+ return status;
+
+ /* Make sure we have the terminating NUL */
+ if (!memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
+ return -1;
+ return 0;
+}
+
+static int unpack_sha1_header_to_strbuf(git_zstream *stream, unsigned char *map,
+ unsigned long mapsize, void *buffer,
+ unsigned long bufsiz, struct strbuf *header)
+{
+ int status;
+
+ status = unpack_sha1_short_header(stream, map, mapsize, buffer, bufsiz);
+ if (status < Z_OK)
+ return -1;
+
+ /*
+	 * Check if the entire header is unpacked in the first iteration.
+ */
+ if (memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
+ return 0;
+
+ /*
+ * buffer[0..bufsiz] was not large enough. Copy the partial
+ * result out to header, and then append the result of further
+ * reading the stream.
+ */
+ strbuf_add(header, buffer, stream->next_out - (unsigned char *)buffer);
+ stream->next_out = buffer;
+ stream->avail_out = bufsiz;
+
+ do {
+ status = git_inflate(stream, 0);
+ strbuf_add(header, buffer, stream->next_out - (unsigned char *)buffer);
+ if (memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
+ return 0;
+ stream->next_out = buffer;
+ stream->avail_out = bufsiz;
+ } while (status != Z_STREAM_END);
+ return -1;
+}
+
+static void *unpack_sha1_rest(git_zstream *stream, void *buffer, unsigned long size, const unsigned char *sha1)
+{
+ int bytes = strlen(buffer) + 1;
+ unsigned char *buf = xmallocz(size);
+ unsigned long n;
+ int status = Z_OK;
+
+ n = stream->total_out - bytes;
+ if (n > size)
+ n = size;
+ memcpy(buf, (char *) buffer + bytes, n);
+ bytes = n;
+ if (bytes <= size) {
+ /*
+ * The above condition must be (bytes <= size), not
+ * (bytes < size). In other words, even though we
+ * expect no more output and set avail_out to zero,
+ * the input zlib stream may have bytes that express
+ * "this concludes the stream", and we *do* want to
+ * eat that input.
+ *
+ * Otherwise we would not be able to test that we
+ * consumed all the input to reach the expected size;
+ * we also want to check that zlib tells us that all
+ * went well with status == Z_STREAM_END at the end.
+ */
+ stream->next_out = buf + bytes;
+ stream->avail_out = size - bytes;
+ while (status == Z_OK)
+ status = git_inflate(stream, Z_FINISH);
+ }
+ if (status == Z_STREAM_END && !stream->avail_in) {
+ git_inflate_end(stream);
+ return buf;
+ }
+
+ if (status < 0)
+ error("corrupt loose object '%s'", sha1_to_hex(sha1));
+ else if (stream->avail_in)
+ error("garbage at end of loose object '%s'",
+ sha1_to_hex(sha1));
+ free(buf);
+ return NULL;
+}
+
+/*
+ * We used to just use "sscanf()", but that's actually way
+ * too permissive for what we want to check. So do a strict
+ * object header parse by hand.
+ */
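+/*
+ * A well-formed header is "<type> <decimal-size>" followed by a NUL,
+ * e.g. "blob 16"; the size must be in canonical form (no leading
+ * zeroes), and any other deviation makes the parse fail.
+ */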
+static int parse_sha1_header_extended(const char *hdr, struct object_info *oi,
+ unsigned int flags)
+{
+ const char *type_buf = hdr;
+ unsigned long size;
+ int type, type_len = 0;
+
+ /*
+ * The type can be of any size but is followed by
+ * a space.
+ */
+ for (;;) {
+ char c = *hdr++;
+ if (!c)
+ return -1;
+ if (c == ' ')
+ break;
+ type_len++;
+ }
+
+ type = type_from_string_gently(type_buf, type_len, 1);
+ if (oi->type_name)
+ strbuf_add(oi->type_name, type_buf, type_len);
+ /*
+	 * Set type to 0 if it's an unknown object and we're
+	 * obtaining the type using the '--allow-unknown-type'
+	 * option.
+ */
+ if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE) && (type < 0))
+ type = 0;
+ else if (type < 0)
+ die("invalid object type");
+ if (oi->typep)
+ *oi->typep = type;
+
+ /*
+ * The length must follow immediately, and be in canonical
+ * decimal format (ie "010" is not valid).
+ */
+ size = *hdr++ - '0';
+ if (size > 9)
+ return -1;
+ if (size) {
+ for (;;) {
+ unsigned long c = *hdr - '0';
+ if (c > 9)
+ break;
+ hdr++;
+ size = size * 10 + c;
+ }
+ }
+
+ if (oi->sizep)
+ *oi->sizep = size;
+
+ /*
+ * The length must be followed by a zero byte
+ */
+ return *hdr ? -1 : type;
+}
+
+int parse_sha1_header(const char *hdr, unsigned long *sizep)
+{
+ struct object_info oi = OBJECT_INFO_INIT;
+
+ oi.sizep = sizep;
+ return parse_sha1_header_extended(hdr, &oi, 0);
+}
+
+static int sha1_loose_object_info(struct repository *r,
+ const unsigned char *sha1,
+ struct object_info *oi, int flags)
+{
+ int status = 0;
+ unsigned long mapsize;
+ void *map;
+ git_zstream stream;
+ char hdr[MAX_HEADER_LEN];
+ struct strbuf hdrbuf = STRBUF_INIT;
+ unsigned long size_scratch;
+
+ if (oi->delta_base_sha1)
+ hashclr(oi->delta_base_sha1);
+
+ /*
+ * If we don't care about type or size, then we don't
+ * need to look inside the object at all. Note that we
+ * do not optimize out the stat call, even if the
+ * caller doesn't care about the disk-size, since our
+ * return value implicitly indicates whether the
+ * object even exists.
+ */
+ if (!oi->typep && !oi->type_name && !oi->sizep && !oi->contentp) {
+ const char *path;
+ struct stat st;
+ if (stat_sha1_file(r, sha1, &st, &path) < 0)
+ return -1;
+ if (oi->disk_sizep)
+ *oi->disk_sizep = st.st_size;
+ return 0;
+ }
+
+ map = map_sha1_file(r, sha1, &mapsize);
+ if (!map)
+ return -1;
+
+ if (!oi->sizep)
+ oi->sizep = &size_scratch;
+
+ if (oi->disk_sizep)
+ *oi->disk_sizep = mapsize;
+ if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE)) {
+ if (unpack_sha1_header_to_strbuf(&stream, map, mapsize, hdr, sizeof(hdr), &hdrbuf) < 0)
+ status = error("unable to unpack %s header with --allow-unknown-type",
+ sha1_to_hex(sha1));
+ } else if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
+ status = error("unable to unpack %s header",
+ sha1_to_hex(sha1));
+ if (status < 0)
+ ; /* Do nothing */
+ else if (hdrbuf.len) {
+ if ((status = parse_sha1_header_extended(hdrbuf.buf, oi, flags)) < 0)
+ status = error("unable to parse %s header with --allow-unknown-type",
+ sha1_to_hex(sha1));
+ } else if ((status = parse_sha1_header_extended(hdr, oi, flags)) < 0)
+ status = error("unable to parse %s header", sha1_to_hex(sha1));
+
+ if (status >= 0 && oi->contentp) {
+ *oi->contentp = unpack_sha1_rest(&stream, hdr,
+ *oi->sizep, sha1);
+ if (!*oi->contentp) {
+ git_inflate_end(&stream);
+ status = -1;
+ }
+ } else
+ git_inflate_end(&stream);
+
+ munmap(map, mapsize);
+ if (status && oi->typep)
+ *oi->typep = status;
+ if (oi->sizep == &size_scratch)
+ oi->sizep = NULL;
+ strbuf_release(&hdrbuf);
+ oi->whence = OI_LOOSE;
+ return (status < 0) ? status : 0;
+}
+
+int fetch_if_missing = 1;
+
+int oid_object_info_extended(const struct object_id *oid, struct object_info *oi, unsigned flags)
+{
+ static struct object_info blank_oi = OBJECT_INFO_INIT;
+ struct pack_entry e;
+ int rtype;
+ const struct object_id *real = oid;
+ int already_retried = 0;
+
+ if (flags & OBJECT_INFO_LOOKUP_REPLACE)
+ real = lookup_replace_object(oid);
+
+ if (is_null_oid(real))
+ return -1;
+
+ if (!oi)
+ oi = &blank_oi;
+
+ if (!(flags & OBJECT_INFO_SKIP_CACHED)) {
+ struct cached_object *co = find_cached_object(real->hash);
+ if (co) {
+ if (oi->typep)
+ *(oi->typep) = co->type;
+ if (oi->sizep)
+ *(oi->sizep) = co->size;
+ if (oi->disk_sizep)
+ *(oi->disk_sizep) = 0;
+ if (oi->delta_base_sha1)
+ hashclr(oi->delta_base_sha1);
+ if (oi->type_name)
+ strbuf_addstr(oi->type_name, type_name(co->type));
+ if (oi->contentp)
+ *oi->contentp = xmemdupz(co->buf, co->size);
+ oi->whence = OI_CACHED;
+ return 0;
+ }
+ }
+
+ while (1) {
+ if (find_pack_entry(the_repository, real->hash, &e))
+ break;
+
+ if (flags & OBJECT_INFO_IGNORE_LOOSE)
+ return -1;
+
+ /* Most likely it's a loose object. */
+ if (!sha1_loose_object_info(the_repository, real->hash, oi, flags))
+ return 0;
+
+ /* Not a loose object; someone else may have just packed it. */
+ if (!(flags & OBJECT_INFO_QUICK)) {
+ reprepare_packed_git(the_repository);
+ if (find_pack_entry(the_repository, real->hash, &e))
+ break;
+ }
+
+ /* Check if it is a missing object */
+ if (fetch_if_missing && repository_format_partial_clone &&
+ !already_retried) {
+ /*
+			 * TODO Investigate having fetch_object() return
+ * TODO error/success and stopping the music here.
+ */
+ fetch_object(repository_format_partial_clone, real->hash);
+ already_retried = 1;
+ continue;
+ }
+
+ return -1;
+ }
+
+ if (oi == &blank_oi)
+ /*
+ * We know that the caller doesn't actually need the
+ * information below, so return early.
+ */
+ return 0;
+ rtype = packed_object_info(e.p, e.offset, oi);
+ if (rtype < 0) {
+ mark_bad_packed_object(e.p, real->hash);
+ return oid_object_info_extended(real, oi, 0);
+ } else if (oi->whence == OI_PACKED) {
+ oi->u.packed.offset = e.offset;
+ oi->u.packed.pack = e.p;
+ oi->u.packed.is_delta = (rtype == OBJ_REF_DELTA ||
+ rtype == OBJ_OFS_DELTA);
+ }
+
+ return 0;
+}
+
+/* returns enum object_type or negative */
+int oid_object_info(const struct object_id *oid, unsigned long *sizep)
+{
+ enum object_type type;
+ struct object_info oi = OBJECT_INFO_INIT;
+
+ oi.typep = &type;
+ oi.sizep = sizep;
+ if (oid_object_info_extended(oid, &oi,
+ OBJECT_INFO_LOOKUP_REPLACE) < 0)
+ return -1;
+ return type;
+}
+
+static void *read_object(const unsigned char *sha1, enum object_type *type,
+ unsigned long *size)
+{
+ struct object_id oid;
+ struct object_info oi = OBJECT_INFO_INIT;
+ void *content;
+ oi.typep = type;
+ oi.sizep = size;
+ oi.contentp = &content;
+
+ hashcpy(oid.hash, sha1);
+
+ if (oid_object_info_extended(&oid, &oi, 0) < 0)
+ return NULL;
+ return content;
+}
+
+int pretend_object_file(void *buf, unsigned long len, enum object_type type,
+ struct object_id *oid)
+{
+ struct cached_object *co;
+
+ hash_object_file(buf, len, type_name(type), oid);
+ if (has_sha1_file(oid->hash) || find_cached_object(oid->hash))
+ return 0;
+ ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc);
+ co = &cached_objects[cached_object_nr++];
+ co->size = len;
+ co->type = type;
+ co->buf = xmalloc(len);
+ memcpy(co->buf, buf, len);
+ hashcpy(co->sha1, oid->hash);
+ return 0;
+}
+
+/*
+ * This function dies on corrupt objects; the callers who want to
+ * deal with them should arrange to call read_object() and give error
+ * messages themselves.
+ */
+void *read_object_file_extended(const struct object_id *oid,
+ enum object_type *type,
+ unsigned long *size,
+ int lookup_replace)
+{
+ void *data;
+ const struct packed_git *p;
+ const char *path;
+ struct stat st;
+ const struct object_id *repl = lookup_replace ? lookup_replace_object(oid)
+ : oid;
+
+ errno = 0;
+ data = read_object(repl->hash, type, size);
+ if (data)
+ return data;
+
+ if (errno && errno != ENOENT)
+ die_errno("failed to read object %s", oid_to_hex(oid));
+
+ /* die if we replaced an object with one that does not exist */
+ if (repl != oid)
+ die("replacement %s not found for %s",
+ oid_to_hex(repl), oid_to_hex(oid));
+
+ if (!stat_sha1_file(the_repository, repl->hash, &st, &path))
+ die("loose object %s (stored in %s) is corrupt",
+ oid_to_hex(repl), path);
+
+ if ((p = has_packed_and_bad(repl->hash)) != NULL)
+ die("packed object %s (stored in %s) is corrupt",
+ oid_to_hex(repl), p->pack_name);
+
+ return NULL;
+}
+
+void *read_object_with_reference(const struct object_id *oid,
+ const char *required_type_name,
+ unsigned long *size,
+ struct object_id *actual_oid_return)
+{
+ enum object_type type, required_type;
+ void *buffer;
+ unsigned long isize;
+ struct object_id actual_oid;
+
+ required_type = type_from_string(required_type_name);
+ oidcpy(&actual_oid, oid);
+ while (1) {
+ int ref_length = -1;
+ const char *ref_type = NULL;
+
+ buffer = read_object_file(&actual_oid, &type, &isize);
+ if (!buffer)
+ return NULL;
+ if (type == required_type) {
+ *size = isize;
+ if (actual_oid_return)
+ oidcpy(actual_oid_return, &actual_oid);
+ return buffer;
+ }
+ /* Handle references */
+ else if (type == OBJ_COMMIT)
+ ref_type = "tree ";
+ else if (type == OBJ_TAG)
+ ref_type = "object ";
+ else {
+ free(buffer);
+ return NULL;
+ }
+ ref_length = strlen(ref_type);
+
+ if (ref_length + GIT_SHA1_HEXSZ > isize ||
+ memcmp(buffer, ref_type, ref_length) ||
+ get_oid_hex((char *) buffer + ref_length, &actual_oid)) {
+ free(buffer);
+ return NULL;
+ }
+ free(buffer);
+ /* Now we have the ID of the referred-to object in
+ * actual_oid. Check again. */
+ }
+}
+
+static void write_object_file_prepare(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ char *hdr, int *hdrlen)
+{
+ git_hash_ctx c;
+
+ /* Generate the header */
+ *hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1;
+
+ /* Sha1.. */
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, *hdrlen);
+ the_hash_algo->update_fn(&c, buf, len);
+ the_hash_algo->final_fn(oid->hash, &c);
+}
+
+/*
+ * Move the just written object into its final resting place.
+ */
+int finalize_object_file(const char *tmpfile, const char *filename)
+{
+ int ret = 0;
+
+ if (object_creation_mode == OBJECT_CREATION_USES_RENAMES)
+ goto try_rename;
+ else if (link(tmpfile, filename))
+ ret = errno;
+
+ /*
+ * Coda hack - coda doesn't like cross-directory links,
+ * so we fall back to a rename, which will mean that it
+ * won't be able to check collisions, but that's not a
+ * big deal.
+ *
+ * The same holds for FAT formatted media.
+ *
+ * When this succeeds, we just return. We have nothing
+ * left to unlink.
+ */
+ if (ret && ret != EEXIST) {
+ try_rename:
+ if (!rename(tmpfile, filename))
+ goto out;
+ ret = errno;
+ }
+ unlink_or_warn(tmpfile);
+ if (ret) {
+ if (ret != EEXIST) {
+ return error_errno("unable to write sha1 filename %s", filename);
+ }
+ /* FIXME!!! Collision check here ? */
+ }
+
+out:
+ if (adjust_shared_perm(filename))
+ return error("unable to set permission to '%s'", filename);
+ return 0;
+}
+
+static int write_buffer(int fd, const void *buf, size_t len)
+{
+ if (write_in_full(fd, buf, len) < 0)
+ return error_errno("file write error");
+ return 0;
+}
+
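+/*
+ * Compute the object name of (buf, len, type) without writing
+ * anything to the object database.
+ */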
+int hash_object_file(const void *buf, unsigned long len, const char *type,
+ struct object_id *oid)
+{
+ char hdr[MAX_HEADER_LEN];
+ int hdrlen = sizeof(hdr);
+ write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
+ return 0;
+}
+
+/* Finalize a file on disk, and close it. */
+static void close_sha1_file(int fd)
+{
+ if (fsync_object_files)
+ fsync_or_die(fd, "sha1 file");
+ if (close(fd) != 0)
+ die_errno("error when closing sha1 file");
+}
+
+/* Size of directory component, including the ending '/' */
+static inline int directory_size(const char *filename)
+{
+ const char *s = strrchr(filename, '/');
+ if (!s)
+ return 0;
+ return s - filename + 1;
+}
+
+/*
+ * This creates a temporary file in the same directory as the final
+ * 'filename'
+ *
+ * We want to avoid cross-directory filename renames, because those
+ * can have problems on various filesystems (FAT, NFS, Coda).
+ */
+static int create_tmpfile(struct strbuf *tmp, const char *filename)
+{
+ int fd, dirlen = directory_size(filename);
+
+ strbuf_reset(tmp);
+ strbuf_add(tmp, filename, dirlen);
+ strbuf_addstr(tmp, "tmp_obj_XXXXXX");
+ fd = git_mkstemp_mode(tmp->buf, 0444);
+ if (fd < 0 && dirlen && errno == ENOENT) {
+ /*
+ * Make sure the directory exists; note that the contents
+ * of the buffer are undefined after mkstemp returns an
+ * error, so we have to rewrite the whole buffer from
+ * scratch.
+ */
+ strbuf_reset(tmp);
+ strbuf_add(tmp, filename, dirlen - 1);
+ if (mkdir(tmp->buf, 0777) && errno != EEXIST)
+ return -1;
+ if (adjust_shared_perm(tmp->buf))
+ return -1;
+
+ /* Try again */
+ strbuf_addstr(tmp, "/tmp_obj_XXXXXX");
+ fd = git_mkstemp_mode(tmp->buf, 0444);
+ }
+ return fd;
+}
+
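+/*
+ * Deflate the header and payload into a temporary file in the same
+ * directory as the final object, re-hashing the data as it is
+ * compressed to detect unstable input, then move the result into
+ * place with finalize_object_file().
+ */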
+static int write_loose_object(const struct object_id *oid, char *hdr,
+ int hdrlen, const void *buf, unsigned long len,
+ time_t mtime)
+{
+ int fd, ret;
+ unsigned char compressed[4096];
+ git_zstream stream;
+ git_hash_ctx c;
+ struct object_id parano_oid;
+ static struct strbuf tmp_file = STRBUF_INIT;
+ static struct strbuf filename = STRBUF_INIT;
+
+ strbuf_reset(&filename);
+ sha1_file_name(the_repository, &filename, oid->hash);
+
+ fd = create_tmpfile(&tmp_file, filename.buf);
+ if (fd < 0) {
+ if (errno == EACCES)
+ return error("insufficient permission for adding an object to repository database %s", get_object_directory());
+ else
+ return error_errno("unable to create temporary file");
+ }
+
+ /* Set it up */
+ git_deflate_init(&stream, zlib_compression_level);
+ stream.next_out = compressed;
+ stream.avail_out = sizeof(compressed);
+ the_hash_algo->init_fn(&c);
+
+ /* First header.. */
+ stream.next_in = (unsigned char *)hdr;
+ stream.avail_in = hdrlen;
+ while (git_deflate(&stream, 0) == Z_OK)
+ ; /* nothing */
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
+
+ /* Then the data itself.. */
+ stream.next_in = (void *)buf;
+ stream.avail_in = len;
+ do {
+ unsigned char *in0 = stream.next_in;
+ ret = git_deflate(&stream, Z_FINISH);
+ the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
+ if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
+ die("unable to write sha1 file");
+ stream.next_out = compressed;
+ stream.avail_out = sizeof(compressed);
+ } while (ret == Z_OK);
+
+ if (ret != Z_STREAM_END)
+ die("unable to deflate new object %s (%d)", oid_to_hex(oid),
+ ret);
+ ret = git_deflate_end_gently(&stream);
+ if (ret != Z_OK)
+ die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
+ ret);
+ the_hash_algo->final_fn(parano_oid.hash, &c);
+	if (oidcmp(oid, &parano_oid) != 0)
+ die("confused by unstable object source data for %s",
+ oid_to_hex(oid));
+
+ close_sha1_file(fd);
+
+ if (mtime) {
+ struct utimbuf utb;
+ utb.actime = mtime;
+ utb.modtime = mtime;
+ if (utime(tmp_file.buf, &utb) < 0)
+ warning_errno("failed utime() on %s", tmp_file.buf);
+ }
+
+ return finalize_object_file(tmp_file.buf, filename.buf);
+}
+
+static int freshen_loose_object(const unsigned char *sha1)
+{
+ return check_and_freshen(sha1, 1);
+}
+
+static int freshen_packed_object(const unsigned char *sha1)
+{
+ struct pack_entry e;
+ if (!find_pack_entry(the_repository, sha1, &e))
+ return 0;
+ if (e.p->freshened)
+ return 1;
+ if (!freshen_file(e.p->pack_name))
+ return 0;
+ e.p->freshened = 1;
+ return 1;
+}
+
+int write_object_file(const void *buf, unsigned long len, const char *type,
+ struct object_id *oid)
+{
+ char hdr[MAX_HEADER_LEN];
+ int hdrlen = sizeof(hdr);
+
+ /* Normally if we have it in the pack then we do not bother writing
+ * it out into .git/objects/??/?{38} file.
+ */
+ write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
+ if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
+ return 0;
+ return write_loose_object(oid, hdr, hdrlen, buf, len, 0);
+}
+
+int hash_object_file_literally(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ unsigned flags)
+{
+ char *header;
+ int hdrlen, status = 0;
+
+ /* type string, SP, %lu of the length plus NUL must fit this */
+ hdrlen = strlen(type) + MAX_HEADER_LEN;
+ header = xmalloc(hdrlen);
+ write_object_file_prepare(buf, len, type, oid, header, &hdrlen);
+
+ if (!(flags & HASH_WRITE_OBJECT))
+ goto cleanup;
+ if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
+ goto cleanup;
+ status = write_loose_object(oid, header, hdrlen, buf, len, 0);
+
+cleanup:
+ free(header);
+ return status;
+}
+
+int force_object_loose(const struct object_id *oid, time_t mtime)
+{
+ void *buf;
+ unsigned long len;
+ enum object_type type;
+ char hdr[MAX_HEADER_LEN];
+ int hdrlen;
+ int ret;
+
+ if (has_loose_object(oid->hash))
+ return 0;
+ buf = read_object(oid->hash, &type, &len);
+ if (!buf)
+ return error("cannot read sha1_file for %s", oid_to_hex(oid));
+ hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
+ ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
+ free(buf);
+
+ return ret;
+}
+
+int has_sha1_file_with_flags(const unsigned char *sha1, int flags)
+{
+ struct object_id oid;
+ if (!startup_info->have_repository)
+ return 0;
+ hashcpy(oid.hash, sha1);
+ return oid_object_info_extended(&oid, NULL,
+ flags | OBJECT_INFO_SKIP_CACHED) >= 0;
+}
+
+int has_object_file(const struct object_id *oid)
+{
+ return has_sha1_file(oid->hash);
+}
+
+int has_object_file_with_flags(const struct object_id *oid, int flags)
+{
+ return has_sha1_file_with_flags(oid->hash, flags);
+}
+
+static void check_tree(const void *buf, size_t size)
+{
+ struct tree_desc desc;
+ struct name_entry entry;
+
+ init_tree_desc(&desc, buf, size);
+ while (tree_entry(&desc, &entry))
+ /* do nothing
+ * tree_entry() will die() on malformed entries */
+ ;
+}
+
+static void check_commit(const void *buf, size_t size)
+{
+ struct commit c;
+ memset(&c, 0, sizeof(c));
+ if (parse_commit_buffer(&c, buf, size))
+ die("corrupt commit");
+}
+
+static void check_tag(const void *buf, size_t size)
+{
+ struct tag t;
+ memset(&t, 0, sizeof(t));
+ if (parse_tag_buffer(&t, buf, size))
+ die("corrupt tag");
+}
+
+static int index_mem(struct object_id *oid, void *buf, size_t size,
+ enum object_type type,
+ const char *path, unsigned flags)
+{
+ int ret, re_allocated = 0;
+ int write_object = flags & HASH_WRITE_OBJECT;
+
+ if (!type)
+ type = OBJ_BLOB;
+
+ /*
+ * Convert blobs to git internal format
+ */
+ if ((type == OBJ_BLOB) && path) {
+ struct strbuf nbuf = STRBUF_INIT;
+ if (convert_to_git(&the_index, path, buf, size, &nbuf,
+ get_conv_flags(flags))) {
+ buf = strbuf_detach(&nbuf, &size);
+ re_allocated = 1;
+ }
+ }
+ if (flags & HASH_FORMAT_CHECK) {
+ if (type == OBJ_TREE)
+ check_tree(buf, size);
+ if (type == OBJ_COMMIT)
+ check_commit(buf, size);
+ if (type == OBJ_TAG)
+ check_tag(buf, size);
+ }
+
+ if (write_object)
+ ret = write_object_file(buf, size, type_name(type), oid);
+ else
+ ret = hash_object_file(buf, size, type_name(type), oid);
+ if (re_allocated)
+ free(buf);
+ return ret;
+}
+
+static int index_stream_convert_blob(struct object_id *oid, int fd,
+ const char *path, unsigned flags)
+{
+ int ret;
+ const int write_object = flags & HASH_WRITE_OBJECT;
+ struct strbuf sbuf = STRBUF_INIT;
+
+ assert(path);
+ assert(would_convert_to_git_filter_fd(path));
+
+ convert_to_git_filter_fd(&the_index, path, fd, &sbuf,
+ get_conv_flags(flags));
+
+ if (write_object)
+ ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+ oid);
+ else
+ ret = hash_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+ oid);
+ strbuf_release(&sbuf);
+ return ret;
+}
+
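+/*
+ * Read the whole input from a non-seekable file descriptor into
+ * memory and index it as an in-core buffer.
+ */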
+static int index_pipe(struct object_id *oid, int fd, enum object_type type,
+ const char *path, unsigned flags)
+{
+ struct strbuf sbuf = STRBUF_INIT;
+ int ret;
+
+ if (strbuf_read(&sbuf, fd, 4096) >= 0)
+ ret = index_mem(oid, sbuf.buf, sbuf.len, type, path, flags);
+ else
+ ret = -1;
+ strbuf_release(&sbuf);
+ return ret;
+}
+
+#define SMALL_FILE_SIZE (32*1024)
+
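+/*
+ * Index a regular file of known size: empty and small files (up to
+ * SMALL_FILE_SIZE) are read into memory, larger ones are mmapped.
+ */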
+static int index_core(struct object_id *oid, int fd, size_t size,
+ enum object_type type, const char *path,
+ unsigned flags)
+{
+ int ret;
+
+ if (!size) {
+ ret = index_mem(oid, "", size, type, path, flags);
+ } else if (size <= SMALL_FILE_SIZE) {
+ char *buf = xmalloc(size);
+ ssize_t read_result = read_in_full(fd, buf, size);
+ if (read_result < 0)
+ ret = error_errno("read error while indexing %s",
+ path ? path : "<unknown>");
+ else if (read_result != size)
+ ret = error("short read while indexing %s",
+ path ? path : "<unknown>");
+ else
+ ret = index_mem(oid, buf, size, type, path, flags);
+ free(buf);
+ } else {
+ void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+ ret = index_mem(oid, buf, size, type, path, flags);
+ munmap(buf, size);
+ }
+ return ret;
+}
+
+/*
+ * This creates one packfile per large blob unless bulk-checkin
+ * machinery is "plugged".
+ *
+ * This also bypasses the usual "convert-to-git" dance, and that is on
+ * purpose. We could write a streaming version of the converting
+ * functions and insert that before feeding the data to fast-import
+ * (or equivalent in-core API described above). However, that is
+ * somewhat complicated, as we do not know the size of the filter
+ * result, which we need to know beforehand when writing a git object.
+ * Since the primary motivation for trying to stream from the working
+ * tree file and to avoid mmaping it in core is to deal with large
+ * binary blobs, they generally do not want to get any conversion, and
+ * callers should avoid this code path when filters are requested.
+ */
+static int index_stream(struct object_id *oid, int fd, size_t size,
+ enum object_type type, const char *path,
+ unsigned flags)
+{
+ return index_bulk_checkin(oid, fd, size, type, path, flags);
+}
+
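+/*
+ * Dispatch on the kind of input: filter-converted blobs are streamed
+ * through the conversion, non-regular files are slurped as a pipe,
+ * and small, non-blob, or convertible contents are hashed in core;
+ * anything else is streamed straight into a packfile via the
+ * bulk-checkin machinery.
+ */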
+int index_fd(struct object_id *oid, int fd, struct stat *st,
+ enum object_type type, const char *path, unsigned flags)
+{
+ int ret;
+
+ /*
+ * Call xsize_t() only when needed to avoid potentially unnecessary
+ * die() for large files.
+ */
+ if (type == OBJ_BLOB && path && would_convert_to_git_filter_fd(path))
+ ret = index_stream_convert_blob(oid, fd, path, flags);
+ else if (!S_ISREG(st->st_mode))
+ ret = index_pipe(oid, fd, type, path, flags);
+ else if (st->st_size <= big_file_threshold || type != OBJ_BLOB ||
+ (path && would_convert_to_git(&the_index, path)))
+ ret = index_core(oid, fd, xsize_t(st->st_size), type, path,
+ flags);
+ else
+ ret = index_stream(oid, fd, xsize_t(st->st_size), type, path,
+ flags);
+ close(fd);
+ return ret;
+}
+
+int index_path(struct object_id *oid, const char *path, struct stat *st, unsigned flags)
+{
+ int fd;
+ struct strbuf sb = STRBUF_INIT;
+ int rc = 0;
+
+ switch (st->st_mode & S_IFMT) {
+ case S_IFREG:
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return error_errno("open(\"%s\")", path);
+ if (index_fd(oid, fd, st, OBJ_BLOB, path, flags) < 0)
+ return error("%s: failed to insert into database",
+ path);
+ break;
+ case S_IFLNK:
+ if (strbuf_readlink(&sb, path, st->st_size))
+ return error_errno("readlink(\"%s\")", path);
+ if (!(flags & HASH_WRITE_OBJECT))
+ hash_object_file(sb.buf, sb.len, blob_type, oid);
+ else if (write_object_file(sb.buf, sb.len, blob_type, oid))
+ rc = error("%s: failed to insert into database", path);
+ strbuf_release(&sb);
+ break;
+ case S_IFDIR:
+ return resolve_gitlink_ref(path, "HEAD", oid);
+ default:
+ return error("%s: unsupported file type", path);
+ }
+ return rc;
+}
+
+int read_pack_header(int fd, struct pack_header *header)
+{
+ if (read_in_full(fd, header, sizeof(*header)) != sizeof(*header))
+ /* "eof before pack header was fully read" */
+ return PH_ERROR_EOF;
+
+ if (header->hdr_signature != htonl(PACK_SIGNATURE))
+ /* "protocol error (pack signature mismatch detected)" */
+ return PH_ERROR_PACK_SIGNATURE;
+ if (!pack_version_ok(header->hdr_version))
+ /* "protocol error (pack version unsupported)" */
+ return PH_ERROR_PROTOCOL;
+ return 0;
+}
+
+void assert_oid_type(const struct object_id *oid, enum object_type expect)
+{
+ enum object_type type = oid_object_info(oid, NULL);
+ if (type < 0)
+ die("%s is not a valid object", oid_to_hex(oid));
+ if (type != expect)
+ die("%s is not a valid '%s' object", oid_to_hex(oid),
+ type_name(expect));
+}
+
+int for_each_file_in_obj_subdir(unsigned int subdir_nr,
+ struct strbuf *path,
+ each_loose_object_fn obj_cb,
+ each_loose_cruft_fn cruft_cb,
+ each_loose_subdir_fn subdir_cb,
+ void *data)
+{
+ size_t origlen, baselen;
+ DIR *dir;
+ struct dirent *de;
+ int r = 0;
+ struct object_id oid;
+
+ if (subdir_nr > 0xff)
+ BUG("invalid loose object subdirectory: %x", subdir_nr);
+
+ origlen = path->len;
+ strbuf_complete(path, '/');
+ strbuf_addf(path, "%02x", subdir_nr);
+
+ dir = opendir(path->buf);
+ if (!dir) {
+ if (errno != ENOENT)
+ r = error_errno("unable to open %s", path->buf);
+ strbuf_setlen(path, origlen);
+ return r;
+ }
+
+ oid.hash[0] = subdir_nr;
+ strbuf_addch(path, '/');
+ baselen = path->len;
+
+ while ((de = readdir(dir))) {
+ size_t namelen;
+ if (is_dot_or_dotdot(de->d_name))
+ continue;
+
+ namelen = strlen(de->d_name);
+ strbuf_setlen(path, baselen);
+ strbuf_add(path, de->d_name, namelen);
+ if (namelen == GIT_SHA1_HEXSZ - 2 &&
+ !hex_to_bytes(oid.hash + 1, de->d_name,
+ GIT_SHA1_RAWSZ - 1)) {
+ if (obj_cb) {
+ r = obj_cb(&oid, path->buf, data);
+ if (r)
+ break;
+ }
+ continue;
+ }
+
+ if (cruft_cb) {
+ r = cruft_cb(de->d_name, path->buf, data);
+ if (r)
+ break;
+ }
+ }
+ closedir(dir);
+
+ strbuf_setlen(path, baselen - 1);
+ if (!r && subdir_cb)
+ r = subdir_cb(subdir_nr, path->buf, data);
+
+ strbuf_setlen(path, origlen);
+
+ return r;
+}
+
+int for_each_loose_file_in_objdir_buf(struct strbuf *path,
+ each_loose_object_fn obj_cb,
+ each_loose_cruft_fn cruft_cb,
+ each_loose_subdir_fn subdir_cb,
+ void *data)
+{
+ int r = 0;
+ int i;
+
+ for (i = 0; i < 256; i++) {
+ r = for_each_file_in_obj_subdir(i, path, obj_cb, cruft_cb,
+ subdir_cb, data);
+ if (r)
+ break;
+ }
+
+ return r;
+}
+
+int for_each_loose_file_in_objdir(const char *path,
+ each_loose_object_fn obj_cb,
+ each_loose_cruft_fn cruft_cb,
+ each_loose_subdir_fn subdir_cb,
+ void *data)
+{
+ struct strbuf buf = STRBUF_INIT;
+ int r;
+
+ strbuf_addstr(&buf, path);
+ r = for_each_loose_file_in_objdir_buf(&buf, obj_cb, cruft_cb,
+ subdir_cb, data);
+ strbuf_release(&buf);
+
+ return r;
+}
+
+struct loose_alt_odb_data {
+ each_loose_object_fn *cb;
+ void *data;
+};
+
+static int loose_from_alt_odb(struct alternate_object_database *alt,
+ void *vdata)
+{
+ struct loose_alt_odb_data *data = vdata;
+ struct strbuf buf = STRBUF_INIT;
+ int r;
+
+ strbuf_addstr(&buf, alt->path);
+ r = for_each_loose_file_in_objdir_buf(&buf,
+ data->cb, NULL, NULL,
+ data->data);
+ strbuf_release(&buf);
+ return r;
+}
+
+int for_each_loose_object(each_loose_object_fn cb, void *data, unsigned flags)
+{
+ struct loose_alt_odb_data alt;
+ int r;
+
+ r = for_each_loose_file_in_objdir(get_object_directory(),
+ cb, NULL, NULL, data);
+ if (r)
+ return r;
+
+ if (flags & FOR_EACH_OBJECT_LOCAL_ONLY)
+ return 0;
+
+ alt.cb = cb;
+ alt.data = data;
+ return foreach_alt_odb(loose_from_alt_odb, &alt);
+}
+
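+/*
+ * Inflate the remainder of a loose object in fixed-size chunks,
+ * feeding each chunk to the hash function so the whole object never
+ * has to be held in memory, and verify that the zlib stream ends
+ * cleanly and that the resulting hash matches "expected_sha1".
+ */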
+static int check_stream_sha1(git_zstream *stream,
+ const char *hdr,
+ unsigned long size,
+ const char *path,
+ const unsigned char *expected_sha1)
+{
+ git_hash_ctx c;
+ unsigned char real_sha1[GIT_MAX_RAWSZ];
+ unsigned char buf[4096];
+ unsigned long total_read;
+ int status = Z_OK;
+
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, stream->total_out);
+
+ /*
+ * We already read some bytes into hdr, but the ones up to the NUL
+ * do not count against the object's content size.
+ */
+ total_read = stream->total_out - strlen(hdr) - 1;
+
+ /*
+ * This size comparison must be "<=" to read the final zlib packets;
+ * see the comment in unpack_sha1_rest for details.
+ */
+ while (total_read <= size &&
+ (status == Z_OK || status == Z_BUF_ERROR)) {
+ stream->next_out = buf;
+ stream->avail_out = sizeof(buf);
+ if (size - total_read < stream->avail_out)
+ stream->avail_out = size - total_read;
+ status = git_inflate(stream, Z_FINISH);
+ the_hash_algo->update_fn(&c, buf, stream->next_out - buf);
+ total_read += stream->next_out - buf;
+ }
+ git_inflate_end(stream);
+
+ if (status != Z_STREAM_END) {
+ error("corrupt loose object '%s'", sha1_to_hex(expected_sha1));
+ return -1;
+ }
+ if (stream->avail_in) {
+ error("garbage at end of loose object '%s'",
+ sha1_to_hex(expected_sha1));
+ return -1;
+ }
+
+ the_hash_algo->final_fn(real_sha1, &c);
+ if (hashcmp(expected_sha1, real_sha1)) {
+ error("sha1 mismatch for %s (expected %s)", path,
+ sha1_to_hex(expected_sha1));
+ return -1;
+ }
+
+ return 0;
+}
+
+int read_loose_object(const char *path,
+ const struct object_id *expected_oid,
+ enum object_type *type,
+ unsigned long *size,
+ void **contents)
+{
+ int ret = -1;
+ void *map = NULL;
+ unsigned long mapsize;
+ git_zstream stream;
+ char hdr[MAX_HEADER_LEN];
+
+ *contents = NULL;
+
+ map = map_sha1_file_1(the_repository, path, NULL, &mapsize);
+ if (!map) {
+ error_errno("unable to mmap %s", path);
+ goto out;
+ }
+
+ if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) {
+ error("unable to unpack header of %s", path);
+ goto out;
+ }
+
+ *type = parse_sha1_header(hdr, size);
+ if (*type < 0) {
+ error("unable to parse header of %s", path);
+ git_inflate_end(&stream);
+ goto out;
+ }
+
+ if (*type == OBJ_BLOB) {
+ if (check_stream_sha1(&stream, hdr, *size, path, expected_oid->hash) < 0)
+ goto out;
+ } else {
+ *contents = unpack_sha1_rest(&stream, hdr, *size, expected_oid->hash);
+ if (!*contents) {
+ error("unable to unpack contents of %s", path);
+ git_inflate_end(&stream);
+ goto out;
+ }
+ if (check_object_signature(expected_oid, *contents,
+ *size, type_name(*type))) {
+ error("sha1 mismatch for %s (expected %s)", path,
+ oid_to_hex(expected_oid));
+ free(*contents);
+ goto out;
+ }
+ }
+
+ ret = 0; /* everything checks out */
+
+out:
+ if (map)
+ munmap(map, mapsize);
+ return ret;
+}
--- /dev/null
+#include "cache.h"
+#include "config.h"
+#include "tag.h"
+#include "commit.h"
+#include "tree.h"
+#include "blob.h"
+#include "tree-walk.h"
+#include "refs.h"
+#include "remote.h"
+#include "dir.h"
+#include "sha1-array.h"
+#include "packfile.h"
+#include "object-store.h"
+#include "repository.h"
+
+static int get_oid_oneline(const char *, struct object_id *, struct commit_list *);
+
+typedef int (*disambiguate_hint_fn)(const struct object_id *, void *);
+
+struct disambiguate_state {
+ int len; /* length of prefix in hex chars */
+ char hex_pfx[GIT_MAX_HEXSZ + 1];
+ struct object_id bin_pfx;
+
+ disambiguate_hint_fn fn;
+ void *cb_data;
+ struct object_id candidate;
+ unsigned candidate_exists:1;
+ unsigned candidate_checked:1;
+ unsigned candidate_ok:1;
+ unsigned disambiguate_fn_used:1;
+ unsigned ambiguous:1;
+ unsigned always_call_fn:1;
+};
+
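+/*
+ * Feed one object name that matches the prefix into the
+ * disambiguation state.  In the normal mode this remembers the first
+ * candidate, ignores exact repeats, and flags ambiguity once two
+ * distinct candidates both survive the hint function (or as soon as
+ * a second candidate appears and no hint function is given); with
+ * always_call_fn set, every match is simply handed to the callback.
+ */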
+static void update_candidates(struct disambiguate_state *ds, const struct object_id *current)
+{
+ if (ds->always_call_fn) {
+ ds->ambiguous = ds->fn(current, ds->cb_data) ? 1 : 0;
+ return;
+ }
+ if (!ds->candidate_exists) {
+ /* this is the first candidate */
+ oidcpy(&ds->candidate, current);
+ ds->candidate_exists = 1;
+ return;
+ } else if (!oidcmp(&ds->candidate, current)) {
+ /* the same as what we already have seen */
+ return;
+ }
+
+ if (!ds->fn) {
+ /* cannot disambiguate between ds->candidate and current */
+ ds->ambiguous = 1;
+ return;
+ }
+
+ if (!ds->candidate_checked) {
+ ds->candidate_ok = ds->fn(&ds->candidate, ds->cb_data);
+ ds->disambiguate_fn_used = 1;
+ ds->candidate_checked = 1;
+ }
+
+ if (!ds->candidate_ok) {
+ /* discard the candidate; we know it does not satisfy fn */
+ oidcpy(&ds->candidate, current);
+ ds->candidate_checked = 0;
+ return;
+ }
+
+ /* if we reach this point, we know ds->candidate satisfies fn */
+ if (ds->fn(current, ds->cb_data)) {
+ /*
+ * if both current and candidate satisfy fn, we cannot
+ * disambiguate.
+ */
+ ds->candidate_ok = 0;
+ ds->ambiguous = 1;
+ }
+
+ /* otherwise, current can be discarded and candidate is still good */
+}
+
+static int append_loose_object(const struct object_id *oid, const char *path,
+ void *data)
+{
+ oid_array_append(data, oid);
+ return 0;
+}
+
+static int match_sha(unsigned, const unsigned char *, const unsigned char *);
+
+static void find_short_object_filename(struct disambiguate_state *ds)
+{
+ int subdir_nr = ds->bin_pfx.hash[0];
+ struct alternate_object_database *alt;
+ static struct alternate_object_database *fakeent;
+
+ if (!fakeent) {
+ /*
+ * Create a "fake" alternate object database that
+ * points to our own object database, to make it
+ * easier to get a temporary working space in
+ * alt->name/alt->base while iterating over the
+ * object databases including our own.
+ */
+ fakeent = alloc_alt_odb(get_object_directory());
+ }
+ fakeent->next = the_repository->objects->alt_odb_list;
+
+ for (alt = fakeent; alt && !ds->ambiguous; alt = alt->next) {
+ int pos;
+
+ if (!alt->loose_objects_subdir_seen[subdir_nr]) {
+ struct strbuf *buf = alt_scratch_buf(alt);
+ for_each_file_in_obj_subdir(subdir_nr, buf,
+ append_loose_object,
+ NULL, NULL,
+ &alt->loose_objects_cache);
+ alt->loose_objects_subdir_seen[subdir_nr] = 1;
+ }
+
+ pos = oid_array_lookup(&alt->loose_objects_cache, &ds->bin_pfx);
+ if (pos < 0)
+ pos = -1 - pos;
+ while (!ds->ambiguous && pos < alt->loose_objects_cache.nr) {
+ const struct object_id *oid;
+ oid = alt->loose_objects_cache.oid + pos;
+ if (!match_sha(ds->len, ds->bin_pfx.hash, oid->hash))
+ break;
+ update_candidates(ds, oid);
+ pos++;
+ }
+ }
+}
+
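+/*
+ * Return 1 iff the first "len" hex digits of "a" and "b" agree;
+ * an odd "len" compares only the high nibble of the final byte.
+ */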
+static int match_sha(unsigned len, const unsigned char *a, const unsigned char *b)
+{
+ do {
+ if (*a != *b)
+ return 0;
+ a++;
+ b++;
+ len -= 2;
+ } while (len > 1);
+ if (len)
+ if ((*a ^ *b) & 0xf0)
+ return 0;
+ return 1;
+}
+
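+/*
+ * Look for objects whose names start with bin_pfx in one pack, using
+ * a binary search over the pack index to find the first possible
+ * match and then walking forward while the prefix still matches.
+ */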
+static void unique_in_pack(struct packed_git *p,
+ struct disambiguate_state *ds)
+{
+ uint32_t num, i, first = 0;
+ const struct object_id *current = NULL;
+
+ if (open_pack_index(p) || !p->num_objects)
+ return;
+
+ num = p->num_objects;
+ bsearch_pack(&ds->bin_pfx, p, &first);
+
+ /*
+ * At this point, "first" is the location of the lowest object
+ * with an object name that could match "bin_pfx". See if we have
+	 * 0, 1 or more objects that actually match.
+ */
+ for (i = first; i < num && !ds->ambiguous; i++) {
+ struct object_id oid;
+ current = nth_packed_object_oid(&oid, p, i);
+ if (!match_sha(ds->len, ds->bin_pfx.hash, current->hash))
+ break;
+ update_candidates(ds, current);
+ }
+}
+
+static void find_short_packed_object(struct disambiguate_state *ds)
+{
+ struct packed_git *p;
+
+ for (p = get_packed_git(the_repository); p && !ds->ambiguous;
+ p = p->next)
+ unique_in_pack(p, ds);
+}
+
+#define SHORT_NAME_NOT_FOUND (-1)
+#define SHORT_NAME_AMBIGUOUS (-2)
+
+static int finish_object_disambiguation(struct disambiguate_state *ds,
+ struct object_id *oid)
+{
+ if (ds->ambiguous)
+ return SHORT_NAME_AMBIGUOUS;
+
+ if (!ds->candidate_exists)
+ return SHORT_NAME_NOT_FOUND;
+
+ if (!ds->candidate_checked)
+ /*
+ * If this is the only candidate, there is no point
+ * calling the disambiguation hint callback.
+ *
+ * On the other hand, if the current candidate
+ * replaced an earlier candidate that did _not_ pass
+ * the disambiguation hint callback, then we do have
+	 * more than one object that matches the short name
+ * given, so we should make sure this one matches;
+ * otherwise, if we discovered this one and the one
+ * that we previously discarded in the reverse order,
+ * we would end up showing different results in the
+ * same repository!
+ */
+ ds->candidate_ok = (!ds->disambiguate_fn_used ||
+ ds->fn(&ds->candidate, ds->cb_data));
+
+ if (!ds->candidate_ok)
+ return SHORT_NAME_AMBIGUOUS;
+
+ oidcpy(oid, &ds->candidate);
+ return 0;
+}
+
+static int disambiguate_commit_only(const struct object_id *oid, void *cb_data_unused)
+{
+ int kind = oid_object_info(oid, NULL);
+ return kind == OBJ_COMMIT;
+}
+
+static int disambiguate_committish_only(const struct object_id *oid, void *cb_data_unused)
+{
+ struct object *obj;
+ int kind;
+
+ kind = oid_object_info(oid, NULL);
+ if (kind == OBJ_COMMIT)
+ return 1;
+ if (kind != OBJ_TAG)
+ return 0;
+
+ /* We need to do this the hard way... */
+ obj = deref_tag(parse_object(oid), NULL, 0);
+ if (obj && obj->type == OBJ_COMMIT)
+ return 1;
+ return 0;
+}
+
+static int disambiguate_tree_only(const struct object_id *oid, void *cb_data_unused)
+{
+ int kind = oid_object_info(oid, NULL);
+ return kind == OBJ_TREE;
+}
+
+static int disambiguate_treeish_only(const struct object_id *oid, void *cb_data_unused)
+{
+ struct object *obj;
+ int kind;
+
+ kind = oid_object_info(oid, NULL);
+ if (kind == OBJ_TREE || kind == OBJ_COMMIT)
+ return 1;
+ if (kind != OBJ_TAG)
+ return 0;
+
+ /* We need to do this the hard way... */
+ obj = deref_tag(parse_object(oid), NULL, 0);
+ if (obj && (obj->type == OBJ_TREE || obj->type == OBJ_COMMIT))
+ return 1;
+ return 0;
+}
+
+static int disambiguate_blob_only(const struct object_id *oid, void *cb_data_unused)
+{
+ int kind = oid_object_info(oid, NULL);
+ return kind == OBJ_BLOB;
+}
+
+static disambiguate_hint_fn default_disambiguate_hint;
+
+int set_disambiguate_hint_config(const char *var, const char *value)
+{
+ static const struct {
+ const char *name;
+ disambiguate_hint_fn fn;
+ } hints[] = {
+ { "none", NULL },
+ { "commit", disambiguate_commit_only },
+ { "committish", disambiguate_committish_only },
+ { "tree", disambiguate_tree_only },
+ { "treeish", disambiguate_treeish_only },
+ { "blob", disambiguate_blob_only }
+ };
+ int i;
+
+ if (!value)
+ return config_error_nonbool(var);
+
+ for (i = 0; i < ARRAY_SIZE(hints); i++) {
+ if (!strcasecmp(value, hints[i].name)) {
+ default_disambiguate_hint = hints[i].fn;
+ return 0;
+ }
+ }
+
+ return error("unknown hint type for '%s': %s", var, value);
+}
+
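+/*
+ * Validate an abbreviated object name and record it both as a
+ * lowercased hex string (hex_pfx) and as binary bytes (bin_pfx).
+ * Returns -1 if the name is too short, too long, or not hex.
+ */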
+static int init_object_disambiguation(const char *name, int len,
+ struct disambiguate_state *ds)
+{
+ int i;
+
+ if (len < MINIMUM_ABBREV || len > GIT_SHA1_HEXSZ)
+ return -1;
+
+ memset(ds, 0, sizeof(*ds));
+
+	for (i = 0; i < len; i++) {
+ unsigned char c = name[i];
+ unsigned char val;
+ if (c >= '0' && c <= '9')
+ val = c - '0';
+ else if (c >= 'a' && c <= 'f')
+ val = c - 'a' + 10;
+ else if (c >= 'A' && c <='F') {
+ val = c - 'A' + 10;
+ c -= 'A' - 'a';
+ }
+ else
+ return -1;
+ ds->hex_pfx[i] = c;
+ if (!(i & 1))
+ val <<= 4;
+ ds->bin_pfx.hash[i >> 1] |= val;
+ }
+
+ ds->len = len;
+ ds->hex_pfx[len] = '\0';
+ prepare_alt_odb(the_repository);
+ return 0;
+}
+
+static int show_ambiguous_object(const struct object_id *oid, void *data)
+{
+ const struct disambiguate_state *ds = data;
+ struct strbuf desc = STRBUF_INIT;
+ int type;
+
+ if (ds->fn && !ds->fn(oid, ds->cb_data))
+ return 0;
+
+ type = oid_object_info(oid, NULL);
+ if (type == OBJ_COMMIT) {
+ struct commit *commit = lookup_commit(oid);
+ if (commit) {
+ struct pretty_print_context pp = {0};
+ pp.date_mode.type = DATE_SHORT;
+ format_commit_message(commit, " %ad - %s", &desc, &pp);
+ }
+ } else if (type == OBJ_TAG) {
+ struct tag *tag = lookup_tag(oid);
+ if (!parse_tag(tag) && tag->tag)
+ strbuf_addf(&desc, " %s", tag->tag);
+ }
+
+ advise(" %s %s%s",
+ find_unique_abbrev(oid, DEFAULT_ABBREV),
+ type_name(type) ? type_name(type) : "unknown type",
+ desc.buf);
+
+ strbuf_release(&desc);
+ return 0;
+}
+
+static int get_short_oid(const char *name, int len, struct object_id *oid,
+ unsigned flags)
+{
+ int status;
+ struct disambiguate_state ds;
+ int quietly = !!(flags & GET_OID_QUIETLY);
+
+ if (init_object_disambiguation(name, len, &ds) < 0)
+ return -1;
+
+ if (HAS_MULTI_BITS(flags & GET_OID_DISAMBIGUATORS))
+ die("BUG: multiple get_short_oid disambiguator flags");
+
+ if (flags & GET_OID_COMMIT)
+ ds.fn = disambiguate_commit_only;
+ else if (flags & GET_OID_COMMITTISH)
+ ds.fn = disambiguate_committish_only;
+ else if (flags & GET_OID_TREE)
+ ds.fn = disambiguate_tree_only;
+ else if (flags & GET_OID_TREEISH)
+ ds.fn = disambiguate_treeish_only;
+ else if (flags & GET_OID_BLOB)
+ ds.fn = disambiguate_blob_only;
+ else
+ ds.fn = default_disambiguate_hint;
+
+ find_short_object_filename(&ds);
+ find_short_packed_object(&ds);
+ status = finish_object_disambiguation(&ds, oid);
+
+ if (!quietly && (status == SHORT_NAME_AMBIGUOUS)) {
+ error(_("short SHA1 %s is ambiguous"), ds.hex_pfx);
+
+ /*
+ * We may still have ambiguity if we simply saw a series of
+ * candidates that did not satisfy our hint function. In
+ * that case, we still want to show them, so disable the hint
+ * function entirely.
+ */
+ if (!ds.ambiguous)
+ ds.fn = NULL;
+
+ advise(_("The candidates are:"));
+ for_each_abbrev(ds.hex_pfx, show_ambiguous_object, &ds);
+ }
+
+ return status;
+}
+
+static int collect_ambiguous(const struct object_id *oid, void *data)
+{
+ oid_array_append(data, oid);
+ return 0;
+}
+
+int for_each_abbrev(const char *prefix, each_abbrev_fn fn, void *cb_data)
+{
+ struct oid_array collect = OID_ARRAY_INIT;
+ struct disambiguate_state ds;
+ int ret;
+
+ if (init_object_disambiguation(prefix, strlen(prefix), &ds) < 0)
+ return -1;
+
+ ds.always_call_fn = 1;
+ ds.fn = collect_ambiguous;
+ ds.cb_data = &collect;
+ find_short_object_filename(&ds);
+ find_short_packed_object(&ds);
+
+ ret = oid_array_for_each_unique(&collect, fn, cb_data);
+ oid_array_clear(&collect);
+ return ret;
+}
+
+/*
+ * Return the slot of the most-significant bit set in "val". There are various
+ * ways to do this quickly with fls() or __builtin_clzl(), but speed is
+ * probably not a big deal here.
+ */
+static unsigned msb(unsigned long val)
+{
+ unsigned r = 0;
+ while (val >>= 1)
+ r++;
+ return r;
+}
+
+struct min_abbrev_data {
+ unsigned int init_len;
+ unsigned int cur_len;
+ char *hex;
+ const struct object_id *oid;
+};
+
+static inline char get_hex_char_from_oid(const struct object_id *oid,
+ unsigned int pos)
+{
+ static const char hex[] = "0123456789abcdef";
+
+ if ((pos & 1) == 0)
+ return hex[oid->hash[pos >> 1] >> 4];
+ else
+ return hex[oid->hash[pos >> 1] & 0xf];
+}
+
+static int extend_abbrev_len(const struct object_id *oid, void *cb_data)
+{
+ struct min_abbrev_data *mad = cb_data;
+
+ unsigned int i = mad->init_len;
+ while (mad->hex[i] && mad->hex[i] == get_hex_char_from_oid(oid, i))
+ i++;
+
+ if (i < GIT_MAX_RAWSZ && i >= mad->cur_len)
+ mad->cur_len = i + 1;
+
+ return 0;
+}
+
+static void find_abbrev_len_for_pack(struct packed_git *p,
+ struct min_abbrev_data *mad)
+{
+ int match = 0;
+ uint32_t num, first = 0;
+ struct object_id oid;
+ const struct object_id *mad_oid;
+
+ if (open_pack_index(p) || !p->num_objects)
+ return;
+
+ num = p->num_objects;
+ mad_oid = mad->oid;
+ match = bsearch_pack(mad_oid, p, &first);
+
+ /*
+ * first is now the position in the packfile where we would insert
+ * mad->hash if it does not exist (or the position of mad->hash if
+ * it does exist). Hence, we consider a maximum of two objects
+ * nearby for the abbreviation length.
+ */
+ mad->init_len = 0;
+ if (!match) {
+ if (nth_packed_object_oid(&oid, p, first))
+ extend_abbrev_len(&oid, mad);
+ } else if (first < num - 1) {
+ if (nth_packed_object_oid(&oid, p, first + 1))
+ extend_abbrev_len(&oid, mad);
+ }
+ if (first > 0) {
+ if (nth_packed_object_oid(&oid, p, first - 1))
+ extend_abbrev_len(&oid, mad);
+ }
+ mad->init_len = mad->cur_len;
+}
+
+static void find_abbrev_len_packed(struct min_abbrev_data *mad)
+{
+ struct packed_git *p;
+
+ for (p = get_packed_git(the_repository); p; p = p->next)
+ find_abbrev_len_for_pack(p, mad);
+}
+
+int find_unique_abbrev_r(char *hex, const struct object_id *oid, int len)
+{
+ struct disambiguate_state ds;
+ struct min_abbrev_data mad;
+ struct object_id oid_ret;
+ if (len < 0) {
+ unsigned long count = approximate_object_count();
+ /*
+ * Add one because the MSB only tells us the highest bit set,
+ * not including the value of all the _other_ bits (so "15"
+ * is only one off of 2^4, but the MSB is the 3rd bit).
+ */
+ len = msb(count) + 1;
+ /*
+ * We now know we have on the order of 2^len objects, which
+ * expects a collision at 2^(len/2). But we also care about hex
+ * chars, not bits, and there are 4 bits per hex. So all
+ * together we need to divide by 2 and round up.
+ */
+ len = DIV_ROUND_UP(len, 2);
+ /*
+ * For very small repos, we stick with our regular fallback.
+ */
+ if (len < FALLBACK_DEFAULT_ABBREV)
+ len = FALLBACK_DEFAULT_ABBREV;
+ }
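+
+ /*
+ * A rough worked example of the heuristic above (illustrative numbers,
+ * assuming approximate_object_count() is in the right ballpark): with
+ * about eight million objects, msb(count) is 22, len starts at 23 bits,
+ * and DIV_ROUND_UP(23, 2) yields a default abbreviation of 12 hex digits.
+ */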
+
+ oid_to_hex_r(hex, oid);
+ if (len == GIT_SHA1_HEXSZ || !len)
+ return GIT_SHA1_HEXSZ;
+
+ mad.init_len = len;
+ mad.cur_len = len;
+ mad.hex = hex;
+ mad.oid = oid;
+
+ find_abbrev_len_packed(&mad);
+
+ if (init_object_disambiguation(hex, mad.cur_len, &ds) < 0)
+ return -1;
+
+ ds.fn = extend_abbrev_len;
+ ds.always_call_fn = 1;
+ ds.cb_data = (void *)&mad;
+
+ find_short_object_filename(&ds);
+ (void)finish_object_disambiguation(&ds, &oid_ret);
+
+ hex[mad.cur_len] = 0;
+ return mad.cur_len;
+}
+
+const char *find_unique_abbrev(const struct object_id *oid, int len)
+{
+ static int bufno;
+ static char hexbuffer[4][GIT_MAX_HEXSZ + 1];
+ char *hex = hexbuffer[bufno];
+ bufno = (bufno + 1) % ARRAY_SIZE(hexbuffer);
+ find_unique_abbrev_r(hex, oid, len);
+ return hex;
+}
+
+static int ambiguous_path(const char *path, int len)
+{
+ int slash = 1;
+ int cnt;
+
+ for (cnt = 0; cnt < len; cnt++) {
+ switch (*path++) {
+ case '\0':
+ break;
+ case '/':
+ if (slash)
+ break;
+ slash = 1;
+ continue;
+ case '.':
+ continue;
+ default:
+ slash = 0;
+ continue;
+ }
+ break;
+ }
+ return slash;
+}
+
+static inline int at_mark(const char *string, int len,
+ const char **suffix, int nr)
+{
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ int suffix_len = strlen(suffix[i]);
+ if (suffix_len <= len
+ && !strncasecmp(string, suffix[i], suffix_len))
+ return suffix_len;
+ }
+ return 0;
+}
+
+static inline int upstream_mark(const char *string, int len)
+{
+ const char *suffix[] = { "@{upstream}", "@{u}" };
+ return at_mark(string, len, suffix, ARRAY_SIZE(suffix));
+}
+
+static inline int push_mark(const char *string, int len)
+{
+ const char *suffix[] = { "@{push}" };
+ return at_mark(string, len, suffix, ARRAY_SIZE(suffix));
+}
+
+static int get_oid_1(const char *name, int len, struct object_id *oid, unsigned lookup_flags);
+static int interpret_nth_prior_checkout(const char *name, int namelen, struct strbuf *buf);
+
+static int get_oid_basic(const char *str, int len, struct object_id *oid,
+ unsigned int flags)
+{
+ static const char *warn_msg = "refname '%.*s' is ambiguous.";
+ static const char *object_name_msg = N_(
+ "Git normally never creates a ref that ends with 40 hex characters\n"
+ "because it will be ignored when you just specify 40-hex. These refs\n"
+ "may be created by mistake. For example,\n"
+ "\n"
+ " git checkout -b $br $(git rev-parse ...)\n"
+ "\n"
+ "where \"$br\" is somehow empty and a 40-hex ref is created. Please\n"
+ "examine these refs and maybe delete them. Turn this message off by\n"
+ "running \"git config advice.objectNameWarning false\"");
+ struct object_id tmp_oid;
+ char *real_ref = NULL;
+ int refs_found = 0;
+ int at, reflog_len, nth_prior = 0;
+
+ if (len == GIT_SHA1_HEXSZ && !get_oid_hex(str, oid)) {
+ if (warn_ambiguous_refs && warn_on_object_refname_ambiguity) {
+ refs_found = dwim_ref(str, len, &tmp_oid, &real_ref);
+ if (refs_found > 0) {
+ warning(warn_msg, len, str);
+ if (advice_object_name_warning)
+ fprintf(stderr, "%s\n", _(object_name_msg));
+ }
+ free(real_ref);
+ }
+ return 0;
+ }
+
+ /* basic@{time or number or -number} format to query ref-log */
+ reflog_len = at = 0;
+ if (len && str[len-1] == '}') {
+ for (at = len-4; at >= 0; at--) {
+ if (str[at] == '@' && str[at+1] == '{') {
+ if (str[at+2] == '-') {
+ if (at != 0)
+ /* @{-N} not at start */
+ return -1;
+ nth_prior = 1;
+ continue;
+ }
+ if (!upstream_mark(str + at, len - at) &&
+ !push_mark(str + at, len - at)) {
+ reflog_len = (len-1) - (at+2);
+ len = at;
+ }
+ break;
+ }
+ }
+ }
+
+ /* Accept only unambiguous ref paths. */
+ if (len && ambiguous_path(str, len))
+ return -1;
+
+ if (nth_prior) {
+ struct strbuf buf = STRBUF_INIT;
+ int detached;
+
+ if (interpret_nth_prior_checkout(str, len, &buf) > 0) {
+ detached = (buf.len == GIT_SHA1_HEXSZ && !get_oid_hex(buf.buf, oid));
+ strbuf_release(&buf);
+ if (detached)
+ return 0;
+ }
+ }
+
+ if (!len && reflog_len)
+ /* allow "@{...}" to mean the current branch reflog */
+ refs_found = dwim_ref("HEAD", 4, oid, &real_ref);
+ else if (reflog_len)
+ refs_found = dwim_log(str, len, oid, &real_ref);
+ else
+ refs_found = dwim_ref(str, len, oid, &real_ref);
+
+ if (!refs_found)
+ return -1;
+
+ if (warn_ambiguous_refs && !(flags & GET_OID_QUIETLY) &&
+ (refs_found > 1 ||
+ !get_short_oid(str, len, &tmp_oid, GET_OID_QUIETLY)))
+ warning(warn_msg, len, str);
+
+ if (reflog_len) {
+ int nth, i;
+ timestamp_t at_time;
+ timestamp_t co_time;
+ int co_tz, co_cnt;
+
+ /* Is it asking for N-th entry, or approxidate? */
+ for (i = nth = 0; 0 <= nth && i < reflog_len; i++) {
+ char ch = str[at+2+i];
+ if ('0' <= ch && ch <= '9')
+ nth = nth * 10 + ch - '0';
+ else
+ nth = -1;
+ }
+ if (100000000 <= nth) {
+ at_time = nth;
+ nth = -1;
+ } else if (0 <= nth)
+ at_time = 0;
+ else {
+ int errors = 0;
+ char *tmp = xstrndup(str + at + 2, reflog_len);
+ at_time = approxidate_careful(tmp, &errors);
+ free(tmp);
+ if (errors) {
+ free(real_ref);
+ return -1;
+ }
+ }
+ if (read_ref_at(real_ref, flags, at_time, nth, oid, NULL,
+ &co_time, &co_tz, &co_cnt)) {
+ if (!len) {
+ if (starts_with(real_ref, "refs/heads/")) {
+ str = real_ref + 11;
+ len = strlen(real_ref + 11);
+ } else {
+ /* detached HEAD */
+ str = "HEAD";
+ len = 4;
+ }
+ }
+ if (at_time) {
+ if (!(flags & GET_OID_QUIETLY)) {
+ warning("Log for '%.*s' only goes "
+ "back to %s.", len, str,
+ show_date(co_time, co_tz, DATE_MODE(RFC2822)));
+ }
+ } else {
+ if (flags & GET_OID_QUIETLY) {
+ exit(128);
+ }
+ die("Log for '%.*s' only has %d entries.",
+ len, str, co_cnt);
+ }
+ }
+ }
+
+ free(real_ref);
+ return 0;
+}
+
+static int get_parent(const char *name, int len,
+ struct object_id *result, int idx)
+{
+ struct object_id oid;
+ int ret = get_oid_1(name, len, &oid, GET_OID_COMMITTISH);
+ struct commit *commit;
+ struct commit_list *p;
+
+ if (ret)
+ return ret;
+ commit = lookup_commit_reference(&oid);
+ if (parse_commit(commit))
+ return -1;
+ if (!idx) {
+ oidcpy(result, &commit->object.oid);
+ return 0;
+ }
+ p = commit->parents;
+ while (p) {
+ if (!--idx) {
+ oidcpy(result, &p->item->object.oid);
+ return 0;
+ }
+ p = p->next;
+ }
+ return -1;
+}
+
+static int get_nth_ancestor(const char *name, int len,
+ struct object_id *result, int generation)
+{
+ struct object_id oid;
+ struct commit *commit;
+ int ret;
+
+ ret = get_oid_1(name, len, &oid, GET_OID_COMMITTISH);
+ if (ret)
+ return ret;
+ commit = lookup_commit_reference(&oid);
+ if (!commit)
+ return -1;
+
+ while (generation--) {
+ if (parse_commit(commit) || !commit->parents)
+ return -1;
+ commit = commit->parents->item;
+ }
+ oidcpy(result, &commit->object.oid);
+ return 0;
+}
+
+struct object *peel_to_type(const char *name, int namelen,
+ struct object *o, enum object_type expected_type)
+{
+ if (name && !namelen)
+ namelen = strlen(name);
+ while (1) {
+ if (!o || (!o->parsed && !parse_object(&o->oid)))
+ return NULL;
+ if (expected_type == OBJ_ANY || o->type == expected_type)
+ return o;
+ if (o->type == OBJ_TAG)
+ o = ((struct tag*) o)->tagged;
+ else if (o->type == OBJ_COMMIT)
+ o = &(((struct commit *) o)->tree->object);
+ else {
+ if (name)
+ error("%.*s: expected %s type, but the object "
+ "dereferences to %s type",
+ namelen, name, type_name(expected_type),
+ type_name(o->type));
+ return NULL;
+ }
+ }
+}
+
+static int peel_onion(const char *name, int len, struct object_id *oid,
+ unsigned lookup_flags)
+{
+ struct object_id outer;
+ const char *sp;
+ unsigned int expected_type = 0;
+ struct object *o;
+
+ /*
+ * "ref^{type}" dereferences ref repeatedly until you cannot
+ * dereference anymore, or you get an object of given type,
+ * whichever comes first. "ref^{}" means just dereference
+ * tags until you get a non-tag. "ref^0" is a shorthand for
+ * "ref^{commit}". "commit^{tree}" could be used to find the
+ * top-level tree of the given commit.
+ */
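+ /*
+ * A few concrete forms accepted here, for illustration (assuming such
+ * refs exist): "v2.17.0^{commit}" peels an annotated tag down to its
+ * commit, "HEAD^{tree}" names the top-level tree of the current commit,
+ * "HEAD^{}" peels tags until a non-tag object is reached, and
+ * "HEAD^{/fix}" resolves to a recent commit whose message matches "fix".
+ */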
+ if (len < 4 || name[len-1] != '}')
+ return -1;
+
+ for (sp = name + len - 1; name <= sp; sp--) {
+ int ch = *sp;
+ if (ch == '{' && name < sp && sp[-1] == '^')
+ break;
+ }
+ if (sp <= name)
+ return -1;
+
+ sp++; /* beginning of type name, or closing brace for empty */
+ if (starts_with(sp, "commit}"))
+ expected_type = OBJ_COMMIT;
+ else if (starts_with(sp, "tag}"))
+ expected_type = OBJ_TAG;
+ else if (starts_with(sp, "tree}"))
+ expected_type = OBJ_TREE;
+ else if (starts_with(sp, "blob}"))
+ expected_type = OBJ_BLOB;
+ else if (starts_with(sp, "object}"))
+ expected_type = OBJ_ANY;
+ else if (sp[0] == '}')
+ expected_type = OBJ_NONE;
+ else if (sp[0] == '/')
+ expected_type = OBJ_COMMIT;
+ else
+ return -1;
+
+ lookup_flags &= ~GET_OID_DISAMBIGUATORS;
+ if (expected_type == OBJ_COMMIT)
+ lookup_flags |= GET_OID_COMMITTISH;
+ else if (expected_type == OBJ_TREE)
+ lookup_flags |= GET_OID_TREEISH;
+
+ if (get_oid_1(name, sp - name - 2, &outer, lookup_flags))
+ return -1;
+
+ o = parse_object(&outer);
+ if (!o)
+ return -1;
+ if (!expected_type) {
+ o = deref_tag(o, name, sp - name - 2);
+ if (!o || (!o->parsed && !parse_object(&o->oid)))
+ return -1;
+ oidcpy(oid, &o->oid);
+ return 0;
+ }
+
+ /*
+ * At this point, the syntax looks correct, so
+ * if we do not get the needed object, we should
+ * barf.
+ */
+ o = peel_to_type(name, len, o, expected_type);
+ if (!o)
+ return -1;
+
+ oidcpy(oid, &o->oid);
+ if (sp[0] == '/') {
+ /* "$commit^{/foo}" */
+ char *prefix;
+ int ret;
+ struct commit_list *list = NULL;
+
+ /*
+ * $commit^{/}. Some regex implementations may reject it.
+ * We don't need regex anyway. '' pattern always matches.
+ */
+ if (sp[1] == '}')
+ return 0;
+
+ prefix = xstrndup(sp + 1, name + len - 1 - (sp + 1));
+ commit_list_insert((struct commit *)o, &list);
+ ret = get_oid_oneline(prefix, oid, list);
+ free(prefix);
+ return ret;
+ }
+ return 0;
+}
+
+static int get_describe_name(const char *name, int len, struct object_id *oid)
+{
+ const char *cp;
+ unsigned flags = GET_OID_QUIETLY | GET_OID_COMMIT;
+
+ for (cp = name + len - 1; name + 2 <= cp; cp--) {
+ char ch = *cp;
+ if (!isxdigit(ch)) {
+ /* We must be looking at g in "SOMETHING-g"
+ * for it to be describe output.
+ */
+ if (ch == 'g' && cp[-1] == '-') {
+ cp++;
+ len -= cp - name;
+ return get_short_oid(cp, len, oid, flags);
+ }
+ }
+ }
+ return -1;
+}
+
+static int get_oid_1(const char *name, int len, struct object_id *oid, unsigned lookup_flags)
+{
+ int ret, has_suffix;
+ const char *cp;
+
+ /*
+ * "name~3" is "name^^^", "name~" is "name~1", and "name^" is "name^1".
+ */
+ has_suffix = 0;
+ for (cp = name + len - 1; name <= cp; cp--) {
+ int ch = *cp;
+ if ('0' <= ch && ch <= '9')
+ continue;
+ if (ch == '~' || ch == '^')
+ has_suffix = ch;
+ break;
+ }
+
+ if (has_suffix) {
+ int num = 0;
+ int len1 = cp - name;
+ cp++;
+ while (cp < name + len)
+ num = num * 10 + *cp++ - '0';
+ if (!num && len1 == len - 1)
+ num = 1;
+ if (has_suffix == '^')
+ return get_parent(name, len1, oid, num);
+ /* else if (has_suffix == '~') -- goes without saying */
+ return get_nth_ancestor(name, len1, oid, num);
+ }
+
+ ret = peel_onion(name, len, oid, lookup_flags);
+ if (!ret)
+ return 0;
+
+ ret = get_oid_basic(name, len, oid, lookup_flags);
+ if (!ret)
+ return 0;
+
+ /* It could be describe output that is "SOMETHING-gXXXX" */
+ ret = get_describe_name(name, len, oid);
+ if (!ret)
+ return 0;
+
+ return get_short_oid(name, len, oid, lookup_flags);
+}
+
+/*
+ * This interprets names like ':/Initial revision of "git"' by searching
+ * through history and returning the first commit whose message matches
+ * the given regular expression.
+ *
+ * For negative-matching, prefix the pattern-part with '!-', like: ':/!-WIP'.
+ *
+ * For a literal '!' character at the beginning of a pattern, you have to repeat
+ * that, like: ':/!!foo'
+ *
+ * For future extension, all other sequences beginning with ':/!' are reserved.
+ */
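+
+/*
+ * Illustrative uses of this syntax (sketches, assuming a repository with
+ * matching commits): "git rev-parse ':/Initial revision'" resolves to the
+ * youngest reachable commit whose message matches the pattern,
+ * ":/!-WIP" to the youngest commit whose message does NOT match "WIP",
+ * and ":/!!foo" to one whose message matches the literal "!foo".
+ */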
+
+/* Remember to update object flag allocation in object.h */
+#define ONELINE_SEEN (1u<<20)
+
+static int handle_one_ref(const char *path, const struct object_id *oid,
+ int flag, void *cb_data)
+{
+ struct commit_list **list = cb_data;
+ struct object *object = parse_object(oid);
+ if (!object)
+ return 0;
+ if (object->type == OBJ_TAG) {
+ object = deref_tag(object, path, strlen(path));
+ if (!object)
+ return 0;
+ }
+ if (object->type != OBJ_COMMIT)
+ return 0;
+ commit_list_insert((struct commit *)object, list);
+ return 0;
+}
+
+static int get_oid_oneline(const char *prefix, struct object_id *oid,
+ struct commit_list *list)
+{
+ struct commit_list *backup = NULL, *l;
+ int found = 0;
+ int negative = 0;
+ regex_t regex;
+
+ if (prefix[0] == '!') {
+ prefix++;
+
+ if (prefix[0] == '-') {
+ prefix++;
+ negative = 1;
+ } else if (prefix[0] != '!') {
+ return -1;
+ }
+ }
+
+ if (regcomp(&regex, prefix, REG_EXTENDED))
+ return -1;
+
+ for (l = list; l; l = l->next) {
+ l->item->object.flags |= ONELINE_SEEN;
+ commit_list_insert(l->item, &backup);
+ }
+ while (list) {
+ const char *p, *buf;
+ struct commit *commit;
+ int matches;
+
+ commit = pop_most_recent_commit(&list, ONELINE_SEEN);
+ if (!parse_object(&commit->object.oid))
+ continue;
+ buf = get_commit_buffer(commit, NULL);
+ p = strstr(buf, "\n\n");
+ matches = negative ^ (p && !regexec(&regex, p + 2, 0, NULL, 0));
+ unuse_commit_buffer(commit, buf);
+
+ if (matches) {
+ oidcpy(oid, &commit->object.oid);
+ found = 1;
+ break;
+ }
+ }
+ regfree(&regex);
+ free_commit_list(list);
+ for (l = backup; l; l = l->next)
+ clear_commit_marks(l->item, ONELINE_SEEN);
+ free_commit_list(backup);
+ return found ? 0 : -1;
+}
+
+struct grab_nth_branch_switch_cbdata {
+ int remaining;
+ struct strbuf buf;
+};
+
+static int grab_nth_branch_switch(struct object_id *ooid, struct object_id *noid,
+ const char *email, timestamp_t timestamp, int tz,
+ const char *message, void *cb_data)
+{
+ struct grab_nth_branch_switch_cbdata *cb = cb_data;
+ const char *match = NULL, *target = NULL;
+ size_t len;
+
+ if (skip_prefix(message, "checkout: moving from ", &match))
+ target = strstr(match, " to ");
+
+ if (!match || !target)
+ return 0;
+ if (--(cb->remaining) == 0) {
+ len = target - match;
+ strbuf_reset(&cb->buf);
+ strbuf_add(&cb->buf, match, len);
+ return 1; /* we are done */
+ }
+ return 0;
+}
+
+/*
+ * Parse @{-N} syntax, return the number of characters parsed
+ * if successful; otherwise signal an error with negative value.
+ */
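+
+/*
+ * For example, "@{-1}" names whatever was checked out immediately before
+ * the current branch, so "git checkout -" is shorthand for
+ * "git checkout @{-1}" (an illustrative use of the syntax parsed here).
+ */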
+static int interpret_nth_prior_checkout(const char *name, int namelen,
+ struct strbuf *buf)
+{
+ long nth;
+ int retval;
+ struct grab_nth_branch_switch_cbdata cb;
+ const char *brace;
+ char *num_end;
+
+ if (namelen < 4)
+ return -1;
+ if (name[0] != '@' || name[1] != '{' || name[2] != '-')
+ return -1;
+ brace = memchr(name, '}', namelen);
+ if (!brace)
+ return -1;
+ nth = strtol(name + 3, &num_end, 10);
+ if (num_end != brace)
+ return -1;
+ if (nth <= 0)
+ return -1;
+ cb.remaining = nth;
+ strbuf_init(&cb.buf, 20);
+
+ retval = 0;
+ if (0 < for_each_reflog_ent_reverse("HEAD", grab_nth_branch_switch, &cb)) {
+ strbuf_reset(buf);
+ strbuf_addbuf(buf, &cb.buf);
+ retval = brace - name + 1;
+ }
+
+ strbuf_release(&cb.buf);
+ return retval;
+}
+
+int get_oid_mb(const char *name, struct object_id *oid)
+{
+ struct commit *one, *two;
+ struct commit_list *mbs;
+ struct object_id oid_tmp;
+ const char *dots;
+ int st;
+
+ dots = strstr(name, "...");
+ if (!dots)
+ return get_oid(name, oid);
+ if (dots == name)
+ st = get_oid("HEAD", &oid_tmp);
+ else {
+ struct strbuf sb;
+ strbuf_init(&sb, dots - name);
+ strbuf_add(&sb, name, dots - name);
+ st = get_oid_committish(sb.buf, &oid_tmp);
+ strbuf_release(&sb);
+ }
+ if (st)
+ return st;
+ one = lookup_commit_reference_gently(&oid_tmp, 0);
+ if (!one)
+ return -1;
+
+ if (get_oid_committish(dots[3] ? (dots + 3) : "HEAD", &oid_tmp))
+ return -1;
+ two = lookup_commit_reference_gently(&oid_tmp, 0);
+ if (!two)
+ return -1;
+ mbs = get_merge_bases(one, two);
+ if (!mbs || mbs->next)
+ st = -1;
+ else {
+ st = 0;
+ oidcpy(oid, &mbs->item->object.oid);
+ }
+ free_commit_list(mbs);
+ return st;
+}
+
+/* parse @something syntax, when 'something' is not {.*} */
+static int interpret_empty_at(const char *name, int namelen, int len, struct strbuf *buf)
+{
+ const char *next;
+
+ if (len || name[1] == '{')
+ return -1;
+
+ /* make sure it's a single @, or @@{.*}, not @foo */
+ next = memchr(name + len + 1, '@', namelen - len - 1);
+ if (next && next[1] != '{')
+ return -1;
+ if (!next)
+ next = name + namelen;
+ if (next != name + 1)
+ return -1;
+
+ strbuf_reset(buf);
+ strbuf_add(buf, "HEAD", 4);
+ return 1;
+}
+
+static int reinterpret(const char *name, int namelen, int len,
+ struct strbuf *buf, unsigned allowed)
+{
+ /* we have extra data, which might need further processing */
+ struct strbuf tmp = STRBUF_INIT;
+ int used = buf->len;
+ int ret;
+
+ strbuf_add(buf, name + len, namelen - len);
+ ret = interpret_branch_name(buf->buf, buf->len, &tmp, allowed);
+ /* that data was not interpreted, remove our cruft */
+ if (ret < 0) {
+ strbuf_setlen(buf, used);
+ return len;
+ }
+ strbuf_reset(buf);
+ strbuf_addbuf(buf, &tmp);
+ strbuf_release(&tmp);
+ /* tweak for size of {-N} versus expanded ref name */
+ return ret - used + len;
+}
+
+static void set_shortened_ref(struct strbuf *buf, const char *ref)
+{
+ char *s = shorten_unambiguous_ref(ref, 0);
+ strbuf_reset(buf);
+ strbuf_addstr(buf, s);
+ free(s);
+}
+
+static int branch_interpret_allowed(const char *refname, unsigned allowed)
+{
+ if (!allowed)
+ return 1;
+
+ if ((allowed & INTERPRET_BRANCH_LOCAL) &&
+ starts_with(refname, "refs/heads/"))
+ return 1;
+ if ((allowed & INTERPRET_BRANCH_REMOTE) &&
+ starts_with(refname, "refs/remotes/"))
+ return 1;
+
+ return 0;
+}
+
+static int interpret_branch_mark(const char *name, int namelen,
+ int at, struct strbuf *buf,
+ int (*get_mark)(const char *, int),
+ const char *(*get_data)(struct branch *,
+ struct strbuf *),
+ unsigned allowed)
+{
+ int len;
+ struct branch *branch;
+ struct strbuf err = STRBUF_INIT;
+ const char *value;
+
+ len = get_mark(name + at, namelen - at);
+ if (!len)
+ return -1;
+
+ if (memchr(name, ':', at))
+ return -1;
+
+ if (at) {
+ char *name_str = xmemdupz(name, at);
+ branch = branch_get(name_str);
+ free(name_str);
+ } else
+ branch = branch_get(NULL);
+
+ value = get_data(branch, &err);
+ if (!value)
+ die("%s", err.buf);
+
+ if (!branch_interpret_allowed(value, allowed))
+ return -1;
+
+ set_shortened_ref(buf, value);
+ return len + at;
+}
+
+int interpret_branch_name(const char *name, int namelen, struct strbuf *buf,
+ unsigned allowed)
+{
+ char *at;
+ const char *start;
+ int len;
+
+ if (!namelen)
+ namelen = strlen(name);
+
+ if (!allowed || (allowed & INTERPRET_BRANCH_LOCAL)) {
+ len = interpret_nth_prior_checkout(name, namelen, buf);
+ if (!len) {
+ return len; /* syntax Ok, not enough switches */
+ } else if (len > 0) {
+ if (len == namelen)
+ return len; /* consumed all */
+ else
+ return reinterpret(name, namelen, len, buf, allowed);
+ }
+ }
+
+ for (start = name;
+ (at = memchr(start, '@', namelen - (start - name)));
+ start = at + 1) {
+
+ if (!allowed || (allowed & INTERPRET_BRANCH_HEAD)) {
+ len = interpret_empty_at(name, namelen, at - name, buf);
+ if (len > 0)
+ return reinterpret(name, namelen, len, buf,
+ allowed);
+ }
+
+ len = interpret_branch_mark(name, namelen, at - name, buf,
+ upstream_mark, branch_get_upstream,
+ allowed);
+ if (len > 0)
+ return len;
+
+ len = interpret_branch_mark(name, namelen, at - name, buf,
+ push_mark, branch_get_push,
+ allowed);
+ if (len > 0)
+ return len;
+ }
+
+ return -1;
+}
+
+void strbuf_branchname(struct strbuf *sb, const char *name, unsigned allowed)
+{
+ int len = strlen(name);
+ int used = interpret_branch_name(name, len, sb, allowed);
+
+ if (used < 0)
+ used = 0;
+ strbuf_add(sb, name + used, len - used);
+}
+
+int strbuf_check_branch_ref(struct strbuf *sb, const char *name)
+{
+ if (startup_info->have_repository)
+ strbuf_branchname(sb, name, INTERPRET_BRANCH_LOCAL);
+ else
+ strbuf_addstr(sb, name);
+
+ /*
+ * This splice must be done even if we end up rejecting the
+ * name; builtin/branch.c::copy_or_rename_branch() still wants
+ * to see what the name expanded to so that "branch -m" can be
+ * used as a tool to correct earlier mistakes.
+ */
+ strbuf_splice(sb, 0, 0, "refs/heads/", 11);
+
+ if (*name == '-' ||
+ !strcmp(sb->buf, "refs/heads/HEAD"))
+ return -1;
+
+ return check_refname_format(sb->buf, 0);
+}
+
+/*
+ * This is like "get_oid_basic()", except it allows "object ID expressions",
+ * notably "xyz^" for "parent of xyz"
+ */
+int get_oid(const char *name, struct object_id *oid)
+{
+ struct object_context unused;
+ return get_oid_with_context(name, 0, oid, &unused);
+}
+
+
+/*
+ * Many callers know that the user meant to name a commit-ish by
+ * syntactical positions where the object name appears. Calling this
+ * function allows the machinery to disambiguate shorter-than-unique
+ * abbreviated object names between commit-ish and others.
+ *
+ * Note that this does NOT error out when the named object is not a
+ * commit-ish. It is merely to give a hint to the disambiguation
+ * machinery.
+ */
+int get_oid_committish(const char *name, struct object_id *oid)
+{
+ struct object_context unused;
+ return get_oid_with_context(name, GET_OID_COMMITTISH,
+ oid, &unused);
+}
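+
+/*
+ * For example, if a short name such as "deadbee" happened to match both
+ * a blob and a commit, a caller using this function for a revision
+ * argument would quietly get the commit, whereas plain get_oid() with no
+ * disambiguation hint configured would report the name as ambiguous
+ * (hypothetical object names, for illustration only).
+ */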
+
+int get_oid_treeish(const char *name, struct object_id *oid)
+{
+ struct object_context unused;
+ return get_oid_with_context(name, GET_OID_TREEISH,
+ oid, &unused);
+}
+
+int get_oid_commit(const char *name, struct object_id *oid)
+{
+ struct object_context unused;
+ return get_oid_with_context(name, GET_OID_COMMIT,
+ oid, &unused);
+}
+
+int get_oid_tree(const char *name, struct object_id *oid)
+{
+ struct object_context unused;
+ return get_oid_with_context(name, GET_OID_TREE,
+ oid, &unused);
+}
+
+int get_oid_blob(const char *name, struct object_id *oid)
+{
+ struct object_context unused;
+ return get_oid_with_context(name, GET_OID_BLOB,
+ oid, &unused);
+}
+
+/* Must be called only when object_name:filename doesn't exist. */
+static void diagnose_invalid_oid_path(const char *prefix,
+ const char *filename,
+ const struct object_id *tree_oid,
+ const char *object_name,
+ int object_name_len)
+{
+ struct object_id oid;
+ unsigned mode;
+
+ if (!prefix)
+ prefix = "";
+
+ if (file_exists(filename))
+ die("Path '%s' exists on disk, but not in '%.*s'.",
+ filename, object_name_len, object_name);
+ if (is_missing_file_error(errno)) {
+ char *fullname = xstrfmt("%s%s", prefix, filename);
+
+ if (!get_tree_entry(tree_oid, fullname, &oid, &mode)) {
+ die("Path '%s' exists, but not '%s'.\n"
+ "Did you mean '%.*s:%s' aka '%.*s:./%s'?",
+ fullname,
+ filename,
+ object_name_len, object_name,
+ fullname,
+ object_name_len, object_name,
+ filename);
+ }
+ die("Path '%s' does not exist in '%.*s'",
+ filename, object_name_len, object_name);
+ }
+}
+
+/* Must be called only when :stage:filename doesn't exist. */
+static void diagnose_invalid_index_path(int stage,
+ const char *prefix,
+ const char *filename)
+{
+ const struct cache_entry *ce;
+ int pos;
+ unsigned namelen = strlen(filename);
+ struct strbuf fullname = STRBUF_INIT;
+
+ if (!prefix)
+ prefix = "";
+
+ /* Wrong stage number? */
+ pos = cache_name_pos(filename, namelen);
+ if (pos < 0)
+ pos = -pos - 1;
+ if (pos < active_nr) {
+ ce = active_cache[pos];
+ if (ce_namelen(ce) == namelen &&
+ !memcmp(ce->name, filename, namelen))
+ die("Path '%s' is in the index, but not at stage %d.\n"
+ "Did you mean ':%d:%s'?",
+ filename, stage,
+ ce_stage(ce), filename);
+ }
+
+ /* Confusion between relative and absolute filenames? */
+ strbuf_addstr(&fullname, prefix);
+ strbuf_addstr(&fullname, filename);
+ pos = cache_name_pos(fullname.buf, fullname.len);
+ if (pos < 0)
+ pos = -pos - 1;
+ if (pos < active_nr) {
+ ce = active_cache[pos];
+ if (ce_namelen(ce) == fullname.len &&
+ !memcmp(ce->name, fullname.buf, fullname.len))
+ die("Path '%s' is in the index, but not '%s'.\n"
+ "Did you mean ':%d:%s' aka ':%d:./%s'?",
+ fullname.buf, filename,
+ ce_stage(ce), fullname.buf,
+ ce_stage(ce), filename);
+ }
+
+ if (file_exists(filename))
+ die("Path '%s' exists on disk, but not in the index.", filename);
+ if (is_missing_file_error(errno))
+ die("Path '%s' does not exist (neither on disk nor in the index).",
+ filename);
+
+ strbuf_release(&fullname);
+}
+
+
+static char *resolve_relative_path(const char *rel)
+{
+ if (!starts_with(rel, "./") && !starts_with(rel, "../"))
+ return NULL;
+
+ if (!is_inside_work_tree())
+ die("relative path syntax can't be used outside working tree.");
+
+ /* die() inside prefix_path() if resolved path is outside worktree */
+ return prefix_path(startup_info->prefix,
+ startup_info->prefix ? strlen(startup_info->prefix) : 0,
+ rel);
+}
+
+static int get_oid_with_context_1(const char *name,
+ unsigned flags,
+ const char *prefix,
+ struct object_id *oid,
+ struct object_context *oc)
+{
+ int ret, bracket_depth;
+ int namelen = strlen(name);
+ const char *cp;
+ int only_to_die = flags & GET_OID_ONLY_TO_DIE;
+
+ if (only_to_die)
+ flags |= GET_OID_QUIETLY;
+
+ memset(oc, 0, sizeof(*oc));
+ oc->mode = S_IFINVALID;
+ strbuf_init(&oc->symlink_path, 0);
+ ret = get_oid_1(name, namelen, oid, flags);
+ if (!ret)
+ return ret;
+ /*
+ * sha1:path --> object name of path in ent sha1
+ * :path -> object name of absolute path in index
+ * :./path -> object name of path relative to cwd in index
+ * :[0-3]:path -> object name of path in index at stage
+ * :/foo -> recent commit matching foo
+ */
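+ /*
+ * For instance (illustrative, assuming the paths are tracked):
+ * "HEAD:Makefile" names the blob at Makefile in the HEAD commit,
+ * ":Makefile" the stage-0 index entry for that path, ":2:Makefile" its
+ * stage-2 ("ours") entry during a merge, and ":/typo" a recent commit
+ * whose message matches "typo".
+ */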
+ if (name[0] == ':') {
+ int stage = 0;
+ const struct cache_entry *ce;
+ char *new_path = NULL;
+ int pos;
+ if (!only_to_die && namelen > 2 && name[1] == '/') {
+ struct commit_list *list = NULL;
+
+ for_each_ref(handle_one_ref, &list);
+ commit_list_sort_by_date(&list);
+ return get_oid_oneline(name + 2, oid, list);
+ }
+ if (namelen < 3 ||
+ name[2] != ':' ||
+ name[1] < '0' || '3' < name[1])
+ cp = name + 1;
+ else {
+ stage = name[1] - '0';
+ cp = name + 3;
+ }
+ new_path = resolve_relative_path(cp);
+ if (!new_path) {
+ namelen = namelen - (cp - name);
+ } else {
+ cp = new_path;
+ namelen = strlen(cp);
+ }
+
+ if (flags & GET_OID_RECORD_PATH)
+ oc->path = xstrdup(cp);
+
+ if (!active_cache)
+ read_cache();
+ pos = cache_name_pos(cp, namelen);
+ if (pos < 0)
+ pos = -pos - 1;
+ while (pos < active_nr) {
+ ce = active_cache[pos];
+ if (ce_namelen(ce) != namelen ||
+ memcmp(ce->name, cp, namelen))
+ break;
+ if (ce_stage(ce) == stage) {
+ oidcpy(oid, &ce->oid);
+ oc->mode = ce->ce_mode;
+ free(new_path);
+ return 0;
+ }
+ pos++;
+ }
+ if (only_to_die && name[1] && name[1] != '/')
+ diagnose_invalid_index_path(stage, prefix, cp);
+ free(new_path);
+ return -1;
+ }
+ for (cp = name, bracket_depth = 0; *cp; cp++) {
+ if (*cp == '{')
+ bracket_depth++;
+ else if (bracket_depth && *cp == '}')
+ bracket_depth--;
+ else if (!bracket_depth && *cp == ':')
+ break;
+ }
+ if (*cp == ':') {
+ struct object_id tree_oid;
+ int len = cp - name;
+ unsigned sub_flags = flags;
+
+ sub_flags &= ~GET_OID_DISAMBIGUATORS;
+ sub_flags |= GET_OID_TREEISH;
+
+ if (!get_oid_1(name, len, &tree_oid, sub_flags)) {
+ const char *filename = cp+1;
+ char *new_filename = NULL;
+
+ new_filename = resolve_relative_path(filename);
+ if (new_filename)
+ filename = new_filename;
+ if (flags & GET_OID_FOLLOW_SYMLINKS) {
+ ret = get_tree_entry_follow_symlinks(tree_oid.hash,
+ filename, oid->hash, &oc->symlink_path,
+ &oc->mode);
+ } else {
+ ret = get_tree_entry(&tree_oid, filename, oid,
+ &oc->mode);
+ if (ret && only_to_die) {
+ diagnose_invalid_oid_path(prefix,
+ filename,
+ &tree_oid,
+ name, len);
+ }
+ }
+ hashcpy(oc->tree, tree_oid.hash);
+ if (flags & GET_OID_RECORD_PATH)
+ oc->path = xstrdup(filename);
+
+ free(new_filename);
+ return ret;
+ } else {
+ if (only_to_die)
+ die("Invalid object name '%.*s'.", len, name);
+ }
+ }
+ return ret;
+}
+
+/*
+ * Call this function when you know "name" given by the end user must
+ * name an object but it doesn't; the function _may_ die with a better
+ * diagnostic message than "no such object 'name'", e.g. "Path 'doc' does not
+ * exist in 'HEAD'" when given "HEAD:doc", or it may return in which case
+ * you have a chance to diagnose the error further.
+ */
+void maybe_die_on_misspelt_object_name(const char *name, const char *prefix)
+{
+ struct object_context oc;
+ struct object_id oid;
+ get_oid_with_context_1(name, GET_OID_ONLY_TO_DIE, prefix, &oid, &oc);
+}
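+
+/*
+ * A minimal sketch of the intended calling pattern (hypothetical caller,
+ * not taken from this patch):
+ *
+ *   if (get_oid_with_context(arg, 0, &oid, &oc)) {
+ *       maybe_die_on_misspelt_object_name(arg, prefix);
+ *       die("bad object name '%s'", arg);
+ *   }
+ *
+ * i.e. the helper either dies with a more specific diagnosis or returns,
+ * letting the caller fall back on a generic error.
+ */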
+
+int get_oid_with_context(const char *str, unsigned flags, struct object_id *oid, struct object_context *oc)
+{
+ if (flags & GET_OID_FOLLOW_SYMLINKS && flags & GET_OID_ONLY_TO_DIE)
+ die("BUG: incompatible flags for get_sha1_with_context");
+ return get_oid_with_context_1(str, flags, NULL, oid, oc);
+}
+++ /dev/null
-/*
- * GIT - The information manager from hell
- *
- * Copyright (C) Linus Torvalds, 2005
- *
- * This handles basic git sha1 object files - packing, unpacking,
- * creation etc.
- */
-#include "cache.h"
-#include "config.h"
-#include "string-list.h"
-#include "lockfile.h"
-#include "delta.h"
-#include "pack.h"
-#include "blob.h"
-#include "commit.h"
-#include "run-command.h"
-#include "tag.h"
-#include "tree.h"
-#include "tree-walk.h"
-#include "refs.h"
-#include "pack-revindex.h"
-#include "sha1-lookup.h"
-#include "bulk-checkin.h"
-#include "repository.h"
-#include "streaming.h"
-#include "dir.h"
-#include "list.h"
-#include "mergesort.h"
-#include "quote.h"
-#include "packfile.h"
-#include "fetch-object.h"
-#include "object-store.h"
-
-/* The maximum size for an object header. */
-#define MAX_HEADER_LEN 32
-
-const unsigned char null_sha1[GIT_MAX_RAWSZ];
-const struct object_id null_oid;
-const struct object_id empty_tree_oid = {
- EMPTY_TREE_SHA1_BIN_LITERAL
-};
-const struct object_id empty_blob_oid = {
- EMPTY_BLOB_SHA1_BIN_LITERAL
-};
-
-static void git_hash_sha1_init(git_hash_ctx *ctx)
-{
- git_SHA1_Init(&ctx->sha1);
-}
-
-static void git_hash_sha1_update(git_hash_ctx *ctx, const void *data, size_t len)
-{
- git_SHA1_Update(&ctx->sha1, data, len);
-}
-
-static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx)
-{
- git_SHA1_Final(hash, &ctx->sha1);
-}
-
-static void git_hash_unknown_init(git_hash_ctx *ctx)
-{
- die("trying to init unknown hash");
-}
-
-static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
-{
- die("trying to update unknown hash");
-}
-
-static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
-{
- die("trying to finalize unknown hash");
-}
-
-const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
- {
- NULL,
- 0x00000000,
- 0,
- 0,
- git_hash_unknown_init,
- git_hash_unknown_update,
- git_hash_unknown_final,
- NULL,
- NULL,
- },
- {
- "sha-1",
- /* "sha1", big-endian */
- 0x73686131,
- GIT_SHA1_RAWSZ,
- GIT_SHA1_HEXSZ,
- git_hash_sha1_init,
- git_hash_sha1_update,
- git_hash_sha1_final,
- &empty_tree_oid,
- &empty_blob_oid,
- },
-};
-
-/*
- * This is meant to hold a *small* number of objects that you would
- * want read_sha1_file() to be able to return, but yet you do not want
- * to write them into the object store (e.g. a browse-only
- * application).
- */
-static struct cached_object {
- unsigned char sha1[20];
- enum object_type type;
- void *buf;
- unsigned long size;
-} *cached_objects;
-static int cached_object_nr, cached_object_alloc;
-
-static struct cached_object empty_tree = {
- EMPTY_TREE_SHA1_BIN_LITERAL,
- OBJ_TREE,
- "",
- 0
-};
-
-static struct cached_object *find_cached_object(const unsigned char *sha1)
-{
- int i;
- struct cached_object *co = cached_objects;
-
- for (i = 0; i < cached_object_nr; i++, co++) {
- if (!hashcmp(co->sha1, sha1))
- return co;
- }
- if (!hashcmp(sha1, empty_tree.sha1))
- return &empty_tree;
- return NULL;
-}
-
-
-static int get_conv_flags(unsigned flags)
-{
- if (flags & HASH_RENORMALIZE)
- return CONV_EOL_RENORMALIZE;
- else if (flags & HASH_WRITE_OBJECT)
- return global_conv_flags_eol;
- else
- return 0;
-}
-
-
-int mkdir_in_gitdir(const char *path)
-{
- if (mkdir(path, 0777)) {
- int saved_errno = errno;
- struct stat st;
- struct strbuf sb = STRBUF_INIT;
-
- if (errno != EEXIST)
- return -1;
- /*
- * Are we looking at a path in a symlinked worktree
- * whose original repository does not yet have it?
- * e.g. .git/rr-cache pointing at its original
- * repository in which the user hasn't performed any
- * conflict resolution yet?
- */
- if (lstat(path, &st) || !S_ISLNK(st.st_mode) ||
- strbuf_readlink(&sb, path, st.st_size) ||
- !is_absolute_path(sb.buf) ||
- mkdir(sb.buf, 0777)) {
- strbuf_release(&sb);
- errno = saved_errno;
- return -1;
- }
- strbuf_release(&sb);
- }
- return adjust_shared_perm(path);
-}
-
-enum scld_error safe_create_leading_directories(char *path)
-{
- char *next_component = path + offset_1st_component(path);
- enum scld_error ret = SCLD_OK;
-
- while (ret == SCLD_OK && next_component) {
- struct stat st;
- char *slash = next_component, slash_character;
-
- while (*slash && !is_dir_sep(*slash))
- slash++;
-
- if (!*slash)
- break;
-
- next_component = slash + 1;
- while (is_dir_sep(*next_component))
- next_component++;
- if (!*next_component)
- break;
-
- slash_character = *slash;
- *slash = '\0';
- if (!stat(path, &st)) {
- /* path exists */
- if (!S_ISDIR(st.st_mode)) {
- errno = ENOTDIR;
- ret = SCLD_EXISTS;
- }
- } else if (mkdir(path, 0777)) {
- if (errno == EEXIST &&
- !stat(path, &st) && S_ISDIR(st.st_mode))
- ; /* somebody created it since we checked */
- else if (errno == ENOENT)
- /*
- * Either mkdir() failed because
- * somebody just pruned the containing
- * directory, or stat() failed because
- * the file that was in our way was
- * just removed. Either way, inform
- * the caller that it might be worth
- * trying again:
- */
- ret = SCLD_VANISHED;
- else
- ret = SCLD_FAILED;
- } else if (adjust_shared_perm(path)) {
- ret = SCLD_PERMS;
- }
- *slash = slash_character;
- }
- return ret;
-}
-
-enum scld_error safe_create_leading_directories_const(const char *path)
-{
- int save_errno;
- /* path points to cache entries, so xstrdup before messing with it */
- char *buf = xstrdup(path);
- enum scld_error result = safe_create_leading_directories(buf);
-
- save_errno = errno;
- free(buf);
- errno = save_errno;
- return result;
-}
-
-int raceproof_create_file(const char *path, create_file_fn fn, void *cb)
-{
- /*
- * The number of times we will try to remove empty directories
- * in the way of path. This is only 1 because if another
- * process is racily creating directories that conflict with
- * us, we don't want to fight against them.
- */
- int remove_directories_remaining = 1;
-
- /*
- * The number of times that we will try to create the
- * directories containing path. We are willing to attempt this
- * more than once, because another process could be trying to
- * clean up empty directories at the same time as we are
- * trying to create them.
- */
- int create_directories_remaining = 3;
-
- /* A scratch copy of path, filled lazily if we need it: */
- struct strbuf path_copy = STRBUF_INIT;
-
- int ret, save_errno;
-
- /* Sanity check: */
- assert(*path);
-
-retry_fn:
- ret = fn(path, cb);
- save_errno = errno;
- if (!ret)
- goto out;
-
- if (errno == EISDIR && remove_directories_remaining-- > 0) {
- /*
- * A directory is in the way. Maybe it is empty; try
- * to remove it:
- */
- if (!path_copy.len)
- strbuf_addstr(&path_copy, path);
-
- if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY))
- goto retry_fn;
- } else if (errno == ENOENT && create_directories_remaining-- > 0) {
- /*
- * Maybe the containing directory didn't exist, or
- * maybe it was just deleted by a process that is
- * racing with us to clean up empty directories. Try
- * to create it:
- */
- enum scld_error scld_result;
-
- if (!path_copy.len)
- strbuf_addstr(&path_copy, path);
-
- do {
- scld_result = safe_create_leading_directories(path_copy.buf);
- if (scld_result == SCLD_OK)
- goto retry_fn;
- } while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0);
- }
-
-out:
- strbuf_release(&path_copy);
- errno = save_errno;
- return ret;
-}
-
-static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1)
-{
- int i;
- for (i = 0; i < 20; i++) {
- static char hex[] = "0123456789abcdef";
- unsigned int val = sha1[i];
- strbuf_addch(buf, hex[val >> 4]);
- strbuf_addch(buf, hex[val & 0xf]);
- if (!i)
- strbuf_addch(buf, '/');
- }
-}
-
-void sha1_file_name(struct repository *r, struct strbuf *buf, const unsigned char *sha1)
-{
- strbuf_addstr(buf, r->objects->objectdir);
- strbuf_addch(buf, '/');
- fill_sha1_path(buf, sha1);
-}
-
-struct strbuf *alt_scratch_buf(struct alternate_object_database *alt)
-{
- strbuf_setlen(&alt->scratch, alt->base_len);
- return &alt->scratch;
-}
-
-static const char *alt_sha1_path(struct alternate_object_database *alt,
- const unsigned char *sha1)
-{
- struct strbuf *buf = alt_scratch_buf(alt);
- fill_sha1_path(buf, sha1);
- return buf->buf;
-}
-
-/*
- * Return non-zero iff the path is usable as an alternate object database.
- */
-static int alt_odb_usable(struct raw_object_store *o,
- struct strbuf *path,
- const char *normalized_objdir)
-{
- struct alternate_object_database *alt;
-
- /* Detect cases where alternate disappeared */
- if (!is_directory(path->buf)) {
- error("object directory %s does not exist; "
- "check .git/objects/info/alternates.",
- path->buf);
- return 0;
- }
-
- /*
- * Prevent the common mistake of listing the same
- * thing twice, or object directory itself.
- */
- for (alt = o->alt_odb_list; alt; alt = alt->next) {
- if (!fspathcmp(path->buf, alt->path))
- return 0;
- }
- if (!fspathcmp(path->buf, normalized_objdir))
- return 0;
-
- return 1;
-}
-
-/*
- * Prepare alternate object database registry.
- *
- * The variable alt_odb_list points at the list of struct
- * alternate_object_database. The elements on this list come from
- * non-empty elements from colon separated ALTERNATE_DB_ENVIRONMENT
- * environment variable, and $GIT_OBJECT_DIRECTORY/info/alternates,
- * whose contents is similar to that environment variable but can be
- * LF separated. Its base points at a statically allocated buffer that
- * contains "/the/directory/corresponding/to/.git/objects/...", while
- * its name points just after the slash at the end of ".git/objects/"
- * in the example above, and has enough space to hold 40-byte hex
- * SHA1, an extra slash for the first level indirection, and the
- * terminating NUL.
- */
-static void read_info_alternates(struct repository *r,
- const char *relative_base,
- int depth);
-static int link_alt_odb_entry(struct repository *r, const char *entry,
- const char *relative_base, int depth, const char *normalized_objdir)
-{
- struct alternate_object_database *ent;
- struct strbuf pathbuf = STRBUF_INIT;
-
- if (!is_absolute_path(entry) && relative_base) {
- strbuf_realpath(&pathbuf, relative_base, 1);
- strbuf_addch(&pathbuf, '/');
- }
- strbuf_addstr(&pathbuf, entry);
-
- if (strbuf_normalize_path(&pathbuf) < 0 && relative_base) {
- error("unable to normalize alternate object path: %s",
- pathbuf.buf);
- strbuf_release(&pathbuf);
- return -1;
- }
-
- /*
- * The trailing slash after the directory name is given by
- * this function at the end. Remove duplicates.
- */
- while (pathbuf.len && pathbuf.buf[pathbuf.len - 1] == '/')
- strbuf_setlen(&pathbuf, pathbuf.len - 1);
-
- if (!alt_odb_usable(r->objects, &pathbuf, normalized_objdir)) {
- strbuf_release(&pathbuf);
- return -1;
- }
-
- ent = alloc_alt_odb(pathbuf.buf);
-
- /* add the alternate entry */
- *r->objects->alt_odb_tail = ent;
- r->objects->alt_odb_tail = &(ent->next);
- ent->next = NULL;
-
- /* recursively add alternates */
- read_info_alternates(r, pathbuf.buf, depth + 1);
-
- strbuf_release(&pathbuf);
- return 0;
-}
-
-static const char *parse_alt_odb_entry(const char *string,
- int sep,
- struct strbuf *out)
-{
- const char *end;
-
- strbuf_reset(out);
-
- if (*string == '#') {
- /* comment; consume up to next separator */
- end = strchrnul(string, sep);
- } else if (*string == '"' && !unquote_c_style(out, string, &end)) {
- /*
- * quoted path; unquote_c_style has copied the
- * data for us and set "end". Broken quoting (e.g.,
- * an entry that doesn't end with a quote) falls
- * back to the unquoted case below.
- */
- } else {
- /* normal, unquoted path */
- end = strchrnul(string, sep);
- strbuf_add(out, string, end - string);
- }
-
- if (*end)
- end++;
- return end;
-}
-
-static void link_alt_odb_entries(struct repository *r, const char *alt,
- int sep, const char *relative_base, int depth)
-{
- struct strbuf objdirbuf = STRBUF_INIT;
- struct strbuf entry = STRBUF_INIT;
-
- if (!alt || !*alt)
- return;
-
- if (depth > 5) {
- error("%s: ignoring alternate object stores, nesting too deep.",
- relative_base);
- return;
- }
-
- strbuf_add_absolute_path(&objdirbuf, r->objects->objectdir);
- if (strbuf_normalize_path(&objdirbuf) < 0)
- die("unable to normalize object directory: %s",
- objdirbuf.buf);
-
- while (*alt) {
- alt = parse_alt_odb_entry(alt, sep, &entry);
- if (!entry.len)
- continue;
- link_alt_odb_entry(r, entry.buf,
- relative_base, depth, objdirbuf.buf);
- }
- strbuf_release(&entry);
- strbuf_release(&objdirbuf);
-}
-
-static void read_info_alternates(struct repository *r,
- const char *relative_base,
- int depth)
-{
- char *path;
- struct strbuf buf = STRBUF_INIT;
-
- path = xstrfmt("%s/info/alternates", relative_base);
- if (strbuf_read_file(&buf, path, 1024) < 0) {
- warn_on_fopen_errors(path);
- free(path);
- return;
- }
-
- link_alt_odb_entries(r, buf.buf, '\n', relative_base, depth);
- strbuf_release(&buf);
- free(path);
-}
-
-struct alternate_object_database *alloc_alt_odb(const char *dir)
-{
- struct alternate_object_database *ent;
-
- FLEX_ALLOC_STR(ent, path, dir);
- strbuf_init(&ent->scratch, 0);
- strbuf_addf(&ent->scratch, "%s/", dir);
- ent->base_len = ent->scratch.len;
-
- return ent;
-}
-
-void add_to_alternates_file(const char *reference)
-{
- struct lock_file lock = LOCK_INIT;
- char *alts = git_pathdup("objects/info/alternates");
- FILE *in, *out;
- int found = 0;
-
- hold_lock_file_for_update(&lock, alts, LOCK_DIE_ON_ERROR);
- out = fdopen_lock_file(&lock, "w");
- if (!out)
- die_errno("unable to fdopen alternates lockfile");
-
- in = fopen(alts, "r");
- if (in) {
- struct strbuf line = STRBUF_INIT;
-
- while (strbuf_getline(&line, in) != EOF) {
- if (!strcmp(reference, line.buf)) {
- found = 1;
- break;
- }
- fprintf_or_die(out, "%s\n", line.buf);
- }
-
- strbuf_release(&line);
- fclose(in);
- }
- else if (errno != ENOENT)
- die_errno("unable to read alternates file");
-
- if (found) {
- rollback_lock_file(&lock);
- } else {
- fprintf_or_die(out, "%s\n", reference);
- if (commit_lock_file(&lock))
- die_errno("unable to move new alternates file into place");
- if (the_repository->objects->alt_odb_tail)
- link_alt_odb_entries(the_repository, reference,
- '\n', NULL, 0);
- }
- free(alts);
-}
-
-void add_to_alternates_memory(const char *reference)
-{
- /*
- * Make sure alternates are initialized, or else our entry may be
- * overwritten when they are.
- */
- prepare_alt_odb(the_repository);
-
- link_alt_odb_entries(the_repository, reference,
- '\n', NULL, 0);
-}
-
-/*
- * Compute the exact path an alternate is at and returns it. In case of
- * error NULL is returned and the human readable error is added to `err`
- * `path` may be relative and should point to $GITDIR.
- * `err` must not be null.
- */
-char *compute_alternate_path(const char *path, struct strbuf *err)
-{
- char *ref_git = NULL;
- const char *repo, *ref_git_s;
- int seen_error = 0;
-
- ref_git_s = real_path_if_valid(path);
- if (!ref_git_s) {
- seen_error = 1;
- strbuf_addf(err, _("path '%s' does not exist"), path);
- goto out;
- } else
- /*
- * Beware: read_gitfile(), real_path() and mkpath()
- * return static buffer
- */
- ref_git = xstrdup(ref_git_s);
-
- repo = read_gitfile(ref_git);
- if (!repo)
- repo = read_gitfile(mkpath("%s/.git", ref_git));
- if (repo) {
- free(ref_git);
- ref_git = xstrdup(repo);
- }
-
- if (!repo && is_directory(mkpath("%s/.git/objects", ref_git))) {
- char *ref_git_git = mkpathdup("%s/.git", ref_git);
- free(ref_git);
- ref_git = ref_git_git;
- } else if (!is_directory(mkpath("%s/objects", ref_git))) {
- struct strbuf sb = STRBUF_INIT;
- seen_error = 1;
- if (get_common_dir(&sb, ref_git)) {
- strbuf_addf(err,
- _("reference repository '%s' as a linked "
- "checkout is not supported yet."),
- path);
- goto out;
- }
-
- strbuf_addf(err, _("reference repository '%s' is not a "
- "local repository."), path);
- goto out;
- }
-
- if (!access(mkpath("%s/shallow", ref_git), F_OK)) {
- strbuf_addf(err, _("reference repository '%s' is shallow"),
- path);
- seen_error = 1;
- goto out;
- }
-
- if (!access(mkpath("%s/info/grafts", ref_git), F_OK)) {
- strbuf_addf(err,
- _("reference repository '%s' is grafted"),
- path);
- seen_error = 1;
- goto out;
- }
-
-out:
- if (seen_error) {
- FREE_AND_NULL(ref_git);
- }
-
- return ref_git;
-}
-
-int foreach_alt_odb(alt_odb_fn fn, void *cb)
-{
- struct alternate_object_database *ent;
- int r = 0;
-
- prepare_alt_odb(the_repository);
- for (ent = the_repository->objects->alt_odb_list; ent; ent = ent->next) {
- r = fn(ent, cb);
- if (r)
- break;
- }
- return r;
-}
-
-void prepare_alt_odb(struct repository *r)
-{
- if (r->objects->alt_odb_tail)
- return;
-
- r->objects->alt_odb_tail = &r->objects->alt_odb_list;
- link_alt_odb_entries(r, r->objects->alternate_db, PATH_SEP, NULL, 0);
-
- read_info_alternates(r, r->objects->objectdir, 0);
-}
-
-/* Returns 1 if we have successfully freshened the file, 0 otherwise. */
-static int freshen_file(const char *fn)
-{
- struct utimbuf t;
- t.actime = t.modtime = time(NULL);
- return !utime(fn, &t);
-}
-
-/*
- * All of the check_and_freshen functions return 1 if the file exists and was
- * freshened (if freshening was requested), 0 otherwise. If they return
- * 0, you should not assume that it is safe to skip a write of the object (it
- * either does not exist on disk, or has a stale mtime and may be subject to
- * pruning).
- */
-int check_and_freshen_file(const char *fn, int freshen)
-{
- if (access(fn, F_OK))
- return 0;
- if (freshen && !freshen_file(fn))
- return 0;
- return 1;
-}
-
-static int check_and_freshen_local(const unsigned char *sha1, int freshen)
-{
- static struct strbuf buf = STRBUF_INIT;
-
- strbuf_reset(&buf);
- sha1_file_name(the_repository, &buf, sha1);
-
- return check_and_freshen_file(buf.buf, freshen);
-}
-
-static int check_and_freshen_nonlocal(const unsigned char *sha1, int freshen)
-{
- struct alternate_object_database *alt;
- prepare_alt_odb(the_repository);
- for (alt = the_repository->objects->alt_odb_list; alt; alt = alt->next) {
- const char *path = alt_sha1_path(alt, sha1);
- if (check_and_freshen_file(path, freshen))
- return 1;
- }
- return 0;
-}
-
-static int check_and_freshen(const unsigned char *sha1, int freshen)
-{
- return check_and_freshen_local(sha1, freshen) ||
- check_and_freshen_nonlocal(sha1, freshen);
-}
-
-int has_loose_object_nonlocal(const unsigned char *sha1)
-{
- return check_and_freshen_nonlocal(sha1, 0);
-}
-
-static int has_loose_object(const unsigned char *sha1)
-{
- return check_and_freshen(sha1, 0);
-}
-
-static void mmap_limit_check(size_t length)
-{
- static size_t limit = 0;
- if (!limit) {
- limit = git_env_ulong("GIT_MMAP_LIMIT", 0);
- if (!limit)
- limit = SIZE_MAX;
- }
- if (length > limit)
- die("attempting to mmap %"PRIuMAX" over limit %"PRIuMAX,
- (uintmax_t)length, (uintmax_t)limit);
-}
-
-void *xmmap_gently(void *start, size_t length,
- int prot, int flags, int fd, off_t offset)
-{
- void *ret;
-
- mmap_limit_check(length);
- ret = mmap(start, length, prot, flags, fd, offset);
- if (ret == MAP_FAILED) {
- if (!length)
- return NULL;
- release_pack_memory(length);
- ret = mmap(start, length, prot, flags, fd, offset);
- }
- return ret;
-}
-
-void *xmmap(void *start, size_t length,
- int prot, int flags, int fd, off_t offset)
-{
- void *ret = xmmap_gently(start, length, prot, flags, fd, offset);
- if (ret == MAP_FAILED)
- die_errno("mmap failed");
- return ret;
-}
-
-/*
- * With an in-core object data in "map", rehash it to make sure the
- * object name actually matches "sha1" to detect object corruption.
- * With "map" == NULL, try reading the object named with "sha1" using
- * the streaming interface and rehash it to do the same.
- */
-int check_object_signature(const struct object_id *oid, void *map,
- unsigned long size, const char *type)
-{
- struct object_id real_oid;
- enum object_type obj_type;
- struct git_istream *st;
- git_hash_ctx c;
- char hdr[MAX_HEADER_LEN];
- int hdrlen;
-
- if (map) {
- hash_object_file(map, size, type, &real_oid);
- return oidcmp(oid, &real_oid) ? -1 : 0;
- }
-
- st = open_istream(oid, &obj_type, &size, NULL);
- if (!st)
- return -1;
-
- /* Generate the header */
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(obj_type), size) + 1;
-
- /* Sha1.. */
- the_hash_algo->init_fn(&c);
- the_hash_algo->update_fn(&c, hdr, hdrlen);
- for (;;) {
- char buf[1024 * 16];
- ssize_t readlen = read_istream(st, buf, sizeof(buf));
-
- if (readlen < 0) {
- close_istream(st);
- return -1;
- }
- if (!readlen)
- break;
- the_hash_algo->update_fn(&c, buf, readlen);
- }
- the_hash_algo->final_fn(real_oid.hash, &c);
- close_istream(st);
- return oidcmp(oid, &real_oid) ? -1 : 0;
-}
-
-int git_open_cloexec(const char *name, int flags)
-{
- int fd;
- static int o_cloexec = O_CLOEXEC;
-
- fd = open(name, flags | o_cloexec);
- if ((o_cloexec & O_CLOEXEC) && fd < 0 && errno == EINVAL) {
- /* Try again w/o O_CLOEXEC: the kernel might not support it */
- o_cloexec &= ~O_CLOEXEC;
- fd = open(name, flags | o_cloexec);
- }
-
-#if defined(F_GETFD) && defined(F_SETFD) && defined(FD_CLOEXEC)
- {
- static int fd_cloexec = FD_CLOEXEC;
-
- if (!o_cloexec && 0 <= fd && fd_cloexec) {
- /* Opened w/o O_CLOEXEC? try with fcntl(2) to add it */
- int flags = fcntl(fd, F_GETFD);
- if (fcntl(fd, F_SETFD, flags | fd_cloexec))
- fd_cloexec = 0;
- }
- }
-#endif
- return fd;
-}
-
-/*
- * Find "sha1" as a loose object in the local repository or in an alternate.
- * Returns 0 on success, negative on failure.
- *
- * The "path" out-parameter will give the path of the object we found (if any).
- * Note that it may point to static storage and is only valid until another
- * call to sha1_file_name(), etc.
- */
-static int stat_sha1_file(struct repository *r, const unsigned char *sha1,
- struct stat *st, const char **path)
-{
- struct alternate_object_database *alt;
- static struct strbuf buf = STRBUF_INIT;
-
- strbuf_reset(&buf);
- sha1_file_name(r, &buf, sha1);
- *path = buf.buf;
-
- if (!lstat(*path, st))
- return 0;
-
- prepare_alt_odb(r);
- errno = ENOENT;
- for (alt = r->objects->alt_odb_list; alt; alt = alt->next) {
- *path = alt_sha1_path(alt, sha1);
- if (!lstat(*path, st))
- return 0;
- }
-
- return -1;
-}
-
-/*
- * Like stat_sha1_file(), but actually open the object and return the
- * descriptor. See the caveats on the "path" parameter above.
- */
-static int open_sha1_file(struct repository *r,
- const unsigned char *sha1, const char **path)
-{
- int fd;
- struct alternate_object_database *alt;
- int most_interesting_errno;
- static struct strbuf buf = STRBUF_INIT;
-
- strbuf_reset(&buf);
- sha1_file_name(r, &buf, sha1);
- *path = buf.buf;
-
- fd = git_open(*path);
- if (fd >= 0)
- return fd;
- most_interesting_errno = errno;
-
- prepare_alt_odb(r);
- for (alt = r->objects->alt_odb_list; alt; alt = alt->next) {
- *path = alt_sha1_path(alt, sha1);
- fd = git_open(*path);
- if (fd >= 0)
- return fd;
- if (most_interesting_errno == ENOENT)
- most_interesting_errno = errno;
- }
- errno = most_interesting_errno;
- return -1;
-}
-
-/*
- * Map the loose object at "path" if it is not NULL, or the path found by
- * searching for a loose object named "sha1".
- */
-static void *map_sha1_file_1(struct repository *r, const char *path,
- const unsigned char *sha1, unsigned long *size)
-{
- void *map;
- int fd;
-
- if (path)
- fd = git_open(path);
- else
- fd = open_sha1_file(r, sha1, &path);
- map = NULL;
- if (fd >= 0) {
- struct stat st;
-
- if (!fstat(fd, &st)) {
- *size = xsize_t(st.st_size);
- if (!*size) {
- /* mmap() is forbidden on empty files */
- error("object file %s is empty", path);
- return NULL;
- }
- map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
- }
- close(fd);
- }
- return map;
-}
-
-void *map_sha1_file(struct repository *r,
- const unsigned char *sha1, unsigned long *size)
-{
- return map_sha1_file_1(r, NULL, sha1, size);
-}
-
-static int unpack_sha1_short_header(git_zstream *stream,
- unsigned char *map, unsigned long mapsize,
- void *buffer, unsigned long bufsiz)
-{
- /* Get the data stream */
- memset(stream, 0, sizeof(*stream));
- stream->next_in = map;
- stream->avail_in = mapsize;
- stream->next_out = buffer;
- stream->avail_out = bufsiz;
-
- git_inflate_init(stream);
- return git_inflate(stream, 0);
-}
-
-int unpack_sha1_header(git_zstream *stream,
- unsigned char *map, unsigned long mapsize,
- void *buffer, unsigned long bufsiz)
-{
- int status = unpack_sha1_short_header(stream, map, mapsize,
- buffer, bufsiz);
-
- if (status < Z_OK)
- return status;
-
- /* Make sure we have the terminating NUL */
- if (!memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
- return -1;
- return 0;
-}
-
-static int unpack_sha1_header_to_strbuf(git_zstream *stream, unsigned char *map,
- unsigned long mapsize, void *buffer,
- unsigned long bufsiz, struct strbuf *header)
-{
- int status;
-
- status = unpack_sha1_short_header(stream, map, mapsize, buffer, bufsiz);
- if (status < Z_OK)
- return -1;
-
- /*
- * Check if entire header is unpacked in the first iteration.
- */
- if (memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
- return 0;
-
- /*
- * buffer[0..bufsiz] was not large enough. Copy the partial
- * result out to header, and then append the result of further
- * reading the stream.
- */
- strbuf_add(header, buffer, stream->next_out - (unsigned char *)buffer);
- stream->next_out = buffer;
- stream->avail_out = bufsiz;
-
- do {
- status = git_inflate(stream, 0);
- strbuf_add(header, buffer, stream->next_out - (unsigned char *)buffer);
- if (memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
- return 0;
- stream->next_out = buffer;
- stream->avail_out = bufsiz;
- } while (status != Z_STREAM_END);
- return -1;
-}
-
-static void *unpack_sha1_rest(git_zstream *stream, void *buffer, unsigned long size, const unsigned char *sha1)
-{
- int bytes = strlen(buffer) + 1;
- unsigned char *buf = xmallocz(size);
- unsigned long n;
- int status = Z_OK;
-
- n = stream->total_out - bytes;
- if (n > size)
- n = size;
- memcpy(buf, (char *) buffer + bytes, n);
- bytes = n;
- if (bytes <= size) {
- /*
- * The above condition must be (bytes <= size), not
- * (bytes < size). In other words, even though we
- * expect no more output and set avail_out to zero,
- * the input zlib stream may have bytes that express
- * "this concludes the stream", and we *do* want to
- * eat that input.
- *
- * Otherwise we would not be able to test that we
- * consumed all the input to reach the expected size;
- * we also want to check that zlib tells us that all
- * went well with status == Z_STREAM_END at the end.
- */
- stream->next_out = buf + bytes;
- stream->avail_out = size - bytes;
- while (status == Z_OK)
- status = git_inflate(stream, Z_FINISH);
- }
- if (status == Z_STREAM_END && !stream->avail_in) {
- git_inflate_end(stream);
- return buf;
- }
-
- if (status < 0)
- error("corrupt loose object '%s'", sha1_to_hex(sha1));
- else if (stream->avail_in)
- error("garbage at end of loose object '%s'",
- sha1_to_hex(sha1));
- free(buf);
- return NULL;
-}
-
-/*
- * We used to just use "sscanf()", but that's actually way
- * too permissive for what we want to check. So do a strict
- * object header parse by hand.
- */
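-/*
- * Concretely, the header at the start of the inflated loose object
- * stream looks like "<type> <decimal size>" followed by a NUL byte,
- * e.g. "blob 42\0", with the object contents coming right after it.
- */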
-static int parse_sha1_header_extended(const char *hdr, struct object_info *oi,
- unsigned int flags)
-{
- const char *type_buf = hdr;
- unsigned long size;
- int type, type_len = 0;
-
- /*
- * The type can be of any size but is followed by
- * a space.
- */
- for (;;) {
- char c = *hdr++;
- if (!c)
- return -1;
- if (c == ' ')
- break;
- type_len++;
- }
-
- type = type_from_string_gently(type_buf, type_len, 1);
- if (oi->type_name)
- strbuf_add(oi->type_name, type_buf, type_len);
- /*
-	 * Set type to 0 if it's an unknown object and
- * we're obtaining the type using '--allow-unknown-type'
- * option.
- */
- if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE) && (type < 0))
- type = 0;
- else if (type < 0)
- die("invalid object type");
- if (oi->typep)
- *oi->typep = type;
-
- /*
- * The length must follow immediately, and be in canonical
- * decimal format (ie "010" is not valid).
- */
- size = *hdr++ - '0';
- if (size > 9)
- return -1;
- if (size) {
- for (;;) {
- unsigned long c = *hdr - '0';
- if (c > 9)
- break;
- hdr++;
- size = size * 10 + c;
- }
- }
-
- if (oi->sizep)
- *oi->sizep = size;
-
- /*
- * The length must be followed by a zero byte
- */
- return *hdr ? -1 : type;
-}
-
-int parse_sha1_header(const char *hdr, unsigned long *sizep)
-{
- struct object_info oi = OBJECT_INFO_INIT;
-
- oi.sizep = sizep;
- return parse_sha1_header_extended(hdr, &oi, 0);
-}
-
-static int sha1_loose_object_info(struct repository *r,
- const unsigned char *sha1,
- struct object_info *oi, int flags)
-{
- int status = 0;
- unsigned long mapsize;
- void *map;
- git_zstream stream;
- char hdr[MAX_HEADER_LEN];
- struct strbuf hdrbuf = STRBUF_INIT;
- unsigned long size_scratch;
-
- if (oi->delta_base_sha1)
- hashclr(oi->delta_base_sha1);
-
- /*
- * If we don't care about type or size, then we don't
- * need to look inside the object at all. Note that we
- * do not optimize out the stat call, even if the
- * caller doesn't care about the disk-size, since our
- * return value implicitly indicates whether the
- * object even exists.
- */
- if (!oi->typep && !oi->type_name && !oi->sizep && !oi->contentp) {
- const char *path;
- struct stat st;
- if (stat_sha1_file(r, sha1, &st, &path) < 0)
- return -1;
- if (oi->disk_sizep)
- *oi->disk_sizep = st.st_size;
- return 0;
- }
-
- map = map_sha1_file(r, sha1, &mapsize);
- if (!map)
- return -1;
-
- if (!oi->sizep)
- oi->sizep = &size_scratch;
-
- if (oi->disk_sizep)
- *oi->disk_sizep = mapsize;
- if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE)) {
- if (unpack_sha1_header_to_strbuf(&stream, map, mapsize, hdr, sizeof(hdr), &hdrbuf) < 0)
- status = error("unable to unpack %s header with --allow-unknown-type",
- sha1_to_hex(sha1));
- } else if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
- status = error("unable to unpack %s header",
- sha1_to_hex(sha1));
- if (status < 0)
- ; /* Do nothing */
- else if (hdrbuf.len) {
- if ((status = parse_sha1_header_extended(hdrbuf.buf, oi, flags)) < 0)
- status = error("unable to parse %s header with --allow-unknown-type",
- sha1_to_hex(sha1));
- } else if ((status = parse_sha1_header_extended(hdr, oi, flags)) < 0)
- status = error("unable to parse %s header", sha1_to_hex(sha1));
-
- if (status >= 0 && oi->contentp) {
- *oi->contentp = unpack_sha1_rest(&stream, hdr,
- *oi->sizep, sha1);
- if (!*oi->contentp) {
- git_inflate_end(&stream);
- status = -1;
- }
- } else
- git_inflate_end(&stream);
-
- munmap(map, mapsize);
- if (status && oi->typep)
- *oi->typep = status;
- if (oi->sizep == &size_scratch)
- oi->sizep = NULL;
- strbuf_release(&hdrbuf);
- oi->whence = OI_LOOSE;
- return (status < 0) ? status : 0;
-}
-
-int fetch_if_missing = 1;
-
-int oid_object_info_extended(const struct object_id *oid, struct object_info *oi, unsigned flags)
-{
- static struct object_info blank_oi = OBJECT_INFO_INIT;
- struct pack_entry e;
- int rtype;
- const struct object_id *real = oid;
- int already_retried = 0;
-
- if (flags & OBJECT_INFO_LOOKUP_REPLACE)
- real = lookup_replace_object(oid);
-
- if (is_null_oid(real))
- return -1;
-
- if (!oi)
- oi = &blank_oi;
-
- if (!(flags & OBJECT_INFO_SKIP_CACHED)) {
- struct cached_object *co = find_cached_object(real->hash);
- if (co) {
- if (oi->typep)
- *(oi->typep) = co->type;
- if (oi->sizep)
- *(oi->sizep) = co->size;
- if (oi->disk_sizep)
- *(oi->disk_sizep) = 0;
- if (oi->delta_base_sha1)
- hashclr(oi->delta_base_sha1);
- if (oi->type_name)
- strbuf_addstr(oi->type_name, type_name(co->type));
- if (oi->contentp)
- *oi->contentp = xmemdupz(co->buf, co->size);
- oi->whence = OI_CACHED;
- return 0;
- }
- }
-
- while (1) {
- if (find_pack_entry(the_repository, real->hash, &e))
- break;
-
- if (flags & OBJECT_INFO_IGNORE_LOOSE)
- return -1;
-
- /* Most likely it's a loose object. */
- if (!sha1_loose_object_info(the_repository, real->hash, oi, flags))
- return 0;
-
- /* Not a loose object; someone else may have just packed it. */
- if (!(flags & OBJECT_INFO_QUICK)) {
- reprepare_packed_git(the_repository);
- if (find_pack_entry(the_repository, real->hash, &e))
- break;
- }
-
- /* Check if it is a missing object */
- if (fetch_if_missing && repository_format_partial_clone &&
- !already_retried) {
- /*
-			 * TODO Investigate having fetch_object() return
- * TODO error/success and stopping the music here.
- */
- fetch_object(repository_format_partial_clone, real->hash);
- already_retried = 1;
- continue;
- }
-
- return -1;
- }
-
- if (oi == &blank_oi)
- /*
- * We know that the caller doesn't actually need the
- * information below, so return early.
- */
- return 0;
- rtype = packed_object_info(e.p, e.offset, oi);
- if (rtype < 0) {
- mark_bad_packed_object(e.p, real->hash);
- return oid_object_info_extended(real, oi, 0);
- } else if (oi->whence == OI_PACKED) {
- oi->u.packed.offset = e.offset;
- oi->u.packed.pack = e.p;
- oi->u.packed.is_delta = (rtype == OBJ_REF_DELTA ||
- rtype == OBJ_OFS_DELTA);
- }
-
- return 0;
-}
-
-/* returns enum object_type or negative */
-int oid_object_info(const struct object_id *oid, unsigned long *sizep)
-{
- enum object_type type;
- struct object_info oi = OBJECT_INFO_INIT;
-
- oi.typep = &type;
- oi.sizep = sizep;
- if (oid_object_info_extended(oid, &oi,
- OBJECT_INFO_LOOKUP_REPLACE) < 0)
- return -1;
- return type;
-}
-
-static void *read_object(const unsigned char *sha1, enum object_type *type,
- unsigned long *size)
-{
- struct object_id oid;
- struct object_info oi = OBJECT_INFO_INIT;
- void *content;
- oi.typep = type;
- oi.sizep = size;
- oi.contentp = &content;
-
- hashcpy(oid.hash, sha1);
-
- if (oid_object_info_extended(&oid, &oi, 0) < 0)
- return NULL;
- return content;
-}
-
-int pretend_object_file(void *buf, unsigned long len, enum object_type type,
- struct object_id *oid)
-{
- struct cached_object *co;
-
- hash_object_file(buf, len, type_name(type), oid);
- if (has_sha1_file(oid->hash) || find_cached_object(oid->hash))
- return 0;
- ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc);
- co = &cached_objects[cached_object_nr++];
- co->size = len;
- co->type = type;
- co->buf = xmalloc(len);
- memcpy(co->buf, buf, len);
- hashcpy(co->sha1, oid->hash);
- return 0;
-}
-
-/*
- * This function dies on corrupt objects; the callers who want to
- * deal with them should arrange to call read_object() and give error
- * messages themselves.
- */
-void *read_object_file_extended(const struct object_id *oid,
- enum object_type *type,
- unsigned long *size,
- int lookup_replace)
-{
- void *data;
- const struct packed_git *p;
- const char *path;
- struct stat st;
- const struct object_id *repl = lookup_replace ? lookup_replace_object(oid)
- : oid;
-
- errno = 0;
- data = read_object(repl->hash, type, size);
- if (data)
- return data;
-
- if (errno && errno != ENOENT)
- die_errno("failed to read object %s", oid_to_hex(oid));
-
- /* die if we replaced an object with one that does not exist */
- if (repl != oid)
- die("replacement %s not found for %s",
- oid_to_hex(repl), oid_to_hex(oid));
-
- if (!stat_sha1_file(the_repository, repl->hash, &st, &path))
- die("loose object %s (stored in %s) is corrupt",
- oid_to_hex(repl), path);
-
- if ((p = has_packed_and_bad(repl->hash)) != NULL)
- die("packed object %s (stored in %s) is corrupt",
- oid_to_hex(repl), p->pack_name);
-
- return NULL;
-}
-
-void *read_object_with_reference(const struct object_id *oid,
- const char *required_type_name,
- unsigned long *size,
- struct object_id *actual_oid_return)
-{
- enum object_type type, required_type;
- void *buffer;
- unsigned long isize;
- struct object_id actual_oid;
-
- required_type = type_from_string(required_type_name);
- oidcpy(&actual_oid, oid);
- while (1) {
- int ref_length = -1;
- const char *ref_type = NULL;
-
- buffer = read_object_file(&actual_oid, &type, &isize);
- if (!buffer)
- return NULL;
- if (type == required_type) {
- *size = isize;
- if (actual_oid_return)
- oidcpy(actual_oid_return, &actual_oid);
- return buffer;
- }
- /* Handle references */
- else if (type == OBJ_COMMIT)
- ref_type = "tree ";
- else if (type == OBJ_TAG)
- ref_type = "object ";
- else {
- free(buffer);
- return NULL;
- }
- ref_length = strlen(ref_type);
-
- if (ref_length + GIT_SHA1_HEXSZ > isize ||
- memcmp(buffer, ref_type, ref_length) ||
- get_oid_hex((char *) buffer + ref_length, &actual_oid)) {
- free(buffer);
- return NULL;
- }
- free(buffer);
- /* Now we have the ID of the referred-to object in
- * actual_oid. Check again. */
- }
-}
-
-static void write_object_file_prepare(const void *buf, unsigned long len,
- const char *type, struct object_id *oid,
- char *hdr, int *hdrlen)
-{
- git_hash_ctx c;
-
- /* Generate the header */
- *hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1;
-
- /* Sha1.. */
- the_hash_algo->init_fn(&c);
- the_hash_algo->update_fn(&c, hdr, *hdrlen);
- the_hash_algo->update_fn(&c, buf, len);
- the_hash_algo->final_fn(oid->hash, &c);
-}
-
-/*
- * Move the just written object into its final resting place.
- */
-int finalize_object_file(const char *tmpfile, const char *filename)
-{
- int ret = 0;
-
- if (object_creation_mode == OBJECT_CREATION_USES_RENAMES)
- goto try_rename;
- else if (link(tmpfile, filename))
- ret = errno;
-
- /*
- * Coda hack - coda doesn't like cross-directory links,
- * so we fall back to a rename, which will mean that it
- * won't be able to check collisions, but that's not a
- * big deal.
- *
- * The same holds for FAT formatted media.
- *
- * When this succeeds, we just return. We have nothing
- * left to unlink.
- */
- if (ret && ret != EEXIST) {
- try_rename:
- if (!rename(tmpfile, filename))
- goto out;
- ret = errno;
- }
- unlink_or_warn(tmpfile);
- if (ret) {
- if (ret != EEXIST) {
- return error_errno("unable to write sha1 filename %s", filename);
- }
- /* FIXME!!! Collision check here ? */
- }
-
-out:
- if (adjust_shared_perm(filename))
- return error("unable to set permission to '%s'", filename);
- return 0;
-}
-
-static int write_buffer(int fd, const void *buf, size_t len)
-{
- if (write_in_full(fd, buf, len) < 0)
- return error_errno("file write error");
- return 0;
-}
-
-int hash_object_file(const void *buf, unsigned long len, const char *type,
- struct object_id *oid)
-{
- char hdr[MAX_HEADER_LEN];
- int hdrlen = sizeof(hdr);
- write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
- return 0;
-}
-
-/* Finalize a file on disk, and close it. */
-static void close_sha1_file(int fd)
-{
- if (fsync_object_files)
- fsync_or_die(fd, "sha1 file");
- if (close(fd) != 0)
- die_errno("error when closing sha1 file");
-}
-
-/* Size of directory component, including the ending '/' */
-static inline int directory_size(const char *filename)
-{
- const char *s = strrchr(filename, '/');
- if (!s)
- return 0;
- return s - filename + 1;
-}
-
-/*
- * This creates a temporary file in the same directory as the final
- * 'filename'
- *
- * We want to avoid cross-directory filename renames, because those
- * can have problems on various filesystems (FAT, NFS, Coda).
- */
-static int create_tmpfile(struct strbuf *tmp, const char *filename)
-{
- int fd, dirlen = directory_size(filename);
-
- strbuf_reset(tmp);
- strbuf_add(tmp, filename, dirlen);
- strbuf_addstr(tmp, "tmp_obj_XXXXXX");
- fd = git_mkstemp_mode(tmp->buf, 0444);
- if (fd < 0 && dirlen && errno == ENOENT) {
- /*
- * Make sure the directory exists; note that the contents
- * of the buffer are undefined after mkstemp returns an
- * error, so we have to rewrite the whole buffer from
- * scratch.
- */
- strbuf_reset(tmp);
- strbuf_add(tmp, filename, dirlen - 1);
- if (mkdir(tmp->buf, 0777) && errno != EEXIST)
- return -1;
- if (adjust_shared_perm(tmp->buf))
- return -1;
-
- /* Try again */
- strbuf_addstr(tmp, "/tmp_obj_XXXXXX");
- fd = git_mkstemp_mode(tmp->buf, 0444);
- }
- return fd;
-}
-
-static int write_loose_object(const struct object_id *oid, char *hdr,
- int hdrlen, const void *buf, unsigned long len,
- time_t mtime)
-{
- int fd, ret;
- unsigned char compressed[4096];
- git_zstream stream;
- git_hash_ctx c;
- struct object_id parano_oid;
- static struct strbuf tmp_file = STRBUF_INIT;
- static struct strbuf filename = STRBUF_INIT;
-
- strbuf_reset(&filename);
- sha1_file_name(the_repository, &filename, oid->hash);
-
- fd = create_tmpfile(&tmp_file, filename.buf);
- if (fd < 0) {
- if (errno == EACCES)
- return error("insufficient permission for adding an object to repository database %s", get_object_directory());
- else
- return error_errno("unable to create temporary file");
- }
-
- /* Set it up */
- git_deflate_init(&stream, zlib_compression_level);
- stream.next_out = compressed;
- stream.avail_out = sizeof(compressed);
- the_hash_algo->init_fn(&c);
-
- /* First header.. */
- stream.next_in = (unsigned char *)hdr;
- stream.avail_in = hdrlen;
- while (git_deflate(&stream, 0) == Z_OK)
- ; /* nothing */
- the_hash_algo->update_fn(&c, hdr, hdrlen);
-
- /* Then the data itself.. */
- stream.next_in = (void *)buf;
- stream.avail_in = len;
- do {
- unsigned char *in0 = stream.next_in;
- ret = git_deflate(&stream, Z_FINISH);
- the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
- if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
- die("unable to write sha1 file");
- stream.next_out = compressed;
- stream.avail_out = sizeof(compressed);
- } while (ret == Z_OK);
-
- if (ret != Z_STREAM_END)
- die("unable to deflate new object %s (%d)", oid_to_hex(oid),
- ret);
- ret = git_deflate_end_gently(&stream);
- if (ret != Z_OK)
- die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
- ret);
- the_hash_algo->final_fn(parano_oid.hash, &c);
-	if (oidcmp(oid, &parano_oid) != 0)
- die("confused by unstable object source data for %s",
- oid_to_hex(oid));
-
- close_sha1_file(fd);
-
- if (mtime) {
- struct utimbuf utb;
- utb.actime = mtime;
- utb.modtime = mtime;
- if (utime(tmp_file.buf, &utb) < 0)
- warning_errno("failed utime() on %s", tmp_file.buf);
- }
-
- return finalize_object_file(tmp_file.buf, filename.buf);
-}
-
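-/*
- * "Freshening" an object means bumping the mtime of an existing copy
- * (loose or packed) so it is not pruned as stale; when that succeeds
- * there is no need to write the object out again.
- */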
-static int freshen_loose_object(const unsigned char *sha1)
-{
- return check_and_freshen(sha1, 1);
-}
-
-static int freshen_packed_object(const unsigned char *sha1)
-{
- struct pack_entry e;
- if (!find_pack_entry(the_repository, sha1, &e))
- return 0;
- if (e.p->freshened)
- return 1;
- if (!freshen_file(e.p->pack_name))
- return 0;
- e.p->freshened = 1;
- return 1;
-}
-
-int write_object_file(const void *buf, unsigned long len, const char *type,
- struct object_id *oid)
-{
- char hdr[MAX_HEADER_LEN];
- int hdrlen = sizeof(hdr);
-
- /* Normally if we have it in the pack then we do not bother writing
- * it out into .git/objects/??/?{38} file.
- */
- write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
- if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
- return 0;
- return write_loose_object(oid, hdr, hdrlen, buf, len, 0);
-}
-
-int hash_object_file_literally(const void *buf, unsigned long len,
- const char *type, struct object_id *oid,
- unsigned flags)
-{
- char *header;
- int hdrlen, status = 0;
-
- /* type string, SP, %lu of the length plus NUL must fit this */
- hdrlen = strlen(type) + MAX_HEADER_LEN;
- header = xmalloc(hdrlen);
- write_object_file_prepare(buf, len, type, oid, header, &hdrlen);
-
- if (!(flags & HASH_WRITE_OBJECT))
- goto cleanup;
- if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
- goto cleanup;
- status = write_loose_object(oid, header, hdrlen, buf, len, 0);
-
-cleanup:
- free(header);
- return status;
-}
-
-int force_object_loose(const struct object_id *oid, time_t mtime)
-{
- void *buf;
- unsigned long len;
- enum object_type type;
- char hdr[MAX_HEADER_LEN];
- int hdrlen;
- int ret;
-
- if (has_loose_object(oid->hash))
- return 0;
- buf = read_object(oid->hash, &type, &len);
- if (!buf)
- return error("cannot read sha1_file for %s", oid_to_hex(oid));
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
- ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
- free(buf);
-
- return ret;
-}
-
-int has_sha1_file_with_flags(const unsigned char *sha1, int flags)
-{
- struct object_id oid;
- if (!startup_info->have_repository)
- return 0;
- hashcpy(oid.hash, sha1);
- return oid_object_info_extended(&oid, NULL,
- flags | OBJECT_INFO_SKIP_CACHED) >= 0;
-}
-
-int has_object_file(const struct object_id *oid)
-{
- return has_sha1_file(oid->hash);
-}
-
-int has_object_file_with_flags(const struct object_id *oid, int flags)
-{
- return has_sha1_file_with_flags(oid->hash, flags);
-}
-
-static void check_tree(const void *buf, size_t size)
-{
- struct tree_desc desc;
- struct name_entry entry;
-
- init_tree_desc(&desc, buf, size);
- while (tree_entry(&desc, &entry))
- /* do nothing
- * tree_entry() will die() on malformed entries */
- ;
-}
-
-static void check_commit(const void *buf, size_t size)
-{
- struct commit c;
- memset(&c, 0, sizeof(c));
- if (parse_commit_buffer(&c, buf, size))
- die("corrupt commit");
-}
-
-static void check_tag(const void *buf, size_t size)
-{
- struct tag t;
- memset(&t, 0, sizeof(t));
- if (parse_tag_buffer(&t, buf, size))
- die("corrupt tag");
-}
-
-static int index_mem(struct object_id *oid, void *buf, size_t size,
- enum object_type type,
- const char *path, unsigned flags)
-{
- int ret, re_allocated = 0;
- int write_object = flags & HASH_WRITE_OBJECT;
-
- if (!type)
- type = OBJ_BLOB;
-
- /*
- * Convert blobs to git internal format
- */
- if ((type == OBJ_BLOB) && path) {
- struct strbuf nbuf = STRBUF_INIT;
- if (convert_to_git(&the_index, path, buf, size, &nbuf,
- get_conv_flags(flags))) {
- buf = strbuf_detach(&nbuf, &size);
- re_allocated = 1;
- }
- }
- if (flags & HASH_FORMAT_CHECK) {
- if (type == OBJ_TREE)
- check_tree(buf, size);
- if (type == OBJ_COMMIT)
- check_commit(buf, size);
- if (type == OBJ_TAG)
- check_tag(buf, size);
- }
-
- if (write_object)
- ret = write_object_file(buf, size, type_name(type), oid);
- else
- ret = hash_object_file(buf, size, type_name(type), oid);
- if (re_allocated)
- free(buf);
- return ret;
-}
-
-static int index_stream_convert_blob(struct object_id *oid, int fd,
- const char *path, unsigned flags)
-{
- int ret;
- const int write_object = flags & HASH_WRITE_OBJECT;
- struct strbuf sbuf = STRBUF_INIT;
-
- assert(path);
- assert(would_convert_to_git_filter_fd(path));
-
- convert_to_git_filter_fd(&the_index, path, fd, &sbuf,
- get_conv_flags(flags));
-
- if (write_object)
- ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
- oid);
- else
- ret = hash_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
- oid);
- strbuf_release(&sbuf);
- return ret;
-}
-
-static int index_pipe(struct object_id *oid, int fd, enum object_type type,
- const char *path, unsigned flags)
-{
- struct strbuf sbuf = STRBUF_INIT;
- int ret;
-
- if (strbuf_read(&sbuf, fd, 4096) >= 0)
- ret = index_mem(oid, sbuf.buf, sbuf.len, type, path, flags);
- else
- ret = -1;
- strbuf_release(&sbuf);
- return ret;
-}
-
-#define SMALL_FILE_SIZE (32*1024)
-
-static int index_core(struct object_id *oid, int fd, size_t size,
- enum object_type type, const char *path,
- unsigned flags)
-{
- int ret;
-
- if (!size) {
- ret = index_mem(oid, "", size, type, path, flags);
- } else if (size <= SMALL_FILE_SIZE) {
- char *buf = xmalloc(size);
- ssize_t read_result = read_in_full(fd, buf, size);
- if (read_result < 0)
- ret = error_errno("read error while indexing %s",
- path ? path : "<unknown>");
- else if (read_result != size)
- ret = error("short read while indexing %s",
- path ? path : "<unknown>");
- else
- ret = index_mem(oid, buf, size, type, path, flags);
- free(buf);
- } else {
- void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
- ret = index_mem(oid, buf, size, type, path, flags);
- munmap(buf, size);
- }
- return ret;
-}
-
-/*
- * This creates one packfile per large blob unless bulk-checkin
- * machinery is "plugged".
- *
- * This also bypasses the usual "convert-to-git" dance, and that is on
- * purpose. We could write a streaming version of the converting
- * functions and insert that before feeding the data to fast-import
- * (or equivalent in-core API described above). However, that is
- * somewhat complicated, as we do not know the size of the filter
- * result, which we need to know beforehand when writing a git object.
- * Since the primary motivation for trying to stream from the working
- * tree file and to avoid mmaping it in core is to deal with large
- * binary blobs, which generally do not want any conversion, and
- * callers should avoid this code path when filters are requested.
- */
-static int index_stream(struct object_id *oid, int fd, size_t size,
- enum object_type type, const char *path,
- unsigned flags)
-{
- return index_bulk_checkin(oid, fd, size, type, path, flags);
-}
-
-int index_fd(struct object_id *oid, int fd, struct stat *st,
- enum object_type type, const char *path, unsigned flags)
-{
- int ret;
-
- /*
- * Call xsize_t() only when needed to avoid potentially unnecessary
- * die() for large files.
- */
- if (type == OBJ_BLOB && path && would_convert_to_git_filter_fd(path))
- ret = index_stream_convert_blob(oid, fd, path, flags);
- else if (!S_ISREG(st->st_mode))
- ret = index_pipe(oid, fd, type, path, flags);
- else if (st->st_size <= big_file_threshold || type != OBJ_BLOB ||
- (path && would_convert_to_git(&the_index, path)))
- ret = index_core(oid, fd, xsize_t(st->st_size), type, path,
- flags);
- else
- ret = index_stream(oid, fd, xsize_t(st->st_size), type, path,
- flags);
- close(fd);
- return ret;
-}
-
-int index_path(struct object_id *oid, const char *path, struct stat *st, unsigned flags)
-{
- int fd;
- struct strbuf sb = STRBUF_INIT;
- int rc = 0;
-
- switch (st->st_mode & S_IFMT) {
- case S_IFREG:
- fd = open(path, O_RDONLY);
- if (fd < 0)
- return error_errno("open(\"%s\")", path);
- if (index_fd(oid, fd, st, OBJ_BLOB, path, flags) < 0)
- return error("%s: failed to insert into database",
- path);
- break;
- case S_IFLNK:
- if (strbuf_readlink(&sb, path, st->st_size))
- return error_errno("readlink(\"%s\")", path);
- if (!(flags & HASH_WRITE_OBJECT))
- hash_object_file(sb.buf, sb.len, blob_type, oid);
- else if (write_object_file(sb.buf, sb.len, blob_type, oid))
- rc = error("%s: failed to insert into database", path);
- strbuf_release(&sb);
- break;
- case S_IFDIR:
- return resolve_gitlink_ref(path, "HEAD", oid);
- default:
- return error("%s: unsupported file type", path);
- }
- return rc;
-}
-
-int read_pack_header(int fd, struct pack_header *header)
-{
- if (read_in_full(fd, header, sizeof(*header)) != sizeof(*header))
- /* "eof before pack header was fully read" */
- return PH_ERROR_EOF;
-
- if (header->hdr_signature != htonl(PACK_SIGNATURE))
- /* "protocol error (pack signature mismatch detected)" */
- return PH_ERROR_PACK_SIGNATURE;
- if (!pack_version_ok(header->hdr_version))
- /* "protocol error (pack version unsupported)" */
- return PH_ERROR_PROTOCOL;
- return 0;
-}
-
-void assert_oid_type(const struct object_id *oid, enum object_type expect)
-{
- enum object_type type = oid_object_info(oid, NULL);
- if (type < 0)
- die("%s is not a valid object", oid_to_hex(oid));
- if (type != expect)
- die("%s is not a valid '%s' object", oid_to_hex(oid),
- type_name(expect));
-}
-
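-/*
- * Loose objects are fanned out as "<objdir>/<2 hex digits>/<remaining
- * 38 hex digits>".  Walk one such subdirectory, calling obj_cb for
- * entries that look like object names and cruft_cb for anything else.
- */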
-int for_each_file_in_obj_subdir(unsigned int subdir_nr,
- struct strbuf *path,
- each_loose_object_fn obj_cb,
- each_loose_cruft_fn cruft_cb,
- each_loose_subdir_fn subdir_cb,
- void *data)
-{
- size_t origlen, baselen;
- DIR *dir;
- struct dirent *de;
- int r = 0;
- struct object_id oid;
-
- if (subdir_nr > 0xff)
- BUG("invalid loose object subdirectory: %x", subdir_nr);
-
- origlen = path->len;
- strbuf_complete(path, '/');
- strbuf_addf(path, "%02x", subdir_nr);
-
- dir = opendir(path->buf);
- if (!dir) {
- if (errno != ENOENT)
- r = error_errno("unable to open %s", path->buf);
- strbuf_setlen(path, origlen);
- return r;
- }
-
- oid.hash[0] = subdir_nr;
- strbuf_addch(path, '/');
- baselen = path->len;
-
- while ((de = readdir(dir))) {
- size_t namelen;
- if (is_dot_or_dotdot(de->d_name))
- continue;
-
- namelen = strlen(de->d_name);
- strbuf_setlen(path, baselen);
- strbuf_add(path, de->d_name, namelen);
- if (namelen == GIT_SHA1_HEXSZ - 2 &&
- !hex_to_bytes(oid.hash + 1, de->d_name,
- GIT_SHA1_RAWSZ - 1)) {
- if (obj_cb) {
- r = obj_cb(&oid, path->buf, data);
- if (r)
- break;
- }
- continue;
- }
-
- if (cruft_cb) {
- r = cruft_cb(de->d_name, path->buf, data);
- if (r)
- break;
- }
- }
- closedir(dir);
-
- strbuf_setlen(path, baselen - 1);
- if (!r && subdir_cb)
- r = subdir_cb(subdir_nr, path->buf, data);
-
- strbuf_setlen(path, origlen);
-
- return r;
-}
-
-int for_each_loose_file_in_objdir_buf(struct strbuf *path,
- each_loose_object_fn obj_cb,
- each_loose_cruft_fn cruft_cb,
- each_loose_subdir_fn subdir_cb,
- void *data)
-{
- int r = 0;
- int i;
-
- for (i = 0; i < 256; i++) {
- r = for_each_file_in_obj_subdir(i, path, obj_cb, cruft_cb,
- subdir_cb, data);
- if (r)
- break;
- }
-
- return r;
-}
-
-int for_each_loose_file_in_objdir(const char *path,
- each_loose_object_fn obj_cb,
- each_loose_cruft_fn cruft_cb,
- each_loose_subdir_fn subdir_cb,
- void *data)
-{
- struct strbuf buf = STRBUF_INIT;
- int r;
-
- strbuf_addstr(&buf, path);
- r = for_each_loose_file_in_objdir_buf(&buf, obj_cb, cruft_cb,
- subdir_cb, data);
- strbuf_release(&buf);
-
- return r;
-}
-
-struct loose_alt_odb_data {
- each_loose_object_fn *cb;
- void *data;
-};
-
-static int loose_from_alt_odb(struct alternate_object_database *alt,
- void *vdata)
-{
- struct loose_alt_odb_data *data = vdata;
- struct strbuf buf = STRBUF_INIT;
- int r;
-
- strbuf_addstr(&buf, alt->path);
- r = for_each_loose_file_in_objdir_buf(&buf,
- data->cb, NULL, NULL,
- data->data);
- strbuf_release(&buf);
- return r;
-}
-
-int for_each_loose_object(each_loose_object_fn cb, void *data, unsigned flags)
-{
- struct loose_alt_odb_data alt;
- int r;
-
- r = for_each_loose_file_in_objdir(get_object_directory(),
- cb, NULL, NULL, data);
- if (r)
- return r;
-
- if (flags & FOR_EACH_OBJECT_LOCAL_ONLY)
- return 0;
-
- alt.cb = cb;
- alt.data = data;
- return foreach_alt_odb(loose_from_alt_odb, &alt);
-}
-
-static int check_stream_sha1(git_zstream *stream,
- const char *hdr,
- unsigned long size,
- const char *path,
- const unsigned char *expected_sha1)
-{
- git_hash_ctx c;
- unsigned char real_sha1[GIT_MAX_RAWSZ];
- unsigned char buf[4096];
- unsigned long total_read;
- int status = Z_OK;
-
- the_hash_algo->init_fn(&c);
- the_hash_algo->update_fn(&c, hdr, stream->total_out);
-
- /*
- * We already read some bytes into hdr, but the ones up to the NUL
- * do not count against the object's content size.
- */
- total_read = stream->total_out - strlen(hdr) - 1;
-
- /*
- * This size comparison must be "<=" to read the final zlib packets;
- * see the comment in unpack_sha1_rest for details.
- */
- while (total_read <= size &&
- (status == Z_OK || status == Z_BUF_ERROR)) {
- stream->next_out = buf;
- stream->avail_out = sizeof(buf);
- if (size - total_read < stream->avail_out)
- stream->avail_out = size - total_read;
- status = git_inflate(stream, Z_FINISH);
- the_hash_algo->update_fn(&c, buf, stream->next_out - buf);
- total_read += stream->next_out - buf;
- }
- git_inflate_end(stream);
-
- if (status != Z_STREAM_END) {
- error("corrupt loose object '%s'", sha1_to_hex(expected_sha1));
- return -1;
- }
- if (stream->avail_in) {
- error("garbage at end of loose object '%s'",
- sha1_to_hex(expected_sha1));
- return -1;
- }
-
- the_hash_algo->final_fn(real_sha1, &c);
- if (hashcmp(expected_sha1, real_sha1)) {
- error("sha1 mismatch for %s (expected %s)", path,
- sha1_to_hex(expected_sha1));
- return -1;
- }
-
- return 0;
-}
-
-int read_loose_object(const char *path,
- const struct object_id *expected_oid,
- enum object_type *type,
- unsigned long *size,
- void **contents)
-{
- int ret = -1;
- void *map = NULL;
- unsigned long mapsize;
- git_zstream stream;
- char hdr[MAX_HEADER_LEN];
-
- *contents = NULL;
-
- map = map_sha1_file_1(the_repository, path, NULL, &mapsize);
- if (!map) {
- error_errno("unable to mmap %s", path);
- goto out;
- }
-
- if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) {
- error("unable to unpack header of %s", path);
- goto out;
- }
-
- *type = parse_sha1_header(hdr, size);
- if (*type < 0) {
- error("unable to parse header of %s", path);
- git_inflate_end(&stream);
- goto out;
- }
-
- if (*type == OBJ_BLOB) {
- if (check_stream_sha1(&stream, hdr, *size, path, expected_oid->hash) < 0)
- goto out;
- } else {
- *contents = unpack_sha1_rest(&stream, hdr, *size, expected_oid->hash);
- if (!*contents) {
- error("unable to unpack contents of %s", path);
- git_inflate_end(&stream);
- goto out;
- }
- if (check_object_signature(expected_oid, *contents,
- *size, type_name(*type))) {
- error("sha1 mismatch for %s (expected %s)", path,
- oid_to_hex(expected_oid));
- free(*contents);
- goto out;
- }
- }
-
- ret = 0; /* everything checks out */
-
-out:
- if (map)
- munmap(map, mapsize);
- return ret;
-}
+++ /dev/null
-#include "cache.h"
-#include "config.h"
-#include "tag.h"
-#include "commit.h"
-#include "tree.h"
-#include "blob.h"
-#include "tree-walk.h"
-#include "refs.h"
-#include "remote.h"
-#include "dir.h"
-#include "sha1-array.h"
-#include "packfile.h"
-#include "object-store.h"
-#include "repository.h"
-
-static int get_oid_oneline(const char *, struct object_id *, struct commit_list *);
-
-typedef int (*disambiguate_hint_fn)(const struct object_id *, void *);
-
-struct disambiguate_state {
- int len; /* length of prefix in hex chars */
- char hex_pfx[GIT_MAX_HEXSZ + 1];
- struct object_id bin_pfx;
-
- disambiguate_hint_fn fn;
- void *cb_data;
- struct object_id candidate;
- unsigned candidate_exists:1;
- unsigned candidate_checked:1;
- unsigned candidate_ok:1;
- unsigned disambiguate_fn_used:1;
- unsigned ambiguous:1;
- unsigned always_call_fn:1;
-};
-
-static void update_candidates(struct disambiguate_state *ds, const struct object_id *current)
-{
- if (ds->always_call_fn) {
- ds->ambiguous = ds->fn(current, ds->cb_data) ? 1 : 0;
- return;
- }
- if (!ds->candidate_exists) {
- /* this is the first candidate */
- oidcpy(&ds->candidate, current);
- ds->candidate_exists = 1;
- return;
- } else if (!oidcmp(&ds->candidate, current)) {
- /* the same as what we already have seen */
- return;
- }
-
- if (!ds->fn) {
- /* cannot disambiguate between ds->candidate and current */
- ds->ambiguous = 1;
- return;
- }
-
- if (!ds->candidate_checked) {
- ds->candidate_ok = ds->fn(&ds->candidate, ds->cb_data);
- ds->disambiguate_fn_used = 1;
- ds->candidate_checked = 1;
- }
-
- if (!ds->candidate_ok) {
- /* discard the candidate; we know it does not satisfy fn */
- oidcpy(&ds->candidate, current);
- ds->candidate_checked = 0;
- return;
- }
-
- /* if we reach this point, we know ds->candidate satisfies fn */
- if (ds->fn(current, ds->cb_data)) {
- /*
- * if both current and candidate satisfy fn, we cannot
- * disambiguate.
- */
- ds->candidate_ok = 0;
- ds->ambiguous = 1;
- }
-
- /* otherwise, current can be discarded and candidate is still good */
-}
-
-static int append_loose_object(const struct object_id *oid, const char *path,
- void *data)
-{
- oid_array_append(data, oid);
- return 0;
-}
-
-static int match_sha(unsigned, const unsigned char *, const unsigned char *);
-
-static void find_short_object_filename(struct disambiguate_state *ds)
-{
- int subdir_nr = ds->bin_pfx.hash[0];
- struct alternate_object_database *alt;
- static struct alternate_object_database *fakeent;
-
- if (!fakeent) {
- /*
- * Create a "fake" alternate object database that
- * points to our own object database, to make it
- * easier to get a temporary working space in
- * alt->name/alt->base while iterating over the
- * object databases including our own.
- */
- fakeent = alloc_alt_odb(get_object_directory());
- }
- fakeent->next = the_repository->objects->alt_odb_list;
-
- for (alt = fakeent; alt && !ds->ambiguous; alt = alt->next) {
- int pos;
-
- if (!alt->loose_objects_subdir_seen[subdir_nr]) {
- struct strbuf *buf = alt_scratch_buf(alt);
- for_each_file_in_obj_subdir(subdir_nr, buf,
- append_loose_object,
- NULL, NULL,
- &alt->loose_objects_cache);
- alt->loose_objects_subdir_seen[subdir_nr] = 1;
- }
-
- pos = oid_array_lookup(&alt->loose_objects_cache, &ds->bin_pfx);
- if (pos < 0)
- pos = -1 - pos;
- while (!ds->ambiguous && pos < alt->loose_objects_cache.nr) {
- const struct object_id *oid;
- oid = alt->loose_objects_cache.oid + pos;
- if (!match_sha(ds->len, ds->bin_pfx.hash, oid->hash))
- break;
- update_candidates(ds, oid);
- pos++;
- }
- }
-}
-
-static int match_sha(unsigned len, const unsigned char *a, const unsigned char *b)
-{
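-	/*
-	 * "len" counts hex digits: each loop iteration compares one full
-	 * byte (two digits), and a trailing odd digit is compared against
-	 * the high nibble only.
-	 */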
- do {
- if (*a != *b)
- return 0;
- a++;
- b++;
- len -= 2;
- } while (len > 1);
- if (len)
- if ((*a ^ *b) & 0xf0)
- return 0;
- return 1;
-}
-
-static void unique_in_pack(struct packed_git *p,
- struct disambiguate_state *ds)
-{
- uint32_t num, i, first = 0;
- const struct object_id *current = NULL;
-
- if (open_pack_index(p) || !p->num_objects)
- return;
-
- num = p->num_objects;
- bsearch_pack(&ds->bin_pfx, p, &first);
-
- /*
- * At this point, "first" is the location of the lowest object
- * with an object name that could match "bin_pfx". See if we have
-	 * zero, one, or more objects that actually match.
- */
- for (i = first; i < num && !ds->ambiguous; i++) {
- struct object_id oid;
- current = nth_packed_object_oid(&oid, p, i);
- if (!match_sha(ds->len, ds->bin_pfx.hash, current->hash))
- break;
- update_candidates(ds, current);
- }
-}
-
-static void find_short_packed_object(struct disambiguate_state *ds)
-{
- struct packed_git *p;
-
- for (p = get_packed_git(the_repository); p && !ds->ambiguous;
- p = p->next)
- unique_in_pack(p, ds);
-}
-
-#define SHORT_NAME_NOT_FOUND (-1)
-#define SHORT_NAME_AMBIGUOUS (-2)
-
-static int finish_object_disambiguation(struct disambiguate_state *ds,
- struct object_id *oid)
-{
- if (ds->ambiguous)
- return SHORT_NAME_AMBIGUOUS;
-
- if (!ds->candidate_exists)
- return SHORT_NAME_NOT_FOUND;
-
- if (!ds->candidate_checked)
- /*
- * If this is the only candidate, there is no point
- * calling the disambiguation hint callback.
- *
- * On the other hand, if the current candidate
- * replaced an earlier candidate that did _not_ pass
- * the disambiguation hint callback, then we do have
-		 * more than one object that matches the short name
- * given, so we should make sure this one matches;
- * otherwise, if we discovered this one and the one
- * that we previously discarded in the reverse order,
- * we would end up showing different results in the
- * same repository!
- */
- ds->candidate_ok = (!ds->disambiguate_fn_used ||
- ds->fn(&ds->candidate, ds->cb_data));
-
- if (!ds->candidate_ok)
- return SHORT_NAME_AMBIGUOUS;
-
- oidcpy(oid, &ds->candidate);
- return 0;
-}
-
-static int disambiguate_commit_only(const struct object_id *oid, void *cb_data_unused)
-{
- int kind = oid_object_info(oid, NULL);
- return kind == OBJ_COMMIT;
-}
-
-static int disambiguate_committish_only(const struct object_id *oid, void *cb_data_unused)
-{
- struct object *obj;
- int kind;
-
- kind = oid_object_info(oid, NULL);
- if (kind == OBJ_COMMIT)
- return 1;
- if (kind != OBJ_TAG)
- return 0;
-
- /* We need to do this the hard way... */
- obj = deref_tag(parse_object(oid), NULL, 0);
- if (obj && obj->type == OBJ_COMMIT)
- return 1;
- return 0;
-}
-
-static int disambiguate_tree_only(const struct object_id *oid, void *cb_data_unused)
-{
- int kind = oid_object_info(oid, NULL);
- return kind == OBJ_TREE;
-}
-
-static int disambiguate_treeish_only(const struct object_id *oid, void *cb_data_unused)
-{
- struct object *obj;
- int kind;
-
- kind = oid_object_info(oid, NULL);
- if (kind == OBJ_TREE || kind == OBJ_COMMIT)
- return 1;
- if (kind != OBJ_TAG)
- return 0;
-
- /* We need to do this the hard way... */
- obj = deref_tag(parse_object(oid), NULL, 0);
- if (obj && (obj->type == OBJ_TREE || obj->type == OBJ_COMMIT))
- return 1;
- return 0;
-}
-
-static int disambiguate_blob_only(const struct object_id *oid, void *cb_data_unused)
-{
- int kind = oid_object_info(oid, NULL);
- return kind == OBJ_BLOB;
-}
-
-static disambiguate_hint_fn default_disambiguate_hint;
-
-int set_disambiguate_hint_config(const char *var, const char *value)
-{
- static const struct {
- const char *name;
- disambiguate_hint_fn fn;
- } hints[] = {
- { "none", NULL },
- { "commit", disambiguate_commit_only },
- { "committish", disambiguate_committish_only },
- { "tree", disambiguate_tree_only },
- { "treeish", disambiguate_treeish_only },
- { "blob", disambiguate_blob_only }
- };
- int i;
-
- if (!value)
- return config_error_nonbool(var);
-
- for (i = 0; i < ARRAY_SIZE(hints); i++) {
- if (!strcasecmp(value, hints[i].name)) {
- default_disambiguate_hint = hints[i].fn;
- return 0;
- }
- }
-
- return error("unknown hint type for '%s': %s", var, value);
-}
-
-static int init_object_disambiguation(const char *name, int len,
- struct disambiguate_state *ds)
-{
- int i;
-
- if (len < MINIMUM_ABBREV || len > GIT_SHA1_HEXSZ)
- return -1;
-
- memset(ds, 0, sizeof(*ds));
-
- for (i = 0; i < len ;i++) {
- unsigned char c = name[i];
- unsigned char val;
- if (c >= '0' && c <= '9')
- val = c - '0';
- else if (c >= 'a' && c <= 'f')
- val = c - 'a' + 10;
- else if (c >= 'A' && c <='F') {
- val = c - 'A' + 10;
- c -= 'A' - 'a';
- }
- else
- return -1;
- ds->hex_pfx[i] = c;
- if (!(i & 1))
- val <<= 4;
- ds->bin_pfx.hash[i >> 1] |= val;
- }
-
- ds->len = len;
- ds->hex_pfx[len] = '\0';
- prepare_alt_odb(the_repository);
- return 0;
-}
-
-static int show_ambiguous_object(const struct object_id *oid, void *data)
-{
- const struct disambiguate_state *ds = data;
- struct strbuf desc = STRBUF_INIT;
- int type;
-
- if (ds->fn && !ds->fn(oid, ds->cb_data))
- return 0;
-
- type = oid_object_info(oid, NULL);
- if (type == OBJ_COMMIT) {
- struct commit *commit = lookup_commit(oid);
- if (commit) {
- struct pretty_print_context pp = {0};
- pp.date_mode.type = DATE_SHORT;
- format_commit_message(commit, " %ad - %s", &desc, &pp);
- }
- } else if (type == OBJ_TAG) {
- struct tag *tag = lookup_tag(oid);
- if (!parse_tag(tag) && tag->tag)
- strbuf_addf(&desc, " %s", tag->tag);
- }
-
- advise(" %s %s%s",
- find_unique_abbrev(oid, DEFAULT_ABBREV),
- type_name(type) ? type_name(type) : "unknown type",
- desc.buf);
-
- strbuf_release(&desc);
- return 0;
-}
-
-static int get_short_oid(const char *name, int len, struct object_id *oid,
- unsigned flags)
-{
- int status;
- struct disambiguate_state ds;
- int quietly = !!(flags & GET_OID_QUIETLY);
-
- if (init_object_disambiguation(name, len, &ds) < 0)
- return -1;
-
- if (HAS_MULTI_BITS(flags & GET_OID_DISAMBIGUATORS))
- die("BUG: multiple get_short_oid disambiguator flags");
-
- if (flags & GET_OID_COMMIT)
- ds.fn = disambiguate_commit_only;
- else if (flags & GET_OID_COMMITTISH)
- ds.fn = disambiguate_committish_only;
- else if (flags & GET_OID_TREE)
- ds.fn = disambiguate_tree_only;
- else if (flags & GET_OID_TREEISH)
- ds.fn = disambiguate_treeish_only;
- else if (flags & GET_OID_BLOB)
- ds.fn = disambiguate_blob_only;
- else
- ds.fn = default_disambiguate_hint;
-
- find_short_object_filename(&ds);
- find_short_packed_object(&ds);
- status = finish_object_disambiguation(&ds, oid);
-
- if (!quietly && (status == SHORT_NAME_AMBIGUOUS)) {
- error(_("short SHA1 %s is ambiguous"), ds.hex_pfx);
-
- /*
- * We may still have ambiguity if we simply saw a series of
- * candidates that did not satisfy our hint function. In
- * that case, we still want to show them, so disable the hint
- * function entirely.
- */
- if (!ds.ambiguous)
- ds.fn = NULL;
-
- advise(_("The candidates are:"));
- for_each_abbrev(ds.hex_pfx, show_ambiguous_object, &ds);
- }
-
- return status;
-}
-
-static int collect_ambiguous(const struct object_id *oid, void *data)
-{
- oid_array_append(data, oid);
- return 0;
-}
-
-int for_each_abbrev(const char *prefix, each_abbrev_fn fn, void *cb_data)
-{
- struct oid_array collect = OID_ARRAY_INIT;
- struct disambiguate_state ds;
- int ret;
-
- if (init_object_disambiguation(prefix, strlen(prefix), &ds) < 0)
- return -1;
-
- ds.always_call_fn = 1;
- ds.fn = collect_ambiguous;
- ds.cb_data = &collect;
- find_short_object_filename(&ds);
- find_short_packed_object(&ds);
-
- ret = oid_array_for_each_unique(&collect, fn, cb_data);
- oid_array_clear(&collect);
- return ret;
-}
-
-/*
- * Return the slot of the most-significant bit set in "val". There are various
- * ways to do this quickly with fls() or __builtin_clzl(), but speed is
- * probably not a big deal here.
- */
-static unsigned msb(unsigned long val)
-{
- unsigned r = 0;
- while (val >>= 1)
- r++;
- return r;
-}
-
-struct min_abbrev_data {
- unsigned int init_len;
- unsigned int cur_len;
- char *hex;
- const struct object_id *oid;
-};
-
-static inline char get_hex_char_from_oid(const struct object_id *oid,
- unsigned int pos)
-{
- static const char hex[] = "0123456789abcdef";
-
- if ((pos & 1) == 0)
- return hex[oid->hash[pos >> 1] >> 4];
- else
- return hex[oid->hash[pos >> 1] & 0xf];
-}
-
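-/*
- * Seeing one other object name, push the proposed abbreviation length
- * out just far enough that the abbreviation no longer matches that
- * object's prefix.
- */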
-static int extend_abbrev_len(const struct object_id *oid, void *cb_data)
-{
- struct min_abbrev_data *mad = cb_data;
-
- unsigned int i = mad->init_len;
- while (mad->hex[i] && mad->hex[i] == get_hex_char_from_oid(oid, i))
- i++;
-
- if (i < GIT_MAX_RAWSZ && i >= mad->cur_len)
- mad->cur_len = i + 1;
-
- return 0;
-}
-
-static void find_abbrev_len_for_pack(struct packed_git *p,
- struct min_abbrev_data *mad)
-{
- int match = 0;
- uint32_t num, first = 0;
- struct object_id oid;
- const struct object_id *mad_oid;
-
- if (open_pack_index(p) || !p->num_objects)
- return;
-
- num = p->num_objects;
- mad_oid = mad->oid;
- match = bsearch_pack(mad_oid, p, &first);
-
- /*
- * first is now the position in the packfile where we would insert
- * mad->hash if it does not exist (or the position of mad->hash if
- * it does exist). Hence, we consider a maximum of two objects
- * nearby for the abbreviation length.
- */
- mad->init_len = 0;
- if (!match) {
- if (nth_packed_object_oid(&oid, p, first))
- extend_abbrev_len(&oid, mad);
- } else if (first < num - 1) {
- if (nth_packed_object_oid(&oid, p, first + 1))
- extend_abbrev_len(&oid, mad);
- }
- if (first > 0) {
- if (nth_packed_object_oid(&oid, p, first - 1))
- extend_abbrev_len(&oid, mad);
- }
- mad->init_len = mad->cur_len;
-}
-
-static void find_abbrev_len_packed(struct min_abbrev_data *mad)
-{
- struct packed_git *p;
-
- for (p = get_packed_git(the_repository); p; p = p->next)
- find_abbrev_len_for_pack(p, mad);
-}
-
-int find_unique_abbrev_r(char *hex, const struct object_id *oid, int len)
-{
- struct disambiguate_state ds;
- struct min_abbrev_data mad;
- struct object_id oid_ret;
- if (len < 0) {
- unsigned long count = approximate_object_count();
- /*
- * Add one because the MSB only tells us the highest bit set,
- * not including the value of all the _other_ bits (so "15"
-		 * is only one off of 2^4, but the MSB is the 3rd bit).
- */
- len = msb(count) + 1;
- /*
- * We now know we have on the order of 2^len objects, which
- * expects a collision at 2^(len/2). But we also care about hex
- * chars, not bits, and there are 4 bits per hex. So all
- * together we need to divide by 2 and round up.
- */
- len = DIV_ROUND_UP(len, 2);
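-		/*
-		 * For example, a repository with about 2^23 objects
-		 * (roughly eight million) gives len = 24 bits, which
-		 * divides down to DIV_ROUND_UP(24, 2) = 12 hex digits.
-		 */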
- /*
- * For very small repos, we stick with our regular fallback.
- */
- if (len < FALLBACK_DEFAULT_ABBREV)
- len = FALLBACK_DEFAULT_ABBREV;
- }
-
- oid_to_hex_r(hex, oid);
- if (len == GIT_SHA1_HEXSZ || !len)
- return GIT_SHA1_HEXSZ;
-
- mad.init_len = len;
- mad.cur_len = len;
- mad.hex = hex;
- mad.oid = oid;
-
- find_abbrev_len_packed(&mad);
-
- if (init_object_disambiguation(hex, mad.cur_len, &ds) < 0)
- return -1;
-
- ds.fn = extend_abbrev_len;
- ds.always_call_fn = 1;
- ds.cb_data = (void *)&mad;
-
- find_short_object_filename(&ds);
- (void)finish_object_disambiguation(&ds, &oid_ret);
-
- hex[mad.cur_len] = 0;
- return mad.cur_len;
-}
-
-const char *find_unique_abbrev(const struct object_id *oid, int len)
-{
- static int bufno;
- static char hexbuffer[4][GIT_MAX_HEXSZ + 1];
- char *hex = hexbuffer[bufno];
- bufno = (bufno + 1) % ARRAY_SIZE(hexbuffer);
- find_unique_abbrev_r(hex, oid, len);
- return hex;
-}
-
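-/*
- * A "path" is considered ambiguous (and rejected as a ref name) when
- * any slash-separated component is empty or consists only of dots,
- * e.g. "..", "./x", "a//b", or a leading "/".
- */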
-static int ambiguous_path(const char *path, int len)
-{
- int slash = 1;
- int cnt;
-
- for (cnt = 0; cnt < len; cnt++) {
- switch (*path++) {
- case '\0':
- break;
- case '/':
- if (slash)
- break;
- slash = 1;
- continue;
- case '.':
- continue;
- default:
- slash = 0;
- continue;
- }
- break;
- }
- return slash;
-}
-
-static inline int at_mark(const char *string, int len,
- const char **suffix, int nr)
-{
- int i;
-
- for (i = 0; i < nr; i++) {
- int suffix_len = strlen(suffix[i]);
- if (suffix_len <= len
- && !strncasecmp(string, suffix[i], suffix_len))
- return suffix_len;
- }
- return 0;
-}
-
-static inline int upstream_mark(const char *string, int len)
-{
- const char *suffix[] = { "@{upstream}", "@{u}" };
- return at_mark(string, len, suffix, ARRAY_SIZE(suffix));
-}
-
-static inline int push_mark(const char *string, int len)
-{
- const char *suffix[] = { "@{push}" };
- return at_mark(string, len, suffix, ARRAY_SIZE(suffix));
-}
-
-static int get_oid_1(const char *name, int len, struct object_id *oid, unsigned lookup_flags);
-static int interpret_nth_prior_checkout(const char *name, int namelen, struct strbuf *buf);
-
-static int get_oid_basic(const char *str, int len, struct object_id *oid,
- unsigned int flags)
-{
- static const char *warn_msg = "refname '%.*s' is ambiguous.";
- static const char *object_name_msg = N_(
- "Git normally never creates a ref that ends with 40 hex characters\n"
- "because it will be ignored when you just specify 40-hex. These refs\n"
- "may be created by mistake. For example,\n"
- "\n"
- " git checkout -b $br $(git rev-parse ...)\n"
- "\n"
- "where \"$br\" is somehow empty and a 40-hex ref is created. Please\n"
- "examine these refs and maybe delete them. Turn this message off by\n"
- "running \"git config advice.objectNameWarning false\"");
- struct object_id tmp_oid;
- char *real_ref = NULL;
- int refs_found = 0;
- int at, reflog_len, nth_prior = 0;
-
- if (len == GIT_SHA1_HEXSZ && !get_oid_hex(str, oid)) {
- if (warn_ambiguous_refs && warn_on_object_refname_ambiguity) {
- refs_found = dwim_ref(str, len, &tmp_oid, &real_ref);
- if (refs_found > 0) {
- warning(warn_msg, len, str);
- if (advice_object_name_warning)
- fprintf(stderr, "%s\n", _(object_name_msg));
- }
- free(real_ref);
- }
- return 0;
- }
-
- /* basic@{time or number or -number} format to query ref-log */
- reflog_len = at = 0;
- if (len && str[len-1] == '}') {
- for (at = len-4; at >= 0; at--) {
- if (str[at] == '@' && str[at+1] == '{') {
- if (str[at+2] == '-') {
- if (at != 0)
- /* @{-N} not at start */
- return -1;
- nth_prior = 1;
- continue;
- }
- if (!upstream_mark(str + at, len - at) &&
- !push_mark(str + at, len - at)) {
- reflog_len = (len-1) - (at+2);
- len = at;
- }
- break;
- }
- }
- }
-
- /* Accept only unambiguous ref paths. */
- if (len && ambiguous_path(str, len))
- return -1;
-
- if (nth_prior) {
- struct strbuf buf = STRBUF_INIT;
- int detached;
-
- if (interpret_nth_prior_checkout(str, len, &buf) > 0) {
- detached = (buf.len == GIT_SHA1_HEXSZ && !get_oid_hex(buf.buf, oid));
- strbuf_release(&buf);
- if (detached)
- return 0;
- }
- }
-
- if (!len && reflog_len)
- /* allow "@{...}" to mean the current branch reflog */
- refs_found = dwim_ref("HEAD", 4, oid, &real_ref);
- else if (reflog_len)
- refs_found = dwim_log(str, len, oid, &real_ref);
- else
- refs_found = dwim_ref(str, len, oid, &real_ref);
-
- if (!refs_found)
- return -1;
-
- if (warn_ambiguous_refs && !(flags & GET_OID_QUIETLY) &&
- (refs_found > 1 ||
- !get_short_oid(str, len, &tmp_oid, GET_OID_QUIETLY)))
- warning(warn_msg, len, str);
-
- if (reflog_len) {
- int nth, i;
- timestamp_t at_time;
- timestamp_t co_time;
- int co_tz, co_cnt;
-
- /* Is it asking for N-th entry, or approxidate? */
- for (i = nth = 0; 0 <= nth && i < reflog_len; i++) {
- char ch = str[at+2+i];
- if ('0' <= ch && ch <= '9')
- nth = nth * 10 + ch - '0';
- else
- nth = -1;
- }
- if (100000000 <= nth) {
- at_time = nth;
- nth = -1;
- } else if (0 <= nth)
- at_time = 0;
- else {
- int errors = 0;
- char *tmp = xstrndup(str + at + 2, reflog_len);
- at_time = approxidate_careful(tmp, &errors);
- free(tmp);
- if (errors) {
- free(real_ref);
- return -1;
- }
- }
- if (read_ref_at(real_ref, flags, at_time, nth, oid, NULL,
- &co_time, &co_tz, &co_cnt)) {
- if (!len) {
- if (starts_with(real_ref, "refs/heads/")) {
- str = real_ref + 11;
- len = strlen(real_ref + 11);
- } else {
- /* detached HEAD */
- str = "HEAD";
- len = 4;
- }
- }
- if (at_time) {
- if (!(flags & GET_OID_QUIETLY)) {
- warning("Log for '%.*s' only goes "
- "back to %s.", len, str,
- show_date(co_time, co_tz, DATE_MODE(RFC2822)));
- }
- } else {
- if (flags & GET_OID_QUIETLY) {
- exit(128);
- }
- die("Log for '%.*s' only has %d entries.",
- len, str, co_cnt);
- }
- }
- }
-
- free(real_ref);
- return 0;
-}
-
-static int get_parent(const char *name, int len,
- struct object_id *result, int idx)
-{
- struct object_id oid;
- int ret = get_oid_1(name, len, &oid, GET_OID_COMMITTISH);
- struct commit *commit;
- struct commit_list *p;
-
- if (ret)
- return ret;
- commit = lookup_commit_reference(&oid);
- if (parse_commit(commit))
- return -1;
- if (!idx) {
- oidcpy(result, &commit->object.oid);
- return 0;
- }
- p = commit->parents;
- while (p) {
- if (!--idx) {
- oidcpy(result, &p->item->object.oid);
- return 0;
- }
- p = p->next;
- }
- return -1;
-}
-
-static int get_nth_ancestor(const char *name, int len,
- struct object_id *result, int generation)
-{
- struct object_id oid;
- struct commit *commit;
- int ret;
-
- ret = get_oid_1(name, len, &oid, GET_OID_COMMITTISH);
- if (ret)
- return ret;
- commit = lookup_commit_reference(&oid);
- if (!commit)
- return -1;
-
- while (generation--) {
- if (parse_commit(commit) || !commit->parents)
- return -1;
- commit = commit->parents->item;
- }
- oidcpy(result, &commit->object.oid);
- return 0;
-}
-
-struct object *peel_to_type(const char *name, int namelen,
- struct object *o, enum object_type expected_type)
-{
- if (name && !namelen)
- namelen = strlen(name);
- while (1) {
- if (!o || (!o->parsed && !parse_object(&o->oid)))
- return NULL;
- if (expected_type == OBJ_ANY || o->type == expected_type)
- return o;
- if (o->type == OBJ_TAG)
- o = ((struct tag*) o)->tagged;
- else if (o->type == OBJ_COMMIT)
- o = &(((struct commit *) o)->tree->object);
- else {
- if (name)
- error("%.*s: expected %s type, but the object "
- "dereferences to %s type",
- namelen, name, type_name(expected_type),
- type_name(o->type));
- return NULL;
- }
- }
-}
-
-static int peel_onion(const char *name, int len, struct object_id *oid,
- unsigned lookup_flags)
-{
- struct object_id outer;
- const char *sp;
- unsigned int expected_type = 0;
- struct object *o;
-
- /*
- * "ref^{type}" dereferences ref repeatedly until you cannot
- * dereference anymore, or you get an object of given type,
- * whichever comes first. "ref^{}" means just dereference
- * tags until you get a non-tag. "ref^0" is a shorthand for
- * "ref^{commit}". "commit^{tree}" could be used to find the
- * top-level tree of the given commit.
- */
- if (len < 4 || name[len-1] != '}')
- return -1;
-
- for (sp = name + len - 1; name <= sp; sp--) {
- int ch = *sp;
- if (ch == '{' && name < sp && sp[-1] == '^')
- break;
- }
- if (sp <= name)
- return -1;
-
- sp++; /* beginning of type name, or closing brace for empty */
- if (starts_with(sp, "commit}"))
- expected_type = OBJ_COMMIT;
- else if (starts_with(sp, "tag}"))
- expected_type = OBJ_TAG;
- else if (starts_with(sp, "tree}"))
- expected_type = OBJ_TREE;
- else if (starts_with(sp, "blob}"))
- expected_type = OBJ_BLOB;
- else if (starts_with(sp, "object}"))
- expected_type = OBJ_ANY;
- else if (sp[0] == '}')
- expected_type = OBJ_NONE;
- else if (sp[0] == '/')
- expected_type = OBJ_COMMIT;
- else
- return -1;
-
- lookup_flags &= ~GET_OID_DISAMBIGUATORS;
- if (expected_type == OBJ_COMMIT)
- lookup_flags |= GET_OID_COMMITTISH;
- else if (expected_type == OBJ_TREE)
- lookup_flags |= GET_OID_TREEISH;
-
- if (get_oid_1(name, sp - name - 2, &outer, lookup_flags))
- return -1;
-
- o = parse_object(&outer);
- if (!o)
- return -1;
- if (!expected_type) {
- o = deref_tag(o, name, sp - name - 2);
- if (!o || (!o->parsed && !parse_object(&o->oid)))
- return -1;
- oidcpy(oid, &o->oid);
- return 0;
- }
-
- /*
-	 * At this point, the syntax looks correct, so
- * if we do not get the needed object, we should
- * barf.
- */
- o = peel_to_type(name, len, o, expected_type);
- if (!o)
- return -1;
-
- oidcpy(oid, &o->oid);
- if (sp[0] == '/') {
- /* "$commit^{/foo}" */
- char *prefix;
- int ret;
- struct commit_list *list = NULL;
-
- /*
- * $commit^{/}. Some regex implementation may reject.
- * We don't need regex anyway. '' pattern always matches.
- */
- if (sp[1] == '}')
- return 0;
-
- prefix = xstrndup(sp + 1, name + len - 1 - (sp + 1));
- commit_list_insert((struct commit *)o, &list);
- ret = get_oid_oneline(prefix, oid, list);
- free(prefix);
- return ret;
- }
- return 0;
-}
-
-static int get_describe_name(const char *name, int len, struct object_id *oid)
-{
- const char *cp;
- unsigned flags = GET_OID_QUIETLY | GET_OID_COMMIT;
-
- for (cp = name + len - 1; name + 2 <= cp; cp--) {
- char ch = *cp;
- if (!isxdigit(ch)) {
- /* We must be looking at g in "SOMETHING-g"
- * for it to be describe output.
- */
- if (ch == 'g' && cp[-1] == '-') {
- cp++;
- len -= cp - name;
- return get_short_oid(cp, len, oid, flags);
- }
- }
- }
- return -1;
-}
-
-static int get_oid_1(const char *name, int len, struct object_id *oid, unsigned lookup_flags)
-{
- int ret, has_suffix;
- const char *cp;
-
- /*
- * "name~3" is "name^^^", "name~" is "name~1", and "name^" is "name^1".
- */
- has_suffix = 0;
- for (cp = name + len - 1; name <= cp; cp--) {
- int ch = *cp;
- if ('0' <= ch && ch <= '9')
- continue;
- if (ch == '~' || ch == '^')
- has_suffix = ch;
- break;
- }
-
- if (has_suffix) {
- int num = 0;
- int len1 = cp - name;
- cp++;
- while (cp < name + len)
- num = num * 10 + *cp++ - '0';
- if (!num && len1 == len - 1)
- num = 1;
- if (has_suffix == '^')
- return get_parent(name, len1, oid, num);
- /* else if (has_suffix == '~') -- goes without saying */
- return get_nth_ancestor(name, len1, oid, num);
- }
-
- ret = peel_onion(name, len, oid, lookup_flags);
- if (!ret)
- return 0;
-
- ret = get_oid_basic(name, len, oid, lookup_flags);
- if (!ret)
- return 0;
-
- /* It could be describe output that is "SOMETHING-gXXXX" */
- ret = get_describe_name(name, len, oid);
- if (!ret)
- return 0;
-
- return get_short_oid(name, len, oid, lookup_flags);
-}
-
-/*
- * This interprets names like ':/Initial revision of "git"' by searching
- * through history and returning the first commit whose message matches
- * the given regular expression.
- *
- * For negative-matching, prefix the pattern-part with '!-', like: ':/!-WIP'.
- *
- * For a literal '!' character at the beginning of a pattern, you have to repeat
- * that, like: ':/!!foo'
- *
- * For future extension, all other sequences beginning with ':/!' are reserved.
- */
-
-/* Remember to update object flag allocation in object.h */
-#define ONELINE_SEEN (1u<<20)
-
-static int handle_one_ref(const char *path, const struct object_id *oid,
- int flag, void *cb_data)
-{
- struct commit_list **list = cb_data;
- struct object *object = parse_object(oid);
- if (!object)
- return 0;
- if (object->type == OBJ_TAG) {
- object = deref_tag(object, path, strlen(path));
- if (!object)
- return 0;
- }
- if (object->type != OBJ_COMMIT)
- return 0;
- commit_list_insert((struct commit *)object, list);
- return 0;
-}
-
-static int get_oid_oneline(const char *prefix, struct object_id *oid,
- struct commit_list *list)
-{
- struct commit_list *backup = NULL, *l;
- int found = 0;
- int negative = 0;
- regex_t regex;
-
- if (prefix[0] == '!') {
- prefix++;
-
- if (prefix[0] == '-') {
- prefix++;
- negative = 1;
- } else if (prefix[0] != '!') {
- return -1;
- }
- }
-
- if (regcomp(&regex, prefix, REG_EXTENDED))
- return -1;
-
- for (l = list; l; l = l->next) {
- l->item->object.flags |= ONELINE_SEEN;
- commit_list_insert(l->item, &backup);
- }
- while (list) {
- const char *p, *buf;
- struct commit *commit;
- int matches;
-
- commit = pop_most_recent_commit(&list, ONELINE_SEEN);
- if (!parse_object(&commit->object.oid))
- continue;
- buf = get_commit_buffer(commit, NULL);
- p = strstr(buf, "\n\n");
- matches = negative ^ (p && !regexec(&regex, p + 2, 0, NULL, 0));
- unuse_commit_buffer(commit, buf);
-
- if (matches) {
- oidcpy(oid, &commit->object.oid);
- found = 1;
- break;
- }
- }
- regfree(&regex);
- free_commit_list(list);
- for (l = backup; l; l = l->next)
- clear_commit_marks(l->item, ONELINE_SEEN);
- free_commit_list(backup);
- return found ? 0 : -1;
-}
-
-struct grab_nth_branch_switch_cbdata {
- int remaining;
- struct strbuf buf;
-};
-
-static int grab_nth_branch_switch(struct object_id *ooid, struct object_id *noid,
- const char *email, timestamp_t timestamp, int tz,
- const char *message, void *cb_data)
-{
- struct grab_nth_branch_switch_cbdata *cb = cb_data;
- const char *match = NULL, *target = NULL;
- size_t len;
-
- if (skip_prefix(message, "checkout: moving from ", &match))
- target = strstr(match, " to ");
-
- if (!match || !target)
- return 0;
- if (--(cb->remaining) == 0) {
- len = target - match;
- strbuf_reset(&cb->buf);
- strbuf_add(&cb->buf, match, len);
- return 1; /* we are done */
- }
- return 0;
-}
-
-/*
- * Parse @{-N} syntax, return the number of characters parsed
- * if successful; otherwise signal an error with negative value.
- */
-static int interpret_nth_prior_checkout(const char *name, int namelen,
- struct strbuf *buf)
-{
- long nth;
- int retval;
- struct grab_nth_branch_switch_cbdata cb;
- const char *brace;
- char *num_end;
-
- if (namelen < 4)
- return -1;
- if (name[0] != '@' || name[1] != '{' || name[2] != '-')
- return -1;
- brace = memchr(name, '}', namelen);
- if (!brace)
- return -1;
- nth = strtol(name + 3, &num_end, 10);
- if (num_end != brace)
- return -1;
- if (nth <= 0)
- return -1;
- cb.remaining = nth;
- strbuf_init(&cb.buf, 20);
-
- retval = 0;
- if (0 < for_each_reflog_ent_reverse("HEAD", grab_nth_branch_switch, &cb)) {
- strbuf_reset(buf);
- strbuf_addbuf(buf, &cb.buf);
- retval = brace - name + 1;
- }
-
- strbuf_release(&cb.buf);
- return retval;
-}
-
-int get_oid_mb(const char *name, struct object_id *oid)
-{
- struct commit *one, *two;
- struct commit_list *mbs;
- struct object_id oid_tmp;
- const char *dots;
- int st;
-
- dots = strstr(name, "...");
- if (!dots)
- return get_oid(name, oid);
- if (dots == name)
- st = get_oid("HEAD", &oid_tmp);
- else {
- struct strbuf sb;
- strbuf_init(&sb, dots - name);
- strbuf_add(&sb, name, dots - name);
- st = get_oid_committish(sb.buf, &oid_tmp);
- strbuf_release(&sb);
- }
- if (st)
- return st;
- one = lookup_commit_reference_gently(&oid_tmp, 0);
- if (!one)
- return -1;
-
- if (get_oid_committish(dots[3] ? (dots + 3) : "HEAD", &oid_tmp))
- return -1;
- two = lookup_commit_reference_gently(&oid_tmp, 0);
- if (!two)
- return -1;
- mbs = get_merge_bases(one, two);
- if (!mbs || mbs->next)
- st = -1;
- else {
- st = 0;
- oidcpy(oid, &mbs->item->object.oid);
- }
- free_commit_list(mbs);
- return st;
-}
-
-/* parse @something syntax, when 'something' is not {.*} */
-static int interpret_empty_at(const char *name, int namelen, int len, struct strbuf *buf)
-{
- const char *next;
-
- if (len || name[1] == '{')
- return -1;
-
- /* make sure it's a single @, or @@{.*}, not @foo */
- next = memchr(name + len + 1, '@', namelen - len - 1);
- if (next && next[1] != '{')
- return -1;
- if (!next)
- next = name + namelen;
- if (next != name + 1)
- return -1;
-
- strbuf_reset(buf);
- strbuf_add(buf, "HEAD", 4);
- return 1;
-}
-
-static int reinterpret(const char *name, int namelen, int len,
- struct strbuf *buf, unsigned allowed)
-{
- /* we have extra data, which might need further processing */
- struct strbuf tmp = STRBUF_INIT;
- int used = buf->len;
- int ret;
-
- strbuf_add(buf, name + len, namelen - len);
- ret = interpret_branch_name(buf->buf, buf->len, &tmp, allowed);
- /* that data was not interpreted, remove our cruft */
- if (ret < 0) {
- strbuf_setlen(buf, used);
- return len;
- }
- strbuf_reset(buf);
- strbuf_addbuf(buf, &tmp);
- strbuf_release(&tmp);
- /* tweak for size of {-N} versus expanded ref name */
- return ret - used + len;
-}
-
-static void set_shortened_ref(struct strbuf *buf, const char *ref)
-{
- char *s = shorten_unambiguous_ref(ref, 0);
- strbuf_reset(buf);
- strbuf_addstr(buf, s);
- free(s);
-}
-
-static int branch_interpret_allowed(const char *refname, unsigned allowed)
-{
- if (!allowed)
- return 1;
-
- if ((allowed & INTERPRET_BRANCH_LOCAL) &&
- starts_with(refname, "refs/heads/"))
- return 1;
- if ((allowed & INTERPRET_BRANCH_REMOTE) &&
- starts_with(refname, "refs/remotes/"))
- return 1;
-
- return 0;
-}
-
-static int interpret_branch_mark(const char *name, int namelen,
- int at, struct strbuf *buf,
- int (*get_mark)(const char *, int),
- const char *(*get_data)(struct branch *,
- struct strbuf *),
- unsigned allowed)
-{
- int len;
- struct branch *branch;
- struct strbuf err = STRBUF_INIT;
- const char *value;
-
- len = get_mark(name + at, namelen - at);
- if (!len)
- return -1;
-
- if (memchr(name, ':', at))
- return -1;
-
- if (at) {
- char *name_str = xmemdupz(name, at);
- branch = branch_get(name_str);
- free(name_str);
- } else
- branch = branch_get(NULL);
-
- value = get_data(branch, &err);
- if (!value)
- die("%s", err.buf);
-
- if (!branch_interpret_allowed(value, allowed))
- return -1;
-
- set_shortened_ref(buf, value);
- return len + at;
-}
-
-int interpret_branch_name(const char *name, int namelen, struct strbuf *buf,
- unsigned allowed)
-{
- char *at;
- const char *start;
- int len;
-
- if (!namelen)
- namelen = strlen(name);
-
- if (!allowed || (allowed & INTERPRET_BRANCH_LOCAL)) {
- len = interpret_nth_prior_checkout(name, namelen, buf);
- if (!len) {
- return len; /* syntax Ok, not enough switches */
- } else if (len > 0) {
- if (len == namelen)
- return len; /* consumed all */
- else
- return reinterpret(name, namelen, len, buf, allowed);
- }
- }
-
- for (start = name;
- (at = memchr(start, '@', namelen - (start - name)));
- start = at + 1) {
-
- if (!allowed || (allowed & INTERPRET_BRANCH_HEAD)) {
- len = interpret_empty_at(name, namelen, at - name, buf);
- if (len > 0)
- return reinterpret(name, namelen, len, buf,
- allowed);
- }
-
- len = interpret_branch_mark(name, namelen, at - name, buf,
- upstream_mark, branch_get_upstream,
- allowed);
- if (len > 0)
- return len;
-
- len = interpret_branch_mark(name, namelen, at - name, buf,
- push_mark, branch_get_push,
- allowed);
- if (len > 0)
- return len;
- }
-
- return -1;
-}
-
-void strbuf_branchname(struct strbuf *sb, const char *name, unsigned allowed)
-{
- int len = strlen(name);
- int used = interpret_branch_name(name, len, sb, allowed);
-
- if (used < 0)
- used = 0;
- strbuf_add(sb, name + used, len - used);
-}
-
-int strbuf_check_branch_ref(struct strbuf *sb, const char *name)
-{
- if (startup_info->have_repository)
- strbuf_branchname(sb, name, INTERPRET_BRANCH_LOCAL);
- else
- strbuf_addstr(sb, name);
-
- /*
- * This splice must be done even if we end up rejecting the
- * name; builtin/branch.c::copy_or_rename_branch() still wants
- * to see what the name expanded to so that "branch -m" can be
- * used as a tool to correct earlier mistakes.
- */
- strbuf_splice(sb, 0, 0, "refs/heads/", 11);
-
- if (*name == '-' ||
- !strcmp(sb->buf, "refs/heads/HEAD"))
- return -1;
-
- return check_refname_format(sb->buf, 0);
-}
-
-/*
- * This is like "get_oid_basic()", except it allows "object ID expressions",
- * notably "xyz^" for "parent of xyz"
- */
-int get_oid(const char *name, struct object_id *oid)
-{
- struct object_context unused;
- return get_oid_with_context(name, 0, oid, &unused);
-}
-
-
-/*
- * Many callers know that the user meant to name a commit-ish by
- * syntactical positions where the object name appears. Calling this
- * function allows the machinery to disambiguate shorter-than-unique
- * abbreviated object names between commit-ish and others.
- *
- * Note that this does NOT error out when the named object is not a
- * commit-ish. It is merely to give a hint to the disambiguation
- * machinery.
- */
-int get_oid_committish(const char *name, struct object_id *oid)
-{
- struct object_context unused;
- return get_oid_with_context(name, GET_OID_COMMITTISH,
- oid, &unused);
-}
-
-int get_oid_treeish(const char *name, struct object_id *oid)
-{
- struct object_context unused;
- return get_oid_with_context(name, GET_OID_TREEISH,
- oid, &unused);
-}
-
-int get_oid_commit(const char *name, struct object_id *oid)
-{
- struct object_context unused;
- return get_oid_with_context(name, GET_OID_COMMIT,
- oid, &unused);
-}
-
-int get_oid_tree(const char *name, struct object_id *oid)
-{
- struct object_context unused;
- return get_oid_with_context(name, GET_OID_TREE,
- oid, &unused);
-}
-
-int get_oid_blob(const char *name, struct object_id *oid)
-{
- struct object_context unused;
- return get_oid_with_context(name, GET_OID_BLOB,
- oid, &unused);
-}
-
-/* Must be called only when object_name:filename doesn't exist. */
-static void diagnose_invalid_oid_path(const char *prefix,
- const char *filename,
- const struct object_id *tree_oid,
- const char *object_name,
- int object_name_len)
-{
- struct object_id oid;
- unsigned mode;
-
- if (!prefix)
- prefix = "";
-
- if (file_exists(filename))
- die("Path '%s' exists on disk, but not in '%.*s'.",
- filename, object_name_len, object_name);
- if (is_missing_file_error(errno)) {
- char *fullname = xstrfmt("%s%s", prefix, filename);
-
- if (!get_tree_entry(tree_oid, fullname, &oid, &mode)) {
- die("Path '%s' exists, but not '%s'.\n"
- "Did you mean '%.*s:%s' aka '%.*s:./%s'?",
- fullname,
- filename,
- object_name_len, object_name,
- fullname,
- object_name_len, object_name,
- filename);
- }
- die("Path '%s' does not exist in '%.*s'",
- filename, object_name_len, object_name);
- }
-}
-
-/* Must be called only when :stage:filename doesn't exist. */
-static void diagnose_invalid_index_path(int stage,
- const char *prefix,
- const char *filename)
-{
- const struct cache_entry *ce;
- int pos;
- unsigned namelen = strlen(filename);
- struct strbuf fullname = STRBUF_INIT;
-
- if (!prefix)
- prefix = "";
-
- /* Wrong stage number? */
- pos = cache_name_pos(filename, namelen);
- if (pos < 0)
- pos = -pos - 1;
- if (pos < active_nr) {
- ce = active_cache[pos];
- if (ce_namelen(ce) == namelen &&
- !memcmp(ce->name, filename, namelen))
- die("Path '%s' is in the index, but not at stage %d.\n"
- "Did you mean ':%d:%s'?",
- filename, stage,
- ce_stage(ce), filename);
- }
-
- /* Confusion between relative and absolute filenames? */
- strbuf_addstr(&fullname, prefix);
- strbuf_addstr(&fullname, filename);
- pos = cache_name_pos(fullname.buf, fullname.len);
- if (pos < 0)
- pos = -pos - 1;
- if (pos < active_nr) {
- ce = active_cache[pos];
- if (ce_namelen(ce) == fullname.len &&
- !memcmp(ce->name, fullname.buf, fullname.len))
- die("Path '%s' is in the index, but not '%s'.\n"
- "Did you mean ':%d:%s' aka ':%d:./%s'?",
- fullname.buf, filename,
- ce_stage(ce), fullname.buf,
- ce_stage(ce), filename);
- }
-
- if (file_exists(filename))
- die("Path '%s' exists on disk, but not in the index.", filename);
- if (is_missing_file_error(errno))
- die("Path '%s' does not exist (neither on disk nor in the index).",
- filename);
-
- strbuf_release(&fullname);
-}
-
-
-static char *resolve_relative_path(const char *rel)
-{
- if (!starts_with(rel, "./") && !starts_with(rel, "../"))
- return NULL;
-
- if (!is_inside_work_tree())
- die("relative path syntax can't be used outside working tree.");
-
- /* die() inside prefix_path() if resolved path is outside worktree */
- return prefix_path(startup_info->prefix,
- startup_info->prefix ? strlen(startup_info->prefix) : 0,
- rel);
-}
-
-static int get_oid_with_context_1(const char *name,
- unsigned flags,
- const char *prefix,
- struct object_id *oid,
- struct object_context *oc)
-{
- int ret, bracket_depth;
- int namelen = strlen(name);
- const char *cp;
- int only_to_die = flags & GET_OID_ONLY_TO_DIE;
-
- if (only_to_die)
- flags |= GET_OID_QUIETLY;
-
- memset(oc, 0, sizeof(*oc));
- oc->mode = S_IFINVALID;
- strbuf_init(&oc->symlink_path, 0);
- ret = get_oid_1(name, namelen, oid, flags);
- if (!ret)
- return ret;
- /*
- * sha1:path --> object name of path in ent sha1
- * :path -> object name of absolute path in index
- * :./path -> object name of path relative to cwd in index
- * :[0-3]:path -> object name of path in index at stage
- * :/foo -> recent commit matching foo
- */
- if (name[0] == ':') {
- int stage = 0;
- const struct cache_entry *ce;
- char *new_path = NULL;
- int pos;
- if (!only_to_die && namelen > 2 && name[1] == '/') {
- struct commit_list *list = NULL;
-
- for_each_ref(handle_one_ref, &list);
- commit_list_sort_by_date(&list);
- return get_oid_oneline(name + 2, oid, list);
- }
- if (namelen < 3 ||
- name[2] != ':' ||
- name[1] < '0' || '3' < name[1])
- cp = name + 1;
- else {
- stage = name[1] - '0';
- cp = name + 3;
- }
- new_path = resolve_relative_path(cp);
- if (!new_path) {
- namelen = namelen - (cp - name);
- } else {
- cp = new_path;
- namelen = strlen(cp);
- }
-
- if (flags & GET_OID_RECORD_PATH)
- oc->path = xstrdup(cp);
-
- if (!active_cache)
- read_cache();
- pos = cache_name_pos(cp, namelen);
- if (pos < 0)
- pos = -pos - 1;
- while (pos < active_nr) {
- ce = active_cache[pos];
- if (ce_namelen(ce) != namelen ||
- memcmp(ce->name, cp, namelen))
- break;
- if (ce_stage(ce) == stage) {
- oidcpy(oid, &ce->oid);
- oc->mode = ce->ce_mode;
- free(new_path);
- return 0;
- }
- pos++;
- }
- if (only_to_die && name[1] && name[1] != '/')
- diagnose_invalid_index_path(stage, prefix, cp);
- free(new_path);
- return -1;
- }
- for (cp = name, bracket_depth = 0; *cp; cp++) {
- if (*cp == '{')
- bracket_depth++;
- else if (bracket_depth && *cp == '}')
- bracket_depth--;
- else if (!bracket_depth && *cp == ':')
- break;
- }
- if (*cp == ':') {
- struct object_id tree_oid;
- int len = cp - name;
- unsigned sub_flags = flags;
-
- sub_flags &= ~GET_OID_DISAMBIGUATORS;
- sub_flags |= GET_OID_TREEISH;
-
- if (!get_oid_1(name, len, &tree_oid, sub_flags)) {
- const char *filename = cp+1;
- char *new_filename = NULL;
-
- new_filename = resolve_relative_path(filename);
- if (new_filename)
- filename = new_filename;
- if (flags & GET_OID_FOLLOW_SYMLINKS) {
- ret = get_tree_entry_follow_symlinks(tree_oid.hash,
- filename, oid->hash, &oc->symlink_path,
- &oc->mode);
- } else {
- ret = get_tree_entry(&tree_oid, filename, oid,
- &oc->mode);
- if (ret && only_to_die) {
- diagnose_invalid_oid_path(prefix,
- filename,
- &tree_oid,
- name, len);
- }
- }
- hashcpy(oc->tree, tree_oid.hash);
- if (flags & GET_OID_RECORD_PATH)
- oc->path = xstrdup(filename);
-
- free(new_filename);
- return ret;
- } else {
- if (only_to_die)
- die("Invalid object name '%.*s'.", len, name);
- }
- }
- return ret;
-}
-
-/*
- * Call this function when you know "name" given by the end user must
- * name an object but it doesn't; the function _may_ die with a better
- * diagnostic message than "no such object 'name'", e.g. "Path 'doc' does not
- * exist in 'HEAD'" when given "HEAD:doc", or it may return in which case
- * you have a chance to diagnose the error further.
- */
-void maybe_die_on_misspelt_object_name(const char *name, const char *prefix)
-{
- struct object_context oc;
- struct object_id oid;
- get_oid_with_context_1(name, GET_OID_ONLY_TO_DIE, prefix, &oid, &oc);
-}
-
-int get_oid_with_context(const char *str, unsigned flags, struct object_id *oid, struct object_context *oc)
-{
- if (flags & GET_OID_FOLLOW_SYMLINKS && flags & GET_OID_ONLY_TO_DIE)
- die("BUG: incompatible flags for get_sha1_with_context");
- return get_oid_with_context_1(str, flags, NULL, oid, oc);
-}
#include "cache.h"
#include "quote.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "strbuf.h"
#include "run-command.h"
*
* Examples:
*
- * To just print the mtime use --verbose and set the file mtime offset to 0:
+ * To print the mtime and the file name use --verbose and set
+ * the file mtime offset to 0:
*
* test-tool chmtime -v +0 file
*
+ * To print only the mtime use --get:
+ *
+ * test-tool chmtime --get file
+ *
* To set the mtime to current time:
*
* test-tool chmtime =+0 file
*
+ * To set the file mtime offset to +1 and print the new value:
+ *
+ * test-tool chmtime --get +1 file
+ *
*/
#include "test-tool.h"
#include "git-compat-util.h"
#include <utime.h>
-static const char usage_str[] = "-v|--verbose (+|=|=+|=-|-)<seconds> <file>...";
+static const char usage_str[] =
+ "(-v|--verbose|-g|--get) (+|=|=+|=-|-)<seconds> <file>...";
static int timespec_arg(const char *arg, long int *set_time, int *set_eq)
{
}
*set_time = strtol(timespec, &test, 10);
if (*test) {
- fprintf(stderr, "Not a base-10 integer: %s\n", arg + 1);
return 0;
}
if ((*set_eq && *set_time < 0) || *set_eq == 2) {
int cmd__chmtime(int argc, const char **argv)
{
static int verbose;
+ static int get;
int i = 1;
/* no mtime change by default */
if (argc < 3)
goto usage;
- if (strcmp(argv[i], "--verbose") == 0 || strcmp(argv[i], "-v") == 0) {
+ if (strcmp(argv[i], "--get") == 0 || strcmp(argv[i], "-g") == 0) {
+ get = 1;
+ ++i;
+ } else if (strcmp(argv[i], "--verbose") == 0 || strcmp(argv[i], "-v") == 0) {
verbose = 1;
++i;
}
- if (timespec_arg(argv[i], &set_time, &set_eq))
+
+ if (i == argc) {
+ goto usage;
+ }
+
+ if (timespec_arg(argv[i], &set_time, &set_eq)) {
++i;
- else
+ } else {
+ if (get == 0) {
+ fprintf(stderr, "Not a base-10 integer: %s\n", argv[i] + 1);
+ goto usage;
+ }
+ }
+
+ if (i == argc)
goto usage;
for (; i < argc; i++) {
struct stat sb;
struct utimbuf utb;
+ uintmax_t mtime;
if (stat(argv[i], &sb) < 0) {
fprintf(stderr, "Failed to stat %s: %s\n",
utb.actime = sb.st_atime;
utb.modtime = set_eq ? set_time : sb.st_mtime + set_time;
- if (verbose) {
- uintmax_t mtime = utb.modtime < 0 ? 0: utb.modtime;
+ mtime = utb.modtime < 0 ? 0: utb.modtime;
+ if (get) {
+ printf("%"PRIuMAX"\n", mtime);
+ } else if (verbose) {
printf("%"PRIuMAX"\t%s\n", mtime, argv[i]);
}
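The test updates further down replace the old "chmtime -v +0 ... | cut/sed" parsing with the new flag; a minimal sketch of the intended pattern (the file name is illustrative):

	test-tool chmtime =1234567890 file &&        # set an absolute mtime
	mtime=$(test-tool chmtime --get file) &&     # print only the mtime, without the file name
	test 1234567890 = "$mtime"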
--- /dev/null
+#include "pkt-line.h"
+
+static void pack_line(const char *line)
+{
+ if (!strcmp(line, "0000") || !strcmp(line, "0000\n"))
+ packet_flush(1);
+ else if (!strcmp(line, "0001") || !strcmp(line, "0001\n"))
+ packet_delim(1);
+ else
+ packet_write_fmt(1, "%s", line);
+}
+
+static void pack(int argc, const char **argv)
+{
+ if (argc) { /* read from argv */
+ int i;
+ for (i = 0; i < argc; i++)
+ pack_line(argv[i]);
+ } else { /* read from stdin */
+ char line[LARGE_PACKET_MAX];
+ while (fgets(line, sizeof(line), stdin)) {
+ pack_line(line);
+ }
+ }
+}
+
+static void unpack(void)
+{
+ struct packet_reader reader;
+ packet_reader_init(&reader, 0, NULL, 0,
+ PACKET_READ_GENTLE_ON_EOF |
+ PACKET_READ_CHOMP_NEWLINE);
+
+ while (packet_reader_read(&reader) != PACKET_READ_EOF) {
+ switch (reader.status) {
+ case PACKET_READ_EOF:
+ break;
+ case PACKET_READ_NORMAL:
+ printf("%s\n", reader.line);
+ break;
+ case PACKET_READ_FLUSH:
+ printf("0000\n");
+ break;
+ case PACKET_READ_DELIM:
+ printf("0001\n");
+ break;
+ }
+ }
+}
+
+int cmd_main(int argc, const char **argv)
+{
+ if (argc < 2)
+ die("too few arguments");
+
+ if (!strcmp(argv[1], "pack"))
+ pack(argc - 2, argv + 2);
+ else if (!strcmp(argv[1], "unpack"))
+ unpack();
+ else
+ die("invalid argument '%s'", argv[1]);
+
+ return 0;
+}
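The helper is driven from shell; a minimal round-trip sketch, mirroring how the protocol-v2 tests below use it together with `git serve`:

	test-pkt-line pack >in <<-EOF &&
	command=ls-refs
	0001
	ref-prefix refs/heads/
	0000
	EOF
	git serve --stateless-rpc <in >out &&
	test-pkt-line unpack <out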
--- /dev/null
+#!/bin/sh
+
+# Read a line coming from `./aggregate.perl --sort-by regression ...`
+# and automatically bisect to find the commit responsible for the
+# performance regression.
+#
+# Lines from `./aggregate.perl --sort-by regression ...` look like:
+#
+# +100.0% p7821-grep-engines-fixed.1 0.04(0.10+0.03) 0.08(0.11+0.08) v2.14.3 v2.15.1
+# +33.3% p7820-grep-engines.1 0.03(0.08+0.02) 0.04(0.08+0.02) v2.14.3 v2.15.1
+#
+
+die () {
+ echo >&2 "error: $*"
+ exit 1
+}
+
+while [ $# -gt 0 ]; do
+ arg="$1"
+ case "$arg" in
+ --help)
+ echo "usage: $0 [--config file] [--subsection subsection]"
+ exit 0
+ ;;
+ --config)
+ shift
+ GIT_PERF_CONFIG_FILE=$(cd "$(dirname "$1")"; pwd)/$(basename "$1")
+ export GIT_PERF_CONFIG_FILE
+ shift ;;
+ --subsection)
+ shift
+ GIT_PERF_SUBSECTION="$1"
+ export GIT_PERF_SUBSECTION
+ shift ;;
+ --*)
+ die "unrecognised option: '$arg'" ;;
+ *)
+ die "unknown argument '$arg'"
+ ;;
+ esac
+done
+
+read -r regression subtest oldtime newtime oldrev newrev
+
+test_script=$(echo "$subtest" | sed -e 's/\(.*\)\.[0-9]*$/\1.sh/')
+test_number=$(echo "$subtest" | sed -e 's/.*\.\([0-9]*\)$/\1/')
+
+# oldtime and newtime are decimal numbers, not integers
+
+oldtime=$(echo "$oldtime" | sed -e 's/^\([0-9]\+\.[0-9]\+\).*$/\1/')
+newtime=$(echo "$newtime" | sed -e 's/^\([0-9]\+\.[0-9]\+\).*$/\1/')
+
+test $(echo "$newtime" "$oldtime" | awk '{ print ($1 > $2) }') = 1 ||
+ die "New time '$newtime' should be greater than old time '$oldtime'"
+
+tmpdir=$(mktemp -d -t bisect_regression_XXXXXX) || die "Failed to create temp directory"
+echo "$oldtime" >"$tmpdir/oldtime" || die "Failed to write to '$tmpdir/oldtime'"
+echo "$newtime" >"$tmpdir/newtime" || die "Failed to write to '$tmpdir/newtime'"
+
+# Bisecting must be performed from the top level directory (even with --no-checkout)
+(
+ toplevel_dir=$(git rev-parse --show-toplevel) || die "Failed to find top level directory"
+ cd "$toplevel_dir" || die "Failed to cd into top level directory '$toplevel_dir'"
+
+ git bisect start --no-checkout "$newrev" "$oldrev" || die "Failed to start bisecting"
+
+ git bisect run t/perf/bisect_run_script "$test_script" "$test_number" "$tmpdir"
+ res="$?"
+
+ git bisect reset
+
+ exit "$res"
+)
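A typical invocation pipes the worst regression reported by aggregate.perl into the script; a sketch (the aggregate.perl arguments stay elided as in the header comment above, and `--config`/`--subsection` are optional, as shown by `--help`):

	./aggregate.perl --sort-by regression ... | head -n 1 | ./bisect_regression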
--- /dev/null
+#!/bin/sh
+
+script="$1"
+test_number="$2"
+info_dir="$3"
+
+# This aborts the bisection immediately
+die () {
+ echo >&2 "error: $*"
+ exit 255
+}
+
+bisect_head=$(git rev-parse --verify BISECT_HEAD) || die "Failed to find BISECT_HEAD ref"
+
+script_number=$(echo "$script" | sed -e "s/^p\([0-9]*\).*\$/\1/") || die "Failed to get script number for '$script'"
+
+oldtime=$(cat "$info_dir/oldtime") || die "Failed to access '$info_dir/oldtime'"
+newtime=$(cat "$info_dir/newtime") || die "Failed to access '$info_dir/newtime'"
+
+cd t/perf || die "Failed to cd into 't/perf'"
+
+result_file="$info_dir/perf_${script_number}_${bisect_head}_results.txt"
+
+GIT_PERF_DIRS_OR_REVS="$bisect_head"
+export GIT_PERF_DIRS_OR_REVS
+
+./run "$script" >"$result_file" 2>&1 || die "Failed to run perf test '$script'"
+
+rtime=$(sed -n "s/^$script_number\.$test_number:.*\([0-9]\+\.[0-9]\+\)(.*).*\$/\1/p" "$result_file")
+
+echo "newtime: $newtime"
+echo "rtime: $rtime"
+echo "oldtime: $oldtime"
+
+# Compare ($newtime - $rtime) with ($rtime - $oldtime)
+# Times are decimal numbers, not integers
+
+if test $(echo "$newtime" "$rtime" "$oldtime" | awk '{ print ($1 - $2 > $2 - $3) }') = 1
+then
+ # Current commit is considered "good/old"
+ echo "$rtime" >"$info_dir/oldtime"
+ exit 0
+else
+ # Current commit is considered "bad/new"
+ echo "$rtime" >"$info_dir/newtime"
+ exit 1
+fi
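The good/bad decision above relies on the standard `git bisect run` exit-code convention; for reference:

	# 0        -> mark BISECT_HEAD as good/old
	# 1..127   -> mark BISECT_HEAD as bad/new (125 means the commit cannot be tested and is skipped)
	# 128..255 -> abort the whole bisection, which is why the die helper above exits with 255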
#!/bin/sh
-case "$1" in
+die () {
+ echo >&2 "error: $*"
+ exit 1
+}
+
+while [ $# -gt 0 ]; do
+ arg="$1"
+ case "$arg" in
+ --)
+ break ;;
--help)
- echo "usage: $0 [--config file] [other_git_tree...] [--] [test_scripts]"
- exit 0
- ;;
+ echo "usage: $0 [--config file] [--subsection subsec] [other_git_tree...] [--] [test_scripts]"
+ exit 0 ;;
--config)
shift
GIT_PERF_CONFIG_FILE=$(cd "$(dirname "$1")"; pwd)/$(basename "$1")
export GIT_PERF_CONFIG_FILE
shift ;;
-esac
-
-die () {
- echo >&2 "error: $*"
- exit 1
-}
+ --subsection)
+ shift
+ GIT_PERF_SUBSECTION="$1"
+ export GIT_PERF_SUBSECTION
+ shift ;;
+ --*)
+ die "unrecognised option: '$arg'" ;;
+ *)
+ break ;;
+ esac
+done
run_one_dir () {
if test $# -eq 0; then
if test $(wc -l <test-results/run_subsections.names) -eq 0
then
+ if test -n "$GIT_PERF_SUBSECTION"
+ then
+ if test -n "$GIT_PERF_CONFIG_FILE"
+ then
+ die "no subsections are defined in config file '$GIT_PERF_CONFIG_FILE'"
+ else
+ die "subsection '$GIT_PERF_SUBSECTION' defined without a config file"
+ fi
+ fi
(
run_subsection "$@"
)
+elif test -n "$GIT_PERF_SUBSECTION"
+then
+ egrep "^$GIT_PERF_SUBSECTION\$" test-results/run_subsections.names >/dev/null ||
+ die "subsection '$GIT_PERF_SUBSECTION' not found in '$GIT_PERF_CONFIG_FILE'"
+
+ egrep "^$GIT_PERF_SUBSECTION\$" test-results/run_subsections.names | while read -r subsec
+ do
+ (
+ GIT_PERF_SUBSECTION="$subsec"
+ export GIT_PERF_SUBSECTION
+ echo "======== Run for subsection '$GIT_PERF_SUBSECTION' ========"
+ run_subsection "$@"
+ )
+ done
else
while read -r subsec
do
)
'
+test_expect_success 'refs work with relative gitdir and work tree' '
+ git init relative &&
+ git -C relative commit --allow-empty -m one &&
+ git -C relative commit --allow-empty -m two &&
+
+ GIT_DIR=relative/.git GIT_WORK_TREE=relative git reset HEAD^ &&
+
+ git -C relative log -1 --format=%s >actual &&
+ echo one >expect &&
+ test_cmp expect actual
+'
+
test_done
git checkout HEAD -- file1 file2 &&
echo one >expect &&
test_cmp expect file1 &&
- echo "1000000000 file2" >expect &&
- test-tool chmtime -v +0 file2 >actual &&
+ echo "1000000000" >expect &&
+ test-tool chmtime --get file2 >actual &&
test_cmp expect actual
'
'
test_expect_success 'move worktree' '
- toplevel="$(pwd)" &&
git worktree move source destination &&
test_path_is_missing source &&
git worktree list --porcelain >out &&
- grep "^worktree.*/destination" out &&
- ! grep "^worktree.*/source" out &&
+ grep "^worktree.*/destination$" out &&
+ ! grep "^worktree.*/source$" out &&
git -C destination log --format=%s >actual2 &&
echo init >expected2 &&
test_cmp expected2 actual2
test_when_finished "git worktree move some-dir/destination destination" &&
test_path_is_missing destination &&
git worktree list --porcelain >out &&
- grep "^worktree.*/some-dir/destination" out &&
+ grep "^worktree.*/some-dir/destination$" out &&
git -C some-dir/destination log --format=%s >actual2 &&
echo init >expected2 &&
test_cmp expected2 actual2
test_description='git branch assorted tests'
. ./test-lib.sh
+. "$TEST_DIRECTORY"/lib-rebase.sh
test_expect_success 'prepare a trivial repository' '
echo Hello >A &&
test_must_fail git branch --merged HEAD --no-merged HEAD
'
+test_expect_success '--list during rebase' '
+ test_when_finished "reset_rebase" &&
+ git checkout master &&
+ FAKE_LINES="1 edit 2" &&
+ export FAKE_LINES &&
+ set_fake_editor &&
+ git rebase -i HEAD~2 &&
+ git branch --list >actual &&
+ test_i18ngrep "rebasing master" actual
+'
+
+test_expect_success '--list during rebase from detached HEAD' '
+ test_when_finished "reset_rebase && git checkout master" &&
+ git checkout master^0 &&
+ oid=$(git rev-parse --short HEAD) &&
+ FAKE_LINES="1 edit 2" &&
+ export FAKE_LINES &&
+ set_fake_editor &&
+ git rebase -i HEAD~2 &&
+ git branch --list >actual &&
+ test_i18ngrep "rebasing detached HEAD $oid" actual
+'
+
test_expect_success 'tracking with unexpected .fetch refspec' '
rm -rf a b c d &&
git init a &&
set_fake_editor &&
git rebase -i HEAD~4 &&
test $HEAD = $(git rev-parse HEAD) &&
- MTIME=$(test-tool chmtime -v +0 file3 | sed 's/[^0-9].*$//') &&
+ MTIME=$(test-tool chmtime --get file3) &&
test 123456789 = $MTIME
'
"
}
test_run_rebase success ''
-test_run_rebase failure -m
+test_run_rebase success -m
test_run_rebase success -i
test_run_rebase failure -p
"
}
test_run_rebase success ''
-test_run_rebase failure -m
-test_run_rebase failure -i
+test_run_rebase success -m
+test_run_rebase success -i
test_run_rebase failure -p
# m
a
EOF
+# Expected commit message for initial commit after rebase --signoff
+cat >expected-initial-signed <<EOF
+Initial empty commit
+
+Signed-off-by: $(git var GIT_COMMITTER_IDENT | sed -e "s/>.*/>/")
+EOF
+
# Expected commit message after rebase --signoff
cat >expected-signed <<EOF
first
test_cmp expected-unsigned actual
'
+test_expect_success 'rebase --exec --signoff adds a sign-off line' '
+ test_when_finished "rm exec" &&
+ git commit --amend -m "first" &&
+ git rebase --exec "touch exec" --signoff HEAD^ &&
+ test_path_is_file exec &&
+ git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
+ test_cmp expected-signed actual
+'
+
+test_expect_success 'rebase --root --signoff adds a sign-off line' '
+ git commit --amend -m "first" &&
+ git rebase --root --keep-empty --signoff &&
+ git cat-file commit HEAD^ | sed -e "1,/^\$/d" >actual &&
+ test_cmp expected-initial-signed actual &&
+ git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
+ test_cmp expected-signed actual
+'
+
+test_expect_success 'rebase -i --signoff fails' '
+ git commit --amend -m "first" &&
+ git rebase -i --signoff HEAD^ &&
+ git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
+ test_cmp expected-signed actual
+'
+
+test_expect_success 'rebase -m --signoff fails' '
+ git commit --amend -m "first" &&
+ git rebase -m --signoff HEAD^ &&
+ git cat-file commit HEAD | sed -e "1,/^\$/d" >actual &&
+ test_cmp expected-signed actual
+'
test_done
test_expect_success 'cherry-pick does not implicitly stomp an existing operation' '
pristine_detach initial &&
test_expect_code 1 git cherry-pick base..anotherpick &&
- test-tool chmtime -v +0 .git/sequencer >expect &&
+ test-tool chmtime --get .git/sequencer >expect &&
test_expect_code 128 git cherry-pick unrelatedpick &&
- test-tool chmtime -v +0 .git/sequencer >actual &&
+ test-tool chmtime --get .git/sequencer >actual &&
test_cmp expect actual
'
git commit -q -a -m "prefer first over second" &&
test -f $rr/postimage &&
- oldmtimepost=$(test-tool chmtime -v -60 $rr/postimage | cut -f 1) &&
+ oldmtimepost=$(test-tool chmtime --get -60 $rr/postimage) &&
git checkout -b third master &&
git show second^:a1 | sed "s/To die: t/To die! T/" >a1 &&
'
test_expect_success 'rerere updates postimage timestamp' '
- newmtimepost=$(test-tool chmtime -v +0 $rr/postimage | cut -f 1) &&
+ newmtimepost=$(test-tool chmtime --get $rr/postimage) &&
test $oldmtimepost -lt $newmtimepost
'
count_pre_post 2 0 &&
# Pretend that the conflicts were made quite some time ago
- find .git/rr-cache/ -type f | xargs test-tool chmtime -172800 &&
+ test-tool chmtime -172800 $(find .git/rr-cache/ -type f) &&
# Unresolved entries have not expired yet
git -c gc.rerereresolved=5 -c gc.rerereunresolved=5 rerere gc &&
git rerere &&
# Pretend that the resolutions are old again
- find .git/rr-cache/ -type f | xargs test-tool chmtime -172800 &&
+ test-tool chmtime -172800 $(find .git/rr-cache/ -type f) &&
# Resolved entries have not expired yet
git -c gc.rerereresolved=5 -c gc.rerereunresolved=5 rerere gc &&
'validate file modification time' \
'mkdir extract &&
"$TAR" xf b.tar -C extract a/a &&
- test-tool chmtime -v +0 extract/a/a |cut -f 1 >b.mtime &&
+ test-tool chmtime --get extract/a/a >b.mtime &&
echo "1117231200" >expected.mtime &&
test_cmp expected.mtime b.mtime'
test_expect_success 'already deleted tracking branches ignored' '
git branch -d -r origin/b3 &&
git push origin :b3 >output 2>&1 &&
- ! grep error output
+ ! grep "^error: " output
'
test_done
test_description='test git-http-backend'
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-httpd.sh
+
+if ! test_have_prereq CURL; then
+ skip_all='skipping raw http-backend tests, curl not available'
+ test_done
+fi
+
start_httpd
GET() {
- curl --include "$HTTPD_URL/$SMART/repo.git/$1" >out 2>/dev/null &&
+ curl --include "$HTTPD_URL/$SMART/repo.git/$1" >out &&
tr '\015' Q <out |
sed '
s/Q$//
POST() {
curl --include --data "$2" \
--header "Content-Type: application/x-$1-request" \
- "$HTTPD_URL/smart/repo.git/$1" >out 2>/dev/null &&
+ "$HTTPD_URL/smart/repo.git/$1" >out &&
tr '\015' Q <out |
sed '
s/Q$//
--- /dev/null
+#!/bin/sh
+
+test_description='test git-serve and server commands'
+
+. ./test-lib.sh
+
+test_expect_success 'test capability advertisement' '
+ cat >expect <<-EOF &&
+ version 2
+ agent=git/$(git version | cut -d" " -f3)
+ ls-refs
+ fetch=shallow
+ 0000
+ EOF
+
+ git serve --advertise-capabilities >out &&
+ test-pkt-line unpack <out >actual &&
+ test_cmp actual expect
+'
+
+test_expect_success 'stateless-rpc flag does not list capabilities' '
+ # Empty request
+ test-pkt-line pack >in <<-EOF &&
+ 0000
+ EOF
+ git serve --stateless-rpc >out <in &&
+ test_must_be_empty out &&
+
+ # EOF
+ git serve --stateless-rpc >out &&
+ test_must_be_empty out
+'
+
+test_expect_success 'request invalid capability' '
+ test-pkt-line pack >in <<-EOF &&
+ foobar
+ 0000
+ EOF
+ test_must_fail git serve --stateless-rpc 2>err <in &&
+ test_i18ngrep "unknown capability" err
+'
+
+test_expect_success 'request with no command' '
+ test-pkt-line pack >in <<-EOF &&
+ agent=git/test
+ 0000
+ EOF
+ test_must_fail git serve --stateless-rpc 2>err <in &&
+ test_i18ngrep "no command requested" err
+'
+
+test_expect_success 'request invalid command' '
+ test-pkt-line pack >in <<-EOF &&
+ command=foo
+ agent=git/test
+ 0000
+ EOF
+ test_must_fail git serve --stateless-rpc 2>err <in &&
+ test_i18ngrep "invalid command" err
+'
+
+# Test the basics of ls-refs
+#
+test_expect_success 'setup some refs and tags' '
+ test_commit one &&
+ git branch dev master &&
+ test_commit two &&
+ git symbolic-ref refs/heads/release refs/heads/master &&
+ git tag -a -m "annotated tag" annotated-tag
+'
+
+test_expect_success 'basics of ls-refs' '
+ test-pkt-line pack >in <<-EOF &&
+ command=ls-refs
+ 0000
+ EOF
+
+ cat >expect <<-EOF &&
+ $(git rev-parse HEAD) HEAD
+ $(git rev-parse refs/heads/dev) refs/heads/dev
+ $(git rev-parse refs/heads/master) refs/heads/master
+ $(git rev-parse refs/heads/release) refs/heads/release
+ $(git rev-parse refs/tags/annotated-tag) refs/tags/annotated-tag
+ $(git rev-parse refs/tags/one) refs/tags/one
+ $(git rev-parse refs/tags/two) refs/tags/two
+ 0000
+ EOF
+
+ git serve --stateless-rpc <in >out &&
+ test-pkt-line unpack <out >actual &&
+ test_cmp actual expect
+'
+
+test_expect_success 'basic ref-prefixes' '
+ test-pkt-line pack >in <<-EOF &&
+ command=ls-refs
+ 0001
+ ref-prefix refs/heads/master
+ ref-prefix refs/tags/one
+ 0000
+ EOF
+
+ cat >expect <<-EOF &&
+ $(git rev-parse refs/heads/master) refs/heads/master
+ $(git rev-parse refs/tags/one) refs/tags/one
+ 0000
+ EOF
+
+ git serve --stateless-rpc <in >out &&
+ test-pkt-line unpack <out >actual &&
+ test_cmp actual expect
+'
+
+test_expect_success 'refs/heads prefix' '
+ test-pkt-line pack >in <<-EOF &&
+ command=ls-refs
+ 0001
+ ref-prefix refs/heads/
+ 0000
+ EOF
+
+ cat >expect <<-EOF &&
+ $(git rev-parse refs/heads/dev) refs/heads/dev
+ $(git rev-parse refs/heads/master) refs/heads/master
+ $(git rev-parse refs/heads/release) refs/heads/release
+ 0000
+ EOF
+
+ git serve --stateless-rpc <in >out &&
+ test-pkt-line unpack <out >actual &&
+ test_cmp actual expect
+'
+
+test_expect_success 'peel parameter' '
+ test-pkt-line pack >in <<-EOF &&
+ command=ls-refs
+ 0001
+ peel
+ ref-prefix refs/tags/
+ 0000
+ EOF
+
+ cat >expect <<-EOF &&
+ $(git rev-parse refs/tags/annotated-tag) refs/tags/annotated-tag peeled:$(git rev-parse refs/tags/annotated-tag^{})
+ $(git rev-parse refs/tags/one) refs/tags/one
+ $(git rev-parse refs/tags/two) refs/tags/two
+ 0000
+ EOF
+
+ git serve --stateless-rpc <in >out &&
+ test-pkt-line unpack <out >actual &&
+ test_cmp actual expect
+'
+
+test_expect_success 'symrefs parameter' '
+ test-pkt-line pack >in <<-EOF &&
+ command=ls-refs
+ 0001
+ symrefs
+ ref-prefix refs/heads/
+ 0000
+ EOF
+
+ cat >expect <<-EOF &&
+ $(git rev-parse refs/heads/dev) refs/heads/dev
+ $(git rev-parse refs/heads/master) refs/heads/master
+ $(git rev-parse refs/heads/release) refs/heads/release symref-target:refs/heads/master
+ 0000
+ EOF
+
+ git serve --stateless-rpc <in >out &&
+ test-pkt-line unpack <out >actual &&
+ test_cmp actual expect
+'
+
+test_done
--- /dev/null
+#!/bin/sh
+
+test_description='test git wire-protocol version 2'
+
+TEST_NO_CREATE_REPO=1
+
+. ./test-lib.sh
+
+# Test protocol v2 with 'git://' transport
+#
+. "$TEST_DIRECTORY"/lib-git-daemon.sh
+start_git_daemon --export-all --enable=receive-pack
+daemon_parent=$GIT_DAEMON_DOCUMENT_ROOT_PATH/parent
+
+test_expect_success 'create repo to be served by git-daemon' '
+ git init "$daemon_parent" &&
+ test_commit -C "$daemon_parent" one
+'
+
+test_expect_success 'list refs with git:// using protocol v2' '
+ test_when_finished "rm -f log" &&
+
+ GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+ ls-remote --symref "$GIT_DAEMON_URL/parent" >actual &&
+
+ # Client requested to use protocol v2
+ grep "git> .*\\\0\\\0version=2\\\0$" log &&
+ # Server responded using protocol v2
+ grep "git< version 2" log &&
+
+ git ls-remote --symref "$GIT_DAEMON_URL/parent" >expect &&
+ test_cmp actual expect
+'
+
+test_expect_success 'ref advertisement is filtered with ls-remote using protocol v2' '
+ test_when_finished "rm -f log" &&
+
+ GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+ ls-remote "$GIT_DAEMON_URL/parent" master >actual &&
+
+ cat >expect <<-EOF &&
+ $(git -C "$daemon_parent" rev-parse refs/heads/master)$(printf "\t")refs/heads/master
+ EOF
+
+ test_cmp actual expect
+'
+
+test_expect_success 'clone with git:// using protocol v2' '
+ test_when_finished "rm -f log" &&
+
+ GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+ clone "$GIT_DAEMON_URL/parent" daemon_child &&
+
+ git -C daemon_child log -1 --format=%s >actual &&
+ git -C "$daemon_parent" log -1 --format=%s >expect &&
+ test_cmp expect actual &&
+
+ # Client requested to use protocol v2
+ grep "clone> .*\\\0\\\0version=2\\\0$" log &&
+ # Server responded using protocol v2
+ grep "clone< version 2" log
+'
+
+test_expect_success 'fetch with git:// using protocol v2' '
+ test_when_finished "rm -f log" &&
+
+ test_commit -C "$daemon_parent" two &&
+
+ GIT_TRACE_PACKET="$(pwd)/log" git -C daemon_child -c protocol.version=2 \
+ fetch &&
+
+ git -C daemon_child log -1 --format=%s origin/master >actual &&
+ git -C "$daemon_parent" log -1 --format=%s >expect &&
+ test_cmp expect actual &&
+
+ # Client requested to use protocol v2
+ grep "fetch> .*\\\0\\\0version=2\\\0$" log &&
+ # Server responded using protocol v2
+ grep "fetch< version 2" log
+'
+
+test_expect_success 'pull with git:// using protocol v2' '
+ test_when_finished "rm -f log" &&
+
+ GIT_TRACE_PACKET="$(pwd)/log" git -C daemon_child -c protocol.version=2 \
+ pull &&
+
+ git -C daemon_child log -1 --format=%s >actual &&
+ git -C "$daemon_parent" log -1 --format=%s >expect &&
+ test_cmp expect actual &&
+
+ # Client requested to use protocol v2
+ grep "fetch> .*\\\0\\\0version=2\\\0$" log &&
+ # Server responded using protocol v2
+ grep "fetch< version 2" log
+'
+
+test_expect_success 'push with git:// and a config of v2 does not request v2' '
+ test_when_finished "rm -f log" &&
+
+ # Till v2 for push is designed, make sure that if a client has
+ # protocol.version configured to use v2, that the client instead falls
+ # back and uses v0.
+
+ test_commit -C daemon_child three &&
+
+ # Push to another branch, as the target repository has the
+ # master branch checked out and we cannot push into it.
+ GIT_TRACE_PACKET="$(pwd)/log" git -C daemon_child -c protocol.version=2 \
+ push origin HEAD:client_branch &&
+
+ git -C daemon_child log -1 --format=%s >actual &&
+ git -C "$daemon_parent" log -1 --format=%s client_branch >expect &&
+ test_cmp expect actual &&
+
+ # Client requested to use protocol v2
+ ! grep "push> .*\\\0\\\0version=2\\\0$" log &&
+ # Server responded using protocol v2
+ ! grep "push< version 2" log
+'
+
+stop_git_daemon
+
+# Test protocol v2 with 'file://' transport
+#
+test_expect_success 'create repo to be served by file:// transport' '
+ git init file_parent &&
+ test_commit -C file_parent one
+'
+
+test_expect_success 'list refs with file:// using protocol v2' '
+ test_when_finished "rm -f log" &&
+
+ GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+ ls-remote --symref "file://$(pwd)/file_parent" >actual &&
+
+ # Server responded using protocol v2
+ grep "git< version 2" log &&
+
+ git ls-remote --symref "file://$(pwd)/file_parent" >expect &&
+ test_cmp actual expect
+'
+
+test_expect_success 'ref advertisement is filtered with ls-remote using protocol v2' '
+ test_when_finished "rm -f log" &&
+
+ GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+ ls-remote "file://$(pwd)/file_parent" master >actual &&
+
+ cat >expect <<-EOF &&
+ $(git -C file_parent rev-parse refs/heads/master)$(printf "\t")refs/heads/master
+ EOF
+
+ test_cmp actual expect
+'
+
+test_expect_success 'clone with file:// using protocol v2' '
+ test_when_finished "rm -f log" &&
+
+ GIT_TRACE_PACKET="$(pwd)/log" git -c protocol.version=2 \
+ clone "file://$(pwd)/file_parent" file_child &&
+
+ git -C file_child log -1 --format=%s >actual &&
+ git -C file_parent log -1 --format=%s >expect &&
+ test_cmp expect actual &&
+
+ # Server responded using protocol v2
+ grep "clone< version 2" log
+'
+
+test_expect_success 'fetch with file:// using protocol v2' '
+ test_when_finished "rm -f log" &&
+
+ test_commit -C file_parent two &&
+
+ GIT_TRACE_PACKET="$(pwd)/log" git -C file_child -c protocol.version=2 \
+ fetch origin &&
+
+ git -C file_child log -1 --format=%s origin/master >actual &&
+ git -C file_parent log -1 --format=%s >expect &&
+ test_cmp expect actual &&
+
+ # Server responded using protocol v2
+ grep "fetch< version 2" log
+'
+
+test_expect_success 'ref advertisement is filtered during fetch using protocol v2' '
+ test_when_finished "rm -f log" &&
+
+ test_commit -C file_parent three &&
+
+ GIT_TRACE_PACKET="$(pwd)/log" git -C file_child -c protocol.version=2 \
+ fetch origin master &&
+
+ git -C file_child log -1 --format=%s origin/master >actual &&
+ git -C file_parent log -1 --format=%s >expect &&
+ test_cmp expect actual &&
+
+ ! grep "refs/tags/one" log &&
+ ! grep "refs/tags/two" log &&
+ ! grep "refs/tags/three" log
+'
+
+# Test protocol v2 with 'http://' transport
+#
+. "$TEST_DIRECTORY"/lib-httpd.sh
+start_httpd
+
+test_expect_success 'create repo to be served by http:// transport' '
+ git init "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" &&
+ git -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" config http.receivepack true &&
+ test_commit -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" one
+'
+
+test_expect_success 'clone with http:// using protocol v2' '
+ test_when_finished "rm -f log" &&
+
+ GIT_TRACE_PACKET="$(pwd)/log" GIT_TRACE_CURL="$(pwd)/log" git -c protocol.version=2 \
+ clone "$HTTPD_URL/smart/http_parent" http_child &&
+
+ git -C http_child log -1 --format=%s >actual &&
+ git -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" log -1 --format=%s >expect &&
+ test_cmp expect actual &&
+
+ # Client requested to use protocol v2
+ grep "Git-Protocol: version=2" log &&
+ # Server responded using protocol v2
+ grep "git< version 2" log
+'
+
+test_expect_success 'fetch with http:// using protocol v2' '
+ test_when_finished "rm -f log" &&
+
+ test_commit -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" two &&
+
+ GIT_TRACE_PACKET="$(pwd)/log" git -C http_child -c protocol.version=2 \
+ fetch &&
+
+ git -C http_child log -1 --format=%s origin/master >actual &&
+ git -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" log -1 --format=%s >expect &&
+ test_cmp expect actual &&
+
+ # Server responded using protocol v2
+ grep "git< version 2" log
+'
+
+test_expect_success 'push with http:// and a config of v2 does not request v2' '
+ test_when_finished "rm -f log" &&
+ # Till v2 for push is designed, make sure that if a client has
+ # protocol.version configured to use v2, that the client instead falls
+ # back and uses v0.
+
+ test_commit -C http_child three &&
+
+ # Push to another branch, as the target repository has the
+ # master branch checked out and we cannot push into it.
+ GIT_TRACE_PACKET="$(pwd)/log" git -C http_child -c protocol.version=2 \
+ push origin HEAD:client_branch &&
+
+ git -C http_child log -1 --format=%s >actual &&
+ git -C "$HTTPD_DOCUMENT_ROOT_PATH/http_parent" log -1 --format=%s client_branch >expect &&
+ test_cmp expect actual &&
+
+ # Client didn't request to use protocol v2
+ ! grep "Git-Protocol: version=2" log &&
+ # Server didn't respond using protocol v2
+ ! grep "git< version 2" log
+'
+
+
+stop_httpd
+
+test_done
test_expect_success 'avoid unnecessary update, normal rename' '
git checkout -q avoid-unnecessary-update-1^0 &&
- test-tool chmtime =1000000000 rename &&
- test-tool chmtime -v +0 rename >expect &&
+ test-tool chmtime --get =1000000000 rename >expect &&
git merge merge-branch-1 &&
- test-tool chmtime -v +0 rename >actual &&
+ test-tool chmtime --get rename >actual &&
test_cmp expect actual # "rename" should have stayed intact
'
test_expect_success 'avoid unnecessary update, with D/F conflict' '
git checkout -q avoid-unnecessary-update-2^0 &&
- test-tool chmtime =1000000000 df &&
- test-tool chmtime -v +0 df >expect &&
+ test-tool chmtime --get =1000000000 df >expect &&
git merge merge-branch-2 &&
- test-tool chmtime -v +0 df >actual &&
+ test-tool chmtime --get df >actual &&
test_cmp expect actual # "df" should have stayed intact
'
test_expect_success 'avoid unnecessary update, dir->(file,nothing)' '
git checkout -q master^0 &&
- test-tool chmtime =1000000000 df &&
- test-tool chmtime -v +0 df >expect &&
+ test-tool chmtime --get =1000000000 df >expect &&
git merge side &&
- test-tool chmtime -v +0 df >actual &&
+ test-tool chmtime --get df >actual &&
test_cmp expect actual # "df" should have stayed intact
'
test_expect_success 'avoid unnecessary update, modify/delete' '
git checkout -q master^0 &&
- test-tool chmtime =1000000000 file &&
- test-tool chmtime -v +0 file >expect &&
+ test-tool chmtime --get =1000000000 file >expect &&
test_must_fail git merge side &&
- test-tool chmtime -v +0 file >actual &&
+ test-tool chmtime --get file >actual &&
test_cmp expect actual # "file" should have stayed intact
'
test_expect_success 'avoid unnecessary update, rename/add-dest' '
git checkout -q master^0 &&
- test-tool chmtime =1000000000 newfile &&
- test-tool chmtime -v +0 newfile >expect &&
+ test-tool chmtime --get =1000000000 newfile >expect &&
git merge side &&
- test-tool chmtime -v +0 newfile >actual &&
+ test-tool chmtime --get newfile >actual &&
test_cmp expect actual # "file" should have stayed intact
'
'
test_expect_success "simulate time passing ($title)" '
- find .git/objects -type f |
- xargs test-tool chmtime -v -86400
+ test-tool chmtime --get -86400 $(find .git/objects -type f)
'
test_expect_success "start writing new commit with old blob ($title)" '
test_expect_success "abandon objects again ($title)" '
git reset --hard HEAD^ &&
- find .git/objects -type f |
- xargs test-tool chmtime -v -86400
+ test-tool chmtime --get -86400 $(find .git/objects -type f)
'
test_expect_success "start writing new commit with same tree ($title)" '
git reset --hard HEAD^
'
+test_expect_success 'removed files and relative paths' '
+ test_when_finished "rm -rf foo" &&
+ git init foo &&
+ >foo/foo.txt &&
+ git -C foo add foo.txt &&
+ git -C foo commit -m first &&
+ git -C foo rm foo.txt &&
+
+ mkdir -p foo/bar &&
+ git -C foo/bar commit -m second ../foo.txt
+'
+
test_expect_success 'using invalid commit with -C' '
test_must_fail git commit --allow-empty -C bogus
'
test_expect_success '--no-optional-locks prevents index update' '
test-tool chmtime =1234567890 .git/index &&
git --no-optional-locks status &&
- test-tool chmtime -v +0 .git/index >out &&
+ test-tool chmtime --get .git/index >out &&
grep ^1234567890 out &&
git status &&
- test-tool chmtime -v +0 .git/index >out &&
+ test-tool chmtime --get .git/index >out &&
! grep ^1234567890 out
'
compare_mtimes ()
{
- read tref rest &&
- while read t rest; do
+ read tref &&
+ while read t; do
test "$tref" = "$t" || return 1
done
}
tmppack=".git/objects/pack/tmp_pack" &&
ln "$packfile" "$tmppack" &&
git repack -A -l -d &&
- test-tool chmtime -v +0 "$tmppack" "$fsha1path" "$csha1path" "$tsha1path" \
+ test-tool chmtime --get "$tmppack" "$fsha1path" "$csha1path" "$tsha1path" \
> mtimes &&
compare_mtimes < mtimes
'
)
'
+cat >> svn-authors <<EOF
+ff = FFFFFFF FFFFFFF <>
+EOF
+
+test_expect_success 'authors-file imported user without email' '
+ svn_cmd mkdir -m aa/branches/ff --username ff "$svnrepo/aa/branches/ff" &&
+ (
+ cd aa-work &&
+ git svn fetch --authors-file=../svn-authors &&
+ git rev-list -1 --pretty=raw refs/remotes/origin/ff | \
+ grep "^author FFFFFFF FFFFFFF <> "
+ )
+ '
+
test_debug 'GIT_DIR=gitconfig.clone/.git git log'
test_done
write_script svn-authors-prog "$PERL_PATH" <<-\EOF
$_ = shift;
- if (s/-sub$//) {
+ if (s/-hermit//) {
+ print "$_ <>\n";
+ } elsif (s/-sub$//) {
print "$_ <$_\@sub.example.com>\n";
} else {
print "$_ <$_\@example.com>\n";
git --git-dir=x/.git config --unset svn.authorsfile
git --git-dir=x/.git config --unset svn.authorsprog
+test_expect_success 'authors-prog imported user without email' '
+ svn mkdir -m gg --username gg-hermit "$svnrepo"/gg &&
+ (
+ cd x &&
+ git svn fetch --authors-prog=../svn-authors-prog &&
+ git rev-list -1 --pretty=raw refs/remotes/git-svn | \
+ grep "^author gg <> "
+ )
+'
+
+test_expect_success 'imported without authors-prog and authors-file' '
+ svn mkdir -m hh --username hh "$svnrepo"/hh &&
+ (
+ uuid=$(svn info "$svnrepo" |
+ sed -n "s/^Repository UUID: //p") &&
+ cd x &&
+ git svn fetch &&
+ git rev-list -1 --pretty=raw refs/remotes/git-svn | \
+ grep "^author hh <hh@$uuid> "
+ )
+'
+
test_expect_success 'authors-prog handled special characters in username' '
svn mkdir -m bad --username "xyz; touch evil" "$svnrepo"/bad &&
(
test_lazy_prereq TIME_IS_64BIT 'test-tool date is64bit'
test_lazy_prereq TIME_T_IS_64BIT 'test-tool date time_t-is64bit'
+
+test_lazy_prereq CURL '
+ curl --version
+'
struct trace_key trace_default_key = { "GIT_TRACE", 0, 0, 0 };
struct trace_key trace_perf_key = TRACE_KEY_INIT(PERFORMANCE);
+struct trace_key trace_setup_key = TRACE_KEY_INIT(SETUP);
/* Get a trace file descriptor from "key" env variable. */
static int get_trace_fd(struct trace_key *key)
/* FIXME: move prefix to startup_info struct and get rid of this arg */
void trace_repo_setup(const char *prefix)
{
- static struct trace_key key = TRACE_KEY_INIT(SETUP);
const char *git_work_tree;
char *cwd;
- if (!trace_want(&key))
+ if (!trace_want(&trace_setup_key))
return;
cwd = xgetcwd();
if (!prefix)
prefix = "(null)";
- trace_printf_key(&key, "setup: git_dir: %s\n", quote_crnl(get_git_dir()));
- trace_printf_key(&key, "setup: git_common_dir: %s\n", quote_crnl(get_git_common_dir()));
- trace_printf_key(&key, "setup: worktree: %s\n", quote_crnl(git_work_tree));
- trace_printf_key(&key, "setup: cwd: %s\n", quote_crnl(cwd));
- trace_printf_key(&key, "setup: prefix: %s\n", quote_crnl(prefix));
+ trace_printf_key(&trace_setup_key, "setup: git_dir: %s\n", quote_crnl(get_git_dir()));
+ trace_printf_key(&trace_setup_key, "setup: git_common_dir: %s\n", quote_crnl(get_git_common_dir()));
+ trace_printf_key(&trace_setup_key, "setup: worktree: %s\n", quote_crnl(git_work_tree));
+ trace_printf_key(&trace_setup_key, "setup: cwd: %s\n", quote_crnl(cwd));
+ trace_printf_key(&trace_setup_key, "setup: prefix: %s\n", quote_crnl(prefix));
free(cwd);
}
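Since the key name is derived from TRACE_KEY_INIT(SETUP), the output is still enabled from the environment in the usual way; for example (the log path is illustrative):

	GIT_TRACE_SETUP=1 git status                    # send the setup: lines to stderr
	GIT_TRACE_SETUP=/tmp/trace.setup git status     # or append them to an absolute path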
#define TRACE_KEY_INIT(name) { "GIT_TRACE_" #name, 0, 0, 0 }
extern struct trace_key trace_perf_key;
+extern struct trace_key trace_setup_key;
extern void trace_repo_setup(const char *prefix);
extern int trace_want(struct trace_key *key);
#include "argv-array.h"
#include "refs.h"
#include "transport-internal.h"
+#include "protocol.h"
static int debug;
option : 1,
push : 1,
connect : 1,
+ stateless_connect : 1,
signed_tags : 1,
check_connectivity : 1,
no_disconnect_req : 1,
die_errno("Full write to remote helper failed");
}
-static int recvline_fh(FILE *helper, struct strbuf *buffer, const char *name)
+static int recvline_fh(FILE *helper, struct strbuf *buffer)
{
strbuf_reset(buffer);
if (debug)
static int recvline(struct helper_data *helper, struct strbuf *buffer)
{
- return recvline_fh(helper->out, buffer, helper->name);
+ return recvline_fh(helper->out, buffer);
}
static void write_constant(int fd, const char *str)
refspecs[refspec_nr++] = xstrdup(arg);
} else if (!strcmp(capname, "connect")) {
data->connect = 1;
+ } else if (!strcmp(capname, "stateless-connect")) {
+ data->stateless_connect = 1;
} else if (!strcmp(capname, "signed-tags")) {
data->signed_tags = 1;
} else if (skip_prefix(capname, "export-marks ", &arg)) {
return 0;
}
-static int process_connect_service(struct transport *transport,
- const char *name, const char *exec)
+static int run_connect(struct transport *transport, struct strbuf *cmdbuf)
{
struct helper_data *data = transport->data;
- struct strbuf cmdbuf = STRBUF_INIT;
- struct child_process *helper;
- int r, duped, ret = 0;
+ int ret = 0;
+ int duped;
FILE *input;
+ struct child_process *helper;
helper = get_helper(transport);
input = xfdopen(duped, "r");
setvbuf(input, NULL, _IONBF, 0);
+ sendline(data, cmdbuf);
+ if (recvline_fh(input, cmdbuf))
+ exit(128);
+
+ if (!strcmp(cmdbuf->buf, "")) {
+ data->no_disconnect_req = 1;
+ if (debug)
+ fprintf(stderr, "Debug: Smart transport connection "
+ "ready.\n");
+ ret = 1;
+ } else if (!strcmp(cmdbuf->buf, "fallback")) {
+ if (debug)
+ fprintf(stderr, "Debug: Falling back to dumb "
+ "transport.\n");
+ } else {
+ die("Unknown response to connect: %s",
+ cmdbuf->buf);
+ }
+
+ fclose(input);
+ return ret;
+}
+
+static int process_connect_service(struct transport *transport,
+ const char *name, const char *exec)
+{
+ struct helper_data *data = transport->data;
+ struct strbuf cmdbuf = STRBUF_INIT;
+ int ret = 0;
+
/*
* Handle --upload-pack and friends. This is fire and forget...
* just warn if it fails.
*/
if (strcmp(name, exec)) {
- r = set_helper_option(transport, "servpath", exec);
+ int r = set_helper_option(transport, "servpath", exec);
if (r > 0)
warning("Setting remote service path not supported by protocol.");
else if (r < 0)
warning("Invalid remote service path.");
}
- if (data->connect)
+ if (data->connect) {
strbuf_addf(&cmdbuf, "connect %s\n", name);
- else
- goto exit;
-
- sendline(data, &cmdbuf);
- if (recvline_fh(input, &cmdbuf, name))
- exit(128);
-
- if (!strcmp(cmdbuf.buf, "")) {
- data->no_disconnect_req = 1;
- if (debug)
- fprintf(stderr, "Debug: Smart transport connection "
- "ready.\n");
- ret = 1;
- } else if (!strcmp(cmdbuf.buf, "fallback")) {
- if (debug)
- fprintf(stderr, "Debug: Falling back to dumb "
- "transport.\n");
- } else
- die("Unknown response to connect: %s",
- cmdbuf.buf);
+ ret = run_connect(transport, &cmdbuf);
+ } else if (data->stateless_connect &&
+ (get_protocol_version_config() == protocol_v2) &&
+ !strcmp("git-upload-pack", name)) {
+ strbuf_addf(&cmdbuf, "stateless-connect %s\n", name);
+ ret = run_connect(transport, &cmdbuf);
+ if (ret)
+ transport->stateless_rpc = 1;
+ }
-exit:
strbuf_release(&cmdbuf);
- fclose(input);
return ret;
}
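A rough sketch of the dialogue run_connect() drives with a remote helper for the new capability (stream contents only; values illustrative):

	git -> helper:  stateless-connect git-upload-pack
	helper -> git:  (empty line)   -> smart transport ready, ret = 1
	helper -> git:  fallback       -> fall back to the dumb transport
	helper -> git:  anything else  -> die("Unknown response to connect: ...")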
}
}
-static struct ref *get_refs_list(struct transport *transport, int for_push)
+static struct ref *get_refs_list(struct transport *transport, int for_push,
+ const struct argv_array *ref_prefixes)
{
struct helper_data *data = transport->data;
struct child_process *helper;
if (process_connect(transport, for_push)) {
do_take_over(transport);
- return transport->vtable->get_refs_list(transport, for_push);
+ return transport->vtable->get_refs_list(transport, for_push, ref_prefixes);
}
if (data->push && for_push)
struct ref;
struct transport;
+struct argv_array;
struct transport_vtable {
/**
* the transport to try to share connections, for_push is a
* hint as to whether the ultimate operation is a push or a fetch.
*
+ * If communicating using protocol v2, a list of prefixes can be
+ * provided to be sent to the server to enable it to limit the ref
+ * advertisement. Since ref filtering is done on the server's end, and
+ * only when using protocol v2, this list will be ignored when not
+ * using protocol v2, meaning this function can return refs which don't
+ * match the provided ref_prefixes.
+ *
* If the transport is able to determine the remote hash for
* the ref without a huge amount of effort, it should store it
* in the ref's old_sha1 field; otherwise it should be all 0.
**/
- struct ref *(*get_refs_list)(struct transport *transport, int for_push);
+ struct ref *(*get_refs_list)(struct transport *transport, int for_push,
+ const struct argv_array *ref_prefixes);
/**
* Fetch the objects for the given refs. Note that this gets
#include "sha1-array.h"
#include "sigchain.h"
#include "transport-internal.h"
+#include "protocol.h"
#include "object-store.h"
static void set_upstreams(struct transport *transport, struct ref *refs,
struct bundle_header header;
};
-static struct ref *get_refs_from_bundle(struct transport *transport, int for_push)
+static struct ref *get_refs_from_bundle(struct transport *transport,
+ int for_push,
+ const struct argv_array *ref_prefixes)
{
struct bundle_transport_data *data = transport->data;
struct ref *result = NULL;
struct child_process *conn;
int fd[2];
unsigned got_remote_heads : 1;
+ enum protocol_version version;
struct oid_array extra_have;
struct oid_array shallow;
};
return 0;
}
-static struct ref *get_refs_via_connect(struct transport *transport, int for_push)
+static struct ref *get_refs_via_connect(struct transport *transport, int for_push,
+ const struct argv_array *ref_prefixes)
{
struct git_transport_data *data = transport->data;
- struct ref *refs;
+ struct ref *refs = NULL;
+ struct packet_reader reader;
connect_setup(transport, for_push);
- get_remote_heads(data->fd[0], NULL, 0, &refs,
- for_push ? REF_NORMAL : 0,
- &data->extra_have,
- &data->shallow);
+
+ packet_reader_init(&reader, data->fd[0], NULL, 0,
+ PACKET_READ_CHOMP_NEWLINE |
+ PACKET_READ_GENTLE_ON_EOF);
+
+ data->version = discover_version(&reader);
+ switch (data->version) {
+ case protocol_v2:
+ get_remote_refs(data->fd[1], &reader, &refs, for_push,
+ ref_prefixes);
+ break;
+ case protocol_v1:
+ case protocol_v0:
+ get_remote_heads(&reader, &refs,
+ for_push ? REF_NORMAL : 0,
+ &data->extra_have,
+ &data->shallow);
+ break;
+ case protocol_unknown_version:
+ BUG("unknown protocol version");
+ }
data->got_remote_heads = 1;
return refs;
{
int ret = 0;
struct git_transport_data *data = transport->data;
- struct ref *refs;
+ struct ref *refs = NULL;
char *dest = xstrdup(transport->url);
struct fetch_pack_args args;
struct ref *refs_tmp = NULL;
args.from_promisor = data->options.from_promisor;
args.no_dependents = data->options.no_dependents;
args.filter_options = data->options.filter_options;
+ args.stateless_rpc = transport->stateless_rpc;
- if (!data->got_remote_heads) {
- connect_setup(transport, 0);
- get_remote_heads(data->fd[0], NULL, 0, &refs_tmp, 0,
- NULL, &data->shallow);
- data->got_remote_heads = 1;
+ if (!data->got_remote_heads)
+ refs_tmp = get_refs_via_connect(transport, 0, NULL);
+
+ switch (data->version) {
+ case protocol_v2:
+ refs = fetch_pack(&args, data->fd, data->conn,
+ refs_tmp ? refs_tmp : transport->remote_refs,
+ dest, to_fetch, nr_heads, &data->shallow,
+ &transport->pack_lockfile, data->version);
+ break;
+ case protocol_v1:
+ case protocol_v0:
+ refs = fetch_pack(&args, data->fd, data->conn,
+ refs_tmp ? refs_tmp : transport->remote_refs,
+ dest, to_fetch, nr_heads, &data->shallow,
+ &transport->pack_lockfile, data->version);
+ break;
+ case protocol_unknown_version:
+ BUG("unknown protocol version");
}
- refs = fetch_pack(&args, data->fd, data->conn,
- refs_tmp ? refs_tmp : transport->remote_refs,
- dest, to_fetch, nr_heads, &data->shallow,
- &transport->pack_lockfile);
close(data->fd[0]);
close(data->fd[1]);
if (finish_connect(data->conn))
{
struct git_transport_data *data = transport->data;
struct send_pack_args args;
- int ret;
-
- if (!data->got_remote_heads) {
- struct ref *tmp_refs;
- connect_setup(transport, 1);
+ int ret = 0;
- get_remote_heads(data->fd[0], NULL, 0, &tmp_refs, REF_NORMAL,
- NULL, &data->shallow);
- data->got_remote_heads = 1;
- }
+ if (!data->got_remote_heads)
+ get_refs_via_connect(transport, 1, NULL);
memset(&args, 0, sizeof(args));
args.send_mirror = !!(flags & TRANSPORT_PUSH_MIRROR);
else
args.push_cert = SEND_PACK_PUSH_CERT_NEVER;
- ret = send_pack(&args, data->fd, data->conn, remote_refs,
- &data->extra_have);
+ switch (data->version) {
+ case protocol_v2:
+ die("support for protocol v2 not implemented yet");
+ break;
+ case protocol_v1:
+ case protocol_v0:
+ ret = send_pack(&args, data->fd, data->conn, remote_refs,
+ &data->extra_have);
+ break;
+ case protocol_unknown_version:
+ BUG("unknown protocol version");
+ }
close(data->fd[1]);
close(data->fd[0]);
int porcelain = flags & TRANSPORT_PUSH_PORCELAIN;
int pretend = flags & TRANSPORT_PUSH_DRY_RUN;
int push_ret, ret, err;
+ struct refspec *tmp_rs;
+ struct argv_array ref_prefixes = ARGV_ARRAY_INIT;
+ int i;
if (check_push_refs(local_refs, refspec_nr, refspec) < 0)
return -1;
- remote_refs = transport->vtable->get_refs_list(transport, 1);
+ tmp_rs = parse_push_refspec(refspec_nr, refspec);
+ for (i = 0; i < refspec_nr; i++) {
+ const char *prefix = NULL;
+
+ if (tmp_rs[i].dst)
+ prefix = tmp_rs[i].dst;
+ else if (tmp_rs[i].src && !tmp_rs[i].exact_sha1)
+ prefix = tmp_rs[i].src;
+
+ if (prefix) {
+ const char *glob = strchr(prefix, '*');
+ if (glob)
+ argv_array_pushf(&ref_prefixes, "%.*s",
+ (int)(glob - prefix),
+ prefix);
+ else
+ expand_ref_prefix(&ref_prefixes, prefix);
+ }
+ }
+
+ remote_refs = transport->vtable->get_refs_list(transport, 1,
+ &ref_prefixes);
+
+ argv_array_clear(&ref_prefixes);
+ free_refspec(refspec_nr, tmp_rs);
if (flags & TRANSPORT_PUSH_ALL)
match_flags |= MATCH_REFS_ALL;
return 1;
}
-const struct ref *transport_get_remote_refs(struct transport *transport)
+const struct ref *transport_get_remote_refs(struct transport *transport,
+ const struct argv_array *ref_prefixes)
{
if (!transport->got_remote_refs) {
- transport->remote_refs = transport->vtable->get_refs_list(transport, 0);
+ transport->remote_refs =
+ transport->vtable->get_refs_list(transport, 0,
+ ref_prefixes);
transport->got_remote_refs = 1;
}
*/
unsigned cloning : 1;
+ /*
+ * Indicates that the transport is connected via a half-duplex
+ * connection and should operate in stateless-rpc mode.
+ */
+ unsigned stateless_rpc : 1;
+
/*
* These strings will be passed to the {pre, post}-receive hook,
* on the remote side, if both sides support the push options capability.
int refspec_nr, const char **refspec, int flags,
unsigned int * reject_reasons);
-const struct ref *transport_get_remote_refs(struct transport *transport);
+/*
+ * Retrieve refs from a remote.
+ *
+ * Optionally a list of ref prefixes can be provided which can be sent to the
+ * server (when communicating using protocol v2) to enable it to limit the ref
+ * advertisement. Since ref filtering is done on the server's end (and only
+ * when using protocol v2), this can return refs which don't match the provided
+ * ref_prefixes.
+ */
+const struct ref *transport_get_remote_refs(struct transport *transport,
+ const struct argv_array *ref_prefixes);
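A minimal caller sketch, assuming a transport obtained from transport_get() and a "const struct ref *refs" to receive the result (the prefix below is only an example):

	struct argv_array ref_prefixes = ARGV_ARRAY_INIT;

	argv_array_push(&ref_prefixes, "refs/heads/");
	refs = transport_get_remote_refs(transport, &ref_prefixes);
	argv_array_clear(&ref_prefixes);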
int transport_fetch_refs(struct transport *transport, struct ref *refs);
void transport_unlock_pack(struct transport *transport);
--- /dev/null
+static const struct interval zero_width[] = {
+{ 0x0300, 0x036F },
+{ 0x0483, 0x0489 },
+{ 0x0591, 0x05BD },
+{ 0x05BF, 0x05BF },
+{ 0x05C1, 0x05C2 },
+{ 0x05C4, 0x05C5 },
+{ 0x05C7, 0x05C7 },
+{ 0x0600, 0x0605 },
+{ 0x0610, 0x061A },
+{ 0x061C, 0x061C },
+{ 0x064B, 0x065F },
+{ 0x0670, 0x0670 },
+{ 0x06D6, 0x06DD },
+{ 0x06DF, 0x06E4 },
+{ 0x06E7, 0x06E8 },
+{ 0x06EA, 0x06ED },
+{ 0x070F, 0x070F },
+{ 0x0711, 0x0711 },
+{ 0x0730, 0x074A },
+{ 0x07A6, 0x07B0 },
+{ 0x07EB, 0x07F3 },
+{ 0x0816, 0x0819 },
+{ 0x081B, 0x0823 },
+{ 0x0825, 0x0827 },
+{ 0x0829, 0x082D },
+{ 0x0859, 0x085B },
+{ 0x08D4, 0x0902 },
+{ 0x093A, 0x093A },
+{ 0x093C, 0x093C },
+{ 0x0941, 0x0948 },
+{ 0x094D, 0x094D },
+{ 0x0951, 0x0957 },
+{ 0x0962, 0x0963 },
+{ 0x0981, 0x0981 },
+{ 0x09BC, 0x09BC },
+{ 0x09C1, 0x09C4 },
+{ 0x09CD, 0x09CD },
+{ 0x09E2, 0x09E3 },
+{ 0x0A01, 0x0A02 },
+{ 0x0A3C, 0x0A3C },
+{ 0x0A41, 0x0A42 },
+{ 0x0A47, 0x0A48 },
+{ 0x0A4B, 0x0A4D },
+{ 0x0A51, 0x0A51 },
+{ 0x0A70, 0x0A71 },
+{ 0x0A75, 0x0A75 },
+{ 0x0A81, 0x0A82 },
+{ 0x0ABC, 0x0ABC },
+{ 0x0AC1, 0x0AC5 },
+{ 0x0AC7, 0x0AC8 },
+{ 0x0ACD, 0x0ACD },
+{ 0x0AE2, 0x0AE3 },
+{ 0x0AFA, 0x0AFF },
+{ 0x0B01, 0x0B01 },
+{ 0x0B3C, 0x0B3C },
+{ 0x0B3F, 0x0B3F },
+{ 0x0B41, 0x0B44 },
+{ 0x0B4D, 0x0B4D },
+{ 0x0B56, 0x0B56 },
+{ 0x0B62, 0x0B63 },
+{ 0x0B82, 0x0B82 },
+{ 0x0BC0, 0x0BC0 },
+{ 0x0BCD, 0x0BCD },
+{ 0x0C00, 0x0C00 },
+{ 0x0C3E, 0x0C40 },
+{ 0x0C46, 0x0C48 },
+{ 0x0C4A, 0x0C4D },
+{ 0x0C55, 0x0C56 },
+{ 0x0C62, 0x0C63 },
+{ 0x0C81, 0x0C81 },
+{ 0x0CBC, 0x0CBC },
+{ 0x0CBF, 0x0CBF },
+{ 0x0CC6, 0x0CC6 },
+{ 0x0CCC, 0x0CCD },
+{ 0x0CE2, 0x0CE3 },
+{ 0x0D00, 0x0D01 },
+{ 0x0D3B, 0x0D3C },
+{ 0x0D41, 0x0D44 },
+{ 0x0D4D, 0x0D4D },
+{ 0x0D62, 0x0D63 },
+{ 0x0DCA, 0x0DCA },
+{ 0x0DD2, 0x0DD4 },
+{ 0x0DD6, 0x0DD6 },
+{ 0x0E31, 0x0E31 },
+{ 0x0E34, 0x0E3A },
+{ 0x0E47, 0x0E4E },
+{ 0x0EB1, 0x0EB1 },
+{ 0x0EB4, 0x0EB9 },
+{ 0x0EBB, 0x0EBC },
+{ 0x0EC8, 0x0ECD },
+{ 0x0F18, 0x0F19 },
+{ 0x0F35, 0x0F35 },
+{ 0x0F37, 0x0F37 },
+{ 0x0F39, 0x0F39 },
+{ 0x0F71, 0x0F7E },
+{ 0x0F80, 0x0F84 },
+{ 0x0F86, 0x0F87 },
+{ 0x0F8D, 0x0F97 },
+{ 0x0F99, 0x0FBC },
+{ 0x0FC6, 0x0FC6 },
+{ 0x102D, 0x1030 },
+{ 0x1032, 0x1037 },
+{ 0x1039, 0x103A },
+{ 0x103D, 0x103E },
+{ 0x1058, 0x1059 },
+{ 0x105E, 0x1060 },
+{ 0x1071, 0x1074 },
+{ 0x1082, 0x1082 },
+{ 0x1085, 0x1086 },
+{ 0x108D, 0x108D },
+{ 0x109D, 0x109D },
+{ 0x1160, 0x11FF },
+{ 0x135D, 0x135F },
+{ 0x1712, 0x1714 },
+{ 0x1732, 0x1734 },
+{ 0x1752, 0x1753 },
+{ 0x1772, 0x1773 },
+{ 0x17B4, 0x17B5 },
+{ 0x17B7, 0x17BD },
+{ 0x17C6, 0x17C6 },
+{ 0x17C9, 0x17D3 },
+{ 0x17DD, 0x17DD },
+{ 0x180B, 0x180E },
+{ 0x1885, 0x1886 },
+{ 0x18A9, 0x18A9 },
+{ 0x1920, 0x1922 },
+{ 0x1927, 0x1928 },
+{ 0x1932, 0x1932 },
+{ 0x1939, 0x193B },
+{ 0x1A17, 0x1A18 },
+{ 0x1A1B, 0x1A1B },
+{ 0x1A56, 0x1A56 },
+{ 0x1A58, 0x1A5E },
+{ 0x1A60, 0x1A60 },
+{ 0x1A62, 0x1A62 },
+{ 0x1A65, 0x1A6C },
+{ 0x1A73, 0x1A7C },
+{ 0x1A7F, 0x1A7F },
+{ 0x1AB0, 0x1ABE },
+{ 0x1B00, 0x1B03 },
+{ 0x1B34, 0x1B34 },
+{ 0x1B36, 0x1B3A },
+{ 0x1B3C, 0x1B3C },
+{ 0x1B42, 0x1B42 },
+{ 0x1B6B, 0x1B73 },
+{ 0x1B80, 0x1B81 },
+{ 0x1BA2, 0x1BA5 },
+{ 0x1BA8, 0x1BA9 },
+{ 0x1BAB, 0x1BAD },
+{ 0x1BE6, 0x1BE6 },
+{ 0x1BE8, 0x1BE9 },
+{ 0x1BED, 0x1BED },
+{ 0x1BEF, 0x1BF1 },
+{ 0x1C2C, 0x1C33 },
+{ 0x1C36, 0x1C37 },
+{ 0x1CD0, 0x1CD2 },
+{ 0x1CD4, 0x1CE0 },
+{ 0x1CE2, 0x1CE8 },
+{ 0x1CED, 0x1CED },
+{ 0x1CF4, 0x1CF4 },
+{ 0x1CF8, 0x1CF9 },
+{ 0x1DC0, 0x1DF9 },
+{ 0x1DFB, 0x1DFF },
+{ 0x200B, 0x200F },
+{ 0x202A, 0x202E },
+{ 0x2060, 0x2064 },
+{ 0x2066, 0x206F },
+{ 0x20D0, 0x20F0 },
+{ 0x2CEF, 0x2CF1 },
+{ 0x2D7F, 0x2D7F },
+{ 0x2DE0, 0x2DFF },
+{ 0x302A, 0x302D },
+{ 0x3099, 0x309A },
+{ 0xA66F, 0xA672 },
+{ 0xA674, 0xA67D },
+{ 0xA69E, 0xA69F },
+{ 0xA6F0, 0xA6F1 },
+{ 0xA802, 0xA802 },
+{ 0xA806, 0xA806 },
+{ 0xA80B, 0xA80B },
+{ 0xA825, 0xA826 },
+{ 0xA8C4, 0xA8C5 },
+{ 0xA8E0, 0xA8F1 },
+{ 0xA926, 0xA92D },
+{ 0xA947, 0xA951 },
+{ 0xA980, 0xA982 },
+{ 0xA9B3, 0xA9B3 },
+{ 0xA9B6, 0xA9B9 },
+{ 0xA9BC, 0xA9BC },
+{ 0xA9E5, 0xA9E5 },
+{ 0xAA29, 0xAA2E },
+{ 0xAA31, 0xAA32 },
+{ 0xAA35, 0xAA36 },
+{ 0xAA43, 0xAA43 },
+{ 0xAA4C, 0xAA4C },
+{ 0xAA7C, 0xAA7C },
+{ 0xAAB0, 0xAAB0 },
+{ 0xAAB2, 0xAAB4 },
+{ 0xAAB7, 0xAAB8 },
+{ 0xAABE, 0xAABF },
+{ 0xAAC1, 0xAAC1 },
+{ 0xAAEC, 0xAAED },
+{ 0xAAF6, 0xAAF6 },
+{ 0xABE5, 0xABE5 },
+{ 0xABE8, 0xABE8 },
+{ 0xABED, 0xABED },
+{ 0xFB1E, 0xFB1E },
+{ 0xFE00, 0xFE0F },
+{ 0xFE20, 0xFE2F },
+{ 0xFEFF, 0xFEFF },
+{ 0xFFF9, 0xFFFB },
+{ 0x101FD, 0x101FD },
+{ 0x102E0, 0x102E0 },
+{ 0x10376, 0x1037A },
+{ 0x10A01, 0x10A03 },
+{ 0x10A05, 0x10A06 },
+{ 0x10A0C, 0x10A0F },
+{ 0x10A38, 0x10A3A },
+{ 0x10A3F, 0x10A3F },
+{ 0x10AE5, 0x10AE6 },
+{ 0x11001, 0x11001 },
+{ 0x11038, 0x11046 },
+{ 0x1107F, 0x11081 },
+{ 0x110B3, 0x110B6 },
+{ 0x110B9, 0x110BA },
+{ 0x110BD, 0x110BD },
+{ 0x11100, 0x11102 },
+{ 0x11127, 0x1112B },
+{ 0x1112D, 0x11134 },
+{ 0x11173, 0x11173 },
+{ 0x11180, 0x11181 },
+{ 0x111B6, 0x111BE },
+{ 0x111CA, 0x111CC },
+{ 0x1122F, 0x11231 },
+{ 0x11234, 0x11234 },
+{ 0x11236, 0x11237 },
+{ 0x1123E, 0x1123E },
+{ 0x112DF, 0x112DF },
+{ 0x112E3, 0x112EA },
+{ 0x11300, 0x11301 },
+{ 0x1133C, 0x1133C },
+{ 0x11340, 0x11340 },
+{ 0x11366, 0x1136C },
+{ 0x11370, 0x11374 },
+{ 0x11438, 0x1143F },
+{ 0x11442, 0x11444 },
+{ 0x11446, 0x11446 },
+{ 0x114B3, 0x114B8 },
+{ 0x114BA, 0x114BA },
+{ 0x114BF, 0x114C0 },
+{ 0x114C2, 0x114C3 },
+{ 0x115B2, 0x115B5 },
+{ 0x115BC, 0x115BD },
+{ 0x115BF, 0x115C0 },
+{ 0x115DC, 0x115DD },
+{ 0x11633, 0x1163A },
+{ 0x1163D, 0x1163D },
+{ 0x1163F, 0x11640 },
+{ 0x116AB, 0x116AB },
+{ 0x116AD, 0x116AD },
+{ 0x116B0, 0x116B5 },
+{ 0x116B7, 0x116B7 },
+{ 0x1171D, 0x1171F },
+{ 0x11722, 0x11725 },
+{ 0x11727, 0x1172B },
+{ 0x11A01, 0x11A06 },
+{ 0x11A09, 0x11A0A },
+{ 0x11A33, 0x11A38 },
+{ 0x11A3B, 0x11A3E },
+{ 0x11A47, 0x11A47 },
+{ 0x11A51, 0x11A56 },
+{ 0x11A59, 0x11A5B },
+{ 0x11A8A, 0x11A96 },
+{ 0x11A98, 0x11A99 },
+{ 0x11C30, 0x11C36 },
+{ 0x11C38, 0x11C3D },
+{ 0x11C3F, 0x11C3F },
+{ 0x11C92, 0x11CA7 },
+{ 0x11CAA, 0x11CB0 },
+{ 0x11CB2, 0x11CB3 },
+{ 0x11CB5, 0x11CB6 },
+{ 0x11D31, 0x11D36 },
+{ 0x11D3A, 0x11D3A },
+{ 0x11D3C, 0x11D3D },
+{ 0x11D3F, 0x11D45 },
+{ 0x11D47, 0x11D47 },
+{ 0x16AF0, 0x16AF4 },
+{ 0x16B30, 0x16B36 },
+{ 0x16F8F, 0x16F92 },
+{ 0x1BC9D, 0x1BC9E },
+{ 0x1BCA0, 0x1BCA3 },
+{ 0x1D167, 0x1D169 },
+{ 0x1D173, 0x1D182 },
+{ 0x1D185, 0x1D18B },
+{ 0x1D1AA, 0x1D1AD },
+{ 0x1D242, 0x1D244 },
+{ 0x1DA00, 0x1DA36 },
+{ 0x1DA3B, 0x1DA6C },
+{ 0x1DA75, 0x1DA75 },
+{ 0x1DA84, 0x1DA84 },
+{ 0x1DA9B, 0x1DA9F },
+{ 0x1DAA1, 0x1DAAF },
+{ 0x1E000, 0x1E006 },
+{ 0x1E008, 0x1E018 },
+{ 0x1E01B, 0x1E021 },
+{ 0x1E023, 0x1E024 },
+{ 0x1E026, 0x1E02A },
+{ 0x1E8D0, 0x1E8D6 },
+{ 0x1E944, 0x1E94A },
+{ 0xE0001, 0xE0001 },
+{ 0xE0020, 0xE007F },
+{ 0xE0100, 0xE01EF }
+};
+static const struct interval double_width[] = {
+{ 0x1100, 0x115F },
+{ 0x231A, 0x231B },
+{ 0x2329, 0x232A },
+{ 0x23E9, 0x23EC },
+{ 0x23F0, 0x23F0 },
+{ 0x23F3, 0x23F3 },
+{ 0x25FD, 0x25FE },
+{ 0x2614, 0x2615 },
+{ 0x2648, 0x2653 },
+{ 0x267F, 0x267F },
+{ 0x2693, 0x2693 },
+{ 0x26A1, 0x26A1 },
+{ 0x26AA, 0x26AB },
+{ 0x26BD, 0x26BE },
+{ 0x26C4, 0x26C5 },
+{ 0x26CE, 0x26CE },
+{ 0x26D4, 0x26D4 },
+{ 0x26EA, 0x26EA },
+{ 0x26F2, 0x26F3 },
+{ 0x26F5, 0x26F5 },
+{ 0x26FA, 0x26FA },
+{ 0x26FD, 0x26FD },
+{ 0x2705, 0x2705 },
+{ 0x270A, 0x270B },
+{ 0x2728, 0x2728 },
+{ 0x274C, 0x274C },
+{ 0x274E, 0x274E },
+{ 0x2753, 0x2755 },
+{ 0x2757, 0x2757 },
+{ 0x2795, 0x2797 },
+{ 0x27B0, 0x27B0 },
+{ 0x27BF, 0x27BF },
+{ 0x2B1B, 0x2B1C },
+{ 0x2B50, 0x2B50 },
+{ 0x2B55, 0x2B55 },
+{ 0x2E80, 0x2E99 },
+{ 0x2E9B, 0x2EF3 },
+{ 0x2F00, 0x2FD5 },
+{ 0x2FF0, 0x2FFB },
+{ 0x3000, 0x303E },
+{ 0x3041, 0x3096 },
+{ 0x3099, 0x30FF },
+{ 0x3105, 0x312E },
+{ 0x3131, 0x318E },
+{ 0x3190, 0x31BA },
+{ 0x31C0, 0x31E3 },
+{ 0x31F0, 0x321E },
+{ 0x3220, 0x3247 },
+{ 0x3250, 0x32FE },
+{ 0x3300, 0x4DBF },
+{ 0x4E00, 0xA48C },
+{ 0xA490, 0xA4C6 },
+{ 0xA960, 0xA97C },
+{ 0xAC00, 0xD7A3 },
+{ 0xF900, 0xFAFF },
+{ 0xFE10, 0xFE19 },
+{ 0xFE30, 0xFE52 },
+{ 0xFE54, 0xFE66 },
+{ 0xFE68, 0xFE6B },
+{ 0xFF01, 0xFF60 },
+{ 0xFFE0, 0xFFE6 },
+{ 0x16FE0, 0x16FE1 },
+{ 0x17000, 0x187EC },
+{ 0x18800, 0x18AF2 },
+{ 0x1B000, 0x1B11E },
+{ 0x1B170, 0x1B2FB },
+{ 0x1F004, 0x1F004 },
+{ 0x1F0CF, 0x1F0CF },
+{ 0x1F18E, 0x1F18E },
+{ 0x1F191, 0x1F19A },
+{ 0x1F200, 0x1F202 },
+{ 0x1F210, 0x1F23B },
+{ 0x1F240, 0x1F248 },
+{ 0x1F250, 0x1F251 },
+{ 0x1F260, 0x1F265 },
+{ 0x1F300, 0x1F320 },
+{ 0x1F32D, 0x1F335 },
+{ 0x1F337, 0x1F37C },
+{ 0x1F37E, 0x1F393 },
+{ 0x1F3A0, 0x1F3CA },
+{ 0x1F3CF, 0x1F3D3 },
+{ 0x1F3E0, 0x1F3F0 },
+{ 0x1F3F4, 0x1F3F4 },
+{ 0x1F3F8, 0x1F43E },
+{ 0x1F440, 0x1F440 },
+{ 0x1F442, 0x1F4FC },
+{ 0x1F4FF, 0x1F53D },
+{ 0x1F54B, 0x1F54E },
+{ 0x1F550, 0x1F567 },
+{ 0x1F57A, 0x1F57A },
+{ 0x1F595, 0x1F596 },
+{ 0x1F5A4, 0x1F5A4 },
+{ 0x1F5FB, 0x1F64F },
+{ 0x1F680, 0x1F6C5 },
+{ 0x1F6CC, 0x1F6CC },
+{ 0x1F6D0, 0x1F6D2 },
+{ 0x1F6EB, 0x1F6EC },
+{ 0x1F6F4, 0x1F6F8 },
+{ 0x1F910, 0x1F93E },
+{ 0x1F940, 0x1F94C },
+{ 0x1F950, 0x1F96B },
+{ 0x1F980, 0x1F997 },
+{ 0x1F9C0, 0x1F9C0 },
+{ 0x1F9D0, 0x1F9E6 },
+{ 0x20000, 0x2FFFD },
+{ 0x30000, 0x3FFFD }
+};
+++ /dev/null
-static const struct interval zero_width[] = {
-{ 0x0300, 0x036F },
-{ 0x0483, 0x0489 },
-{ 0x0591, 0x05BD },
-{ 0x05BF, 0x05BF },
-{ 0x05C1, 0x05C2 },
-{ 0x05C4, 0x05C5 },
-{ 0x05C7, 0x05C7 },
-{ 0x0600, 0x0605 },
-{ 0x0610, 0x061A },
-{ 0x061C, 0x061C },
-{ 0x064B, 0x065F },
-{ 0x0670, 0x0670 },
-{ 0x06D6, 0x06DD },
-{ 0x06DF, 0x06E4 },
-{ 0x06E7, 0x06E8 },
-{ 0x06EA, 0x06ED },
-{ 0x070F, 0x070F },
-{ 0x0711, 0x0711 },
-{ 0x0730, 0x074A },
-{ 0x07A6, 0x07B0 },
-{ 0x07EB, 0x07F3 },
-{ 0x0816, 0x0819 },
-{ 0x081B, 0x0823 },
-{ 0x0825, 0x0827 },
-{ 0x0829, 0x082D },
-{ 0x0859, 0x085B },
-{ 0x08D4, 0x0902 },
-{ 0x093A, 0x093A },
-{ 0x093C, 0x093C },
-{ 0x0941, 0x0948 },
-{ 0x094D, 0x094D },
-{ 0x0951, 0x0957 },
-{ 0x0962, 0x0963 },
-{ 0x0981, 0x0981 },
-{ 0x09BC, 0x09BC },
-{ 0x09C1, 0x09C4 },
-{ 0x09CD, 0x09CD },
-{ 0x09E2, 0x09E3 },
-{ 0x0A01, 0x0A02 },
-{ 0x0A3C, 0x0A3C },
-{ 0x0A41, 0x0A42 },
-{ 0x0A47, 0x0A48 },
-{ 0x0A4B, 0x0A4D },
-{ 0x0A51, 0x0A51 },
-{ 0x0A70, 0x0A71 },
-{ 0x0A75, 0x0A75 },
-{ 0x0A81, 0x0A82 },
-{ 0x0ABC, 0x0ABC },
-{ 0x0AC1, 0x0AC5 },
-{ 0x0AC7, 0x0AC8 },
-{ 0x0ACD, 0x0ACD },
-{ 0x0AE2, 0x0AE3 },
-{ 0x0AFA, 0x0AFF },
-{ 0x0B01, 0x0B01 },
-{ 0x0B3C, 0x0B3C },
-{ 0x0B3F, 0x0B3F },
-{ 0x0B41, 0x0B44 },
-{ 0x0B4D, 0x0B4D },
-{ 0x0B56, 0x0B56 },
-{ 0x0B62, 0x0B63 },
-{ 0x0B82, 0x0B82 },
-{ 0x0BC0, 0x0BC0 },
-{ 0x0BCD, 0x0BCD },
-{ 0x0C00, 0x0C00 },
-{ 0x0C3E, 0x0C40 },
-{ 0x0C46, 0x0C48 },
-{ 0x0C4A, 0x0C4D },
-{ 0x0C55, 0x0C56 },
-{ 0x0C62, 0x0C63 },
-{ 0x0C81, 0x0C81 },
-{ 0x0CBC, 0x0CBC },
-{ 0x0CBF, 0x0CBF },
-{ 0x0CC6, 0x0CC6 },
-{ 0x0CCC, 0x0CCD },
-{ 0x0CE2, 0x0CE3 },
-{ 0x0D00, 0x0D01 },
-{ 0x0D3B, 0x0D3C },
-{ 0x0D41, 0x0D44 },
-{ 0x0D4D, 0x0D4D },
-{ 0x0D62, 0x0D63 },
-{ 0x0DCA, 0x0DCA },
-{ 0x0DD2, 0x0DD4 },
-{ 0x0DD6, 0x0DD6 },
-{ 0x0E31, 0x0E31 },
-{ 0x0E34, 0x0E3A },
-{ 0x0E47, 0x0E4E },
-{ 0x0EB1, 0x0EB1 },
-{ 0x0EB4, 0x0EB9 },
-{ 0x0EBB, 0x0EBC },
-{ 0x0EC8, 0x0ECD },
-{ 0x0F18, 0x0F19 },
-{ 0x0F35, 0x0F35 },
-{ 0x0F37, 0x0F37 },
-{ 0x0F39, 0x0F39 },
-{ 0x0F71, 0x0F7E },
-{ 0x0F80, 0x0F84 },
-{ 0x0F86, 0x0F87 },
-{ 0x0F8D, 0x0F97 },
-{ 0x0F99, 0x0FBC },
-{ 0x0FC6, 0x0FC6 },
-{ 0x102D, 0x1030 },
-{ 0x1032, 0x1037 },
-{ 0x1039, 0x103A },
-{ 0x103D, 0x103E },
-{ 0x1058, 0x1059 },
-{ 0x105E, 0x1060 },
-{ 0x1071, 0x1074 },
-{ 0x1082, 0x1082 },
-{ 0x1085, 0x1086 },
-{ 0x108D, 0x108D },
-{ 0x109D, 0x109D },
-{ 0x1160, 0x11FF },
-{ 0x135D, 0x135F },
-{ 0x1712, 0x1714 },
-{ 0x1732, 0x1734 },
-{ 0x1752, 0x1753 },
-{ 0x1772, 0x1773 },
-{ 0x17B4, 0x17B5 },
-{ 0x17B7, 0x17BD },
-{ 0x17C6, 0x17C6 },
-{ 0x17C9, 0x17D3 },
-{ 0x17DD, 0x17DD },
-{ 0x180B, 0x180E },
-{ 0x1885, 0x1886 },
-{ 0x18A9, 0x18A9 },
-{ 0x1920, 0x1922 },
-{ 0x1927, 0x1928 },
-{ 0x1932, 0x1932 },
-{ 0x1939, 0x193B },
-{ 0x1A17, 0x1A18 },
-{ 0x1A1B, 0x1A1B },
-{ 0x1A56, 0x1A56 },
-{ 0x1A58, 0x1A5E },
-{ 0x1A60, 0x1A60 },
-{ 0x1A62, 0x1A62 },
-{ 0x1A65, 0x1A6C },
-{ 0x1A73, 0x1A7C },
-{ 0x1A7F, 0x1A7F },
-{ 0x1AB0, 0x1ABE },
-{ 0x1B00, 0x1B03 },
-{ 0x1B34, 0x1B34 },
-{ 0x1B36, 0x1B3A },
-{ 0x1B3C, 0x1B3C },
-{ 0x1B42, 0x1B42 },
-{ 0x1B6B, 0x1B73 },
-{ 0x1B80, 0x1B81 },
-{ 0x1BA2, 0x1BA5 },
-{ 0x1BA8, 0x1BA9 },
-{ 0x1BAB, 0x1BAD },
-{ 0x1BE6, 0x1BE6 },
-{ 0x1BE8, 0x1BE9 },
-{ 0x1BED, 0x1BED },
-{ 0x1BEF, 0x1BF1 },
-{ 0x1C2C, 0x1C33 },
-{ 0x1C36, 0x1C37 },
-{ 0x1CD0, 0x1CD2 },
-{ 0x1CD4, 0x1CE0 },
-{ 0x1CE2, 0x1CE8 },
-{ 0x1CED, 0x1CED },
-{ 0x1CF4, 0x1CF4 },
-{ 0x1CF8, 0x1CF9 },
-{ 0x1DC0, 0x1DF9 },
-{ 0x1DFB, 0x1DFF },
-{ 0x200B, 0x200F },
-{ 0x202A, 0x202E },
-{ 0x2060, 0x2064 },
-{ 0x2066, 0x206F },
-{ 0x20D0, 0x20F0 },
-{ 0x2CEF, 0x2CF1 },
-{ 0x2D7F, 0x2D7F },
-{ 0x2DE0, 0x2DFF },
-{ 0x302A, 0x302D },
-{ 0x3099, 0x309A },
-{ 0xA66F, 0xA672 },
-{ 0xA674, 0xA67D },
-{ 0xA69E, 0xA69F },
-{ 0xA6F0, 0xA6F1 },
-{ 0xA802, 0xA802 },
-{ 0xA806, 0xA806 },
-{ 0xA80B, 0xA80B },
-{ 0xA825, 0xA826 },
-{ 0xA8C4, 0xA8C5 },
-{ 0xA8E0, 0xA8F1 },
-{ 0xA926, 0xA92D },
-{ 0xA947, 0xA951 },
-{ 0xA980, 0xA982 },
-{ 0xA9B3, 0xA9B3 },
-{ 0xA9B6, 0xA9B9 },
-{ 0xA9BC, 0xA9BC },
-{ 0xA9E5, 0xA9E5 },
-{ 0xAA29, 0xAA2E },
-{ 0xAA31, 0xAA32 },
-{ 0xAA35, 0xAA36 },
-{ 0xAA43, 0xAA43 },
-{ 0xAA4C, 0xAA4C },
-{ 0xAA7C, 0xAA7C },
-{ 0xAAB0, 0xAAB0 },
-{ 0xAAB2, 0xAAB4 },
-{ 0xAAB7, 0xAAB8 },
-{ 0xAABE, 0xAABF },
-{ 0xAAC1, 0xAAC1 },
-{ 0xAAEC, 0xAAED },
-{ 0xAAF6, 0xAAF6 },
-{ 0xABE5, 0xABE5 },
-{ 0xABE8, 0xABE8 },
-{ 0xABED, 0xABED },
-{ 0xFB1E, 0xFB1E },
-{ 0xFE00, 0xFE0F },
-{ 0xFE20, 0xFE2F },
-{ 0xFEFF, 0xFEFF },
-{ 0xFFF9, 0xFFFB },
-{ 0x101FD, 0x101FD },
-{ 0x102E0, 0x102E0 },
-{ 0x10376, 0x1037A },
-{ 0x10A01, 0x10A03 },
-{ 0x10A05, 0x10A06 },
-{ 0x10A0C, 0x10A0F },
-{ 0x10A38, 0x10A3A },
-{ 0x10A3F, 0x10A3F },
-{ 0x10AE5, 0x10AE6 },
-{ 0x11001, 0x11001 },
-{ 0x11038, 0x11046 },
-{ 0x1107F, 0x11081 },
-{ 0x110B3, 0x110B6 },
-{ 0x110B9, 0x110BA },
-{ 0x110BD, 0x110BD },
-{ 0x11100, 0x11102 },
-{ 0x11127, 0x1112B },
-{ 0x1112D, 0x11134 },
-{ 0x11173, 0x11173 },
-{ 0x11180, 0x11181 },
-{ 0x111B6, 0x111BE },
-{ 0x111CA, 0x111CC },
-{ 0x1122F, 0x11231 },
-{ 0x11234, 0x11234 },
-{ 0x11236, 0x11237 },
-{ 0x1123E, 0x1123E },
-{ 0x112DF, 0x112DF },
-{ 0x112E3, 0x112EA },
-{ 0x11300, 0x11301 },
-{ 0x1133C, 0x1133C },
-{ 0x11340, 0x11340 },
-{ 0x11366, 0x1136C },
-{ 0x11370, 0x11374 },
-{ 0x11438, 0x1143F },
-{ 0x11442, 0x11444 },
-{ 0x11446, 0x11446 },
-{ 0x114B3, 0x114B8 },
-{ 0x114BA, 0x114BA },
-{ 0x114BF, 0x114C0 },
-{ 0x114C2, 0x114C3 },
-{ 0x115B2, 0x115B5 },
-{ 0x115BC, 0x115BD },
-{ 0x115BF, 0x115C0 },
-{ 0x115DC, 0x115DD },
-{ 0x11633, 0x1163A },
-{ 0x1163D, 0x1163D },
-{ 0x1163F, 0x11640 },
-{ 0x116AB, 0x116AB },
-{ 0x116AD, 0x116AD },
-{ 0x116B0, 0x116B5 },
-{ 0x116B7, 0x116B7 },
-{ 0x1171D, 0x1171F },
-{ 0x11722, 0x11725 },
-{ 0x11727, 0x1172B },
-{ 0x11A01, 0x11A06 },
-{ 0x11A09, 0x11A0A },
-{ 0x11A33, 0x11A38 },
-{ 0x11A3B, 0x11A3E },
-{ 0x11A47, 0x11A47 },
-{ 0x11A51, 0x11A56 },
-{ 0x11A59, 0x11A5B },
-{ 0x11A8A, 0x11A96 },
-{ 0x11A98, 0x11A99 },
-{ 0x11C30, 0x11C36 },
-{ 0x11C38, 0x11C3D },
-{ 0x11C3F, 0x11C3F },
-{ 0x11C92, 0x11CA7 },
-{ 0x11CAA, 0x11CB0 },
-{ 0x11CB2, 0x11CB3 },
-{ 0x11CB5, 0x11CB6 },
-{ 0x11D31, 0x11D36 },
-{ 0x11D3A, 0x11D3A },
-{ 0x11D3C, 0x11D3D },
-{ 0x11D3F, 0x11D45 },
-{ 0x11D47, 0x11D47 },
-{ 0x16AF0, 0x16AF4 },
-{ 0x16B30, 0x16B36 },
-{ 0x16F8F, 0x16F92 },
-{ 0x1BC9D, 0x1BC9E },
-{ 0x1BCA0, 0x1BCA3 },
-{ 0x1D167, 0x1D169 },
-{ 0x1D173, 0x1D182 },
-{ 0x1D185, 0x1D18B },
-{ 0x1D1AA, 0x1D1AD },
-{ 0x1D242, 0x1D244 },
-{ 0x1DA00, 0x1DA36 },
-{ 0x1DA3B, 0x1DA6C },
-{ 0x1DA75, 0x1DA75 },
-{ 0x1DA84, 0x1DA84 },
-{ 0x1DA9B, 0x1DA9F },
-{ 0x1DAA1, 0x1DAAF },
-{ 0x1E000, 0x1E006 },
-{ 0x1E008, 0x1E018 },
-{ 0x1E01B, 0x1E021 },
-{ 0x1E023, 0x1E024 },
-{ 0x1E026, 0x1E02A },
-{ 0x1E8D0, 0x1E8D6 },
-{ 0x1E944, 0x1E94A },
-{ 0xE0001, 0xE0001 },
-{ 0xE0020, 0xE007F },
-{ 0xE0100, 0xE01EF }
-};
-static const struct interval double_width[] = {
-{ 0x1100, 0x115F },
-{ 0x231A, 0x231B },
-{ 0x2329, 0x232A },
-{ 0x23E9, 0x23EC },
-{ 0x23F0, 0x23F0 },
-{ 0x23F3, 0x23F3 },
-{ 0x25FD, 0x25FE },
-{ 0x2614, 0x2615 },
-{ 0x2648, 0x2653 },
-{ 0x267F, 0x267F },
-{ 0x2693, 0x2693 },
-{ 0x26A1, 0x26A1 },
-{ 0x26AA, 0x26AB },
-{ 0x26BD, 0x26BE },
-{ 0x26C4, 0x26C5 },
-{ 0x26CE, 0x26CE },
-{ 0x26D4, 0x26D4 },
-{ 0x26EA, 0x26EA },
-{ 0x26F2, 0x26F3 },
-{ 0x26F5, 0x26F5 },
-{ 0x26FA, 0x26FA },
-{ 0x26FD, 0x26FD },
-{ 0x2705, 0x2705 },
-{ 0x270A, 0x270B },
-{ 0x2728, 0x2728 },
-{ 0x274C, 0x274C },
-{ 0x274E, 0x274E },
-{ 0x2753, 0x2755 },
-{ 0x2757, 0x2757 },
-{ 0x2795, 0x2797 },
-{ 0x27B0, 0x27B0 },
-{ 0x27BF, 0x27BF },
-{ 0x2B1B, 0x2B1C },
-{ 0x2B50, 0x2B50 },
-{ 0x2B55, 0x2B55 },
-{ 0x2E80, 0x2E99 },
-{ 0x2E9B, 0x2EF3 },
-{ 0x2F00, 0x2FD5 },
-{ 0x2FF0, 0x2FFB },
-{ 0x3000, 0x303E },
-{ 0x3041, 0x3096 },
-{ 0x3099, 0x30FF },
-{ 0x3105, 0x312E },
-{ 0x3131, 0x318E },
-{ 0x3190, 0x31BA },
-{ 0x31C0, 0x31E3 },
-{ 0x31F0, 0x321E },
-{ 0x3220, 0x3247 },
-{ 0x3250, 0x32FE },
-{ 0x3300, 0x4DBF },
-{ 0x4E00, 0xA48C },
-{ 0xA490, 0xA4C6 },
-{ 0xA960, 0xA97C },
-{ 0xAC00, 0xD7A3 },
-{ 0xF900, 0xFAFF },
-{ 0xFE10, 0xFE19 },
-{ 0xFE30, 0xFE52 },
-{ 0xFE54, 0xFE66 },
-{ 0xFE68, 0xFE6B },
-{ 0xFF01, 0xFF60 },
-{ 0xFFE0, 0xFFE6 },
-{ 0x16FE0, 0x16FE1 },
-{ 0x17000, 0x187EC },
-{ 0x18800, 0x18AF2 },
-{ 0x1B000, 0x1B11E },
-{ 0x1B170, 0x1B2FB },
-{ 0x1F004, 0x1F004 },
-{ 0x1F0CF, 0x1F0CF },
-{ 0x1F18E, 0x1F18E },
-{ 0x1F191, 0x1F19A },
-{ 0x1F200, 0x1F202 },
-{ 0x1F210, 0x1F23B },
-{ 0x1F240, 0x1F248 },
-{ 0x1F250, 0x1F251 },
-{ 0x1F260, 0x1F265 },
-{ 0x1F300, 0x1F320 },
-{ 0x1F32D, 0x1F335 },
-{ 0x1F337, 0x1F37C },
-{ 0x1F37E, 0x1F393 },
-{ 0x1F3A0, 0x1F3CA },
-{ 0x1F3CF, 0x1F3D3 },
-{ 0x1F3E0, 0x1F3F0 },
-{ 0x1F3F4, 0x1F3F4 },
-{ 0x1F3F8, 0x1F43E },
-{ 0x1F440, 0x1F440 },
-{ 0x1F442, 0x1F4FC },
-{ 0x1F4FF, 0x1F53D },
-{ 0x1F54B, 0x1F54E },
-{ 0x1F550, 0x1F567 },
-{ 0x1F57A, 0x1F57A },
-{ 0x1F595, 0x1F596 },
-{ 0x1F5A4, 0x1F5A4 },
-{ 0x1F5FB, 0x1F64F },
-{ 0x1F680, 0x1F6C5 },
-{ 0x1F6CC, 0x1F6CC },
-{ 0x1F6D0, 0x1F6D2 },
-{ 0x1F6EB, 0x1F6EC },
-{ 0x1F6F4, 0x1F6F8 },
-{ 0x1F910, 0x1F93E },
-{ 0x1F940, 0x1F94C },
-{ 0x1F950, 0x1F96B },
-{ 0x1F980, 0x1F997 },
-{ 0x1F9C0, 0x1F9C0 },
-{ 0x1F9D0, 0x1F9E6 },
-{ 0x20000, 0x2FFFD },
-{ 0x30000, 0x3FFFD }
-};
#include "tag.h"
#include "object.h"
#include "commit.h"
-#include "exec_cmd.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "sigchain.h"
#include "version.h"
#include "string-list.h"
-#include "parse-options.h"
#include "argv-array.h"
#include "prio-queue.h"
#include "protocol.h"
#include "quote.h"
-
-static const char * const upload_pack_usage[] = {
- N_("git upload-pack [<options>] <dir>"),
- NULL
-};
+#include "upload-pack.h"
+#include "serve.h"
/* Remember to update object flag allocation in object.h */
#define THEY_HAVE (1u << 11)
* otherwise maximum packet size (up to 65520 bytes).
*/
static int use_sideband;
-static int advertise_refs;
static int stateless_rpc;
static const char *pack_objects_hook;
}
send_unshallow(shallows);
- packet_flush(1);
}
static void deepen_by_rev_list(int ac, const char **av,
send_shallow(result);
free_commit_list(result);
send_unshallow(shallows);
- packet_flush(1);
+}
+
+/* Returns 1 if a shallow list is sent or 0 otherwise */
+static int send_shallow_list(int depth, int deepen_rev_list,
+ timestamp_t deepen_since,
+ struct string_list *deepen_not,
+ struct object_array *shallows)
+{
+ int ret = 0;
+
+ if (depth > 0 && deepen_rev_list)
+ die("git upload-pack: deepen and deepen-since (or deepen-not) cannot be used together");
+ if (depth > 0) {
+ deepen(depth, deepen_relative, shallows);
+ ret = 1;
+ } else if (deepen_rev_list) {
+ struct argv_array av = ARGV_ARRAY_INIT;
+ int i;
+
+ argv_array_push(&av, "rev-list");
+ if (deepen_since)
+ argv_array_pushf(&av, "--max-age=%"PRItime, deepen_since);
+ if (deepen_not->nr) {
+ argv_array_push(&av, "--not");
+ for (i = 0; i < deepen_not->nr; i++) {
+ struct string_list_item *s = deepen_not->items + i;
+ argv_array_push(&av, s->string);
+ }
+ argv_array_push(&av, "--not");
+ }
+ for (i = 0; i < want_obj.nr; i++) {
+ struct object *o = want_obj.objects[i].item;
+ argv_array_push(&av, oid_to_hex(&o->oid));
+ }
+ deepen_by_rev_list(av.argc, av.argv, shallows);
+ argv_array_clear(&av);
+ ret = 1;
+ } else {
+ if (shallows->nr > 0) {
+ int i;
+ for (i = 0; i < shallows->nr; i++)
+ register_shallow(&shallows->objects[i].item->oid);
+ }
+ }
+
+ shallow_nr += shallows->nr;
+ return ret;
+}
+
+static int process_shallow(const char *line, struct object_array *shallows)
+{
+ const char *arg;
+ if (skip_prefix(line, "shallow ", &arg)) {
+ struct object_id oid;
+ struct object *object;
+ if (get_oid_hex(arg, &oid))
+ die("invalid shallow line: %s", line);
+ object = parse_object(&oid);
+ if (!object)
+ return 1;
+ if (object->type != OBJ_COMMIT)
+ die("invalid shallow object %s", oid_to_hex(&oid));
+ if (!(object->flags & CLIENT_SHALLOW)) {
+ object->flags |= CLIENT_SHALLOW;
+ add_object_array(object, NULL, shallows);
+ }
+ return 1;
+ }
+
+ return 0;
+}
+
+static int process_deepen(const char *line, int *depth)
+{
+ const char *arg;
+ if (skip_prefix(line, "deepen ", &arg)) {
+ char *end = NULL;
+ *depth = (int)strtol(arg, &end, 0);
+ if (!end || *end || *depth <= 0)
+ die("Invalid deepen: %s", line);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int process_deepen_since(const char *line, timestamp_t *deepen_since, int *deepen_rev_list)
+{
+ const char *arg;
+ if (skip_prefix(line, "deepen-since ", &arg)) {
+ char *end = NULL;
+ *deepen_since = parse_timestamp(arg, &end, 0);
+		if (!end || *end || !*deepen_since ||
+ /* revisions.c's max_age -1 is special */
+ *deepen_since == -1)
+ die("Invalid deepen-since: %s", line);
+ *deepen_rev_list = 1;
+ return 1;
+ }
+ return 0;
+}
+
+static int process_deepen_not(const char *line, struct string_list *deepen_not, int *deepen_rev_list)
+{
+ const char *arg;
+ if (skip_prefix(line, "deepen-not ", &arg)) {
+ char *ref = NULL;
+ struct object_id oid;
+ if (expand_ref(arg, strlen(arg), &oid, &ref) != 1)
+ die("git upload-pack: ambiguous deepen-not: %s", line);
+ string_list_append(deepen_not, ref);
+ free(ref);
+ *deepen_rev_list = 1;
+ return 1;
+ }
+ return 0;
}
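For reference, the pkt-line payloads these helpers recognize look like the following (the object id, depth, timestamp and ref are made-up examples):

	shallow 1234567890abcdef1234567890abcdef12345678
	deepen 5
	deepen-since 1510000000
	deepen-not refs/heads/wip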
static void receive_needs(void)
if (!line)
break;
- if (skip_prefix(line, "shallow ", &arg)) {
- struct object_id oid;
- struct object *object;
- if (get_oid_hex(arg, &oid))
- die("invalid shallow line: %s", line);
- object = parse_object(&oid);
- if (!object)
- continue;
- if (object->type != OBJ_COMMIT)
- die("invalid shallow object %s", oid_to_hex(&oid));
- if (!(object->flags & CLIENT_SHALLOW)) {
- object->flags |= CLIENT_SHALLOW;
- add_object_array(object, NULL, &shallows);
- }
+ if (process_shallow(line, &shallows))
continue;
- }
- if (skip_prefix(line, "deepen ", &arg)) {
- char *end = NULL;
- depth = strtol(arg, &end, 0);
- if (!end || *end || depth <= 0)
- die("Invalid deepen: %s", line);
+ if (process_deepen(line, &depth))
continue;
- }
- if (skip_prefix(line, "deepen-since ", &arg)) {
- char *end = NULL;
- deepen_since = parse_timestamp(arg, &end, 0);
- if (!end || *end || !deepen_since ||
- /* revisions.c's max_age -1 is special */
- deepen_since == -1)
- die("Invalid deepen-since: %s", line);
- deepen_rev_list = 1;
+ if (process_deepen_since(line, &deepen_since, &deepen_rev_list))
continue;
- }
- if (skip_prefix(line, "deepen-not ", &arg)) {
- char *ref = NULL;
- struct object_id oid;
- if (expand_ref(arg, strlen(arg), &oid, &ref) != 1)
- die("git upload-pack: ambiguous deepen-not: %s", line);
- string_list_append(&deepen_not, ref);
- free(ref);
- deepen_rev_list = 1;
+ if (process_deepen_not(line, &deepen_not, &deepen_rev_list))
continue;
- }
+
if (skip_prefix(line, "filter ", &arg)) {
if (!filter_capability_requested)
die("git upload-pack: filtering capability not negotiated");
parse_list_objects_filter(&filter_options, arg);
continue;
}
+
if (!skip_prefix(line, "want ", &arg) ||
get_oid_hex(arg, &oid_buf))
die("git upload-pack: protocol error, "
if (depth == 0 && !deepen_rev_list && shallows.nr == 0)
return;
- if (depth > 0 && deepen_rev_list)
- die("git upload-pack: deepen and deepen-since (or deepen-not) cannot be used together");
- if (depth > 0)
- deepen(depth, deepen_relative, &shallows);
- else if (deepen_rev_list) {
- struct argv_array av = ARGV_ARRAY_INIT;
- int i;
- argv_array_push(&av, "rev-list");
- if (deepen_since)
- argv_array_pushf(&av, "--max-age=%"PRItime, deepen_since);
- if (deepen_not.nr) {
- argv_array_push(&av, "--not");
- for (i = 0; i < deepen_not.nr; i++) {
- struct string_list_item *s = deepen_not.items + i;
- argv_array_push(&av, s->string);
- }
- argv_array_push(&av, "--not");
- }
- for (i = 0; i < want_obj.nr; i++) {
- struct object *o = want_obj.objects[i].item;
- argv_array_push(&av, oid_to_hex(&o->oid));
- }
- deepen_by_rev_list(av.argc, av.argv, &shallows);
- argv_array_clear(&av);
- }
- else
- if (shallows.nr > 0) {
- int i;
- for (i = 0; i < shallows.nr; i++)
- register_shallow(&shallows.objects[i].item->oid);
- }
-
- shallow_nr += shallows.nr;
+ if (send_shallow_list(depth, deepen_rev_list, deepen_since,
+ &deepen_not, &shallows))
+ packet_flush(1);
object_array_clear(&shallows);
}
return 0;
}
-static void upload_pack(void)
-{
- struct string_list symref = STRING_LIST_INIT_DUP;
-
- head_ref_namespaced(find_symref, &symref);
-
- if (advertise_refs || !stateless_rpc) {
- reset_timeout();
- head_ref_namespaced(send_ref, &symref);
- for_each_namespaced_ref(send_ref, &symref);
- advertise_shallow_grafts(1);
- packet_flush(1);
- } else {
- head_ref_namespaced(check_ref, NULL);
- for_each_namespaced_ref(check_ref, NULL);
- }
- string_list_clear(&symref, 1);
- if (advertise_refs)
- return;
-
- receive_needs();
- if (want_obj.nr) {
- get_common_commits();
- create_pack_file();
- }
-}
-
static int upload_pack_config(const char *var, const char *value, void *unused)
{
if (!strcmp("uploadpack.allowtipsha1inwant", var)) {
return parse_hide_refs_config(var, value, "uploadpack");
}
-int cmd_main(int argc, const char **argv)
+void upload_pack(struct upload_pack_options *options)
{
- const char *dir;
- int strict = 0;
- struct option options[] = {
- OPT_BOOL(0, "stateless-rpc", &stateless_rpc,
- N_("quit after a single request/response exchange")),
- OPT_BOOL(0, "advertise-refs", &advertise_refs,
- N_("exit immediately after initial ref advertisement")),
- OPT_BOOL(0, "strict", &strict,
- N_("do not try <directory>/.git/ if <directory> is no Git directory")),
- OPT_INTEGER(0, "timeout", &timeout,
- N_("interrupt transfer after <n> seconds of inactivity")),
- OPT_END()
- };
+ struct string_list symref = STRING_LIST_INIT_DUP;
- packet_trace_identity("upload-pack");
- check_replace_refs = 0;
+ stateless_rpc = options->stateless_rpc;
+ timeout = options->timeout;
+ daemon_mode = options->daemon_mode;
- argc = parse_options(argc, argv, NULL, options, upload_pack_usage, 0);
+ git_config(upload_pack_config, NULL);
- if (argc != 1)
- usage_with_options(upload_pack_usage, options);
+ head_ref_namespaced(find_symref, &symref);
- if (timeout)
- daemon_mode = 1;
+ if (options->advertise_refs || !stateless_rpc) {
+ reset_timeout();
+ head_ref_namespaced(send_ref, &symref);
+ for_each_namespaced_ref(send_ref, &symref);
+ advertise_shallow_grafts(1);
+ packet_flush(1);
+ } else {
+ head_ref_namespaced(check_ref, NULL);
+ for_each_namespaced_ref(check_ref, NULL);
+ }
+ string_list_clear(&symref, 1);
+ if (options->advertise_refs)
+ return;
- setup_path();
+ receive_needs();
+ if (want_obj.nr) {
+ get_common_commits();
+ create_pack_file();
+ }
+}
- dir = argv[0];
+struct upload_pack_data {
+ struct object_array wants;
+ struct oid_array haves;
- if (!enter_repo(dir, strict))
- die("'%s' does not appear to be a git repository", dir);
+ struct object_array shallows;
+ struct string_list deepen_not;
+ int depth;
+ timestamp_t deepen_since;
+ int deepen_rev_list;
+ int deepen_relative;
- git_config(upload_pack_config, NULL);
+ unsigned stateless_rpc : 1;
- switch (determine_protocol_version_server()) {
- case protocol_v1:
- /*
- * v1 is just the original protocol with a version string,
- * so just fall through after writing the version string.
- */
- if (advertise_refs || !stateless_rpc)
- packet_write_fmt(1, "version 1\n");
-
- /* fallthrough */
- case protocol_v0:
- upload_pack();
- break;
- case protocol_unknown_version:
- BUG("unknown protocol version");
+ unsigned use_thin_pack : 1;
+ unsigned use_ofs_delta : 1;
+ unsigned no_progress : 1;
+ unsigned use_include_tag : 1;
+ unsigned done : 1;
+};
+
+static void upload_pack_data_init(struct upload_pack_data *data)
+{
+ struct object_array wants = OBJECT_ARRAY_INIT;
+ struct oid_array haves = OID_ARRAY_INIT;
+ struct object_array shallows = OBJECT_ARRAY_INIT;
+ struct string_list deepen_not = STRING_LIST_INIT_DUP;
+
+ memset(data, 0, sizeof(*data));
+ data->wants = wants;
+ data->haves = haves;
+ data->shallows = shallows;
+ data->deepen_not = deepen_not;
+}
+
+static void upload_pack_data_clear(struct upload_pack_data *data)
+{
+ object_array_clear(&data->wants);
+ oid_array_clear(&data->haves);
+ object_array_clear(&data->shallows);
+ string_list_clear(&data->deepen_not, 0);
+}
+
+static int parse_want(const char *line)
+{
+ const char *arg;
+ if (skip_prefix(line, "want ", &arg)) {
+ struct object_id oid;
+ struct object *o;
+
+ if (get_oid_hex(arg, &oid))
+ die("git upload-pack: protocol error, "
+ "expected to get oid, not '%s'", line);
+
+ o = parse_object(&oid);
+ if (!o) {
+ packet_write_fmt(1,
+ "ERR upload-pack: not our ref %s",
+ oid_to_hex(&oid));
+ die("git upload-pack: not our ref %s",
+ oid_to_hex(&oid));
+ }
+
+ if (!(o->flags & WANTED)) {
+ o->flags |= WANTED;
+ add_object_array(o, NULL, &want_obj);
+ }
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static int parse_have(const char *line, struct oid_array *haves)
+{
+ const char *arg;
+ if (skip_prefix(line, "have ", &arg)) {
+ struct object_id oid;
+
+ if (get_oid_hex(arg, &oid))
+ die("git upload-pack: expected SHA1 object, got '%s'", arg);
+ oid_array_append(haves, &oid);
+ return 1;
}
return 0;
}
+
+static void process_args(struct packet_reader *request,
+ struct upload_pack_data *data)
+{
+ while (packet_reader_read(request) != PACKET_READ_FLUSH) {
+ const char *arg = request->line;
+
+ /* process want */
+ if (parse_want(arg))
+ continue;
+ /* process have line */
+ if (parse_have(arg, &data->haves))
+ continue;
+
+ /* process args like thin-pack */
+ if (!strcmp(arg, "thin-pack")) {
+ use_thin_pack = 1;
+ continue;
+ }
+ if (!strcmp(arg, "ofs-delta")) {
+ use_ofs_delta = 1;
+ continue;
+ }
+ if (!strcmp(arg, "no-progress")) {
+ no_progress = 1;
+ continue;
+ }
+ if (!strcmp(arg, "include-tag")) {
+ use_include_tag = 1;
+ continue;
+ }
+ if (!strcmp(arg, "done")) {
+ data->done = 1;
+ continue;
+ }
+
+ /* Shallow related arguments */
+ if (process_shallow(arg, &data->shallows))
+ continue;
+ if (process_deepen(arg, &data->depth))
+ continue;
+ if (process_deepen_since(arg, &data->deepen_since,
+ &data->deepen_rev_list))
+ continue;
+ if (process_deepen_not(arg, &data->deepen_not,
+ &data->deepen_rev_list))
+ continue;
+ if (!strcmp(arg, "deepen-relative")) {
+ data->deepen_relative = 1;
+ continue;
+ }
+
+ /* ignore unknown lines maybe? */
+		die("unexpected line: '%s'", arg);
+ }
+}
+
+static int process_haves(struct oid_array *haves, struct oid_array *common)
+{
+ int i;
+
+ /* Process haves */
+ for (i = 0; i < haves->nr; i++) {
+ const struct object_id *oid = &haves->oid[i];
+ struct object *o;
+ int we_knew_they_have = 0;
+
+ if (!has_object_file(oid))
+ continue;
+
+ oid_array_append(common, oid);
+
+ o = parse_object(oid);
+ if (!o)
+ die("oops (%s)", oid_to_hex(oid));
+ if (o->type == OBJ_COMMIT) {
+ struct commit_list *parents;
+ struct commit *commit = (struct commit *)o;
+ if (o->flags & THEY_HAVE)
+ we_knew_they_have = 1;
+ else
+ o->flags |= THEY_HAVE;
+ if (!oldest_have || (commit->date < oldest_have))
+ oldest_have = commit->date;
+ for (parents = commit->parents;
+ parents;
+ parents = parents->next)
+ parents->item->object.flags |= THEY_HAVE;
+ }
+ if (!we_knew_they_have)
+ add_object_array(o, NULL, &have_obj);
+ }
+
+ return 0;
+}
+
+static int send_acks(struct oid_array *acks, struct strbuf *response)
+{
+ int i;
+
+ packet_buf_write(response, "acknowledgments\n");
+
+ /* Send Acks */
+ if (!acks->nr)
+ packet_buf_write(response, "NAK\n");
+
+ for (i = 0; i < acks->nr; i++) {
+ packet_buf_write(response, "ACK %s\n",
+ oid_to_hex(&acks->oid[i]));
+ }
+
+ if (ok_to_give_up()) {
+ /* Send Ready */
+ packet_buf_write(response, "ready\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static int process_haves_and_send_acks(struct upload_pack_data *data)
+{
+ struct oid_array common = OID_ARRAY_INIT;
+ struct strbuf response = STRBUF_INIT;
+ int ret = 0;
+
+ process_haves(&data->haves, &common);
+ if (data->done) {
+ ret = 1;
+ } else if (send_acks(&common, &response)) {
+ packet_buf_delim(&response);
+ ret = 1;
+ } else {
+ /* Add Flush */
+ packet_buf_flush(&response);
+ ret = 0;
+ }
+
+ /* Send response */
+ write_or_die(1, response.buf, response.len);
+ strbuf_release(&response);
+
+ oid_array_clear(&data->haves);
+ oid_array_clear(&common);
+ return ret;
+}
+
+static void send_shallow_info(struct upload_pack_data *data)
+{
+ /* No shallow info needs to be sent */
+ if (!data->depth && !data->deepen_rev_list && !data->shallows.nr &&
+ !is_repository_shallow())
+ return;
+
+ packet_write_fmt(1, "shallow-info\n");
+
+ if (!send_shallow_list(data->depth, data->deepen_rev_list,
+ data->deepen_since, &data->deepen_not,
+ &data->shallows) && is_repository_shallow())
+ deepen(INFINITE_DEPTH, data->deepen_relative, &data->shallows);
+
+ packet_delim(1);
+}
+
+enum fetch_state {
+ FETCH_PROCESS_ARGS = 0,
+ FETCH_SEND_ACKS,
+ FETCH_SEND_PACK,
+ FETCH_DONE,
+};
+
+int upload_pack_v2(struct repository *r, struct argv_array *keys,
+ struct packet_reader *request)
+{
+ enum fetch_state state = FETCH_PROCESS_ARGS;
+ struct upload_pack_data data;
+
+ upload_pack_data_init(&data);
+ use_sideband = LARGE_PACKET_MAX;
+
+ while (state != FETCH_DONE) {
+ switch (state) {
+ case FETCH_PROCESS_ARGS:
+ process_args(request, &data);
+
+ if (!want_obj.nr) {
+ /*
+ * Request didn't contain any 'want' lines,
+ * guess they didn't want anything.
+ */
+ state = FETCH_DONE;
+ } else if (data.haves.nr) {
+ /*
+				 * Request had 'have' lines, so let's ACK them.
+ */
+ state = FETCH_SEND_ACKS;
+ } else {
+ /*
+				 * Request had 'want's but no 'have's, so we can
+				 * immediately go to construct and send a pack.
+ */
+ state = FETCH_SEND_PACK;
+ }
+ break;
+ case FETCH_SEND_ACKS:
+ if (process_haves_and_send_acks(&data))
+ state = FETCH_SEND_PACK;
+ else
+ state = FETCH_DONE;
+ break;
+ case FETCH_SEND_PACK:
+ send_shallow_info(&data);
+
+ packet_write_fmt(1, "packfile\n");
+ create_pack_file();
+ state = FETCH_DONE;
+ break;
+ case FETCH_DONE:
+ continue;
+ }
+ }
+
+ upload_pack_data_clear(&data);
+ return 0;
+}
+
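A rough sketch of the v2 "fetch" exchange this state machine produces (framing abbreviated; section names match the strings written above):

	client: want <oid> ... [have <oid> ...] [done]        flush
	server: acknowledgments  (ACK <oid> .../NAK, then "ready" once ok_to_give_up())
	server: shallow-info     (only when shallow data must be sent)
	server: packfile         (followed by the stream from create_pack_file())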
+int upload_pack_advertise(struct repository *r,
+ struct strbuf *value)
+{
+ if (value)
+ strbuf_addstr(value, "shallow");
+ return 1;
+}
--- /dev/null
+#ifndef UPLOAD_PACK_H
+#define UPLOAD_PACK_H
+
+struct upload_pack_options {
+ int stateless_rpc;
+ int advertise_refs;
+ unsigned int timeout;
+ int daemon_mode;
+};
+
+void upload_pack(struct upload_pack_options *options);
+
+struct repository;
+struct argv_array;
+struct packet_reader;
+extern int upload_pack_v2(struct repository *r, struct argv_array *keys,
+ struct packet_reader *request);
+
+struct strbuf;
+extern int upload_pack_advertise(struct repository *r,
+ struct strbuf *value);
+
+#endif /* UPLOAD_PACK_H */
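A minimal sketch of a caller of the new entry point (hypothetical values; a real caller would fill these in from its command line):

	struct upload_pack_options opts = { 0 };

	opts.stateless_rpc = 1;
	opts.timeout = 45;
	opts.daemon_mode = 1;
	upload_pack(&opts);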
/*
* Sorted list of non-overlapping intervals of non-spacing characters,
*/
-#include "unicode_width.h"
+#include "unicode-width.h"
/* test for 8-bit control characters */
if (ch == 0)
--- /dev/null
+#include "cache.h"
+#include "run-command.h"
+
+/*
+ * Some cases use stdio, but want to flush after the write
+ * to get error handling (and to get better interactive
+ * behaviour - not buffering excessively).
+ *
+ * Of course, if the flush happened within the write itself,
+ * we've already lost the error code, and cannot report it any
+ * more. So we just ignore that case instead (and hope we get
+ * the right error code on the flush).
+ *
+ * If the file handle is stdout, and stdout is a file, then skip the
+ * flush entirely since it's not needed.
+ */
+void maybe_flush_or_die(FILE *f, const char *desc)
+{
+ static int skip_stdout_flush = -1;
+ struct stat st;
+ char *cp;
+
+ if (f == stdout) {
+ if (skip_stdout_flush < 0) {
+ cp = getenv("GIT_FLUSH");
+ if (cp)
+ skip_stdout_flush = (atoi(cp) == 0);
+ else if ((fstat(fileno(stdout), &st) == 0) &&
+ S_ISREG(st.st_mode))
+ skip_stdout_flush = 1;
+ else
+ skip_stdout_flush = 0;
+ }
+ if (skip_stdout_flush && !ferror(f))
+ return;
+ }
+ if (fflush(f)) {
+ check_pipe(errno);
+ die_errno("write failure on '%s'", desc);
+ }
+}
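In other words (a reading of the code above, not new behavior): GIT_FLUSH=0 disables the per-write flush entirely, any other numeric value forces it, and when GIT_FLUSH is unset the flush is skipped only when stdout is a regular file.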
+
+void fprintf_or_die(FILE *f, const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+
+ va_start(ap, fmt);
+ ret = vfprintf(f, fmt, ap);
+ va_end(ap);
+
+ if (ret < 0) {
+ check_pipe(errno);
+ die_errno("write error");
+ }
+}
+
+void fsync_or_die(int fd, const char *msg)
+{
+ if (fsync(fd) < 0) {
+ die_errno("fsync error on '%s'", msg);
+ }
+}
+
+void write_or_die(int fd, const void *buf, size_t count)
+{
+ if (write_in_full(fd, buf, count) < 0) {
+ check_pipe(errno);
+ die_errno("write error");
+ }
+}
+++ /dev/null
-#include "cache.h"
-#include "run-command.h"
-
-/*
- * Some cases use stdio, but want to flush after the write
- * to get error handling (and to get better interactive
- * behaviour - not buffering excessively).
- *
- * Of course, if the flush happened within the write itself,
- * we've already lost the error code, and cannot report it any
- * more. So we just ignore that case instead (and hope we get
- * the right error code on the flush).
- *
- * If the file handle is stdout, and stdout is a file, then skip the
- * flush entirely since it's not needed.
- */
-void maybe_flush_or_die(FILE *f, const char *desc)
-{
- static int skip_stdout_flush = -1;
- struct stat st;
- char *cp;
-
- if (f == stdout) {
- if (skip_stdout_flush < 0) {
- cp = getenv("GIT_FLUSH");
- if (cp)
- skip_stdout_flush = (atoi(cp) == 0);
- else if ((fstat(fileno(stdout), &st) == 0) &&
- S_ISREG(st.st_mode))
- skip_stdout_flush = 1;
- else
- skip_stdout_flush = 0;
- }
- if (skip_stdout_flush && !ferror(f))
- return;
- }
- if (fflush(f)) {
- check_pipe(errno);
- die_errno("write failure on '%s'", desc);
- }
-}
-
-void fprintf_or_die(FILE *f, const char *fmt, ...)
-{
- va_list ap;
- int ret;
-
- va_start(ap, fmt);
- ret = vfprintf(f, fmt, ap);
- va_end(ap);
-
- if (ret < 0) {
- check_pipe(errno);
- die_errno("write error");
- }
-}
-
-void fsync_or_die(int fd, const char *msg)
-{
- if (fsync(fd) < 0) {
- die_errno("fsync error on '%s'", msg);
- }
-}
-
-void write_or_die(int fd, const void *buf, size_t count)
-{
- if (write_in_full(fd, buf, count) < 0) {
- check_pipe(errno);
- die_errno("write error");
- }
-}