git-applymbox
git-applypatch
git-archimport
+git-archive
git-bisect
git-branch
git-cat-file
git-update-index
git-update-ref
git-update-server-info
+git-upload-archive
git-upload-pack
git-upload-tar
git-var
have to worry. git supports "shared public repository" style of
cooperation you are probably more familiar with as well.
-See link:cvs-migration.txt[git for CVS users] for the details.
+See link:cvs-migration.html[git for CVS users] for the details.
Bundling your work together
---------------------------
Git allows you to specify scripts called "hooks" to be run at certain
points. You can use these, for example, to send all commits to the shared
-repository to a mailing list. See link:hooks.txt[Hooks used by git].
+repository to a mailing list. See link:hooks.html[Hooks used by git].
You can enforce finer grained permissions using update hooks. See
link:howto/update-hook-example.txt[Controlling access to branches using
context exist they all must match. By default no context is
ever ignored.
+--unidiff-zero::
+ By default, gitlink:git-apply[1] expects that the patch being
+ applied is a unified diff with at least one line of context.
+ This provides good safety measures, but breaks down when
+ applying a diff generated with --unified=0. To bypass these
+ checks use '--unidiff-zero'.
++
+Note that, for the reasons stated above, the use of context-free
+patches is discouraged.
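++
+A minimal sketch, assuming `P.diff` was produced with zero context
+(e.g. with `diff --unified=0`):
++
+	git-apply --unidiff-zero P.diff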
+
--apply::
If you use any of the options marked "Turns off
'apply'" above, gitlink:git-apply[1] reads and outputs the
deletion part but not addition part.
--allow-binary-replacement, --binary::
- When applying a patch, which is a git-enhanced patch
- that was prepared to record the pre- and post-image object
- name in full, and the path being patched exactly matches
- the object the patch applies to (i.e. "index" line's
- pre-image object name is what is in the working tree),
- and the post-image object is available in the object
- database, use the post-image object as the patch
- result. This allows binary files to be patched in a
- very limited way.
+	Historically we did not allow a binary patch to be applied
+	without explicit permission from the user, and this flag
+	was the way to do so. Currently we always allow binary
+	patch application, so this is a no-op.
--exclude=<path-pattern>::
Don't apply changes to files matching the given path pattern. This can
--- /dev/null
+git-archive(1)
+==============
+
+NAME
+----
+git-archive - Creates an archive of the files in the named tree
+
+
+SYNOPSIS
+--------
+'git-archive' --format=<fmt> [--list] [--prefix=<prefix>/] [<extra>]
+ [--remote=<repo>] <tree-ish> [path...]
+
+DESCRIPTION
+-----------
+Creates an archive of the specified format containing the tree
+structure for the named tree. If <prefix> is specified it is
+prepended to the filenames in the archive.
+
+'git-archive' behaves differently when given a tree ID versus when
+given a commit ID or tag ID. In the first case the current time is
+used as modification time of each file in the archive. In the latter
+case the commit time as recorded in the referenced commit object is
+used instead. Additionally the commit ID is stored in a global
+extended pax header if the tar format is used; it can be extracted
+using 'git-get-tar-commit-id'. In ZIP files it is stored as a file
+comment.
+
+OPTIONS
+-------
+
+--format=<fmt>::
+ Format of the resulting archive: 'tar', 'zip'...
+
+--list::
+ Show all available formats.
+
+--prefix=<prefix>/::
+ Prepend <prefix>/ to each filename in the archive.
+
+<extra>::
+	Any extra options that the archiver backend understands.
+
+--remote=<repo>::
+	Instead of making a tar archive from the local repository,
+ retrieve a tar archive from a remote repository.
+
+<tree-ish>::
+ The tree or commit to produce an archive for.
+
+path::
+ If one or more paths are specified, include only these in the
+ archive, otherwise include all files and subdirectories.
+
+CONFIGURATION
+-------------
+By default, file and directory modes are set to 0666 or 0777 in tar
+archives. It is possible to change this by setting the "umask" variable
+in the repository configuration as follows:
+
+[tar]
+ umask = 002 ;# group friendly
+
+The special umask value "user" indicates that the user's current umask
+will be used instead. The default value remains 0, which means world
+readable/writable files and directories.
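+
+The variable can also be set from the command line, for example:
+
+	git repo-config tar.umask 002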
+
+EXAMPLES
+--------
+git archive --format=tar --prefix=junk/ HEAD | (cd /var/tmp/ && tar xf -)::
+
+	Create a tar archive that contains the contents of the
+	latest commit on the current branch, and extract it in the
+	`/var/tmp/junk` directory.
+
+git archive --format=tar --prefix=git-1.4.0/ v1.4.0 | gzip >git-1.4.0.tar.gz::
+
+ Create a compressed tarball for v1.4.0 release.
+
+git archive --format=tar --prefix=git-1.4.0/ v1.4.0{caret}\{tree\} | gzip >git-1.4.0.tar.gz::
+
+ Create a compressed tarball for v1.4.0 release, but without a
+ global extended pax header.
+
+git archive --format=zip --prefix=git-docs/ HEAD:Documentation/ > git-1.4.0-docs.zip::
+
+ Put everything in the current head's Documentation/ directory
+ into 'git-1.4.0-docs.zip', with the prefix 'git-docs/'.
+
+Author
+------
+Written by Franck Bui-Huu and Rene Scharfe.
+
+Documentation
+--------------
+Documentation by David Greaves, Junio C Hamano and the git-list <git@vger.kernel.org>.
+
+GIT
+---
+Part of the gitlink:git[7] suite
[verse]
'git-grep' [--cached]
[-a | --text] [-I] [-i | --ignore-case] [-w | --word-regexp]
- [-v | --invert-match] [--full-name]
+ [-v | --invert-match] [-h|-H] [--full-name]
[-E | --extended-regexp] [-G | --basic-regexp] [-F | --fixed-strings]
[-n] [-l | --files-with-matches] [-L | --files-without-match]
[-c | --count]
-v | --invert-match::
Select non-matching lines.
+-h | -H::
+	By default, the command shows the filename for each
+	match. The `-h` option suppresses this output.
+	`-H` is there for completeness and does nothing except
+	override a `-h` given earlier on the command line.
+
--full-name::
When run from a subdirectory, the command usually
outputs paths relative to the current directory. This
SYNOPSIS
--------
-'git-repack' [-a] [-d] [-f] [-l] [-n] [-q]
+'git-repack' [-a] [-d] [-f] [-l] [-n] [-q] [--window=N] [--depth=N]
DESCRIPTION
-----------
Do not update the server information with
`git update-server-info`.
+--window=[N], --depth=[N]::
+	These two options affect how the objects contained in the pack are
+	stored using delta compression. The objects are first internally
+	sorted by type, size and optionally name, then compared against the
+	other objects within `--window` to see if using delta compression saves
+	space. `--depth` limits the maximum delta depth; making it too deep
+	affects the performance on the unpacker side, because delta data needs
+	to be applied that many times to get to the necessary object.
+
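+For example, a more aggressive repack could look like this (the
+numbers are only an illustration):
+
+	git repack -a -d --window=20 --depth=20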
+
Author
------
Written by Linus Torvalds <torvalds@osdl.org>
SYNOPSIS
--------
-'git-unpack-objects' [-n] [-q] <pack-file
+'git-unpack-objects' [-n] [-q] [-r] <pack-file
DESCRIPTION
The command usually shows percentage progress. This
flag suppresses it.
+-r::
+ When unpacking a corrupt packfile, the command dies at
+ the first corruption. This flag tells it to keep going
+ and make the best effort to recover as many objects as
+ possible.
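+
+For example, to salvage whatever objects can still be read from a
+corrupt pack ('corrupt.pack' is only a placeholder name):
+
+	git-unpack-objects -r <corrupt.pack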
+
Author
------
--- /dev/null
+git-upload-archive(1)
+=====================
+
+NAME
+----
+git-upload-archive - Send archive
+
+
+SYNOPSIS
+--------
+'git-upload-archive' <directory>
+
+DESCRIPTION
+-----------
+Invoked by 'git-archive --remote' and sends a generated archive to the
+other end over the git protocol.
+
+This command is usually not invoked directly by the end user. The UI
+for the protocol is on the 'git-archive' side, and the program pair
+is meant to be used to get an archive from a remote repository.
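+
+For example, the client side typically triggers it like this (the URL
+is only an illustration):
+
+	git archive --format=tar --remote=git://example.com/repo.git HEAD | tar xf -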
+
+OPTIONS
+-------
+<directory>::
+ The repository to get a tar archive from.
+
+Author
+------
+Written by Franck Bui-Huu.
+
+Documentation
+--------------
+Documentation by Junio C Hamano and the git-list <git@vger.kernel.org>.
+
+GIT
+---
+Part of the gitlink:git[7] suite
In addition to being the staging area for new commits, the index file
is also populated from the object database when checking out a
branch, and is used to hold the trees involved in a merge operation.
-See the link:core-tutorial.txt[core tutorial] and the relevant man
+See the link:core-tutorial.html[core tutorial] and the relevant man
pages for details.
What next?
GITWEB_HOME_LINK_STR = projects
GITWEB_SITENAME =
GITWEB_PROJECTROOT = /pub/git
+GITWEB_EXPORT_OK =
+GITWEB_STRICT_EXPORT =
GITWEB_BASE_URL =
GITWEB_LIST =
GITWEB_HOMETEXT = indextext.html
XDIFF_LIB=xdiff/lib.a
LIB_H = \
- blob.h cache.h commit.h csum-file.h delta.h \
- diff.h object.h pack.h pkt-line.h quote.h refs.h \
+ archive.h blob.h cache.h commit.h csum-file.h delta.h \
+ diff.h object.h pack.h pkt-line.h quote.h refs.h sideband.h \
run-command.h strbuf.h tag.h tree.h git-compat-util.h revision.h \
tree-walk.h log-tree.h dir.h path-list.h unpack-trees.h builtin.h
LIB_OBJS = \
blob.o commit.o connect.o csum-file.o cache-tree.o base85.o \
date.o diff-delta.o entry.o exec_cmd.o ident.o lockfile.o \
- object.o pack-check.o patch-delta.o path.o pkt-line.o \
+ object.o pack-check.o patch-delta.o path.o pkt-line.o sideband.o \
quote.o read-cache.o refs.o run-command.o dir.o object-refs.o \
server-info.o setup.o sha1_file.o sha1_name.o strbuf.o \
tag.o tree.o usage.o config.o environment.o ctype.o copy.o \
BUILTIN_OBJS = \
builtin-add.o \
builtin-apply.o \
+ builtin-archive.o \
builtin-cat-file.o \
builtin-checkout-index.o \
builtin-check-ref-format.o \
builtin-unpack-objects.o \
builtin-update-index.o \
builtin-update-ref.o \
+ builtin-upload-archive.o \
builtin-upload-tar.o \
builtin-verify-pack.o \
builtin-write-tree.o \
-e 's|++GITWEB_HOME_LINK_STR++|$(GITWEB_HOME_LINK_STR)|g' \
-e 's|++GITWEB_SITENAME++|$(GITWEB_SITENAME)|g' \
-e 's|++GITWEB_PROJECTROOT++|$(GITWEB_PROJECTROOT)|g' \
+ -e 's|++GITWEB_EXPORT_OK++|$(GITWEB_EXPORT_OK)|g' \
+ -e 's|++GITWEB_STRICT_EXPORT++|$(GITWEB_STRICT_EXPORT)|g' \
-e 's|++GITWEB_BASE_URL++|$(GITWEB_BASE_URL)|g' \
-e 's|++GITWEB_LIST++|$(GITWEB_LIST)|g' \
-e 's|++GITWEB_HOMETEXT++|$(GITWEB_HOMETEXT)|g' \
--- /dev/null
+#ifndef ARCHIVE_H
+#define ARCHIVE_H
+
+#define MAX_EXTRA_ARGS 32
+#define MAX_ARGS (MAX_EXTRA_ARGS + 32)
+
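+/*
+ * Everything a format backend needs to write an archive: the prefix
+ * prepended to each path (base), the tree to archive, the commit it
+ * came from (if any), the timestamp to record, an optional pathspec,
+ * a verbosity flag and format-specific extra data.
+ */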
+struct archiver_args {
+ const char *base;
+ struct tree *tree;
+ const unsigned char *commit_sha1;
+ time_t time;
+ const char **pathspec;
+ unsigned int verbose : 1;
+ void *extra;
+};
+
+typedef int (*write_archive_fn_t)(struct archiver_args *);
+
+typedef void *(*parse_extra_args_fn_t)(int argc, const char **argv);
+
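+/*
+ * A format backend: the name given to --format, its parsed arguments,
+ * the function that writes the archive and an optional parser for
+ * format-specific extra options.
+ */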
+struct archiver {
+ const char *name;
+ struct archiver_args args;
+ write_archive_fn_t write_archive;
+ parse_extra_args_fn_t parse_extra;
+};
+
+extern struct archiver archivers[];
+
+extern int parse_archive_args(int argc,
+ const char **argv,
+ struct archiver *ar);
+
+extern void parse_treeish_arg(const char **treeish,
+ struct archiver_args *ar_args,
+ const char *prefix);
+
+extern void parse_pathspec_arg(const char **pathspec,
+ struct archiver_args *args);
+/*
+ * Archive-format specific backends.
+ */
+extern int write_tar_archive(struct archiver_args *);
+extern int write_zip_archive(struct archiver_args *);
+extern void *parse_extra_zip_args(int argc, const char **argv);
+
+#endif /* ARCHIVE_H */
base = "";
if (baselen) {
char *common = xmalloc(baselen + 1);
- common = xmalloc(baselen + 1);
memcpy(common, *pathspec, baselen);
common[baselen] = 0;
path = base = common;
static int prefix_length = -1;
static int newfd = -1;
+static int unidiff_zero;
static int p_value = 1;
-static int allow_binary_replacement;
static int check_index;
static int write_index;
static int cached;
}
/*
- * Parse a unified diff. Note that this really needs
- * to parse each fragment separately, since the only
- * way to know the difference between a "---" that is
- * part of a patch, and a "---" that starts the next
- * patch is to look at the line counts..
+ * Parse a unified diff. Note that this really needs to parse each
+ * fragment separately, since the only way to know the difference
+ * between a "---" that is part of a patch, and a "---" that starts
+ * the next patch is to look at the line counts..
*/
static int parse_fragment(char *line, unsigned long size, struct patch *patch, struct fragment *fragment)
{
leading = 0;
trailing = 0;
- if (patch->is_new < 0) {
- patch->is_new = !oldlines;
- if (!oldlines)
- patch->old_name = NULL;
- }
- if (patch->is_delete < 0) {
- patch->is_delete = !newlines;
- if (!newlines)
- patch->new_name = NULL;
- }
-
- if (patch->is_new && oldlines)
- return error("new file depends on old contents");
- if (patch->is_delete != !newlines) {
- if (newlines)
- return error("deleted file still has contents");
- fprintf(stderr, "** warning: file %s becomes empty but is not deleted\n", patch->new_name);
- }
-
/* Parse the thing.. */
line += len;
size -= len;
linenr++;
added = deleted = 0;
- for (offset = len; size > 0; offset += len, size -= len, line += len, linenr++) {
+ for (offset = len;
+ 0 < size;
+ offset += len, size -= len, line += len, linenr++) {
if (!oldlines && !newlines)
break;
len = linelen(line, size);
patch->lines_added += added;
patch->lines_deleted += deleted;
+
+ if (0 < patch->is_new && oldlines)
+ return error("new file depends on old contents");
+ if (0 < patch->is_delete && newlines)
+ return error("deleted file still has contents");
return offset;
}
static int parse_single_patch(char *line, unsigned long size, struct patch *patch)
{
unsigned long offset = 0;
+ unsigned long oldlines = 0, newlines = 0, context = 0;
struct fragment **fragp = &patch->fragments;
while (size > 4 && !memcmp(line, "@@ -", 4)) {
len = parse_fragment(line, size, patch, fragment);
if (len <= 0)
die("corrupt patch at line %d", linenr);
-
fragment->patch = line;
fragment->size = len;
+ oldlines += fragment->oldlines;
+ newlines += fragment->newlines;
+ context += fragment->leading + fragment->trailing;
*fragp = fragment;
fragp = &fragment->next;
line += len;
size -= len;
}
+
+ /*
+ * If something was removed (i.e. we have old-lines) it cannot
+ * be creation, and if something was added it cannot be
+ * deletion. However, the reverse is not true; --unified=0
+ * patches that only add are not necessarily creation even
+ * though they do not have any old lines, and ones that only
+ * delete are not necessarily deletion.
+ *
+ * Unfortunately, a real creation/deletion patch does _not_ have
+ * any context line by definition, so we cannot safely tell it
+ * apart with --unified=0 insanity. At least if the patch has
+ * more than one hunk it is not creation or deletion.
+ */
+ if (patch->is_new < 0 &&
+ (oldlines || (patch->fragments && patch->fragments->next)))
+ patch->is_new = 0;
+ if (patch->is_delete < 0 &&
+ (newlines || (patch->fragments && patch->fragments->next)))
+ patch->is_delete = 0;
+ if (!unidiff_zero || context) {
+ /* If the user says the patch is not generated with
+ * --unified=0, or if we have seen context lines,
+ * then not having oldlines means the patch is creation,
+ * and not having newlines means the patch is deletion.
+ */
+ if (patch->is_new < 0 && !oldlines)
+ patch->is_new = 1;
+ if (patch->is_delete < 0 && !newlines)
+ patch->is_delete = 1;
+ }
+
+ if (0 < patch->is_new && oldlines)
+ die("new file %s depends on old contents", patch->new_name);
+ if (0 < patch->is_delete && newlines)
+ die("deleted file %s still has contents", patch->old_name);
+ if (!patch->is_delete && !newlines && context)
+ fprintf(stderr, "** warning: file %s becomes empty but "
+ "is not deleted\n", patch->new_name);
+
return offset;
}
}
}
- /* Empty patch cannot be applied if:
- * - it is a binary patch and we do not do binary_replace, or
- * - text patch without metadata change
+ /* Empty patch cannot be applied if it is a text patch
+ * without metadata change. A binary patch appears
+ * empty to us here.
*/
if ((apply || check) &&
- (patch->is_binary
- ? !allow_binary_replacement
- : !metadata_changes(patch)))
+ (!patch->is_binary && !metadata_changes(patch)))
die("patch with only garbage at line %d", linenr);
}
/*
* If we don't have any leading/trailing data in the patch,
* we want it to match at the beginning/end of the file.
+ *
+ * But that would break if the patch is generated with
+ * --unified=0; sane people wouldn't do that to cause us
+ * trouble, but we try to please not so sane ones as well.
*/
- match_beginning = !leading && (frag->oldpos == 1);
- match_end = !trailing;
+ if (unidiff_zero) {
+ match_beginning = (!leading && !frag->oldpos);
+ match_end = 0;
+ }
+ else {
+ match_beginning = !leading && (frag->oldpos == 1);
+ match_end = !trailing;
+ }
lines = 0;
pos = frag->newpos;
unsigned char hdr[50];
int hdrlen;
- if (!allow_binary_replacement)
- return error("cannot apply binary patch to '%s' "
- "without --allow-binary-replacement",
- name);
-
/* For safety, we require patch index line to contain
* full 40-byte textual SHA1 for old and new, at least for now.
*/
patch->result = desc.buffer;
patch->resultsize = desc.size;
- if (patch->is_delete && patch->resultsize)
+ if (0 < patch->is_delete && patch->resultsize)
return error("removal patch leaves file contents");
return 0;
old_name, st_mode, patch->old_mode);
}
- if (new_name && prev_patch && prev_patch->is_delete &&
+ if (new_name && prev_patch && 0 < prev_patch->is_delete &&
!strcmp(prev_patch->old_name, new_name))
/* A type-change diff is always split into a patch to
* delete old, immediately followed by a patch to
else
ok_if_exists = 0;
- if (new_name && (patch->is_new | patch->is_rename | patch->is_copy)) {
+ if (new_name &&
+ ((0 < patch->is_new) | (0 < patch->is_rename) | patch->is_copy)) {
if (check_index &&
cache_name_pos(new_name, strlen(new_name)) >= 0 &&
!ok_if_exists)
return error("%s: %s", new_name, strerror(errno));
}
if (!patch->new_mode) {
- if (patch->is_new)
+ if (0 < patch->is_new)
patch->new_mode = S_IFREG | 0644;
else
patch->new_mode = patch->old_mode;
const char *name;
name = patch->old_name ? patch->old_name : patch->new_name;
- if (patch->is_new)
+ if (0 < patch->is_new)
sha1_ptr = null_sha1;
else if (get_sha1(patch->old_sha1_prefix, sha1))
die("sha1 information is lacking or useless (%s).",
}
if (!strcmp(arg, "--allow-binary-replacement") ||
!strcmp(arg, "--binary")) {
- allow_binary_replacement = 1;
- continue;
+ continue; /* now no-op */
}
if (!strcmp(arg, "--numstat")) {
apply = 0;
apply_in_reverse = 1;
continue;
}
+ if (!strcmp(arg, "--unidiff-zero")) {
+ unidiff_zero = 1;
+ continue;
+ }
if (!strcmp(arg, "--reject")) {
apply = apply_with_reject = apply_verbosely = 1;
continue;
--- /dev/null
+/*
+ * Copyright (c) 2006 Franck Bui-Huu
+ * Copyright (c) 2006 Rene Scharfe
+ */
+#include <time.h>
+#include "cache.h"
+#include "builtin.h"
+#include "archive.h"
+#include "commit.h"
+#include "tree-walk.h"
+#include "exec_cmd.h"
+#include "pkt-line.h"
+#include "sideband.h"
+
+static const char archive_usage[] = \
+"git-archive --format=<fmt> [--prefix=<prefix>/] [--verbose] [<extra>] <tree-ish> [path...]";
+
+struct archiver archivers[] = {
+ {
+ .name = "tar",
+ .write_archive = write_tar_archive,
+ },
+ {
+ .name = "zip",
+ .write_archive = write_zip_archive,
+ .parse_extra = parse_extra_zip_args,
+ },
+};
+
+static int run_remote_archiver(const char *remote, int argc,
+ const char **argv)
+{
+ char *url, buf[LARGE_PACKET_MAX];
+ int fd[2], i, len, rv;
+ pid_t pid;
+ const char *exec = "git-upload-archive";
+ int exec_at = 0;
+
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (!strncmp("--exec=", arg, 7)) {
+ if (exec_at)
+ die("multiple --exec specified");
+ exec = arg + 7;
+ exec_at = i;
+ break;
+ }
+ }
+
+ url = xstrdup(remote);
+ pid = git_connect(fd, url, exec);
+ if (pid < 0)
+ return pid;
+
+ for (i = 1; i < argc; i++) {
+ if (i == exec_at)
+ continue;
+ packet_write(fd[1], "argument %s\n", argv[i]);
+ }
+ packet_flush(fd[1]);
+
+ len = packet_read_line(fd[0], buf, sizeof(buf));
+ if (!len)
+ die("git-archive: expected ACK/NAK, got EOF");
+ if (buf[len-1] == '\n')
+ buf[--len] = 0;
+ if (strcmp(buf, "ACK")) {
+ if (len > 5 && !strncmp(buf, "NACK ", 5))
+ die("git-archive: NACK %s", buf + 5);
+ die("git-archive: protocol error");
+ }
+
+ len = packet_read_line(fd[0], buf, sizeof(buf));
+ if (len)
+ die("git-archive: expected a flush");
+
+ /* Now, start reading from fd[0] and spit it out to stdout */
+ rv = recv_sideband("archive", fd[0], 1, 2, buf, sizeof(buf));
+ close(fd[0]);
+ rv |= finish_connect(pid);
+
+ return !!rv;
+}
+
+static int init_archiver(const char *name, struct archiver *ar)
+{
+ int rv = -1, i;
+
+ for (i = 0; i < ARRAY_SIZE(archivers); i++) {
+ if (!strcmp(name, archivers[i].name)) {
+ memcpy(ar, &archivers[i], sizeof(struct archiver));
+ rv = 0;
+ break;
+ }
+ }
+ return rv;
+}
+
+void parse_pathspec_arg(const char **pathspec, struct archiver_args *ar_args)
+{
+ ar_args->pathspec = get_pathspec(ar_args->base, pathspec);
+}
+
+void parse_treeish_arg(const char **argv, struct archiver_args *ar_args,
+ const char *prefix)
+{
+ const char *name = argv[0];
+ const unsigned char *commit_sha1;
+ time_t archive_time;
+ struct tree *tree;
+ struct commit *commit;
+ unsigned char sha1[20];
+
+ if (get_sha1(name, sha1))
+ die("Not a valid object name");
+
+ commit = lookup_commit_reference_gently(sha1, 1);
+ if (commit) {
+ commit_sha1 = commit->object.sha1;
+ archive_time = commit->date;
+ } else {
+ commit_sha1 = NULL;
+ archive_time = time(NULL);
+ }
+
+ tree = parse_tree_indirect(sha1);
+ if (tree == NULL)
+ die("not a tree object");
+
+ if (prefix) {
+ unsigned char tree_sha1[20];
+ unsigned int mode;
+ int err;
+
+ err = get_tree_entry(tree->object.sha1, prefix,
+ tree_sha1, &mode);
+ if (err || !S_ISDIR(mode))
+ die("current working directory is untracked");
+
+ free(tree);
+ tree = parse_tree_indirect(tree_sha1);
+ }
+ ar_args->tree = tree;
+ ar_args->commit_sha1 = commit_sha1;
+ ar_args->time = archive_time;
+}
+
+int parse_archive_args(int argc, const char **argv, struct archiver *ar)
+{
+ const char *extra_argv[MAX_EXTRA_ARGS];
+ int extra_argc = 0;
+ const char *format = NULL; /* might want to default to "tar" */
+ const char *base = "";
+ int verbose = 0;
+ int i;
+
+ for (i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+
+ if (!strcmp(arg, "--list") || !strcmp(arg, "-l")) {
+ for (i = 0; i < ARRAY_SIZE(archivers); i++)
+ printf("%s\n", archivers[i].name);
+ exit(0);
+ }
+ if (!strcmp(arg, "--verbose") || !strcmp(arg, "-v")) {
+ verbose = 1;
+ continue;
+ }
+ if (!strncmp(arg, "--format=", 9)) {
+ format = arg + 9;
+ continue;
+ }
+ if (!strncmp(arg, "--prefix=", 9)) {
+ base = arg + 9;
+ continue;
+ }
+ if (!strcmp(arg, "--")) {
+ i++;
+ break;
+ }
+ if (arg[0] == '-') {
+ if (extra_argc > MAX_EXTRA_ARGS - 1)
+ die("Too many extra options");
+ extra_argv[extra_argc++] = arg;
+ continue;
+ }
+ break;
+ }
+
+ /* We need at least one parameter -- tree-ish */
+ if (argc - 1 < i)
+ usage(archive_usage);
+ if (!format)
+ die("You must specify an archive format");
+ if (init_archiver(format, ar) < 0)
+ die("Unknown archive format '%s'", format);
+
+ if (extra_argc) {
+ if (!ar->parse_extra)
+ die("'%s' format does not handle %s",
+ ar->name, extra_argv[0]);
+ ar->args.extra = ar->parse_extra(extra_argc, extra_argv);
+ }
+ ar->args.verbose = verbose;
+ ar->args.base = base;
+
+ return i;
+}
+
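+/*
+ * Pick a single --remote=<repo> option out of the command line (before
+ * any "--"), remove it from argv, and return the repository name, or
+ * NULL if none was given.
+ */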
+static const char *extract_remote_arg(int *ac, const char **av)
+{
+ int ix, iy, cnt = *ac;
+ int no_more_options = 0;
+ const char *remote = NULL;
+
+ for (ix = iy = 1; ix < cnt; ix++) {
+ const char *arg = av[ix];
+ if (!strcmp(arg, "--"))
+ no_more_options = 1;
+ if (!no_more_options) {
+ if (!strncmp(arg, "--remote=", 9)) {
+ if (remote)
+ die("Multiple --remote specified");
+ remote = arg + 9;
+ continue;
+ }
+ if (arg[0] != '-')
+ no_more_options = 1;
+ }
+ if (ix != iy)
+ av[iy] = arg;
+ iy++;
+ }
+ if (remote) {
+ av[--cnt] = NULL;
+ *ac = cnt;
+ }
+ return remote;
+}
+
+int cmd_archive(int argc, const char **argv, const char *prefix)
+{
+ struct archiver ar;
+ int tree_idx;
+ const char *remote = NULL;
+
+ remote = extract_remote_arg(&argc, argv);
+ if (remote)
+ return run_remote_archiver(remote, argc, argv);
+
+ setlinebuf(stderr);
+
+ memset(&ar, 0, sizeof(ar));
+ tree_idx = parse_archive_args(argc, argv, &ar);
+ if (prefix == NULL)
+ prefix = setup_git_directory();
+
+ argv += tree_idx;
+ parse_treeish_arg(argv, &ar.args, prefix);
+ parse_pathspec_arg(argv + 1, &ar.args);
+
+ return ar.write_archive(&ar.args);
+}
unsigned binary:2;
unsigned extended:1;
unsigned relative:1;
+ unsigned pathname:1;
int regflags;
unsigned pre_context;
unsigned post_context;
static void show_line(struct grep_opt *opt, const char *bol, const char *eol,
const char *name, unsigned lno, char sign)
{
- printf("%s%c", name, sign);
+ if (opt->pathname)
+ printf("%s%c", name, sign);
if (opt->linenum)
printf("%d%c", lno, sign);
printf("%.*s\n", (int)(eol-bol), bol);
push_arg("-F");
if (opt->linenum)
push_arg("-n");
+ if (!opt->pathname)
+ push_arg("-h");
if (opt->regflags & REG_EXTENDED)
push_arg("-E");
if (opt->regflags & REG_ICASE)
memset(&opt, 0, sizeof(opt));
opt.prefix_length = (prefix && *prefix) ? strlen(prefix) : 0;
opt.relative = 1;
+ opt.pathname = 1;
opt.pattern_tail = &opt.pattern_list;
opt.regflags = REG_NEWLINE;
opt.linenum = 1;
continue;
}
+ if (!strcmp("-h", arg)) {
+ opt.pathname = 0;
+ continue;
+ }
if (!strcmp("-H", arg)) {
- /* We always show the pathname, so this
- * is a noop.
- */
+ opt.pathname = 1;
continue;
}
if (!strcmp("-l", arg) ||
*/
#include <time.h>
#include "cache.h"
-#include "tree-walk.h"
#include "commit.h"
#include "strbuf.h"
#include "tar.h"
#include "builtin.h"
#include "pkt-line.h"
+#include "archive.h"
#define RECORDSIZE (512)
#define BLOCKSIZE (RECORDSIZE * 20)
static time_t archive_time;
static int tar_umask;
+static int verbose;
/* writes out the whole block, but only if it is full */
static void write_if_needed(void)
mode = 0100666;
sprintf(header.name, "%s.paxheader", sha1_to_hex(sha1));
} else {
+ if (verbose)
+ fprintf(stderr, "%.*s\n", path->len, path->buf);
if (S_ISDIR(mode)) {
*header.typeflag = TYPEFLAG_DIR;
mode = (mode | 0777) & ~tar_umask;
free(ext_header.buf);
}
-static void traverse_tree(struct tree_desc *tree, struct strbuf *path)
-{
- int pathlen = path->len;
- struct name_entry entry;
-
- while (tree_entry(tree, &entry)) {
- void *eltbuf;
- char elttype[20];
- unsigned long eltsize;
-
- eltbuf = read_sha1_file(entry.sha1, elttype, &eltsize);
- if (!eltbuf)
- die("cannot read %s", sha1_to_hex(entry.sha1));
-
- path->len = pathlen;
- strbuf_append_string(path, entry.path);
- if (S_ISDIR(entry.mode))
- strbuf_append_string(path, "/");
-
- write_entry(entry.sha1, path, entry.mode, eltbuf, eltsize);
-
- if (S_ISDIR(entry.mode)) {
- struct tree_desc subtree;
- subtree.buf = eltbuf;
- subtree.size = eltsize;
- traverse_tree(&subtree, path);
- }
- free(eltbuf);
- }
-}
-
static int git_tar_config(const char *var, const char *value)
{
if (!strcmp(var, "tar.umask")) {
static int generate_tar(int argc, const char **argv, const char *prefix)
{
- unsigned char sha1[20], tree_sha1[20];
- struct commit *commit;
- struct tree_desc tree;
- struct strbuf current_path;
- void *buffer;
-
- current_path.buf = xmalloc(PATH_MAX);
- current_path.alloc = PATH_MAX;
- current_path.len = current_path.eof = 0;
+ struct archiver_args args;
+ int result;
+ char *base = NULL;
git_config(git_tar_config);
- switch (argc) {
- case 3:
-		strbuf_append_string(&current_path, argv[2]);
-		strbuf_append_string(&current_path, "/");
- /* FALLTHROUGH */
- case 2:
- if (get_sha1(argv[1], sha1))
- die("Not a valid object name %s", argv[1]);
- break;
- default:
+ memset(&args, 0, sizeof(args));
+ if (argc != 2 && argc != 3)
usage(tar_tree_usage);
+ if (argc == 3) {
+ int baselen = strlen(argv[2]);
+ base = xmalloc(baselen + 2);
+ memcpy(base, argv[2], baselen);
+ base[baselen] = '/';
+ base[baselen + 1] = '\0';
}
+ args.base = base;
+ parse_treeish_arg(argv + 1, &args, NULL);
- commit = lookup_commit_reference_gently(sha1, 1);
- if (commit) {
- write_global_extended_header(commit->object.sha1);
- archive_time = commit->date;
- } else
- archive_time = time(NULL);
-
- tree.buf = buffer = read_object_with_reference(sha1, tree_type,
- &tree.size, tree_sha1);
- if (!tree.buf)
- die("not a reference to a tag, commit or tree object: %s",
- sha1_to_hex(sha1));
-
- if (current_path.len > 0)
-		write_entry(tree_sha1, &current_path, 040777, NULL, 0);
-	traverse_tree(&tree, &current_path);
- write_trailer();
+ result = write_tar_archive(&args);
+ free(base);
+
+ return result;
+}
+
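+/*
+ * Callback for read_tree_recursive(): build the full path of the
+ * entry, read the blob for non-directories, and write a tar entry
+ * for it.
+ */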
+static int write_tar_entry(const unsigned char *sha1,
+ const char *base, int baselen,
+ const char *filename, unsigned mode, int stage)
+{
+ static struct strbuf path;
+ int filenamelen = strlen(filename);
+ void *buffer;
+ char type[20];
+ unsigned long size;
+
+ if (!path.alloc) {
+ path.buf = xmalloc(PATH_MAX);
+ path.alloc = PATH_MAX;
+ path.len = path.eof = 0;
+ }
+ if (path.alloc < baselen + filenamelen) {
+ free(path.buf);
+ path.buf = xmalloc(baselen + filenamelen);
+ path.alloc = baselen + filenamelen;
+ }
+ memcpy(path.buf, base, baselen);
+ memcpy(path.buf + baselen, filename, filenamelen);
+ path.len = baselen + filenamelen;
+ if (S_ISDIR(mode)) {
+ strbuf_append_string(&path, "/");
+ buffer = NULL;
+ size = 0;
+ } else {
+ buffer = read_sha1_file(sha1, type, &size);
+ if (!buffer)
+ die("cannot read %s", sha1_to_hex(sha1));
+ }
+
+ write_entry(sha1, &path, mode, buffer, size);
free(buffer);
- free(current_path.buf);
+
+ return READ_TREE_RECURSIVE;
+}
+
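+/*
+ * Entry point of the tar backend: emit the global extended pax header
+ * when a commit is known, an entry for the base directory if a prefix
+ * was given, then every tree entry via read_tree_recursive().
+ */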
+int write_tar_archive(struct archiver_args *args)
+{
+ int plen = args->base ? strlen(args->base) : 0;
+
+ git_config(git_tar_config);
+
+ archive_time = args->time;
+ verbose = args->verbose;
+
+ if (args->commit_sha1)
+ write_global_extended_header(args->commit_sha1);
+
+ if (args->base && plen > 0 && args->base[plen - 1] == '/') {
+ char *base = xstrdup(args->base);
+ int baselen = strlen(base);
+
+ while (baselen > 0 && base[baselen - 1] == '/')
+ base[--baselen] = '\0';
+ write_tar_entry(args->tree->object.sha1, "", 0, base, 040777, 0);
+ free(base);
+ }
+ read_tree_recursive(args->tree, args->base, plen, 0,
+ args->pathspec, write_tar_entry);
+ write_trailer();
+
return 0;
}
#include <sys/time.h>
-static int dry_run, quiet;
-static const char unpack_usage[] = "git-unpack-objects [-n] [-q] < pack-file";
+static int dry_run, quiet, recover, has_errors;
+static const char unpack_usage[] = "git-unpack-objects [-n] [-q] [-r] < pack-file";
/* We always read in 4kB chunks. */
static unsigned char buffer[4096];
use(len - stream.avail_in);
if (stream.total_out == size && ret == Z_STREAM_END)
break;
- if (ret != Z_OK)
- die("inflate returned %d\n", ret);
+ if (ret != Z_OK) {
+ error("inflate returned %d\n", ret);
+ free(buf);
+ buf = NULL;
+ if (!recover)
+ exit(1);
+ has_errors = 1;
+ break;
+ }
stream.next_in = fill(1);
stream.avail_in = len;
}
added_object(sha1, type, buf, size);
}
-static int resolve_delta(const char *type,
- void *base, unsigned long base_size,
- void *delta, unsigned long delta_size)
+static void resolve_delta(const char *type,
+ void *base, unsigned long base_size,
+ void *delta, unsigned long delta_size)
{
void *result;
unsigned long result_size;
free(delta);
write_object(result, result_size, type);
free(result);
- return 0;
}
static void added_object(unsigned char *sha1, const char *type, void *data, unsigned long size)
}
}
-static int unpack_non_delta_entry(enum object_type kind, unsigned long size)
+static void unpack_non_delta_entry(enum object_type kind, unsigned long size)
{
void *buf = get_data(size);
const char *type;
case OBJ_TAG: type = tag_type; break;
default: die("bad type %d", kind);
}
- if (!dry_run)
+ if (!dry_run && buf)
write_object(buf, size, type);
free(buf);
- return 0;
}
-static int unpack_delta_entry(unsigned long delta_size)
+static void unpack_delta_entry(unsigned long delta_size)
{
void *delta_data, *base;
unsigned long base_size;
char type[20];
unsigned char base_sha1[20];
- int result;
hashcpy(base_sha1, fill(20));
use(20);
delta_data = get_data(delta_size);
- if (dry_run) {
+ if (dry_run || !delta_data) {
free(delta_data);
- return 0;
+ return;
}
if (!has_sha1_file(base_sha1)) {
add_delta_to_list(base_sha1, delta_data, delta_size);
- return 0;
+ return;
}
base = read_sha1_file(base_sha1, type, &base_size);
- if (!base)
- die("failed to read delta-pack base object %s", sha1_to_hex(base_sha1));
- result = resolve_delta(type, base, base_size, delta_data, delta_size);
+ if (!base) {
+ error("failed to read delta-pack base object %s",
+ sha1_to_hex(base_sha1));
+ if (!recover)
+ exit(1);
+ has_errors = 1;
+ return;
+ }
+ resolve_delta(type, base, base_size, delta_data, delta_size);
free(base);
- return result;
}
static void unpack_one(unsigned nr, unsigned total)
unpack_delta_entry(size);
return;
default:
- die("bad object type %d", type);
+ error("bad object type %d", type);
+ has_errors = 1;
+ if (recover)
+ return;
+ exit(1);
}
}
quiet = 1;
continue;
}
+ if (!strcmp(arg, "-r")) {
+ recover = 1;
+ continue;
+ }
usage(unpack_usage);
}
/* All done */
if (!quiet)
fprintf(stderr, "\n");
- return 0;
+ return has_errors;
}
--- /dev/null
+/*
+ * Copyright (c) 2006 Franck Bui-Huu
+ */
+#include <time.h>
+#include "cache.h"
+#include "builtin.h"
+#include "archive.h"
+#include "pkt-line.h"
+#include "sideband.h"
+#include <sys/wait.h>
+#include <sys/poll.h>
+
+static const char upload_archive_usage[] =
+ "git-upload-archive <repo>";
+
+static const char deadchild[] =
+"git-upload-archive: archiver died with error";
+
+static const char lostchild[] =
+"git-upload-archive: archiver process was lost";
+
+
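+/*
+ * Read the "argument" packets sent by the client, rebuild an argument
+ * vector for the archiver, and write the requested archive to our
+ * stdout (which the parent multiplexes back over the git protocol).
+ */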
+static int run_upload_archive(int argc, const char **argv, const char *prefix)
+{
+ struct archiver ar;
+ const char *sent_argv[MAX_ARGS];
+ const char *arg_cmd = "argument ";
+ char *p, buf[4096];
+ int treeish_idx;
+ int sent_argc;
+ int len;
+
+ if (argc != 2)
+ usage(upload_archive_usage);
+
+ if (strlen(argv[1]) > sizeof(buf))
+ die("insanely long repository name");
+
+ strcpy(buf, argv[1]); /* enter-repo smudges its argument */
+
+ if (!enter_repo(buf, 0))
+ die("not a git archive");
+
+ /* put received options in sent_argv[] */
+ sent_argc = 1;
+ sent_argv[0] = "git-upload-archive";
+ for (p = buf;;) {
+ /* This will die if not enough free space in buf */
+ len = packet_read_line(0, p, (buf + sizeof buf) - p);
+ if (len == 0)
+ break; /* got a flush */
+ if (sent_argc > MAX_ARGS - 2)
+			die("Too many options (>%d)", MAX_ARGS - 2);
+
+ if (p[len-1] == '\n') {
+ p[--len] = 0;
+ }
+ if (len < strlen(arg_cmd) ||
+ strncmp(arg_cmd, p, strlen(arg_cmd)))
+ die("'argument' token or flush expected");
+
+ len -= strlen(arg_cmd);
+ memmove(p, p + strlen(arg_cmd), len);
+ sent_argv[sent_argc++] = p;
+ p += len;
+ *p++ = 0;
+ }
+ sent_argv[sent_argc] = NULL;
+
+ /* parse all options sent by the client */
+ treeish_idx = parse_archive_args(sent_argc, sent_argv, &ar);
+
+ parse_treeish_arg(sent_argv + treeish_idx, &ar.args, prefix);
+ parse_pathspec_arg(sent_argv + treeish_idx + 1, &ar.args);
+
+ return ar.write_archive(&ar.args);
+}
+
+static void error_clnt(const char *fmt, ...)
+{
+ char buf[1024];
+ va_list params;
+ int len;
+
+ va_start(params, fmt);
+ len = vsprintf(buf, fmt, params);
+ va_end(params);
+ send_sideband(1, 3, buf, len, LARGE_PACKET_MAX);
+ die("sent error to the client: %s", buf);
+}
+
+static void process_input(int child_fd, int band)
+{
+ char buf[16384];
+ ssize_t sz = read(child_fd, buf, sizeof(buf));
+ if (sz < 0) {
+ if (errno != EINTR)
+ error_clnt("read error: %s\n", strerror(errno));
+ return;
+ }
+ send_sideband(1, band, buf, sz, LARGE_PACKET_MAX);
+}
+
+int cmd_upload_archive(int argc, const char **argv, const char *prefix)
+{
+ pid_t writer;
+ int fd1[2], fd2[2];
+ /*
+ * Set up sideband subprocess.
+ *
+ * We (parent) monitor and read from child, sending its fd#1 and fd#2
+ * multiplexed out to our fd#1. If the child dies, we tell the other
+ * end over channel #3.
+ */
+ if (pipe(fd1) < 0 || pipe(fd2) < 0) {
+ int err = errno;
+ packet_write(1, "NACK pipe failed on the remote side\n");
+ die("upload-archive: %s", strerror(err));
+ }
+ writer = fork();
+ if (writer < 0) {
+ int err = errno;
+ packet_write(1, "NACK fork failed on the remote side\n");
+ die("upload-archive: %s", strerror(err));
+ }
+ if (!writer) {
+ /* child - connect fd#1 and fd#2 to the pipe */
+ dup2(fd1[1], 1);
+ dup2(fd2[1], 2);
+ close(fd1[1]); close(fd2[1]);
+ close(fd1[0]); close(fd2[0]); /* we do not read from pipe */
+
+ exit(run_upload_archive(argc, argv, prefix));
+ }
+
+ /* parent - read from child, multiplex and send out to fd#1 */
+ close(fd1[1]); close(fd2[1]); /* we do not write to pipe */
+ packet_write(1, "ACK\n");
+ packet_flush(1);
+
+ while (1) {
+ struct pollfd pfd[2];
+ int status;
+
+ pfd[0].fd = fd1[0];
+ pfd[0].events = POLLIN;
+ pfd[1].fd = fd2[0];
+ pfd[1].events = POLLIN;
+ if (poll(pfd, 2, -1) < 0) {
+ if (errno != EINTR) {
+ error("poll failed resuming: %s",
+ strerror(errno));
+ sleep(1);
+ }
+ continue;
+ }
+ if (pfd[0].revents & POLLIN)
+ /* Data stream ready */
+ process_input(pfd[0].fd, 1);
+ if (pfd[1].revents & POLLIN)
+ /* Status stream ready */
+ process_input(pfd[1].fd, 2);
+ /* Always finish to read data when available */
+ if ((pfd[0].revents | pfd[1].revents) & POLLIN)
+ continue;
+
+ if (waitpid(writer, &status, 0) < 0)
+ error_clnt("%s", lostchild);
+ else if (!WIFEXITED(status) || WEXITSTATUS(status) > 0)
+ error_clnt("%s", deadchild);
+ packet_flush(1);
+ break;
+ }
+ return 0;
+}
#include "tree.h"
#include "quote.h"
#include "builtin.h"
+#include "archive.h"
static const char zip_tree_usage[] =
"git-zip-tree [-0|...|-9] <tree-ish> [ <base> ]";
+static int verbose;
static int zip_date;
static int zip_time;
crc = crc32(0, Z_NULL, 0);
path = construct_path(base, baselen, filename, S_ISDIR(mode), &pathlen);
+ if (verbose)
+ fprintf(stderr, "%s\n", path);
if (pathlen > 0xffff) {
error("path too long (%d chars, SHA1: %s): %s", pathlen,
sha1_to_hex(sha1), path);
return 0;
}
+
+int write_zip_archive(struct archiver_args *args)
+{
+ int plen = strlen(args->base);
+
+ dos_time(&args->time, &zip_date, &zip_time);
+
+ zip_dir = xmalloc(ZIP_DIRECTORY_MIN_SIZE);
+ zip_dir_size = ZIP_DIRECTORY_MIN_SIZE;
+ verbose = args->verbose;
+
+ if (args->base && plen > 0 && args->base[plen - 1] == '/') {
+ char *base = xstrdup(args->base);
+ int baselen = strlen(base);
+
+ while (baselen > 0 && base[baselen - 1] == '/')
+ base[--baselen] = '\0';
+ write_zip_entry(args->tree->object.sha1, "", 0, base, 040777, 0);
+ free(base);
+ }
+ read_tree_recursive(args->tree, args->base, plen, 0,
+ args->pathspec, write_zip_entry);
+ write_zip_trailer(args->commit_sha1);
+
+ free(zip_dir);
+
+ return 0;
+}
+
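+/* Map the zip backend's extra options -0..-9 to zlib compression levels. */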
+void *parse_extra_zip_args(int argc, const char **argv)
+{
+ for (; argc > 0; argc--, argv++) {
+ const char *arg = argv[0];
+
+ if (arg[0] == '-' && isdigit(arg[1]) && arg[2] == '\0')
+ zlib_compression_level = arg[1] - '0';
+ else
+ die("Unknown argument for zip format: %s", arg);
+ }
+ return NULL;
+}
#ifndef BUILTIN_H
#define BUILTIN_H
-#include <stdio.h>
-#include <limits.h>
+#include "git-compat-util.h"
extern const char git_version_string[];
extern const char git_usage_string[];
extern int cmd_add(int argc, const char **argv, const char *prefix);
extern int cmd_apply(int argc, const char **argv, const char *prefix);
+extern int cmd_archive(int argc, const char **argv, const char *prefix);
extern int cmd_cat_file(int argc, const char **argv, const char *prefix);
extern int cmd_checkout_index(int argc, const char **argv, const char *prefix);
extern int cmd_check_ref_format(int argc, const char **argv, const char *prefix);
extern int cmd_unpack_objects(int argc, const char **argv, const char *prefix);
extern int cmd_update_index(int argc, const char **argv, const char *prefix);
extern int cmd_update_ref(int argc, const char **argv, const char *prefix);
+extern int cmd_upload_archive(int argc, const char **argv, const char *prefix);
extern int cmd_upload_tar(int argc, const char **argv, const char *prefix);
extern int cmd_version(int argc, const char **argv, const char *prefix);
extern int cmd_whatchanged(int argc, const char **argv, const char *prefix);
#define REF_HEADS (1u << 1)
#define REF_TAGS (1u << 2)
-extern int git_connect(int fd[2], char *url, const char *prog);
+extern pid_t git_connect(int fd[2], char *url, const char *prog);
extern int finish_connect(pid_t pid);
extern int path_match(const char *path, int nr, char **match);
extern int match_refs(struct ref *src, struct ref *dst, struct ref ***dst_tail,
close(pipefd[1][0]);
}
+#define MAX_CMD_LEN 1024
+
/*
- * Yeah, yeah, fixme. Need to pass in the heads etc.
+ * This returns 0 if the transport protocol does not need fork(2),
+ * or a process id if it does. Once done, finish the connection
+ * with finish_connect() with the value returned from this function
+ * (it is safe to call finish_connect() with 0 to support the former
+ * case).
+ *
+ * Does not return a negative value on error; it just dies.
*/
-int git_connect(int fd[2], char *url, const char *prog)
+pid_t git_connect(int fd[2], char *url, const char *prog)
{
- char command[1024];
char *host, *path = url;
char *end;
int c;
if (pid < 0)
die("unable to fork");
if (!pid) {
- snprintf(command, sizeof(command), "%s %s", prog,
- sq_quote(path));
+ char command[MAX_CMD_LEN];
+ char *posn = command;
+ int size = MAX_CMD_LEN;
+ int of = 0;
+
+ of |= add_to_string(&posn, &size, prog, 0);
+ of |= add_to_string(&posn, &size, " ", 0);
+ of |= add_to_string(&posn, &size, path, 1);
+
+ if (of)
+ die("command line too long");
+
dup2(pipefd[1][0], 0);
dup2(pipefd[0][1], 1);
close(pipefd[0][0]);
int finish_connect(pid_t pid)
{
+ if (pid == 0)
+ return 0;
+
while (waitpid(pid, NULL, 0) < 0) {
if (errno != EINTR)
return -1;
--- /dev/null
+To syntax highlight git's commit messages, you need to:
+ 1. Copy syntax/gitcommit.vim to vim's syntax directory:
+ $ mkdir -p $HOME/.vim/syntax
+ $ cp syntax/gitcommit.vim $HOME/.vim/syntax
+ 2. Auto-detect the editing of git commit files:
+ $ cat >>$HOME/.vimrc <<'EOF'
+ autocmd BufNewFile,BufRead COMMIT_EDITMSG set filetype=gitcommit
+ EOF
--- /dev/null
+syn region gitLine start=/^#/ end=/$/
+syn region gitCommit start=/^# Updated but not checked in:$/ end=/^#$/ contains=gitHead,gitCommitFile
+syn region gitHead contained start=/^# (.*)/ end=/^#$/
+syn region gitChanged start=/^# Changed but not updated:/ end=/^#$/ contains=gitHead,gitChangedFile
+syn region gitUntracked start=/^# Untracked files:/ end=/^#$/ contains=gitHead,gitUntrackedFile
+
+syn match gitCommitFile contained /^#\t.*/hs=s+2
+syn match gitChangedFile contained /^#\t.*/hs=s+2
+syn match gitUntrackedFile contained /^#\t.*/hs=s+2
+
+hi def link gitLine Comment
+hi def link gitCommit Comment
+hi def link gitChanged Comment
+hi def link gitHead Comment
+hi def link gitUntracked Comment
+hi def link gitCommitFile Type
+hi def link gitChangedFile Constant
+hi def link gitUntrackedFile Constant
return -1;
}
+static int upload_archive(void)
+{
+ execl_git_cmd("upload-archive", ".", NULL);
+ return -1;
+}
+
static struct daemon_service daemon_service[] = {
+ { "upload-archive", "uploadarch", upload_archive, 0, 1 },
{ "upload-pack", "uploadpack", upload_pack, 1, 1 },
};
usage(describe_usage);
}
- if (i == argc)
+ setup_git_directory();
+
+ if (argc <= i)
describe("HEAD", 1);
else
while (i < argc) {
if (hashcmp(one->sha1, two->sha1)) {
int abbrev = o->full_index ? 40 : DEFAULT_ABBREV;
+ if (o->binary) {
+ mmfile_t mf;
+ if ((!fill_mmfile(&mf, one) && mmfile_is_binary(&mf)) ||
+ (!fill_mmfile(&mf, two) && mmfile_is_binary(&mf)))
+ abbrev = 40;
+ }
len += snprintf(msg + len, sizeof(msg) - len,
"index %.*s..%.*s",
abbrev, sha1_to_hex(one->sha1),
options->full_index = 1;
else if (!strcmp(arg, "--binary")) {
options->output_format |= DIFF_FORMAT_PATCH;
- options->full_index = options->binary = 1;
+ options->binary = 1;
}
else if (!strcmp(arg, "-a") || !strcmp(arg, "--text")) {
options->text = 1;
#include "cache.h"
#include "exec_cmd.h"
#include "pkt-line.h"
+#include "sideband.h"
#include <sys/wait.h>
#include <sys/time.h>
die("%s: unable to fork off sideband demultiplexer", me);
if (!side_pid) {
/* subprocess */
+ char buf[LARGE_PACKET_MAX];
+
close(fd[0]);
if (xd[0] != xd[1])
close(xd[1]);
- while (1) {
- char buf[1024];
- int len = packet_read_line(xd[0], buf, sizeof(buf));
- if (len == 0)
- break;
- if (len < 1)
- die("%s: protocol error: no band designator",
- me);
- len--;
- switch (buf[0] & 0xFF) {
- case 3:
- safe_write(2, "remote: ", 8);
- safe_write(2, buf+1, len);
- safe_write(2, "\n", 1);
- exit(1);
- case 2:
- safe_write(2, "remote: ", 8);
- safe_write(2, buf+1, len);
- continue;
- case 1:
- safe_write(fd[1], buf+1, len);
- continue;
- default:
- die("%s: protocol error: bad band #%d",
- me, (buf[0] & 0xFF));
- }
- }
+ if (recv_sideband(me, xd[0], fd[1], 2, buf, sizeof(buf)))
+ exit(1);
exit(0);
}
close(xd[0]);
}
if (!fetching)
- packet_write(fd[1], "want %s%s%s%s\n",
+ packet_write(fd[1], "want %s%s%s%s%s\n",
sha1_to_hex(remote),
(multi_ack ? " multi_ack" : ""),
- (use_sideband ? " side-band" : ""),
+ (use_sideband == 2 ? " side-band-64k" : ""),
+ (use_sideband == 1 ? " side-band" : ""),
(use_thin_pack ? " thin-pack" : ""));
else
packet_write(fd[1], "want %s\n", sha1_to_hex(remote));
fprintf(stderr, "Server supports multi_ack\n");
multi_ack = 1;
}
- if (server_supports("side-band")) {
+ if (server_supports("side-band-64k")) {
+ if (verbose)
+ fprintf(stderr, "Server supports side-band-64k\n");
+ use_sideband = 2;
+ }
+ else if (server_supports("side-band")) {
if (verbose)
fprintf(stderr, "Server supports side-band\n");
use_sideband = 1;
ret = fetch_pack(fd, nr_heads, heads);
close(fd[0]);
close(fd[1]);
- finish_connect(pid);
+ ret |= finish_connect(pid);
if (!ret && nr_heads) {
/* If the heads to pull were given, we should have
}
}
- return ret;
+ return !!ret;
}
sort <<\EOF |
add
apply
+archive
bisect
branch
checkout
fi
git-ls-remote "$repo" >"$GIT_DIR/CLONE_HEAD" || exit 1
;;
- https://*|http://*)
+ https://*|http://*|ftp://*)
if test -z "@@NO_CURL@@"
then
clone_dumb_http "$repo" "$D"
#include <sys/types.h>
#include <dirent.h>
+/* On most systems <limits.h> would have given us this, but
+ * not on some systems (e.g. GNU/Hurd).
+ */
+#ifndef PATH_MAX
+#define PATH_MAX 4096
+#endif
+
#ifdef __GNUC__
#define NORETURN __attribute__((__noreturn__))
#else
# There are transports that can fetch only one head at a time...
case "$remote" in
- http://* | https://*)
+ http://* | https://* | ftp://*)
if [ -n "$GIT_SSL_NO_VERIFY" ]; then
curl_extra_args="-k"
fi
done
case "$remote" in
- http://* | https://* | rsync://* )
+ http://* | https://* | ftp://* | rsync://* )
;; # we are already done.
*)
( : subshell because we muck with IFS
tmpdir=$tmp-d
case "$peek_repo" in
-http://* | https://* )
+http://* | https://* | ftp://* )
if [ -n "$GIT_SSL_NO_VERIFY" ]; then
curl_extra_args="-k"
fi
} commands[] = {
{ "add", cmd_add, RUN_SETUP },
{ "apply", cmd_apply },
+ { "archive", cmd_archive },
{ "cat-file", cmd_cat_file, RUN_SETUP },
{ "checkout-index", cmd_checkout_index, RUN_SETUP },
{ "check-ref-format", cmd_check_ref_format },
{ "commit-tree", cmd_commit_tree, RUN_SETUP },
- { "count-objects", cmd_count_objects },
+ { "count-objects", cmd_count_objects, RUN_SETUP },
{ "diff", cmd_diff, RUN_SETUP },
{ "diff-files", cmd_diff_files, RUN_SETUP },
{ "diff-index", cmd_diff_index, RUN_SETUP },
{ "unpack-objects", cmd_unpack_objects, RUN_SETUP },
{ "update-index", cmd_update_index, RUN_SETUP },
{ "update-ref", cmd_update_ref, RUN_SETUP },
+ { "upload-archive", cmd_upload_archive },
{ "upload-tar", cmd_upload_tar },
{ "version", cmd_version },
{ "whatchanged", cmd_whatchanged, RUN_SETUP | USE_PAGER },
int main(int argc, const char **argv, char **envp)
{
- const char *cmd = argv[0];
+ const char *cmd = argv[0] ? argv[0] : "git-help";
char *slash = strrchr(cmd, '/');
const char *exec_path = NULL;
int done_alias = 0;
# source of projects list
our $projects_list = "++GITWEB_LIST++";
+# show repository only if this file exists
+# (only effective if this variable evaluates to true)
+our $export_ok = "++GITWEB_EXPORT_OK++";
+
+# only allow viewing of repositories also shown on the overview page
+our $strict_export = "++GITWEB_STRICT_EXPORT++";
+
# list of git base URLs used for URL to where fetch project from,
# i.e. full URL is "$git_base_url/$project"
our @git_base_url_list = ("++GITWEB_BASE_URL++");
'override' => 0,
# => [content-encoding, suffix, program]
'default' => ['x-gzip', 'gz', 'gzip']},
+
+ 'pickaxe' => {
+ 'sub' => \&feature_pickaxe,
+ 'override' => 0,
+ 'default' => [1]},
);
sub gitweb_check_feature {
return ($ctype, $suffix, $command);
}
+# To enable system wide have in $GITWEB_CONFIG
+# $feature{'pickaxe'}{'default'} = [1];
+# To have project specific config enable override in $GITWEB_CONFIG
+# $feature{'pickaxe'}{'override'} = 1;
+# and in project config gitweb.pickaxe = 0|1;
+
+sub feature_pickaxe {
+ my ($val) = git_get_project_config('pickaxe', '--bool');
+
+ if ($val eq 'true') {
+ return (1);
+ } elsif ($val eq 'false') {
+ return (0);
+ }
+
+ return ($_[0]);
+}
+
# rename detection options for git-diff and git-diff-tree
# - default is '-M', with the cost proportional to
# (number of removed files) * (number of new files).
# version of the core git binary
our $git_version = qx($GIT --version) =~ m/git version (.*)$/ ? $1 : "unknown";
-# path to the current git repository
-our $git_dir;
-
$projects_list ||= $projectroot;
# ======================================================================
}
}
-our $project = ($cgi->param('p') || $ENV{'PATH_INFO'});
-if (defined $project) {
- $project =~ s|^/||;
- $project =~ s|/$||;
- $project = undef unless $project;
-}
+our $project = $cgi->param('p');
if (defined $project) {
- if (!validate_input($project)) {
- die_error(undef, "Invalid project parameter");
- }
- if (!(-d "$projectroot/$project")) {
- die_error(undef, "No such directory");
- }
- if (!(-e "$projectroot/$project/HEAD")) {
+ if (!validate_input($project) ||
+ !(-d "$projectroot/$project") ||
+ !(-e "$projectroot/$project/HEAD") ||
+ ($export_ok && !(-e "$projectroot/$project/$export_ok")) ||
+ ($strict_export && !project_in_list($project))) {
+ undef $project;
die_error(undef, "No such project");
}
- $git_dir = "$projectroot/$project";
}
our $file_name = $cgi->param('f');
our $page = $cgi->param('pg');
if (defined $page) {
- if ($page =~ m/[^0-9]$/) {
+ if ($page =~ m/[^0-9]/) {
die_error(undef, "Invalid page parameter");
}
}
$searchtext = quotemeta $searchtext;
}
+# now read PATH_INFO and use it as alternative to parameters
+sub evaluate_path_info {
+ return if defined $project;
+ my $path_info = $ENV{"PATH_INFO"};
+ return if !$path_info;
+ $path_info =~ s,(^/|/$),,gs;
+ $path_info = validate_input($path_info);
+ return if !$path_info;
+ $project = $path_info;
+ while ($project && !-e "$projectroot/$project/HEAD") {
+ $project =~ s,/*[^/]*$,,;
+ }
+ if (!$project ||
+ ($export_ok && !-e "$projectroot/$project/$export_ok") ||
+ ($strict_export && !project_in_list($project))) {
+ undef $project;
+ return;
+ }
+ # do not change any parameters if an action is given using the query string
+ return if $action;
+ if ($path_info =~ m,^$project/([^/]+)/(.+)$,) {
+ # we got "project.git/branch/filename"
+ $action ||= "blob_plain";
+ $hash_base ||= validate_input($1);
+ $file_name ||= validate_input($2);
+ } elsif ($path_info =~ m,^$project/([^/]+)$,) {
+ # we got "project.git/branch"
+ $action ||= "shortlog";
+ $hash ||= validate_input($1);
+ }
+}
+evaluate_path_info();
+
+# path to the current git repository
+our $git_dir;
+$git_dir = "$projectroot/$project" if $project;
+
# dispatch
my %actions = (
"blame" => \&git_blame2,
# those below don't need $project
"opml" => \&git_opml,
"project_list" => \&git_project_list,
+ "project_index" => \&git_project_index,
);
if (defined $project) {
hash_base => "hb",
hash_parent_base => "hpb",
page => "pg",
+ order => "o",
searchtext => "s",
);
my %mapping = @mapping;
- $params{"project"} ||= $project;
+ $params{'project'} = $project unless exists $params{'project'};
my @result = ();
for (my $i = 0; $i < @mapping; $i += 2) {
return $line;
}
+sub project_in_list {
+ my $project = shift;
+ my @list = git_get_projects_list();
+ return @list && scalar(grep { $_->{'path'} eq $project } @list);
+}
+
## ----------------------------------------------------------------------
## HTML aware string manipulation
## ......................................................................
## git utility functions, directly accessing git repository
-# assumes that PATH is not symref
-sub git_get_hash_by_ref {
- my $path = shift;
-
- open my $fd, "$projectroot/$path" or return undef;
- my $head = <$fd>;
- close $fd;
- chomp $head;
- if ($head =~ m/^[0-9a-fA-F]{40}$/) {
- return $head;
- }
-}
-
sub git_get_project_description {
my $path = shift;
if (-d $projects_list) {
# search in directory
my $dir = $projects_list;
- opendir my ($dh), $dir or return undef;
- while (my $dir = readdir($dh)) {
- if (-e "$projectroot/$dir/HEAD") {
- my $pr = {
- path => $dir,
- };
- push @list, $pr
- }
- }
- closedir($dh);
+ my $pfxlen = length("$dir");
+
+ File::Find::find({
+ follow_fast => 1, # follow symbolic links
+ dangling_symlinks => 0, # ignore dangling symlinks, silently
+ wanted => sub {
+ # skip project-list toplevel, if we get it.
+ return if (m!^[/.]$!);
+ # only directories can be git repositories
+ return unless (-d $_);
+
+ my $subdir = substr($File::Find::name, $pfxlen + 1);
+ # we check related file in $projectroot
+ if (-e "$projectroot/$subdir/HEAD" && (!$export_ok ||
+ -e "$projectroot/$subdir/$export_ok")) {
+ push @list, { path => $subdir };
+ $File::Find::prune = 1;
+ }
+ },
+ }, "$dir");
+
} elsif (-f $projects_list) {
# read from file(url-encoded):
# 'git%2Fgit.git Linus+Torvalds'
if (!defined $path) {
next;
}
- if (-e "$projectroot/$path/HEAD") {
+ if (-e "$projectroot/$path/HEAD" && (!$export_ok ||
+ -e "$projectroot/$path/$export_ok")) {
my $pr = {
path => $path,
owner => decode("utf8", $owner, Encode::FB_DEFAULT),
my @reflist;
my @refs;
- my $pfxlen = length("$projectroot/$project/$ref_dir");
- File::Find::find(sub {
- return if (/^\./);
- if (-f $_) {
- push @refs, substr($File::Find::name, $pfxlen + 1);
+ open my $fd, "-|", $GIT, "peek-remote", "$projectroot/$project/"
+ or return;
+ while (my $line = <$fd>) {
+ chomp $line;
+ if ($line =~ m/^([0-9a-fA-F]{40})\t$ref_dir\/?([^\^]+)$/) {
+ push @refs, { hash => $1, name => $2 };
+ } elsif ($line =~ m/^[0-9a-fA-F]{40}\t$ref_dir\/?(.*)\^\{\}$/ &&
+ $1 eq $refs[-1]{'name'}) {
+ # most likely a tag is followed by its peeled
+ # (deref) one, and when that happens we know the
+ # previous one was of type 'tag'.
+ $refs[-1]{'type'} = "tag";
}
- }, "$projectroot/$project/$ref_dir");
+ }
+ close $fd;
+
+ foreach my $ref (@refs) {
+ my $ref_file = $ref->{'name'};
+ my $ref_id = $ref->{'hash'};
- foreach my $ref_file (@refs) {
- my $ref_id = git_get_hash_by_ref("$project/$ref_dir/$ref_file");
- my $type = git_get_type($ref_id) || next;
+ my $type = $ref->{'type'} || git_get_type($ref_id) || next;
my %ref_item = parse_ref($ref_file, $ref_id, $type);
push @reflist, \%ref_item;
printf('<link rel="alternate" title="%s log" '.
'href="%s" type="application/rss+xml"/>'."\n",
esc_param($project), href(action=>"rss"));
+ } else {
+ printf('<link rel="alternate" title="%s projects list" '.
+ 'href="%s" type="text/plain; charset=utf-8"/>'."\n",
+ $site_name, href(project=>undef, action=>"project_index"));
+ printf('<link rel="alternate" title="%s projects logs" '.
+ 'href="%s" type="text/x-opml"/>'."\n",
+ $site_name, href(project=>undef, action=>"opml"));
}
if (defined $favicon) {
print qq(<link rel="shortcut icon" href="$favicon" type="image/png"/>\n);
if (defined $descr) {
print "<div class=\"page_footer_text\">" . esc_html($descr) . "</div>\n";
}
- print $cgi->a({-href => href(action=>"rss"), -class => "rss_logo"}, "RSS") . "\n";
+ print $cgi->a({-href => href(action=>"rss"),
+ -class => "rss_logo"}, "RSS") . "\n";
} else {
- print $cgi->a({-href => href(action=>"opml"), -class => "rss_logo"}, "OPML") . "\n";
+ print $cgi->a({-href => href(project=>undef, action=>"opml"),
+ -class => "rss_logo"}, "OPML") . " ";
+ print $cgi->a({-href => href(project=>undef, action=>"project_index"),
+ -class => "rss_logo"}, "TXT") . "\n";
}
print "</div>\n" .
"</body>\n" .
sub git_history_body {
# Warning: assumes constant type (blob or tree) during history
- my ($fd, $refs, $hash_base, $ftype, $extra) = @_;
+ my ($revlist, $from, $to, $refs, $hash_base, $ftype, $extra) = @_;
+
+ $from = 0 unless defined $from;
+ $to = $#{$revlist} unless (defined $to && $to <= $#{$revlist});
print "<table class=\"history\" cellspacing=\"0\">\n";
my $alternate = 0;
- while (my $line = <$fd>) {
- if ($line !~ m/^([0-9a-fA-F]{40})/) {
+ for (my $i = $from; $i <= $to; $i++) {
+ if ($revlist->[$i] !~ m/^([0-9a-fA-F]{40})/) {
next;
}
print "<th>Project</th>\n";
} else {
print "<th>" .
- $cgi->a({-href => "$my_uri?" . esc_param("o=project"),
+ $cgi->a({-href => href(project=>undef, order=>'project'),
-class => "header"}, "Project") .
"</th>\n";
}
print "<th>Description</th>\n";
} else {
print "<th>" .
- $cgi->a({-href => "$my_uri?" . esc_param("o=descr"),
+ $cgi->a({-href => href(project=>undef, order=>'descr'),
-class => "header"}, "Description") .
"</th>\n";
}
print "<th>Owner</th>\n";
} else {
print "<th>" .
- $cgi->a({-href => "$my_uri?" . esc_param("o=owner"),
+ $cgi->a({-href => href(project=>undef, order=>'owner'),
-class => "header"}, "Owner") .
"</th>\n";
}
print "<th>Last Change</th>\n";
} else {
print "<th>" .
- $cgi->a({-href => "$my_uri?" . esc_param("o=age"),
+ $cgi->a({-href => href(project=>undef, order=>'age'),
-class => "header"}, "Last Change") .
"</th>\n";
}
git_footer_html();
}
+sub git_project_index {
+ my @projects = git_get_projects_list();
+
+ print $cgi->header(
+ -type => 'text/plain',
+ -charset => 'utf-8',
+ -content_disposition => qq(inline; filename="index.aux"));
+
+ foreach my $pr (@projects) {
+ if (!exists $pr->{'owner'}) {
+ $pr->{'owner'} = get_file_owner("$projectroot/$pr->{'path'}");
+ }
+
+ my ($path, $owner) = ($pr->{'path'}, $pr->{'owner'});
+ # quote as in CGI::Util::encode, but keep the slash, and use '+' for ' '
+ $path =~ s/([^a-zA-Z0-9_.\-\/ ])/sprintf("%%%02X", ord($1))/eg;
+ $owner =~ s/([^a-zA-Z0-9_.\-\/ ])/sprintf("%%%02X", ord($1))/eg;
+ $path =~ s/ /\+/g;
+ $owner =~ s/ /\+/g;
+
+ print "$path $owner\n";
+ }
+}
+
sub git_summary {
my $descr = git_get_project_description($project) || "none";
my $head = git_get_head_hash($project);
}
sub git_blob_plain {
- # blobs defined by non-textual hash id's can be cached
my $expires;
- if ($hash =~ m/^[0-9a-fA-F]{40}$/) {
- $expires = "+1d";
- }
if (!defined $hash) {
if (defined $file_name) {
} else {
die_error(undef, "No file name defined");
}
+ } elsif ($hash =~ m/^[0-9a-fA-F]{40}$/) {
+ # blobs defined by non-textual hash id's can be cached
+ $expires = "+1d";
}
+
my $type = shift;
open my $fd, "-|", git_cmd(), "cat-file", "blob", $hash
or die_error(undef, "Couldn't cat $file_name, $hash");
}
sub git_blob {
- # blobs defined by non-textual hash id's can be cached
my $expires;
- if ($hash =~ m/^[0-9a-fA-F]{40}$/) {
- $expires = "+1d";
- }
if (!defined $hash) {
if (defined $file_name) {
} else {
die_error(undef, "No file name defined");
}
+ } elsif ($hash =~ m/^[0-9a-fA-F]{40}$/) {
+ # blobs defined by non-textual hash id's can be cached
+ $expires = "+1d";
}
+
my ($have_blame) = gitweb_check_feature('blame');
open my $fd, "-|", git_cmd(), "cat-file", "blob", $hash
or die_error(undef, "Couldn't cat $file_name, $hash");
if (!defined $hash_base) {
$hash_base = git_get_head_hash($project);
}
+ if (!defined $page) {
+ $page = 0;
+ }
my $ftype;
my %co = parse_commit($hash_base);
if (!%co) {
die_error(undef, "Unknown commit object");
}
+
my $refs = git_get_references();
- git_header_html();
- git_print_page_nav('','', $hash_base,$co{'tree'},$hash_base);
- git_print_header_div('commit', esc_html($co{'title'}), $hash_base);
+ my $limit = sprintf("--max-count=%i", (100 * ($page+1)));
+
if (!defined $hash && defined $file_name) {
$hash = git_get_hash_by_path($hash_base, $file_name);
}
if (defined $hash) {
$ftype = git_get_type($hash);
}
- git_print_page_path($file_name, $ftype, $hash_base);
open my $fd, "-|",
- git_cmd(), "rev-list", "--full-history", $hash_base, "--", $file_name;
+ git_cmd(), "rev-list", $limit, "--full-history", $hash_base, "--", $file_name
+ or die_error(undef, "Open git-rev-list-failed");
+ my @revlist = map { chomp; $_ } <$fd>;
+ close $fd
+ or die_error(undef, "Reading git-rev-list failed");
+
+ my $paging_nav = '';
+ if ($page > 0) {
+ $paging_nav .=
+ $cgi->a({-href => href(action=>"history", hash=>$hash, hash_base=>$hash_base,
+ file_name=>$file_name)},
+ "first");
+ $paging_nav .= " ⋅ " .
+ $cgi->a({-href => href(action=>"history", hash=>$hash, hash_base=>$hash_base,
+ file_name=>$file_name, page=>$page-1),
+ -accesskey => "p", -title => "Alt-p"}, "prev");
+ } else {
+ $paging_nav .= "first";
+ $paging_nav .= " ⋅ prev";
+ }
+ if ($#revlist >= (100 * ($page+1)-1)) {
+ $paging_nav .= " ⋅ " .
+ $cgi->a({-href => href(action=>"history", hash=>$hash, hash_base=>$hash_base,
+ file_name=>$file_name, page=>$page+1),
+ -accesskey => "n", -title => "Alt-n"}, "next");
+ } else {
+ $paging_nav .= " ⋅ next";
+ }
+ my $next_link = '';
+ if ($#revlist >= (100 * ($page+1)-1)) {
+ $next_link =
+ $cgi->a({-href => href(action=>"history", hash=>$hash, hash_base=>$hash_base,
+ file_name=>$file_name, page=>$page+1),
+ -title => "Alt-n"}, "next");
+ }
+
+ git_header_html();
+ git_print_page_nav('history','', $hash_base,$co{'tree'},$hash_base, $paging_nav);
+ git_print_header_div('commit', esc_html($co{'title'}), $hash_base);
+ git_print_page_path($file_name, $ftype, $hash_base);
- git_history_body($fd, $refs, $hash_base, $ftype);
+ git_history_body(\@revlist, ($page * 100), $#revlist,
+ $refs, $hash_base, $ftype, $next_link);
- close $fd;
git_footer_html();
}
if (!%co) {
die_error(undef, "Unknown commit object");
}
- # pickaxe may take all resources of your box and run for several minutes
- # with every query - so decide by yourself how public you make this feature :)
+
my $commit_search = 1;
my $author_search = 0;
my $committer_search = 0;
} elsif ($searchtext =~ s/^pickaxe\\://i) {
$commit_search = 0;
$pickaxe_search = 1;
+
+ # pickaxe may consume all resources of your box and run for several minutes
+ # with every query, so decide for yourself how public you make this feature
+ my ($have_pickaxe) = gitweb_check_feature('pickaxe');
+ if (!$have_pickaxe) {
+ die_error('403 Permission denied', "Permission denied");
+ }
}
git_header_html();
git_print_page_nav('','', $hash,$co{'tree'},$hash);
char *target = NULL;
char *path;
if (data[i] == '/') {
- serverlen = strchr(base + 8, '/') - base;
- okay = 1;
+ /* This counts
+ * http://git.host/pub/scm/linux.git/
+ * -----------here^
+ * so memcpy(dst, base, serverlen) will
+ * copy up to "...git.host".
+ */
+ const char *colon_ss = strstr(base,"://");
+ if (colon_ss) {
+ serverlen = (strchr(colon_ss + 3, '/')
+ - base);
+ okay = 1;
+ }
} else if (!memcmp(data + i, "../", 3)) {
+ /* Relative URL; chop the corresponding
+ * number of subpath from base (and ../
+ * from data), and concatenate the result.
+ *
+ * The code first drops one ../ from data, and
+ * then drops one ../ from data and one path
+ * component from base; IOW, one more ../ is
+ * dropped from data than path components are
+ * dropped from base.
+ *
+ * This is not wrong. The alternate in
+ * http://git.host/pub/scm/linux.git/
+ * to borrow from
+ * http://git.host/pub/scm/linus.git/
+ * is ../../linus.git/objects/.  It takes
+ * two ../ components to reach even your
+ * direct neighbour.
+ */
i += 3;
serverlen = strlen(base);
while (i + 2 < posn &&
okay = 1;
}
}
- /* skip 'objects' at end */
+ /* skip "objects\n" at end */
if (okay) {
target = xmalloc(serverlen + posn - i - 6);
- strlcpy(target, base, serverlen);
- strlcpy(target + serverlen, data + i, posn - i - 6);
+ memcpy(target, base, serverlen);
+ memcpy(target + serverlen, data + i,
+ posn - i - 7);
+ target[serverlen + posn - i - 7] = 0;
if (get_verbosely)
fprintf(stderr,
"Also look at %s\n", target);
struct tree_desc t[3];
void *buf1, *buf2, *buf3;
- if (argc < 4)
+ if (argc != 4)
usage(merge_tree_usage);
+ setup_git_directory();
+
buf1 = get_tree_descriptor(t+0, argv[1]);
buf2 = get_tree_descriptor(t+1, argv[2]);
buf3 = get_tree_descriptor(t+2, argv[3]);
#include "cache.h"
#include <pwd.h>
-static char pathname[PATH_MAX];
static char bad_path[] = "/bad-path/";
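+/*
+ * Hand out pathname buffers from a small rotating pool, so that up to
+ * four results from the formatting helpers below can be in use at the
+ * same time.
+ */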
+static char *get_pathname(void)
+{
+ static char pathname_array[4][PATH_MAX];
+ static int index;
+ return pathname_array[3 & ++index];
+}
+
static char *cleanup_path(char *path)
{
/* Clean it up */
{
va_list args;
unsigned len;
+ char *pathname = get_pathname();
va_start(args, fmt);
len = vsnprintf(pathname, PATH_MAX, fmt, args);
char *git_path(const char *fmt, ...)
{
const char *git_dir = get_git_dir();
+ char *pathname = get_pathname();
va_list args;
unsigned len;
ret = peek_remote(fd, flags);
close(fd[0]);
close(fd[1]);
- finish_connect(pid);
- return ret;
+ ret |= finish_connect(pid);
+ return !!ret;
}
return buf;
}
+/*
+ * Append a string to a string buffer, with or without shell quoting.
+ * Return true if the buffer overflowed.
+ */
+int add_to_string(char **ptrp, int *sizep, const char *str, int quote)
+{
+ char *p = *ptrp;
+ int size = *sizep;
+ int oc;
+ int err = 0;
+
+ if (quote)
+ oc = sq_quote_buf(p, size, str);
+ else {
+ oc = strlen(str);
+ memcpy(p, str, (size <= oc) ? size - 1 : oc);
+ }
+
+ if (size <= oc) {
+ err = 1;
+ oc = size - 1;
+ }
+
+ *ptrp += oc;
+ **ptrp = '\0';
+ *sizep -= oc;
+ return err;
+}
+
char *sq_dequote(char *arg)
{
char *dst = arg;
extern size_t sq_quote_buf(char *dst, size_t n, const char *src);
extern char *sq_quote_argv(const char** argv, int count);
+/*
+ * Append a string to a string buffer, with or without shell quoting.
+ * Return true if the buffer overflowed.
+ */
+extern int add_to_string(char **ptrp, int *sizep, const char *str, int quote);
+
/* This unwraps what sq_quote() produces in place, but returns
* NULL if the input does not look like what sq_quote would have
* produced.
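A minimal usage sketch of the now-shared helper; build_remote_command, cmd
and path are illustrative names, not part of the patch (rsh.c below is the
real consumer):

	#include "quote.h"

	/*
	 * Build "<cmd> '<path>'" into a caller-supplied buffer,
	 * shell-quoting only the user-controlled path.  Returns -1
	 * if the buffer was too small.
	 */
	static int build_remote_command(char *buf, int bufsz,
					const char *cmd, const char *path)
	{
		char *p = buf;
		int size = bufsz;
		int overflow = 0;

		overflow |= add_to_string(&p, &size, cmd, 0);
		overflow |= add_to_string(&p, &size, " ", 0);
		overflow |= add_to_string(&p, &size, path, 1); /* sq_quote'd */
		return overflow ? -1 : 0;
	}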
#define COMMAND_SIZE 4096
-/*
- * Append a string to a string buffer, with or without shell quoting.
- * Return true if the buffer overflowed.
- */
-static int add_to_string(char **ptrp, int *sizep, const char *str, int quote)
-{
- char *p = *ptrp;
- int size = *sizep;
- int oc;
- int err = 0;
-
- if ( quote ) {
- oc = sq_quote_buf(p, size, str);
- } else {
- oc = strlen(str);
- memcpy(p, str, (oc >= size) ? size-1 : oc);
- }
-
- if ( oc >= size ) {
- err = 1;
- oc = size-1;
- }
-
- *ptrp += oc;
- **ptrp = '\0';
- *sizep -= oc;
- return err;
-}
-
-int setup_connection(int *fd_in, int *fd_out, const char *remote_prog,
+int setup_connection(int *fd_in, int *fd_out, const char *remote_prog,
char *url, int rmt_argc, char **rmt_argv)
{
char *host;
static void exec_rev_list(struct ref *refs)
{
- struct ref *ref;
- static const char *args[1000];
- int i = 0, j;
+ static const char *args[4];
+ int i = 0;
args[i++] = "rev-list"; /* 0 */
if (use_thin_pack) /* 1 */
else
args[i++] = "--objects";
- /* First send the ones we care about most */
- for (ref = refs; ref; ref = ref->next) {
- if (900 < i)
- die("git-rev-list environment overflow");
- if (!is_zero_sha1(ref->new_sha1)) {
- char *buf = xmalloc(100);
- args[i++] = buf;
- snprintf(buf, 50, "%s", sha1_to_hex(ref->new_sha1));
- buf += 50;
- if (!is_zero_sha1(ref->old_sha1) &&
- has_sha1_file(ref->old_sha1)) {
- args[i++] = buf;
- snprintf(buf, 50, "^%s",
- sha1_to_hex(ref->old_sha1));
- }
- }
- }
+ args[i++] = "--stdin";
- /* Then a handful of the remainder
- * NEEDSWORK: we would be better off if used the newer ones first.
- */
- for (ref = refs, j = i + 16;
- i < 900 && i < j && ref;
- ref = ref->next) {
- if (is_zero_sha1(ref->new_sha1) &&
- !is_zero_sha1(ref->old_sha1) &&
- has_sha1_file(ref->old_sha1)) {
- char *buf = xmalloc(42);
- args[i++] = buf;
- snprintf(buf, 42, "^%s", sha1_to_hex(ref->old_sha1));
- }
- }
args[i] = NULL;
execv_git_cmd(args);
die("git-rev-list exec failed (%s)", strerror(errno));
}
+/*
+ * Run "rev-list --stdin | pack-objects" pipe.
+ */
static void rev_list(int fd, struct ref *refs)
{
int pipe_fd[2];
die("rev-list setup: pipe failed");
pack_objects_pid = fork();
if (!pack_objects_pid) {
+ /* The child becomes pack-objects; reads from pipe
+ * and writes to the original fd
+ */
dup2(pipe_fd[0], 0);
dup2(fd, 1);
close(pipe_fd[0]);
}
if (pack_objects_pid < 0)
die("pack-objects fork failed");
+
+ /* We become rev-list --stdin; output goes to pipe. */
dup2(pipe_fd[1], 1);
close(pipe_fd[0]);
close(pipe_fd[1]);
exec_rev_list(refs);
}
+/*
+ * Create "rev-list --stdin | pack-objects" pipe and feed
+ * the refs into the pipeline.
+ */
+static void rev_list_generate(int fd, struct ref *refs)
+{
+ int pipe_fd[2];
+ pid_t rev_list_generate_pid;
+
+ if (pipe(pipe_fd) < 0)
+ die("rev-list-generate setup: pipe failed");
+ rev_list_generate_pid = fork();
+ if (!rev_list_generate_pid) {
+ /* The child becomes the "rev-list | pack-objects"
+ * pipeline. It takes input from us, and its output
+ * goes to fd.
+ */
+ dup2(pipe_fd[0], 0);
+ dup2(fd, 1);
+ close(pipe_fd[0]);
+ close(pipe_fd[1]);
+ close(fd);
+ rev_list(fd, refs);
+ die("rev-list setup failed");
+ }
+ if (rev_list_generate_pid < 0)
+ die("rev-list-generate fork failed");
+
+ /* We feed the rev parameters to them. We do not write into
+ * fd nor read from the pipe.
+ */
+ close(pipe_fd[0]);
+ close(fd);
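+
+ /* What goes down the pipe is plain "rev-list --stdin" input:
+ * one object name per line -- "^<sha1>" for tips the remote
+ * already has, bare "<sha1>" for the new tips to be packed.
+ */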
+ while (refs) {
+ char buf[42];
+
+ if (!is_null_sha1(refs->old_sha1) &&
+ has_sha1_file(refs->old_sha1)) {
+ memcpy(buf + 1, sha1_to_hex(refs->old_sha1), 40);
+ buf[0] = '^';
+ buf[41] = '\n';
+ write(pipe_fd[1], buf, 42);
+ }
+ if (!is_null_sha1(refs->new_sha1)) {
+ memcpy(buf, sha1_to_hex(refs->new_sha1), 40);
+ buf[40] = '\n';
+ write(pipe_fd[1], buf, 41);
+ }
+ refs = refs->next;
+ }
+ close(pipe_fd[1]);
+ /* waitpid(rev_list_generate_pid); */
+ exit(0);
+}
+
+/*
+ * Make a pack stream and spit it out into file descriptor fd
+ */
static void pack_objects(int fd, struct ref *refs)
{
pid_t rev_list_pid;
rev_list_pid = fork();
if (!rev_list_pid) {
- rev_list(fd, refs);
+ rev_list_generate(fd, refs);
die("rev-list setup failed");
}
if (rev_list_pid < 0)
ret = send_pack(fd[0], fd[1], nr_heads, heads);
close(fd[0]);
close(fd[1]);
- finish_connect(pid);
- return ret;
+ ret |= finish_connect(pid);
+ return !!ret;
}
static unsigned int sha1_file_open_flag = O_NOATIME;
-static unsigned hexval(char c)
-{
- if (c >= '0' && c <= '9')
- return c - '0';
- if (c >= 'a' && c <= 'f')
- return c - 'a' + 10;
- if (c >= 'A' && c <= 'F')
- return c - 'A' + 10;
- return ~0;
+static inline unsigned int hexval(unsigned int c)
+{
+ static signed char val[256] = {
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 00-07 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 08-0f */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 10-17 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 18-1f */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 20-27 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 28-2f */
+ 0, 1, 2, 3, 4, 5, 6, 7, /* 30-37 */
+ 8, 9, -1, -1, -1, -1, -1, -1, /* 38-3f */
+ -1, 10, 11, 12, 13, 14, 15, -1, /* 40-47 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 48-4f */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 50-57 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 58-5f */
+ -1, 10, 11, 12, 13, 14, 15, -1, /* 60-67 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 68-6f */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 70-77 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 78-7f */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 80-87 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 88-8f */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 90-97 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* 98-9f */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* a0-a7 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* a8-af */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* b0-b7 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* b8-bf */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* c0-c7 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* c8-cf */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* d0-d7 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* d8-df */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* e0-e7 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* e8-ef */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* f0-f7 */
+ -1, -1, -1, -1, -1, -1, -1, -1, /* f8-ff */
+ };
+ return val[c];
}
int get_sha1_hex(const char *hex, unsigned char *sha1)
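With the table, each digit is a single load; a consumer such as
get_sha1_hex() (shown above) can combine two lookups per byte and reject
non-hex input with one test.  A sketch of that idiom, using a hypothetical
helper not present in the patch:

	static int hex_byte(const char *hex)
	{
		unsigned int val = (hexval((unsigned char)hex[0]) << 4) |
				    hexval((unsigned char)hex[1]);
		/* a non-hex digit yields -1 from the table, pushing val above 0xff */
		return (val & ~0xff) ? -1 : (int)val;
	}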
--- /dev/null
+#include "pkt-line.h"
+#include "sideband.h"
+
+/*
+ * Receive multiplexed output stream over git native protocol.
+ * in_stream is the input stream from the remote, which carries data
+ * in pkt_line format with band designator. Demultiplex it into out
+ * and err and return error appropriately. Band #1 carries the
+ * primary payload. Things coming over band #2 are not necessarily
+ * errors; they are usually informative messages on the standard error
+ * stream (aka "verbose"). A message over band #3 is a signal that
+ * the remote died unexpectedly. A flush() concludes the stream.
+ */
+int recv_sideband(const char *me, int in_stream, int out, int err, char *buf, int bufsz)
+{
+ while (1) {
+ int len = packet_read_line(in_stream, buf, bufsz);
+ if (len == 0)
+ break;
+ if (len < 1) {
+ len = sprintf(buf, "%s: protocol error: no band designator\n", me);
+ safe_write(err, buf, len);
+ return SIDEBAND_PROTOCOL_ERROR;
+ }
+ len--;
+ switch (buf[0] & 0xFF) {
+ case 3:
+ safe_write(err, "remote: ", 8);
+ safe_write(err, buf+1, len);
+ safe_write(err, "\n", 1);
+ return SIDEBAND_REMOTE_ERROR;
+ case 2:
+ safe_write(err, "remote: ", 8);
+ safe_write(err, buf+1, len);
+ continue;
+ case 1:
+ safe_write(out, buf+1, len);
+ continue;
+ default:
+ len = sprintf(buf + 1,
+ "%s: protocol error: bad band #%d\n",
+ me, buf[0] & 0xFF);
+ safe_write(err, buf+1, len);
+ return SIDEBAND_PROTOCOL_ERROR;
+ }
+ }
+ return 0;
+}
+
+/*
+ * fd is connected to the remote side; send the sideband data
+ * over multiplexed packet stream.
+ */
+ssize_t send_sideband(int fd, int band, const char *data, ssize_t sz, int packet_max)
+{
+ ssize_t ssz = sz;
+ const char *p = data;
+
+ while (sz) {
+ unsigned n;
+ char hdr[5];
+
+ n = sz;
+ if (packet_max - 5 < n)
+ n = packet_max - 5;
+ sprintf(hdr, "%04x", n + 5);
+ hdr[4] = band;
+ safe_write(fd, hdr, 5);
+ safe_write(fd, p, n);
+ p += n;
+ sz -= n;
+ }
+ return ssz;
+}
--- /dev/null
+#ifndef SIDEBAND_H
+#define SIDEBAND_H
+
+#define SIDEBAND_PROTOCOL_ERROR -2
+#define SIDEBAND_REMOTE_ERROR -1
+
+#define DEFAULT_PACKET_MAX 1000
+#define LARGE_PACKET_MAX 65520
+
+int recv_sideband(const char *me, int in_stream, int out, int err, char *, int);
+ssize_t send_sideband(int fd, int band, const char *data, ssize_t sz, int packet_max);
+
+#endif
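To show how the pieces fit together: a fetching client can hand its
connection to recv_sideband() and route band #1 to stdout and bands #2
and #3 to stderr.  A minimal sketch (demux_from_remote and the
"fetch-pack" label are illustrative, not part of the patch):

	#include "sideband.h"

	static int demux_from_remote(int fd)
	{
		char buf[LARGE_PACKET_MAX];
		/* out = 1 (stdout) for payload, err = 2 (stderr) for messages */
		return recv_sideband("fetch-pack", fd, 1, 2, buf, sizeof(buf));
	}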
test_expect_success \
"create $m" \
- 'git-update-ref $m $A &&
- test $A = $(cat .git/$m)'
+ "git-update-ref $m $A &&
+ test $A"' = $(cat .git/'"$m"')'
test_expect_success \
"create $m" \
- 'git-update-ref $m $B $A &&
- test $B = $(cat .git/$m)'
+ "git-update-ref $m $B $A &&
+ test $B"' = $(cat .git/'"$m"')'
rm -f .git/$m
test_expect_success \
"fail to create $n" \
- 'touch .git/$n_dir
+ "touch .git/$n_dir
git-update-ref $n $A >out 2>err
- test $? = 1 &&
+ test "'$? = 1 &&
test "" = "$(cat out)" &&
grep "error: unable to resolve reference" err &&
- grep $n err'
+ grep '"$n err"
rm -f .git/$n_dir out err
test_expect_success \
"create $m (by HEAD)" \
- 'git-update-ref HEAD $A &&
- test $A = $(cat .git/$m)'
+ "git-update-ref HEAD $A &&
+ test $A"' = $(cat .git/'"$m"')'
test_expect_success \
"create $m (by HEAD)" \
- 'git-update-ref HEAD $B $A &&
- test $B = $(cat .git/$m)'
+ "git-update-ref HEAD $B $A &&
+ test $B"' = $(cat .git/'"$m"')'
rm -f .git/$m
test_expect_failure \
'(not) create HEAD with old sha1' \
- 'git-update-ref HEAD $A $B'
+ "git-update-ref HEAD $A $B"
test_expect_failure \
"(not) prior created .git/$m" \
- 'test -f .git/$m'
+ "test -f .git/$m"
rm -f .git/$m
test_expect_success \
"create HEAD" \
- 'git-update-ref HEAD $A'
+ "git-update-ref HEAD $A"
test_expect_failure \
'(not) change HEAD with wrong SHA1' \
- 'git-update-ref HEAD $B $Z'
+ "git-update-ref HEAD $B $Z"
test_expect_failure \
"(not) changed .git/$m" \
- 'test $B = $(cat .git/$m)'
+ "test $B"' = $(cat .git/'"$m"')'
rm -f .git/$m
mkdir -p .git/logs/refs/heads
test_expect_success \
"create $m (logged by touch)" \
'GIT_COMMITTER_DATE="2005-05-26 23:30" \
- git-update-ref HEAD $A -m "Initial Creation" &&
- test $A = $(cat .git/$m)'
+ git-update-ref HEAD '"$A"' -m "Initial Creation" &&
+ test '"$A"' = $(cat .git/'"$m"')'
test_expect_success \
"update $m (logged by touch)" \
'GIT_COMMITTER_DATE="2005-05-26 23:31" \
- git-update-ref HEAD $B $A -m "Switch" &&
- test $B = $(cat .git/$m)'
+ git-update-ref HEAD'" $B $A "'-m "Switch" &&
+ test '"$B"' = $(cat .git/'"$m"')'
test_expect_success \
"set $m (logged by touch)" \
'GIT_COMMITTER_DATE="2005-05-26 23:41" \
- git-update-ref HEAD $A &&
- test $A = $(cat .git/$m)'
+ git-update-ref HEAD'" $A &&
+ test $A"' = $(cat .git/'"$m"')'
cat >expect <<EOF
$Z $A $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1117150200 +0000 Initial Creation
EOF
test_expect_success \
"verifying $m's log" \
- 'diff expect .git/logs/$m'
+ "diff expect .git/logs/$m"
rm -rf .git/$m .git/logs expect
test_expect_success \
test_expect_success \
"create $m (logged by config)" \
'GIT_COMMITTER_DATE="2005-05-26 23:32" \
- git-update-ref HEAD $A -m "Initial Creation" &&
- test $A = $(cat .git/$m)'
+ git-update-ref HEAD'" $A "'-m "Initial Creation" &&
+ test '"$A"' = $(cat .git/'"$m"')'
test_expect_success \
"update $m (logged by config)" \
'GIT_COMMITTER_DATE="2005-05-26 23:33" \
- git-update-ref HEAD $B $A -m "Switch" &&
- test $B = $(cat .git/$m)'
+ git-update-ref HEAD'" $B $A "'-m "Switch" &&
+ test '"$B"' = $(cat .git/'"$m"')'
test_expect_success \
"set $m (logged by config)" \
'GIT_COMMITTER_DATE="2005-05-26 23:43" \
- git-update-ref HEAD $A &&
- test $A = $(cat .git/$m)'
+ git-update-ref HEAD '"$A &&
+ test $A"' = $(cat .git/'"$m"')'
cat >expect <<EOF
$Z $A $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> 1117150320 +0000 Initial Creation
'Query "master@{May 25 2005}" (before history)' \
'rm -f o e
git-rev-parse --verify "master@{May 25 2005}" >o 2>e &&
- test $C = $(cat o) &&
- test "warning: Log .git/logs/$m only goes back to $ed." = "$(cat e)"'
+ test '"$C"' = $(cat o) &&
+ test "warning: Log .git/logs/'"$m only goes back to $ed"'." = "$(cat e)"'
test_expect_success \
"Query master@{2005-05-25} (before history)" \
'rm -f o e
git-rev-parse --verify master@{2005-05-25} >o 2>e &&
- test $C = $(cat o) &&
- echo test "warning: Log .git/logs/$m only goes back to $ed." = "$(cat e)"'
+ test '"$C"' = $(cat o) &&
+ echo test "warning: Log .git/logs/'"$m only goes back to $ed"'." = "$(cat e)"'
test_expect_success \
'Query "master@{May 26 2005 23:31:59}" (1 second before history)' \
'rm -f o e
git-rev-parse --verify "master@{May 26 2005 23:31:59}" >o 2>e &&
- test $C = $(cat o) &&
- test "warning: Log .git/logs/$m only goes back to $ed." = "$(cat e)"'
+ test '"$C"' = $(cat o) &&
+ test "warning: Log .git/logs/'"$m only goes back to $ed"'." = "$(cat e)"'
test_expect_success \
'Query "master@{May 26 2005 23:32:00}" (exactly history start)' \
'rm -f o e
git-rev-parse --verify "master@{May 26 2005 23:32:00}" >o 2>e &&
- test $A = $(cat o) &&
+ test '"$A"' = $(cat o) &&
test "" = "$(cat e)"'
test_expect_success \
'Query "master@{2005-05-26 23:33:01}" (middle of history with gap)' \
'rm -f o e
git-rev-parse --verify "master@{2005-05-26 23:33:01}" >o 2>e &&
- test $B = $(cat o) &&
- test "warning: Log .git/logs/$m has gap after $gd." = "$(cat e)"'
+ test '"$B"' = $(cat o) &&
+ test "warning: Log .git/logs/'"$m has gap after $gd"'." = "$(cat e)"'
test_expect_success \
'Query "master@{2005-05-26 23:38:00}" (middle of history)' \
'rm -f o e
git-rev-parse --verify "master@{2005-05-26 23:38:00}" >o 2>e &&
- test $Z = $(cat o) &&
+ test '"$Z"' = $(cat o) &&
test "" = "$(cat e)"'
test_expect_success \
'Query "master@{2005-05-26 23:43:00}" (exact end of history)' \
'rm -f o e
git-rev-parse --verify "master@{2005-05-26 23:43:00}" >o 2>e &&
- test $E = $(cat o) &&
+ test '"$E"' = $(cat o) &&
test "" = "$(cat e)"'
test_expect_success \
'Query "master@{2005-05-28}" (past end of history)' \
'rm -f o e
git-rev-parse --verify "master@{2005-05-28}" >o 2>e &&
- test $D = $(cat o) &&
- test "warning: Log .git/logs/$m unexpectedly ended on $ld." = "$(cat e)"'
+ test '"$D"' = $(cat o) &&
+ test "warning: Log .git/logs/'"$m unexpectedly ended on $ld"'." = "$(cat e)"'
rm -f .git/$m .git/logs/$m expect
EOF
test_expect_success \
'git-commit logged updates' \
- 'diff expect .git/logs/$m'
+ "diff expect .git/logs/$m"
unset h_TEST h_OTHER h_FIXED h_MERGED
test_expect_success \
git branch skip-merge skip-reference
'
-test_expect_failure 'rebase with git am -3 (default)' 'git rebase master'
+test_expect_failure 'rebase with git am -3 (default)' '
+ git rebase master
+'
test_expect_success 'rebase --skip with am -3' '
git reset --hard HEAD &&
'do_reset
git-apply --index C.diff'
-test_expect_failure 'apply binary diff without replacement -- should fail.' \
+test_expect_success 'apply binary diff without replacement.' \
'do_reset
git-apply BF.diff'
-test_expect_failure 'apply binary diff without replacement (copy) -- should fail.' \
+test_expect_success 'apply binary diff without replacement (copy).' \
'do_reset
git-apply CF.diff'
--- /dev/null
+#!/bin/sh
+#
+# Copyright (c) 2005 Junio C Hamano
+#
+
+test_description='git-apply boundary tests
+
+'
+. ./test-lib.sh
+
+L="c d e f g h i j k l m n o p q r s t u v w x"
+
+test_expect_success setup '
+ for i in b '"$L"' y
+ do
+ echo $i
+ done >victim &&
+ cat victim >original &&
+ git update-index --add victim &&
+
+ : add to the head
+ for i in a b '"$L"' y
+ do
+ echo $i
+ done >victim &&
+ cat victim >add-a-expect &&
+ git diff victim >add-a-patch.with &&
+ git diff --unified=0 >add-a-patch.without &&
+
+ : modify at the head
+ for i in a '"$L"' y
+ do
+ echo $i
+ done >victim &&
+ cat victim >mod-a-expect &&
+ git diff victim >mod-a-patch.with &&
+ git diff --unified=0 >mod-a-patch.without &&
+
+ : remove from the head
+ for i in '"$L"' y
+ do
+ echo $i
+ done >victim &&
+ cat victim >del-a-expect &&
+ git diff victim >del-a-patch.with &&
+ git diff --unified=0 >del-a-patch.without &&
+
+ : add to the tail
+ for i in b '"$L"' y z
+ do
+ echo $i
+ done >victim &&
+ cat victim >add-z-expect &&
+ git diff victim >add-z-patch.with &&
+ git diff --unified=0 >add-z-patch.without &&
+
+ : modify at the tail
+ for i in a '"$L"' y
+ do
+ echo $i
+ done >victim &&
+ cat victim >mod-z-expect &&
+ git diff victim >mod-z-patch.with &&
+ git diff --unified=0 >mod-z-patch.without &&
+
+ : remove from the tail
+ for i in b '"$L"'
+ do
+ echo $i
+ done >victim &&
+ cat victim >del-z-expect &&
+ git diff victim >del-z-patch.with &&
+ git diff --unified=0 >del-z-patch.without &&
+
+ : done
+'
+
+for with in with without
+do
+ case "$with" in
+ with) u= ;;
+ without) u='--unidiff-zero ' ;;
+ esac
+ for kind in add-a add-z mod-a mod-z del-a del-z
+ do
+ test_expect_success "apply $kind-patch $with context" '
+ cat original >victim &&
+ git update-index victim &&
+ git apply --index '"$u$kind-patch.$with"' || {
+ cat '"$kind-patch.$with"'
+ (exit 1)
+ } &&
+ diff -u '"$kind"'-expect victim
+ '
+ done
+done
+
+for kind in add-a add-z mod-a mod-z del-a del-z
+do
+ rm -f $kind-ng.without
+ sed -e "s/^diff --git /diff /" \
+ -e '/^index /d' \
+ <$kind-patch.without >$kind-ng.without
+ test_expect_success "apply non-git $kind-patch without context" '
+ cat original >victim &&
+ git update-index victim &&
+ git apply --unidiff-zero --index '"$kind-ng.without"' || {
+ cat '"$kind-ng.without"'
+ (exit 1)
+ } &&
+ diff -u '"$kind"'-expect victim
+ '
+done
+
+test_done
{
char *trace = getenv("GIT_TRACE");
- if (!trace || !strcmp(trace, "0") || !strcasecmp(trace," false"))
+ if (!trace || !strcmp(trace, "0") || !strcasecmp(trace, "false"))
return 0;
if (!strcmp(trace, "1") || !strcasecmp(trace, "true"))
return STDERR_FILENO;
#include "cache.h"
#include "refs.h"
#include "pkt-line.h"
+#include "sideband.h"
#include "tag.h"
#include "object.h"
#include "commit.h"
static struct object_array have_obj;
static struct object_array want_obj;
static unsigned int timeout;
+/* 0 for no sideband,
+ * otherwise maximum packet size (up to 65520 bytes).
+ */
static int use_sideband;
static void reset_timeout(void)
return len;
}
-#define PACKET_MAX 1000
static ssize_t send_client_data(int fd, const char *data, ssize_t sz)
{
- ssize_t ssz;
- const char *p;
-
- if (!data) {
- if (!use_sideband)
- return 0;
- packet_flush(1);
- }
-
- if (!use_sideband) {
- if (fd == 3)
- /* emergency quit */
- fd = 2;
- if (fd == 2) {
- xwrite(fd, data, sz);
- return sz;
- }
- return safe_write(fd, data, sz);
- }
- p = data;
- ssz = sz;
- while (sz) {
- unsigned n;
- char hdr[5];
-
- n = sz;
- if (PACKET_MAX - 5 < n)
- n = PACKET_MAX - 5;
- sprintf(hdr, "%04x", n + 5);
- hdr[4] = fd;
- safe_write(1, hdr, 5);
- safe_write(1, p, n);
- p += n;
- sz -= n;
+ if (use_sideband)
+ return send_sideband(1, fd, data, sz, use_sideband);
+ if (fd == 3)
+ /* emergency quit */
+ fd = 2;
+ if (fd == 2) {
+ xwrite(fd, data, sz);
+ return sz;
}
- return ssz;
+ return safe_write(fd, data, sz);
}
static void create_pack_file(void)
goto fail;
fprintf(stderr, "flushed.\n");
}
- send_client_data(1, NULL, 0);
+ if (use_sideband)
+ packet_flush(1);
return;
}
fail:
multi_ack = 1;
if (strstr(line+45, "thin-pack"))
use_thin_pack = 1;
- if (strstr(line+45, "side-band"))
- use_sideband = 1;
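+ /* test the 64k variant first: plain "side-band" is a
+ * substring of "side-band-64k" and would match it too */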
+ if (strstr(line+45, "side-band-64k"))
+ use_sideband = LARGE_PACKET_MAX;
+ else if (strstr(line+45, "side-band"))
+ use_sideband = DEFAULT_PACKET_MAX;
/* We have sent all our refs already, and the other end
* should have chosen out of them; otherwise they are
static int send_ref(const char *refname, const unsigned char *sha1)
{
- static const char *capabilities = "multi_ack thin-pack side-band";
+ static const char *capabilities = "multi_ack thin-pack side-band side-band-64k";
struct object *o = parse_object(sha1);
if (!o)