Merge branch 'db/fetch-pack'
author Junio C Hamano <gitster@pobox.com>
Thu, 25 Oct 2007 04:59:50 +0000 (21:59 -0700)
committer Junio C Hamano <gitster@pobox.com>
Thu, 25 Oct 2007 04:59:50 +0000 (21:59 -0700)
* db/fetch-pack: (60 commits)
Define compat version of mkdtemp for systems lacking it
Avoid scary errors about tagged trees/blobs during git-fetch
fetch: if not fetching from default remote, ignore default merge
Support 'push --dry-run' for http transport
Support 'push --dry-run' for rsync transport
Fix 'push --all branch...' error handling
Fix compilation when NO_CURL is defined
Added a test for fetching remote tags when there are no tags.
Fix a crash in ls-remote when refspec expands into nothing
Remove duplicate ref matches in fetch
Restore default verbosity for http fetches.
fetch/push: readd rsync support
Introduce remove_dir_recursively()
bundle transport: fix an alloc_ref() call
Allow abbreviations in the first refspec to be merged
Prevent send-pack from segfaulting when a branch doesn't match
Cleanup unnecessary break in remote.c
Cleanup style nit of 'x == NULL' in remote.c
Fix memory leaks when disconnecting transport instances
Ensure builtin-fetch honors {fetch,transfer}.unpackLimit
...

62 files changed:
Documentation/config.txt
Documentation/git-http-push.txt
Makefile
builtin-bundle.c
builtin-fetch-pack.c [new file with mode: 0644]
builtin-fetch.c [new file with mode: 0644]
builtin-http-fetch.c [new file with mode: 0644]
builtin-push.c
builtin.h
bundle.c [new file with mode: 0644]
bundle.h [new file with mode: 0644]
cache.h
compat/mkdtemp.c [new file with mode: 0644]
connect.c
contrib/examples/git-fetch.sh [new file with mode: 0755]
dir.c
dir.h
fetch-pack.c [deleted file]
fetch-pack.h [new file with mode: 0644]
fetch.c [deleted file]
fetch.h [deleted file]
git-compat-util.h
git-fetch.sh [deleted file]
git.c
http-fetch.c [deleted file]
http-push.c
http-walker.c [new file with mode: 0644]
http.c
http.h
local-fetch.c [deleted file]
pack-write.c
pack.h
receive-pack.c
refs.c
remote.c
remote.h
rsh.c [deleted file]
rsh.h [deleted file]
send-pack.c
ssh-fetch.c [deleted file]
ssh-pull.c [deleted file]
ssh-push.c [deleted file]
ssh-upload.c [deleted file]
t/t5510-fetch.sh
t/t5515-fetch-merge-logic.sh
t/t5515/fetch.br-branches-default-merge
t/t5515/fetch.br-branches-default-merge_branches-default
t/t5515/fetch.br-branches-default-octopus
t/t5515/fetch.br-branches-default-octopus_branches-default
t/t5515/fetch.br-branches-one-merge
t/t5515/fetch.br-branches-one-merge_branches-one
t/t5515/fetch.br-branches-one-octopus
t/t5515/fetch.br-branches-one-octopus_branches-one
t/t5515/fetch.br-config-glob-octopus
t/t5515/fetch.br-config-glob-octopus_config-glob
t/t5515/fetch.br-remote-glob-octopus
t/t5515/fetch.br-remote-glob-octopus_remote-glob
t/t5700-clone-reference.sh
transport.c [new file with mode: 0644]
transport.h [new file with mode: 0644]
walker.c [new file with mode: 0644]
walker.h [new file with mode: 0644]
index d4a476e2ff9322348df099bfdfbba2832c8d47a4..edf50cd2113e73daa0a05c16a416424441f010f3 100644 (file)
@@ -324,10 +324,11 @@ branch.<name>.remote::
        If this option is not given, `git fetch` defaults to remote "origin".
 
 branch.<name>.merge::
-       When in branch <name>, it tells `git fetch` the default refspec to
-       be marked for merging in FETCH_HEAD. The value has exactly to match
-       a remote part of one of the refspecs which are fetched from the remote
-       given by "branch.<name>.remote".
+       When in branch <name>, it tells `git fetch` the default
+       refspec to be marked for merging in FETCH_HEAD. The value is
+       handled like the remote part of a refspec, and must match a
+       ref which is fetched from the remote given by
+       "branch.<name>.remote".
        The merge information is used by `git pull` (which at first calls
        `git fetch`) to lookup the default branch for merging. Without
        this option, `git pull` defaults to merge the first refspec fetched.
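
For illustration only (the URL and branch names below are made up, not part of the patch): with a configuration like this, the ref fetched from "refs/heads/next" matches branch.devel.merge, so it is marked for merging in FETCH_HEAD and a plain "git pull" on branch "devel" merges it.

    [remote "origin"]
            url = git://example.com/project.git
            fetch = +refs/heads/*:refs/remotes/origin/*
    [branch "devel"]
            remote = origin
            merge = refs/heads/next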
index 9afb860381369767a0a3f5295f588b75f559ff22..3a69b719b5cdddc9f48cdbfefe358783e12f396d 100644 (file)
@@ -8,7 +8,7 @@ git-http-push - Push objects over HTTP/DAV to another repository
 
 SYNOPSIS
 --------
-'git-http-push' [--all] [--force] [--verbose] <url> <ref> [<ref>...]
+'git-http-push' [--all] [--dry-run] [--force] [--verbose] <url> <ref> [<ref>...]
 
 DESCRIPTION
 -----------
@@ -30,6 +30,9 @@ OPTIONS
        the remote repository can lose commits; use it with
        care.
 
+--dry-run::
+       Do everything except actually send the updates.
+
 --verbose::
        Report the list of objects being walked locally and the
        list of objects successfully sent to the remote repository.
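
A usage sketch of the new option (the URL and ref below are hypothetical):

    git-http-push --dry-run --verbose http://example.com/project.git refs/heads/master

This walks and reports the objects that would be sent, but stops short of actually sending the updates to the remote repository.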
index 9f81c73af7d58ec75c6ca448ff6f96a3ba97c48d..b7289204ebfa628195acaf8b0e087289c1468c62 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -38,6 +38,8 @@ all::
 #
 # Define NO_SETENV if you don't have setenv in the C library.
 #
+# Define NO_MKDTEMP if you don't have mkdtemp in the C library.
+#
 # Define NO_SYMLINK_HEAD if you never want .git/HEAD to be a symbolic link.
 # Enable it on Windows.  By default, symrefs are still used.
 #
@@ -208,7 +210,6 @@ BASIC_LDFLAGS =
 SCRIPT_SH = \
        git-bisect.sh git-checkout.sh \
        git-clean.sh git-clone.sh git-commit.sh \
-       git-fetch.sh \
        git-ls-remote.sh \
        git-merge-one-file.sh git-mergetool.sh git-parse-remote.sh \
        git-pull.sh git-rebase.sh git-rebase--interactive.sh \
@@ -235,14 +236,14 @@ SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) \
 # ... and all the rest that could be moved out of bindir to gitexecdir
 PROGRAMS = \
        git-fetch-pack$X \
-       git-hash-object$X git-index-pack$X git-local-fetch$X \
+       git-hash-object$X git-index-pack$X \
        git-fast-import$X \
        git-daemon$X \
        git-merge-index$X git-mktag$X git-mktree$X git-patch-id$X \
        git-peek-remote$X git-receive-pack$X \
        git-send-pack$X git-shell$X \
-       git-show-index$X git-ssh-fetch$X \
-       git-ssh-upload$X git-unpack-file$X \
+       git-show-index$X \
+       git-unpack-file$X \
        git-update-server-info$X \
        git-upload-pack$X \
        git-pack-redundant$X git-var$X \
@@ -270,9 +271,6 @@ ifndef NO_TCLTK
 OTHER_PROGRAMS += gitk-wish
 endif
 
-# Backward compatibility -- to be removed after 1.0
-PROGRAMS += git-ssh-pull$X git-ssh-push$X
-
 # Set paths to tools early so that they can be used for version tests.
 ifndef SHELL_PATH
        SHELL_PATH = /bin/sh
@@ -292,7 +290,7 @@ LIB_H = \
        run-command.h strbuf.h tag.h tree.h git-compat-util.h revision.h \
        tree-walk.h log-tree.h dir.h path-list.h unpack-trees.h builtin.h \
        utf8.h reflog-walk.h patch-ids.h attr.h decorate.h progress.h \
-       mailmap.h remote.h
+       mailmap.h remote.h transport.h
 
 DIFF_OBJS = \
        diff.o diff-lib.o diffcore-break.o diffcore-order.o \
@@ -314,7 +312,8 @@ LIB_OBJS = \
        write_or_die.o trace.o list-objects.o grep.o match-trees.o \
        alloc.o merge-file.o path-list.o help.o unpack-trees.o $(DIFF_OBJS) \
        color.o wt-status.o archive-zip.o archive-tar.o shallow.o utf8.o \
-       convert.o attr.o decorate.o progress.o mailmap.o symlinks.o remote.o
+       convert.o attr.o decorate.o progress.o mailmap.o symlinks.o remote.o \
+       transport.o bundle.o walker.o
 
 BUILTIN_OBJS = \
        builtin-add.o \
@@ -335,6 +334,8 @@ BUILTIN_OBJS = \
        builtin-diff-files.o \
        builtin-diff-index.o \
        builtin-diff-tree.o \
+       builtin-fetch.o \
+       builtin-fetch-pack.o \
        builtin-fetch--tool.o \
        builtin-fmt-merge-msg.o \
        builtin-for-each-ref.o \
@@ -416,12 +417,14 @@ ifeq ($(uname_S),SunOS)
                NEEDS_LIBICONV = YesPlease
                NO_UNSETENV = YesPlease
                NO_SETENV = YesPlease
+               NO_MKDTEMP = YesPlease
                NO_C99_FORMAT = YesPlease
                NO_STRTOUMAX = YesPlease
        endif
        ifeq ($(uname_R),5.9)
                NO_UNSETENV = YesPlease
                NO_SETENV = YesPlease
+               NO_MKDTEMP = YesPlease
                NO_C99_FORMAT = YesPlease
                NO_STRTOUMAX = YesPlease
        endif
@@ -518,7 +521,9 @@ else
        CC_LD_DYNPATH = -R
 endif
 
-ifndef NO_CURL
+ifdef NO_CURL
+       BASIC_CFLAGS += -DNO_CURL
+else
        ifdef CURLDIR
                # Try "-Wl,-rpath=$(CURLDIR)/$(lib)" in such a case.
                BASIC_CFLAGS += -I$(CURLDIR)/include
@@ -526,7 +531,9 @@ ifndef NO_CURL
        else
                CURL_LIBCURL = -lcurl
        endif
-       PROGRAMS += git-http-fetch$X
+       BUILTIN_OBJS += builtin-http-fetch.o
+       EXTLIBS += $(CURL_LIBCURL)
+       LIB_OBJS += http.o http-walker.o
        curl_check := $(shell (echo 070908; curl-config --vernum) | sort -r | sed -ne 2p)
        ifeq "$(curl_check)" "070908"
                ifndef NO_EXPAT
@@ -608,6 +615,10 @@ ifdef NO_SETENV
        COMPAT_CFLAGS += -DNO_SETENV
        COMPAT_OBJS += compat/setenv.o
 endif
+ifdef NO_MKDTEMP
+       COMPAT_CFLAGS += -DNO_MKDTEMP
+       COMPAT_OBJS += compat/mkdtemp.o
+endif
 ifdef NO_UNSETENV
        COMPAT_CFLAGS += -DNO_UNSETENV
        COMPAT_OBJS += compat/unsetenv.o
@@ -889,33 +900,22 @@ http.o: http.c GIT-CFLAGS
        $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DGIT_USER_AGENT='"git/$(GIT_VERSION)"' $<
 
 ifdef NO_EXPAT
-http-fetch.o: http-fetch.c http.h GIT-CFLAGS
+http-walker.o: http-walker.c http.h GIT-CFLAGS
        $(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) -DNO_EXPAT $<
 endif
 
 git-%$X: %.o $(GITLIBS)
        $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) $(LIBS)
 
-ssh-pull.o: ssh-fetch.c
-ssh-push.o: ssh-upload.c
-git-local-fetch$X: fetch.o
-git-ssh-fetch$X: rsh.o fetch.o
-git-ssh-upload$X: rsh.o
-git-ssh-pull$X: rsh.o fetch.o
-git-ssh-push$X: rsh.o
-
 git-imap-send$X: imap-send.o $(LIB_FILE)
 
-http.o http-fetch.o http-push.o: http.h
-git-http-fetch$X: fetch.o http.o http-fetch.o $(GITLIBS)
-       $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
-               $(LIBS) $(CURL_LIBCURL) $(EXPAT_LIBEXPAT)
+http.o http-walker.o http-push.o: http.h
 
 git-http-push$X: revision.o http.o http-push.o $(GITLIBS)
        $(QUIET_LINK)$(CC) $(ALL_CFLAGS) -o $@ $(ALL_LDFLAGS) $(filter %.o,$^) \
                $(LIBS) $(CURL_LIBCURL) $(EXPAT_LIBEXPAT)
 
-$(LIB_OBJS) $(BUILTIN_OBJS) fetch.o: $(LIB_H)
+$(LIB_OBJS) $(BUILTIN_OBJS): $(LIB_H)
 $(patsubst git-%$X,%.o,$(PROGRAMS)): $(LIB_H) $(wildcard */*.h)
 $(DIFF_OBJS): diffcore.h
 
@@ -1131,8 +1131,7 @@ check-docs::
                git-merge-octopus | git-merge-ours | git-merge-recursive | \
                git-merge-resolve | git-merge-stupid | \
                git-add--interactive | git-fsck-objects | git-init-db | \
-               git-repo-config | git-fetch--tool | \
-               git-ssh-pull | git-ssh-push ) continue ;; \
+               git-repo-config | git-fetch--tool ) continue ;; \
                esac ; \
                test -f "Documentation/$$v.txt" || \
                echo "no doc: $$v"; \
index 1b650069c929744c43f95e62ca49f8a542a70111..9f38e2176a4c05fe9f6f8efcabb9b1e7204fdb85 100644 (file)
@@ -1,11 +1,6 @@
 #include "builtin.h"
 #include "cache.h"
-#include "object.h"
-#include "commit.h"
-#include "diff.h"
-#include "revision.h"
-#include "list-objects.h"
-#include "run-command.h"
+#include "bundle.h"
 
 /*
  * Basic handler for bundle files to connect repositories via sneakernet.
 
 static const char *bundle_usage="git-bundle (create <bundle> <git-rev-list args> | verify <bundle> | list-heads <bundle> [refname]... | unbundle <bundle> [refname]... )";
 
-static const char bundle_signature[] = "# v2 git bundle\n";
-
-struct ref_list {
-       unsigned int nr, alloc;
-       struct ref_list_entry {
-               unsigned char sha1[20];
-               char *name;
-       } *list;
-};
-
-static void add_to_ref_list(const unsigned char *sha1, const char *name,
-               struct ref_list *list)
-{
-       if (list->nr + 1 >= list->alloc) {
-               list->alloc = alloc_nr(list->nr + 1);
-               list->list = xrealloc(list->list,
-                               list->alloc * sizeof(list->list[0]));
-       }
-       memcpy(list->list[list->nr].sha1, sha1, 20);
-       list->list[list->nr].name = xstrdup(name);
-       list->nr++;
-}
-
-struct bundle_header {
-       struct ref_list prerequisites;
-       struct ref_list references;
-};
-
-/* returns an fd */
-static int read_header(const char *path, struct bundle_header *header) {
-       char buffer[1024];
-       int fd;
-       long fpos;
-       FILE *ffd = fopen(path, "rb");
-
-       if (!ffd)
-               return error("could not open '%s'", path);
-       if (!fgets(buffer, sizeof(buffer), ffd) ||
-                       strcmp(buffer, bundle_signature)) {
-               fclose(ffd);
-               return error("'%s' does not look like a v2 bundle file", path);
-       }
-       while (fgets(buffer, sizeof(buffer), ffd)
-                       && buffer[0] != '\n') {
-               int is_prereq = buffer[0] == '-';
-               int offset = is_prereq ? 1 : 0;
-               int len = strlen(buffer);
-               unsigned char sha1[20];
-               struct ref_list *list = is_prereq ? &header->prerequisites
-                       : &header->references;
-               char delim;
-
-               if (buffer[len - 1] == '\n')
-                       buffer[len - 1] = '\0';
-               if (get_sha1_hex(buffer + offset, sha1)) {
-                       warning("unrecognized header: %s", buffer);
-                       continue;
-               }
-               delim = buffer[40 + offset];
-               if (!isspace(delim) && (delim != '\0' || !is_prereq))
-                       die ("invalid header: %s", buffer);
-               add_to_ref_list(sha1, isspace(delim) ?
-                               buffer + 41 + offset : "", list);
-       }
-       fpos = ftell(ffd);
-       fclose(ffd);
-       fd = open(path, O_RDONLY);
-       if (fd < 0)
-               return error("could not open '%s'", path);
-       lseek(fd, fpos, SEEK_SET);
-       return fd;
-}
-
-static int list_refs(struct ref_list *r, int argc, const char **argv)
-{
-       int i;
-
-       for (i = 0; i < r->nr; i++) {
-               if (argc > 1) {
-                       int j;
-                       for (j = 1; j < argc; j++)
-                               if (!strcmp(r->list[i].name, argv[j]))
-                                       break;
-                       if (j == argc)
-                               continue;
-               }
-               printf("%s %s\n", sha1_to_hex(r->list[i].sha1),
-                               r->list[i].name);
-       }
-       return 0;
-}
-
-#define PREREQ_MARK (1u<<16)
-
-static int verify_bundle(struct bundle_header *header, int verbose)
-{
-       /*
-        * Do fast check, then if any prereqs are missing then go line by line
-        * to be verbose about the errors
-        */
-       struct ref_list *p = &header->prerequisites;
-       struct rev_info revs;
-       const char *argv[] = {NULL, "--all"};
-       struct object_array refs;
-       struct commit *commit;
-       int i, ret = 0, req_nr;
-       const char *message = "Repository lacks these prerequisite commits:";
-
-       init_revisions(&revs, NULL);
-       for (i = 0; i < p->nr; i++) {
-               struct ref_list_entry *e = p->list + i;
-               struct object *o = parse_object(e->sha1);
-               if (o) {
-                       o->flags |= PREREQ_MARK;
-                       add_pending_object(&revs, o, e->name);
-                       continue;
-               }
-               if (++ret == 1)
-                       error(message);
-               error("%s %s", sha1_to_hex(e->sha1), e->name);
-       }
-       if (revs.pending.nr != p->nr)
-               return ret;
-       req_nr = revs.pending.nr;
-       setup_revisions(2, argv, &revs, NULL);
-
-       memset(&refs, 0, sizeof(struct object_array));
-       for (i = 0; i < revs.pending.nr; i++) {
-               struct object_array_entry *e = revs.pending.objects + i;
-               add_object_array(e->item, e->name, &refs);
-       }
-
-       prepare_revision_walk(&revs);
-
-       i = req_nr;
-       while (i && (commit = get_revision(&revs)))
-               if (commit->object.flags & PREREQ_MARK)
-                       i--;
-
-       for (i = 0; i < req_nr; i++)
-               if (!(refs.objects[i].item->flags & SHOWN)) {
-                       if (++ret == 1)
-                               error(message);
-                       error("%s %s", sha1_to_hex(refs.objects[i].item->sha1),
-                               refs.objects[i].name);
-               }
-
-       for (i = 0; i < refs.nr; i++)
-               clear_commit_marks((struct commit *)refs.objects[i].item, -1);
-
-       if (verbose) {
-               struct ref_list *r;
-
-               r = &header->references;
-               printf("The bundle contains %d ref%s\n",
-                      r->nr, (1 < r->nr) ? "s" : "");
-               list_refs(r, 0, NULL);
-               r = &header->prerequisites;
-               printf("The bundle requires these %d ref%s\n",
-                      r->nr, (1 < r->nr) ? "s" : "");
-               list_refs(r, 0, NULL);
-       }
-       return ret;
-}
-
-static int list_heads(struct bundle_header *header, int argc, const char **argv)
-{
-       return list_refs(&header->references, argc, argv);
-}
-
-static int create_bundle(struct bundle_header *header, const char *path,
-               int argc, const char **argv)
-{
-       static struct lock_file lock;
-       int bundle_fd = -1;
-       int bundle_to_stdout;
-       const char **argv_boundary = xmalloc((argc + 4) * sizeof(const char *));
-       const char **argv_pack = xmalloc(5 * sizeof(const char *));
-       int i, ref_count = 0;
-       char buffer[1024];
-       struct rev_info revs;
-       struct child_process rls;
-       FILE *rls_fout;
-
-       bundle_to_stdout = !strcmp(path, "-");
-       if (bundle_to_stdout)
-               bundle_fd = 1;
-       else
-               bundle_fd = hold_lock_file_for_update(&lock, path, 1);
-
-       /* write signature */
-       write_or_die(bundle_fd, bundle_signature, strlen(bundle_signature));
-
-       /* init revs to list objects for pack-objects later */
-       save_commit_buffer = 0;
-       init_revisions(&revs, NULL);
-
-       /* write prerequisites */
-       memcpy(argv_boundary + 3, argv + 1, argc * sizeof(const char *));
-       argv_boundary[0] = "rev-list";
-       argv_boundary[1] = "--boundary";
-       argv_boundary[2] = "--pretty=oneline";
-       argv_boundary[argc + 2] = NULL;
-       memset(&rls, 0, sizeof(rls));
-       rls.argv = argv_boundary;
-       rls.out = -1;
-       rls.git_cmd = 1;
-       if (start_command(&rls))
-               return -1;
-       rls_fout = fdopen(rls.out, "r");
-       while (fgets(buffer, sizeof(buffer), rls_fout)) {
-               unsigned char sha1[20];
-               if (buffer[0] == '-') {
-                       write_or_die(bundle_fd, buffer, strlen(buffer));
-                       if (!get_sha1_hex(buffer + 1, sha1)) {
-                               struct object *object = parse_object(sha1);
-                               object->flags |= UNINTERESTING;
-                               add_pending_object(&revs, object, buffer);
-                       }
-               } else if (!get_sha1_hex(buffer, sha1)) {
-                       struct object *object = parse_object(sha1);
-                       object->flags |= SHOWN;
-               }
-       }
-       fclose(rls_fout);
-       if (finish_command(&rls))
-               return error("rev-list died");
-
-       /* write references */
-       argc = setup_revisions(argc, argv, &revs, NULL);
-       if (argc > 1)
-               return error("unrecognized argument: %s'", argv[1]);
-
-       for (i = 0; i < revs.pending.nr; i++) {
-               struct object_array_entry *e = revs.pending.objects + i;
-               unsigned char sha1[20];
-               char *ref;
-
-               if (e->item->flags & UNINTERESTING)
-                       continue;
-               if (dwim_ref(e->name, strlen(e->name), sha1, &ref) != 1)
-                       continue;
-               /*
-                * Make sure the refs we wrote out is correct; --max-count and
-                * other limiting options could have prevented all the tips
-                * from getting output.
-                *
-                * Non commit objects such as tags and blobs do not have
-                * this issue as they are not affected by those extra
-                * constraints.
-                */
-               if (!(e->item->flags & SHOWN) && e->item->type == OBJ_COMMIT) {
-                       warning("ref '%s' is excluded by the rev-list options",
-                               e->name);
-                       free(ref);
-                       continue;
-               }
-               /*
-                * If you run "git bundle create bndl v1.0..v2.0", the
-                * name of the positive ref is "v2.0" but that is the
-                * commit that is referenced by the tag, and not the tag
-                * itself.
-                */
-               if (hashcmp(sha1, e->item->sha1)) {
-                       /*
-                        * Is this the positive end of a range expressed
-                        * in terms of a tag (e.g. v2.0 from the range
-                        * "v1.0..v2.0")?
-                        */
-                       struct commit *one = lookup_commit_reference(sha1);
-                       struct object *obj;
-
-                       if (e->item == &(one->object)) {
-                               /*
-                                * Need to include e->name as an
-                                * independent ref to the pack-objects
-                                * input, so that the tag is included
-                                * in the output; otherwise we would
-                                * end up triggering "empty bundle"
-                                * error.
-                                */
-                               obj = parse_object(sha1);
-                               obj->flags |= SHOWN;
-                               add_pending_object(&revs, obj, e->name);
-                       }
-                       free(ref);
-                       continue;
-               }
-
-               ref_count++;
-               write_or_die(bundle_fd, sha1_to_hex(e->item->sha1), 40);
-               write_or_die(bundle_fd, " ", 1);
-               write_or_die(bundle_fd, ref, strlen(ref));
-               write_or_die(bundle_fd, "\n", 1);
-               free(ref);
-       }
-       if (!ref_count)
-               die ("Refusing to create empty bundle.");
-
-       /* end header */
-       write_or_die(bundle_fd, "\n", 1);
-
-       /* write pack */
-       argv_pack[0] = "pack-objects";
-       argv_pack[1] = "--all-progress";
-       argv_pack[2] = "--stdout";
-       argv_pack[3] = "--thin";
-       argv_pack[4] = NULL;
-       memset(&rls, 0, sizeof(rls));
-       rls.argv = argv_pack;
-       rls.in = -1;
-       rls.out = bundle_fd;
-       rls.git_cmd = 1;
-       if (start_command(&rls))
-               return error("Could not spawn pack-objects");
-       for (i = 0; i < revs.pending.nr; i++) {
-               struct object *object = revs.pending.objects[i].item;
-               if (object->flags & UNINTERESTING)
-                       write(rls.in, "^", 1);
-               write(rls.in, sha1_to_hex(object->sha1), 40);
-               write(rls.in, "\n", 1);
-       }
-       if (finish_command(&rls))
-               return error ("pack-objects died");
-       close(bundle_fd);
-       if (!bundle_to_stdout)
-               commit_lock_file(&lock);
-       return 0;
-}
-
-static int unbundle(struct bundle_header *header, int bundle_fd,
-               int argc, const char **argv)
-{
-       const char *argv_index_pack[] = {"index-pack",
-               "--fix-thin", "--stdin", NULL};
-       struct child_process ip;
-
-       if (verify_bundle(header, 0))
-               return -1;
-       memset(&ip, 0, sizeof(ip));
-       ip.argv = argv_index_pack;
-       ip.in = bundle_fd;
-       ip.no_stdout = 1;
-       ip.git_cmd = 1;
-       if (run_command(&ip))
-               return error("index-pack died");
-       return list_heads(header, argc, argv);
-}
-
 int cmd_bundle(int argc, const char **argv, const char *prefix)
 {
        struct bundle_header header;
@@ -388,8 +34,8 @@ int cmd_bundle(int argc, const char **argv, const char *prefix)
        }
 
        memset(&header, 0, sizeof(header));
-       if (strcmp(cmd, "create") &&
-                       (bundle_fd = read_header(bundle_file, &header)) < 0)
+       if (strcmp(cmd, "create") && (bundle_fd =
+                               read_bundle_header(bundle_file, &header)) < 0)
                return 1;
 
        if (!strcmp(cmd, "verify")) {
@@ -401,7 +47,7 @@ int cmd_bundle(int argc, const char **argv, const char *prefix)
        }
        if (!strcmp(cmd, "list-heads")) {
                close(bundle_fd);
-               return !!list_heads(&header, argc, argv);
+               return !!list_bundle_refs(&header, argc, argv);
        }
        if (!strcmp(cmd, "create")) {
                if (nongit)
@@ -410,7 +56,8 @@ int cmd_bundle(int argc, const char **argv, const char *prefix)
        } else if (!strcmp(cmd, "unbundle")) {
                if (nongit)
                        die("Need a repository to unbundle.");
-               return !!unbundle(&header, bundle_fd, argc, argv);
+               return !!unbundle(&header, bundle_fd) ||
+                       list_bundle_refs(&header, argc, argv);
        } else
                usage(bundle_usage);
 }
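
The code removed above now lives behind bundle.h (a new file whose contents are not shown in this section); judging from the removed definitions and the new call sites, its interface is roughly the sketch below — the exact declarations are inferred, not quoted:

    struct ref_list {
            unsigned int nr, alloc;
            struct ref_list_entry {
                    unsigned char sha1[20];
                    char *name;
            } *list;
    };

    struct bundle_header {
            struct ref_list prerequisites;
            struct ref_list references;
    };

    /* returns an fd positioned just past the bundle header, or negative on error */
    int read_bundle_header(const char *path, struct bundle_header *header);
    int verify_bundle(struct bundle_header *header, int verbose);
    int list_bundle_refs(struct bundle_header *header, int argc, const char **argv);
    int create_bundle(struct bundle_header *header, const char *path,
                      int argc, const char **argv);
    int unbundle(struct bundle_header *header, int bundle_fd);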
diff --git a/builtin-fetch-pack.c b/builtin-fetch-pack.c
new file mode 100644 (file)
index 0000000..8f25d50
--- /dev/null
@@ -0,0 +1,833 @@
+#include "cache.h"
+#include "refs.h"
+#include "pkt-line.h"
+#include "commit.h"
+#include "tag.h"
+#include "exec_cmd.h"
+#include "pack.h"
+#include "sideband.h"
+#include "fetch-pack.h"
+
+static int transfer_unpack_limit = -1;
+static int fetch_unpack_limit = -1;
+static int unpack_limit = 100;
+static struct fetch_pack_args args;
+
+static const char fetch_pack_usage[] =
+"git-fetch-pack [--all] [--quiet|-q] [--keep|-k] [--thin] [--upload-pack=<git-upload-pack>] [--depth=<n>] [--no-progress] [-v] [<host>:]<directory> [<refs>...]";
+static const char *uploadpack = "git-upload-pack";
+
+#define COMPLETE       (1U << 0)
+#define COMMON         (1U << 1)
+#define COMMON_REF     (1U << 2)
+#define SEEN           (1U << 3)
+#define POPPED         (1U << 4)
+
+/*
+ * After sending this many "have"s if we do not get any new ACK , we
+ * give up traversing our history.
+ */
+#define MAX_IN_VAIN 256
+
+static struct commit_list *rev_list;
+static int non_common_revs, multi_ack, use_thin_pack, use_sideband;
+
+static void rev_list_push(struct commit *commit, int mark)
+{
+       if (!(commit->object.flags & mark)) {
+               commit->object.flags |= mark;
+
+               if (!(commit->object.parsed))
+                       parse_commit(commit);
+
+               insert_by_date(commit, &rev_list);
+
+               if (!(commit->object.flags & COMMON))
+                       non_common_revs++;
+       }
+}
+
+static int rev_list_insert_ref(const char *path, const unsigned char *sha1, int flag, void *cb_data)
+{
+       struct object *o = deref_tag(parse_object(sha1), path, 0);
+
+       if (o && o->type == OBJ_COMMIT)
+               rev_list_push((struct commit *)o, SEEN);
+
+       return 0;
+}
+
+/*
+   This function marks a rev and its ancestors as common.
+   In some cases, it is desirable to mark only the ancestors (for example
+   when only the server does not yet know that they are common).
+*/
+
+static void mark_common(struct commit *commit,
+               int ancestors_only, int dont_parse)
+{
+       if (commit != NULL && !(commit->object.flags & COMMON)) {
+               struct object *o = (struct object *)commit;
+
+               if (!ancestors_only)
+                       o->flags |= COMMON;
+
+               if (!(o->flags & SEEN))
+                       rev_list_push(commit, SEEN);
+               else {
+                       struct commit_list *parents;
+
+                       if (!ancestors_only && !(o->flags & POPPED))
+                               non_common_revs--;
+                       if (!o->parsed && !dont_parse)
+                               parse_commit(commit);
+
+                       for (parents = commit->parents;
+                                       parents;
+                                       parents = parents->next)
+                               mark_common(parents->item, 0, dont_parse);
+               }
+       }
+}
+
+/*
+  Get the next rev to send, ignoring the common.
+*/
+
+static const unsigned char* get_rev(void)
+{
+       struct commit *commit = NULL;
+
+       while (commit == NULL) {
+               unsigned int mark;
+               struct commit_list* parents;
+
+               if (rev_list == NULL || non_common_revs == 0)
+                       return NULL;
+
+               commit = rev_list->item;
+               if (!(commit->object.parsed))
+                       parse_commit(commit);
+               commit->object.flags |= POPPED;
+               if (!(commit->object.flags & COMMON))
+                       non_common_revs--;
+
+               parents = commit->parents;
+
+               if (commit->object.flags & COMMON) {
+                       /* do not send "have", and ignore ancestors */
+                       commit = NULL;
+                       mark = COMMON | SEEN;
+               } else if (commit->object.flags & COMMON_REF)
+                       /* send "have", and ignore ancestors */
+                       mark = COMMON | SEEN;
+               else
+                       /* send "have", also for its ancestors */
+                       mark = SEEN;
+
+               while (parents) {
+                       if (!(parents->item->object.flags & SEEN))
+                               rev_list_push(parents->item, mark);
+                       if (mark & COMMON)
+                               mark_common(parents->item, 1, 0);
+                       parents = parents->next;
+               }
+
+               rev_list = rev_list->next;
+       }
+
+       return commit->object.sha1;
+}
+
+static int find_common(int fd[2], unsigned char *result_sha1,
+                      struct ref *refs)
+{
+       int fetching;
+       int count = 0, flushes = 0, retval;
+       const unsigned char *sha1;
+       unsigned in_vain = 0;
+       int got_continue = 0;
+
+       for_each_ref(rev_list_insert_ref, NULL);
+
+       fetching = 0;
+       for ( ; refs ; refs = refs->next) {
+               unsigned char *remote = refs->old_sha1;
+               struct object *o;
+
+               /*
+                * If that object is complete (i.e. it is an ancestor of a
+                * local ref), we tell them we have it but do not have to
+                * tell them about its ancestors, which they already know
+                * about.
+                *
+                * We use lookup_object here because we are only
+                * interested in the case we *know* the object is
+                * reachable and we have already scanned it.
+                */
+               if (((o = lookup_object(remote)) != NULL) &&
+                               (o->flags & COMPLETE)) {
+                       continue;
+               }
+
+               if (!fetching)
+                       packet_write(fd[1], "want %s%s%s%s%s%s%s\n",
+                                    sha1_to_hex(remote),
+                                    (multi_ack ? " multi_ack" : ""),
+                                    (use_sideband == 2 ? " side-band-64k" : ""),
+                                    (use_sideband == 1 ? " side-band" : ""),
+                                    (use_thin_pack ? " thin-pack" : ""),
+                                    (args.no_progress ? " no-progress" : ""),
+                                    " ofs-delta");
+               else
+                       packet_write(fd[1], "want %s\n", sha1_to_hex(remote));
+               fetching++;
+       }
+       if (is_repository_shallow())
+               write_shallow_commits(fd[1], 1);
+       if (args.depth > 0)
+               packet_write(fd[1], "deepen %d", args.depth);
+       packet_flush(fd[1]);
+       if (!fetching)
+               return 1;
+
+       if (args.depth > 0) {
+               char line[1024];
+               unsigned char sha1[20];
+               int len;
+
+               while ((len = packet_read_line(fd[0], line, sizeof(line)))) {
+                       if (!prefixcmp(line, "shallow ")) {
+                               if (get_sha1_hex(line + 8, sha1))
+                                       die("invalid shallow line: %s", line);
+                               register_shallow(sha1);
+                               continue;
+                       }
+                       if (!prefixcmp(line, "unshallow ")) {
+                               if (get_sha1_hex(line + 10, sha1))
+                                       die("invalid unshallow line: %s", line);
+                               if (!lookup_object(sha1))
+                                       die("object not found: %s", line);
+                               /* make sure that it is parsed as shallow */
+                               parse_object(sha1);
+                               if (unregister_shallow(sha1))
+                                       die("no shallow found: %s", line);
+                               continue;
+                       }
+                       die("expected shallow/unshallow, got %s", line);
+               }
+       }
+
+       flushes = 0;
+       retval = -1;
+       while ((sha1 = get_rev())) {
+               packet_write(fd[1], "have %s\n", sha1_to_hex(sha1));
+               if (args.verbose)
+                       fprintf(stderr, "have %s\n", sha1_to_hex(sha1));
+               in_vain++;
+               if (!(31 & ++count)) {
+                       int ack;
+
+                       packet_flush(fd[1]);
+                       flushes++;
+
+                       /*
+                        * We keep one window "ahead" of the other side, and
+                        * will wait for an ACK only on the next one
+                        */
+                       if (count == 32)
+                               continue;
+
+                       do {
+                               ack = get_ack(fd[0], result_sha1);
+                               if (args.verbose && ack)
+                                       fprintf(stderr, "got ack %d %s\n", ack,
+                                                       sha1_to_hex(result_sha1));
+                               if (ack == 1) {
+                                       flushes = 0;
+                                       multi_ack = 0;
+                                       retval = 0;
+                                       goto done;
+                               } else if (ack == 2) {
+                                       struct commit *commit =
+                                               lookup_commit(result_sha1);
+                                       mark_common(commit, 0, 1);
+                                       retval = 0;
+                                       in_vain = 0;
+                                       got_continue = 1;
+                               }
+                       } while (ack);
+                       flushes--;
+                       if (got_continue && MAX_IN_VAIN < in_vain) {
+                               if (args.verbose)
+                                       fprintf(stderr, "giving up\n");
+                               break; /* give up */
+                       }
+               }
+       }
+done:
+       packet_write(fd[1], "done\n");
+       if (args.verbose)
+               fprintf(stderr, "done\n");
+       if (retval != 0) {
+               multi_ack = 0;
+               flushes++;
+       }
+       while (flushes || multi_ack) {
+               int ack = get_ack(fd[0], result_sha1);
+               if (ack) {
+                       if (args.verbose)
+                               fprintf(stderr, "got ack (%d) %s\n", ack,
+                                       sha1_to_hex(result_sha1));
+                       if (ack == 1)
+                               return 0;
+                       multi_ack = 1;
+                       continue;
+               }
+               flushes--;
+       }
+       return retval;
+}
+
+static struct commit_list *complete;
+
+static int mark_complete(const char *path, const unsigned char *sha1, int flag, void *cb_data)
+{
+       struct object *o = parse_object(sha1);
+
+       while (o && o->type == OBJ_TAG) {
+               struct tag *t = (struct tag *) o;
+               if (!t->tagged)
+                       break; /* broken repository */
+               o->flags |= COMPLETE;
+               o = parse_object(t->tagged->sha1);
+       }
+       if (o && o->type == OBJ_COMMIT) {
+               struct commit *commit = (struct commit *)o;
+               commit->object.flags |= COMPLETE;
+               insert_by_date(commit, &complete);
+       }
+       return 0;
+}
+
+static void mark_recent_complete_commits(unsigned long cutoff)
+{
+       while (complete && cutoff <= complete->item->date) {
+               if (args.verbose)
+                       fprintf(stderr, "Marking %s as complete\n",
+                               sha1_to_hex(complete->item->object.sha1));
+               pop_most_recent_commit(&complete, COMPLETE);
+       }
+}
+
+static void filter_refs(struct ref **refs, int nr_match, char **match)
+{
+       struct ref **return_refs;
+       struct ref *newlist = NULL;
+       struct ref **newtail = &newlist;
+       struct ref *ref, *next;
+       struct ref *fastarray[32];
+
+       if (nr_match && !args.fetch_all) {
+               if (ARRAY_SIZE(fastarray) < nr_match)
+                       return_refs = xcalloc(nr_match, sizeof(struct ref *));
+               else {
+                       return_refs = fastarray;
+                       memset(return_refs, 0, sizeof(struct ref *) * nr_match);
+               }
+       }
+       else
+               return_refs = NULL;
+
+       for (ref = *refs; ref; ref = next) {
+               next = ref->next;
+               if (!memcmp(ref->name, "refs/", 5) &&
+                   check_ref_format(ref->name + 5))
+                       ; /* trash */
+               else if (args.fetch_all &&
+                        (!args.depth || prefixcmp(ref->name, "refs/tags/") )) {
+                       *newtail = ref;
+                       ref->next = NULL;
+                       newtail = &ref->next;
+                       continue;
+               }
+               else {
+                       int order = path_match(ref->name, nr_match, match);
+                       if (order) {
+                               return_refs[order-1] = ref;
+                               continue; /* we will link it later */
+                       }
+               }
+               free(ref);
+       }
+
+       if (!args.fetch_all) {
+               int i;
+               for (i = 0; i < nr_match; i++) {
+                       ref = return_refs[i];
+                       if (ref) {
+                               *newtail = ref;
+                               ref->next = NULL;
+                               newtail = &ref->next;
+                       }
+               }
+               if (return_refs != fastarray)
+                       free(return_refs);
+       }
+       *refs = newlist;
+}
+
+static int everything_local(struct ref **refs, int nr_match, char **match)
+{
+       struct ref *ref;
+       int retval;
+       unsigned long cutoff = 0;
+
+       track_object_refs = 0;
+       save_commit_buffer = 0;
+
+       for (ref = *refs; ref; ref = ref->next) {
+               struct object *o;
+
+               o = parse_object(ref->old_sha1);
+               if (!o)
+                       continue;
+
+               /* We already have it -- which may mean that we were
+                * in sync with the other side at some time after
+                * that (it is OK if we guess wrong here).
+                */
+               if (o->type == OBJ_COMMIT) {
+                       struct commit *commit = (struct commit *)o;
+                       if (!cutoff || cutoff < commit->date)
+                               cutoff = commit->date;
+               }
+       }
+
+       if (!args.depth) {
+               for_each_ref(mark_complete, NULL);
+               if (cutoff)
+                       mark_recent_complete_commits(cutoff);
+       }
+
+       /*
+        * Mark all complete remote refs as common refs.
+        * Don't mark them common yet; the server has to be told so first.
+        */
+       for (ref = *refs; ref; ref = ref->next) {
+               struct object *o = deref_tag(lookup_object(ref->old_sha1),
+                                            NULL, 0);
+
+               if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
+                       continue;
+
+               if (!(o->flags & SEEN)) {
+                       rev_list_push((struct commit *)o, COMMON_REF | SEEN);
+
+                       mark_common((struct commit *)o, 1, 1);
+               }
+       }
+
+       filter_refs(refs, nr_match, match);
+
+       for (retval = 1, ref = *refs; ref ; ref = ref->next) {
+               const unsigned char *remote = ref->old_sha1;
+               unsigned char local[20];
+               struct object *o;
+
+               o = lookup_object(remote);
+               if (!o || !(o->flags & COMPLETE)) {
+                       retval = 0;
+                       if (!args.verbose)
+                               continue;
+                       fprintf(stderr,
+                               "want %s (%s)\n", sha1_to_hex(remote),
+                               ref->name);
+                       continue;
+               }
+
+               hashcpy(ref->new_sha1, local);
+               if (!args.verbose)
+                       continue;
+               fprintf(stderr,
+                       "already have %s (%s)\n", sha1_to_hex(remote),
+                       ref->name);
+       }
+       return retval;
+}
+
+static pid_t setup_sideband(int fd[2], int xd[2])
+{
+       pid_t side_pid;
+
+       if (!use_sideband) {
+               fd[0] = xd[0];
+               fd[1] = xd[1];
+               return 0;
+       }
+       /* xd[] is talking with upload-pack; subprocess reads from
+        * xd[0], spits out band#2 to stderr, and feeds us band#1
+        * through our fd[0].
+        */
+       if (pipe(fd) < 0)
+               die("fetch-pack: unable to set up pipe");
+       side_pid = fork();
+       if (side_pid < 0)
+               die("fetch-pack: unable to fork off sideband demultiplexer");
+       if (!side_pid) {
+               /* subprocess */
+               close(fd[0]);
+               if (xd[0] != xd[1])
+                       close(xd[1]);
+               if (recv_sideband("fetch-pack", xd[0], fd[1], 2))
+                       exit(1);
+               exit(0);
+       }
+       close(xd[0]);
+       close(fd[1]);
+       fd[1] = xd[1];
+       return side_pid;
+}
+
+static int get_pack(int xd[2], char **pack_lockfile)
+{
+       int status;
+       pid_t pid, side_pid;
+       int fd[2];
+       const char *argv[20];
+       char keep_arg[256];
+       char hdr_arg[256];
+       const char **av;
+       int do_keep = args.keep_pack;
+       int keep_pipe[2];
+
+       side_pid = setup_sideband(fd, xd);
+
+       av = argv;
+       *hdr_arg = 0;
+       if (!args.keep_pack && unpack_limit) {
+               struct pack_header header;
+
+               if (read_pack_header(fd[0], &header))
+                       die("protocol error: bad pack header");
+               snprintf(hdr_arg, sizeof(hdr_arg), "--pack_header=%u,%u",
+                        ntohl(header.hdr_version), ntohl(header.hdr_entries));
+               if (ntohl(header.hdr_entries) < unpack_limit)
+                       do_keep = 0;
+               else
+                       do_keep = 1;
+       }
+
+       if (do_keep) {
+               if (pack_lockfile && pipe(keep_pipe))
+                       die("fetch-pack: pipe setup failure: %s", strerror(errno));
+               *av++ = "index-pack";
+               *av++ = "--stdin";
+               if (!args.quiet && !args.no_progress)
+                       *av++ = "-v";
+               if (args.use_thin_pack)
+                       *av++ = "--fix-thin";
+               if (args.lock_pack || unpack_limit) {
+                       int s = sprintf(keep_arg,
+                                       "--keep=fetch-pack %d on ", getpid());
+                       if (gethostname(keep_arg + s, sizeof(keep_arg) - s))
+                               strcpy(keep_arg + s, "localhost");
+                       *av++ = keep_arg;
+               }
+       }
+       else {
+               *av++ = "unpack-objects";
+               if (args.quiet)
+                       *av++ = "-q";
+       }
+       if (*hdr_arg)
+               *av++ = hdr_arg;
+       *av++ = NULL;
+
+       pid = fork();
+       if (pid < 0)
+               die("fetch-pack: unable to fork off %s", argv[0]);
+       if (!pid) {
+               dup2(fd[0], 0);
+               if (do_keep && pack_lockfile) {
+                       dup2(keep_pipe[1], 1);
+                       close(keep_pipe[0]);
+                       close(keep_pipe[1]);
+               }
+               close(fd[0]);
+               close(fd[1]);
+               execv_git_cmd(argv);
+               die("%s exec failed", argv[0]);
+       }
+       close(fd[0]);
+       close(fd[1]);
+       if (do_keep && pack_lockfile) {
+               close(keep_pipe[1]);
+               *pack_lockfile = index_pack_lockfile(keep_pipe[0]);
+               close(keep_pipe[0]);
+       }
+       while (waitpid(pid, &status, 0) < 0) {
+               if (errno != EINTR)
+                       die("waiting for %s: %s", argv[0], strerror(errno));
+       }
+       if (WIFEXITED(status)) {
+               int code = WEXITSTATUS(status);
+               if (code)
+                       die("%s died with error code %d", argv[0], code);
+               return 0;
+       }
+       if (WIFSIGNALED(status)) {
+               int sig = WTERMSIG(status);
+               die("%s died of signal %d", argv[0], sig);
+       }
+       die("%s died of unnatural causes %d", argv[0], status);
+}
+
+static struct ref *do_fetch_pack(int fd[2],
+               int nr_match,
+               char **match,
+               char **pack_lockfile)
+{
+       struct ref *ref;
+       unsigned char sha1[20];
+
+       get_remote_heads(fd[0], &ref, 0, NULL, 0);
+       if (is_repository_shallow() && !server_supports("shallow"))
+               die("Server does not support shallow clients");
+       if (server_supports("multi_ack")) {
+               if (args.verbose)
+                       fprintf(stderr, "Server supports multi_ack\n");
+               multi_ack = 1;
+       }
+       if (server_supports("side-band-64k")) {
+               if (args.verbose)
+                       fprintf(stderr, "Server supports side-band-64k\n");
+               use_sideband = 2;
+       }
+       else if (server_supports("side-band")) {
+               if (args.verbose)
+                       fprintf(stderr, "Server supports side-band\n");
+               use_sideband = 1;
+       }
+       if (!ref) {
+               packet_flush(fd[1]);
+               die("no matching remote head");
+       }
+       if (everything_local(&ref, nr_match, match)) {
+               packet_flush(fd[1]);
+               goto all_done;
+       }
+       if (find_common(fd, sha1, ref) < 0)
+               if (!args.keep_pack)
+                       /* When cloning, it is not unusual to have
+                        * no common commit.
+                        */
+                       fprintf(stderr, "warning: no common commits\n");
+
+       if (get_pack(fd, pack_lockfile))
+               die("git-fetch-pack: fetch failed.");
+
+ all_done:
+       return ref;
+}
+
+static int remove_duplicates(int nr_heads, char **heads)
+{
+       int src, dst;
+
+       for (src = dst = 0; src < nr_heads; src++) {
+               /* If heads[src] is different from any of
+                * heads[0..dst], push it in.
+                */
+               int i;
+               for (i = 0; i < dst; i++) {
+                       if (!strcmp(heads[i], heads[src]))
+                               break;
+               }
+               if (i < dst)
+                       continue;
+               if (src != dst)
+                       heads[dst] = heads[src];
+               dst++;
+       }
+       return dst;
+}
+
+static int fetch_pack_config(const char *var, const char *value)
+{
+       if (strcmp(var, "fetch.unpacklimit") == 0) {
+               fetch_unpack_limit = git_config_int(var, value);
+               return 0;
+       }
+
+       if (strcmp(var, "transfer.unpacklimit") == 0) {
+               transfer_unpack_limit = git_config_int(var, value);
+               return 0;
+       }
+
+       return git_default_config(var, value);
+}
+
+static struct lock_file lock;
+
+static void fetch_pack_setup(void)
+{
+       static int did_setup;
+       if (did_setup)
+               return;
+       git_config(fetch_pack_config);
+       if (0 <= transfer_unpack_limit)
+               unpack_limit = transfer_unpack_limit;
+       else if (0 <= fetch_unpack_limit)
+               unpack_limit = fetch_unpack_limit;
+       did_setup = 1;
+}
+
+int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
+{
+       int i, ret, nr_heads;
+       struct ref *ref;
+       char *dest = NULL, **heads;
+
+       nr_heads = 0;
+       heads = NULL;
+       for (i = 1; i < argc; i++) {
+               const char *arg = argv[i];
+
+               if (*arg == '-') {
+                       if (!prefixcmp(arg, "--upload-pack=")) {
+                               args.uploadpack = arg + 14;
+                               continue;
+                       }
+                       if (!prefixcmp(arg, "--exec=")) {
+                               args.uploadpack = arg + 7;
+                               continue;
+                       }
+                       if (!strcmp("--quiet", arg) || !strcmp("-q", arg)) {
+                               args.quiet = 1;
+                               continue;
+                       }
+                       if (!strcmp("--keep", arg) || !strcmp("-k", arg)) {
+                               args.lock_pack = args.keep_pack;
+                               args.keep_pack = 1;
+                               continue;
+                       }
+                       if (!strcmp("--thin", arg)) {
+                               args.use_thin_pack = 1;
+                               continue;
+                       }
+                       if (!strcmp("--all", arg)) {
+                               args.fetch_all = 1;
+                               continue;
+                       }
+                       if (!strcmp("-v", arg)) {
+                               args.verbose = 1;
+                               continue;
+                       }
+                       if (!prefixcmp(arg, "--depth=")) {
+                               args.depth = strtol(arg + 8, NULL, 0);
+                               continue;
+                       }
+                       if (!strcmp("--no-progress", arg)) {
+                               args.no_progress = 1;
+                               continue;
+                       }
+                       usage(fetch_pack_usage);
+               }
+               dest = (char *)arg;
+               heads = (char **)(argv + i + 1);
+               nr_heads = argc - i - 1;
+               break;
+       }
+       if (!dest)
+               usage(fetch_pack_usage);
+
+       ref = fetch_pack(&args, dest, nr_heads, heads, NULL);
+       ret = !ref;
+
+       while (ref) {
+               printf("%s %s\n",
+                      sha1_to_hex(ref->old_sha1), ref->name);
+               ref = ref->next;
+       }
+
+       return ret;
+}
+
+struct ref *fetch_pack(struct fetch_pack_args *my_args,
+               const char *dest,
+               int nr_heads,
+               char **heads,
+               char **pack_lockfile)
+{
+       int i, ret;
+       int fd[2];
+       pid_t pid;
+       struct ref *ref;
+       struct stat st;
+
+       fetch_pack_setup();
+       memcpy(&args, my_args, sizeof(args));
+       if (args.depth > 0) {
+               if (stat(git_path("shallow"), &st))
+                       st.st_mtime = 0;
+       }
+
+       pid = git_connect(fd, (char *)dest, uploadpack,
+                          args.verbose ? CONNECT_VERBOSE : 0);
+       if (pid < 0)
+               return NULL;
+       if (heads && nr_heads)
+               nr_heads = remove_duplicates(nr_heads, heads);
+       ref = do_fetch_pack(fd, nr_heads, heads, pack_lockfile);
+       close(fd[0]);
+       close(fd[1]);
+       ret = finish_connect(pid);
+
+       if (!ret && nr_heads) {
+               /* If the heads to pull were given, we should have
+                * consumed all of them by matching the remote.
+                * Otherwise, 'git-fetch remote no-such-ref' would
+                * silently succeed without issuing an error.
+                */
+               for (i = 0; i < nr_heads; i++)
+                       if (heads[i] && heads[i][0]) {
+                               error("no such remote ref %s", heads[i]);
+                               ret = 1;
+                       }
+       }
+
+       if (!ret && args.depth > 0) {
+               struct cache_time mtime;
+               char *shallow = git_path("shallow");
+               int fd;
+
+               mtime.sec = st.st_mtime;
+#ifdef USE_NSEC
+               mtime.usec = st.st_mtim.usec;
+#endif
+               if (stat(shallow, &st)) {
+                       if (mtime.sec)
+                               die("shallow file was removed during fetch");
+               } else if (st.st_mtime != mtime.sec
+#ifdef USE_NSEC
+                               || st.st_mtim.usec != mtime.usec
+#endif
+                         )
+                       die("shallow file was changed during fetch");
+
+               fd = hold_lock_file_for_update(&lock, shallow, 1);
+               if (!write_shallow_commits(fd, 0)) {
+                       unlink(shallow);
+                       rollback_lock_file(&lock);
+               } else {
+                       close(fd);
+                       commit_lock_file(&lock);
+               }
+       }
+
+       if (ret)
+               ref = NULL;
+
+       return ref;
+}
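
For reference, the negotiation that find_common() drives over the pkt-line channel looks roughly like this (hashes abbreviated; the capability list depends on what the server advertised):

    C: want <sha1> multi_ack side-band-64k thin-pack ofs-delta
    C: want <sha1>
    C: [flush]
    C: have <sha1>            (batches of 32, a flush after each batch,
    C: ...                     keeping one window ahead of the server)
    S: ACK <sha1> continue    (multi_ack; a plain ACK ends negotiation)
    C: done
    S: ACK <sha1> | NAK
    S: <pack data, read from band #1 when side-band is in use>

Once at least one "continue" has been seen, sending MAX_IN_VAIN (256) more "have"s without any new ACK makes the client give up walking its history and send "done".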
diff --git a/builtin-fetch.c b/builtin-fetch.c
new file mode 100644 (file)
index 0000000..b9d2b0c
--- /dev/null
@@ -0,0 +1,578 @@
+/*
+ * "git fetch"
+ */
+#include "cache.h"
+#include "refs.h"
+#include "commit.h"
+#include "builtin.h"
+#include "path-list.h"
+#include "remote.h"
+#include "transport.h"
+
+static const char fetch_usage[] = "git-fetch [-a | --append] [--upload-pack <upload-pack>] [-f | --force] [--no-tags] [-t | --tags] [-k | --keep] [-u | --update-head-ok] [--depth <depth>] [-v | --verbose] [<repository> <refspec>...]";
+
+static int append, force, tags, no_tags, update_head_ok, verbose, quiet;
+static char *default_rla = NULL;
+static struct transport *transport;
+
+static void unlock_pack(void)
+{
+       if (transport)
+               transport_unlock_pack(transport);
+}
+
+static void unlock_pack_on_signal(int signo)
+{
+       unlock_pack();
+       signal(SIGINT, SIG_DFL);
+       raise(signo);
+}
+
+static void add_merge_config(struct ref **head,
+                          struct ref *remote_refs,
+                          struct branch *branch,
+                          struct ref ***tail)
+{
+       int i;
+
+       for (i = 0; i < branch->merge_nr; i++) {
+               struct ref *rm, **old_tail = *tail;
+               struct refspec refspec;
+
+               for (rm = *head; rm; rm = rm->next) {
+                       if (branch_merge_matches(branch, i, rm->name)) {
+                               rm->merge = 1;
+                               break;
+                       }
+               }
+               if (rm)
+                       continue;
+
+               /* Not fetched to a tracking branch?  We need to fetch
+                * it anyway to allow this branch's "branch.$name.merge"
+                * to be honored by git-pull.
+                */
+               refspec.src = branch->merge[i]->src;
+               refspec.dst = NULL;
+               refspec.pattern = 0;
+               refspec.force = 0;
+               get_fetch_map(remote_refs, &refspec, tail);
+               for (rm = *old_tail; rm; rm = rm->next)
+                       rm->merge = 1;
+       }
+}
+
+static struct ref *get_ref_map(struct transport *transport,
+                              struct refspec *refs, int ref_count, int tags,
+                              int *autotags)
+{
+       int i;
+       struct ref *rm;
+       struct ref *ref_map = NULL;
+       struct ref **tail = &ref_map;
+
+       struct ref *remote_refs = transport_get_remote_refs(transport);
+
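+       /*
+        * Refspecs given on the command line (or --tags) take precedence
+        * over the remote's configured fetch refspecs.
+        */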
+       if (ref_count || tags) {
+               for (i = 0; i < ref_count; i++) {
+                       get_fetch_map(remote_refs, &refs[i], &tail);
+                       if (refs[i].dst && refs[i].dst[0])
+                               *autotags = 1;
+               }
+               /* Merge everything on the command line, but not --tags */
+               for (rm = ref_map; rm; rm = rm->next)
+                       rm->merge = 1;
+               if (tags) {
+                       struct refspec refspec;
+                       refspec.src = "refs/tags/";
+                       refspec.dst = "refs/tags/";
+                       refspec.pattern = 1;
+                       refspec.force = 0;
+                       get_fetch_map(remote_refs, &refspec, &tail);
+               }
+       } else {
+               /* Use the defaults */
+               struct remote *remote = transport->remote;
+               struct branch *branch = branch_get(NULL);
+               int has_merge = branch_has_merge_config(branch);
+               if (remote && (remote->fetch_refspec_nr || has_merge)) {
+                       for (i = 0; i < remote->fetch_refspec_nr; i++) {
+                               get_fetch_map(remote_refs, &remote->fetch[i], &tail);
+                               if (remote->fetch[i].dst &&
+                                   remote->fetch[i].dst[0])
+                                       *autotags = 1;
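+                               /*
+                                * Without branch.<name>.merge, the ref
+                                * brought in by the first non-pattern
+                                * refspec is the one marked for merging.
+                                */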
+                               if (!i && !has_merge && ref_map &&
+                                   !remote->fetch[0].pattern)
+                                       ref_map->merge = 1;
+                       }
+                       /*
+                        * if the remote we're fetching from is the same
+                        * as given in branch.<name>.remote, we add the
+                        * ref given in branch.<name>.merge, too.
+                        */
+                       if (has_merge && !strcmp(branch->remote_name,
+                                               remote->name))
+                               add_merge_config(&ref_map, remote_refs, branch, &tail);
+               } else {
+                       ref_map = get_remote_ref(remote_refs, "HEAD");
+                       ref_map->merge = 1;
+               }
+       }
+       ref_remove_duplicates(ref_map);
+
+       return ref_map;
+}
+
+static void show_new(enum object_type type, unsigned char *sha1_new)
+{
+       fprintf(stderr, "  %s: %s\n", typename(type),
+               find_unique_abbrev(sha1_new, DEFAULT_ABBREV));
+}
+
+static int s_update_ref(const char *action,
+                       struct ref *ref,
+                       int check_old)
+{
+       char msg[1024];
+       char *rla = getenv("GIT_REFLOG_ACTION");
+       static struct ref_lock *lock;
+
+       if (!rla)
+               rla = default_rla;
+       snprintf(msg, sizeof(msg), "%s: %s", rla, action);
+       lock = lock_any_ref_for_update(ref->name,
+                                      check_old ? ref->old_sha1 : NULL, 0);
+       if (!lock)
+               return 1;
+       if (write_ref_sha1(lock, ref->new_sha1, msg) < 0)
+               return 1;
+       return 0;
+}
+
+static int update_local_ref(struct ref *ref,
+                           const char *note,
+                           int verbose)
+{
+       char oldh[41], newh[41];
+       struct commit *current = NULL, *updated;
+       enum object_type type;
+       struct branch *current_branch = branch_get(NULL);
+
+       type = sha1_object_info(ref->new_sha1, NULL);
+       if (type < 0)
+               die("object %s not found", sha1_to_hex(ref->new_sha1));
+
+       if (!*ref->name) {
+               /* Not storing */
+               if (verbose) {
+                       fprintf(stderr, "* fetched %s\n", note);
+                       show_new(type, ref->new_sha1);
+               }
+               return 0;
+       }
+
+       if (!hashcmp(ref->old_sha1, ref->new_sha1)) {
+               if (verbose) {
+                       fprintf(stderr, "* %s: same as %s\n",
+                               ref->name, note);
+                       show_new(type, ref->new_sha1);
+               }
+               return 0;
+       }
+
+       if (current_branch &&
+           !strcmp(ref->name, current_branch->name) &&
+           !(update_head_ok || is_bare_repository()) &&
+           !is_null_sha1(ref->old_sha1)) {
+               /*
+                * If this is the head, and it's not okay to update
+                * the head, and the old value of the head isn't empty...
+                */
+               fprintf(stderr,
+                       " * %s: Cannot fetch into the current branch.\n",
+                       ref->name);
+               return 1;
+       }
+
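+       /*
+        * An existing tag that now points elsewhere on the remote is
+        * simply overwritten; tags are not subjected to the fast-forward
+        * check below.
+        */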
+       if (!is_null_sha1(ref->old_sha1) &&
+           !prefixcmp(ref->name, "refs/tags/")) {
+               fprintf(stderr, "* %s: updating with %s\n",
+                       ref->name, note);
+               show_new(type, ref->new_sha1);
+               return s_update_ref("updating tag", ref, 0);
+       }
+
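+       /*
+        * If either side does not peel to a commit (for example a tag
+        * pointing at a blob, or a previously unset ref), store the new
+        * value without attempting a fast-forward check.
+        */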
+       current = lookup_commit_reference_gently(ref->old_sha1, 1);
+       updated = lookup_commit_reference_gently(ref->new_sha1, 1);
+       if (!current || !updated) {
+               char *msg;
+               if (!strncmp(ref->name, "refs/tags/", 10))
+                       msg = "storing tag";
+               else
+                       msg = "storing head";
+               fprintf(stderr, "* %s: storing %s\n",
+                       ref->name, note);
+               show_new(type, ref->new_sha1);
+               return s_update_ref(msg, ref, 0);
+       }
+
+       strcpy(oldh, find_unique_abbrev(current->object.sha1, DEFAULT_ABBREV));
+       strcpy(newh, find_unique_abbrev(ref->new_sha1, DEFAULT_ABBREV));
+
+       if (in_merge_bases(current, &updated, 1)) {
+               fprintf(stderr, "* %s: fast forward to %s\n",
+                       ref->name, note);
+               fprintf(stderr, "  old..new: %s..%s\n", oldh, newh);
+               return s_update_ref("fast forward", ref, 1);
+       }
+       if (!force && !ref->force) {
+               fprintf(stderr,
+                       "* %s: not updating to non-fast forward %s\n",
+                       ref->name, note);
+               fprintf(stderr,
+                       "  old...new: %s...%s\n", oldh, newh);
+               return 1;
+       }
+       fprintf(stderr,
+               "* %s: forcing update to non-fast forward %s\n",
+               ref->name, note);
+       fprintf(stderr, "  old...new: %s...%s\n", oldh, newh);
+       return s_update_ref("forced-update", ref, 1);
+}
+
+static void store_updated_refs(const char *url, struct ref *ref_map)
+{
+       FILE *fp;
+       struct commit *commit;
+       int url_len, i, note_len;
+       char note[1024];
+       const char *what, *kind;
+       struct ref *rm;
+
+       fp = fopen(git_path("FETCH_HEAD"), "a");
+       for (rm = ref_map; rm; rm = rm->next) {
+               struct ref *ref = NULL;
+
+               if (rm->peer_ref) {
+                       ref = xcalloc(1, sizeof(*ref) + strlen(rm->peer_ref->name) + 1);
+                       strcpy(ref->name, rm->peer_ref->name);
+                       hashcpy(ref->old_sha1, rm->peer_ref->old_sha1);
+                       hashcpy(ref->new_sha1, rm->old_sha1);
+                       ref->force = rm->peer_ref->force;
+               }
+
+               commit = lookup_commit_reference_gently(rm->old_sha1, 1);
+               if (!commit)
+                       rm->merge = 0;
+
+               if (!strcmp(rm->name, "HEAD")) {
+                       kind = "";
+                       what = "";
+               }
+               else if (!prefixcmp(rm->name, "refs/heads/")) {
+                       kind = "branch";
+                       what = rm->name + 11;
+               }
+               else if (!prefixcmp(rm->name, "refs/tags/")) {
+                       kind = "tag";
+                       what = rm->name + 10;
+               }
+               else if (!prefixcmp(rm->name, "refs/remotes/")) {
+                       kind = "remote branch";
+                       what = rm->name + 13;
+               }
+               else {
+                       kind = "";
+                       what = rm->name;
+               }
+
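+               /*
+                * Strip trailing slashes and a ".git" suffix from the
+                * URL recorded in the FETCH_HEAD note.
+                */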
+               url_len = strlen(url);
+               for (i = url_len - 1; 0 <= i && url[i] == '/'; i--)
+                       ;
+               url_len = i + 1;
+               if (4 < i && !strncmp(".git", url + i - 3, 4))
+                       url_len = i - 3;
+
+               note_len = 0;
+               if (*what) {
+                       if (*kind)
+                               note_len += sprintf(note + note_len, "%s ",
+                                                   kind);
+                       note_len += sprintf(note + note_len, "'%s' of ", what);
+               }
+               note_len += sprintf(note + note_len, "%.*s", url_len, url);
+               fprintf(fp, "%s\t%s\t%s\n",
+                       sha1_to_hex(commit ? commit->object.sha1 :
+                                   rm->old_sha1),
+                       rm->merge ? "" : "not-for-merge",
+                       note);
+
+               if (ref)
+                       update_local_ref(ref, note, verbose);
+       }
+       fclose(fp);
+}
+
+static int fetch_refs(struct transport *transport, struct ref *ref_map)
+{
+       int ret = transport_fetch_refs(transport, ref_map);
+       if (!ret)
+               store_updated_refs(transport->url, ref_map);
+       transport_unlock_pack(transport);
+       return ret;
+}
+
+static int add_existing(const char *refname, const unsigned char *sha1,
+                       int flag, void *cbdata)
+{
+       struct path_list *list = (struct path_list *)cbdata;
+       path_list_insert(refname, list);
+       return 0;
+}
+
+static struct ref *find_non_local_tags(struct transport *transport,
+                                      struct ref *fetch_map)
+{
+       static struct path_list existing_refs = { NULL, 0, 0, 0 };
+       struct path_list new_refs = { NULL, 0, 0, 1 };
+       char *ref_name;
+       int ref_name_len;
+       unsigned char *ref_sha1;
+       struct ref *tag_ref;
+       struct ref *rm = NULL;
+       struct ref *ref_map = NULL;
+       struct ref **tail = &ref_map;
+       struct ref *ref;
+
+       for_each_ref(add_existing, &existing_refs);
+       for (ref = transport_get_remote_refs(transport); ref; ref = ref->next) {
+               if (prefixcmp(ref->name, "refs/tags"))
+                       continue;
+
+               ref_name = xstrdup(ref->name);
+               ref_name_len = strlen(ref_name);
+               ref_sha1 = ref->old_sha1;
+
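+               /*
+                * A peeled entry "X^{}" carries the object the tag points
+                * at; look up the plain "X" entry so that the sha1 we
+                * record is that of the tag object itself.
+                */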
+               if (!strcmp(ref_name + ref_name_len - 3, "^{}")) {
+                       ref_name[ref_name_len - 3] = 0;
+                       tag_ref = transport_get_remote_refs(transport);
+                       while (tag_ref) {
+                               if (!strcmp(tag_ref->name, ref_name)) {
+                                       ref_sha1 = tag_ref->old_sha1;
+                                       break;
+                               }
+                               tag_ref = tag_ref->next;
+                       }
+               }
+
+               if (!path_list_has_path(&existing_refs, ref_name) &&
+                   !path_list_has_path(&new_refs, ref_name) &&
+                   lookup_object(ref->old_sha1)) {
+                       fprintf(stderr, "Auto-following %s\n",
+                               ref_name);
+
+                       path_list_insert(ref_name, &new_refs);
+
+                       rm = alloc_ref(strlen(ref_name) + 1);
+                       strcpy(rm->name, ref_name);
+                       rm->peer_ref = alloc_ref(strlen(ref_name) + 1);
+                       strcpy(rm->peer_ref->name, ref_name);
+                       hashcpy(rm->old_sha1, ref_sha1);
+
+                       *tail = rm;
+                       tail = &rm->next;
+               }
+               free(ref_name);
+       }
+
+       return ref_map;
+}
+
+static int do_fetch(struct transport *transport,
+                   struct refspec *refs, int ref_count)
+{
+       struct ref *ref_map, *fetch_map;
+       struct ref *rm;
+       int autotags = (transport->remote->fetch_tags == 1);
+       if (transport->remote->fetch_tags == 2 && !no_tags)
+               tags = 1;
+       if (transport->remote->fetch_tags == -1)
+               no_tags = 1;
+
+       if (!transport->get_refs_list || !transport->fetch)
+               die("Don't know how to fetch from %s", transport->url);
+
+       /* if not appending, truncate FETCH_HEAD */
+       if (!append)
+               fclose(fopen(git_path("FETCH_HEAD"), "w"));
+
+       ref_map = get_ref_map(transport, refs, ref_count, tags, &autotags);
+
+       for (rm = ref_map; rm; rm = rm->next) {
+               if (rm->peer_ref)
+                       read_ref(rm->peer_ref->name, rm->peer_ref->old_sha1);
+       }
+
+       if (fetch_refs(transport, ref_map)) {
+               free_refs(ref_map);
+               return 1;
+       }
+
+       fetch_map = ref_map;
+
+       /* if neither --no-tags nor --tags was specified, do automated tag
+        * following ... */
+       if (!(tags || no_tags) && autotags) {
+               ref_map = find_non_local_tags(transport, fetch_map);
+               if (ref_map) {
+                       transport_set_option(transport, TRANS_OPT_DEPTH, "0");
+                       fetch_refs(transport, ref_map);
+               }
+               free_refs(ref_map);
+       }
+
+       free_refs(fetch_map);
+
+       return 0;
+}
+
+static void set_option(const char *name, const char *value)
+{
+       int r = transport_set_option(transport, name, value);
+       if (r < 0)
+               die("Option \"%s\" value \"%s\" is not valid for %s",
+                       name, value, transport->url);
+       if (r > 0)
+               warning("Option \"%s\" is ignored for %s",
+                       name, transport->url);
+}
+
+int cmd_fetch(int argc, const char **argv, const char *prefix)
+{
+       struct remote *remote;
+       int i, j, rla_offset;
+       static const char **refs = NULL;
+       int ref_nr = 0;
+       int cmd_len = 0;
+       const char *depth = NULL, *upload_pack = NULL;
+       int keep = 0;
+
+       for (i = 1; i < argc; i++) {
+               const char *arg = argv[i];
+               cmd_len += strlen(arg);
+
+               if (arg[0] != '-')
+                       break;
+               if (!strcmp(arg, "--append") || !strcmp(arg, "-a")) {
+                       append = 1;
+                       continue;
+               }
+               if (!prefixcmp(arg, "--upload-pack=")) {
+                       upload_pack = arg + 14;
+                       continue;
+               }
+               if (!strcmp(arg, "--upload-pack")) {
+                       i++;
+                       if (i == argc)
+                               usage(fetch_usage);
+                       upload_pack = argv[i];
+                       continue;
+               }
+               if (!strcmp(arg, "--force") || !strcmp(arg, "-f")) {
+                       force = 1;
+                       continue;
+               }
+               if (!strcmp(arg, "--no-tags")) {
+                       no_tags = 1;
+                       continue;
+               }
+               if (!strcmp(arg, "--tags") || !strcmp(arg, "-t")) {
+                       tags = 1;
+                       continue;
+               }
+               if (!strcmp(arg, "--keep") || !strcmp(arg, "-k")) {
+                       keep = 1;
+                       continue;
+               }
+               if (!strcmp(arg, "--update-head-ok") || !strcmp(arg, "-u")) {
+                       update_head_ok = 1;
+                       continue;
+               }
+               if (!prefixcmp(arg, "--depth=")) {
+                       depth = arg + 8;
+                       continue;
+               }
+               if (!strcmp(arg, "--depth")) {
+                       i++;
+                       if (i == argc)
+                               usage(fetch_usage);
+                       depth = argv[i];
+                       continue;
+               }
+               if (!strcmp(arg, "--quiet")) {
+                       quiet = 1;
+                       continue;
+               }
+               if (!strcmp(arg, "--verbose") || !strcmp(arg, "-v")) {
+                       verbose++;
+                       continue;
+               }
+               usage(fetch_usage);
+       }
+
+       for (j = i; j < argc; j++)
+               cmd_len += strlen(argv[j]);
+
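+       /* Reconstruct the command line for use as the default reflog message. */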
+       default_rla = xmalloc(cmd_len + 5 + argc + 1);
+       sprintf(default_rla, "fetch");
+       rla_offset = strlen(default_rla);
+       for (j = 1; j < argc; j++) {
+               sprintf(default_rla + rla_offset, " %s", argv[j]);
+               rla_offset += strlen(argv[j]) + 1;
+       }
+
+       if (i == argc)
+               remote = remote_get(NULL);
+       else
+               remote = remote_get(argv[i++]);
+
+       transport = transport_get(remote, remote->url[0]);
+       if (verbose >= 2)
+               transport->verbose = 1;
+       if (quiet)
+               transport->verbose = -1;
+       if (upload_pack)
+               set_option(TRANS_OPT_UPLOADPACK, upload_pack);
+       if (keep)
+               set_option(TRANS_OPT_KEEP, "yes");
+       if (depth)
+               set_option(TRANS_OPT_DEPTH, depth);
+
+       if (!transport->url)
+               die("Where do you want to fetch from today?");
+
+       if (i < argc) {
+               int j = 0;
+               refs = xcalloc(argc - i + 1, sizeof(const char *));
+               while (i < argc) {
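+                       /*
+                        * "git fetch <remote> tag <name>" is shorthand for
+                        * fetching refs/tags/<name>:refs/tags/<name>.
+                        */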
+                       if (!strcmp(argv[i], "tag")) {
+                               char *ref;
+                               i++;
+                               ref = xmalloc(strlen(argv[i]) * 2 + 22);
+                               strcpy(ref, "refs/tags/");
+                               strcat(ref, argv[i]);
+                               strcat(ref, ":refs/tags/");
+                               strcat(ref, argv[i]);
+                               refs[j++] = ref;
+                       } else
+                               refs[j++] = argv[i];
+                       i++;
+               }
+               refs[j] = NULL;
+               ref_nr = j;
+       }
+
+       signal(SIGINT, unlock_pack_on_signal);
+       atexit(unlock_pack);
+       return do_fetch(transport, parse_ref_spec(ref_nr, refs), ref_nr);
+}
diff --git a/builtin-http-fetch.c b/builtin-http-fetch.c
new file mode 100644 (file)
index 0000000..4a50dbd
--- /dev/null
+++ b/builtin-http-fetch.c
@@ -0,0 +1,77 @@
+#include "cache.h"
+#include "walker.h"
+
+int cmd_http_fetch(int argc, const char **argv, const char *prefix)
+{
+       struct walker *walker;
+       int commits_on_stdin = 0;
+       int commits;
+       const char **write_ref = NULL;
+       char **commit_id;
+       const char *url;
+       int arg = 1;
+       int rc = 0;
+       int get_tree = 0;
+       int get_history = 0;
+       int get_all = 0;
+       int get_verbosely = 0;
+       int get_recover = 0;
+
+       git_config(git_default_config);
+
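+       /*
+        * Walker options: -t fetch trees, -c fetch the commit history,
+        * -a all of the above, -v verbose, -w <ref> write what was
+        * fetched to a local ref, --stdin read the commits (and refs to
+        * write) from standard input.
+        */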
+       while (arg < argc && argv[arg][0] == '-') {
+               if (argv[arg][1] == 't') {
+                       get_tree = 1;
+               } else if (argv[arg][1] == 'c') {
+                       get_history = 1;
+               } else if (argv[arg][1] == 'a') {
+                       get_all = 1;
+                       get_tree = 1;
+                       get_history = 1;
+               } else if (argv[arg][1] == 'v') {
+                       get_verbosely = 1;
+               } else if (argv[arg][1] == 'w') {
+                       write_ref = &argv[arg + 1];
+                       arg++;
+               } else if (!strcmp(argv[arg], "--recover")) {
+                       get_recover = 1;
+               } else if (!strcmp(argv[arg], "--stdin")) {
+                       commits_on_stdin = 1;
+               }
+               arg++;
+       }
+       if (argc < arg + 2 - commits_on_stdin) {
+               usage("git-http-fetch [-c] [-t] [-a] [-v] [--recover] [-w ref] [--stdin] commit-id url");
+               return 1;
+       }
+       if (commits_on_stdin) {
+               commits = walker_targets_stdin(&commit_id, &write_ref);
+       } else {
+               commit_id = (char **) &argv[arg++];
+               commits = 1;
+       }
+       url = argv[arg];
+
+       walker = get_http_walker(url);
+       walker->get_tree = get_tree;
+       walker->get_history = get_history;
+       walker->get_all = get_all;
+       walker->get_verbosely = get_verbosely;
+       walker->get_recover = get_recover;
+
+       rc = walker_fetch(walker, commits, commit_id, write_ref, url);
+
+       if (commits_on_stdin)
+               walker_targets_free(commits, commit_id, write_ref);
+
+       if (walker->corrupt_object_found) {
+               fprintf(stderr,
+"Some loose object were found to be corrupt, but they might be just\n"
+"a false '404 Not Found' error message sent with incorrect HTTP\n"
+"status code.  Suggest running git-fsck.\n");
+       }
+
+       walker_free(walker);
+
+       return rc;
+}
diff --git a/builtin-push.c b/builtin-push.c
index 141380b852771e107ee5ae1df3f8b9e30cfcea6b..4b39ef3852a5dcb9b099527d4aafc33ba3bb6da1 100644 (file)
--- a/builtin-push.c
+++ b/builtin-push.c
@@ -6,10 +6,11 @@
 #include "run-command.h"
 #include "builtin.h"
 #include "remote.h"
+#include "transport.h"
 
 static const char push_usage[] = "git-push [--all] [--dry-run] [--tags] [--receive-pack=<git-receive-pack>] [--repo=all] [-f | --force] [-v] [<repository> <refspec>...]";
 
-static int all, dry_run, force, thin, verbose;
+static int thin, verbose;
 static const char *receivepack;
 
 static const char **refspec;
@@ -43,82 +44,40 @@ static void set_refspecs(const char **refs, int nr)
        }
 }
 
-static int do_push(const char *repo)
+static int do_push(const char *repo, int flags)
 {
        int i, errs;
-       int common_argc;
-       const char **argv;
-       int argc;
        struct remote *remote = remote_get(repo);
 
        if (!remote)
                die("bad repository '%s'", repo);
 
-       if (remote->receivepack) {
-               char *rp = xmalloc(strlen(remote->receivepack) + 16);
-               sprintf(rp, "--receive-pack=%s", remote->receivepack);
-               receivepack = rp;
-       }
-       if (!refspec && !all && remote->push_refspec_nr) {
+       if (!refspec
+               && !(flags & TRANSPORT_PUSH_ALL)
+               && remote->push_refspec_nr) {
                refspec = remote->push_refspec;
                refspec_nr = remote->push_refspec_nr;
        }
-
-       argv = xmalloc((refspec_nr + 10) * sizeof(char *));
-       argv[0] = "dummy-send-pack";
-       argc = 1;
-       if (all)
-               argv[argc++] = "--all";
-       if (dry_run)
-               argv[argc++] = "--dry-run";
-       if (force)
-               argv[argc++] = "--force";
-       if (receivepack)
-               argv[argc++] = receivepack;
-       common_argc = argc;
-
        errs = 0;
-       for (i = 0; i < remote->uri_nr; i++) {
+       for (i = 0; i < remote->url_nr; i++) {
+               struct transport *transport =
+                       transport_get(remote, remote->url[i]);
                int err;
-               int dest_argc = common_argc;
-               int dest_refspec_nr = refspec_nr;
-               const char **dest_refspec = refspec;
-               const char *dest = remote->uri[i];
-               const char *sender = "send-pack";
-               if (!prefixcmp(dest, "http://") ||
-                   !prefixcmp(dest, "https://"))
-                       sender = "http-push";
-               else {
-                       char *rem = xmalloc(strlen(remote->name) + 10);
-                       sprintf(rem, "--remote=%s", remote->name);
-                       argv[dest_argc++] = rem;
-                       if (thin)
-                               argv[dest_argc++] = "--thin";
-               }
-               argv[0] = sender;
-               argv[dest_argc++] = dest;
-               while (dest_refspec_nr--)
-                       argv[dest_argc++] = *dest_refspec++;
-               argv[dest_argc] = NULL;
+               if (receivepack)
+                       transport_set_option(transport,
+                                            TRANS_OPT_RECEIVEPACK, receivepack);
+               if (thin)
+                       transport_set_option(transport, TRANS_OPT_THIN, "yes");
+
                if (verbose)
-                       fprintf(stderr, "Pushing to %s\n", dest);
-               err = run_command_v_opt(argv, RUN_GIT_CMD);
+                       fprintf(stderr, "Pushing to %s\n", remote->url[i]);
+               err = transport_push(transport, refspec_nr, refspec, flags);
+               err |= transport_disconnect(transport);
+
                if (!err)
                        continue;
 
-               error("failed to push to '%s'", remote->uri[i]);
-               switch (err) {
-               case -ERR_RUN_COMMAND_FORK:
-                       error("unable to fork for %s", sender);
-               case -ERR_RUN_COMMAND_EXEC:
-                       error("unable to exec %s", sender);
-                       break;
-               case -ERR_RUN_COMMAND_WAITPID:
-               case -ERR_RUN_COMMAND_WAITPID_WRONG_PID:
-               case -ERR_RUN_COMMAND_WAITPID_SIGNAL:
-               case -ERR_RUN_COMMAND_WAITPID_NOEXIT:
-                       error("%s died with strange error", sender);
-               }
+               error("failed to push to '%s'", remote->url[i]);
                errs++;
        }
        return !!errs;
@@ -127,6 +86,7 @@ static int do_push(const char *repo)
 int cmd_push(int argc, const char **argv, const char *prefix)
 {
        int i;
+       int flags = 0;
        const char *repo = NULL;        /* default repository */
 
        for (i = 1; i < argc; i++) {
@@ -146,11 +106,11 @@ int cmd_push(int argc, const char **argv, const char *prefix)
                        continue;
                }
                if (!strcmp(arg, "--all")) {
-                       all = 1;
+                       flags |= TRANSPORT_PUSH_ALL;
                        continue;
                }
                if (!strcmp(arg, "--dry-run")) {
-                       dry_run = 1;
+                       flags |= TRANSPORT_PUSH_DRY_RUN;
                        continue;
                }
                if (!strcmp(arg, "--tags")) {
@@ -158,7 +118,7 @@ int cmd_push(int argc, const char **argv, const char *prefix)
                        continue;
                }
                if (!strcmp(arg, "--force") || !strcmp(arg, "-f")) {
-                       force = 1;
+                       flags |= TRANSPORT_PUSH_FORCE;
                        continue;
                }
                if (!strcmp(arg, "--thin")) {
@@ -170,18 +130,18 @@ int cmd_push(int argc, const char **argv, const char *prefix)
                        continue;
                }
                if (!prefixcmp(arg, "--receive-pack=")) {
-                       receivepack = arg;
+                       receivepack = arg + 15;
                        continue;
                }
                if (!prefixcmp(arg, "--exec=")) {
-                       receivepack = arg;
+                       receivepack = arg + 7;
                        continue;
                }
                usage(push_usage);
        }
        set_refspecs(argv + i, argc - i);
-       if (all && refspec)
+       if ((flags & TRANSPORT_PUSH_ALL) && refspec)
                usage(push_usage);
 
-       return do_push(repo);
+       return do_push(repo, flags);
 }
index d6f2c76b86174e6353c3d6146368e3ff71406a22..65cc0fb34a952356e0c5fce3e2e4bea2f1804b02 100644 (file)
--- a/builtin.h
+++ b/builtin.h
@@ -30,6 +30,8 @@ extern int cmd_diff_files(int argc, const char **argv, const char *prefix);
 extern int cmd_diff_index(int argc, const char **argv, const char *prefix);
 extern int cmd_diff(int argc, const char **argv, const char *prefix);
 extern int cmd_diff_tree(int argc, const char **argv, const char *prefix);
+extern int cmd_fetch(int argc, const char **argv, const char *prefix);
+extern int cmd_fetch_pack(int argc, const char **argv, const char *prefix);
 extern int cmd_fetch__tool(int argc, const char **argv, const char *prefix);
 extern int cmd_fmt_merge_msg(int argc, const char **argv, const char *prefix);
 extern int cmd_for_each_ref(int argc, const char **argv, const char *prefix);
@@ -39,6 +41,7 @@ extern int cmd_gc(int argc, const char **argv, const char *prefix);
 extern int cmd_get_tar_commit_id(int argc, const char **argv, const char *prefix);
 extern int cmd_grep(int argc, const char **argv, const char *prefix);
 extern int cmd_help(int argc, const char **argv, const char *prefix);
+extern int cmd_http_fetch(int argc, const char **argv, const char *prefix);
 extern int cmd_init_db(int argc, const char **argv, const char *prefix);
 extern int cmd_log(int argc, const char **argv, const char *prefix);
 extern int cmd_log_reflog(int argc, const char **argv, const char *prefix);
diff --git a/bundle.c b/bundle.c
new file mode 100644 (file)
index 0000000..0869fcf
--- /dev/null
+++ b/bundle.c
@@ -0,0 +1,343 @@
+#include "cache.h"
+#include "bundle.h"
+#include "object.h"
+#include "commit.h"
+#include "diff.h"
+#include "revision.h"
+#include "list-objects.h"
+#include "run-command.h"
+
+static const char bundle_signature[] = "# v2 git bundle\n";
+
+static void add_to_ref_list(const unsigned char *sha1, const char *name,
+               struct ref_list *list)
+{
+       if (list->nr + 1 >= list->alloc) {
+               list->alloc = alloc_nr(list->nr + 1);
+               list->list = xrealloc(list->list,
+                               list->alloc * sizeof(list->list[0]));
+       }
+       memcpy(list->list[list->nr].sha1, sha1, 20);
+       list->list[list->nr].name = xstrdup(name);
+       list->nr++;
+}
+
+/* returns an fd */
+int read_bundle_header(const char *path, struct bundle_header *header) {
+       char buffer[1024];
+       int fd;
+       long fpos;
+       FILE *ffd = fopen(path, "rb");
+
+       if (!ffd)
+               return error("could not open '%s'", path);
+       if (!fgets(buffer, sizeof(buffer), ffd) ||
+                       strcmp(buffer, bundle_signature)) {
+               fclose(ffd);
+               return error("'%s' does not look like a v2 bundle file", path);
+       }
+       while (fgets(buffer, sizeof(buffer), ffd)
+                       && buffer[0] != '\n') {
+               int is_prereq = buffer[0] == '-';
+               int offset = is_prereq ? 1 : 0;
+               int len = strlen(buffer);
+               unsigned char sha1[20];
+               struct ref_list *list = is_prereq ? &header->prerequisites
+                       : &header->references;
+               char delim;
+
+               if (buffer[len - 1] == '\n')
+                       buffer[len - 1] = '\0';
+               if (get_sha1_hex(buffer + offset, sha1)) {
+                       warning("unrecognized header: %s", buffer);
+                       continue;
+               }
+               delim = buffer[40 + offset];
+               if (!isspace(delim) && (delim != '\0' || !is_prereq))
+                       die ("invalid header: %s", buffer);
+               add_to_ref_list(sha1, isspace(delim) ?
+                               buffer + 41 + offset : "", list);
+       }
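+
+       /*
+        * The pack data follows the header; reopen the file with a raw
+        * file descriptor positioned just past the header so the caller
+        * can read the pack directly from it.
+        */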
+       fpos = ftell(ffd);
+       fclose(ffd);
+       fd = open(path, O_RDONLY);
+       if (fd < 0)
+               return error("could not open '%s'", path);
+       lseek(fd, fpos, SEEK_SET);
+       return fd;
+}
+
+static int list_refs(struct ref_list *r, int argc, const char **argv)
+{
+       int i;
+
+       for (i = 0; i < r->nr; i++) {
+               if (argc > 1) {
+                       int j;
+                       for (j = 1; j < argc; j++)
+                               if (!strcmp(r->list[i].name, argv[j]))
+                                       break;
+                       if (j == argc)
+                               continue;
+               }
+               printf("%s %s\n", sha1_to_hex(r->list[i].sha1),
+                               r->list[i].name);
+       }
+       return 0;
+}
+
+#define PREREQ_MARK (1u<<16)
+
+int verify_bundle(struct bundle_header *header, int verbose)
+{
+       /*
+        * Do the fast check first; if any prereqs are missing, go over them
+        * one by one to report the errors verbosely.
+        */
+       struct ref_list *p = &header->prerequisites;
+       struct rev_info revs;
+       const char *argv[] = {NULL, "--all"};
+       struct object_array refs;
+       struct commit *commit;
+       int i, ret = 0, req_nr;
+       const char *message = "Repository lacks these prerequisite commits:";
+
+       init_revisions(&revs, NULL);
+       for (i = 0; i < p->nr; i++) {
+               struct ref_list_entry *e = p->list + i;
+               struct object *o = parse_object(e->sha1);
+               if (o) {
+                       o->flags |= PREREQ_MARK;
+                       add_pending_object(&revs, o, e->name);
+                       continue;
+               }
+               if (++ret == 1)
+                       error(message);
+               error("%s %s", sha1_to_hex(e->sha1), e->name);
+       }
+       if (revs.pending.nr != p->nr)
+               return ret;
+       req_nr = revs.pending.nr;
+       setup_revisions(2, argv, &revs, NULL);
+
+       memset(&refs, 0, sizeof(struct object_array));
+       for (i = 0; i < revs.pending.nr; i++) {
+               struct object_array_entry *e = revs.pending.objects + i;
+               add_object_array(e->item, e->name, &refs);
+       }
+
+       prepare_revision_walk(&revs);
+
+       i = req_nr;
+       while (i && (commit = get_revision(&revs)))
+               if (commit->object.flags & PREREQ_MARK)
+                       i--;
+
+       for (i = 0; i < req_nr; i++)
+               if (!(refs.objects[i].item->flags & SHOWN)) {
+                       if (++ret == 1)
+                               error(message);
+                       error("%s %s", sha1_to_hex(refs.objects[i].item->sha1),
+                               refs.objects[i].name);
+               }
+
+       for (i = 0; i < refs.nr; i++)
+               clear_commit_marks((struct commit *)refs.objects[i].item, -1);
+
+       if (verbose) {
+               struct ref_list *r;
+
+               r = &header->references;
+               printf("The bundle contains %d ref%s\n",
+                      r->nr, (1 < r->nr) ? "s" : "");
+               list_refs(r, 0, NULL);
+               r = &header->prerequisites;
+               printf("The bundle requires these %d ref%s\n",
+                      r->nr, (1 < r->nr) ? "s" : "");
+               list_refs(r, 0, NULL);
+       }
+       return ret;
+}
+
+int list_bundle_refs(struct bundle_header *header, int argc, const char **argv)
+{
+       return list_refs(&header->references, argc, argv);
+}
+
+int create_bundle(struct bundle_header *header, const char *path,
+               int argc, const char **argv)
+{
+       static struct lock_file lock;
+       int bundle_fd = -1;
+       int bundle_to_stdout;
+       const char **argv_boundary = xmalloc((argc + 4) * sizeof(const char *));
+       const char **argv_pack = xmalloc(5 * sizeof(const char *));
+       int i, ref_count = 0;
+       char buffer[1024];
+       struct rev_info revs;
+       struct child_process rls;
+       FILE *rls_fout;
+
+       bundle_to_stdout = !strcmp(path, "-");
+       if (bundle_to_stdout)
+               bundle_fd = 1;
+       else
+               bundle_fd = hold_lock_file_for_update(&lock, path, 1);
+
+       /* write signature */
+       write_or_die(bundle_fd, bundle_signature, strlen(bundle_signature));
+
+       /* init revs to list objects for pack-objects later */
+       save_commit_buffer = 0;
+       init_revisions(&revs, NULL);
+
+       /* write prerequisites */
+       memcpy(argv_boundary + 3, argv + 1, argc * sizeof(const char *));
+       argv_boundary[0] = "rev-list";
+       argv_boundary[1] = "--boundary";
+       argv_boundary[2] = "--pretty=oneline";
+       argv_boundary[argc + 2] = NULL;
+       memset(&rls, 0, sizeof(rls));
+       rls.argv = argv_boundary;
+       rls.out = -1;
+       rls.git_cmd = 1;
+       if (start_command(&rls))
+               return -1;
+       rls_fout = fdopen(rls.out, "r");
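+       /*
+        * Boundary commits (lines starting with '-') become the bundle's
+        * prerequisites; everything else rev-list printed is marked SHOWN
+        * so that refs excluded by limiting options can be detected below.
+        */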
+       while (fgets(buffer, sizeof(buffer), rls_fout)) {
+               unsigned char sha1[20];
+               if (buffer[0] == '-') {
+                       write_or_die(bundle_fd, buffer, strlen(buffer));
+                       if (!get_sha1_hex(buffer + 1, sha1)) {
+                               struct object *object = parse_object(sha1);
+                               object->flags |= UNINTERESTING;
+                               add_pending_object(&revs, object, buffer);
+                       }
+               } else if (!get_sha1_hex(buffer, sha1)) {
+                       struct object *object = parse_object(sha1);
+                       object->flags |= SHOWN;
+               }
+       }
+       fclose(rls_fout);
+       if (finish_command(&rls))
+               return error("rev-list died");
+
+       /* write references */
+       argc = setup_revisions(argc, argv, &revs, NULL);
+       if (argc > 1)
+               return error("unrecognized argument: '%s'", argv[1]);
+
+       for (i = 0; i < revs.pending.nr; i++) {
+               struct object_array_entry *e = revs.pending.objects + i;
+               unsigned char sha1[20];
+               char *ref;
+
+               if (e->item->flags & UNINTERESTING)
+                       continue;
+               if (dwim_ref(e->name, strlen(e->name), sha1, &ref) != 1)
+                       continue;
+               /*
+                * Make sure the refs we wrote out are correct; --max-count and
+                * other limiting options could have prevented all the tips
+                * from getting output.
+                *
+                * Non commit objects such as tags and blobs do not have
+                * this issue as they are not affected by those extra
+                * constraints.
+                */
+               if (!(e->item->flags & SHOWN) && e->item->type == OBJ_COMMIT) {
+                       warning("ref '%s' is excluded by the rev-list options",
+                               e->name);
+                       free(ref);
+                       continue;
+               }
+               /*
+                * If you run "git bundle create bndl v1.0..v2.0", the
+                * name of the positive ref is "v2.0" but that is the
+                * commit that is referenced by the tag, and not the tag
+                * itself.
+                */
+               if (hashcmp(sha1, e->item->sha1)) {
+                       /*
+                        * Is this the positive end of a range expressed
+                        * in terms of a tag (e.g. v2.0 from the range
+                        * "v1.0..v2.0")?
+                        */
+                       struct commit *one = lookup_commit_reference(sha1);
+                       struct object *obj;
+
+                       if (e->item == &(one->object)) {
+                               /*
+                                * Need to include e->name as an
+                                * independent ref to the pack-objects
+                                * input, so that the tag is included
+                                * in the output; otherwise we would
+                                * end up triggering "empty bundle"
+                                * error.
+                                */
+                               obj = parse_object(sha1);
+                               obj->flags |= SHOWN;
+                               add_pending_object(&revs, obj, e->name);
+                       }
+                       free(ref);
+                       continue;
+               }
+
+               ref_count++;
+               write_or_die(bundle_fd, sha1_to_hex(e->item->sha1), 40);
+               write_or_die(bundle_fd, " ", 1);
+               write_or_die(bundle_fd, ref, strlen(ref));
+               write_or_die(bundle_fd, "\n", 1);
+               free(ref);
+       }
+       if (!ref_count)
+               die ("Refusing to create empty bundle.");
+
+       /* end header */
+       write_or_die(bundle_fd, "\n", 1);
+
+       /* write pack */
+       argv_pack[0] = "pack-objects";
+       argv_pack[1] = "--all-progress";
+       argv_pack[2] = "--stdout";
+       argv_pack[3] = "--thin";
+       argv_pack[4] = NULL;
+       memset(&rls, 0, sizeof(rls));
+       rls.argv = argv_pack;
+       rls.in = -1;
+       rls.out = bundle_fd;
+       rls.git_cmd = 1;
+       if (start_command(&rls))
+               return error("Could not spawn pack-objects");
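+       /*
+        * Feed pack-objects the same positive and negative objects used
+        * for the header; '^' marks the prerequisites as edges of the
+        * thin pack.
+        */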
+       for (i = 0; i < revs.pending.nr; i++) {
+               struct object *object = revs.pending.objects[i].item;
+               if (object->flags & UNINTERESTING)
+                       write(rls.in, "^", 1);
+               write(rls.in, sha1_to_hex(object->sha1), 40);
+               write(rls.in, "\n", 1);
+       }
+       if (finish_command(&rls))
+               return error ("pack-objects died");
+       close(bundle_fd);
+       if (!bundle_to_stdout)
+               commit_lock_file(&lock);
+       return 0;
+}
+
+int unbundle(struct bundle_header *header, int bundle_fd)
+{
+       const char *argv_index_pack[] = {"index-pack",
+               "--fix-thin", "--stdin", NULL};
+       struct child_process ip;
+
+       if (verify_bundle(header, 0))
+               return -1;
+       memset(&ip, 0, sizeof(ip));
+       ip.argv = argv_index_pack;
+       ip.in = bundle_fd;
+       ip.no_stdout = 1;
+       ip.git_cmd = 1;
+       if (run_command(&ip))
+               return error("index-pack died");
+       return 0;
+}
diff --git a/bundle.h b/bundle.h
new file mode 100644 (file)
index 0000000..e2aedd6
--- /dev/null
+++ b/bundle.h
@@ -0,0 +1,25 @@
+#ifndef BUNDLE_H
+#define BUNDLE_H
+
+struct ref_list {
+       unsigned int nr, alloc;
+       struct ref_list_entry {
+               unsigned char sha1[20];
+               char *name;
+       } *list;
+};
+
+struct bundle_header {
+       struct ref_list prerequisites;
+       struct ref_list references;
+};
+
+int read_bundle_header(const char *path, struct bundle_header *header);
+int create_bundle(struct bundle_header *header, const char *path,
+               int argc, const char **argv);
+int verify_bundle(struct bundle_header *header, int verbose);
+int unbundle(struct bundle_header *header, int bundle_fd);
+int list_bundle_refs(struct bundle_header *header,
+               int argc, const char **argv);
+
+#endif
diff --git a/cache.h b/cache.h
index e0abcd697ce54853bbf4545d20a4ae88a95b533a..27485d36c2f56b6832f96e68ca8cbc7ffad0b957 100644 (file)
--- a/cache.h
+++ b/cache.h
@@ -493,6 +493,7 @@ struct ref {
        unsigned char old_sha1[20];
        unsigned char new_sha1[20];
        unsigned char force;
+       unsigned char merge;
        struct ref *peer_ref; /* when renaming */
        char name[FLEX_ARRAY]; /* more */
 };
diff --git a/compat/mkdtemp.c b/compat/mkdtemp.c
new file mode 100644 (file)
index 0000000..34d4b49
--- /dev/null
+++ b/compat/mkdtemp.c
@@ -0,0 +1,8 @@
+#include "../git-compat-util.h"
+
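+/*
+ * Minimal substitute for mkdtemp(3) on platforms that lack it: let
+ * mktemp() pick a name, then create the directory with mode 0700.
+ */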
+char *gitmkdtemp(char *template)
+{
+       if (!mktemp(template) || mkdir(template, 0700))
+               return NULL;
+       return template;
+}
diff --git a/connect.c b/connect.c
index 06d279e37ca73b6128479a990b9cd5dbd7cb1317..3d5c4ab7550d3665a4b24265c0c052e3c7e00231 100644 (file)
--- a/connect.c
+++ b/connect.c
@@ -72,9 +72,9 @@ struct ref **get_remote_heads(int in, struct ref **list,
                        continue;
                if (nr_match && !path_match(name, nr_match, match))
                        continue;
-               ref = alloc_ref(len - 40);
+               ref = alloc_ref(name_len + 1);
                hashcpy(ref->old_sha1, old_sha1);
-               memcpy(ref->name, buffer + 41, len - 40);
+               memcpy(ref->name, buffer + 41, name_len + 1);
                *list = ref;
                list = &ref->next;
        }
diff --git a/contrib/examples/git-fetch.sh b/contrib/examples/git-fetch.sh
new file mode 100755 (executable)
index 0000000..e44af2c
--- /dev/null
+++ b/contrib/examples/git-fetch.sh
@@ -0,0 +1,377 @@
+#!/bin/sh
+#
+
+USAGE='<fetch-options> <repository> <refspec>...'
+SUBDIRECTORY_OK=Yes
+. git-sh-setup
+set_reflog_action "fetch $*"
+cd_to_toplevel ;# probably unnecessary...
+
+. git-parse-remote
+_x40='[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]'
+_x40="$_x40$_x40$_x40$_x40$_x40$_x40$_x40$_x40"
+
+LF='
+'
+IFS="$LF"
+
+no_tags=
+tags=
+append=
+force=
+verbose=
+update_head_ok=
+exec=
+keep=
+shallow_depth=
+no_progress=
+test -t 1 || no_progress=--no-progress
+quiet=
+while test $# != 0
+do
+       case "$1" in
+       -a|--a|--ap|--app|--appe|--appen|--append)
+               append=t
+               ;;
+       --upl|--uplo|--uploa|--upload|--upload-|--upload-p|\
+       --upload-pa|--upload-pac|--upload-pack)
+               shift
+               exec="--upload-pack=$1"
+               ;;
+       --upl=*|--uplo=*|--uploa=*|--upload=*|\
+       --upload-=*|--upload-p=*|--upload-pa=*|--upload-pac=*|--upload-pack=*)
+               exec=--upload-pack=$(expr "z$1" : 'z-[^=]*=\(.*\)')
+               shift
+               ;;
+       -f|--f|--fo|--for|--forc|--force)
+               force=t
+               ;;
+       -t|--t|--ta|--tag|--tags)
+               tags=t
+               ;;
+       -n|--n|--no|--no-|--no-t|--no-ta|--no-tag|--no-tags)
+               no_tags=t
+               ;;
+       -u|--u|--up|--upd|--upda|--updat|--update|--update-|--update-h|\
+       --update-he|--update-hea|--update-head|--update-head-|\
+       --update-head-o|--update-head-ok)
+               update_head_ok=t
+               ;;
+       -q|--q|--qu|--qui|--quie|--quiet)
+               quiet=--quiet
+               ;;
+       -v|--verbose)
+               verbose="$verbose"Yes
+               ;;
+       -k|--k|--ke|--kee|--keep)
+               keep='-k -k'
+               ;;
+       --depth=*)
+               shallow_depth="--depth=`expr "z$1" : 'z-[^=]*=\(.*\)'`"
+               ;;
+       --depth)
+               shift
+               shallow_depth="--depth=$1"
+               ;;
+       -*)
+               usage
+               ;;
+       *)
+               break
+               ;;
+       esac
+       shift
+done
+
+case "$#" in
+0)
+       origin=$(get_default_remote)
+       test -n "$(get_remote_url ${origin})" ||
+               die "Where do you want to fetch from today?"
+       set x $origin ; shift ;;
+esac
+
+if test -z "$exec"
+then
+       # No command line override and we have configuration for the remote.
+       exec="--upload-pack=$(get_uploadpack $1)"
+fi
+
+remote_nick="$1"
+remote=$(get_remote_url "$@")
+refs=
+rref=
+rsync_slurped_objects=
+
+if test "" = "$append"
+then
+       : >"$GIT_DIR/FETCH_HEAD"
+fi
+
+# Global that is reused later
+ls_remote_result=$(git ls-remote $exec "$remote") ||
+       die "Cannot get the repository state from $remote"
+
+append_fetch_head () {
+       flags=
+       test -n "$verbose" && flags="$flags$LF-v"
+       test -n "$force$single_force" && flags="$flags$LF-f"
+       GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION" \
+               git fetch--tool $flags append-fetch-head "$@"
+}
+
+# updating the current HEAD with git-fetch in a bare
+# repository is always fine.
+if test -z "$update_head_ok" && test $(is_bare_repository) = false
+then
+       orig_head=$(git rev-parse --verify HEAD 2>/dev/null)
+fi
+
+# Allow --notags from remote.$1.tagopt
+case "$tags$no_tags" in
+'')
+       case "$(git config --get "remote.$1.tagopt")" in
+       --no-tags)
+               no_tags=t ;;
+       esac
+esac
+
+# If --tags (and later --heads or --all) is specified, we are not
+# talking about the defaults stored in the Pull: line of remotes or
+# branches files; we fetch only those plus any refspecs explicitly given.
+# Otherwise we do what we always did.
+
+reflist=$(get_remote_refs_for_fetch "$@")
+if test "$tags"
+then
+       taglist=`IFS='  ' &&
+                 echo "$ls_remote_result" |
+                 git show-ref --exclude-existing=refs/tags/ |
+                 while read sha1 name
+                 do
+                       echo ".${name}:${name}"
+                 done` || exit
+       if test "$#" -gt 1
+       then
+               # remote URL plus explicit refspecs; we need to merge them.
+               reflist="$reflist$LF$taglist"
+       else
+               # No explicit refspecs; fetch tags only.
+               reflist=$taglist
+       fi
+fi
+
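+# Fetch everything in one go: from ".", just resolve the refs locally;
+# from a bundle file, unbundle it; otherwise run git-fetch-pack, unless
+# the remote is a local repository whose wanted refs we can already reach.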
+fetch_all_at_once () {
+
+  eval=$(echo "$1" | git fetch--tool parse-reflist "-")
+  eval "$eval"
+
+    ( : subshell because we muck with IFS
+      IFS="    $LF"
+      (
+       if test "$remote" = . ; then
+           git show-ref $rref || echo failed "$remote"
+       elif test -f "$remote" ; then
+           test -n "$shallow_depth" &&
+               die "shallow clone with bundle is not supported"
+           git bundle unbundle "$remote" $rref ||
+           echo failed "$remote"
+       else
+               if      test -d "$remote" &&
+
+                       # The remote might be our alternate.  With
+                       # this optimization we will bypass fetch-pack
+                       # altogether, which means we cannot be doing
+                       # the shallow stuff at all.
+                       test ! -f "$GIT_DIR/shallow" &&
+                       test -z "$shallow_depth" &&
+
+                       # See if all of what we are going to fetch are
+                       # connected to our repository's tips, in which
+                       # case we do not have to do any fetch.
+                       theirs=$(echo "$ls_remote_result" | \
+                               git fetch--tool -s pick-rref "$rref" "-") &&
+
+                       # This will barf when $theirs reach an object that
+                       # we do not have in our repository.  Otherwise,
+                       # we already have everything the fetch would bring in.
+                       git rev-list --objects $theirs --not --all \
+                               >/dev/null 2>/dev/null
+               then
+                       echo "$ls_remote_result" | \
+                               git fetch--tool pick-rref "$rref" "-"
+               else
+                       flags=
+                       case $verbose in
+                       YesYes*)
+                           flags="-v"
+                           ;;
+                       esac
+                       git-fetch-pack --thin $exec $keep $shallow_depth \
+                               $quiet $no_progress $flags "$remote" $rref ||
+                       echo failed "$remote"
+               fi
+       fi
+      ) |
+      (
+       flags=
+       test -n "$verbose" && flags="$flags -v"
+       test -n "$force" && flags="$flags -f"
+       GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION" \
+               git fetch--tool $flags native-store \
+                       "$remote" "$remote_nick" "$refs"
+      )
+    ) || exit
+
+}
+
+fetch_per_ref () {
+  reflist="$1"
+  refs=
+  rref=
+
+  for ref in $reflist
+  do
+      refs="$refs$LF$ref"
+
+      # These are paths relative to $GIT_DIR, typically starting with refs/,
+      # but may be HEAD.
+      if expr "z$ref" : 'z\.' >/dev/null
+      then
+         not_for_merge=t
+         ref=$(expr "z$ref" : 'z\.\(.*\)')
+      else
+         not_for_merge=
+      fi
+      if expr "z$ref" : 'z+' >/dev/null
+      then
+         single_force=t
+         ref=$(expr "z$ref" : 'z+\(.*\)')
+      else
+         single_force=
+      fi
+      remote_name=$(expr "z$ref" : 'z\([^:]*\):')
+      local_name=$(expr "z$ref" : 'z[^:]*:\(.*\)')
+
+      rref="$rref$LF$remote_name"
+
+      # There are transports that can fetch only one head at a time...
+      case "$remote" in
+      http://* | https://* | ftp://*)
+         test -n "$shallow_depth" &&
+               die "shallow clone with http not supported"
+         proto=`expr "$remote" : '\([^:]*\):'`
+         if [ -n "$GIT_SSL_NO_VERIFY" ]; then
+             curl_extra_args="-k"
+         fi
+         if [ -n "$GIT_CURL_FTP_NO_EPSV" -o \
+               "`git config --bool http.noEPSV`" = true ]; then
+             noepsv_opt="--disable-epsv"
+         fi
+
+         # Find $remote_name from ls-remote output.
+         head=$(echo "$ls_remote_result" | \
+               git fetch--tool -s pick-rref "$remote_name" "-")
+         expr "z$head" : "z$_x40\$" >/dev/null ||
+               die "No such ref $remote_name at $remote"
+         echo >&2 "Fetching $remote_name from $remote using $proto"
+         case "$quiet" in '') v=-v ;; *) v= ;; esac
+         git-http-fetch $v -a "$head" "$remote" || exit
+         ;;
+      rsync://*)
+         test -n "$shallow_depth" &&
+               die "shallow clone with rsync not supported"
+         TMP_HEAD="$GIT_DIR/TMP_HEAD"
+         rsync -L -q "$remote/$remote_name" "$TMP_HEAD" || exit 1
+         head=$(git rev-parse --verify TMP_HEAD)
+         rm -f "$TMP_HEAD"
+         case "$quiet" in '') v=-v ;; *) v= ;; esac
+         test "$rsync_slurped_objects" || {
+             rsync -a $v --ignore-existing --exclude info \
+                 "$remote/objects/" "$GIT_OBJECT_DIRECTORY/" || exit
+
+             # Look at objects/info/alternates for rsync -- http will
+             # support it natively and git native ones will do it on
+             # the remote end.  Not having that file is not a crime.
+             rsync -q "$remote/objects/info/alternates" \
+                 "$GIT_DIR/TMP_ALT" 2>/dev/null ||
+                 rm -f "$GIT_DIR/TMP_ALT"
+             if test -f "$GIT_DIR/TMP_ALT"
+             then
+                 resolve_alternates "$remote" <"$GIT_DIR/TMP_ALT" |
+                 while read alt
+                 do
+                     case "$alt" in 'bad alternate: '*) die "$alt";; esac
+                     echo >&2 "Getting alternate: $alt"
+                     rsync -av --ignore-existing --exclude info \
+                     "$alt" "$GIT_OBJECT_DIRECTORY/" || exit
+                 done
+                 rm -f "$GIT_DIR/TMP_ALT"
+             fi
+             rsync_slurped_objects=t
+         }
+         ;;
+      esac
+
+      append_fetch_head "$head" "$remote" \
+         "$remote_name" "$remote_nick" "$local_name" "$not_for_merge" || exit
+
+  done
+
+}
+
+fetch_main () {
+       case "$remote" in
+       http://* | https://* | ftp://* | rsync://* )
+               fetch_per_ref "$@"
+               ;;
+       *)
+               fetch_all_at_once "$@"
+               ;;
+       esac
+}
+
+fetch_main "$reflist" || exit
+
+# automated tag following
+case "$no_tags$tags" in
+'')
+       case "$reflist" in
+       *:refs/*)
+               # effective only when we are following remote branch
+               # using local tracking branch.
+               taglist=$(IFS=' ' &&
+               echo "$ls_remote_result" |
+               git show-ref --exclude-existing=refs/tags/ |
+               while read sha1 name
+               do
+                       git cat-file -t "$sha1" >/dev/null 2>&1 || continue
+                       echo >&2 "Auto-following $name"
+                       echo ".${name}:${name}"
+               done)
+       esac
+       case "$taglist" in
+       '') ;;
+       ?*)
+               # do not deepen a shallow tree when following tags
+               shallow_depth=
+               fetch_main "$taglist" || exit ;;
+       esac
+esac
+
+# If the original head was empty (i.e. no "master" yet), or
+# if we were told not to worry, we do not have to check.
+case "$orig_head" in
+'')
+       ;;
+?*)
+       curr_head=$(git rev-parse --verify HEAD 2>/dev/null)
+       if test "$curr_head" != "$orig_head"
+       then
+           git update-ref \
+                       -m "$GIT_REFLOG_ACTION: Undoing incorrectly fetched HEAD." \
+                       HEAD "$orig_head"
+               die "Cannot fetch into the current branch."
+       fi
+       ;;
+esac
diff --git a/dir.c b/dir.c
index f843c4dd208ac0f37f9c70383e522590688f1966..4c17d3643eebf0d9071003065b5c120db130f9ab 100644 (file)
--- a/dir.c
+++ b/dir.c
@@ -709,3 +709,44 @@ int is_inside_dir(const char *dir)
        char buffer[PATH_MAX];
        return get_relative_cwd(buffer, sizeof(buffer), dir) != NULL;
 }
+
+int remove_dir_recursively(struct strbuf *path, int only_empty)
+{
+       DIR *dir = opendir(path->buf);
+       struct dirent *e;
+       int ret = 0, original_len = path->len, len;
+
+       if (!dir)
+               return -1;
+       if (path->buf[original_len - 1] != '/')
+               strbuf_addch(path, '/');
+
+       len = path->len;
+       while ((e = readdir(dir)) != NULL) {
+               struct stat st;
+               if ((e->d_name[0] == '.') &&
+                   ((e->d_name[1] == 0) ||
+                    ((e->d_name[1] == '.') && e->d_name[2] == 0)))
+                       continue; /* "." and ".." */
+
+               strbuf_setlen(path, len);
+               strbuf_addstr(path, e->d_name);
+               if (lstat(path->buf, &st))
+                       ; /* fall thru */
+               else if (S_ISDIR(st.st_mode)) {
+                       if (!remove_dir_recursively(path, only_empty))
+                               continue; /* happy */
+               } else if (!only_empty && !unlink(path->buf))
+                       continue; /* happy, too */
+
+               /* path too long, stat fails, or non-directory still exists */
+               ret = -1;
+               break;
+       }
+       closedir(dir);
+
+       strbuf_setlen(path, original_len);
+       if (!ret)
+               ret = rmdir(path->buf);
+       return ret;
+}
diff --git a/dir.h b/dir.h
index f55a87b2cd5f2b4e06e14b4c1b832fc0a60ad319..a248a23ac4eab2aff1e91060f0e9281a8842bf6b 100644 (file)
--- a/dir.h
+++ b/dir.h
@@ -64,4 +64,6 @@ extern struct dir_entry *dir_add_name(struct dir_struct *dir, const char *pathna
 extern char *get_relative_cwd(char *buffer, int size, const char *dir);
 extern int is_inside_dir(const char *dir);
 
+extern int remove_dir_recursively(struct strbuf *path, int only_empty);
+
 #endif
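
The new remove_dir_recursively() helper takes the directory path in a strbuf, deletes its contents unless only_empty is set, and truncates the strbuf back to its original length before returning. A minimal usage sketch, assuming git's strbuf API as used elsewhere in this merge; remove_tmp_objdir() is an illustrative name, not part of this commit:

/*
 * Hedged sketch of a caller, not taken from this commit: remove a
 * temporary object directory and everything under it.
 */
#include "cache.h"
#include "dir.h"

static int remove_tmp_objdir(const char *path)
{
	struct strbuf buf;
	int ret;

	strbuf_init(&buf, 0);
	strbuf_addstr(&buf, path);
	ret = remove_dir_recursively(&buf, 0);	/* 0: unlink files too */
	strbuf_release(&buf);
	return ret;
}

Passing only_empty=1 instead restricts the helper to directories, so a directory that still contains files is left in place and the call returns -1.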
diff --git a/fetch-pack.c b/fetch-pack.c
deleted file mode 100644 (file)
index 9c81305..0000000
--- a/fetch-pack.c
+++ /dev/null
@@ -1,789 +0,0 @@
-#include "cache.h"
-#include "refs.h"
-#include "pkt-line.h"
-#include "commit.h"
-#include "tag.h"
-#include "exec_cmd.h"
-#include "pack.h"
-#include "sideband.h"
-
-static int keep_pack;
-static int transfer_unpack_limit = -1;
-static int fetch_unpack_limit = -1;
-static int unpack_limit = 100;
-static int quiet;
-static int verbose;
-static int fetch_all;
-static int depth;
-static int no_progress;
-static const char fetch_pack_usage[] =
-"git-fetch-pack [--all] [--quiet|-q] [--keep|-k] [--thin] [--upload-pack=<git-upload-pack>] [--depth=<n>] [--no-progress] [-v] [<host>:]<directory> [<refs>...]";
-static const char *uploadpack = "git-upload-pack";
-
-#define COMPLETE       (1U << 0)
-#define COMMON         (1U << 1)
-#define COMMON_REF     (1U << 2)
-#define SEEN           (1U << 3)
-#define POPPED         (1U << 4)
-
-/*
- * After sending this many "have"s if we do not get any new ACK , we
- * give up traversing our history.
- */
-#define MAX_IN_VAIN 256
-
-static struct commit_list *rev_list;
-static int non_common_revs, multi_ack, use_thin_pack, use_sideband;
-
-static void rev_list_push(struct commit *commit, int mark)
-{
-       if (!(commit->object.flags & mark)) {
-               commit->object.flags |= mark;
-
-               if (!(commit->object.parsed))
-                       parse_commit(commit);
-
-               insert_by_date(commit, &rev_list);
-
-               if (!(commit->object.flags & COMMON))
-                       non_common_revs++;
-       }
-}
-
-static int rev_list_insert_ref(const char *path, const unsigned char *sha1, int flag, void *cb_data)
-{
-       struct object *o = deref_tag(parse_object(sha1), path, 0);
-
-       if (o && o->type == OBJ_COMMIT)
-               rev_list_push((struct commit *)o, SEEN);
-
-       return 0;
-}
-
-/*
-   This function marks a rev and its ancestors as common.
-   In some cases, it is desirable to mark only the ancestors (for example
-   when only the server does not yet know that they are common).
-*/
-
-static void mark_common(struct commit *commit,
-               int ancestors_only, int dont_parse)
-{
-       if (commit != NULL && !(commit->object.flags & COMMON)) {
-               struct object *o = (struct object *)commit;
-
-               if (!ancestors_only)
-                       o->flags |= COMMON;
-
-               if (!(o->flags & SEEN))
-                       rev_list_push(commit, SEEN);
-               else {
-                       struct commit_list *parents;
-
-                       if (!ancestors_only && !(o->flags & POPPED))
-                               non_common_revs--;
-                       if (!o->parsed && !dont_parse)
-                               parse_commit(commit);
-
-                       for (parents = commit->parents;
-                                       parents;
-                                       parents = parents->next)
-                               mark_common(parents->item, 0, dont_parse);
-               }
-       }
-}
-
-/*
-  Get the next rev to send, ignoring the common.
-*/
-
-static const unsigned char* get_rev(void)
-{
-       struct commit *commit = NULL;
-
-       while (commit == NULL) {
-               unsigned int mark;
-               struct commit_list* parents;
-
-               if (rev_list == NULL || non_common_revs == 0)
-                       return NULL;
-
-               commit = rev_list->item;
-               if (!(commit->object.parsed))
-                       parse_commit(commit);
-               commit->object.flags |= POPPED;
-               if (!(commit->object.flags & COMMON))
-                       non_common_revs--;
-
-               parents = commit->parents;
-
-               if (commit->object.flags & COMMON) {
-                       /* do not send "have", and ignore ancestors */
-                       commit = NULL;
-                       mark = COMMON | SEEN;
-               } else if (commit->object.flags & COMMON_REF)
-                       /* send "have", and ignore ancestors */
-                       mark = COMMON | SEEN;
-               else
-                       /* send "have", also for its ancestors */
-                       mark = SEEN;
-
-               while (parents) {
-                       if (!(parents->item->object.flags & SEEN))
-                               rev_list_push(parents->item, mark);
-                       if (mark & COMMON)
-                               mark_common(parents->item, 1, 0);
-                       parents = parents->next;
-               }
-
-               rev_list = rev_list->next;
-       }
-
-       return commit->object.sha1;
-}
-
-static int find_common(int fd[2], unsigned char *result_sha1,
-                      struct ref *refs)
-{
-       int fetching;
-       int count = 0, flushes = 0, retval;
-       const unsigned char *sha1;
-       unsigned in_vain = 0;
-       int got_continue = 0;
-
-       for_each_ref(rev_list_insert_ref, NULL);
-
-       fetching = 0;
-       for ( ; refs ; refs = refs->next) {
-               unsigned char *remote = refs->old_sha1;
-               struct object *o;
-
-               /*
-                * If that object is complete (i.e. it is an ancestor of a
-                * local ref), we tell them we have it but do not have to
-                * tell them about its ancestors, which they already know
-                * about.
-                *
-                * We use lookup_object here because we are only
-                * interested in the case we *know* the object is
-                * reachable and we have already scanned it.
-                */
-               if (((o = lookup_object(remote)) != NULL) &&
-                               (o->flags & COMPLETE)) {
-                       continue;
-               }
-
-               if (!fetching)
-                       packet_write(fd[1], "want %s%s%s%s%s%s%s\n",
-                                    sha1_to_hex(remote),
-                                    (multi_ack ? " multi_ack" : ""),
-                                    (use_sideband == 2 ? " side-band-64k" : ""),
-                                    (use_sideband == 1 ? " side-band" : ""),
-                                    (use_thin_pack ? " thin-pack" : ""),
-                                    (no_progress ? " no-progress" : ""),
-                                    " ofs-delta");
-               else
-                       packet_write(fd[1], "want %s\n", sha1_to_hex(remote));
-               fetching++;
-       }
-       if (is_repository_shallow())
-               write_shallow_commits(fd[1], 1);
-       if (depth > 0)
-               packet_write(fd[1], "deepen %d", depth);
-       packet_flush(fd[1]);
-       if (!fetching)
-               return 1;
-
-       if (depth > 0) {
-               char line[1024];
-               unsigned char sha1[20];
-               int len;
-
-               while ((len = packet_read_line(fd[0], line, sizeof(line)))) {
-                       if (!prefixcmp(line, "shallow ")) {
-                               if (get_sha1_hex(line + 8, sha1))
-                                       die("invalid shallow line: %s", line);
-                               register_shallow(sha1);
-                               continue;
-                       }
-                       if (!prefixcmp(line, "unshallow ")) {
-                               if (get_sha1_hex(line + 10, sha1))
-                                       die("invalid unshallow line: %s", line);
-                               if (!lookup_object(sha1))
-                                       die("object not found: %s", line);
-                               /* make sure that it is parsed as shallow */
-                               parse_object(sha1);
-                               if (unregister_shallow(sha1))
-                                       die("no shallow found: %s", line);
-                               continue;
-                       }
-                       die("expected shallow/unshallow, got %s", line);
-               }
-       }
-
-       flushes = 0;
-       retval = -1;
-       while ((sha1 = get_rev())) {
-               packet_write(fd[1], "have %s\n", sha1_to_hex(sha1));
-               if (verbose)
-                       fprintf(stderr, "have %s\n", sha1_to_hex(sha1));
-               in_vain++;
-               if (!(31 & ++count)) {
-                       int ack;
-
-                       packet_flush(fd[1]);
-                       flushes++;
-
-                       /*
-                        * We keep one window "ahead" of the other side, and
-                        * will wait for an ACK only on the next one
-                        */
-                       if (count == 32)
-                               continue;
-
-                       do {
-                               ack = get_ack(fd[0], result_sha1);
-                               if (verbose && ack)
-                                       fprintf(stderr, "got ack %d %s\n", ack,
-                                                       sha1_to_hex(result_sha1));
-                               if (ack == 1) {
-                                       flushes = 0;
-                                       multi_ack = 0;
-                                       retval = 0;
-                                       goto done;
-                               } else if (ack == 2) {
-                                       struct commit *commit =
-                                               lookup_commit(result_sha1);
-                                       mark_common(commit, 0, 1);
-                                       retval = 0;
-                                       in_vain = 0;
-                                       got_continue = 1;
-                               }
-                       } while (ack);
-                       flushes--;
-                       if (got_continue && MAX_IN_VAIN < in_vain) {
-                               if (verbose)
-                                       fprintf(stderr, "giving up\n");
-                               break; /* give up */
-                       }
-               }
-       }
-done:
-       packet_write(fd[1], "done\n");
-       if (verbose)
-               fprintf(stderr, "done\n");
-       if (retval != 0) {
-               multi_ack = 0;
-               flushes++;
-       }
-       while (flushes || multi_ack) {
-               int ack = get_ack(fd[0], result_sha1);
-               if (ack) {
-                       if (verbose)
-                               fprintf(stderr, "got ack (%d) %s\n", ack,
-                                       sha1_to_hex(result_sha1));
-                       if (ack == 1)
-                               return 0;
-                       multi_ack = 1;
-                       continue;
-               }
-               flushes--;
-       }
-       return retval;
-}
-
-static struct commit_list *complete;
-
-static int mark_complete(const char *path, const unsigned char *sha1, int flag, void *cb_data)
-{
-       struct object *o = parse_object(sha1);
-
-       while (o && o->type == OBJ_TAG) {
-               struct tag *t = (struct tag *) o;
-               if (!t->tagged)
-                       break; /* broken repository */
-               o->flags |= COMPLETE;
-               o = parse_object(t->tagged->sha1);
-       }
-       if (o && o->type == OBJ_COMMIT) {
-               struct commit *commit = (struct commit *)o;
-               commit->object.flags |= COMPLETE;
-               insert_by_date(commit, &complete);
-       }
-       return 0;
-}
-
-static void mark_recent_complete_commits(unsigned long cutoff)
-{
-       while (complete && cutoff <= complete->item->date) {
-               if (verbose)
-                       fprintf(stderr, "Marking %s as complete\n",
-                               sha1_to_hex(complete->item->object.sha1));
-               pop_most_recent_commit(&complete, COMPLETE);
-       }
-}
-
-static void filter_refs(struct ref **refs, int nr_match, char **match)
-{
-       struct ref **return_refs;
-       struct ref *newlist = NULL;
-       struct ref **newtail = &newlist;
-       struct ref *ref, *next;
-       struct ref *fastarray[32];
-
-       if (nr_match && !fetch_all) {
-               if (ARRAY_SIZE(fastarray) < nr_match)
-                       return_refs = xcalloc(nr_match, sizeof(struct ref *));
-               else {
-                       return_refs = fastarray;
-                       memset(return_refs, 0, sizeof(struct ref *) * nr_match);
-               }
-       }
-       else
-               return_refs = NULL;
-
-       for (ref = *refs; ref; ref = next) {
-               next = ref->next;
-               if (!memcmp(ref->name, "refs/", 5) &&
-                   check_ref_format(ref->name + 5))
-                       ; /* trash */
-               else if (fetch_all &&
-                        (!depth || prefixcmp(ref->name, "refs/tags/") )) {
-                       *newtail = ref;
-                       ref->next = NULL;
-                       newtail = &ref->next;
-                       continue;
-               }
-               else {
-                       int order = path_match(ref->name, nr_match, match);
-                       if (order) {
-                               return_refs[order-1] = ref;
-                               continue; /* we will link it later */
-                       }
-               }
-               free(ref);
-       }
-
-       if (!fetch_all) {
-               int i;
-               for (i = 0; i < nr_match; i++) {
-                       ref = return_refs[i];
-                       if (ref) {
-                               *newtail = ref;
-                               ref->next = NULL;
-                               newtail = &ref->next;
-                       }
-               }
-               if (return_refs != fastarray)
-                       free(return_refs);
-       }
-       *refs = newlist;
-}
-
-static int everything_local(struct ref **refs, int nr_match, char **match)
-{
-       struct ref *ref;
-       int retval;
-       unsigned long cutoff = 0;
-
-       track_object_refs = 0;
-       save_commit_buffer = 0;
-
-       for (ref = *refs; ref; ref = ref->next) {
-               struct object *o;
-
-               o = parse_object(ref->old_sha1);
-               if (!o)
-                       continue;
-
-               /* We already have it -- which may mean that we were
-                * in sync with the other side at some time after
-                * that (it is OK if we guess wrong here).
-                */
-               if (o->type == OBJ_COMMIT) {
-                       struct commit *commit = (struct commit *)o;
-                       if (!cutoff || cutoff < commit->date)
-                               cutoff = commit->date;
-               }
-       }
-
-       if (!depth) {
-               for_each_ref(mark_complete, NULL);
-               if (cutoff)
-                       mark_recent_complete_commits(cutoff);
-       }
-
-       /*
-        * Mark all complete remote refs as common refs.
-        * Don't mark them common yet; the server has to be told so first.
-        */
-       for (ref = *refs; ref; ref = ref->next) {
-               struct object *o = deref_tag(lookup_object(ref->old_sha1),
-                                            NULL, 0);
-
-               if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
-                       continue;
-
-               if (!(o->flags & SEEN)) {
-                       rev_list_push((struct commit *)o, COMMON_REF | SEEN);
-
-                       mark_common((struct commit *)o, 1, 1);
-               }
-       }
-
-       filter_refs(refs, nr_match, match);
-
-       for (retval = 1, ref = *refs; ref ; ref = ref->next) {
-               const unsigned char *remote = ref->old_sha1;
-               unsigned char local[20];
-               struct object *o;
-
-               o = lookup_object(remote);
-               if (!o || !(o->flags & COMPLETE)) {
-                       retval = 0;
-                       if (!verbose)
-                               continue;
-                       fprintf(stderr,
-                               "want %s (%s)\n", sha1_to_hex(remote),
-                               ref->name);
-                       continue;
-               }
-
-               hashcpy(ref->new_sha1, local);
-               if (!verbose)
-                       continue;
-               fprintf(stderr,
-                       "already have %s (%s)\n", sha1_to_hex(remote),
-                       ref->name);
-       }
-       return retval;
-}
-
-static pid_t setup_sideband(int fd[2], int xd[2])
-{
-       pid_t side_pid;
-
-       if (!use_sideband) {
-               fd[0] = xd[0];
-               fd[1] = xd[1];
-               return 0;
-       }
-       /* xd[] is talking with upload-pack; subprocess reads from
-        * xd[0], spits out band#2 to stderr, and feeds us band#1
-        * through our fd[0].
-        */
-       if (pipe(fd) < 0)
-               die("fetch-pack: unable to set up pipe");
-       side_pid = fork();
-       if (side_pid < 0)
-               die("fetch-pack: unable to fork off sideband demultiplexer");
-       if (!side_pid) {
-               /* subprocess */
-               close(fd[0]);
-               if (xd[0] != xd[1])
-                       close(xd[1]);
-               if (recv_sideband("fetch-pack", xd[0], fd[1], 2))
-                       exit(1);
-               exit(0);
-       }
-       close(xd[0]);
-       close(fd[1]);
-       fd[1] = xd[1];
-       return side_pid;
-}
-
-static int get_pack(int xd[2])
-{
-       int status;
-       pid_t pid, side_pid;
-       int fd[2];
-       const char *argv[20];
-       char keep_arg[256];
-       char hdr_arg[256];
-       const char **av;
-       int do_keep = keep_pack;
-
-       side_pid = setup_sideband(fd, xd);
-
-       av = argv;
-       *hdr_arg = 0;
-       if (unpack_limit) {
-               struct pack_header header;
-
-               if (read_pack_header(fd[0], &header))
-                       die("protocol error: bad pack header");
-               snprintf(hdr_arg, sizeof(hdr_arg), "--pack_header=%u,%u",
-                        ntohl(header.hdr_version), ntohl(header.hdr_entries));
-               if (ntohl(header.hdr_entries) < unpack_limit)
-                       do_keep = 0;
-               else
-                       do_keep = 1;
-       }
-
-       if (do_keep) {
-               *av++ = "index-pack";
-               *av++ = "--stdin";
-               if (!quiet && !no_progress)
-                       *av++ = "-v";
-               if (use_thin_pack)
-                       *av++ = "--fix-thin";
-               if (keep_pack > 1 || unpack_limit) {
-                       int s = sprintf(keep_arg,
-                                       "--keep=fetch-pack %d on ", getpid());
-                       if (gethostname(keep_arg + s, sizeof(keep_arg) - s))
-                               strcpy(keep_arg + s, "localhost");
-                       *av++ = keep_arg;
-               }
-       }
-       else {
-               *av++ = "unpack-objects";
-               if (quiet)
-                       *av++ = "-q";
-       }
-       if (*hdr_arg)
-               *av++ = hdr_arg;
-       *av++ = NULL;
-
-       pid = fork();
-       if (pid < 0)
-               die("fetch-pack: unable to fork off %s", argv[0]);
-       if (!pid) {
-               dup2(fd[0], 0);
-               close(fd[0]);
-               close(fd[1]);
-               execv_git_cmd(argv);
-               die("%s exec failed", argv[0]);
-       }
-       close(fd[0]);
-       close(fd[1]);
-       while (waitpid(pid, &status, 0) < 0) {
-               if (errno != EINTR)
-                       die("waiting for %s: %s", argv[0], strerror(errno));
-       }
-       if (WIFEXITED(status)) {
-               int code = WEXITSTATUS(status);
-               if (code)
-                       die("%s died with error code %d", argv[0], code);
-               return 0;
-       }
-       if (WIFSIGNALED(status)) {
-               int sig = WTERMSIG(status);
-               die("%s died of signal %d", argv[0], sig);
-       }
-       die("%s died of unnatural causes %d", argv[0], status);
-}
-
-static int fetch_pack(int fd[2], int nr_match, char **match)
-{
-       struct ref *ref;
-       unsigned char sha1[20];
-
-       get_remote_heads(fd[0], &ref, 0, NULL, 0);
-       if (is_repository_shallow() && !server_supports("shallow"))
-               die("Server does not support shallow clients");
-       if (server_supports("multi_ack")) {
-               if (verbose)
-                       fprintf(stderr, "Server supports multi_ack\n");
-               multi_ack = 1;
-       }
-       if (server_supports("side-band-64k")) {
-               if (verbose)
-                       fprintf(stderr, "Server supports side-band-64k\n");
-               use_sideband = 2;
-       }
-       else if (server_supports("side-band")) {
-               if (verbose)
-                       fprintf(stderr, "Server supports side-band\n");
-               use_sideband = 1;
-       }
-       if (!ref) {
-               packet_flush(fd[1]);
-               die("no matching remote head");
-       }
-       if (everything_local(&ref, nr_match, match)) {
-               packet_flush(fd[1]);
-               goto all_done;
-       }
-       if (find_common(fd, sha1, ref) < 0)
-               if (keep_pack != 1)
-                       /* When cloning, it is not unusual to have
-                        * no common commit.
-                        */
-                       fprintf(stderr, "warning: no common commits\n");
-
-       if (get_pack(fd))
-               die("git-fetch-pack: fetch failed.");
-
- all_done:
-       while (ref) {
-               printf("%s %s\n",
-                      sha1_to_hex(ref->old_sha1), ref->name);
-               ref = ref->next;
-       }
-       return 0;
-}
-
-static int remove_duplicates(int nr_heads, char **heads)
-{
-       int src, dst;
-
-       for (src = dst = 0; src < nr_heads; src++) {
-               /* If heads[src] is different from any of
-                * heads[0..dst], push it in.
-                */
-               int i;
-               for (i = 0; i < dst; i++) {
-                       if (!strcmp(heads[i], heads[src]))
-                               break;
-               }
-               if (i < dst)
-                       continue;
-               if (src != dst)
-                       heads[dst] = heads[src];
-               dst++;
-       }
-       heads[dst] = 0;
-       return dst;
-}
-
-static int fetch_pack_config(const char *var, const char *value)
-{
-       if (strcmp(var, "fetch.unpacklimit") == 0) {
-               fetch_unpack_limit = git_config_int(var, value);
-               return 0;
-       }
-
-       if (strcmp(var, "transfer.unpacklimit") == 0) {
-               transfer_unpack_limit = git_config_int(var, value);
-               return 0;
-       }
-
-       return git_default_config(var, value);
-}
-
-static struct lock_file lock;
-
-int main(int argc, char **argv)
-{
-       int i, ret, nr_heads;
-       char *dest = NULL, **heads;
-       int fd[2];
-       pid_t pid;
-       struct stat st;
-
-       setup_git_directory();
-       git_config(fetch_pack_config);
-
-       if (0 <= transfer_unpack_limit)
-               unpack_limit = transfer_unpack_limit;
-       else if (0 <= fetch_unpack_limit)
-               unpack_limit = fetch_unpack_limit;
-
-       nr_heads = 0;
-       heads = NULL;
-       for (i = 1; i < argc; i++) {
-               char *arg = argv[i];
-
-               if (*arg == '-') {
-                       if (!prefixcmp(arg, "--upload-pack=")) {
-                               uploadpack = arg + 14;
-                               continue;
-                       }
-                       if (!prefixcmp(arg, "--exec=")) {
-                               uploadpack = arg + 7;
-                               continue;
-                       }
-                       if (!strcmp("--quiet", arg) || !strcmp("-q", arg)) {
-                               quiet = 1;
-                               continue;
-                       }
-                       if (!strcmp("--keep", arg) || !strcmp("-k", arg)) {
-                               keep_pack++;
-                               unpack_limit = 0;
-                               continue;
-                       }
-                       if (!strcmp("--thin", arg)) {
-                               use_thin_pack = 1;
-                               continue;
-                       }
-                       if (!strcmp("--all", arg)) {
-                               fetch_all = 1;
-                               continue;
-                       }
-                       if (!strcmp("-v", arg)) {
-                               verbose = 1;
-                               continue;
-                       }
-                       if (!prefixcmp(arg, "--depth=")) {
-                               depth = strtol(arg + 8, NULL, 0);
-                               if (stat(git_path("shallow"), &st))
-                                       st.st_mtime = 0;
-                               continue;
-                       }
-                       if (!strcmp("--no-progress", arg)) {
-                               no_progress = 1;
-                               continue;
-                       }
-                       usage(fetch_pack_usage);
-               }
-               dest = arg;
-               heads = argv + i + 1;
-               nr_heads = argc - i - 1;
-               break;
-       }
-       if (!dest)
-               usage(fetch_pack_usage);
-       pid = git_connect(fd, dest, uploadpack, verbose ? CONNECT_VERBOSE : 0);
-       if (pid < 0)
-               return 1;
-       if (heads && nr_heads)
-               nr_heads = remove_duplicates(nr_heads, heads);
-       ret = fetch_pack(fd, nr_heads, heads);
-       close(fd[0]);
-       close(fd[1]);
-       ret |= finish_connect(pid);
-
-       if (!ret && nr_heads) {
-               /* If the heads to pull were given, we should have
-                * consumed all of them by matching the remote.
-                * Otherwise, 'git-fetch remote no-such-ref' would
-                * silently succeed without issuing an error.
-                */
-               for (i = 0; i < nr_heads; i++)
-                       if (heads[i] && heads[i][0]) {
-                               error("no such remote ref %s", heads[i]);
-                               ret = 1;
-                       }
-       }
-
-       if (!ret && depth > 0) {
-               struct cache_time mtime;
-               char *shallow = git_path("shallow");
-               int fd;
-
-               mtime.sec = st.st_mtime;
-#ifdef USE_NSEC
-               mtime.usec = st.st_mtim.usec;
-#endif
-               if (stat(shallow, &st)) {
-                       if (mtime.sec)
-                               die("shallow file was removed during fetch");
-               } else if (st.st_mtime != mtime.sec
-#ifdef USE_NSEC
-                               || st.st_mtim.usec != mtime.usec
-#endif
-                         )
-                       die("shallow file was changed during fetch");
-
-               fd = hold_lock_file_for_update(&lock, shallow, 1);
-               if (!write_shallow_commits(fd, 0)) {
-                       unlink(shallow);
-                       rollback_lock_file(&lock);
-               } else {
-                       close(fd);
-                       commit_lock_file(&lock);
-               }
-       }
-
-       return !!ret;
-}
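
With this deletion the stand-alone git-fetch-pack command goes away; per the file list it returns as builtin-fetch-pack.c behind the fetch_pack() entry point declared in the new fetch-pack.h below. One behaviour worth noting from the removed get_pack(): the incoming pack is exploded with unpack-objects when its object count is below the configured unpack limit and is otherwise kept and indexed with index-pack (cf. "Ensure builtin-fetch honors {fetch,transfer}.unpackLimit" in the merged series). A sketch of that decision in isolation; choose_pack_handler() and pack_entries are illustrative names, the logic mirrors the deleted code:

/*
 * Hedged sketch, not the builtin itself: pack_entries is the
 * already byte-swapped hdr_entries value from the pack header.
 */
static const char *choose_pack_handler(int keep_pack, int unpack_limit,
				       unsigned long pack_entries)
{
	int do_keep = keep_pack;

	if (unpack_limit)
		do_keep = pack_entries >= unpack_limit;
	return do_keep ? "index-pack" : "unpack-objects";
}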
diff --git a/fetch-pack.h b/fetch-pack.h
new file mode 100644 (file)
index 0000000..a7888ea
--- /dev/null
+++ b/fetch-pack.h
@@ -0,0 +1,24 @@
+#ifndef FETCH_PACK_H
+#define FETCH_PACK_H
+
+struct fetch_pack_args
+{
+       const char *uploadpack;
+       int unpacklimit;
+       int depth;
+       unsigned quiet:1,
+               keep_pack:1,
+               lock_pack:1,
+               use_thin_pack:1,
+               fetch_all:1,
+               verbose:1,
+               no_progress:1;
+};
+
+struct ref *fetch_pack(struct fetch_pack_args *args,
+               const char *dest,
+               int nr_heads,
+               char **heads,
+               char **pack_lockfile);
+
+#endif
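
fetch-pack is now usable as a library routine: a caller fills in struct fetch_pack_args and receives the list of refs that were actually fetched, with pack_lockfile presumably reporting the .keep file when lock_pack is set. A hedged sketch of a caller; fetch_from() is illustrative and not the actual transport code, only the names declared above come from the header:

/* Hedged sketch of a fetch_pack() caller, assuming git's usual headers. */
#include "cache.h"
#include "fetch-pack.h"

static struct ref *fetch_from(const char *url, int nr_heads, char **heads)
{
	struct fetch_pack_args args;
	char *pack_lockfile = NULL;

	memset(&args, 0, sizeof(args));
	args.uploadpack = "git-upload-pack";
	args.quiet = 1;

	return fetch_pack(&args, url, nr_heads, heads, &pack_lockfile);
}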
diff --git a/fetch.c b/fetch.c
deleted file mode 100644 (file)
index b1c1f07..0000000
--- a/fetch.c
+++ /dev/null
@@ -1,316 +0,0 @@
-#include "cache.h"
-#include "fetch.h"
-#include "commit.h"
-#include "tree.h"
-#include "tree-walk.h"
-#include "tag.h"
-#include "blob.h"
-#include "refs.h"
-
-int get_tree = 0;
-int get_history = 0;
-int get_all = 0;
-int get_verbosely = 0;
-int get_recover = 0;
-static unsigned char current_commit_sha1[20];
-
-void pull_say(const char *fmt, const char *hex)
-{
-       if (get_verbosely)
-               fprintf(stderr, fmt, hex);
-}
-
-static void report_missing(const struct object *obj)
-{
-       char missing_hex[41];
-       strcpy(missing_hex, sha1_to_hex(obj->sha1));;
-       fprintf(stderr, "Cannot obtain needed %s %s\n",
-               obj->type ? typename(obj->type): "object", missing_hex);
-       if (!is_null_sha1(current_commit_sha1))
-               fprintf(stderr, "while processing commit %s.\n",
-                       sha1_to_hex(current_commit_sha1));
-}
-
-static int process(struct object *obj);
-
-static int process_tree(struct tree *tree)
-{
-       struct tree_desc desc;
-       struct name_entry entry;
-
-       if (parse_tree(tree))
-               return -1;
-
-       init_tree_desc(&desc, tree->buffer, tree->size);
-       while (tree_entry(&desc, &entry)) {
-               struct object *obj = NULL;
-
-               /* submodule commits are not stored in the superproject */
-               if (S_ISGITLINK(entry.mode))
-                       continue;
-               if (S_ISDIR(entry.mode)) {
-                       struct tree *tree = lookup_tree(entry.sha1);
-                       if (tree)
-                               obj = &tree->object;
-               }
-               else {
-                       struct blob *blob = lookup_blob(entry.sha1);
-                       if (blob)
-                               obj = &blob->object;
-               }
-               if (!obj || process(obj))
-                       return -1;
-       }
-       free(tree->buffer);
-       tree->buffer = NULL;
-       tree->size = 0;
-       return 0;
-}
-
-#define COMPLETE       (1U << 0)
-#define SEEN           (1U << 1)
-#define TO_SCAN                (1U << 2)
-
-static struct commit_list *complete = NULL;
-
-static int process_commit(struct commit *commit)
-{
-       if (parse_commit(commit))
-               return -1;
-
-       while (complete && complete->item->date >= commit->date) {
-               pop_most_recent_commit(&complete, COMPLETE);
-       }
-
-       if (commit->object.flags & COMPLETE)
-               return 0;
-
-       hashcpy(current_commit_sha1, commit->object.sha1);
-
-       pull_say("walk %s\n", sha1_to_hex(commit->object.sha1));
-
-       if (get_tree) {
-               if (process(&commit->tree->object))
-                       return -1;
-               if (!get_all)
-                       get_tree = 0;
-       }
-       if (get_history) {
-               struct commit_list *parents = commit->parents;
-               for (; parents; parents = parents->next) {
-                       if (process(&parents->item->object))
-                               return -1;
-               }
-       }
-       return 0;
-}
-
-static int process_tag(struct tag *tag)
-{
-       if (parse_tag(tag))
-               return -1;
-       return process(tag->tagged);
-}
-
-static struct object_list *process_queue = NULL;
-static struct object_list **process_queue_end = &process_queue;
-
-static int process_object(struct object *obj)
-{
-       if (obj->type == OBJ_COMMIT) {
-               if (process_commit((struct commit *)obj))
-                       return -1;
-               return 0;
-       }
-       if (obj->type == OBJ_TREE) {
-               if (process_tree((struct tree *)obj))
-                       return -1;
-               return 0;
-       }
-       if (obj->type == OBJ_BLOB) {
-               return 0;
-       }
-       if (obj->type == OBJ_TAG) {
-               if (process_tag((struct tag *)obj))
-                       return -1;
-               return 0;
-       }
-       return error("Unable to determine requirements "
-                    "of type %s for %s",
-                    typename(obj->type), sha1_to_hex(obj->sha1));
-}
-
-static int process(struct object *obj)
-{
-       if (obj->flags & SEEN)
-               return 0;
-       obj->flags |= SEEN;
-
-       if (has_sha1_file(obj->sha1)) {
-               /* We already have it, so we should scan it now. */
-               obj->flags |= TO_SCAN;
-       }
-       else {
-               if (obj->flags & COMPLETE)
-                       return 0;
-               prefetch(obj->sha1);
-       }
-
-       object_list_insert(obj, process_queue_end);
-       process_queue_end = &(*process_queue_end)->next;
-       return 0;
-}
-
-static int loop(void)
-{
-       struct object_list *elem;
-
-       while (process_queue) {
-               struct object *obj = process_queue->item;
-               elem = process_queue;
-               process_queue = elem->next;
-               free(elem);
-               if (!process_queue)
-                       process_queue_end = &process_queue;
-
-               /* If we are not scanning this object, we placed it in
-                * the queue because we needed to fetch it first.
-                */
-               if (! (obj->flags & TO_SCAN)) {
-                       if (fetch(obj->sha1)) {
-                               report_missing(obj);
-                               return -1;
-                       }
-               }
-               if (!obj->type)
-                       parse_object(obj->sha1);
-               if (process_object(obj))
-                       return -1;
-       }
-       return 0;
-}
-
-static int interpret_target(char *target, unsigned char *sha1)
-{
-       if (!get_sha1_hex(target, sha1))
-               return 0;
-       if (!check_ref_format(target)) {
-               if (!fetch_ref(target, sha1)) {
-                       return 0;
-               }
-       }
-       return -1;
-}
-
-static int mark_complete(const char *path, const unsigned char *sha1, int flag, void *cb_data)
-{
-       struct commit *commit = lookup_commit_reference_gently(sha1, 1);
-       if (commit) {
-               commit->object.flags |= COMPLETE;
-               insert_by_date(commit, &complete);
-       }
-       return 0;
-}
-
-int pull_targets_stdin(char ***target, const char ***write_ref)
-{
-       int targets = 0, targets_alloc = 0;
-       struct strbuf buf;
-       *target = NULL; *write_ref = NULL;
-       strbuf_init(&buf, 0);
-       while (1) {
-               char *rf_one = NULL;
-               char *tg_one;
-
-               if (strbuf_getline(&buf, stdin, '\n') == EOF)
-                       break;
-               tg_one = buf.buf;
-               rf_one = strchr(tg_one, '\t');
-               if (rf_one)
-                       *rf_one++ = 0;
-
-               if (targets >= targets_alloc) {
-                       targets_alloc = targets_alloc ? targets_alloc * 2 : 64;
-                       *target = xrealloc(*target, targets_alloc * sizeof(**target));
-                       *write_ref = xrealloc(*write_ref, targets_alloc * sizeof(**write_ref));
-               }
-               (*target)[targets] = xstrdup(tg_one);
-               (*write_ref)[targets] = rf_one ? xstrdup(rf_one) : NULL;
-               targets++;
-       }
-       strbuf_release(&buf);
-       return targets;
-}
-
-void pull_targets_free(int targets, char **target, const char **write_ref)
-{
-       while (targets--) {
-               free(target[targets]);
-               if (write_ref && write_ref[targets])
-                       free((char *) write_ref[targets]);
-       }
-}
-
-int pull(int targets, char **target, const char **write_ref,
-         const char *write_ref_log_details)
-{
-       struct ref_lock **lock = xcalloc(targets, sizeof(struct ref_lock *));
-       unsigned char *sha1 = xmalloc(targets * 20);
-       char *msg;
-       int ret;
-       int i;
-
-       save_commit_buffer = 0;
-       track_object_refs = 0;
-
-       for (i = 0; i < targets; i++) {
-               if (!write_ref || !write_ref[i])
-                       continue;
-
-               lock[i] = lock_ref_sha1(write_ref[i], NULL);
-               if (!lock[i]) {
-                       error("Can't lock ref %s", write_ref[i]);
-                       goto unlock_and_fail;
-               }
-       }
-
-       if (!get_recover)
-               for_each_ref(mark_complete, NULL);
-
-       for (i = 0; i < targets; i++) {
-               if (interpret_target(target[i], &sha1[20 * i])) {
-                       error("Could not interpret %s as something to pull", target[i]);
-                       goto unlock_and_fail;
-               }
-               if (process(lookup_unknown_object(&sha1[20 * i])))
-                       goto unlock_and_fail;
-       }
-
-       if (loop())
-               goto unlock_and_fail;
-
-       if (write_ref_log_details) {
-               msg = xmalloc(strlen(write_ref_log_details) + 12);
-               sprintf(msg, "fetch from %s", write_ref_log_details);
-       } else {
-               msg = NULL;
-       }
-       for (i = 0; i < targets; i++) {
-               if (!write_ref || !write_ref[i])
-                       continue;
-               ret = write_ref_sha1(lock[i], &sha1[20 * i], msg ? msg : "fetch (unknown)");
-               lock[i] = NULL;
-               if (ret)
-                       goto unlock_and_fail;
-       }
-       free(msg);
-
-       return 0;
-
-
-unlock_and_fail:
-       for (i = 0; i < targets; i++)
-               if (lock[i])
-                       unlock_ref(lock[i]);
-       return -1;
-}
diff --git a/fetch.h b/fetch.h
deleted file mode 100644 (file)
index be48c6f..0000000
--- a/fetch.h
+++ /dev/null
@@ -1,54 +0,0 @@
-#ifndef PULL_H
-#define PULL_H
-
-/*
- * Fetch object given SHA1 from the remote, and store it locally under
- * GIT_OBJECT_DIRECTORY.  Return 0 on success, -1 on failure.  To be
- * provided by the particular implementation.
- */
-extern int fetch(unsigned char *sha1);
-
-/*
- * Fetch the specified object and store it locally; fetch() will be
- * called later to determine success. To be provided by the particular
- * implementation.
- */
-extern void prefetch(unsigned char *sha1);
-
-/*
- * Fetch ref (relative to $GIT_DIR/refs) from the remote, and store
- * the 20-byte SHA1 in sha1.  Return 0 on success, -1 on failure.  To
- * be provided by the particular implementation.
- */
-extern int fetch_ref(char *ref, unsigned char *sha1);
-
-/* Set to fetch the target tree. */
-extern int get_tree;
-
-/* Set to fetch the commit history. */
-extern int get_history;
-
-/* Set to fetch the trees in the commit history. */
-extern int get_all;
-
-/* Set to be verbose */
-extern int get_verbosely;
-
-/* Set to check on all reachable objects. */
-extern int get_recover;
-
-/* Report what we got under get_verbosely */
-extern void pull_say(const char *, const char *);
-
-/* Load pull targets from stdin */
-extern int pull_targets_stdin(char ***target, const char ***write_ref);
-
-/* Free up loaded targets */
-extern void pull_targets_free(int targets, char **target, const char **write_ref);
-
-/* If write_ref is set, the ref filename to write the target value to. */
-/* If write_ref_log_details is set, additional text will appear in the ref log. */
-extern int pull(int targets, char **target, const char **write_ref,
-               const char *write_ref_log_details);
-
-#endif /* PULL_H */
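
fetch.h declared the old commit-walker interface: each dumb-protocol backend (http-fetch, local-fetch, ssh-fetch, all removed by this merge) supplied fetch(), prefetch() and fetch_ref(), and the shared code in fetch.c drove them through pull(). Per the file list that role moves to the new walker.c/walker.h. A hedged sketch of how a backend driver used the removed API; pull_one() is an illustrative name and option parsing is omitted:

/* Hedged sketch of a driver for the removed commit-walker API. */
#include "cache.h"
#include "fetch.h"

static int pull_one(char *target, const char *write_ref)
{
	get_tree = 1;		/* fetch the target tree ... */
	get_history = 1;	/* ... the commit history ... */
	get_all = 1;		/* ... and the trees of that history */
	return pull(1, &target, &write_ref, "origin");
}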
diff --git a/git-compat-util.h b/git-compat-util.h
index f23d934f667cc2b10ee668a3bfef8492aadbbe7d..474f1d1ffbee5433ec311174ee37804ab16417bb 100644 (file)
--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -147,6 +147,11 @@ extern ssize_t git_pread(int fd, void *buf, size_t count, off_t offset);
 extern int gitsetenv(const char *, const char *, int);
 #endif
 
+#ifdef NO_MKDTEMP
+#define mkdtemp gitmkdtemp
+extern char *gitmkdtemp(char *);
+#endif
+
 #ifdef NO_UNSETENV
 #define unsetenv gitunsetenv
 extern void gitunsetenv(const char *);
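
Builds configured with NO_MKDTEMP now map mkdtemp to gitmkdtemp, provided by the new compat/mkdtemp.c (not reproduced in this excerpt). A minimal sketch of such a fallback, assuming only mktemp(3) and mkdir(2); this is an illustration, not necessarily the code the merge adds:

#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>

char *gitmkdtemp(char *path)
{
	/*
	 * mktemp() leaves an empty string in the template when it
	 * cannot produce a unique name; treat that as failure.
	 */
	if (!mktemp(path) || !*path || mkdir(path, 0700))
		return NULL;
	return path;
}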
diff --git a/git-fetch.sh b/git-fetch.sh
deleted file mode 100755 (executable)
index e44af2c..0000000
--- a/git-fetch.sh
+++ /dev/null
@@ -1,377 +0,0 @@
-#!/bin/sh
-#
-
-USAGE='<fetch-options> <repository> <refspec>...'
-SUBDIRECTORY_OK=Yes
-. git-sh-setup
-set_reflog_action "fetch $*"
-cd_to_toplevel ;# probably unnecessary...
-
-. git-parse-remote
-_x40='[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]'
-_x40="$_x40$_x40$_x40$_x40$_x40$_x40$_x40$_x40"
-
-LF='
-'
-IFS="$LF"
-
-no_tags=
-tags=
-append=
-force=
-verbose=
-update_head_ok=
-exec=
-keep=
-shallow_depth=
-no_progress=
-test -t 1 || no_progress=--no-progress
-quiet=
-while test $# != 0
-do
-       case "$1" in
-       -a|--a|--ap|--app|--appe|--appen|--append)
-               append=t
-               ;;
-       --upl|--uplo|--uploa|--upload|--upload-|--upload-p|\
-       --upload-pa|--upload-pac|--upload-pack)
-               shift
-               exec="--upload-pack=$1"
-               ;;
-       --upl=*|--uplo=*|--uploa=*|--upload=*|\
-       --upload-=*|--upload-p=*|--upload-pa=*|--upload-pac=*|--upload-pack=*)
-               exec=--upload-pack=$(expr "z$1" : 'z-[^=]*=\(.*\)')
-               shift
-               ;;
-       -f|--f|--fo|--for|--forc|--force)
-               force=t
-               ;;
-       -t|--t|--ta|--tag|--tags)
-               tags=t
-               ;;
-       -n|--n|--no|--no-|--no-t|--no-ta|--no-tag|--no-tags)
-               no_tags=t
-               ;;
-       -u|--u|--up|--upd|--upda|--updat|--update|--update-|--update-h|\
-       --update-he|--update-hea|--update-head|--update-head-|\
-       --update-head-o|--update-head-ok)
-               update_head_ok=t
-               ;;
-       -q|--q|--qu|--qui|--quie|--quiet)
-               quiet=--quiet
-               ;;
-       -v|--verbose)
-               verbose="$verbose"Yes
-               ;;
-       -k|--k|--ke|--kee|--keep)
-               keep='-k -k'
-               ;;
-       --depth=*)
-               shallow_depth="--depth=`expr "z$1" : 'z-[^=]*=\(.*\)'`"
-               ;;
-       --depth)
-               shift
-               shallow_depth="--depth=$1"
-               ;;
-       -*)
-               usage
-               ;;
-       *)
-               break
-               ;;
-       esac
-       shift
-done
-
-case "$#" in
-0)
-       origin=$(get_default_remote)
-       test -n "$(get_remote_url ${origin})" ||
-               die "Where do you want to fetch from today?"
-       set x $origin ; shift ;;
-esac
-
-if test -z "$exec"
-then
-       # No command line override and we have configuration for the remote.
-       exec="--upload-pack=$(get_uploadpack $1)"
-fi
-
-remote_nick="$1"
-remote=$(get_remote_url "$@")
-refs=
-rref=
-rsync_slurped_objects=
-
-if test "" = "$append"
-then
-       : >"$GIT_DIR/FETCH_HEAD"
-fi
-
-# Global that is reused later
-ls_remote_result=$(git ls-remote $exec "$remote") ||
-       die "Cannot get the repository state from $remote"
-
-append_fetch_head () {
-       flags=
-       test -n "$verbose" && flags="$flags$LF-v"
-       test -n "$force$single_force" && flags="$flags$LF-f"
-       GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION" \
-               git fetch--tool $flags append-fetch-head "$@"
-}
-
-# updating the current HEAD with git-fetch in a bare
-# repository is always fine.
-if test -z "$update_head_ok" && test $(is_bare_repository) = false
-then
-       orig_head=$(git rev-parse --verify HEAD 2>/dev/null)
-fi
-
-# Allow --notags from remote.$1.tagopt
-case "$tags$no_tags" in
-'')
-       case "$(git config --get "remote.$1.tagopt")" in
-       --no-tags)
-               no_tags=t ;;
-       esac
-esac
-
-# If --tags (and later --heads or --all) is specified, then we are
-# not talking about defaults stored in Pull: line of remotes or
-# branches file, and just fetch those and refspecs explicitly given.
-# Otherwise we do what we always did.
-
-reflist=$(get_remote_refs_for_fetch "$@")
-if test "$tags"
-then
-       taglist=`IFS='  ' &&
-                 echo "$ls_remote_result" |
-                 git show-ref --exclude-existing=refs/tags/ |
-                 while read sha1 name
-                 do
-                       echo ".${name}:${name}"
-                 done` || exit
-       if test "$#" -gt 1
-       then
-               # remote URL plus explicit refspecs; we need to merge them.
-               reflist="$reflist$LF$taglist"
-       else
-               # No explicit refspecs; fetch tags only.
-               reflist=$taglist
-       fi
-fi
-
-fetch_all_at_once () {
-
-  eval=$(echo "$1" | git fetch--tool parse-reflist "-")
-  eval "$eval"
-
-    ( : subshell because we muck with IFS
-      IFS="    $LF"
-      (
-       if test "$remote" = . ; then
-           git show-ref $rref || echo failed "$remote"
-       elif test -f "$remote" ; then
-           test -n "$shallow_depth" &&
-               die "shallow clone with bundle is not supported"
-           git bundle unbundle "$remote" $rref ||
-           echo failed "$remote"
-       else
-               if      test -d "$remote" &&
-
-                       # The remote might be our alternate.  With
-                       # this optimization we will bypass fetch-pack
-                       # altogether, which means we cannot be doing
-                       # the shallow stuff at all.
-                       test ! -f "$GIT_DIR/shallow" &&
-                       test -z "$shallow_depth" &&
-
-                       # See if all of what we are going to fetch are
-                       # connected to our repository's tips, in which
-                       # case we do not have to do any fetch.
-                       theirs=$(echo "$ls_remote_result" | \
-                               git fetch--tool -s pick-rref "$rref" "-") &&
-
-                       # This will barf when $theirs reach an object that
-                       # we do not have in our repository.  Otherwise,
-                       # we already have everything the fetch would bring in.
-                       git rev-list --objects $theirs --not --all \
-                               >/dev/null 2>/dev/null
-               then
-                       echo "$ls_remote_result" | \
-                               git fetch--tool pick-rref "$rref" "-"
-               else
-                       flags=
-                       case $verbose in
-                       YesYes*)
-                           flags="-v"
-                           ;;
-                       esac
-                       git-fetch-pack --thin $exec $keep $shallow_depth \
-                               $quiet $no_progress $flags "$remote" $rref ||
-                       echo failed "$remote"
-               fi
-       fi
-      ) |
-      (
-       flags=
-       test -n "$verbose" && flags="$flags -v"
-       test -n "$force" && flags="$flags -f"
-       GIT_REFLOG_ACTION="$GIT_REFLOG_ACTION" \
-               git fetch--tool $flags native-store \
-                       "$remote" "$remote_nick" "$refs"
-      )
-    ) || exit
-
-}
-
-fetch_per_ref () {
-  reflist="$1"
-  refs=
-  rref=
-
-  for ref in $reflist
-  do
-      refs="$refs$LF$ref"
-
-      # These are relative path from $GIT_DIR, typically starting at refs/
-      # but may be HEAD
-      if expr "z$ref" : 'z\.' >/dev/null
-      then
-         not_for_merge=t
-         ref=$(expr "z$ref" : 'z\.\(.*\)')
-      else
-         not_for_merge=
-      fi
-      if expr "z$ref" : 'z+' >/dev/null
-      then
-         single_force=t
-         ref=$(expr "z$ref" : 'z+\(.*\)')
-      else
-         single_force=
-      fi
-      remote_name=$(expr "z$ref" : 'z\([^:]*\):')
-      local_name=$(expr "z$ref" : 'z[^:]*:\(.*\)')
-
-      rref="$rref$LF$remote_name"
-
-      # There are transports that can fetch only one head at a time...
-      case "$remote" in
-      http://* | https://* | ftp://*)
-         test -n "$shallow_depth" &&
-               die "shallow clone with http not supported"
-         proto=`expr "$remote" : '\([^:]*\):'`
-         if [ -n "$GIT_SSL_NO_VERIFY" ]; then
-             curl_extra_args="-k"
-         fi
-         if [ -n "$GIT_CURL_FTP_NO_EPSV" -o \
-               "`git config --bool http.noEPSV`" = true ]; then
-             noepsv_opt="--disable-epsv"
-         fi
-
-         # Find $remote_name from ls-remote output.
-         head=$(echo "$ls_remote_result" | \
-               git fetch--tool -s pick-rref "$remote_name" "-")
-         expr "z$head" : "z$_x40\$" >/dev/null ||
-               die "No such ref $remote_name at $remote"
-         echo >&2 "Fetching $remote_name from $remote using $proto"
-         case "$quiet" in '') v=-v ;; *) v= ;; esac
-         git-http-fetch $v -a "$head" "$remote" || exit
-         ;;
-      rsync://*)
-         test -n "$shallow_depth" &&
-               die "shallow clone with rsync not supported"
-         TMP_HEAD="$GIT_DIR/TMP_HEAD"
-         rsync -L -q "$remote/$remote_name" "$TMP_HEAD" || exit 1
-         head=$(git rev-parse --verify TMP_HEAD)
-         rm -f "$TMP_HEAD"
-         case "$quiet" in '') v=-v ;; *) v= ;; esac
-         test "$rsync_slurped_objects" || {
-             rsync -a $v --ignore-existing --exclude info \
-                 "$remote/objects/" "$GIT_OBJECT_DIRECTORY/" || exit
-
-             # Look at objects/info/alternates for rsync -- http will
-             # support it natively and git native ones will do it on
-             # the remote end.  Not having that file is not a crime.
-             rsync -q "$remote/objects/info/alternates" \
-                 "$GIT_DIR/TMP_ALT" 2>/dev/null ||
-                 rm -f "$GIT_DIR/TMP_ALT"
-             if test -f "$GIT_DIR/TMP_ALT"
-             then
-                 resolve_alternates "$remote" <"$GIT_DIR/TMP_ALT" |
-                 while read alt
-                 do
-                     case "$alt" in 'bad alternate: '*) die "$alt";; esac
-                     echo >&2 "Getting alternate: $alt"
-                     rsync -av --ignore-existing --exclude info \
-                     "$alt" "$GIT_OBJECT_DIRECTORY/" || exit
-                 done
-                 rm -f "$GIT_DIR/TMP_ALT"
-             fi
-             rsync_slurped_objects=t
-         }
-         ;;
-      esac
-
-      append_fetch_head "$head" "$remote" \
-         "$remote_name" "$remote_nick" "$local_name" "$not_for_merge" || exit
-
-  done
-
-}
-
-fetch_main () {
-       case "$remote" in
-       http://* | https://* | ftp://* | rsync://* )
-               fetch_per_ref "$@"
-               ;;
-       *)
-               fetch_all_at_once "$@"
-               ;;
-       esac
-}
-
-fetch_main "$reflist" || exit
-
-# automated tag following
-case "$no_tags$tags" in
-'')
-       case "$reflist" in
-       *:refs/*)
-               # effective only when we are following remote branch
-               # using local tracking branch.
-               taglist=$(IFS=' ' &&
-               echo "$ls_remote_result" |
-               git show-ref --exclude-existing=refs/tags/ |
-               while read sha1 name
-               do
-                       git cat-file -t "$sha1" >/dev/null 2>&1 || continue
-                       echo >&2 "Auto-following $name"
-                       echo ".${name}:${name}"
-               done)
-       esac
-       case "$taglist" in
-       '') ;;
-       ?*)
-               # do not deepen a shallow tree when following tags
-               shallow_depth=
-               fetch_main "$taglist" || exit ;;
-       esac
-esac
-
-# If the original head was empty (i.e. no "master" yet), or
-# if we were told not to worry, we do not have to check.
-case "$orig_head" in
-'')
-       ;;
-?*)
-       curr_head=$(git rev-parse --verify HEAD 2>/dev/null)
-       if test "$curr_head" != "$orig_head"
-       then
-           git update-ref \
-                       -m "$GIT_REFLOG_ACTION: Undoing incorrectly fetched HEAD." \
-                       HEAD "$orig_head"
-               die "Cannot fetch into the current branch."
-       fi
-       ;;
-esac
diff --git a/git.c b/git.c
index 853e66cddbbcc00943cfc0af06741ca8116b7966..23a430c3690ed1f921ec22196edf1f0062bc6dcd 100644 (file)
--- a/git.c
+++ b/git.c
@@ -328,6 +328,8 @@ static void handle_internal_command(int argc, const char **argv)
                { "diff-files", cmd_diff_files },
                { "diff-index", cmd_diff_index, RUN_SETUP },
                { "diff-tree", cmd_diff_tree, RUN_SETUP },
+               { "fetch", cmd_fetch, RUN_SETUP },
+               { "fetch-pack", cmd_fetch_pack, RUN_SETUP },
                { "fetch--tool", cmd_fetch__tool, RUN_SETUP },
                { "fmt-merge-msg", cmd_fmt_merge_msg, RUN_SETUP },
                { "for-each-ref", cmd_for_each_ref, RUN_SETUP },
@@ -338,6 +340,9 @@ static void handle_internal_command(int argc, const char **argv)
                { "get-tar-commit-id", cmd_get_tar_commit_id },
                { "grep", cmd_grep, RUN_SETUP | USE_PAGER },
                { "help", cmd_help },
+#ifndef NO_CURL
+               { "http-fetch", cmd_http_fetch, RUN_SETUP },
+#endif
                { "init", cmd_init_db },
                { "init-db", cmd_init_db },
                { "log", cmd_log, RUN_SETUP | USE_PAGER },
diff --git a/http-fetch.c b/http-fetch.c
deleted file mode 100644 (file)
index 202fae0..0000000
--- a/http-fetch.c
+++ /dev/null
@@ -1,1064 +0,0 @@
-#include "cache.h"
-#include "commit.h"
-#include "pack.h"
-#include "fetch.h"
-#include "http.h"
-
-#define PREV_BUF_SIZE 4096
-#define RANGE_HEADER_SIZE 30
-
-static int commits_on_stdin;
-
-static int got_alternates = -1;
-static int corrupt_object_found;
-
-static struct curl_slist *no_pragma_header;
-
-struct alt_base
-{
-       char *base;
-       int got_indices;
-       struct packed_git *packs;
-       struct alt_base *next;
-};
-
-static struct alt_base *alt;
-
-enum object_request_state {
-       WAITING,
-       ABORTED,
-       ACTIVE,
-       COMPLETE,
-};
-
-struct object_request
-{
-       unsigned char sha1[20];
-       struct alt_base *repo;
-       char *url;
-       char filename[PATH_MAX];
-       char tmpfile[PATH_MAX];
-       int local;
-       enum object_request_state state;
-       CURLcode curl_result;
-       char errorstr[CURL_ERROR_SIZE];
-       long http_code;
-       unsigned char real_sha1[20];
-       SHA_CTX c;
-       z_stream stream;
-       int zret;
-       int rename;
-       struct active_request_slot *slot;
-       struct object_request *next;
-};
-
-struct alternates_request {
-       const char *base;
-       char *url;
-       struct buffer *buffer;
-       struct active_request_slot *slot;
-       int http_specific;
-};
-
-static struct object_request *object_queue_head;
-
-static size_t fwrite_sha1_file(void *ptr, size_t eltsize, size_t nmemb,
-                              void *data)
-{
-       unsigned char expn[4096];
-       size_t size = eltsize * nmemb;
-       int posn = 0;
-       struct object_request *obj_req = (struct object_request *)data;
-       do {
-               ssize_t retval = xwrite(obj_req->local,
-                                      (char *) ptr + posn, size - posn);
-               if (retval < 0)
-                       return posn;
-               posn += retval;
-       } while (posn < size);
-
-       obj_req->stream.avail_in = size;
-       obj_req->stream.next_in = ptr;
-       do {
-               obj_req->stream.next_out = expn;
-               obj_req->stream.avail_out = sizeof(expn);
-               obj_req->zret = inflate(&obj_req->stream, Z_SYNC_FLUSH);
-               SHA1_Update(&obj_req->c, expn,
-                           sizeof(expn) - obj_req->stream.avail_out);
-       } while (obj_req->stream.avail_in && obj_req->zret == Z_OK);
-       data_received++;
-       return size;
-}
-
-static int missing__target(int code, int result)
-{
-       return  /* file:// URL -- do we ever use one??? */
-               (result == CURLE_FILE_COULDNT_READ_FILE) ||
-               /* http:// and https:// URL */
-               (code == 404 && result == CURLE_HTTP_RETURNED_ERROR) ||
-               /* ftp:// URL */
-               (code == 550 && result == CURLE_FTP_COULDNT_RETR_FILE)
-               ;
-}
-
-#define missing_target(a) missing__target((a)->http_code, (a)->curl_result)
-
-static void fetch_alternates(const char *base);
-
-static void process_object_response(void *callback_data);
-
-static void start_object_request(struct object_request *obj_req)
-{
-       char *hex = sha1_to_hex(obj_req->sha1);
-       char prevfile[PATH_MAX];
-       char *url;
-       char *posn;
-       int prevlocal;
-       unsigned char prev_buf[PREV_BUF_SIZE];
-       ssize_t prev_read = 0;
-       long prev_posn = 0;
-       char range[RANGE_HEADER_SIZE];
-       struct curl_slist *range_header = NULL;
-       struct active_request_slot *slot;
-
-       snprintf(prevfile, sizeof(prevfile), "%s.prev", obj_req->filename);
-       unlink(prevfile);
-       rename(obj_req->tmpfile, prevfile);
-       unlink(obj_req->tmpfile);
-
-       if (obj_req->local != -1)
-               error("fd leakage in start: %d", obj_req->local);
-       obj_req->local = open(obj_req->tmpfile,
-                             O_WRONLY | O_CREAT | O_EXCL, 0666);
-       /* This could have failed due to the "lazy directory creation";
-        * try to mkdir the last path component.
-        */
-       if (obj_req->local < 0 && errno == ENOENT) {
-               char *dir = strrchr(obj_req->tmpfile, '/');
-               if (dir) {
-                       *dir = 0;
-                       mkdir(obj_req->tmpfile, 0777);
-                       *dir = '/';
-               }
-               obj_req->local = open(obj_req->tmpfile,
-                                     O_WRONLY | O_CREAT | O_EXCL, 0666);
-       }
-
-       if (obj_req->local < 0) {
-               obj_req->state = ABORTED;
-               error("Couldn't create temporary file %s for %s: %s",
-                     obj_req->tmpfile, obj_req->filename, strerror(errno));
-               return;
-       }
-
-       memset(&obj_req->stream, 0, sizeof(obj_req->stream));
-
-       inflateInit(&obj_req->stream);
-
-       SHA1_Init(&obj_req->c);
-
-       url = xmalloc(strlen(obj_req->repo->base) + 51);
-       obj_req->url = xmalloc(strlen(obj_req->repo->base) + 51);
-       strcpy(url, obj_req->repo->base);
-       posn = url + strlen(obj_req->repo->base);
-       strcpy(posn, "/objects/");
-       posn += 9;
-       memcpy(posn, hex, 2);
-       posn += 2;
-       *(posn++) = '/';
-       strcpy(posn, hex + 2);
-       strcpy(obj_req->url, url);
-
-       /* If a previous temp file is present, process what was already
-          fetched. */
-       prevlocal = open(prevfile, O_RDONLY);
-       if (prevlocal != -1) {
-               do {
-                       prev_read = xread(prevlocal, prev_buf, PREV_BUF_SIZE);
-                       if (prev_read>0) {
-                               if (fwrite_sha1_file(prev_buf,
-                                                    1,
-                                                    prev_read,
-                                                    obj_req) == prev_read) {
-                                       prev_posn += prev_read;
-                               } else {
-                                       prev_read = -1;
-                               }
-                       }
-               } while (prev_read > 0);
-               close(prevlocal);
-       }
-       unlink(prevfile);
-
-       /* Reset inflate/SHA1 if there was an error reading the previous temp
-          file; also rewind to the beginning of the local file. */
-       if (prev_read == -1) {
-               memset(&obj_req->stream, 0, sizeof(obj_req->stream));
-               inflateInit(&obj_req->stream);
-               SHA1_Init(&obj_req->c);
-               if (prev_posn>0) {
-                       prev_posn = 0;
-                       lseek(obj_req->local, 0, SEEK_SET);
-                       ftruncate(obj_req->local, 0);
-               }
-       }
-
-       slot = get_active_slot();
-       slot->callback_func = process_object_response;
-       slot->callback_data = obj_req;
-       obj_req->slot = slot;
-
-       curl_easy_setopt(slot->curl, CURLOPT_FILE, obj_req);
-       curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_sha1_file);
-       curl_easy_setopt(slot->curl, CURLOPT_ERRORBUFFER, obj_req->errorstr);
-       curl_easy_setopt(slot->curl, CURLOPT_URL, url);
-       curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header);
-
-       /* If we have successfully processed data from a previous fetch
-          attempt, only fetch the data we don't already have. */
-       if (prev_posn>0) {
-               if (get_verbosely)
-                       fprintf(stderr,
-                               "Resuming fetch of object %s at byte %ld\n",
-                               hex, prev_posn);
-               sprintf(range, "Range: bytes=%ld-", prev_posn);
-               range_header = curl_slist_append(range_header, range);
-               curl_easy_setopt(slot->curl,
-                                CURLOPT_HTTPHEADER, range_header);
-       }
-
-       /* Try to get the request started, abort the request on error */
-       obj_req->state = ACTIVE;
-       if (!start_active_slot(slot)) {
-               obj_req->state = ABORTED;
-               obj_req->slot = NULL;
-               close(obj_req->local); obj_req->local = -1;
-               free(obj_req->url);
-               return;
-       }
-}
-
-static void finish_object_request(struct object_request *obj_req)
-{
-       struct stat st;
-
-       fchmod(obj_req->local, 0444);
-       close(obj_req->local); obj_req->local = -1;
-
-       if (obj_req->http_code == 416) {
-               fprintf(stderr, "Warning: requested range invalid; we may already have all the data.\n");
-       } else if (obj_req->curl_result != CURLE_OK) {
-               if (stat(obj_req->tmpfile, &st) == 0)
-                       if (st.st_size == 0)
-                               unlink(obj_req->tmpfile);
-               return;
-       }
-
-       inflateEnd(&obj_req->stream);
-       SHA1_Final(obj_req->real_sha1, &obj_req->c);
-       if (obj_req->zret != Z_STREAM_END) {
-               unlink(obj_req->tmpfile);
-               return;
-       }
-       if (hashcmp(obj_req->sha1, obj_req->real_sha1)) {
-               unlink(obj_req->tmpfile);
-               return;
-       }
-       obj_req->rename =
-               move_temp_to_file(obj_req->tmpfile, obj_req->filename);
-
-       if (obj_req->rename == 0)
-               pull_say("got %s\n", sha1_to_hex(obj_req->sha1));
-}
-
-static void process_object_response(void *callback_data)
-{
-       struct object_request *obj_req =
-               (struct object_request *)callback_data;
-
-       obj_req->curl_result = obj_req->slot->curl_result;
-       obj_req->http_code = obj_req->slot->http_code;
-       obj_req->slot = NULL;
-       obj_req->state = COMPLETE;
-
-       /* Use alternates if necessary */
-       if (missing_target(obj_req)) {
-               fetch_alternates(alt->base);
-               if (obj_req->repo->next != NULL) {
-                       obj_req->repo =
-                               obj_req->repo->next;
-                       close(obj_req->local);
-                       obj_req->local = -1;
-                       start_object_request(obj_req);
-                       return;
-               }
-       }
-
-       finish_object_request(obj_req);
-}
-
-static void release_object_request(struct object_request *obj_req)
-{
-       struct object_request *entry = object_queue_head;
-
-       if (obj_req->local != -1)
-               error("fd leakage in release: %d", obj_req->local);
-       if (obj_req == object_queue_head) {
-               object_queue_head = obj_req->next;
-       } else {
-               while (entry->next != NULL && entry->next != obj_req)
-                       entry = entry->next;
-               if (entry->next == obj_req)
-                       entry->next = entry->next->next;
-       }
-
-       free(obj_req->url);
-       free(obj_req);
-}
-
-#ifdef USE_CURL_MULTI
-void fill_active_slots(void)
-{
-       struct object_request *obj_req = object_queue_head;
-       struct active_request_slot *slot = active_queue_head;
-       int num_transfers;
-
-       while (active_requests < max_requests && obj_req != NULL) {
-               if (obj_req->state == WAITING) {
-                       if (has_sha1_file(obj_req->sha1))
-                               obj_req->state = COMPLETE;
-                       else
-                               start_object_request(obj_req);
-                       curl_multi_perform(curlm, &num_transfers);
-               }
-               obj_req = obj_req->next;
-       }
-
-       while (slot != NULL) {
-               if (!slot->in_use && slot->curl != NULL) {
-                       curl_easy_cleanup(slot->curl);
-                       slot->curl = NULL;
-               }
-               slot = slot->next;
-       }
-}
-#endif
-
-void prefetch(unsigned char *sha1)
-{
-       struct object_request *newreq;
-       struct object_request *tail;
-       char *filename = sha1_file_name(sha1);
-
-       newreq = xmalloc(sizeof(*newreq));
-       hashcpy(newreq->sha1, sha1);
-       newreq->repo = alt;
-       newreq->url = NULL;
-       newreq->local = -1;
-       newreq->state = WAITING;
-       snprintf(newreq->filename, sizeof(newreq->filename), "%s", filename);
-       snprintf(newreq->tmpfile, sizeof(newreq->tmpfile),
-                "%s.temp", filename);
-       newreq->slot = NULL;
-       newreq->next = NULL;
-
-       if (object_queue_head == NULL) {
-               object_queue_head = newreq;
-       } else {
-               tail = object_queue_head;
-               while (tail->next != NULL) {
-                       tail = tail->next;
-               }
-               tail->next = newreq;
-       }
-
-#ifdef USE_CURL_MULTI
-       fill_active_slots();
-       step_active_slots();
-#endif
-}
-
-static int fetch_index(struct alt_base *repo, unsigned char *sha1)
-{
-       char *hex = sha1_to_hex(sha1);
-       char *filename;
-       char *url;
-       char tmpfile[PATH_MAX];
-       long prev_posn = 0;
-       char range[RANGE_HEADER_SIZE];
-       struct curl_slist *range_header = NULL;
-
-       FILE *indexfile;
-       struct active_request_slot *slot;
-       struct slot_results results;
-
-       if (has_pack_index(sha1))
-               return 0;
-
-       if (get_verbosely)
-               fprintf(stderr, "Getting index for pack %s\n", hex);
-
-       url = xmalloc(strlen(repo->base) + 64);
-       sprintf(url, "%s/objects/pack/pack-%s.idx", repo->base, hex);
-
-       filename = sha1_pack_index_name(sha1);
-       snprintf(tmpfile, sizeof(tmpfile), "%s.temp", filename);
-       indexfile = fopen(tmpfile, "a");
-       if (!indexfile)
-               return error("Unable to open local file %s for pack index",
-                            filename);
-
-       slot = get_active_slot();
-       slot->results = &results;
-       curl_easy_setopt(slot->curl, CURLOPT_FILE, indexfile);
-       curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite);
-       curl_easy_setopt(slot->curl, CURLOPT_URL, url);
-       curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header);
-       slot->local = indexfile;
-
-       /* If there is data present from a previous transfer attempt,
-          resume where it left off */
-       prev_posn = ftell(indexfile);
-       if (prev_posn>0) {
-               if (get_verbosely)
-                       fprintf(stderr,
-                               "Resuming fetch of index for pack %s at byte %ld\n",
-                               hex, prev_posn);
-               sprintf(range, "Range: bytes=%ld-", prev_posn);
-               range_header = curl_slist_append(range_header, range);
-               curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, range_header);
-       }
-
-       if (start_active_slot(slot)) {
-               run_active_slot(slot);
-               if (results.curl_result != CURLE_OK) {
-                       fclose(indexfile);
-                       return error("Unable to get pack index %s\n%s", url,
-                                    curl_errorstr);
-               }
-       } else {
-               fclose(indexfile);
-               return error("Unable to start request");
-       }
-
-       fclose(indexfile);
-
-       return move_temp_to_file(tmpfile, filename);
-}
-
-static int setup_index(struct alt_base *repo, unsigned char *sha1)
-{
-       struct packed_git *new_pack;
-       if (has_pack_file(sha1))
-               return 0; /* don't list this as something we can get */
-
-       if (fetch_index(repo, sha1))
-               return -1;
-
-       new_pack = parse_pack_index(sha1);
-       new_pack->next = repo->packs;
-       repo->packs = new_pack;
-       return 0;
-}
-
-static void process_alternates_response(void *callback_data)
-{
-       struct alternates_request *alt_req =
-               (struct alternates_request *)callback_data;
-       struct active_request_slot *slot = alt_req->slot;
-       struct alt_base *tail = alt;
-       const char *base = alt_req->base;
-       static const char null_byte = '\0';
-       char *data;
-       int i = 0;
-
-       if (alt_req->http_specific) {
-               if (slot->curl_result != CURLE_OK ||
-                   !alt_req->buffer->posn) {
-
-                       /* Try reusing the slot to get non-http alternates */
-                       alt_req->http_specific = 0;
-                       sprintf(alt_req->url, "%s/objects/info/alternates",
-                               base);
-                       curl_easy_setopt(slot->curl, CURLOPT_URL,
-                                        alt_req->url);
-                       active_requests++;
-                       slot->in_use = 1;
-                       if (slot->finished != NULL)
-                               (*slot->finished) = 0;
-                       if (!start_active_slot(slot)) {
-                               got_alternates = -1;
-                               slot->in_use = 0;
-                               if (slot->finished != NULL)
-                                       (*slot->finished) = 1;
-                       }
-                       return;
-               }
-       } else if (slot->curl_result != CURLE_OK) {
-               if (!missing_target(slot)) {
-                       got_alternates = -1;
-                       return;
-               }
-       }
-
-       fwrite_buffer(&null_byte, 1, 1, alt_req->buffer);
-       alt_req->buffer->posn--;
-       data = alt_req->buffer->buffer;
-
-       while (i < alt_req->buffer->posn) {
-               int posn = i;
-               while (posn < alt_req->buffer->posn && data[posn] != '\n')
-                       posn++;
-               if (data[posn] == '\n') {
-                       int okay = 0;
-                       int serverlen = 0;
-                       struct alt_base *newalt;
-                       char *target = NULL;
-                       if (data[i] == '/') {
-                               /* This counts
-                                * http://git.host/pub/scm/linux.git/
-                                * -----------here^
-                                * so memcpy(dst, base, serverlen) will
-                                * copy up to "...git.host".
-                                */
-                               const char *colon_ss = strstr(base,"://");
-                               if (colon_ss) {
-                                       serverlen = (strchr(colon_ss + 3, '/')
-                                                    - base);
-                                       okay = 1;
-                               }
-                       } else if (!memcmp(data + i, "../", 3)) {
-                               /* Relative URL; chop the corresponding
-                                * number of subpath from base (and ../
-                                * from data), and concatenate the result.
-                                *
-                                * The code first drops ../ from data, and
-                                * then drops one ../ from data and one path
-                                * from base.  IOW, one extra ../ is dropped
-                                * from data than path is dropped from base.
-                                *
-                                * This is not wrong.  The alternate in
-                                *     http://git.host/pub/scm/linux.git/
-                                * to borrow from
-                                *     http://git.host/pub/scm/linus.git/
-                                * is ../../linus.git/objects/.  You need
-                                * two ../../ to borrow from your direct
-                                * neighbour.
-                                */
-                               i += 3;
-                               serverlen = strlen(base);
-                               while (i + 2 < posn &&
-                                      !memcmp(data + i, "../", 3)) {
-                                       do {
-                                               serverlen--;
-                                       } while (serverlen &&
-                                                base[serverlen - 1] != '/');
-                                       i += 3;
-                               }
-                               /* If the server got removed, give up. */
-                               okay = strchr(base, ':') - base + 3 <
-                                       serverlen;
-                       } else if (alt_req->http_specific) {
-                               char *colon = strchr(data + i, ':');
-                               char *slash = strchr(data + i, '/');
-                               if (colon && slash && colon < data + posn &&
-                                   slash < data + posn && colon < slash) {
-                                       okay = 1;
-                               }
-                       }
-                       /* skip "objects\n" at end */
-                       if (okay) {
-                               target = xmalloc(serverlen + posn - i - 6);
-                               memcpy(target, base, serverlen);
-                               memcpy(target + serverlen, data + i,
-                                      posn - i - 7);
-                               target[serverlen + posn - i - 7] = 0;
-                               if (get_verbosely)
-                                       fprintf(stderr,
-                                               "Also look at %s\n", target);
-                               newalt = xmalloc(sizeof(*newalt));
-                               newalt->next = NULL;
-                               newalt->base = target;
-                               newalt->got_indices = 0;
-                               newalt->packs = NULL;
-
-                               while (tail->next != NULL)
-                                       tail = tail->next;
-                               tail->next = newalt;
-                       }
-               }
-               i = posn + 1;
-       }
-
-       got_alternates = 1;
-}
-
-static void fetch_alternates(const char *base)
-{
-       struct buffer buffer;
-       char *url;
-       char *data;
-       struct active_request_slot *slot;
-       struct alternates_request alt_req;
-
-       /* If another request has already started fetching alternates,
-          wait for them to arrive and return to processing this request's
-          curl message */
-#ifdef USE_CURL_MULTI
-       while (got_alternates == 0) {
-               step_active_slots();
-       }
-#endif
-
-       /* Nothing to do if they've already been fetched */
-       if (got_alternates == 1)
-               return;
-
-       /* Start the fetch */
-       got_alternates = 0;
-
-       data = xmalloc(4096);
-       buffer.size = 4096;
-       buffer.posn = 0;
-       buffer.buffer = data;
-
-       if (get_verbosely)
-               fprintf(stderr, "Getting alternates list for %s\n", base);
-
-       url = xmalloc(strlen(base) + 31);
-       sprintf(url, "%s/objects/info/http-alternates", base);
-
-       /* Use a callback to process the result, since another request
-          may fail and need to have alternates loaded before continuing */
-       slot = get_active_slot();
-       slot->callback_func = process_alternates_response;
-       slot->callback_data = &alt_req;
-
-       curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer);
-       curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer);
-       curl_easy_setopt(slot->curl, CURLOPT_URL, url);
-
-       alt_req.base = base;
-       alt_req.url = url;
-       alt_req.buffer = &buffer;
-       alt_req.http_specific = 1;
-       alt_req.slot = slot;
-
-       if (start_active_slot(slot))
-               run_active_slot(slot);
-       else
-               got_alternates = -1;
-
-       free(data);
-       free(url);
-}
-
-static int fetch_indices(struct alt_base *repo)
-{
-       unsigned char sha1[20];
-       char *url;
-       struct buffer buffer;
-       char *data;
-       int i = 0;
-
-       struct active_request_slot *slot;
-       struct slot_results results;
-
-       if (repo->got_indices)
-               return 0;
-
-       data = xmalloc(4096);
-       buffer.size = 4096;
-       buffer.posn = 0;
-       buffer.buffer = data;
-
-       if (get_verbosely)
-               fprintf(stderr, "Getting pack list for %s\n", repo->base);
-
-       url = xmalloc(strlen(repo->base) + 21);
-       sprintf(url, "%s/objects/info/packs", repo->base);
-
-       slot = get_active_slot();
-       slot->results = &results;
-       curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer);
-       curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer);
-       curl_easy_setopt(slot->curl, CURLOPT_URL, url);
-       curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, NULL);
-       if (start_active_slot(slot)) {
-               run_active_slot(slot);
-               if (results.curl_result != CURLE_OK) {
-                       if (missing_target(&results)) {
-                               repo->got_indices = 1;
-                               free(buffer.buffer);
-                               return 0;
-                       } else {
-                               repo->got_indices = 0;
-                               free(buffer.buffer);
-                               return error("%s", curl_errorstr);
-                       }
-               }
-       } else {
-               repo->got_indices = 0;
-               free(buffer.buffer);
-               return error("Unable to start request");
-       }
-
-       data = buffer.buffer;
-       while (i < buffer.posn) {
-               switch (data[i]) {
-               case 'P':
-                       i++;
-                       if (i + 52 <= buffer.posn &&
-                           !prefixcmp(data + i, " pack-") &&
-                           !prefixcmp(data + i + 46, ".pack\n")) {
-                               get_sha1_hex(data + i + 6, sha1);
-                               setup_index(repo, sha1);
-                               i += 51;
-                               break;
-                       }
-               default:
-                       while (i < buffer.posn && data[i] != '\n')
-                               i++;
-               }
-               i++;
-       }
-
-       free(buffer.buffer);
-       repo->got_indices = 1;
-       return 0;
-}
-
-static int fetch_pack(struct alt_base *repo, unsigned char *sha1)
-{
-       char *url;
-       struct packed_git *target;
-       struct packed_git **lst;
-       FILE *packfile;
-       char *filename;
-       char tmpfile[PATH_MAX];
-       int ret;
-       long prev_posn = 0;
-       char range[RANGE_HEADER_SIZE];
-       struct curl_slist *range_header = NULL;
-
-       struct active_request_slot *slot;
-       struct slot_results results;
-
-       if (fetch_indices(repo))
-               return -1;
-       target = find_sha1_pack(sha1, repo->packs);
-       if (!target)
-               return -1;
-
-       if (get_verbosely) {
-               fprintf(stderr, "Getting pack %s\n",
-                       sha1_to_hex(target->sha1));
-               fprintf(stderr, " which contains %s\n",
-                       sha1_to_hex(sha1));
-       }
-
-       url = xmalloc(strlen(repo->base) + 65);
-       sprintf(url, "%s/objects/pack/pack-%s.pack",
-               repo->base, sha1_to_hex(target->sha1));
-
-       filename = sha1_pack_name(target->sha1);
-       snprintf(tmpfile, sizeof(tmpfile), "%s.temp", filename);
-       packfile = fopen(tmpfile, "a");
-       if (!packfile)
-               return error("Unable to open local file %s for pack",
-                            filename);
-
-       slot = get_active_slot();
-       slot->results = &results;
-       curl_easy_setopt(slot->curl, CURLOPT_FILE, packfile);
-       curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite);
-       curl_easy_setopt(slot->curl, CURLOPT_URL, url);
-       curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, no_pragma_header);
-       slot->local = packfile;
-
-       /* If there is data present from a previous transfer attempt,
-          resume where it left off */
-       prev_posn = ftell(packfile);
-       if (prev_posn>0) {
-               if (get_verbosely)
-                       fprintf(stderr,
-                               "Resuming fetch of pack %s at byte %ld\n",
-                               sha1_to_hex(target->sha1), prev_posn);
-               sprintf(range, "Range: bytes=%ld-", prev_posn);
-               range_header = curl_slist_append(range_header, range);
-               curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, range_header);
-       }
-
-       if (start_active_slot(slot)) {
-               run_active_slot(slot);
-               if (results.curl_result != CURLE_OK) {
-                       fclose(packfile);
-                       return error("Unable to get pack file %s\n%s", url,
-                                    curl_errorstr);
-               }
-       } else {
-               fclose(packfile);
-               return error("Unable to start request");
-       }
-
-       target->pack_size = ftell(packfile);
-       fclose(packfile);
-
-       ret = move_temp_to_file(tmpfile, filename);
-       if (ret)
-               return ret;
-
-       lst = &repo->packs;
-       while (*lst != target)
-               lst = &((*lst)->next);
-       *lst = (*lst)->next;
-
-       if (verify_pack(target, 0))
-               return -1;
-       install_packed_git(target);
-
-       return 0;
-}
-
-static void abort_object_request(struct object_request *obj_req)
-{
-       if (obj_req->local >= 0) {
-               close(obj_req->local);
-               obj_req->local = -1;
-       }
-       unlink(obj_req->tmpfile);
-       if (obj_req->slot) {
-               release_active_slot(obj_req->slot);
-               obj_req->slot = NULL;
-       }
-       release_object_request(obj_req);
-}
-
-static int fetch_object(struct alt_base *repo, unsigned char *sha1)
-{
-       char *hex = sha1_to_hex(sha1);
-       int ret = 0;
-       struct object_request *obj_req = object_queue_head;
-
-       while (obj_req != NULL && hashcmp(obj_req->sha1, sha1))
-               obj_req = obj_req->next;
-       if (obj_req == NULL)
-               return error("Couldn't find request for %s in the queue", hex);
-
-       if (has_sha1_file(obj_req->sha1)) {
-               abort_object_request(obj_req);
-               return 0;
-       }
-
-#ifdef USE_CURL_MULTI
-       while (obj_req->state == WAITING) {
-               step_active_slots();
-       }
-#else
-       start_object_request(obj_req);
-#endif
-
-       while (obj_req->state == ACTIVE) {
-               run_active_slot(obj_req->slot);
-       }
-       if (obj_req->local != -1) {
-               close(obj_req->local); obj_req->local = -1;
-       }
-
-       if (obj_req->state == ABORTED) {
-               ret = error("Request for %s aborted", hex);
-       } else if (obj_req->curl_result != CURLE_OK &&
-                  obj_req->http_code != 416) {
-               if (missing_target(obj_req))
-                       ret = -1; /* Be silent, it is probably in a pack. */
-               else
-                       ret = error("%s (curl_result = %d, http_code = %ld, sha1 = %s)",
-                                   obj_req->errorstr, obj_req->curl_result,
-                                   obj_req->http_code, hex);
-       } else if (obj_req->zret != Z_STREAM_END) {
-               corrupt_object_found++;
-               ret = error("File %s (%s) corrupt", hex, obj_req->url);
-       } else if (hashcmp(obj_req->sha1, obj_req->real_sha1)) {
-               ret = error("File %s has bad hash", hex);
-       } else if (obj_req->rename < 0) {
-               ret = error("unable to write sha1 filename %s",
-                           obj_req->filename);
-       }
-
-       release_object_request(obj_req);
-       return ret;
-}
-
-int fetch(unsigned char *sha1)
-{
-       struct alt_base *altbase = alt;
-
-       if (!fetch_object(altbase, sha1))
-               return 0;
-       while (altbase) {
-               if (!fetch_pack(altbase, sha1))
-                       return 0;
-               fetch_alternates(alt->base);
-               altbase = altbase->next;
-       }
-       return error("Unable to find %s under %s", sha1_to_hex(sha1),
-                    alt->base);
-}
-
-static inline int needs_quote(int ch)
-{
-       if (((ch >= 'A') && (ch <= 'Z'))
-                       || ((ch >= 'a') && (ch <= 'z'))
-                       || ((ch >= '0') && (ch <= '9'))
-                       || (ch == '/')
-                       || (ch == '-')
-                       || (ch == '.'))
-               return 0;
-       return 1;
-}
-
-static inline int hex(int v)
-{
-       if (v < 10) return '0' + v;
-       else return 'A' + v - 10;
-}
-
-static char *quote_ref_url(const char *base, const char *ref)
-{
-       const char *cp;
-       char *dp, *qref;
-       int len, baselen, ch;
-
-       baselen = strlen(base);
-       len = baselen + 7; /* "/refs/" + NUL */
-       for (cp = ref; (ch = *cp) != 0; cp++, len++)
-               if (needs_quote(ch))
-                       len += 2; /* extra two hex plus replacement % */
-       qref = xmalloc(len);
-       memcpy(qref, base, baselen);
-       memcpy(qref + baselen, "/refs/", 6);
-       for (cp = ref, dp = qref + baselen + 6; (ch = *cp) != 0; cp++) {
-               if (needs_quote(ch)) {
-                       *dp++ = '%';
-                       *dp++ = hex((ch >> 4) & 0xF);
-                       *dp++ = hex(ch & 0xF);
-               }
-               else
-                       *dp++ = ch;
-       }
-       *dp = 0;
-
-       return qref;
-}
-
-int fetch_ref(char *ref, unsigned char *sha1)
-{
-        char *url;
-        char hex[42];
-        struct buffer buffer;
-       const char *base = alt->base;
-       struct active_request_slot *slot;
-       struct slot_results results;
-        buffer.size = 41;
-        buffer.posn = 0;
-        buffer.buffer = hex;
-        hex[41] = '\0';
-
-       url = quote_ref_url(base, ref);
-       slot = get_active_slot();
-       slot->results = &results;
-       curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer);
-       curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer);
-       curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, NULL);
-       curl_easy_setopt(slot->curl, CURLOPT_URL, url);
-       if (start_active_slot(slot)) {
-               run_active_slot(slot);
-               if (results.curl_result != CURLE_OK)
-                       return error("Couldn't get %s for %s\n%s",
-                                    url, ref, curl_errorstr);
-       } else {
-               return error("Unable to start request");
-       }
-
-        hex[40] = '\0';
-        get_sha1_hex(hex, sha1);
-        return 0;
-}
-
-int main(int argc, const char **argv)
-{
-       int commits;
-       const char **write_ref = NULL;
-       char **commit_id;
-       const char *url;
-       char *s;
-       int arg = 1;
-       int rc = 0;
-
-       setup_git_directory();
-       git_config(git_default_config);
-
-       while (arg < argc && argv[arg][0] == '-') {
-               if (argv[arg][1] == 't') {
-                       get_tree = 1;
-               } else if (argv[arg][1] == 'c') {
-                       get_history = 1;
-               } else if (argv[arg][1] == 'a') {
-                       get_all = 1;
-                       get_tree = 1;
-                       get_history = 1;
-               } else if (argv[arg][1] == 'v') {
-                       get_verbosely = 1;
-               } else if (argv[arg][1] == 'w') {
-                       write_ref = &argv[arg + 1];
-                       arg++;
-               } else if (!strcmp(argv[arg], "--recover")) {
-                       get_recover = 1;
-               } else if (!strcmp(argv[arg], "--stdin")) {
-                       commits_on_stdin = 1;
-               }
-               arg++;
-       }
-       if (argc < arg + 2 - commits_on_stdin) {
-               usage("git-http-fetch [-c] [-t] [-a] [-v] [--recover] [-w ref] [--stdin] commit-id url");
-               return 1;
-       }
-       if (commits_on_stdin) {
-               commits = pull_targets_stdin(&commit_id, &write_ref);
-       } else {
-               commit_id = (char **) &argv[arg++];
-               commits = 1;
-       }
-       url = argv[arg];
-
-       http_init();
-
-       no_pragma_header = curl_slist_append(no_pragma_header, "Pragma:");
-
-       alt = xmalloc(sizeof(*alt));
-       alt->base = xmalloc(strlen(url) + 1);
-       strcpy(alt->base, url);
-       for (s = alt->base + strlen(alt->base) - 1; *s == '/'; --s)
-               *s = 0;
-       alt->got_indices = 0;
-       alt->packs = NULL;
-       alt->next = NULL;
-
-       if (pull(commits, commit_id, write_ref, url))
-               rc = 1;
-
-       http_cleanup();
-
-       curl_slist_free_all(no_pragma_header);
-
-       if (commits_on_stdin)
-               pull_targets_free(commits, commit_id, write_ref);
-
-       if (corrupt_object_found) {
-               fprintf(stderr,
-"Some loose object were found to be corrupt, but they might be just\n"
-"a false '404 Not Found' error message sent with incorrect HTTP\n"
-"status code.  Suggest running git-fsck.\n");
-       }
-       return rc;
-}
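
A recurring technique in the file removed above (and carried over into http-walker.c below) is resumable downloading: the pack or index is written to a "*.temp" file opened in append mode, the size already present is taken as the resume offset, and an HTTP Range request asks the server for the remaining bytes only. A simplified, self-contained libcurl sketch of the idea, assuming placeholder url/tmpfile arguments and using CURLOPT_RANGE instead of building the Range header by hand as the code above does:

#include <stdio.h>
#include <curl/curl.h>

/* Download url into tmpfile, resuming from whatever is already there. */
static int fetch_resumable(CURL *curl, const char *url, const char *tmpfile)
{
	char range[32];
	long prev_posn;
	FILE *out = fopen(tmpfile, "a");
	CURLcode rc;

	if (!out)
		return -1;
	prev_posn = ftell(out);		/* bytes left by an earlier, aborted run */
	curl_easy_setopt(curl, CURLOPT_URL, url);
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, out);
	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, fwrite);
	if (prev_posn > 0) {
		snprintf(range, sizeof(range), "%ld-", prev_posn);
		curl_easy_setopt(curl, CURLOPT_RANGE, range);	/* "N-" = resume at byte N */
	}
	rc = curl_easy_perform(curl);
	fclose(out);
	return rc == CURLE_OK ? 0 : -1;
}
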
diff --git a/http-push.c b/http-push.c
index 276e1eb1d913f5e8f274545495e39ed7530d5244..c02a3af63450fd7cf22118b481b4f0d9dd35b156 100644 (file)
--- a/http-push.c
+++ b/http-push.c
@@ -1,7 +1,6 @@
 #include "cache.h"
 #include "commit.h"
 #include "pack.h"
-#include "fetch.h"
 #include "tag.h"
 #include "blob.h"
 #include "http.h"
@@ -14,7 +13,7 @@
 #include <expat.h>
 
 static const char http_push_usage[] =
-"git-http-push [--all] [--force] [--verbose] <remote> [<head>...]\n";
+"git-http-push [--all] [--dry-run] [--force] [--verbose] <remote> [<head>...]\n";
 
 #ifndef XML_STATUS_OK
 enum XML_Status {
@@ -81,6 +80,7 @@ static struct curl_slist *default_headers;
 static int push_verbosely;
 static int push_all;
 static int force_all;
+static int dry_run;
 
 static struct object_list *objects;
 
@@ -795,38 +795,27 @@ static void finish_request(struct transfer_request *request)
 }
 
 #ifdef USE_CURL_MULTI
-void fill_active_slots(void)
+static int fill_active_slot(void *unused)
 {
        struct transfer_request *request = request_queue_head;
-       struct transfer_request *next;
-       struct active_request_slot *slot = active_queue_head;
-       int num_transfers;
 
        if (aborted)
-               return;
+               return 0;
 
-       while (active_requests < max_requests && request != NULL) {
-               next = request->next;
+       for (request = request_queue_head; request; request = request->next) {
                if (request->state == NEED_FETCH) {
                        start_fetch_loose(request);
+                       return 1;
                } else if (pushing && request->state == NEED_PUSH) {
                        if (remote_dir_exists[request->obj->sha1[0]] == 1) {
                                start_put(request);
                        } else {
                                start_mkcol(request);
                        }
-                       curl_multi_perform(curlm, &num_transfers);
-               }
-               request = next;
-       }
-
-       while (slot != NULL) {
-               if (!slot->in_use && slot->curl != NULL) {
-                       curl_easy_cleanup(slot->curl);
-                       slot->curl = NULL;
+                       return 1;
                }
-               slot = slot->next;
        }
+       return 0;
 }
 #endif
 
@@ -2314,6 +2303,10 @@ int main(int argc, char **argv)
                                force_all = 1;
                                continue;
                        }
+                       if (!strcmp(arg, "--dry-run")) {
+                               dry_run = 1;
+                               continue;
+                       }
                        if (!strcmp(arg, "--verbose")) {
                                push_verbosely = 1;
                                continue;
@@ -2455,7 +2448,8 @@ int main(int argc, char **argv)
                if (strcmp(ref->name, ref->peer_ref->name))
                        fprintf(stderr, " using '%s'", ref->peer_ref->name);
                fprintf(stderr, "\n  from %s\n  to   %s\n", old_hex, new_hex);
-
+               if (dry_run)
+                       continue;
 
                /* Lock remote branch ref */
                ref_lock = lock_remote(ref->name, LOCK_TIME);
@@ -2502,6 +2496,7 @@ int main(int argc, char **argv)
                                objects_to_send);
 #ifdef USE_CURL_MULTI
                fill_active_slots();
+               add_fill_function(NULL, fill_active_slot);
 #endif
                finish_all_active_slots();
 
@@ -2522,7 +2517,8 @@ int main(int argc, char **argv)
        if (remote->has_info_refs && new_refs) {
                if (info_ref_lock && remote->can_update_info_refs) {
                        fprintf(stderr, "Updating remote server info\n");
-                       update_remote_info_refs(info_ref_lock);
+                       if (!dry_run)
+                               update_remote_info_refs(info_ref_lock);
                } else {
                        fprintf(stderr, "Unable to update server info\n");
                }
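
The http-push.c changes above also drop its private fill_active_slots() loop in favour of a small callback, fill_active_slot(), registered through add_fill_function(); each call is expected to start at most one new request and return whether it did. The registration side lives in http.c, which is not shown in this part of the diff, so the following is only a sketch under that assumption of how such a fill-function list can keep the curl multi handle busy; active_requests and max_requests are the existing counters from the HTTP layer:

#include "cache.h"
#include "http.h"

/* Hypothetical sketch; not the http.c implementation from this commit. */
struct fill_entry {
	int (*fill)(void *data);	/* starts at most one request */
	void *data;
	struct fill_entry *next;
};

static struct fill_entry *fill_list;

void add_fill_function(void *data, int (*fill)(void *))
{
	struct fill_entry *e = xcalloc(1, sizeof(*e));
	e->fill = fill;
	e->data = data;
	e->next = fill_list;
	fill_list = e;
}

void fill_active_slots(void)
{
	struct fill_entry *e;

	for (e = fill_list; e; e = e->next)
		while (active_requests < max_requests && e->fill(e->data))
			; /* keep starting requests until this source is done */
}
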
diff --git a/http-walker.c b/http-walker.c
new file mode 100644 (file)
index 0000000..444aebf
--- /dev/null
+++ b/http-walker.c
@@ -0,0 +1,1035 @@
+#include "cache.h"
+#include "commit.h"
+#include "pack.h"
+#include "walker.h"
+#include "http.h"
+
+#define PREV_BUF_SIZE 4096
+#define RANGE_HEADER_SIZE 30
+
+struct alt_base
+{
+       char *base;
+       int got_indices;
+       struct packed_git *packs;
+       struct alt_base *next;
+};
+
+enum object_request_state {
+       WAITING,
+       ABORTED,
+       ACTIVE,
+       COMPLETE,
+};
+
+struct object_request
+{
+       struct walker *walker;
+       unsigned char sha1[20];
+       struct alt_base *repo;
+       char *url;
+       char filename[PATH_MAX];
+       char tmpfile[PATH_MAX];
+       int local;
+       enum object_request_state state;
+       CURLcode curl_result;
+       char errorstr[CURL_ERROR_SIZE];
+       long http_code;
+       unsigned char real_sha1[20];
+       SHA_CTX c;
+       z_stream stream;
+       int zret;
+       int rename;
+       struct active_request_slot *slot;
+       struct object_request *next;
+};
+
+struct alternates_request {
+       struct walker *walker;
+       const char *base;
+       char *url;
+       struct buffer *buffer;
+       struct active_request_slot *slot;
+       int http_specific;
+};
+
+struct walker_data {
+       const char *url;
+       int got_alternates;
+       struct alt_base *alt;
+       struct curl_slist *no_pragma_header;
+};
+
+static struct object_request *object_queue_head;
+
+static size_t fwrite_sha1_file(void *ptr, size_t eltsize, size_t nmemb,
+                              void *data)
+{
+       unsigned char expn[4096];
+       size_t size = eltsize * nmemb;
+       int posn = 0;
+       struct object_request *obj_req = (struct object_request *)data;
+       do {
+               ssize_t retval = xwrite(obj_req->local,
+                                      (char *) ptr + posn, size - posn);
+               if (retval < 0)
+                       return posn;
+               posn += retval;
+       } while (posn < size);
+
+       obj_req->stream.avail_in = size;
+       obj_req->stream.next_in = ptr;
+       do {
+               obj_req->stream.next_out = expn;
+               obj_req->stream.avail_out = sizeof(expn);
+               obj_req->zret = inflate(&obj_req->stream, Z_SYNC_FLUSH);
+               SHA1_Update(&obj_req->c, expn,
+                           sizeof(expn) - obj_req->stream.avail_out);
+       } while (obj_req->stream.avail_in && obj_req->zret == Z_OK);
+       data_received++;
+       return size;
+}
+
+static int missing__target(int code, int result)
+{
+       return  /* file:// URL -- do we ever use one??? */
+               (result == CURLE_FILE_COULDNT_READ_FILE) ||
+               /* http:// and https:// URL */
+               (code == 404 && result == CURLE_HTTP_RETURNED_ERROR) ||
+               /* ftp:// URL */
+               (code == 550 && result == CURLE_FTP_COULDNT_RETR_FILE)
+               ;
+}
+
+#define missing_target(a) missing__target((a)->http_code, (a)->curl_result)
+
+static void fetch_alternates(struct walker *walker, const char *base);
+
+static void process_object_response(void *callback_data);
+
+static void start_object_request(struct walker *walker,
+                                struct object_request *obj_req)
+{
+       char *hex = sha1_to_hex(obj_req->sha1);
+       char prevfile[PATH_MAX];
+       char *url;
+       char *posn;
+       int prevlocal;
+       unsigned char prev_buf[PREV_BUF_SIZE];
+       ssize_t prev_read = 0;
+       long prev_posn = 0;
+       char range[RANGE_HEADER_SIZE];
+       struct curl_slist *range_header = NULL;
+       struct active_request_slot *slot;
+       struct walker_data *data = walker->data;
+
+       snprintf(prevfile, sizeof(prevfile), "%s.prev", obj_req->filename);
+       unlink(prevfile);
+       rename(obj_req->tmpfile, prevfile);
+       unlink(obj_req->tmpfile);
+
+       if (obj_req->local != -1)
+               error("fd leakage in start: %d", obj_req->local);
+       obj_req->local = open(obj_req->tmpfile,
+                             O_WRONLY | O_CREAT | O_EXCL, 0666);
+       /* This could have failed due to the "lazy directory creation";
+        * try to mkdir the last path component.
+        */
+       if (obj_req->local < 0 && errno == ENOENT) {
+               char *dir = strrchr(obj_req->tmpfile, '/');
+               if (dir) {
+                       *dir = 0;
+                       mkdir(obj_req->tmpfile, 0777);
+                       *dir = '/';
+               }
+               obj_req->local = open(obj_req->tmpfile,
+                                     O_WRONLY | O_CREAT | O_EXCL, 0666);
+       }
+
+       if (obj_req->local < 0) {
+               obj_req->state = ABORTED;
+               error("Couldn't create temporary file %s for %s: %s",
+                     obj_req->tmpfile, obj_req->filename, strerror(errno));
+               return;
+       }
+
+       memset(&obj_req->stream, 0, sizeof(obj_req->stream));
+
+       inflateInit(&obj_req->stream);
+
+       SHA1_Init(&obj_req->c);
+
+       url = xmalloc(strlen(obj_req->repo->base) + 51);
+       obj_req->url = xmalloc(strlen(obj_req->repo->base) + 51);
+       strcpy(url, obj_req->repo->base);
+       posn = url + strlen(obj_req->repo->base);
+       strcpy(posn, "/objects/");
+       posn += 9;
+       memcpy(posn, hex, 2);
+       posn += 2;
+       *(posn++) = '/';
+       strcpy(posn, hex + 2);
+       strcpy(obj_req->url, url);
+
+       /* If a previous temp file is present, process what was already
+          fetched. */
+       prevlocal = open(prevfile, O_RDONLY);
+       if (prevlocal != -1) {
+               do {
+                       prev_read = xread(prevlocal, prev_buf, PREV_BUF_SIZE);
+                       if (prev_read>0) {
+                               if (fwrite_sha1_file(prev_buf,
+                                                    1,
+                                                    prev_read,
+                                                    obj_req) == prev_read) {
+                                       prev_posn += prev_read;
+                               } else {
+                                       prev_read = -1;
+                               }
+                       }
+               } while (prev_read > 0);
+               close(prevlocal);
+       }
+       unlink(prevfile);
+
+       /* Reset inflate/SHA1 if there was an error reading the previous temp
+          file; also rewind to the beginning of the local file. */
+       if (prev_read == -1) {
+               memset(&obj_req->stream, 0, sizeof(obj_req->stream));
+               inflateInit(&obj_req->stream);
+               SHA1_Init(&obj_req->c);
+               if (prev_posn>0) {
+                       prev_posn = 0;
+                       lseek(obj_req->local, 0, SEEK_SET);
+                       ftruncate(obj_req->local, 0);
+               }
+       }
+
+       slot = get_active_slot();
+       slot->callback_func = process_object_response;
+       slot->callback_data = obj_req;
+       obj_req->slot = slot;
+
+       curl_easy_setopt(slot->curl, CURLOPT_FILE, obj_req);
+       curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_sha1_file);
+       curl_easy_setopt(slot->curl, CURLOPT_ERRORBUFFER, obj_req->errorstr);
+       curl_easy_setopt(slot->curl, CURLOPT_URL, url);
+       curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header);
+
+       /* If we have successfully processed data from a previous fetch
+          attempt, only fetch the data we don't already have. */
+       if (prev_posn>0) {
+               if (walker->get_verbosely)
+                       fprintf(stderr,
+                               "Resuming fetch of object %s at byte %ld\n",
+                               hex, prev_posn);
+               sprintf(range, "Range: bytes=%ld-", prev_posn);
+               range_header = curl_slist_append(range_header, range);
+               curl_easy_setopt(slot->curl,
+                                CURLOPT_HTTPHEADER, range_header);
+       }
+
+       /* Try to get the request started, abort the request on error */
+       obj_req->state = ACTIVE;
+       if (!start_active_slot(slot)) {
+               obj_req->state = ABORTED;
+               obj_req->slot = NULL;
+               close(obj_req->local); obj_req->local = -1;
+               free(obj_req->url);
+               return;
+       }
+}
+
+static void finish_object_request(struct object_request *obj_req)
+{
+       struct stat st;
+
+       fchmod(obj_req->local, 0444);
+       close(obj_req->local); obj_req->local = -1;
+
+       if (obj_req->http_code == 416) {
+               fprintf(stderr, "Warning: requested range invalid; we may already have all the data.\n");
+       } else if (obj_req->curl_result != CURLE_OK) {
+               if (stat(obj_req->tmpfile, &st) == 0)
+                       if (st.st_size == 0)
+                               unlink(obj_req->tmpfile);
+               return;
+       }
+
+       inflateEnd(&obj_req->stream);
+       SHA1_Final(obj_req->real_sha1, &obj_req->c);
+       if (obj_req->zret != Z_STREAM_END) {
+               unlink(obj_req->tmpfile);
+               return;
+       }
+       if (hashcmp(obj_req->sha1, obj_req->real_sha1)) {
+               unlink(obj_req->tmpfile);
+               return;
+       }
+       obj_req->rename =
+               move_temp_to_file(obj_req->tmpfile, obj_req->filename);
+
+       if (obj_req->rename == 0)
+               walker_say(obj_req->walker, "got %s\n", sha1_to_hex(obj_req->sha1));
+}
+
+static void process_object_response(void *callback_data)
+{
+       struct object_request *obj_req =
+               (struct object_request *)callback_data;
+       struct walker *walker = obj_req->walker;
+       struct walker_data *data = walker->data;
+       struct alt_base *alt = data->alt;
+
+       obj_req->curl_result = obj_req->slot->curl_result;
+       obj_req->http_code = obj_req->slot->http_code;
+       obj_req->slot = NULL;
+       obj_req->state = COMPLETE;
+
+       /* Use alternates if necessary */
+       if (missing_target(obj_req)) {
+               fetch_alternates(walker, alt->base);
+               if (obj_req->repo->next != NULL) {
+                       obj_req->repo =
+                               obj_req->repo->next;
+                       close(obj_req->local);
+                       obj_req->local = -1;
+                       start_object_request(walker, obj_req);
+                       return;
+               }
+       }
+
+       finish_object_request(obj_req);
+}
+
+static void release_object_request(struct object_request *obj_req)
+{
+       struct object_request *entry = object_queue_head;
+
+       if (obj_req->local != -1)
+               error("fd leakage in release: %d", obj_req->local);
+       if (obj_req == object_queue_head) {
+               object_queue_head = obj_req->next;
+       } else {
+               while (entry->next != NULL && entry->next != obj_req)
+                       entry = entry->next;
+               if (entry->next == obj_req)
+                       entry->next = entry->next->next;
+       }
+
+       free(obj_req->url);
+       free(obj_req);
+}
+
+#ifdef USE_CURL_MULTI
+static int fill_active_slot(struct walker *walker)
+{
+       struct object_request *obj_req;
+
+       for (obj_req = object_queue_head; obj_req; obj_req = obj_req->next) {
+               if (obj_req->state == WAITING) {
+                       if (has_sha1_file(obj_req->sha1))
+                               obj_req->state = COMPLETE;
+                       else {
+                               start_object_request(walker, obj_req);
+                               return 1;
+                       }
+               }
+       }
+       return 0;
+}
+#endif
+
+static void prefetch(struct walker *walker, unsigned char *sha1)
+{
+       struct object_request *newreq;
+       struct object_request *tail;
+       struct walker_data *data = walker->data;
+       char *filename = sha1_file_name(sha1);
+
+       newreq = xmalloc(sizeof(*newreq));
+       newreq->walker = walker;
+       hashcpy(newreq->sha1, sha1);
+       newreq->repo = data->alt;
+       newreq->url = NULL;
+       newreq->local = -1;
+       newreq->state = WAITING;
+       snprintf(newreq->filename, sizeof(newreq->filename), "%s", filename);
+       snprintf(newreq->tmpfile, sizeof(newreq->tmpfile),
+                "%s.temp", filename);
+       newreq->slot = NULL;
+       newreq->next = NULL;
+
+       if (object_queue_head == NULL) {
+               object_queue_head = newreq;
+       } else {
+               tail = object_queue_head;
+               while (tail->next != NULL) {
+                       tail = tail->next;
+               }
+               tail->next = newreq;
+       }
+
+#ifdef USE_CURL_MULTI
+       fill_active_slots();
+       step_active_slots();
+#endif
+}
+
+static int fetch_index(struct walker *walker, struct alt_base *repo, unsigned char *sha1)
+{
+       char *hex = sha1_to_hex(sha1);
+       char *filename;
+       char *url;
+       char tmpfile[PATH_MAX];
+       long prev_posn = 0;
+       char range[RANGE_HEADER_SIZE];
+       struct curl_slist *range_header = NULL;
+       struct walker_data *data = walker->data;
+
+       FILE *indexfile;
+       struct active_request_slot *slot;
+       struct slot_results results;
+
+       if (has_pack_index(sha1))
+               return 0;
+
+       if (walker->get_verbosely)
+               fprintf(stderr, "Getting index for pack %s\n", hex);
+
+       url = xmalloc(strlen(repo->base) + 64);
+       sprintf(url, "%s/objects/pack/pack-%s.idx", repo->base, hex);
+
+       filename = sha1_pack_index_name(sha1);
+       snprintf(tmpfile, sizeof(tmpfile), "%s.temp", filename);
+       indexfile = fopen(tmpfile, "a");
+       if (!indexfile)
+               return error("Unable to open local file %s for pack index",
+                            filename);
+
+       slot = get_active_slot();
+       slot->results = &results;
+       curl_easy_setopt(slot->curl, CURLOPT_FILE, indexfile);
+       curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite);
+       curl_easy_setopt(slot->curl, CURLOPT_URL, url);
+       curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header);
+       slot->local = indexfile;
+
+       /* If there is data present from a previous transfer attempt,
+          resume where it left off */
+       prev_posn = ftell(indexfile);
+       if (prev_posn>0) {
+               if (walker->get_verbosely)
+                       fprintf(stderr,
+                               "Resuming fetch of index for pack %s at byte %ld\n",
+                               hex, prev_posn);
+               sprintf(range, "Range: bytes=%ld-", prev_posn);
+               range_header = curl_slist_append(range_header, range);
+               curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, range_header);
+       }
+
+       if (start_active_slot(slot)) {
+               run_active_slot(slot);
+               if (results.curl_result != CURLE_OK) {
+                       fclose(indexfile);
+                       return error("Unable to get pack index %s\n%s", url,
+                                    curl_errorstr);
+               }
+       } else {
+               fclose(indexfile);
+               return error("Unable to start request");
+       }
+
+       fclose(indexfile);
+
+       return move_temp_to_file(tmpfile, filename);
+}
+
+static int setup_index(struct walker *walker, struct alt_base *repo, unsigned char *sha1)
+{
+       struct packed_git *new_pack;
+       if (has_pack_file(sha1))
+               return 0; /* don't list this as something we can get */
+
+       if (fetch_index(walker, repo, sha1))
+               return -1;
+
+       new_pack = parse_pack_index(sha1);
+       new_pack->next = repo->packs;
+       repo->packs = new_pack;
+       return 0;
+}
+
+static void process_alternates_response(void *callback_data)
+{
+       struct alternates_request *alt_req =
+               (struct alternates_request *)callback_data;
+       struct walker *walker = alt_req->walker;
+       struct walker_data *cdata = walker->data;
+       struct active_request_slot *slot = alt_req->slot;
+       struct alt_base *tail = cdata->alt;
+       const char *base = alt_req->base;
+       static const char null_byte = '\0';
+       char *data;
+       int i = 0;
+
+       if (alt_req->http_specific) {
+               if (slot->curl_result != CURLE_OK ||
+                   !alt_req->buffer->posn) {
+
+                       /* Try reusing the slot to get non-http alternates */
+                       alt_req->http_specific = 0;
+                       sprintf(alt_req->url, "%s/objects/info/alternates",
+                               base);
+                       curl_easy_setopt(slot->curl, CURLOPT_URL,
+                                        alt_req->url);
+                       active_requests++;
+                       slot->in_use = 1;
+                       if (slot->finished != NULL)
+                               (*slot->finished) = 0;
+                       if (!start_active_slot(slot)) {
+                               cdata->got_alternates = -1;
+                               slot->in_use = 0;
+                               if (slot->finished != NULL)
+                                       (*slot->finished) = 1;
+                       }
+                       return;
+               }
+       } else if (slot->curl_result != CURLE_OK) {
+               if (!missing_target(slot)) {
+                       cdata->got_alternates = -1;
+                       return;
+               }
+       }
+
+       fwrite_buffer(&null_byte, 1, 1, alt_req->buffer);
+       alt_req->buffer->posn--;
+       data = alt_req->buffer->buffer;
+
+       while (i < alt_req->buffer->posn) {
+               int posn = i;
+               while (posn < alt_req->buffer->posn && data[posn] != '\n')
+                       posn++;
+               if (data[posn] == '\n') {
+                       int okay = 0;
+                       int serverlen = 0;
+                       struct alt_base *newalt;
+                       char *target = NULL;
+                       if (data[i] == '/') {
+                               /* This counts
+                                * http://git.host/pub/scm/linux.git/
+                                * -----------here^
+                                * so memcpy(dst, base, serverlen) will
+                                * copy up to "...git.host".
+                                */
+                               const char *colon_ss = strstr(base,"://");
+                               if (colon_ss) {
+                                       serverlen = (strchr(colon_ss + 3, '/')
+                                                    - base);
+                                       okay = 1;
+                               }
+                       } else if (!memcmp(data + i, "../", 3)) {
+                               /* Relative URL; chop the corresponding
+                                * number of path components from base (and ../
+                                * from data), and concatenate the result.
+                                *
+                                * The code first drops ../ from data, and
+                                * then drops one ../ from data and one path
+                                * component from base.  IOW, data loses one
+                                * more ../ than base loses path components.
+                                *
+                                * This is not wrong.  The alternate in
+                                *     http://git.host/pub/scm/linux.git/
+                                * to borrow from
+                                *     http://git.host/pub/scm/linus.git/
+                                * is ../../linus.git/objects/.  You need
+                                * two ../../ to borrow from your direct
+                                * neighbour.
+                                */
+                               i += 3;
+                               serverlen = strlen(base);
+                               while (i + 2 < posn &&
+                                      !memcmp(data + i, "../", 3)) {
+                                       do {
+                                               serverlen--;
+                                       } while (serverlen &&
+                                                base[serverlen - 1] != '/');
+                                       i += 3;
+                               }
+                               /* If the server got removed, give up. */
+                               okay = strchr(base, ':') - base + 3 <
+                                       serverlen;
+                       } else if (alt_req->http_specific) {
+                               char *colon = strchr(data + i, ':');
+                               char *slash = strchr(data + i, '/');
+                               if (colon && slash && colon < data + posn &&
+                                   slash < data + posn && colon < slash) {
+                                       okay = 1;
+                               }
+                       }
+                       /* skip "objects\n" at end */
+                       if (okay) {
+                               target = xmalloc(serverlen + posn - i - 6);
+                               memcpy(target, base, serverlen);
+                               memcpy(target + serverlen, data + i,
+                                      posn - i - 7);
+                               target[serverlen + posn - i - 7] = 0;
+                               if (walker->get_verbosely)
+                                       fprintf(stderr,
+                                               "Also look at %s\n", target);
+                               newalt = xmalloc(sizeof(*newalt));
+                               newalt->next = NULL;
+                               newalt->base = target;
+                               newalt->got_indices = 0;
+                               newalt->packs = NULL;
+
+                               while (tail->next != NULL)
+                                       tail = tail->next;
+                               tail->next = newalt;
+                       }
+               }
+               i = posn + 1;
+       }
+
+       cdata->got_alternates = 1;
+}
+
+static void fetch_alternates(struct walker *walker, const char *base)
+{
+       struct buffer buffer;
+       char *url;
+       char *data;
+       struct active_request_slot *slot;
+       struct alternates_request alt_req;
+       struct walker_data *cdata = walker->data;
+
+       /* If another request has already started fetching alternates,
+          wait for them to arrive and return to processing this request's
+          curl message */
+#ifdef USE_CURL_MULTI
+       while (cdata->got_alternates == 0) {
+               step_active_slots();
+       }
+#endif
+
+       /* Nothing to do if they've already been fetched */
+       if (cdata->got_alternates == 1)
+               return;
+
+       /* Start the fetch */
+       cdata->got_alternates = 0;
+
+       data = xmalloc(4096);
+       buffer.size = 4096;
+       buffer.posn = 0;
+       buffer.buffer = data;
+
+       if (walker->get_verbosely)
+               fprintf(stderr, "Getting alternates list for %s\n", base);
+
+       url = xmalloc(strlen(base) + 31);
+       sprintf(url, "%s/objects/info/http-alternates", base);
+
+       /* Use a callback to process the result, since another request
+          may fail and need to have alternates loaded before continuing */
+       slot = get_active_slot();
+       slot->callback_func = process_alternates_response;
+       alt_req.walker = walker;
+       slot->callback_data = &alt_req;
+
+       curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer);
+       curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer);
+       curl_easy_setopt(slot->curl, CURLOPT_URL, url);
+
+       alt_req.base = base;
+       alt_req.url = url;
+       alt_req.buffer = &buffer;
+       alt_req.http_specific = 1;
+       alt_req.slot = slot;
+
+       if (start_active_slot(slot))
+               run_active_slot(slot);
+       else
+               cdata->got_alternates = -1;
+
+       free(data);
+       free(url);
+}
+
+static int fetch_indices(struct walker *walker, struct alt_base *repo)
+{
+       unsigned char sha1[20];
+       char *url;
+       struct buffer buffer;
+       char *data;
+       int i = 0;
+
+       struct active_request_slot *slot;
+       struct slot_results results;
+
+       if (repo->got_indices)
+               return 0;
+
+       data = xmalloc(4096);
+       buffer.size = 4096;
+       buffer.posn = 0;
+       buffer.buffer = data;
+
+       if (walker->get_verbosely)
+               fprintf(stderr, "Getting pack list for %s\n", repo->base);
+
+       url = xmalloc(strlen(repo->base) + 21);
+       sprintf(url, "%s/objects/info/packs", repo->base);
+
+       slot = get_active_slot();
+       slot->results = &results;
+       curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer);
+       curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer);
+       curl_easy_setopt(slot->curl, CURLOPT_URL, url);
+       curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, NULL);
+       if (start_active_slot(slot)) {
+               run_active_slot(slot);
+               if (results.curl_result != CURLE_OK) {
+                       if (missing_target(&results)) {
+                               repo->got_indices = 1;
+                               free(buffer.buffer);
+                               return 0;
+                       } else {
+                               repo->got_indices = 0;
+                               free(buffer.buffer);
+                               return error("%s", curl_errorstr);
+                       }
+               }
+       } else {
+               repo->got_indices = 0;
+               free(buffer.buffer);
+               return error("Unable to start request");
+       }
+
+       data = buffer.buffer;
+       while (i < buffer.posn) {
+               switch (data[i]) {
+               case 'P':
+                       i++;
+                       if (i + 52 <= buffer.posn &&
+                           !prefixcmp(data + i, " pack-") &&
+                           !prefixcmp(data + i + 46, ".pack\n")) {
+                               get_sha1_hex(data + i + 6, sha1);
+                               setup_index(walker, repo, sha1);
+                               i += 51;
+                               break;
+                       }
+               default:
+                       while (i < buffer.posn && data[i] != '\n')
+                               i++;
+               }
+               i++;
+       }
+
+       free(buffer.buffer);
+       repo->got_indices = 1;
+       return 0;
+}
+
+static int fetch_pack(struct walker *walker, struct alt_base *repo, unsigned char *sha1)
+{
+       char *url;
+       struct packed_git *target;
+       struct packed_git **lst;
+       FILE *packfile;
+       char *filename;
+       char tmpfile[PATH_MAX];
+       int ret;
+       long prev_posn = 0;
+       char range[RANGE_HEADER_SIZE];
+       struct curl_slist *range_header = NULL;
+       struct walker_data *data = walker->data;
+
+       struct active_request_slot *slot;
+       struct slot_results results;
+
+       if (fetch_indices(walker, repo))
+               return -1;
+       target = find_sha1_pack(sha1, repo->packs);
+       if (!target)
+               return -1;
+
+       if (walker->get_verbosely) {
+               fprintf(stderr, "Getting pack %s\n",
+                       sha1_to_hex(target->sha1));
+               fprintf(stderr, " which contains %s\n",
+                       sha1_to_hex(sha1));
+       }
+
+       url = xmalloc(strlen(repo->base) + 65);
+       sprintf(url, "%s/objects/pack/pack-%s.pack",
+               repo->base, sha1_to_hex(target->sha1));
+
+       filename = sha1_pack_name(target->sha1);
+       snprintf(tmpfile, sizeof(tmpfile), "%s.temp", filename);
+       packfile = fopen(tmpfile, "a");
+       if (!packfile)
+               return error("Unable to open local file %s for pack",
+                            filename);
+
+       slot = get_active_slot();
+       slot->results = &results;
+       curl_easy_setopt(slot->curl, CURLOPT_FILE, packfile);
+       curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite);
+       curl_easy_setopt(slot->curl, CURLOPT_URL, url);
+       curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, data->no_pragma_header);
+       slot->local = packfile;
+
+       /* If there is data present from a previous transfer attempt,
+          resume where it left off */
+       prev_posn = ftell(packfile);
+       if (prev_posn>0) {
+               if (walker->get_verbosely)
+                       fprintf(stderr,
+                               "Resuming fetch of pack %s at byte %ld\n",
+                               sha1_to_hex(target->sha1), prev_posn);
+               sprintf(range, "Range: bytes=%ld-", prev_posn);
+               range_header = curl_slist_append(range_header, range);
+               curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, range_header);
+       }
+
+       if (start_active_slot(slot)) {
+               run_active_slot(slot);
+               if (results.curl_result != CURLE_OK) {
+                       fclose(packfile);
+                       return error("Unable to get pack file %s\n%s", url,
+                                    curl_errorstr);
+               }
+       } else {
+               fclose(packfile);
+               return error("Unable to start request");
+       }
+
+       target->pack_size = ftell(packfile);
+       fclose(packfile);
+
+       ret = move_temp_to_file(tmpfile, filename);
+       if (ret)
+               return ret;
+
+       lst = &repo->packs;
+       while (*lst != target)
+               lst = &((*lst)->next);
+       *lst = (*lst)->next;
+
+       if (verify_pack(target, 0))
+               return -1;
+       install_packed_git(target);
+
+       return 0;
+}
+
+static void abort_object_request(struct object_request *obj_req)
+{
+       if (obj_req->local >= 0) {
+               close(obj_req->local);
+               obj_req->local = -1;
+       }
+       unlink(obj_req->tmpfile);
+       if (obj_req->slot) {
+               release_active_slot(obj_req->slot);
+               obj_req->slot = NULL;
+       }
+       release_object_request(obj_req);
+}
+
+static int fetch_object(struct walker *walker, struct alt_base *repo, unsigned char *sha1)
+{
+       char *hex = sha1_to_hex(sha1);
+       int ret = 0;
+       struct object_request *obj_req = object_queue_head;
+
+       while (obj_req != NULL && hashcmp(obj_req->sha1, sha1))
+               obj_req = obj_req->next;
+       if (obj_req == NULL)
+               return error("Couldn't find request for %s in the queue", hex);
+
+       if (has_sha1_file(obj_req->sha1)) {
+               abort_object_request(obj_req);
+               return 0;
+       }
+
+#ifdef USE_CURL_MULTI
+       while (obj_req->state == WAITING) {
+               step_active_slots();
+       }
+#else
+       start_object_request(walker, obj_req);
+#endif
+
+       while (obj_req->state == ACTIVE) {
+               run_active_slot(obj_req->slot);
+       }
+       if (obj_req->local != -1) {
+               close(obj_req->local); obj_req->local = -1;
+       }
+
+       if (obj_req->state == ABORTED) {
+               ret = error("Request for %s aborted", hex);
+       } else if (obj_req->curl_result != CURLE_OK &&
+                  obj_req->http_code != 416) {
+               if (missing_target(obj_req))
+                       ret = -1; /* Be silent, it is probably in a pack. */
+               else
+                       ret = error("%s (curl_result = %d, http_code = %ld, sha1 = %s)",
+                                   obj_req->errorstr, obj_req->curl_result,
+                                   obj_req->http_code, hex);
+       } else if (obj_req->zret != Z_STREAM_END) {
+               walker->corrupt_object_found++;
+               ret = error("File %s (%s) corrupt", hex, obj_req->url);
+       } else if (hashcmp(obj_req->sha1, obj_req->real_sha1)) {
+               ret = error("File %s has bad hash", hex);
+       } else if (obj_req->rename < 0) {
+               ret = error("unable to write sha1 filename %s",
+                           obj_req->filename);
+       }
+
+       release_object_request(obj_req);
+       return ret;
+}
+
+static int fetch(struct walker *walker, unsigned char *sha1)
+{
+       struct walker_data *data = walker->data;
+       struct alt_base *altbase = data->alt;
+
+       if (!fetch_object(walker, altbase, sha1))
+               return 0;
+       while (altbase) {
+               if (!fetch_pack(walker, altbase, sha1))
+                       return 0;
+               fetch_alternates(walker, data->alt->base);
+               altbase = altbase->next;
+       }
+       return error("Unable to find %s under %s", sha1_to_hex(sha1),
+                    data->alt->base);
+}
+
+static inline int needs_quote(int ch)
+{
+       if (((ch >= 'A') && (ch <= 'Z'))
+                       || ((ch >= 'a') && (ch <= 'z'))
+                       || ((ch >= '0') && (ch <= '9'))
+                       || (ch == '/')
+                       || (ch == '-')
+                       || (ch == '.'))
+               return 0;
+       return 1;
+}
+
+static inline int hex(int v)
+{
+       if (v < 10) return '0' + v;
+       else return 'A' + v - 10;
+}
+
+static char *quote_ref_url(const char *base, const char *ref)
+{
+       const char *cp;
+       char *dp, *qref;
+       int len, baselen, ch;
+
+       baselen = strlen(base);
+       len = baselen + 7; /* "/refs/" + NUL */
+       for (cp = ref; (ch = *cp) != 0; cp++, len++)
+               if (needs_quote(ch))
+                       len += 2; /* extra two hex plus replacement % */
+       qref = xmalloc(len);
+       memcpy(qref, base, baselen);
+       memcpy(qref + baselen, "/refs/", 6);
+       for (cp = ref, dp = qref + baselen + 6; (ch = *cp) != 0; cp++) {
+               if (needs_quote(ch)) {
+                       *dp++ = '%';
+                       *dp++ = hex((ch >> 4) & 0xF);
+                       *dp++ = hex(ch & 0xF);
+               }
+               else
+                       *dp++ = ch;
+       }
+       *dp = 0;
+
+       return qref;
+}
+
+static int fetch_ref(struct walker *walker, char *ref, unsigned char *sha1)
+{
+        char *url;
+        char hex[42];
+        struct buffer buffer;
+       struct walker_data *data = walker->data;
+       const char *base = data->alt->base;
+       struct active_request_slot *slot;
+       struct slot_results results;
+        buffer.size = 41;
+        buffer.posn = 0;
+        buffer.buffer = hex;
+        hex[41] = '\0';
+
+       url = quote_ref_url(base, ref);
+       slot = get_active_slot();
+       slot->results = &results;
+       curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer);
+       curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer);
+       curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, NULL);
+       curl_easy_setopt(slot->curl, CURLOPT_URL, url);
+       if (start_active_slot(slot)) {
+               run_active_slot(slot);
+               if (results.curl_result != CURLE_OK)
+                       return error("Couldn't get %s for %s\n%s",
+                                    url, ref, curl_errorstr);
+       } else {
+               return error("Unable to start request");
+       }
+
+        hex[40] = '\0';
+        get_sha1_hex(hex, sha1);
+        return 0;
+}
+
+static void cleanup(struct walker *walker)
+{
+       struct walker_data *data = walker->data;
+       http_cleanup();
+
+       curl_slist_free_all(data->no_pragma_header);
+}
+
+struct walker *get_http_walker(const char *url)
+{
+       char *s;
+       struct walker_data *data = xmalloc(sizeof(struct walker_data));
+       struct walker *walker = xmalloc(sizeof(struct walker));
+
+       http_init();
+
+       data->no_pragma_header = curl_slist_append(NULL, "Pragma:");
+
+       data->alt = xmalloc(sizeof(*data->alt));
+       data->alt->base = xmalloc(strlen(url) + 1);
+       strcpy(data->alt->base, url);
+       for (s = data->alt->base + strlen(data->alt->base) - 1; *s == '/'; --s)
+               *s = 0;
+
+       data->alt->got_indices = 0;
+       data->alt->packs = NULL;
+       data->alt->next = NULL;
+       data->got_alternates = -1;
+
+       walker->corrupt_object_found = 0;
+       walker->fetch = fetch;
+       walker->fetch_ref = fetch_ref;
+       walker->prefetch = prefetch;
+       walker->cleanup = cleanup;
+       walker->data = data;
+
+#ifdef USE_CURL_MULTI
+       add_fill_function(walker, (int (*)(void *)) fill_active_slot);
+#endif
+
+       return walker;
+}
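The ref lookup path above ends in quote_ref_url(), which builds "<base>/refs/<ref>" and percent-encodes every byte of the ref name outside alphanumerics, '/', '-' and '.'. The standalone sketch below repeats that escaping rule so it can be compiled and run on its own; the main() inputs are made up, and the helper is a simplified copy rather than the walker's own code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Bytes quote_ref_url() passes through unescaped. */
static int needs_quote(int ch)
{
        if ((ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') ||
            (ch >= '0' && ch <= '9') || ch == '/' || ch == '-' || ch == '.')
                return 0;
        return 1;
}

static int hexdigit(int v)
{
        return v < 10 ? '0' + v : 'A' + v - 10;
}

/* Build "<base>/refs/<ref>" with unsafe bytes written as %XX. */
static char *quote_ref(const char *base, const char *ref)
{
        const char *cp;
        char *dp, *qref;
        int ch, len = strlen(base) + 7;   /* "/refs/" + NUL */

        for (cp = ref; (ch = *cp) != 0; cp++, len++)
                if (needs_quote(ch))
                        len += 2;         /* "%XX" replaces one byte */
        qref = malloc(len);
        if (!qref)
                return NULL;
        dp = qref + sprintf(qref, "%s/refs/", base);
        for (cp = ref; (ch = *cp) != 0; cp++) {
                if (needs_quote(ch)) {
                        *dp++ = '%';
                        *dp++ = hexdigit((ch >> 4) & 0xF);
                        *dp++ = hexdigit(ch & 0xF);
                } else
                        *dp++ = ch;
        }
        *dp = '\0';
        return qref;
}

int main(void)
{
        char *url = quote_ref("http://example.org/repo.git",
                              "heads/topic branch");
        printf("%s\n", url);   /* ...repo.git/refs/heads/topic%20branch */
        free(url);
        return 0;
}

The length pre-computation mirrors the allocation in the original: one byte per input character, two extra for every escaped byte, plus "/refs/" and the terminating NUL.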
diff --git a/http.c b/http.c
index c6fb8ace9f9f43935f4128fc223b01e6cb9fa605..87ebf7b86548d229afbfd9263d2470296a7b2ac7 100644 (file)
--- a/http.c
+++ b/http.c
@@ -276,6 +276,7 @@ void http_cleanup(void)
 #endif
 
        while (slot != NULL) {
+               struct active_request_slot *next = slot->next;
 #ifdef USE_CURL_MULTI
                if (slot->in_use) {
                        curl_easy_getinfo(slot->curl,
@@ -287,8 +288,10 @@ void http_cleanup(void)
 #endif
                if (slot->curl != NULL)
                        curl_easy_cleanup(slot->curl);
-               slot = slot->next;
+               free(slot);
+               slot = next;
        }
+       active_queue_head = NULL;
 
 #ifndef NO_CURL_EASY_DUPHANDLE
        curl_easy_cleanup(curl_default);
@@ -300,7 +303,7 @@ void http_cleanup(void)
        curl_global_cleanup();
 
        curl_slist_free_all(pragma_header);
-        pragma_header = NULL;
+       pragma_header = NULL;
 }
 
 struct active_request_slot *get_active_slot(void)
@@ -372,6 +375,7 @@ int start_active_slot(struct active_request_slot *slot)
 {
 #ifdef USE_CURL_MULTI
        CURLMcode curlm_result = curl_multi_add_handle(curlm, slot->curl);
+       int num_transfers;
 
        if (curlm_result != CURLM_OK &&
            curlm_result != CURLM_CALL_MULTI_PERFORM) {
@@ -379,11 +383,60 @@ int start_active_slot(struct active_request_slot *slot)
                slot->in_use = 0;
                return 0;
        }
+
+       /*
+        * We know there must be something to do, since we just added
+        * something.
+        */
+       curl_multi_perform(curlm, &num_transfers);
 #endif
        return 1;
 }
 
 #ifdef USE_CURL_MULTI
+struct fill_chain {
+       void *data;
+       int (*fill)(void *);
+       struct fill_chain *next;
+};
+
+static struct fill_chain *fill_cfg = NULL;
+
+void add_fill_function(void *data, int (*fill)(void *))
+{
+       struct fill_chain *new = malloc(sizeof(*new));
+       struct fill_chain **linkp = &fill_cfg;
+       new->data = data;
+       new->fill = fill;
+       new->next = NULL;
+       while (*linkp)
+               linkp = &(*linkp)->next;
+       *linkp = new;
+}
+
+void fill_active_slots(void)
+{
+       struct active_request_slot *slot = active_queue_head;
+
+       while (active_requests < max_requests) {
+               struct fill_chain *fill;
+               for (fill = fill_cfg; fill; fill = fill->next)
+                       if (fill->fill(fill->data))
+                               break;
+
+               if (!fill)
+                       break;
+       }
+
+       while (slot != NULL) {
+               if (!slot->in_use && slot->curl != NULL) {
+                       curl_easy_cleanup(slot->curl);
+                       slot->curl = NULL;
+               }
+               slot = slot->next;
+       }
+}
+
 void step_active_slots(void)
 {
        int num_transfers;
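add_fill_function() and fill_active_slots() above amount to a small callback registry: each transport appends a "fill" callback, and fill_active_slots() keeps polling the registered callbacks until max_requests transfers are in flight or none of them has anything left to start. The curl-free sketch below shows only that control flow; the counters and the start_one() producer are invented for the demo and are not the statics in http.c.

#include <stdio.h>
#include <stdlib.h>

struct fill_chain {
        void *data;
        int (*fill)(void *data);        /* returns 1 if it started work */
        struct fill_chain *next;
};

static struct fill_chain *fill_cfg;
static int active_requests;
static int max_requests = 2;

static void add_fill_function(void *data, int (*fill)(void *))
{
        struct fill_chain *new = malloc(sizeof(*new));
        struct fill_chain **linkp = &fill_cfg;

        if (!new)
                exit(1);                /* toy program: just give up */
        new->data = data;
        new->fill = fill;
        new->next = NULL;
        while (*linkp)                  /* append, keeping registration order */
                linkp = &(*linkp)->next;
        *linkp = new;
}

static void fill_active_slots(void)
{
        while (active_requests < max_requests) {
                struct fill_chain *fill;

                for (fill = fill_cfg; fill; fill = fill->next)
                        if (fill->fill(fill->data))
                                break;
                if (!fill)              /* nobody had more work to start */
                        break;
        }
}

/* Pretend producer: starts one request per call while any are queued. */
static int start_one(void *data)
{
        int *queued = data;

        if (!*queued)
                return 0;
        (*queued)--;
        active_requests++;
        printf("started a request, %d still queued\n", *queued);
        return 1;
}

int main(void)
{
        int queued = 3;

        add_fill_function(&queued, start_one);
        fill_active_slots();            /* starts at most max_requests */
        return 0;
}

Appending at the tail rather than pushing at the head keeps the callbacks polled in the order they were registered, which matches how add_fill_function() above walks to the end of the chain.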
diff --git a/http.h b/http.h
index 69b6b667d956933eca7153b51867493d7271df0b..72abac20f856b45c873cc370f23c7df08b650370 100644 (file)
--- a/http.h
+++ b/http.h
@@ -70,6 +70,7 @@ extern void release_active_slot(struct active_request_slot *slot);
 
 #ifdef USE_CURL_MULTI
 extern void fill_active_slots(void);
+extern void add_fill_function(void *data, int (*fill)(void *));
 extern void step_active_slots(void);
 #endif
 
@@ -79,10 +80,6 @@ extern void http_cleanup(void);
 extern int data_received;
 extern int active_requests;
 
-#ifdef USE_CURL_MULTI
-extern int max_requests;
-extern CURLM *curlm;
-#endif
 #ifndef NO_CURL_EASY_DUPHANDLE
 extern CURL *curl_default;
 #endif
@@ -103,6 +100,4 @@ extern long curl_low_speed_time;
 extern struct curl_slist *pragma_header;
 extern struct curl_slist *no_range_header;
 
-extern struct active_request_slot *active_queue_head;
-
 #endif /* HTTP_H */
diff --git a/local-fetch.c b/local-fetch.c
deleted file mode 100644 (file)
index bf7ec6c..0000000
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Copyright (C) 2005 Junio C Hamano
- */
-#include "cache.h"
-#include "commit.h"
-#include "fetch.h"
-
-static int use_link;
-static int use_symlink;
-static int use_filecopy = 1;
-static int commits_on_stdin;
-
-static const char *path; /* "Remote" git repository */
-
-void prefetch(unsigned char *sha1)
-{
-}
-
-static struct packed_git *packs;
-
-static void setup_index(unsigned char *sha1)
-{
-       struct packed_git *new_pack;
-       char filename[PATH_MAX];
-       strcpy(filename, path);
-       strcat(filename, "/objects/pack/pack-");
-       strcat(filename, sha1_to_hex(sha1));
-       strcat(filename, ".idx");
-       new_pack = parse_pack_index_file(sha1, filename);
-       new_pack->next = packs;
-       packs = new_pack;
-}
-
-static int setup_indices(void)
-{
-       DIR *dir;
-       struct dirent *de;
-       char filename[PATH_MAX];
-       unsigned char sha1[20];
-       sprintf(filename, "%s/objects/pack/", path);
-       dir = opendir(filename);
-       if (!dir)
-               return -1;
-       while ((de = readdir(dir)) != NULL) {
-               int namelen = strlen(de->d_name);
-               if (namelen != 50 ||
-                   !has_extension(de->d_name, ".pack"))
-                       continue;
-               get_sha1_hex(de->d_name + 5, sha1);
-               setup_index(sha1);
-       }
-       closedir(dir);
-       return 0;
-}
-
-static int copy_file(const char *source, char *dest, const char *hex,
-                    int warn_if_not_exists)
-{
-       safe_create_leading_directories(dest);
-       if (use_link) {
-               if (!link(source, dest)) {
-                       pull_say("link %s\n", hex);
-                       return 0;
-               }
-               /* If we got ENOENT there is no point continuing. */
-               if (errno == ENOENT) {
-                       if (!warn_if_not_exists)
-                               return -1;
-                       return error("does not exist %s", source);
-               }
-       }
-       if (use_symlink) {
-               struct stat st;
-               if (stat(source, &st)) {
-                       if (!warn_if_not_exists && errno == ENOENT)
-                               return -1;
-                       return error("cannot stat %s: %s", source,
-                                    strerror(errno));
-               }
-               if (!symlink(source, dest)) {
-                       pull_say("symlink %s\n", hex);
-                       return 0;
-               }
-       }
-       if (use_filecopy) {
-               int ifd, ofd, status = 0;
-
-               ifd = open(source, O_RDONLY);
-               if (ifd < 0) {
-                       if (!warn_if_not_exists && errno == ENOENT)
-                               return -1;
-                       return error("cannot open %s", source);
-               }
-               ofd = open(dest, O_WRONLY | O_CREAT | O_EXCL, 0666);
-               if (ofd < 0) {
-                       close(ifd);
-                       return error("cannot open %s", dest);
-               }
-               status = copy_fd(ifd, ofd);
-               close(ofd);
-               if (status)
-                       return error("cannot write %s", dest);
-               pull_say("copy %s\n", hex);
-               return 0;
-       }
-       return error("failed to copy %s with given copy methods.", hex);
-}
-
-static int fetch_pack(const unsigned char *sha1)
-{
-       struct packed_git *target;
-       char filename[PATH_MAX];
-       if (setup_indices())
-               return -1;
-       target = find_sha1_pack(sha1, packs);
-       if (!target)
-               return error("Couldn't find %s: not separate or in any pack",
-                            sha1_to_hex(sha1));
-       if (get_verbosely) {
-               fprintf(stderr, "Getting pack %s\n",
-                       sha1_to_hex(target->sha1));
-               fprintf(stderr, " which contains %s\n",
-                       sha1_to_hex(sha1));
-       }
-       sprintf(filename, "%s/objects/pack/pack-%s.pack",
-               path, sha1_to_hex(target->sha1));
-       copy_file(filename, sha1_pack_name(target->sha1),
-                 sha1_to_hex(target->sha1), 1);
-       sprintf(filename, "%s/objects/pack/pack-%s.idx",
-               path, sha1_to_hex(target->sha1));
-       copy_file(filename, sha1_pack_index_name(target->sha1),
-                 sha1_to_hex(target->sha1), 1);
-       install_packed_git(target);
-       return 0;
-}
-
-static int fetch_file(const unsigned char *sha1)
-{
-       static int object_name_start = -1;
-       static char filename[PATH_MAX];
-       char *hex = sha1_to_hex(sha1);
-       char *dest_filename = sha1_file_name(sha1);
-
-       if (object_name_start < 0) {
-               strcpy(filename, path); /* e.g. git.git */
-               strcat(filename, "/objects/");
-               object_name_start = strlen(filename);
-       }
-       filename[object_name_start+0] = hex[0];
-       filename[object_name_start+1] = hex[1];
-       filename[object_name_start+2] = '/';
-       strcpy(filename + object_name_start + 3, hex + 2);
-       return copy_file(filename, dest_filename, hex, 0);
-}
-
-int fetch(unsigned char *sha1)
-{
-       if (has_sha1_file(sha1))
-               return 0;
-       else
-               return fetch_file(sha1) && fetch_pack(sha1);
-}
-
-int fetch_ref(char *ref, unsigned char *sha1)
-{
-       static int ref_name_start = -1;
-       static char filename[PATH_MAX];
-       static char hex[41];
-       int ifd;
-
-       if (ref_name_start < 0) {
-               sprintf(filename, "%s/refs/", path);
-               ref_name_start = strlen(filename);
-       }
-       strcpy(filename + ref_name_start, ref);
-       ifd = open(filename, O_RDONLY);
-       if (ifd < 0) {
-               close(ifd);
-               return error("cannot open %s", filename);
-       }
-       if (read_in_full(ifd, hex, 40) != 40 || get_sha1_hex(hex, sha1)) {
-               close(ifd);
-               return error("cannot read from %s", filename);
-       }
-       close(ifd);
-       pull_say("ref %s\n", sha1_to_hex(sha1));
-       return 0;
-}
-
-static const char local_pull_usage[] =
-"git-local-fetch [-c] [-t] [-a] [-v] [-w filename] [--recover] [-l] [-s] [-n] [--stdin] commit-id path";
-
-/*
- * By default we only use file copy.
- * If -l is specified, a hard link is attempted.
- * If -s is specified, then a symlink is attempted.
- * If -n is _not_ specified, then a regular file-to-file copy is done.
- */
-int main(int argc, const char **argv)
-{
-       int commits;
-       const char **write_ref = NULL;
-       char **commit_id;
-       int arg = 1;
-
-       setup_git_directory();
-       git_config(git_default_config);
-
-       while (arg < argc && argv[arg][0] == '-') {
-               if (argv[arg][1] == 't')
-                       get_tree = 1;
-               else if (argv[arg][1] == 'c')
-                       get_history = 1;
-               else if (argv[arg][1] == 'a') {
-                       get_all = 1;
-                       get_tree = 1;
-                       get_history = 1;
-               }
-               else if (argv[arg][1] == 'l')
-                       use_link = 1;
-               else if (argv[arg][1] == 's')
-                       use_symlink = 1;
-               else if (argv[arg][1] == 'n')
-                       use_filecopy = 0;
-               else if (argv[arg][1] == 'v')
-                       get_verbosely = 1;
-               else if (argv[arg][1] == 'w')
-                       write_ref = &argv[++arg];
-               else if (!strcmp(argv[arg], "--recover"))
-                       get_recover = 1;
-               else if (!strcmp(argv[arg], "--stdin"))
-                       commits_on_stdin = 1;
-               else
-                       usage(local_pull_usage);
-               arg++;
-       }
-       if (argc < arg + 2 - commits_on_stdin)
-               usage(local_pull_usage);
-       if (commits_on_stdin) {
-               commits = pull_targets_stdin(&commit_id, &write_ref);
-       } else {
-               commit_id = (char **) &argv[arg++];
-               commits = 1;
-       }
-       path = argv[arg];
-
-       if (pull(commits, commit_id, write_ref, path))
-               return 1;
-
-       if (commits_on_stdin)
-               pull_targets_free(commits, commit_id, write_ref);
-
-       return 0;
-}
diff --git a/pack-write.c b/pack-write.c
index e59b197e5ebb301107f9a18b7765e18097a1c8e3..979bdfff7c516ada7fb36281a22b41d303d1b99c 100644 (file)
--- a/pack-write.c
+++ b/pack-write.c
@@ -179,3 +179,29 @@ void fixup_pack_header_footer(int pack_fd,
        SHA1_Final(pack_file_sha1, &c);
        write_or_die(pack_fd, pack_file_sha1, 20);
 }
+
+char *index_pack_lockfile(int ip_out)
+{
+       int len, s;
+       char packname[46];
+
+       /*
+        * The first thing we expect from index-pack's output
+        * is "pack\t%40s\n" or "keep\t%40s\n" (46 bytes) where
+        * %40s is the newly created pack SHA1 name.  In the "keep"
+        * case, we need it to remove the corresponding .keep file
+        * later on.  If we don't get that then tough luck with it.
+        */
+       for (len = 0;
+                len < 46 && (s = xread(ip_out, packname+len, 46-len)) > 0;
+                len += s);
+       if (len == 46 && packname[45] == '\n' &&
+               memcmp(packname, "keep\t", 5) == 0) {
+               char path[PATH_MAX];
+               packname[45] = 0;
+               snprintf(path, sizeof(path), "%s/pack/pack-%s.keep",
+                        get_object_directory(), packname + 5);
+               return xstrdup(path);
+       }
+       return NULL;
+}
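index_pack_lockfile() reads exactly one 46-byte status line from git-index-pack, either "pack\t<40-hex>\n" or "keep\t<40-hex>\n", and only the "keep" form yields a lockfile path. A rough standalone illustration of that parsing follows; the hard-coded .git/objects/pack directory and the sample SHA-1 are made up for the demo, whereas the real helper reads from a pipe and asks get_object_directory().

#include <stdio.h>
#include <string.h>

/* Turn a "keep\t<40-hex>\n" status line into a .keep path; -1 otherwise. */
static int keep_path_from_status(const char *line, size_t len,
                                 char *path, size_t pathlen)
{
        char name[41];

        if (len != 46 || line[45] != '\n' || memcmp(line, "keep\t", 5))
                return -1;
        memcpy(name, line + 5, 40);
        name[40] = '\0';
        snprintf(path, pathlen, ".git/objects/pack/pack-%s.keep", name);
        return 0;
}

int main(void)
{
        const char *line =
                "keep\t0123456789abcdef0123456789abcdef01234567\n";
        char path[256];

        if (!keep_path_from_status(line, strlen(line), path, sizeof(path)))
                printf("%s\n", path);
        return 0;
}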
diff --git a/pack.h b/pack.h
index f357c9f4282d5bc8bbcff6f3a44b9812415745a6..b57ba2d9ed6120612c2576b07d8c185d4b54bb76 100644 (file)
--- a/pack.h
+++ b/pack.h
@@ -59,6 +59,7 @@ extern const char *write_idx_file(const char *index_name, struct pack_idx_entry
 
 extern int verify_pack(struct packed_git *, int);
 extern void fixup_pack_header_footer(int, unsigned char *, const char *, uint32_t);
+extern char *index_pack_lockfile(int fd);
 
 #define PH_ERROR_EOF           (-1)
 #define PH_ERROR_PACK_SIGNATURE        (-2)
diff --git a/receive-pack.c b/receive-pack.c
index 1521d0b2de77eaccf3db1ebf357fd7560b2de1b0..38e35c06b9e73376adde597c4fe28490a2e886b1 100644 (file)
--- a/receive-pack.c
+++ b/receive-pack.c
@@ -382,9 +382,8 @@ static const char *unpack(void)
                }
        } else {
                const char *keeper[6];
-               int s, len, status;
+               int s, status;
                char keep_arg[256];
-               char packname[46];
                struct child_process ip;
 
                s = sprintf(keep_arg, "--keep=receive-pack %i on ", getpid());
@@ -403,26 +402,7 @@ static const char *unpack(void)
                ip.git_cmd = 1;
                if (start_command(&ip))
                        return "index-pack fork failed";
-
-               /*
-                * The first thing we expects from index-pack's output
-                * is "pack\t%40s\n" or "keep\t%40s\n" (46 bytes) where
-                * %40s is the newly created pack SHA1 name.  In the "keep"
-                * case, we need it to remove the corresponding .keep file
-                * later on.  If we don't get that then tough luck with it.
-                */
-               for (len = 0;
-                    len < 46 && (s = xread(ip.out, packname+len, 46-len)) > 0;
-                    len += s);
-               if (len == 46 && packname[45] == '\n' &&
-                   memcmp(packname, "keep\t", 5) == 0) {
-                       char path[PATH_MAX];
-                       packname[45] = 0;
-                       snprintf(path, sizeof(path), "%s/pack/pack-%s.keep",
-                                get_object_directory(), packname + 5);
-                       pack_lockfile = xstrdup(path);
-               }
-
+               pack_lockfile = index_pack_lockfile(ip.out);
                status = finish_command(&ip);
                if (!status) {
                        reprepare_packed_git();
diff --git a/refs.c b/refs.c
index 07e260c8a15c9118ee829964aed65b209900dc36..aff02cd09d4e40b04f6764a6f6ab43240b736a08 100644 (file)
--- a/refs.c
+++ b/refs.c
@@ -2,6 +2,7 @@
 #include "refs.h"
 #include "object.h"
 #include "tag.h"
+#include "dir.h"
 
 /* ISSYMREF=01 and ISPACKED=02 are public interfaces */
 #define REF_KNOWS_PEELED 04
@@ -671,57 +672,23 @@ static struct ref_lock *verify_lock(struct ref_lock *lock,
        return lock;
 }
 
-static int remove_empty_dir_recursive(char *path, int len)
-{
-       DIR *dir = opendir(path);
-       struct dirent *e;
-       int ret = 0;
-
-       if (!dir)
-               return -1;
-       if (path[len-1] != '/')
-               path[len++] = '/';
-       while ((e = readdir(dir)) != NULL) {
-               struct stat st;
-               int namlen;
-               if ((e->d_name[0] == '.') &&
-                   ((e->d_name[1] == 0) ||
-                    ((e->d_name[1] == '.') && e->d_name[2] == 0)))
-                       continue; /* "." and ".." */
-
-               namlen = strlen(e->d_name);
-               if ((len + namlen < PATH_MAX) &&
-                   strcpy(path + len, e->d_name) &&
-                   !lstat(path, &st) &&
-                   S_ISDIR(st.st_mode) &&
-                   !remove_empty_dir_recursive(path, len + namlen))
-                       continue; /* happy */
-
-               /* path too long, stat fails, or non-directory still exists */
-               ret = -1;
-               break;
-       }
-       closedir(dir);
-       if (!ret) {
-               path[len] = 0;
-               ret = rmdir(path);
-       }
-       return ret;
-}
-
-static int remove_empty_directories(char *file)
+static int remove_empty_directories(const char *file)
 {
        /* we want to create a file but there is a directory there;
         * if that is an empty directory (or a directory that contains
         * only empty directories), remove them.
         */
-       char path[PATH_MAX];
-       int len = strlen(file);
+       struct strbuf path;
+       int result;
 
-       if (len >= PATH_MAX) /* path too long ;-) */
-               return -1;
-       strcpy(path, file);
-       return remove_empty_dir_recursive(path, len);
+       strbuf_init(&path, 20);
+       strbuf_addstr(&path, file);
+
+       result = remove_dir_recursively(&path, 1);
+
+       strbuf_release(&path);
+
+       return result;
 }
 
 static int is_refname_available(const char *ref, const char *oldref,
diff --git a/remote.c b/remote.c
index cdbbdcb00dee400f4fe654a86c1dd0060a613904..170015aabfc18d028b22a5200cbddc5a5ec3f042 100644 (file)
--- a/remote.c
+++ b/remote.c
@@ -5,6 +5,12 @@
 static struct remote **remotes;
 static int allocated_remotes;
 
+static struct branch **branches;
+static int allocated_branches;
+
+static struct branch *current_branch;
+static const char *default_remote_name;
+
 #define BUF_SIZE (2048)
 static char buffer[BUF_SIZE];
 
@@ -26,13 +32,13 @@ static void add_fetch_refspec(struct remote *remote, const char *ref)
        remote->fetch_refspec_nr = nr;
 }
 
-static void add_uri(struct remote *remote, const char *uri)
+static void add_url(struct remote *remote, const char *url)
 {
-       int nr = remote->uri_nr + 1;
-       remote->uri =
-               xrealloc(remote->uri, nr * sizeof(char *));
-       remote->uri[nr-1] = uri;
-       remote->uri_nr = nr;
+       int nr = remote->url_nr + 1;
+       remote->url =
+               xrealloc(remote->url, nr * sizeof(char *));
+       remote->url[nr-1] = url;
+       remote->url_nr = nr;
 }
 
 static struct remote *make_remote(const char *name, int len)
@@ -67,6 +73,54 @@ static struct remote *make_remote(const char *name, int len)
        return remotes[empty];
 }
 
+static void add_merge(struct branch *branch, const char *name)
+{
+       int nr = branch->merge_nr + 1;
+       branch->merge_name =
+               xrealloc(branch->merge_name, nr * sizeof(char *));
+       branch->merge_name[nr-1] = name;
+       branch->merge_nr = nr;
+}
+
+static struct branch *make_branch(const char *name, int len)
+{
+       int i, empty = -1;
+       char *refname;
+
+       for (i = 0; i < allocated_branches; i++) {
+               if (!branches[i]) {
+                       if (empty < 0)
+                               empty = i;
+               } else {
+                       if (len ? (!strncmp(name, branches[i]->name, len) &&
+                                  !branches[i]->name[len]) :
+                           !strcmp(name, branches[i]->name))
+                               return branches[i];
+               }
+       }
+
+       if (empty < 0) {
+               empty = allocated_branches;
+               allocated_branches += allocated_branches ? allocated_branches : 1;
+               branches = xrealloc(branches,
+                                  sizeof(*branches) * allocated_branches);
+               memset(branches + empty, 0,
+                      (allocated_branches - empty) * sizeof(*branches));
+       }
+       branches[empty] = xcalloc(1, sizeof(struct branch));
+       if (len)
+               branches[empty]->name = xstrndup(name, len);
+       else
+               branches[empty]->name = xstrdup(name);
+       refname = malloc(strlen(name) + strlen("refs/heads/") + 1);
+       strcpy(refname, "refs/heads/");
+       strcpy(refname + strlen("refs/heads/"),
+              branches[empty]->name);
+       branches[empty]->refname = refname;
+
+       return branches[empty];
+}
+
 static void read_remotes_file(struct remote *remote)
 {
        FILE *f = fopen(git_path("remotes/%s", remote->name), "r");
@@ -100,7 +154,7 @@ static void read_remotes_file(struct remote *remote)
 
                switch (value_list) {
                case 0:
-                       add_uri(remote, xstrdup(s));
+                       add_url(remote, xstrdup(s));
                        break;
                case 1:
                        add_push_refspec(remote, xstrdup(s));
@@ -116,6 +170,8 @@ static void read_remotes_file(struct remote *remote)
 static void read_branches_file(struct remote *remote)
 {
        const char *slash = strchr(remote->name, '/');
+       char *frag;
+       char *branch;
        int n = slash ? slash - remote->name : 1000;
        FILE *f = fopen(git_path("branches/%.*s", n, remote->name), "r");
        char *s, *p;
@@ -141,23 +197,41 @@ static void read_branches_file(struct remote *remote)
        strcpy(p, s);
        if (slash)
                strcat(p, slash);
-       add_uri(remote, p);
+       frag = strchr(p, '#');
+       if (frag) {
+               *(frag++) = '\0';
+               branch = xmalloc(strlen(frag) + 12);
+               strcpy(branch, "refs/heads/");
+               strcat(branch, frag);
+       } else {
+               branch = "refs/heads/master";
+       }
+       add_url(remote, p);
+       add_fetch_refspec(remote, branch);
+       remote->fetch_tags = 1; /* always auto-follow */
 }
 
-static char *default_remote_name = NULL;
-static const char *current_branch = NULL;
-static int current_branch_len = 0;
-
 static int handle_config(const char *key, const char *value)
 {
        const char *name;
        const char *subkey;
        struct remote *remote;
-       if (!prefixcmp(key, "branch.") && current_branch &&
-           !strncmp(key + 7, current_branch, current_branch_len) &&
-           !strcmp(key + 7 + current_branch_len, ".remote")) {
-               free(default_remote_name);
-               default_remote_name = xstrdup(value);
+       struct branch *branch;
+       if (!prefixcmp(key, "branch.")) {
+               name = key + 7;
+               subkey = strrchr(name, '.');
+               branch = make_branch(name, subkey - name);
+               if (!subkey)
+                       return 0;
+               if (!value)
+                       return 0;
+               if (!strcmp(subkey, ".remote")) {
+                       branch->remote_name = xstrdup(value);
+                       if (branch == current_branch)
+                               default_remote_name = branch->remote_name;
+               } else if (!strcmp(subkey, ".merge"))
+                       add_merge(branch, xstrdup(value));
+               return 0;
        }
        if (prefixcmp(key,  "remote."))
                return 0;
@@ -186,7 +260,7 @@ static int handle_config(const char *key, const char *value)
                return 0; /* ignore unknown booleans */
        }
        if (!strcmp(subkey, ".url")) {
-               add_uri(remote, xstrdup(value));
+               add_url(remote, xstrdup(value));
        } else if (!strcmp(subkey, ".push")) {
                add_push_refspec(remote, xstrdup(value));
        } else if (!strcmp(subkey, ".fetch")) {
@@ -196,6 +270,14 @@ static int handle_config(const char *key, const char *value)
                        remote->receivepack = xstrdup(value);
                else
                        error("more than one receivepack given, using the first");
+       } else if (!strcmp(subkey, ".uploadpack")) {
+               if (!remote->uploadpack)
+                       remote->uploadpack = xstrdup(value);
+               else
+                       error("more than one uploadpack given, using the first");
+       } else if (!strcmp(subkey, ".tagopt")) {
+               if (!strcmp(value, "--no-tags"))
+                       remote->fetch_tags = -1;
        }
        return 0;
 }
@@ -212,13 +294,13 @@ static void read_config(void)
        head_ref = resolve_ref("HEAD", sha1, 0, &flag);
        if (head_ref && (flag & REF_ISSYMREF) &&
            !prefixcmp(head_ref, "refs/heads/")) {
-               current_branch = head_ref + strlen("refs/heads/");
-               current_branch_len = strlen(current_branch);
+               current_branch =
+                       make_branch(head_ref + strlen("refs/heads/"), 0);
        }
        git_config(handle_config);
 }
 
-static struct refspec *parse_ref_spec(int nr_refspec, const char **refspec)
+struct refspec *parse_ref_spec(int nr_refspec, const char **refspec)
 {
        int i;
        struct refspec *rs = xcalloc(sizeof(*rs), nr_refspec);
@@ -265,14 +347,14 @@ struct remote *remote_get(const char *name)
                name = default_remote_name;
        ret = make_remote(name, 0);
        if (name[0] != '/') {
-               if (!ret->uri)
+               if (!ret->url)
                        read_remotes_file(ret);
-               if (!ret->uri)
+               if (!ret->url)
                        read_branches_file(ret);
        }
-       if (!ret->uri)
-               add_uri(ret, name);
-       if (!ret->uri)
+       if (!ret->url)
+               add_url(ret, name);
+       if (!ret->url)
                return NULL;
        ret->fetch = parse_ref_spec(ret->fetch_refspec_nr, ret->fetch_refspec);
        ret->push = parse_ref_spec(ret->push_refspec_nr, ret->push_refspec);
@@ -298,16 +380,62 @@ int for_each_remote(each_remote_fn fn, void *priv)
        return result;
 }
 
-int remote_has_uri(struct remote *remote, const char *uri)
+void ref_remove_duplicates(struct ref *ref_map)
+{
+       struct ref **posn;
+       struct ref *next;
+       for (; ref_map; ref_map = ref_map->next) {
+               if (!ref_map->peer_ref)
+                       continue;
+               posn = &ref_map->next;
+               while (*posn) {
+                       if ((*posn)->peer_ref &&
+                           !strcmp((*posn)->peer_ref->name,
+                                   ref_map->peer_ref->name)) {
+                               if (strcmp((*posn)->name, ref_map->name))
+                                       die("%s tracks both %s and %s",
+                                           ref_map->peer_ref->name,
+                                           (*posn)->name, ref_map->name);
+                               next = (*posn)->next;
+                               free((*posn)->peer_ref);
+                               free(*posn);
+                               *posn = next;
+                       } else {
+                               posn = &(*posn)->next;
+                       }
+               }
+       }
+}
+
+int remote_has_url(struct remote *remote, const char *url)
 {
        int i;
-       for (i = 0; i < remote->uri_nr; i++) {
-               if (!strcmp(remote->uri[i], uri))
+       for (i = 0; i < remote->url_nr; i++) {
+               if (!strcmp(remote->url[i], url))
                        return 1;
        }
        return 0;
 }
 
+/*
+ * Returns true if, under the matching rules for fetching, name is the
+ * same as the given full name.
+ */
+static int ref_matches_abbrev(const char *name, const char *full)
+{
+       if (!prefixcmp(name, "refs/") || !strcmp(name, "HEAD"))
+               return !strcmp(name, full);
+       if (prefixcmp(full, "refs/"))
+               return 0;
+       if (!prefixcmp(name, "heads/") ||
+           !prefixcmp(name, "tags/") ||
+           !prefixcmp(name, "remotes/"))
+               return !strcmp(name, full + 5);
+       if (prefixcmp(full + 5, "heads/"))
+               return 0;
+       return !strcmp(full + 11, name);
+}
+
 int remote_find_tracking(struct remote *remote, struct refspec *refspec)
 {
        int find_src = refspec->src == NULL;
@@ -315,7 +443,7 @@ int remote_find_tracking(struct remote *remote, struct refspec *refspec)
        int i;
 
        if (find_src) {
-               if (refspec->dst == NULL)
+               if (!refspec->dst)
                        return error("find_tracking: need either src or dst");
                needle = refspec->dst;
                result = &refspec->src;
@@ -357,6 +485,14 @@ struct ref *alloc_ref(unsigned namelen)
        return ret;
 }
 
+static struct ref *copy_ref(struct ref *ref)
+{
+       struct ref *ret = xmalloc(sizeof(struct ref) + strlen(ref->name) + 1);
+       memcpy(ret, ref, sizeof(struct ref) + strlen(ref->name) + 1);
+       ret->next = NULL;
+       return ret;
+}
+
 void free_refs(struct ref *ref)
 {
        struct ref *next;
@@ -489,23 +625,23 @@ static int match_explicit(struct ref *src, struct ref *dst,
                 * way to delete 'other' ref at the remote end.
                 */
                matched_src = try_explicit_object_name(rs->src);
-               if (matched_src)
-                       break;
-               error("src refspec %s does not match any.",
-                     rs->src);
+               if (!matched_src)
+                       error("src refspec %s does not match any.", rs->src);
                break;
        default:
                matched_src = NULL;
-               error("src refspec %s matches more than one.",
-                     rs->src);
+               error("src refspec %s matches more than one.", rs->src);
                break;
        }
 
        if (!matched_src)
                errs = 1;
 
-       if (dst_value == NULL)
+       if (!dst_value) {
+               if (!matched_src)
+                       return errs;
                dst_value = matched_src->name;
+       }
 
        switch (count_refspec_match(dst_value, dst, &matched_dst)) {
        case 1:
@@ -524,7 +660,7 @@ static int match_explicit(struct ref *src, struct ref *dst,
                      dst_value);
                break;
        }
-       if (errs || matched_dst == NULL)
+       if (errs || !matched_dst)
                return 1;
        if (matched_dst->peer_ref) {
                errs = 1;
@@ -633,3 +769,150 @@ int match_refs(struct ref *src, struct ref *dst, struct ref ***dst_tail,
        }
        return 0;
 }
+
+struct branch *branch_get(const char *name)
+{
+       struct branch *ret;
+
+       read_config();
+       if (!name || !*name || !strcmp(name, "HEAD"))
+               ret = current_branch;
+       else
+               ret = make_branch(name, 0);
+       if (ret && ret->remote_name) {
+               ret->remote = remote_get(ret->remote_name);
+               if (ret->merge_nr) {
+                       int i;
+                       ret->merge = xcalloc(sizeof(*ret->merge),
+                                            ret->merge_nr);
+                       for (i = 0; i < ret->merge_nr; i++) {
+                               ret->merge[i] = xcalloc(1, sizeof(**ret->merge));
+                               ret->merge[i]->src = xstrdup(ret->merge_name[i]);
+                               remote_find_tracking(ret->remote,
+                                                    ret->merge[i]);
+                       }
+               }
+       }
+       return ret;
+}
+
+int branch_has_merge_config(struct branch *branch)
+{
+       return branch && !!branch->merge;
+}
+
+int branch_merge_matches(struct branch *branch,
+                                int i,
+                                const char *refname)
+{
+       if (!branch || i < 0 || i >= branch->merge_nr)
+               return 0;
+       return ref_matches_abbrev(branch->merge[i]->src, refname);
+}
+
+static struct ref *get_expanded_map(struct ref *remote_refs,
+                                   const struct refspec *refspec)
+{
+       struct ref *ref;
+       struct ref *ret = NULL;
+       struct ref **tail = &ret;
+
+       int remote_prefix_len = strlen(refspec->src);
+       int local_prefix_len = strlen(refspec->dst);
+
+       for (ref = remote_refs; ref; ref = ref->next) {
+               if (strchr(ref->name, '^'))
+                       continue; /* a dereference item */
+               if (!prefixcmp(ref->name, refspec->src)) {
+                       char *match;
+                       struct ref *cpy = copy_ref(ref);
+                       match = ref->name + remote_prefix_len;
+
+                       cpy->peer_ref = alloc_ref(local_prefix_len +
+                                                 strlen(match) + 1);
+                       sprintf(cpy->peer_ref->name, "%s%s",
+                               refspec->dst, match);
+                       if (refspec->force)
+                               cpy->peer_ref->force = 1;
+                       *tail = cpy;
+                       tail = &cpy->next;
+               }
+       }
+
+       return ret;
+}
+
+static struct ref *find_ref_by_name_abbrev(struct ref *refs, const char *name)
+{
+       struct ref *ref;
+       for (ref = refs; ref; ref = ref->next) {
+               if (ref_matches_abbrev(name, ref->name))
+                       return ref;
+       }
+       return NULL;
+}
+
+struct ref *get_remote_ref(struct ref *remote_refs, const char *name)
+{
+       struct ref *ref = find_ref_by_name_abbrev(remote_refs, name);
+
+       if (!ref)
+               die("Couldn't find remote ref %s\n", name);
+
+       return copy_ref(ref);
+}
+
+static struct ref *get_local_ref(const char *name)
+{
+       struct ref *ret;
+       if (!name)
+               return NULL;
+
+       if (!prefixcmp(name, "refs/")) {
+               ret = alloc_ref(strlen(name) + 1);
+               strcpy(ret->name, name);
+               return ret;
+       }
+
+       if (!prefixcmp(name, "heads/") ||
+           !prefixcmp(name, "tags/") ||
+           !prefixcmp(name, "remotes/")) {
+               ret = alloc_ref(strlen(name) + 6);
+               sprintf(ret->name, "refs/%s", name);
+               return ret;
+       }
+
+       ret = alloc_ref(strlen(name) + 12);
+       sprintf(ret->name, "refs/heads/%s", name);
+       return ret;
+}
+
+int get_fetch_map(struct ref *remote_refs,
+                 const struct refspec *refspec,
+                 struct ref ***tail)
+{
+       struct ref *ref_map, *rm;
+
+       if (refspec->pattern) {
+               ref_map = get_expanded_map(remote_refs, refspec);
+       } else {
+               ref_map = get_remote_ref(remote_refs,
+                                        refspec->src[0] ?
+                                        refspec->src : "HEAD");
+
+               ref_map->peer_ref = get_local_ref(refspec->dst);
+               if (ref_map->peer_ref && refspec->force)
+                       ref_map->peer_ref->force = 1;
+       }
+
+       for (rm = ref_map; rm; rm = rm->next) {
+               if (rm->peer_ref && check_ref_format(rm->peer_ref->name + 5))
+                       die("* refusing to create funny ref '%s' locally",
+                           rm->peer_ref->name);
+       }
+
+       if (ref_map)
+               tail_link_ref(ref_map, tail);
+
+       return 0;
+}
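
The pattern case above (get_expanded_map() together with get_local_ref()) boils down to a prefix rewrite: every remote ref that starts with the refspec's source prefix gets a peer ref named after the destination prefix plus the remainder, and get_fetch_map() attaches that peer ref so the caller knows where each remote ref should be stored locally. The following standalone sketch shows just that rewrite, under the assumption that both sides of the refspec are plain prefixes; rewrite_prefix() and the ref names used are illustrative, not part of git.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * Rewrite "refname" by replacing the "src" prefix with "dst".
     * Returns a freshly allocated string, or NULL if refname does not
     * start with src (i.e. the pattern refspec does not apply).
     */
    static char *rewrite_prefix(const char *refname,
                                const char *src, const char *dst)
    {
            size_t srclen = strlen(src);
            char *out;

            if (strncmp(refname, src, srclen))
                    return NULL;
            out = malloc(strlen(dst) + strlen(refname) - srclen + 1);
            if (!out)
                    return NULL;
            sprintf(out, "%s%s", dst, refname + srclen);
            return out;
    }

    int main(void)
    {
            /* corresponds to the refspec refs/heads/*:refs/remotes/origin/* */
            char *local = rewrite_prefix("refs/heads/topic",
                                         "refs/heads/", "refs/remotes/origin/");

            if (local) {
                    printf("%s\n", local);  /* refs/remotes/origin/topic */
                    free(local);
            }
            return 0;
    }
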
index 17b8b5b5d5469419842be3d41d528ba88c987a3e..c62636d78ef996b97aef73cf37cfb5de3f279145 100644 (file)
--- a/remote.h
+++ b/remote.h
@@ -4,8 +4,8 @@
 struct remote {
        const char *name;
 
-       const char **uri;
-       int uri_nr;
+       const char **url;
+       int url_nr;
 
        const char **push_refspec;
        struct refspec *push;
@@ -15,7 +15,16 @@ struct remote {
        struct refspec *fetch;
        int fetch_refspec_nr;
 
+       /*
+        * -1 to never fetch tags
+        * 0 to auto-follow tags on heuristic (default)
+        * 1 to always auto-follow tags
+        * 2 to always fetch tags
+        */
+       int fetch_tags;
+
        const char *receivepack;
+       const char *uploadpack;
 };
 
 struct remote *remote_get(const char *name);
@@ -23,7 +32,7 @@ struct remote *remote_get(const char *name);
 typedef int each_remote_fn(struct remote *remote, void *priv);
 int for_each_remote(each_remote_fn fn, void *priv);
 
-int remote_has_uri(struct remote *remote, const char *uri);
+int remote_has_url(struct remote *remote, const char *url);
 
 struct refspec {
        unsigned force : 1;
@@ -40,12 +49,50 @@ struct ref *alloc_ref(unsigned namelen);
  */
 void free_refs(struct ref *ref);
 
+/*
+ * Removes and frees any duplicate refs in the map.
+ */
+void ref_remove_duplicates(struct ref *ref_map);
+
+struct refspec *parse_ref_spec(int nr_refspec, const char **refspec);
+
 int match_refs(struct ref *src, struct ref *dst, struct ref ***dst_tail,
               int nr_refspec, char **refspec, int all);
 
+/*
+ * Given a list of the remote refs and the specification of things to
+ * fetch, makes a (separate) list of the refs to fetch and the local
+ * refs to store into.
+ *
+ * On entry, *tail points to the tail pointer of the result list built
+ * so far; on return it has been advanced to the tail pointer of the
+ * list with the new entries appended.
+ */
+int get_fetch_map(struct ref *remote_refs, const struct refspec *refspec,
+                 struct ref ***tail);
+
+struct ref *get_remote_ref(struct ref *remote_refs, const char *name);
+
 /*
  * For the given remote, reads the refspec's src and sets the other fields.
  */
 int remote_find_tracking(struct remote *remote, struct refspec *refspec);
 
+struct branch {
+       const char *name;
+       const char *refname;
+
+       const char *remote_name;
+       struct remote *remote;
+
+       const char **merge_name;
+       struct refspec **merge;
+       int merge_nr;
+};
+
+struct branch *branch_get(const char *name);
+
+int branch_has_merge_config(struct branch *branch);
+int branch_merge_matches(struct branch *, int n, const char *);
+
 #endif
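
branch_get() and branch_merge_matches() declared above are what let the fetch code decide which FETCH_HEAD entries carry the not-for-merge marker (compare the t5515 expected outputs further down): a fetched branch is eligible for merging only if it matches one of the branch.<name>.merge values of the current branch. Below is a simplified, self-contained sketch of that decision; merge_config_matches() is only a rough stand-in for ref_matches_abbrev(), assuming nothing more than that a configured value may omit a leading "refs/heads/".

    #include <stdio.h>
    #include <string.h>

    /*
     * Rough stand-in for ref_matches_abbrev(): exact match, or the
     * configured value may omit a leading "refs/heads/".
     */
    static int merge_config_matches(const char *configured, const char *refname)
    {
            if (!strcmp(configured, refname))
                    return 1;
            return !strncmp(refname, "refs/heads/", 11) &&
                   !strcmp(configured, refname + 11);
    }

    int main(void)
    {
            /* hypothetical branch.<name>.merge values for the current branch */
            const char *merge[] = { "refs/heads/three", "two" };
            const char *fetched[] = {
                    "refs/heads/master", "refs/heads/two", "refs/heads/three"
            };
            int i, j;

            for (i = 0; i < 3; i++) {
                    int for_merge = 0;
                    for (j = 0; j < 2; j++)
                            if (merge_config_matches(merge[j], fetched[i]))
                                    for_merge = 1;
                    printf("%s\t%s\n", fetched[i],
                           for_merge ? "" : "not-for-merge");
            }
            return 0;
    }
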
diff --git a/rsh.c b/rsh.c
deleted file mode 100644 (file)
index 016d72e..0000000
--- a/rsh.c
+++ /dev/null
@@ -1,79 +0,0 @@
-#include "cache.h"
-#include "rsh.h"
-#include "quote.h"
-
-#define COMMAND_SIZE 4096
-
-int setup_connection(int *fd_in, int *fd_out, const char *remote_prog,
-                    char *url, int rmt_argc, char **rmt_argv)
-{
-       char *host;
-       char *path;
-       int sv[2];
-       int i;
-       pid_t pid;
-       struct strbuf cmd;
-
-       if (!strcmp(url, "-")) {
-               *fd_in = 0;
-               *fd_out = 1;
-               return 0;
-       }
-
-       host = strstr(url, "//");
-       if (host) {
-               host += 2;
-               path = strchr(host, '/');
-       } else {
-               host = url;
-               path = strchr(host, ':');
-               if (path)
-                       *(path++) = '\0';
-       }
-       if (!path) {
-               return error("Bad URL: %s", url);
-       }
-
-       /* $GIT_RSH <host> "env GIT_DIR=<path> <remote_prog> <args...>" */
-       strbuf_init(&cmd, COMMAND_SIZE);
-       strbuf_addstr(&cmd, "env ");
-       strbuf_addstr(&cmd, GIT_DIR_ENVIRONMENT "=");
-       sq_quote_buf(&cmd, path);
-       strbuf_addch(&cmd, ' ');
-       sq_quote_buf(&cmd, remote_prog);
-
-       for (i = 0 ; i < rmt_argc ; i++) {
-               strbuf_addch(&cmd, ' ');
-               sq_quote_buf(&cmd, rmt_argv[i]);
-       }
-
-       strbuf_addstr(&cmd, " -");
-
-       if (cmd.len >= COMMAND_SIZE)
-               return error("Command line too long");
-
-       if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
-               return error("Couldn't create socket");
-
-       pid = fork();
-       if (pid < 0)
-               return error("Couldn't fork");
-       if (!pid) {
-               const char *ssh, *ssh_basename;
-               ssh = getenv("GIT_SSH");
-               if (!ssh) ssh = "ssh";
-               ssh_basename = strrchr(ssh, '/');
-               if (!ssh_basename)
-                       ssh_basename = ssh;
-               else
-                       ssh_basename++;
-               close(sv[1]);
-               dup2(sv[0], 0);
-               dup2(sv[0], 1);
-               execlp(ssh, ssh_basename, host, cmd.buf, NULL);
-       }
-       close(sv[0]);
-       *fd_in = sv[1];
-       *fd_out = sv[1];
-       return 0;
-}
diff --git a/rsh.h b/rsh.h
deleted file mode 100644 (file)
index ee2f499..0000000
--- a/rsh.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef RSH_H
-#define RSH_H
-
-int setup_connection(int *fd_in, int *fd_out, const char *remote_prog,
-                    char *url, int rmt_argc, char **rmt_argv);
-
-#endif
index c1807f07946ca204bc1e8307eed04150e62c551d..e9b9a39f411b6cfff1c0a4bc3f7e31274c8d2782 100644 (file)
@@ -428,7 +428,7 @@ int main(int argc, char **argv)
 
        if (remote_name) {
                remote = remote_get(remote_name);
-               if (!remote_has_uri(remote, dest)) {
+               if (!remote_has_url(remote, dest)) {
                        die("Destination %s is not a uri for %s",
                            dest, remote_name);
                }
diff --git a/ssh-fetch.c b/ssh-fetch.c
deleted file mode 100644 (file)
index bdf51a7..0000000
+++ /dev/null
@@ -1,166 +0,0 @@
-#ifndef COUNTERPART_ENV_NAME
-#define COUNTERPART_ENV_NAME "GIT_SSH_UPLOAD"
-#endif
-#ifndef COUNTERPART_PROGRAM_NAME
-#define COUNTERPART_PROGRAM_NAME "git-ssh-upload"
-#endif
-#ifndef MY_PROGRAM_NAME
-#define MY_PROGRAM_NAME "git-ssh-fetch"
-#endif
-
-#include "cache.h"
-#include "commit.h"
-#include "rsh.h"
-#include "fetch.h"
-#include "refs.h"
-
-static int fd_in;
-static int fd_out;
-
-static unsigned char remote_version;
-static unsigned char local_version = 1;
-
-static int prefetches;
-
-static struct object_list *in_transit;
-static struct object_list **end_of_transit = &in_transit;
-
-void prefetch(unsigned char *sha1)
-{
-       char type = 'o';
-       struct object_list *node;
-       if (prefetches > 100) {
-               fetch(in_transit->item->sha1);
-       }
-       node = xmalloc(sizeof(struct object_list));
-       node->next = NULL;
-       node->item = lookup_unknown_object(sha1);
-       *end_of_transit = node;
-       end_of_transit = &node->next;
-       /* XXX: what if these writes fail? */
-       write_in_full(fd_out, &type, 1);
-       write_in_full(fd_out, sha1, 20);
-       prefetches++;
-}
-
-static char conn_buf[4096];
-static size_t conn_buf_posn;
-
-int fetch(unsigned char *sha1)
-{
-       int ret;
-       signed char remote;
-       struct object_list *temp;
-
-       if (hashcmp(sha1, in_transit->item->sha1)) {
-               /* we must have already fetched it to clean the queue */
-               return has_sha1_file(sha1) ? 0 : -1;
-       }
-       prefetches--;
-       temp = in_transit;
-       in_transit = in_transit->next;
-       if (!in_transit)
-               end_of_transit = &in_transit;
-       free(temp);
-
-       if (conn_buf_posn) {
-               remote = conn_buf[0];
-               memmove(conn_buf, conn_buf + 1, --conn_buf_posn);
-       } else {
-               if (xread(fd_in, &remote, 1) < 1)
-                       return -1;
-       }
-       /* fprintf(stderr, "Got %d\n", remote); */
-       if (remote < 0)
-               return remote;
-       ret = write_sha1_from_fd(sha1, fd_in, conn_buf, 4096, &conn_buf_posn);
-       if (!ret)
-               pull_say("got %s\n", sha1_to_hex(sha1));
-       return ret;
-}
-
-static int get_version(void)
-{
-       char type = 'v';
-       if (write_in_full(fd_out, &type, 1) != 1 ||
-           write_in_full(fd_out, &local_version, 1)) {
-               return error("Couldn't request version from remote end");
-       }
-       if (xread(fd_in, &remote_version, 1) < 1) {
-               return error("Couldn't read version from remote end");
-       }
-       return 0;
-}
-
-int fetch_ref(char *ref, unsigned char *sha1)
-{
-       signed char remote;
-       char type = 'r';
-       int length = strlen(ref) + 1;
-       if (write_in_full(fd_out, &type, 1) != 1 ||
-           write_in_full(fd_out, ref, length) != length)
-               return -1;
-
-       if (read_in_full(fd_in, &remote, 1) != 1)
-               return -1;
-       if (remote < 0)
-               return remote;
-       if (read_in_full(fd_in, sha1, 20) != 20)
-               return -1;
-       return 0;
-}
-
-static const char ssh_fetch_usage[] =
-  MY_PROGRAM_NAME
-  " [-c] [-t] [-a] [-v] [--recover] [-w ref] commit-id url";
-int main(int argc, char **argv)
-{
-       const char *write_ref = NULL;
-       char *commit_id;
-       char *url;
-       int arg = 1;
-       const char *prog;
-
-       prog = getenv("GIT_SSH_PUSH");
-       if (!prog) prog = "git-ssh-upload";
-
-       setup_git_directory();
-       git_config(git_default_config);
-
-       while (arg < argc && argv[arg][0] == '-') {
-               if (argv[arg][1] == 't') {
-                       get_tree = 1;
-               } else if (argv[arg][1] == 'c') {
-                       get_history = 1;
-               } else if (argv[arg][1] == 'a') {
-                       get_all = 1;
-                       get_tree = 1;
-                       get_history = 1;
-               } else if (argv[arg][1] == 'v') {
-                       get_verbosely = 1;
-               } else if (argv[arg][1] == 'w') {
-                       write_ref = argv[arg + 1];
-                       arg++;
-               } else if (!strcmp(argv[arg], "--recover")) {
-                       get_recover = 1;
-               }
-               arg++;
-       }
-       if (argc < arg + 2) {
-               usage(ssh_fetch_usage);
-               return 1;
-       }
-       commit_id = argv[arg];
-       url = argv[arg + 1];
-
-       if (setup_connection(&fd_in, &fd_out, prog, url, arg, argv + 1))
-               return 1;
-
-       if (get_version())
-               return 1;
-
-       if (pull(1, &commit_id, &write_ref, url))
-               return 1;
-
-       return 0;
-}
diff --git a/ssh-pull.c b/ssh-pull.c
deleted file mode 100644 (file)
index 868ce4d..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#define COUNTERPART_ENV_NAME "GIT_SSH_PUSH"
-#define COUNTERPART_PROGRAM_NAME "git-ssh-push"
-#define MY_PROGRAM_NAME "git-ssh-pull"
-#include "ssh-fetch.c"
diff --git a/ssh-push.c b/ssh-push.c
deleted file mode 100644 (file)
index a562df1..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#define COUNTERPART_ENV_NAME "GIT_SSH_PULL"
-#define COUNTERPART_PROGRAM_NAME "git-ssh-pull"
-#define MY_PROGRAM_NAME "git-ssh-push"
-#include "ssh-upload.c"
diff --git a/ssh-upload.c b/ssh-upload.c
deleted file mode 100644 (file)
index 20c35f0..0000000
+++ /dev/null
@@ -1,143 +0,0 @@
-#ifndef COUNTERPART_ENV_NAME
-#define COUNTERPART_ENV_NAME "GIT_SSH_FETCH"
-#endif
-#ifndef COUNTERPART_PROGRAM_NAME
-#define COUNTERPART_PROGRAM_NAME "git-ssh-fetch"
-#endif
-#ifndef MY_PROGRAM_NAME
-#define MY_PROGRAM_NAME "git-ssh-upload"
-#endif
-
-#include "cache.h"
-#include "rsh.h"
-#include "refs.h"
-
-static unsigned char local_version = 1;
-static unsigned char remote_version;
-
-static int verbose;
-
-static int serve_object(int fd_in, int fd_out) {
-       ssize_t size;
-       unsigned char sha1[20];
-       signed char remote;
-
-       size = read_in_full(fd_in, sha1, 20);
-       if (size < 0) {
-               perror("git-ssh-upload: read ");
-               return -1;
-       }
-       if (!size)
-               return -1;
-
-       if (verbose)
-               fprintf(stderr, "Serving %s\n", sha1_to_hex(sha1));
-
-       remote = 0;
-
-       if (!has_sha1_file(sha1)) {
-               fprintf(stderr, "git-ssh-upload: could not find %s\n",
-                       sha1_to_hex(sha1));
-               remote = -1;
-       }
-
-       if (write_in_full(fd_out, &remote, 1) != 1)
-               return 0;
-
-       if (remote < 0)
-               return 0;
-
-       return write_sha1_to_fd(fd_out, sha1);
-}
-
-static int serve_version(int fd_in, int fd_out)
-{
-       if (xread(fd_in, &remote_version, 1) < 1)
-               return -1;
-       write_in_full(fd_out, &local_version, 1);
-       return 0;
-}
-
-static int serve_ref(int fd_in, int fd_out)
-{
-       char ref[PATH_MAX];
-       unsigned char sha1[20];
-       int posn = 0;
-       signed char remote = 0;
-       do {
-               if (posn >= PATH_MAX || xread(fd_in, ref + posn, 1) < 1)
-                       return -1;
-               posn++;
-       } while (ref[posn - 1]);
-
-       if (verbose)
-               fprintf(stderr, "Serving %s\n", ref);
-
-       if (get_ref_sha1(ref, sha1))
-               remote = -1;
-       if (write_in_full(fd_out, &remote, 1) != 1)
-               return 0;
-       if (remote)
-               return 0;
-       write_in_full(fd_out, sha1, 20);
-        return 0;
-}
-
-
-static void service(int fd_in, int fd_out) {
-       char type;
-       ssize_t retval;
-       do {
-               retval = xread(fd_in, &type, 1);
-               if (retval < 1) {
-                       if (retval < 0)
-                               perror("git-ssh-upload: read ");
-                       return;
-               }
-               if (type == 'v' && serve_version(fd_in, fd_out))
-                       return;
-               if (type == 'o' && serve_object(fd_in, fd_out))
-                       return;
-               if (type == 'r' && serve_ref(fd_in, fd_out))
-                       return;
-       } while (1);
-}
-
-static const char ssh_push_usage[] =
-       MY_PROGRAM_NAME " [-c] [-t] [-a] [-w ref] commit-id url";
-
-int main(int argc, char **argv)
-{
-       int arg = 1;
-        char *commit_id;
-        char *url;
-       int fd_in, fd_out;
-       const char *prog;
-       unsigned char sha1[20];
-       char hex[41];
-
-       prog = getenv(COUNTERPART_ENV_NAME);
-       if (!prog) prog = COUNTERPART_PROGRAM_NAME;
-
-       setup_git_directory();
-
-       while (arg < argc && argv[arg][0] == '-') {
-               if (argv[arg][1] == 'w')
-                       arg++;
-                arg++;
-        }
-       if (argc < arg + 2)
-               usage(ssh_push_usage);
-       commit_id = argv[arg];
-       url = argv[arg + 1];
-       if (get_sha1(commit_id, sha1))
-               die("Not a valid object name %s", commit_id);
-       memcpy(hex, sha1_to_hex(sha1), sizeof(hex));
-       argv[arg] = hex;
-
-       if (setup_connection(&fd_in, &fd_out, prog, url, arg, argv + 1))
-               return 1;
-
-       service(fd_in, fd_out);
-       return 0;
-}
index 439430f569ca70b5e3b08ef07996949d7259c9b7..d2176571462af7dd66ecdb197731cd9d810dccdf 100755 (executable)
@@ -67,6 +67,18 @@ test_expect_success "fetch test for-merge" '
        cut -f -2 .git/FETCH_HEAD >actual &&
        diff expected actual'
 
+test_expect_success 'fetch tags when there are no tags' '
+
+    cd "$D" &&
+
+    mkdir notags &&
+    cd notags &&
+    git init &&
+
+    git fetch -t ..
+
+'
+
 test_expect_success 'fetch following tags' '
 
        cd "$D" &&
@@ -153,4 +165,47 @@ test_expect_success 'bundle should be able to create a full history' '
 
 '
 
+test "$TEST_RSYNC" && {
+test_expect_success 'fetch via rsync' '
+       git pack-refs &&
+       mkdir rsynced &&
+       cd rsynced &&
+       git init &&
+       git fetch rsync://127.0.0.1$(pwd)/../.git master:refs/heads/master &&
+       git gc --prune &&
+       test $(git rev-parse master) = $(cd .. && git rev-parse master) &&
+       git fsck --full
+'
+
+test_expect_success 'push via rsync' '
+       mkdir ../rsynced2 &&
+       (cd ../rsynced2 &&
+        git init) &&
+       git push rsync://127.0.0.1$(pwd)/../rsynced2/.git master &&
+       cd ../rsynced2 &&
+       git gc --prune &&
+       test $(git rev-parse master) = $(cd .. && git rev-parse master) &&
+       git fsck --full
+'
+
+test_expect_success 'push --all via rsync' '
+       cd .. &&
+       mkdir rsynced3 &&
+       (cd rsynced3 &&
+        git init) &&
+       git push --all rsync://127.0.0.1$(pwd)/rsynced3/.git &&
+       cd rsynced3 &&
+       test $(git rev-parse master) = $(cd .. && git rev-parse master) &&
+       git fsck --full
+'
+}
+
+test_expect_success 'fetch with a non-applying branch.<name>.merge' '
+       git config branch.master.remote yeti &&
+       git config branch.master.merge refs/heads/bigfoot &&
+       git config remote.blub.url one &&
+       git config remote.blub.fetch "refs/heads/*:refs/remotes/one/*" &&
+       git fetch blub
+'
+
 test_done
index 6c9cc67508f4351f5627b613215e6b88b0adc49a..31c108161781165d5e32f08b95089086627eda64 100755 (executable)
@@ -84,8 +84,7 @@ test_expect_success setup '
                git config branch.br-$remote-merge.merge refs/heads/three &&
                git config branch.br-$remote-octopus.remote $remote &&
                git config branch.br-$remote-octopus.merge refs/heads/one &&
-               git config --add branch.br-$remote-octopus.merge two &&
-               git config --add branch.br-$remote-octopus.merge remotes/rem/three
+               git config --add branch.br-$remote-octopus.merge two
        done
 '
 
index ea65f31bde8cf485f50cac0ddb6774a11a824b95..ca2cc1d1b44e3edc8cd42e2e77d0f85658a52195 100644 (file)
@@ -1,5 +1,6 @@
 # br-branches-default-merge
-754b754407bf032e9a2f9d5a9ad05ca79a6b228f               branch 'master' of ../
+754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   branch 'master' of ../
+0567da4d5edd2ff4bb292a465ba9e64dcad9536b               branch 'three' of ../
 754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   tag 'tag-master' of ../
 8e32a6d901327a23ef831511badce7bf3bf46689       not-for-merge   tag 'tag-one' of ../
 22feea448b023a2d864ef94b013735af34d238ba       not-for-merge   tag 'tag-one-tree' of ../
index 7b5fa949e653d0e29bef65f7380b04a5f2cc9a2e..7d947cd80f9cf656024206f1ea31da0d9f10f493 100644 (file)
@@ -1,5 +1,6 @@
 # br-branches-default-merge branches-default
-754b754407bf032e9a2f9d5a9ad05ca79a6b228f               branch 'master' of ../
+754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   branch 'master' of ../
+0567da4d5edd2ff4bb292a465ba9e64dcad9536b               branch 'three' of ../
 754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   tag 'tag-master' of ../
 8e32a6d901327a23ef831511badce7bf3bf46689       not-for-merge   tag 'tag-one' of ../
 22feea448b023a2d864ef94b013735af34d238ba       not-for-merge   tag 'tag-one-tree' of ../
index 128397d7370390821a859b90d5cce97772a37082..ec39c54b7e242ddbeec76f55b98f555d562aa271 100644 (file)
@@ -1,5 +1,7 @@
 # br-branches-default-octopus
-754b754407bf032e9a2f9d5a9ad05ca79a6b228f               branch 'master' of ../
+754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   branch 'master' of ../
+8e32a6d901327a23ef831511badce7bf3bf46689               branch 'one' of ../
+6134ee8f857693b96ff1cc98d3e2fd62b199e5a8               branch 'two' of ../
 754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   tag 'tag-master' of ../
 8e32a6d901327a23ef831511badce7bf3bf46689       not-for-merge   tag 'tag-one' of ../
 22feea448b023a2d864ef94b013735af34d238ba       not-for-merge   tag 'tag-one-tree' of ../
index 4b37cd481abedaa376b837519b78f8b862dfc34a..6bf42e24b67b526bac49e3cdb287e32513f4a6c4 100644 (file)
@@ -1,5 +1,7 @@
 # br-branches-default-octopus branches-default
-754b754407bf032e9a2f9d5a9ad05ca79a6b228f               branch 'master' of ../
+754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   branch 'master' of ../
+8e32a6d901327a23ef831511badce7bf3bf46689               branch 'one' of ../
+6134ee8f857693b96ff1cc98d3e2fd62b199e5a8               branch 'two' of ../
 754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   tag 'tag-master' of ../
 8e32a6d901327a23ef831511badce7bf3bf46689       not-for-merge   tag 'tag-one' of ../
 22feea448b023a2d864ef94b013735af34d238ba       not-for-merge   tag 'tag-one-tree' of ../
index 3a4e77ead534bb8b041aa46201c3fa47c870c0fe..b4b3b35ce0e2f46a16b015a74b771eb90ed3ebad 100644 (file)
@@ -1,5 +1,6 @@
 # br-branches-one-merge
-8e32a6d901327a23ef831511badce7bf3bf46689               branch 'one' of ../
+8e32a6d901327a23ef831511badce7bf3bf46689       not-for-merge   branch 'one' of ../
+0567da4d5edd2ff4bb292a465ba9e64dcad9536b               branch 'three' of ../
 754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   tag 'tag-master' of ../
 8e32a6d901327a23ef831511badce7bf3bf46689       not-for-merge   tag 'tag-one' of ../
 22feea448b023a2d864ef94b013735af34d238ba       not-for-merge   tag 'tag-one-tree' of ../
index 00e04b435e94a15724278168b9022f506414ca93..2ecef384eb7d823104581bfe2b4bd240b449e5df 100644 (file)
@@ -1,5 +1,6 @@
 # br-branches-one-merge branches-one
-8e32a6d901327a23ef831511badce7bf3bf46689               branch 'one' of ../
+8e32a6d901327a23ef831511badce7bf3bf46689       not-for-merge   branch 'one' of ../
+0567da4d5edd2ff4bb292a465ba9e64dcad9536b               branch 'three' of ../
 754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   tag 'tag-master' of ../
 8e32a6d901327a23ef831511badce7bf3bf46689       not-for-merge   tag 'tag-one' of ../
 22feea448b023a2d864ef94b013735af34d238ba       not-for-merge   tag 'tag-one-tree' of ../
index 53fe808a3b73cefe9af1407e459ccde22b78cad9..96e3029416b46ab4192d3e4aaa285a02489e4054 100644 (file)
@@ -1,5 +1,6 @@
 # br-branches-one-octopus
 8e32a6d901327a23ef831511badce7bf3bf46689               branch 'one' of ../
+6134ee8f857693b96ff1cc98d3e2fd62b199e5a8               branch 'two' of ../
 754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   tag 'tag-master' of ../
 8e32a6d901327a23ef831511badce7bf3bf46689       not-for-merge   tag 'tag-one' of ../
 22feea448b023a2d864ef94b013735af34d238ba       not-for-merge   tag 'tag-one-tree' of ../
index 41b18ff78a4e841efd688240f1f5060f42aea2d9..55e0bad621cde0c93e6a6fb92dc259c61986aba5 100644 (file)
@@ -1,5 +1,6 @@
 # br-branches-one-octopus branches-one
 8e32a6d901327a23ef831511badce7bf3bf46689               branch 'one' of ../
+6134ee8f857693b96ff1cc98d3e2fd62b199e5a8               branch 'two' of ../
 754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   tag 'tag-master' of ../
 8e32a6d901327a23ef831511badce7bf3bf46689       not-for-merge   tag 'tag-one' of ../
 22feea448b023a2d864ef94b013735af34d238ba       not-for-merge   tag 'tag-one-tree' of ../
index 9ee213ea45155562edca9cd811f40c4b03f212dc..938e532db25e684599b39d1c862680a1caf8ea23 100644 (file)
@@ -2,7 +2,7 @@
 754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   branch 'master' of ../
 8e32a6d901327a23ef831511badce7bf3bf46689               branch 'one' of ../
 0567da4d5edd2ff4bb292a465ba9e64dcad9536b       not-for-merge   branch 'three' of ../
-6134ee8f857693b96ff1cc98d3e2fd62b199e5a8       not-for-merge   branch 'two' of ../
+6134ee8f857693b96ff1cc98d3e2fd62b199e5a8               branch 'two' of ../
 754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   tag 'tag-master' of ../
 8e32a6d901327a23ef831511badce7bf3bf46689       not-for-merge   tag 'tag-one' of ../
 22feea448b023a2d864ef94b013735af34d238ba       not-for-merge   tag 'tag-one-tree' of ../
index 44bd0ec59f80d7404b0259608ebab88c98d8934d..c9225bf6ff060118ae85b5c666085b3a558db16e 100644 (file)
@@ -2,7 +2,7 @@
 754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   branch 'master' of ../
 8e32a6d901327a23ef831511badce7bf3bf46689               branch 'one' of ../
 0567da4d5edd2ff4bb292a465ba9e64dcad9536b       not-for-merge   branch 'three' of ../
-6134ee8f857693b96ff1cc98d3e2fd62b199e5a8       not-for-merge   branch 'two' of ../
+6134ee8f857693b96ff1cc98d3e2fd62b199e5a8               branch 'two' of ../
 754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   tag 'tag-master' of ../
 8e32a6d901327a23ef831511badce7bf3bf46689       not-for-merge   tag 'tag-one' of ../
 22feea448b023a2d864ef94b013735af34d238ba       not-for-merge   tag 'tag-one-tree' of ../
index c1554f8f2dd7ee6f37810448d002520a2b6b544d..b08e0461954dcedc90df43c03302e3d4257c6f4b 100644 (file)
@@ -2,7 +2,7 @@
 754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   branch 'master' of ../
 8e32a6d901327a23ef831511badce7bf3bf46689               branch 'one' of ../
 0567da4d5edd2ff4bb292a465ba9e64dcad9536b       not-for-merge   branch 'three' of ../
-6134ee8f857693b96ff1cc98d3e2fd62b199e5a8       not-for-merge   branch 'two' of ../
+6134ee8f857693b96ff1cc98d3e2fd62b199e5a8               branch 'two' of ../
 754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   tag 'tag-master' of ../
 8e32a6d901327a23ef831511badce7bf3bf46689       not-for-merge   tag 'tag-one' of ../
 22feea448b023a2d864ef94b013735af34d238ba       not-for-merge   tag 'tag-one-tree' of ../
index e6134345b8d1361308b89c57926aa4e916bb358e..d4d547c84733f0faacc85c88c7b7fa138933e4a6 100644 (file)
@@ -2,7 +2,7 @@
 754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   branch 'master' of ../
 8e32a6d901327a23ef831511badce7bf3bf46689               branch 'one' of ../
 0567da4d5edd2ff4bb292a465ba9e64dcad9536b       not-for-merge   branch 'three' of ../
-6134ee8f857693b96ff1cc98d3e2fd62b199e5a8       not-for-merge   branch 'two' of ../
+6134ee8f857693b96ff1cc98d3e2fd62b199e5a8               branch 'two' of ../
 754b754407bf032e9a2f9d5a9ad05ca79a6b228f       not-for-merge   tag 'tag-master' of ../
 8e32a6d901327a23ef831511badce7bf3bf46689       not-for-merge   tag 'tag-one' of ../
 22feea448b023a2d864ef94b013735af34d238ba       not-for-merge   tag 'tag-one-tree' of ../
index 4e93aaab02e7b84b4bcf6ac70515e6cf52f0dabc..b6a54867b491ba67e4813fd492a1a8cc16959a21 100755 (executable)
@@ -38,7 +38,7 @@ cd "$base_dir"
 
 test_expect_success 'pulling from reference' \
 'cd C &&
-git pull ../B'
+git pull ../B master'
 
 cd "$base_dir"
 
@@ -61,7 +61,7 @@ test_expect_success 'existence of info/alternates' \
 cd "$base_dir"
 
 test_expect_success 'pulling from reference' \
-'cd D && git pull ../B'
+'cd D && git pull ../B master'
 
 cd "$base_dir"
 
diff --git a/transport.c b/transport.c
new file mode 100644 (file)
index 0000000..400af71
--- /dev/null
@@ -0,0 +1,832 @@
+#include "cache.h"
+#include "transport.h"
+#include "run-command.h"
+#ifndef NO_CURL
+#include "http.h"
+#endif
+#include "pkt-line.h"
+#include "fetch-pack.h"
+#include "walker.h"
+#include "bundle.h"
+#include "dir.h"
+#include "refs.h"
+
+/* rsync support */
+
+/*
+ * We copy packed-refs and refs/ into a temporary directory, then read
+ * the loose refs recursively (sorting whenever possible), and then
+ * insert those packed refs that are not yet in the list (not
+ * validating them, but assuming that the file is sorted).
+ *
+ * It appears that refactoring this out of refs.c would be too cumbersome.
+ */
+
+static int str_cmp(const void *a, const void *b)
+{
+       const char *s1 = *(const char **)a;
+       const char *s2 = *(const char **)b;
+
+       return strcmp(s1, s2);
+}
+
+/* path->buf + name_offset is expected to point to "refs/" */
+
+static int read_loose_refs(struct strbuf *path, int name_offset,
+               struct ref **tail)
+{
+       DIR *dir = opendir(path->buf);
+       struct dirent *de;
+       struct {
+               char **entries;
+               int nr, alloc;
+       } list;
+       int i, pathlen;
+
+       if (!dir)
+               return -1;
+
+       memset (&list, 0, sizeof(list));
+
+       while ((de = readdir(dir))) {
+               if (de->d_name[0] == '.' && (de->d_name[1] == '\0' ||
+                               (de->d_name[1] == '.' &&
+                                de->d_name[2] == '\0')))
+                       continue;
+               ALLOC_GROW(list.entries, list.nr + 1, list.alloc);
+               list.entries[list.nr++] = xstrdup(de->d_name);
+       }
+       closedir(dir);
+
+       /* sort the list */
+
+       qsort(list.entries, list.nr, sizeof(char *), str_cmp);
+
+       pathlen = path->len;
+       strbuf_addch(path, '/');
+
+       for (i = 0; i < list.nr; i++, strbuf_setlen(path, pathlen + 1)) {
+               strbuf_addstr(path, list.entries[i]);
+               if (read_loose_refs(path, name_offset, tail)) {
+                       int fd = open(path->buf, O_RDONLY);
+                       char buffer[40];
+                       struct ref *next;
+
+                       if (fd < 0)
+                               continue;
+                       next = alloc_ref(path->len - name_offset + 1);
+                       if (read_in_full(fd, buffer, 40) != 40 ||
+                                       get_sha1_hex(buffer, next->old_sha1)) {
+                               close(fd);
+                               free(next);
+                               continue;
+                       }
+                       close(fd);
+                       strcpy(next->name, path->buf + name_offset);
+                       (*tail)->next = next;
+                       *tail = next;
+               }
+       }
+       strbuf_setlen(path, pathlen);
+
+       for (i = 0; i < list.nr; i++)
+               free(list.entries[i]);
+       free(list.entries);
+
+       return 0;
+}
+
+/* insert the packed refs for which no loose refs were found */
+
+static void insert_packed_refs(const char *packed_refs, struct ref **list)
+{
+       FILE *f = fopen(packed_refs, "r");
+       static char buffer[PATH_MAX];
+
+       if (!f)
+               return;
+
+       for (;;) {
+               int cmp, len;
+
+               if (!fgets(buffer, sizeof(buffer), f)) {
+                       fclose(f);
+                       return;
+               }
+
+               if (hexval(buffer[0]) > 0xf)
+                       continue;
+               len = strlen(buffer);
+               if (buffer[len - 1] == '\n')
+                       buffer[--len] = '\0';
+               if (len < 41)
+                       continue;
+               while ((*list)->next &&
+                               (cmp = strcmp(buffer + 41,
+                                     (*list)->next->name)) > 0)
+                       list = &(*list)->next;
+               if (!(*list)->next || cmp < 0) {
+                       struct ref *next = alloc_ref(len - 40);
+                       buffer[40] = '\0';
+                       if (get_sha1_hex(buffer, next->old_sha1)) {
+                               warning ("invalid SHA-1: %s", buffer);
+                               free(next);
+                               continue;
+                       }
+                       strcpy(next->name, buffer + 41);
+                       next->next = (*list)->next;
+                       (*list)->next = next;
+                       list = &(*list)->next;
+               }
+       }
+}
+
+static struct ref *get_refs_via_rsync(const struct transport *transport)
+{
+       struct strbuf buf = STRBUF_INIT, temp_dir = STRBUF_INIT;
+       struct ref dummy, *tail = &dummy;
+       struct child_process rsync;
+       const char *args[5];
+       int temp_dir_len;
+
+       /* copy the refs to the temporary directory */
+
+       strbuf_addstr(&temp_dir, git_path("rsync-refs-XXXXXX"));
+       if (!mkdtemp(temp_dir.buf))
+               die ("Could not make temporary directory");
+       temp_dir_len = temp_dir.len;
+
+       strbuf_addstr(&buf, transport->url);
+       strbuf_addstr(&buf, "/refs");
+
+       memset(&rsync, 0, sizeof(rsync));
+       rsync.argv = args;
+       rsync.stdout_to_stderr = 1;
+       args[0] = "rsync";
+       args[1] = (transport->verbose > 0) ? "-rv" : "-r";
+       args[2] = buf.buf;
+       args[3] = temp_dir.buf;
+       args[4] = NULL;
+
+       if (run_command(&rsync))
+               die ("Could not run rsync to get refs");
+
+       strbuf_reset(&buf);
+       strbuf_addstr(&buf, transport->url);
+       strbuf_addstr(&buf, "/packed-refs");
+
+       args[2] = buf.buf;
+
+       if (run_command(&rsync))
+               die ("Could not run rsync to get refs");
+
+       /* read the copied refs */
+
+       strbuf_addstr(&temp_dir, "/refs");
+       read_loose_refs(&temp_dir, temp_dir_len + 1, &tail);
+       strbuf_setlen(&temp_dir, temp_dir_len);
+
+       tail = &dummy;
+       strbuf_addstr(&temp_dir, "/packed-refs");
+       insert_packed_refs(temp_dir.buf, &tail);
+       strbuf_setlen(&temp_dir, temp_dir_len);
+
+       if (remove_dir_recursively(&temp_dir, 0))
+               warning ("Error removing temporary directory %s.",
+                               temp_dir.buf);
+
+       strbuf_release(&buf);
+       strbuf_release(&temp_dir);
+
+       return dummy.next;
+}
+
+static int fetch_objs_via_rsync(struct transport *transport,
+                                int nr_objs, struct ref **to_fetch)
+{
+       struct strbuf buf = STRBUF_INIT;
+       struct child_process rsync;
+       const char *args[8];
+       int result;
+
+       strbuf_addstr(&buf, transport->url);
+       strbuf_addstr(&buf, "/objects/");
+
+       memset(&rsync, 0, sizeof(rsync));
+       rsync.argv = args;
+       rsync.stdout_to_stderr = 1;
+       args[0] = "rsync";
+       args[1] = (transport->verbose > 0) ? "-rv" : "-r";
+       args[2] = "--ignore-existing";
+       args[3] = "--exclude";
+       args[4] = "info";
+       args[5] = buf.buf;
+       args[6] = get_object_directory();
+       args[7] = NULL;
+
+       /* NEEDSWORK: handle one level of alternates */
+       result = run_command(&rsync);
+
+       strbuf_release(&buf);
+
+       return result;
+}
+
+static int write_one_ref(const char *name, const unsigned char *sha1,
+               int flags, void *data)
+{
+       struct strbuf *buf = data;
+       int len = buf->len;
+       FILE *f;
+
+       /* when called via for_each_ref(), flags is non-zero */
+       if (flags && prefixcmp(name, "refs/heads/") &&
+                       prefixcmp(name, "refs/tags/"))
+               return 0;
+
+       strbuf_addstr(buf, name);
+       if (safe_create_leading_directories(buf->buf) ||
+                       !(f = fopen(buf->buf, "w")) ||
+                       fprintf(f, "%s\n", sha1_to_hex(sha1)) < 0 ||
+                       fclose(f))
+               return error("problems writing temporary file %s", buf->buf);
+       strbuf_setlen(buf, len);
+       return 0;
+}
+
+static int write_refs_to_temp_dir(struct strbuf *temp_dir,
+               int refspec_nr, const char **refspec)
+{
+       int i;
+
+       for (i = 0; i < refspec_nr; i++) {
+               unsigned char sha1[20];
+               char *ref;
+
+               if (dwim_ref(refspec[i], strlen(refspec[i]), sha1, &ref) != 1)
+                       return error("Could not get ref %s", refspec[i]);
+
+               if (write_one_ref(ref, sha1, 0, temp_dir)) {
+                       free(ref);
+                       return -1;
+               }
+               free(ref);
+       }
+       return 0;
+}
+
+static int rsync_transport_push(struct transport *transport,
+               int refspec_nr, const char **refspec, int flags)
+{
+       struct strbuf buf = STRBUF_INIT, temp_dir = STRBUF_INIT;
+       int result = 0, i;
+       struct child_process rsync;
+       const char *args[10];
+
+       /* first push the objects */
+
+       strbuf_addstr(&buf, transport->url);
+       strbuf_addch(&buf, '/');
+
+       memset(&rsync, 0, sizeof(rsync));
+       rsync.argv = args;
+       rsync.stdout_to_stderr = 1;
+       i = 0;
+       args[i++] = "rsync";
+       args[i++] = "-a";
+       if (flags & TRANSPORT_PUSH_DRY_RUN)
+               args[i++] = "--dry-run";
+       if (transport->verbose > 0)
+               args[i++] = "-v";
+       args[i++] = "--ignore-existing";
+       args[i++] = "--exclude";
+       args[i++] = "info";
+       args[i++] = get_object_directory();
+       args[i++] = buf.buf;
+       args[i++] = NULL;
+
+       if (run_command(&rsync))
+               return error("Could not push objects to %s", transport->url);
+
+       /* copy the refs to the temporary directory; they could be packed. */
+
+       strbuf_addstr(&temp_dir, git_path("rsync-refs-XXXXXX"));
+       if (!mkdtemp(temp_dir.buf))
+               die ("Could not make temporary directory");
+       strbuf_addch(&temp_dir, '/');
+
+       if (flags & TRANSPORT_PUSH_ALL) {
+               if (for_each_ref(write_one_ref, &temp_dir))
+                       return -1;
+       } else if (write_refs_to_temp_dir(&temp_dir, refspec_nr, refspec))
+               return -1;
+
+       i = 2;
+       if (flags & TRANSPORT_PUSH_DRY_RUN)
+               args[i++] = "--dry-run";
+       if (!(flags & TRANSPORT_PUSH_FORCE))
+               args[i++] = "--ignore-existing";
+       args[i++] = temp_dir.buf;
+       args[i++] = transport->url;
+       args[i++] = NULL;
+       if (run_command(&rsync))
+               result = error("Could not push to %s", transport->url);
+
+       if (remove_dir_recursively(&temp_dir, 0))
+               warning ("Could not remove temporary directory %s.",
+                               temp_dir.buf);
+
+       strbuf_release(&buf);
+       strbuf_release(&temp_dir);
+
+       return result;
+}
+
+/* Generic functions for using commit walkers */
+
+static int fetch_objs_via_walker(struct transport *transport,
+                                int nr_objs, struct ref **to_fetch)
+{
+       char *dest = xstrdup(transport->url);
+       struct walker *walker = transport->data;
+       char **objs = xmalloc(nr_objs * sizeof(*objs));
+       int i;
+
+       walker->get_all = 1;
+       walker->get_tree = 1;
+       walker->get_history = 1;
+       walker->get_verbosely = transport->verbose >= 0;
+       walker->get_recover = 0;
+
+       for (i = 0; i < nr_objs; i++)
+               objs[i] = xstrdup(sha1_to_hex(to_fetch[i]->old_sha1));
+
+       if (walker_fetch(walker, nr_objs, objs, NULL, NULL))
+               die("Fetch failed.");
+
+       for (i = 0; i < nr_objs; i++)
+               free(objs[i]);
+       free(objs);
+       free(dest);
+       return 0;
+}
+
+static int disconnect_walker(struct transport *transport)
+{
+       struct walker *walker = transport->data;
+       if (walker)
+               walker_free(walker);
+       return 0;
+}
+
+#ifndef NO_CURL
+static int curl_transport_push(struct transport *transport, int refspec_nr, const char **refspec, int flags) {
+       const char **argv;
+       int argc;
+       int err;
+
+       argv = xmalloc((refspec_nr + 11) * sizeof(char *));
+       argv[0] = "http-push";
+       argc = 1;
+       if (flags & TRANSPORT_PUSH_ALL)
+               argv[argc++] = "--all";
+       if (flags & TRANSPORT_PUSH_FORCE)
+               argv[argc++] = "--force";
+       if (flags & TRANSPORT_PUSH_DRY_RUN)
+               argv[argc++] = "--dry-run";
+       argv[argc++] = transport->url;
+       while (refspec_nr--)
+               argv[argc++] = *refspec++;
+       argv[argc] = NULL;
+       err = run_command_v_opt(argv, RUN_GIT_CMD);
+       switch (err) {
+       case -ERR_RUN_COMMAND_FORK:
+               error("unable to fork for %s", argv[0]);
+       case -ERR_RUN_COMMAND_EXEC:
+               error("unable to exec %s", argv[0]);
+               break;
+       case -ERR_RUN_COMMAND_WAITPID:
+       case -ERR_RUN_COMMAND_WAITPID_WRONG_PID:
+       case -ERR_RUN_COMMAND_WAITPID_SIGNAL:
+       case -ERR_RUN_COMMAND_WAITPID_NOEXIT:
+               error("%s died with strange error", argv[0]);
+       }
+       return !!err;
+}
+
+static int missing__target(int code, int result)
+{
+       return  /* file:// URL -- do we ever use one??? */
+               (result == CURLE_FILE_COULDNT_READ_FILE) ||
+               /* http:// and https:// URL */
+               (code == 404 && result == CURLE_HTTP_RETURNED_ERROR) ||
+               /* ftp:// URL */
+               (code == 550 && result == CURLE_FTP_COULDNT_RETR_FILE)
+               ;
+}
+
+#define missing_target(a) missing__target((a)->http_code, (a)->curl_result)
+
+static struct ref *get_refs_via_curl(const struct transport *transport)
+{
+       struct buffer buffer;
+       char *data, *start, *mid;
+       char *ref_name;
+       char *refs_url;
+       int i = 0;
+
+       struct active_request_slot *slot;
+       struct slot_results results;
+
+       struct ref *refs = NULL;
+       struct ref *ref = NULL;
+       struct ref *last_ref = NULL;
+
+       data = xmalloc(4096);
+       buffer.size = 4096;
+       buffer.posn = 0;
+       buffer.buffer = data;
+
+       refs_url = xmalloc(strlen(transport->url) + 11);
+       sprintf(refs_url, "%s/info/refs", transport->url);
+
+       http_init();
+
+       slot = get_active_slot();
+       slot->results = &results;
+       curl_easy_setopt(slot->curl, CURLOPT_FILE, &buffer);
+       curl_easy_setopt(slot->curl, CURLOPT_WRITEFUNCTION, fwrite_buffer);
+       curl_easy_setopt(slot->curl, CURLOPT_URL, refs_url);
+       curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, NULL);
+       if (start_active_slot(slot)) {
+               run_active_slot(slot);
+               if (results.curl_result != CURLE_OK) {
+                       if (missing_target(&results)) {
+                               free(buffer.buffer);
+                               return NULL;
+                       } else {
+                               free(buffer.buffer);
+                               error("%s", curl_errorstr);
+                               return NULL;
+                       }
+               }
+       } else {
+               free(buffer.buffer);
+               error("Unable to start request");
+               return NULL;
+       }
+
+       http_cleanup();
+
+       data = buffer.buffer;
+       start = NULL;
+       mid = data;
+       while (i < buffer.posn) {
+               if (!start)
+                       start = &data[i];
+               if (data[i] == '\t')
+                       mid = &data[i];
+               if (data[i] == '\n') {
+                       data[i] = 0;
+                       ref_name = mid + 1;
+                       ref = xmalloc(sizeof(struct ref) +
+                                     strlen(ref_name) + 1);
+                       memset(ref, 0, sizeof(struct ref));
+                       strcpy(ref->name, ref_name);
+                       get_sha1_hex(start, ref->old_sha1);
+                       if (!refs)
+                               refs = ref;
+                       if (last_ref)
+                               last_ref->next = ref;
+                       last_ref = ref;
+                       start = NULL;
+               }
+               i++;
+       }
+
+       free(buffer.buffer);
+
+       return refs;
+}
+
+static int fetch_objs_via_curl(struct transport *transport,
+                                int nr_objs, struct ref **to_fetch)
+{
+       if (!transport->data)
+               transport->data = get_http_walker(transport->url);
+       return fetch_objs_via_walker(transport, nr_objs, to_fetch);
+}
+
+#endif
+
+struct bundle_transport_data {
+       int fd;
+       struct bundle_header header;
+};
+
+static struct ref *get_refs_from_bundle(const struct transport *transport)
+{
+       struct bundle_transport_data *data = transport->data;
+       struct ref *result = NULL;
+       int i;
+
+       if (data->fd > 0)
+               close(data->fd);
+       data->fd = read_bundle_header(transport->url, &data->header);
+       if (data->fd < 0)
+               die ("Could not read bundle '%s'.", transport->url);
+       for (i = 0; i < data->header.references.nr; i++) {
+               struct ref_list_entry *e = data->header.references.list + i;
+               struct ref *ref = alloc_ref(strlen(e->name) + 1);
+               hashcpy(ref->old_sha1, e->sha1);
+               strcpy(ref->name, e->name);
+               ref->next = result;
+               result = ref;
+       }
+       return result;
+}
+
+static int fetch_refs_from_bundle(struct transport *transport,
+                              int nr_heads, struct ref **to_fetch)
+{
+       struct bundle_transport_data *data = transport->data;
+       return unbundle(&data->header, data->fd);
+}
+
+static int close_bundle(struct transport *transport)
+{
+       struct bundle_transport_data *data = transport->data;
+       if (data->fd > 0)
+               close(data->fd);
+       free(data);
+       return 0;
+}
+
+struct git_transport_data {
+       unsigned thin : 1;
+       unsigned keep : 1;
+       int depth;
+       const char *uploadpack;
+       const char *receivepack;
+};
+
+static int set_git_option(struct transport *connection,
+                         const char *name, const char *value)
+{
+       struct git_transport_data *data = connection->data;
+       if (!strcmp(name, TRANS_OPT_UPLOADPACK)) {
+               data->uploadpack = value;
+               return 0;
+       } else if (!strcmp(name, TRANS_OPT_RECEIVEPACK)) {
+               data->receivepack = value;
+               return 0;
+       } else if (!strcmp(name, TRANS_OPT_THIN)) {
+               data->thin = !!value;
+               return 0;
+       } else if (!strcmp(name, TRANS_OPT_KEEP)) {
+               data->keep = !!value;
+               return 0;
+       } else if (!strcmp(name, TRANS_OPT_DEPTH)) {
+               if (!value)
+                       data->depth = 0;
+               else
+                       data->depth = atoi(value);
+               return 0;
+       }
+       return 1;
+}
+
+static struct ref *get_refs_via_connect(const struct transport *transport)
+{
+       struct git_transport_data *data = transport->data;
+       struct ref *refs;
+       int fd[2];
+       pid_t pid;
+       char *dest = xstrdup(transport->url);
+
+       pid = git_connect(fd, dest, data->uploadpack, 0);
+
+       if (pid < 0)
+               die("Failed to connect to \"%s\"", transport->url);
+
+       get_remote_heads(fd[0], &refs, 0, NULL, 0);
+       packet_flush(fd[1]);
+
+       finish_connect(pid);
+
+       free(dest);
+
+       return refs;
+}
+
+static int fetch_refs_via_pack(struct transport *transport,
+                              int nr_heads, struct ref **to_fetch)
+{
+       struct git_transport_data *data = transport->data;
+       char **heads = xmalloc(nr_heads * sizeof(*heads));
+       char **origh = xmalloc(nr_heads * sizeof(*origh));
+       struct ref *refs;
+       char *dest = xstrdup(transport->url);
+       struct fetch_pack_args args;
+       int i;
+
+       memset(&args, 0, sizeof(args));
+       args.uploadpack = data->uploadpack;
+       args.keep_pack = data->keep;
+       args.lock_pack = 1;
+       args.use_thin_pack = data->thin;
+       args.verbose = transport->verbose > 0;
+       args.depth = data->depth;
+
+       for (i = 0; i < nr_heads; i++)
+               origh[i] = heads[i] = xstrdup(to_fetch[i]->name);
+       refs = fetch_pack(&args, dest, nr_heads, heads, &transport->pack_lockfile);
+
+       for (i = 0; i < nr_heads; i++)
+               free(origh[i]);
+       free(origh);
+       free(heads);
+       free_refs(refs);
+       free(dest);
+       return 0;
+}
+
+static int git_transport_push(struct transport *transport, int refspec_nr, const char **refspec, int flags) {
+       struct git_transport_data *data = transport->data;
+       const char **argv;
+       char *rem;
+       int argc;
+       int err;
+
+       argv = xmalloc((refspec_nr + 11) * sizeof(char *));
+       argv[0] = "send-pack";
+       argc = 1;
+       if (flags & TRANSPORT_PUSH_ALL)
+               argv[argc++] = "--all";
+       if (flags & TRANSPORT_PUSH_FORCE)
+               argv[argc++] = "--force";
+       if (flags & TRANSPORT_PUSH_DRY_RUN)
+               argv[argc++] = "--dry-run";
+       if (data->receivepack) {
+               char *rp = xmalloc(strlen(data->receivepack) + 16);
+               sprintf(rp, "--receive-pack=%s", data->receivepack);
+               argv[argc++] = rp;
+       }
+       if (data->thin)
+               argv[argc++] = "--thin";
+       rem = xmalloc(strlen(transport->remote->name) + 10);
+       sprintf(rem, "--remote=%s", transport->remote->name);
+       argv[argc++] = rem;
+       argv[argc++] = transport->url;
+       while (refspec_nr--)
+               argv[argc++] = *refspec++;
+       argv[argc] = NULL;
+       err = run_command_v_opt(argv, RUN_GIT_CMD);
+       switch (err) {
+       case -ERR_RUN_COMMAND_FORK:
+               error("unable to fork for %s", argv[0]);
+       case -ERR_RUN_COMMAND_EXEC:
+               error("unable to exec %s", argv[0]);
+               break;
+       case -ERR_RUN_COMMAND_WAITPID:
+       case -ERR_RUN_COMMAND_WAITPID_WRONG_PID:
+       case -ERR_RUN_COMMAND_WAITPID_SIGNAL:
+       case -ERR_RUN_COMMAND_WAITPID_NOEXIT:
+               error("%s died with strange error", argv[0]);
+       }
+       return !!err;
+}
+
+static int disconnect_git(struct transport *transport)
+{
+       free(transport->data);
+       return 0;
+}
+
+static int is_local(const char *url)
+{
+       const char *colon = strchr(url, ':');
+       const char *slash = strchr(url, '/');
+       return !colon || (slash && slash < colon);
+}
+
+static int is_file(const char *url)
+{
+       struct stat buf;
+       if (stat(url, &buf))
+               return 0;
+       return S_ISREG(buf.st_mode);
+}
+
+struct transport *transport_get(struct remote *remote, const char *url)
+{
+       struct transport *ret = xcalloc(1, sizeof(*ret));
+
+       ret->remote = remote;
+       ret->url = url;
+
+       if (!prefixcmp(url, "rsync://")) {
+               ret->get_refs_list = get_refs_via_rsync;
+               ret->fetch = fetch_objs_via_rsync;
+               ret->push = rsync_transport_push;
+
+       } else if (!prefixcmp(url, "http://")
+               || !prefixcmp(url, "https://")
+               || !prefixcmp(url, "ftp://")) {
+#ifdef NO_CURL
+               error("git was compiled without libcurl support.");
+#else
+               ret->get_refs_list = get_refs_via_curl;
+               ret->fetch = fetch_objs_via_curl;
+               ret->push = curl_transport_push;
+#endif
+               ret->disconnect = disconnect_walker;
+
+       } else if (is_local(url) && is_file(url)) {
+               struct bundle_transport_data *data = xcalloc(1, sizeof(*data));
+               ret->data = data;
+               ret->get_refs_list = get_refs_from_bundle;
+               ret->fetch = fetch_refs_from_bundle;
+               ret->disconnect = close_bundle;
+
+       } else {
+               struct git_transport_data *data = xcalloc(1, sizeof(*data));
+               ret->data = data;
+               ret->set_option = set_git_option;
+               ret->get_refs_list = get_refs_via_connect;
+               ret->fetch = fetch_refs_via_pack;
+               ret->push = git_transport_push;
+               ret->disconnect = disconnect_git;
+
+               data->thin = 1;
+               data->uploadpack = "git-upload-pack";
+               if (remote && remote->uploadpack)
+                       data->uploadpack = remote->uploadpack;
+               data->receivepack = "git-receive-pack";
+               if (remote && remote->receivepack)
+                       data->receivepack = remote->receivepack;
+       }
+
+       return ret;
+}
+
+int transport_set_option(struct transport *transport,
+                        const char *name, const char *value)
+{
+       if (transport->set_option)
+               return transport->set_option(transport, name, value);
+       return 1;
+}
+
+int transport_push(struct transport *transport,
+                  int refspec_nr, const char **refspec, int flags)
+{
+       if (!transport->push)
+               return 1;
+       return transport->push(transport, refspec_nr, refspec, flags);
+}
+
+struct ref *transport_get_remote_refs(struct transport *transport)
+{
+       if (!transport->remote_refs)
+               transport->remote_refs = transport->get_refs_list(transport);
+       return transport->remote_refs;
+}
+
+int transport_fetch_refs(struct transport *transport, struct ref *refs)
+{
+       int rc;
+       int nr_heads = 0, nr_alloc = 0;
+       struct ref **heads = NULL;
+       struct ref *rm;
+
+       for (rm = refs; rm; rm = rm->next) {
+               if (rm->peer_ref &&
+                   !hashcmp(rm->peer_ref->old_sha1, rm->old_sha1))
+                       continue;
+               ALLOC_GROW(heads, nr_heads + 1, nr_alloc);
+               heads[nr_heads++] = rm;
+       }
+
+       rc = transport->fetch(transport, nr_heads, heads);
+       free(heads);
+       return rc;
+}
+
+void transport_unlock_pack(struct transport *transport)
+{
+       if (transport->pack_lockfile) {
+               unlink(transport->pack_lockfile);
+               free(transport->pack_lockfile);
+               transport->pack_lockfile = NULL;
+       }
+}
+
+int transport_disconnect(struct transport *transport)
+{
+       int ret = 0;
+       if (transport->disconnect)
+               ret = transport->disconnect(transport);
+       free(transport);
+       return ret;
+}
diff --git a/transport.h b/transport.h
new file mode 100644 (file)
index 0000000..df12ea7
--- /dev/null
@@ -0,0 +1,70 @@
+#ifndef TRANSPORT_H
+#define TRANSPORT_H
+
+#include "cache.h"
+#include "remote.h"
+
+struct transport {
+       struct remote *remote;
+       const char *url;
+       void *data;
+       struct ref *remote_refs;
+
+       /**
+        * Returns 0 if successful, positive if the option is not
+        * recognized or is inapplicable, and negative if the option
+        * is applicable but the value is invalid.
+        **/
+       int (*set_option)(struct transport *connection, const char *name,
+                         const char *value);
+
+       struct ref *(*get_refs_list)(const struct transport *transport);
+       int (*fetch)(struct transport *transport, int refs_nr, struct ref **refs);
+       int (*push)(struct transport *connection, int refspec_nr, const char **refspec, int flags);
+
+       int (*disconnect)(struct transport *connection);
+       char *pack_lockfile;
+       signed verbose : 2;
+};
+
+#define TRANSPORT_PUSH_ALL 1
+#define TRANSPORT_PUSH_FORCE 2
+#define TRANSPORT_PUSH_DRY_RUN 4
+
+/* Returns a transport suitable for the url */
+struct transport *transport_get(struct remote *, const char *);
+
+/* Transport options which apply to git:// and scp-style URLs */
+
+/* The program to use on the remote side to send a pack */
+#define TRANS_OPT_UPLOADPACK "uploadpack"
+
+/* The program to use on the remote side to receive a pack */
+#define TRANS_OPT_RECEIVEPACK "receivepack"
+
+/* Transfer the data as a thin pack if not null */
+#define TRANS_OPT_THIN "thin"
+
+/* Keep the pack that was transferred if not null */
+#define TRANS_OPT_KEEP "keep"
+
+/* Limit the depth of the fetch if not null */
+#define TRANS_OPT_DEPTH "depth"
+
+/**
+ * Returns 0 if the option was used, non-zero otherwise. Prints a
+ * message to stderr if the option is not used.
+ **/
+int transport_set_option(struct transport *transport, const char *name,
+                        const char *value);
+
+int transport_push(struct transport *connection,
+                  int refspec_nr, const char **refspec, int flags);
+
+struct ref *transport_get_remote_refs(struct transport *transport);
+
+int transport_fetch_refs(struct transport *transport, struct ref *refs);
+void transport_unlock_pack(struct transport *transport);
+int transport_disconnect(struct transport *transport);
+
+#endif
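The TRANSPORT_PUSH_* values above are bit flags and may be OR-ed together. As a hedged illustration (the refspec string and the surrounding setup are invented, and `transport` is assumed to come from transport_get() as in the sketch after transport.c), a forced dry-run push would look roughly like:

const char *refspec[] = { "refs/heads/topic:refs/heads/topic" };
int flags = TRANSPORT_PUSH_FORCE | TRANSPORT_PUSH_DRY_RUN;

/* Non-zero means the transport cannot push or the push failed. */
if (transport_push(transport, 1, refspec, flags))
        error("push failed");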
diff --git a/walker.c b/walker.c
new file mode 100644 (file)
index 0000000..397b80d
--- /dev/null
+++ b/walker.c
@@ -0,0 +1,317 @@
+#include "cache.h"
+#include "walker.h"
+#include "commit.h"
+#include "tree.h"
+#include "tree-walk.h"
+#include "tag.h"
+#include "blob.h"
+#include "refs.h"
+
+static unsigned char current_commit_sha1[20];
+
+void walker_say(struct walker *walker, const char *fmt, const char *hex)
+{
+       if (walker->get_verbosely)
+               fprintf(stderr, fmt, hex);
+}
+
+static void report_missing(const struct object *obj)
+{
+       char missing_hex[41];
+       strcpy(missing_hex, sha1_to_hex(obj->sha1));
+       fprintf(stderr, "Cannot obtain needed %s %s\n",
+               obj->type ? typename(obj->type): "object", missing_hex);
+       if (!is_null_sha1(current_commit_sha1))
+               fprintf(stderr, "while processing commit %s.\n",
+                       sha1_to_hex(current_commit_sha1));
+}
+
+static int process(struct walker *walker, struct object *obj);
+
+static int process_tree(struct walker *walker, struct tree *tree)
+{
+       struct tree_desc desc;
+       struct name_entry entry;
+
+       if (parse_tree(tree))
+               return -1;
+
+       init_tree_desc(&desc, tree->buffer, tree->size);
+       while (tree_entry(&desc, &entry)) {
+               struct object *obj = NULL;
+
+               /* submodule commits are not stored in the superproject */
+               if (S_ISGITLINK(entry.mode))
+                       continue;
+               if (S_ISDIR(entry.mode)) {
+                       struct tree *tree = lookup_tree(entry.sha1);
+                       if (tree)
+                               obj = &tree->object;
+               }
+               else {
+                       struct blob *blob = lookup_blob(entry.sha1);
+                       if (blob)
+                               obj = &blob->object;
+               }
+               if (!obj || process(walker, obj))
+                       return -1;
+       }
+       free(tree->buffer);
+       tree->buffer = NULL;
+       tree->size = 0;
+       return 0;
+}
+
+#define COMPLETE       (1U << 0)
+#define SEEN           (1U << 1)
+#define TO_SCAN                (1U << 2)
+
+static struct commit_list *complete = NULL;
+
+static int process_commit(struct walker *walker, struct commit *commit)
+{
+       if (parse_commit(commit))
+               return -1;
+
+       while (complete && complete->item->date >= commit->date) {
+               pop_most_recent_commit(&complete, COMPLETE);
+       }
+
+       if (commit->object.flags & COMPLETE)
+               return 0;
+
+       hashcpy(current_commit_sha1, commit->object.sha1);
+
+       walker_say(walker, "walk %s\n", sha1_to_hex(commit->object.sha1));
+
+       if (walker->get_tree) {
+               if (process(walker, &commit->tree->object))
+                       return -1;
+               if (!walker->get_all)
+                       walker->get_tree = 0;
+       }
+       if (walker->get_history) {
+               struct commit_list *parents = commit->parents;
+               for (; parents; parents = parents->next) {
+                       if (process(walker, &parents->item->object))
+                               return -1;
+               }
+       }
+       return 0;
+}
+
+static int process_tag(struct walker *walker, struct tag *tag)
+{
+       if (parse_tag(tag))
+               return -1;
+       return process(walker, tag->tagged);
+}
+
+static struct object_list *process_queue = NULL;
+static struct object_list **process_queue_end = &process_queue;
+
+static int process_object(struct walker *walker, struct object *obj)
+{
+       if (obj->type == OBJ_COMMIT) {
+               if (process_commit(walker, (struct commit *)obj))
+                       return -1;
+               return 0;
+       }
+       if (obj->type == OBJ_TREE) {
+               if (process_tree(walker, (struct tree *)obj))
+                       return -1;
+               return 0;
+       }
+       if (obj->type == OBJ_BLOB) {
+               return 0;
+       }
+       if (obj->type == OBJ_TAG) {
+               if (process_tag(walker, (struct tag *)obj))
+                       return -1;
+               return 0;
+       }
+       return error("Unable to determine requirements "
+                    "of type %s for %s",
+                    typename(obj->type), sha1_to_hex(obj->sha1));
+}
+
+static int process(struct walker *walker, struct object *obj)
+{
+       if (obj->flags & SEEN)
+               return 0;
+       obj->flags |= SEEN;
+
+       if (has_sha1_file(obj->sha1)) {
+               /* We already have it, so we should scan it now. */
+               obj->flags |= TO_SCAN;
+       }
+       else {
+               if (obj->flags & COMPLETE)
+                       return 0;
+               walker->prefetch(walker, obj->sha1);
+       }
+
+       object_list_insert(obj, process_queue_end);
+       process_queue_end = &(*process_queue_end)->next;
+       return 0;
+}
+
+static int loop(struct walker *walker)
+{
+       struct object_list *elem;
+
+       while (process_queue) {
+               struct object *obj = process_queue->item;
+               elem = process_queue;
+               process_queue = elem->next;
+               free(elem);
+               if (!process_queue)
+                       process_queue_end = &process_queue;
+
+               /* If we are not scanning this object, we placed it in
+                * the queue because we needed to fetch it first.
+                */
+               if (! (obj->flags & TO_SCAN)) {
+                       if (walker->fetch(walker, obj->sha1)) {
+                               report_missing(obj);
+                               return -1;
+                       }
+               }
+               if (!obj->type)
+                       parse_object(obj->sha1);
+               if (process_object(walker, obj))
+                       return -1;
+       }
+       return 0;
+}
+
+static int interpret_target(struct walker *walker, char *target, unsigned char *sha1)
+{
+       if (!get_sha1_hex(target, sha1))
+               return 0;
+       if (!check_ref_format(target)) {
+               if (!walker->fetch_ref(walker, target, sha1)) {
+                       return 0;
+               }
+       }
+       return -1;
+}
+
+static int mark_complete(const char *path, const unsigned char *sha1, int flag, void *cb_data)
+{
+       struct commit *commit = lookup_commit_reference_gently(sha1, 1);
+       if (commit) {
+               commit->object.flags |= COMPLETE;
+               insert_by_date(commit, &complete);
+       }
+       return 0;
+}
+
+int walker_targets_stdin(char ***target, const char ***write_ref)
+{
+       int targets = 0, targets_alloc = 0;
+       struct strbuf buf;
+       *target = NULL; *write_ref = NULL;
+       strbuf_init(&buf, 0);
+       while (1) {
+               char *rf_one = NULL;
+               char *tg_one;
+
+               if (strbuf_getline(&buf, stdin, '\n') == EOF)
+                       break;
+               tg_one = buf.buf;
+               rf_one = strchr(tg_one, '\t');
+               if (rf_one)
+                       *rf_one++ = 0;
+
+               if (targets >= targets_alloc) {
+                       targets_alloc = targets_alloc ? targets_alloc * 2 : 64;
+                       *target = xrealloc(*target, targets_alloc * sizeof(**target));
+                       *write_ref = xrealloc(*write_ref, targets_alloc * sizeof(**write_ref));
+               }
+               (*target)[targets] = xstrdup(tg_one);
+               (*write_ref)[targets] = rf_one ? xstrdup(rf_one) : NULL;
+               targets++;
+       }
+       strbuf_release(&buf);
+       return targets;
+}
+
+void walker_targets_free(int targets, char **target, const char **write_ref)
+{
+       while (targets--) {
+               free(target[targets]);
+               if (write_ref && write_ref[targets])
+                       free((char *) write_ref[targets]);
+       }
+}
+
+int walker_fetch(struct walker *walker, int targets, char **target,
+                const char **write_ref, const char *write_ref_log_details)
+{
+       struct ref_lock **lock = xcalloc(targets, sizeof(struct ref_lock *));
+       unsigned char *sha1 = xmalloc(targets * 20);
+       char *msg;
+       int ret;
+       int i;
+
+       save_commit_buffer = 0;
+       track_object_refs = 0;
+
+       for (i = 0; i < targets; i++) {
+               if (!write_ref || !write_ref[i])
+                       continue;
+
+               lock[i] = lock_ref_sha1(write_ref[i], NULL);
+               if (!lock[i]) {
+                       error("Can't lock ref %s", write_ref[i]);
+                       goto unlock_and_fail;
+               }
+       }
+
+       if (!walker->get_recover)
+               for_each_ref(mark_complete, NULL);
+
+       for (i = 0; i < targets; i++) {
+               if (interpret_target(walker, target[i], &sha1[20 * i])) {
+                       error("Could not interpret %s as something to pull", target[i]);
+                       goto unlock_and_fail;
+               }
+               if (process(walker, lookup_unknown_object(&sha1[20 * i])))
+                       goto unlock_and_fail;
+       }
+
+       if (loop(walker))
+               goto unlock_and_fail;
+
+       if (write_ref_log_details) {
+               msg = xmalloc(strlen(write_ref_log_details) + 12);
+               sprintf(msg, "fetch from %s", write_ref_log_details);
+       } else {
+               msg = NULL;
+       }
+       for (i = 0; i < targets; i++) {
+               if (!write_ref || !write_ref[i])
+                       continue;
+               ret = write_ref_sha1(lock[i], &sha1[20 * i], msg ? msg : "fetch (unknown)");
+               lock[i] = NULL;
+               if (ret)
+                       goto unlock_and_fail;
+       }
+       free(msg);
+
+       return 0;
+
+unlock_and_fail:
+       for (i = 0; i < targets; i++)
+               if (lock[i])
+                       unlock_ref(lock[i]);
+
+       return -1;
+}
+
+void walker_free(struct walker *walker)
+{
+       walker->cleanup(walker);
+       free(walker);
+}
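For orientation, a hypothetical driver of the walker machinery above; the URL and ref names are invented, and real callers such as the new builtin-http-fetch wire these fields up from command-line options instead:

#include "cache.h"
#include "walker.h"

/* Hypothetical example: fetch one branch over HTTP into a tracking ref. */
static int example_fetch(void)
{
        struct walker *walker = get_http_walker("http://example.com/repo.git/");
        char *target[] = { "refs/heads/master" };
        const char *write_ref[] = { "refs/remotes/example/master" };
        int ret;

        walker->get_tree = 1;
        walker->get_history = 1;
        walker->get_all = 1;

        ret = walker_fetch(walker, 1, target, write_ref,
                           "http://example.com/repo.git/");
        walker_free(walker);
        return ret;
}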
diff --git a/walker.h b/walker.h
new file mode 100644 (file)
index 0000000..ea2c363
--- /dev/null
+++ b/walker.h
@@ -0,0 +1,37 @@
+#ifndef WALKER_H
+#define WALKER_H
+
+struct walker {
+       void *data;
+       int (*fetch_ref)(struct walker *, char *ref, unsigned char *sha1);
+       void (*prefetch)(struct walker *, unsigned char *sha1);
+       int (*fetch)(struct walker *, unsigned char *sha1);
+       void (*cleanup)(struct walker *);
+       int get_tree;
+       int get_history;
+       int get_all;
+       int get_verbosely;
+       int get_recover;
+
+       int corrupt_object_found;
+};
+
+/* Report what we got under get_verbosely */
+void walker_say(struct walker *walker, const char *, const char *);
+
+/* Load pull targets from stdin */
+int walker_targets_stdin(char ***target, const char ***write_ref);
+
+/* Free up loaded targets */
+void walker_targets_free(int targets, char **target, const char **write_ref);
+
+/* If write_ref is set, it names the refs to write the fetched values to. */
+/* If write_ref_log_details is set, extra text is added to the reflog entry. */
+int walker_fetch(struct walker *impl, int targets, char **target,
+                const char **write_ref, const char *write_ref_log_details);
+
+void walker_free(struct walker *walker);
+
+struct walker *get_http_walker(const char *url);
+
+#endif /* WALKER_H */
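Finally, walker_targets_stdin() expects one target per input line, optionally followed by a tab and the local ref to store the result in. A stdin-driven caller could therefore look roughly like the following sketch (hypothetical; real option parsing and error reporting omitted):

char **target;
const char **write_ref;
int targets = walker_targets_stdin(&target, &write_ref);
struct walker *walker = get_http_walker("http://example.com/repo.git/");

/* Each stdin line: "<sha1-or-ref>" or "<sha1-or-ref>\t<local-ref-to-write>". */
if (walker_fetch(walker, targets, target, write_ref, NULL))
        die("fetch failed");

walker_targets_free(targets, target, write_ref);
walker_free(walker);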