#include "cache.h"
#include "config.h"
+#include "color.h"
#include "builtin.h"
+ #include "repository.h"
#include "commit.h"
#include "diff.h"
#include "revision.h"
#include "line-log.h"
#include "dir.h"
#include "progress.h"
+ #include "object-store.h"
#include "blame.h"
+#include "string-list.h"
static char blame_usage[] = N_("git blame [<options>] [<rev-opts>] [<rev>] [--] <file>");
#include "parse-options.h"
#include "fetch-pack.h"
#include "refs.h"
+#include "refspec.h"
+ #include "object-store.h"
#include "tree.h"
#include "tree-walk.h"
#include "unpack-trees.h"
return 0;
}
+static int opt_parse_rename_score(const struct option *opt, const char *arg, int unset)
+{
+ const char **value = opt->value;
+ if (arg != NULL && *arg == '=')
+ arg = arg + 1;
+
+ *value = arg;
+ return 0;
+}
+
static void determine_whence(struct wt_status *s)
{
- if (file_exists(git_path_merge_head()))
+ if (file_exists(git_path_merge_head(the_repository)))
whence = FROM_MERGE;
- else if (file_exists(git_path_cherry_pick_head())) {
+ else if (file_exists(git_path_cherry_pick_head(the_repository))) {
whence = FROM_CHERRY_PICK;
if (file_exists(git_path_seq_dir()))
sequencer_in_use = 1;
#include "hashmap.h"
#include "argv-array.h"
#include "run-command.h"
+ #include "object-store.h"
#include "revision.h"
#include "list-objects.h"
+#include "commit-slab.h"
#define MAX_TAGS (FLAG_BITS - 1)
#include "cache.h"
#include "config.h"
#include "refs.h"
+#include "refspec.h"
+ #include "object-store.h"
#include "commit.h"
#include "object.h"
#include "tag.h"
#include "config.h"
#include "repository.h"
#include "refs.h"
+#include "refspec.h"
+ #include "object-store.h"
#include "commit.h"
#include "builtin.h"
#include "string-list.h"
#include "builtin.h"
#include "tree-walk.h"
#include "xdiff-interface.h"
+ #include "object-store.h"
#include "blob.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "merge-blobs.h"
static const char merge_tree_usage[] = "git merge-tree <base-tree> <branch1> <branch2>";
#include "strbuf.h"
#include "run-command.h"
#include "refs.h"
+#include "refspec.h"
+ #include "object-store.h"
#include "argv-array.h"
static const char * const builtin_remote_usage[] = {
return replace_object_oid(old_ref, &old_oid, "replacement", &new_oid, force);
}
- const char *graft_file = get_graft_file();
+static int convert_graft_file(int force)
+{
++ const char *graft_file = get_graft_file(the_repository);
+ FILE *fp = fopen_or_warn(graft_file, "r");
+ struct strbuf buf = STRBUF_INIT, err = STRBUF_INIT;
+ struct argv_array args = ARGV_ARRAY_INIT;
+
+ if (!fp)
+ return -1;
+
+ while (strbuf_getline(&buf, fp) != EOF) {
+ if (*buf.buf == '#')
+ continue;
+
+ argv_array_split(&args, buf.buf);
+ if (args.argc && create_graft(args.argc, args.argv, force, 1))
+ strbuf_addf(&err, "\n\t%s", buf.buf);
+ argv_array_clear(&args);
+ }
+ fclose(fp);
+
+ strbuf_release(&buf);
+
+ if (!err.len)
+ return unlink_or_warn(graft_file);
+
+ warning(_("could not convert the following graft(s):\n%s"), err.buf);
+ strbuf_release(&err);
+
+ return -1;
+}
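
convert_graft_file() above walks the old info/grafts file line by line: lines starting with '#' are comments, and every other line is an object name followed by its fake parents, which argv_array_split() breaks into whitespace-separated tokens before create_graft() turns them into replace refs. A minimal standalone sketch of that tokenizing step, in plain C with no git internals (the fixed token limit and the abbreviated object names are made up for the example):

#include <stdio.h>
#include <string.h>

/* Split one grafts-file line into whitespace-separated tokens. */
static int split_graft_line(char *line, char *tokens[], int max_tokens)
{
	int n = 0;
	char *tok = strtok(line, " \t\n");

	while (tok && n < max_tokens) {
		tokens[n++] = tok;
		tok = strtok(NULL, " \t\n");
	}
	return n;	/* tokens[0] is the commit, tokens[1..n-1] its fake parents */
}

int main(void)
{
	char line[] = "3585f0a3 c1f3b2a9 9f4e2d10";
	char *tokens[32];
	int i, n = split_graft_line(line, tokens, 32);

	for (i = 0; i < n; i++)
		printf("%s: %s\n", i ? "parent" : "commit", tokens[i]);
	return 0;
}
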
+
int cmd_replace(int argc, const char **argv, const char *prefix)
{
int force = 0;
extern const char *get_git_common_dir(void);
extern char *get_object_directory(void);
extern char *get_index_file(void);
- extern char *get_graft_file(void);
+ extern char *get_graft_file(struct repository *r);
-extern int set_git_dir(const char *path);
+extern void set_git_dir(const char *path);
extern int get_common_dir_noenv(struct strbuf *sb, const char *gitdir);
extern int get_common_dir(struct strbuf *sb, const char *gitdir);
extern const char *get_git_namespace(void);
#include "cache.h"
#include "tag.h"
#include "commit.h"
+#include "commit-graph.h"
+ #include "repository.h"
+ #include "object-store.h"
#include "pkt-line.h"
#include "utf8.h"
#include "diff.h"
return 0;
}
- static void prepare_commit_graft(void)
+ static void prepare_commit_graft(struct repository *r)
{
- static int commit_graft_prepared;
char *graft_file;
- if (commit_graft_prepared)
+ if (r->parsed_objects->commit_graft_prepared)
return;
- graft_file = get_graft_file();
- read_graft_file(graft_file);
+ if (!startup_info->have_repository)
+ return;
+
+ graft_file = get_graft_file(r);
+ read_graft_file(r, graft_file);
/* make sure shallows are read */
- is_repository_shallow();
- commit_graft_prepared = 1;
+ is_repository_shallow(r);
+ r->parsed_objects->commit_graft_prepared = 1;
}
- struct commit_graft *lookup_commit_graft(const struct object_id *oid)
+ struct commit_graft *lookup_commit_graft(struct repository *r, const struct object_id *oid)
{
int pos;
- prepare_commit_graft();
- pos = commit_graft_pos(oid->hash);
+ prepare_commit_graft(r);
+ pos = commit_graft_pos(r, oid->hash);
if (pos < 0)
return NULL;
- return commit_graft[pos];
+ return r->parsed_objects->grafts[pos];
}
int for_each_commit_graft(each_commit_graft_fn fn, void *cb_data)
#include "quote.h"
#include "hashmap.h"
#include "string-list.h"
+ #include "object-store.h"
#include "utf8.h"
#include "dir.h"
+#include "color.h"
struct config_source {
struct config_source *prev;
return the_repository->index_file;
}
- char *get_graft_file(void)
+ char *get_graft_file(struct repository *r)
{
- if (!the_repository->graft_file)
+ if (!r->graft_file)
BUG("git environment hasn't been setup");
- return the_repository->graft_file;
+ return r->graft_file;
}
-int set_git_dir(const char *path)
+static void set_git_dir_1(const char *path)
{
if (setenv(GIT_DIR_ENVIRONMENT, path, 1))
- return error("Could not set GIT_DIR to '%s'", path);
+ die("could not set GIT_DIR to '%s'", path);
setup_git_env(path);
- return 0;
+}
+
+static void update_relative_gitdir(const char *name,
+ const char *old_cwd,
+ const char *new_cwd,
+ void *data)
+{
+ char *path = reparent_relative_path(old_cwd, new_cwd, get_git_dir());
+ trace_printf_key(&trace_setup_key,
+ "setup: move $GIT_DIR to '%s'",
+ path);
+ set_git_dir_1(path);
+ free(path);
+}
+
+void set_git_dir(const char *path)
+{
+ set_git_dir_1(path);
+ if (!is_absolute_path(path))
+ chdir_notify_register(NULL, update_relative_gitdir, NULL);
}
const char *get_log_output_encoding(void)
return ref;
}
- if (is_repository_shallow())
+static void add_shallow_requests(struct strbuf *req_buf,
+ const struct fetch_pack_args *args)
+{
- else if (is_repository_shallow() || args->deepen)
++ if (is_repository_shallow(the_repository))
+ write_shallow_commits(req_buf, 1, NULL);
+ if (args->depth > 0)
+ packet_buf_write(req_buf, "deepen %d", args->depth);
+ if (args->deepen_since) {
+ timestamp_t max_age = approxidate(args->deepen_since);
+ packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
+ }
+ if (args->deepen_not) {
+ int i;
+ for (i = 0; i < args->deepen_not->nr; i++) {
+ struct string_list_item *s = args->deepen_not->items + i;
+ packet_buf_write(req_buf, "deepen-not %s", s->string);
+ }
+ }
+}
+
+static void add_wants(const struct ref *wants, struct strbuf *req_buf)
+{
+ for ( ; wants ; wants = wants->next) {
+ const struct object_id *remote = &wants->old_oid;
+ const char *remote_hex;
+ struct object *o;
+
+ /*
+ * If that object is complete (i.e. it is an ancestor of a
+ * local ref), we tell them we have it but do not have to
+ * tell them about its ancestors, which they already know
+ * about.
+ *
+ * We use lookup_object here because we are only
+ * interested in the case we *know* the object is
+ * reachable and we have already scanned it.
+ */
+ if (((o = lookup_object(remote->hash)) != NULL) &&
+ (o->flags & COMPLETE)) {
+ continue;
+ }
+
+ remote_hex = oid_to_hex(remote);
+ packet_buf_write(req_buf, "want %s\n", remote_hex);
+ }
+}
+
+static void add_common(struct strbuf *req_buf, struct oidset *common)
+{
+ struct oidset_iter iter;
+ const struct object_id *oid;
+ oidset_iter_init(common, &iter);
+
+ while ((oid = oidset_iter_next(&iter))) {
+ packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
+ }
+}
+
+static int add_haves(struct strbuf *req_buf, int *haves_to_send, int *in_vain)
+{
+ int ret = 0;
+ int haves_added = 0;
+ const struct object_id *oid;
+
+ while ((oid = get_rev())) {
+ packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
+ if (++haves_added >= *haves_to_send)
+ break;
+ }
+
+ *in_vain += haves_added;
+ if (!haves_added || *in_vain >= MAX_IN_VAIN) {
+ /* Send Done */
+ packet_buf_write(req_buf, "done\n");
+ ret = 1;
+ }
+
+ /* Increase haves to send on next round */
+ *haves_to_send = next_flush(1, *haves_to_send);
+
+ return ret;
+}
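
add_haves() sends at most *haves_to_send "have" lines per round and then raises that cap through next_flush(), so each negotiation round offers a larger batch until MAX_IN_VAIN cuts the conversation short. A standalone sketch of the kind of growth schedule this produces; the constants and the double-then-+10% rule are assumptions modelled on fetch-pack's existing negotiation code, not taken from this patch:

#include <stdio.h>

#define INITIAL_FLUSH 16
#define LARGE_FLUSH 16384

/* Assumed schedule: double the batch per round, then grow by ~10%. */
static int next_flush_sketch(int count)
{
	return count < LARGE_FLUSH ? count * 2 : count * 11 / 10;
}

int main(void)
{
	int round, count = INITIAL_FLUSH;

	for (round = 1; round <= 6; round++) {
		printf("round %d: send up to %d haves\n", round, count);
		count = next_flush_sketch(count);
	}
	return 0;
}
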
+
+static int send_fetch_request(int fd_out, const struct fetch_pack_args *args,
+ const struct ref *wants, struct oidset *common,
+ int *haves_to_send, int *in_vain)
+{
+ int ret = 0;
+ struct strbuf req_buf = STRBUF_INIT;
+
+ if (server_supports_v2("fetch", 1))
+ packet_buf_write(&req_buf, "command=fetch");
+ if (server_supports_v2("agent", 0))
+ packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
+ if (args->server_options && args->server_options->nr &&
+ server_supports_v2("server-option", 1)) {
+ int i;
+ for (i = 0; i < args->server_options->nr; i++)
+ packet_write_fmt(fd_out, "server-option=%s",
+ args->server_options->items[i].string);
+ }
+
+ packet_buf_delim(&req_buf);
+ if (args->use_thin_pack)
+ packet_buf_write(&req_buf, "thin-pack");
+ if (args->no_progress)
+ packet_buf_write(&req_buf, "no-progress");
+ if (args->include_tag)
+ packet_buf_write(&req_buf, "include-tag");
+ if (prefer_ofs_delta)
+ packet_buf_write(&req_buf, "ofs-delta");
+
+ /* Add shallow-info and deepen request */
+ if (server_supports_feature("fetch", "shallow", 0))
+ add_shallow_requests(&req_buf, args);
- register_shallow(&oid);
++ else if (is_repository_shallow(the_repository) || args->deepen)
+ die(_("Server does not support shallow requests"));
+
+ /* Add filter */
+ if (server_supports_feature("fetch", "filter", 0) &&
+ args->filter_options.choice) {
+ print_verbose(args, _("Server supports filter"));
+ packet_buf_write(&req_buf, "filter %s",
+ args->filter_options.filter_spec);
+ } else if (args->filter_options.choice) {
+ warning("filtering not recognized by server, ignoring");
+ }
+
+ /* add wants */
+ add_wants(wants, &req_buf);
+
+ if (args->no_dependents) {
+ packet_buf_write(&req_buf, "done");
+ ret = 1;
+ } else {
+ /* Add all of the common commits we've found in previous rounds */
+ add_common(&req_buf, common);
+
+ /* Add initial haves */
+ ret = add_haves(&req_buf, haves_to_send, in_vain);
+ }
+
+ /* Send request */
+ packet_buf_flush(&req_buf);
+ write_or_die(fd_out, req_buf.buf, req_buf.len);
+
+ strbuf_release(&req_buf);
+ return ret;
+}
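
send_fetch_request() builds the whole request in pkt-line framing: each packet_buf_write() payload gets a four-hex-digit length prefix that counts the prefix itself, packet_buf_delim() emits the special 0001 delimiter between the command/capability section and its arguments, and packet_buf_flush() terminates the request with 0000. A standalone sketch of that framing (the payloads, agent string and object name are illustrative only):

#include <stdio.h>
#include <string.h>

/* Print one pkt-line: 4 hex digits of total length, then the payload. */
static void write_pkt_line(const char *payload)
{
	printf("%04x%s", (unsigned)(strlen(payload) + 4), payload);
}

int main(void)
{
	write_pkt_line("command=fetch\n");
	write_pkt_line("agent=git/2.18.0\n");
	printf("0001");		/* delim-pkt: end of the capability section */
	write_pkt_line("thin-pack\n");
	write_pkt_line("want 3585f0a3c1f3b2a99f4e2d10aabbccddeeff0011\n");
	printf("0000");		/* flush-pkt: end of the request */
	return 0;
}
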
+
+/*
+ * Processes a section header in a server's response and checks if it matches
+ * `section`. If the value of `peek` is 1, the header line will be peeked (and
+ * not consumed); if 0, the line will be consumed and the function will die if
+ * the section header doesn't match what was expected.
+ */
+static int process_section_header(struct packet_reader *reader,
+ const char *section, int peek)
+{
+ int ret;
+
+ if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
+ die("error reading section header '%s'", section);
+
+ ret = !strcmp(reader->line, section);
+
+ if (!peek) {
+ if (!ret)
+ die("expected '%s', received '%s'",
+ section, reader->line);
+ packet_reader_read(reader);
+ }
+
+ return ret;
+}
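
For orientation, the section names that process_section_header() is asked to match arrive in a fixed order; the names below come from its callers in this patch (process_acks(), receive_shallow_info(), and the "packfile" header read in do_fetch_pack_v2()). A minimal sketch listing them:

#include <stdio.h>

int main(void)
{
	/* Sections of a v2 fetch response, in the order the client consumes them. */
	static const char *sections[] = {
		"acknowledgments",	/* parsed by process_acks() */
		"shallow-info",		/* optional; peeked for before the pack */
		"packfile",		/* the pack data follows this header */
	};
	int i;

	for (i = 0; i < 3; i++)
		printf("%s\n", sections[i]);
	return 0;
}
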
+
+static int process_acks(struct packet_reader *reader, struct oidset *common)
+{
+ /* received */
+ int received_ready = 0;
+ int received_ack = 0;
+
+ process_section_header(reader, "acknowledgments", 0);
+ while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+ const char *arg;
+
+ if (!strcmp(reader->line, "NAK"))
+ continue;
+
+ if (skip_prefix(reader->line, "ACK ", &arg)) {
+			struct object_id oid;
+			received_ack = 1;
+ if (!get_oid_hex(arg, &oid)) {
+ struct commit *commit;
+ oidset_insert(common, &oid);
+ commit = lookup_commit(&oid);
+ mark_common(commit, 0, 1);
+ }
+ continue;
+ }
+
+ if (!strcmp(reader->line, "ready")) {
+ clear_prio_queue(&rev_list);
+ received_ready = 1;
+ continue;
+ }
+
+ die("unexpected acknowledgment line: '%s'", reader->line);
+ }
+
+ if (reader->status != PACKET_READ_FLUSH &&
+ reader->status != PACKET_READ_DELIM)
+ die("error processing acks: %d", reader->status);
+
+ /* return 0 if no common, 1 if there are common, or 2 if ready */
+ return received_ready ? 2 : (received_ack ? 1 : 0);
+}
+
+static void receive_shallow_info(struct fetch_pack_args *args,
+ struct packet_reader *reader)
+{
+ process_section_header(reader, "shallow-info", 0);
+ while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+ const char *arg;
+ struct object_id oid;
+
+ if (skip_prefix(reader->line, "shallow ", &arg)) {
+ if (get_oid_hex(arg, &oid))
+ die(_("invalid shallow line: %s"), reader->line);
++ register_shallow(the_repository, &oid);
+ continue;
+ }
+ if (skip_prefix(reader->line, "unshallow ", &arg)) {
+ if (get_oid_hex(arg, &oid))
+ die(_("invalid unshallow line: %s"), reader->line);
+ if (!lookup_object(oid.hash))
+ die(_("object not found: %s"), reader->line);
+ /* make sure that it is parsed as shallow */
+ if (!parse_object(&oid))
+ die(_("error in object: %s"), reader->line);
+ if (unregister_shallow(&oid))
+ die(_("no shallow found: %s"), reader->line);
+ continue;
+ }
+ die(_("expected shallow/unshallow, got %s"), reader->line);
+ }
+
+ if (reader->status != PACKET_READ_FLUSH &&
+ reader->status != PACKET_READ_DELIM)
+ die("error processing shallow info: %d", reader->status);
+
+ setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
+ args->deepen = 1;
+}
+
+enum fetch_state {
+ FETCH_CHECK_LOCAL = 0,
+ FETCH_SEND_REQUEST,
+ FETCH_PROCESS_ACKS,
+ FETCH_GET_PACK,
+ FETCH_DONE,
+};
+
+static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
+ int fd[2],
+ const struct ref *orig_ref,
+ struct ref **sought, int nr_sought,
+ char **pack_lockfile)
+{
+ struct ref *ref = copy_ref_list(orig_ref);
+ enum fetch_state state = FETCH_CHECK_LOCAL;
+ struct oidset common = OIDSET_INIT;
+ struct packet_reader reader;
+ int in_vain = 0;
+ int haves_to_send = INITIAL_FLUSH;
+ packet_reader_init(&reader, fd[0], NULL, 0,
+ PACKET_READ_CHOMP_NEWLINE);
+
+ while (state != FETCH_DONE) {
+ switch (state) {
+ case FETCH_CHECK_LOCAL:
+ sort_ref_list(&ref, ref_compare_name);
+ QSORT(sought, nr_sought, cmp_ref_by_name);
+
+ /* v2 supports these by default */
+ allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
+ use_sideband = 2;
+ if (args->depth > 0 || args->deepen_since || args->deepen_not)
+ args->deepen = 1;
+
+ if (marked)
+ for_each_ref(clear_marks, NULL);
+ marked = 1;
+
+ for_each_ref(rev_list_insert_ref_oid, NULL);
+ for_each_cached_alternate(insert_one_alternate_object);
+
+ /* Filter 'ref' by 'sought' and those that aren't local */
+ if (everything_local(args, &ref, sought, nr_sought))
+ state = FETCH_DONE;
+ else
+ state = FETCH_SEND_REQUEST;
+ break;
+ case FETCH_SEND_REQUEST:
+ if (send_fetch_request(fd[1], args, ref, &common,
+ &haves_to_send, &in_vain))
+ state = FETCH_GET_PACK;
+ else
+ state = FETCH_PROCESS_ACKS;
+ break;
+ case FETCH_PROCESS_ACKS:
+ /* Process ACKs/NAKs */
+ switch (process_acks(&reader, &common)) {
+ case 2:
+ state = FETCH_GET_PACK;
+ break;
+ case 1:
+ in_vain = 0;
+ /* fallthrough */
+ default:
+ state = FETCH_SEND_REQUEST;
+ break;
+ }
+ break;
+ case FETCH_GET_PACK:
+ /* Check for shallow-info section */
+ if (process_section_header(&reader, "shallow-info", 1))
+ receive_shallow_info(args, &reader);
+
+ /* get the pack */
+ process_section_header(&reader, "packfile", 0);
+ if (get_pack(args, fd, pack_lockfile))
+ die(_("git fetch-pack: fetch failed."));
+
+ state = FETCH_DONE;
+ break;
+ case FETCH_DONE:
+ continue;
+ }
+ }
+
+ oidset_clear(&common);
+ return ref;
+}
+
static void fetch_pack_config(void)
{
git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
if (err)
return err;
}
- buffer += 41;
+ buffer = p + 1;
parent_line_count++;
}
- graft = lookup_commit_graft(&commit->object.oid);
+ graft = lookup_commit_graft(the_repository, &commit->object.oid);
parent_count = commit_list_count(commit->parents);
if (graft) {
if (graft->nr_parent == -1 && !parent_count)
void *map_sha1_file(struct repository *r, const unsigned char *sha1, unsigned long *size);
-extern int has_loose_object_nonlocal(const unsigned char *sha1);
+ extern void *read_object_file_extended(const struct object_id *oid,
+ enum object_type *type,
+ unsigned long *size, int lookup_replace);
+ static inline void *read_object_file(const struct object_id *oid, enum object_type *type, unsigned long *size)
+ {
+ return read_object_file_extended(oid, type, size, 1);
+ }
+
+ /* Read and unpack an object file into memory, write memory to an object file */
+ int oid_object_info(struct repository *r, const struct object_id *, unsigned long *);
+
+ extern int hash_object_file(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid);
+
+ extern int write_object_file(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid);
+
+ extern int hash_object_file_literally(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ unsigned flags);
+
+ extern int pretend_object_file(void *, unsigned long, enum object_type,
+ struct object_id *oid);
+
+ extern int force_object_loose(const struct object_id *oid, time_t mtime);
+
+ /*
+ * Open the loose object at path, check its hash, and return the contents,
+ * type, and size. If the object is a blob, then "contents" may return NULL,
+ * to allow streaming of large blobs.
+ *
+ * Returns 0 on success, negative on error (details may be written to stderr).
+ */
+ int read_loose_object(const char *path,
+ const struct object_id *expected_oid,
+ enum object_type *type,
+ unsigned long *size,
+ void **contents);
+
+ /*
+ * Convenience for sha1_object_info_extended() with a NULL struct
+ * object_info. OBJECT_INFO_SKIP_CACHED is automatically set; pass
+ * nonzero flags to also set other flags.
+ */
+ extern int has_sha1_file_with_flags(const unsigned char *sha1, int flags);
+ static inline int has_sha1_file(const unsigned char *sha1)
+ {
+ return has_sha1_file_with_flags(sha1, 0);
+ }
+
+ /* Same as the above, except for struct object_id. */
+ extern int has_object_file(const struct object_id *oid);
+ extern int has_object_file_with_flags(const struct object_id *oid, int flags);
+
+ /*
+ * Return true iff an alternate object database has a loose object
+ * with the specified name. This function does not respect replace
+ * references.
+ */
++extern int has_loose_object_nonlocal(const struct object_id *);
+
+ extern void assert_oid_type(const struct object_id *oid, enum object_type expect);
+
+ struct object_info {
+ /* Request */
+ enum object_type *typep;
+ unsigned long *sizep;
+ off_t *disk_sizep;
+ unsigned char *delta_base_sha1;
+ struct strbuf *type_name;
+ void **contentp;
+
+ /* Response */
+ enum {
+ OI_CACHED,
+ OI_LOOSE,
+ OI_PACKED,
+ OI_DBCACHED
+ } whence;
+ union {
+ /*
+ * struct {
+ * ... Nothing to expose in this case
+ * } cached;
+ * struct {
+ * ... Nothing to expose in this case
+ * } loose;
+ */
+ struct {
+ struct packed_git *pack;
+ off_t offset;
+ unsigned int is_delta;
+ } packed;
+ } u;
+ };
+
+ /*
+ * Initializer for a "struct object_info" that wants no items. You may
+ * also memset() the memory to all-zeroes.
+ */
+ #define OBJECT_INFO_INIT {NULL}
+
+ /* Invoke lookup_replace_object() on the given hash */
+ #define OBJECT_INFO_LOOKUP_REPLACE 1
+ /* Allow reading from a loose object file of unknown/bogus type */
+ #define OBJECT_INFO_ALLOW_UNKNOWN_TYPE 2
+ /* Do not check cached storage */
+ #define OBJECT_INFO_SKIP_CACHED 4
+ /* Do not retry packed storage after checking packed and loose storage */
+ #define OBJECT_INFO_QUICK 8
+ /* Do not check loose object */
+ #define OBJECT_INFO_IGNORE_LOOSE 16
+
+ int oid_object_info_extended(struct repository *r,
+ const struct object_id *,
+ struct object_info *, unsigned flags);
+
#endif /* OBJECT_STORE_H */
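
A typical caller of the object_info API declared above fills in only the request fields it cares about and leaves the rest NULL. A minimal sketch, assuming a valid struct repository and object id are available; it only compiles inside git's own tree, and the function name is made up:

#include "cache.h"
#include "object-store.h"

/* Ask only for the type and on-disk size of a single object. */
static int show_type_and_size(struct repository *r, const struct object_id *oid)
{
	struct object_info oi = OBJECT_INFO_INIT;
	enum object_type type;
	off_t disk_size;

	oi.typep = &type;
	oi.disk_sizep = &disk_size;

	if (oid_object_info_extended(r, oid, &oi, OBJECT_INFO_LOOKUP_REPLACE) < 0)
		return error("could not inspect object %s", oid_to_hex(oid));

	printf("%s: %"PRIuMAX" bytes on disk\n",
	       type_name(type), (uintmax_t)disk_size);
	return 0;
}
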
#include "cache.h"
#include "refs.h"
#include "remote.h"
+ #include "object-store.h"
#include "strbuf.h"
#include "url.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "run-command.h"
#include "vcs-svn/svndump.h"
#include "notes.h"
#include "config.h"
#include "remote.h"
#include "refs.h"
+#include "refspec.h"
+ #include "object-store.h"
#include "commit.h"
#include "diff.h"
#include "revision.h"
res = do_recursive_merge(base, next, base_label, next_label,
&head, &msgbuf, opts);
if (res < 0)
- return res;
+ goto leave;
+
res |= write_message(msgbuf.buf, msgbuf.len,
- git_path_merge_msg(), 0);
+ git_path_merge_msg(the_repository), 0);
} else {
struct commit_list *common = NULL;
struct commit_list *remotes = NULL;
static int error_failed_squash(struct commit *commit,
struct replay_opts *opts, int subject_len, const char *subject)
{
- if (rename(rebase_path_squash_msg(), rebase_path_message()))
- return error(_("could not rename '%s' to '%s'"),
+ if (copy_file(rebase_path_message(), rebase_path_squash_msg(), 0666))
+ return error(_("could not copy '%s' to '%s'"),
rebase_path_squash_msg(), rebase_path_message());
- unlink(git_path_merge_msg());
- if (copy_file(git_path_merge_msg(), rebase_path_message(), 0666))
- unlink(rebase_path_fixup_msg());
+ unlink(git_path_merge_msg(the_repository));
+ if (copy_file(git_path_merge_msg(the_repository), rebase_path_message(), 0666))
return error(_("could not copy '%s' to '%s'"),
- rebase_path_message(), git_path_merge_msg());
+ rebase_path_message(),
+ git_path_merge_msg(the_repository));
return error_with_patch(commit, subject, subject_len, opts, 1, 0);
}
return status;
}
- ret = write_message(body, len, git_path_merge_msg(), 0);
+static int safe_append(const char *filename, const char *fmt, ...)
+{
+ va_list ap;
+ struct lock_file lock = LOCK_INIT;
+ int fd = hold_lock_file_for_update(&lock, filename,
+ LOCK_REPORT_ON_ERROR);
+ struct strbuf buf = STRBUF_INIT;
+
+ if (fd < 0)
+ return -1;
+
+ if (strbuf_read_file(&buf, filename, 0) < 0 && errno != ENOENT) {
+ error_errno(_("could not read '%s'"), filename);
+ rollback_lock_file(&lock);
+ return -1;
+ }
+ strbuf_complete(&buf, '\n');
+ va_start(ap, fmt);
+ strbuf_vaddf(&buf, fmt, ap);
+ va_end(ap);
+
+ if (write_in_full(fd, buf.buf, buf.len) < 0) {
+ error_errno(_("could not write to '%s'"), filename);
+ strbuf_release(&buf);
+ rollback_lock_file(&lock);
+ return -1;
+ }
+ if (commit_lock_file(&lock) < 0) {
+ strbuf_release(&buf);
+ rollback_lock_file(&lock);
+ return error(_("failed to finalize '%s'"), filename);
+ }
+
+ strbuf_release(&buf);
+ return 0;
+}
+
+static int do_label(const char *name, int len)
+{
+ struct ref_store *refs = get_main_ref_store(the_repository);
+ struct ref_transaction *transaction;
+ struct strbuf ref_name = STRBUF_INIT, err = STRBUF_INIT;
+ struct strbuf msg = STRBUF_INIT;
+ int ret = 0;
+ struct object_id head_oid;
+
+ if (len == 1 && *name == '#')
+ return error("Illegal label name: '%.*s'", len, name);
+
+ strbuf_addf(&ref_name, "refs/rewritten/%.*s", len, name);
+ strbuf_addf(&msg, "rebase -i (label) '%.*s'", len, name);
+
+ transaction = ref_store_transaction_begin(refs, &err);
+ if (!transaction) {
+ error("%s", err.buf);
+ ret = -1;
+ } else if (get_oid("HEAD", &head_oid)) {
+ error(_("could not read HEAD"));
+ ret = -1;
+ } else if (ref_transaction_update(transaction, ref_name.buf, &head_oid,
+ NULL, 0, msg.buf, &err) < 0 ||
+ ref_transaction_commit(transaction, &err)) {
+ error("%s", err.buf);
+ ret = -1;
+ }
+ ref_transaction_free(transaction);
+ strbuf_release(&err);
+ strbuf_release(&msg);
+
+ if (!ret)
+ ret = safe_append(rebase_path_refs_to_delete(),
+ "%s\n", ref_name.buf);
+ strbuf_release(&ref_name);
+
+ return ret;
+}
+
+static const char *reflog_message(struct replay_opts *opts,
+ const char *sub_action, const char *fmt, ...);
+
+static int do_reset(const char *name, int len, struct replay_opts *opts)
+{
+ struct strbuf ref_name = STRBUF_INIT;
+ struct object_id oid;
+ struct lock_file lock = LOCK_INIT;
+ struct tree_desc desc;
+ struct tree *tree;
+ struct unpack_trees_options unpack_tree_opts;
+ int ret = 0, i;
+
+ if (hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0)
+ return -1;
+
+ if (len == 10 && !strncmp("[new root]", name, len)) {
+ if (!opts->have_squash_onto) {
+ const char *hex;
+ if (commit_tree("", 0, the_hash_algo->empty_tree,
+ NULL, &opts->squash_onto,
+ NULL, NULL))
+ return error(_("writing fake root commit"));
+ opts->have_squash_onto = 1;
+ hex = oid_to_hex(&opts->squash_onto);
+ if (write_message(hex, strlen(hex),
+ rebase_path_squash_onto(), 0))
+ return error(_("writing squash-onto"));
+ }
+ oidcpy(&oid, &opts->squash_onto);
+ } else {
+ /* Determine the length of the label */
+ for (i = 0; i < len; i++)
+ if (isspace(name[i]))
+ len = i;
+
+ strbuf_addf(&ref_name, "refs/rewritten/%.*s", len, name);
+ if (get_oid(ref_name.buf, &oid) &&
+ get_oid(ref_name.buf + strlen("refs/rewritten/"), &oid)) {
+ error(_("could not read '%s'"), ref_name.buf);
+ rollback_lock_file(&lock);
+ strbuf_release(&ref_name);
+ return -1;
+ }
+ }
+
+ memset(&unpack_tree_opts, 0, sizeof(unpack_tree_opts));
+ setup_unpack_trees_porcelain(&unpack_tree_opts, "reset");
+ unpack_tree_opts.head_idx = 1;
+ unpack_tree_opts.src_index = &the_index;
+ unpack_tree_opts.dst_index = &the_index;
+ unpack_tree_opts.fn = oneway_merge;
+ unpack_tree_opts.merge = 1;
+ unpack_tree_opts.update = 1;
+
+ if (read_cache_unmerged()) {
+ rollback_lock_file(&lock);
+ strbuf_release(&ref_name);
+ return error_resolve_conflict(_(action_name(opts)));
+ }
+
+ if (!fill_tree_descriptor(&desc, &oid)) {
+ error(_("failed to find tree of %s"), oid_to_hex(&oid));
+ rollback_lock_file(&lock);
+ free((void *)desc.buffer);
+ strbuf_release(&ref_name);
+ return -1;
+ }
+
+ if (unpack_trees(1, &desc, &unpack_tree_opts)) {
+ rollback_lock_file(&lock);
+ free((void *)desc.buffer);
+ strbuf_release(&ref_name);
+ return -1;
+ }
+
+ tree = parse_tree_indirect(&oid);
+ prime_cache_tree(&the_index, tree);
+
+ if (write_locked_index(&the_index, &lock, COMMIT_LOCK) < 0)
+ ret = error(_("could not write index"));
+ free((void *)desc.buffer);
+
+ if (!ret)
+ ret = update_ref(reflog_message(opts, "reset", "'%.*s'",
+ len, name), "HEAD", &oid,
+ NULL, 0, UPDATE_REFS_MSG_ON_ERR);
+
+ strbuf_release(&ref_name);
+ return ret;
+}
+
+static int do_merge(struct commit *commit, const char *arg, int arg_len,
+ int flags, struct replay_opts *opts)
+{
+ int run_commit_flags = (flags & TODO_EDIT_MERGE_MSG) ?
+ EDIT_MSG | VERIFY_MSG : 0;
+ struct strbuf ref_name = STRBUF_INIT;
+ struct commit *head_commit, *merge_commit, *i;
+ struct commit_list *bases, *j, *reversed = NULL;
+ struct merge_options o;
+ int merge_arg_len, oneline_offset, can_fast_forward, ret;
+ static struct lock_file lock;
+ const char *p;
+
+ if (hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0) {
+ ret = -1;
+ goto leave_merge;
+ }
+
+ head_commit = lookup_commit_reference_by_name("HEAD");
+ if (!head_commit) {
+ ret = error(_("cannot merge without a current revision"));
+ goto leave_merge;
+ }
+
+ oneline_offset = arg_len;
+ merge_arg_len = strcspn(arg, " \t\n");
+ p = arg + merge_arg_len;
+ p += strspn(p, " \t\n");
+ if (*p == '#' && (!p[1] || isspace(p[1]))) {
+ p += 1 + strspn(p + 1, " \t\n");
+ oneline_offset = p - arg;
+ } else if (p - arg < arg_len)
+ BUG("octopus merges are not supported yet: '%s'", p);
+
+ strbuf_addf(&ref_name, "refs/rewritten/%.*s", merge_arg_len, arg);
+ merge_commit = lookup_commit_reference_by_name(ref_name.buf);
+ if (!merge_commit) {
+ /* fall back to non-rewritten ref or commit */
+ strbuf_splice(&ref_name, 0, strlen("refs/rewritten/"), "", 0);
+ merge_commit = lookup_commit_reference_by_name(ref_name.buf);
+ }
+
+ if (!merge_commit) {
+ ret = error(_("could not resolve '%s'"), ref_name.buf);
+ goto leave_merge;
+ }
+
+ if (opts->have_squash_onto &&
+ !oidcmp(&head_commit->object.oid, &opts->squash_onto)) {
+ /*
+ * When the user tells us to "merge" something into a
+ * "[new root]", let's simply fast-forward to the merge head.
+ */
+ rollback_lock_file(&lock);
+ ret = fast_forward_to(&merge_commit->object.oid,
+ &head_commit->object.oid, 0, opts);
+ goto leave_merge;
+ }
+
+ if (commit) {
+ const char *message = get_commit_buffer(commit, NULL);
+ const char *body;
+ int len;
+
+ if (!message) {
+ ret = error(_("could not get commit message of '%s'"),
+ oid_to_hex(&commit->object.oid));
+ goto leave_merge;
+ }
+ write_author_script(message);
+ find_commit_subject(message, &body);
+ len = strlen(body);
- git_path_merge_msg());
++ ret = write_message(body, len, git_path_merge_msg(the_repository), 0);
+ unuse_commit_buffer(commit, message);
+ if (ret) {
+ error_errno(_("could not write '%s'"),
- ret = write_message(p, len, git_path_merge_msg(), 0);
++ git_path_merge_msg(the_repository));
+ goto leave_merge;
+ }
+ } else {
+ struct strbuf buf = STRBUF_INIT;
+ int len;
+
+ strbuf_addf(&buf, "author %s", git_author_info(0));
+ write_author_script(buf.buf);
+ strbuf_reset(&buf);
+
+ if (oneline_offset < arg_len) {
+ p = arg + oneline_offset;
+ len = arg_len - oneline_offset;
+ } else {
+ strbuf_addf(&buf, "Merge branch '%.*s'",
+ merge_arg_len, arg);
+ p = buf.buf;
+ len = buf.len;
+ }
+
- git_path_merge_msg());
++ ret = write_message(p, len, git_path_merge_msg(the_repository), 0);
+ strbuf_release(&buf);
+ if (ret) {
+ error_errno(_("could not write '%s'"),
- git_path_merge_head(), 0);
- write_message("no-ff", 5, git_path_merge_mode(), 0);
++ git_path_merge_msg(the_repository));
+ goto leave_merge;
+ }
+ }
+
+ /*
+ * If HEAD is not identical to the first parent of the original merge
+ * commit, we cannot fast-forward.
+ */
+ can_fast_forward = opts->allow_ff && commit && commit->parents &&
+ !oidcmp(&commit->parents->item->object.oid,
+ &head_commit->object.oid);
+
+ /*
+ * If the merge head is different from the original one, we cannot
+ * fast-forward.
+ */
+ if (can_fast_forward) {
+ struct commit_list *second_parent = commit->parents->next;
+
+ if (second_parent && !second_parent->next &&
+ oidcmp(&merge_commit->object.oid,
+ &second_parent->item->object.oid))
+ can_fast_forward = 0;
+ }
+
+ if (can_fast_forward && commit->parents->next &&
+ !commit->parents->next->next &&
+ !oidcmp(&commit->parents->next->item->object.oid,
+ &merge_commit->object.oid)) {
+ rollback_lock_file(&lock);
+ ret = fast_forward_to(&commit->object.oid,
+ &head_commit->object.oid, 0, opts);
+ goto leave_merge;
+ }
+
+ write_message(oid_to_hex(&merge_commit->object.oid), GIT_SHA1_HEXSZ,
- ret = !!run_git_commit(git_path_merge_msg(), opts,
++ git_path_merge_head(the_repository), 0);
++ write_message("no-ff", 5, git_path_merge_mode(the_repository), 0);
+
+ bases = get_merge_bases(head_commit, merge_commit);
+ if (bases && !oidcmp(&merge_commit->object.oid,
+ &bases->item->object.oid)) {
+ ret = 0;
+ /* skip merging an ancestor of HEAD */
+ goto leave_merge;
+ }
+
+ for (j = bases; j; j = j->next)
+ commit_list_insert(j->item, &reversed);
+ free_commit_list(bases);
+
+ read_cache();
+ init_merge_options(&o);
+ o.branch1 = "HEAD";
+ o.branch2 = ref_name.buf;
+ o.buffer_output = 2;
+
+ ret = merge_recursive(&o, head_commit, merge_commit, reversed, &i);
+ if (ret <= 0)
+ fputs(o.obuf.buf, stdout);
+ strbuf_release(&o.obuf);
+ if (ret < 0) {
+ error(_("could not even attempt to merge '%.*s'"),
+ merge_arg_len, arg);
+ goto leave_merge;
+ }
+ /*
+ * The return value of merge_recursive() is 1 on clean, and 0 on
+ * unclean merge.
+ *
+ * Let's reverse that, so that do_merge() returns 0 upon success and
+ * 1 upon failed merge (keeping the return value -1 for the cases where
+ * we will want to reschedule the `merge` command).
+ */
+ ret = !ret;
+
+ if (active_cache_changed &&
+ write_locked_index(&the_index, &lock, COMMIT_LOCK)) {
+ ret = error(_("merge: Unable to write new index file"));
+ goto leave_merge;
+ }
+
+ rollback_lock_file(&lock);
+ if (ret)
+ rerere(opts->allow_rerere_auto);
+ else
+ /*
+ * In case of problems, we now want to return a positive
+ * value (a negative one would indicate that the `merge`
+ * command needs to be rescheduled).
+ */
++ ret = !!run_git_commit(git_path_merge_msg(the_repository), opts,
+ run_commit_flags);
+
+leave_merge:
+ strbuf_release(&ref_name);
+ rollback_lock_file(&lock);
+ return ret;
+}
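
do_merge() receives the remainder of a `merge` todo-list line after any -C/-c option has been parsed off, e.g. `topic-branch # Merge branch 'topic-branch'`: everything up to the first whitespace names the merge head, and an optional `#` introduces the oneline used for the commit message. A standalone sketch mirroring the strcspn()/strspn() split above (the sample argument is made up):

#include <stdio.h>
#include <string.h>
#include <ctype.h>

int main(void)
{
	const char *arg = "topic-branch # Merge branch 'topic-branch'";
	int arg_len = (int)strlen(arg);
	int merge_arg_len = (int)strcspn(arg, " \t\n");
	int oneline_offset = arg_len;
	const char *p = arg + merge_arg_len;

	p += strspn(p, " \t\n");
	if (*p == '#' && (!p[1] || isspace((unsigned char)p[1]))) {
		p += 1 + strspn(p + 1, " \t\n");
		oneline_offset = (int)(p - arg);
	}

	printf("merge head: '%.*s'\n", merge_arg_len, arg);
	printf("oneline:    '%s'\n", arg + oneline_offset);
	return 0;
}
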
+
static int is_final_fixup(struct todo_list *todo_list)
{
int i = todo_list->current;
flags |= AMEND_MSG;
}
- if (run_git_commit(rebase_path_message(), opts, flags))
+ if (is_clean) {
- const char *cherry_pick_head = git_path_cherry_pick_head();
++ const char *cherry_pick_head = git_path_cherry_pick_head(the_repository);
+
+ if (file_exists(cherry_pick_head) && unlink(cherry_pick_head))
+ return error(_("could not remove CHERRY_PICK_HEAD"));
+ if (!final_fixup)
+ return 0;
+ }
+
+ if (run_git_commit(final_fixup ? NULL : rebase_path_message(),
+ opts, flags))
return error(_("could not commit staged changes."));
unlink(rebase_path_amend());
+ if (final_fixup) {
+ unlink(rebase_path_fixup_msg());
+ unlink(rebase_path_squash_msg());
+ }
+ if (opts->current_fixup_count > 0) {
+ /*
+ * Whether final fixup or not, we just cleaned up the commit
+ * message...
+ */
+ unlink(rebase_path_current_fixups());
+ strbuf_reset(&opts->current_fixups);
+ opts->current_fixup_count = 0;
+ }
return 0;
}
#include "commit-slab.h"
#include "revision.h"
#include "list-objects.h"
+#include "commit-slab.h"
+ #include "repository.h"
- static int is_shallow = -1;
- static struct stat_validity shallow_stat;
- static char *alternate_shallow_file;
-
- void set_alternate_shallow_file(const char *path, int override)
+ void set_alternate_shallow_file(struct repository *r, const char *path, int override)
{
- if (is_shallow != -1)
+ if (r->parsed_objects->is_shallow != -1)
- die("BUG: is_repository_shallow must not be called before set_alternate_shallow_file");
+ BUG("is_repository_shallow must not be called before set_alternate_shallow_file");
- if (alternate_shallow_file && !override)
+ if (r->parsed_objects->alternate_shallow_file && !override)
return;
- free(alternate_shallow_file);
- alternate_shallow_file = xstrdup_or_null(path);
+ free(r->parsed_objects->alternate_shallow_file);
+ r->parsed_objects->alternate_shallow_file = xstrdup_or_null(path);
}
- int register_shallow(const struct object_id *oid)
+ int register_shallow(struct repository *r, const struct object_id *oid)
{
struct commit_graft *graft =
xmalloc(sizeof(struct commit_graft));
struct object_id oid;
if (get_oid_hex(buf, &oid))
die("bad shallow line: %s", buf);
- register_shallow(&oid);
+ register_shallow(r, &oid);
}
fclose(fp);
- return is_shallow;
+ return r->parsed_objects->is_shallow;
}
+/*
+ * TODO: use "int" elemtype instead of "int *" when/if commit-slab
+ * supports a "valid" flag.
+ */
+define_commit_slab(commit_depth, int *);
struct commit_list *get_shallow_commits(struct object_array *heads, int depth,
int shallow_flag, int not_shallow_flag)
{
return result;
}
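
commit-slab generates per-commit storage plus accessors named after the slab, so define_commit_slab(commit_depth, int *) above yields init_commit_depth(), commit_depth_at() and clear_commit_depth(). A minimal usage sketch, assuming the standard commit-slab API; it only compiles inside git's tree and the demo function is made up:

#include "cache.h"
#include "commit.h"
#include "commit-slab.h"

define_commit_slab(commit_depth, int *);

/* Attach a depth to one commit and read it back. */
static void demo_commit_depth(struct commit *commit)
{
	struct commit_depth depths;
	int **slot;

	init_commit_depth(&depths);

	slot = commit_depth_at(&depths, commit);
	if (!*slot) {
		*slot = xmalloc(sizeof(int));
		**slot = 1;
	}

	free(*slot);		/* clear_commit_depth() frees the slab, not the malloc'd ints */
	clear_commit_depth(&depths);
}
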
- static void check_shallow_file_for_update(void)
+ static void check_shallow_file_for_update(struct repository *r)
{
- if (is_shallow == -1)
+ if (r->parsed_objects->is_shallow == -1)
- die("BUG: shallow must be initialized by now");
+ BUG("shallow must be initialized by now");
- if (!stat_validity_check(&shallow_stat, git_path_shallow()))
+ if (!stat_validity_check(r->parsed_objects->shallow_stat, git_path_shallow(the_repository)))
die("shallow file has changed since we read it");
}
send_shallow(result);
free_commit_list(result);
send_unshallow(shallows);
- packet_flush(1);
+}
+
+/* Returns 1 if a shallow list is sent or 0 otherwise */
+static int send_shallow_list(int depth, int deepen_rev_list,
+ timestamp_t deepen_since,
+ struct string_list *deepen_not,
+ struct object_array *shallows)
+{
+ int ret = 0;
+
+ if (depth > 0 && deepen_rev_list)
+ die("git upload-pack: deepen and deepen-since (or deepen-not) cannot be used together");
+ if (depth > 0) {
+ deepen(depth, deepen_relative, shallows);
+ ret = 1;
+ } else if (deepen_rev_list) {
+ struct argv_array av = ARGV_ARRAY_INIT;
+ int i;
+
+ argv_array_push(&av, "rev-list");
+ if (deepen_since)
+ argv_array_pushf(&av, "--max-age=%"PRItime, deepen_since);
+ if (deepen_not->nr) {
+ argv_array_push(&av, "--not");
+ for (i = 0; i < deepen_not->nr; i++) {
+ struct string_list_item *s = deepen_not->items + i;
+ argv_array_push(&av, s->string);
+ }
+ argv_array_push(&av, "--not");
+ }
+ for (i = 0; i < want_obj.nr; i++) {
+ struct object *o = want_obj.objects[i].item;
+ argv_array_push(&av, oid_to_hex(&o->oid));
+ }
+ deepen_by_rev_list(av.argc, av.argv, shallows);
+ argv_array_clear(&av);
+ ret = 1;
+ } else {
+ if (shallows->nr > 0) {
+ int i;
+ for (i = 0; i < shallows->nr; i++)
- register_shallow(&shallows->objects[i].item->oid);
++ register_shallow(the_repository,
++ &shallows->objects[i].item->oid);
+ }
+ }
+
+ shallow_nr += shallows->nr;
+ return ret;
+}
+
+static int process_shallow(const char *line, struct object_array *shallows)
+{
+ const char *arg;
+ if (skip_prefix(line, "shallow ", &arg)) {
+ struct object_id oid;
+ struct object *object;
+ if (get_oid_hex(arg, &oid))
+ die("invalid shallow line: %s", line);
+ object = parse_object(&oid);
+ if (!object)
+ return 1;
+ if (object->type != OBJ_COMMIT)
+ die("invalid shallow object %s", oid_to_hex(&oid));
+ if (!(object->flags & CLIENT_SHALLOW)) {
+ object->flags |= CLIENT_SHALLOW;
+ add_object_array(object, NULL, shallows);
+ }
+ return 1;
+ }
+
+ return 0;
+}
+
+static int process_deepen(const char *line, int *depth)
+{
+ const char *arg;
+ if (skip_prefix(line, "deepen ", &arg)) {
+ char *end = NULL;
+ *depth = (int)strtol(arg, &end, 0);
+ if (!end || *end || *depth <= 0)
+ die("Invalid deepen: %s", line);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int process_deepen_since(const char *line, timestamp_t *deepen_since, int *deepen_rev_list)
+{
+ const char *arg;
+ if (skip_prefix(line, "deepen-since ", &arg)) {
+ char *end = NULL;
+ *deepen_since = parse_timestamp(arg, &end, 0);
+		if (!end || *end || !*deepen_since ||
+ /* revisions.c's max_age -1 is special */
+ *deepen_since == -1)
+ die("Invalid deepen-since: %s", line);
+ *deepen_rev_list = 1;
+ return 1;
+ }
+ return 0;
+}
+
+static int process_deepen_not(const char *line, struct string_list *deepen_not, int *deepen_rev_list)
+{
+ const char *arg;
+ if (skip_prefix(line, "deepen-not ", &arg)) {
+ char *ref = NULL;
+ struct object_id oid;
+ if (expand_ref(arg, strlen(arg), &oid, &ref) != 1)
+ die("git upload-pack: ambiguous deepen-not: %s", line);
+ string_list_append(deepen_not, ref);
+ free(ref);
+ *deepen_rev_list = 1;
+ return 1;
+ }
+ return 0;
}
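
The four helpers above each recognize one shallow-related request line, i.e. the lines written by the client side of this series (compare add_shallow_requests() in fetch-pack.c earlier in this diff). For reference, a request may carry lines like the following (the object name and values are made up):

#include <stdio.h>

int main(void)
{
	/* Example shallow/deepen request lines, as parsed by the helpers above. */
	static const char *lines[] = {
		"shallow 3585f0a3c1f3b2a99f4e2d10aabbccddeeff0011",
		"deepen 5",
		"deepen-since 1514764800",
		"deepen-not refs/heads/old-history",
	};
	int i;

	for (i = 0; i < 4; i++)
		printf("%s\n", lines[i]);
	return 0;
}
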
static void receive_needs(void)
return parse_hide_refs_config(var, value, "uploadpack");
}
-int cmd_main(int argc, const char **argv)
+void upload_pack(struct upload_pack_options *options)
{
- const char *dir;
- int strict = 0;
- struct option options[] = {
- OPT_BOOL(0, "stateless-rpc", &stateless_rpc,
- N_("quit after a single request/response exchange")),
- OPT_BOOL(0, "advertise-refs", &advertise_refs,
- N_("exit immediately after initial ref advertisement")),
- OPT_BOOL(0, "strict", &strict,
- N_("do not try <directory>/.git/ if <directory> is no Git directory")),
- OPT_INTEGER(0, "timeout", &timeout,
- N_("interrupt transfer after <n> seconds of inactivity")),
- OPT_END()
- };
+ struct string_list symref = STRING_LIST_INIT_DUP;
+
+ stateless_rpc = options->stateless_rpc;
+ timeout = options->timeout;
+ daemon_mode = options->daemon_mode;
- packet_trace_identity("upload-pack");
- check_replace_refs = 0;
+ git_config(upload_pack_config, NULL);
+
+ head_ref_namespaced(find_symref, &symref);
- argc = parse_options(argc, argv, NULL, options, upload_pack_usage, 0);
+ if (options->advertise_refs || !stateless_rpc) {
+ reset_timeout();
+ head_ref_namespaced(send_ref, &symref);
+ for_each_namespaced_ref(send_ref, &symref);
+ advertise_shallow_grafts(1);
+ packet_flush(1);
+ } else {
+ head_ref_namespaced(check_ref, NULL);
+ for_each_namespaced_ref(check_ref, NULL);
+ }
+ string_list_clear(&symref, 1);
+ if (options->advertise_refs)
+ return;
- if (argc != 1)
- usage_with_options(upload_pack_usage, options);
+ receive_needs();
+ if (want_obj.nr) {
+ get_common_commits();
+ create_pack_file();
+ }
+}
- if (timeout)
- daemon_mode = 1;
+struct upload_pack_data {
+ struct object_array wants;
+ struct oid_array haves;
- setup_path();
+ struct object_array shallows;
+ struct string_list deepen_not;
+ int depth;
+ timestamp_t deepen_since;
+ int deepen_rev_list;
+ int deepen_relative;
- dir = argv[0];
+ unsigned stateless_rpc : 1;
- if (!enter_repo(dir, strict))
- die("'%s' does not appear to be a git repository", dir);
+ unsigned use_thin_pack : 1;
+ unsigned use_ofs_delta : 1;
+ unsigned no_progress : 1;
+ unsigned use_include_tag : 1;
+ unsigned done : 1;
+};
+
+static void upload_pack_data_init(struct upload_pack_data *data)
+{
+ struct object_array wants = OBJECT_ARRAY_INIT;
+ struct oid_array haves = OID_ARRAY_INIT;
+ struct object_array shallows = OBJECT_ARRAY_INIT;
+ struct string_list deepen_not = STRING_LIST_INIT_DUP;
+
+ memset(data, 0, sizeof(*data));
+ data->wants = wants;
+ data->haves = haves;
+ data->shallows = shallows;
+ data->deepen_not = deepen_not;
+}
+
+static void upload_pack_data_clear(struct upload_pack_data *data)
+{
+ object_array_clear(&data->wants);
+ oid_array_clear(&data->haves);
+ object_array_clear(&data->shallows);
+ string_list_clear(&data->deepen_not, 0);
+}
+
+static int parse_want(const char *line)
+{
+ const char *arg;
+ if (skip_prefix(line, "want ", &arg)) {
+ struct object_id oid;
+ struct object *o;
+
+ if (get_oid_hex(arg, &oid))
+ die("git upload-pack: protocol error, "
+ "expected to get oid, not '%s'", line);
+
+ o = parse_object(&oid);
+ if (!o) {
+ packet_write_fmt(1,
+ "ERR upload-pack: not our ref %s",
+ oid_to_hex(&oid));
+ die("git upload-pack: not our ref %s",
+ oid_to_hex(&oid));
+ }
+
+ if (!(o->flags & WANTED)) {
+ o->flags |= WANTED;
+ add_object_array(o, NULL, &want_obj);
+ }
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static int parse_have(const char *line, struct oid_array *haves)
+{
+ const char *arg;
+ if (skip_prefix(line, "have ", &arg)) {
+ struct object_id oid;
+
+ if (get_oid_hex(arg, &oid))
+ die("git upload-pack: expected SHA1 object, got '%s'", arg);
+ oid_array_append(haves, &oid);
+ return 1;
+ }
+
+ return 0;
+}
+
+static void process_args(struct packet_reader *request,
+ struct upload_pack_data *data)
+{
+ while (packet_reader_read(request) != PACKET_READ_FLUSH) {
+ const char *arg = request->line;
+ const char *p;
+
+ /* process want */
+ if (parse_want(arg))
+ continue;
+ /* process have line */
+ if (parse_have(arg, &data->haves))
+ continue;
+
+ /* process args like thin-pack */
+ if (!strcmp(arg, "thin-pack")) {
+ use_thin_pack = 1;
+ continue;
+ }
+ if (!strcmp(arg, "ofs-delta")) {
+ use_ofs_delta = 1;
+ continue;
+ }
+ if (!strcmp(arg, "no-progress")) {
+ no_progress = 1;
+ continue;
+ }
+ if (!strcmp(arg, "include-tag")) {
+ use_include_tag = 1;
+ continue;
+ }
+ if (!strcmp(arg, "done")) {
+ data->done = 1;
+ continue;
+ }
+
+ /* Shallow related arguments */
+ if (process_shallow(arg, &data->shallows))
+ continue;
+ if (process_deepen(arg, &data->depth))
+ continue;
+ if (process_deepen_since(arg, &data->deepen_since,
+ &data->deepen_rev_list))
+ continue;
+ if (process_deepen_not(arg, &data->deepen_not,
+ &data->deepen_rev_list))
+ continue;
+ if (!strcmp(arg, "deepen-relative")) {
+ data->deepen_relative = 1;
+ continue;
+ }
+
+ if (allow_filter && skip_prefix(arg, "filter ", &p)) {
+ parse_list_objects_filter(&filter_options, p);
+ continue;
+ }
+
+ /* ignore unknown lines maybe? */
+ die("unexpected line: '%s'", arg);
+ }
+}
+
+static int process_haves(struct oid_array *haves, struct oid_array *common)
+{
+ int i;
+
+ /* Process haves */
+ for (i = 0; i < haves->nr; i++) {
+ const struct object_id *oid = &haves->oid[i];
+ struct object *o;
+ int we_knew_they_have = 0;
+
+ if (!has_object_file(oid))
+ continue;
+
+ oid_array_append(common, oid);
+
+ o = parse_object(oid);
+ if (!o)
+ die("oops (%s)", oid_to_hex(oid));
+ if (o->type == OBJ_COMMIT) {
+ struct commit_list *parents;
+ struct commit *commit = (struct commit *)o;
+ if (o->flags & THEY_HAVE)
+ we_knew_they_have = 1;
+ else
+ o->flags |= THEY_HAVE;
+ if (!oldest_have || (commit->date < oldest_have))
+ oldest_have = commit->date;
+ for (parents = commit->parents;
+ parents;
+ parents = parents->next)
+ parents->item->object.flags |= THEY_HAVE;
+ }
+ if (!we_knew_they_have)
+ add_object_array(o, NULL, &have_obj);
+ }
+
+ return 0;
+}
+
+static int send_acks(struct oid_array *acks, struct strbuf *response)
+{
+ int i;
+
+ packet_buf_write(response, "acknowledgments\n");
+
+ /* Send Acks */
+ if (!acks->nr)
+ packet_buf_write(response, "NAK\n");
+
+ for (i = 0; i < acks->nr; i++) {
+ packet_buf_write(response, "ACK %s\n",
+ oid_to_hex(&acks->oid[i]));
+ }
+
+ if (ok_to_give_up()) {
+ /* Send Ready */
+ packet_buf_write(response, "ready\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static int process_haves_and_send_acks(struct upload_pack_data *data)
+{
+ struct oid_array common = OID_ARRAY_INIT;
+ struct strbuf response = STRBUF_INIT;
+ int ret = 0;
+
+ process_haves(&data->haves, &common);
+ if (data->done) {
+ ret = 1;
+ } else if (send_acks(&common, &response)) {
+ packet_buf_delim(&response);
+ ret = 1;
+ } else {
+ /* Add Flush */
+ packet_buf_flush(&response);
+ ret = 0;
+ }
+
+ /* Send response */
+ write_or_die(1, response.buf, response.len);
+ strbuf_release(&response);
+
+ oid_array_clear(&data->haves);
+ oid_array_clear(&common);
+ return ret;
+}
+
+static void send_shallow_info(struct upload_pack_data *data)
+{
+ /* No shallow info needs to be sent */
+ if (!data->depth && !data->deepen_rev_list && !data->shallows.nr &&
- !is_repository_shallow())
++ !is_repository_shallow(the_repository))
+ return;
+
+ packet_write_fmt(1, "shallow-info\n");
+
+ if (!send_shallow_list(data->depth, data->deepen_rev_list,
+ data->deepen_since, &data->deepen_not,
- &data->shallows) && is_repository_shallow())
++ &data->shallows) &&
++ is_repository_shallow(the_repository))
+ deepen(INFINITE_DEPTH, data->deepen_relative, &data->shallows);
+
+ packet_delim(1);
+}
+
+enum fetch_state {
+ FETCH_PROCESS_ARGS = 0,
+ FETCH_SEND_ACKS,
+ FETCH_SEND_PACK,
+ FETCH_DONE,
+};
+
+int upload_pack_v2(struct repository *r, struct argv_array *keys,
+ struct packet_reader *request)
+{
+ enum fetch_state state = FETCH_PROCESS_ARGS;
+ struct upload_pack_data data;
git_config(upload_pack_config, NULL);