size_t nr, alloc;
};
-static void cache_one_alternate(const char *refname,
- const struct object_id *oid,
+static void cache_one_alternate(const struct object_id *oid,
void *vcache)
{
struct alternate_object_cache *cache = vcache;
ACK_ready
};
-static void consume_shallow_list(struct fetch_pack_args *args, int fd)
+static void consume_shallow_list(struct fetch_pack_args *args,
+ struct packet_reader *reader)
{
if (args->stateless_rpc && args->deepen) {
/* If we sent a depth we will get back "duplicate"
* shallow and unshallow commands every time there
* is a block of have lines exchanged.
*/
- char *line;
- while ((line = packet_read_line(fd, NULL))) {
- if (starts_with(line, "shallow "))
+ while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+ if (starts_with(reader->line, "shallow "))
continue;
- if (starts_with(line, "unshallow "))
+ if (starts_with(reader->line, "unshallow "))
continue;
die(_("git fetch-pack: expected shallow list"));
}
+ if (reader->status != PACKET_READ_FLUSH)
+ die(_("git fetch-pack: expected a flush packet after shallow list"));
}
}
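/*
 * Illustrative aside, not part of the patch: the conversions in this
 * series all follow the same packet_reader pattern from pkt-line.h.
 * A minimal sketch, assuming the flags used above; handle_line() is a
 * hypothetical stand-in for the caller's per-line parsing.
 */
static void read_until_flush_sketch(int fd)
{
        struct packet_reader reader;

        packet_reader_init(&reader, fd, NULL, 0,
                           PACKET_READ_CHOMP_NEWLINE |
                           PACKET_READ_DIE_ON_ERR_PACKET);

        /* reader.line holds each payload with the trailing newline chomped */
        while (packet_reader_read(&reader) == PACKET_READ_NORMAL)
                handle_line(reader.line);

        /* the section must end with a flush packet */
        if (reader.status != PACKET_READ_FLUSH)
                die("expected flush packet");
}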
-static enum ack_type get_ack(int fd, struct object_id *result_oid)
+static enum ack_type get_ack(struct packet_reader *reader,
+ struct object_id *result_oid)
{
int len;
- char *line = packet_read_line(fd, &len);
const char *arg;
- if (!line)
+ if (packet_reader_read(reader) != PACKET_READ_NORMAL)
die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
- if (!strcmp(line, "NAK"))
+ len = reader->pktlen;
+
+ if (!strcmp(reader->line, "NAK"))
return NAK;
- if (skip_prefix(line, "ACK ", &arg)) {
+ if (skip_prefix(reader->line, "ACK ", &arg)) {
if (!get_oid_hex(arg, result_oid)) {
arg += 40;
- len -= arg - line;
+ len -= arg - reader->line;
if (len < 1)
return ACK;
if (strstr(arg, "continue"))
return ACK;
}
}
- if (skip_prefix(line, "ERR ", &arg))
- die(_("remote error: %s"), arg);
- die(_("git fetch-pack: expected ACK/NAK, got '%s'"), line);
+ die(_("git fetch-pack: expected ACK/NAK, got '%s'"), reader->line);
}
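/*
 * Aside, not part of the patch: the payloads get_ack() parses are "NAK",
 * a final "ACK <hex-oid>", and, depending on the negotiated capability,
 * "ACK <hex-oid> continue" (multi_ack) or "ACK <hex-oid> common" /
 * "ACK <hex-oid> ready" (multi_ack_detailed); the context elided above
 * maps those suffixes to ACK_continue, ACK_common and ACK_ready.
 */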
static void send_request(struct fetch_pack_args *args,
int got_ready = 0;
struct strbuf req_buf = STRBUF_INIT;
size_t state_len = 0;
+ struct packet_reader reader;
if (args->stateless_rpc && multi_ack == 1)
die(_("--stateless-rpc requires multi_ack_detailed"));
- mark_tips(negotiator, args->negotiation_tips);
- for_each_cached_alternate(negotiator, insert_one_alternate_object);
+ packet_reader_init(&reader, fd[0], NULL, 0,
+ PACKET_READ_CHOMP_NEWLINE |
+ PACKET_READ_DIE_ON_ERR_PACKET);
+
+ if (!args->no_dependents) {
+ mark_tips(negotiator, args->negotiation_tips);
+ for_each_cached_alternate(negotiator, insert_one_alternate_object);
+ }
fetching = 0;
for ( ; refs ; refs = refs->next) {
* We use lookup_object here because we are only
* interested in the case we *know* the object is
* reachable and we have already scanned it.
+ *
+ * Do this only if args->no_dependents is false (if it is true,
+ * we cannot trust the object flags).
*/
- if (((o = lookup_object(the_repository, remote->hash)) != NULL) &&
+ if (!args->no_dependents &&
+ ((o = lookup_object(the_repository, remote->hash)) != NULL) &&
(o->flags & COMPLETE)) {
continue;
}
packet_buf_write(&req_buf, "deepen-not %s", s->string);
}
}
- if (server_supports_filtering && args->filter_options.choice)
+ if (server_supports_filtering && args->filter_options.choice) {
+ struct strbuf expanded_filter_spec = STRBUF_INIT;
+ expand_list_objects_filter_spec(&args->filter_options,
+ &expanded_filter_spec);
packet_buf_write(&req_buf, "filter %s",
- args->filter_options.filter_spec);
+ expanded_filter_spec.buf);
+ strbuf_release(&expanded_filter_spec);
+ }
packet_buf_flush(&req_buf);
state_len = req_buf.len;
if (args->deepen) {
- char *line;
const char *arg;
struct object_id oid;
send_request(args, fd[1], &req_buf);
- while ((line = packet_read_line(fd[0], NULL))) {
- if (skip_prefix(line, "shallow ", &arg)) {
+ while (packet_reader_read(&reader) == PACKET_READ_NORMAL) {
+ if (skip_prefix(reader.line, "shallow ", &arg)) {
if (get_oid_hex(arg, &oid))
- die(_("invalid shallow line: %s"), line);
+ die(_("invalid shallow line: %s"), reader.line);
register_shallow(the_repository, &oid);
continue;
}
- if (skip_prefix(line, "unshallow ", &arg)) {
+ if (skip_prefix(reader.line, "unshallow ", &arg)) {
if (get_oid_hex(arg, &oid))
- die(_("invalid unshallow line: %s"), line);
+ die(_("invalid unshallow line: %s"), reader.line);
if (!lookup_object(the_repository, oid.hash))
- die(_("object not found: %s"), line);
+ die(_("object not found: %s"), reader.line);
/* make sure that it is parsed as shallow */
if (!parse_object(the_repository, &oid))
- die(_("error in object: %s"), line);
+ die(_("error in object: %s"), reader.line);
if (unregister_shallow(&oid))
- die(_("no shallow found: %s"), line);
+ die(_("no shallow found: %s"), reader.line);
continue;
}
- die(_("expected shallow/unshallow, got %s"), line);
+ die(_("expected shallow/unshallow, got %s"), reader.line);
}
} else if (!args->stateless_rpc)
send_request(args, fd[1], &req_buf);
if (!args->stateless_rpc && count == INITIAL_FLUSH)
continue;
- consume_shallow_list(args, fd[0]);
+ consume_shallow_list(args, &reader);
do {
- ack = get_ack(fd[0], result_oid);
+ ack = get_ack(&reader, result_oid);
if (ack)
print_verbose(args, _("got %s %d %s"), "ack",
ack, oid_to_hex(result_oid));
strbuf_release(&req_buf);
if (!got_ready || !no_done)
- consume_shallow_list(args, fd[0]);
+ consume_shallow_list(args, &reader);
while (flushes || multi_ack) {
- int ack = get_ack(fd[0], result_oid);
+ int ack = get_ack(&reader, result_oid);
if (ack) {
print_verbose(args, _("got %s (%d) %s"), "ack",
ack, oid_to_hex(result_oid));
oidset_insert(oids, &refs->old_oid);
}
-static int tip_oids_contain(struct oidset *tip_oids,
- struct ref *unmatched, struct ref *newlist,
- const struct object_id *id)
+static int is_unmatched_ref(const struct ref *ref)
{
- /*
- * Note that this only looks at the ref lists the first time it's
- * called. This works out in filter_refs() because even though it may
- * add to "newlist" between calls, the additions will always be for
- * oids that are already in the set.
- */
- if (!tip_oids->map.map.tablesize) {
- add_refs_to_oidset(tip_oids, unmatched);
- add_refs_to_oidset(tip_oids, newlist);
- }
- return oidset_contains(tip_oids, id);
+ struct object_id oid;
+ const char *p;
+ return ref->match_status == REF_NOT_MATCHED &&
+ !parse_oid_hex(ref->name, &oid, &p) &&
+ *p == '\0' &&
+ oideq(&oid, &ref->old_oid);
}
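/*
 * Illustrative aside, not part of the patch: the only sought entries that
 * pass is_unmatched_ref() are requests for a raw object id that matched
 * nothing the server advertised -- the name is a full hex oid, old_oid was
 * parsed from that name, and match_status is still REF_NOT_MATCHED.
 * A hypothetical example using a dummy all-zero id:
 */
static int is_unmatched_ref_sketch(void)
{
        struct ref *r = alloc_ref("0000000000000000000000000000000000000000");

        get_oid_hex(r->name, &r->old_oid);
        r->match_status = REF_NOT_MATCHED; /* already the default from alloc_ref() */
        return is_unmatched_ref(r);        /* 1 */
}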
static void filter_refs(struct fetch_pack_args *args,
struct ref *ref, *next;
struct oidset tip_oids = OIDSET_INIT;
int i;
+ int strict = !(allow_unadvertised_object_request &
+ (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1));
i = 0;
for (ref = *refs; ref; ref = next) {
}
}
+ if (strict) {
+ for (i = 0; i < nr_sought; i++) {
+ ref = sought[i];
+ if (!is_unmatched_ref(ref))
+ continue;
+
+ add_refs_to_oidset(&tip_oids, unmatched);
+ add_refs_to_oidset(&tip_oids, newlist);
+ break;
+ }
+ }
+
/* Append unmatched requests to the list */
for (i = 0; i < nr_sought; i++) {
- struct object_id oid;
- const char *p;
-
ref = sought[i];
- if (ref->match_status != REF_NOT_MATCHED)
- continue;
- if (parse_oid_hex(ref->name, &oid, &p) ||
- *p != '\0' ||
- !oideq(&oid, &ref->old_oid))
+ if (!is_unmatched_ref(ref))
continue;
- if ((allow_unadvertised_object_request &
- (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
- tip_oids_contain(&tip_oids, unmatched, newlist,
- &ref->old_oid)) {
+ if (!strict || oidset_contains(&tip_oids, &ref->old_oid)) {
ref->match_status = REF_MATCHED;
*newtail = copy_ref(ref);
newtail = &(*newtail)->next;
struct ref *refs;
};
-/*
- * If the number of refs is not larger than the number of loose objects,
- * this function stops inserting.
- */
-static int add_loose_objects_to_set(const struct object_id *oid,
- const char *path,
- void *data)
-{
- struct loose_object_iter *iter = data;
- oidset_insert(iter->loose_object_set, oid);
- if (iter->refs == NULL)
- return 1;
-
- iter->refs = iter->refs->next;
- return 0;
-}
-
/*
* Mark recent commits available locally and reachable from a local ref as
* COMPLETE. If args->no_dependents is false, also mark COMPLETE remote refs as
struct ref *ref;
int old_save_commit_buffer = save_commit_buffer;
timestamp_t cutoff = 0;
- struct oidset loose_oid_set = OIDSET_INIT;
- int use_oidset = 0;
- struct loose_object_iter iter = {&loose_oid_set, *refs};
-
- /* Enumerate all loose objects or know refs are not so many. */
- use_oidset = !for_each_loose_object(add_loose_objects_to_set,
- &iter, 0);
save_commit_buffer = 0;
for (ref = *refs; ref; ref = ref->next) {
struct object *o;
- unsigned int flags = OBJECT_INFO_QUICK;
- if (use_oidset &&
- !oidset_contains(&loose_oid_set, &ref->old_oid)) {
- /*
- * I know this does not exist in the loose form,
- * so check if it exists in a non-loose form.
- */
- flags |= OBJECT_INFO_IGNORE_LOOSE;
- }
-
- if (!has_object_file_with_flags(&ref->old_oid, flags))
+ if (!has_object_file_with_flags(&ref->old_oid,
+ OBJECT_INFO_QUICK))
continue;
o = parse_object(the_repository, &ref->old_oid);
if (!o)
}
}
- oidset_clear(&loose_oid_set);
-
- if (!args->no_dependents) {
- if (!args->deepen) {
- for_each_ref(mark_complete_oid, NULL);
- for_each_cached_alternate(NULL, mark_alternate_complete);
- commit_list_sort_by_date(&complete);
- if (cutoff)
- mark_recent_complete_commits(args, cutoff);
- }
+ if (!args->deepen) {
+ for_each_ref(mark_complete_oid, NULL);
+ for_each_cached_alternate(NULL, mark_alternate_complete);
+ commit_list_sort_by_date(&complete);
+ if (cutoff)
+ mark_recent_complete_commits(args, cutoff);
+ }
- /*
- * Mark all complete remote refs as common refs.
- * Don't mark them common yet; the server has to be told so first.
- */
- for (ref = *refs; ref; ref = ref->next) {
- struct object *o = deref_tag(the_repository,
- lookup_object(the_repository,
- ref->old_oid.hash),
- NULL, 0);
+ /*
+ * Mark all complete remote refs as common refs.
+ * Don't mark them common yet; the server has to be told so first.
+ */
+ for (ref = *refs; ref; ref = ref->next) {
+ struct object *o = deref_tag(the_repository,
+ lookup_object(the_repository,
+ ref->old_oid.hash),
+ NULL, 0);
- if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
- continue;
+ if (!o || o->type != OBJ_COMMIT || !(o->flags & COMPLETE))
+ continue;
- negotiator->known_common(negotiator,
- (struct commit *)o);
- }
+ negotiator->known_common(negotiator,
+ (struct commit *)o);
}
save_commit_buffer = old_save_commit_buffer;
if (!server_supports("deepen-relative") && args->deepen_relative)
die(_("Server does not support --deepen"));
- mark_complete_and_common_ref(&negotiator, args, &ref);
- filter_refs(args, &ref, sought, nr_sought);
- if (everything_local(args, &ref)) {
- packet_flush(fd[1]);
- goto all_done;
+ if (!args->no_dependents) {
+ mark_complete_and_common_ref(&negotiator, args, &ref);
+ filter_refs(args, &ref, sought, nr_sought);
+ if (everything_local(args, &ref)) {
+ packet_flush(fd[1]);
+ goto all_done;
+ }
+ } else {
+ filter_refs(args, &ref, sought, nr_sought);
}
if (find_common(&negotiator, args, fd, &oid, ref) < 0)
if (!args->keep_pack)
packet_buf_write(req_buf, "deepen-not %s", s->string);
}
}
+ if (args->deepen_relative)
+ packet_buf_write(req_buf, "deepen-relative\n");
}
-static void add_wants(const struct ref *wants, struct strbuf *req_buf)
+static void add_wants(int no_dependents, const struct ref *wants, struct strbuf *req_buf)
{
int use_ref_in_want = server_supports_feature("fetch", "ref-in-want", 0);
* We use lookup_object here because we are only
* interested in the case we *know* the object is
* reachable and we have already scanned it.
+ *
+ * Do this only if args->no_dependents is false (if it is true,
+ * we cannot trust the object flags).
*/
- if (((o = lookup_object(the_repository, remote->hash)) != NULL) &&
+ if (!no_dependents &&
+ ((o = lookup_object(the_repository, remote->hash)) != NULL) &&
(o->flags & COMPLETE)) {
continue;
}
static int send_fetch_request(struct fetch_negotiator *negotiator, int fd_out,
const struct fetch_pack_args *args,
const struct ref *wants, struct oidset *common,
- int *haves_to_send, int *in_vain)
+ int *haves_to_send, int *in_vain,
+ int sideband_all)
{
int ret = 0;
struct strbuf req_buf = STRBUF_INIT;
packet_buf_write(&req_buf, "include-tag");
if (prefer_ofs_delta)
packet_buf_write(&req_buf, "ofs-delta");
+ if (sideband_all)
+ packet_buf_write(&req_buf, "sideband-all");
/* Add shallow-info and deepen request */
if (server_supports_feature("fetch", "shallow", 0))
/* Add filter */
if (server_supports_feature("fetch", "filter", 0) &&
args->filter_options.choice) {
+ struct strbuf expanded_filter_spec = STRBUF_INIT;
print_verbose(args, _("Server supports filter"));
+ expand_list_objects_filter_spec(&args->filter_options,
+ &expanded_filter_spec);
packet_buf_write(&req_buf, "filter %s",
- args->filter_options.filter_spec);
+ expanded_filter_spec.buf);
+ strbuf_release(&expanded_filter_spec);
} else if (args->filter_options.choice) {
warning("filtering not recognized by server, ignoring");
}
/* add wants */
- add_wants(wants, &req_buf);
+ add_wants(args->no_dependents, wants, &req_buf);
if (args->no_dependents) {
packet_buf_write(&req_buf, "done");
reader->status != PACKET_READ_DELIM)
die(_("error processing acks: %d"), reader->status);
+ /*
+ * If an "acknowledgments" section is sent, a packfile is sent if and
+ * only if "ready" was sent in this section. The other sections
+ * ("shallow-info" and "wanted-refs") are sent only if a packfile is
+ * sent. Therefore, a DELIM is expected if "ready" is sent, and a FLUSH
+ * otherwise.
+ */
+ if (received_ready && reader->status != PACKET_READ_DELIM)
+ die(_("expected packfile to be sent after 'ready'"));
+ if (!received_ready && reader->status != PACKET_READ_FLUSH)
+ die(_("expected no other sections to be sent after no 'ready'"));
+
/* return 0 if no common, 1 if there are common, or 2 if ready */
return received_ready ? 2 : (received_ack ? 1 : 0);
}
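/*
 * Aside, not part of the patch: for context, the FETCH_PROCESS_ACKS state
 * in do_fetch_pack_v2() consumes this return value roughly as follows --
 * 2 ("ready") proceeds to the packfile, 1 (some "have"s were ACKed) resets
 * the in_vain counter before negotiating further, and 0 just sends another
 * request.
 */
                switch (process_acks(&negotiator, &reader, &common)) {
                case 2:
                        state = FETCH_GET_PACK;
                        break;
                case 1:
                        in_vain = 0;
                        /* fallthrough */
                default:
                        state = FETCH_SEND_REQUEST;
                        break;
                }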
static void receive_shallow_info(struct fetch_pack_args *args,
struct packet_reader *reader)
{
+ int line_received = 0;
+
process_section_header(reader, "shallow-info", 0);
while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
const char *arg;
if (get_oid_hex(arg, &oid))
die(_("invalid shallow line: %s"), reader->line);
register_shallow(the_repository, &oid);
+ line_received = 1;
continue;
}
if (skip_prefix(reader->line, "unshallow ", &arg)) {
die(_("error in object: %s"), reader->line);
if (unregister_shallow(&oid))
die(_("no shallow found: %s"), reader->line);
+ line_received = 1;
continue;
}
die(_("expected shallow/unshallow, got %s"), reader->line);
reader->status != PACKET_READ_DELIM)
die(_("error processing shallow info: %d"), reader->status);
- setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
- args->deepen = 1;
+ if (line_received) {
+ setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
+ NULL);
+ args->deepen = 1;
+ } else {
+ alternate_shallow_file = NULL;
+ }
}
static void receive_wanted_refs(struct packet_reader *reader,
struct fetch_negotiator negotiator;
fetch_negotiator_init(&negotiator, negotiation_algorithm);
packet_reader_init(&reader, fd[0], NULL, 0,
- PACKET_READ_CHOMP_NEWLINE);
+ PACKET_READ_CHOMP_NEWLINE |
+ PACKET_READ_DIE_ON_ERR_PACKET);
+ if (git_env_bool("GIT_TEST_SIDEBAND_ALL", 1) &&
+ server_supports_feature("fetch", "sideband-all", 0)) {
+ reader.use_sideband = 1;
+ reader.me = "fetch-pack";
+ }
while (state != FETCH_DONE) {
switch (state) {
args->deepen = 1;
/* Filter 'ref' by 'sought' and those that aren't local */
- mark_complete_and_common_ref(&negotiator, args, &ref);
- filter_refs(args, &ref, sought, nr_sought);
- if (everything_local(args, &ref))
- state = FETCH_DONE;
- else
+ if (!args->no_dependents) {
+ mark_complete_and_common_ref(&negotiator, args, &ref);
+ filter_refs(args, &ref, sought, nr_sought);
+ if (everything_local(args, &ref))
+ state = FETCH_DONE;
+ else
+ state = FETCH_SEND_REQUEST;
+
+ mark_tips(&negotiator, args->negotiation_tips);
+ for_each_cached_alternate(&negotiator,
+ insert_one_alternate_object);
+ } else {
+ filter_refs(args, &ref, sought, nr_sought);
state = FETCH_SEND_REQUEST;
-
- mark_tips(&negotiator, args->negotiation_tips);
- for_each_cached_alternate(&negotiator,
- insert_one_alternate_object);
+ }
break;
case FETCH_SEND_REQUEST:
if (send_fetch_request(&negotiator, fd[1], args, ref,
&common,
- &haves_to_send, &in_vain))
+ &haves_to_send, &in_vain,
+ reader.use_sideband))
state = FETCH_GET_PACK;
else
state = FETCH_PROCESS_ACKS;
rollback_lock_file(&shallow_lock);
} else
commit_lock_file(&shallow_lock);
+ alternate_shallow_file = NULL;
return;
}
&alternate_shallow_file,
&extra);
commit_lock_file(&shallow_lock);
+ alternate_shallow_file = NULL;
}
oid_array_clear(&extra);
return;
commit_lock_file(&shallow_lock);
oid_array_clear(&extra);
oid_array_clear(&ref);
+ alternate_shallow_file = NULL;
return;
}
if (nr_sought)
nr_sought = remove_duplicates_in_refs(sought, nr_sought);
- if (!ref) {
+ if (args->no_dependents && !args->filter_options.choice) {
+ /*
+ * The protocol does not support requesting that only the
+ * wanted objects be sent, so approximate this by setting a
+ * "blob:none" filter if no filter is already set. This works
+ * for all object types: note that wanted blobs will still be
+ * sent because they are directly specified as a "want".
+ *
+ * NEEDSWORK: Add an option in the protocol to request that
+ * only the wanted objects be sent, and implement it.
+ */
+ parse_list_objects_filter(&args->filter_options, "blob:none");
+ }
+
+ if (version != protocol_v2 && !ref) {
packet_flush(fd[1]);
die(_("no matching remote head"));
}