size_t nr, alloc;
};
-static void cache_one_alternate(const char *refname,
- const struct object_id *oid,
+static void cache_one_alternate(const struct object_id *oid,
void *vcache)
{
struct alternate_object_cache *cache = vcache;
	ALLOC_GROW(cache->items, cache->nr + 1, cache->alloc);
	cache->items[cache->nr++] = *oid;
}
-static int tip_oids_contain(struct oidset *tip_oids,
- struct ref *unmatched, struct ref *newlist,
- const struct object_id *id)
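+/*
+ * Check for a ref that was not matched against the advertisement and whose
+ * name is exactly the hex form of its old_oid, i.e. a request for a raw
+ * object id.
+ */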
+static int is_unmatched_ref(const struct ref *ref)
{
- /*
- * Note that this only looks at the ref lists the first time it's
- * called. This works out in filter_refs() because even though it may
- * add to "newlist" between calls, the additions will always be for
- * oids that are already in the set.
- */
- if (!tip_oids->map.map.tablesize) {
- add_refs_to_oidset(tip_oids, unmatched);
- add_refs_to_oidset(tip_oids, newlist);
- }
- return oidset_contains(tip_oids, id);
+ struct object_id oid;
+ const char *p;
+ return ref->match_status == REF_NOT_MATCHED &&
+ !parse_oid_hex(ref->name, &oid, &p) &&
+ *p == '\0' &&
+ oideq(&oid, &ref->old_oid);
}
static void filter_refs(struct fetch_pack_args *args,
struct ref *ref, *next;
struct oidset tip_oids = OIDSET_INIT;
int i;
+ int strict = !(allow_unadvertised_object_request &
+ (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1));
i = 0;
for (ref = *refs; ref; ref = next) {
}
}
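+	/*
+	 * Without the allow-tip-sha1-in-want or allow-reachable-sha1-in-want
+	 * capability, a request for a raw object id must match one of the
+	 * advertised tips, so collect those tips into an oidset up front.
+	 */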
+ if (strict) {
+ for (i = 0; i < nr_sought; i++) {
+ ref = sought[i];
+ if (!is_unmatched_ref(ref))
+ continue;
+
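+			/*
+			 * Build the tip set at most once, and only when some
+			 * unmatched object-id request will actually need it.
+			 */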
+ add_refs_to_oidset(&tip_oids, unmatched);
+ add_refs_to_oidset(&tip_oids, newlist);
+ break;
+ }
+ }
+
/* Append unmatched requests to the list */
for (i = 0; i < nr_sought; i++) {
- struct object_id oid;
- const char *p;
-
ref = sought[i];
- if (ref->match_status != REF_NOT_MATCHED)
- continue;
- if (parse_oid_hex(ref->name, &oid, &p) ||
- *p != '\0' ||
- oidcmp(&oid, &ref->old_oid))
+ if (!is_unmatched_ref(ref))
continue;
- if ((allow_unadvertised_object_request &
- (ALLOW_TIP_SHA1 | ALLOW_REACHABLE_SHA1)) ||
- tip_oids_contain(&tip_oids, unmatched, newlist,
- &ref->old_oid)) {
+ if (!strict || oidset_contains(&tip_oids, &ref->old_oid)) {
ref->match_status = REF_MATCHED;
*newtail = copy_ref(ref);
newtail = &(*newtail)->next;
struct ref *refs;
};
-/*
- * If the number of refs is not larger than the number of loose objects,
- * this function stops inserting.
- */
-static int add_loose_objects_to_set(const struct object_id *oid,
- const char *path,
- void *data)
-{
- struct loose_object_iter *iter = data;
- oidset_insert(iter->loose_object_set, oid);
- if (iter->refs == NULL)
- return 1;
-
- iter->refs = iter->refs->next;
- return 0;
-}
-
/*
* Mark recent commits available locally and reachable from a local ref as
* COMPLETE. If args->no_dependents is false, also mark COMPLETE remote refs as
struct ref *ref;
int old_save_commit_buffer = save_commit_buffer;
timestamp_t cutoff = 0;
- struct oidset loose_oid_set = OIDSET_INIT;
- int use_oidset = 0;
- struct loose_object_iter iter = {&loose_oid_set, *refs};
-
- /* Enumerate all loose objects or know refs are not so many. */
- use_oidset = !for_each_loose_object(add_loose_objects_to_set,
- &iter, 0);
save_commit_buffer = 0;
for (ref = *refs; ref; ref = ref->next) {
struct object *o;
- unsigned int flags = OBJECT_INFO_QUICK;
-
- if (use_oidset &&
- !oidset_contains(&loose_oid_set, &ref->old_oid)) {
- /*
- * I know this does not exist in the loose form,
- * so check if it exists in a non-loose form.
- */
- flags |= OBJECT_INFO_IGNORE_LOOSE;
- }
- if (!has_object_file_with_flags(&ref->old_oid, flags))
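+		/*
+		 * OBJECT_INFO_QUICK: do not re-scan the pack directory if an
+		 * object is not immediately found.
+		 */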
+ if (!has_object_file_with_flags(&ref->old_oid,
+ OBJECT_INFO_QUICK))
continue;
o = parse_object(the_repository, &ref->old_oid);
if (!o)
}
}
- oidset_clear(&loose_oid_set);
-
if (!args->deepen) {
for_each_ref(mark_complete_oid, NULL);
for_each_cached_alternate(NULL, mark_alternate_complete);
packet_buf_write(req_buf, "deepen-not %s", s->string);
}
}
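+	/*
+	 * "deepen-relative" tells the server to interpret the requested
+	 * depth relative to the client's current shallow boundary instead
+	 * of the remote tips.
+	 */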
+ if (args->deepen_relative)
+ packet_buf_write(req_buf, "deepen-relative\n");
}
static void add_wants(int no_dependents, const struct ref *wants, struct strbuf *req_buf)
reader->status != PACKET_READ_DELIM)
die(_("error processing acks: %d"), reader->status);
+ /*
+ * If an "acknowledgments" section is sent, a packfile is sent if and
+ * only if "ready" was sent in this section. The other sections
+ * ("shallow-info" and "wanted-refs") are sent only if a packfile is
+ * sent. Therefore, a DELIM is expected if "ready" is sent, and a FLUSH
+ * otherwise.
+ */
+ if (received_ready && reader->status != PACKET_READ_DELIM)
+ die(_("expected packfile to be sent after 'ready'"));
+ if (!received_ready && reader->status != PACKET_READ_FLUSH)
+ die(_("expected no other sections to be sent after no 'ready'"));
+
/* return 0 if no common, 1 if there are common, or 2 if ready */
return received_ready ? 2 : (received_ack ? 1 : 0);
}
static void receive_shallow_info(struct fetch_pack_args *args,
struct packet_reader *reader)
{
+ int line_received = 0;
+
process_section_header(reader, "shallow-info", 0);
while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
const char *arg;
if (get_oid_hex(arg, &oid))
die(_("invalid shallow line: %s"), reader->line);
register_shallow(the_repository, &oid);
+ line_received = 1;
continue;
}
if (skip_prefix(reader->line, "unshallow ", &arg)) {
die(_("error in object: %s"), reader->line);
if (unregister_shallow(&oid))
die(_("no shallow found: %s"), reader->line);
+ line_received = 1;
continue;
}
die(_("expected shallow/unshallow, got %s"), reader->line);
reader->status != PACKET_READ_DELIM)
die(_("error processing shallow info: %d"), reader->status);
- setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
- args->deepen = 1;
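+	/*
+	 * Take the shallow file lock only if the server actually sent
+	 * shallow/unshallow lines; an empty section leaves the local
+	 * shallow state untouched.
+	 */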
+ if (line_received) {
+ setup_alternate_shallow(&shallow_lock, &alternate_shallow_file,
+ NULL);
+ args->deepen = 1;
+ }
}
static void receive_wanted_refs(struct packet_reader *reader,
parse_list_objects_filter(&args->filter_options, "blob:none");
}
- if (!ref) {
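+	/*
+	 * Only protocol v0/v1 die here; the v2 code path is expected to
+	 * cope with an empty list of matched refs on its own.
+	 */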
+ if (version != protocol_v2 && !ref) {
packet_flush(fd[1]);
die(_("no matching remote head"));
}