memset (&list, 0, sizeof(list));
while ((de = readdir(dir))) {
- if (de->d_name[0] == '.' && (de->d_name[1] == '\0' ||
- (de->d_name[1] == '.' &&
- de->d_name[2] == '\0')))
+ if (is_dot_or_dotdot(de->d_name))
continue;
ALLOC_GROW(list.entries, list.nr + 1, list.alloc);
list.entries[list.nr++] = xstrdup(de->d_name);
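The inline dot-entry check removed above is replaced by a helper that does not appear in this hunk. A minimal sketch of what it is assumed to look like, mirroring the removed condition exactly:

/* Assumed shape of the helper used above: true only for "." and "..". */
static inline int is_dot_or_dotdot(const char *name)
{
	return name[0] == '.' &&
	       (name[1] == '\0' ||
		(name[1] == '.' && name[2] == '\0'));
}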
if (fd < 0)
continue;
- next = alloc_ref(path->len - name_offset + 1);
+ next = alloc_ref(path->buf + name_offset);
if (read_in_full(fd, buffer, 40) != 40 ||
get_sha1_hex(buffer, next->old_sha1)) {
close(fd);
continue;
}
close(fd);
- strcpy(next->name, path->buf + name_offset);
(*tail)->next = next;
*tail = next;
}
while ((*list)->next &&
       (cmp = strcmp(buffer + 41,
		     (*list)->next->name)) > 0)
list = &(*list)->next;
if (!(*list)->next || cmp < 0) {
- struct ref *next = alloc_ref(len - 40);
+ struct ref *next = alloc_ref(buffer + 41);
buffer[40] = '\0';
if (get_sha1_hex(buffer, next->old_sha1)) {
warning ("invalid SHA-1: %s", buffer);
free(next);
continue;
}
- strcpy(next->name, buffer + 41);
next->next = (*list)->next;
(*list)->next = next;
list = &(*list)->next;
}
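Both hunks above (and the bundle hunk further down) switch to an alloc_ref() that takes the ref name directly instead of a buffer length, which is why the follow-up strcpy() into next->name disappears. A sketch of the assumed new allocator, under the assumption that struct ref ends in a flexible name array:

/* Sketch only: assumes struct ref ends with a flexible "char name[]"
 * member, so a single xcalloc() covers the ref and its name. */
static struct ref *alloc_ref(const char *name)
{
	size_t len = strlen(name);
	struct ref *ref = xcalloc(1, sizeof(struct ref) + len + 1);
	memcpy(ref->name, name, len);	/* trailing NUL comes from xcalloc() */
	return ref;
}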
static int fetch_objs_via_rsync(struct transport *transport,
- int nr_objs, struct ref **to_fetch)
+ int nr_objs, const struct ref **to_fetch)
{
struct strbuf buf = STRBUF_INIT;
struct child_process rsync;
#ifndef NO_CURL /* http fetch is the only user */
static int fetch_objs_via_walker(struct transport *transport,
- int nr_objs, struct ref **to_fetch)
+ int nr_objs, const struct ref **to_fetch)
{
char *dest = xstrdup(transport->url);
struct walker *walker = transport->data;
struct ref *ref = NULL;
struct ref *last_ref = NULL;
+ struct walker *walker;
+
if (!transport->data)
transport->data = get_http_walker(transport->url,
transport->remote);
+ walker = transport->data;
+
refs_url = xmalloc(strlen(transport->url) + 11);
sprintf(refs_url, "%s/info/refs", transport->url);
run_active_slot(slot);
if (results.curl_result != CURLE_OK) {
strbuf_release(&buffer);
- if (missing_target(&results)) {
- return NULL;
- } else {
- error("%s", curl_errorstr);
- return NULL;
- }
+ if (missing_target(&results))
+ die("%s not found: did you run git update-server-info on the server?", refs_url);
+ else
+ die("%s download error - %s", refs_url, curl_errorstr);
}
} else {
strbuf_release(&buffer);
- error("Unable to start request");
- return NULL;
+ die("Unable to start HTTP request");
}
data = buffer.buf;
strbuf_release(&buffer);
+ ref = alloc_ref("HEAD");
+ if (!walker->fetch_ref(walker, ref) &&
+ !resolve_remote_symref(ref, refs)) {
+ ref->next = refs;
+ refs = ref;
+ } else {
+ free(ref);
+ }
+
return refs;
}
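The HEAD handling added above leans on resolve_remote_symref() to map a symbolic HEAD onto the ref it points at. A sketch under the assumption that ref->symref carries the target name reported by the walker and that refs is the list parsed from info/refs:

/* Sketch only: returns 0 on success. ref->symref is assumed to name the
 * branch HEAD points at; on a match the object name is copied over. */
int resolve_remote_symref(struct ref *ref, struct ref *list)
{
	if (!ref->symref)
		return 0;
	for (; list; list = list->next)
		if (!strcmp(ref->symref, list->name)) {
			hashcpy(ref->old_sha1, list->old_sha1);
			return 0;
		}
	return 1;
}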
static int fetch_objs_via_curl(struct transport *transport,
- int nr_objs, struct ref **to_fetch)
+ int nr_objs, const struct ref **to_fetch)
{
if (!transport->data)
transport->data = get_http_walker(transport->url,
die ("Could not read bundle '%s'.", transport->url);
for (i = 0; i < data->header.references.nr; i++) {
struct ref_list_entry *e = data->header.references.list + i;
- struct ref *ref = alloc_ref(strlen(e->name) + 1);
+ struct ref *ref = alloc_ref(e->name);
hashcpy(ref->old_sha1, e->sha1);
- strcpy(ref->name, e->name);
ref->next = result;
result = ref;
}
}
static int fetch_refs_from_bundle(struct transport *transport,
- int nr_heads, struct ref **to_fetch)
+ int nr_heads, const struct ref **to_fetch)
{
struct bundle_transport_data *data = transport->data;
return unbundle(&data->header, data->fd);
struct ref *refs;
connect_setup(transport);
- get_remote_heads(data->fd[0], &refs, 0, NULL, 0);
+ get_remote_heads(data->fd[0], &refs, 0, NULL, 0, NULL);
return refs;
}
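The extra trailing NULL here (and in the fetch_refs_via_pack hunk below) matches a widened get_remote_heads() prototype. A hedged sketch of the assumed declaration; the name and type of the new final parameter are a guess, and callers that do not want the extra data simply pass NULL:

/* Assumed declaration only; the final parameter is an optional out-list
 * whose exact type here is a placeholder, not copied from the header. */
extern struct ref **get_remote_heads(int in, struct ref **list,
				     int nr_match, char **match,
				     unsigned int flags,
				     struct extra_have_objects *extra_have);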
static int fetch_refs_via_pack(struct transport *transport,
- int nr_heads, struct ref **to_fetch)
+ int nr_heads, const struct ref **to_fetch)
{
struct git_transport_data *data = transport->data;
char **heads = xmalloc(nr_heads * sizeof(*heads));
args.lock_pack = 1;
args.use_thin_pack = data->thin;
args.include_tag = data->followtags;
- args.verbose = transport->verbose > 0;
+ args.verbose = (transport->verbose > 0);
+ args.quiet = (transport->verbose < 0);
+ args.no_progress = args.quiet || (!transport->progress && !isatty(1));
args.depth = data->depth;
for (i = 0; i < nr_heads; i++)
if (!data->conn) {
connect_setup(transport);
- get_remote_heads(data->fd[0], &refs_tmp, 0, NULL, 0);
+ get_remote_heads(data->fd[0], &refs_tmp, 0, NULL, 0, NULL);
}
refs = fetch_pack(&args, data->fd, data->conn,
{
const char *colon = strchr(url, ':');
const char *slash = strchr(url, '/');
- return !colon || (slash && slash < colon);
+ return !colon || (slash && slash < colon) ||
+ has_dos_drive_prefix(url);
}
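The added disjunct keeps drive-letter paths such as C:/repo classified as local: the colon after the drive letter comes before the first slash, so without it they would look like a host:path remote. has_dos_drive_prefix() is assumed to boil down to roughly this check:

/* Assumed behaviour: a single letter followed by ':' (needs <ctype.h>). */
static int has_dos_drive_prefix(const char *path)
{
	return isalpha((unsigned char)*path) && path[1] == ':';
}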
static int is_file(const char *url)
return transport->remote_refs;
}
-int transport_fetch_refs(struct transport *transport, struct ref *refs)
+int transport_fetch_refs(struct transport *transport, const struct ref *refs)
{
int rc;
int nr_heads = 0, nr_alloc = 0;
- struct ref **heads = NULL;
- struct ref *rm;
+ const struct ref **heads = NULL;
+ const struct ref *rm;
for (rm = refs; rm; rm = rm->next) {
if (rm->peer_ref &&