#include "commit.h"
#include "walker.h"
#include "http.h"
+#include "list.h"
+#include "transport.h"
struct alt_base {
char *base;
int got_indices;
struct packed_git *packs;
struct alt_base *next;
};
struct object_request {
struct walker *walker;
unsigned char sha1[20];
struct alt_base *repo;
enum object_request_state state;
struct http_object_request *req;
- struct object_request *next;
+ struct list_head node;
};
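The removed next pointer made struct object_request carry its own singly-linked queue; the new node member embeds a generic doubly-linked link from list.h instead, and later hunks recover the containing request with list_entry(). Not part of the patch, just a minimal sketch of the usual offsetof()-based container lookup that list_entry() performs; the names here are hypothetical so they are not mistaken for the exact list.h definitions:

#include <stddef.h>

struct sketch_list_head {
	struct sketch_list_head *next, *prev;
};

/* map an embedded link back to the structure that contains it */
#define sketch_list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

With that pattern, list_entry(pos, struct object_request, node) in the loops below simply subtracts the offset of node within struct object_request from the link pointer.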
struct alternates_request {
struct alt_base *alt;
};
-static struct object_request *object_queue_head;
+static LIST_HEAD(object_queue_head);
static void fetch_alternates(struct walker *walker, const char *base);
static void release_object_request(struct object_request *obj_req)
{
- struct object_request *entry = object_queue_head;
-
if (obj_req->req != NULL && obj_req->req->localfile != -1)
error("fd leakage in release: %d", obj_req->req->localfile);
- if (obj_req == object_queue_head) {
- object_queue_head = obj_req->next;
- } else {
- while (entry->next != NULL && entry->next != obj_req)
- entry = entry->next;
- if (entry->next == obj_req)
- entry->next = entry->next->next;
- }
+ list_del(&obj_req->node);
free(obj_req);
}
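The hand-rolled unsplice that walked the queue from object_queue_head is replaced by a single list_del(), which is O(1) because a doubly-linked node already knows both of its neighbours. For illustration only (not part of the patch), the standard unlink such a list_del() performs, written against the two-pointer sketch_list_head defined in the sketch above:

static void sketch_list_del(struct sketch_list_head *entry)
{
	/* splice the entry out by pointing its neighbours at each other */
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	/* the queue no longer references the entry, so freeing the
	   object_request that embeds it is safe */
}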
static int fill_active_slot(struct walker *walker)
{
struct object_request *obj_req;
+ struct list_head *pos, *tmp, *head = &object_queue_head;
- for (obj_req = object_queue_head; obj_req; obj_req = obj_req->next) {
+ list_for_each_safe(pos, tmp, head) {
+ obj_req = list_entry(pos, struct object_request, node);
if (obj_req->state == WAITING) {
if (has_sha1_file(obj_req->sha1))
obj_req->state = COMPLETE;
static void prefetch(struct walker *walker, unsigned char *sha1)
{
struct object_request *newreq;
- struct object_request *tail;
struct walker_data *data = walker->data;
newreq = xmalloc(sizeof(*newreq));
newreq->repo = data->alt;
newreq->state = WAITING;
newreq->req = NULL;
- newreq->next = NULL;
http_is_verbose = walker->get_verbosely;
-
- if (object_queue_head == NULL) {
- object_queue_head = newreq;
- } else {
- tail = object_queue_head;
- while (tail->next != NULL)
- tail = tail->next;
- tail->next = newreq;
- }
+ list_add_tail(&newreq->node, &object_queue_head);
#ifdef USE_CURL_MULTI
fill_active_slots();
#endif
}
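LIST_HEAD(object_queue_head) turns the queue head into a sentinel node whose links point at itself, so list_add_tail() appends in O(1) where the removed code walked to the old tail on every prefetch. A sketch of both operations, again with hypothetical names rather than the exact list.h text:

/* an empty circular list: the sentinel points at itself */
#define SKETCH_LIST_HEAD(name) \
	struct sketch_list_head name = { &(name), &(name) }

static void sketch_list_add_tail(struct sketch_list_head *new_entry,
				 struct sketch_list_head *head)
{
	new_entry->prev = head->prev;	/* the old tail, or the sentinel itself */
	new_entry->next = head;		/* wrap back around to the sentinel */
	head->prev->next = new_entry;
	head->prev = new_entry;
}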
+static int is_alternate_allowed(const char *url)
+{
+ const char *protocols[] = {
+ "http", "https", "ftp", "ftps"
+ };
+ int i;
+
+ if (http_follow_config != HTTP_FOLLOW_ALWAYS) {
+ warning("alternate disabled by http.followRedirects: %s", url);
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(protocols); i++) {
+ const char *end;
+ if (skip_prefix(url, protocols[i], &end) &&
+ starts_with(end, "://"))
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(protocols)) {
+ warning("ignoring alternate with unknown protocol: %s", url);
+ return 0;
+ }
+ if (!is_transport_allowed(protocols[i], 0)) {
+ warning("ignoring alternate with restricted protocol: %s", url);
+ return 0;
+ }
+
+ return 1;
+}
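Not part of the patch: a stand-alone restatement of the scheme scan above using only libc, to make the matching rule explicit. The helper name and example URLs are hypothetical; in the real function the scheme must additionally pass is_transport_allowed(), and http.followRedirects has to be set to true for alternates to be honoured at all.

#include <string.h>

static int sketch_known_scheme(const char *url)
{
	static const char *protocols[] = { "http", "https", "ftp", "ftps" };
	size_t i;

	for (i = 0; i < sizeof(protocols) / sizeof(protocols[0]); i++) {
		size_t len = strlen(protocols[i]);
		/* the scheme must be followed immediately by "://" */
		if (!strncmp(url, protocols[i], len) &&
		    !strncmp(url + len, "://", 3))
			return 1;
	}
	return 0;
}

/* sketch_known_scheme("https://mirror.example.com/objects") == 1,
   sketch_known_scheme("git://mirror.example.com/objects") == 0 */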
+
static void process_alternates_response(void *callback_data)
{
struct alternates_request *alt_req =
okay = 1;
}
}
- /* skip "objects\n" at end */
if (okay) {
struct strbuf target = STRBUF_INIT;
strbuf_add(&target, base, serverlen);
- strbuf_add(&target, data + i, posn - i - 7);
- if (walker->get_verbosely)
- fprintf(stderr, "Also look at %s\n",
+ strbuf_add(&target, data + i, posn - i);
+ if (!strbuf_strip_suffix(&target, "objects")) {
+ warning("ignoring alternate that does"
+ " not end in 'objects': %s",
+ target.buf);
+ strbuf_release(&target);
+ } else if (is_alternate_allowed(target.buf)) {
+ warning("adding alternate object store: %s",
target.buf);
- newalt = xmalloc(sizeof(*newalt));
- newalt->next = NULL;
- newalt->base = strbuf_detach(&target, NULL);
- newalt->got_indices = 0;
- newalt->packs = NULL;
-
- while (tail->next != NULL)
- tail = tail->next;
- tail->next = newalt;
+ newalt = xmalloc(sizeof(*newalt));
+ newalt->next = NULL;
+ newalt->base = strbuf_detach(&target, NULL);
+ newalt->got_indices = 0;
+ newalt->packs = NULL;
+
+ while (tail->next != NULL)
+ tail = tail->next;
+ tail->next = newalt;
+ } else {
+ strbuf_release(&target);
+ }
}
}
i = posn + 1;
release_object_request(obj_req);
}
-static int fetch_object(struct walker *walker, struct alt_base *repo, unsigned char *sha1)
+static int fetch_object(struct walker *walker, unsigned char *sha1)
{
char *hex = sha1_to_hex(sha1);
int ret = 0;
- struct object_request *obj_req = object_queue_head;
+ struct object_request *obj_req = NULL;
struct http_object_request *req;
+ struct list_head *pos, *head = &object_queue_head;
- while (obj_req != NULL && hashcmp(obj_req->sha1, sha1))
- obj_req = obj_req->next;
+ list_for_each(pos, head) {
+ obj_req = list_entry(pos, struct object_request, node);
+ if (!hashcmp(obj_req->sha1, sha1))
+ break;
+ }
if (obj_req == NULL)
return error("Couldn't find request for %s in the queue", hex);
req->localfile = -1;
}
+ /*
+ * we turned off CURLOPT_FAILONERROR to avoid losing a
+ * persistent connection and got CURLE_OK.
+ */
+ if (req->http_code >= 300 && req->curl_result == CURLE_OK &&
+ (starts_with(req->url, "http://") ||
+ starts_with(req->url, "https://"))) {
+ req->curl_result = CURLE_HTTP_RETURNED_ERROR;
+ xsnprintf(req->errorstr, sizeof(req->errorstr),
+ "HTTP request failed");
+ }
+
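Aside, not part of the patch: with CURLOPT_FAILONERROR turned off (so a 404 on a loose object does not cost the persistent connection), libcurl still returns CURLE_OK for HTTP error statuses, which is why the block above reconstructs the failure from req->http_code. That field is assumed to be filled elsewhere in http.c via the usual libcurl query, roughly:

#include <curl/curl.h>

/* sketch only: read back the HTTP status when FAILONERROR is disabled
   (CURLINFO_RESPONSE_CODE; older libcurl called it CURLINFO_HTTP_CODE) */
static long sketch_response_code(CURL *curl)
{
	long code = 0;
	curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code);
	return code;
}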
if (obj_req->state == ABORTED) {
ret = error("Request for %s aborted", hex);
} else if (req->curl_result != CURLE_OK &&
struct walker_data *data = walker->data;
struct alt_base *altbase = data->alt;
- if (!fetch_object(walker, altbase, sha1))
+ if (!fetch_object(walker, sha1))
return 0;
while (altbase) {
if (!http_fetch_pack(walker, altbase, sha1))