static const char *curl_http_proxy;
static const char *curl_cookie_file;
static int curl_save_cookies;
-static struct credential http_auth = CREDENTIAL_INIT;
+struct credential http_auth = CREDENTIAL_INIT;
static int http_proactive_auth;
static const char *user_agent;
if (slot->results != NULL) {
slot->results->curl_result = slot->curl_result;
slot->results->http_code = slot->http_code;
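+ /*
+ * CURLINFO_HTTPAUTH_AVAIL was only added in curl 7.10.8 (0x070a08);
+ * with older libcurl we simply report that no auth methods were
+ * advertised.
+ */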
+#if LIBCURL_VERSION_NUM >= 0x070a08
+ curl_easy_getinfo(slot->curl, CURLINFO_HTTPAUTH_AVAIL,
+ &slot->results->auth_avail);
+#else
+ slot->results->auth_avail = 0;
+#endif
}
/* Run callback if appropriate */
credential_reject(&http_auth);
return HTTP_NOAUTH;
} else {
- credential_fill(&http_auth);
return HTTP_REAUTH;
}
} else {
}
}
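+/*
+ * Copy the string value of a CURLINFO item into "buf", resetting it
+ * first.  Returns the CURLcode from curl_easy_getinfo(); on failure,
+ * or when curl reports a NULL value, the buffer is left empty.
+ */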
+static CURLcode curlinfo_strbuf(CURL *curl, CURLINFO info, struct strbuf *buf)
+{
+ char *ptr;
+ CURLcode ret;
+
+ strbuf_reset(buf);
+ ret = curl_easy_getinfo(curl, info, &ptr);
+ if (!ret && ptr)
+ strbuf_addstr(buf, ptr);
+ return ret;
+}
+
/* http_request() targets */
#define HTTP_REQUEST_STRBUF 0
#define HTTP_REQUEST_FILE 1
-static int http_request(const char *url, struct strbuf *type,
- void *result, int target, int options)
+static int http_request(const char *url,
+ void *result, int target,
+ const struct http_get_options *options)
{
struct active_request_slot *slot;
struct slot_results results;
}
strbuf_addstr(&buf, "Pragma:");
- if (options & HTTP_NO_CACHE)
+ if (options && options->no_cache)
strbuf_addstr(&buf, " no-cache");
- if (options & HTTP_KEEP_ERROR)
+ if (options && options->keep_error)
curl_easy_setopt(slot->curl, CURLOPT_FAILONERROR, 0);
headers = curl_slist_append(headers, buf.buf);
ret = HTTP_START_FAILED;
}
- if (type) {
- char *t;
- strbuf_reset(type);
- curl_easy_getinfo(slot->curl, CURLINFO_CONTENT_TYPE, &t);
- if (t)
- strbuf_addstr(type, t);
- }
+ if (options && options->content_type)
+ curlinfo_strbuf(slot->curl, CURLINFO_CONTENT_TYPE,
+ options->content_type);
+
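+ /*
+ * CURLINFO_EFFECTIVE_URL is the URL curl actually ended up at after
+ * following any redirects, which lets callers notice that the
+ * request was redirected.
+ */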
+ if (options && options->effective_url)
+ curlinfo_strbuf(slot->curl, CURLINFO_EFFECTIVE_URL,
+ options->effective_url);
curl_slist_free_all(headers);
strbuf_release(&buf);
return ret;
}
+/*
+ * Update the "base" url to a more appropriate value, as deduced by
+ * redirects seen when requesting a URL starting with "base".
+ *
+ * The "asked" parameter is a URL that we asked curl to access, and must begin
+ * with "base".
+ *
+ * The "got" parameter is the URL that curl reported to us as where we ended
+ * up.
+ *
+ * Returns 1 if we updated the base url, 0 otherwise.
+ *
+ * Our basic strategy is to compare "base" and "asked" to find the bits
+ * specific to our request. We then strip those bits off of "got" to yield the
+ * new base. So for example, if our base is "http://example.com/foo.git",
+ * and we ask for "http://example.com/foo.git/info/refs", we might end up
+ * with "https://other.example.com/foo.git/info/refs". We would want the
+ * new base to become "https://other.example.com/foo.git".
+ *
+ * Note that this assumes a sane redirect scheme. It's entirely possible
+ * in the example above to end up at a URL that does not even end in
+ * "info/refs". In such a case we simply punt, as there is not much we can
+ * do (and such a scheme is unlikely to represent a real git repository,
+ * which means we are likely about to abort anyway).
+ */
+static int update_url_from_redirect(struct strbuf *base,
+ const char *asked,
+ const struct strbuf *got)
+{
+ const char *tail;
+ size_t tail_len;
+
+ if (!strcmp(asked, got->buf))
+ return 0;
+
+ if (prefixcmp(asked, base->buf))
+ die("BUG: update_url_from_redirect: %s is not a superset of %s",
+ asked, base->buf);
+
+ tail = asked + base->len;
+ tail_len = strlen(tail);
+
+ if (got->len < tail_len ||
+ strcmp(tail, got->buf + got->len - tail_len))
+ return 0; /* insane redirect scheme */
+
+ strbuf_reset(base);
+ strbuf_add(base, got->buf, got->len - tail_len);
+ return 1;
+}
+
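+/*
+ * Illustrative sketch only (the URLs below are made up): a caller holding
+ * the remote's base URL in a strbuf could apply a redirect reported by
+ * curl like this:
+ *
+ *	struct strbuf base = STRBUF_INIT;
+ *	struct strbuf got = STRBUF_INIT;
+ *
+ *	strbuf_addstr(&base, "http://example.com/foo.git");
+ *	strbuf_addstr(&got,
+ *		      "https://other.example.com/foo.git/info/refs");
+ *	if (update_url_from_redirect(&base,
+ *				     "http://example.com/foo.git/info/refs",
+ *				     &got))
+ *		printf("new base: %s\n", base.buf);
+ *
+ * Here the tail "/info/refs" is stripped from "got", so the new base
+ * becomes "https://other.example.com/foo.git".
+ */
+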
static int http_request_reauth(const char *url,
- struct strbuf *type,
void *result, int target,
- int options)
+ struct http_get_options *options)
{
- int ret = http_request(url, type, result, target, options);
+ int ret = http_request(url, result, target, options);
+
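+ /*
+ * If the caller tracks the effective URL and we were redirected,
+ * rewrite the base URL to match and re-derive the credential
+ * context from it, so that any authentication below targets the
+ * host we actually ended up talking to; the retry also goes
+ * straight to the redirected URL.
+ */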
+ if (options && options->effective_url && options->base_url) {
+ if (update_url_from_redirect(options->base_url,
+ url, options->effective_url)) {
+ credential_from_url(&http_auth, options->base_url->buf);
+ url = options->effective_url->buf;
+ }
+ }
+
if (ret != HTTP_REAUTH)
return ret;
* making our next request. We only know how to do this for
* the strbuf case, but that is enough to satisfy current callers.
*/
- if (options & HTTP_KEEP_ERROR) {
+ if (options && options->keep_error) {
switch (target) {
case HTTP_REQUEST_STRBUF:
strbuf_reset(result);
die("BUG: HTTP_KEEP_ERROR is only supported with strbufs");
}
}
- return http_request(url, type, result, target, options);
+
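+ /*
+ * We were asked to authenticate; get credentials (possibly
+ * prompting the user through the credential machinery) before
+ * retrying the request.
+ */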
+ credential_fill(&http_auth);
+
+ return http_request(url, result, target, options);
}
int http_get_strbuf(const char *url,
- struct strbuf *type,
- struct strbuf *result, int options)
+ struct strbuf *result,
+ struct http_get_options *options)
{
- return http_request_reauth(url, type, result,
- HTTP_REQUEST_STRBUF, options);
+ return http_request_reauth(url, result, HTTP_REQUEST_STRBUF, options);
}
/*
* If a previous interrupted download is detected (i.e. a previous temporary
* file is still around) the download is resumed.
*/
-static int http_get_file(const char *url, const char *filename, int options)
+static int http_get_file(const char *url, const char *filename,
+ struct http_get_options *options)
{
int ret;
struct strbuf tmpfile = STRBUF_INIT;
strbuf_addf(&tmpfile, "%s.temp", filename);
result = fopen(tmpfile.buf, "a");
- if (! result) {
+ if (!result) {
error("Unable to open local file %s", tmpfile.buf);
ret = HTTP_ERROR;
goto cleanup;
}
- ret = http_request_reauth(url, NULL, result, HTTP_REQUEST_FILE, options);
+ ret = http_request_reauth(url, result, HTTP_REQUEST_FILE, options);
fclose(result);
- if ((ret == HTTP_OK) && move_temp_to_file(tmpfile.buf, filename))
+ if (ret == HTTP_OK && move_temp_to_file(tmpfile.buf, filename))
ret = HTTP_ERROR;
cleanup:
strbuf_release(&tmpfile);
int http_fetch_ref(const char *base, struct ref *ref)
{
+ struct http_get_options options = {0};
char *url;
struct strbuf buffer = STRBUF_INIT;
int ret = -1;
+ options.no_cache = 1;
+
url = quote_ref_url(base, ref->name);
- if (http_get_strbuf(url, NULL, &buffer, HTTP_NO_CACHE) == HTTP_OK) {
+ if (http_get_strbuf(url, &buffer, &options) == HTTP_OK) {
strbuf_rtrim(&buffer);
if (buffer.len == 40)
ret = get_sha1_hex(buffer.buf, ref->old_sha1);
strbuf_addf(&buf, "%s.temp", sha1_pack_index_name(sha1));
tmp = strbuf_detach(&buf, NULL);
- if (http_get_file(url, tmp, 0) != HTTP_OK) {
+ if (http_get_file(url, tmp, NULL) != HTTP_OK) {
error("Unable to get pack index %s", url);
free(tmp);
tmp = NULL;
int http_get_info_packs(const char *base_url, struct packed_git **packs_head)
{
+ struct http_get_options options = {0};
int ret = 0, i = 0;
char *url, *data;
struct strbuf buf = STRBUF_INIT;
strbuf_addstr(&buf, "objects/info/packs");
url = strbuf_detach(&buf, NULL);
- ret = http_get_strbuf(url, NULL, &buf, HTTP_NO_CACHE);
+ options.no_cache = 1;
+ ret = http_get_strbuf(url, &buf, &options);
if (ret != HTTP_OK)
goto cleanup;