Conversion from unsigned char[20] to struct object_id continues.
* bc/hash-transition-16: (35 commits)
gitweb: make hash size independent
Git.pm: make hash size independent
read-cache: read data in a hash-independent way
dir: make untracked cache extension hash size independent
builtin/difftool: use parse_oid_hex
refspec: make hash size independent
archive: convert struct archiver_args to object_id
builtin/get-tar-commit-id: make hash size independent
get-tar-commit-id: parse comment record
hash: add a function to lookup hash algorithm by length
remote-curl: make hash size independent
http: replace sha1_to_hex
http: compute hash of downloaded objects using the_hash_algo
http: replace hard-coded constant with the_hash_algo
http-walker: replace sha1_to_hex
http-push: remove remaining uses of sha1_to_hex
http-backend: allow 64-character hex names
http-push: convert to use the_hash_algo
builtin/pull: make hash-size independent
builtin/am: make hash size independent
...
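All of these hunks lean on the_hash_algo, Git's handle to the hash function configured for the repository. As a rough sketch (fields abridged; hash.h has the authoritative definition), the parts relied on below are:

struct git_hash_algo {
	size_t rawsz;	/* binary hash size: 20 for SHA-1, 32 for SHA-256 */
	size_t hexsz;	/* hex hash size: 40 for SHA-1, 64 for SHA-256 */
	git_hash_init_fn init_fn;	/* streaming hashing, replacing git_SHA1_Init() and friends */
	git_hash_update_fn update_fn;
	git_hash_final_fn final_fn;
	/* ... */
};

Buffers that must be able to hold any supported hash are sized with GIT_MAX_RAWSZ and GIT_MAX_HEXSZ rather than the SHA-1 constants.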
while (!strbuf_getline_lf(&sb, fp)) {
struct object_id from_obj, to_obj;
+ const char *p;
- if (sb.len != GIT_SHA1_HEXSZ * 2 + 1) {
+ if (sb.len != the_hash_algo->hexsz * 2 + 1) {
ret = error(invalid_line, sb.buf);
goto finish;
}
- if (get_oid_hex(sb.buf, &from_obj)) {
+ if (parse_oid_hex(sb.buf, &from_obj, &p)) {
ret = error(invalid_line, sb.buf);
goto finish;
}
- if (sb.buf[GIT_SHA1_HEXSZ] != ' ') {
+ if (*p != ' ') {
ret = error(invalid_line, sb.buf);
goto finish;
}
- if (get_oid_hex(sb.buf + GIT_SHA1_HEXSZ + 1, &to_obj)) {
+ if (get_oid_hex(p + 1, &to_obj)) {
ret = error(invalid_line, sb.buf);
goto finish;
}
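The same substitution recurs throughout the series: parse_oid_hex() reads the_hash_algo->hexsz hex digits into the object_id and, on success, leaves its third argument pointing just past them, so callers can continue from *p instead of stepping over a hard-coded GIT_SHA1_HEXSZ offset. Its prototype, for reference:

int parse_oid_hex(const char *hex, struct object_id *oid, const char **end);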
* review them with extra care to spot mismerges.
*/
struct rev_info rev_info;
- const char *diff_filter_str = "--diff-filter=AM";
repo_init_revisions(the_repository, &rev_info, NULL);
rev_info.diffopt.output_format = DIFF_FORMAT_NAME_STATUS;
- diff_opt_parse(&rev_info.diffopt, &diff_filter_str, 1, rev_info.prefix);
+ rev_info.diffopt.filter |= diff_filter_bit('A');
+ rev_info.diffopt.filter |= diff_filter_bit('M');
add_pending_oid(&rev_info, "HEAD", &our_tree, 0);
diff_setup_done(&rev_info.diffopt);
run_diff_index(&rev_info, 1);
*mode2 = (int)strtol(p + 1, &p, 8);
if (*p != ' ')
return error("expected ' ', got '%c'", *p);
- if (get_oid_hex(++p, oid1))
- return error("expected object ID, got '%s'", p + 1);
- p += GIT_SHA1_HEXSZ;
+ if (parse_oid_hex(++p, oid1, (const char **)&p))
+ return error("expected object ID, got '%s'", p);
if (*p != ' ')
return error("expected ' ', got '%c'", *p);
- if (get_oid_hex(++p, oid2))
- return error("expected object ID, got '%s'", p + 1);
- p += GIT_SHA1_HEXSZ;
+ if (parse_oid_hex(++p, oid2, (const char **)&p))
+ return error("expected object ID, got '%s'", p);
if (*p != ' ')
return error("expected ' ', got '%c'", *p);
*status = *++p;
int cmd_difftool(int argc, const char **argv, const char *prefix)
{
int use_gui_tool = 0, dir_diff = 0, prompt = -1, symlinks = 0,
- tool_help = 0;
+ tool_help = 0, no_index = 0;
static char *difftool_cmd = NULL, *extcmd = NULL;
struct option builtin_difftool_options[] = {
OPT_BOOL('g', "gui", &use_gui_tool,
"tool returns a non - zero exit code")),
OPT_STRING('x', "extcmd", &extcmd, N_("command"),
N_("specify a custom command for viewing diffs")),
+ OPT_ARGUMENT("no-index", &no_index, N_("passed to `diff`")),
OPT_END()
};
if (tool_help)
return print_tool_help();
- /* NEEDSWORK: once we no longer spawn anything, remove this */
- setenv(GIT_DIR_ENVIRONMENT, absolute_path(get_git_dir()), 1);
- setenv(GIT_WORK_TREE_ENVIRONMENT, absolute_path(get_git_work_tree()), 1);
+ if (!no_index && !startup_info->have_repository)
+ die(_("difftool requires worktree or --no-index"));
+
+ if (!no_index) {
+ setup_work_tree();
+ setenv(GIT_DIR_ENVIRONMENT, absolute_path(get_git_dir()), 1);
+ setenv(GIT_WORK_TREE_ENVIRONMENT, absolute_path(get_git_work_tree()), 1);
+ }
if (use_gui_tool && diff_gui_tool && *diff_gui_tool)
setenv("GIT_DIFF_TOOL", diff_gui_tool, 1);
}
int report_path_error(const char *ps_matched,
- const struct pathspec *pathspec,
- const char *prefix)
+ const struct pathspec *pathspec)
{
/*
* Make sure all pathspec matched; otherwise it is an error.
struct stat_data info_exclude_stat;
struct stat_data excludes_file_stat;
uint32_t dir_flags;
- unsigned char info_exclude_sha1[20];
- unsigned char excludes_file_sha1[20];
- char exclude_per_dir[FLEX_ARRAY];
};
#define ouc_offset(x) offsetof(struct ondisk_untracked_cache, x)
- #define ouc_size(len) (ouc_offset(exclude_per_dir) + len + 1)
struct write_data {
int index; /* number of written untracked_cache_dir */
struct write_data wd;
unsigned char varbuf[16];
int varint_len;
- size_t len = strlen(untracked->exclude_per_dir);
+ const unsigned hashsz = the_hash_algo->rawsz;
- FLEX_ALLOC_MEM(ouc, exclude_per_dir, untracked->exclude_per_dir, len);
+ ouc = xcalloc(1, sizeof(*ouc));
stat_data_to_disk(&ouc->info_exclude_stat, &untracked->ss_info_exclude.stat);
stat_data_to_disk(&ouc->excludes_file_stat, &untracked->ss_excludes_file.stat);
- hashcpy(ouc->info_exclude_sha1, untracked->ss_info_exclude.oid.hash);
- hashcpy(ouc->excludes_file_sha1, untracked->ss_excludes_file.oid.hash);
ouc->dir_flags = htonl(untracked->dir_flags);
varint_len = encode_varint(untracked->ident.len, varbuf);
strbuf_add(out, varbuf, varint_len);
strbuf_addbuf(out, &untracked->ident);
- strbuf_add(out, ouc, ouc_size(len));
+ strbuf_add(out, ouc, sizeof(*ouc));
+ strbuf_add(out, untracked->ss_info_exclude.oid.hash, hashsz);
+ strbuf_add(out, untracked->ss_excludes_file.oid.hash, hashsz);
+ strbuf_add(out, untracked->exclude_per_dir, strlen(untracked->exclude_per_dir) + 1);
FREE_AND_NULL(ouc);
if (!untracked->root) {
int ident_len;
ssize_t len;
const char *exclude_per_dir;
+ const unsigned hashsz = the_hash_algo->rawsz;
+ const unsigned offset = sizeof(struct ondisk_untracked_cache);
+ const unsigned exclude_per_dir_offset = offset + 2 * hashsz;
if (sz <= 1 || end[-1] != '\0')
return NULL;
ident = (const char *)next;
next += ident_len;
- if (next + ouc_size(0) > end)
+ if (next + exclude_per_dir_offset + 1 > end)
return NULL;
uc = xcalloc(1, sizeof(*uc));
strbuf_add(&uc->ident, ident, ident_len);
load_oid_stat(&uc->ss_info_exclude,
next + ouc_offset(info_exclude_stat),
- next + ouc_offset(info_exclude_sha1));
+ next + offset);
load_oid_stat(&uc->ss_excludes_file,
next + ouc_offset(excludes_file_stat),
- next + ouc_offset(excludes_file_sha1));
+ next + offset + hashsz);
uc->dir_flags = get_be32(next + ouc_offset(dir_flags));
- exclude_per_dir = (const char *)next + ouc_offset(exclude_per_dir);
+ exclude_per_dir = (const char *)next + exclude_per_dir_offset;
uc->exclude_per_dir = xstrdup(exclude_per_dir);
/* NUL after exclude_per_dir is covered by sizeof(*ouc) */
- next += ouc_size(strlen(exclude_per_dir));
+ next += exclude_per_dir_offset + strlen(exclude_per_dir) + 1;
if (next >= end)
goto done2;
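Taken together, the writer and reader above agree on a new serialization: the fixed-size struct ondisk_untracked_cache (which no longer embeds SHA-1-sized arrays), followed by two raw hashes and the NUL-terminated exclude_per_dir string. A minimal sketch of the shared offset arithmetic (illustrative names, not from the patch):

const unsigned hashsz = the_hash_algo->rawsz;
const unsigned base = sizeof(struct ondisk_untracked_cache);
/* base              : ss_info_exclude hash, hashsz bytes */
/* base + hashsz     : ss_excludes_file hash, hashsz bytes */
/* base + 2 * hashsz : exclude_per_dir string, NUL-terminated */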
*/
#define NO_DELTA S_ISUID
+ /*
+ * The amount of additional space required in order to write an object into the
+ * current pack. This is the hash lengths at the end of the pack, plus the
+ * length of one object ID.
+ */
+ #define PACK_SIZE_THRESHOLD (the_hash_algo->rawsz * 3)
+
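For SHA-1 this evaluates to the 60 bytes that were hard-coded in the auto-checkpoint checks below (3 * 20); under SHA-256 the same macro yields 96.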
struct object_entry {
struct pack_idx_entry idx;
struct object_entry *next;
if (c != last)
die("internal consistency error creating the index");
- tmpfile = write_idx_file(NULL, idx, object_count, &pack_idx_opts, pack_data->sha1);
+ tmpfile = write_idx_file(NULL, idx, object_count, &pack_idx_opts,
+ pack_data->hash);
free(idx);
return tmpfile;
}
struct strbuf name = STRBUF_INIT;
int keep_fd;
- odb_pack_name(&name, pack_data->sha1, "keep");
+ odb_pack_name(&name, pack_data->hash, "keep");
keep_fd = odb_pack_keep(name.buf);
if (keep_fd < 0)
die_errno("cannot create keep file");
if (close(keep_fd))
die_errno("failed to write keep file");
- odb_pack_name(&name, pack_data->sha1, "pack");
+ odb_pack_name(&name, pack_data->hash, "pack");
if (finalize_object_file(pack_data->pack_name, name.buf))
die("cannot store pack file");
- odb_pack_name(&name, pack_data->sha1, "idx");
+ odb_pack_name(&name, pack_data->hash, "idx");
if (finalize_object_file(curr_index_name, name.buf))
die("cannot store index file");
free((void *)curr_index_name);
for (k = 0; k < pack_id; k++) {
struct packed_git *p = all_packs[k];
- odb_pack_name(&name, p->sha1, "keep");
+ odb_pack_name(&name, p->hash, "keep");
unlink_or_warn(name.buf);
}
strbuf_release(&name);
close_pack_windows(pack_data);
finalize_hashfile(pack_file, cur_pack_oid.hash, 0);
- fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1,
- pack_data->pack_name, object_count,
- cur_pack_oid.hash, pack_size);
+ fixup_pack_header_footer(pack_data->pack_fd, pack_data->hash,
+ pack_data->pack_name, object_count,
+ cur_pack_oid.hash, pack_size);
if (object_count <= unpack_limit) {
if (!loosen_small_pack(pack_data)) {
git_deflate_end(&s);
/* Determine if we should auto-checkpoint. */
- if ((max_packsize && (pack_size + 60 + s.total_out) > max_packsize)
- || (pack_size + 60 + s.total_out) < pack_size) {
+ if ((max_packsize
+ && (pack_size + PACK_SIZE_THRESHOLD + s.total_out) > max_packsize)
+ || (pack_size + PACK_SIZE_THRESHOLD + s.total_out) < pack_size) {
/* This new object needs to *not* have the current pack_id. */
e->pack_id = pack_id + 1;
int status = Z_OK;
/* Determine if we should auto-checkpoint. */
- if ((max_packsize && (pack_size + 60 + len) > max_packsize)
- || (pack_size + 60 + len) < pack_size)
+ if ((max_packsize
+ && (pack_size + PACK_SIZE_THRESHOLD + len) > max_packsize)
+ || (pack_size + PACK_SIZE_THRESHOLD + len) < pack_size)
cycle_packfile();
hashfile_checkpoint(pack_file, &checkpoint);
c += e->name->str_len + 1;
hashcpy(e->versions[0].oid.hash, (unsigned char *)c);
hashcpy(e->versions[1].oid.hash, (unsigned char *)c);
- c += GIT_SHA1_RAWSZ;
+ c += the_hash_algo->rawsz;
}
free(buf);
}
strbuf_addf(b, "%o %s%c",
(unsigned int)(e->versions[v].mode & ~NO_DELTA),
e->name->str_dat, '\0');
- strbuf_add(b, e->versions[v].oid.hash, GIT_SHA1_RAWSZ);
+ strbuf_add(b, e->versions[v].oid.hash, the_hash_algo->rawsz);
}
}
}
for (;;) {
- const char *p;
-
if (unread_command_buf) {
unread_command_buf = 0;
} else {
rc->prev->next = rc;
cmd_tail = rc;
}
- if (skip_prefix(command_buf.buf, "get-mark ", &p)) {
- parse_get_mark(p);
- continue;
- }
- if (skip_prefix(command_buf.buf, "cat-blob ", &p)) {
- parse_cat_blob(p);
- continue;
- }
if (command_buf.buf[0] == '#')
continue;
return 0;
unsigned int i, tmp_hex_oid_len, tmp_fullpath_len;
uintmax_t num_notes = 0;
struct object_id oid;
- char realpath[60];
+ /* hex oid + '/' between each pair of hex digits + NUL */
+ char realpath[GIT_MAX_HEXSZ + ((GIT_MAX_HEXSZ / 2) - 1) + 1];
+ const unsigned hexsz = the_hash_algo->hexsz;
if (!root->tree)
load_tree(root);
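(With GIT_MAX_HEXSZ at 64, the new realpath buffer is 64 + 31 + 1 = 96 bytes; the old fixed realpath[60] encoded SHA-1's 40 + 19 + 1.)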
* of 2 chars.
*/
if (!e->versions[1].mode ||
- tmp_hex_oid_len > GIT_SHA1_HEXSZ ||
+ tmp_hex_oid_len > hexsz ||
e->name->str_len % 2)
continue;
tmp_fullpath_len += e->name->str_len;
fullpath[tmp_fullpath_len] = '\0';
- if (tmp_hex_oid_len == GIT_SHA1_HEXSZ && !get_oid_hex(hex_oid, &oid)) {
+ if (tmp_hex_oid_len == hexsz && !get_oid_hex(hex_oid, &oid)) {
/* This is a note entry */
if (fanout == 0xff) {
/* Counting mode, no rename */
strbuf_addstr(&uq, p);
p = uq.buf;
}
- read_next_command();
- parse_and_store_blob(&last_blob, &oid, 0);
+ while (read_next_command() != EOF) {
+ const char *v;
+ if (skip_prefix(command_buf.buf, "cat-blob ", &v))
+ parse_cat_blob(v);
+ else {
+ parse_and_store_blob(&last_blob, &oid, 0);
+ break;
+ }
+ }
} else {
enum object_type expected = S_ISDIR(mode) ?
OBJ_TREE: OBJ_BLOB;
struct object_entry *oe;
struct branch *s;
struct object_id oid, commit_oid;
- char path[60];
+ char path[GIT_MAX_RAWSZ * 3];
uint16_t inline_data = 0;
unsigned char new_fanout;
char *buf = read_object_with_reference(&commit_oid,
commit_type, &size,
&commit_oid);
- if (!buf || size < 46)
+ if (!buf || size < the_hash_algo->hexsz + 6)
die("Not a valid commit: %s", p);
free(buf);
} else
static void parse_from_commit(struct branch *b, char *buf, unsigned long size)
{
- if (!buf || size < GIT_SHA1_HEXSZ + 6)
+ if (!buf || size < the_hash_algo->hexsz + 6)
die("Not a valid commit: %s", oid_to_hex(&b->oid));
if (memcmp("tree ", buf, 5)
|| get_oid_hex(buf + 5, &b->branch_tree.versions[1].oid))
char *buf = read_object_with_reference(&n->oid,
commit_type,
&size, &n->oid);
- if (!buf || size < 46)
+ if (!buf || size < the_hash_algo->hexsz + 6)
die("Not a valid commit: %s", from);
free(buf);
} else
file_change_deleteall(b);
else if (skip_prefix(command_buf.buf, "ls ", &v))
parse_ls(v, b);
+ else if (skip_prefix(command_buf.buf, "cat-blob ", &v))
+ parse_cat_blob(v);
else {
unread_command_buf = 1;
break;
die("Unknown mark: %s", command_buf.buf);
xsnprintf(output, sizeof(output), "%s\n", oid_to_hex(&oe->idx.oid));
- cat_blob_write(output, GIT_SHA1_HEXSZ + 1);
+ cat_blob_write(output, the_hash_algo->hexsz + 1);
}
static void parse_cat_blob(const char *p)
{
unsigned long size;
char *buf = NULL;
+ const unsigned hexsz = the_hash_algo->hexsz;
+
if (!oe) {
enum object_type type = oid_object_info(the_repository, oid,
NULL);
/* Peel one layer. */
switch (oe->type) {
case OBJ_TAG:
- if (size < GIT_SHA1_HEXSZ + strlen("object ") ||
+ if (size < hexsz + strlen("object ") ||
get_oid_hex(buf + strlen("object "), oid))
die("Invalid SHA1 in tag: %s", command_buf.buf);
break;
case OBJ_COMMIT:
- if (size < GIT_SHA1_HEXSZ + strlen("tree ") ||
+ if (size < hexsz + strlen("tree ") ||
get_oid_hex(buf + strlen("tree "), oid))
die("Invalid SHA1 in commit: %s", command_buf.buf);
}
return e;
}
- static void print_ls(int mode, const unsigned char *sha1, const char *path)
+ static void print_ls(int mode, const unsigned char *hash, const char *path)
{
static struct strbuf line = STRBUF_INIT;
/* mode SP type SP object_name TAB path LF */
strbuf_reset(&line);
strbuf_addf(&line, "%06o %s %s\t",
- mode & ~NO_DELTA, type, sha1_to_hex(sha1));
+ mode & ~NO_DELTA, type, hash_to_hex(hash));
quote_c_style(path, &line, NULL, 0);
strbuf_addch(&line, '\n');
}
const char *v;
if (!strcmp("blob", command_buf.buf))
parse_new_blob();
- else if (skip_prefix(command_buf.buf, "ls ", &v))
- parse_ls(v, NULL);
else if (skip_prefix(command_buf.buf, "commit ", &v))
parse_new_commit(v);
else if (skip_prefix(command_buf.buf, "tag ", &v))
parse_new_tag(v);
else if (skip_prefix(command_buf.buf, "reset ", &v))
parse_reset_branch(v);
+ else if (skip_prefix(command_buf.buf, "ls ", &v))
+ parse_ls(v, NULL);
+ else if (skip_prefix(command_buf.buf, "cat-blob ", &v))
+ parse_cat_blob(v);
+ else if (skip_prefix(command_buf.buf, "get-mark ", &v))
+ parse_get_mark(v);
else if (!strcmp("checkpoint", command_buf.buf))
parse_checkpoint();
else if (!strcmp("done", command_buf.buf))
process_http_object_request(obj_req->req);
obj_req->state = COMPLETE;
+ normalize_curl_result(&obj_req->req->curl_result,
+ obj_req->req->http_code,
+ obj_req->req->errorstr,
+ sizeof(obj_req->req->errorstr));
+
/* Use alternates if necessary */
if (missing_target(obj_req->req)) {
fetch_alternates(walker, alt->base);
char *data;
int i = 0;
+ normalize_curl_result(&slot->curl_result, slot->http_code,
+ curl_errorstr, sizeof(curl_errorstr));
+
if (alt_req->http_specific) {
if (slot->curl_result != CURLE_OK ||
!alt_req->buffer->len) {
if (walker->get_verbosely) {
fprintf(stderr, "Getting pack %s\n",
- sha1_to_hex(target->sha1));
+ hash_to_hex(target->hash));
fprintf(stderr, " which contains %s\n",
- sha1_to_hex(sha1));
+ hash_to_hex(sha1));
}
preq = new_http_pack_request(target, repo->base);
release_object_request(obj_req);
}
- static int fetch_object(struct walker *walker, unsigned char *sha1)
+ static int fetch_object(struct walker *walker, unsigned char *hash)
{
- char *hex = sha1_to_hex(sha1);
+ char *hex = hash_to_hex(hash);
int ret = 0;
struct object_request *obj_req = NULL;
struct http_object_request *req;
list_for_each(pos, head) {
obj_req = list_entry(pos, struct object_request, node);
- if (hasheq(obj_req->oid.hash, sha1))
+ if (hasheq(obj_req->oid.hash, hash))
break;
}
if (obj_req == NULL)
req->localfile = -1;
}
- /*
- * we turned off CURLOPT_FAILONERROR to avoid losing a
- * persistent connection and got CURLE_OK.
- */
- if (req->http_code >= 300 && req->curl_result == CURLE_OK &&
- (starts_with(req->url, "http://") ||
- starts_with(req->url, "https://"))) {
- req->curl_result = CURLE_HTTP_RETURNED_ERROR;
- xsnprintf(req->errorstr, sizeof(req->errorstr),
- "HTTP request failed");
- }
+ normalize_curl_result(&req->curl_result, req->http_code,
+ req->errorstr, sizeof(req->errorstr));
if (obj_req->state == ABORTED) {
ret = error("Request for %s aborted", hex);
return ret;
}
- static int fetch(struct walker *walker, unsigned char *sha1)
+ static int fetch(struct walker *walker, unsigned char *hash)
{
struct walker_data *data = walker->data;
struct alt_base *altbase = data->alt;
- if (!fetch_object(walker, sha1))
+ if (!fetch_object(walker, hash))
return 0;
while (altbase) {
- if (!http_fetch_pack(walker, altbase, sha1))
+ if (!http_fetch_pack(walker, altbase, hash))
return 0;
fetch_alternates(walker, data->alt->base);
altbase = altbase->next;
}
- return error("Unable to find %s under %s", sha1_to_hex(sha1),
+ return error("Unable to find %s under %s", hash_to_hex(hash),
data->alt->base);
}
return strbuf_detach(&buf, NULL);
}
-static int handle_curl_result(struct slot_results *results)
+void normalize_curl_result(CURLcode *result, long http_code,
+ char *errorstr, size_t errorlen)
{
/*
* If we see a failing http code with CURLE_OK, we have turned off
* Likewise, if we see a redirect (30x code), that means we turned off
* redirect-following, and we should treat the result as an error.
*/
- if (results->curl_result == CURLE_OK &&
- results->http_code >= 300) {
- results->curl_result = CURLE_HTTP_RETURNED_ERROR;
+ if (*result == CURLE_OK && http_code >= 300) {
+ *result = CURLE_HTTP_RETURNED_ERROR;
/*
* Normally curl will already have put the "reason phrase"
* from the server into curl_errorstr; unfortunately without
* FAILONERROR it is lost, so we can give only the numeric
* status code.
*/
- xsnprintf(curl_errorstr, sizeof(curl_errorstr),
+ xsnprintf(errorstr, errorlen,
"The requested URL returned error: %ld",
- results->http_code);
+ http_code);
}
+}
+
+static int handle_curl_result(struct slot_results *results)
+{
+ normalize_curl_result(&results->curl_result, results->http_code,
+ curl_errorstr, sizeof(curl_errorstr));
if (results->curl_result == CURLE_OK) {
credential_approve(&http_auth);
url = quote_ref_url(base, ref->name);
if (http_get_strbuf(url, &buffer, &options) == HTTP_OK) {
strbuf_rtrim(&buffer);
- if (buffer.len == 40)
+ if (buffer.len == the_hash_algo->hexsz)
ret = get_oid_hex(buffer.buf, &ref->old_oid);
else if (starts_with(buffer.buf, "ref: ")) {
ref->symref = xstrdup(buffer.buf + 5);
}
/* Helpers for fetching packs */
- static char *fetch_pack_index(unsigned char *sha1, const char *base_url)
+ static char *fetch_pack_index(unsigned char *hash, const char *base_url)
{
char *url, *tmp;
struct strbuf buf = STRBUF_INIT;
if (http_is_verbose)
- fprintf(stderr, "Getting index for pack %s\n", sha1_to_hex(sha1));
+ fprintf(stderr, "Getting index for pack %s\n", hash_to_hex(hash));
end_url_with_slash(&buf, base_url);
- strbuf_addf(&buf, "objects/pack/pack-%s.idx", sha1_to_hex(sha1));
+ strbuf_addf(&buf, "objects/pack/pack-%s.idx", hash_to_hex(hash));
url = strbuf_detach(&buf, NULL);
- strbuf_addf(&buf, "%s.temp", sha1_pack_index_name(sha1));
+ strbuf_addf(&buf, "%s.temp", sha1_pack_index_name(hash));
tmp = strbuf_detach(&buf, NULL);
if (http_get_file(url, tmp, NULL) != HTTP_OK) {
int http_get_info_packs(const char *base_url, struct packed_git **packs_head)
{
struct http_get_options options = {0};
- int ret = 0, i = 0;
- char *url, *data;
+ int ret = 0;
+ char *url;
+ const char *data;
struct strbuf buf = STRBUF_INIT;
- unsigned char hash[GIT_MAX_RAWSZ];
- const unsigned hexsz = the_hash_algo->hexsz;
+ struct object_id oid;
end_url_with_slash(&buf, base_url);
strbuf_addstr(&buf, "objects/info/packs");
goto cleanup;
data = buf.buf;
- while (i < buf.len) {
- switch (data[i]) {
- case 'P':
- i++;
- if (i + hexsz + 12 <= buf.len &&
- starts_with(data + i, " pack-") &&
- starts_with(data + i + hexsz + 6, ".pack\n")) {
- get_sha1_hex(data + i + 6, hash);
- fetch_and_setup_pack_index(packs_head, hash,
- base_url);
- i += hexsz + 11;
- break;
- }
- default:
- while (i < buf.len && data[i] != '\n')
- i++;
+ while (*data) {
+ if (skip_prefix(data, "P pack-", &data) &&
+ !parse_oid_hex(data, &oid, &data) &&
+ skip_prefix(data, ".pack", &data) &&
+ (*data == '\n' || *data == '\0')) {
+ fetch_and_setup_pack_index(packs_head, oid.hash, base_url);
+ } else {
+ data = strchrnul(data, '\n');
}
- i++;
+ if (*data)
+ data++; /* skip past newline */
}
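The rewritten loop walks the dumb-HTTP objects/info/packs listing with pointer-based parsing instead of byte offsets tied to a fixed hex length; any line that does not match the expected shape is skipped up to the next newline. Each usable line has the form:

P pack-<hex pack checksum>.pack

where the checksum is the_hash_algo->hexsz characters long.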
cleanup:
return -1;
}
- unlink(sha1_pack_index_name(p->sha1));
+ unlink(sha1_pack_index_name(p->hash));
- if (finalize_object_file(preq->tmpfile.buf, sha1_pack_name(p->sha1))
- || finalize_object_file(tmp_idx, sha1_pack_index_name(p->sha1))) {
+ if (finalize_object_file(preq->tmpfile.buf, sha1_pack_name(p->hash))
+ || finalize_object_file(tmp_idx, sha1_pack_index_name(p->hash))) {
free(tmp_idx);
return -1;
}
end_url_with_slash(&buf, base_url);
strbuf_addf(&buf, "objects/pack/pack-%s.pack",
- sha1_to_hex(target->sha1));
+ hash_to_hex(target->hash));
preq->url = strbuf_detach(&buf, NULL);
- strbuf_addf(&preq->tmpfile, "%s.temp", sha1_pack_name(target->sha1));
+ strbuf_addf(&preq->tmpfile, "%s.temp", sha1_pack_name(target->hash));
preq->packfile = fopen(preq->tmpfile.buf, "a");
if (!preq->packfile) {
error("Unable to open local file %s for pack",
if (http_is_verbose)
fprintf(stderr,
"Resuming fetch of pack %s at byte %"PRIuMAX"\n",
- sha1_to_hex(target->sha1), (uintmax_t)prev_posn);
+ hash_to_hex(target->hash),
+ (uintmax_t)prev_posn);
http_opt_request_remainder(preq->slot->curl, prev_posn);
}
freq->stream.next_out = expn;
freq->stream.avail_out = sizeof(expn);
freq->zret = git_inflate(&freq->stream, Z_SYNC_FLUSH);
- git_SHA1_Update(&freq->c, expn,
- sizeof(expn) - freq->stream.avail_out);
+ the_hash_algo->update_fn(&freq->c, expn,
+ sizeof(expn) - freq->stream.avail_out);
} while (freq->stream.avail_in && freq->zret == Z_OK);
return size;
}
git_inflate_init(&freq->stream);
- git_SHA1_Init(&freq->c);
+ the_hash_algo->init_fn(&freq->c);
freq->url = get_remote_object_url(base_url, hex, 0);
if (prev_read == -1) {
memset(&freq->stream, 0, sizeof(freq->stream));
git_inflate_init(&freq->stream);
- git_SHA1_Init(&freq->c);
+ the_hash_algo->init_fn(&freq->c);
if (prev_posn>0) {
prev_posn = 0;
lseek(freq->localfile, 0, SEEK_SET);
}
git_inflate_end(&freq->stream);
- git_SHA1_Final(freq->real_oid.hash, &freq->c);
+ the_hash_algo->final_fn(freq->real_oid.hash, &freq->c);
if (freq->zret != Z_STREAM_END) {
unlink_or_warn(freq->tmpfile.buf);
return -1;
#define missing_target(a) missing__target((a)->http_code, (a)->curl_result)
+/*
+ * Normalize curl results to handle CURL_FAILONERROR (or lack thereof). Failing
+ * http codes have their "result" converted to CURLE_HTTP_RETURNED_ERROR, and
+ * an appropriate string placed in the errorstr buffer (pass curl_errorstr if
+ * you don't have a custom buffer).
+ */
+void normalize_curl_result(CURLcode *result, long http_code, char *errorstr,
+ size_t errorlen);
+
/* Helpers for modifying and creating URLs */
extern void append_remote_object_url(struct strbuf *buf, const char *url,
const char *hex,
long http_code;
struct object_id oid;
struct object_id real_oid;
- git_SHA_CTX c;
+ git_hash_ctx c;
git_zstream stream;
int zret;
int rename;
* commit.
*/
struct stored_bitmap {
- unsigned char sha1[20];
+ struct object_id oid;
struct ewah_bitmap *root;
struct stored_bitmap *xor;
int flags;
struct ewah_bitmap *blobs;
struct ewah_bitmap *tags;
- /* Map from SHA1 -> `stored_bitmap` for all the bitmapped commits */
- khash_sha1 *bitmaps;
+ /* Map from object ID -> `stored_bitmap` for all the bitmapped commits */
+ kh_oid_map_t *bitmaps;
/* Number of bitmapped commits */
uint32_t entry_count;
struct object **objects;
uint32_t *hashes;
uint32_t count, alloc;
- khash_sha1_pos *positions;
+ kh_oid_pos_t *positions;
} ext_index;
/* Bitmap result of the last performed walk */
{
struct bitmap_disk_header *header = (void *)index->map;
- if (index->map_size < sizeof(*header) + 20)
+ if (index->map_size < sizeof(*header) + the_hash_algo->rawsz)
return error("Corrupted bitmap index (missing header data)");
if (memcmp(header->magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)) != 0)
"(Git requires BITMAP_OPT_FULL_DAG)");
if (flags & BITMAP_OPT_HASH_CACHE) {
- unsigned char *end = index->map + index->map_size - 20;
+ unsigned char *end = index->map + index->map_size - the_hash_algo->rawsz;
index->hashes = ((uint32_t *)end) - index->pack->num_objects;
}
}
index->entry_count = ntohl(header->entry_count);
- index->map_pos += sizeof(*header);
+ index->map_pos += sizeof(*header) - GIT_MAX_RAWSZ + the_hash_algo->rawsz;
return 0;
}
static struct stored_bitmap *store_bitmap(struct bitmap_index *index,
struct ewah_bitmap *root,
- const unsigned char *sha1,
+ const unsigned char *hash,
struct stored_bitmap *xor_with,
int flags)
{
stored->root = root;
stored->xor = xor_with;
stored->flags = flags;
- hashcpy(stored->sha1, sha1);
+ oidread(&stored->oid, hash);
- hash_pos = kh_put_sha1(index->bitmaps, stored->sha1, &ret);
+ hash_pos = kh_put_oid_map(index->bitmaps, stored->oid, &ret);
/* a 0 return code means the insertion succeeded with no changes,
* because the SHA1 already existed on the map. this is bad, there
* shouldn't be duplicated commits in the index */
if (ret == 0) {
- error("Duplicate entry in bitmap index: %s", sha1_to_hex(sha1));
+ error("Duplicate entry in bitmap index: %s", hash_to_hex(hash));
return NULL;
}
{
assert(bitmap_git->map);
- bitmap_git->bitmaps = kh_init_sha1();
- bitmap_git->ext_index.positions = kh_init_sha1_pos();
+ bitmap_git->bitmaps = kh_init_oid_map();
+ bitmap_git->ext_index.positions = kh_init_oid_pos();
- load_pack_revindex(bitmap_git->pack);
+ if (load_pack_revindex(bitmap_git->pack))
+ goto failed;
if (!(bitmap_git->commits = read_bitmap_1(bitmap_git)) ||
!(bitmap_git->trees = read_bitmap_1(bitmap_git)) ||
};
static inline int bitmap_position_extended(struct bitmap_index *bitmap_git,
- const unsigned char *sha1)
+ const struct object_id *oid)
{
- khash_sha1_pos *positions = bitmap_git->ext_index.positions;
- khiter_t pos = kh_get_sha1_pos(positions, sha1);
+ kh_oid_pos_t *positions = bitmap_git->ext_index.positions;
+ khiter_t pos = kh_get_oid_pos(positions, *oid);
if (pos < kh_end(positions)) {
int bitmap_pos = kh_value(positions, pos);
}
static inline int bitmap_position_packfile(struct bitmap_index *bitmap_git,
- const unsigned char *sha1)
+ const struct object_id *oid)
{
- off_t offset = find_pack_entry_one(sha1, bitmap_git->pack);
+ off_t offset = find_pack_entry_one(oid->hash, bitmap_git->pack);
if (!offset)
return -1;
}
static int bitmap_position(struct bitmap_index *bitmap_git,
- const unsigned char *sha1)
+ const struct object_id *oid)
{
- int pos = bitmap_position_packfile(bitmap_git, sha1);
- return (pos >= 0) ? pos : bitmap_position_extended(bitmap_git, sha1);
+ int pos = bitmap_position_packfile(bitmap_git, oid);
+ return (pos >= 0) ? pos : bitmap_position_extended(bitmap_git, oid);
}
static int ext_index_add_object(struct bitmap_index *bitmap_git,
int hash_ret;
int bitmap_pos;
- hash_pos = kh_put_sha1_pos(eindex->positions, object->oid.hash, &hash_ret);
+ hash_pos = kh_put_oid_pos(eindex->positions, object->oid, &hash_ret);
if (hash_ret > 0) {
if (eindex->count >= eindex->alloc) {
eindex->alloc = (eindex->alloc + 16) * 3 / 2;
struct bitmap_show_data *data = data_;
int bitmap_pos;
- bitmap_pos = bitmap_position(data->bitmap_git, object->oid.hash);
+ bitmap_pos = bitmap_position(data->bitmap_git, &object->oid);
if (bitmap_pos < 0)
bitmap_pos = ext_index_add_object(data->bitmap_git, object,
static int add_to_include_set(struct bitmap_index *bitmap_git,
struct include_data *data,
- const unsigned char *sha1,
+ const struct object_id *oid,
int bitmap_pos)
{
khiter_t hash_pos;
if (bitmap_get(data->base, bitmap_pos))
return 0;
- hash_pos = kh_get_sha1(bitmap_git->bitmaps, sha1);
+ hash_pos = kh_get_oid_map(bitmap_git->bitmaps, *oid);
if (hash_pos < kh_end(bitmap_git->bitmaps)) {
struct stored_bitmap *st = kh_value(bitmap_git->bitmaps, hash_pos);
bitmap_or_ewah(data->base, lookup_stored_bitmap(st));
struct include_data *data = _data;
int bitmap_pos;
- bitmap_pos = bitmap_position(data->bitmap_git, commit->object.oid.hash);
+ bitmap_pos = bitmap_position(data->bitmap_git, &commit->object.oid);
if (bitmap_pos < 0)
bitmap_pos = ext_index_add_object(data->bitmap_git,
(struct object *)commit,
NULL);
- if (!add_to_include_set(data->bitmap_git, data, commit->object.oid.hash,
+ if (!add_to_include_set(data->bitmap_git, data, &commit->object.oid,
bitmap_pos)) {
struct commit_list *parent = commit->parents;
roots = roots->next;
if (object->type == OBJ_COMMIT) {
- khiter_t pos = kh_get_sha1(bitmap_git->bitmaps, object->oid.hash);
+ khiter_t pos = kh_get_oid_map(bitmap_git->bitmaps, object->oid);
if (pos < kh_end(bitmap_git->bitmaps)) {
struct stored_bitmap *st = kh_value(bitmap_git->bitmaps, pos);
int pos;
roots = roots->next;
- pos = bitmap_position(bitmap_git, object->oid.hash);
+ pos = bitmap_position(bitmap_git, &object->oid);
if (pos < 0 || base == NULL || !bitmap_get(base, pos)) {
object->flags &= ~UNINTERESTING;
fprintf(stderr, "Failed to reuse at %d (%016llx)\n",
reuse_objects, result->words[i]);
- fprintf(stderr, " %s\n", sha1_to_hex(sha1));
+ fprintf(stderr, " %s\n", hash_to_hex(sha1));
}
#endif
struct bitmap_test_data *tdata = data;
int bitmap_pos;
- bitmap_pos = bitmap_position(tdata->bitmap_git, object->oid.hash);
+ bitmap_pos = bitmap_position(tdata->bitmap_git, &object->oid);
if (bitmap_pos < 0)
die("Object not in bitmap: %s\n", oid_to_hex(&object->oid));
int bitmap_pos;
bitmap_pos = bitmap_position(tdata->bitmap_git,
- commit->object.oid.hash);
+ &commit->object.oid);
if (bitmap_pos < 0)
die("Object not in bitmap: %s\n", oid_to_hex(&commit->object.oid));
bitmap_git->version, bitmap_git->entry_count);
root = revs->pending.objects[0].item;
- pos = kh_get_sha1(bitmap_git->bitmaps, root->oid.hash);
+ pos = kh_get_oid_map(bitmap_git->bitmaps, root->oid);
if (pos < kh_end(bitmap_git->bitmaps)) {
struct stored_bitmap *st = kh_value(bitmap_git->bitmaps, pos);
lookup_stored_bitmap(stored),
rebuild)) {
hash_pos = kh_put_sha1(reused_bitmaps,
- stored->sha1,
+ stored->oid.hash,
&hash_ret);
kh_value(reused_bitmaps, hash_pos) =
bitmap_to_ewah(rebuild);
ewah_pool_free(b->trees);
ewah_pool_free(b->blobs);
ewah_pool_free(b->tags);
- kh_destroy_sha1(b->bitmaps);
+ kh_destroy_oid_map(b->bitmaps);
free(b->ext_index.objects);
free(b->ext_index.hashes);
bitmap_free(b->result);
free(b);
}
- int bitmap_has_sha1_in_uninteresting(struct bitmap_index *bitmap_git,
- const unsigned char *sha1)
+ int bitmap_has_oid_in_uninteresting(struct bitmap_index *bitmap_git,
+ const struct object_id *oid)
{
int pos;
if (!bitmap_git->haves)
return 0; /* walk had no "haves" */
- pos = bitmap_position_packfile(bitmap_git, sha1);
+ pos = bitmap_position_packfile(bitmap_git, oid);
if (pos < 0)
return 0;
struct packed_git *p = alloc_packed_git(alloc);
memcpy(p->pack_name, path, alloc); /* includes NUL */
- hashcpy(p->sha1, sha1);
+ hashcpy(p->hash, sha1);
if (check_packed_git_idx(idx_path, p)) {
free(p);
return NULL;
}
}
-static int close_pack_fd(struct packed_git *p)
+int close_pack_fd(struct packed_git *p)
{
if (p->pack_fd < 0)
return 0;
#endif
}
+const char *pack_basename(struct packed_git *p)
+{
+ const char *ret = strrchr(p->pack_name, '/');
+ if (ret)
+ ret = ret + 1; /* skip past slash */
+ else
+ ret = p->pack_name; /* we only have a base */
+ return ret;
+}
+
/*
* Do not call this directly as this leaks p->pack_fd on error return;
* call open_packed_git() instead.
if (!p->index_data) {
struct multi_pack_index *m;
- const char *pack_name = strrchr(p->pack_name, '/');
+ const char *pack_name = pack_basename(p);
for (m = the_repository->objects->multi_pack_index;
m; m = m->next) {
p->pack_local = local;
p->mtime = st.st_mtime;
if (path_len < the_hash_algo->hexsz ||
- get_sha1_hex(path + path_len - the_hash_algo->hexsz, p->sha1))
- hashclr(p->sha1);
+ get_sha1_hex(path + path_len - the_hash_algo->hexsz, p->hash))
+ hashclr(p->hash);
return p;
}
uint32_t i;
int r = 0;
- if (flags & FOR_EACH_OBJECT_PACK_ORDER)
- load_pack_revindex(p);
+ if (flags & FOR_EACH_OBJECT_PACK_ORDER) {
+ if (load_pack_revindex(p))
+ return -1;
+ }
for (i = 0; i < p->num_objects; i++) {
uint32_t pos;
#include "commit.h"
#include "blob.h"
#include "resolve-undo.h"
+#include "run-command.h"
#include "strbuf.h"
#include "varint.h"
#include "split-index.h"
uint32_t uid;
uint32_t gid;
uint32_t size;
- unsigned char sha1[20];
- uint16_t flags;
- char name[FLEX_ARRAY]; /* more */
- };
-
- /*
- * This struct is used when CE_EXTENDED bit is 1
- * The struct must match ondisk_cache_entry exactly from
- * ctime till flags
- */
- struct ondisk_cache_entry_extended {
- struct cache_time ctime;
- struct cache_time mtime;
- uint32_t dev;
- uint32_t ino;
- uint32_t mode;
- uint32_t uid;
- uint32_t gid;
- uint32_t size;
- unsigned char sha1[20];
- uint16_t flags;
- uint16_t flags2;
- char name[FLEX_ARRAY]; /* more */
+ /*
+ * unsigned char hash[hashsz];
+ * uint16_t flags;
+ * if (flags & CE_EXTENDED)
+ * uint16_t flags2;
+ */
+ unsigned char data[GIT_MAX_RAWSZ + 2 * sizeof(uint16_t)];
+ char name[FLEX_ARRAY];
};
/* These are only used for v3 or lower */
#define align_padding_size(size, len) ((size + (len) + 8) & ~7) - (size + len)
- #define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,name) + (len) + 8) & ~7)
+ #define align_flex_name(STRUCT,len) ((offsetof(struct STRUCT,data) + (len) + 8) & ~7)
#define ondisk_cache_entry_size(len) align_flex_name(ondisk_cache_entry,len)
- #define ondisk_cache_entry_extended_size(len) align_flex_name(ondisk_cache_entry_extended,len)
- #define ondisk_ce_size(ce) (((ce)->ce_flags & CE_EXTENDED) ? \
- ondisk_cache_entry_extended_size(ce_namelen(ce)) : \
- ondisk_cache_entry_size(ce_namelen(ce)))
+ #define ondisk_data_size(flags, len) (the_hash_algo->rawsz + \
+ ((flags & CE_EXTENDED) ? 2 : 1) * sizeof(uint16_t) + len)
+ #define ondisk_data_size_max(len) (ondisk_data_size(CE_EXTENDED, len))
+ #define ondisk_ce_size(ce) (ondisk_cache_entry_size(ondisk_data_size((ce)->ce_flags, ce_namelen(ce))))
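As a worked example of ondisk_data_size() before the 8-byte rounding that align_flex_name() applies: a non-extended entry with a 7-character name occupies offsetof(struct ondisk_cache_entry, data) + 20 + 2 + 7 bytes under SHA-1 and offsetof(struct ondisk_cache_entry, data) + 32 + 2 + 7 under SHA-256; an extended entry adds two more bytes for flags2.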
/* Allow fsck to force verification of the index checksum. */
int verify_index_checksum;
struct cache_entry *ce;
size_t len;
const char *name;
+ const unsigned hashsz = the_hash_algo->rawsz;
+ const uint16_t *flagsp = (const uint16_t *)(ondisk->data + hashsz);
unsigned int flags;
size_t copy_len = 0;
/*
int expand_name_field = version == 4;
/* On-disk flags are just 16 bits */
- flags = get_be16(&ondisk->flags);
+ flags = get_be16(flagsp);
len = flags & CE_NAMEMASK;
if (flags & CE_EXTENDED) {
- struct ondisk_cache_entry_extended *ondisk2;
int extended_flags;
- ondisk2 = (struct ondisk_cache_entry_extended *)ondisk;
- extended_flags = get_be16(&ondisk2->flags2) << 16;
+ extended_flags = get_be16(flagsp + 1) << 16;
/* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
if (extended_flags & ~CE_EXTENDED_FLAGS)
die(_("unknown index entry format 0x%08x"), extended_flags);
flags |= extended_flags;
- name = ondisk2->name;
+ name = (const char *)(flagsp + 2);
}
else
- name = ondisk->name;
+ name = (const char *)(flagsp + 1);
if (expand_name_field) {
const unsigned char *cp = (const unsigned char *)name;
ce->ce_flags = flags & ~CE_NAMEMASK;
ce->ce_namelen = len;
ce->index = 0;
- hashcpy(ce->oid.hash, ondisk->sha1);
+ hashcpy(ce->oid.hash, ondisk->data);
+ memcpy(ce->name, name, len);
+ ce->name[len] = '\0';
if (expand_name_field) {
if (copy_len)
struct cache_entry *ce)
{
short flags;
+ const unsigned hashsz = the_hash_algo->rawsz;
+ uint16_t *flagsp = (uint16_t *)(ondisk->data + hashsz);
ondisk->ctime.sec = htonl(ce->ce_stat_data.sd_ctime.sec);
ondisk->mtime.sec = htonl(ce->ce_stat_data.sd_mtime.sec);
ondisk->uid = htonl(ce->ce_stat_data.sd_uid);
ondisk->gid = htonl(ce->ce_stat_data.sd_gid);
ondisk->size = htonl(ce->ce_stat_data.sd_size);
- hashcpy(ondisk->sha1, ce->oid.hash);
+ hashcpy(ondisk->data, ce->oid.hash);
flags = ce->ce_flags & ~CE_NAMEMASK;
flags |= (ce_namelen(ce) >= CE_NAMEMASK ? CE_NAMEMASK : ce_namelen(ce));
- ondisk->flags = htons(flags);
+ flagsp[0] = htons(flags);
if (ce->ce_flags & CE_EXTENDED) {
- struct ondisk_cache_entry_extended *ondisk2;
- ondisk2 = (struct ondisk_cache_entry_extended *)ondisk;
- ondisk2->flags2 = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
+ flagsp[1] = htons((ce->ce_flags & CE_EXTENDED_FLAGS) >> 16);
}
}
stripped_name = 1;
}
- if (ce->ce_flags & CE_EXTENDED)
- size = offsetof(struct ondisk_cache_entry_extended, name);
- else
- size = offsetof(struct ondisk_cache_entry, name);
+ size = offsetof(struct ondisk_cache_entry,data) + ondisk_data_size(ce->ce_flags, 0);
if (!previous_name) {
int len = ce_namelen(ce);
struct cache_entry **cache = istate->cache;
int entries = istate->cache_nr;
struct stat st;
- struct ondisk_cache_entry_extended ondisk;
+ struct ondisk_cache_entry ondisk;
struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
int drop_cache_tree = istate->drop_cache_tree;
off_t offset;
if (ret)
return ret;
if (flags & COMMIT_LOCK)
- return commit_locked_index(lock);
- return close_lock_file_gently(lock);
+ ret = commit_locked_index(lock);
+ else
+ ret = close_lock_file_gently(lock);
+
+ run_hook_le(NULL, "post-index-change",
+ istate->updated_workdir ? "1" : "0",
+ istate->updated_skipworktree ? "1" : "0", NULL);
+ istate->updated_workdir = 0;
+ istate->updated_skipworktree = 0;
+
+ return ret;
}
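The new post-index-change hook receives two arguments: "1" or "0" for whether the write updated the working directory, and "1" or "0" for whether any skip-worktree bits changed; both istate flags are cleared once the hook has run.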
static int write_split_index(struct index_state *istate,
#include "send-pack.h"
#include "protocol.h"
#include "quote.h"
+#include "transport.h"
static struct remote *remote;
/* always ends with a trailing slash */
else {
struct strbuf unquoted = STRBUF_INIT;
if (unquote_c_style(&unquoted, value, NULL) < 0)
- die("invalid quoting in push-option value");
+ die(_("invalid quoting in push-option value: '%s'"), value);
string_list_append_nodup(&options.push_options,
strbuf_detach(&unquoted, NULL));
}
if (data[i] == '\t')
mid = &data[i];
if (data[i] == '\n') {
- if (mid - start != 40)
+ if (mid - start != the_hash_algo->hexsz)
- die("%sinfo/refs not valid: is this a git repository?",
- url.buf);
+ die(_("%sinfo/refs not valid: is this a git repository?"),
+ transport_anonymize_url(url.buf));
data[i] = 0;
ref_name = mid + 1;
ref = alloc_ref(ref_name);
PACKET_READ_CHOMP_NEWLINE |
PACKET_READ_DIE_ON_ERR_PACKET);
if (packet_reader_read(&reader) != PACKET_READ_NORMAL)
- die("invalid server response; expected service, got flush packet");
+ die(_("invalid server response; expected service, got flush packet"));
if (skip_prefix(reader.line, "# service=", &p) && !strcmp(p, service)) {
/*
d->proto_git = 1;
} else {
- die("invalid server response; got '%s'", reader.line);
+ die(_("invalid server response; got '%s'"), reader.line);
}
}
break;
case HTTP_MISSING_TARGET:
show_http_message(&type, &charset, &buffer);
- die("repository '%s' not found", url.buf);
+ die(_("repository '%s' not found"),
+ transport_anonymize_url(url.buf));
case HTTP_NOAUTH:
show_http_message(&type, &charset, &buffer);
- die("Authentication failed for '%s'", url.buf);
+ die(_("Authentication failed for '%s'"),
+ transport_anonymize_url(url.buf));
default:
show_http_message(&type, &charset, &buffer);
- die("unable to access '%s': %s", url.buf, curl_errorstr);
+ die(_("unable to access '%s': %s"),
+ transport_anonymize_url(url.buf), curl_errorstr);
}
- if (options.verbosity && !starts_with(refs_url.buf, url.buf))
- warning(_("redirecting to %s"), url.buf);
+ if (options.verbosity && !starts_with(refs_url.buf, url.buf)) {
+ char *u = transport_anonymize_url(url.buf);
+ warning(_("redirecting to %s"), u);
+ free(u);
+ }
last = xcalloc(1, sizeof(*last_discovery));
last->service = xstrdup(service);
switch (*status) {
case PACKET_READ_EOF:
if (!(options & PACKET_READ_GENTLE_ON_EOF))
- die("shouldn't have EOF when not gentle on EOF");
+ die(_("shouldn't have EOF when not gentle on EOF"));
break;
case PACKET_READ_NORMAL:
set_packet_header(buf - 4, *appended);
rpc->pos = 0;
return CURLIOE_OK;
}
- error("unable to rewind rpc post data - try increasing http.postBuffer");
+ error(_("unable to rewind rpc post data - try increasing http.postBuffer"));
return CURLIOE_FAILRESTART;
default:
strbuf_addstr(&msg, curl_errorstr);
}
}
- error("RPC failed; %s", msg.buf);
+ error(_("RPC failed; %s"), msg.buf);
strbuf_release(&msg);
}
{
uintmax_t size = len;
if (size > maximum_signed_value_of_type(curl_off_t))
- die("cannot handle pushes this big");
+ die(_("cannot handle pushes this big"));
return (curl_off_t)size;
}
ret = git_deflate(&stream, Z_FINISH);
if (ret != Z_STREAM_END)
- die("cannot deflate request; zlib deflate error %d", ret);
+ die(_("cannot deflate request; zlib deflate error %d"), ret);
ret = git_deflate_end_gently(&stream);
if (ret != Z_OK)
- die("cannot deflate request; zlib end error %d", ret);
+ die(_("cannot deflate request; zlib end error %d"), ret);
gzip_size = stream.total_out;
ALLOC_ARRAY(targets, nr_heads);
if (options.depth || options.deepen_since)
- die("dumb http transport does not support shallow capabilities");
+ die(_("dumb http transport does not support shallow capabilities"));
for (i = 0; i < nr_heads; i++)
targets[i] = xstrdup(oid_to_hex(&to_fetch[i]->old_oid));
free(targets[i]);
free(targets);
- return ret ? error("fetch failed.") : 0;
+ return ret ? error(_("fetch failed.")) : 0;
}
static int fetch_git(struct discovery *heads,
for (i = 0; i < nr_heads; i++) {
struct ref *ref = to_fetch[i];
if (!*ref->name)
- die("cannot fetch by sha1 over smart http");
+ die(_("cannot fetch by sha1 over smart http"));
packet_buf_write(&preamble, "%s %s\n",
oid_to_hex(&ref->old_oid), ref->name);
}
const char *name;
struct ref *ref;
struct object_id old_oid;
+ const char *q;
- if (get_oid_hex(p, &old_oid))
+ if (parse_oid_hex(p, &old_oid, &q))
- die("protocol error: expected sha/ref, got %s'", p);
+ die(_("protocol error: expected sha/ref, got %s'"), p);
- if (p[GIT_SHA1_HEXSZ] == ' ')
- name = p + GIT_SHA1_HEXSZ + 1;
- else if (!p[GIT_SHA1_HEXSZ])
+ if (*q == ' ')
+ name = q + 1;
+ else if (!*q)
name = "";
else
- die("protocol error: expected sha/ref, got %s'", p);
+ die(_("protocol error: expected sha/ref, got %s'"), p);
ref = alloc_ref(name);
oidcpy(&ref->old_oid, &old_oid);
to_fetch[nr_heads++] = ref;
}
else
- die("http transport does not support %s", buf->buf);
+ die(_("http transport does not support %s"), buf->buf);
strbuf_reset(buf);
if (strbuf_getline_lf(buf, stdin) == EOF)
argv_array_push(&child.args, specs[i]);
if (run_command(&child))
- die("git-http-push failed");
+ die(_("git-http-push failed"));
return 0;
}
specs[nr_spec++] = xstrdup(buf->buf + 5);
}
else
- die("http transport does not support %s", buf->buf);
+ die(_("http transport does not support %s"), buf->buf);
strbuf_reset(buf);
if (strbuf_getline_lf(buf, stdin) == EOF)
setup_git_directory_gently(&nongit);
if (argc < 2) {
- error("remote-curl: usage: git remote-curl <remote> [<url>]");
+ error(_("remote-curl: usage: git remote-curl <remote> [<url>]"));
return 1;
}
if (strbuf_getline_lf(&buf, stdin) == EOF) {
if (ferror(stdin))
- error("remote-curl: error reading command stream from git");
+ error(_("remote-curl: error reading command stream from git"));
return 1;
}
if (buf.len == 0)
break;
if (starts_with(buf.buf, "fetch ")) {
if (nongit)
- die("remote-curl: fetch attempted without a local repo");
+ die(_("remote-curl: fetch attempted without a local repo"));
parse_fetch(&buf);
} else if (!strcmp(buf.buf, "list") || starts_with(buf.buf, "list ")) {
if (!stateless_connect(arg))
break;
} else {
- error("remote-curl: unknown command '%s' from git", buf.buf);
+ error(_("remote-curl: unknown command '%s' from git"), buf.buf);
return 1;
}
strbuf_reset(&buf);
if (start_command(&cp))
die("Could not run 'git rev-list <commits> --not --remotes -n 1' command in submodule %s",
path);
- if (strbuf_read(&buf, cp.out, 41))
+ if (strbuf_read(&buf, cp.out, the_hash_algo->hexsz + 1))
needs_pushing = 1;
finish_command(&cp);
close(cp.out);
struct oid_array *commits;
if (retvalue)
+ /*
+ * NEEDSWORK: This indicates that the overall fetch
+ * failed, even though there may be a subsequent fetch
+ * by commit hash that might work. It may be a good
+ * idea to not indicate failure in this case, and only
+ * indicate failure if the subsequent fetch fails.
+ */
spf->result = 1;
if (!task || !task->sub)