static void git_hash_unknown_init(git_hash_ctx *ctx)
{
- die("trying to init unknown hash");
+ BUG("trying to init unknown hash");
}
static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
{
- die("trying to update unknown hash");
+ BUG("trying to update unknown hash");
}
static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
{
- die("trying to finalize unknown hash");
+ BUG("trying to finalize unknown hash");
}
const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
/* Detect cases where alternate disappeared */
if (!is_directory(path->buf)) {
- error("object directory %s does not exist; "
- "check .git/objects/info/alternates.",
+ error(_("object directory %s does not exist; "
+ "check .git/objects/info/alternates"),
path->buf);
return 0;
}
strbuf_addstr(&pathbuf, entry);
if (strbuf_normalize_path(&pathbuf) < 0 && relative_base) {
- error("unable to normalize alternate object path: %s",
+ error(_("unable to normalize alternate object path: %s"),
pathbuf.buf);
strbuf_release(&pathbuf);
return -1;
return;
if (depth > 5) {
- error("%s: ignoring alternate object stores, nesting too deep.",
+ error(_("%s: ignoring alternate object stores, nesting too deep"),
relative_base);
return;
}
strbuf_add_absolute_path(&objdirbuf, r->objects->objectdir);
if (strbuf_normalize_path(&objdirbuf) < 0)
- die("unable to normalize object directory: %s",
+ die(_("unable to normalize object directory: %s"),
objdirbuf.buf);
while (*alt) {
hold_lock_file_for_update(&lock, alts, LOCK_DIE_ON_ERROR);
out = fdopen_lock_file(&lock, "w");
if (!out)
- die_errno("unable to fdopen alternates lockfile");
+ die_errno(_("unable to fdopen alternates lockfile"));
in = fopen(alts, "r");
if (in) {
fclose(in);
}
else if (errno != ENOENT)
- die_errno("unable to read alternates file");
+ die_errno(_("unable to read alternates file"));
if (found) {
rollback_lock_file(&lock);
} else {
fprintf_or_die(out, "%s\n", reference);
if (commit_lock_file(&lock))
- die_errno("unable to move new alternates file into place");
+ die_errno(_("unable to move new alternates file into place"));
if (the_repository->objects->alt_odb_tail)
link_alt_odb_entries(the_repository, reference,
'\n', NULL, 0);
limit = SIZE_MAX;
}
if (length > limit)
- die("attempting to mmap %"PRIuMAX" over limit %"PRIuMAX,
+ die(_("attempting to mmap %"PRIuMAX" over limit %"PRIuMAX),
(uintmax_t)length, (uintmax_t)limit);
}
{
void *ret = xmmap_gently(start, length, prot, flags, fd, offset);
if (ret == MAP_FAILED)
- die_errno("mmap failed");
+ die_errno(_("mmap failed"));
return ret;
}
*size = xsize_t(st.st_size);
if (!*size) {
/* mmap() is forbidden on empty files */
- error("object file %s is empty", path);
+ error(_("object file %s is empty"), path);
return NULL;
}
map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
}
if (status < 0)
- error("corrupt loose object '%s'", sha1_to_hex(sha1));
+ error(_("corrupt loose object '%s'"), sha1_to_hex(sha1));
else if (stream->avail_in)
- error("garbage at end of loose object '%s'",
+ error(_("garbage at end of loose object '%s'"),
sha1_to_hex(sha1));
free(buf);
return NULL;
if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE) && (type < 0))
type = 0;
else if (type < 0)
- die("invalid object type");
+ die(_("invalid object type"));
if (oi->typep)
*oi->typep = type;
*oi->disk_sizep = mapsize;
if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE)) {
if (unpack_sha1_header_to_strbuf(&stream, map, mapsize, hdr, sizeof(hdr), &hdrbuf) < 0)
- status = error("unable to unpack %s header with --allow-unknown-type",
+ status = error(_("unable to unpack %s header with --allow-unknown-type"),
sha1_to_hex(sha1));
} else if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
- status = error("unable to unpack %s header",
+ status = error(_("unable to unpack %s header"),
sha1_to_hex(sha1));
if (status < 0)
; /* Do nothing */
else if (hdrbuf.len) {
if ((status = parse_sha1_header_extended(hdrbuf.buf, oi, flags)) < 0)
- status = error("unable to parse %s header with --allow-unknown-type",
+ status = error(_("unable to parse %s header with --allow-unknown-type"),
sha1_to_hex(sha1));
} else if ((status = parse_sha1_header_extended(hdr, oi, flags)) < 0)
- status = error("unable to parse %s header", sha1_to_hex(sha1));
+ status = error(_("unable to parse %s header"), sha1_to_hex(sha1));
if (status >= 0 && oi->contentp) {
*oi->contentp = unpack_sha1_rest(&stream, hdr,
return data;
if (errno && errno != ENOENT)
- die_errno("failed to read object %s", oid_to_hex(oid));
+ die_errno(_("failed to read object %s"), oid_to_hex(oid));
/* die if we replaced an object with one that does not exist */
if (repl != oid)
- die("replacement %s not found for %s",
+ die(_("replacement %s not found for %s"),
oid_to_hex(repl), oid_to_hex(oid));
if (!stat_sha1_file(the_repository, repl->hash, &st, &path))
- die("loose object %s (stored in %s) is corrupt",
+ die(_("loose object %s (stored in %s) is corrupt"),
oid_to_hex(repl), path);
if ((p = has_packed_and_bad(repl->hash)) != NULL)
- die("packed object %s (stored in %s) is corrupt",
+ die(_("packed object %s (stored in %s) is corrupt"),
oid_to_hex(repl), p->pack_name);
return NULL;
unlink_or_warn(tmpfile);
if (ret) {
if (ret != EEXIST) {
- return error_errno("unable to write sha1 filename %s", filename);
+ return error_errno(_("unable to write sha1 filename %s"), filename);
}
/* FIXME!!! Collision check here ? */
}
out:
if (adjust_shared_perm(filename))
- return error("unable to set permission to '%s'", filename);
+ return error(_("unable to set permission to '%s'"), filename);
return 0;
}
static int write_buffer(int fd, const void *buf, size_t len)
{
if (write_in_full(fd, buf, len) < 0)
- return error_errno("file write error");
+ return error_errno(_("file write error"));
return 0;
}
if (fsync_object_files)
fsync_or_die(fd, "sha1 file");
if (close(fd) != 0)
- die_errno("error when closing sha1 file");
+ die_errno(_("error when closing sha1 file"));
}
/* Size of directory component, including the ending '/' */
fd = create_tmpfile(&tmp_file, filename.buf);
if (fd < 0) {
if (errno == EACCES)
- return error("insufficient permission for adding an object to repository database %s", get_object_directory());
+ return error(_("insufficient permission for adding an object to repository database %s"), get_object_directory());
else
- return error_errno("unable to create temporary file");
+ return error_errno(_("unable to create temporary file"));
}
/* Set it up */
ret = git_deflate(&stream, Z_FINISH);
the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
- die("unable to write sha1 file");
+ die(_("unable to write sha1 file"));
stream.next_out = compressed;
stream.avail_out = sizeof(compressed);
} while (ret == Z_OK);
if (ret != Z_STREAM_END)
- die("unable to deflate new object %s (%d)", oid_to_hex(oid),
+ die(_("unable to deflate new object %s (%d)"), oid_to_hex(oid),
ret);
ret = git_deflate_end_gently(&stream);
if (ret != Z_OK)
- die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
+ die(_("deflateEnd on object %s failed (%d)"), oid_to_hex(oid),
ret);
the_hash_algo->final_fn(parano_oid.hash, &c);
if (oidcmp(oid, &parano_oid) != 0)
- die("confused by unstable object source data for %s",
+ die(_("confused by unstable object source data for %s"),
oid_to_hex(oid));
close_sha1_file(fd);
utb.actime = mtime;
utb.modtime = mtime;
if (utime(tmp_file.buf, &utb) < 0)
- warning_errno("failed utime() on %s", tmp_file.buf);
+ warning_errno(_("failed utime() on %s"), tmp_file.buf);
}
return finalize_object_file(tmp_file.buf, filename.buf);
return 0;
buf = read_object(oid->hash, &type, &len);
if (!buf)
- return error("cannot read sha1_file for %s", oid_to_hex(oid));
+ return error(_("cannot read sha1_file for %s"), oid_to_hex(oid));
hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
free(buf);
{
struct commit c;
memset(&c, 0, sizeof(c));
- if (parse_commit_buffer(&c, buf, size, 0))
- die("corrupt commit");
+ if (parse_commit_buffer(the_repository, &c, buf, size, 0))
+ die(_("corrupt commit"));
}
static void check_tag(const void *buf, size_t size)
{
struct tag t;
memset(&t, 0, sizeof(t));
- if (parse_tag_buffer(&t, buf, size))
- die("corrupt tag");
+ if (parse_tag_buffer(the_repository, &t, buf, size))
+ die(_("corrupt tag"));
}
static int index_mem(struct object_id *oid, void *buf, size_t size,
char *buf = xmalloc(size);
ssize_t read_result = read_in_full(fd, buf, size);
if (read_result < 0)
- ret = error_errno("read error while indexing %s",
+ ret = error_errno(_("read error while indexing %s"),
path ? path : "<unknown>");
else if (read_result != size)
- ret = error("short read while indexing %s",
+ ret = error(_("short read while indexing %s"),
path ? path : "<unknown>");
else
ret = index_mem(oid, buf, size, type, path, flags);
if (fd < 0)
return error_errno("open(\"%s\")", path);
if (index_fd(oid, fd, st, OBJ_BLOB, path, flags) < 0)
- return error("%s: failed to insert into database",
+ return error(_("%s: failed to insert into database"),
path);
break;
case S_IFLNK:
if (!(flags & HASH_WRITE_OBJECT))
hash_object_file(sb.buf, sb.len, blob_type, oid);
else if (write_object_file(sb.buf, sb.len, blob_type, oid))
- rc = error("%s: failed to insert into database", path);
+ rc = error(_("%s: failed to insert into database"), path);
strbuf_release(&sb);
break;
case S_IFDIR:
return resolve_gitlink_ref(path, "HEAD", oid);
default:
- return error("%s: unsupported file type", path);
+ return error(_("%s: unsupported file type"), path);
}
return rc;
}
{
enum object_type type = oid_object_info(the_repository, oid, NULL);
if (type < 0)
- die("%s is not a valid object", oid_to_hex(oid));
+ die(_("%s is not a valid object"), oid_to_hex(oid));
if (type != expect)
- die("%s is not a valid '%s' object", oid_to_hex(oid),
+ die(_("%s is not a valid '%s' object"), oid_to_hex(oid),
type_name(expect));
}
dir = opendir(path->buf);
if (!dir) {
if (errno != ENOENT)
- r = error_errno("unable to open %s", path->buf);
+ r = error_errno(_("unable to open %s"), path->buf);
strbuf_setlen(path, origlen);
return r;
}
git_inflate_end(stream);
if (status != Z_STREAM_END) {
- error("corrupt loose object '%s'", sha1_to_hex(expected_sha1));
+ error(_("corrupt loose object '%s'"), sha1_to_hex(expected_sha1));
return -1;
}
if (stream->avail_in) {
- error("garbage at end of loose object '%s'",
+ error(_("garbage at end of loose object '%s'"),
sha1_to_hex(expected_sha1));
return -1;
}
the_hash_algo->final_fn(real_sha1, &c);
if (hashcmp(expected_sha1, real_sha1)) {
- error("sha1 mismatch for %s (expected %s)", path,
+ error(_("sha1 mismatch for %s (expected %s)"), path,
sha1_to_hex(expected_sha1));
return -1;
}
map = map_sha1_file_1(the_repository, path, NULL, &mapsize);
if (!map) {
- error_errno("unable to mmap %s", path);
+ error_errno(_("unable to mmap %s"), path);
goto out;
}
if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) {
- error("unable to unpack header of %s", path);
+ error(_("unable to unpack header of %s"), path);
goto out;
}
*type = parse_sha1_header(hdr, size);
if (*type < 0) {
- error("unable to parse header of %s", path);
+ error(_("unable to parse header of %s"), path);
git_inflate_end(&stream);
goto out;
}
} else {
*contents = unpack_sha1_rest(&stream, hdr, *size, expected_oid->hash);
if (!*contents) {
- error("unable to unpack contents of %s", path);
+ error(_("unable to unpack contents of %s"), path);
git_inflate_end(&stream);
goto out;
}
if (check_object_signature(expected_oid, *contents,
*size, type_name(*type))) {
- error("sha1 mismatch for %s (expected %s)", path,
+ error(_("sha1 mismatch for %s (expected %s)"), path,
oid_to_hex(expected_oid));
free(*contents);
goto out;