return 0;
}
+static inline int offset_1st_component(const char *path)
+{
+ if (has_dos_drive_prefix(path))
+ return 2 + (path[2] == '/');
+ return *path == '/';
+}
+
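For reference, a quick sketch of the values the helper above should produce, assuming has_dos_drive_prefix() recognizes a leading "X:" pair (the paths are illustrative only):

#include <assert.h>

assert(offset_1st_component("/srv/repo.git") == 1);  /* skip the leading '/' */
assert(offset_1st_component("repo.git") == 0);       /* relative path, nothing to skip */
assert(offset_1st_component("C:/repo.git") == 3);    /* drive prefix plus separator */
assert(offset_1st_component("C:repo.git") == 2);     /* drive-relative path */
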
int safe_create_leading_directories(char *path)
{
- char *pos = path;
+ char *pos = path + offset_1st_component(path);
struct stat st;
- if (is_absolute_path(path))
- pos++;
-
while (pos) {
pos = strchr(pos, '/');
if (!pos)
break;
- *pos = 0;
+ while (*++pos == '/')
+ ;
+ if (!*pos)
+ break;
+ *--pos = '\0';
if (!stat(path, &st)) {
/* path exists */
if (!S_ISDIR(st.st_mode)) {
*/
static int link_alt_odb_entry(const char * entry, int len, const char * relative_base, int depth)
{
- struct stat st;
const char *objdir = get_object_directory();
struct alternate_object_database *ent;
struct alternate_object_database *alt;
ent->base[pfxlen] = ent->base[entlen-1] = 0;
/* Detect cases where alternate disappeared */
- if (stat(ent->base, &st) || !S_ISDIR(st.st_mode)) {
+ if (!is_directory(ent->base)) {
error("object directory %s does not exist; "
"check .git/objects/info/alternates.",
ent->base);
void add_to_alternates_file(const char *reference)
{
struct lock_file *lock = xcalloc(1, sizeof(struct lock_file));
- int fd = hold_lock_file_for_append(lock, git_path("objects/info/alternates"), 1);
+ int fd = hold_lock_file_for_append(lock, git_path("objects/info/alternates"), LOCK_DIE_ON_ERROR);
char *alt = mkpath("%s/objects\n", reference);
write_or_die(fd, alt, strlen(alt));
if (commit_lock_file(lock))
link_alt_odb_entries(alt, alt + strlen(alt), '\n', NULL, 0);
}
+void foreach_alt_odb(alt_odb_fn fn, void *cb)
+{
+ struct alternate_object_database *ent;
+
+ prepare_alt_odb();
+ for (ent = alt_odb_list; ent; ent = ent->next)
+ if (fn(ent, cb))
+ return;
+}
+
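A minimal caller sketch, assuming the usual alt_odb_fn typedef, int (*)(struct alternate_object_database *, void *), is declared alongside this function; returning non-zero from the callback stops the walk early:

/* assumes the cache.h declarations are in scope */
static int show_alternate(struct alternate_object_database *ent, void *cb_data)
{
	printf("alternate object directory: %s\n", ent->base);
	return 0;	/* keep going; non-zero would end foreach_alt_odb() early */
}

/* in some caller */
foreach_alt_odb(show_alternate, NULL);
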
void prepare_alt_odb(void)
{
const char *alt;
if (!alt) alt = "";
alt_odb_tail = &alt_odb_list;
- link_alt_odb_entries(alt, alt + strlen(alt), ':', NULL, 0);
+ link_alt_odb_entries(alt, alt + strlen(alt), PATH_SEP, NULL, 0);
read_info_alternates(get_object_directory(), 0);
}
-static int has_loose_object(const unsigned char *sha1)
+static int has_loose_object_local(const unsigned char *sha1)
{
char *name = sha1_file_name(sha1);
- struct alternate_object_database *alt;
+ return !access(name, F_OK);
+}
- if (!access(name, F_OK))
- return 1;
+int has_loose_object_nonlocal(const unsigned char *sha1)
+{
+ struct alternate_object_database *alt;
prepare_alt_odb();
for (alt = alt_odb_list; alt; alt = alt->next) {
- name = alt->name;
- fill_sha1_path(name, sha1);
+ fill_sha1_path(alt->name, sha1);
if (!access(alt->base, F_OK))
return 1;
}
return 0;
}
+static int has_loose_object(const unsigned char *sha1)
+{
+ return has_loose_object_local(sha1) ||
+ has_loose_object_nonlocal(sha1);
+}
+
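The split matters to pack-objects: a loose object that is only reachable through an alternate should not count as local. A rough sketch of how a caller honouring --local might use the new helper (sha1, exclude and local are that caller's own variables, not part of this file):

/* skip loose objects that we only have via an alternate object database */
if (!exclude && local && has_loose_object_nonlocal(sha1))
	return 0;
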
static unsigned int pack_used_ctr;
static unsigned int pack_mmap_calls;
static unsigned int peak_pack_open_windows;
version = ntohl(hdr->idx_version);
if (version < 2 || version > 2) {
munmap(idx_map, idx_size);
- return error("index file %s is version %d"
+ return error("index file %s is version %"PRIu32
" and is not supported by this binary"
" (try upgrading GIT to a newer version)",
path, version);
}
}
+/*
+ * This is used by git-repack in case a newly created pack happens to
+ * contain the same set of objects as an existing one. In that case
+ * the resulting file might be different even if its name would be the
+ * same. It is best to close any reference to the old pack before it is
+ * replaced on disk. Of course no index pointers or windows for the given
+ * pack may remain in use at this point. If ever objects from this pack are requested
+ * again, the new version of the pack will be reinitialized through
+ * reprepare_packed_git().
+ */
+void free_pack_by_name(const char *pack_name)
+{
+ struct packed_git *p, **pp = &packed_git;
+
+ while (*pp) {
+ p = *pp;
+ if (strcmp(pack_name, p->pack_name) == 0) {
+ clear_delta_base_cache();
+ close_pack_windows(p);
+ if (p->pack_fd != -1)
+ close(p->pack_fd);
+ if (p->index_data)
+ munmap((void *)p->index_data, p->index_size);
+ free(p->bad_object_sha1);
+ *pp = p->next;
+ free(p);
+ return;
+ }
+ pp = &p->next;
+ }
+}
+
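A hedged sketch of the intended call site: before repack overwrites an existing pack file in place, it should drop every in-core reference to the old file first. The path construction and the pack_sha1 variable below are illustrative only:

char name[PATH_MAX];

snprintf(name, sizeof(name), "%s/pack/pack-%s.pack",
	 get_object_directory(), sha1_to_hex(pack_sha1));
free_pack_by_name(name);	/* closes the fd, windows and idx mapping */
/* the new pack file can now safely replace the old one on disk */
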
/*
* Do not call this directly as this leaks p->pack_fd on error return;
* call open_packed_git() instead.
if (hdr.hdr_signature != htonl(PACK_SIGNATURE))
return error("file %s is not a GIT packfile", p->pack_name);
if (!pack_version_ok(hdr.hdr_version))
- return error("packfile %s is version %u and not supported"
- " (try upgrading GIT to a newer version)",
+ return error("packfile %s is version %"PRIu32" and not"
+ " supported (try upgrading GIT to a newer version)",
p->pack_name, ntohl(hdr.hdr_version));
/* Verify the pack matches its index. */
if (p->num_objects != ntohl(hdr.hdr_entries))
- return error("packfile %s claims to have %u objects"
- " while index indicates %u objects",
+ return error("packfile %s claims to have %"PRIu32" objects"
+ " while index indicates %"PRIu32" objects",
p->pack_name, ntohl(hdr.hdr_entries),
p->num_objects);
if (lseek(p->pack_fd, p->pack_size - sizeof(sha1), SEEK_SET) == -1)
return win->base + offset;
}
+static struct packed_git *alloc_packed_git(int extra)
+{
+ struct packed_git *p = xmalloc(sizeof(*p) + extra);
+ memset(p, 0, sizeof(*p));
+ p->pack_fd = -1;
+ return p;
+}
+
struct packed_git *add_packed_git(const char *path, int path_len, int local)
{
struct stat st;
- struct packed_git *p = xmalloc(sizeof(*p) + path_len + 2);
+ struct packed_git *p = alloc_packed_git(path_len + 2);
/*
* Make sure a corresponding .pack file exists and that
* the index looks sane.
*/
path_len -= strlen(".idx");
- if (path_len < 1)
+ if (path_len < 1) {
+ free(p);
return NULL;
+ }
memcpy(p->pack_name, path, path_len);
+
+ strcpy(p->pack_name + path_len, ".keep");
+ if (!access(p->pack_name, F_OK))
+ p->pack_keep = 1;
+
strcpy(p->pack_name + path_len, ".pack");
if (stat(p->pack_name, &st) || !S_ISREG(st.st_mode)) {
free(p);
/* ok, it looks sane as far as we can check without
* actually mapping the pack file.
*/
- p->index_version = 0;
- p->index_data = NULL;
- p->index_size = 0;
- p->num_objects = 0;
p->pack_size = st.st_size;
- p->next = NULL;
- p->windows = NULL;
- p->pack_fd = -1;
p->pack_local = local;
p->mtime = st.st_mtime;
if (path_len < 40 || get_sha1_hex(path + path_len - 40, p->sha1))
{
const char *idx_path = sha1_pack_index_name(sha1);
const char *path = sha1_pack_name(sha1);
- struct packed_git *p = xmalloc(sizeof(*p) + strlen(path) + 2);
+ struct packed_git *p = alloc_packed_git(strlen(path) + 1);
+ strcpy(p->pack_name, path);
+ hashcpy(p->sha1, sha1);
if (check_packed_git_idx(idx_path, p)) {
free(p);
return NULL;
}
- strcpy(p->pack_name, path);
- p->pack_size = 0;
- p->next = NULL;
- p->windows = NULL;
- p->pack_fd = -1;
- hashcpy(p->sha1, sha1);
return p;
}
void reprepare_packed_git(void)
{
+ discard_revindex();
prepare_packed_git_run_once = 0;
prepare_packed_git();
}
+static void mark_bad_packed_object(struct packed_git *p,
+ const unsigned char *sha1)
+{
+ unsigned i;
+ for (i = 0; i < p->num_bad_objects; i++)
+ if (!hashcmp(sha1, p->bad_object_sha1 + 20 * i))
+ return;
+ p->bad_object_sha1 = xrealloc(p->bad_object_sha1, 20 * (p->num_bad_objects + 1));
+ hashcpy(p->bad_object_sha1 + 20 * p->num_bad_objects, sha1);
+ p->num_bad_objects++;
+}
+
+static int has_packed_and_bad(const unsigned char *sha1)
+{
+ struct packed_git *p;
+ unsigned i;
+
+ for (p = packed_git; p; p = p->next)
+ for (i = 0; i < p->num_bad_objects; i++)
+ if (!hashcmp(sha1, p->bad_object_sha1 + 20 * i))
+ return 1;
+ return 0;
+}
+
int check_sha1_signature(const unsigned char *sha1, void *map, unsigned long size, const char *type)
{
unsigned char real_sha1[20];
return 0;
}
-unsigned long unpack_object_header_gently(const unsigned char *buf, unsigned long len, enum object_type *type, unsigned long *sizep)
+unsigned long unpack_object_header_buffer(const unsigned char *buf,
+ unsigned long len, enum object_type *type, unsigned long *sizep)
{
unsigned shift;
unsigned char c;
size = c & 15;
shift = 4;
while (c & 0x80) {
- if (len <= used)
- return 0;
- if (sizeof(long) * 8 <= shift)
+ if (len <= used || sizeof(long) * 8 <= shift) {
+ error("bad object header");
return 0;
+ }
c = buf[used++];
size += (c & 0x7f) << shift;
shift += 7;
stream->avail_out = bufsiz;
if (legacy_loose_object(map)) {
- inflateInit(stream);
- return inflate(stream, 0);
+ git_inflate_init(stream);
+ return git_inflate(stream, 0);
}
* really worth it and we don't write it any longer. But we
* can still read it.
*/
- used = unpack_object_header_gently(map, mapsize, &type, &size);
+ used = unpack_object_header_buffer(map, mapsize, &type, &size);
if (!used || !valid_loose_object_type[type])
return -1;
map += used;
/* Set up the stream for the rest.. */
stream->next_in = map;
stream->avail_in = mapsize;
- inflateInit(stream);
+ git_inflate_init(stream);
/* And generate the fake traditional header */
stream->total_out = 1 + snprintf(buffer, bufsiz, "%s %lu",
stream->next_out = buf + bytes;
stream->avail_out = size - bytes;
while (status == Z_OK)
- status = inflate(stream, Z_FINISH);
+ status = git_inflate(stream, Z_FINISH);
}
buf[size] = 0;
if (status == Z_STREAM_END && !stream->avail_in) {
- inflateEnd(stream);
+ git_inflate_end(stream);
return buf;
}
stream.next_out = delta_head;
stream.avail_out = sizeof(delta_head);
- inflateInit(&stream);
+ git_inflate_init(&stream);
do {
in = use_pack(p, w_curs, curpos, &stream.avail_in);
stream.next_in = in;
- st = inflate(&stream, Z_FINISH);
+ st = git_inflate(&stream, Z_FINISH);
curpos += stream.next_in - in;
} while ((st == Z_OK || st == Z_BUF_ERROR) &&
stream.total_out < sizeof(delta_head));
- inflateEnd(&stream);
- if ((st != Z_STREAM_END) && stream.total_out != sizeof(delta_head))
- die("delta data unpack-initial failed");
+ git_inflate_end(&stream);
+ if ((st != Z_STREAM_END) && stream.total_out != sizeof(delta_head)) {
+ error("delta data unpack-initial failed");
+ return 0;
+ }
/* Examine the initial part of the delta to figure out
* the result size.
while (c & 128) {
base_offset += 1;
if (!base_offset || MSB(base_offset, 7))
- die("offset value overflow for delta base object");
+ return 0; /* overflow */
c = base_info[used++];
base_offset = (base_offset << 7) + (c & 127);
}
base_offset = delta_obj_offset - base_offset;
- if (base_offset >= delta_obj_offset)
- die("delta base offset out of bound");
+ if (base_offset <= 0 || base_offset >= delta_obj_offset)
+ return 0; /* out of bound */
*curpos += used;
} else if (type == OBJ_REF_DELTA) {
/* The base entry _must_ be in the same pack */
base_offset = find_pack_entry_one(base_info, p);
- if (!base_offset)
- die("failed to find delta-pack base object %s",
- sha1_to_hex(base_info));
*curpos += 20;
} else
die("I am totally screwed");
off_t base_offset;
base_offset = get_delta_base(p, w_curs, &curpos, type, obj_offset);
+ if (!base_offset)
+ return OBJ_BAD;
type = packed_object_info(p, base_offset, NULL);
+ if (type <= OBJ_NONE) {
+ struct revindex_entry *revidx;
+ const unsigned char *base_sha1;
+ revidx = find_pack_revindex(p, base_offset);
+ if (!revidx)
+ return OBJ_BAD;
+ base_sha1 = nth_packed_object_sha1(p, revidx->nr);
+ mark_bad_packed_object(p, base_sha1);
+ type = sha1_object_info(base_sha1, NULL);
+ if (type <= OBJ_NONE)
+ return OBJ_BAD;
+ }
/* We choose to only get the type of the base object and
* ignore potentially corrupt pack file that expects the delta
* based on a base with a wrong size. This saves tons of
* inflate() calls.
*/
- if (sizep)
+ if (sizep) {
*sizep = get_size_from_delta(p, w_curs, curpos);
+ if (*sizep == 0)
+ type = OBJ_BAD;
+ }
return type;
}
* insane, so we know won't exceed what we have been given.
*/
base = use_pack(p, w_curs, *curpos, &left);
- used = unpack_object_header_gently(base, left, &type, sizep);
- if (!used)
- die("object offset outside of pack file");
- *curpos += used;
+ used = unpack_object_header_buffer(base, left, &type, sizep);
+ if (!used) {
+ type = OBJ_BAD;
+ } else
+ *curpos += used;
return type;
}
return typename(type);
case OBJ_OFS_DELTA:
obj_offset = get_delta_base(p, &w_curs, &curpos, type, obj_offset);
+ if (!obj_offset)
+ die("pack %s contains bad delta base reference of type %s",
+ p->pack_name, typename(type));
if (*delta_chain_length == 0) {
revidx = find_pack_revindex(p, obj_offset);
hashcpy(base_sha1, nth_packed_object_sha1(p, revidx->nr));
*sizep = size;
break;
default:
- die("pack %s contains unknown object type %d",
- p->pack_name, type);
+ error("unknown object type %i at offset %"PRIuMAX" in %s",
+ type, (uintmax_t)obj_offset, p->pack_name);
+ type = OBJ_BAD;
}
unuse_pack(&w_curs);
return type;
stream.next_out = buffer;
stream.avail_out = size;
- inflateInit(&stream);
+ git_inflate_init(&stream);
do {
in = use_pack(p, w_curs, curpos, &stream.avail_in);
stream.next_in = in;
- st = inflate(&stream, Z_FINISH);
+ st = git_inflate(&stream, Z_FINISH);
curpos += stream.next_in - in;
} while (st == Z_OK || st == Z_BUF_ERROR);
- inflateEnd(&stream);
+ git_inflate_end(&stream);
if ((st != Z_STREAM_END) || stream.total_out != size) {
free(buffer);
return NULL;
struct delta_base_cache_entry *ent = delta_base_cache + hash;
ret = ent->data;
- if (ret && ent->p == p && ent->base_offset == base_offset)
- goto found_cache_entry;
- return unpack_entry(p, base_offset, type, base_size);
+ if (!ret || ent->p != p || ent->base_offset != base_offset)
+ return unpack_entry(p, base_offset, type, base_size);
-found_cache_entry:
if (!keep_cache) {
ent->data = NULL;
ent->lru.next->prev = ent->lru.prev;
}
}
+void clear_delta_base_cache(void)
+{
+ unsigned long p;
+ for (p = 0; p < MAX_DELTA_CACHE; p++)
+ release_delta_base_cache(&delta_base_cache[p]);
+}
+
static void add_delta_base_cache(struct packed_git *p, off_t base_offset,
void *base, unsigned long base_size, enum object_type type)
{
off_t base_offset;
base_offset = get_delta_base(p, w_curs, &curpos, *type, obj_offset);
+ if (!base_offset) {
+ error("failed to validate delta base reference "
+ "at offset %"PRIuMAX" from %s",
+ (uintmax_t)curpos, p->pack_name);
+ return NULL;
+ }
unuse_pack(w_curs);
base = cache_or_unpack_entry(p, base_offset, &base_size, type, 0);
- if (!base)
- die("failed to read delta base object"
- " at %"PRIuMAX" from %s",
- (uintmax_t)base_offset, p->pack_name);
+ if (!base) {
+ /*
+ * We're probably in deep shit, but let's try to fetch
+ * the required base anyway from another pack or a loose object.
+ * This is costly but should happen only in the presence
+ * of a corrupted pack, and is better than failing outright.
+ */
+ struct revindex_entry *revidx;
+ const unsigned char *base_sha1;
+ revidx = find_pack_revindex(p, base_offset);
+ if (!revidx)
+ return NULL;
+ base_sha1 = nth_packed_object_sha1(p, revidx->nr);
+ error("failed to read delta base object %s"
+ " at offset %"PRIuMAX" from %s",
+ sha1_to_hex(base_sha1), (uintmax_t)base_offset,
+ p->pack_name);
+ mark_bad_packed_object(p, base_sha1);
+ base = read_object(base_sha1, type, &base_size);
+ if (!base)
+ return NULL;
+ }
delta_data = unpack_compressed_entry(p, w_curs, curpos, delta_size);
- if (!delta_data)
- die("failed to unpack compressed delta"
- " at %"PRIuMAX" from %s",
- (uintmax_t)curpos, p->pack_name);
+ if (!delta_data) {
+ error("failed to unpack compressed delta "
+ "at offset %"PRIuMAX" from %s",
+ (uintmax_t)curpos, p->pack_name);
+ free(base);
+ return NULL;
+ }
result = patch_delta(base, base_size,
delta_data, delta_size,
sizep);
return result;
}
+int do_check_packed_object_crc;
+
void *unpack_entry(struct packed_git *p, off_t obj_offset,
enum object_type *type, unsigned long *sizep)
{
off_t curpos = obj_offset;
void *data;
+ if (do_check_packed_object_crc && p->index_version > 1) {
+ struct revindex_entry *revidx = find_pack_revindex(p, obj_offset);
+ unsigned long len = revidx[1].offset - obj_offset;
+ if (check_pack_crc(p, &w_curs, obj_offset, len, revidx->nr)) {
+ const unsigned char *sha1 =
+ nth_packed_object_sha1(p, revidx->nr);
+ error("bad packed object CRC for %s",
+ sha1_to_hex(sha1));
+ mark_bad_packed_object(p, sha1);
+ unuse_pack(&w_curs);
+ return NULL;
+ }
+ }
+
*type = unpack_object_header(p, &w_curs, &curpos, sizep);
switch (*type) {
case OBJ_OFS_DELTA:
data = unpack_compressed_entry(p, &w_curs, curpos, *sizep);
break;
default:
- die("unknown object type %i in %s", *type, p->pack_name);
+ data = NULL;
+ error("unknown object type %i at offset %"PRIuMAX" in %s",
+ *type, (uintmax_t)obj_offset, p->pack_name);
}
unuse_pack(&w_curs);
return data;
}
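The CRC check above is off by default; a verifying caller (an fsck-style pass) would flip the flag before reading objects. A sketch, with sha1, type and size coming from the caller's own object walk:

do_check_packed_object_crc = 1;	/* have unpack_entry() verify v2 idx CRCs */
data = read_sha1_file(sha1, &type, &size);
/* on a CRC mismatch the entry is reported, marked bad, and another
 * copy (loose or in a different pack) is tried before giving up */
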
}
-static off_t nth_packed_object_offset(const struct packed_git *p, uint32_t n)
+off_t nth_packed_object_offset(const struct packed_git *p, uint32_t n)
{
const unsigned char *index = p->index_data;
index += 4 * 256;
}
if (debug_lookup)
- printf("%02x%02x%02x... lo %u hi %u nr %u\n",
+ printf("%02x%02x%02x... lo %u hi %u nr %"PRIu32"\n",
sha1[0], sha1[1], sha1[2], lo, hi, p->num_objects);
if (use_lookup < 0)
return 0;
}
-int matches_pack_name(struct packed_git *p, const char *name)
-{
- const char *last_c, *c;
-
- if (!strcmp(p->pack_name, name))
- return 1;
-
- for (c = p->pack_name, last_c = c; *c;)
- if (*c == '/')
- last_c = ++c;
- else
- ++c;
- if (!strcmp(last_c, name))
- return 1;
-
- return 0;
-}
-
-static int find_pack_entry(const unsigned char *sha1, struct pack_entry *e, const char **ignore_packed)
+static int find_pack_entry(const unsigned char *sha1, struct pack_entry *e)
{
static struct packed_git *last_found = (void *)1;
struct packed_git *p;
p = (last_found == (void *)1) ? packed_git : last_found;
do {
- if (ignore_packed) {
- const char **ig;
- for (ig = ignore_packed; *ig; ig++)
- if (matches_pack_name(p, *ig))
- break;
- if (*ig)
- goto next;
+ if (p->num_bad_objects) {
+ unsigned i;
+ for (i = 0; i < p->num_bad_objects; i++)
+ if (!hashcmp(sha1, p->bad_object_sha1 + 20 * i))
+ goto next;
}
offset = find_pack_entry_one(sha1, p);
status = error("unable to parse %s header", sha1_to_hex(sha1));
else if (sizep)
*sizep = size;
- inflateEnd(&stream);
+ git_inflate_end(&stream);
munmap(map, mapsize);
return status;
}
int sha1_object_info(const unsigned char *sha1, unsigned long *sizep)
{
struct pack_entry e;
+ int status;
- if (!find_pack_entry(sha1, &e, NULL)) {
+ if (!find_pack_entry(sha1, &e)) {
+ /* Most likely it's a loose object. */
+ status = sha1_loose_object_info(sha1, sizep);
+ if (status >= 0)
+ return status;
+
+ /* Not a loose object; someone else may have just packed it. */
reprepare_packed_git();
- if (!find_pack_entry(sha1, &e, NULL))
- return sha1_loose_object_info(sha1, sizep);
+ if (!find_pack_entry(sha1, &e))
+ return status;
+ }
+
+ status = packed_object_info(e.p, e.offset, sizep);
+ if (status < 0) {
+ mark_bad_packed_object(e.p, sha1);
+ status = sha1_object_info(sha1, sizep);
}
- return packed_object_info(e.p, e.offset, sizep);
+
+ return status;
}
static void *read_packed_sha1(const unsigned char *sha1,
enum object_type *type, unsigned long *size)
{
struct pack_entry e;
+ void *data;
- if (!find_pack_entry(sha1, &e, NULL))
+ if (!find_pack_entry(sha1, &e))
return NULL;
- else
- return cache_or_unpack_entry(e.p, e.offset, size, type, 1);
+ data = cache_or_unpack_entry(e.p, e.offset, size, type, 1);
+ if (!data) {
+ /*
+ * We're probably in deep shit, but let's try to fetch
+	 * the required object anyway from another pack or a loose object.
+ * This should happen only in the presence of a corrupted
+ * pack, and is better than failing outright.
+ */
+ error("failed to read object %s at offset %"PRIuMAX" from %s",
+ sha1_to_hex(sha1), (uintmax_t)e.offset, e.p->pack_name);
+ mark_bad_packed_object(e.p, sha1);
+ data = read_object(sha1, type, size);
+ }
+ return data;
}
/*
static int cached_object_nr, cached_object_alloc;
static struct cached_object empty_tree = {
- /* empty tree sha1: 4b825dc642cb6eb9a060e54bf8d69288fbee4904 */
- "\x4b\x82\x5d\xc6\x42\xcb\x6e\xb9\xa0\x60"
- "\xe5\x4b\xf8\xd6\x92\x88\xfb\xee\x49\x04",
+ EMPTY_TREE_SHA1_BIN,
OBJ_TREE,
"",
0
return 0;
}
-void *read_sha1_file(const unsigned char *sha1, enum object_type *type,
- unsigned long *size)
+void *read_object(const unsigned char *sha1, enum object_type *type,
+ unsigned long *size)
{
unsigned long mapsize;
void *map, *buf;
return read_packed_sha1(sha1, type, size);
}
+void *read_sha1_file(const unsigned char *sha1, enum object_type *type,
+ unsigned long *size)
+{
+ void *data = read_object(sha1, type, size);
+ /* legacy behavior is to die on corrupted objects */
+ if (!data && (has_loose_object(sha1) || has_packed_and_bad(sha1)))
+ die("object %s is corrupted", sha1_to_hex(sha1));
+ return data;
+}
+
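With the wrapper above, existing callers keep the historical die-on-corruption behaviour of read_sha1_file(), while code that wants to degrade gracefully can call read_object() and handle a NULL return itself. A minimal sketch:

enum object_type type;
unsigned long size;
void *buf = read_object(sha1, &type, &size);

if (!buf) {
	/* recoverable path: report and continue instead of dying */
	return error("unable to read object %s", sha1_to_hex(sha1));
}
/* ... use buf ... */
free(buf);
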
void *read_object_with_reference(const unsigned char *sha1,
const char *required_type_name,
unsigned long *size,
const char *type, unsigned char *sha1,
char *hdr, int *hdrlen)
{
- SHA_CTX c;
+ git_SHA_CTX c;
/* Generate the header */
*hdrlen = sprintf(hdr, "%s %lu", type, len)+1;
/* Sha1.. */
- SHA1_Init(&c);
- SHA1_Update(&c, hdr, *hdrlen);
- SHA1_Update(&c, buf, len);
- SHA1_Final(sha1, &c);
+ git_SHA1_Init(&c);
+ git_SHA1_Update(&c, hdr, *hdrlen);
+ git_SHA1_Update(&c, buf, len);
+ git_SHA1_Final(sha1, &c);
}
/*
*/
int move_temp_to_file(const char *tmpfile, const char *filename)
{
- int ret = link(tmpfile, filename);
+ int ret = 0;
+ if (link(tmpfile, filename))
+ ret = errno;
/*
* Coda hack - coda doesn't like cross-directory links,
fsync_or_die(fd, "sha1 file");
fchmod(fd, 0444);
if (close(fd) != 0)
- die("unable to write sha1 file");
+ die("error when closing sha1 file (%s)", strerror(errno));
}
/* Size of directory component, including the ending '/' */
memcpy(buffer, filename, dirlen);
strcpy(buffer + dirlen, "tmp_obj_XXXXXX");
fd = mkstemp(buffer);
- if (fd < 0 && dirlen) {
+ if (fd < 0 && dirlen && errno == ENOENT) {
/* Make sure the directory exists */
memcpy(buffer, filename, dirlen);
buffer[dirlen-1] = 0;
static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
void *buf, unsigned long len, time_t mtime)
{
- int fd, size, ret;
+ int fd, ret;
+ size_t size;
unsigned char *compressed;
z_stream stream;
char *filename;
filename = sha1_file_name(sha1);
fd = create_tmpfile(tmpfile, sizeof(tmpfile), filename);
if (fd < 0) {
- if (errno == EPERM)
+ if (errno == EACCES)
return error("insufficient permission for adding an object to repository database %s\n", get_object_directory());
else
return error("unable to create temporary sha1 filename %s: %s\n", tmpfile, strerror(errno));
enum object_type type;
char hdr[32];
int hdrlen;
+ int ret;
if (has_loose_object(sha1))
return 0;
if (!buf)
return error("cannot read sha1_file for %s", sha1_to_hex(sha1));
hdrlen = sprintf(hdr, "%s %lu", typename(type), len) + 1;
- return write_loose_object(sha1, hdr, hdrlen, buf, len, mtime);
+ ret = write_loose_object(sha1, hdr, hdrlen, buf, len, mtime);
+ free(buf);
+
+ return ret;
}
int has_pack_index(const unsigned char *sha1)
return 1;
}
-int has_sha1_pack(const unsigned char *sha1, const char **ignore_packed)
+int has_sha1_pack(const unsigned char *sha1)
{
struct pack_entry e;
- return find_pack_entry(sha1, &e, ignore_packed);
+ return find_pack_entry(sha1, &e);
}
int has_sha1_file(const unsigned char *sha1)
{
struct pack_entry e;
- if (find_pack_entry(sha1, &e, NULL))
+ if (find_pack_entry(sha1, &e))
return 1;
return has_loose_object(sha1);
}
-int index_pipe(unsigned char *sha1, int fd, const char *type, int write_object)
+static int index_mem(unsigned char *sha1, void *buf, size_t size,
+ int write_object, enum object_type type, const char *path)
{
- struct strbuf buf;
- int ret;
-
- strbuf_init(&buf, 0);
- if (strbuf_read(&buf, fd, 4096) < 0) {
- strbuf_release(&buf);
- return -1;
- }
-
- if (!type)
- type = blob_type;
- if (write_object)
- ret = write_sha1_file(buf.buf, buf.len, type, sha1);
- else
- ret = hash_sha1_file(buf.buf, buf.len, type, sha1);
- strbuf_release(&buf);
-
- return ret;
-}
-
-int index_fd(unsigned char *sha1, int fd, struct stat *st, int write_object,
- enum object_type type, const char *path)
-{
- size_t size = xsize_t(st->st_size);
- void *buf = NULL;
int ret, re_allocated = 0;
- if (size)
- buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
- close(fd);
-
if (!type)
type = OBJ_BLOB;
/*
* Convert blobs to git internal format
*/
- if ((type == OBJ_BLOB) && S_ISREG(st->st_mode)) {
- struct strbuf nbuf;
- strbuf_init(&nbuf, 0);
+ if ((type == OBJ_BLOB) && path) {
+ struct strbuf nbuf = STRBUF_INIT;
if (convert_to_git(path, buf, size, &nbuf,
write_object ? safe_crlf : 0)) {
- munmap(buf, size);
buf = strbuf_detach(&nbuf, &size);
re_allocated = 1;
}
ret = write_sha1_file(buf, size, typename(type), sha1);
else
ret = hash_sha1_file(buf, size, typename(type), sha1);
- if (re_allocated) {
+ if (re_allocated)
free(buf);
- return ret;
- }
- if (size)
+ return ret;
+}
+
+int index_fd(unsigned char *sha1, int fd, struct stat *st, int write_object,
+ enum object_type type, const char *path)
+{
+ int ret;
+ size_t size = xsize_t(st->st_size);
+
+ if (!S_ISREG(st->st_mode)) {
+ struct strbuf sbuf = STRBUF_INIT;
+ if (strbuf_read(&sbuf, fd, 4096) >= 0)
+ ret = index_mem(sha1, sbuf.buf, sbuf.len, write_object,
+ type, path);
+ else
+ ret = -1;
+ strbuf_release(&sbuf);
+ } else if (size) {
+ void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+ ret = index_mem(sha1, buf, size, write_object, type, path);
munmap(buf, size);
+ } else
+ ret = index_mem(sha1, NULL, size, write_object, type, path);
+ close(fd);
return ret;
}
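Since index_fd() now falls back to strbuf_read() for anything that is not a regular file, the removed index_pipe() entry point is no longer needed; data arriving on a pipe goes through the same function. A sketch of hashing stdin this way (the error messages are illustrative):

struct stat st;
unsigned char sha1[20];

if (fstat(0, &st))
	return error("unable to fstat stdin: %s", strerror(errno));
/* a pipe is not S_ISREG, so index_fd() takes the strbuf_read() path */
if (index_fd(sha1, 0, &st, 0, OBJ_BLOB, NULL))
	return error("unable to hash stdin");
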
int index_path(unsigned char *sha1, const char *path, struct stat *st, int write_object)
{
int fd;
- char *target;
- size_t len;
+ struct strbuf sb = STRBUF_INIT;
switch (st->st_mode & S_IFMT) {
case S_IFREG:
path);
break;
case S_IFLNK:
- len = xsize_t(st->st_size);
- target = xmalloc(len + 1);
- if (readlink(path, target, len + 1) != st->st_size) {
+ if (strbuf_readlink(&sb, path, st->st_size)) {
char *errstr = strerror(errno);
- free(target);
return error("readlink(\"%s\"): %s", path,
errstr);
}
if (!write_object)
- hash_sha1_file(target, len, blob_type, sha1);
- else if (write_sha1_file(target, len, blob_type, sha1))
+ hash_sha1_file(sb.buf, sb.len, blob_type, sha1);
+ else if (write_sha1_file(sb.buf, sb.len, blob_type, sha1))
return error("%s: failed to insert into database",
path);
- free(target);
+ strbuf_release(&sb);
break;
case S_IFDIR:
return resolve_gitlink_ref(path, "HEAD", sha1);