static unsigned int pack_open_windows;
static size_t peak_pack_mapped;
static size_t pack_mapped;
-static size_t page_size;
struct packed_git *packed_git;
void pack_report()
"pack_report: getpagesize() = %10" SZ_FMT "\n"
"pack_report: core.packedGitWindowSize = %10" SZ_FMT "\n"
"pack_report: core.packedGitLimit = %10" SZ_FMT "\n",
- page_size,
+ (size_t) getpagesize(),
packed_git_window_size,
packed_git_limit);
fprintf(stderr,
void **idx_map_)
{
void *idx_map;
- unsigned int *index;
+ uint32_t *index;
unsigned long idx_size;
int nr, i;
int fd = open(path, O_RDONLY);
}
}
-static void open_packed_git(struct packed_git *p)
+/*
+ * Do not call this directly as this leaks p->pack_fd on error return;
+ * call open_packed_git() instead.
+ */
+static int open_packed_git_1(struct packed_git *p)
{
struct stat st;
struct pack_header hdr;
p->pack_fd = open(p->pack_name, O_RDONLY);
if (p->pack_fd < 0 || fstat(p->pack_fd, &st))
- die("packfile %s cannot be opened", p->pack_name);
+ return -1;
/* If we created the struct before we had the pack we lack size. */
if (!p->pack_size) {
if (!S_ISREG(st.st_mode))
- die("packfile %s not a regular file", p->pack_name);
+ return error("packfile %s not a regular file", p->pack_name);
p->pack_size = st.st_size;
} else if (p->pack_size != st.st_size)
- die("packfile %s size changed", p->pack_name);
+ return error("packfile %s size changed", p->pack_name);
/* We leave these file descriptors open with sliding mmap;
* there is no point keeping them open across exec(), though.
*/
fd_flag = fcntl(p->pack_fd, F_GETFD, 0);
if (fd_flag < 0)
- die("cannot determine file descriptor flags");
+ return error("cannot determine file descriptor flags");
fd_flag |= FD_CLOEXEC;
if (fcntl(p->pack_fd, F_SETFD, fd_flag) == -1)
- die("cannot set FD_CLOEXEC");
+ return error("cannot set FD_CLOEXEC");
/* Verify we recognize this pack file format. */
if (read_in_full(p->pack_fd, &hdr, sizeof(hdr)) != sizeof(hdr))
- die("file %s is far too short to be a packfile", p->pack_name);
+ return error("file %s is far too short to be a packfile", p->pack_name);
if (hdr.hdr_signature != htonl(PACK_SIGNATURE))
- die("file %s is not a GIT packfile", p->pack_name);
+ return error("file %s is not a GIT packfile", p->pack_name);
if (!pack_version_ok(hdr.hdr_version))
- die("packfile %s is version %u and not supported"
+ return error("packfile %s is version %u and not supported"
" (try upgrading GIT to a newer version)",
p->pack_name, ntohl(hdr.hdr_version));
/* Verify the pack matches its index. */
if (num_packed_objects(p) != ntohl(hdr.hdr_entries))
- die("packfile %s claims to have %u objects"
+ return error("packfile %s claims to have %u objects"
" while index size indicates %u objects",
p->pack_name, ntohl(hdr.hdr_entries),
num_packed_objects(p));
if (lseek(p->pack_fd, p->pack_size - sizeof(sha1), SEEK_SET) == -1)
- die("end of packfile %s is unavailable", p->pack_name);
+ return error("end of packfile %s is unavailable", p->pack_name);
if (read_in_full(p->pack_fd, sha1, sizeof(sha1)) != sizeof(sha1))
- die("packfile %s signature is unavailable", p->pack_name);
+ return error("packfile %s signature is unavailable", p->pack_name);
idx_sha1 = ((unsigned char *)p->index_base) + p->index_size - 40;
if (hashcmp(sha1, idx_sha1))
- die("packfile %s does not match index", p->pack_name);
+ return error("packfile %s does not match index", p->pack_name);
+ return 0;
+}
+
+static int open_packed_git(struct packed_git *p)
+{
+ if (!open_packed_git_1(p))
+ return 0;
+ if (p->pack_fd != -1) {
+ close(p->pack_fd);
+ p->pack_fd = -1;
+ }
+ return -1;
}
static int in_window(struct pack_window *win, unsigned long offset)
{
struct pack_window *win = *w_cursor;
- if (p->pack_fd == -1)
- open_packed_git(p);
+ if (p->pack_fd == -1 && open_packed_git(p))
+ die("packfile %s cannot be accessed", p->pack_name);
/* Since packfiles end in a hash of their content and it's
* pointless to ask for an offset into the middle of that
break;
}
if (!win) {
- if (!page_size)
- page_size = getpagesize();
+ size_t window_align = packed_git_window_size / 2;
win = xcalloc(1, sizeof(*win));
- win->offset = (offset / page_size) * page_size;
+ win->offset = (offset / window_align) * window_align;
win->len = p->pack_size - win->offset;
if (win->len > packed_git_window_size)
win->len = packed_git_window_size;
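/*
 * Illustrative arithmetic (not part of the patch; the 32 MiB figure is
 * hypothetical): with packed_git_window_size of 32 MiB, window_align is
 * 16 MiB, so a request at offset 40 MiB maps a window starting at
 * 32 MiB.  Rounding offsets down to half the window size keeps the
 * requested offset in the first half of any full-size window and makes
 * neighbouring windows overlap by half a window.
 */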
if (!has_extension(de->d_name, ".idx"))
continue;
- /* we have .idx. Is it a file we can map? */
+ /* Don't reopen a pack we already have. */
strcpy(path + len, de->d_name);
for (p = packed_git; p; p = p->next) {
if (!memcmp(path, p->pack_name, len + namelen - 4))
}
if (p)
continue;
+ /* See if it really is a valid .idx file with corresponding
+ * .pack file that we can map.
+ */
p = add_packed_git(path, len + namelen, local);
if (!p)
continue;
- p->next = packed_git;
- packed_git = p;
+ install_packed_git(p);
}
closedir(dir);
}
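/*
 * Sketch of the helper used above (the real definition lives elsewhere
 * in sha1_file.c and is assumed here): install_packed_git() takes over
 * the two removed lines, prepending the pack to the global packed_git
 * list so other call sites can share the same logic.
 */
static void install_packed_git_sketch(struct packed_git *pack)
{
	pack->next = packed_git;
	packed_git = pack;
}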
/* use_pack() assures us we have [base, base + 20) available
* as a range that we can look at.  (It's actually the hash
- * size that is assurred.) With our object header encoding
+ * size that is assured.) With our object header encoding
* the maximum deflated object size is 2^137, which is just
* insane, so we know it won't exceed what we have been given.
*/
unsigned long find_pack_entry_one(const unsigned char *sha1,
struct packed_git *p)
{
- unsigned int *level1_ofs = p->index_base;
+ uint32_t *level1_ofs = p->index_base;
int hi = ntohl(level1_ofs[*sha1]);
int lo = ((*sha1 == 0x0) ? 0 : ntohl(level1_ofs[*sha1 - 1]));
void *index = p->index_base + 256;
int mi = (lo + hi) / 2;
int cmp = hashcmp((unsigned char *)index + (24 * mi) + 4, sha1);
if (!cmp)
- return ntohl(*((unsigned int *) ((char *) index + (24 * mi))));
+ return ntohl(*((uint32_t *)((char *)index + (24 * mi))));
if (cmp > 0)
hi = mi;
else
}
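/*
 * Index layout assumed by the lookup above (version 1 pack index): a
 * 256-entry fan-out table of network-order uint32_t counts indexed by
 * the first SHA-1 byte (entry N holds the number of objects whose first
 * byte is <= N, hence the lo/hi bounds), followed by sorted 24-byte
 * records of a 4-byte network-order pack offset plus the 20-byte SHA-1.
 * That is why the binary search compares the hash at
 * index + 24 * mi + 4 and returns the offset stored at index + 24 * mi.
 */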
offset = find_pack_entry_one(sha1, p);
if (offset) {
+ /*
+ * We are about to tell the caller where they can
+ * locate the requested object. We better make
+ * sure the packfile is still here and can be
+ * accessed before supplying that answer, as
+ * it may have been deleted since the index
+ * was loaded!
+ */
+ if (p->pack_fd == -1 && open_packed_git(p)) {
+ error("packfile %s cannot be accessed", p->pack_name);
+ continue;
+ }
e->offset = offset;
e->p = p;
hashcpy(e->sha1, sha1);
{
struct pack_entry e;
- if (!find_pack_entry(sha1, &e, NULL)) {
- error("cannot read sha1_file for %s", sha1_to_hex(sha1));
+ if (!find_pack_entry(sha1, &e, NULL))
return NULL;
+ else
+ return unpack_entry(e.p, e.offset, type, size);
+}
+
+/*
+ * This is meant to hold a *small* number of objects that you would
+ * want read_sha1_file() to be able to return, but yet you do not want
+ * to write them into the object store (e.g. a browse-only
+ * application).
+ */
+static struct cached_object {
+ unsigned char sha1[20];
+ const char *type;
+ void *buf;
+ unsigned long size;
+} *cached_objects;
+static int cached_object_nr, cached_object_alloc;
+
+static struct cached_object *find_cached_object(const unsigned char *sha1)
+{
+ int i;
+ struct cached_object *co = cached_objects;
+
+ for (i = 0; i < cached_object_nr; i++, co++) {
+ if (!hashcmp(co->sha1, sha1))
+ return co;
+ }
+ return NULL;
+}
+
+int pretend_sha1_file(void *buf, unsigned long len, const char *type, unsigned char *sha1)
+{
+ struct cached_object *co;
+
+ hash_sha1_file(buf, len, type, sha1);
+ if (has_sha1_file(sha1) || find_cached_object(sha1))
+ return 0;
+ if (cached_object_alloc <= cached_object_nr) {
+ cached_object_alloc = alloc_nr(cached_object_alloc);
+ cached_objects = xrealloc(cached_objects,
+ sizeof(*cached_objects) *
+ cached_object_alloc);
}
- return unpack_entry(e.p, e.offset, type, size);
+ co = &cached_objects[cached_object_nr++];
+ co->size = len;
+ co->type = strdup(type);
+ co->buf = xmalloc(len);
+ memcpy(co->buf, buf, len);
+ hashcpy(co->sha1, sha1);
+ return 0;
}
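/*
 * Illustrative caller (not part of the patch; the buffer and helper
 * name are hypothetical): a browse-only application can register an
 * in-memory blob and later retrieve it through the normal
 * read_sha1_file() path without writing to the object store.
 */
static void register_virtual_blob_sketch(void)
{
	const char *data = "hello, virtual object\n";
	unsigned char sha1[20];
	char type[20];
	unsigned long size;
	void *buf;

	pretend_sha1_file((void *)data, strlen(data), blob_type, sha1);
	buf = read_sha1_file(sha1, type, &size);	/* returns a copy of data */
	free(buf);
}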
-void * read_sha1_file(const unsigned char *sha1, char *type, unsigned long *size)
+void *read_sha1_file(const unsigned char *sha1, char *type, unsigned long *size)
{
unsigned long mapsize;
void *map, *buf;
- struct pack_entry e;
+ struct cached_object *co;
+
+ co = find_cached_object(sha1);
+ if (co) {
+ buf = xmalloc(co->size + 1);
+ memcpy(buf, co->buf, co->size);
+ ((char*)buf)[co->size] = 0;
+ strcpy(type, co->type);
+ *size = co->size;
+ return buf;
+ }
- if (find_pack_entry(sha1, &e, NULL))
- return read_packed_sha1(sha1, type, size);
+ buf = read_packed_sha1(sha1, type, size);
+ if (buf)
+ return buf;
map = map_sha1_file(sha1, &mapsize);
if (map) {
buf = unpack_sha1_file(map, mapsize, type, size);
return buf;
}
reprepare_packed_git();
- if (find_pack_entry(sha1, &e, NULL))
- return read_packed_sha1(sha1, type, size);
- return NULL;
+ return read_packed_sha1(sha1, type, size);
}
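/*
 * Note on the lookup order implemented above: cached "pretend" objects
 * are consulted first, then packfiles, then loose objects; as a last
 * resort the pack list is rescanned via reprepare_packed_git() in case
 * a pack appeared (for example from a concurrent repack) after the
 * objects directory was first read.
 */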
void *read_object_with_reference(const unsigned char *sha1,
/* need to unpack and recompress it by itself */
unpacked = read_packed_sha1(sha1, type, &len);
+ if (!unpacked)
+ error("cannot read sha1_file for %s", sha1_to_hex(sha1));
hdrlen = sprintf(hdr, "%s %lu", type, len) + 1;
if (!type)
type = blob_type;
+ /* FIXME: CRLF -> LF conversion here for blobs! We'll need the path! */
if (write_object)
ret = write_sha1_file(buf, size, type, sha1);
else
}
return 0;
}
+
+int read_pack_header(int fd, struct pack_header *header)
+{
+ char *c = (char*)header;
+ ssize_t remaining = sizeof(struct pack_header);
+ do {
+ ssize_t r = xread(fd, c, remaining);
+ if (r <= 0)
+ /* "eof before pack header was fully read" */
+ return PH_ERROR_EOF;
+ remaining -= r;
+ c += r;
+ } while (remaining > 0);
+ if (header->hdr_signature != htonl(PACK_SIGNATURE))
+ /* "protocol error (pack signature mismatch detected)" */
+ return PH_ERROR_PACK_SIGNATURE;
+ if (!pack_version_ok(header->hdr_version))
+ /* "protocol error (pack version unsupported)" */
+ return PH_ERROR_PROTOCOL;
+ return 0;
+}
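/*
 * Illustrative caller (hypothetical function; error handling is only a
 * sketch): consumers of a pack stream can validate the header up front
 * and map the PH_ERROR_* codes back to the messages noted above.
 */
static int check_incoming_pack_sketch(int fd)
{
	struct pack_header hdr;

	switch (read_pack_header(fd, &hdr)) {
	case 0:
		return ntohl(hdr.hdr_entries);	/* objects promised by the stream */
	case PH_ERROR_EOF:
		return error("eof before pack header was fully read");
	case PH_ERROR_PACK_SIGNATURE:
		return error("protocol error (pack signature mismatch detected)");
	case PH_ERROR_PROTOCOL:
		return error("protocol error (pack version unsupported)");
	default:
		return error("unexpected pack header result");
	}
}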