return result;
}
-static void fill_sha1_path(char *pathbuf, const unsigned char *sha1)
+static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1)
{
int i;
for (i = 0; i < 20; i++) {
static char hex[] = "0123456789abcdef";
unsigned int val = sha1[i];
- char *pos = pathbuf + i*2 + (i > 0);
- *pos++ = hex[val >> 4];
- *pos = hex[val & 0xf];
+ strbuf_addch(buf, hex[val >> 4]);
+ strbuf_addch(buf, hex[val & 0xf]);
+ if (!i)
+ strbuf_addch(buf, '/');
}
}
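
A standalone illustration, not part of the patch: the layout fill_sha1_path() produces. The first sha1 byte becomes a two-character fan-out directory; the remaining 19 bytes form the file name beneath it.

	/* Illustration only -- not git code. */
	#include <stdio.h>

	static void show_loose_path(const unsigned char sha1[20])
	{
		int i;
		printf("objects/");
		for (i = 0; i < 20; i++) {
			printf("%02x", sha1[i]);
			if (!i)
				putchar('/'); /* fan-out directory separator */
		}
		putchar('\n');
	}

	int main(void)
	{
		const unsigned char sha1[20] = { 0x01, 0x23, 0x45 };
		show_loose_path(sha1); /* prints "objects/01/2345000000..." */
		return 0;
	}
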
const char *sha1_file_name(const unsigned char *sha1)
{
- static char buf[PATH_MAX];
- const char *objdir;
- int len;
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "%s/", get_object_directory());
+
+ fill_sha1_path(&buf, sha1);
+ return buf.buf;
+}
- objdir = get_object_directory();
- len = strlen(objdir);
+struct strbuf *alt_scratch_buf(struct alternate_object_database *alt)
+{
+ strbuf_setlen(&alt->scratch, alt->base_len);
+ return &alt->scratch;
+}
- /* '/' + sha1(2) + '/' + sha1(38) + '\0' */
- if (len + 43 > PATH_MAX)
- die("insanely long object directory %s", objdir);
- memcpy(buf, objdir, len);
- buf[len] = '/';
- buf[len+3] = '/';
- buf[len+42] = '\0';
- fill_sha1_path(buf + len + 1, sha1);
- return buf;
+static const char *alt_sha1_path(struct alternate_object_database *alt,
+ const unsigned char *sha1)
+{
+ struct strbuf *buf = alt_scratch_buf(alt);
+ fill_sha1_path(buf, sha1);
+ return buf->buf;
}
/*
* Prevent the common mistake of listing the same
* thing twice, or object directory itself.
*/
for (alt = alt_odb_list; alt; alt = alt->next) {
- if (path->len == alt->name - alt->base - 1 &&
- !memcmp(path->buf, alt->base, path->len))
+ if (!fspathcmp(path->buf, alt->path))
return 0;
}
if (!fspathcmp(path->buf, normalized_objdir))
}
strbuf_addstr(&pathbuf, entry);
if (strbuf_normalize_path(&pathbuf) < 0) {
error("unable to normalize alternate object path: %s",
pathbuf.buf);
strbuf_release(&pathbuf);
const char *entry = entries.items[i].string;
if (entry[0] == '\0' || entry[0] == '#')
continue;
- if (!is_absolute_path(entry) && depth) {
- error("%s: ignoring relative alternate object store %s",
- relative_base, entry);
- } else {
- link_alt_odb_entry(entry, relative_base, depth, objdirbuf.buf);
- }
+ link_alt_odb_entry(entry, relative_base, depth, objdirbuf.buf);
}
string_list_clear(&entries, 0);
free(alt_copy);
int fd;
path = xstrfmt("%s/info/alternates", relative_base);
- fd = git_open_noatime(path);
+ fd = git_open(path);
free(path);
if (fd < 0)
return;
struct alternate_object_database *alloc_alt_odb(const char *dir)
{
struct alternate_object_database *ent;
- size_t dirlen = strlen(dir);
- size_t entlen;
- entlen = st_add(dirlen, 43); /* '/' + 2 hex + '/' + 38 hex + NUL */
- ent = xmalloc(st_add(sizeof(*ent), entlen));
- memcpy(ent->base, dir, dirlen);
-
- ent->name = ent->base + dirlen + 1;
- ent->base[dirlen] = '/';
- ent->base[dirlen + 3] = '/';
- ent->base[entlen-1] = 0;
+ FLEX_ALLOC_STR(ent, path, dir);
+ strbuf_init(&ent->scratch, 0);
+ strbuf_addf(&ent->scratch, "%s/", dir);
+ ent->base_len = ent->scratch.len;
return ent;
}
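
For reference, the fields used here come from the alternate_object_database declaration in cache.h; after this series it looks roughly like the sketch below (reconstructed, so consult cache.h for the authoritative version). FLEX_ALLOC_STR() allocates the struct with the directory name copied into the trailing flexible array.

	struct alternate_object_database {
		struct alternate_object_database *next;

		/* see alt_scratch_buf() */
		struct strbuf scratch;
		size_t base_len;

		char path[FLEX_ARRAY];
	};
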
struct alternate_object_database *alt;
prepare_alt_odb();
for (alt = alt_odb_list; alt; alt = alt->next) {
- fill_sha1_path(alt->name, sha1);
- if (check_and_freshen_file(alt->base, freshen))
+ const char *path = alt_sha1_path(alt, sha1);
+ if (check_and_freshen_file(path, freshen))
return 1;
}
return 0;
struct pack_idx_header *hdr;
size_t idx_size;
uint32_t version, nr, i, *index;
- int fd = git_open_noatime(path);
+ int fd = git_open(path);
struct stat st;
if (fd < 0)
while (pack_max_fds <= pack_open_fds && close_one_pack())
; /* nothing */
- p->pack_fd = git_open_noatime(p->pack_name);
+ p->pack_fd = git_open(p->pack_name);
if (p->pack_fd < 0 || fstat(p->pack_fd, &st))
return -1;
pack_open_fds++;
strbuf_release(&path);
}
+static int approximate_object_count_valid;
+
+/*
+ * Give a fast, rough count of the number of objects in the repository. This
+ * ignores loose objects completely. If you have a lot of them, then either
+ * you should repack because your performance will be awful, or they are
+ * all unreachable objects about to be pruned, in which case they're not really
+ * interesting as a measure of repo size in the first place.
+ */
+unsigned long approximate_object_count(void)
+{
+ static unsigned long count;
+ if (!approximate_object_count_valid) {
+ struct packed_git *p;
+
+ prepare_packed_git();
+ count = 0;
+ for (p = packed_git; p; p = p->next) {
+ if (open_pack_index(p))
+ continue;
+ count += p->num_objects;
+ }
+ }
+ return count;
+}
+
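
The comment above explains why a pack-only count is good enough. A natural consumer is code that wants to size object-name abbreviations to the repository; here is a minimal sketch of that kind of calculation (illustrative only, with a hypothetical suggested_abbrev_len() helper, not the actual git caller):

	#include <stdio.h>

	/*
	 * Sketch: an abbreviation of `hex` characters distinguishes 16^hex
	 * prefixes, so grow `hex` until that exceeds the rough object count.
	 * Never drop below the historical default of 7.
	 */
	static int suggested_abbrev_len(unsigned long count)
	{
		int hex = 7;
		while (hex < 16 && (count >> (4 * hex)))
			hex++;
		return hex;
	}

	int main(void)
	{
		printf("%d\n", suggested_abbrev_len(5000000UL)); /* prints 7 */
		return 0;
	}
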
static void *get_next_packed_git(const void *p)
{
return ((const struct packed_git *)p)->next;
return;
prepare_packed_git_one(get_object_directory(), 1);
prepare_alt_odb();
- for (alt = alt_odb_list; alt; alt = alt->next) {
- alt->name[-1] = 0;
- prepare_packed_git_one(alt->base, 0);
- alt->name[-1] = '/';
- }
+ for (alt = alt_odb_list; alt; alt = alt->next)
+ prepare_packed_git_one(alt->path, 0);
rearrange_packed_git();
prepare_packed_git_mru();
prepare_packed_git_run_once = 1;
void reprepare_packed_git(void)
{
+ approximate_object_count_valid = 0;
prepare_packed_git_run_once = 0;
prepare_packed_git();
}
return hashcmp(sha1, real_sha1) ? -1 : 0;
}
-int git_open_noatime(const char *name)
+int git_open(const char *name)
{
- static int sha1_file_open_flag = O_NOATIME;
+ static int sha1_file_open_flag = O_NOATIME | O_CLOEXEC;
for (;;) {
int fd;
if (fd >= 0)
return fd;
- /* Might the failure be due to O_NOATIME? */
- if (errno != ENOENT && sha1_file_open_flag) {
- sha1_file_open_flag = 0;
+ /* Try again w/o O_CLOEXEC: the kernel might not support it */
+ if ((sha1_file_open_flag & O_CLOEXEC) && errno == EINVAL) {
+ sha1_file_open_flag &= ~O_CLOEXEC;
continue;
}
+ /* Might the failure be due to O_NOATIME? */
+ if (errno != ENOENT && (sha1_file_open_flag & O_NOATIME)) {
+ sha1_file_open_flag &= ~O_NOATIME;
+ continue;
+ }
return -1;
}
}
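
Why O_CLOEXEC rather than setting the flag after the fact: the two-step version leaves a window in which another thread can fork and leak the descriptor into a child. A sketch of that non-atomic fallback, for comparison (illustrative only, not part of the patch):

	#include <fcntl.h>
	#include <unistd.h>

	/*
	 * Non-atomic equivalent: fine single-threaded, but racy if another
	 * thread forks between open() and fcntl(). O_CLOEXEC closes that window.
	 */
	static int open_cloexec_fallback(const char *path)
	{
		int fd = open(path, O_RDONLY);
		if (fd >= 0 && fcntl(fd, F_SETFD, FD_CLOEXEC) < 0) {
			close(fd);
			return -1;
		}
		return fd;
	}

	int main(void)
	{
		return open_cloexec_fallback("/dev/null") < 0;
	}
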
prepare_alt_odb();
errno = ENOENT;
for (alt = alt_odb_list; alt; alt = alt->next) {
- fill_sha1_path(alt->name, sha1);
- if (!lstat(alt->base, st))
+ const char *path = alt_sha1_path(alt, sha1);
+ if (!lstat(path, st))
return 0;
}
struct alternate_object_database *alt;
int most_interesting_errno;
- fd = git_open_noatime(sha1_file_name(sha1));
+ fd = git_open(sha1_file_name(sha1));
if (fd >= 0)
return fd;
most_interesting_errno = errno;
prepare_alt_odb();
for (alt = alt_odb_list; alt; alt = alt->next) {
- fill_sha1_path(alt->name, sha1);
- fd = git_open_noatime(alt->base);
+ const char *path = alt_sha1_path(alt, sha1);
+ fd = git_open(path);
if (fd >= 0)
return fd;
if (most_interesting_errno == ENOENT)
int parse_sha1_header(const char *hdr, unsigned long *sizep)
{
- struct object_info oi;
+ struct object_info oi = OBJECT_INFO_INIT;
oi.sizep = sizep;
- oi.typename = NULL;
- oi.typep = NULL;
return parse_sha1_header_extended(hdr, &oi, LOOKUP_REPLACE_OBJECT);
}
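
OBJECT_INFO_INIT saves callers from NULL-ing each optional output pointer by hand; it is presumably just a zero initializer for struct object_info, along the lines of:

	#define OBJECT_INFO_INIT {NULL}
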
goto out;
}
-static int packed_object_info(struct packed_git *p, off_t obj_offset,
- struct object_info *oi)
+int packed_object_info(struct packed_git *p, off_t obj_offset,
+ struct object_info *oi)
{
struct pack_window *w_curs = NULL;
unsigned long size;
int sha1_object_info(const unsigned char *sha1, unsigned long *sizep)
{
enum object_type type;
- struct object_info oi = {NULL};
+ struct object_info oi = OBJECT_INFO_INIT;
oi.typep = &type;
oi.sizep = sizep;
return has_sha1_file(oid->hash);
}
+int has_object_file_with_flags(const struct object_id *oid, int flags)
+{
+ return has_sha1_file_with_flags(oid->hash, flags);
+}
+
static void check_tree(const void *buf, size_t size)
{
struct tree_desc desc;
struct strbuf buf = STRBUF_INIT;
int r;
- /* copy base not including trailing '/' */
- strbuf_add(&buf, alt->base, alt->name - alt->base - 1);
+ strbuf_addstr(&buf, alt->path);
r = for_each_loose_file_in_objdir_buf(&buf,
data->cb, NULL, NULL,
data->data);