#include "list-objects.h"
#include "progress.h"
#include "refs.h"
-
-#ifndef NO_PTHREADS
#include "thread-utils.h"
-#include <pthread.h>
-#endif
static const char pack_usage[] =
- "git pack-objects [{ -q | --progress | --all-progress }]\n"
+ "git pack-objects [ -q | --progress | --all-progress ]\n"
" [--all-progress-implied]\n"
- " [--max-pack-size=N] [--local] [--incremental]\n"
- " [--window=N] [--window-memory=N] [--depth=N]\n"
+ " [--max-pack-size=<n>] [--local] [--incremental]\n"
+ " [--window=<n>] [--window-memory=<n>] [--depth=<n>]\n"
" [--no-reuse-delta] [--no-reuse-object] [--delta-base-offset]\n"
- " [--threads=N] [--non-empty] [--revs [--unpacked | --all]*]\n"
+ " [--threads=<n>] [--non-empty] [--revs [--unpacked | --all]]\n"
" [--reflog] [--stdout | base-name] [--include-tag]\n"
- " [--keep-unreachable | --unpack-unreachable \n"
- " [<ref-list | <object-list]";
+ " [--keep-unreachable | --unpack-unreachable]\n"
+ " [< ref-list | < object-list]";
struct object_entry {
struct pack_idx_entry idx;
* objects against.
*/
unsigned char no_try_delta;
+ unsigned char tagged; /* near the very tip of refs */
+ unsigned char filled; /* assigned write-order */
};
/*
static int incremental;
static int ignore_packed_keep;
static int allow_ofs_delta;
+static struct pack_idx_option pack_idx_opts;
static const char *base_name;
static int progress = 1;
static int window = 10;
*/
static int *object_ix;
static int object_ix_hashsz;
+static struct object_entry *locate_object_entry(const unsigned char *sha1);
/*
* stats
static unsigned long do_compress(void **pptr, unsigned long size)
{
- z_stream stream;
+ git_zstream stream;
void *in, *out;
unsigned long maxsize;
memset(&stream, 0, sizeof(stream));
- deflateInit(&stream, pack_compression_level);
- maxsize = deflateBound(&stream, size);
+ git_deflate_init(&stream, pack_compression_level);
+ maxsize = git_deflate_bound(&stream, size);
in = *pptr;
out = xmalloc(maxsize);
stream.avail_in = size;
stream.next_out = out;
stream.avail_out = maxsize;
- while (deflate(&stream, Z_FINISH) == Z_OK)
+ while (git_deflate(&stream, Z_FINISH) == Z_OK)
; /* nothing */
- deflateEnd(&stream);
+ git_deflate_end(&stream);
free(in);
return stream.total_out;
}
-/*
- * The per-object header is a pretty dense thing, which is
- * - first byte: low four bits are "size", then three bits of "type",
- * and the high bit is "size continues".
- * - each byte afterwards: low seven bits are size continuation,
- * with the high bit being "size continues"
- */
-static int encode_header(enum object_type type, unsigned long size, unsigned char *hdr)
-{
- int n = 1;
- unsigned char c;
-
- if (type < OBJ_COMMIT || type > OBJ_REF_DELTA)
- die("bad type %d", type);
-
- c = (type << 4) | (size & 15);
- size >>= 4;
- while (size) {
- *hdr++ = c | 0x80;
- c = size & 0x7f;
- size >>= 7;
- n++;
- }
- *hdr = c;
- return n;
-}
-
/*
* we are going to reuse the existing object data as is. make
* sure it is not corrupt.
off_t len,
unsigned long expect)
{
- z_stream stream;
+ git_zstream stream;
unsigned char fakebuf[4096], *in;
int st;
off_t len)
{
unsigned char *in;
- unsigned int avail;
+ unsigned long avail;
while (len) {
in = use_pack(p, w_curs, offset, &avail);
if (avail > len)
- avail = (unsigned int)len;
+ avail = (unsigned long)len;
sha1write(f, in, avail);
offset += avail;
len -= avail;
}
}
+/* Return 0 if we will bust the pack-size limit */
static unsigned long write_object(struct sha1file *f,
struct object_entry *entry,
off_t write_offset)
* The object header is a byte of 'type' followed by zero or
* more bytes of length.
*/
- hdrlen = encode_header(type, size, header);
+ hdrlen = encode_in_pack_object_header(type, size, header);
if (type == OBJ_OFS_DELTA) {
/*
if (entry->delta)
type = (allow_ofs_delta && entry->delta->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
- hdrlen = encode_header(type, entry->size, header);
+ hdrlen = encode_in_pack_object_header(type, entry->size, header);
offset = entry->in_pack_offset;
revidx = find_pack_revindex(p, offset);
return hdrlen + datalen;
}
-static int write_one(struct sha1file *f,
- struct object_entry *e,
- off_t *offset)
+enum write_one_status {
+ WRITE_ONE_SKIP = -1, /* already written */
+ WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
+ WRITE_ONE_WRITTEN = 1, /* normal */
+ WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
+};
+
+static enum write_one_status write_one(struct sha1file *f,
+ struct object_entry *e,
+ off_t *offset)
{
unsigned long size;
+ int recursing;
- /* offset is non zero if object is written already. */
- if (e->idx.offset || e->preferred_base)
- return -1;
+ /*
+ * we set offset to 1 (which is an impossible value) to mark
+ * the fact that this object is involved in "write its base
+ * first before writing a deltified object" recursion.
+ */
+ recursing = (e->idx.offset == 1);
+ if (recursing) {
+ warning("recursive delta detected for object %s",
+ sha1_to_hex(e->idx.sha1));
+ return WRITE_ONE_RECURSIVE;
+ } else if (e->idx.offset || e->preferred_base) {
+ /* offset is non-zero if the object has already been written. */
+ return WRITE_ONE_SKIP;
+ }
/* if we are deltified, write out base object first. */
- if (e->delta && !write_one(f, e->delta, offset))
- return 0;
+ if (e->delta) {
+ e->idx.offset = 1; /* now recurse */
+ switch (write_one(f, e->delta, offset)) {
+ case WRITE_ONE_RECURSIVE:
+ /* we cannot depend on this one */
+ e->delta = NULL;
+ break;
+ default:
+ break;
+ case WRITE_ONE_BREAK:
+ e->idx.offset = recursing;
+ return WRITE_ONE_BREAK;
+ }
+ }
e->idx.offset = *offset;
size = write_object(f, e, *offset);
if (!size) {
- e->idx.offset = 0;
- return 0;
+ e->idx.offset = recursing;
+ return WRITE_ONE_BREAK;
}
written_list[nr_written++] = &e->idx;
/* make sure off_t is sufficiently large not to wrap */
- if (*offset > *offset + size)
+ if (signed_add_overflows(*offset, size))
die("pack too large for current definition of off_t");
*offset += size;
- return 1;
+ return WRITE_ONE_WRITTEN;
}
-/* forward declaration for write_pack_file */
-static int adjust_perm(const char *path, mode_t mode);
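+/*
+ * Mark the object a tag ref points at (and, for an annotated tag, the
+ * object it peels to) so compute_write_order() can group objects at
+ * tagged tips together.
+ */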
+static int mark_tagged(const char *path, const unsigned char *sha1, int flag,
+ void *cb_data)
+{
+ unsigned char peeled[20];
+ struct object_entry *entry = locate_object_entry(sha1);
+
+ if (entry)
+ entry->tagged = 1;
+ if (!peel_ref(path, peeled)) {
+ entry = locate_object_entry(peeled);
+ if (entry)
+ entry->tagged = 1;
+ }
+ return 0;
+}
+
+static inline void add_to_write_order(struct object_entry **wo,
+ unsigned int *endp,
+ struct object_entry *e)
+{
+ if (e->filled)
+ return;
+ wo[(*endp)++] = e;
+ e->filled = 1;
+}
+
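+/*
+ * Add "e", its delta siblings, and all of their descendants to the
+ * write order using an iterative pre-order walk of the delta tree
+ * (via the delta_child/delta_sibling links); the "filled" bit in
+ * add_to_write_order() keeps any entry from being added twice.
+ */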
+static void add_descendants_to_write_order(struct object_entry **wo,
+ unsigned int *endp,
+ struct object_entry *e)
+{
+ int add_to_order = 1;
+ while (e) {
+ if (add_to_order) {
+ struct object_entry *s;
+ /* add this node... */
+ add_to_write_order(wo, endp, e);
+ /* all its siblings... */
+ for (s = e->delta_sibling; s; s = s->delta_sibling) {
+ add_to_write_order(wo, endp, s);
+ }
+ }
+ /* drop down a level to add left subtree nodes if possible */
+ if (e->delta_child) {
+ add_to_order = 1;
+ e = e->delta_child;
+ } else {
+ add_to_order = 0;
+ /* our sibling might have some children; visit it next */
+ if (e->delta_sibling) {
+ e = e->delta_sibling;
+ continue;
+ }
+ /* go back to our parent node */
+ e = e->delta;
+ while (e && !e->delta_sibling) {
+ /* we're on the right side of a subtree, keep
+ * going up until we can go right again */
+ e = e->delta;
+ }
+ if (!e) {
+ /* done - we hit our original root node */
+ return;
+ }
+ /* pass it off to sibling at this level */
+ e = e->delta_sibling;
+ }
+ }
+}
+
+static void add_family_to_write_order(struct object_entry **wo,
+ unsigned int *endp,
+ struct object_entry *e)
+{
+ struct object_entry *root;
+
+ for (root = e; root->delta; root = root->delta)
+ ; /* nothing */
+ add_descendants_to_write_order(wo, endp, root);
+}
+
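+/*
+ * Decide the order in which objects are written out: untagged objects
+ * in the original recency order up to the first tagged tip, then all
+ * tagged tips, then the remaining commits and tags, then trees, and
+ * finally everything else grouped by delta family.
+ */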
+static struct object_entry **compute_write_order(void)
+{
+ unsigned int i, wo_end, last_untagged;
+
+ struct object_entry **wo = xmalloc(nr_objects * sizeof(*wo));
+
+ for (i = 0; i < nr_objects; i++) {
+ objects[i].tagged = 0;
+ objects[i].filled = 0;
+ objects[i].delta_child = NULL;
+ objects[i].delta_sibling = NULL;
+ }
+
+ /*
+ * Fully connect delta_child/delta_sibling network.
+ * Make sure delta_sibling is sorted in the original
+ * recency order.
+ */
+ for (i = nr_objects; i > 0;) {
+ struct object_entry *e = &objects[--i];
+ if (!e->delta)
+ continue;
+ /* Mark me as the first child */
+ e->delta_sibling = e->delta->delta_child;
+ e->delta->delta_child = e;
+ }
+
+ /*
+ * Mark objects that are at the tip of tags.
+ */
+ for_each_tag_ref(mark_tagged, NULL);
+
+ /*
+ * Give the objects in the original recency order until
+ * we see a tagged tip.
+ */
+ for (i = wo_end = 0; i < nr_objects; i++) {
+ if (objects[i].tagged)
+ break;
+ add_to_write_order(wo, &wo_end, &objects[i]);
+ }
+ last_untagged = i;
+
+ /*
+ * Then fill all the tagged tips.
+ */
+ for (; i < nr_objects; i++) {
+ if (objects[i].tagged)
+ add_to_write_order(wo, &wo_end, &objects[i]);
+ }
+
+ /*
+ * And then all remaining commits and tags.
+ */
+ for (i = last_untagged; i < nr_objects; i++) {
+ if (objects[i].type != OBJ_COMMIT &&
+ objects[i].type != OBJ_TAG)
+ continue;
+ add_to_write_order(wo, &wo_end, &objects[i]);
+ }
+
+ /*
+ * And then all the trees.
+ */
+ for (i = last_untagged; i < nr_objects; i++) {
+ if (objects[i].type != OBJ_TREE)
+ continue;
+ add_to_write_order(wo, &wo_end, &objects[i]);
+ }
+
+ /*
+ * Finally all the rest in really tight order
+ */
+ for (i = last_untagged; i < nr_objects; i++) {
+ if (!objects[i].filled)
+ add_family_to_write_order(wo, &wo_end, &objects[i]);
+ }
+
+ if (wo_end != nr_objects)
+ die("ordered %u objects, expected %"PRIu32, wo_end, nr_objects);
+
+ return wo;
+}
static void write_pack_file(void)
{
struct pack_header hdr;
uint32_t nr_remaining = nr_result;
time_t last_mtime = 0;
+ struct object_entry **write_order;
if (progress > pack_to_stdout)
progress_state = start_progress("Writing objects", nr_result);
written_list = xmalloc(nr_objects * sizeof(*written_list));
+ write_order = compute_write_order();
do {
unsigned char sha1[20];
offset = sizeof(hdr);
nr_written = 0;
for (; i < nr_objects; i++) {
- if (!write_one(f, objects + i, &offset))
+ struct object_entry *e = write_order[i];
+ if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
break;
display_progress(progress_state, written);
}
}
if (!pack_to_stdout) {
- mode_t mode = umask(0);
struct stat st;
const char *idx_tmp_name;
char tmpname[PATH_MAX];
- umask(mode);
- mode = 0444 & ~mode;
-
- idx_tmp_name = write_idx_file(NULL, written_list,
- nr_written, sha1);
+ idx_tmp_name = write_idx_file(NULL, written_list, nr_written,
+ &pack_idx_opts, sha1);
snprintf(tmpname, sizeof(tmpname), "%s-%s.pack",
base_name, sha1_to_hex(sha1));
free_pack_by_name(tmpname);
- if (adjust_perm(pack_tmp_name, mode))
+ if (adjust_shared_perm(pack_tmp_name))
die_errno("unable to make temporary pack file readable");
if (rename(pack_tmp_name, tmpname))
die_errno("unable to rename temporary pack file");
snprintf(tmpname, sizeof(tmpname), "%s-%s.idx",
base_name, sha1_to_hex(sha1));
- if (adjust_perm(idx_tmp_name, mode))
+ if (adjust_shared_perm(idx_tmp_name))
die_errno("unable to make temporary index file readable");
if (rename(idx_tmp_name, tmpname))
die_errno("unable to rename temporary index file");
} while (nr_remaining && i < nr_objects);
free(written_list);
+ free(write_order);
stop_progress(&progress_state);
if (written != nr_result)
die("wrote %"PRIu32" objects while expecting %"PRIu32,
struct git_attr_check check[1];
setup_delta_attr_check(check);
- if (git_checkattr(path, ARRAY_SIZE(check), check))
+ if (git_check_attr(path, ARRAY_SIZE(check), check))
return 0;
if (ATTR_FALSE(check->value))
return 1;
off_t offset = find_pack_entry_one(sha1, p);
if (offset) {
if (!found_pack) {
+ if (!is_pack_valid(p)) {
+ warning("packfile %s cannot be accessed", p->pack_name);
+ continue;
+ }
found_offset = offset;
found_pack = p;
}
while (tree_entry(tree,&entry)) {
if (S_ISGITLINK(entry.mode))
continue;
- cmp = tree_entry_len(entry.path, entry.sha1) != cmplen ? 1 :
+ cmp = tree_entry_len(&entry) != cmplen ? 1 :
memcmp(name, entry.path, cmplen);
if (cmp > 0)
continue;
const unsigned char *base_ref = NULL;
struct object_entry *base_entry;
unsigned long used, used_0;
- unsigned int avail;
+ unsigned long avail;
off_t ofs;
unsigned char *buf, c;
sorted_by_offset[i] = objects + i;
qsort(sorted_by_offset, nr_objects, sizeof(*sorted_by_offset), pack_offset_sort);
- for (i = 0; i < nr_objects; i++)
- check_object(sorted_by_offset[i]);
+ for (i = 0; i < nr_objects; i++) {
+ struct object_entry *entry = sorted_by_offset[i];
+ check_object(entry);
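+ /* do not attempt to deltify objects at or above the big file threshold */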
+ if (big_file_threshold <= entry->size)
+ entry->no_try_delta = 1;
+ }
free(sorted_by_offset);
}
read_lock();
src->data = read_sha1_file(src_entry->idx.sha1, &type, &sz);
read_unlock();
- if (!src->data)
+ if (!src->data) {
+ if (src_entry->preferred_base) {
+ static int warned = 0;
+ if (!warned++)
+ warning("object %s cannot be read",
+ sha1_to_hex(src_entry->idx.sha1));
+ /*
+ * Those objects are not included in the
+ * resulting pack. Be resilient and ignore
+ * them if they can't be read, in case the
+ * pack could be created nevertheless.
+ */
+ return 0;
+ }
die("object %s cannot be read",
sha1_to_hex(src_entry->idx.sha1));
+ }
if (sz != src_size)
die("object %s inconsistent object length (%lu vs %lu)",
sha1_to_hex(src_entry->idx.sha1), sz, src_size);
#ifndef NO_PTHREADS
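+/*
+ * Release pack memory under the read mutex so that pack windows are
+ * not freed while another thread is in the middle of reading object
+ * data.
+ */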
+static void try_to_free_from_threads(size_t size)
+{
+ read_lock();
+ release_pack_memory(size, -1);
+ read_unlock();
+}
+
+static try_to_free_t old_try_to_free_routine;
+
/*
* The main thread waits on the condition that (at least) one of the workers
* has stopped working (which is indicated in the .working member of
*/
static void init_threaded_search(void)
{
- pthread_mutex_init(&read_mutex, NULL);
+ init_recursive_mutex(&read_mutex);
pthread_mutex_init(&cache_mutex, NULL);
pthread_mutex_init(&progress_mutex, NULL);
pthread_cond_init(&progress_cond, NULL);
+ old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
}
static void cleanup_threaded_search(void)
{
+ set_try_to_free_routine(old_try_to_free_routine);
pthread_cond_destroy(&progress_cond);
pthread_mutex_destroy(&read_mutex);
pthread_mutex_destroy(&cache_mutex);
return 0;
}
if (!strcmp(k, "pack.indexversion")) {
- pack_idx_default_version = git_config_int(k, v);
- if (pack_idx_default_version > 2)
+ pack_idx_opts.version = git_config_int(k, v);
+ if (pack_idx_opts.version > 2)
die("bad pack.indexversion=%"PRIu32,
- pack_idx_default_version);
+ pack_idx_opts.version);
return 0;
}
if (!strcmp(k, "pack.packsizelimit")) {
commit->object.flags |= OBJECT_ADDED;
}
-static void show_object(struct object *obj, const struct name_path *path, const char *last)
+static void show_object(struct object *obj,
+ const struct name_path *path, const char *last,
+ void *data)
{
char *name = path_name(path, last);
loosen_unused_packed_objects(&revs);
}
-static int adjust_perm(const char *path, mode_t mode)
-{
- if (chmod(path, mode))
- return -1;
- return adjust_shared_perm(path);
-}
-
int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
int use_internal_rev_list = 0;
rp_av[1] = "--objects"; /* --thin will make it --objects-edge */
rp_ac = 2;
+ reset_pack_idx_option(&pack_idx_opts);
git_config(git_pack_config, NULL);
if (!pack_compression_seen && core_compression_seen)
pack_compression_level = core_compression_level;
}
if (!prefixcmp(arg, "--index-version=")) {
char *c;
- pack_idx_default_version = strtoul(arg + 16, &c, 10);
- if (pack_idx_default_version > 2)
+ pack_idx_opts.version = strtoul(arg + 16, &c, 10);
+ if (pack_idx_opts.version > 2)
die("bad %s", arg);
if (*c == ',')
- pack_idx_off32_limit = strtoul(c+1, &c, 0);
- if (*c || pack_idx_off32_limit & 0x80000000)
+ pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);
+ if (*c || pack_idx_opts.off32_limit & 0x80000000)
die("bad %s", arg);
continue;
}