#include "streaming.h"
#include "thread-utils.h"
#include "pack-bitmap.h"
+#include "delta-islands.h"
#include "reachable.h"
#include "sha1-array.h"
#include "argv-array.h"
#include "packfile.h"
#include "object-store.h"
#include "dir.h"
+#include "midx.h"
+
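+/*
+ * Convenience wrappers around the oe_* accessors; they all operate on the
+ * global to_pack list.  Fields that used to live directly in struct
+ * object_entry are now fetched and stored through these helpers, presumably
+ * so the entry struct itself can stay small.
+ */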
+#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
+#define SIZE(obj) oe_size(&to_pack, obj)
+#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size)
+#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj)
+#define DELTA(obj) oe_delta(&to_pack, obj)
+#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
+#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
+#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
+#define SET_DELTA_EXT(obj, oid) oe_set_delta_ext(&to_pack, obj, oid)
+#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val)
+#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
+#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)
static const char *pack_usage[] = {
N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
static struct packing_data to_pack;
static struct pack_idx_entry **written_list;
-static uint32_t nr_result, nr_written;
+static uint32_t nr_result, nr_written, nr_seen;
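+/*
+ * bitmap_git holds the result of prepare_bitmap_walk() so that later code
+ * (e.g. can_reuse_delta()) can query the bitmap; write_layer is the
+ * delta-island layer currently being emitted by compute_write_order() and
+ * only advances past 0 when delta islands are in use.
+ */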
+static struct bitmap_index *bitmap_git;
+static uint32_t write_layer;
static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int depth = 50;
static int delta_search_threads;
static int pack_to_stdout;
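+/*
+ * --thin; file-scope (rather than local to cmd_pack_objects()) so that
+ * can_reuse_delta() can consult it.
+ */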
+static int thin;
static int num_preferred_base;
static struct progress *progress_state;
static int exclude_promisor_objects;
+static int use_delta_islands;
+
static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
static unsigned long cache_max_small_delta_size = 1000;
buf = read_object_file(&entry->idx.oid, &type, &size);
if (!buf)
- die("unable to read %s", oid_to_hex(&entry->idx.oid));
- base_buf = read_object_file(&entry->delta->idx.oid, &type, &base_size);
+ die(_("unable to read %s"), oid_to_hex(&entry->idx.oid));
+ base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
+ &base_size);
if (!base_buf)
-		die("unable to read %s",
+		die(_("unable to read %s"),
- oid_to_hex(&entry->delta->idx.oid));
+ oid_to_hex(&DELTA(entry)->idx.oid));
delta_buf = diff_delta(base_buf, base_size,
buf, size, &delta_size, 0);
- if (!delta_buf || delta_size != entry->delta_size)
- die("delta size changed");
+ /*
+	 * We successfully computed this delta once but dropped it for
+ * memory reasons. Something is very wrong if this time we
+ * recompute and create a different delta.
+ */
+ if (!delta_buf || delta_size != DELTA_SIZE(entry))
+ BUG("delta size changed");
free(buf);
free(base_buf);
return delta_buf;
enum object_type type;
void *buf;
struct git_istream *st = NULL;
+ const unsigned hashsz = the_hash_algo->rawsz;
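+	/*
+	 * hashsz is the raw object ID length (20 bytes for SHA-1); it
+	 * replaces the hard-coded 20s in the size-limit checks below so the
+	 * math stays correct for other hash sizes.
+	 */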
if (!usable_delta) {
- if (entry->type == OBJ_BLOB &&
- entry->size > big_file_threshold &&
+ if (oe_type(entry) == OBJ_BLOB &&
+ oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
(st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
buf = NULL;
else {
FREE_AND_NULL(entry->delta_data);
entry->z_delta_size = 0;
} else if (entry->delta_data) {
- size = entry->delta_size;
+ size = DELTA_SIZE(entry);
buf = entry->delta_data;
entry->delta_data = NULL;
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
} else {
buf = get_delta(entry);
- size = entry->delta_size;
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ size = DELTA_SIZE(entry);
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
}
* encoding of the relative offset for the delta
* base from this object's position in the pack.
*/
- off_t ofs = entry->idx.offset - entry->delta->idx.offset;
+ off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
unsigned pos = sizeof(dheader) - 1;
dheader[pos] = ofs & 127;
while (ofs >>= 7)
dheader[--pos] = 128 | (--ofs & 127);
- if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
+ if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
if (st)
close_istream(st);
free(buf);
} else if (type == OBJ_REF_DELTA) {
/*
* Deltas with a base reference contain
- * an additional 20 bytes for the base sha1.
+ * additional bytes for the base object ID.
*/
- if (limit && hdrlen + 20 + datalen + 20 >= limit) {
+ if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
if (st)
close_istream(st);
free(buf);
return 0;
}
hashwrite(f, header, hdrlen);
- hashwrite(f, entry->delta->idx.oid.hash, 20);
- hdrlen += 20;
+ hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
+ hdrlen += hashsz;
} else {
- if (limit && hdrlen + datalen + 20 >= limit) {
+ if (limit && hdrlen + datalen + hashsz >= limit) {
if (st)
close_istream(st);
free(buf);
static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
unsigned long limit, int usable_delta)
{
- struct packed_git *p = entry->in_pack;
+ struct packed_git *p = IN_PACK(entry);
struct pack_window *w_curs = NULL;
struct revindex_entry *revidx;
off_t offset;
- enum object_type type = entry->type;
+ enum object_type type = oe_type(entry);
off_t datalen;
unsigned char header[MAX_PACK_OBJECT_HEADER],
dheader[MAX_PACK_OBJECT_HEADER];
unsigned hdrlen;
+ const unsigned hashsz = the_hash_algo->rawsz;
+ unsigned long entry_size = SIZE(entry);
- if (entry->delta)
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ if (DELTA(entry))
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
hdrlen = encode_in_pack_object_header(header, sizeof(header),
- type, entry->size);
+ type, entry_size);
offset = entry->in_pack_offset;
revidx = find_pack_revindex(p, offset);
datalen = revidx[1].offset - offset;
if (!pack_to_stdout && p->index_version > 1 &&
check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
- error("bad packed object CRC for %s",
+ error(_("bad packed object CRC for %s"),
oid_to_hex(&entry->idx.oid));
unuse_pack(&w_curs);
return write_no_reuse_object(f, entry, limit, usable_delta);
datalen -= entry->in_pack_header_size;
if (!pack_to_stdout && p->index_version == 1 &&
- check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
- error("corrupt packed object for %s",
+ check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
+ error(_("corrupt packed object for %s"),
oid_to_hex(&entry->idx.oid));
unuse_pack(&w_curs);
return write_no_reuse_object(f, entry, limit, usable_delta);
}
if (type == OBJ_OFS_DELTA) {
- off_t ofs = entry->idx.offset - entry->delta->idx.offset;
+ off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
unsigned pos = sizeof(dheader) - 1;
dheader[pos] = ofs & 127;
while (ofs >>= 7)
dheader[--pos] = 128 | (--ofs & 127);
- if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
+ if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
unuse_pack(&w_curs);
return 0;
}
hdrlen += sizeof(dheader) - pos;
reused_delta++;
} else if (type == OBJ_REF_DELTA) {
- if (limit && hdrlen + 20 + datalen + 20 >= limit) {
+ if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
unuse_pack(&w_curs);
return 0;
}
hashwrite(f, header, hdrlen);
- hashwrite(f, entry->delta->idx.oid.hash, 20);
- hdrlen += 20;
+ hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
+ hdrlen += hashsz;
reused_delta++;
} else {
- if (limit && hdrlen + datalen + 20 >= limit) {
+ if (limit && hdrlen + datalen + hashsz >= limit) {
unuse_pack(&w_curs);
return 0;
}
else
limit = pack_size_limit - write_offset;
- if (!entry->delta)
+ if (!DELTA(entry))
usable_delta = 0; /* no delta */
else if (!pack_size_limit)
usable_delta = 1; /* unlimited packfile */
- else if (entry->delta->idx.offset == (off_t)-1)
+ else if (DELTA(entry)->idx.offset == (off_t)-1)
usable_delta = 0; /* base was written to another pack */
- else if (entry->delta->idx.offset)
+ else if (DELTA(entry)->idx.offset)
usable_delta = 1; /* base already exists in this pack */
else
usable_delta = 0; /* base could end up in another pack */
if (!reuse_object)
to_reuse = 0; /* explicit */
- else if (!entry->in_pack)
+ else if (!IN_PACK(entry))
to_reuse = 0; /* can't reuse what we don't have */
- else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
+ else if (oe_type(entry) == OBJ_REF_DELTA ||
+ oe_type(entry) == OBJ_OFS_DELTA)
/* check_object() decided it for us ... */
to_reuse = usable_delta;
/* ... but pack split may override that */
- else if (entry->type != entry->in_pack_type)
+ else if (oe_type(entry) != entry->in_pack_type)
to_reuse = 0; /* pack has delta which is unusable */
- else if (entry->delta)
+ else if (DELTA(entry))
to_reuse = 0; /* we want to pack afresh */
else
to_reuse = 1; /* we have it in-pack undeltified,
*/
recursing = (e->idx.offset == 1);
if (recursing) {
- warning("recursive delta detected for object %s",
+ warning(_("recursive delta detected for object %s"),
oid_to_hex(&e->idx.oid));
return WRITE_ONE_RECURSIVE;
} else if (e->idx.offset || e->preferred_base) {
}
/* if we are deltified, write out base object first. */
- if (e->delta) {
+ if (DELTA(e)) {
e->idx.offset = 1; /* now recurse */
- switch (write_one(f, e->delta, offset)) {
+ switch (write_one(f, DELTA(e), offset)) {
case WRITE_ONE_RECURSIVE:
/* we cannot depend on this one */
- e->delta = NULL;
+ SET_DELTA(e, NULL);
break;
default:
break;
/* make sure off_t is sufficiently large not to wrap */
if (signed_add_overflows(*offset, size))
- die("pack too large for current definition of off_t");
+ die(_("pack too large for current definition of off_t"));
*offset += size;
return WRITE_ONE_WRITTEN;
}
unsigned int *endp,
struct object_entry *e)
{
- if (e->filled)
+ if (e->filled || oe_layer(&to_pack, e) != write_layer)
return;
wo[(*endp)++] = e;
e->filled = 1;
/* add this node... */
add_to_write_order(wo, endp, e);
/* all its siblings... */
- for (s = e->delta_sibling; s; s = s->delta_sibling) {
+ for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) {
add_to_write_order(wo, endp, s);
}
}
/* drop down a level to add left subtree nodes if possible */
- if (e->delta_child) {
+ if (DELTA_CHILD(e)) {
add_to_order = 1;
- e = e->delta_child;
+ e = DELTA_CHILD(e);
} else {
add_to_order = 0;
/* our sibling might have some children, it is next */
- if (e->delta_sibling) {
- e = e->delta_sibling;
+ if (DELTA_SIBLING(e)) {
+ e = DELTA_SIBLING(e);
continue;
}
/* go back to our parent node */
- e = e->delta;
- while (e && !e->delta_sibling) {
+ e = DELTA(e);
+ while (e && !DELTA_SIBLING(e)) {
/* we're on the right side of a subtree, keep
* going up until we can go right again */
- e = e->delta;
+ e = DELTA(e);
}
if (!e) {
/* done- we hit our original root node */
return;
}
/* pass it off to sibling at this level */
- e = e->delta_sibling;
+ e = DELTA_SIBLING(e);
}
};
}
{
struct object_entry *root;
- for (root = e; root->delta; root = root->delta)
+ for (root = e; DELTA(root); root = DELTA(root))
; /* nothing */
add_descendants_to_write_order(wo, endp, root);
}
-static struct object_entry **compute_write_order(void)
+static void compute_layer_order(struct object_entry **wo, unsigned int *wo_end)
{
- unsigned int i, wo_end, last_untagged;
-
- struct object_entry **wo;
+ unsigned int i, last_untagged;
struct object_entry *objects = to_pack.objects;
for (i = 0; i < to_pack.nr_objects; i++) {
- objects[i].tagged = 0;
- objects[i].filled = 0;
- objects[i].delta_child = NULL;
- objects[i].delta_sibling = NULL;
- }
-
- /*
- * Fully connect delta_child/delta_sibling network.
- * Make sure delta_sibling is sorted in the original
- * recency order.
- */
- for (i = to_pack.nr_objects; i > 0;) {
- struct object_entry *e = &objects[--i];
- if (!e->delta)
- continue;
- /* Mark me as the first child */
- e->delta_sibling = e->delta->delta_child;
- e->delta->delta_child = e;
- }
-
- /*
- * Mark objects that are at the tip of tags.
- */
- for_each_tag_ref(mark_tagged, NULL);
-
- /*
- * Give the objects in the original recency order until
- * we see a tagged tip.
- */
- ALLOC_ARRAY(wo, to_pack.nr_objects);
- for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
if (objects[i].tagged)
break;
- add_to_write_order(wo, &wo_end, &objects[i]);
+ add_to_write_order(wo, wo_end, &objects[i]);
}
last_untagged = i;
*/
for (; i < to_pack.nr_objects; i++) {
if (objects[i].tagged)
- add_to_write_order(wo, &wo_end, &objects[i]);
+ add_to_write_order(wo, wo_end, &objects[i]);
}
/*
* And then all remaining commits and tags.
*/
for (i = last_untagged; i < to_pack.nr_objects; i++) {
- if (objects[i].type != OBJ_COMMIT &&
- objects[i].type != OBJ_TAG)
+ if (oe_type(&objects[i]) != OBJ_COMMIT &&
+ oe_type(&objects[i]) != OBJ_TAG)
continue;
- add_to_write_order(wo, &wo_end, &objects[i]);
+ add_to_write_order(wo, wo_end, &objects[i]);
}
/*
* And then all the trees.
*/
for (i = last_untagged; i < to_pack.nr_objects; i++) {
- if (objects[i].type != OBJ_TREE)
+ if (oe_type(&objects[i]) != OBJ_TREE)
continue;
- add_to_write_order(wo, &wo_end, &objects[i]);
+ add_to_write_order(wo, wo_end, &objects[i]);
}
/*
* Finally all the rest in really tight order
*/
for (i = last_untagged; i < to_pack.nr_objects; i++) {
- if (!objects[i].filled)
- add_family_to_write_order(wo, &wo_end, &objects[i]);
+ if (!objects[i].filled && oe_layer(&to_pack, &objects[i]) == write_layer)
+ add_family_to_write_order(wo, wo_end, &objects[i]);
+ }
+}
+
+static struct object_entry **compute_write_order(void)
+{
+ uint32_t max_layers = 1;
+ unsigned int i, wo_end;
+
+ struct object_entry **wo;
+ struct object_entry *objects = to_pack.objects;
+
+ for (i = 0; i < to_pack.nr_objects; i++) {
+ objects[i].tagged = 0;
+ objects[i].filled = 0;
+ SET_DELTA_CHILD(&objects[i], NULL);
+ SET_DELTA_SIBLING(&objects[i], NULL);
+ }
+
+ /*
+ * Fully connect delta_child/delta_sibling network.
+ * Make sure delta_sibling is sorted in the original
+ * recency order.
+ */
+ for (i = to_pack.nr_objects; i > 0;) {
+ struct object_entry *e = &objects[--i];
+ if (!DELTA(e))
+ continue;
+ /* Mark me as the first child */
+ e->delta_sibling_idx = DELTA(e)->delta_child_idx;
+ SET_DELTA_CHILD(DELTA(e), e);
}
+ /*
+ * Mark objects that are at the tip of tags.
+ */
+ for_each_tag_ref(mark_tagged, NULL);
+
+ if (use_delta_islands)
+ max_layers = compute_pack_layers(&to_pack);
+
+ ALLOC_ARRAY(wo, to_pack.nr_objects);
+ wo_end = 0;
+
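+	/*
+	 * Emit the pack one layer at a time: without delta islands
+	 * max_layers stays 1, otherwise compute_pack_layers() assigned each
+	 * entry a layer and every pass only picks up entries whose layer
+	 * matches write_layer.
+	 */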
+ for (; write_layer < max_layers; ++write_layer)
+ compute_layer_order(wo, &wo_end);
+
if (wo_end != to_pack.nr_objects)
- die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);
+ die(_("ordered %u objects, expected %"PRIu32),
+ wo_end, to_pack.nr_objects);
return wo;
}
int fd;
if (!is_pack_valid(reuse_packfile))
- die("packfile is invalid: %s", reuse_packfile->pack_name);
+ die(_("packfile is invalid: %s"), reuse_packfile->pack_name);
fd = git_open(reuse_packfile->pack_name);
if (fd < 0)
- die_errno("unable to open packfile for reuse: %s",
+ die_errno(_("unable to open packfile for reuse: %s"),
reuse_packfile->pack_name);
if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
- die_errno("unable to seek in reused packfile");
+ die_errno(_("unable to seek in reused packfile"));
if (reuse_packfile_offset < 0)
- reuse_packfile_offset = reuse_packfile->pack_size - 20;
+ reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz;
total = to_write = reuse_packfile_offset - sizeof(struct pack_header);
int read_pack = xread(fd, buffer, sizeof(buffer));
if (read_pack <= 0)
- die_errno("unable to read from reused packfile");
+ die_errno(_("unable to read from reused packfile"));
if (read_pack > to_write)
read_pack = to_write;
* If so, rewrite it like in fast-import
*/
if (pack_to_stdout) {
- hashclose(f, oid.hash, CSUM_CLOSE);
+ finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
} else if (nr_written == nr_remaining) {
- hashclose(f, oid.hash, CSUM_FSYNC);
+ finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
} else {
- int fd = hashclose(f, oid.hash, 0);
+ int fd = finalize_hashfile(f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
nr_written, oid.hash, offset);
close(fd);
* to preserve this property.
*/
if (stat(pack_tmp_name, &st) < 0) {
- warning_errno("failed to stat %s", pack_tmp_name);
+ warning_errno(_("failed to stat %s"), pack_tmp_name);
} else if (!last_mtime) {
last_mtime = st.st_mtime;
} else {
utb.actime = st.st_atime;
utb.modtime = --last_mtime;
if (utime(pack_tmp_name, &utb) < 0)
- warning_errno("failed utime() on %s", pack_tmp_name);
+ warning_errno(_("failed utime() on %s"), pack_tmp_name);
}
strbuf_addf(&tmpname, "%s-", base_name);
if (write_bitmap_index) {
bitmap_writer_set_checksum(oid.hash);
- bitmap_writer_build_type_index(written_list, nr_written);
+ bitmap_writer_build_type_index(
+ &to_pack, written_list, nr_written);
}
finish_tmp_packfile(&tmpname, pack_tmp_name,
free(write_order);
stop_progress(&progress_state);
if (written != nr_result)
- die("wrote %"PRIu32" objects while expecting %"PRIu32,
- written, nr_result);
+ die(_("wrote %"PRIu32" objects while expecting %"PRIu32),
+ written, nr_result);
}
static int no_try_delta(const char *path)
if (!check)
check = attr_check_initl("delta", NULL);
- if (git_check_attr(path, check))
- return 0;
+ git_check_attr(&the_index, path, check);
if (ATTR_FALSE(check->items[0].value))
return 1;
return 0;
{
int want;
struct list_head *pos;
+ struct multi_pack_index *m;
- if (!exclude && local && has_loose_object_nonlocal(oid->hash))
+ if (!exclude && local && has_loose_object_nonlocal(oid))
return 0;
/*
if (want != -1)
return want;
}
+
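+	/*
+	 * Try any multi-pack-index first; a hit still goes through the same
+	 * want_found_object() policy checks as the per-pack MRU scan below.
+	 */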
+ for (m = get_multi_pack_index(the_repository); m; m = m->next) {
+ struct pack_entry e;
+ if (fill_midx_entry(oid, &e, m)) {
+ struct packed_git *p = e.p;
+ off_t offset;
+
+ if (p == *found_pack)
+ offset = *found_offset;
+ else
+ offset = find_pack_entry_one(oid->hash, p);
+
+ if (offset) {
+ if (!*found_pack) {
+ if (!is_pack_valid(p))
+ continue;
+ *found_offset = offset;
+ *found_pack = p;
+ }
+ want = want_found_object(exclude, p);
+ if (want != -1)
+ return want;
+ }
+ }
+ }
+
list_for_each(pos, get_packed_git_mru(the_repository)) {
struct packed_git *p = list_entry(pos, struct packed_git, mru);
off_t offset;
entry = packlist_alloc(&to_pack, oid->hash, index_pos);
entry->hash = hash;
- if (type)
- entry->type = type;
+ oe_set_type(entry, type);
if (exclude)
entry->preferred_base = 1;
else
nr_result++;
if (found_pack) {
- entry->in_pack = found_pack;
+ oe_set_in_pack(&to_pack, entry, found_pack);
entry->in_pack_offset = found_offset;
}
off_t found_offset = 0;
uint32_t index_pos;
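+	/*
+	 * The progress meter counts every candidate we look at (nr_seen),
+	 * not just the ones that make it into the pack, so it keeps ticking
+	 * for duplicates and excluded objects too.
+	 */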
+ display_progress(progress_state, ++nr_seen);
+
if (have_duplicate_entry(oid, exclude, &index_pos))
return 0;
create_object_entry(oid, type, pack_name_hash(name),
exclude, name && no_try_delta(name),
index_pos, found_pack, found_offset);
-
- display_progress(progress_state, nr_result);
return 1;
}
{
uint32_t index_pos;
+ display_progress(progress_state, ++nr_seen);
+
if (have_duplicate_entry(oid, 0, &index_pos))
return 0;
return 0;
create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);
-
- display_progress(progress_state, nr_result);
return 1;
}
*/
for (neigh = 0; neigh < 8; neigh++) {
ent = pbase_tree_cache[my_ix];
- if (ent && !oidcmp(&ent->oid, oid)) {
+ if (ent && oideq(&ent->oid, oid)) {
ent->ref++;
return ent;
}
return;
for (it = pbase_tree; it; it = it->next) {
- if (!oidcmp(&it->pcache.oid, &tree_oid)) {
+ if (oideq(&it->pcache.oid, &tree_oid)) {
free(data);
return;
}
done_pbase_paths_num = done_pbase_paths_alloc = 0;
}
+/*
+ * Return 1 iff the object specified by "delta" can be sent
+ * literally as a delta against the base in "base_sha1". If
+ * so, then *base_out will point to the entry in our packing
+ * list, or NULL if we must use the external-base list.
+ *
+ * Depth value does not matter - find_deltas() will
+ * never consider a reused delta as the base object to
+ * deltify other objects against, in order to avoid
+ * circular deltas.
+ */
+static int can_reuse_delta(const unsigned char *base_sha1,
+ struct object_entry *delta,
+ struct object_entry **base_out)
+{
+ struct object_entry *base;
+
+ if (!base_sha1)
+ return 0;
+
+ /*
+ * First see if we're already sending the base (or it's explicitly in
+ * our "excluded" list).
+ */
+ base = packlist_find(&to_pack, base_sha1, NULL);
+ if (base) {
+ if (!in_same_island(&delta->idx.oid, &base->idx.oid))
+ return 0;
+ *base_out = base;
+ return 1;
+ }
+
+ /*
+ * Otherwise, reachability bitmaps may tell us if the receiver has it,
+ * even if it was buried too deep in history to make it into the
+ * packing list.
+ */
+ if (thin && bitmap_has_sha1_in_uninteresting(bitmap_git, base_sha1)) {
+ if (use_delta_islands) {
+ struct object_id base_oid;
+ hashcpy(base_oid.hash, base_sha1);
+ if (!in_same_island(&delta->idx.oid, &base_oid))
+ return 0;
+ }
+ *base_out = NULL;
+ return 1;
+ }
+
+ return 0;
+}
+
static void check_object(struct object_entry *entry)
{
- if (entry->in_pack) {
- struct packed_git *p = entry->in_pack;
+ unsigned long canonical_size;
+
+ if (IN_PACK(entry)) {
+ struct packed_git *p = IN_PACK(entry);
struct pack_window *w_curs = NULL;
const unsigned char *base_ref = NULL;
struct object_entry *base_entry;
unsigned long avail;
off_t ofs;
unsigned char *buf, c;
+ enum object_type type;
+ unsigned long in_pack_size;
buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);
* since non-delta representations could still be reused.
*/
used = unpack_object_header_buffer(buf, avail,
- &entry->in_pack_type,
- &entry->size);
+ &type,
+ &in_pack_size);
if (used == 0)
goto give_up;
+ if (type < 0)
+ BUG("invalid type %d", type);
+ entry->in_pack_type = type;
+
/*
* Determine if this is a delta and if so whether we can
* reuse it or not. Otherwise let's find out as cheaply as
switch (entry->in_pack_type) {
default:
/* Not a delta hence we've already got all we need. */
- entry->type = entry->in_pack_type;
+ oe_set_type(entry, entry->in_pack_type);
+ SET_SIZE(entry, in_pack_size);
entry->in_pack_header_size = used;
- if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
+ if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)
goto give_up;
unuse_pack(&w_curs);
return;
if (reuse_delta && !entry->preferred_base)
base_ref = use_pack(p, &w_curs,
entry->in_pack_offset + used, NULL);
- entry->in_pack_header_size = used + 20;
+ entry->in_pack_header_size = used + the_hash_algo->rawsz;
break;
case OBJ_OFS_DELTA:
buf = use_pack(p, &w_curs,
while (c & 128) {
ofs += 1;
if (!ofs || MSB(ofs, 7)) {
- error("delta base offset overflow in pack for %s",
+ error(_("delta base offset overflow in pack for %s"),
oid_to_hex(&entry->idx.oid));
goto give_up;
}
}
ofs = entry->in_pack_offset - ofs;
if (ofs <= 0 || ofs >= entry->in_pack_offset) {
- error("delta base offset out of bound for %s",
+ error(_("delta base offset out of bound for %s"),
oid_to_hex(&entry->idx.oid));
goto give_up;
}
break;
}
- if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {
- /*
- * If base_ref was set above that means we wish to
- * reuse delta data, and we even found that base
- * in the list of objects we want to pack. Goodie!
- *
- * Depth value does not matter - find_deltas() will
- * never consider reused delta as the base object to
- * deltify other objects against, in order to avoid
- * circular deltas.
- */
- entry->type = entry->in_pack_type;
- entry->delta = base_entry;
- entry->delta_size = entry->size;
- entry->delta_sibling = base_entry->delta_child;
- base_entry->delta_child = entry;
+ if (can_reuse_delta(base_ref, entry, &base_entry)) {
+ oe_set_type(entry, entry->in_pack_type);
+ SET_SIZE(entry, in_pack_size); /* delta size */
+ SET_DELTA_SIZE(entry, in_pack_size);
+
+ if (base_entry) {
+ SET_DELTA(entry, base_entry);
+ entry->delta_sibling_idx = base_entry->delta_child_idx;
+ SET_DELTA_CHILD(base_entry, entry);
+ } else {
+ SET_DELTA_EXT(entry, base_ref);
+ }
+
unuse_pack(&w_curs);
return;
}
- if (entry->type) {
+ if (oe_type(entry)) {
+ off_t delta_pos;
+
/*
* This must be a delta and we already know what the
* final object type is. Let's extract the actual
* object size from the delta header.
*/
- entry->size = get_size_from_delta(p, &w_curs,
- entry->in_pack_offset + entry->in_pack_header_size);
- if (entry->size == 0)
+ delta_pos = entry->in_pack_offset + entry->in_pack_header_size;
+ canonical_size = get_size_from_delta(p, &w_curs, delta_pos);
+ if (canonical_size == 0)
goto give_up;
+ SET_SIZE(entry, canonical_size);
unuse_pack(&w_curs);
return;
}
unuse_pack(&w_curs);
}
- entry->type = oid_object_info(&entry->idx.oid, &entry->size);
- /*
- * The error condition is checked in prepare_pack(). This is
- * to permit a missing preferred base object to be ignored
- * as a preferred base. Doing so can result in a larger
- * pack file, but the transfer will still take place.
- */
+ oe_set_type(entry,
+ oid_object_info(the_repository, &entry->idx.oid, &canonical_size));
+ if (entry->type_valid) {
+ SET_SIZE(entry, canonical_size);
+ } else {
+ /*
+ * Bad object type is checked in prepare_pack(). This is
+ * to permit a missing preferred base object to be ignored
+ * as a preferred base. Doing so can result in a larger
+ * pack file, but the transfer will still take place.
+ */
+ }
}
static int pack_offset_sort(const void *_a, const void *_b)
{
const struct object_entry *a = *(struct object_entry **)_a;
const struct object_entry *b = *(struct object_entry **)_b;
+ const struct packed_git *a_in_pack = IN_PACK(a);
+ const struct packed_git *b_in_pack = IN_PACK(b);
/* avoid filesystem trashing with loose objects */
- if (!a->in_pack && !b->in_pack)
+ if (!a_in_pack && !b_in_pack)
return oidcmp(&a->idx.oid, &b->idx.oid);
- if (a->in_pack < b->in_pack)
+ if (a_in_pack < b_in_pack)
return -1;
- if (a->in_pack > b->in_pack)
+ if (a_in_pack > b_in_pack)
return 1;
return a->in_pack_offset < b->in_pack_offset ? -1 :
(a->in_pack_offset > b->in_pack_offset);
*/
static void drop_reused_delta(struct object_entry *entry)
{
- struct object_entry **p = &entry->delta->delta_child;
+ unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;
struct object_info oi = OBJECT_INFO_INIT;
+ enum object_type type;
+ unsigned long size;
- while (*p) {
- if (*p == entry)
- *p = (*p)->delta_sibling;
+ while (*idx) {
+ struct object_entry *oe = &to_pack.objects[*idx - 1];
+
+ if (oe == entry)
+ *idx = oe->delta_sibling_idx;
else
- p = &(*p)->delta_sibling;
+ idx = &oe->delta_sibling_idx;
}
- entry->delta = NULL;
+ SET_DELTA(entry, NULL);
entry->depth = 0;
- oi.sizep = &entry->size;
- oi.typep = &entry->type;
- if (packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) < 0) {
+ oi.sizep = &size;
+ oi.typep = &type;
+ if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
/*
* We failed to get the info from this pack for some reason;
-		 * fall back to sha1_object_info, which may find another copy.
+		 * fall back to oid_object_info, which may find another copy.
- * And if that fails, the error will be recorded in entry->type
+ * And if that fails, the error will be recorded in oe_type(entry)
* and dealt with in prepare_pack().
*/
- entry->type = oid_object_info(&entry->idx.oid, &entry->size);
+ oe_set_type(entry,
+ oid_object_info(the_repository, &entry->idx.oid, &size));
+ } else {
+ oe_set_type(entry, type);
}
+ SET_SIZE(entry, size);
}
/*
for (cur = entry, total_depth = 0;
cur;
- cur = cur->delta, total_depth++) {
+ cur = DELTA(cur), total_depth++) {
if (cur->dfs_state == DFS_DONE) {
/*
* We've already seen this object and know it isn't
* is a bug.
*/
if (cur->dfs_state != DFS_NONE)
- die("BUG: confusing delta dfs state in first pass: %d",
+ BUG("confusing delta dfs state in first pass: %d",
cur->dfs_state);
/*
* it's not a delta, we're done traversing, but we'll mark it
* done to save time on future traversals.
*/
- if (!cur->delta) {
+ if (!DELTA(cur)) {
cur->dfs_state = DFS_DONE;
break;
}
* We keep all commits in the chain that we examined.
*/
cur->dfs_state = DFS_ACTIVE;
- if (cur->delta->dfs_state == DFS_ACTIVE) {
+ if (DELTA(cur)->dfs_state == DFS_ACTIVE) {
drop_reused_delta(cur);
cur->dfs_state = DFS_DONE;
break;
* an extra "next" pointer to keep going after we reset cur->delta.
*/
for (cur = entry; cur; cur = next) {
- next = cur->delta;
+ next = DELTA(cur);
/*
* We should have a chain of zero or more ACTIVE states down to
if (cur->dfs_state == DFS_DONE)
break;
else if (cur->dfs_state != DFS_ACTIVE)
- die("BUG: confusing delta dfs state in second pass: %d",
+ BUG("confusing delta dfs state in second pass: %d",
cur->dfs_state);
/*
uint32_t i;
struct object_entry **sorted_by_offset;
+ if (progress)
+ progress_state = start_progress(_("Counting objects"),
+ to_pack.nr_objects);
+
sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
for (i = 0; i < to_pack.nr_objects; i++)
sorted_by_offset[i] = to_pack.objects + i;
for (i = 0; i < to_pack.nr_objects; i++) {
struct object_entry *entry = sorted_by_offset[i];
check_object(entry);
- if (big_file_threshold < entry->size)
+ if (entry->type_valid &&
+ oe_size_greater_than(&to_pack, entry, big_file_threshold))
entry->no_try_delta = 1;
+ display_progress(progress_state, i + 1);
}
+ stop_progress(&progress_state);
/*
* This must happen in a second pass, since we rely on the delta
{
const struct object_entry *a = *(struct object_entry **)_a;
const struct object_entry *b = *(struct object_entry **)_b;
+ enum object_type a_type = oe_type(a);
+ enum object_type b_type = oe_type(b);
+ unsigned long a_size = SIZE(a);
+ unsigned long b_size = SIZE(b);
- if (a->type > b->type)
+ if (a_type > b_type)
return -1;
- if (a->type < b->type)
+ if (a_type < b_type)
return 1;
if (a->hash > b->hash)
return -1;
return -1;
if (a->preferred_base < b->preferred_base)
return 1;
- if (a->size > b->size)
+ if (use_delta_islands) {
+ int island_cmp = island_delta_cmp(&a->idx.oid, &b->idx.oid);
+ if (island_cmp)
+ return island_cmp;
+ }
+ if (a_size > b_size)
return -1;
- if (a->size < b->size)
+ if (a_size < b_size)
return 1;
return a < b ? -1 : (a > b); /* newest first */
}
return 0;
}
-#ifndef NO_PTHREADS
-
+/* Protect access to object database */
static pthread_mutex_t read_mutex;
#define read_lock() pthread_mutex_lock(&read_mutex)
#define read_unlock() pthread_mutex_unlock(&read_mutex)
+/* Protect delta_cache_size */
static pthread_mutex_t cache_mutex;
#define cache_lock() pthread_mutex_lock(&cache_mutex)
#define cache_unlock() pthread_mutex_unlock(&cache_mutex)
+/*
+ * Protect object list partitioning (e.g. struct thread_params) and
+ * progress_state
+ */
static pthread_mutex_t progress_mutex;
#define progress_lock() pthread_mutex_lock(&progress_mutex)
#define progress_unlock() pthread_mutex_unlock(&progress_mutex)
-#else
+/*
+ * Access to struct object_entry is unprotected since each thread owns
+ * a portion of the main object list. Just don't access object entries
+ * ahead in the list because they can be stolen and would need
+ * progress_mutex for protection.
+ */
-#define read_lock() (void)0
-#define read_unlock() (void)0
-#define cache_lock() (void)0
-#define cache_unlock() (void)0
-#define progress_lock() (void)0
-#define progress_unlock() (void)0
+/*
+ * Return the size of the object without doing any delta
+ * reconstruction (so non-deltas are true object sizes, but deltas
+ * return the size of the delta data).
+ */
+unsigned long oe_get_size_slow(struct packing_data *pack,
+ const struct object_entry *e)
+{
+ struct packed_git *p;
+ struct pack_window *w_curs;
+ unsigned char *buf;
+ enum object_type type;
+ unsigned long used, avail, size;
-#endif
+ if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {
+ read_lock();
+ if (oid_object_info(the_repository, &e->idx.oid, &size) < 0)
+ die(_("unable to get size of %s"),
+ oid_to_hex(&e->idx.oid));
+ read_unlock();
+ return size;
+ }
+
+ p = oe_in_pack(pack, e);
+ if (!p)
+ BUG("when e->type is a delta, it must belong to a pack");
+
+ read_lock();
+ w_curs = NULL;
+ buf = use_pack(p, &w_curs, e->in_pack_offset, &avail);
+ used = unpack_object_header_buffer(buf, avail, &type, &size);
+ if (used == 0)
+ die(_("unable to parse object header of %s"),
+ oid_to_hex(&e->idx.oid));
+
+ unuse_pack(&w_curs);
+ read_unlock();
+ return size;
+}
static int try_delta(struct unpacked *trg, struct unpacked *src,
unsigned max_depth, unsigned long *mem_usage)
void *delta_buf;
/* Don't bother doing diffs between different types */
- if (trg_entry->type != src_entry->type)
+ if (oe_type(trg_entry) != oe_type(src_entry))
return -1;
/*
* it, we will still save the transfer cost, as we already know
* the other side has it and we won't send src_entry at all.
*/
- if (reuse_delta && trg_entry->in_pack &&
- trg_entry->in_pack == src_entry->in_pack &&
+ if (reuse_delta && IN_PACK(trg_entry) &&
+ IN_PACK(trg_entry) == IN_PACK(src_entry) &&
!src_entry->preferred_base &&
trg_entry->in_pack_type != OBJ_REF_DELTA &&
trg_entry->in_pack_type != OBJ_OFS_DELTA)
return 0;
/* Now some size filtering heuristics. */
- trg_size = trg_entry->size;
- if (!trg_entry->delta) {
- max_size = trg_size/2 - 20;
+ trg_size = SIZE(trg_entry);
+ if (!DELTA(trg_entry)) {
+ max_size = trg_size/2 - the_hash_algo->rawsz;
ref_depth = 1;
} else {
- max_size = trg_entry->delta_size;
+ max_size = DELTA_SIZE(trg_entry);
ref_depth = trg->depth;
}
max_size = (uint64_t)max_size * (max_depth - src->depth) /
(max_depth - ref_depth + 1);
if (max_size == 0)
return 0;
- src_size = src_entry->size;
+ src_size = SIZE(src_entry);
sizediff = src_size < trg_size ? trg_size - src_size : 0;
if (sizediff >= max_size)
return 0;
if (trg_size < src_size / 32)
return 0;
+ if (!in_same_island(&trg->entry->idx.oid, &src->entry->idx.oid))
+ return 0;
+
/* Load data if not already done */
if (!trg->data) {
read_lock();
trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
read_unlock();
if (!trg->data)
- die("object %s cannot be read",
+ die(_("object %s cannot be read"),
oid_to_hex(&trg_entry->idx.oid));
if (sz != trg_size)
- die("object %s inconsistent object length (%lu vs %lu)",
- oid_to_hex(&trg_entry->idx.oid), sz,
- trg_size);
+ die(_("object %s inconsistent object length (%"PRIuMAX" vs %"PRIuMAX")"),
+ oid_to_hex(&trg_entry->idx.oid), (uintmax_t)sz,
+ (uintmax_t)trg_size);
*mem_usage += sz;
}
if (!src->data) {
if (src_entry->preferred_base) {
static int warned = 0;
if (!warned++)
- warning("object %s cannot be read",
+ warning(_("object %s cannot be read"),
oid_to_hex(&src_entry->idx.oid));
/*
* Those objects are not included in the
*/
return 0;
}
- die("object %s cannot be read",
+ die(_("object %s cannot be read"),
oid_to_hex(&src_entry->idx.oid));
}
if (sz != src_size)
- die("object %s inconsistent object length (%lu vs %lu)",
- oid_to_hex(&src_entry->idx.oid), sz,
- src_size);
+ die(_("object %s inconsistent object length (%"PRIuMAX" vs %"PRIuMAX")"),
+ oid_to_hex(&src_entry->idx.oid), (uintmax_t)sz,
+ (uintmax_t)src_size);
*mem_usage += sz;
}
if (!src->index) {
if (!src->index) {
static int warned = 0;
if (!warned++)
- warning("suboptimal pack - out of memory");
+ warning(_("suboptimal pack - out of memory"));
return 0;
}
*mem_usage += sizeof_delta_index(src->index);
if (!delta_buf)
return 0;
- if (trg_entry->delta) {
+ if (DELTA(trg_entry)) {
/* Prefer only shallower same-sized deltas. */
- if (delta_size == trg_entry->delta_size &&
+ if (delta_size == DELTA_SIZE(trg_entry) &&
src->depth + 1 >= trg->depth) {
free(delta_buf);
return 0;
free(trg_entry->delta_data);
cache_lock();
if (trg_entry->delta_data) {
- delta_cache_size -= trg_entry->delta_size;
+ delta_cache_size -= DELTA_SIZE(trg_entry);
trg_entry->delta_data = NULL;
}
if (delta_cacheable(src_size, trg_size, delta_size)) {
free(delta_buf);
}
- trg_entry->delta = src_entry;
- trg_entry->delta_size = delta_size;
+ SET_DELTA(trg_entry, src_entry);
+ SET_DELTA_SIZE(trg_entry, delta_size);
trg->depth = src->depth + 1;
return 1;
static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
{
- struct object_entry *child = me->delta_child;
+ struct object_entry *child = DELTA_CHILD(me);
unsigned int m = n;
while (child) {
unsigned int c = check_delta_limit(child, n + 1);
if (m < c)
m = c;
- child = child->delta_sibling;
+ child = DELTA_SIBLING(child);
}
return m;
}
free_delta_index(n->index);
n->index = NULL;
if (n->data) {
- freed_mem += n->entry->size;
+ freed_mem += SIZE(n->entry);
FREE_AND_NULL(n->data);
}
n->entry = NULL;
* otherwise they would become too deep.
*/
max_depth = depth;
- if (entry->delta_child) {
+ if (DELTA_CHILD(entry)) {
max_depth -= check_delta_limit(entry, 0);
if (max_depth <= 0)
goto next;
* between writes at that moment.
*/
if (entry->delta_data && !pack_to_stdout) {
- entry->z_delta_size = do_compress(&entry->delta_data,
- entry->delta_size);
- cache_lock();
- delta_cache_size -= entry->delta_size;
- delta_cache_size += entry->z_delta_size;
- cache_unlock();
+ unsigned long size;
+
+ size = do_compress(&entry->delta_data, DELTA_SIZE(entry));
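+		/*
+		 * z_delta_size is a narrow bitfield; if the compressed delta
+		 * does not fit, drop it from the cache and let get_delta()
+		 * recompute it at write time rather than store a bogus size.
+		 */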
+ if (size < (1U << OE_Z_DELTA_BITS)) {
+ entry->z_delta_size = size;
+ cache_lock();
+ delta_cache_size -= DELTA_SIZE(entry);
+ delta_cache_size += entry->z_delta_size;
+ cache_unlock();
+ } else {
+ FREE_AND_NULL(entry->delta_data);
+ entry->z_delta_size = 0;
+ }
}
/* if we made n a delta, and if n is already at max
* depth, leaving it in the window is pointless. we
* should evict it first.
*/
- if (entry->delta && max_depth <= n->depth)
+ if (DELTA(entry) && max_depth <= n->depth)
continue;
/*
* currently deltified object, to keep it longer. It will
* be the first base object to be attempted next.
*/
- if (entry->delta) {
+ if (DELTA(entry)) {
struct unpacked swap = array[best_base];
int dist = (window + idx - best_base) % window;
int dst = best_base;
free(array);
}
-#ifndef NO_PTHREADS
-
static void try_to_free_from_threads(size_t size)
{
read_lock();
static try_to_free_t old_try_to_free_routine;
/*
+ * The main object list is split into smaller lists, each is handed to
+ * one worker.
+ *
* The main thread waits on the condition that (at least) one of the workers
* has stopped working (which is indicated in the .working member of
* struct thread_params).
+ *
* When a work thread has completed its work, it sets .working to 0 and
* signals the main thread and waits on the condition that .data_ready
* becomes 1.
+ *
+ * The main thread steals half of the work from the worker that has
+ * the most work left to hand it to the idle worker.
*/
struct thread_params {
return;
}
if (progress > pack_to_stdout)
- fprintf(stderr, "Delta compression using up to %d threads.\n",
- delta_search_threads);
+ fprintf_ln(stderr, _("Delta compression using up to %d threads"),
+ delta_search_threads);
p = xcalloc(delta_search_threads, sizeof(*p));
/* Partition the work amongst work threads. */
ret = pthread_create(&p[i].thread, NULL,
threaded_find_deltas, &p[i]);
if (ret)
- die("unable to create thread: %s", strerror(ret));
+ die(_("unable to create thread: %s"), strerror(ret));
active_threads++;
}
free(p);
}
-#else
-#define ll_find_deltas(l, s, w, d, p) find_deltas(l, &s, w, d, p)
-#endif
-
static void add_tag_chain(const struct object_id *oid)
{
struct tag *tag;
if (packlist_find(&to_pack, oid->hash, NULL))
return;
- tag = lookup_tag(oid);
+ tag = lookup_tag(the_repository, oid);
while (1) {
if (!tag || parse_tag(tag) || !tag->tagged)
- die("unable to pack objects reachable from tag %s",
+ die(_("unable to pack objects reachable from tag %s"),
oid_to_hex(oid));
add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0);
uint32_t i, nr_deltas;
unsigned n;
+ if (use_delta_islands)
+ resolve_tree_islands(progress, &to_pack);
+
get_object_details();
/*
for (i = 0; i < to_pack.nr_objects; i++) {
struct object_entry *entry = to_pack.objects + i;
- if (entry->delta)
+ if (DELTA(entry))
/* This happens if we decided to reuse existing
* delta from a pack. "reuse_delta &&" is implied.
*/
continue;
- if (entry->size < 50)
+ if (!entry->type_valid ||
+ oe_size_less_than(&to_pack, entry, 50))
continue;
if (entry->no_try_delta)
if (!entry->preferred_base) {
nr_deltas++;
- if (entry->type < 0)
- die("unable to get type of object %s",
+ if (oe_type(entry) < 0)
+ die(_("unable to get type of object %s"),
oid_to_hex(&entry->idx.oid));
} else {
- if (entry->type < 0) {
+ if (oe_type(entry) < 0) {
/*
* This object is not found, but we
* don't have to include it anyway.
ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
stop_progress(&progress_state);
if (nr_done != nr_deltas)
- die("inconsistency with delta count");
+ die(_("inconsistency with delta count"));
}
free(delta_list);
}
if (!strcmp(k, "pack.threads")) {
delta_search_threads = git_config_int(k, v);
if (delta_search_threads < 0)
- die("invalid number of threads specified (%d)",
+ die(_("invalid number of threads specified (%d)"),
delta_search_threads);
-#ifdef NO_PTHREADS
- if (delta_search_threads != 1) {
- warning("no threads support, ignoring %s", k);
+ if (!HAVE_THREADS && delta_search_threads != 1) {
+ warning(_("no threads support, ignoring %s"), k);
delta_search_threads = 0;
}
-#endif
return 0;
}
if (!strcmp(k, "pack.indexversion")) {
pack_idx_opts.version = git_config_int(k, v);
if (pack_idx_opts.version > 2)
- die("bad pack.indexversion=%"PRIu32,
+ die(_("bad pack.indexversion=%"PRIu32),
pack_idx_opts.version);
return 0;
}
if (feof(stdin))
break;
if (!ferror(stdin))
- die("fgets returned NULL, not EOF, not error!");
+ die("BUG: fgets returned NULL, not EOF, not error!");
if (errno != EINTR)
die_errno("fgets");
clearerr(stdin);
}
if (line[0] == '-') {
if (get_oid_hex(line+1, &oid))
- die("expected edge object ID, got garbage:\n %s",
+ die(_("expected edge object ID, got garbage:\n %s"),
line);
add_preferred_base(&oid);
continue;
}
if (parse_oid_hex(line, &oid, &p))
- die("expected object ID, got garbage:\n %s", line);
+ die(_("expected object ID, got garbage:\n %s"), line);
add_preferred_base_object(p + 1);
- add_object_entry(&oid, 0, p + 1, 0);
+ add_object_entry(&oid, OBJ_NONE, p + 1, 0);
}
}
if (write_bitmap_index)
index_commit_for_bitmap(commit);
+
+ if (use_delta_islands)
+ propagate_island_marks(commit);
}
static void show_object(struct object *obj, const char *name, void *data)
add_preferred_base_object(name);
add_object_entry(&obj->oid, obj->type, name, 0);
obj->flags |= OBJECT_ADDED;
+
+ if (use_delta_islands) {
+ const char *p;
+ unsigned depth;
+ struct object_entry *ent;
+
+ /* the empty string is a root tree, which is depth 0 */
+ depth = *name ? 1 : 0;
+ for (p = strchr(name, '/'); p; p = strchr(p + 1, '/'))
+ depth++;
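+		/* e.g. "" -> 0, "Makefile" -> 1, "t/helper/test-tool.c" -> 3 */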
+
+ ent = packlist_find(&to_pack, obj->oid.hash, NULL);
+ if (ent && depth > oe_tree_depth(&to_pack, ent))
+ oe_set_tree_depth(&to_pack, ent, depth);
+ }
}
static void show_object__ma_allow_any(struct object *obj, const char *name, void *data)
memset(&in_pack, 0, sizeof(in_pack));
- for (p = get_packed_git(the_repository); p; p = p->next) {
+ for (p = get_all_packs(the_repository); p; p = p->next) {
struct object_id oid;
struct object *o;
if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
if (open_pack_index(p))
- die("cannot open pack index");
+ die(_("cannot open pack index"));
ALLOC_GROW(in_pack.array,
in_pack.nr + p->num_objects,
static int add_loose_object(const struct object_id *oid, const char *path,
void *data)
{
- enum object_type type = oid_object_info(oid, NULL);
+ enum object_type type = oid_object_info(the_repository, oid, NULL);
if (type < 0) {
- warning("loose object at %s could not be examined", path);
+ warning(_("loose object at %s could not be examined"), path);
return 0;
}
struct packed_git *p;
p = (last_found != (void *)1) ? last_found :
- get_packed_git(the_repository);
+ get_all_packs(the_repository);
while (p) {
if ((!p->pack_local || p->pack_keep ||
return 1;
}
if (p == last_found)
- p = get_packed_git(the_repository);
+ p = get_all_packs(the_repository);
else
p = p->next;
if (p == last_found)
uint32_t i;
struct object_id oid;
- for (p = get_packed_git(the_repository); p; p = p->next) {
+ for (p = get_all_packs(the_repository); p; p = p->next) {
if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
if (open_pack_index(p))
- die("cannot open pack index");
+ die(_("cannot open pack index"));
for (i = 0; i < p->num_objects; i++) {
nth_packed_object_oid(&oid, p, i);
!has_sha1_pack_kept_or_nonlocal(&oid) &&
!loosened_object_can_be_discarded(&oid, p->mtime))
if (force_object_loose(&oid, p->mtime))
- die("unable to force loose object");
+ die(_("unable to force loose object"));
}
}
}
static int get_object_list_from_bitmap(struct rev_info *revs)
{
- if (prepare_bitmap_walk(revs) < 0)
+ if (!(bitmap_git = prepare_bitmap_walk(revs)))
return -1;
if (pack_options_allow_reuse() &&
!reuse_partial_packfile_from_bitmap(
+ bitmap_git,
&reuse_packfile,
&reuse_packfile_objects,
&reuse_packfile_offset)) {
display_progress(progress_state, nr_result);
}
- traverse_bitmap_commit_list(&add_object_entry_from_bitmap);
+ traverse_bitmap_commit_list(bitmap_git, &add_object_entry_from_bitmap);
return 0;
}
struct rev_info revs;
char line[1000];
int flags = 0;
+ int save_warning;
- init_revisions(&revs, NULL);
+ repo_init_revisions(the_repository, &revs, NULL);
save_commit_buffer = 0;
+ revs.allow_exclude_promisor_objects_opt = 1;
setup_revisions(ac, av, &revs, NULL);
/* make sure shallows are read */
- is_repository_shallow();
+ is_repository_shallow(the_repository);
+
+ save_warning = warn_on_object_refname_ambiguity;
+ warn_on_object_refname_ambiguity = 0;
while (fgets(line, sizeof(line), stdin) != NULL) {
int len = strlen(line);
struct object_id oid;
if (get_oid_hex(line + 10, &oid))
die("not an SHA-1 '%s'", line + 10);
- register_shallow(&oid);
+ register_shallow(the_repository, &oid);
use_bitmap_index = 0;
continue;
}
- die("not a rev '%s'", line);
+ die(_("not a rev '%s'"), line);
}
if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
- die("bad revision '%s'", line);
+ die(_("bad revision '%s'"), line);
}
+ warn_on_object_refname_ambiguity = save_warning;
+
if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
return;
+ if (use_delta_islands)
+ load_delta_islands();
+
if (prepare_revision_walk(&revs))
- die("revision walk setup failed");
+ die(_("revision walk setup failed"));
mark_edges_uninteresting(&revs, show_edge);
if (!fn_show_object)
revs.ignore_missing_links = 1;
if (add_unseen_recent_objects_to_traversal(&revs,
unpack_unreachable_expiration))
- die("unable to add recent objects");
+ die(_("unable to add recent objects"));
if (prepare_revision_walk(&revs))
- die("revision walk setup failed");
+ die(_("revision walk setup failed"));
traverse_commit_list(&revs, record_recent_commit,
record_recent_object, NULL);
}
if (!names->nr)
return;
- for (p = get_packed_git(the_repository); p; p = p->next) {
+ for (p = get_all_packs(the_repository); p; p = p->next) {
const char *name = basename(p->pack_name);
int i;
{
char *c;
const char *val = arg;
+
+ BUG_ON_OPT_NEG(unset);
+
pack_idx_opts.version = strtoul(val, &c, 10);
if (pack_idx_opts.version > 2)
die(_("unsupported index version %s"), val);
int cmd_pack_objects(int argc, const char **argv, const char *prefix)
{
int use_internal_rev_list = 0;
- int thin = 0;
int shallow = 0;
int all_progress_implied = 0;
struct argv_array rp = ARGV_ARRAY_INIT;
OPT_BOOL(0, "all-progress-implied",
&all_progress_implied,
N_("similar to --all-progress when progress meter is shown")),
- { OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
+ { OPTION_CALLBACK, 0, "index-version", NULL, N_("<version>[,<offset>]"),
N_("write the pack index file in the specified idx format version"),
- 0, option_parse_index_version },
+ PARSE_OPT_NONEG, option_parse_index_version },
OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
N_("maximum size of each output pack file")),
OPT_BOOL(0, "local", &local,
N_("do not create an empty pack output")),
OPT_BOOL(0, "revs", &use_internal_rev_list,
N_("read revision arguments from standard input")),
- { OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,
- N_("limit the objects to those that are not yet packed"),
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
- { OPTION_SET_INT, 0, "all", &rev_list_all, NULL,
- N_("include objects reachable from any reference"),
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
- { OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,
- N_("include objects referred by reflog entries"),
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
- { OPTION_SET_INT, 0, "indexed-objects", &rev_list_index, NULL,
- N_("include objects referred to by the index"),
- PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
+ OPT_SET_INT_F(0, "unpacked", &rev_list_unpacked,
+ N_("limit the objects to those that are not yet packed"),
+ 1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "all", &rev_list_all,
+ N_("include objects reachable from any reference"),
+ 1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "reflog", &rev_list_reflog,
+ N_("include objects referred by reflog entries"),
+ 1, PARSE_OPT_NONEG),
+ OPT_SET_INT_F(0, "indexed-objects", &rev_list_index,
+ N_("include objects referred to by the index"),
+ 1, PARSE_OPT_NONEG),
OPT_BOOL(0, "stdout", &pack_to_stdout,
N_("output pack to stdout")),
OPT_BOOL(0, "include-tag", &include_tag,
option_parse_missing_action },
OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
N_("do not pack objects in promisor packfiles")),
+ OPT_BOOL(0, "delta-islands", &use_delta_islands,
+ N_("respect islands during delta compression")),
OPT_END(),
};
- check_replace_refs = 0;
+ if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
+ BUG("too many dfs states, increase OE_DFS_STATE_BITS");
+
+ read_replace_refs = 0;
reset_pack_idx_option(&pack_idx_opts);
git_config(git_pack_config, NULL);
if (pack_to_stdout != !base_name || argc)
usage_with_options(pack_usage, pack_objects_options);
+ if (depth >= (1 << OE_DEPTH_BITS)) {
+ warning(_("delta chain depth %d is too deep, forcing %d"),
+ depth, (1 << OE_DEPTH_BITS) - 1);
+ depth = (1 << OE_DEPTH_BITS) - 1;
+ }
+ if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) {
+ warning(_("pack.deltaCacheLimit is too high, forcing %d"),
+ (1U << OE_Z_DELTA_BITS) - 1);
+ cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1;
+ }
+
argv_array_push(&rp, "pack-objects");
if (thin) {
use_internal_rev_list = 1;
fetch_if_missing = 0;
argv_array_push(&rp, "--exclude-promisor-objects");
}
+ if (unpack_unreachable || keep_unreachable || pack_loose_unreachable)
+ use_internal_rev_list = 1;
if (!reuse_object)
reuse_delta = 0;
if (pack_compression_level == -1)
pack_compression_level = Z_DEFAULT_COMPRESSION;
else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
- die("bad pack compression level %d", pack_compression_level);
+ die(_("bad pack compression level %d"), pack_compression_level);
if (!delta_search_threads) /* --threads=0 means autodetect */
delta_search_threads = online_cpus();
-#ifdef NO_PTHREADS
- if (delta_search_threads != 1)
- warning("no threads support, ignoring --threads");
-#endif
+ if (!HAVE_THREADS && delta_search_threads != 1)
+ warning(_("no threads support, ignoring --threads"));
if (!pack_to_stdout && !pack_size_limit)
pack_size_limit = pack_size_limit_cfg;
if (pack_to_stdout && pack_size_limit)
- die("--max-pack-size cannot be used to build a pack for transfer.");
+ die(_("--max-pack-size cannot be used to build a pack for transfer"));
if (pack_size_limit && pack_size_limit < 1024*1024) {
- warning("minimum pack size limit is 1 MiB");
+ warning(_("minimum pack size limit is 1 MiB"));
pack_size_limit = 1024*1024;
}
if (!pack_to_stdout && thin)
- die("--thin cannot be used to build an indexable pack.");
+ die(_("--thin cannot be used to build an indexable pack"));
if (keep_unreachable && unpack_unreachable)
- die("--keep-unreachable and --unpack-unreachable are incompatible.");
+ die(_("--keep-unreachable and --unpack-unreachable are incompatible"));
if (!rev_list_all || !rev_list_reflog || !rev_list_index)
unpack_unreachable_expiration = 0;
if (filter_options.choice) {
if (!pack_to_stdout)
- die("cannot use --filter without --stdout.");
+ die(_("cannot use --filter without --stdout"));
use_bitmap_index = 0;
}
use_bitmap_index = use_bitmap_index_default;
/* "hard" reasons not to use bitmaps; these just won't work at all */
- if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow())
+ if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow(the_repository))
use_bitmap_index = 0;
if (pack_to_stdout || !rev_list_all)
write_bitmap_index = 0;
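+	/*
+	 * Island marks recorded on a commit are propagated onward in
+	 * show_commit(); that presumably relies on the walk visiting a
+	 * commit before the objects that inherit its marks, hence the
+	 * topological order.
+	 */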
+ if (use_delta_islands)
+ argv_array_push(&rp, "--topo-order");
+
if (progress && all_progress_implied)
progress = 2;
add_extra_kept_packs(&keep_pack_list);
if (ignore_packed_keep_on_disk) {
struct packed_git *p;
- for (p = get_packed_git(the_repository); p; p = p->next)
+ for (p = get_all_packs(the_repository); p; p = p->next)
if (p->pack_local && p->pack_keep)
break;
if (!p) /* no keep-able packs found */
* it also covers non-local objects
*/
struct packed_git *p;
- for (p = get_packed_git(the_repository); p; p = p->next) {
+ for (p = get_all_packs(the_repository); p; p = p->next) {
if (!p->pack_local) {
have_non_local_packs = 1;
break;
}
}
+ prepare_packing_data(&to_pack);
+
if (progress)
- progress_state = start_progress(_("Counting objects"), 0);
+ progress_state = start_progress(_("Enumerating objects"), 0);
if (!use_internal_rev_list)
read_object_list_from_stdin();
else {
prepare_pack(window, depth);
write_pack_file();
if (progress)
- fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
- " reused %"PRIu32" (delta %"PRIu32")\n",
- written, written_delta, reused, reused_delta);
+ fprintf_ln(stderr,
+ _("Total %"PRIu32" (delta %"PRIu32"),"
+ " reused %"PRIu32" (delta %"PRIu32")"),
+ written, written_delta, reused, reused_delta);
return 0;
}