#include "list-objects.h"
#include "progress.h"
#include "refs.h"
+#include "streaming.h"
#include "thread-utils.h"
static const char *pack_usage[] = {
- "git pack-objects --stdout [options...] [< ref-list | < object-list]",
- "git pack-objects [options...] base-name [< ref-list | < object-list]",
+ N_("git pack-objects --stdout [options...] [< ref-list | < object-list]"),
+ N_("git pack-objects [options...] base-name [< ref-list | < object-list]"),
NULL
};
static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int keep_unreachable, unpack_unreachable, include_tag;
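+/* cutoff timestamp from --unpack-unreachable=<time>; 0 means no limit */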
+static unsigned long unpack_unreachable_expiration;
static int local;
static int incremental;
static int ignore_packed_keep;
return stream.total_out;
}
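+/*
+ * Stream a large blob from the object store into the pack: read it in
+ * 16kB chunks through the streaming API, deflate each chunk, and write
+ * the compressed bytes with sha1write().  Returns the compressed size.
+ */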
+static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f,
+ const unsigned char *sha1)
+{
+ git_zstream stream;
+ unsigned char ibuf[1024 * 16];
+ unsigned char obuf[1024 * 16];
+ unsigned long olen = 0;
+
+ memset(&stream, 0, sizeof(stream));
+ git_deflate_init(&stream, pack_compression_level);
+
+ for (;;) {
+ ssize_t readlen;
+ int zret = Z_OK;
+ readlen = read_istream(st, ibuf, sizeof(ibuf));
+ if (readlen == -1)
+ die(_("unable to read %s"), sha1_to_hex(sha1));
+
+ stream.next_in = ibuf;
+ stream.avail_in = readlen;
+ while ((stream.avail_in || readlen == 0) &&
+ (zret == Z_OK || zret == Z_BUF_ERROR)) {
+ stream.next_out = obuf;
+ stream.avail_out = sizeof(obuf);
+ zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
+ sha1write(f, obuf, stream.next_out - obuf);
+ olen += stream.next_out - obuf;
+ }
+ if (stream.avail_in)
+ die(_("deflate error (%d)"), zret);
+ if (readlen == 0) {
+ if (zret != Z_STREAM_END)
+ die(_("deflate error (%d)"), zret);
+ break;
+ }
+ }
+ git_deflate_end(&stream);
+ return olen;
+}
+
/*
* we are going to reuse the existing object data as is. make
* sure it is not corrupt.
}
/* Return 0 if we will bust the pack-size limit */
-static unsigned long write_object(struct sha1file *f,
- struct object_entry *entry,
- off_t write_offset)
+static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
+ unsigned long limit, int usable_delta)
{
- unsigned long size, limit, datalen;
- void *buf;
+ unsigned long size, datalen;
unsigned char header[10], dheader[10];
unsigned hdrlen;
enum object_type type;
+ void *buf;
+ struct git_istream *st = NULL;
+
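+ /*
+ * A large blob written whole (no delta) is streamed from disk via
+ * open_istream() instead of being read into memory; smaller or
+ * deltified objects still come from read_sha1_file() or the cached
+ * delta data.
+ */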
+ if (!usable_delta) {
+ if (entry->type == OBJ_BLOB &&
+ entry->size > big_file_threshold &&
+ (st = open_istream(entry->idx.sha1, &type, &size, NULL)) != NULL)
+ buf = NULL;
+ else {
+ buf = read_sha1_file(entry->idx.sha1, &type, &size);
+ if (!buf)
+ die(_("unable to read %s"), sha1_to_hex(entry->idx.sha1));
+ }
+ /*
+ * make sure no cached delta data remains from a
+ * previous attempt before a pack split occurred.
+ */
+ free(entry->delta_data);
+ entry->delta_data = NULL;
+ entry->z_delta_size = 0;
+ } else if (entry->delta_data) {
+ size = entry->delta_size;
+ buf = entry->delta_data;
+ entry->delta_data = NULL;
+ type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ OBJ_OFS_DELTA : OBJ_REF_DELTA;
+ } else {
+ buf = get_delta(entry);
+ size = entry->delta_size;
+ type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ OBJ_OFS_DELTA : OBJ_REF_DELTA;
+ }
+
+ if (st) /* large blob case, just assume we don't compress well */
+ datalen = size;
+ else if (entry->z_delta_size)
+ datalen = entry->z_delta_size;
+ else
+ datalen = do_compress(&buf, size);
+
+ /*
+ * The object header is a byte of 'type' followed by zero or
+ * more bytes of length.
+ */
+ hdrlen = encode_in_pack_object_header(type, size, header);
+
+ if (type == OBJ_OFS_DELTA) {
+ /*
+ * Deltas with relative base contain an additional
+ * encoding of the relative offset for the delta
+ * base from this object's position in the pack.
+ */
+ off_t ofs = entry->idx.offset - entry->delta->idx.offset;
+ unsigned pos = sizeof(dheader) - 1;
+ dheader[pos] = ofs & 127;
+ while (ofs >>= 7)
+ dheader[--pos] = 128 | (--ofs & 127);
+ if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
+ if (st)
+ close_istream(st);
+ free(buf);
+ return 0;
+ }
+ sha1write(f, header, hdrlen);
+ sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hdrlen += sizeof(dheader) - pos;
+ } else if (type == OBJ_REF_DELTA) {
+ /*
+ * Deltas with a base reference contain
+ * an additional 20 bytes for the base sha1.
+ */
+ if (limit && hdrlen + 20 + datalen + 20 >= limit) {
+ if (st)
+ close_istream(st);
+ free(buf);
+ return 0;
+ }
+ sha1write(f, header, hdrlen);
+ sha1write(f, entry->delta->idx.sha1, 20);
+ hdrlen += 20;
+ } else {
+ if (limit && hdrlen + datalen + 20 >= limit) {
+ if (st)
+ close_istream(st);
+ free(buf);
+ return 0;
+ }
+ sha1write(f, header, hdrlen);
+ }
+ if (st) {
+ datalen = write_large_blob_data(st, f, entry->idx.sha1);
+ close_istream(st);
+ } else {
+ sha1write(f, buf, datalen);
+ free(buf);
+ }
+
+ return hdrlen + datalen;
+}
+
+/* Return 0 if we will bust the pack-size limit */
+static unsigned long write_reuse_object(struct sha1file *f, struct object_entry *entry,
+ unsigned long limit, int usable_delta)
+{
+ struct packed_git *p = entry->in_pack;
+ struct pack_window *w_curs = NULL;
+ struct revindex_entry *revidx;
+ off_t offset;
+ enum object_type type = entry->type;
+ unsigned long datalen;
+ unsigned char header[10], dheader[10];
+ unsigned hdrlen;
+
+ if (entry->delta)
+ type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ OBJ_OFS_DELTA : OBJ_REF_DELTA;
+ hdrlen = encode_in_pack_object_header(type, entry->size, header);
+
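+ /*
+ * The reverse index tells us where the next object starts, which gives
+ * the on-disk size of this object's packed representation.
+ */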
+ offset = entry->in_pack_offset;
+ revidx = find_pack_revindex(p, offset);
+ datalen = revidx[1].offset - offset;
+ if (!pack_to_stdout && p->index_version > 1 &&
+ check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
+ error("bad packed object CRC for %s", sha1_to_hex(entry->idx.sha1));
+ unuse_pack(&w_curs);
+ return write_no_reuse_object(f, entry, limit, usable_delta);
+ }
+
+ offset += entry->in_pack_header_size;
+ datalen -= entry->in_pack_header_size;
+
+ if (!pack_to_stdout && p->index_version == 1 &&
+ check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
+ error("corrupt packed object for %s", sha1_to_hex(entry->idx.sha1));
+ unuse_pack(&w_curs);
+ return write_no_reuse_object(f, entry, limit, usable_delta);
+ }
+
+ if (type == OBJ_OFS_DELTA) {
+ off_t ofs = entry->idx.offset - entry->delta->idx.offset;
+ unsigned pos = sizeof(dheader) - 1;
+ dheader[pos] = ofs & 127;
+ while (ofs >>= 7)
+ dheader[--pos] = 128 | (--ofs & 127);
+ if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
+ unuse_pack(&w_curs);
+ return 0;
+ }
+ sha1write(f, header, hdrlen);
+ sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hdrlen += sizeof(dheader) - pos;
+ reused_delta++;
+ } else if (type == OBJ_REF_DELTA) {
+ if (limit && hdrlen + 20 + datalen + 20 >= limit) {
+ unuse_pack(&w_curs);
+ return 0;
+ }
+ sha1write(f, header, hdrlen);
+ sha1write(f, entry->delta->idx.sha1, 20);
+ hdrlen += 20;
+ reused_delta++;
+ } else {
+ if (limit && hdrlen + datalen + 20 >= limit) {
+ unuse_pack(&w_curs);
+ return 0;
+ }
+ sha1write(f, header, hdrlen);
+ }
+ copy_pack_data(f, p, &w_curs, offset, datalen);
+ unuse_pack(&w_curs);
+ reused++;
+ return hdrlen + datalen;
+}
+
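+/*
+ * Write one object, reusing its existing packed representation when
+ * possible (write_reuse_object) and recompressing it from scratch
+ * otherwise (write_no_reuse_object).
+ */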
+/* Return 0 if we will bust the pack-size limit */
+static unsigned long write_object(struct sha1file *f,
+ struct object_entry *entry,
+ off_t write_offset)
+{
+ unsigned long limit, len;
int usable_delta, to_reuse;
if (!pack_to_stdout)
crc32_begin(f);
- type = entry->type;
-
/* apply size limit if limited packsize and not first object */
if (!pack_size_limit || !nr_written)
limit = 0;
to_reuse = 0; /* explicit */
else if (!entry->in_pack)
to_reuse = 0; /* can't reuse what we don't have */
- else if (type == OBJ_REF_DELTA || type == OBJ_OFS_DELTA)
+ else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
/* check_object() decided it for us ... */
to_reuse = usable_delta;
/* ... but pack split may override that */
- else if (type != entry->in_pack_type)
+ else if (entry->type != entry->in_pack_type)
to_reuse = 0; /* pack has delta which is unusable */
else if (entry->delta)
to_reuse = 0; /* we want to pack afresh */
* and we do not need to deltify it.
*/
- if (!to_reuse) {
- no_reuse:
- if (!usable_delta) {
- buf = read_sha1_file(entry->idx.sha1, &type, &size);
- if (!buf)
- die("unable to read %s", sha1_to_hex(entry->idx.sha1));
- /*
- * make sure no cached delta data remains from a
- * previous attempt before a pack split occurred.
- */
- free(entry->delta_data);
- entry->delta_data = NULL;
- entry->z_delta_size = 0;
- } else if (entry->delta_data) {
- size = entry->delta_size;
- buf = entry->delta_data;
- entry->delta_data = NULL;
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
- OBJ_OFS_DELTA : OBJ_REF_DELTA;
- } else {
- buf = get_delta(entry);
- size = entry->delta_size;
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
- OBJ_OFS_DELTA : OBJ_REF_DELTA;
- }
-
- if (entry->z_delta_size)
- datalen = entry->z_delta_size;
- else
- datalen = do_compress(&buf, size);
-
- /*
- * The object header is a byte of 'type' followed by zero or
- * more bytes of length.
- */
- hdrlen = encode_in_pack_object_header(type, size, header);
-
- if (type == OBJ_OFS_DELTA) {
- /*
- * Deltas with relative base contain an additional
- * encoding of the relative offset for the delta
- * base from this object's position in the pack.
- */
- off_t ofs = entry->idx.offset - entry->delta->idx.offset;
- unsigned pos = sizeof(dheader) - 1;
- dheader[pos] = ofs & 127;
- while (ofs >>= 7)
- dheader[--pos] = 128 | (--ofs & 127);
- if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
- free(buf);
- return 0;
- }
- sha1write(f, header, hdrlen);
- sha1write(f, dheader + pos, sizeof(dheader) - pos);
- hdrlen += sizeof(dheader) - pos;
- } else if (type == OBJ_REF_DELTA) {
- /*
- * Deltas with a base reference contain
- * an additional 20 bytes for the base sha1.
- */
- if (limit && hdrlen + 20 + datalen + 20 >= limit) {
- free(buf);
- return 0;
- }
- sha1write(f, header, hdrlen);
- sha1write(f, entry->delta->idx.sha1, 20);
- hdrlen += 20;
- } else {
- if (limit && hdrlen + datalen + 20 >= limit) {
- free(buf);
- return 0;
- }
- sha1write(f, header, hdrlen);
- }
- sha1write(f, buf, datalen);
- free(buf);
- }
- else {
- struct packed_git *p = entry->in_pack;
- struct pack_window *w_curs = NULL;
- struct revindex_entry *revidx;
- off_t offset;
-
- if (entry->delta)
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
- OBJ_OFS_DELTA : OBJ_REF_DELTA;
- hdrlen = encode_in_pack_object_header(type, entry->size, header);
-
- offset = entry->in_pack_offset;
- revidx = find_pack_revindex(p, offset);
- datalen = revidx[1].offset - offset;
- if (!pack_to_stdout && p->index_version > 1 &&
- check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
- error("bad packed object CRC for %s", sha1_to_hex(entry->idx.sha1));
- unuse_pack(&w_curs);
- goto no_reuse;
- }
-
- offset += entry->in_pack_header_size;
- datalen -= entry->in_pack_header_size;
- if (!pack_to_stdout && p->index_version == 1 &&
- check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
- error("corrupt packed object for %s", sha1_to_hex(entry->idx.sha1));
- unuse_pack(&w_curs);
- goto no_reuse;
- }
+ if (!to_reuse)
+ len = write_no_reuse_object(f, entry, limit, usable_delta);
+ else
+ len = write_reuse_object(f, entry, limit, usable_delta);
+ if (!len)
+ return 0;
- if (type == OBJ_OFS_DELTA) {
- off_t ofs = entry->idx.offset - entry->delta->idx.offset;
- unsigned pos = sizeof(dheader) - 1;
- dheader[pos] = ofs & 127;
- while (ofs >>= 7)
- dheader[--pos] = 128 | (--ofs & 127);
- if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
- unuse_pack(&w_curs);
- return 0;
- }
- sha1write(f, header, hdrlen);
- sha1write(f, dheader + pos, sizeof(dheader) - pos);
- hdrlen += sizeof(dheader) - pos;
- reused_delta++;
- } else if (type == OBJ_REF_DELTA) {
- if (limit && hdrlen + 20 + datalen + 20 >= limit) {
- unuse_pack(&w_curs);
- return 0;
- }
- sha1write(f, header, hdrlen);
- sha1write(f, entry->delta->idx.sha1, 20);
- hdrlen += 20;
- reused_delta++;
- } else {
- if (limit && hdrlen + datalen + 20 >= limit) {
- unuse_pack(&w_curs);
- return 0;
- }
- sha1write(f, header, hdrlen);
- }
- copy_pack_data(f, p, &w_curs, offset, datalen);
- unuse_pack(&w_curs);
- reused++;
- }
if (usable_delta)
written_delta++;
written++;
if (!pack_to_stdout)
entry->idx.crc32 = crc32_end(f);
- return hdrlen + datalen;
+ return len;
}
enum write_one_status {
for (i = 0; i < nr_objects; i++) {
struct object_entry *entry = sorted_by_offset[i];
check_object(entry);
- if (big_file_threshold <= entry->size)
+ if (big_file_threshold < entry->size)
entry->no_try_delta = 1;
}
if (!p->pack_local || p->pack_keep)
continue;
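+ /* honor --unpack-unreachable=<time>: skip packs older than the cutoff */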
+ if (unpack_unreachable_expiration &&
+ p->mtime < unpack_unreachable_expiration)
+ continue;
+
if (open_pack_index(p))
die("cannot open pack index");
}
die("not a rev '%s'", line);
}
- if (handle_revision_arg(line, &revs, flags, 1))
+ if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
die("bad revision '%s'", line);
}
return 0;
}
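+/*
+ * --unpack-unreachable may take an optional approxidate argument; only
+ * packs newer than that time have their unreachable objects loosened.
+ */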
+static int option_parse_unpack_unreachable(const struct option *opt,
+ const char *arg, int unset)
+{
+ if (unset) {
+ unpack_unreachable = 0;
+ unpack_unreachable_expiration = 0;
+ } else {
+ unpack_unreachable = 1;
+ if (arg)
+ unpack_unreachable_expiration = approxidate(arg);
+ }
+ return 0;
+}
+
static int option_parse_ulong(const struct option *opt,
const char *arg, int unset)
{
int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
struct option pack_objects_options[] = {
OPT_SET_INT('q', "quiet", &progress,
- "do not show progress meter", 0),
+ N_("do not show progress meter"), 0),
OPT_SET_INT(0, "progress", &progress,
- "show progress meter", 1),
+ N_("show progress meter"), 1),
OPT_SET_INT(0, "all-progress", &progress,
- "show progress meter during object writing phase", 2),
+ N_("show progress meter during object writing phase"), 2),
OPT_BOOL(0, "all-progress-implied",
&all_progress_implied,
- "similar to --all-progress when progress meter is shown"),
- { OPTION_CALLBACK, 0, "index-version", NULL, "version[,offset]",
- "write the pack index file in the specified idx format version",
+ N_("similar to --all-progress when progress meter is shown")),
+ { OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
+ N_("write the pack index file in the specified idx format version"),
0, option_parse_index_version },
OPT_ULONG(0, "max-pack-size", &pack_size_limit,
- "maximum size of each output pack file"),
+ N_("maximum size of each output pack file")),
OPT_BOOL(0, "local", &local,
- "ignore borrowed objects from alternate object store"),
+ N_("ignore borrowed objects from alternate object store")),
OPT_BOOL(0, "incremental", &incremental,
- "ignore packed objects"),
+ N_("ignore packed objects")),
OPT_INTEGER(0, "window", &window,
- "limit pack window by objects"),
+ N_("limit pack window by objects")),
OPT_ULONG(0, "window-memory", &window_memory_limit,
- "limit pack window by memory in addition to object limit"),
+ N_("limit pack window by memory in addition to object limit")),
OPT_INTEGER(0, "depth", &depth,
- "maximum length of delta chain allowed in the resulting pack"),
+ N_("maximum length of delta chain allowed in the resulting pack")),
OPT_BOOL(0, "reuse-delta", &reuse_delta,
- "reuse existing deltas"),
+ N_("reuse existing deltas")),
OPT_BOOL(0, "reuse-object", &reuse_object,
- "reuse existing objects"),
+ N_("reuse existing objects")),
OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
- "use OFS_DELTA objects"),
+ N_("use OFS_DELTA objects")),
OPT_INTEGER(0, "threads", &delta_search_threads,
- "use threads when searching for best delta matches"),
+ N_("use threads when searching for best delta matches")),
OPT_BOOL(0, "non-empty", &non_empty,
- "do not create an empty pack output"),
+ N_("do not create an empty pack output")),
OPT_BOOL(0, "revs", &use_internal_rev_list,
- "read revision arguments from standard input"),
+ N_("read revision arguments from standard input")),
{ OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,
- "limit the objects to those that are not yet packed",
+ N_("limit the objects to those that are not yet packed"),
PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
{ OPTION_SET_INT, 0, "all", &rev_list_all, NULL,
- "include objects reachable from any reference",
+ N_("include objects reachable from any reference"),
PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
{ OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,
- "include objects referred by reflog entries",
+ N_("include objects referred by reflog entries"),
PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
OPT_BOOL(0, "stdout", &pack_to_stdout,
- "output pack to stdout"),
+ N_("output pack to stdout")),
OPT_BOOL(0, "include-tag", &include_tag,
- "include tag objects that refer to objects to be packed"),
+ N_("include tag objects that refer to objects to be packed")),
OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
- "keep unreachable objects"),
- OPT_BOOL(0, "unpack-unreachable", &unpack_unreachable,
- "unpack unreachable objects"),
+ N_("keep unreachable objects")),
+ { OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),
+ N_("unpack unreachable objects newer than <time>"),
+ PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
OPT_BOOL(0, "thin", &thin,
- "create thin packs"),
+ N_("create thin packs")),
OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
- "ignore packs that have companion .keep file"),
+ N_("ignore packs that have companion .keep file")),
OPT_INTEGER(0, "compression", &pack_compression_level,
- "pack compression level"),
+ N_("pack compression level")),
OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
- "do not hide commits by grafts", 0),
+ N_("do not hide commits by grafts"), 0),
OPT_END(),
};