commit_msg
('from' sp committish lf)?
('merge' sp committish lf)*
- file_change*
+ (file_change | ls)*
lf?;
commit_msg ::= data;
+ ls ::= 'ls' sp '"' quoted(path) '"' lf;
+
file_change ::= file_clr
| file_del
| file_rnm
ts ::= # time since the epoch in seconds, ascii base10 notation;
tz ::= # GIT style timezone;
- # note: comments and cat requests may appear anywhere
+ # note: comments, ls and cat requests may appear anywhere
# in the input, except within a data command. Any form
# of the data command always escapes the related input
# from comment processing.
#
# In case it is not clear, the '#' that begins the comment
# must be the first character on that line (an lf
# preceded it).
#
+
cat_blob ::= 'cat-blob' sp (hexsha1 | idnum) lf;
+ ls_tree ::= 'ls' sp (hexsha1 | idnum) sp path_str lf;
comment ::= '#' not_lf* lf;
not_lf ::= # Any byte that is not ASCII newline (LF);
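
As an illustration of the rules above, a minimal stream fragment might look like this (the marks and paths are hypothetical):

# inside a commit: look up a path in the tree being built
ls "src/hello.c"
# anywhere outside a data command: look up a path in an existing tree-ish
ls :1 src/hello.c
# anywhere outside a data command: print the content of a blob
cat-blob :2

Each 'ls' reply is either '<mode> SP (blob | tree | commit) SP <sha1> HT <path> LF' or, when the path does not exist, 'missing SP <path> LF'; replies are written the same way as cat-blob responses.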
#define DEPTH_BITS 13
#define MAX_DEPTH ((1<<DEPTH_BITS)-1)
+/*
+ * We abuse the setuid bit on directories to mean "do not delta".
+ */
+#define NO_DELTA S_ISUID
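+/*
+ * The flag is set when a subdirectory is replaced wholesale by a
+ * different tree (see below), makes store_tree() skip the search
+ * for a delta base, and is masked out again before a mode is
+ * written into a tree object or reported by print_ls().
+ */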
+
struct object_entry {
struct pack_idx_entry idx;
struct object_entry *next;
/* Configured limits on output */
static unsigned long max_depth = 10;
static off_t max_packsize;
-static uintmax_t big_file_threshold = 512 * 1024 * 1024;
static int force_update;
static int pack_compression_level = Z_DEFAULT_COMPRESSION;
static int pack_compression_seen;
static uintmax_t object_count_by_type[1 << TYPE_BITS];
static uintmax_t duplicate_count_by_type[1 << TYPE_BITS];
static uintmax_t delta_count_by_type[1 << TYPE_BITS];
+static uintmax_t delta_count_attempts_by_type[1 << TYPE_BITS];
static unsigned long object_count;
static unsigned long branch_count;
static unsigned long branch_load_count;
static struct atom_str **atom_table;
/* The .pack file being generated */
+static struct pack_idx_option pack_idx_opts;
static unsigned int pack_id;
static struct sha1file *pack_file;
static struct packed_git *pack_data;
static const char *export_marks_file;
static const char *import_marks_file;
static int import_marks_file_from_stream;
+static int import_marks_file_ignore_missing;
static int relative_marks_paths;
/* Our last blob */
static uintmax_t next_mark;
static struct strbuf new_data = STRBUF_INIT;
static int seen_data_command;
+static int require_explicit_termination;
/* Signal handling */
static volatile sig_atomic_t checkpoint_requested;
static void parse_argv(void);
static void parse_cat_blob(void);
+static void parse_ls(struct branch *b);
static void write_branch_report(FILE *rpt, struct branch *b)
{
if (b)
die("Invalid attempt to create duplicate branch: %s", name);
- switch (check_ref_format(name)) {
- case 0: break; /* its valid */
- case CHECK_REF_FORMAT_ONELEVEL:
- break; /* valid, but too few '/', allow anyway */
- default:
+ if (check_refname_format(name, REFNAME_ALLOW_ONELEVEL))
die("Branch name doesn't conform to GIT standards: %s", name);
- }
b = pool_calloc(1, sizeof(struct branch));
b->name = pool_strdup(name);
if (c != last)
die("internal consistency error creating the index");
- tmpfile = write_idx_file(NULL, idx, object_count, pack_data->sha1);
+ tmpfile = write_idx_file(NULL, idx, object_count, &pack_idx_opts, pack_data->sha1);
free(idx);
return tmpfile;
}
unsigned char sha1[20];
unsigned long hdrlen, deltalen;
git_SHA_CTX c;
- z_stream s;
+ git_zstream s;
hdrlen = sprintf((char *)hdr,"%s %lu", typename(type),
(unsigned long)dat->len) + 1;
}
if (last && last->data.buf && last->depth < max_depth && dat->len > 20) {
+ delta_count_attempts_by_type[type]++;
delta = diff_delta(last->data.buf, last->data.len,
dat->buf, dat->len,
&deltalen, dat->len - 20);
delta = NULL;
memset(&s, 0, sizeof(s));
- deflateInit(&s, pack_compression_level);
+ git_deflate_init(&s, pack_compression_level);
if (delta) {
s.next_in = delta;
s.avail_in = deltalen;
s.next_in = (void *)dat->buf;
s.avail_in = dat->len;
}
- s.avail_out = deflateBound(&s, s.avail_in);
+ s.avail_out = git_deflate_bound(&s, s.avail_in);
s.next_out = out = xmalloc(s.avail_out);
- while (deflate(&s, Z_FINISH) == Z_OK)
- /* nothing */;
- deflateEnd(&s);
+ while (git_deflate(&s, Z_FINISH) == Z_OK)
+ ; /* nothing */
+ git_deflate_end(&s);
/* Determine if we should auto-checkpoint. */
if ((max_packsize && (pack_size + 60 + s.total_out) > max_packsize)
delta = NULL;
memset(&s, 0, sizeof(s));
- deflateInit(&s, pack_compression_level);
+ git_deflate_init(&s, pack_compression_level);
s.next_in = (void *)dat->buf;
s.avail_in = dat->len;
- s.avail_out = deflateBound(&s, s.avail_in);
+ s.avail_out = git_deflate_bound(&s, s.avail_in);
s.next_out = out = xrealloc(out, s.avail_out);
- while (deflate(&s, Z_FINISH) == Z_OK)
- /* nothing */;
- deflateEnd(&s);
+ while (git_deflate(&s, Z_FINISH) == Z_OK)
+ ; /* nothing */
+ git_deflate_end(&s);
}
}
off_t offset;
git_SHA_CTX c;
git_SHA_CTX pack_file_ctx;
- z_stream s;
+ git_zstream s;
int status = Z_OK;
/* Determine if we should auto-checkpoint. */
crc32_begin(pack_file);
memset(&s, 0, sizeof(s));
- deflateInit(&s, pack_compression_level);
+ git_deflate_init(&s, pack_compression_level);
hdrlen = encode_in_pack_object_header(OBJ_BLOB, len, out_buf);
if (out_sz <= hdrlen)
len -= n;
}
- status = deflate(&s, len ? 0 : Z_FINISH);
+ status = git_deflate(&s, len ? 0 : Z_FINISH);
if (!s.avail_out || status == Z_STREAM_END) {
size_t n = s.next_out - out_buf;
die("unexpected deflate failure: %d", status);
}
}
- deflateEnd(&s);
+ git_deflate_end(&s);
git_SHA1_Final(sha1, &c);
if (sha1out)
struct tree_entry *e = t->entries[i];
if (!e->versions[v].mode)
continue;
- strbuf_addf(b, "%o %s%c", (unsigned int)e->versions[v].mode,
- e->name->str_dat, '\0');
+ strbuf_addf(b, "%o %s%c",
+ (unsigned int)(e->versions[v].mode & ~NO_DELTA),
+ e->name->str_dat, '\0');
strbuf_add(b, e->versions[v].sha1, 20);
}
}
struct tree_content *t = root->tree;
unsigned int i, j, del;
struct last_object lo = { STRBUF_INIT, 0, 0, /* no_swap */ 1 };
- struct object_entry *le;
+ struct object_entry *le = NULL;
if (!is_null_sha1(root->versions[1].sha1))
return;
store_tree(t->entries[i]);
}
- le = find_object(root->versions[0].sha1);
+ if (!(root->versions[0].mode & NO_DELTA))
+ le = find_object(root->versions[0].sha1);
if (S_ISDIR(root->versions[0].mode) && le && le->pack_id == pack_id) {
mktree(t, 0, &old_tree);
lo.data = old_tree;
{
if (!S_ISDIR(mode))
die("Root cannot be a non-directory");
+ hashclr(root->versions[0].sha1);
hashcpy(root->versions[1].sha1, sha1);
if (root->tree)
release_tree_content_recursive(root->tree);
if (e->tree)
release_tree_content_recursive(e->tree);
e->tree = subtree;
+
+ /*
+ * We need to leave e->versions[0].sha1 alone
+ * to avoid modifying the preimage tree used
+ * when writing out the parent directory.
+ * But after replacing the subdir with a
+ * completely different one, it's not a good
+ * delta base any more, and besides, we've
+ * thrown away the tree entries needed to
+ * make a delta against it.
+ *
+ * So let's just explicitly disable deltas
+ * for the subtree.
+ */
+ if (S_ISDIR(e->versions[0].mode))
+ e->versions[0].mode |= NO_DELTA;
+
hashclr(root->versions[1].sha1);
return 1;
}
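
For illustration, this is the situation where a commit replaces an existing subdirectory wholesale, for example with 'M 040000 :7 lib' against a branch that already has a 'lib/' directory (the mark is hypothetical), or with a directory copy or rename: the old entries are gone, so the preimage tree can no longer serve as a delta base.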
{
char line[512];
FILE *f = fopen(import_marks_file, "r");
- if (!f)
+ if (f)
+ ;
+ else if (import_marks_file_ignore_missing && errno == ENOENT)
+ return; /* Marks file does not exist */
+ else
die_errno("cannot read '%s'", import_marks_file);
while (fgets(line, sizeof(line), f)) {
uintmax_t mark;
static char *parse_ident(const char *buf)
{
- const char *gt;
+ const char *ltgt;
size_t name_len;
char *ident;
- gt = strrchr(buf, '>');
- if (!gt)
+ /* ensure there is a space delimiter even if there is no name */
+ if (*buf == '<')
+ --buf;
+
+ ltgt = buf + strcspn(buf, "<>");
+ if (*ltgt != '<')
+ die("Missing < in ident string: %s", buf);
+ if (ltgt != buf && ltgt[-1] != ' ')
+ die("Missing space before < in ident string: %s", buf);
+ ltgt = ltgt + 1 + strcspn(ltgt + 1, "<>");
+ if (*ltgt != '>')
die("Missing > in ident string: %s", buf);
- gt++;
- if (*gt != ' ')
+ ltgt++;
+ if (*ltgt != ' ')
die("Missing space after > in ident string: %s", buf);
- gt++;
- name_len = gt - buf;
+ ltgt++;
+ name_len = ltgt - buf;
ident = xmalloc(name_len + 24);
strncpy(ident, buf, name_len);
switch (whenspec) {
case WHENSPEC_RAW:
- if (validate_raw_date(gt, ident + name_len, 24) < 0)
- die("Invalid raw date \"%s\" in ident: %s", gt, buf);
+ if (validate_raw_date(ltgt, ident + name_len, 24) < 0)
+ die("Invalid raw date \"%s\" in ident: %s", ltgt, buf);
break;
case WHENSPEC_RFC2822:
- if (parse_date(gt, ident + name_len, 24) < 0)
- die("Invalid rfc2822 date \"%s\" in ident: %s", gt, buf);
+ if (parse_date(ltgt, ident + name_len, 24) < 0)
+ die("Invalid rfc2822 date \"%s\" in ident: %s", ltgt, buf);
break;
case WHENSPEC_NOW:
- if (strcmp("now", gt))
+ if (strcmp("now", ltgt))
die("Date in ident must be 'now': %s", buf);
datestamp(ident + name_len, 24);
break;
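
For illustration, the rewritten ident parsing accepts both a named and a nameless ident (the addresses and dates here are hypothetical), e.g. 'C O Mitter <committer@example.com> 1234567890 +0000' as well as '<committer@example.com> 1234567890 +0000', while 'C O Mitter<committer@example.com> 1234567890 +0000' is rejected because the space before '<' is missing.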
/* <committish> */
s = lookup_branch(p);
if (s) {
+ if (is_null_sha1(s->sha1))
+ die("Can't add a note on empty branch.");
hashcpy(commit_sha1, s->sha1);
} else if (*p == ':') {
uintmax_t commit_mark = strtoumax(p + 1, NULL, 10);
note_change_n(b, prev_fanout);
else if (!strcmp("deleteall", command_buf.buf))
file_change_deleteall(b);
+ else if (!prefixcmp(command_buf.buf, "ls "))
+ parse_ls(b);
else {
unread_command_buf = 1;
break;
from = strchr(command_buf.buf, ' ') + 1;
s = lookup_branch(from);
if (s) {
+ if (is_null_sha1(s->sha1))
+ die("Can't tag an empty branch.");
hashcpy(sha1, s->sha1);
type = OBJ_COMMIT;
} else if (*from == ':') {
type = oe->type;
hashcpy(sha1, oe->idx.sha1);
} else if (!get_sha1(from, sha1)) {
- unsigned long size;
- char *buf;
-
- buf = read_sha1_file(sha1, &type, &size);
- if (!buf || size < 46)
- die("Not a valid commit: %s", from);
- free(buf);
+ struct object_entry *oe = find_object(sha1);
+ if (!oe) {
+ type = sha1_object_info(sha1, NULL);
+ if (type < 0)
+ die("Not a valid object: %s", from);
+ } else
+ type = oe->type;
} else
die("Invalid ref name or SHA1 expression: %s", from);
read_next_command();
strbuf_release(&line);
cat_blob_write(buf, size);
cat_blob_write("\n", 1);
- free(buf);
+ if (oe && oe->pack_id == pack_id) {
+ last_blob.offset = oe->idx.offset;
+ strbuf_attach(&last_blob.data, buf, size, size);
+ last_blob.depth = oe->depth;
+ } else
+ free(buf);
}
static void parse_cat_blob(void)
cat_blob(oe, sha1);
}
+static struct object_entry *dereference(struct object_entry *oe,
+ unsigned char sha1[20])
+{
+ unsigned long size;
+ char *buf = NULL;
+ if (!oe) {
+ enum object_type type = sha1_object_info(sha1, NULL);
+ if (type < 0)
+ die("object not found: %s", sha1_to_hex(sha1));
+ /* cache it! */
+ oe = insert_object(sha1);
+ oe->type = type;
+ oe->pack_id = MAX_PACK_ID;
+ oe->idx.offset = 1;
+ }
+ switch (oe->type) {
+ case OBJ_TREE: /* easy case. */
+ return oe;
+ case OBJ_COMMIT:
+ case OBJ_TAG:
+ break;
+ default:
+ die("Not a treeish: %s", command_buf.buf);
+ }
+
+ if (oe->pack_id != MAX_PACK_ID) { /* in a pack being written */
+ buf = gfi_unpack_entry(oe, &size);
+ } else {
+ enum object_type unused;
+ buf = read_sha1_file(sha1, &unused, &size);
+ }
+ if (!buf)
+ die("Can't load object %s", sha1_to_hex(sha1));
+
+ /* Peel one layer. */
+ switch (oe->type) {
+ case OBJ_TAG:
+ if (size < 40 + strlen("object ") ||
+ get_sha1_hex(buf + strlen("object "), sha1))
+ die("Invalid SHA1 in tag: %s", command_buf.buf);
+ break;
+ case OBJ_COMMIT:
+ if (size < 40 + strlen("tree ") ||
+ get_sha1_hex(buf + strlen("tree "), sha1))
+ die("Invalid SHA1 in commit: %s", command_buf.buf);
+ }
+
+ free(buf);
+ return find_object(sha1);
+}
+
+static struct object_entry *parse_treeish_dataref(const char **p)
+{
+ unsigned char sha1[20];
+ struct object_entry *e;
+
+ if (**p == ':') { /* <mark> */
+ char *endptr;
+ e = find_mark(strtoumax(*p + 1, &endptr, 10));
+ if (endptr == *p + 1)
+ die("Invalid mark: %s", command_buf.buf);
+ if (!e)
+ die("Unknown mark: %s", command_buf.buf);
+ *p = endptr;
+ hashcpy(sha1, e->idx.sha1);
+ } else { /* <sha1> */
+ if (get_sha1_hex(*p, sha1))
+ die("Invalid SHA1: %s", command_buf.buf);
+ e = find_object(sha1);
+ *p += 40;
+ }
+
+ while (!e || e->type != OBJ_TREE)
+ e = dereference(e, sha1);
+ return e;
+}
+
+static void print_ls(int mode, const unsigned char *sha1, const char *path)
+{
+ static struct strbuf line = STRBUF_INIT;
+
+ /* See show_tree(). */
+ const char *type =
+ S_ISGITLINK(mode) ? commit_type :
+ S_ISDIR(mode) ? tree_type :
+ blob_type;
+
+ if (!mode) {
+ /* missing SP path LF */
+ strbuf_reset(&line);
+ strbuf_addstr(&line, "missing ");
+ quote_c_style(path, &line, NULL, 0);
+ strbuf_addch(&line, '\n');
+ } else {
+ /* mode SP type SP object_name TAB path LF */
+ strbuf_reset(&line);
+ strbuf_addf(&line, "%06o %s %s\t",
+ mode & ~NO_DELTA, type, sha1_to_hex(sha1));
+ quote_c_style(path, &line, NULL, 0);
+ strbuf_addch(&line, '\n');
+ }
+ cat_blob_write(line.buf, line.len);
+}
+
+static void parse_ls(struct branch *b)
+{
+ const char *p;
+ struct tree_entry *root = NULL;
+ struct tree_entry leaf = {NULL};
+
+ /* ls SP (<treeish> SP)? <path> */
+ p = command_buf.buf + strlen("ls ");
+ if (*p == '"') {
+ if (!b)
+ die("Not in a commit: %s", command_buf.buf);
+ root = &b->branch_tree;
+ } else {
+ struct object_entry *e = parse_treeish_dataref(&p);
+ root = new_tree_entry();
+ hashcpy(root->versions[1].sha1, e->idx.sha1);
+ load_tree(root);
+ if (*p++ != ' ')
+ die("Missing space after tree-ish: %s", command_buf.buf);
+ }
+ if (*p == '"') {
+ static struct strbuf uq = STRBUF_INIT;
+ const char *endp;
+ strbuf_reset(&uq);
+ if (unquote_c_style(&uq, p, &endp))
+ die("Invalid path: %s", command_buf.buf);
+ if (*endp)
+ die("Garbage after path in: %s", command_buf.buf);
+ p = uq.buf;
+ }
+ tree_content_get(root, p, &leaf);
+ /*
+ * A directory in preparation would have a sha1 of zero
+ * until it is saved. Save, for simplicity.
+ */
+ if (S_ISDIR(leaf.versions[1].mode))
+ store_tree(&leaf);
+
+ print_ls(leaf.versions[1].mode, leaf.versions[1].sha1, p);
+ if (!b || root != &b->branch_tree)
+ release_tree_entry(root);
+}
+
static void checkpoint(void)
{
checkpoint_requested = 0;
return strbuf_detach(&abs_path, NULL);
}
-static void option_import_marks(const char *marks, int from_stream)
+static void option_import_marks(const char *marks,
+ int from_stream, int ignore_missing)
{
if (import_marks_file) {
if (from_stream)
import_marks_file = make_fast_import_path(marks);
safe_create_leading_directories_const(import_marks_file);
import_marks_file_from_stream = from_stream;
+ import_marks_file_ignore_missing = ignore_missing;
}
static void option_date_format(const char *fmt)
if (!prefixcmp(feature, "date-format=")) {
option_date_format(feature + 12);
} else if (!prefixcmp(feature, "import-marks=")) {
- option_import_marks(feature + 13, from_stream);
+ option_import_marks(feature + 13, from_stream, 0);
+ } else if (!prefixcmp(feature, "import-marks-if-exists=")) {
+ option_import_marks(feature + strlen("import-marks-if-exists="),
+ from_stream, 1);
} else if (!prefixcmp(feature, "export-marks=")) {
option_export_marks(feature + 13);
} else if (!strcmp(feature, "cat-blob")) {
; /* Don't die - this feature is supported */
- } else if (!prefixcmp(feature, "relative-marks")) {
+ } else if (!strcmp(feature, "relative-marks")) {
relative_marks_paths = 1;
- } else if (!prefixcmp(feature, "no-relative-marks")) {
+ } else if (!strcmp(feature, "no-relative-marks")) {
relative_marks_paths = 0;
- } else if (!prefixcmp(feature, "force")) {
+ } else if (!strcmp(feature, "done")) {
+ require_explicit_termination = 1;
+ } else if (!strcmp(feature, "force")) {
force_update = 1;
- } else if (!strcmp(feature, "notes")) {
+ } else if (!strcmp(feature, "notes") || !strcmp(feature, "ls")) {
; /* do nothing; we have the feature */
} else {
return 0;
return 0;
}
if (!strcmp(k, "pack.indexversion")) {
- pack_idx_default_version = git_config_int(k, v);
- if (pack_idx_default_version > 2)
+ pack_idx_opts.version = git_config_int(k, v);
+ if (pack_idx_opts.version > 2)
die("bad pack.indexversion=%"PRIu32,
- pack_idx_default_version);
+ pack_idx_opts.version);
return 0;
}
if (!strcmp(k, "pack.packsizelimit")) {
max_packsize = git_config_ulong(k, v);
return 0;
}
- if (!strcmp(k, "core.bigfilethreshold")) {
- long n = git_config_int(k, v);
- big_file_threshold = 0 < n ? n : 0;
- }
return git_default_config(k, v, cb);
}
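
For reference, the pack options read above correspond to configuration like the following in the importing repository (the values are only illustrative; index versions above 2 are rejected):

[pack]
	indexVersion = 2
	packSizeLimit = 1g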
usage(fast_import_usage);
setup_git_directory();
+ reset_pack_idx_option(&pack_idx_opts);
git_config(git_pack_config, NULL);
if (!pack_compression_seen && core_compression_seen)
pack_compression_level = core_compression_level;
while (read_next_command() != EOF) {
if (!strcmp("blob", command_buf.buf))
parse_new_blob();
+ else if (!prefixcmp(command_buf.buf, "ls "))
+ parse_ls(NULL);
else if (!prefixcmp(command_buf.buf, "commit "))
parse_new_commit();
else if (!prefixcmp(command_buf.buf, "tag "))
parse_reset_branch();
else if (!strcmp("checkpoint", command_buf.buf))
parse_checkpoint();
+ else if (!strcmp("done", command_buf.buf))
+ break;
else if (!prefixcmp(command_buf.buf, "progress "))
parse_progress();
else if (!prefixcmp(command_buf.buf, "feature "))
if (!seen_data_command)
parse_argv();
+ if (require_explicit_termination && feof(stdin))
+ die("stream ends early");
+
end_packfile();
dump_branches();
fprintf(stderr, "---------------------------------------------------------------------\n");
fprintf(stderr, "Alloc'd objects: %10" PRIuMAX "\n", alloc_count);
fprintf(stderr, "Total objects: %10" PRIuMAX " (%10" PRIuMAX " duplicates )\n", total_count, duplicate_count);
- fprintf(stderr, " blobs : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas)\n", object_count_by_type[OBJ_BLOB], duplicate_count_by_type[OBJ_BLOB], delta_count_by_type[OBJ_BLOB]);
- fprintf(stderr, " trees : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas)\n", object_count_by_type[OBJ_TREE], duplicate_count_by_type[OBJ_TREE], delta_count_by_type[OBJ_TREE]);
- fprintf(stderr, " commits: %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas)\n", object_count_by_type[OBJ_COMMIT], duplicate_count_by_type[OBJ_COMMIT], delta_count_by_type[OBJ_COMMIT]);
- fprintf(stderr, " tags : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas)\n", object_count_by_type[OBJ_TAG], duplicate_count_by_type[OBJ_TAG], delta_count_by_type[OBJ_TAG]);
+ fprintf(stderr, " blobs : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_BLOB], duplicate_count_by_type[OBJ_BLOB], delta_count_by_type[OBJ_BLOB], delta_count_attempts_by_type[OBJ_BLOB]);
+ fprintf(stderr, " trees : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_TREE], duplicate_count_by_type[OBJ_TREE], delta_count_by_type[OBJ_TREE], delta_count_attempts_by_type[OBJ_TREE]);
+ fprintf(stderr, " commits: %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_COMMIT], duplicate_count_by_type[OBJ_COMMIT], delta_count_by_type[OBJ_COMMIT], delta_count_attempts_by_type[OBJ_COMMIT]);
+ fprintf(stderr, " tags : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_TAG], duplicate_count_by_type[OBJ_TAG], delta_count_by_type[OBJ_TAG], delta_count_attempts_by_type[OBJ_TAG]);
fprintf(stderr, "Total branches: %10lu (%10lu loads )\n", branch_count, branch_load_count);
fprintf(stderr, " marks: %10" PRIuMAX " (%10" PRIuMAX " unique )\n", (((uintmax_t)1) << marks->shift) * 1024, marks_set_count);
fprintf(stderr, " atoms: %10u\n", atom_cnt);