static unsigned long max_depth = 10;
static off_t max_packsize = (1LL << 32) - 1;
static int force_update;
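+/* zlib level used when deflating object data into the pack (pack.compression) */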
+static int pack_compression_level = Z_DEFAULT_COMPRESSION;
+static int pack_compression_seen;
/* Stats and misc. counters */
static uintmax_t alloc_count;
fputc('\n', rpt);
}
+static void dump_marks_helper(FILE *, uintmax_t, struct mark_set *);
+
static void write_crash_report(const char *err)
{
char *loc = git_path("fast_import_crash_%d", getpid());
write_branch_report(rpt, b);
}
+ if (first_tag) {
+ struct tag *tg;
+ fputc('\n', rpt);
+ fputs("Annotated Tags\n", rpt);
+ fputs("--------------\n", rpt);
+ for (tg = first_tag; tg; tg = tg->next_tag) {
+ fputs(sha1_to_hex(tg->sha1), rpt);
+ fputc(' ', rpt);
+ fputs(tg->name, rpt);
+ fputc('\n', rpt);
+ }
+ }
+
+ fputc('\n', rpt);
+ fputs("Marks\n", rpt);
+ fputs("-----\n", rpt);
+ if (mark_file)
+ fprintf(rpt, " exported to %s\n", mark_file);
+ else
+ dump_marks_helper(rpt, 0, marks);
+
fputc('\n', rpt);
fputs("-------------------\n", rpt);
fputs("END OF CRASH REPORT\n", rpt);
fclose(rpt);
}
+static void end_packfile(void);
+static void unkeep_all_packs(void);
+static void dump_marks(void);
+
static NORETURN void die_nicely(const char *err, va_list params)
{
static int zombie;
if (!zombie) {
zombie = 1;
write_crash_report(message);
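+ /* Salvage what we can: finish the current packfile, drop the
+ * .keep files, and write out the marks so the partial import
+ * remains usable.
+ */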
+ end_packfile();
+ unkeep_all_packs();
+ dump_marks();
}
exit(128);
}
if (b)
die("Invalid attempt to create duplicate branch: %s", name);
switch (check_ref_format(name)) {
- case 0: break; /* its valid */
- case -2: break; /* valid, but too few '/', allow anyway */
+ case 0: break; /* it's valid */
+ case CHECK_REF_FORMAT_ONELEVEL:
+ break; /* valid, but too few '/', allow anyway */
default:
die("Branch name doesn't conform to GIT standards: %s", name);
}
SHA1_Update(&ctx, (*c)->sha1, 20);
}
sha1write(f, pack_data->sha1, sizeof(pack_data->sha1));
- sha1close(f, NULL, 1);
+ sha1close(f, NULL, CSUM_FSYNC);
free(idx);
SHA1_Final(pack_data->sha1, &ctx);
return tmpfile;
keep_fd = open(name, O_RDWR|O_CREAT|O_EXCL, 0600);
if (keep_fd < 0)
die("cannot create keep file");
- write(keep_fd, keep_msg, strlen(keep_msg));
- close(keep_fd);
+ write_or_die(keep_fd, keep_msg, strlen(keep_msg));
+ if (close(keep_fd))
+ die("failed to write keep file");
snprintf(name, sizeof(name), "%s/pack/pack-%s.pack",
get_object_directory(), sha1_to_hex(pack_data->sha1));
struct branch *b;
struct tag *t;
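+ /* Release any mmap'd windows on this pack; its header and
+ * trailing SHA-1 are rewritten below and the file is then
+ * reopened under its final name.
+ */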
+ close_pack_windows(pack_data);
fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1,
pack_data->pack_name, object_count);
close(pack_data->pack_fd);
new_p = add_packed_git(idx_name, strlen(idx_name), 1);
if (!new_p)
die("core git rejected index %s", idx_name);
- new_p->windows = old_p->windows;
all_packs[pack_id] = new_p;
install_packed_git(new_p);
delta = NULL;
memset(&s, 0, sizeof(s));
- deflateInit(&s, zlib_compression_level);
+ deflateInit(&s, pack_compression_level);
if (delta) {
s.next_in = delta;
s.avail_in = deltalen;
delta = NULL;
memset(&s, 0, sizeof(s));
- deflateInit(&s, zlib_compression_level);
+ deflateInit(&s, pack_compression_level);
s.next_in = (void *)dat->buf;
s.avail_in = dat->len;
s.avail_out = deflateBound(&s, s.avail_in);
return 0;
}
+/* All calls must be guarded by find_object() or find_mark() to
+ * ensure the 'struct object_entry' passed was written by this
+ * process instance. We unpack the entry by the offset, avoiding
+ * the need for the corresponding .idx file. This unpacking rule
+ * works because we only use OBJ_REF_DELTA within the packfiles
+ * created by fast-import.
+ *
+ * oe must not be NULL. Such an oe usually comes from giving
+ * an unknown SHA-1 to find_object() or an undefined mark to
+ * find_mark(). Callers must test for this condition and use
+ * the standard read_sha1_file() when it happens.
+ *
+ * oe->pack_id must not be MAX_PACK_ID. Such an oe is usually from
+ * find_mark(), where the mark was reloaded from an existing marks
+ * file and is referencing an object that this fast-import process
+ * instance did not write out to a packfile. Callers must test for
+ * this condition and use read_sha1_file() instead.
+ */
static void *gfi_unpack_entry(
struct object_entry *oe,
unsigned long *sizep)
{
enum object_type type;
struct packed_git *p = all_packs[oe->pack_id];
- if (p == pack_data)
+ if (p == pack_data && p->pack_size < (pack_size + 20)) {
+ /* The object is stored in the packfile we are writing to
+ * and we have modified it since the last time we scanned
+ * back to read a previously written object. If an old
+ * window covered [p->pack_size, p->pack_size + 20) its
+ * data is stale and is not valid. Closing all windows
+ * and updating the packfile length ensures we can read
+ * the newly written data.
+ */
+ close_pack_windows(p);
+
+ /* We have to offer 20 bytes additional on the end of
+ * the packfile as the core unpacker code assumes the
+ * footer is present at the file end and must promise
+ * at least 20 bytes within any window it maps. But
+ * we don't actually create the footer here.
+ */
p->pack_size = pack_size + 20;
+ }
return unpack_entry(p, oe->offset, &type, sizep);
}
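+/* A hypothetical caller sketch following the rules above (not taken
+ * verbatim from this file):
+ *
+ *	struct object_entry *oe = find_object(sha1);
+ *	unsigned long size;
+ *	void *buf;
+ *
+ *	if (oe && oe->pack_id != MAX_PACK_ID)
+ *		buf = gfi_unpack_entry(oe, &size);
+ *	else {
+ *		enum object_type type;
+ *		buf = read_sha1_file(sha1, &type, &size);
+ *	}
+ */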
die("Not a tree: %s", sha1_to_hex(sha1));
t->delta_depth = myoe->depth;
buf = gfi_unpack_entry(myoe, &size);
+ if (!buf)
+ die("Can't load tree %s", sha1_to_hex(sha1));
} else {
enum object_type type;
buf = read_sha1_file(sha1, &type, &size);
struct ref_lock *lock;
unsigned char old_sha1[20];
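+ /* a branch whose SHA-1 is still null has nothing to update; leave its ref untouched */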
+ if (is_null_sha1(b->sha1))
+ return 0;
if (read_ref(b->name, old_sha1))
hashclr(old_sha1);
lock = lock_any_ref_for_update(b->name, old_sha1, 0);
f = fdopen(mark_fd, "w");
if (!f) {
+ int saved_errno = errno;
rollback_lock_file(&mark_lock);
failure |= error("Unable to write marks file %s: %s",
- mark_file, strerror(errno));
+ mark_file, strerror(saved_errno));
return;
}
+ /*
+ * Since the lock file was fdopen()'ed, it should not be close()'ed.
+ * Assign -1 to the lock file descriptor so that commit_lock_file()
+ * won't try to close() it.
+ */
+ mark_lock.fd = -1;
+
dump_marks_helper(f, 0, marks);
- fclose(f);
- if (commit_lock_file(&mark_lock))
+ if (ferror(f) || fclose(f)) {
+ int saved_errno = errno;
+ rollback_lock_file(&mark_lock);
failure |= error("Unable to write marks file %s: %s",
- mark_file, strerror(errno));
+ mark_file, strerror(saved_errno));
+ return;
+ }
+
+ if (commit_lock_file(&mark_lock)) {
+ int saved_errno = errno;
+ rollback_lock_file(&mark_lock);
+ failure |= error("Unable to commit marks file %s: %s",
+ mark_file, strerror(saved_errno));
+ return;
+ }
}
static int read_next_command(void)
ungetc(term_char, stdin);
}
-static void cmd_mark(void)
+static void parse_mark(void)
{
if (!prefixcmp(command_buf.buf, "mark :")) {
next_mark = strtoumax(command_buf.buf + 6, NULL, 10);
next_mark = 0;
}
-static void cmd_data(struct strbuf *sb)
+static void parse_data(struct strbuf *sb)
{
strbuf_reset(sb);
return ident;
}
-static void cmd_new_blob(void)
+static void parse_new_blob(void)
{
static struct strbuf buf = STRBUF_INIT;
read_next_command();
- cmd_mark();
- cmd_data(&buf);
+ parse_mark();
+ parse_data(&buf);
store_object(OBJ_BLOB, &buf, &last_blob, NULL, next_mark);
}
p = uq.buf;
}
read_next_command();
- cmd_data(&buf);
+ parse_data(&buf);
store_object(OBJ_BLOB, &buf, &last_blob, sha1, 0);
} else if (oe) {
if (oe->type != OBJ_BLOB)
load_tree(&b->branch_tree);
}
-static void cmd_from_commit(struct branch *b, char *buf, unsigned long size)
+static void parse_from_commit(struct branch *b, char *buf, unsigned long size)
{
if (!buf || size < 46)
die("Not a valid commit: %s", sha1_to_hex(b->sha1));
b->branch_tree.versions[1].sha1);
}
-static void cmd_from_existing(struct branch *b)
+static void parse_from_existing(struct branch *b)
{
if (is_null_sha1(b->sha1)) {
hashclr(b->branch_tree.versions[0].sha1);
buf = read_object_with_reference(b->sha1,
commit_type, &size, b->sha1);
- cmd_from_commit(b, buf, size);
+ parse_from_commit(b, buf, size);
free(buf);
}
}
-static int cmd_from(struct branch *b)
+static int parse_from(struct branch *b)
{
const char *from;
struct branch *s;
if (oe->pack_id != MAX_PACK_ID) {
unsigned long size;
char *buf = gfi_unpack_entry(oe, &size);
- cmd_from_commit(b, buf, size);
+ parse_from_commit(b, buf, size);
free(buf);
} else
- cmd_from_existing(b);
+ parse_from_existing(b);
} else if (!get_sha1(from, b->sha1))
- cmd_from_existing(b);
+ parse_from_existing(b);
else
die("Invalid ref name or SHA1 expression: %s", from);
return 1;
}
-static struct hash_list *cmd_merge(unsigned int *count)
+static struct hash_list *parse_merge(unsigned int *count)
{
struct hash_list *list = NULL, *n, *e = e;
const char *from;
return list;
}
-static void cmd_new_commit(void)
+static void parse_new_commit(void)
{
static struct strbuf msg = STRBUF_INIT;
struct branch *b;
b = new_branch(sp);
read_next_command();
- cmd_mark();
+ parse_mark();
if (!prefixcmp(command_buf.buf, "author ")) {
author = parse_ident(command_buf.buf + 7);
read_next_command();
}
if (!committer)
die("Expected committer but didn't get one");
- cmd_data(&msg);
+ parse_data(&msg);
read_next_command();
- cmd_from(b);
- merge_list = cmd_merge(&merge_count);
+ parse_from(b);
+ merge_list = parse_merge(&merge_count);
/* ensure the branch is active/loaded */
if (!b->branch_tree.tree || !max_active_branches) {
b->last_commit = object_count_by_type[OBJ_COMMIT];
}
-static void cmd_new_tag(void)
+static void parse_new_tag(void)
{
static struct strbuf msg = STRBUF_INIT;
char *sp;
/* tag payload/message */
read_next_command();
- cmd_data(&msg);
+ parse_data(&msg);
/* build the tag object */
strbuf_reset(&new_data);
t->pack_id = pack_id;
}
-static void cmd_reset_branch(void)
+static void parse_reset_branch(void)
{
struct branch *b;
char *sp;
else
b = new_branch(sp);
read_next_command();
- if (!cmd_from(b) && command_buf.len > 0)
+ parse_from(b);
+ if (command_buf.len > 0)
unread_command_buf = 1;
}
-static void cmd_checkpoint(void)
+static void parse_checkpoint(void)
{
if (object_count) {
cycle_packfile();
skip_optional_lf();
}
-static void cmd_progress(void)
+static void parse_progress(void)
{
fwrite(command_buf.buf, 1, command_buf.len, stdout);
fputc('\n', stdout);
fclose(f);
}
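+/* Configuration callback: honor pack.depth (clamped to MAX_DEPTH) and
+ * pack.compression, and hand everything else to git_default_config().
+ */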
+static int git_pack_config(const char *k, const char *v, void *cb)
+{
+ if (!strcmp(k, "pack.depth")) {
+ max_depth = git_config_int(k, v);
+ if (max_depth > MAX_DEPTH)
+ max_depth = MAX_DEPTH;
+ return 0;
+ }
+ if (!strcmp(k, "pack.compression")) {
+ int level = git_config_int(k, v);
+ if (level == -1)
+ level = Z_DEFAULT_COMPRESSION;
+ else if (level < 0 || level > Z_BEST_COMPRESSION)
+ die("bad pack compression level %d", level);
+ pack_compression_level = level;
+ pack_compression_seen = 1;
+ return 0;
+ }
+ return git_default_config(k, v, cb);
+}
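+/* As a purely illustrative example, a repository configured with
+ *
+ *	[pack]
+ *		depth = 5
+ *		compression = 9
+ *
+ * would limit delta chains to 5 entries and deflate object data at
+ * zlib level 9.
+ */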
+
static const char fast_import_usage[] =
"git-fast-import [--date-format=f] [--max-pack-size=n] [--depth=n] [--active-branches=n] [--export-marks=marks.file]";
{
unsigned int i, show_stats = 1;
- git_config(git_default_config);
+ setup_git_directory();
+ git_config(git_pack_config, NULL);
+ if (!pack_compression_seen && core_compression_seen)
+ pack_compression_level = core_compression_level;
+
alloc_objects(object_entry_alloc);
strbuf_init(&command_buf, 0);
atom_table = xcalloc(atom_table_sz, sizeof(struct atom_str*));
set_die_routine(die_nicely);
while (read_next_command() != EOF) {
if (!strcmp("blob", command_buf.buf))
- cmd_new_blob();
+ parse_new_blob();
else if (!prefixcmp(command_buf.buf, "commit "))
- cmd_new_commit();
+ parse_new_commit();
else if (!prefixcmp(command_buf.buf, "tag "))
- cmd_new_tag();
+ parse_new_tag();
else if (!prefixcmp(command_buf.buf, "reset "))
- cmd_reset_branch();
+ parse_reset_branch();
else if (!strcmp("checkpoint", command_buf.buf))
- cmd_checkpoint();
+ parse_checkpoint();
else if (!prefixcmp(command_buf.buf, "progress "))
- cmd_progress();
+ parse_progress();
else
die("Unsupported command: %s", command_buf.buf);
}