From: Junio C Hamano Date: Fri, 16 Mar 2012 15:19:18 +0000 (-0700) Subject: Merge "two fixes for fast-import's 'ls' command" from Jonathan X-Git-Tag: v1.7.10-rc2~10 X-Git-Url: https://git.lorimer.id.au/gitweb.git/diff_plain/5087aace2d5467046c7204e038009b30fdebbca6?ds=inline;hp=-c Merge "two fixes for fast-import's 'ls' command" from Jonathan Andrew Sayers noticed that the svn-fe | git fast-import pipeline mishandles a subversion history that copies the root directory to a sub-directory (e.g. doing `svn cp . trunk` to standardise your layout). As David Barr explained, the bug arises when the following command is sent to git fast-import: 'ls' SP ':1' SP LF Instead of reading back what is at the root of r1, it unconditionally reports the path as missing. After sleeping on it, here are two patches for 'maint'. One plugs a memory leak. The other ensures that trying to pass an empty path to the 'ls' command results in an error message that can help the frontend author instead of the silently broken conversion Andrew found. Then we can carefully add 'ls ""' support in 1.7.11. * commit 'refs/pull-request-tags/jn/maint-fast-import-empty-ls': fast-import: don't allow 'ls' of path with empty components fast-import: leakfix for 'ls' of dirty trees --- 5087aace2d5467046c7204e038009b30fdebbca6 diff --combined fast-import.c index c1486cabba,47f61f3cba..a85275dc68 --- a/fast-import.c +++ b/fast-import.c @@@ -170,12 -170,8 +170,12 @@@ Format of STDIN stream #define DEPTH_BITS 13 #define MAX_DEPTH ((1<name = pool_strdup(name); @@@ -855,17 -867,16 +855,17 @@@ static struct tree_content *dup_tree_co static void start_packfile(void) { - static char tmpfile[PATH_MAX]; + static char tmp_file[PATH_MAX]; struct packed_git *p; struct pack_header hdr; int pack_fd; - pack_fd = odb_mkstemp(tmpfile, sizeof(tmpfile), + pack_fd = odb_mkstemp(tmp_file, sizeof(tmp_file), "pack/tmp_pack_XXXXXX"); - p = xcalloc(1, sizeof(*p) + strlen(tmpfile) + 2); - strcpy(p->pack_name, tmpfile); + p = xcalloc(1, sizeof(*p) + strlen(tmp_file) + 2); + strcpy(p->pack_name, tmp_file); p->pack_fd = pack_fd; + p->do_not_close = 1; pack_file = sha1fd(pack_fd, p->pack_name); hdr.hdr_signature = htonl(PACK_SIGNATURE); @@@ -899,7 -910,7 +899,7 @@@ static const char *create_index(void if (c != last) die("internal consistency error creating the index"); - tmpfile = write_idx_file(NULL, idx, object_count, pack_data->sha1); + tmpfile = write_idx_file(NULL, idx, object_count, &pack_idx_opts, pack_data->sha1); free(idx); return tmpfile; } @@@ -1020,7 -1031,7 +1020,7 @@@ static int store_object unsigned char sha1[20]; unsigned long hdrlen, deltalen; git_SHA_CTX c; - z_stream s; + git_zstream s; hdrlen = sprintf((char *)hdr,"%s %lu", typename(type), (unsigned long)dat->len) + 1; @@@ -1046,7 -1057,6 +1046,7 @@@ } if (last && last->data.buf && last->depth < max_depth && dat->len > 20) { + delta_count_attempts_by_type[type]++; delta = diff_delta(last->data.buf, last->data.len, dat->buf, dat->len, &deltalen, dat->len - 20); @@@ -1054,7 -1064,7 +1054,7 @@@ delta = NULL; memset(&s, 0, sizeof(s)); - deflateInit(&s, pack_compression_level); + git_deflate_init(&s, pack_compression_level); if (delta) { s.next_in = delta; s.avail_in = deltalen; @@@ -1062,11 -1072,11 +1062,11 @@@ s.next_in = (void *)dat->buf; s.avail_in = dat->len; } - s.avail_out = deflateBound(&s, s.avail_in); + s.avail_out = git_deflate_bound(&s, s.avail_in); s.next_out = out = xmalloc(s.avail_out); - while (deflate(&s, Z_FINISH) == Z_OK) - /* nothing */; - deflateEnd(&s); + while (git_deflate(&s, 
Z_FINISH) == Z_OK) + ; /* nothing */ + git_deflate_end(&s); /* Determine if we should auto-checkpoint. */ if ((max_packsize && (pack_size + 60 + s.total_out) > max_packsize) @@@ -1082,14 -1092,14 +1082,14 @@@ delta = NULL; memset(&s, 0, sizeof(s)); - deflateInit(&s, pack_compression_level); + git_deflate_init(&s, pack_compression_level); s.next_in = (void *)dat->buf; s.avail_in = dat->len; - s.avail_out = deflateBound(&s, s.avail_in); + s.avail_out = git_deflate_bound(&s, s.avail_in); s.next_out = out = xrealloc(out, s.avail_out); - while (deflate(&s, Z_FINISH) == Z_OK) - /* nothing */; - deflateEnd(&s); + while (git_deflate(&s, Z_FINISH) == Z_OK) + ; /* nothing */ + git_deflate_end(&s); } } @@@ -1143,11 -1153,17 +1143,11 @@@ return 0; } -static void truncate_pack(off_t to, git_SHA_CTX *ctx) +static void truncate_pack(struct sha1file_checkpoint *checkpoint) { - if (ftruncate(pack_data->pack_fd, to) - || lseek(pack_data->pack_fd, to, SEEK_SET) != to) + if (sha1file_truncate(pack_file, checkpoint)) die_errno("cannot truncate pack to skip duplicate"); - pack_size = to; - - /* yes this is a layering violation */ - pack_file->total = to; - pack_file->offset = 0; - pack_file->ctx = *ctx; + pack_size = checkpoint->offset; } static void stream_blob(uintmax_t len, unsigned char *sha1out, uintmax_t mark) @@@ -1160,8 -1176,8 +1160,8 @@@ unsigned long hdrlen; off_t offset; git_SHA_CTX c; - git_SHA_CTX pack_file_ctx; - z_stream s; + git_zstream s; + struct sha1file_checkpoint checkpoint; int status = Z_OK; /* Determine if we should auto-checkpoint. */ @@@ -1169,8 -1185,11 +1169,8 @@@ || (pack_size + 60 + len) < pack_size) cycle_packfile(); - offset = pack_size; - - /* preserve the pack_file SHA1 ctx in case we have to truncate later */ - sha1flush(pack_file); - pack_file_ctx = pack_file->ctx; + sha1file_checkpoint(pack_file, &checkpoint); + offset = checkpoint.offset; hdrlen = snprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1; if (out_sz <= hdrlen) @@@ -1182,7 -1201,7 +1182,7 @@@ crc32_begin(pack_file); memset(&s, 0, sizeof(s)); - deflateInit(&s, pack_compression_level); + git_deflate_init(&s, pack_compression_level); hdrlen = encode_in_pack_object_header(OBJ_BLOB, len, out_buf); if (out_sz <= hdrlen) @@@ -1204,7 -1223,7 +1204,7 @@@ len -= n; } - status = deflate(&s, len ? 0 : Z_FINISH); + status = git_deflate(&s, len ? 0 : Z_FINISH); if (!s.avail_out || status == Z_STREAM_END) { size_t n = s.next_out - out_buf; @@@ -1223,7 -1242,7 +1223,7 @@@ die("unexpected deflate failure: %d", status); } } - deflateEnd(&s); + git_deflate_end(&s); git_SHA1_Final(sha1, &c); if (sha1out) @@@ -1236,14 -1255,14 +1236,14 @@@ if (e->idx.offset) { duplicate_count_by_type[OBJ_BLOB]++; - truncate_pack(offset, &pack_file_ctx); + truncate_pack(&checkpoint); } else if (find_sha1_pack(sha1, packed_git)) { e->type = OBJ_BLOB; e->pack_id = MAX_PACK_ID; e->idx.offset = 1; /* just not zero! 
*/ duplicate_count_by_type[OBJ_BLOB]++; - truncate_pack(offset, &pack_file_ctx); + truncate_pack(&checkpoint); } else { e->depth = 0; @@@ -1409,9 -1428,8 +1409,9 @@@ static void mktree(struct tree_content struct tree_entry *e = t->entries[i]; if (!e->versions[v].mode) continue; - strbuf_addf(b, "%o %s%c", (unsigned int)e->versions[v].mode, - e->name->str_dat, '\0'); + strbuf_addf(b, "%o %s%c", + (unsigned int)(e->versions[v].mode & ~NO_DELTA), + e->name->str_dat, '\0'); strbuf_add(b, e->versions[v].sha1, 20); } } @@@ -1421,7 -1439,7 +1421,7 @@@ static void store_tree(struct tree_entr struct tree_content *t = root->tree; unsigned int i, j, del; struct last_object lo = { STRBUF_INIT, 0, 0, /* no_swap */ 1 }; - struct object_entry *le; + struct object_entry *le = NULL; if (!is_null_sha1(root->versions[1].sha1)) return; @@@ -1431,8 -1449,7 +1431,8 @@@ store_tree(t->entries[i]); } - le = find_object(root->versions[0].sha1); + if (!(root->versions[0].mode & NO_DELTA)) + le = find_object(root->versions[0].sha1); if (S_ISDIR(root->versions[0].mode) && le && le->pack_id == pack_id) { mktree(t, 0, &old_tree); lo.data = old_tree; @@@ -1466,7 -1483,6 +1466,7 @@@ static void tree_content_replace { if (!S_ISDIR(mode)) die("Root cannot be a non-directory"); + hashclr(root->versions[0].sha1); hashcpy(root->versions[1].sha1, sha1); if (root->tree) release_tree_content_recursive(root->tree); @@@ -1511,23 -1527,6 +1511,23 @@@ static int tree_content_set if (e->tree) release_tree_content_recursive(e->tree); e->tree = subtree; + + /* + * We need to leave e->versions[0].sha1 alone + * to avoid modifying the preimage tree used + * when writing out the parent directory. + * But after replacing the subdir with a + * completely different one, it's not a good + * delta base any more, and besides, we've + * thrown away the tree entries needed to + * make a delta against it. + * + * So let's just explicitly disable deltas + * for the subtree. 
+ */ + if (S_ISDIR(e->versions[0].mode)) + e->versions[0].mode |= NO_DELTA; + hashclr(root->versions[1].sha1); return 1; } @@@ -1641,6 -1640,8 +1641,8 @@@ static int tree_content_get n = slash1 - p; else n = strlen(p); + if (!n) + die("Empty path component found in input"); if (!root->tree) load_tree(root); @@@ -1982,41 -1983,32 +1984,41 @@@ static int validate_raw_date(const cha static char *parse_ident(const char *buf) { - const char *gt; + const char *ltgt; size_t name_len; char *ident; - gt = strrchr(buf, '>'); - if (!gt) + /* ensure there is a space delimiter even if there is no name */ + if (*buf == '<') + --buf; + + ltgt = buf + strcspn(buf, "<>"); + if (*ltgt != '<') + die("Missing < in ident string: %s", buf); + if (ltgt != buf && ltgt[-1] != ' ') + die("Missing space before < in ident string: %s", buf); + ltgt = ltgt + 1 + strcspn(ltgt + 1, "<>"); + if (*ltgt != '>') die("Missing > in ident string: %s", buf); - gt++; - if (*gt != ' ') + ltgt++; + if (*ltgt != ' ') die("Missing space after > in ident string: %s", buf); - gt++; - name_len = gt - buf; + ltgt++; + name_len = ltgt - buf; ident = xmalloc(name_len + 24); strncpy(ident, buf, name_len); switch (whenspec) { case WHENSPEC_RAW: - if (validate_raw_date(gt, ident + name_len, 24) < 0) - die("Invalid raw date \"%s\" in ident: %s", gt, buf); + if (validate_raw_date(ltgt, ident + name_len, 24) < 0) + die("Invalid raw date \"%s\" in ident: %s", ltgt, buf); break; case WHENSPEC_RFC2822: - if (parse_date(gt, ident + name_len, 24) < 0) - die("Invalid rfc2822 date \"%s\" in ident: %s", gt, buf); + if (parse_date(ltgt, ident + name_len, 24) < 0) + die("Invalid rfc2822 date \"%s\" in ident: %s", ltgt, buf); break; case WHENSPEC_NOW: - if (strcmp("now", gt)) + if (strcmp("now", ltgt)) die("Date in ident must be 'now': %s", buf); datestamp(ident + name_len, 24); break; @@@ -2164,11 -2156,6 +2166,11 @@@ static uintmax_t do_change_note_fanout if (tmp_hex_sha1_len == 40 && !get_sha1_hex(hex_sha1, sha1)) { /* This is a note entry */ + if (fanout == 0xff) { + /* Counting mode, no rename */ + num_notes++; + continue; + } construct_path_with_fanout(hex_sha1, fanout, realpath); if (!strcmp(fullpath, realpath)) { /* Note entry is in correct location */ @@@ -2375,7 -2362,7 +2377,7 @@@ static void file_change_cr(struct branc leaf.tree); } -static void note_change_n(struct branch *b, unsigned char old_fanout) +static void note_change_n(struct branch *b, unsigned char *old_fanout) { const char *p = command_buf.buf + 2; static struct strbuf uq = STRBUF_INIT; @@@ -2386,23 -2373,6 +2388,23 @@@ uint16_t inline_data = 0; unsigned char new_fanout; + /* + * When loading a branch, we don't traverse its tree to count the real + * number of notes (too expensive to do this for all non-note refs). + * This means that recently loaded notes refs might incorrectly have + * b->num_notes == 0, and consequently, old_fanout might be wrong. + * + * Fix this by traversing the tree and counting the number of notes + * when b->num_notes == 0. If the notes tree is truly empty, the + * calculation should not take long. + */ + if (b->num_notes == 0 && *old_fanout == 0) { + /* Invoke change_note_fanout() in "counting mode". */ + b->num_notes = change_note_fanout(&b->branch_tree, 0xff); + *old_fanout = convert_num_notes_to_fanout(b->num_notes); + } + + /* Now parse the notemodify command. 
*/ /* or 'inline' */ if (*p == ':') { char *x; @@@ -2424,8 -2394,6 +2426,8 @@@ /* */ s = lookup_branch(p); if (s) { + if (is_null_sha1(s->sha1)) + die("Can't add a note on empty branch."); hashcpy(commit_sha1, s->sha1); } else if (*p == ':') { uintmax_t commit_mark = strtoumax(p + 1, NULL, 10); @@@ -2463,7 -2431,7 +2465,7 @@@ typename(type), command_buf.buf); } - construct_path_with_fanout(sha1_to_hex(commit_sha1), old_fanout, path); + construct_path_with_fanout(sha1_to_hex(commit_sha1), *old_fanout, path); if (tree_content_remove(&b->branch_tree, path, NULL)) b->num_notes--; @@@ -2650,7 -2618,7 +2652,7 @@@ static void parse_new_commit(void else if (!prefixcmp(command_buf.buf, "C ")) file_change_cr(b, 0); else if (!prefixcmp(command_buf.buf, "N ")) - note_change_n(b, prev_fanout); + note_change_n(b, &prev_fanout); else if (!strcmp("deleteall", command_buf.buf)) file_change_deleteall(b); else if (!prefixcmp(command_buf.buf, "ls ")) @@@ -2712,7 -2680,7 +2714,7 @@@ static void parse_new_tag(void /* Obtain the new tag name from the rest of our command */ sp = strchr(command_buf.buf, ' ') + 1; t = pool_alloc(sizeof(struct tag)); - t->next_tag = NULL; + memset(t, 0, sizeof(struct tag)); t->name = pool_strdup(sp); if (last_tag) last_tag->next_tag = t; @@@ -2727,8 -2695,6 +2729,8 @@@ from = strchr(command_buf.buf, ' ') + 1; s = lookup_branch(from); if (s) { + if (is_null_sha1(s->sha1)) + die("Can't tag an empty branch."); hashcpy(sha1, s->sha1); type = OBJ_COMMIT; } else if (*from == ':') { @@@ -2738,13 -2704,13 +2740,13 @@@ type = oe->type; hashcpy(sha1, oe->idx.sha1); } else if (!get_sha1(from, sha1)) { - unsigned long size; - char *buf; - - buf = read_sha1_file(sha1, &type, &size); - if (!buf || size < 46) - die("Not a valid commit: %s", from); - free(buf); + struct object_entry *oe = find_object(sha1); + if (!oe) { + type = sha1_object_info(sha1, NULL); + if (type < 0) + die("Not a valid object: %s", from); + } else + type = oe->type; } else die("Invalid ref name or SHA1 expression: %s", from); read_next_command(); @@@ -2848,12 -2814,7 +2850,12 @@@ static void cat_blob(struct object_entr strbuf_release(&line); cat_blob_write(buf, size); cat_blob_write("\n", 1); - free(buf); + if (oe && oe->pack_id == pack_id) { + last_blob.offset = oe->idx.offset; + strbuf_attach(&last_blob.data, buf, size, size); + last_blob.depth = oe->depth; + } else + free(buf); } static void parse_cat_blob(void) @@@ -2889,7 -2850,7 +2891,7 @@@ static struct object_entry *dereference unsigned char sha1[20]) { unsigned long size; - void *buf = NULL; + char *buf = NULL; if (!oe) { enum object_type type = sha1_object_info(sha1, NULL); if (type < 0) @@@ -2982,7 -2943,7 +2984,7 @@@ static void print_ls(int mode, const un /* mode SP type SP object_name TAB path LF */ strbuf_reset(&line); strbuf_addf(&line, "%06o %s %s\t", - mode, type, sha1_to_hex(sha1)); + mode & ~NO_DELTA, type, sha1_to_hex(sha1)); quote_c_style(path, &line, NULL, 0); strbuf_addch(&line, '\n'); } @@@ -2993,7 -2954,7 +2995,7 @@@ static void parse_ls(struct branch *b { const char *p; struct tree_entry *root = NULL; - struct tree_entry leaf = {0}; + struct tree_entry leaf = {NULL}; /* ls SP ( SP)? 
*/ p = command_buf.buf + strlen("ls "); @@@ -3028,6 -2989,8 +3030,8 @@@ store_tree(&leaf); print_ls(leaf.versions[1].mode, leaf.versions[1].sha1, p); + if (leaf.tree) + release_tree_content_recursive(leaf.tree); if (!b || root != &b->branch_tree) release_tree_entry(root); } @@@ -3190,13 -3153,11 +3194,13 @@@ static int parse_one_feature(const cha option_export_marks(feature + 13); } else if (!strcmp(feature, "cat-blob")) { ; /* Don't die - this feature is supported */ - } else if (!prefixcmp(feature, "relative-marks")) { + } else if (!strcmp(feature, "relative-marks")) { relative_marks_paths = 1; - } else if (!prefixcmp(feature, "no-relative-marks")) { + } else if (!strcmp(feature, "no-relative-marks")) { relative_marks_paths = 0; - } else if (!prefixcmp(feature, "force")) { + } else if (!strcmp(feature, "done")) { + require_explicit_termination = 1; + } else if (!strcmp(feature, "force")) { force_update = 1; } else if (!strcmp(feature, "notes") || !strcmp(feature, "ls")) { ; /* do nothing; we have the feature */ @@@ -3252,16 -3213,20 +3256,16 @@@ static int git_pack_config(const char * return 0; } if (!strcmp(k, "pack.indexversion")) { - pack_idx_default_version = git_config_int(k, v); - if (pack_idx_default_version > 2) + pack_idx_opts.version = git_config_int(k, v); + if (pack_idx_opts.version > 2) die("bad pack.indexversion=%"PRIu32, - pack_idx_default_version); + pack_idx_opts.version); return 0; } if (!strcmp(k, "pack.packsizelimit")) { max_packsize = git_config_ulong(k, v); return 0; } - if (!strcmp(k, "core.bigfilethreshold")) { - long n = git_config_int(k, v); - big_file_threshold = 0 < n ? n : 0; - } return git_default_config(k, v, cb); } @@@ -3305,13 -3270,10 +3309,13 @@@ int main(int argc, const char **argv git_extract_argv0_path(argv[0]); + git_setup_gettext(); + if (argc == 2 && !strcmp(argv[1], "-h")) usage(fast_import_usage); setup_git_directory(); + reset_pack_idx_option(&pack_idx_opts); git_config(git_pack_config, NULL); if (!pack_compression_seen && core_compression_seen) pack_compression_level = core_compression_level; @@@ -3348,8 -3310,6 +3352,8 @@@ parse_reset_branch(); else if (!strcmp("checkpoint", command_buf.buf)) parse_checkpoint(); + else if (!strcmp("done", command_buf.buf)) + break; else if (!prefixcmp(command_buf.buf, "progress ")) parse_progress(); else if (!prefixcmp(command_buf.buf, "feature ")) @@@ -3369,9 -3329,6 +3373,9 @@@ if (!seen_data_command) parse_argv(); + if (require_explicit_termination && feof(stdin)) + die("stream ends early"); + end_packfile(); dump_branches(); @@@ -3393,10 -3350,10 +3397,10 @@@ fprintf(stderr, "---------------------------------------------------------------------\n"); fprintf(stderr, "Alloc'd objects: %10" PRIuMAX "\n", alloc_count); fprintf(stderr, "Total objects: %10" PRIuMAX " (%10" PRIuMAX " duplicates )\n", total_count, duplicate_count); - fprintf(stderr, " blobs : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas)\n", object_count_by_type[OBJ_BLOB], duplicate_count_by_type[OBJ_BLOB], delta_count_by_type[OBJ_BLOB]); - fprintf(stderr, " trees : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas)\n", object_count_by_type[OBJ_TREE], duplicate_count_by_type[OBJ_TREE], delta_count_by_type[OBJ_TREE]); - fprintf(stderr, " commits: %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas)\n", object_count_by_type[OBJ_COMMIT], duplicate_count_by_type[OBJ_COMMIT], delta_count_by_type[OBJ_COMMIT]); - fprintf(stderr, " tags : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas)\n", 
object_count_by_type[OBJ_TAG], duplicate_count_by_type[OBJ_TAG], delta_count_by_type[OBJ_TAG]); + fprintf(stderr, " blobs : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_BLOB], duplicate_count_by_type[OBJ_BLOB], delta_count_by_type[OBJ_BLOB], delta_count_attempts_by_type[OBJ_BLOB]); + fprintf(stderr, " trees : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_TREE], duplicate_count_by_type[OBJ_TREE], delta_count_by_type[OBJ_TREE], delta_count_attempts_by_type[OBJ_TREE]); + fprintf(stderr, " commits: %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_COMMIT], duplicate_count_by_type[OBJ_COMMIT], delta_count_by_type[OBJ_COMMIT], delta_count_attempts_by_type[OBJ_COMMIT]); + fprintf(stderr, " tags : %10" PRIuMAX " (%10" PRIuMAX " duplicates %10" PRIuMAX " deltas of %10" PRIuMAX" attempts)\n", object_count_by_type[OBJ_TAG], duplicate_count_by_type[OBJ_TAG], delta_count_by_type[OBJ_TAG], delta_count_attempts_by_type[OBJ_TAG]); fprintf(stderr, "Total branches: %10lu (%10lu loads )\n", branch_count, branch_load_count); fprintf(stderr, " marks: %10" PRIuMAX " (%10" PRIuMAX " unique )\n", (((uintmax_t)1) << marks->shift) * 1024, marks_set_count); fprintf(stderr, " atoms: %10u\n", atom_cnt); diff --combined t/t9300-fast-import.sh index 438aaf6b14,2cd0f0614d..0f5b5e5964 --- a/t/t9300-fast-import.sh +++ b/t/t9300-fast-import.sh @@@ -94,12 -94,6 +94,12 @@@ data <expect <actual && + test_cmp expect actual +' + cat >expect <input < 0 +0000 +data 0 +M 644 :6 new_blob +#pretend we got sha1 from fast-import +ls "new_blob" + +tag series-A-blob-3 +from $new_blob +data <expect <actual && + git cat-file tag tags/series-A-blob-3 >>actual && + test_cmp expect actual' + test_tick cat >input </dev/null >/dev/null +git prune 2>/dev/null >/dev/null + +cat >input < $GIT_COMMITTER_DATE +data </dev/null >/dev/null +git prune 2>/dev/null >/dev/null + +cat >input < $GIT_COMMITTER_DATE +data </dev/null >/dev/null +git prune 2>/dev/null >/dev/null + +cat >input < $GIT_COMMITTER_DATE +data <input < $GIT_COMMITTER_DATE +data <input <> $GIT_COMMITTER_DATE +data <input <input < $GIT_COMMITTER_DATE +data <input <output && test_cmp expect output' +cat >input < 1112912473 -0700 +data < 1112912473 -0700 +data <expect +g/b/f +g/b/h +EOF + +test_expect_success \ + 'L: nested tree copy does not corrupt deltas' \ + 'git fast-import tmp && + cat tmp | cut -f 2 >actual && + test_cmp expect actual && + git fsck `git rev-parse L2`' + +git update-ref -d refs/heads/L2 + ### ### series M ### @@@ -1306,6 -1087,45 +1306,45 @@@ test_expect_success M 040000 $subdir file3/ INPUT_END' + test_expect_success \ + 'N: reject foo/ syntax in copy source' \ + 'test_must_fail git fast-import <<-INPUT_END + commit refs/heads/N5C + committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE + data < $GIT_COMMITTER_DATE + data < $GIT_COMMITTER_DATE + data <expect.foo && @@@ -1987,23 -1807,6 +2026,23 @@@ test_expect_success 'Q: verify second note for second commit' \ 'git cat-file blob refs/notes/foobar:$commit2 >actual && test_cmp expect actual' +cat >input < $GIT_COMMITTER_DATE +data <expect && + + git fast-import --export-marks=io.marks <<-\EOF && + feature import-marks-if-exists=not_io.marks + EOF + test_cmp expect io.marks && + + blob=$(echo hi | git hash-object --stdin) && + + echo ":1 $blob" >io.marks && + echo ":1 $blob" >expect && 
+ echo ":2 $blob" >>expect && + + git fast-import --export-marks=io.marks <<-\EOF && + feature import-marks-if-exists=io.marks + blob + mark :2 + data 3 + hi + + EOF + test_cmp expect io.marks && + + echo ":3 $blob" >>expect && + + git fast-import --import-marks=io.marks \ + --export-marks=io.marks <<-\EOF && + feature import-marks-if-exists=not_io.marks + blob + mark :3 + data 3 + hi + + EOF + test_cmp expect io.marks && + + >expect && + + git fast-import --import-marks-if-exists=not_io.marks \ + --export-marks=io.marks <<-\EOF + feature import-marks-if-exists=io.marks + EOF + test_cmp expect io.marks +' + cat >input << EOF feature import-marks=marks.out feature export-marks=marks.new @@@ -2176,7 -1932,7 +2215,7 @@@ test_expect_success test_cmp marks.out marks.new' cat >input <input <expect <<-EOF && ${blob} blob 11 @@@ -2249,7 -2005,7 +2288,7 @@@ test_cmp expect actual ' -test_expect_success 'R: in-stream cat-blob-fd not respected' ' +test_expect_success NOT_MINGW 'R: in-stream cat-blob-fd not respected' ' echo hello >greeting && blob=$(git hash-object -w greeting) && cat >expect <<-EOF && @@@ -2270,7 -2026,7 +2309,7 @@@ test_cmp expect actual.1 ' -test_expect_success 'R: print new blob' ' +test_expect_success NOT_MINGW 'R: print new blob' ' blob=$(echo "yep yep yep" | git hash-object --stdin) && cat >expect <<-EOF && ${blob} blob 12 @@@ -2288,7 -2044,7 +2327,7 @@@ test_cmp expect actual ' -test_expect_success 'R: print new blob by sha1' ' +test_expect_success NOT_MINGW 'R: print new blob by sha1' ' blob=$(echo "a new blob named by sha1" | git hash-object --stdin) && cat >expect <<-EOF && ${blob} blob 25 @@@ -2480,48 -2236,6 +2519,48 @@@ test_expect_success 'R: quiet option re test_cmp empty output ' +test_expect_success 'R: feature done means terminating "done" is mandatory' ' + echo feature done | test_must_fail git fast-import && + test_must_fail git fast-import --done expect <<-\EOF && + OBJID + :000000 100644 OBJID OBJID A hello.c + :000000 100644 OBJID OBJID A hello2.c + EOF + git fast-import <<-EOF && + commit refs/heads/done-ends + committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE + data <actual && + test_cmp expect actual +' + cat >input <