Merge branch 'nd/no-the-index'
author Junio C Hamano <gitster@pobox.com>
Mon, 20 Aug 2018 18:33:53 +0000 (11:33 -0700)
committer Junio C Hamano <gitster@pobox.com>
Mon, 20 Aug 2018 18:33:53 +0000 (11:33 -0700)
The more library-ish parts of the codebase learned to work on the
in-core index-state instance that is passed in by their callers,
instead of always working on the singleton "the_index" instance.
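
As a rough illustration of the conversion pattern, here is a minimal,
self-contained C sketch; the struct layout and helper names are made up
for this example and are not git's actual API. A helper that used to
reach for the global singleton now takes the index it should operate on
from its caller:

    #include <stddef.h>
    #include <string.h>

    /* Illustrative stand-in for an in-core index; not git's real struct. */
    struct index_state {
            const char **entries;
            size_t nr;
    };

    static struct index_state the_index;

    /* Before: the helper silently depends on the global singleton. */
    static int path_is_tracked_old(const char *path)
    {
            size_t i;
            for (i = 0; i < the_index.nr; i++)
                    if (!strcmp(the_index.entries[i], path))
                            return 1;
            return 0;
    }

    /* After: the caller says which index to work on. */
    static int path_is_tracked(const struct index_state *istate,
                               const char *path)
    {
            size_t i;
            for (i = 0; i < istate->nr; i++)
                    if (!strcmp(istate->entries[i], path))
                            return 1;
            return 0;
    }

    int main(void)
    {
            static const char *paths[] = { "Makefile", "convert.c" };
            the_index.entries = paths;
            the_index.nr = 2;
            /* Old call site uses the global implicitly; new one names the index. */
            return !(path_is_tracked_old("Makefile") &&
                     path_is_tracked(&the_index, "convert.c"));
    }

Callers that really do mean the default in-core index pass &the_index
explicitly, while library code becomes usable against any index-state
(for example a submodule's or a temporary in-core index).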

* nd/no-the-index: (24 commits)
blame.c: remove implicit dependency on the_index
apply.c: remove implicit dependency on the_index
apply.c: make init_apply_state() take a struct repository
apply.c: pass struct apply_state to more functions
resolve-undo.c: use the right index instead of the_index
archive-*.c: use the right repository
archive.c: avoid access to the_index
grep: use the right index instead of the_index
attr: remove index from git_attr_set_direction()
entry.c: use the right index instead of the_index
submodule.c: use the right index instead of the_index
pathspec.c: use the right index instead of the_index
unpack-trees: avoid the_index in verify_absent()
unpack-trees: convert clear_ce_flags* to avoid the_index
unpack-trees: don't shadow global var the_index
unpack-trees: add a note about path invalidation
unpack-trees: remove 'extern' on function declaration
ls-files: correct index argument to get_convert_attr_ascii()
preload-index.c: use the right index instead of the_index
dir.c: remove an implicit dependency on the_index in pathspec code
...

18 files changed:
archive-tar.c
archive-zip.c
builtin/add.c
builtin/blame.c
builtin/cat-file.c
builtin/checkout.c
builtin/commit.c
builtin/grep.c
builtin/pack-objects.c
builtin/rm.c
builtin/update-index.c
convert.c
diff.c
dir.c
merge-recursive.c
read-cache.c
sequencer.c
sha1-file.c
diff --combined archive-tar.c
index 0bc50f6e8944243c4f66659786beb63d6df48df6,69ff23dfb0ae678a822279c38982a02cb613a423..7a535cba24a2a0535b412f1cfb3531ddde155c1e
@@@ -122,7 -122,7 +122,7 @@@ static int stream_blocked(const struct 
  
        st = open_istream(oid, &type, &sz, NULL);
        if (!st)
 -              return error("cannot stream blob %s", oid_to_hex(oid));
 +              return error(_("cannot stream blob %s"), oid_to_hex(oid));
        for (;;) {
                readlen = read_istream(st, buf, sizeof(buf));
                if (readlen <= 0)
@@@ -257,7 -257,7 +257,7 @@@ static int write_tar_entry(struct archi
                *header.typeflag = TYPEFLAG_REG;
                mode = (mode | ((mode & 0100) ? 0777 : 0666)) & ~tar_umask;
        } else {
 -              return error("unsupported file mode: 0%o (SHA1: %s)",
 +              return error(_("unsupported file mode: 0%o (SHA1: %s)"),
                             mode, oid_to_hex(oid));
        }
        if (pathlen > sizeof(header.name)) {
                memcpy(header.name, path, pathlen);
  
        if (S_ISREG(mode) && !args->convert &&
-           oid_object_info(the_repository, oid, &size) == OBJ_BLOB &&
+           oid_object_info(args->repo, oid, &size) == OBJ_BLOB &&
            size > big_file_threshold)
                buffer = NULL;
        else if (S_ISLNK(mode) || S_ISREG(mode)) {
                enum object_type type;
                buffer = object_file_to_archive(args, path, oid, old_mode, &type, &size);
                if (!buffer)
 -                      return error("cannot read %s", oid_to_hex(oid));
 +                      return error(_("cannot read %s"), oid_to_hex(oid));
        } else {
                buffer = NULL;
                size = 0;
@@@ -455,17 -455,17 +455,17 @@@ static int write_tar_filter_archive(con
        filter.in = -1;
  
        if (start_command(&filter) < 0)
 -              die_errno("unable to start '%s' filter", argv[0]);
 +              die_errno(_("unable to start '%s' filter"), argv[0]);
        close(1);
        if (dup2(filter.in, 1) < 0)
 -              die_errno("unable to redirect descriptor");
 +              die_errno(_("unable to redirect descriptor"));
        close(filter.in);
  
        r = write_tar_archive(ar, args);
  
        close(1);
        if (finish_command(&filter) != 0)
 -              die("'%s' filter reported error", argv[0]);
 +              die(_("'%s' filter reported error"), argv[0]);
  
        strbuf_release(&cmd);
        return r;
diff --combined archive-zip.c
index 024267ff0f771a73d4e80dec32934a8e5b8455f5,107da5f97ed3e37b66b298511159223affde4060..5a62351ab1a46f6a0827a7d41e794c00a4839edc
@@@ -310,11 -310,11 +310,11 @@@ static int write_zip_entry(struct archi
                if (is_utf8(path))
                        flags |= ZIP_UTF8;
                else
 -                      warning("Path is not valid UTF-8: %s", path);
 +                      warning(_("path is not valid UTF-8: %s"), path);
        }
  
        if (pathlen > 0xffff) {
 -              return error("path too long (%d chars, SHA1: %s): %s",
 +              return error(_("path too long (%d chars, SHA1: %s): %s"),
                                (int)pathlen, oid_to_hex(oid), path);
        }
  
                compressed_size = 0;
                buffer = NULL;
        } else if (S_ISREG(mode) || S_ISLNK(mode)) {
-               enum object_type type = oid_object_info(the_repository, oid,
+               enum object_type type = oid_object_info(args->repo, oid,
                                                        &size);
  
                method = 0;
                    size > big_file_threshold) {
                        stream = open_istream(oid, &type, &size, NULL);
                        if (!stream)
 -                              return error("cannot stream blob %s",
 +                              return error(_("cannot stream blob %s"),
                                             oid_to_hex(oid));
                        flags |= ZIP_STREAM;
                        out = buffer = NULL;
                        buffer = object_file_to_archive(args, path, oid, mode,
                                                        &type, &size);
                        if (!buffer)
 -                              return error("cannot read %s",
 +                              return error(_("cannot read %s"),
                                             oid_to_hex(oid));
                        crc = crc32(crc, buffer, size);
                        is_binary = entry_is_binary(path_without_prefix,
                }
                compressed_size = (method == 0) ? size : 0;
        } else {
 -              return error("unsupported file mode: 0%o (SHA1: %s)", mode,
 +              return error(_("unsupported file mode: 0%o (SHA1: %s)"), mode,
                                oid_to_hex(oid));
        }
  
                        zstream.avail_in = readlen;
                        result = git_deflate(&zstream, 0);
                        if (result != Z_OK)
 -                              die("deflate error (%d)", result);
 +                              die(_("deflate error (%d)"), result);
                        out_len = zstream.next_out - compressed;
  
                        if (out_len > 0) {
@@@ -602,7 -602,7 +602,7 @@@ static void dos_time(timestamp_t *times
        struct tm *t;
  
        if (date_overflows(*timestamp))
 -              die("timestamp too large for this system: %"PRItime,
 +              die(_("timestamp too large for this system: %"PRItime),
                    *timestamp);
        time = (time_t)*timestamp;
        t = localtime(&time);
diff --combined builtin/add.c
index ba1ff5689d07d1e40bcdd03a13e7c58752b28587,066623a195b8ba479f7c15c84d5483916ec3bc97..9916498a29bbd8fa7c5c5d8e7bd32e1dc184909b
@@@ -40,7 -40,7 +40,7 @@@ static void chmod_pathspec(struct paths
        for (i = 0; i < active_nr; i++) {
                struct cache_entry *ce = active_cache[i];
  
-               if (pathspec && !ce_path_match(ce, pathspec, NULL))
+               if (pathspec && !ce_path_match(&the_index, ce, pathspec, NULL))
                        continue;
  
                if (chmod_cache_entry(ce, flip) < 0)
@@@ -135,7 -135,7 +135,7 @@@ static int renormalize_tracked_files(co
                        continue; /* do not touch unmerged paths */
                if (!S_ISREG(ce->ce_mode) && !S_ISLNK(ce->ce_mode))
                        continue; /* do not touch non blobs */
-               if (pathspec && !ce_path_match(ce, pathspec, NULL))
+               if (pathspec && !ce_path_match(&the_index, ce, pathspec, NULL))
                        continue;
                retval |= add_file_to_cache(ce->name, flags | HASH_RENORMALIZE);
        }
@@@ -155,7 -155,7 +155,7 @@@ static char *prune_directory(struct dir
        i = dir->nr;
        while (--i >= 0) {
                struct dir_entry *entry = *src++;
-               if (dir_path_match(entry, pathspec, prefix, seen))
+               if (dir_path_match(&the_index, entry, pathspec, prefix, seen))
                        *dst++ = entry;
        }
        dir->nr = dst - dir->entries;
@@@ -304,8 -304,7 +304,8 @@@ static struct option builtin_add_option
        OPT_BOOL( 0 , "refresh", &refresh_only, N_("don't add, only refresh the index")),
        OPT_BOOL( 0 , "ignore-errors", &ignore_add_errors, N_("just skip files which cannot be added because of errors")),
        OPT_BOOL( 0 , "ignore-missing", &ignore_missing, N_("check if - even missing - files are ignored in dry run")),
 -      OPT_STRING( 0 , "chmod", &chmod_arg, N_("(+/-)x"), N_("override the executable bit of the listed files")),
 +      OPT_STRING(0, "chmod", &chmod_arg, "(+|-)x",
 +                 N_("override the executable bit of the listed files")),
        OPT_HIDDEN_BOOL(0, "warn-embedded-repo", &warn_on_embedded_repo,
                        N_("warn when adding an embedded repository")),
        OPT_END(),
diff --combined builtin/blame.c
index 97f6ecaf370dfdda1f0a9e5a1fe824a034fdbf61,cbbcb26f89764ef897e3fb1cd7abb26c4c3a5813..c2da673ac802b02609d54de3932fa8a2db221dbd
@@@ -410,7 -410,7 +410,7 @@@ static void parse_color_fields(const ch
        }
  
        if (next == EXPECT_COLOR)
 -              die (_("must end with a color"));
 +              die(_("must end with a color"));
  
        colorfield[colorfield_nr].hop = TIME_MAX;
        string_list_clear(&l, 0);
@@@ -988,6 -988,7 +988,7 @@@ parse_done
        sb.revs = &revs;
        sb.contents_from = contents_from;
        sb.reverse = reverse;
+       sb.repo = the_repository;
        setup_scoreboard(&sb, path, &o);
        lno = sb.num_lines;
  
diff --combined builtin/cat-file.c
index 08dced261420df86e6051928c0fbbdceafe64a75,c726ee10925b3489508da87235b182f24a8561ba..64ec1745ab2c20ef18a9292ef3b3c82efd46de17
@@@ -21,7 -21,6 +21,7 @@@ struct batch_options 
        int print_contents;
        int buffer_output;
        int all_objects;
 +      int unordered;
        int cmdmode; /* may be 'w' or 'c' for --filters or --textconv */
        const char *format;
  };
@@@ -40,7 -39,7 +40,7 @@@ static int filter_object(const char *pa
                             oid_to_hex(oid), path);
        if ((type == OBJ_BLOB) && S_ISREG(mode)) {
                struct strbuf strbuf = STRBUF_INIT;
-               if (convert_to_working_tree(path, *buf, *size, &strbuf)) {
+               if (convert_to_working_tree(&the_index, path, *buf, *size, &strbuf)) {
                        free(*buf);
                        *size = strbuf.len;
                        *buf = strbuf_detach(&strbuf, NULL);
@@@ -338,11 -337,11 +338,11 @@@ static void print_object_or_die(struct 
        }
  }
  
 -static void batch_object_write(const char *obj_name, struct batch_options *opt,
 +static void batch_object_write(const char *obj_name,
 +                             struct strbuf *scratch,
 +                             struct batch_options *opt,
                               struct expand_data *data)
  {
 -      struct strbuf buf = STRBUF_INIT;
 -
        if (!data->skip_object_info &&
            oid_object_info_extended(the_repository, &data->oid, &data->info,
                                     OBJECT_INFO_LOOKUP_REPLACE) < 0) {
                return;
        }
  
 -      strbuf_expand(&buf, opt->format, expand_format, data);
 -      strbuf_addch(&buf, '\n');
 -      batch_write(opt, buf.buf, buf.len);
 -      strbuf_release(&buf);
 +      strbuf_reset(scratch);
 +      strbuf_expand(scratch, opt->format, expand_format, data);
 +      strbuf_addch(scratch, '\n');
 +      batch_write(opt, scratch->buf, scratch->len);
  
        if (opt->print_contents) {
                print_object_or_die(opt, data);
        }
  }
  
 -static void batch_one_object(const char *obj_name, struct batch_options *opt,
 +static void batch_one_object(const char *obj_name,
 +                           struct strbuf *scratch,
 +                           struct batch_options *opt,
                             struct expand_data *data)
  {
        struct object_context ctx;
                return;
        }
  
 -      batch_object_write(obj_name, opt, data);
 +      batch_object_write(obj_name, scratch, opt, data);
  }
  
  struct object_cb_data {
        struct batch_options *opt;
        struct expand_data *expand;
 +      struct oidset *seen;
 +      struct strbuf *scratch;
  };
  
  static int batch_object_cb(const struct object_id *oid, void *vdata)
  {
        struct object_cb_data *data = vdata;
        oidcpy(&data->expand->oid, oid);
 -      batch_object_write(NULL, data->opt, data->expand);
 +      batch_object_write(NULL, data->scratch, data->opt, data->expand);
        return 0;
  }
  
 -static int batch_loose_object(const struct object_id *oid,
 -                            const char *path,
 -                            void *data)
 +static int collect_loose_object(const struct object_id *oid,
 +                              const char *path,
 +                              void *data)
  {
        oid_array_append(data, oid);
        return 0;
  }
  
 -static int batch_packed_object(const struct object_id *oid,
 -                             struct packed_git *pack,
 -                             uint32_t pos,
 -                             void *data)
 +static int collect_packed_object(const struct object_id *oid,
 +                               struct packed_git *pack,
 +                               uint32_t pos,
 +                               void *data)
  {
        oid_array_append(data, oid);
        return 0;
  }
  
 +static int batch_unordered_object(const struct object_id *oid, void *vdata)
 +{
 +      struct object_cb_data *data = vdata;
 +
 +      if (oidset_insert(data->seen, oid))
 +              return 0;
 +
 +      return batch_object_cb(oid, data);
 +}
 +
 +static int batch_unordered_loose(const struct object_id *oid,
 +                               const char *path,
 +                               void *data)
 +{
 +      return batch_unordered_object(oid, data);
 +}
 +
 +static int batch_unordered_packed(const struct object_id *oid,
 +                                struct packed_git *pack,
 +                                uint32_t pos,
 +                                void *data)
 +{
 +      return batch_unordered_object(oid, data);
 +}
 +
  static int batch_objects(struct batch_options *opt)
  {
 -      struct strbuf buf = STRBUF_INIT;
 +      struct strbuf input = STRBUF_INIT;
 +      struct strbuf output = STRBUF_INIT;
        struct expand_data data;
        int save_warning;
        int retval = 0;
         */
        memset(&data, 0, sizeof(data));
        data.mark_query = 1;
 -      strbuf_expand(&buf, opt->format, expand_format, &data);
 +      strbuf_expand(&output, opt->format, expand_format, &data);
        data.mark_query = 0;
 +      strbuf_release(&output);
        if (opt->cmdmode)
                data.split_on_whitespace = 1;
  
                data.info.typep = &data.type;
  
        if (opt->all_objects) {
 -              struct oid_array sa = OID_ARRAY_INIT;
                struct object_cb_data cb;
  
 -              for_each_loose_object(batch_loose_object, &sa, 0);
 -              for_each_packed_object(batch_packed_object, &sa, 0);
                if (repository_format_partial_clone)
                        warning("This repository has extensions.partialClone set. Some objects may not be loaded.");
  
                cb.opt = opt;
                cb.expand = &data;
 -              oid_array_for_each_unique(&sa, batch_object_cb, &cb);
 +              cb.scratch = &output;
 +
 +              if (opt->unordered) {
 +                      struct oidset seen = OIDSET_INIT;
 +
 +                      cb.seen = &seen;
 +
 +                      for_each_loose_object(batch_unordered_loose, &cb, 0);
 +                      for_each_packed_object(batch_unordered_packed, &cb,
 +                                             FOR_EACH_OBJECT_PACK_ORDER);
 +
 +                      oidset_clear(&seen);
 +              } else {
 +                      struct oid_array sa = OID_ARRAY_INIT;
 +
 +                      for_each_loose_object(collect_loose_object, &sa, 0);
 +                      for_each_packed_object(collect_packed_object, &sa, 0);
 +
 +                      oid_array_for_each_unique(&sa, batch_object_cb, &cb);
 +
 +                      oid_array_clear(&sa);
 +              }
  
 -              oid_array_clear(&sa);
 +              strbuf_release(&output);
                return 0;
        }
  
        save_warning = warn_on_object_refname_ambiguity;
        warn_on_object_refname_ambiguity = 0;
  
 -      while (strbuf_getline(&buf, stdin) != EOF) {
 +      while (strbuf_getline(&input, stdin) != EOF) {
                if (data.split_on_whitespace) {
                        /*
                         * Split at first whitespace, tying off the beginning
                         * of the string and saving the remainder (or NULL) in
                         * data.rest.
                         */
 -                      char *p = strpbrk(buf.buf, " \t");
 +                      char *p = strpbrk(input.buf, " \t");
                        if (p) {
                                while (*p && strchr(" \t", *p))
                                        *p++ = '\0';
                        data.rest = p;
                }
  
 -              batch_one_object(buf.buf, opt, &data);
 +              batch_one_object(input.buf, &output, opt, &data);
        }
  
 -      strbuf_release(&buf);
 +      strbuf_release(&input);
 +      strbuf_release(&output);
        warn_on_object_refname_ambiguity = save_warning;
        return retval;
  }
@@@ -637,8 -586,6 +637,8 @@@ int cmd_cat_file(int argc, const char *
                         N_("follow in-tree symlinks (used with --batch or --batch-check)")),
                OPT_BOOL(0, "batch-all-objects", &batch.all_objects,
                         N_("show all objects with --batch or --batch-check")),
 +              OPT_BOOL(0, "unordered", &batch.unordered,
 +                       N_("do not order --batch-all-objects output")),
                OPT_END()
        };
  
diff --combined builtin/checkout.c
index cb6bb76312a00cbcf997ef44e43128964777e86e,54acb8d268e04580a45a466595360d38fc19a168..29ef50013dccbd118093af0b4dc08eb907953cc2
@@@ -318,7 -318,7 +318,7 @@@ static int checkout_paths(const struct 
                 * match_pathspec() for _all_ entries when
                 * opts->source_tree != NULL.
                 */
-               if (ce_path_match(ce, &opts->pathspec, ps_matched))
+               if (ce_path_match(&the_index, ce, &opts->pathspec, ps_matched))
                        ce->ce_flags |= CE_MATCHED;
        }
  
@@@ -1198,12 -1198,12 +1198,12 @@@ int cmd_checkout(int argc, const char *
        if (opts.track != BRANCH_TRACK_UNSPECIFIED && !opts.new_branch) {
                const char *argv0 = argv[0];
                if (!argc || !strcmp(argv0, "--"))
 -                      die (_("--track needs a branch name"));
 +                      die(_("--track needs a branch name"));
                skip_prefix(argv0, "refs/", &argv0);
                skip_prefix(argv0, "remotes/", &argv0);
                argv0 = strchr(argv0, '/');
                if (!argv0 || !argv0[1])
 -                      die (_("Missing branch name; try -b"));
 +                      die(_("missing branch name; try -b"));
                opts.new_branch = argv0 + 1;
        }
  
diff --combined builtin/commit.c
index 213fca2d8ecdf726661e6b31b7ec6cb004acb2f7,21277dd42e8e2ea60e42eb770f57502997fd63de..0d9828e29ebe89f037e68761d5cd5b407339cd48
@@@ -251,7 -251,7 +251,7 @@@ static int list_paths(struct string_lis
  
                if (ce->ce_flags & CE_UPDATE)
                        continue;
-               if (!ce_path_match(ce, pattern, m))
+               if (!ce_path_match(&the_index, ce, pattern, m))
                        continue;
                item = string_list_insert(list, ce->name);
                if (ce_skip_worktree(ce))
@@@ -1647,9 -1647,9 +1647,9 @@@ int cmd_commit(int argc, const char **a
        unlink(git_path_squash_msg(the_repository));
  
        if (commit_index_files())
 -              die (_("Repository has been updated, but unable to write\n"
 -                   "new_index file. Check that disk is not full and quota is\n"
 -                   "not exceeded, and then \"git reset HEAD\" to recover."));
 +              die(_("repository has been updated, but unable to write\n"
 +                    "new_index file. Check that disk is not full and quota is\n"
 +                    "not exceeded, and then \"git reset HEAD\" to recover."));
  
        rerere(0);
        run_command_v_opt(argv_gc_auto, RUN_GIT_CMD);
diff --combined builtin/grep.c
index ee5a1bd355a5f239a23b524a3e05b9c8646f6bc8,b7033954ace2c1cf96190ce289ed49e67d39611e..601f801158f097b302dcf6615016bb4cefbc0225
@@@ -489,7 -489,7 +489,7 @@@ static int grep_cache(struct grep_opt *
        }
  
        if (repo_read_index(repo) < 0)
 -              die("index file corrupt");
 +              die(_("index file corrupt"));
  
        for (nr = 0; nr < repo->index->cache_nr; nr++) {
                const struct cache_entry *ce = repo->index->cache[nr];
                strbuf_addstr(&name, ce->name);
  
                if (S_ISREG(ce->ce_mode) &&
-                   match_pathspec(pathspec, name.buf, name.len, 0, NULL,
+                   match_pathspec(repo->index, pathspec, name.buf, name.len, 0, NULL,
                                   S_ISDIR(ce->ce_mode) ||
                                   S_ISGITLINK(ce->ce_mode))) {
                        /*
                                hit |= grep_file(opt, name.buf);
                        }
                } else if (recurse_submodules && S_ISGITLINK(ce->ce_mode) &&
-                          submodule_path_match(pathspec, name.buf, NULL)) {
+                          submodule_path_match(repo->index, pathspec, name.buf, NULL)) {
                        hit |= grep_submodule(opt, repo, pathspec, NULL, ce->name, ce->name);
                } else {
                        continue;
@@@ -679,7 -679,7 +679,7 @@@ static int grep_directory(struct grep_o
  
        fill_directory(&dir, &the_index, pathspec);
        for (i = 0; i < dir.nr; i++) {
-               if (!dir_path_match(dir.entries[i], pathspec, 0, NULL))
+               if (!dir_path_match(&the_index, dir.entries[i], pathspec, 0, NULL))
                        continue;
                hit |= grep_file(opt, dir.entries[i]->name);
                if (hit && opt->status_only)
@@@ -963,7 -963,7 +963,7 @@@ int cmd_grep(int argc, const char **arg
        }
  
        if (!opt.pattern_list)
 -              die(_("no pattern given."));
 +              die(_("no pattern given"));
  
        /* --only-matching has no effect with --invert. */
        if (opt.invert)
        }
  
        if (recurse_submodules && (!use_index || untracked))
 -              die(_("option not supported with --recurse-submodules."));
 +              die(_("option not supported with --recurse-submodules"));
  
        if (!show_in_pager && !opt.status_only)
                setup_pager();
  
        if (!use_index && (untracked || cached))
 -              die(_("--cached or --untracked cannot be used with --no-index."));
 +              die(_("--cached or --untracked cannot be used with --no-index"));
  
        if (!use_index || untracked) {
                int use_exclude = (opt_exclude < 0) ? use_index : !!opt_exclude;
                hit = grep_directory(&opt, &pathspec, use_exclude, use_index);
        } else if (0 <= opt_exclude) {
 -              die(_("--[no-]exclude-standard cannot be used for tracked contents."));
 +              die(_("--[no-]exclude-standard cannot be used for tracked contents"));
        } else if (!list.nr) {
                if (!cached)
                        setup_work_tree();
                hit = grep_cache(&opt, the_repository, &pathspec, cached);
        } else {
                if (cached)
 -                      die(_("both --cached and trees are given."));
 +                      die(_("both --cached and trees are given"));
  
                hit = grep_objects(&opt, &pathspec, &list);
        }
diff --combined builtin/pack-objects.c
index c0741baa8bbaf97f7493a12c375930dfbb5c8eb9,3ff6da441f369dd313c645fa36a3fb334d397323..0d80dee2ba1ad82fd21622f73dac86b6010da4b6
@@@ -140,7 -140,7 +140,7 @@@ static void *get_delta(struct object_en
  
        buf = read_object_file(&entry->idx.oid, &type, &size);
        if (!buf)
 -              die("unable to read %s", oid_to_hex(&entry->idx.oid));
 +              die(_("unable to read %s"), oid_to_hex(&entry->idx.oid));
        base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
                                    &base_size);
        if (!base_buf)
                    oid_to_hex(&DELTA(entry)->idx.oid));
        delta_buf = diff_delta(base_buf, base_size,
                               buf, size, &delta_size, 0);
 +      /*
 +       * We successfully computed this delta once but dropped it for
 +       * memory reasons. Something is very wrong if this time we
 +       * recompute and create a different delta.
 +       */
        if (!delta_buf || delta_size != DELTA_SIZE(entry))
 -              die("delta size changed");
 +              BUG("delta size changed");
        free(buf);
        free(base_buf);
        return delta_buf;
@@@ -411,7 -406,7 +411,7 @@@ static off_t write_reuse_object(struct 
        datalen = revidx[1].offset - offset;
        if (!pack_to_stdout && p->index_version > 1 &&
            check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
 -              error("bad packed object CRC for %s",
 +              error(_("bad packed object CRC for %s"),
                      oid_to_hex(&entry->idx.oid));
                unuse_pack(&w_curs);
                return write_no_reuse_object(f, entry, limit, usable_delta);
  
        if (!pack_to_stdout && p->index_version == 1 &&
            check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
 -              error("corrupt packed object for %s",
 +              error(_("corrupt packed object for %s"),
                      oid_to_hex(&entry->idx.oid));
                unuse_pack(&w_curs);
                return write_no_reuse_object(f, entry, limit, usable_delta);
@@@ -553,7 -548,7 +553,7 @@@ static enum write_one_status write_one(
         */
        recursing = (e->idx.offset == 1);
        if (recursing) {
 -              warning("recursive delta detected for object %s",
 +              warning(_("recursive delta detected for object %s"),
                        oid_to_hex(&e->idx.oid));
                return WRITE_ONE_RECURSIVE;
        } else if (e->idx.offset || e->preferred_base) {
  
        /* make sure off_t is sufficiently large not to wrap */
        if (signed_add_overflows(*offset, size))
 -              die("pack too large for current definition of off_t");
 +              die(_("pack too large for current definition of off_t"));
        *offset += size;
        return WRITE_ONE_WRITTEN;
  }
@@@ -753,8 -748,7 +753,8 @@@ static struct object_entry **compute_wr
        }
  
        if (wo_end != to_pack.nr_objects)
 -              die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);
 +              die(_("ordered %u objects, expected %"PRIu32),
 +                  wo_end, to_pack.nr_objects);
  
        return wo;
  }
@@@ -766,15 -760,15 +766,15 @@@ static off_t write_reused_pack(struct h
        int fd;
  
        if (!is_pack_valid(reuse_packfile))
 -              die("packfile is invalid: %s", reuse_packfile->pack_name);
 +              die(_("packfile is invalid: %s"), reuse_packfile->pack_name);
  
        fd = git_open(reuse_packfile->pack_name);
        if (fd < 0)
 -              die_errno("unable to open packfile for reuse: %s",
 +              die_errno(_("unable to open packfile for reuse: %s"),
                          reuse_packfile->pack_name);
  
        if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
 -              die_errno("unable to seek in reused packfile");
 +              die_errno(_("unable to seek in reused packfile"));
  
        if (reuse_packfile_offset < 0)
                reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz;
                int read_pack = xread(fd, buffer, sizeof(buffer));
  
                if (read_pack <= 0)
 -                      die_errno("unable to read from reused packfile");
 +                      die_errno(_("unable to read from reused packfile"));
  
                if (read_pack > to_write)
                        read_pack = to_write;
@@@ -888,7 -882,7 +888,7 @@@ static void write_pack_file(void
                         * to preserve this property.
                         */
                        if (stat(pack_tmp_name, &st) < 0) {
 -                              warning_errno("failed to stat %s", pack_tmp_name);
 +                              warning_errno(_("failed to stat %s"), pack_tmp_name);
                        } else if (!last_mtime) {
                                last_mtime = st.st_mtime;
                        } else {
                                utb.actime = st.st_atime;
                                utb.modtime = --last_mtime;
                                if (utime(pack_tmp_name, &utb) < 0)
 -                                      warning_errno("failed utime() on %s", pack_tmp_name);
 +                                      warning_errno(_("failed utime() on %s"), pack_tmp_name);
                        }
  
                        strbuf_addf(&tmpname, "%s-", base_name);
        free(write_order);
        stop_progress(&progress_state);
        if (written != nr_result)
 -              die("wrote %"PRIu32" objects while expecting %"PRIu32,
 -                      written, nr_result);
 +              die(_("wrote %"PRIu32" objects while expecting %"PRIu32),
 +                  written, nr_result);
  }
  
  static int no_try_delta(const char *path)
  
        if (!check)
                check = attr_check_initl("delta", NULL);
-       if (git_check_attr(path, check))
+       if (git_check_attr(&the_index, path, check))
                return 0;
        if (ATTR_FALSE(check->items[0].value))
                return 1;
@@@ -1486,7 -1480,7 +1486,7 @@@ static void check_object(struct object_
                        while (c & 128) {
                                ofs += 1;
                                if (!ofs || MSB(ofs, 7)) {
 -                                      error("delta base offset overflow in pack for %s",
 +                                      error(_("delta base offset overflow in pack for %s"),
                                              oid_to_hex(&entry->idx.oid));
                                        goto give_up;
                                }
                        }
                        ofs = entry->in_pack_offset - ofs;
                        if (ofs <= 0 || ofs >= entry->in_pack_offset) {
 -                              error("delta base offset out of bound for %s",
 +                              error(_("delta base offset out of bound for %s"),
                                      oid_to_hex(&entry->idx.oid));
                                goto give_up;
                        }
@@@ -1858,30 -1852,18 +1858,30 @@@ static int delta_cacheable(unsigned lon
  
  #ifndef NO_PTHREADS
  
 +/* Protect access to object database */
  static pthread_mutex_t read_mutex;
  #define read_lock()           pthread_mutex_lock(&read_mutex)
  #define read_unlock()         pthread_mutex_unlock(&read_mutex)
  
 +/* Protect delta_cache_size */
  static pthread_mutex_t cache_mutex;
  #define cache_lock()          pthread_mutex_lock(&cache_mutex)
  #define cache_unlock()                pthread_mutex_unlock(&cache_mutex)
  
 +/*
 + * Protect object list partitioning (e.g. struct thread_param) and
 + * progress_state
 + */
  static pthread_mutex_t progress_mutex;
  #define progress_lock()               pthread_mutex_lock(&progress_mutex)
  #define progress_unlock()     pthread_mutex_unlock(&progress_mutex)
  
 +/*
 + * Access to struct object_entry is unprotected since each thread owns
 + * a portion of the main object list. Just don't access object entries
 + * ahead in the list because they can be stolen and would need
 + * progress_mutex for protection.
 + */
  #else
  
  #define read_lock()           (void)0
@@@ -1992,10 -1974,10 +1992,10 @@@ static int try_delta(struct unpacked *t
                trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
                read_unlock();
                if (!trg->data)
 -                      die("object %s cannot be read",
 +                      die(_("object %s cannot be read"),
                            oid_to_hex(&trg_entry->idx.oid));
                if (sz != trg_size)
 -                      die("object %s inconsistent object length (%lu vs %lu)",
 +                      die(_("object %s inconsistent object length (%lu vs %lu)"),
                            oid_to_hex(&trg_entry->idx.oid), sz,
                            trg_size);
                *mem_usage += sz;
                        if (src_entry->preferred_base) {
                                static int warned = 0;
                                if (!warned++)
 -                                      warning("object %s cannot be read",
 +                                      warning(_("object %s cannot be read"),
                                                oid_to_hex(&src_entry->idx.oid));
                                /*
                                 * Those objects are not included in the
                                 */
                                return 0;
                        }
 -                      die("object %s cannot be read",
 +                      die(_("object %s cannot be read"),
                            oid_to_hex(&src_entry->idx.oid));
                }
                if (sz != src_size)
 -                      die("object %s inconsistent object length (%lu vs %lu)",
 +                      die(_("object %s inconsistent object length (%lu vs %lu)"),
                            oid_to_hex(&src_entry->idx.oid), sz,
                            src_size);
                *mem_usage += sz;
                if (!src->index) {
                        static int warned = 0;
                        if (!warned++)
 -                              warning("suboptimal pack - out of memory");
 +                              warning(_("suboptimal pack - out of memory"));
                        return 0;
                }
                *mem_usage += sizeof_delta_index(src->index);
@@@ -2263,19 -2245,12 +2263,19 @@@ static void try_to_free_from_threads(si
  static try_to_free_t old_try_to_free_routine;
  
  /*
 +      * The main object list is split into smaller lists, each of which is
 +      * handed to one worker.
 + *
   * The main thread waits on the condition that (at least) one of the workers
   * has stopped working (which is indicated in the .working member of
   * struct thread_params).
 + *
   * When a work thread has completed its work, it sets .working to 0 and
   * signals the main thread and waits on the condition that .data_ready
   * becomes 1.
 + *
 +      * The main thread steals half of the work from the worker that has the
 +      * most work left and hands it to the idle worker.
   */
  
  struct thread_params {
@@@ -2366,8 -2341,8 +2366,8 @@@ static void ll_find_deltas(struct objec
                return;
        }
        if (progress > pack_to_stdout)
 -              fprintf(stderr, "Delta compression using up to %d threads.\n",
 -                              delta_search_threads);
 +              fprintf_ln(stderr, _("Delta compression using up to %d threads"),
 +                         delta_search_threads);
        p = xcalloc(delta_search_threads, sizeof(*p));
  
        /* Partition the work amongst work threads. */
                ret = pthread_create(&p[i].thread, NULL,
                                     threaded_find_deltas, &p[i]);
                if (ret)
 -                      die("unable to create thread: %s", strerror(ret));
 +                      die(_("unable to create thread: %s"), strerror(ret));
                active_threads++;
        }
  
@@@ -2502,7 -2477,7 +2502,7 @@@ static void add_tag_chain(const struct 
        tag = lookup_tag(the_repository, oid);
        while (1) {
                if (!tag || parse_tag(tag) || !tag->tagged)
 -                      die("unable to pack objects reachable from tag %s",
 +                      die(_("unable to pack objects reachable from tag %s"),
                            oid_to_hex(oid));
  
                add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0);
@@@ -2568,7 -2543,7 +2568,7 @@@ static void prepare_pack(int window, in
                if (!entry->preferred_base) {
                        nr_deltas++;
                        if (oe_type(entry) < 0)
 -                              die("unable to get type of object %s",
 +                              die(_("unable to get type of object %s"),
                                    oid_to_hex(&entry->idx.oid));
                } else {
                        if (oe_type(entry) < 0) {
                ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
                stop_progress(&progress_state);
                if (nr_done != nr_deltas)
 -                      die("inconsistency with delta count");
 +                      die(_("inconsistency with delta count"));
        }
        free(delta_list);
  }
@@@ -2632,11 -2607,11 +2632,11 @@@ static int git_pack_config(const char *
        if (!strcmp(k, "pack.threads")) {
                delta_search_threads = git_config_int(k, v);
                if (delta_search_threads < 0)
 -                      die("invalid number of threads specified (%d)",
 +                      die(_("invalid number of threads specified (%d)"),
                            delta_search_threads);
  #ifdef NO_PTHREADS
                if (delta_search_threads != 1) {
 -                      warning("no threads support, ignoring %s", k);
 +                      warning(_("no threads support, ignoring %s"), k);
                        delta_search_threads = 0;
                }
  #endif
        if (!strcmp(k, "pack.indexversion")) {
                pack_idx_opts.version = git_config_int(k, v);
                if (pack_idx_opts.version > 2)
 -                      die("bad pack.indexversion=%"PRIu32,
 +                      die(_("bad pack.indexversion=%"PRIu32),
                            pack_idx_opts.version);
                return 0;
        }
@@@ -2663,7 -2638,7 +2663,7 @@@ static void read_object_list_from_stdin
                        if (feof(stdin))
                                break;
                        if (!ferror(stdin))
 -                              die("fgets returned NULL, not EOF, not error!");
 +                              die("BUG: fgets returned NULL, not EOF, not error!");
                        if (errno != EINTR)
                                die_errno("fgets");
                        clearerr(stdin);
                }
                if (line[0] == '-') {
                        if (get_oid_hex(line+1, &oid))
 -                              die("expected edge object ID, got garbage:\n %s",
 +                              die(_("expected edge object ID, got garbage:\n %s"),
                                    line);
                        add_preferred_base(&oid);
                        continue;
                }
                if (parse_oid_hex(line, &oid, &p))
 -                      die("expected object ID, got garbage:\n %s", line);
 +                      die(_("expected object ID, got garbage:\n %s"), line);
  
                add_preferred_base_object(p + 1);
                add_object_entry(&oid, OBJ_NONE, p + 1, 0);
@@@ -2816,7 -2791,7 +2816,7 @@@ static void add_objects_in_unpacked_pac
                if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
                        continue;
                if (open_pack_index(p))
 -                      die("cannot open pack index");
 +                      die(_("cannot open pack index"));
  
                ALLOC_GROW(in_pack.array,
                           in_pack.nr + p->num_objects,
@@@ -2847,7 -2822,7 +2847,7 @@@ static int add_loose_object(const struc
        enum object_type type = oid_object_info(the_repository, oid, NULL);
  
        if (type < 0) {
 -              warning("loose object at %s could not be examined", path);
 +              warning(_("loose object at %s could not be examined"), path);
                return 0;
        }
  
@@@ -2924,7 -2899,7 +2924,7 @@@ static void loosen_unused_packed_object
                        continue;
  
                if (open_pack_index(p))
 -                      die("cannot open pack index");
 +                      die(_("cannot open pack index"));
  
                for (i = 0; i < p->num_objects; i++) {
                        nth_packed_object_oid(&oid, p, i);
                            !has_sha1_pack_kept_or_nonlocal(&oid) &&
                            !loosened_object_can_be_discarded(&oid, p->mtime))
                                if (force_object_loose(&oid, p->mtime))
 -                                      die("unable to force loose object");
 +                                      die(_("unable to force loose object"));
                }
        }
  }
@@@ -3019,17 -2994,17 +3019,17 @@@ static void get_object_list(int ac, con
                                use_bitmap_index = 0;
                                continue;
                        }
 -                      die("not a rev '%s'", line);
 +                      die(_("not a rev '%s'"), line);
                }
                if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
 -                      die("bad revision '%s'", line);
 +                      die(_("bad revision '%s'"), line);
        }
  
        if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
                return;
  
        if (prepare_revision_walk(&revs))
 -              die("revision walk setup failed");
 +              die(_("revision walk setup failed"));
        mark_edges_uninteresting(&revs, show_edge);
  
        if (!fn_show_object)
                revs.ignore_missing_links = 1;
                if (add_unseen_recent_objects_to_traversal(&revs,
                                unpack_unreachable_expiration))
 -                      die("unable to add recent objects");
 +                      die(_("unable to add recent objects"));
                if (prepare_revision_walk(&revs))
 -                      die("revision walk setup failed");
 +                      die(_("revision walk setup failed"));
                traverse_commit_list(&revs, record_recent_commit,
                                     record_recent_object, NULL);
        }
@@@ -3135,7 -3110,7 +3135,7 @@@ int cmd_pack_objects(int argc, const ch
                OPT_BOOL(0, "all-progress-implied",
                         &all_progress_implied,
                         N_("similar to --all-progress when progress meter is shown")),
 -              { OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
 +              { OPTION_CALLBACK, 0, "index-version", NULL, N_("<version>[,<offset>]"),
                  N_("write the pack index file in the specified idx format version"),
                  0, option_parse_index_version },
                OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
        if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
                BUG("too many dfs states, increase OE_DFS_STATE_BITS");
  
 -      check_replace_refs = 0;
 +      read_replace_refs = 0;
  
        reset_pack_idx_option(&pack_idx_opts);
        git_config(git_pack_config, NULL);
        if (pack_compression_level == -1)
                pack_compression_level = Z_DEFAULT_COMPRESSION;
        else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
 -              die("bad pack compression level %d", pack_compression_level);
 +              die(_("bad pack compression level %d"), pack_compression_level);
  
        if (!delta_search_threads)      /* --threads=0 means autodetect */
                delta_search_threads = online_cpus();
  
  #ifdef NO_PTHREADS
        if (delta_search_threads != 1)
 -              warning("no threads support, ignoring --threads");
 +              warning(_("no threads support, ignoring --threads"));
  #endif
        if (!pack_to_stdout && !pack_size_limit)
                pack_size_limit = pack_size_limit_cfg;
        if (pack_to_stdout && pack_size_limit)
 -              die("--max-pack-size cannot be used to build a pack for transfer.");
 +              die(_("--max-pack-size cannot be used to build a pack for transfer"));
        if (pack_size_limit && pack_size_limit < 1024*1024) {
 -              warning("minimum pack size limit is 1 MiB");
 +              warning(_("minimum pack size limit is 1 MiB"));
                pack_size_limit = 1024*1024;
        }
  
        if (!pack_to_stdout && thin)
 -              die("--thin cannot be used to build an indexable pack.");
 +              die(_("--thin cannot be used to build an indexable pack"));
  
        if (keep_unreachable && unpack_unreachable)
 -              die("--keep-unreachable and --unpack-unreachable are incompatible.");
 +              die(_("--keep-unreachable and --unpack-unreachable are incompatible"));
        if (!rev_list_all || !rev_list_reflog || !rev_list_index)
                unpack_unreachable_expiration = 0;
  
        if (filter_options.choice) {
                if (!pack_to_stdout)
 -                      die("cannot use --filter without --stdout.");
 +                      die(_("cannot use --filter without --stdout"));
                use_bitmap_index = 0;
        }
  
                prepare_pack(window, depth);
        write_pack_file();
        if (progress)
 -              fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
 -                      " reused %"PRIu32" (delta %"PRIu32")\n",
 -                      written, written_delta, reused, reused_delta);
 +              fprintf_ln(stderr,
 +                         _("Total %"PRIu32" (delta %"PRIu32"),"
 +                           " reused %"PRIu32" (delta %"PRIu32")"),
 +                         written, written_delta, reused, reused_delta);
        return 0;
  }
diff --combined builtin/rm.c
index f4d3f000b624564737bf7b851f4bf9abf5497684,eebdc4aab17d6b96c8f4ba79668d9121ce82f49f..2cbe89e0ae3b7a4801ac27d25fd37306813104ba
@@@ -278,14 -278,14 +278,14 @@@ int cmd_rm(int argc, const char **argv
  
        for (i = 0; i < active_nr; i++) {
                const struct cache_entry *ce = active_cache[i];
-               if (!ce_path_match(ce, &pathspec, seen))
+               if (!ce_path_match(&the_index, ce, &pathspec, seen))
                        continue;
                ALLOC_GROW(list.entry, list.nr + 1, list.alloc);
                list.entry[list.nr].name = xstrdup(ce->name);
                list.entry[list.nr].is_submodule = S_ISGITLINK(ce->ce_mode);
                if (list.entry[list.nr++].is_submodule &&
                    !is_staging_gitmodules_ok(&the_index))
 -                      die (_("Please stage your changes to .gitmodules or stash them to proceed"));
 +                      die(_("please stage your changes to .gitmodules or stash them to proceed"));
        }
  
        if (pathspec.nr) {
diff --combined builtin/update-index.c
index 5aee2eaa66c3ee719ed584a1a10715d3e8d11f6a,f75fd24083d18bed569946371a253d532a0b15b5..fe84003b4fa05c377bb4ab1de04a7cd1c9ae4d5d
@@@ -748,7 -748,7 +748,7 @@@ static int do_reupdate(int ac, const ch
                int save_nr;
                char *path;
  
-               if (ce_stage(ce) || !ce_path_match(ce, &pathspec, NULL))
+               if (ce_stage(ce) || !ce_path_match(&the_index, ce, &pathspec, NULL))
                        continue;
                if (has_head)
                        old = read_one_ent(NULL, &head_oid,
@@@ -969,9 -969,9 +969,9 @@@ int cmd_update_index(int argc, const ch
                        PARSE_OPT_NOARG | /* disallow --cacheinfo=<mode> form */
                        PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP,
                        (parse_opt_cb *) cacheinfo_callback},
 -              {OPTION_CALLBACK, 0, "chmod", &set_executable_bit, N_("(+/-)x"),
 +              {OPTION_CALLBACK, 0, "chmod", &set_executable_bit, "(+|-)x",
                        N_("override the executable bit of the listed files"),
 -                      PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP,
 +                      PARSE_OPT_NONEG,
                        chmod_callback},
                {OPTION_SET_INT, 0, "assume-unchanged", &mark_valid_only, NULL,
                        N_("mark files as \"not changing\""),
diff --combined convert.c
index ce7ea0db067ea9fe48933b6d85397c64c80dea36,8acfe8ae457e91908b34febf5cfcf2492334a241..6057f1f58015ad7cae9792309dd635a1e7933586
+++ b/convert.c
@@@ -191,7 -191,7 +191,7 @@@ static enum eol output_eol(enum crlf_ac
                /* fall through */
                return text_eol_is_crlf() ? EOL_CRLF : EOL_LF;
        }
 -      warning("Illegal crlf_action %d\n", (int)crlf_action);
 +      warning(_("illegal crlf_action %d"), (int)crlf_action);
        return core_eol;
  }
  
@@@ -204,11 -204,11 +204,11 @@@ static void check_global_conv_flags_eol
                 * CRLFs would not be restored by checkout
                 */
                if (conv_flags & CONV_EOL_RNDTRP_DIE)
 -                      die(_("CRLF would be replaced by LF in %s."), path);
 +                      die(_("CRLF would be replaced by LF in %s"), path);
                else if (conv_flags & CONV_EOL_RNDTRP_WARN)
                        warning(_("CRLF will be replaced by LF in %s.\n"
                                  "The file will have its original line"
 -                                " endings in your working directory."), path);
 +                                " endings in your working directory"), path);
        } else if (old_stats->lonelf && !new_stats->lonelf ) {
                /*
                 * CRLFs would be added by checkout
                else if (conv_flags & CONV_EOL_RNDTRP_WARN)
                        warning(_("LF will be replaced by CRLF in %s.\n"
                                  "The file will have its original line"
 -                                " endings in your working directory."), path);
 +                                " endings in your working directory"), path);
        }
  }
  
@@@ -390,7 -390,7 +390,7 @@@ static int encode_to_git(const char *pa
                         struct strbuf *buf, const char *enc, int conv_flags)
  {
        char *dst;
 -      int dst_len;
 +      size_t dst_len;
        int die_on_error = conv_flags & CONV_WRITE_OBJECT;
  
        /*
         */
        if (die_on_error && check_roundtrip(enc)) {
                char *re_src;
 -              int re_src_len;
 +              size_t re_src_len;
  
                re_src = reencode_string_len(dst, dst_len,
                                             enc, default_encoding,
@@@ -481,7 -481,7 +481,7 @@@ static int encode_to_worktree(const cha
                              struct strbuf *buf, const char *enc)
  {
        char *dst;
 -      int dst_len;
 +      size_t dst_len;
  
        /*
         * No encoding is specified or there is nothing to encode.
        dst = reencode_string_len(src, src_len, enc, default_encoding,
                                  &dst_len);
        if (!dst) {
 -              error("failed to encode '%s' from %s to %s",
 -                      path, default_encoding, enc);
 +              error(_("failed to encode '%s' from %s to %s"),
 +                    path, default_encoding, enc);
                return 0;
        }
  
@@@ -671,8 -671,7 +671,8 @@@ static int filter_buffer_or_fd(int in, 
  
        if (start_command(&child_process)) {
                strbuf_release(&cmd);
 -              return error("cannot fork to run external filter '%s'", params->cmd);
 +              return error(_("cannot fork to run external filter '%s'"),
 +                           params->cmd);
        }
  
        sigchain_push(SIGPIPE, SIG_IGN);
        if (close(child_process.in))
                write_err = 1;
        if (write_err)
 -              error("cannot feed the input to external filter '%s'", params->cmd);
 +              error(_("cannot feed the input to external filter '%s'"),
 +                    params->cmd);
  
        sigchain_pop(SIGPIPE);
  
        status = finish_command(&child_process);
        if (status)
 -              error("external filter '%s' failed %d", params->cmd, status);
 +              error(_("external filter '%s' failed %d"), params->cmd, status);
  
        strbuf_release(&cmd);
        return (write_err || status);
@@@ -733,13 -731,13 +733,13 @@@ static int apply_single_file_filter(con
                return 0;       /* error was already reported */
  
        if (strbuf_read(&nbuf, async.out, len) < 0) {
 -              err = error("read from external filter '%s' failed", cmd);
 +              err = error(_("read from external filter '%s' failed"), cmd);
        }
        if (close(async.out)) {
 -              err = error("read from external filter '%s' failed", cmd);
 +              err = error(_("read from external filter '%s' failed"), cmd);
        }
        if (finish_async(&async)) {
 -              err = error("external filter '%s' failed", cmd);
 +              err = error(_("external filter '%s' failed"), cmd);
        }
  
        if (!err) {
@@@ -793,7 -791,7 +793,7 @@@ static void handle_filter_error(const s
                 * Something went wrong with the protocol filter.
                 * Force shutdown and restart if another blob requires filtering.
                 */
 -              error("external filter '%s' failed", entry->subprocess.cmd);
 +              error(_("external filter '%s' failed"), entry->subprocess.cmd);
                subprocess_stop(&subprocess_map, &entry->subprocess);
                free(entry);
        }
@@@ -841,7 -839,7 +841,7 @@@ static int apply_multi_file_filter(cons
        else if (wanted_capability & CAP_SMUDGE)
                filter_type = "smudge";
        else
 -              die("unexpected filter type");
 +              die(_("unexpected filter type"));
  
        sigchain_push(SIGPIPE, SIG_IGN);
  
  
        err = strlen(path) > LARGE_PACKET_DATA_MAX - strlen("pathname=\n");
        if (err) {
 -              error("path name too long for external filter");
 +              error(_("path name too long for external filter"));
                goto done;
        }
  
@@@ -926,8 -924,8 +926,8 @@@ int async_query_available_blobs(const c
        assert(subprocess_map_initialized);
        entry = (struct cmd2process *)subprocess_find_entry(&subprocess_map, cmd);
        if (!entry) {
 -              error("external filter '%s' is not available anymore although "
 -                    "not all paths have been filtered", cmd);
 +              error(_("external filter '%s' is not available anymore although "
 +                      "not all paths have been filtered"), cmd);
                return 0;
        }
        process = &entry->subprocess.process;
@@@ -1293,7 -1291,8 +1293,8 @@@ struct conv_attrs 
        const char *working_tree_encoding; /* Supported encoding or default encoding if NULL */
  };
  
- static void convert_attrs(struct conv_attrs *ca, const char *path)
+ static void convert_attrs(const struct index_state *istate,
+                         struct conv_attrs *ca, const char *path)
  {
        static struct attr_check *check;
  
                git_config(read_convert_config, NULL);
        }
  
-       if (!git_check_attr(path, check)) {
+       if (!git_check_attr(istate, path, check)) {
                struct attr_check_item *ccheck = check->items;
                ca->crlf_action = git_path_check_crlf(ccheck + 4);
                if (ca->crlf_action == CRLF_UNDEFINED)
                ca->crlf_action = CRLF_AUTO_INPUT;
  }
  
- int would_convert_to_git_filter_fd(const char *path)
+ int would_convert_to_git_filter_fd(const struct index_state *istate, const char *path)
  {
        struct conv_attrs ca;
  
-       convert_attrs(&ca, path);
+       convert_attrs(istate, &ca, path);
        if (!ca.drv)
                return 0;
  
        return apply_filter(path, NULL, 0, -1, NULL, ca.drv, CAP_CLEAN, NULL);
  }
  
- const char *get_convert_attr_ascii(const char *path)
+ const char *get_convert_attr_ascii(const struct index_state *istate, const char *path)
  {
        struct conv_attrs ca;
  
-       convert_attrs(&ca, path);
+       convert_attrs(istate, &ca, path);
        switch (ca.attr_action) {
        case CRLF_UNDEFINED:
                return "";
@@@ -1394,11 -1393,11 +1395,11 @@@ int convert_to_git(const struct index_s
        int ret = 0;
        struct conv_attrs ca;
  
-       convert_attrs(&ca, path);
+       convert_attrs(istate, &ca, path);
  
        ret |= apply_filter(path, src, len, -1, dst, ca.drv, CAP_CLEAN, NULL);
        if (!ret && ca.drv && ca.drv->required)
 -              die("%s: clean filter '%s' failed", path, ca.drv->name);
 +              die(_("%s: clean filter '%s' failed"), path, ca.drv->name);
  
        if (ret && dst) {
                src = dst->buf;
@@@ -1426,27 -1425,28 +1427,28 @@@ void convert_to_git_filter_fd(const str
                              int conv_flags)
  {
        struct conv_attrs ca;
-       convert_attrs(&ca, path);
+       convert_attrs(istate, &ca, path);
  
        assert(ca.drv);
        assert(ca.drv->clean || ca.drv->process);
  
        if (!apply_filter(path, NULL, 0, fd, dst, ca.drv, CAP_CLEAN, NULL))
 -              die("%s: clean filter '%s' failed", path, ca.drv->name);
 +              die(_("%s: clean filter '%s' failed"), path, ca.drv->name);
  
        encode_to_git(path, dst->buf, dst->len, dst, ca.working_tree_encoding, conv_flags);
        crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, conv_flags);
        ident_to_git(path, dst->buf, dst->len, dst, ca.ident);
  }
  
- static int convert_to_working_tree_internal(const char *path, const char *src,
+ static int convert_to_working_tree_internal(const struct index_state *istate,
+                                           const char *path, const char *src,
                                            size_t len, struct strbuf *dst,
                                            int normalizing, struct delayed_checkout *dco)
  {
        int ret = 0, ret_filter = 0;
        struct conv_attrs ca;
  
-       convert_attrs(&ca, path);
+       convert_attrs(istate, &ca, path);
  
        ret |= ident_to_worktree(path, src, len, dst, ca.ident);
        if (ret) {
        ret_filter = apply_filter(
                path, src, len, -1, dst, ca.drv, CAP_SMUDGE, dco);
        if (!ret_filter && ca.drv && ca.drv->required)
 -              die("%s: smudge filter %s failed", path, ca.drv->name);
 +              die(_("%s: smudge filter %s failed"), path, ca.drv->name);
  
        return ret | ret_filter;
  }
  
- int async_convert_to_working_tree(const char *path, const char *src,
+ int async_convert_to_working_tree(const struct index_state *istate,
+                                 const char *path, const char *src,
                                  size_t len, struct strbuf *dst,
                                  void *dco)
  {
-       return convert_to_working_tree_internal(path, src, len, dst, 0, dco);
+       return convert_to_working_tree_internal(istate, path, src, len, dst, 0, dco);
  }
  
- int convert_to_working_tree(const char *path, const char *src, size_t len, struct strbuf *dst)
+ int convert_to_working_tree(const struct index_state *istate,
+                           const char *path, const char *src,
+                           size_t len, struct strbuf *dst)
  {
-       return convert_to_working_tree_internal(path, src, len, dst, 0, NULL);
+       return convert_to_working_tree_internal(istate, path, src, len, dst, 0, NULL);
  }
  
  int renormalize_buffer(const struct index_state *istate, const char *path,
                       const char *src, size_t len, struct strbuf *dst)
  {
-       int ret = convert_to_working_tree_internal(path, src, len, dst, 1, NULL);
+       int ret = convert_to_working_tree_internal(istate, path, src, len, dst, 1, NULL);
        if (ret) {
                src = dst->buf;
                len = dst->len;
@@@ -1929,12 -1932,14 +1934,14 @@@ static struct stream_filter *ident_filt
   * Note that you would be crazy to set CRLF, smudge/clean or ident to a
   * large binary blob that you would want us not to slurp into memory!
   */
- struct stream_filter *get_stream_filter(const char *path, const struct object_id *oid)
+ struct stream_filter *get_stream_filter(const struct index_state *istate,
+                                       const char *path,
+                                       const struct object_id *oid)
  {
        struct conv_attrs ca;
        struct stream_filter *filter = NULL;
  
-       convert_attrs(&ca, path);
+       convert_attrs(istate, &ca, path);
        if (ca.drv && (ca.drv->process || ca.drv->smudge || ca.drv->clean))
                return NULL;
  
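The conversion entry points above now take an explicit struct index_state
from their callers instead of reaching for the global the_index.  A minimal
sketch of a caller, assuming git's internal headers ("cache.h", "convert.h",
"strbuf.h") and a hypothetical helper name smudge_for_worktree(), might look
like this:

	/*
	 * Hypothetical helper (not a real git API): smudge a blob's
	 * contents for the working tree using the index that was handed
	 * to us, rather than the_index.  "result" must already be
	 * initialized by the caller (STRBUF_INIT).
	 */
	static void smudge_for_worktree(const struct index_state *istate,
					const char *path,
					const char *src, size_t len,
					struct strbuf *result)
	{
		/* a non-zero return means "result" holds converted data */
		if (!convert_to_working_tree(istate, path, src, len, result))
			strbuf_add(result, src, len); /* no conversion applied */
	}
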
diff --combined diff.c
index f830afac79134219e706730c05d98f81d4a1d8cd,3670206d230b12befdb6f3bb7ea7277c419bda3a..77e4684f36c9d092e1bde4900e293277096b99f7
--- 1/diff.c
--- 2/diff.c
+++ b/diff.c
@@@ -271,12 -271,10 +271,12 @@@ static int parse_color_moved(const cha
                return COLOR_MOVED_ZEBRA;
        else if (!strcmp(arg, "default"))
                return COLOR_MOVED_DEFAULT;
 +      else if (!strcmp(arg, "dimmed-zebra"))
 +              return COLOR_MOVED_ZEBRA_DIM;
        else if (!strcmp(arg, "dimmed_zebra"))
                return COLOR_MOVED_ZEBRA_DIM;
        else
 -              return error(_("color moved setting must be one of 'no', 'default', 'blocks', 'zebra', 'dimmed_zebra', 'plain'"));
 +              return error(_("color moved setting must be one of 'no', 'default', 'blocks', 'zebra', 'dimmed-zebra', 'plain'"));
  }
  
  static int parse_color_moved_ws(const char *arg)
@@@ -2071,8 -2069,8 +2071,8 @@@ static void init_diff_words_data(struc
                if (regcomp(ecbdata->diff_words->word_regex,
                            o->word_regex,
                            REG_EXTENDED | REG_NEWLINE))
 -                      die ("Invalid regular expression: %s",
 -                           o->word_regex);
 +                      die("invalid regular expression: %s",
 +                          o->word_regex);
        }
        for (i = 0; i < ARRAY_SIZE(diff_words_styles); i++) {
                if (o->word_diff == diff_words_styles[i].type) {
@@@ -3895,7 -3893,7 +3895,7 @@@ static void prep_temp_blob(const char *
        temp->tempfile = mks_tempfile_ts(tempfile.buf, strlen(base) + 1);
        if (!temp->tempfile)
                die_errno("unable to create temp-file");
-       if (convert_to_working_tree(path,
+       if (convert_to_working_tree(&the_index, path,
                        (const char *)blob, (size_t)size, &buf)) {
                blob = buf.buf;
                size = buf.len;
@@@ -4416,16 -4414,6 +4416,6 @@@ void diff_setup_done(struct diff_option
  
        if (options->detect_rename && options->rename_limit < 0)
                options->rename_limit = diff_rename_limit_default;
-       if (options->setup & DIFF_SETUP_USE_CACHE) {
-               if (!active_cache)
-                       /* read-cache does not die even when it fails
-                        * so it is safe for us to do this here.  Also
-                        * it does not smudge active_cache or active_nr
-                        * when it fails, so we do not have to worry about
-                        * cleaning it up ourselves either.
-                        */
-                       read_cache();
-       }
        if (hexsz < options->abbrev)
                options->abbrev = hexsz; /* full */
  
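With the lazy read_cache() call dropped from diff_setup_done(), a caller that
diffs against the index is expected to have populated it beforehand.  A
minimal sketch, assuming git's internal headers and a struct rev_info rev
that has been set up elsewhere:

	/* illustrative only: load the index explicitly before an index diff */
	if (read_cache() < 0)
		die(_("index file corrupt"));
	diff_setup(&rev.diffopt);
	diff_setup_done(&rev.diffopt);
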
diff --combined dir.c
index 32f5f7275981ea1bd9a4be2fe87f5f7f47ffb571,e25aed013bcdb86d8daca3c98e60727bda7dac50..aceb0d48692b7d727cfd2645ae88b0d45d660c09
--- 1/dir.c
--- 2/dir.c
+++ b/dir.c
@@@ -276,12 -276,13 +276,13 @@@ static int do_read_blob(const struct ob
  #define DO_MATCH_DIRECTORY (1<<1)
  #define DO_MATCH_SUBMODULE (1<<2)
  
- static int match_attrs(const char *name, int namelen,
+ static int match_attrs(const struct index_state *istate,
+                      const char *name, int namelen,
                       const struct pathspec_item *item)
  {
        int i;
  
-       git_check_attr(name, item->attr_check);
+       git_check_attr(istate, name, item->attr_check);
        for (i = 0; i < item->attr_match_nr; i++) {
                const char *value;
                int matched;
   *
   * It returns 0 when there is no match.
   */
- static int match_pathspec_item(const struct pathspec_item *item, int prefix,
+ static int match_pathspec_item(const struct index_state *istate,
+                              const struct pathspec_item *item, int prefix,
                               const char *name, int namelen, unsigned flags)
  {
        /* name/namelen has prefix cut off by caller */
            strncmp(item->match, name - prefix, item->prefix))
                return 0;
  
-       if (item->attr_match_nr && !match_attrs(name, namelen, item))
+       if (item->attr_match_nr && !match_attrs(istate, name, namelen, item))
                return 0;
  
        /* If the match was just the prefix, we matched */
   * pathspec did not match any names, which could indicate that the
   * user mistyped the nth pathspec.
   */
- static int do_match_pathspec(const struct pathspec *ps,
+ static int do_match_pathspec(const struct index_state *istate,
+                            const struct pathspec *ps,
                             const char *name, int namelen,
                             int prefix, char *seen,
                             unsigned flags)
                 */
                if (seen && ps->items[i].magic & PATHSPEC_EXCLUDE)
                        seen[i] = MATCHED_FNMATCH;
-               how = match_pathspec_item(ps->items+i, prefix, name,
+               how = match_pathspec_item(istate, ps->items+i, prefix, name,
                                          namelen, flags);
                if (ps->recursive &&
                    (ps->magic & PATHSPEC_MAXDEPTH) &&
        return retval;
  }
  
- int match_pathspec(const struct pathspec *ps,
+ int match_pathspec(const struct index_state *istate,
+                  const struct pathspec *ps,
                   const char *name, int namelen,
                   int prefix, char *seen, int is_dir)
  {
        int positive, negative;
        unsigned flags = is_dir ? DO_MATCH_DIRECTORY : 0;
-       positive = do_match_pathspec(ps, name, namelen,
+       positive = do_match_pathspec(istate, ps, name, namelen,
                                     prefix, seen, flags);
        if (!(ps->magic & PATHSPEC_EXCLUDE) || !positive)
                return positive;
-       negative = do_match_pathspec(ps, name, namelen,
+       negative = do_match_pathspec(istate, ps, name, namelen,
                                     prefix, seen,
                                     flags | DO_MATCH_EXCLUDE);
        return negative ? 0 : positive;
  /**
   * Check if a submodule is a superset of the pathspec
   */
- int submodule_path_match(const struct pathspec *ps,
+ int submodule_path_match(const struct index_state *istate,
+                        const struct pathspec *ps,
                         const char *submodule_name,
                         char *seen)
  {
-       int matched = do_match_pathspec(ps, submodule_name,
+       int matched = do_match_pathspec(istate, ps, submodule_name,
                                        strlen(submodule_name),
                                        0, seen,
                                        DO_MATCH_DIRECTORY |
@@@ -561,7 -566,7 +566,7 @@@ int report_path_error(const char *ps_ma
                if (found_dup)
                        continue;
  
 -              error("pathspec '%s' did not match any file(s) known to git.",
 +              error(_("pathspec '%s' did not match any file(s) known to git"),
                      pathspec->items[num].original);
                errors++;
        }
@@@ -950,7 -955,7 +955,7 @@@ static void add_excludes_from_file_1(st
                dir->unmanaged_exclude_files++;
        el = add_exclude_list(dir, EXC_FILE, fname);
        if (add_excludes(fname, "", 0, el, NULL, oid_stat) < 0)
 -              die("cannot use %s as an exclude file", fname);
 +              die(_("cannot use %s as an exclude file"), fname);
  }
  
  void add_excludes_from_file(struct dir_struct *dir, const char *fname)
@@@ -2231,7 -2236,7 +2236,7 @@@ static struct untracked_cache_dir *vali
                return NULL;
  
        if (!ident_in_untracked(dir->untracked)) {
 -              warning(_("Untracked cache is disabled on this system or location."));
 +              warning(_("untracked cache is disabled on this system or location"));
                return NULL;
        }
  
@@@ -3029,7 -3034,7 +3034,7 @@@ static void connect_wt_gitdir_in_nested
                return;
  
        if (repo_read_index(&subrepo) < 0)
 -              die("index file corrupt in repo %s", subrepo.gitdir);
 +              die(_("index file corrupt in repo %s"), subrepo.gitdir);
  
        for (i = 0; i < subrepo.index->cache_nr; i++) {
                const struct cache_entry *ce = subrepo.index->cache[i];
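The pathspec matchers above likewise take the index to match against.  A
minimal sketch of a caller walking an arbitrary index, assuming git's
internal headers and a hypothetical helper name list_matches():

	/* hypothetical helper: print index entries matching a pathspec */
	static void list_matches(const struct index_state *istate,
				 const struct pathspec *pathspec)
	{
		int i;

		for (i = 0; i < istate->cache_nr; i++) {
			const struct cache_entry *ce = istate->cache[i];

			if (match_pathspec(istate, pathspec, ce->name,
					   ce_namelen(ce), 0, NULL,
					   S_ISDIR(ce->ce_mode) ||
					   S_ISGITLINK(ce->ce_mode)))
				printf("%s\n", ce->name);
		}
	}
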
diff --combined merge-recursive.c
index bd053c70dfc518b6265ec273aad2e61c05d60d18,defcbff93be46bb4d311765266fbce4ec2825961..dcdc93019cec870f196191caf3055611faae4ede
@@@ -966,7 -966,7 +966,7 @@@ static int update_file_flags(struct mer
                }
                if (S_ISREG(mode)) {
                        struct strbuf strbuf = STRBUF_INIT;
-                       if (convert_to_working_tree(path, buf, size, &strbuf)) {
+                       if (convert_to_working_tree(&the_index, path, buf, size, &strbuf)) {
                                free(buf);
                                size = strbuf.len;
                                buf = strbuf_detach(&strbuf, NULL);
@@@ -3070,26 -3070,10 +3070,26 @@@ static int merge_content(struct merge_o
        if (mfi.clean &&
            was_tracked_and_matches(o, path, &mfi.oid, mfi.mode) &&
            !df_conflict_remains) {
 +              int pos;
 +              struct cache_entry *ce;
 +
                output(o, 3, _("Skipped %s (merged same as existing)"), path);
                if (add_cacheinfo(o, mfi.mode, &mfi.oid, path,
                                  0, (!o->call_depth && !is_dirty), 0))
                        return -1;
 +              /*
 +               * However, add_cacheinfo() will delete the old cache entry
 +               * and add a new one.  We need to copy over any skip_worktree
 +               * flag to avoid making the file appear as if it were
 +               * deleted by the user.
 +               */
 +              pos = index_name_pos(&o->orig_index, path, strlen(path));
 +              ce = o->orig_index.cache[pos];
 +              if (ce_skip_worktree(ce)) {
 +                      pos = index_name_pos(&the_index, path, strlen(path));
 +                      ce = the_index.cache[pos];
 +                      ce->ce_flags |= CE_SKIP_WORKTREE;
 +              }
                return mfi.clean;
        }
  
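The skip-worktree handling above matters for sparse checkouts: add_cacheinfo()
replaces the old cache entry, so the flag has to be carried over by hand or
the path would suddenly look deleted from the working tree.  The same pattern
with the lookups guarded, as a sketch only (the merge code can assume both
entries exist):

	int pos = index_name_pos(&o->orig_index, path, strlen(path));

	if (pos >= 0 && ce_skip_worktree(o->orig_index.cache[pos])) {
		int newpos = index_name_pos(&the_index, path, strlen(path));
		if (newpos >= 0)
			the_index.cache[newpos]->ce_flags |= CE_SKIP_WORKTREE;
	}
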
diff --combined read-cache.c
index c5fabc844aad68f60a30d0ff7498ff7e5d692a7a,3a930bbcc1af792840e0af9dfe65b94b19faa00c..7b1354d7590a70ecbd6e508bdd95eafd4793efcc
@@@ -1493,7 -1493,7 +1493,7 @@@ int refresh_index(struct index_state *i
                if (ignore_submodules && S_ISGITLINK(ce->ce_mode))
                        continue;
  
-               if (pathspec && !ce_path_match(ce, pathspec, seen))
+               if (pathspec && !ce_path_match(&the_index, ce, pathspec, seen))
                        filtered = 1;
  
                if (ce_stage(ce)) {
  
  /*
   * Read the index file that is potentially unmerged into given
 - * index_state, dropping any unmerged entries.  Returns true if
 - * the index is unmerged.  Callers who want to refuse to work
 - * from an unmerged state can call this and check its return value,
 - * instead of calling read_cache().
 + * index_state, dropping any unmerged entries to stage #0 (potentially
 + * resulting in a path appearing as both a file and a directory in the
 + * index; the caller is responsible to clear out the extra entries
 + * before writing the index to a tree).  Returns true if the index is
 + * unmerged.  Callers who want to refuse to work from an unmerged
 + * state can call this and check its return value, instead of calling
 + * read_cache().
   */
  int read_index_unmerged(struct index_state *istate)
  {
                new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED;
                new_ce->ce_namelen = len;
                new_ce->ce_mode = ce->ce_mode;
 -              if (add_index_entry(istate, new_ce, 0))
 +              if (add_index_entry(istate, new_ce, ADD_CACHE_SKIP_DFCHECK))
                        return error("%s: cannot drop to stage #0",
                                     new_ce->name);
        }
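read_index_unmerged() now drops conflicted entries to stage #0 with
ADD_CACHE_SKIP_DFCHECK, so a path may temporarily appear as both a file and
a directory until the caller cleans that up.  Callers that simply refuse to
run on an unmerged index keep the usual pattern; a minimal sketch, assuming
git's internal headers (the message text is only an example):

	if (read_index_unmerged(&the_index))
		die(_("you need to resolve your current index first"));
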
diff --combined sequencer.c
index b8f274bdec7aca3b93b13f985b6083f95ea5cfac,4d40f50c1c9d5aae301bb1ead04d67d02d113cba..c00eedd8568519433462b7d94dd5ef4d46fef96f
@@@ -307,7 -307,7 +307,7 @@@ static const char *action_name(const st
        case REPLAY_INTERACTIVE_REBASE:
                return N_("rebase -i");
        }
 -      die(_("Unknown action: %d"), opts->action);
 +      die(_("unknown action: %d"), opts->action);
  }
  
  struct commit_message {
@@@ -654,7 -654,6 +654,7 @@@ missing_author
                        strbuf_addch(&buf, *(message++));
                else
                        strbuf_addf(&buf, "'\\\\%c'", *(message++));
 +      strbuf_addch(&buf, '\'');
        res = write_message(buf.buf, buf.len, rebase_path_author_script(), 1);
        strbuf_release(&buf);
        return res;
@@@ -709,51 -708,43 +709,51 @@@ static const char *read_author_ident(st
        const char *keys[] = {
                "GIT_AUTHOR_NAME=", "GIT_AUTHOR_EMAIL=", "GIT_AUTHOR_DATE="
        };
 -      char *in, *out, *eol;
 -      int i = 0, len;
 +      struct strbuf out = STRBUF_INIT;
 +      char *in, *eol;
 +      const char *val[3];
 +      int i = 0;
  
        if (strbuf_read_file(buf, rebase_path_author_script(), 256) <= 0)
                return NULL;
  
        /* dequote values and construct ident line in-place */
 -      for (in = out = buf->buf; i < 3 && in - buf->buf < buf->len; i++) {
 +      for (in = buf->buf; i < 3 && in - buf->buf < buf->len; i++) {
                if (!skip_prefix(in, keys[i], (const char **)&in)) {
 -                      warning("could not parse '%s' (looking for '%s'",
 +                      warning(_("could not parse '%s' (looking for '%s')"),
                                rebase_path_author_script(), keys[i]);
                        return NULL;
                }
  
                eol = strchrnul(in, '\n');
                *eol = '\0';
 -              sq_dequote(in);
 -              len = strlen(in);
 -
 -              if (i > 0) /* separate values by spaces */
 -                      *(out++) = ' ';
 -              if (i == 1) /* email needs to be surrounded by <...> */
 -                      *(out++) = '<';
 -              memmove(out, in, len);
 -              out += len;
 -              if (i == 1) /* email needs to be surrounded by <...> */
 -                      *(out++) = '>';
 +              if (!sq_dequote(in)) {
 +                      warning(_("bad quoting on %s value in '%s'"),
 +                              keys[i], rebase_path_author_script());
 +                      return NULL;
 +              }
 +              val[i] = in;
                in = eol + 1;
        }
  
        if (i < 3) {
 -              warning("could not parse '%s' (looking for '%s')",
 +              warning(_("could not parse '%s' (looking for '%s')"),
                        rebase_path_author_script(), keys[i]);
                return NULL;
        }
  
 -      buf->len = out - buf->buf;
 +      /* validate date since fmt_ident() will die() on bad value */
 +      if (parse_date(val[2], &out)) {
 +              warning(_("invalid date format '%s' in '%s'"),
 +                      val[2], rebase_path_author_script());
 +              strbuf_release(&out);
 +              return NULL;
 +      }
 +
 +      strbuf_reset(&out);
 +      strbuf_addstr(&out, fmt_ident(val[0], val[1], val[2], 0));
 +      strbuf_swap(buf, &out);
 +      strbuf_release(&out);
        return buf->buf;
  }
  
@@@ -1253,7 -1244,7 +1253,7 @@@ static int try_to_commit(struct strbuf 
                commit_list_insert(current_head, &parents);
        }
  
-       if (write_cache_as_tree(&tree, 0, NULL)) {
+       if (write_index_as_tree(&tree, &the_index, get_index_file(), 0, NULL)) {
                res = error(_("git write-tree failed to write a tree"));
                goto out;
        }
@@@ -1454,7 -1445,7 +1454,7 @@@ static const char *command_to_string(co
  {
        if (command < TODO_COMMENT)
                return todo_command_info[command].str;
 -      die("Unknown command: %d", command);
 +      die(_("unknown command: %d"), command);
  }
  
  static char command_to_char(const enum todo_command command)
@@@ -1639,7 -1630,7 +1639,7 @@@ static int do_pick_commit(enum todo_com
                 * that represents the "current" state for merge-recursive
                 * to work on.
                 */
-               if (write_cache_as_tree(&head, 0, NULL))
+               if (write_index_as_tree(&head, &the_index, get_index_file(), 0, NULL))
                        return error(_("your index file is unmerged."));
        } else {
                unborn = get_oid("HEAD", &head);
@@@ -2617,17 -2608,15 +2617,17 @@@ static int error_with_patch(struct comm
                if (intend_to_amend())
                        return -1;
  
 -              fprintf(stderr, "You can amend the commit now, with\n"
 -                      "\n"
 -                      "  git commit --amend %s\n"
 -                      "\n"
 -                      "Once you are satisfied with your changes, run\n"
 -                      "\n"
 -                      "  git rebase --continue\n", gpg_sign_opt_quoted(opts));
 +              fprintf(stderr,
 +                      _("You can amend the commit now, with\n"
 +                        "\n"
 +                        "  git commit --amend %s\n"
 +                        "\n"
 +                        "Once you are satisfied with your changes, run\n"
 +                        "\n"
 +                        "  git rebase --continue\n"),
 +                      gpg_sign_opt_quoted(opts));
        } else if (exit_code)
 -              fprintf(stderr, "Could not apply %s... %.*s\n",
 +              fprintf_ln(stderr, _("Could not apply %s... %.*s"),
                        short_commit_name(commit), subject_len, subject);
  
        return exit_code;
@@@ -2741,7 -2730,7 +2741,7 @@@ static int do_label(const char *name, i
        struct object_id head_oid;
  
        if (len == 1 && *name == '#')
 -              return error("Illegal label name: '%.*s'", len, name);
 +              return error(_("illegal label name: '%.*s'"), len, name);
  
        strbuf_addf(&ref_name, "refs/rewritten/%.*s", len, name);
        strbuf_addf(&msg, "rebase -i (label) '%.*s'", len, name);
@@@ -4255,9 -4244,10 +4255,9 @@@ int sequencer_add_exec_commands(const c
  {
        const char *todo_file = rebase_path_todo();
        struct todo_list todo_list = TODO_LIST_INIT;
 -      struct todo_item *item;
        struct strbuf *buf = &todo_list.buf;
        size_t offset = 0, commands_len = strlen(commands);
 -      int i, first;
 +      int i, insert;
  
        if (strbuf_read_file(&todo_list.buf, todo_file, 0) < 0)
                return error(_("could not read '%s'."), todo_file);
                return error(_("unusable todo list: '%s'"), todo_file);
        }
  
 -      first = 1;
 -      /* insert <commands> before every pick except the first one */
 -      for (item = todo_list.items, i = 0; i < todo_list.nr; i++, item++) {
 -              if (item->command == TODO_PICK && !first) {
 -                      strbuf_insert(buf, item->offset_in_buf + offset,
 -                                    commands, commands_len);
 +      /*
 +       * Insert <commands> after every pick. Here, fixup/squash chains
 +       * are considered part of the pick, so we insert the commands *after*
 +       * those chains if there are any.
 +       */
 +      insert = -1;
 +      for (i = 0; i < todo_list.nr; i++) {
 +              enum todo_command command = todo_list.items[i].command;
 +
 +              if (insert >= 0) {
 +                      /* skip fixup/squash chains */
 +                      if (command == TODO_COMMENT)
 +                              continue;
 +                      else if (is_fixup(command)) {
 +                              insert = i + 1;
 +                              continue;
 +                      }
 +                      strbuf_insert(buf,
 +                                    todo_list.items[insert].offset_in_buf +
 +                                    offset, commands, commands_len);
                        offset += commands_len;
 +                      insert = -1;
                }
 -              first = 0;
 +
 +              if (command == TODO_PICK || command == TODO_MERGE)
 +                      insert = i + 1;
        }
  
 -      /* append final <commands> */
 -      strbuf_add(buf, commands, commands_len);
 +      /* insert or append final <commands> */
 +      if (insert >= 0 && insert < todo_list.nr)
 +              strbuf_insert(buf, todo_list.items[insert].offset_in_buf +
 +                            offset, commands, commands_len);
 +      else if (insert >= 0 || !offset)
 +              strbuf_add(buf, commands, commands_len);
  
        i = write_message(buf->buf, buf->len, todo_file, 0);
        todo_list_release(&todo_list);
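sequencer_add_exec_commands() now inserts the exec lines after every pick or
merge, skipping over any fixup/squash chain that follows, and appends one
more copy after the final pick.  An illustrative todo list (hypothetical
hashes and command) before and after adding "exec make test":

	# before
	pick 1111111 first
	fixup 2222222 fixup! first
	pick 3333333 second

	# after
	pick 1111111 first
	fixup 2222222 fixup! first
	exec make test
	pick 3333333 second
	exec make test
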
diff --combined sha1-file.c
index 56e5329caf23c50d783c88b4058363cc1d847d57,0016d460526f631af78ae83a249c4bfe6447400a..97b74238483e00c3f07bd5ab0879eb84bf5c8dfa
@@@ -71,17 -71,17 +71,17 @@@ static void git_hash_sha1_final(unsigne
  
  static void git_hash_unknown_init(git_hash_ctx *ctx)
  {
 -      die("trying to init unknown hash");
 +      BUG("trying to init unknown hash");
  }
  
  static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
  {
 -      die("trying to update unknown hash");
 +      BUG("trying to update unknown hash");
  }
  
  static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
  {
 -      die("trying to finalize unknown hash");
 +      BUG("trying to finalize unknown hash");
  }
  
  const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
@@@ -378,8 -378,8 +378,8 @@@ static int alt_odb_usable(struct raw_ob
  
        /* Detect cases where alternate disappeared */
        if (!is_directory(path->buf)) {
 -              error("object directory %s does not exist; "
 -                    "check .git/objects/info/alternates.",
 +              error(_("object directory %s does not exist; "
 +                      "check .git/objects/info/alternates"),
                      path->buf);
                return 0;
        }
@@@ -429,7 -429,7 +429,7 @@@ static int link_alt_odb_entry(struct re
        strbuf_addstr(&pathbuf, entry);
  
        if (strbuf_normalize_path(&pathbuf) < 0 && relative_base) {
 -              error("unable to normalize alternate object path: %s",
 +              error(_("unable to normalize alternate object path: %s"),
                      pathbuf.buf);
                strbuf_release(&pathbuf);
                return -1;
@@@ -500,14 -500,14 +500,14 @@@ static void link_alt_odb_entries(struc
                return;
  
        if (depth > 5) {
 -              error("%s: ignoring alternate object stores, nesting too deep.",
 +              error(_("%s: ignoring alternate object stores, nesting too deep"),
                                relative_base);
                return;
        }
  
        strbuf_add_absolute_path(&objdirbuf, r->objects->objectdir);
        if (strbuf_normalize_path(&objdirbuf) < 0)
 -              die("unable to normalize object directory: %s",
 +              die(_("unable to normalize object directory: %s"),
                    objdirbuf.buf);
  
        while (*alt) {
@@@ -562,7 -562,7 +562,7 @@@ void add_to_alternates_file(const char 
        hold_lock_file_for_update(&lock, alts, LOCK_DIE_ON_ERROR);
        out = fdopen_lock_file(&lock, "w");
        if (!out)
 -              die_errno("unable to fdopen alternates lockfile");
 +              die_errno(_("unable to fdopen alternates lockfile"));
  
        in = fopen(alts, "r");
        if (in) {
                fclose(in);
        }
        else if (errno != ENOENT)
 -              die_errno("unable to read alternates file");
 +              die_errno(_("unable to read alternates file"));
  
        if (found) {
                rollback_lock_file(&lock);
        } else {
                fprintf_or_die(out, "%s\n", reference);
                if (commit_lock_file(&lock))
 -                      die_errno("unable to move new alternates file into place");
 +                      die_errno(_("unable to move new alternates file into place"));
                if (the_repository->objects->alt_odb_tail)
                        link_alt_odb_entries(the_repository, reference,
                                             '\n', NULL, 0);
@@@ -778,7 -778,7 +778,7 @@@ static void mmap_limit_check(size_t len
                        limit = SIZE_MAX;
        }
        if (length > limit)
 -              die("attempting to mmap %"PRIuMAX" over limit %"PRIuMAX,
 +              die(_("attempting to mmap %"PRIuMAX" over limit %"PRIuMAX),
                    (uintmax_t)length, (uintmax_t)limit);
  }
  
@@@ -803,7 -803,7 +803,7 @@@ void *xmmap(void *start, size_t length
  {
        void *ret = xmmap_gently(start, length, prot, flags, fd, offset);
        if (ret == MAP_FAILED)
 -              die_errno("mmap failed");
 +              die_errno(_("mmap failed"));
        return ret;
  }
  
@@@ -970,7 -970,7 +970,7 @@@ static void *map_sha1_file_1(struct rep
                        *size = xsize_t(st.st_size);
                        if (!*size) {
                                /* mmap() is forbidden on empty files */
 -                              error("object file %s is empty", path);
 +                              error(_("object file %s is empty"), path);
                                return NULL;
                        }
                        map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
@@@ -1090,9 -1090,9 +1090,9 @@@ static void *unpack_sha1_rest(git_zstre
        }
  
        if (status < 0)
 -              error("corrupt loose object '%s'", sha1_to_hex(sha1));
 +              error(_("corrupt loose object '%s'"), sha1_to_hex(sha1));
        else if (stream->avail_in)
 -              error("garbage at end of loose object '%s'",
 +              error(_("garbage at end of loose object '%s'"),
                      sha1_to_hex(sha1));
        free(buf);
        return NULL;
@@@ -1134,7 -1134,7 +1134,7 @@@ static int parse_sha1_header_extended(c
        if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE) && (type < 0))
                type = 0;
        else if (type < 0)
 -              die("invalid object type");
 +              die(_("invalid object type"));
        if (oi->typep)
                *oi->typep = type;
  
@@@ -1216,19 -1216,19 +1216,19 @@@ static int sha1_loose_object_info(struc
                *oi->disk_sizep = mapsize;
        if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE)) {
                if (unpack_sha1_header_to_strbuf(&stream, map, mapsize, hdr, sizeof(hdr), &hdrbuf) < 0)
 -                      status = error("unable to unpack %s header with --allow-unknown-type",
 +                      status = error(_("unable to unpack %s header with --allow-unknown-type"),
                                       sha1_to_hex(sha1));
        } else if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
 -              status = error("unable to unpack %s header",
 +              status = error(_("unable to unpack %s header"),
                               sha1_to_hex(sha1));
        if (status < 0)
                ; /* Do nothing */
        else if (hdrbuf.len) {
                if ((status = parse_sha1_header_extended(hdrbuf.buf, oi, flags)) < 0)
 -                      status = error("unable to parse %s header with --allow-unknown-type",
 +                      status = error(_("unable to parse %s header with --allow-unknown-type"),
                                       sha1_to_hex(sha1));
        } else if ((status = parse_sha1_header_extended(hdr, oi, flags)) < 0)
 -              status = error("unable to parse %s header", sha1_to_hex(sha1));
 +              status = error(_("unable to parse %s header"), sha1_to_hex(sha1));
  
        if (status >= 0 && oi->contentp) {
                *oi->contentp = unpack_sha1_rest(&stream, hdr,
@@@ -1419,19 -1419,19 +1419,19 @@@ void *read_object_file_extended(const s
                return data;
  
        if (errno && errno != ENOENT)
 -              die_errno("failed to read object %s", oid_to_hex(oid));
 +              die_errno(_("failed to read object %s"), oid_to_hex(oid));
  
        /* die if we replaced an object with one that does not exist */
        if (repl != oid)
 -              die("replacement %s not found for %s",
 +              die(_("replacement %s not found for %s"),
                    oid_to_hex(repl), oid_to_hex(oid));
  
        if (!stat_sha1_file(the_repository, repl->hash, &st, &path))
 -              die("loose object %s (stored in %s) is corrupt",
 +              die(_("loose object %s (stored in %s) is corrupt"),
                    oid_to_hex(repl), path);
  
        if ((p = has_packed_and_bad(repl->hash)) != NULL)
 -              die("packed object %s (stored in %s) is corrupt",
 +              die(_("packed object %s (stored in %s) is corrupt"),
                    oid_to_hex(repl), p->pack_name);
  
        return NULL;
@@@ -1533,21 -1533,21 +1533,21 @@@ int finalize_object_file(const char *tm
        unlink_or_warn(tmpfile);
        if (ret) {
                if (ret != EEXIST) {
 -                      return error_errno("unable to write sha1 filename %s", filename);
 +                      return error_errno(_("unable to write sha1 filename %s"), filename);
                }
                /* FIXME!!! Collision check here ? */
        }
  
  out:
        if (adjust_shared_perm(filename))
 -              return error("unable to set permission to '%s'", filename);
 +              return error(_("unable to set permission to '%s'"), filename);
        return 0;
  }
  
  static int write_buffer(int fd, const void *buf, size_t len)
  {
        if (write_in_full(fd, buf, len) < 0)
 -              return error_errno("file write error");
 +              return error_errno(_("file write error"));
        return 0;
  }
  
@@@ -1566,7 -1566,7 +1566,7 @@@ static void close_sha1_file(int fd
        if (fsync_object_files)
                fsync_or_die(fd, "sha1 file");
        if (close(fd) != 0)
 -              die_errno("error when closing sha1 file");
 +              die_errno(_("error when closing sha1 file"));
  }
  
  /* Size of directory component, including the ending '/' */
@@@ -1632,9 -1632,9 +1632,9 @@@ static int write_loose_object(const str
        fd = create_tmpfile(&tmp_file, filename.buf);
        if (fd < 0) {
                if (errno == EACCES)
 -                      return error("insufficient permission for adding an object to repository database %s", get_object_directory());
 +                      return error(_("insufficient permission for adding an object to repository database %s"), get_object_directory());
                else
 -                      return error_errno("unable to create temporary file");
 +                      return error_errno(_("unable to create temporary file"));
        }
  
        /* Set it up */
                ret = git_deflate(&stream, Z_FINISH);
                the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
                if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
 -                      die("unable to write sha1 file");
 +                      die(_("unable to write sha1 file"));
                stream.next_out = compressed;
                stream.avail_out = sizeof(compressed);
        } while (ret == Z_OK);
  
        if (ret != Z_STREAM_END)
 -              die("unable to deflate new object %s (%d)", oid_to_hex(oid),
 +              die(_("unable to deflate new object %s (%d)"), oid_to_hex(oid),
                    ret);
        ret = git_deflate_end_gently(&stream);
        if (ret != Z_OK)
 -              die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
 +              die(_("deflateEnd on object %s failed (%d)"), oid_to_hex(oid),
                    ret);
        the_hash_algo->final_fn(parano_oid.hash, &c);
        if (oidcmp(oid, &parano_oid) != 0)
 -              die("confused by unstable object source data for %s",
 +              die(_("confused by unstable object source data for %s"),
                    oid_to_hex(oid));
  
        close_sha1_file(fd);
                utb.actime = mtime;
                utb.modtime = mtime;
                if (utime(tmp_file.buf, &utb) < 0)
 -                      warning_errno("failed utime() on %s", tmp_file.buf);
 +                      warning_errno(_("failed utime() on %s"), tmp_file.buf);
        }
  
        return finalize_object_file(tmp_file.buf, filename.buf);
@@@ -1757,7 -1757,7 +1757,7 @@@ int force_object_loose(const struct obj
                return 0;
        buf = read_object(oid->hash, &type, &len);
        if (!buf)
 -              return error("cannot read sha1_file for %s", oid_to_hex(oid));
 +              return error(_("cannot read sha1_file for %s"), oid_to_hex(oid));
        hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
        ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
        free(buf);
@@@ -1802,7 -1802,7 +1802,7 @@@ static void check_commit(const void *bu
        struct commit c;
        memset(&c, 0, sizeof(c));
        if (parse_commit_buffer(the_repository, &c, buf, size, 0))
 -              die("corrupt commit");
 +              die(_("corrupt commit"));
  }
  
  static void check_tag(const void *buf, size_t size)
        struct tag t;
        memset(&t, 0, sizeof(t));
        if (parse_tag_buffer(the_repository, &t, buf, size))
 -              die("corrupt tag");
 +              die(_("corrupt tag"));
  }
  
  static int index_mem(struct object_id *oid, void *buf, size_t size,
@@@ -1860,7 -1860,7 +1860,7 @@@ static int index_stream_convert_blob(st
        struct strbuf sbuf = STRBUF_INIT;
  
        assert(path);
-       assert(would_convert_to_git_filter_fd(path));
+       assert(would_convert_to_git_filter_fd(&the_index, path));
  
        convert_to_git_filter_fd(&the_index, path, fd, &sbuf,
                                 get_conv_flags(flags));
@@@ -1903,10 -1903,10 +1903,10 @@@ static int index_core(struct object_id 
                char *buf = xmalloc(size);
                ssize_t read_result = read_in_full(fd, buf, size);
                if (read_result < 0)
 -                      ret = error_errno("read error while indexing %s",
 +                      ret = error_errno(_("read error while indexing %s"),
                                          path ? path : "<unknown>");
                else if (read_result != size)
 -                      ret = error("short read while indexing %s",
 +                      ret = error(_("short read while indexing %s"),
                                    path ? path : "<unknown>");
                else
                        ret = index_mem(oid, buf, size, type, path, flags);
@@@ -1950,7 -1950,7 +1950,7 @@@ int index_fd(struct object_id *oid, in
         * Call xsize_t() only when needed to avoid potentially unnecessary
         * die() for large files.
         */
-       if (type == OBJ_BLOB && path && would_convert_to_git_filter_fd(path))
+       if (type == OBJ_BLOB && path && would_convert_to_git_filter_fd(&the_index, path))
                ret = index_stream_convert_blob(oid, fd, path, flags);
        else if (!S_ISREG(st->st_mode))
                ret = index_pipe(oid, fd, type, path, flags);
@@@ -1977,7 -1977,7 +1977,7 @@@ int index_path(struct object_id *oid, c
                if (fd < 0)
                        return error_errno("open(\"%s\")", path);
                if (index_fd(oid, fd, st, OBJ_BLOB, path, flags) < 0)
 -                      return error("%s: failed to insert into database",
 +                      return error(_("%s: failed to insert into database"),
                                     path);
                break;
        case S_IFLNK:
                if (!(flags & HASH_WRITE_OBJECT))
                        hash_object_file(sb.buf, sb.len, blob_type, oid);
                else if (write_object_file(sb.buf, sb.len, blob_type, oid))
 -                      rc = error("%s: failed to insert into database", path);
 +                      rc = error(_("%s: failed to insert into database"), path);
                strbuf_release(&sb);
                break;
        case S_IFDIR:
                return resolve_gitlink_ref(path, "HEAD", oid);
        default:
 -              return error("%s: unsupported file type", path);
 +              return error(_("%s: unsupported file type"), path);
        }
        return rc;
  }
@@@ -2016,9 -2016,9 +2016,9 @@@ void assert_oid_type(const struct objec
  {
        enum object_type type = oid_object_info(the_repository, oid, NULL);
        if (type < 0)
 -              die("%s is not a valid object", oid_to_hex(oid));
 +              die(_("%s is not a valid object"), oid_to_hex(oid));
        if (type != expect)
 -              die("%s is not a valid '%s' object", oid_to_hex(oid),
 +              die(_("%s is not a valid '%s' object"), oid_to_hex(oid),
                    type_name(expect));
  }
  
@@@ -2045,7 -2045,7 +2045,7 @@@ int for_each_file_in_obj_subdir(unsigne
        dir = opendir(path->buf);
        if (!dir) {
                if (errno != ENOENT)
 -                      r = error_errno("unable to open %s", path->buf);
 +                      r = error_errno(_("unable to open %s"), path->buf);
                strbuf_setlen(path, origlen);
                return r;
        }
@@@ -2146,8 -2146,7 +2146,8 @@@ static int loose_from_alt_odb(struct al
        return r;
  }
  
 -int for_each_loose_object(each_loose_object_fn cb, void *data, unsigned flags)
 +int for_each_loose_object(each_loose_object_fn cb, void *data,
 +                        enum for_each_object_flags flags)
  {
        struct loose_alt_odb_data alt;
        int r;
@@@ -2203,18 -2202,18 +2203,18 @@@ static int check_stream_sha1(git_zstrea
        git_inflate_end(stream);
  
        if (status != Z_STREAM_END) {
 -              error("corrupt loose object '%s'", sha1_to_hex(expected_sha1));
 +              error(_("corrupt loose object '%s'"), sha1_to_hex(expected_sha1));
                return -1;
        }
        if (stream->avail_in) {
 -              error("garbage at end of loose object '%s'",
 +              error(_("garbage at end of loose object '%s'"),
                      sha1_to_hex(expected_sha1));
                return -1;
        }
  
        the_hash_algo->final_fn(real_sha1, &c);
        if (hashcmp(expected_sha1, real_sha1)) {
 -              error("sha1 mismatch for %s (expected %s)", path,
 +              error(_("sha1 mismatch for %s (expected %s)"), path,
                      sha1_to_hex(expected_sha1));
                return -1;
        }
@@@ -2238,18 -2237,18 +2238,18 @@@ int read_loose_object(const char *path
  
        map = map_sha1_file_1(the_repository, path, NULL, &mapsize);
        if (!map) {
 -              error_errno("unable to mmap %s", path);
 +              error_errno(_("unable to mmap %s"), path);
                goto out;
        }
  
        if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) {
 -              error("unable to unpack header of %s", path);
 +              error(_("unable to unpack header of %s"), path);
                goto out;
        }
  
        *type = parse_sha1_header(hdr, size);
        if (*type < 0) {
 -              error("unable to parse header of %s", path);
 +              error(_("unable to parse header of %s"), path);
                git_inflate_end(&stream);
                goto out;
        }
        } else {
                *contents = unpack_sha1_rest(&stream, hdr, *size, expected_oid->hash);
                if (!*contents) {
 -                      error("unable to unpack contents of %s", path);
 +                      error(_("unable to unpack contents of %s"), path);
                        git_inflate_end(&stream);
                        goto out;
                }
                if (check_object_signature(expected_oid, *contents,
                                         *size, type_name(*type))) {
 -                      error("sha1 mismatch for %s (expected %s)", path,
 +                      error(_("sha1 mismatch for %s (expected %s)"), path,
                              oid_to_hex(expected_oid));
                        free(*contents);
                        goto out;