Merge branch 'nd/status-refresh-progress'
authorJunio C Hamano <gitster@pobox.com>
Fri, 19 Oct 2018 04:34:03 +0000 (13:34 +0900)
committerJunio C Hamano <gitster@pobox.com>
Fri, 19 Oct 2018 04:34:03 +0000 (13:34 +0900)
"git status" learns to show progress bar when refreshing the index
takes a long time.

* nd/status-refresh-progress:
status: show progress bar if refreshing the index takes too long

1  2 
builtin/am.c
builtin/commit.c
cache.h
preload-index.c
read-cache.c
sequencer.c
diff --combined builtin/am.c
index d79f092a96f8e9aab59c6d6c623c8fd7fbcb677d,22a93cfef34c0807f0e717031e0e02b6f11bebf0..3ee9a9d2a92aaa0e9716719640deaec7e260c811
@@@ -1244,10 -1244,6 +1244,10 @@@ static int parse_mail(struct am_state *
        fclose(mi.input);
        fclose(mi.output);
  
 +      if (mi.format_flowed)
 +              warning(_("Patch sent with format=flowed; "
 +                        "space at the end of lines might be lost."));
 +
        /* Extract message and author information */
        fp = xfopen(am_path(state, "info"), "r");
        while (!strbuf_getline_lf(&sb, fp)) {
@@@ -1376,7 -1372,7 +1376,7 @@@ static void write_commit_patch(const st
        FILE *fp;
  
        fp = xfopen(am_path(state, "patch"), "w");
 -      init_revisions(&rev_info, NULL);
 +      repo_init_revisions(the_repository, &rev_info, NULL);
        rev_info.diff = 1;
        rev_info.abbrev = 0;
        rev_info.disable_stdin = 1;
@@@ -1411,7 -1407,7 +1411,7 @@@ static void write_index_patch(const str
                                   the_repository->hash_algo->empty_tree);
  
        fp = xfopen(am_path(state, "patch"), "w");
 -      init_revisions(&rev_info, NULL);
 +      repo_init_revisions(the_repository, &rev_info, NULL);
        rev_info.diff = 1;
        rev_info.disable_stdin = 1;
        rev_info.no_commit_id = 1;
@@@ -1569,7 -1565,7 +1569,7 @@@ static int fall_back_threeway(const str
                struct rev_info rev_info;
                const char *diff_filter_str = "--diff-filter=AM";
  
 -              init_revisions(&rev_info, NULL);
 +              repo_init_revisions(the_repository, &rev_info, NULL);
                rev_info.diffopt.output_format = DIFF_FORMAT_NAME_STATUS;
                diff_opt_parse(&rev_info.diffopt, &diff_filter_str, 1, rev_info.prefix);
                add_pending_oid(&rev_info, "HEAD", &our_tree, 0);
                o.verbosity = 0;
  
        if (merge_recursive_generic(&o, &our_tree, &their_tree, 1, bases, &result)) {
 -              rerere(state->allow_rerere_autoupdate);
 +              repo_rerere(the_repository, state->allow_rerere_autoupdate);
                free(their_tree_name);
                return error(_("Failed to merge in the changes."));
        }
@@@ -1903,7 -1899,7 +1903,7 @@@ static void am_resolve(struct am_state 
                        goto next;
        }
  
 -      rerere(0);
 +      repo_rerere(the_repository, 0);
  
        do_commit(state);
  
@@@ -2082,7 -2078,7 +2082,7 @@@ static int safe_to_abort(const struct a
        if (get_oid("HEAD", &head))
                oidclr(&head);
  
 -      if (!oidcmp(&head, &abort_safety))
 +      if (oideq(&head, &abort_safety))
                return 1;
  
        warning(_("You seem to have moved HEAD since the last 'am' failure.\n"
@@@ -2328,7 -2324,7 +2328,7 @@@ int cmd_am(int argc, const char **argv
        /* Ensure a valid committer ident can be constructed */
        git_committer_info(IDENT_STRICT);
  
-       if (read_index_preload(&the_index, NULL) < 0)
+       if (read_index_preload(&the_index, NULL, 0) < 0)
                die(_("failed to read the index"));
  
        if (in_progress) {
diff --combined builtin/commit.c
index 31b44e5d629db82781ee1bd1afc45a47c8936ba9,752346306690a8aa7421882c70660b4af3ec5cbc..074bd9a55160a8efcaa1c108afcf6bd6bc360626
@@@ -33,8 -33,6 +33,8 @@@
  #include "sequencer.h"
  #include "mailmap.h"
  #include "help.h"
 +#include "commit-reach.h"
 +#include "commit-graph.h"
  
  static const char * const builtin_commit_usage[] = {
        N_("git commit [<options>] [--] <pathspec>..."),
@@@ -508,9 -506,8 +508,9 @@@ static int run_status(FILE *fp, const c
  
        wt_status_collect(s);
        wt_status_print(s);
 +      wt_status_collect_free_buffers(s);
  
 -      return s->commitable;
 +      return s->committable;
  }
  
  static int is_a_merge(const struct commit *current_head)
@@@ -656,7 -653,7 +656,7 @@@ static int prepare_to_commit(const cha
  {
        struct stat statbuf;
        struct strbuf committer_ident = STRBUF_INIT;
 -      int commitable;
 +      int committable;
        struct strbuf sb = STRBUF_INIT;
        const char *hook_arg1 = NULL;
        const char *hook_arg2 = NULL;
  
                saved_color_setting = s->use_color;
                s->use_color = 0;
 -              commitable = run_status(s->fp, index_file, prefix, 1, s);
 +              committable = run_status(s->fp, index_file, prefix, 1, s);
                s->use_color = saved_color_setting;
 +              string_list_clear(&s->change, 1);
        } else {
                struct object_id oid;
                const char *parent = "HEAD";
                        for (i = 0; i < active_nr; i++)
                                if (ce_intent_to_add(active_cache[i]))
                                        ita_nr++;
 -                      commitable = active_nr - ita_nr > 0;
 +                      committable = active_nr - ita_nr > 0;
                } else {
                        /*
                         * Unless the user did explicitly request a submodule
                        if (ignore_submodule_arg &&
                            !strcmp(ignore_submodule_arg, "all"))
                                flags.ignore_submodules = 1;
 -                      commitable = index_differs_from(parent, &flags, 1);
 +                      committable = index_differs_from(parent, &flags, 1);
                }
        }
        strbuf_release(&committer_ident);
         * explicit --allow-empty. In the cherry-pick case, it may be
         * empty due to conflict resolution, which the user should okay.
         */
 -      if (!commitable && whence != FROM_MERGE && !allow_empty &&
 +      if (!committable && whence != FROM_MERGE && !allow_empty &&
            !(amend && is_a_merge(current_head))) {
                s->display_comment_prefix = old_display_comment_prefix;
                run_status(stdout, index_file, prefix, 0, s);
@@@ -984,7 -980,7 +984,7 @@@ static const char *find_author_by_nickn
        const char *av[20];
        int ac = 0;
  
 -      init_revisions(&revs, NULL);
 +      repo_init_revisions(the_repository, &revs, NULL);
        strbuf_addf(&buf, "--author=%s", name);
        av[++ac] = "--all";
        av[++ac] = "-i";
@@@ -1190,14 -1186,14 +1190,14 @@@ static int parse_and_validate_options(i
  static int dry_run_commit(int argc, const char **argv, const char *prefix,
                          const struct commit *current_head, struct wt_status *s)
  {
 -      int commitable;
 +      int committable;
        const char *index_file;
  
        index_file = prepare_index(argc, argv, prefix, current_head, 1);
 -      commitable = run_status(stdout, index_file, prefix, 0, s);
 +      committable = run_status(stdout, index_file, prefix, 0, s);
        rollback_index_files();
  
 -      return commitable ? 0 : 1;
 +      return committable ? 0 : 1;
  }
  
  define_list_config_array_extra(color_status_slots, {"added"});
@@@ -1299,6 -1295,7 +1299,7 @@@ int cmd_status(int argc, const char **a
        static int no_renames = -1;
        static const char *rename_score_arg = (const char *)-1;
        static struct wt_status s;
+       unsigned int progress_flag = 0;
        int fd;
        struct object_id oid;
        static struct option builtin_status_options[] = {
                       PATHSPEC_PREFER_FULL,
                       prefix, argv);
  
-       read_cache_preload(&s.pathspec);
-       refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, &s.pathspec, NULL, NULL);
+       if (status_format != STATUS_FORMAT_PORCELAIN &&
+           status_format != STATUS_FORMAT_PORCELAIN_V2)
+               progress_flag = REFRESH_PROGRESS;
+       read_index_preload(&the_index, &s.pathspec, progress_flag);
+       refresh_index(&the_index,
+                     REFRESH_QUIET|REFRESH_UNMERGED|progress_flag,
+                     &s.pathspec, NULL, NULL);
  
        if (use_optional_locks())
                fd = hold_locked_index(&index_lock, 0);
                s.prefix = prefix;
  
        wt_status_print(&s);
 +      wt_status_collect_free_buffers(&s);
 +
        return 0;
  }
  
@@@ -1657,10 -1657,7 +1663,10 @@@ int cmd_commit(int argc, const char **a
                      "new_index file. Check that disk is not full and quota is\n"
                      "not exceeded, and then \"git reset HEAD\" to recover."));
  
 -      rerere(0);
 +      if (git_env_bool(GIT_TEST_COMMIT_GRAPH, 0))
 +              write_commit_graph_reachable(get_object_directory(), 0, 0);
 +
 +      repo_rerere(the_repository, 0);
        run_command_v_opt(argv_gc_auto, RUN_GIT_CMD);
        run_commit_hook(use_editor, get_index_file(), "post-commit", NULL);
        if (amend && !no_post_rewrite) {
diff --combined cache.h
index 1d749abf6916831fe81ea6e256e515ae8fcd149b,35da02be9004fb9b12f9425f38637f468344f21c..59c8a930466d034ea2ce84b362e464803e19a100
+++ b/cache.h
@@@ -410,7 -410,7 +410,7 @@@ void validate_cache_entries(const struc
  
  #define read_cache() read_index(&the_index)
  #define read_cache_from(path) read_index_from(&the_index, (path), (get_git_dir()))
- #define read_cache_preload(pathspec) read_index_preload(&the_index, (pathspec))
+ #define read_cache_preload(pathspec) read_index_preload(&the_index, (pathspec), 0)
  #define is_cache_unborn() is_index_unborn(&the_index)
  #define read_cache_unmerged() read_index_unmerged(&the_index)
  #define discard_cache() discard_index(&the_index)
@@@ -659,7 -659,9 +659,9 @@@ extern int daemonize(void)
  /* Initialize and use the cache information */
  struct lock_file;
  extern int read_index(struct index_state *);
- extern int read_index_preload(struct index_state *, const struct pathspec *pathspec);
+ extern int read_index_preload(struct index_state *,
+                             const struct pathspec *pathspec,
+                             unsigned int refresh_flags);
  extern int do_read_index(struct index_state *istate, const char *path,
                         int must_exist); /* for testting only! */
  extern int read_index_from(struct index_state *, const char *path,
@@@ -703,7 -705,7 +705,7 @@@ extern int unmerged_index(const struct 
   * provided, the space-separated list of files that differ will be appended
   * to it.
   */
 -extern int index_has_changes(const struct index_state *istate,
 +extern int index_has_changes(struct index_state *istate,
                             struct tree *tree,
                             struct strbuf *sb);
  
@@@ -787,8 -789,8 +789,8 @@@ extern int ie_modified(struct index_sta
  #define HASH_WRITE_OBJECT 1
  #define HASH_FORMAT_CHECK 2
  #define HASH_RENORMALIZE  4
 -extern int index_fd(struct object_id *oid, int fd, struct stat *st, enum object_type type, const char *path, unsigned flags);
 -extern int index_path(struct object_id *oid, const char *path, struct stat *st, unsigned flags);
 +extern int index_fd(struct index_state *istate, struct object_id *oid, int fd, struct stat *st, enum object_type type, const char *path, unsigned flags);
 +extern int index_path(struct index_state *istate, struct object_id *oid, const char *path, struct stat *st, unsigned flags);
  
  /*
   * Record to sd the data from st that we use to check whether a file
@@@ -814,6 -816,7 +816,7 @@@ extern void fill_stat_cache_info(struc
  #define REFRESH_IGNORE_MISSING        0x0008  /* ignore non-existent */
  #define REFRESH_IGNORE_SUBMODULES     0x0010  /* ignore submodules */
  #define REFRESH_IN_PORCELAIN  0x0020  /* user friendly output, not "needs update" */
+ #define REFRESH_PROGRESS      0x0040  /* show progress bar if stderr is tty */
  extern int refresh_index(struct index_state *, unsigned int flags, const struct pathspec *pathspec, char *seen, const char *header_msg);
  extern struct cache_entry *refresh_cache_entry(struct index_state *, struct cache_entry *, unsigned int);
  
@@@ -1041,24 -1044,14 +1044,24 @@@ static inline int oidcmp(const struct o
        return hashcmp(oid1->hash, oid2->hash);
  }
  
 +static inline int hasheq(const unsigned char *sha1, const unsigned char *sha2)
 +{
 +      return !hashcmp(sha1, sha2);
 +}
 +
 +static inline int oideq(const struct object_id *oid1, const struct object_id *oid2)
 +{
 +      return hasheq(oid1->hash, oid2->hash);
 +}
 +
  static inline int is_null_sha1(const unsigned char *sha1)
  {
 -      return !hashcmp(sha1, null_sha1);
 +      return hasheq(sha1, null_sha1);
  }
  
  static inline int is_null_oid(const struct object_id *oid)
  {
 -      return !hashcmp(oid->hash, null_sha1);
 +      return hasheq(oid->hash, null_sha1);
  }
  
  static inline void hashcpy(unsigned char *sha_dst, const unsigned char *sha_src)
@@@ -1095,22 -1088,22 +1098,22 @@@ static inline void oidread(struct objec
  
  static inline int is_empty_blob_sha1(const unsigned char *sha1)
  {
 -      return !hashcmp(sha1, the_hash_algo->empty_blob->hash);
 +      return hasheq(sha1, the_hash_algo->empty_blob->hash);
  }
  
  static inline int is_empty_blob_oid(const struct object_id *oid)
  {
 -      return !oidcmp(oid, the_hash_algo->empty_blob);
 +      return oideq(oid, the_hash_algo->empty_blob);
  }
  
  static inline int is_empty_tree_sha1(const unsigned char *sha1)
  {
 -      return !hashcmp(sha1, the_hash_algo->empty_tree->hash);
 +      return hasheq(sha1, the_hash_algo->empty_tree->hash);
  }
  
  static inline int is_empty_tree_oid(const struct object_id *oid)
  {
 -      return !oidcmp(oid, the_hash_algo->empty_tree);
 +      return oideq(oid, the_hash_algo->empty_tree);
  }
  
  const char *empty_tree_oid_hex(void);
@@@ -1528,7 -1521,6 +1531,7 @@@ struct checkout 
        unsigned force:1,
                 quiet:1,
                 not_new:1,
 +               clone:1,
                 refresh_cache:1;
  };
  #define CHECKOUT_INIT { NULL, "" }
@@@ -1705,7 -1697,7 +1708,7 @@@ void shift_tree_by(const struct object_
  /* All WS_* -- when extended, adapt diff.c emit_symbol */
  #define WS_RULE_MASK           07777
  extern unsigned whitespace_rule_cfg;
 -extern unsigned whitespace_rule(const char *);
 +extern unsigned whitespace_rule(struct index_state *, const char *);
  extern unsigned parse_whitespace_rule(const char *);
  extern unsigned ws_check(const char *line, int len, unsigned ws_rule);
  extern void ws_check_emit(const char *line, int len, unsigned ws_rule, FILE *stream, const char *set, const char *reset, const char *ws);
@@@ -1727,12 -1719,10 +1730,12 @@@ extern struct startup_info *startup_inf
  
  /* merge.c */
  struct commit_list;
 -int try_merge_command(const char *strategy, size_t xopts_nr,
 +int try_merge_command(struct repository *r,
 +              const char *strategy, size_t xopts_nr,
                const char **xopts, struct commit_list *common,
                const char *head_arg, struct commit_list *remotes);
 -int checkout_fast_forward(const struct object_id *from,
 +int checkout_fast_forward(struct repository *r,
 +                        const struct object_id *from,
                          const struct object_id *to,
                          int overwrite_ignore);
  
diff --combined preload-index.c
index 16dc5ad868ff0d69a49a30d9130779997267c63b,2541b307e8bafc7280c0ded43346a0d723af1768..9e7152ab14d9359d0a48da8b25cd58253a13fc0c
@@@ -5,11 -5,12 +5,13 @@@
  #include "pathspec.h"
  #include "dir.h"
  #include "fsmonitor.h"
 +#include "config.h"
+ #include "progress.h"
  
  #ifdef NO_PTHREADS
  static void preload_index(struct index_state *index,
-                         const struct pathspec *pathspec)
+                         const struct pathspec *pathspec,
+                         unsigned int refresh_flags)
  {
        ; /* nothing */
  }
  #define MAX_PARALLEL (20)
  #define THREAD_COST (500)
  
+ struct progress_data {
+       unsigned long n;
+       struct progress *progress;
+       pthread_mutex_t mutex;
+ };
  struct thread_data {
        pthread_t pthread;
        struct index_state *index;
        struct pathspec pathspec;
+       struct progress_data *progress;
        int offset, nr;
  };
  
  static void *preload_thread(void *_data)
  {
-       int nr;
+       int nr, last_nr;
        struct thread_data *p = _data;
        struct index_state *index = p->index;
        struct cache_entry **cep = index->cache + p->offset;
@@@ -44,6 -52,7 +53,7 @@@
        nr = p->nr;
        if (nr + p->offset > index->cache_nr)
                nr = index->cache_nr - p->offset;
+       last_nr = nr;
  
        do {
                struct cache_entry *ce = *cep++;
                        continue;
                if (ce->ce_flags & CE_FSMONITOR_VALID)
                        continue;
+               if (p->progress && !(nr & 31)) {
+                       struct progress_data *pd = p->progress;
+                       pthread_mutex_lock(&pd->mutex);
+                       pd->n += last_nr - nr;
+                       display_progress(pd->progress, pd->n);
+                       pthread_mutex_unlock(&pd->mutex);
+                       last_nr = nr;
+               }
                if (!ce_path_match(index, ce, &p->pathspec, NULL))
                        continue;
                if (threaded_has_symlink_leading_path(&cache, ce->name, ce_namelen(ce)))
                ce_mark_uptodate(ce);
                mark_fsmonitor_valid(ce);
        } while (--nr > 0);
+       if (p->progress) {
+               struct progress_data *pd = p->progress;
+               pthread_mutex_lock(&pd->mutex);
+               display_progress(pd->progress, pd->n + last_nr);
+               pthread_mutex_unlock(&pd->mutex);
+       }
        cache_def_clear(&cache);
        return NULL;
  }
  
  static void preload_index(struct index_state *index,
-                         const struct pathspec *pathspec)
+                         const struct pathspec *pathspec,
+                         unsigned int refresh_flags)
  {
        int threads, i, work, offset;
        struct thread_data data[MAX_PARALLEL];
 -      uint64_t start = getnanotime();
+       struct progress_data pd;
  
        if (!core_preload_index)
                return;
  
        threads = index->cache_nr / THREAD_COST;
 -      if ((index->cache_nr > 1) && (threads < 2) && getenv("GIT_FORCE_PRELOAD_TEST"))
 +      if ((index->cache_nr > 1) && (threads < 2) && git_env_bool("GIT_TEST_PRELOAD_INDEX", 0))
                threads = 2;
        if (threads < 2)
                return;
 +      trace_performance_enter();
        if (threads > MAX_PARALLEL)
                threads = MAX_PARALLEL;
        offset = 0;
        work = DIV_ROUND_UP(index->cache_nr, threads);
        memset(&data, 0, sizeof(data));
+       memset(&pd, 0, sizeof(pd));
+       if (refresh_flags & REFRESH_PROGRESS && isatty(2)) {
+               pd.progress = start_delayed_progress(_("Refreshing index"), index->cache_nr);
+               pthread_mutex_init(&pd.mutex, NULL);
+       }
        for (i = 0; i < threads; i++) {
                struct thread_data *p = data+i;
                p->index = index;
                        copy_pathspec(&p->pathspec, pathspec);
                p->offset = offset;
                p->nr = work;
+               if (pd.progress)
+                       p->progress = &pd;
                offset += work;
                if (pthread_create(&p->pthread, NULL, preload_thread, p))
                        die("unable to create threaded lstat");
                if (pthread_join(p->pthread, NULL))
                        die("unable to join threaded lstat");
        }
 -      trace_performance_since(start, "preload index");
+       stop_progress(&pd.progress);
 +      trace_performance_leave("preload index");
  }
  #endif
  
  int read_index_preload(struct index_state *index,
-                      const struct pathspec *pathspec)
+                      const struct pathspec *pathspec,
+                      unsigned int refresh_flags)
  {
        int retval = read_index(index);
  
-       preload_index(index, pathspec);
+       preload_index(index, pathspec, refresh_flags);
        return retval;
  }
diff --combined read-cache.c
index 4c994e4b65cc820351117ccc90647d4534cde415,5969ca93c7e5d933aba0e882a0299b29419a2990..1df5c16dbc31d444b98478bc892821bf1e2dda85
@@@ -23,7 -23,7 +23,8 @@@
  #include "split-index.h"
  #include "utf8.h"
  #include "fsmonitor.h"
 +#include "thread-utils.h"
+ #include "progress.h"
  
  /* Mask for the name length in ce_flags in the on-disk index */
  
@@@ -44,8 -44,6 +45,8 @@@
  #define CACHE_EXT_LINK 0x6c696e6b       /* "link" */
  #define CACHE_EXT_UNTRACKED 0x554E5452          /* "UNTR" */
  #define CACHE_EXT_FSMONITOR 0x46534D4E          /* "FSMN" */
 +#define CACHE_EXT_ENDOFINDEXENTRIES 0x454F4945        /* "EOIE" */
 +#define CACHE_EXT_INDEXENTRYOFFSETTABLE 0x49454F54 /* "IEOT" */
  
  /* changes that can be kept in $GIT_DIR/index (basically all extensions) */
  #define EXTMASK (RESOLVE_UNDO_CHANGED | CACHE_TREE_CHANGED | \
@@@ -208,17 -206,15 +209,17 @@@ void fill_stat_cache_info(struct cache_
        }
  }
  
 -static int ce_compare_data(const struct cache_entry *ce, struct stat *st)
 +static int ce_compare_data(struct index_state *istate,
 +                         const struct cache_entry *ce,
 +                         struct stat *st)
  {
        int match = -1;
        int fd = git_open_cloexec(ce->name, O_RDONLY);
  
        if (fd >= 0) {
                struct object_id oid;
 -              if (!index_fd(&oid, fd, st, OBJ_BLOB, ce->name, 0))
 -                      match = oidcmp(&oid, &ce->oid);
 +              if (!index_fd(istate, &oid, fd, st, OBJ_BLOB, ce->name, 0))
 +                      match = !oideq(&oid, &ce->oid);
                /* index_fd() closed the file descriptor already */
        }
        return match;
@@@ -259,16 -255,14 +260,16 @@@ static int ce_compare_gitlink(const str
         */
        if (resolve_gitlink_ref(ce->name, "HEAD", &oid) < 0)
                return 0;
 -      return oidcmp(&oid, &ce->oid);
 +      return !oideq(&oid, &ce->oid);
  }
  
 -static int ce_modified_check_fs(const struct cache_entry *ce, struct stat *st)
 +static int ce_modified_check_fs(struct index_state *istate,
 +                              const struct cache_entry *ce,
 +                              struct stat *st)
  {
        switch (st->st_mode & S_IFMT) {
        case S_IFREG:
 -              if (ce_compare_data(ce, st))
 +              if (ce_compare_data(istate, ce, st))
                        return DATA_CHANGED;
                break;
        case S_IFLNK:
@@@ -414,7 -408,7 +415,7 @@@ int ie_match_stat(struct index_state *i
                if (assume_racy_is_modified)
                        changed |= DATA_CHANGED;
                else
 -                      changed |= ce_modified_check_fs(ce, st);
 +                      changed |= ce_modified_check_fs(istate, ce, st);
        }
  
        return changed;
@@@ -454,7 -448,7 +455,7 @@@ int ie_modified(struct index_state *ist
            (S_ISGITLINK(ce->ce_mode) || ce->ce_stat_data.sd_size != 0))
                return changed;
  
 -      changed_fs = ce_modified_check_fs(ce, st);
 +      changed_fs = ce_modified_check_fs(istate, ce, st);
        if (changed_fs)
                return changed | changed_fs;
        return 0;
@@@ -760,7 -754,7 +761,7 @@@ int add_to_index(struct index_state *is
                }
        }
        if (!intent_only) {
 -              if (index_path(&ce->oid, path, st, newflags)) {
 +              if (index_path(istate, &ce->oid, path, st, newflags)) {
                        discard_cache_entry(ce);
                        return error("unable to index file %s", path);
                }
        /* It was suspected to be racily clean, but it turns out to be Ok */
        was_same = (alias &&
                    !ce_stage(alias) &&
 -                  !oidcmp(&alias->oid, &ce->oid) &&
 +                  oideq(&alias->oid, &ce->oid) &&
                    ce->ce_mode == alias->ce_mode);
  
        if (pretend)
@@@ -830,7 -824,7 +831,7 @@@ struct cache_entry *make_cache_entry(st
        ce->ce_namelen = len;
        ce->ce_mode = create_ce_mode(mode);
  
 -      ret = refresh_cache_entry(&the_index, ce, refresh_options);
 +      ret = refresh_cache_entry(istate, ce, refresh_options);
        if (ret != ce)
                discard_cache_entry(ce);
        return ret;
@@@ -1483,8 -1477,13 +1484,13 @@@ int refresh_index(struct index_state *i
        const char *typechange_fmt;
        const char *added_fmt;
        const char *unmerged_fmt;
 -      uint64_t start = getnanotime();
+       struct progress *progress = NULL;
+       if (flags & REFRESH_PROGRESS && isatty(2))
+               progress = start_delayed_progress(_("Refresh index"),
+                                                 istate->cache_nr);
  
 +      trace_performance_enter();
        modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
        deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n");
        typechange_fmt = (in_porcelain ? "T\t%s\n" : "%s needs update\n");
                if (ignore_submodules && S_ISGITLINK(ce->ce_mode))
                        continue;
  
 -              if (pathspec && !ce_path_match(&the_index, ce, pathspec, seen))
 +              if (pathspec && !ce_path_match(istate, ce, pathspec, seen))
                        filtered = 1;
  
                if (ce_stage(ce)) {
                new_entry = refresh_cache_ent(istate, ce, options, &cache_errno, &changed);
                if (new_entry == ce)
                        continue;
+               if (progress)
+                       display_progress(progress, i);
                if (!new_entry) {
                        const char *fmt;
  
  
                replace_index_entry(istate, i, new_entry);
        }
 -      trace_performance_since(start, "refresh index");
+       if (progress) {
+               display_progress(progress, istate->cache_nr);
+               stop_progress(&progress);
+       }
 +      trace_performance_leave("refresh index");
        return has_errors;
  }
  
@@@ -1657,7 -1662,7 +1669,7 @@@ int verify_index_checksum
  /* Allow fsck to force verification of the cache entry order. */
  int verify_ce_order;
  
 -static int verify_hdr(struct cache_header *hdr, unsigned long size)
 +static int verify_hdr(const struct cache_header *hdr, unsigned long size)
  {
        git_hash_ctx c;
        unsigned char hash[GIT_MAX_RAWSZ];
        the_hash_algo->init_fn(&c);
        the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
        the_hash_algo->final_fn(hash, &c);
 -      if (hashcmp(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
 +      if (!hasheq(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
                return error("bad index file sha1 signature");
        return 0;
  }
  
  static int read_index_extension(struct index_state *istate,
 -                              const char *ext, void *data, unsigned long sz)
 +                              const char *ext, const char *data, unsigned long sz)
  {
        switch (CACHE_EXT(ext)) {
        case CACHE_EXT_TREE:
        case CACHE_EXT_FSMONITOR:
                read_fsmonitor_extension(istate, data, sz);
                break;
 +      case CACHE_EXT_ENDOFINDEXENTRIES:
 +      case CACHE_EXT_INDEXENTRYOFFSETTABLE:
 +              /* already handled in do_read_index() */
 +              break;
        default:
                if (*ext < 'A' || 'Z' < *ext)
                        return error("index uses %.4s extension, which we do not understand",
@@@ -1724,25 -1725,63 +1736,25 @@@ int read_index(struct index_state *ista
        return read_index_from(istate, get_index_file(), get_git_dir());
  }
  
 -static struct cache_entry *cache_entry_from_ondisk(struct mem_pool *mem_pool,
 -                                                 struct ondisk_cache_entry *ondisk,
 -                                                 unsigned int flags,
 -                                                 const char *name,
 -                                                 size_t len)
 -{
 -      struct cache_entry *ce = mem_pool__ce_alloc(mem_pool, len);
 -
 -      ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec);
 -      ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec);
 -      ce->ce_stat_data.sd_ctime.nsec = get_be32(&ondisk->ctime.nsec);
 -      ce->ce_stat_data.sd_mtime.nsec = get_be32(&ondisk->mtime.nsec);
 -      ce->ce_stat_data.sd_dev   = get_be32(&ondisk->dev);
 -      ce->ce_stat_data.sd_ino   = get_be32(&ondisk->ino);
 -      ce->ce_mode  = get_be32(&ondisk->mode);
 -      ce->ce_stat_data.sd_uid   = get_be32(&ondisk->uid);
 -      ce->ce_stat_data.sd_gid   = get_be32(&ondisk->gid);
 -      ce->ce_stat_data.sd_size  = get_be32(&ondisk->size);
 -      ce->ce_flags = flags & ~CE_NAMEMASK;
 -      ce->ce_namelen = len;
 -      ce->index = 0;
 -      hashcpy(ce->oid.hash, ondisk->sha1);
 -      memcpy(ce->name, name, len);
 -      ce->name[len] = '\0';
 -      return ce;
 -}
 -
 -/*
 - * Adjacent cache entries tend to share the leading paths, so it makes
 - * sense to only store the differences in later entries.  In the v4
 - * on-disk format of the index, each on-disk cache entry stores the
 - * number of bytes to be stripped from the end of the previous name,
 - * and the bytes to append to the result, to come up with its name.
 - */
 -static unsigned long expand_name_field(struct strbuf *name, const char *cp_)
 -{
 -      const unsigned char *ep, *cp = (const unsigned char *)cp_;
 -      size_t len = decode_varint(&cp);
 -
 -      if (name->len < len)
 -              die("malformed name field in the index");
 -      strbuf_remove(name, name->len - len, len);
 -      for (ep = cp; *ep; ep++)
 -              ; /* find the end */
 -      strbuf_add(name, cp, ep - cp);
 -      return (const char *)ep + 1 - cp_;
 -}
 -
 -static struct cache_entry *create_from_disk(struct mem_pool *mem_pool,
 +static struct cache_entry *create_from_disk(struct mem_pool *ce_mem_pool,
 +                                          unsigned int version,
                                            struct ondisk_cache_entry *ondisk,
                                            unsigned long *ent_size,
 -                                          struct strbuf *previous_name)
 +                                          const struct cache_entry *previous_ce)
  {
        struct cache_entry *ce;
        size_t len;
        const char *name;
        unsigned int flags;
 +      size_t copy_len;
 +      /*
 +       * Adjacent cache entries tend to share the leading paths, so it makes
 +       * sense to only store the differences in later entries.  In the v4
 +       * on-disk format of the index, each on-disk cache entry stores the
 +       * number of bytes to be stripped from the end of the previous name,
 +       * and the bytes to append to the result, to come up with its name.
 +       */
 +      int expand_name_field = version == 4;
  
        /* On-disk flags are just 16 bits */
        flags = get_be16(&ondisk->flags);
        else
                name = ondisk->name;
  
 -      if (!previous_name) {
 -              /* v3 and earlier */
 -              if (len == CE_NAMEMASK)
 -                      len = strlen(name);
 -              ce = cache_entry_from_ondisk(mem_pool, ondisk, flags, name, len);
 +      if (expand_name_field) {
 +              const unsigned char *cp = (const unsigned char *)name;
 +              size_t strip_len, previous_len;
 +
 +              /* If we're at the begining of a block, ignore the previous name */
 +              strip_len = decode_varint(&cp);
 +              if (previous_ce) {
 +                      previous_len = previous_ce->ce_namelen;
 +                      if (previous_len < strip_len)
 +                              die(_("malformed name field in the index, near path '%s'"),
 +                                      previous_ce->name);
 +                      copy_len = previous_len - strip_len;
 +              } else {
 +                      copy_len = 0;
 +              }
 +              name = (const char *)cp;
 +      }
  
 -              *ent_size = ondisk_ce_size(ce);
 -      } else {
 -              unsigned long consumed;
 -              consumed = expand_name_field(previous_name, name);
 -              ce = cache_entry_from_ondisk(mem_pool, ondisk, flags,
 -                                           previous_name->buf,
 -                                           previous_name->len);
 +      if (len == CE_NAMEMASK) {
 +              len = strlen(name);
 +              if (expand_name_field)
 +                      len += copy_len;
 +      }
 +
 +      ce = mem_pool__ce_alloc(ce_mem_pool, len);
 +
 +      ce->ce_stat_data.sd_ctime.sec = get_be32(&ondisk->ctime.sec);
 +      ce->ce_stat_data.sd_mtime.sec = get_be32(&ondisk->mtime.sec);
 +      ce->ce_stat_data.sd_ctime.nsec = get_be32(&ondisk->ctime.nsec);
 +      ce->ce_stat_data.sd_mtime.nsec = get_be32(&ondisk->mtime.nsec);
 +      ce->ce_stat_data.sd_dev   = get_be32(&ondisk->dev);
 +      ce->ce_stat_data.sd_ino   = get_be32(&ondisk->ino);
 +      ce->ce_mode  = get_be32(&ondisk->mode);
 +      ce->ce_stat_data.sd_uid   = get_be32(&ondisk->uid);
 +      ce->ce_stat_data.sd_gid   = get_be32(&ondisk->gid);
 +      ce->ce_stat_data.sd_size  = get_be32(&ondisk->size);
 +      ce->ce_flags = flags & ~CE_NAMEMASK;
 +      ce->ce_namelen = len;
 +      ce->index = 0;
 +      hashcpy(ce->oid.hash, ondisk->sha1);
  
 -              *ent_size = (name - ((char *)ondisk)) + consumed;
 +      if (expand_name_field) {
 +              if (copy_len)
 +                      memcpy(ce->name, previous_ce->name, copy_len);
 +              memcpy(ce->name + copy_len, name, len + 1 - copy_len);
 +              *ent_size = (name - ((char *)ondisk)) + len + 1 - copy_len;
 +      } else {
 +              memcpy(ce->name, name, len + 1);
 +              *ent_size = ondisk_ce_size(ce);
        }
        return ce;
  }
@@@ -1896,237 -1901,16 +1908,237 @@@ static size_t estimate_cache_size(size_
        return ondisk_size + entries * per_entry;
  }
  
 +struct index_entry_offset
 +{
 +      /* starting byte offset into index file, count of index entries in this block */
 +      int offset, nr;
 +};
 +
 +struct index_entry_offset_table
 +{
 +      int nr;
 +      struct index_entry_offset entries[FLEX_ARRAY];
 +};
 +
 +#ifndef NO_PTHREADS
 +static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset);
 +static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot);
 +#endif
 +
 +static size_t read_eoie_extension(const char *mmap, size_t mmap_size);
 +static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset);
 +
 +struct load_index_extensions
 +{
 +#ifndef NO_PTHREADS
 +      pthread_t pthread;
 +#endif
 +      struct index_state *istate;
 +      const char *mmap;
 +      size_t mmap_size;
 +      unsigned long src_offset;
 +};
 +
 +static void *load_index_extensions(void *_data)
 +{
 +      struct load_index_extensions *p = _data;
 +      unsigned long src_offset = p->src_offset;
 +
 +      while (src_offset <= p->mmap_size - the_hash_algo->rawsz - 8) {
 +              /* After an array of active_nr index entries,
 +               * there can be arbitrary number of extended
 +               * sections, each of which is prefixed with
 +               * extension name (4-byte) and section length
 +               * in 4-byte network byte order.
 +               */
 +              uint32_t extsize = get_be32(p->mmap + src_offset + 4);
 +              if (read_index_extension(p->istate,
 +                                       p->mmap + src_offset,
 +                                       p->mmap + src_offset + 8,
 +                                       extsize) < 0) {
 +                      munmap((void *)p->mmap, p->mmap_size);
 +                      die(_("index file corrupt"));
 +              }
 +              src_offset += 8;
 +              src_offset += extsize;
 +      }
 +
 +      return NULL;
 +}
 +
 +/*
 + * A helper function that will load the specified range of cache entries
 + * from the memory mapped file and add them to the given index.
 + */
 +static unsigned long load_cache_entry_block(struct index_state *istate,
 +                      struct mem_pool *ce_mem_pool, int offset, int nr, const char *mmap,
 +                      unsigned long start_offset, const struct cache_entry *previous_ce)
 +{
 +      int i;
 +      unsigned long src_offset = start_offset;
 +
 +      for (i = offset; i < offset + nr; i++) {
 +              struct ondisk_cache_entry *disk_ce;
 +              struct cache_entry *ce;
 +              unsigned long consumed;
 +
 +              disk_ce = (struct ondisk_cache_entry *)(mmap + src_offset);
 +              ce = create_from_disk(ce_mem_pool, istate->version, disk_ce, &consumed, previous_ce);
 +              set_index_entry(istate, i, ce);
 +
 +              src_offset += consumed;
 +              previous_ce = ce;
 +      }
 +      return src_offset - start_offset;
 +}
 +
 +static unsigned long load_all_cache_entries(struct index_state *istate,
 +                      const char *mmap, size_t mmap_size, unsigned long src_offset)
 +{
 +      unsigned long consumed;
 +
 +      if (istate->version == 4) {
 +              mem_pool_init(&istate->ce_mem_pool,
 +                              estimate_cache_size_from_compressed(istate->cache_nr));
 +      } else {
 +              mem_pool_init(&istate->ce_mem_pool,
 +                              estimate_cache_size(mmap_size, istate->cache_nr));
 +      }
 +
 +      consumed = load_cache_entry_block(istate, istate->ce_mem_pool,
 +                                      0, istate->cache_nr, mmap, src_offset, NULL);
 +      return consumed;
 +}
 +
 +#ifndef NO_PTHREADS
 +
 +/*
 + * Mostly randomly chosen maximum thread counts: we
 + * cap the parallelism to online_cpus() threads, and we want
 + * to have at least 10000 cache entries per thread for it to
 + * be worth starting a thread.
 + */
 +
 +#define THREAD_COST           (10000)
 +
 +struct load_cache_entries_thread_data
 +{
 +      pthread_t pthread;
 +      struct index_state *istate;
 +      struct mem_pool *ce_mem_pool;
 +      int offset;
 +      const char *mmap;
 +      struct index_entry_offset_table *ieot;
 +      int ieot_start;         /* starting index into the ieot array */
 +      int ieot_blocks;        /* count of ieot entries to process */
 +      unsigned long consumed; /* return # of bytes in index file processed */
 +};
 +
 +/*
 + * A thread proc to run the load_cache_entries() computation
 + * across multiple background threads.
 + */
 +static void *load_cache_entries_thread(void *_data)
 +{
 +      struct load_cache_entries_thread_data *p = _data;
 +      int i;
 +
 +      /* iterate across all ieot blocks assigned to this thread */
 +      for (i = p->ieot_start; i < p->ieot_start + p->ieot_blocks; i++) {
 +              p->consumed += load_cache_entry_block(p->istate, p->ce_mem_pool,
 +                      p->offset, p->ieot->entries[i].nr, p->mmap, p->ieot->entries[i].offset, NULL);
 +              p->offset += p->ieot->entries[i].nr;
 +      }
 +      return NULL;
 +}
 +
 +static unsigned long load_cache_entries_threaded(struct index_state *istate, const char *mmap, size_t mmap_size,
 +                      unsigned long src_offset, int nr_threads, struct index_entry_offset_table *ieot)
 +{
 +      int i, offset, ieot_blocks, ieot_start, err;
 +      struct load_cache_entries_thread_data *data;
 +      unsigned long consumed = 0;
 +
 +      /* a little sanity checking */
 +      if (istate->name_hash_initialized)
 +              BUG("the name hash isn't thread safe");
 +
 +      mem_pool_init(&istate->ce_mem_pool, 0);
 +
 +      /* ensure we have no more threads than we have blocks to process */
 +      if (nr_threads > ieot->nr)
 +              nr_threads = ieot->nr;
 +      data = xcalloc(nr_threads, sizeof(*data));
 +
 +      offset = ieot_start = 0;
 +      ieot_blocks = DIV_ROUND_UP(ieot->nr, nr_threads);
 +      for (i = 0; i < nr_threads; i++) {
 +              struct load_cache_entries_thread_data *p = &data[i];
 +              int nr, j;
 +
 +              if (ieot_start + ieot_blocks > ieot->nr)
 +                      ieot_blocks = ieot->nr - ieot_start;
 +
 +              p->istate = istate;
 +              p->offset = offset;
 +              p->mmap = mmap;
 +              p->ieot = ieot;
 +              p->ieot_start = ieot_start;
 +              p->ieot_blocks = ieot_blocks;
 +
 +              /* create a mem_pool for each thread */
 +              nr = 0;
 +              for (j = p->ieot_start; j < p->ieot_start + p->ieot_blocks; j++)
 +                      nr += p->ieot->entries[j].nr;
 +              if (istate->version == 4) {
 +                      mem_pool_init(&p->ce_mem_pool,
 +                              estimate_cache_size_from_compressed(nr));
 +              } else {
 +                      mem_pool_init(&p->ce_mem_pool,
 +                              estimate_cache_size(mmap_size, nr));
 +              }
 +
 +              err = pthread_create(&p->pthread, NULL, load_cache_entries_thread, p);
 +              if (err)
 +                      die(_("unable to create load_cache_entries thread: %s"), strerror(err));
 +
 +              /* increment by the number of cache entries in the ieot block being processed */
 +              for (j = 0; j < ieot_blocks; j++)
 +                      offset += ieot->entries[ieot_start + j].nr;
 +              ieot_start += ieot_blocks;
 +      }
 +
 +      for (i = 0; i < nr_threads; i++) {
 +              struct load_cache_entries_thread_data *p = &data[i];
 +
 +              err = pthread_join(p->pthread, NULL);
 +              if (err)
 +                      die(_("unable to join load_cache_entries thread: %s"), strerror(err));
 +              mem_pool_combine(istate->ce_mem_pool, p->ce_mem_pool);
 +              consumed += p->consumed;
 +      }
 +
 +      free(data);
 +
 +      return consumed;
 +}
 +#endif
 +
  /* remember to discard_cache() before reading a different cache! */
  int do_read_index(struct index_state *istate, const char *path, int must_exist)
  {
 -      int fd, i;
 +      int fd;
        struct stat st;
        unsigned long src_offset;
 -      struct cache_header *hdr;
 -      void *mmap;
 +      const struct cache_header *hdr;
 +      const char *mmap;
        size_t mmap_size;
 -      struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
 +      struct load_index_extensions p;
 +      size_t extension_offset = 0;
 +#ifndef NO_PTHREADS
 +      int nr_threads, cpus;
 +      struct index_entry_offset_table *ieot = NULL;
 +#endif
  
        if (istate->initialized)
                return istate->cache_nr;
                die_errno("unable to map index file");
        close(fd);
  
 -      hdr = mmap;
 +      hdr = (const struct cache_header *)mmap;
        if (verify_hdr(hdr, mmap_size) < 0)
                goto unmap;
  
        istate->cache = xcalloc(istate->cache_alloc, sizeof(*istate->cache));
        istate->initialized = 1;
  
 -      if (istate->version == 4) {
 -              previous_name = &previous_name_buf;
 -              mem_pool_init(&istate->ce_mem_pool,
 -                            estimate_cache_size_from_compressed(istate->cache_nr));
 -      } else {
 -              previous_name = NULL;
 -              mem_pool_init(&istate->ce_mem_pool,
 -                            estimate_cache_size(mmap_size, istate->cache_nr));
 -      }
 +      p.istate = istate;
 +      p.mmap = mmap;
 +      p.mmap_size = mmap_size;
  
        src_offset = sizeof(*hdr);
 -      for (i = 0; i < istate->cache_nr; i++) {
 -              struct ondisk_cache_entry *disk_ce;
 -              struct cache_entry *ce;
 -              unsigned long consumed;
  
 -              disk_ce = (struct ondisk_cache_entry *)((char *)mmap + src_offset);
 -              ce = create_from_disk(istate->ce_mem_pool, disk_ce, &consumed, previous_name);
 -              set_index_entry(istate, i, ce);
 +#ifndef NO_PTHREADS
 +      nr_threads = git_config_get_index_threads();
  
 -              src_offset += consumed;
 +      /* TODO: does creating more threads than cores help? */
 +      if (!nr_threads) {
 +              nr_threads = istate->cache_nr / THREAD_COST;
 +              cpus = online_cpus();
 +              if (nr_threads > cpus)
 +                      nr_threads = cpus;
        }
 -      strbuf_release(&previous_name_buf);
 +
 +      if (nr_threads > 1) {
 +              extension_offset = read_eoie_extension(mmap, mmap_size);
 +              if (extension_offset) {
 +                      int err;
 +
 +                      p.src_offset = extension_offset;
 +                      err = pthread_create(&p.pthread, NULL, load_index_extensions, &p);
 +                      if (err)
 +                              die(_("unable to create load_index_extensions thread: %s"), strerror(err));
 +
 +                      nr_threads--;
 +              }
 +      }
 +
 +      /*
 +       * Locate and read the index entry offset table so that we can use it
 +       * to multi-thread the reading of the cache entries.
 +       */
 +      if (extension_offset && nr_threads > 1)
 +              ieot = read_ieot_extension(mmap, mmap_size, extension_offset);
 +
 +      if (ieot) {
 +              src_offset += load_cache_entries_threaded(istate, mmap, mmap_size, src_offset, nr_threads, ieot);
 +              free(ieot);
 +      } else {
 +              src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
 +      }
 +#else
 +      src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
 +#endif
 +
        istate->timestamp.sec = st.st_mtime;
        istate->timestamp.nsec = ST_MTIME_NSEC(st);
  
 -      while (src_offset <= mmap_size - the_hash_algo->rawsz - 8) {
 -              /* After an array of active_nr index entries,
 -               * there can be arbitrary number of extended
 -               * sections, each of which is prefixed with
 -               * extension name (4-byte) and section length
 -               * in 4-byte network byte order.
 -               */
 -              uint32_t extsize;
 -              memcpy(&extsize, (char *)mmap + src_offset + 4, 4);
 -              extsize = ntohl(extsize);
 -              if (read_index_extension(istate,
 -                                       (const char *) mmap + src_offset,
 -                                       (char *) mmap + src_offset + 8,
 -                                       extsize) < 0)
 -                      goto unmap;
 -              src_offset += 8;
 -              src_offset += extsize;
 +      /* if we created a thread, join it; otherwise load the extensions on the primary thread */
 +#ifndef NO_PTHREADS
 +      if (extension_offset) {
 +              int ret = pthread_join(p.pthread, NULL);
 +              if (ret)
 +                      die(_("unable to join load_index_extensions thread: %s"), strerror(ret));
        }
 -      munmap(mmap, mmap_size);
 +#endif
 +      if (!extension_offset) {
 +              p.src_offset = src_offset;
 +              load_index_extensions(&p);
 +      }
 +      munmap((void *)mmap, mmap_size);
        return istate->cache_nr;
  
  unmap:
 -      munmap(mmap, mmap_size);
 +      munmap((void *)mmap, mmap_size);
        die("index file corrupt");
  }
  
@@@ -2249,6 -2014,7 +2261,6 @@@ static void freshen_shared_index(const 
  int read_index_from(struct index_state *istate, const char *path,
                    const char *gitdir)
  {
 -      uint64_t start = getnanotime();
        struct split_index *split_index;
        int ret;
        char *base_oid_hex;
        if (istate->initialized)
                return istate->cache_nr;
  
 +      trace_performance_enter();
        ret = do_read_index(istate, path, 0);
 -      trace_performance_since(start, "read cache %s", path);
 +      trace_performance_leave("read cache %s", path);
  
        split_index = istate->split_index;
        if (!split_index || is_null_oid(&split_index->base_oid)) {
                return ret;
        }
  
 +      trace_performance_enter();
        if (split_index->base)
                discard_index(split_index->base);
        else
        base_oid_hex = oid_to_hex(&split_index->base_oid);
        base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
        ret = do_read_index(split_index->base, base_path, 1);
 -      if (oidcmp(&split_index->base_oid, &split_index->base->oid))
 +      if (!oideq(&split_index->base_oid, &split_index->base->oid))
                die("broken index, expect %s in %s, got %s",
                    base_oid_hex, base_path,
                    oid_to_hex(&split_index->base->oid));
        freshen_shared_index(base_path, 0);
        merge_base_index(istate);
        post_read_index_from(istate);
 -      trace_performance_since(start, "read cache %s", base_path);
        free(base_path);
 +      trace_performance_leave("read cache %s", base_path);
        return ret;
  }
  
@@@ -2370,7 -2134,7 +2382,7 @@@ int unmerged_index(const struct index_s
        return 0;
  }
  
 -int index_has_changes(const struct index_state *istate,
 +int index_has_changes(struct index_state *istate,
                      struct tree *tree,
                      struct strbuf *sb)
  {
        if (tree || !get_oid_tree("HEAD", &cmp)) {
                struct diff_options opt;
  
 -              diff_setup(&opt);
 +              repo_diff_setup(the_repository, &opt);
                opt.flags.exit_with_status = 1;
                if (!sb)
                        opt.flags.quick = 1;
@@@ -2446,15 -2210,11 +2458,15 @@@ static int ce_write(git_hash_ctx *conte
        return 0;
  }
  
 -static int write_index_ext_header(git_hash_ctx *context, int fd,
 -                                unsigned int ext, unsigned int sz)
 +static int write_index_ext_header(git_hash_ctx *context, git_hash_ctx *eoie_context,
 +                                int fd, unsigned int ext, unsigned int sz)
  {
        ext = htonl(ext);
        sz = htonl(sz);
 +      if (eoie_context) {
 +              the_hash_algo->update_fn(eoie_context, &ext, 4);
 +              the_hash_algo->update_fn(eoie_context, &sz, 4);
 +      }
        return ((ce_write(context, fd, &ext, 4) < 0) ||
                (ce_write(context, fd, &sz, 4) < 0)) ? -1 : 0;
  }
@@@ -2482,8 -2242,7 +2494,8 @@@ static int ce_flush(git_hash_ctx *conte
        return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0;
  }
  
 -static void ce_smudge_racily_clean_entry(struct cache_entry *ce)
 +static void ce_smudge_racily_clean_entry(struct index_state *istate,
 +                                       struct cache_entry *ce)
  {
        /*
         * The only thing we care about in this function is to smudge the
                return;
        if (ce_match_stat_basic(ce, &st))
                return;
 -      if (ce_modified_check_fs(ce, &st)) {
 +      if (ce_modified_check_fs(istate, ce, &st)) {
                /* This is "racily clean"; smudge it.  Note that this
                 * is a tricky code.  At first glance, it may appear
                 * that it can break with this sequence:
@@@ -2648,7 -2407,7 +2660,7 @@@ static int verify_index_from(const stru
        if (n != the_hash_algo->rawsz)
                goto out;
  
 -      if (hashcmp(istate->oid.hash, hash))
 +      if (!hasheq(istate->oid.hash, hash))
                goto out;
  
        close(fd);
@@@ -2698,7 -2457,7 +2710,7 @@@ static int do_write_index(struct index_
  {
        uint64_t start = getnanotime();
        int newfd = tempfile->fd;
 -      git_hash_ctx c;
 +      git_hash_ctx c, eoie_c;
        struct cache_header hdr;
        int i, err = 0, removed, extended, hdr_version;
        struct cache_entry **cache = istate->cache;
        struct ondisk_cache_entry_extended ondisk;
        struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
        int drop_cache_tree = istate->drop_cache_tree;
 +      off_t offset;
 +      int ieot_entries = 1;
 +      struct index_entry_offset_table *ieot = NULL;
 +      int nr, nr_threads;
  
        for (i = removed = extended = 0; i < entries; i++) {
                if (cache[i]->ce_flags & CE_REMOVE)
        if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
                return -1;
  
 +#ifndef NO_PTHREADS
 +      nr_threads = git_config_get_index_threads();
 +      if (nr_threads != 1) {
 +              int ieot_blocks, cpus;
 +
 +              /*
 +               * ensure default number of ieot blocks maps evenly to the
 +               * default number of threads that will process them leaving
 +               * room for the thread to load the index extensions.
 +               */
 +              if (!nr_threads) {
 +                      ieot_blocks = istate->cache_nr / THREAD_COST;
 +                      cpus = online_cpus();
 +                      if (ieot_blocks > cpus - 1)
 +                              ieot_blocks = cpus - 1;
 +              } else {
 +                      ieot_blocks = nr_threads;
 +                      if (ieot_blocks > istate->cache_nr)
 +                              ieot_blocks = istate->cache_nr;
 +              }
 +
 +              /*
 +               * no reason to write out the IEOT extension if we don't
 +               * have enough blocks to utilize multi-threading
 +               */
 +              if (ieot_blocks > 1) {
 +                      ieot = xcalloc(1, sizeof(struct index_entry_offset_table)
 +                              + (ieot_blocks * sizeof(struct index_entry_offset)));
 +                      ieot_entries = DIV_ROUND_UP(entries, ieot_blocks);
 +              }
 +      }
 +#endif
 +
 +      offset = lseek(newfd, 0, SEEK_CUR);
 +      if (offset < 0) {
 +              free(ieot);
 +              return -1;
 +      }
 +      offset += write_buffer_len;
 +      nr = 0;
        previous_name = (hdr_version == 4) ? &previous_name_buf : NULL;
  
        for (i = 0; i < entries; i++) {
                if (ce->ce_flags & CE_REMOVE)
                        continue;
                if (!ce_uptodate(ce) && is_racy_timestamp(istate, ce))
 -                      ce_smudge_racily_clean_entry(ce);
 +                      ce_smudge_racily_clean_entry(istate, ce);
                if (is_null_oid(&ce->oid)) {
                        static const char msg[] = "cache entry has null sha1: %s";
                        static int allow = -1;
  
                        drop_cache_tree = 1;
                }
 +              if (ieot && i && (i % ieot_entries == 0)) {
 +                      ieot->entries[ieot->nr].nr = nr;
 +                      ieot->entries[ieot->nr].offset = offset;
 +                      ieot->nr++;
 +                      /*
 +                       * If we have a V4 index, set the first byte to an invalid
 +                       * character to ensure there is nothing common with the previous
 +                       * entry
 +                       */
 +                      if (previous_name)
 +                              previous_name->buf[0] = 0;
 +                      nr = 0;
 +                      offset = lseek(newfd, 0, SEEK_CUR);
 +                      if (offset < 0) {
 +                              free(ieot);
 +                              return -1;
 +                      }
 +                      offset += write_buffer_len;
 +              }
                if (ce_write_entry(&c, newfd, ce, previous_name, (struct ondisk_cache_entry *)&ondisk) < 0)
                        err = -1;
  
                if (err)
                        break;
 +              nr++;
 +      }
 +      if (ieot && nr) {
 +              ieot->entries[ieot->nr].nr = nr;
 +              ieot->entries[ieot->nr].offset = offset;
 +              ieot->nr++;
        }
        strbuf_release(&previous_name_buf);
  
 -      if (err)
 +      if (err) {
 +              free(ieot);
                return err;
 +      }
  
        /* Write extension data here */
 +      offset = lseek(newfd, 0, SEEK_CUR);
 +      if (offset < 0) {
 +              free(ieot);
 +              return -1;
 +      }
 +      offset += write_buffer_len;
 +      the_hash_algo->init_fn(&eoie_c);
 +
 +      /*
 +       * Let's write out CACHE_EXT_INDEXENTRYOFFSETTABLE first so that we
 +       * can minimize the number of extensions we have to scan through to
 +       * find it during load.  Write it out regardless of the
 +       * strip_extensions parameter as we need it when loading the shared
 +       * index.
 +       */
 +#ifndef NO_PTHREADS
 +      if (ieot) {
 +              struct strbuf sb = STRBUF_INIT;
 +
 +              write_ieot_extension(&sb, ieot);
 +              err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_INDEXENTRYOFFSETTABLE, sb.len) < 0
 +                      || ce_write(&c, newfd, sb.buf, sb.len) < 0;
 +              strbuf_release(&sb);
 +              free(ieot);
 +              if (err)
 +                      return -1;
 +      }
 +#endif
 +
        if (!strip_extensions && istate->split_index) {
                struct strbuf sb = STRBUF_INIT;
  
                err = write_link_extension(&sb, istate) < 0 ||
 -                      write_index_ext_header(&c, newfd, CACHE_EXT_LINK,
 +                      write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_LINK,
                                               sb.len) < 0 ||
                        ce_write(&c, newfd, sb.buf, sb.len) < 0;
                strbuf_release(&sb);
                struct strbuf sb = STRBUF_INIT;
  
                cache_tree_write(&sb, istate->cache_tree);
 -              err = write_index_ext_header(&c, newfd, CACHE_EXT_TREE, sb.len) < 0
 +              err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_TREE, sb.len) < 0
                        || ce_write(&c, newfd, sb.buf, sb.len) < 0;
                strbuf_release(&sb);
                if (err)
                struct strbuf sb = STRBUF_INIT;
  
                resolve_undo_write(&sb, istate->resolve_undo);
 -              err = write_index_ext_header(&c, newfd, CACHE_EXT_RESOLVE_UNDO,
 +              err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_RESOLVE_UNDO,
                                             sb.len) < 0
                        || ce_write(&c, newfd, sb.buf, sb.len) < 0;
                strbuf_release(&sb);
                struct strbuf sb = STRBUF_INIT;
  
                write_untracked_extension(&sb, istate->untracked);
 -              err = write_index_ext_header(&c, newfd, CACHE_EXT_UNTRACKED,
 +              err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_UNTRACKED,
                                             sb.len) < 0 ||
                        ce_write(&c, newfd, sb.buf, sb.len) < 0;
                strbuf_release(&sb);
                struct strbuf sb = STRBUF_INIT;
  
                write_fsmonitor_extension(&sb, istate);
 -              err = write_index_ext_header(&c, newfd, CACHE_EXT_FSMONITOR, sb.len) < 0
 +              err = write_index_ext_header(&c, &eoie_c, newfd, CACHE_EXT_FSMONITOR, sb.len) < 0
 +                      || ce_write(&c, newfd, sb.buf, sb.len) < 0;
 +              strbuf_release(&sb);
 +              if (err)
 +                      return -1;
 +      }
 +
 +      /*
 +       * CACHE_EXT_ENDOFINDEXENTRIES must be written as the last entry before the SHA1
 +       * so that it can be found and processed before all the index entries are
 +       * read.  Write it out regardless of the strip_extensions parameter as we need it
 +       * when loading the shared index.
 +       */
 +      if (offset) {
 +              struct strbuf sb = STRBUF_INIT;
 +
 +              write_eoie_extension(&sb, &eoie_c, offset);
 +              err = write_index_ext_header(&c, NULL, newfd, CACHE_EXT_ENDOFINDEXENTRIES, sb.len) < 0
                        || ce_write(&c, newfd, sb.buf, sb.len) < 0;
                strbuf_release(&sb);
                if (err)
@@@ -3113,9 -2755,6 +3125,9 @@@ int write_locked_index(struct index_sta
        int new_shared_index, ret;
        struct split_index *si = istate->split_index;
  
 +      if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
 +              cache_tree_verify(istate);
 +
        if ((flags & SKIP_IF_UNCHANGED) && !istate->cache_changed) {
                if (flags & COMMIT_LOCK)
                        rollback_lock_file(lock);
@@@ -3312,8 -2951,6 +3324,8 @@@ void move_index_extensions(struct index
  {
        dst->untracked = src->untracked;
        src->untracked = NULL;
 +      dst->cache_tree = src->cache_tree;
 +      src->cache_tree = NULL;
  }
  
  struct cache_entry *dup_cache_entry(const struct cache_entry *ce,
@@@ -3353,181 -2990,3 +3365,181 @@@ int should_validate_cache_entries(void
  
        return validate_index_cache_entries;
  }
 +
 +#define EOIE_SIZE (4 + GIT_SHA1_RAWSZ) /* <4-byte offset> + <20-byte hash> */
 +#define EOIE_SIZE_WITH_HEADER (4 + 4 + EOIE_SIZE) /* <4-byte signature> + <4-byte length> + EOIE_SIZE */
 +
 +static size_t read_eoie_extension(const char *mmap, size_t mmap_size)
 +{
 +      /*
 +       * The end of index entries (EOIE) extension is guaranteed to be last
 +       * so that it can be found by scanning backwards from the EOF.
 +       *
 +       * "EOIE"
 +       * <4-byte length>
 +       * <4-byte offset>
 +       * <20-byte hash>
 +       */
 +      const char *index, *eoie;
 +      uint32_t extsize;
 +      size_t offset, src_offset;
 +      unsigned char hash[GIT_MAX_RAWSZ];
 +      git_hash_ctx c;
 +
 +      /* ensure we have an index big enough to contain an EOIE extension */
 +      if (mmap_size < sizeof(struct cache_header) + EOIE_SIZE_WITH_HEADER + the_hash_algo->rawsz)
 +              return 0;
 +
 +      /* validate the extension signature */
 +      index = eoie = mmap + mmap_size - EOIE_SIZE_WITH_HEADER - the_hash_algo->rawsz;
 +      if (CACHE_EXT(index) != CACHE_EXT_ENDOFINDEXENTRIES)
 +              return 0;
 +      index += sizeof(uint32_t);
 +
 +      /* validate the extension size */
 +      extsize = get_be32(index);
 +      if (extsize != EOIE_SIZE)
 +              return 0;
 +      index += sizeof(uint32_t);
 +
 +      /*
 +       * Validate the offset we're going to look for the first extension
 +       * signature is after the index header and before the eoie extension.
 +       */
 +      offset = get_be32(index);
 +      if (mmap + offset < mmap + sizeof(struct cache_header))
 +              return 0;
 +      if (mmap + offset >= eoie)
 +              return 0;
 +      index += sizeof(uint32_t);
 +
 +      /*
 +       * The hash is computed over extension types and their sizes (but not
 +       * their contents).  E.g. if we have "TREE" extension that is N-bytes
 +       * long, "REUC" extension that is M-bytes long, followed by "EOIE",
 +       * then the hash would be:
 +       *
 +       * SHA-1("TREE" + <binary representation of N> +
 +       *       "REUC" + <binary representation of M>)
 +       */
 +      src_offset = offset;
 +      the_hash_algo->init_fn(&c);
 +      while (src_offset < mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER) {
 +              /* After an array of active_nr index entries,
 +               * there can be arbitrary number of extended
 +               * sections, each of which is prefixed with
 +               * extension name (4-byte) and section length
 +               * in 4-byte network byte order.
 +               */
 +              uint32_t extsize;
 +              memcpy(&extsize, mmap + src_offset + 4, 4);
 +              extsize = ntohl(extsize);
 +
 +              /* verify the extension size isn't so large it will wrap around */
 +              if (src_offset + 8 + extsize < src_offset)
 +                      return 0;
 +
 +              the_hash_algo->update_fn(&c, mmap + src_offset, 8);
 +
 +              src_offset += 8;
 +              src_offset += extsize;
 +      }
 +      the_hash_algo->final_fn(hash, &c);
 +      if (!hasheq(hash, (const unsigned char *)index))
 +              return 0;
 +
 +      /* Validate that the extension offsets returned us back to the eoie extension. */
 +      if (src_offset != mmap_size - the_hash_algo->rawsz - EOIE_SIZE_WITH_HEADER)
 +              return 0;
 +
 +      return offset;
 +}
 +
 +static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset)
 +{
 +      uint32_t buffer;
 +      unsigned char hash[GIT_MAX_RAWSZ];
 +
 +      /* offset */
 +      put_be32(&buffer, offset);
 +      strbuf_add(sb, &buffer, sizeof(uint32_t));
 +
 +      /* hash */
 +      the_hash_algo->final_fn(hash, eoie_context);
 +      strbuf_add(sb, hash, the_hash_algo->rawsz);
 +}
 +
 +#ifndef NO_PTHREADS
 +#define IEOT_VERSION  (1)
 +
 +static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset)
 +{
 +       const char *index = NULL;
 +       uint32_t extsize, ext_version;
 +       struct index_entry_offset_table *ieot;
 +       int i, nr;
 +
 +       /* find the IEOT extension */
 +       if (!offset)
 +             return NULL;
 +       while (offset <= mmap_size - the_hash_algo->rawsz - 8) {
 +             extsize = get_be32(mmap + offset + 4);
 +             if (CACHE_EXT((mmap + offset)) == CACHE_EXT_INDEXENTRYOFFSETTABLE) {
 +                     index = mmap + offset + 4 + 4;
 +                     break;
 +             }
 +             offset += 8;
 +             offset += extsize;
 +       }
 +       if (!index)
 +             return NULL;
 +
 +       /* validate the version is IEOT_VERSION */
 +       ext_version = get_be32(index);
 +       if (ext_version != IEOT_VERSION) {
 +             error("invalid IEOT version %d", ext_version);
 +             return NULL;
 +       }
 +       index += sizeof(uint32_t);
 +
 +       /* extension size - version bytes / bytes per entry */
 +       nr = (extsize - sizeof(uint32_t)) / (sizeof(uint32_t) + sizeof(uint32_t));
 +       if (!nr) {
 +             error("invalid number of IEOT entries %d", nr);
 +             return NULL;
 +       }
 +       ieot = xmalloc(sizeof(struct index_entry_offset_table)
 +             + (nr * sizeof(struct index_entry_offset)));
 +       ieot->nr = nr;
 +       for (i = 0; i < nr; i++) {
 +             ieot->entries[i].offset = get_be32(index);
 +             index += sizeof(uint32_t);
 +             ieot->entries[i].nr = get_be32(index);
 +             index += sizeof(uint32_t);
 +       }
 +
 +       return ieot;
 +}
 +
 +static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot)
 +{
 +       uint32_t buffer;
 +       int i;
 +
 +       /* version */
 +       put_be32(&buffer, IEOT_VERSION);
 +       strbuf_add(sb, &buffer, sizeof(uint32_t));
 +
 +       /* ieot */
 +       for (i = 0; i < ieot->nr; i++) {
 +
 +             /* offset */
 +             put_be32(&buffer, ieot->entries[i].offset);
 +             strbuf_add(sb, &buffer, sizeof(uint32_t));
 +
 +             /* count */
 +             put_be32(&buffer, ieot->entries[i].nr);
 +             strbuf_add(sb, &buffer, sizeof(uint32_t));
 +       }
 +}
 +#endif
diff --combined sequencer.c
index 83f17721d4fc44cd5da211878c335f4e0e07cde9,e0cd17df701de6d625b9e89975361dc42cfbccbe..0c164d5f98f152156ef21a79159f6348fecb0273
@@@ -30,7 -30,6 +30,7 @@@
  #include "oidset.h"
  #include "commit-slab.h"
  #include "alias.h"
 +#include "commit-reach.h"
  
  #define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
  
@@@ -226,16 -225,13 +226,16 @@@ static const char *get_todo_path(const 
   * Returns 3 when sob exists within conforming footer as last entry
   */
  static int has_conforming_footer(struct strbuf *sb, struct strbuf *sob,
 -      int ignore_footer)
 +      size_t ignore_footer)
  {
 +      struct process_trailer_options opts = PROCESS_TRAILER_OPTIONS_INIT;
        struct trailer_info info;
 -      int i;
 +      size_t i;
        int found_sob = 0, found_sob_last = 0;
  
 -      trailer_info_get(&info, sb->buf);
 +      opts.no_divider = 1;
 +
 +      trailer_info_get(&info, sb->buf, &opts);
  
        if (info.trailer_start == info.trailer_end)
                return 0;
@@@ -474,8 -470,8 +474,8 @@@ static int fast_forward_to(const struc
        struct strbuf sb = STRBUF_INIT;
        struct strbuf err = STRBUF_INIT;
  
 -      read_cache();
 -      if (checkout_fast_forward(from, to, 1))
 +      read_index(&the_index);
 +      if (checkout_fast_forward(the_repository, from, to, 1))
                return -1; /* the callee should have complained already */
  
        strbuf_addf(&sb, _("%s: fast-forward"), _(action_name(opts)));
@@@ -614,7 -610,7 +614,7 @@@ static int is_index_unchanged(void
        if (!(cache_tree_oid = get_cache_tree_oid()))
                return -1;
  
 -      return !oidcmp(cache_tree_oid, get_commit_tree_oid(head_commit));
 +      return oideq(cache_tree_oid, get_commit_tree_oid(head_commit));
  }
  
  static int write_author_script(const char *message)
@@@ -903,7 -899,7 +903,7 @@@ static int run_git_commit(const char *d
        if ((flags & ALLOW_EMPTY))
                argv_array_push(&cmd.args, "--allow-empty");
  
 -      if (opts->allow_empty_message)
 +      if (!(flags & EDIT_MSG))
                argv_array_push(&cmd.args, "--allow-empty-message");
  
        if (cmd.err == -1) {
@@@ -1176,7 -1172,7 +1176,7 @@@ void print_commit_summary(const char *p
        strbuf_release(&author_ident);
        strbuf_release(&committer_ident);
  
 -      init_revisions(&rev, prefix);
 +      repo_init_revisions(the_repository, &rev, prefix);
        setup_revisions(0, NULL, &rev, NULL);
  
        rev.diff = 1;
@@@ -1221,7 -1217,7 +1221,7 @@@ static int parse_head(struct commit **h
                current_head = lookup_commit_reference(the_repository, &oid);
                if (!current_head)
                        return error(_("could not parse HEAD"));
 -              if (oidcmp(&oid, &current_head->object.oid)) {
 +              if (!oideq(&oid, &current_head->object.oid)) {
                        warning(_("HEAD %s is not a commit!"),
                                oid_to_hex(&oid));
                }
@@@ -1291,9 -1287,9 +1291,9 @@@ static int try_to_commit(struct strbuf 
                goto out;
        }
  
 -      if (!(flags & ALLOW_EMPTY) && !oidcmp(current_head ?
 -                                            get_commit_tree_oid(current_head) :
 -                                            the_hash_algo->empty_tree, &tree)) {
 +      if (!(flags & ALLOW_EMPTY) && oideq(current_head ?
 +                                          get_commit_tree_oid(current_head) :
 +                                          the_hash_algo->empty_tree, &tree)) {
                res = 1; /* run 'git commit' to display error message */
                goto out;
        }
  
        if (cleanup != COMMIT_MSG_CLEANUP_NONE)
                strbuf_stripspace(msg, cleanup == COMMIT_MSG_CLEANUP_ALL);
 -      if (!opts->allow_empty_message && message_is_empty(msg, cleanup)) {
 +      if ((flags & EDIT_MSG) && message_is_empty(msg, cleanup)) {
                res = 1; /* run 'git commit' to display error message */
                goto out;
        }
@@@ -1398,7 -1394,7 +1398,7 @@@ static int is_original_commit_empty(str
                ptree_oid = the_hash_algo->empty_tree; /* commit is root */
        }
  
 -      return !oidcmp(ptree_oid, get_commit_tree_oid(commit));
 +      return oideq(ptree_oid, get_commit_tree_oid(commit));
  }
  
  /*
@@@ -1678,7 -1674,7 +1678,7 @@@ static int do_pick_commit(enum todo_com
                unborn = get_oid("HEAD", &head);
                /* Do we want to generate a root commit? */
                if (is_pick_or_similar(command) && opts->have_squash_onto &&
 -                  !oidcmp(&head, &opts->squash_onto)) {
 +                  oideq(&head, &opts->squash_onto)) {
                        if (is_fixup(command))
                                return error(_("cannot fixup root commit"));
                        flags |= CREATE_ROOT_COMMIT;
                        oid_to_hex(&commit->object.oid));
  
        if (opts->allow_ff && !is_fixup(command) &&
 -          ((parent && !oidcmp(&parent->object.oid, &head)) ||
 +          ((parent && oideq(&parent->object.oid, &head)) ||
             (!parent && unborn))) {
                if (is_rebase_i(opts))
                        write_author_script(msg.message);
  
                commit_list_insert(base, &common);
                commit_list_insert(next, &remotes);
 -              res |= try_merge_command(opts->strategy,
 +              res |= try_merge_command(the_repository, opts->strategy,
                                         opts->xopts_nr, (const char **)opts->xopts,
                                        common, oid_to_hex(&head), remotes);
                free_commit_list(common);
                      : _("could not apply %s... %s"),
                      short_commit_name(commit), msg.subject);
                print_advice(res == 1, opts);
 -              rerere(opts->allow_rerere_auto);
 +              repo_rerere(the_repository, opts->allow_rerere_auto);
                goto leave;
        }
  
@@@ -1913,7 -1909,7 +1913,7 @@@ static int read_and_refresh_cache(struc
  {
        struct lock_file index_lock = LOCK_INIT;
        int index_fd = hold_locked_index(&index_lock, 0);
-       if (read_index_preload(&the_index, NULL) < 0) {
+       if (read_index_preload(&the_index, NULL, 0) < 0) {
                rollback_lock_file(&index_lock);
                return error(_("git %s: failed to read the index"),
                        _(action_name(opts)));
@@@ -2426,7 -2422,7 +2426,7 @@@ static int rollback_is_safe(void
        if (get_oid("HEAD", &actual_head))
                oidclr(&actual_head);
  
 -      return !oidcmp(&actual_head, &expected_head);
 +      return oideq(&actual_head, &expected_head);
  }
  
  static int reset_for_rollback(const struct object_id *oid)
@@@ -2599,7 -2595,7 +2599,7 @@@ static int make_patch(struct commit *co
  
        strbuf_addf(&buf, "%s/patch", get_dir(opts));
        memset(&log_tree_opt, 0, sizeof(log_tree_opt));
 -      init_revisions(&log_tree_opt, NULL);
 +      repo_init_revisions(the_repository, &log_tree_opt, NULL);
        log_tree_opt.abbrev = 0;
        log_tree_opt.diff = 1;
        log_tree_opt.diffopt.output_format = DIFF_FORMAT_PATCH;
@@@ -2987,7 -2983,7 +2987,7 @@@ static int do_merge(struct commit *comm
        }
  
        if (opts->have_squash_onto &&
 -          !oidcmp(&head_commit->object.oid, &opts->squash_onto)) {
 +          oideq(&head_commit->object.oid, &opts->squash_onto)) {
                /*
                 * When the user tells us to "merge" something into a
                 * "[new root]", let's simply fast-forward to the merge head.
         * commit, we cannot fast-forward.
         */
        can_fast_forward = opts->allow_ff && commit && commit->parents &&
 -              !oidcmp(&commit->parents->item->object.oid,
 -                      &head_commit->object.oid);
 +              oideq(&commit->parents->item->object.oid,
 +                    &head_commit->object.oid);
  
        /*
         * If any merge head is different from the original one, we cannot
                struct commit_list *p = commit->parents->next;
  
                for (j = to_merge; j && p; j = j->next, p = p->next)
 -                      if (oidcmp(&j->item->object.oid,
 +                      if (!oideq(&j->item->object.oid,
                                   &p->item->object.oid)) {
                                can_fast_forward = 0;
                                break;
        write_message("no-ff", 5, git_path_merge_mode(the_repository), 0);
  
        bases = get_merge_bases(head_commit, merge_commit);
 -      if (bases && !oidcmp(&merge_commit->object.oid,
 -                           &bases->item->object.oid)) {
 +      if (bases && oideq(&merge_commit->object.oid,
 +                         &bases->item->object.oid)) {
                ret = 0;
                /* skip merging an ancestor of HEAD */
                goto leave_merge;
  
        rollback_lock_file(&lock);
        if (ret)
 -              rerere(opts->allow_rerere_auto);
 +              repo_rerere(the_repository, opts->allow_rerere_auto);
        else
                /*
                 * In case of problems, we now want to return a positive
@@@ -3382,9 -3378,9 +3382,9 @@@ static int pick_commits(struct todo_lis
                                 */
                                if (item->command == TODO_REWORD &&
                                    !get_oid("HEAD", &oid) &&
 -                                  (!oidcmp(&item->commit->object.oid, &oid) ||
 +                                  (oideq(&item->commit->object.oid, &oid) ||
                                     (opts->have_squash_onto &&
 -                                    !oidcmp(&opts->squash_onto, &oid))))
 +                                    oideq(&opts->squash_onto, &oid))))
                                        to_amend = 1;
  
                                return res | error_with_patch(item->commit,
@@@ -3510,7 -3506,7 +3510,7 @@@ cleanup_head_ref
                        struct object_id orig, head;
  
                        memset(&log_tree_opt, 0, sizeof(log_tree_opt));
 -                      init_revisions(&log_tree_opt, NULL);
 +                      repo_init_revisions(the_repository, &log_tree_opt, NULL);
                        log_tree_opt.diff = 1;
                        log_tree_opt.diffopt.output_format =
                                DIFF_FORMAT_DIFFSTAT;
@@@ -3599,7 -3595,7 +3599,7 @@@ static int commit_staged_changes(struc
                if (get_oid_hex(rev.buf, &to_amend))
                        return error(_("invalid contents: '%s'"),
                                rebase_path_amend());
 -              if (!is_clean && oidcmp(&head, &to_amend))
 +              if (!is_clean && !oideq(&head, &to_amend))
                        return error(_("\nYou have uncommitted changes in your "
                                       "working tree. Please, commit them\n"
                                       "first and then run 'git rebase "
                 * the commit message and if there was a squash, let the user
                 * edit it.
                 */
 -              if (is_clean && !oidcmp(&head, &to_amend) &&
 -                  opts->current_fixup_count > 0 &&
 -                  file_exists(rebase_path_stopped_sha())) {
 +              if (!is_clean || !opts->current_fixup_count)
 +                      ; /* this is not the final fixup */
 +              else if (!oideq(&head, &to_amend) ||
 +                       !file_exists(rebase_path_stopped_sha())) {
 +                      /* was a final fixup or squash done manually? */
 +                      if (!is_fixup(peek_command(todo_list, 0))) {
 +                              unlink(rebase_path_fixup_msg());
 +                              unlink(rebase_path_squash_msg());
 +                              unlink(rebase_path_current_fixups());
 +                              strbuf_reset(&opts->current_fixups);
 +                              opts->current_fixup_count = 0;
 +                      }
 +              } else {
 +                      /* we are in a fixup/squash chain */
                        const char *p = opts->current_fixups.buf;
                        int len = opts->current_fixups.len;
  
@@@ -3843,7 -3828,7 +3843,7 @@@ int sequencer_pick_revisions(struct rep
        return res;
  }
  
 -void append_signoff(struct strbuf *msgbuf, int ignore_footer, unsigned flag)
 +void append_signoff(struct strbuf *msgbuf, size_t ignore_footer, unsigned flag)
  {
        unsigned no_dup_sob = flag & APPEND_SIGNOFF_DEDUP;
        struct strbuf sob = STRBUF_INIT;
@@@ -4146,7 -4131,9 +4146,7 @@@ static int make_script_with_merges(stru
                        struct object_id *oid = &parent->item->object.oid;
                        if (!oidset_contains(&interesting, oid))
                                continue;
 -                      if (!oidset_contains(&child_seen, oid))
 -                              oidset_insert(&child_seen, oid);
 -                      else
 +                      if (oidset_insert(&child_seen, oid))
                                label_oid(oid, "branch-point", &state);
                }
  
@@@ -4254,7 -4241,7 +4254,7 @@@ int sequencer_make_script(FILE *out, in
        const char *insn = flags & TODO_LIST_ABBREVIATE_CMDS ? "p" : "pick";
        int rebase_merges = flags & TODO_LIST_REBASE_MERGES;
  
 -      init_revisions(&revs, NULL);
 +      repo_init_revisions(the_repository, &revs, NULL);
        revs.verbose_header = 1;
        if (!rebase_merges)
                revs.max_parents = 1;
@@@ -4587,7 -4574,7 +4587,7 @@@ int skip_unnecessary_picks(void
                if (item->commit->parents->next)
                        break; /* merge commit */
                parent_oid = &item->commit->parents->item->object.oid;
 -              if (hashcmp(parent_oid->hash, oid->hash))
 +              if (!oideq(parent_oid, oid))
                        break;
                oid = &item->commit->object.oid;
        }