/* preload-index.c — snapshot at commit 8858448 ("Ninth batch" for Git 2.20) */
   1/*
   2 * Copyright (C) 2008 Linus Torvalds
   3 */
   4#include "cache.h"
   5#include "pathspec.h"
   6#include "dir.h"
   7#include "fsmonitor.h"
   8#include "config.h"
   9#include "progress.h"
  10
  11#ifdef NO_PTHREADS
  12static void preload_index(struct index_state *index,
  13                          const struct pathspec *pathspec,
  14                          unsigned int refresh_flags)
  15{
  16        ; /* nothing */
  17}
  18#else
  19
  20#include <pthread.h>
  21
  22/*
  23 * Mostly randomly chosen maximum thread counts: we
  24 * cap the parallelism to 20 threads, and we want
  25 * to have at least 500 lstat's per thread for it to
  26 * be worth starting a thread.
  27 */
  28#define MAX_PARALLEL (20)
  29#define THREAD_COST (500)
  30
/*
 * Progress state shared by all worker threads.  "mutex" serializes
 * both the updates to "n" and the display_progress() calls made by
 * the workers.
 */
struct progress_data {
        unsigned long n;          /* total entries processed so far (all threads) */
        struct progress *progress; /* NULL when progress reporting is disabled */
        pthread_mutex_t mutex;
};
  36
/*
 * Per-thread work description: each worker handles the "nr" index
 * entries starting at "offset", using its own deep copy of the
 * pathspec (threads must not share one mutable pathspec).
 */
struct thread_data {
        pthread_t pthread;
        struct index_state *index;
        struct pathspec pathspec;        /* deep copy; owned by this thread's slot */
        struct progress_data *progress;  /* shared; NULL if no progress display */
        int offset, nr;                  /* slice of index->cache to process */
};
  44
/*
 * Worker body: walk a contiguous slice of the index, lstat() each
 * candidate entry and mark it up-to-date when the on-disk stat data
 * still matches, so the later sequential refresh finds most entries
 * already validated.  Returns NULL; errors on individual entries are
 * simply skipped (best-effort cache warming).
 */
static void *preload_thread(void *_data)
{
        int nr, last_nr;
        struct thread_data *p = _data;
        struct index_state *index = p->index;
        struct cache_entry **cep = index->cache + p->offset;
        struct cache_def cache = CACHE_DEF_INIT;

        /* clamp the slice so the last thread does not run past the end */
        nr = p->nr;
        if (nr + p->offset > index->cache_nr)
                nr = index->cache_nr - p->offset;
        last_nr = nr;

        do {
                /* NOTE: "continue" in this do/while still runs the --nr test */
                struct cache_entry *ce = *cep++;
                struct stat st;

                /* skip entries that cannot or need not be stat-validated */
                if (ce_stage(ce))
                        continue;
                if (S_ISGITLINK(ce->ce_mode))
                        continue;
                if (ce_uptodate(ce))
                        continue;
                if (ce_skip_worktree(ce))
                        continue;
                if (ce->ce_flags & CE_FSMONITOR_VALID)
                        continue;
                /*
                 * Report progress every 32 entries; nr counts down, so
                 * last_nr - nr is how many we advanced since last report.
                 */
                if (p->progress && !(nr & 31)) {
                        struct progress_data *pd = p->progress;

                        pthread_mutex_lock(&pd->mutex);
                        pd->n += last_nr - nr;
                        display_progress(pd->progress, pd->n);
                        pthread_mutex_unlock(&pd->mutex);
                        last_nr = nr;
                }
                if (!ce_path_match(index, ce, &p->pathspec, NULL))
                        continue;
                /* a symlink in the leading path invalidates the cached name */
                if (threaded_has_symlink_leading_path(&cache, ce->name, ce_namelen(ce)))
                        continue;
                if (lstat(ce->name, &st))
                        continue;
                if (ie_match_stat(index, ce, &st, CE_MATCH_RACY_IS_DIRTY|CE_MATCH_IGNORE_FSMONITOR))
                        continue;
                ce_mark_uptodate(ce);
                mark_fsmonitor_valid(ce);
        } while (--nr > 0);
        /* flush the remaining (< 32) entries into the shared counter */
        if (p->progress) {
                struct progress_data *pd = p->progress;

                pthread_mutex_lock(&pd->mutex);
                display_progress(pd->progress, pd->n + last_nr);
                pthread_mutex_unlock(&pd->mutex);
        }
        cache_def_clear(&cache);
        return NULL;
}
 102
 103static void preload_index(struct index_state *index,
 104                          const struct pathspec *pathspec,
 105                          unsigned int refresh_flags)
 106{
 107        int threads, i, work, offset;
 108        struct thread_data data[MAX_PARALLEL];
 109        struct progress_data pd;
 110
 111        if (!core_preload_index)
 112                return;
 113
 114        threads = index->cache_nr / THREAD_COST;
 115        if ((index->cache_nr > 1) && (threads < 2) && git_env_bool("GIT_TEST_PRELOAD_INDEX", 0))
 116                threads = 2;
 117        if (threads < 2)
 118                return;
 119        trace_performance_enter();
 120        if (threads > MAX_PARALLEL)
 121                threads = MAX_PARALLEL;
 122        offset = 0;
 123        work = DIV_ROUND_UP(index->cache_nr, threads);
 124        memset(&data, 0, sizeof(data));
 125
 126        memset(&pd, 0, sizeof(pd));
 127        if (refresh_flags & REFRESH_PROGRESS && isatty(2)) {
 128                pd.progress = start_delayed_progress(_("Refreshing index"), index->cache_nr);
 129                pthread_mutex_init(&pd.mutex, NULL);
 130        }
 131
 132        for (i = 0; i < threads; i++) {
 133                struct thread_data *p = data+i;
 134                p->index = index;
 135                if (pathspec)
 136                        copy_pathspec(&p->pathspec, pathspec);
 137                p->offset = offset;
 138                p->nr = work;
 139                if (pd.progress)
 140                        p->progress = &pd;
 141                offset += work;
 142                if (pthread_create(&p->pthread, NULL, preload_thread, p))
 143                        die("unable to create threaded lstat");
 144        }
 145        for (i = 0; i < threads; i++) {
 146                struct thread_data *p = data+i;
 147                if (pthread_join(p->pthread, NULL))
 148                        die("unable to join threaded lstat");
 149        }
 150        stop_progress(&pd.progress);
 151
 152        trace_performance_leave("preload index");
 153}
 154#endif
 155
 156int read_index_preload(struct index_state *index,
 157                       const struct pathspec *pathspec,
 158                       unsigned int refresh_flags)
 159{
 160        int retval = read_index(index);
 161
 162        preload_index(index, pathspec, refresh_flags);
 163        return retval;
 164}