+static int use_threads = 1;
+
+#ifndef NO_PTHREADS
+#define THREADS 8
+static pthread_t threads[THREADS];
+
+static void *load_sha1(const unsigned char *sha1, unsigned long *size,
+ const char *name);
+static void *load_file(const char *filename, size_t *sz);
+
+enum work_type {WORK_SHA1, WORK_FILE};
+
+/* We use one producer thread and THREADS consumer
+ * threads. The producer adds struct work_item entries to 'todo' and
+ * the consumers pick work items from the same array.
+ */
+struct work_item
+{
+ enum work_type type;
+ char *name;
+
+ /* If type is WORK_SHA1, 'identifier' is a 20-byte SHA1;
+ * if type is WORK_FILE, 'identifier' is a NUL-terminated
+ * filename.
+ */
+ void *identifier;
+ char done;
+ struct strbuf out;
+};
+
+/* The work_items in the range [todo_done, todo_start) of 'todo' have
+ * been or are being processed by a consumer thread, but their results
+ * have not been written to stdout yet.
+ *
+ * The work_items in [todo_start, todo_end) are waiting to be picked
+ * up by a consumer thread.
+ *
+ * The ranges are modulo TODO_SIZE.
+ */
+#define TODO_SIZE 128
+static struct work_item todo[TODO_SIZE];
+static int todo_start;
+static int todo_end;
+static int todo_done;
+
+/* Have all work items been added? */
+static int all_work_added;
+
+/* This lock protects all the variables above. */
+static pthread_mutex_t grep_mutex;
+
+/* Used to serialize calls to read_sha1_file. */
+static pthread_mutex_t read_sha1_mutex;
+
+#define grep_lock() pthread_mutex_lock(&grep_mutex)
+#define grep_unlock() pthread_mutex_unlock(&grep_mutex)
+#define read_sha1_lock() pthread_mutex_lock(&read_sha1_mutex)
+#define read_sha1_unlock() pthread_mutex_unlock(&read_sha1_mutex)
+
+/* Signalled when a new work_item is added to todo. */
+static pthread_cond_t cond_add;
+
+/* Signalled when the result from one work_item is written to
+ * stdout.
+ */
+static pthread_cond_t cond_write;
+
+/* Signalled when we are finished with everything. */
+static pthread_cond_t cond_result;
+
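+/* Called by the producer thread.  Blocks while the ring buffer is
+ * full, then appends a new work item and wakes up one consumer.
+ */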
+static void add_work(enum work_type type, char *name, void *id)
+{
+ grep_lock();
+
+ while ((todo_end+1) % ARRAY_SIZE(todo) == todo_done) {
+ pthread_cond_wait(&cond_write, &grep_mutex);
+ }
+
+ todo[todo_end].type = type;
+ todo[todo_end].name = name;
+ todo[todo_end].identifier = id;
+ todo[todo_end].done = 0;
+ strbuf_reset(&todo[todo_end].out);
+ todo_end = (todo_end + 1) % ARRAY_SIZE(todo);
+
+ pthread_cond_signal(&cond_add);
+ grep_unlock();
+}
+
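+/* Called by the consumer threads.  Returns the next work item waiting
+ * in 'todo', or NULL once all work has been added and the queue has
+ * been drained.
+ */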
+static struct work_item *get_work(void)
+{
+ struct work_item *ret;
+
+ grep_lock();
+ while (todo_start == todo_end && !all_work_added) {
+ pthread_cond_wait(&cond_add, &grep_mutex);
+ }
+
+ if (todo_start == todo_end && all_work_added) {
+ ret = NULL;
+ } else {
+ ret = &todo[todo_start];
+ todo_start = (todo_start + 1) % ARRAY_SIZE(todo);
+ }
+ grep_unlock();
+ return ret;
+}
+
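+/* Queue an object, identified by its SHA1, for grepping.  The 20-byte
+ * SHA1 is copied; 'name' is taken over by the work item and freed
+ * after its output has been written.
+ */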
+static void grep_sha1_async(struct grep_opt *opt, char *name,
+ const unsigned char *sha1)
+{
+ unsigned char *s;
+ s = xmalloc(20);
+ memcpy(s, sha1, 20);
+ add_work(WORK_SHA1, name, s);
+}
+
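+/* Queue a file, identified by its filename, for grepping. */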
+static void grep_file_async(struct grep_opt *opt, char *name,
+ const char *filename)
+{
+ add_work(WORK_FILE, name, xstrdup(filename));
+}
+
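+/* Mark 'w' as done and write out, in the order the work items were
+ * added, the output of every finished item at the front of the queue.
+ */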
+static void work_done(struct work_item *w)
+{
+ int old_done;
+
+ grep_lock();
+ w->done = 1;
+ old_done = todo_done;
+ for (; todo[todo_done].done && todo_done != todo_start;
+ todo_done = (todo_done+1) % ARRAY_SIZE(todo)) {
+ w = &todo[todo_done];
+ write_or_die(1, w->out.buf, w->out.len);
+ free(w->name);
+ free(w->identifier);
+ }
+
+ if (old_done != todo_done)
+ pthread_cond_signal(&cond_write);
+
+ if (all_work_added && todo_done == todo_end)
+ pthread_cond_signal(&cond_result);
+
+ grep_unlock();
+}
+
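+/* Consumer thread entry point: keep picking up work items until
+ * get_work() reports that there is nothing left to do.
+ */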
+static void *run(void *arg)
+{
+ int hit = 0;
+ struct grep_opt *opt = arg;
+
+ while (1) {
+ struct work_item *w = get_work();
+ if (!w)
+ break;
+
+ opt->output_priv = w;
+ if (w->type == WORK_SHA1) {
+ unsigned long sz;
+ void *data = load_sha1(w->identifier, &sz, w->name);
+
+ if (data) {
+ hit |= grep_buffer(opt, w->name, data, sz);
+ free(data);
+ }
+ } else if (w->type == WORK_FILE) {
+ size_t sz;
+ void *data = load_file(w->identifier, &sz);
+ if (data) {
+ hit |= grep_buffer(opt, w->name, data, sz);
+ free(data);
+ }
+ } else {
+ assert(0);
+ }
+
+ work_done(w);
+ }
+
+ return (void *)(intptr_t)hit;
+}
+
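+/* Output callback used by the consumer threads; matches are collected
+ * in the work item's strbuf instead of being written to stdout
+ * directly, so that work_done() can print them in order.
+ */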
+static void strbuf_out(struct grep_opt *opt, const void *buf, size_t size)
+{
+ struct work_item *w = opt->output_priv;
+ strbuf_add(&w->out, buf, size);
+}
+
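+/* Start the consumer threads.  Each thread gets its own copy of the
+ * grep options with its own compiled patterns.
+ */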
+static void start_threads(struct grep_opt *opt)
+{
+ int i;
+
+ pthread_mutex_init(&grep_mutex, NULL);
+ pthread_mutex_init(&read_sha1_mutex, NULL);
+ pthread_cond_init(&cond_add, NULL);
+ pthread_cond_init(&cond_write, NULL);
+ pthread_cond_init(&cond_result, NULL);
+
+ for (i = 0; i < ARRAY_SIZE(todo); i++) {
+ strbuf_init(&todo[i].out, 0);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(threads); i++) {
+ int err;
+ struct grep_opt *o = grep_opt_dup(opt);
+ o->output = strbuf_out;
+ compile_grep_patterns(o);
+ err = pthread_create(&threads[i], NULL, run, o);
+
+ if (err)
+ die("grep: failed to create thread: %s",
+ strerror(err));
+ }
+}
+
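+/* Called by the producer when no more work will be added.  Waits for
+ * the consumers to finish, collects their hit status and tears down
+ * the thread machinery.
+ */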
+static int wait_all(void)
+{
+ int hit = 0;
+ int i;
+
+ grep_lock();
+ all_work_added = 1;
+
+ /* Wait until all work is done. */
+ while (todo_done != todo_end)
+ pthread_cond_wait(&cond_result, &grep_mutex);
+
+ /* Wake up all the consumer threads so they can see that there
+ * is no more work to do.
+ */
+ pthread_cond_broadcast(&cond_add);
+ grep_unlock();
+
+ for (i = 0; i < ARRAY_SIZE(threads); i++) {
+ void *h;
+ pthread_join(threads[i], &h);
+ hit |= (int) (intptr_t) h;
+ }
+
+ pthread_mutex_destroy(&grep_mutex);
+ pthread_mutex_destroy(&read_sha1_mutex);
+ pthread_cond_destroy(&cond_add);
+ pthread_cond_destroy(&cond_write);
+ pthread_cond_destroy(&cond_result);
+
+ return hit;
+}
+#else /* NO_PTHREADS */
+#define read_sha1_lock()
+#define read_sha1_unlock()
+
+static int wait_all(void)
+{
+ return 0;
+}
+#endif
+