+
+	/*
+	 * To prevent later ce_match_stat() from always falling into
+	 * check_fs(), if we have too many entries that can trigger
+	 * racily clean check, we are better off delaying the return.
+	 * We arbitrarily say if more than 20 paths or 25% of total
+	 * paths are very new, we delay the return until the index
+	 * file gets a new timestamp.
+	 *
+	 * NOTE! NOTE! NOTE!
+	 *
+	 * This assumes that nobody is touching the working tree while
+	 * we are updating the index.
+	 */
+	if (20 < recent || entries <= recent * 4) {
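+		/*
+		 * Baseline: the mtime the index file carries right after
+		 * we finished writing it; a failed fstat() yields 0, which
+		 * skips the wait below.
+		 */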
+		now = fstat(newfd, &st) ? 0 : st.st_mtime;
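+		/*
+		 * Keep re-dirtying the file until the filesystem reports
+		 * an mtime newer than that baseline.
+		 */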
+		while (now && !fstat(newfd, &st) && st.st_mtime <= now) {
+			struct timespec rq, rm;
+			off_t where = lseek(newfd, 0, SEEK_CUR);
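+			/* Back off for 250ms before poking the file again. */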
+			rq.tv_sec = 0;
+			rq.tv_nsec = 250000000;
+			nanosleep(&rq, &rm);
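+			/*
+			 * Dirty the file without changing its contents:
+			 * append one throwaway byte, seek back and truncate
+			 * it off again, so only the mtime moves.  Bail out
+			 * if any of these syscalls fails.
+			 */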
+			if ((where == (off_t) -1) ||
+			    (write(newfd, "", 1) != 1) ||
+			    (lseek(newfd, -1, SEEK_CUR) != where) ||
+			    ftruncate(newfd, where))
+				break;
+		}
+	}