/* read-cache.c — snapshot as of commit "Merge branch 'jc/racy-delay'" (c1e4572) */
   1/*
   2 * GIT - The information manager from hell
   3 *
   4 * Copyright (C) Linus Torvalds, 2005
   5 */
   6#include "cache.h"
   7#include "cache-tree.h"
   8#include <time.h>
   9
  10/* Index extensions.
  11 *
  12 * The first letter should be 'A'..'Z' for extensions that are not
  13 * necessary for a correct operation (i.e. optimization data).
  14 * When new extensions are added that _needs_ to be understood in
  15 * order to correctly interpret the index file, pick character that
  16 * is outside the range, to cause the reader to abort.
  17 */
  18
/* Pack a 4-character extension name into a big-endian integer tag. */
#define CACHE_EXT(s) ( (s[0]<<24)|(s[1]<<16)|(s[2]<<8)|(s[3]) )
#define CACHE_EXT_TREE 0x54524545       /* "TREE" */

/* The in-core index: a sorted array of pointers into the index entries. */
struct cache_entry **active_cache = NULL;
/* mtime of the index file when it was read; used by the racy-timestamp
 * check in ce_match_stat(). */
static time_t index_file_timestamp;
unsigned int active_nr = 0, active_alloc = 0, active_cache_changed = 0;

struct cache_tree *active_cache_tree = NULL;

/* errno-style code left behind by refresh_cache_entry() on failure. */
int cache_errno = 0;

/* Memory-mapped contents of the index file (set up by read_cache_from). */
static void *cache_mmap = NULL;
static size_t cache_mmap_size = 0;
  32
  33/*
  34 * This only updates the "non-critical" parts of the directory
  35 * cache, ie the parts that aren't tracked by GIT, and only used
  36 * to validate the cache.
  37 */
void fill_stat_cache_info(struct cache_entry *ce, struct stat *st)
{
        /* On-disk entry fields are kept in network byte order. */
        ce->ce_ctime.sec = htonl(st->st_ctime);
        ce->ce_mtime.sec = htonl(st->st_mtime);
#ifdef USE_NSEC
        /* Nanosecond stat data is optional; see the reliability caveat
         * in ce_match_stat_basic(). */
        ce->ce_ctime.nsec = htonl(st->st_ctim.tv_nsec);
        ce->ce_mtime.nsec = htonl(st->st_mtim.tv_nsec);
#endif
        ce->ce_dev = htonl(st->st_dev);
        ce->ce_ino = htonl(st->st_ino);
        ce->ce_uid = htonl(st->st_uid);
        ce->ce_gid = htonl(st->st_gid);
        ce->ce_size = htonl(st->st_size);

        /* When the global assume_unchanged flag is on, entries are marked
         * valid so stat comparisons are skipped (see ce_match_stat). */
        if (assume_unchanged)
                ce->ce_flags |= htons(CE_VALID);
}
  55
  56static int ce_compare_data(struct cache_entry *ce, struct stat *st)
  57{
  58        int match = -1;
  59        int fd = open(ce->name, O_RDONLY);
  60
  61        if (fd >= 0) {
  62                unsigned char sha1[20];
  63                if (!index_fd(sha1, fd, st, 0, NULL))
  64                        match = memcmp(sha1, ce->sha1, 20);
  65                /* index_fd() closed the file descriptor already */
  66        }
  67        return match;
  68}
  69
  70static int ce_compare_link(struct cache_entry *ce, unsigned long expected_size)
  71{
  72        int match = -1;
  73        char *target;
  74        void *buffer;
  75        unsigned long size;
  76        char type[10];
  77        int len;
  78
  79        target = xmalloc(expected_size);
  80        len = readlink(ce->name, target, expected_size);
  81        if (len != expected_size) {
  82                free(target);
  83                return -1;
  84        }
  85        buffer = read_sha1_file(ce->sha1, type, &size);
  86        if (!buffer) {
  87                free(target);
  88                return -1;
  89        }
  90        if (size == expected_size)
  91                match = memcmp(buffer, target, size);
  92        free(buffer);
  93        free(target);
  94        return match;
  95}
  96
  97static int ce_modified_check_fs(struct cache_entry *ce, struct stat *st)
  98{
  99        switch (st->st_mode & S_IFMT) {
 100        case S_IFREG:
 101                if (ce_compare_data(ce, st))
 102                        return DATA_CHANGED;
 103                break;
 104        case S_IFLNK:
 105                if (ce_compare_link(ce, st->st_size))
 106                        return DATA_CHANGED;
 107                break;
 108        default:
 109                return TYPE_CHANGED;
 110        }
 111        return 0;
 112}
 113
/* Compare the cached stat data of an index entry against a fresh
 * lstat() result; returns a bitmask of *_CHANGED flags (0 when clean).
 * Entry fields are stored in network byte order, hence the htonl()
 * conversions on the stat side of each comparison. */
static int ce_match_stat_basic(struct cache_entry *ce, struct stat *st)
{
        unsigned int changed = 0;

        switch (ntohl(ce->ce_mode) & S_IFMT) {
        case S_IFREG:
                changed |= !S_ISREG(st->st_mode) ? TYPE_CHANGED : 0;
                /* We consider only the owner x bit to be relevant for
                 * "mode changes"
                 */
                if (trust_executable_bit &&
                    (0100 & (ntohl(ce->ce_mode) ^ st->st_mode)))
                        changed |= MODE_CHANGED;
                break;
        case S_IFLNK:
                changed |= !S_ISLNK(st->st_mode) ? TYPE_CHANGED : 0;
                break;
        default:
                /* The index only records regular files and symlinks. */
                die("internal error: ce_mode is %o", ntohl(ce->ce_mode));
        }
        if (ce->ce_mtime.sec != htonl(st->st_mtime))
                changed |= MTIME_CHANGED;
        if (ce->ce_ctime.sec != htonl(st->st_ctime))
                changed |= CTIME_CHANGED;

#ifdef USE_NSEC
        /*
         * nsec seems unreliable - not all filesystems support it, so
         * as long as it is in the inode cache you get right nsec
         * but after it gets flushed, you get zero nsec.
         */
        if (ce->ce_mtime.nsec != htonl(st->st_mtim.tv_nsec))
                changed |= MTIME_CHANGED;
        if (ce->ce_ctime.nsec != htonl(st->st_ctim.tv_nsec))
                changed |= CTIME_CHANGED;
#endif

        if (ce->ce_uid != htonl(st->st_uid) ||
            ce->ce_gid != htonl(st->st_gid))
                changed |= OWNER_CHANGED;
        if (ce->ce_ino != htonl(st->st_ino))
                changed |= INODE_CHANGED;

#ifdef USE_STDEV
        /*
         * st_dev breaks on network filesystems where different
         * clients will have different views of what "device"
         * the filesystem is on
         */
        if (ce->ce_dev != htonl(st->st_dev))
                changed |= INODE_CHANGED;
#endif

        if (ce->ce_size != htonl(st->st_size))
                changed |= DATA_CHANGED;

        return changed;
}
 172
/*
 * Check whether the work tree entity matches the cached stat data.
 * With ignore_valid == 0, entries flagged CE_VALID ("assume unchanged")
 * always report clean.  Returns a bitmask of *_CHANGED flags.
 */
int ce_match_stat(struct cache_entry *ce, struct stat *st, int ignore_valid)
{
        unsigned int changed;

        /*
         * If it's marked as always valid in the index, it's
         * valid whatever the checked-out copy says.
         */
        if (!ignore_valid && (ce->ce_flags & htons(CE_VALID)))
                return 0;

        changed = ce_match_stat_basic(ce, st);

        /*
         * Within 1 second of this sequence:
         *      echo xyzzy >file && git-update-index --add file
         * running this command:
         *      echo frotz >file
         * would give a falsely clean cache entry.  The mtime and
         * length match the cache, and other stat fields do not change.
         *
         * We could detect this at update-index time (the cache entry
         * being registered/updated records the same time as "now")
         * and delay the return from git-update-index, but that would
         * effectively mean we can make at most one commit per second,
         * which is not acceptable.  Instead, we check cache entries
         * whose mtime are the same as the index file timestamp more
         * carefully than others.
         */
        if (!changed &&
            index_file_timestamp &&
            index_file_timestamp <= ntohl(ce->ce_mtime.sec))
                changed |= ce_modified_check_fs(ce, st);

        return changed;
}
 209
 210int ce_modified(struct cache_entry *ce, struct stat *st, int really)
 211{
 212        int changed, changed_fs;
 213        changed = ce_match_stat(ce, st, really);
 214        if (!changed)
 215                return 0;
 216        /*
 217         * If the mode or type has changed, there's no point in trying
 218         * to refresh the entry - it's not going to match
 219         */
 220        if (changed & (MODE_CHANGED | TYPE_CHANGED))
 221                return changed;
 222
 223        /* Immediately after read-tree or update-index --cacheinfo,
 224         * the length field is zero.  For other cases the ce_size
 225         * should match the SHA1 recorded in the index entry.
 226         */
 227        if ((changed & DATA_CHANGED) && ce->ce_size != htonl(0))
 228                return changed;
 229
 230        changed_fs = ce_modified_check_fs(ce, st);
 231        if (changed_fs)
 232                return changed | changed_fs;
 233        return 0;
 234}
 235
 236int base_name_compare(const char *name1, int len1, int mode1,
 237                      const char *name2, int len2, int mode2)
 238{
 239        unsigned char c1, c2;
 240        int len = len1 < len2 ? len1 : len2;
 241        int cmp;
 242
 243        cmp = memcmp(name1, name2, len);
 244        if (cmp)
 245                return cmp;
 246        c1 = name1[len];
 247        c2 = name2[len];
 248        if (!c1 && S_ISDIR(mode1))
 249                c1 = '/';
 250        if (!c2 && S_ISDIR(mode2))
 251                c2 = '/';
 252        return (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0;
 253}
 254
 255int cache_name_compare(const char *name1, int flags1, const char *name2, int flags2)
 256{
 257        int len1 = flags1 & CE_NAMEMASK;
 258        int len2 = flags2 & CE_NAMEMASK;
 259        int len = len1 < len2 ? len1 : len2;
 260        int cmp;
 261
 262        cmp = memcmp(name1, name2, len);
 263        if (cmp)
 264                return cmp;
 265        if (len1 < len2)
 266                return -1;
 267        if (len1 > len2)
 268                return 1;
 269
 270        /* Compare stages  */
 271        flags1 &= CE_STAGEMASK;
 272        flags2 &= CE_STAGEMASK;
 273
 274        if (flags1 < flags2)
 275                return -1;
 276        if (flags1 > flags2)
 277                return 1;
 278        return 0;
 279}
 280
 281int cache_name_pos(const char *name, int namelen)
 282{
 283        int first, last;
 284
 285        first = 0;
 286        last = active_nr;
 287        while (last > first) {
 288                int next = (last + first) >> 1;
 289                struct cache_entry *ce = active_cache[next];
 290                int cmp = cache_name_compare(name, namelen, ce->name, ntohs(ce->ce_flags));
 291                if (!cmp)
 292                        return next;
 293                if (cmp < 0) {
 294                        last = next;
 295                        continue;
 296                }
 297                first = next+1;
 298        }
 299        return -first-1;
 300}
 301
/* Remove entry, return true if there are more entries to go.. */
int remove_cache_entry_at(int pos)
{
        active_cache_changed = 1;
        active_nr--;
        /* Removing the last entry leaves nothing to shift. */
        if (pos >= active_nr)
                return 0;
        /* Close the gap; only the pointers move, the entries themselves
         * stay where they are (possibly inside the index mmap). */
        memmove(active_cache + pos, active_cache + pos + 1, (active_nr - pos) * sizeof(struct cache_entry *));
        return 1;
}
 312
 313int remove_file_from_cache(const char *path)
 314{
 315        int pos = cache_name_pos(path, strlen(path));
 316        if (pos < 0)
 317                pos = -pos-1;
 318        while (pos < active_nr && !strcmp(active_cache[pos]->name, path))
 319                remove_cache_entry_at(pos);
 320        return 0;
 321}
 322
/*
 * Build an index entry for "path" from the working tree and add it to
 * the active cache.  Only regular files and symbolic links are
 * supported; any failure is fatal (die()).  Returns 0 on success.
 */
int add_file_to_index(const char *path, int verbose)
{
        int size, namelen;
        struct stat st;
        struct cache_entry *ce;

        if (lstat(path, &st))
                die("%s: unable to stat (%s)", path, strerror(errno));

        if (!S_ISREG(st.st_mode) && !S_ISLNK(st.st_mode))
                die("%s: can only add regular files or symbolic links", path);

        namelen = strlen(path);
        size = cache_entry_size(namelen);
        ce = xcalloc(1, size);
        memcpy(ce->name, path, namelen);
        /* The flags word carries the name length (see CE_NAMEMASK). */
        ce->ce_flags = htons(namelen);
        fill_stat_cache_info(ce, &st);

        ce->ce_mode = create_ce_mode(st.st_mode);
        if (!trust_executable_bit) {
                /* If there is an existing entry, pick the mode bits
                 * from it.
                 */
                int pos = cache_name_pos(path, namelen);
                if (pos >= 0)
                        ce->ce_mode = active_cache[pos]->ce_mode;
        }

        /* Compute the object name for the file contents into ce->sha1. */
        if (index_path(ce->sha1, path, &st, 1))
                die("unable to index file %s", path);
        if (add_cache_entry(ce, ADD_CACHE_OK_TO_ADD))
                die("unable to add %s to index",path);
        if (verbose)
                printf("add '%s'\n", path);
        /* Cached tree objects covering this path are now stale. */
        cache_tree_invalidate_path(active_cache_tree, path);
        return 0;
}
 361
 362int ce_same_name(struct cache_entry *a, struct cache_entry *b)
 363{
 364        int len = ce_namelen(a);
 365        return ce_namelen(b) == len && !memcmp(a->name, b->name, len);
 366}
 367
 368int ce_path_match(const struct cache_entry *ce, const char **pathspec)
 369{
 370        const char *match, *name;
 371        int len;
 372
 373        if (!pathspec)
 374                return 1;
 375
 376        len = ce_namelen(ce);
 377        name = ce->name;
 378        while ((match = *pathspec++) != NULL) {
 379                int matchlen = strlen(match);
 380                if (matchlen > len)
 381                        continue;
 382                if (memcmp(name, match, matchlen))
 383                        continue;
 384                if (matchlen && name[matchlen-1] == '/')
 385                        return 1;
 386                if (name[matchlen] == '/' || !name[matchlen])
 387                        return 1;
 388                if (!matchlen)
 389                        return 1;
 390        }
 391        return 0;
 392}
 393
 394/*
 395 * We fundamentally don't like some paths: we don't want
 396 * dot or dot-dot anywhere, and for obvious reasons don't
 397 * want to recurse into ".git" either.
 398 *
 399 * Also, we don't want double slashes or slashes at the
 400 * end that can make pathnames ambiguous.
 401 */
 402static int verify_dotfile(const char *rest)
 403{
 404        /*
 405         * The first character was '.', but that
 406         * has already been discarded, we now test
 407         * the rest.
 408         */
 409        switch (*rest) {
 410        /* "." is not allowed */
 411        case '\0': case '/':
 412                return 0;
 413
 414        /*
 415         * ".git" followed by  NUL or slash is bad. This
 416         * shares the path end test with the ".." case.
 417         */
 418        case 'g':
 419                if (rest[1] != 'i')
 420                        break;
 421                if (rest[2] != 't')
 422                        break;
 423                rest += 2;
 424        /* fallthrough */
 425        case '.':
 426                if (rest[1] == '\0' || rest[1] == '/')
 427                        return 0;
 428        }
 429        return 1;
 430}
 431
 432int verify_path(const char *path)
 433{
 434        char c;
 435
 436        goto inside;
 437        for (;;) {
 438                if (!c)
 439                        return 1;
 440                if (c == '/') {
 441inside:
 442                        c = *path++;
 443                        switch (c) {
 444                        default:
 445                                continue;
 446                        case '/': case '\0':
 447                                break;
 448                        case '.':
 449                                if (verify_dotfile(path))
 450                                        continue;
 451                        }
 452                        return 0;
 453                }
 454                c = *path++;
 455        }
 456}
 457
 458/*
 459 * Do we have another file that has the beginning components being a
 460 * proper superset of the name we're trying to add?
 461 */
static int has_file_name(const struct cache_entry *ce, int pos, int ok_to_replace)
{
        int retval = 0;
        int len = ce_namelen(ce);
        int stage = ce_stage(ce);
        const char *name = ce->name;

        /* Entries are sorted, so anything under "name/" follows pos
         * directly; scan until the prefix stops matching. */
        while (pos < active_nr) {
                struct cache_entry *p = active_cache[pos++];

                if (len >= ce_namelen(p))
                        break;
                if (memcmp(name, p->name, len))
                        break;
                if (ce_stage(p) != stage)
                        continue;
                if (p->name[len] != '/')
                        continue;
                /* "name/something" exists at our stage: conflict. */
                retval = -1;
                if (!ok_to_replace)
                        break;
                /* Caller allowed replacement: drop the conflicting
                 * entry and keep scanning from the same position. */
                remove_cache_entry_at(--pos);
        }
        return retval;
}
 487
 488/*
 489 * Do we have another file with a pathname that is a proper
 490 * subset of the name we're trying to add?
 491 */
 492static int has_dir_name(const struct cache_entry *ce, int pos, int ok_to_replace)
 493{
 494        int retval = 0;
 495        int stage = ce_stage(ce);
 496        const char *name = ce->name;
 497        const char *slash = name + ce_namelen(ce);
 498
 499        for (;;) {
 500                int len;
 501
 502                for (;;) {
 503                        if (*--slash == '/')
 504                                break;
 505                        if (slash <= ce->name)
 506                                return retval;
 507                }
 508                len = slash - name;
 509
 510                pos = cache_name_pos(name, ntohs(create_ce_flags(len, stage)));
 511                if (pos >= 0) {
 512                        retval = -1;
 513                        if (ok_to_replace)
 514                                break;
 515                        remove_cache_entry_at(pos);
 516                        continue;
 517                }
 518
 519                /*
 520                 * Trivial optimization: if we find an entry that
 521                 * already matches the sub-directory, then we know
 522                 * we're ok, and we can exit.
 523                 */
 524                pos = -pos-1;
 525                while (pos < active_nr) {
 526                        struct cache_entry *p = active_cache[pos];
 527                        if ((ce_namelen(p) <= len) ||
 528                            (p->name[len] != '/') ||
 529                            memcmp(p->name, name, len))
 530                                break; /* not our subdirectory */
 531                        if (ce_stage(p) == stage)
 532                                /* p is at the same stage as our entry, and
 533                                 * is a subdirectory of what we are looking
 534                                 * at, so we cannot have conflicts at our
 535                                 * level or anything shorter.
 536                                 */
 537                                return retval;
 538                        pos++;
 539                }
 540        }
 541        return retval;
 542}
 543
 544/* We may be in a situation where we already have path/file and path
 545 * is being added, or we already have path and path/file is being
 546 * added.  Either one would result in a nonsense tree that has path
 547 * twice when git-write-tree tries to write it out.  Prevent it.
 548 * 
 549 * If ok-to-replace is specified, we remove the conflicting entries
 550 * from the cache so the caller should recompute the insert position.
 551 * When this happens, we return non-zero.
 552 */
 553static int check_file_directory_conflict(const struct cache_entry *ce, int pos, int ok_to_replace)
 554{
 555        /*
 556         * We check if the path is a sub-path of a subsequent pathname
 557         * first, since removing those will not change the position
 558         * in the array
 559         */
 560        int retval = has_file_name(ce, pos, ok_to_replace);
 561        /*
 562         * Then check if the path might have a clashing sub-directory
 563         * before it.
 564         */
 565        return retval + has_dir_name(ce, pos, ok_to_replace);
 566}
 567
/*
 * Insert "ce" into the active cache at its sorted position.  "option"
 * is a bitmask of ADD_CACHE_* flags: OK_TO_ADD permits new entries,
 * OK_TO_REPLACE permits evicting file/directory-conflicting entries,
 * SKIP_DFCHECK bypasses that conflict check.  Returns 0 on success,
 * -1 on refusal.
 */
int add_cache_entry(struct cache_entry *ce, int option)
{
        int pos;
        int ok_to_add = option & ADD_CACHE_OK_TO_ADD;
        int ok_to_replace = option & ADD_CACHE_OK_TO_REPLACE;
        int skip_df_check = option & ADD_CACHE_SKIP_DFCHECK;

        pos = cache_name_pos(ce->name, ntohs(ce->ce_flags));

        /* existing match? Just replace it. */
        if (pos >= 0) {
                active_cache_changed = 1;
                active_cache[pos] = ce;
                return 0;
        }
        pos = -pos-1;

        /*
         * Inserting a merged entry ("stage 0") into the index
         * will always replace all non-merged entries..
         */
        if (pos < active_nr && ce_stage(ce) == 0) {
                while (ce_same_name(active_cache[pos], ce)) {
                        ok_to_add = 1;
                        if (!remove_cache_entry_at(pos))
                                break;
                }
        }

        if (!ok_to_add)
                return -1;
        if (!verify_path(ce->name))
                return -1;

        if (!skip_df_check &&
            check_file_directory_conflict(ce, pos, ok_to_replace)) {
                if (!ok_to_replace)
                        return -1;
                /* Conflicting entries were removed; the insertion point
                 * may have shifted, so recompute it. */
                pos = cache_name_pos(ce->name, ntohs(ce->ce_flags));
                pos = -pos-1;
        }

        /* Make sure the array is big enough .. */
        if (active_nr == active_alloc) {
                active_alloc = alloc_nr(active_alloc);
                active_cache = xrealloc(active_cache, active_alloc * sizeof(struct cache_entry *));
        }

        /* Add it in.. */
        active_nr++;
        if (active_nr > pos)
                memmove(active_cache + pos + 1, active_cache + pos, (active_nr - pos - 1) * sizeof(ce));
        active_cache[pos] = ce;
        active_cache_changed = 1;
        return 0;
}
 624
 625/*
 626 * "refresh" does not calculate a new sha1 file or bring the
 627 * cache up-to-date for mode/content changes. But what it
 628 * _does_ do is to "re-match" the stat information of a file
 629 * with the cache, so that you can refresh the cache for a
 630 * file that hasn't been changed but where the stat entry is
 631 * out of date.
 632 *
 633 * For example, you'd want to do this after doing a "git-read-tree",
 634 * to link up the stat cache details with the proper files.
 635 */
/*
 * Re-check "ce" against the work tree.  Returns ce itself when it is
 * clean and needs no update, a freshly xmalloc()ed entry with updated
 * stat data when only the stat info was stale, or NULL with
 * cache_errno set (the lstat errno, or EINVAL for a modified file).
 */
struct cache_entry *refresh_cache_entry(struct cache_entry *ce, int really)
{
        struct stat st;
        struct cache_entry *updated;
        int changed, size;

        if (lstat(ce->name, &st) < 0) {
                cache_errno = errno;
                return NULL;
        }

        changed = ce_match_stat(ce, &st, really);
        if (!changed) {
                /* Even a clean entry falls through with --really-refresh
                 * when it should reacquire the CE_VALID bit. */
                if (really && assume_unchanged &&
                    !(ce->ce_flags & htons(CE_VALID)))
                        ; /* mark this one VALID again */
                else
                        return ce;
        }

        if (ce_modified(ce, &st, really)) {
                cache_errno = EINVAL;
                return NULL;
        }

        /* Only the stat data was stale: copy the entry with fresh info. */
        size = ce_size(ce);
        updated = xmalloc(size);
        memcpy(updated, ce, size);
        fill_stat_cache_info(updated, &st);

        /* In this case, if really is not set, we should leave
         * CE_VALID bit alone.  Otherwise, paths marked with
         * --no-assume-unchanged (i.e. things to be edited) will
         * reacquire CE_VALID bit automatically, which is not
         * really what we want.
         */
        if (!really && assume_unchanged && !(ce->ce_flags & htons(CE_VALID)))
                updated->ce_flags &= ~htons(CE_VALID);

        return updated;
}
 677
/*
 * Walk the whole active cache and refresh the stat information of
 * clean entries; REFRESH_* flags adjust reporting and behavior.
 * Returns non-zero when any entry needs merging or updating.
 */
int refresh_cache(unsigned int flags)
{
        int i;
        int has_errors = 0;
        int really = (flags & REFRESH_REALLY) != 0;
        int allow_unmerged = (flags & REFRESH_UNMERGED) != 0;
        int quiet = (flags & REFRESH_QUIET) != 0;
        int not_new = (flags & REFRESH_IGNORE_MISSING) != 0;

        for (i = 0; i < active_nr; i++) {
                struct cache_entry *ce, *new;
                ce = active_cache[i];
                if (ce_stage(ce)) {
                        /* Skip all stages of this unmerged path; leave i
                         * on the last one so the loop increment moves on. */
                        while ((i < active_nr) &&
                               ! strcmp(active_cache[i]->name, ce->name))
                                i++;
                        i--;
                        if (allow_unmerged)
                                continue;
                        printf("%s: needs merge\n", ce->name);
                        has_errors = 1;
                        continue;
                }

                new = refresh_cache_entry(ce, really);
                if (new == ce)
                        continue;
                if (!new) {
                        if (not_new && cache_errno == ENOENT)
                                continue;
                        if (really && cache_errno == EINVAL) {
                                /* If we are doing --really-refresh that
                                 * means the index is not valid anymore.
                                 */
                                ce->ce_flags &= ~htons(CE_VALID);
                                active_cache_changed = 1;
                        }
                        if (quiet)
                                continue;
                        printf("%s: needs update\n", ce->name);
                        has_errors = 1;
                        continue;
                }
                active_cache_changed = 1;
                /* You can NOT just free active_cache[i] here, since it
                 * might not be necessarily malloc()ed but can also come
                 * from mmap(). */
                active_cache[i] = new;
        }
        return has_errors;
}
 729
/* Validate an on-disk index: signature, version 2, and the trailing
 * SHA1 checksum.  Returns 0 when valid, error() (negative) otherwise. */
static int verify_hdr(struct cache_header *hdr, unsigned long size)
{
        SHA_CTX c;
        unsigned char sha1[20];

        if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
                return error("bad signature");
        if (hdr->hdr_version != htonl(2))
                return error("bad index version");
        /* The last 20 bytes of the file are the SHA1 of everything
         * before them; recompute and compare. */
        SHA1_Init(&c);
        SHA1_Update(&c, hdr, size - 20);
        SHA1_Final(sha1, &c);
        if (memcmp(sha1, (char *) hdr + size - 20, 20))
                return error("bad index file sha1 signature");
        return 0;
}
 746
 747static int read_index_extension(const char *ext, void *data, unsigned long sz)
 748{
 749        switch (CACHE_EXT(ext)) {
 750        case CACHE_EXT_TREE:
 751                active_cache_tree = cache_tree_read(data, sz);
 752                break;
 753        default:
 754                if (*ext < 'A' || 'Z' < *ext)
 755                        return error("index uses %.4s extension, which we do not understand",
 756                                     ext);
 757                fprintf(stderr, "ignoring %.4s extension\n", ext);
 758                break;
 759        }
 760        return 0;
 761}
 762
/* Read the index from the path returned by get_index_file(). */
int read_cache(void)
{
        return read_cache_from(get_index_file());
}
 767
/* remember to discard_cache() before reading a different cache!
 *
 * Maps the index file at "path" and populates active_cache with
 * pointers straight into the mapping (no copying).  Returns the number
 * of entries read; dies on a corrupt or unmappable index.
 */
int read_cache_from(const char *path)
{
        int fd, i;
        struct stat st;
        unsigned long offset;
        struct cache_header *hdr;

        /* Already loaded?  Reading again without discarding first is a
         * caller error; report the current entry count. */
        errno = EBUSY;
        if (cache_mmap)
                return active_nr;

        errno = ENOENT;
        index_file_timestamp = 0;
        fd = open(path, O_RDONLY);
        if (fd < 0) {
                /* A missing index simply means an empty cache. */
                if (errno == ENOENT)
                        return 0;
                die("index file open failed (%s)", strerror(errno));
        }

        cache_mmap = MAP_FAILED;
        if (!fstat(fd, &st)) {
                cache_mmap_size = st.st_size;
                errno = EINVAL;
                /* Must hold at least a header plus the trailing SHA1. */
                if (cache_mmap_size >= sizeof(struct cache_header) + 20)
                        cache_mmap = mmap(NULL, cache_mmap_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
        }
        close(fd);
        if (cache_mmap == MAP_FAILED)
                die("index file mmap failed (%s)", strerror(errno));

        hdr = cache_mmap;
        if (verify_hdr(hdr, cache_mmap_size) < 0)
                goto unmap;

        active_nr = ntohl(hdr->hdr_entries);
        active_alloc = alloc_nr(active_nr);
        active_cache = xcalloc(active_alloc, sizeof(struct cache_entry *));

        /* Entries follow the header back to back inside the mapping. */
        offset = sizeof(*hdr);
        for (i = 0; i < active_nr; i++) {
                struct cache_entry *ce = (struct cache_entry *) ((char *) cache_mmap + offset);
                offset = offset + ce_size(ce);
                active_cache[i] = ce;
        }
        index_file_timestamp = st.st_mtime;
        while (offset <= cache_mmap_size - 20 - 8) {
                /* After an array of active_nr index entries,
                 * there can be arbitrary number of extended
                 * sections, each of which is prefixed with
                 * extension name (4-byte) and section length
                 * in 4-byte network byte order.
                 */
                unsigned long extsize;
                memcpy(&extsize, (char *) cache_mmap + offset + 4, 4);
                extsize = ntohl(extsize);
                if (read_index_extension(((const char *) cache_mmap) + offset,
                                         (char *) cache_mmap + offset + 8,
                                         extsize) < 0)
                        goto unmap;
                offset += 8;
                offset += extsize;
        }
        return active_nr;

unmap:
        munmap(cache_mmap, cache_mmap_size);
        errno = EINVAL;
        die("index file corrupt");
}
 839
 840#define WRITE_BUFFER_SIZE 8192
 841static unsigned char write_buffer[WRITE_BUFFER_SIZE];
 842static unsigned long write_buffer_len;
 843
 844static int ce_write_flush(SHA_CTX *context, int fd)
 845{
 846        unsigned int buffered = write_buffer_len;
 847        if (buffered) {
 848                SHA1_Update(context, write_buffer, buffered);
 849                if (write(fd, write_buffer, buffered) != buffered)
 850                        return -1;
 851                write_buffer_len = 0;
 852        }
 853        return 0;
 854}
 855
 856static int ce_write(SHA_CTX *context, int fd, void *data, unsigned int len)
 857{
 858        while (len) {
 859                unsigned int buffered = write_buffer_len;
 860                unsigned int partial = WRITE_BUFFER_SIZE - buffered;
 861                if (partial > len)
 862                        partial = len;
 863                memcpy(write_buffer + buffered, data, partial);
 864                buffered += partial;
 865                if (buffered == WRITE_BUFFER_SIZE) {
 866                        write_buffer_len = buffered;
 867                        if (ce_write_flush(context, fd))
 868                                return -1;
 869                        buffered = 0;
 870                }
 871                write_buffer_len = buffered;
 872                len -= partial;
 873                data = (char *) data + partial;
 874        }
 875        return 0;
 876}
 877
 878static int write_index_ext_header(SHA_CTX *context, int fd,
 879                                  unsigned int ext, unsigned int sz)
 880{
 881        ext = htonl(ext);
 882        sz = htonl(sz);
 883        if ((ce_write(context, fd, &ext, 4) < 0) ||
 884            (ce_write(context, fd, &sz, 4) < 0))
 885                return -1;
 886        return 0;
 887}
 888
 889static int ce_flush(SHA_CTX *context, int fd)
 890{
 891        unsigned int left = write_buffer_len;
 892
 893        if (left) {
 894                write_buffer_len = 0;
 895                SHA1_Update(context, write_buffer, left);
 896        }
 897
 898        /* Flush first if not enough space for SHA1 signature */
 899        if (left + 20 > WRITE_BUFFER_SIZE) {
 900                if (write(fd, write_buffer, left) != left)
 901                        return -1;
 902                left = 0;
 903        }
 904
 905        /* Append the SHA1 signature at the end */
 906        SHA1_Final(write_buffer + left, context);
 907        left += 20;
 908        if (write(fd, write_buffer, left) != left)
 909                return -1;
 910        return 0;
 911}
 912
 913static void ce_smudge_racily_clean_entry(struct cache_entry *ce)
 914{
 915        /*
 916         * The only thing we care about in this function is to smudge the
 917         * falsely clean entry due to touch-update-touch race, so we leave
 918         * everything else as they are.  We are called for entries whose
 919         * ce_mtime match the index file mtime.
 920         */
 921        struct stat st;
 922
 923        if (lstat(ce->name, &st) < 0)
 924                return;
 925        if (ce_match_stat_basic(ce, &st))
 926                return;
 927        if (ce_modified_check_fs(ce, &st)) {
 928                /* This is "racily clean"; smudge it.  Note that this
 929                 * is a tricky code.  At first glance, it may appear
 930                 * that it can break with this sequence:
 931                 *
 932                 * $ echo xyzzy >frotz
 933                 * $ git-update-index --add frotz
 934                 * $ : >frotz
 935                 * $ sleep 3
 936                 * $ echo filfre >nitfol
 937                 * $ git-update-index --add nitfol
 938                 *
 939                 * but it does not.  When the second update-index runs,
 940                 * it notices that the entry "frotz" has the same timestamp
 941                 * as index, and if we were to smudge it by resetting its
 942                 * size to zero here, then the object name recorded
 943                 * in index is the 6-byte file but the cached stat information
 944                 * becomes zero --- which would then match what we would
 945                 * obtain from the filesystem next time we stat("frotz"). 
 946                 *
 947                 * However, the second update-index, before calling
 948                 * this function, notices that the cached size is 6
 949                 * bytes and what is on the filesystem is an empty
 950                 * file, and never calls us, so the cached size information
 951                 * for "frotz" stays 6 which does not match the filesystem.
 952                 */
 953                ce->ce_size = htonl(0);
 954        }
 955}
 956
 957int write_cache(int newfd, struct cache_entry **cache, int entries)
 958{
 959        SHA_CTX c;
 960        struct cache_header hdr;
 961        int i, removed, recent;
 962        struct stat st;
 963        time_t now;
 964
 965        for (i = removed = 0; i < entries; i++)
 966                if (!cache[i]->ce_mode)
 967                        removed++;
 968
 969        hdr.hdr_signature = htonl(CACHE_SIGNATURE);
 970        hdr.hdr_version = htonl(2);
 971        hdr.hdr_entries = htonl(entries - removed);
 972
 973        SHA1_Init(&c);
 974        if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
 975                return -1;
 976
 977        for (i = 0; i < entries; i++) {
 978                struct cache_entry *ce = cache[i];
 979                if (!ce->ce_mode)
 980                        continue;
 981                if (index_file_timestamp &&
 982                    index_file_timestamp <= ntohl(ce->ce_mtime.sec))
 983                        ce_smudge_racily_clean_entry(ce);
 984                if (ce_write(&c, newfd, ce, ce_size(ce)) < 0)
 985                        return -1;
 986        }
 987
 988        /* Write extension data here */
 989        if (active_cache_tree) {
 990                unsigned long sz;
 991                void *data = cache_tree_write(active_cache_tree, &sz);
 992                if (data &&
 993                    !write_index_ext_header(&c, newfd, CACHE_EXT_TREE, sz) &&
 994                    !ce_write(&c, newfd, data, sz))
 995                        ;
 996                else {
 997                        free(data);
 998                        return -1;
 999                }
1000        }
1001
1002        /*
1003         * To prevent later ce_match_stat() from always falling into
1004         * check_fs(), if we have too many entries that can trigger
1005         * racily clean check, we are better off delaying the return.
1006         * We arbitrarily say if more than 20 paths or 25% of total
1007         * paths are very new, we delay the return until the index
1008         * file gets a new timestamp.
1009         *
1010         * NOTE! NOTE! NOTE!
1011         *
1012         * This assumes that nobody is touching the working tree while
1013         * we are updating the index.
1014         */
1015
1016        /* Make sure that the new index file has st_mtime
1017         * that is current enough -- ce_write() batches the data
1018         * so it might not have written anything yet.
1019         */
1020        ce_write_flush(&c, newfd);
1021
1022        now = fstat(newfd, &st) ? 0 : st.st_mtime;
1023        if (now) {
1024                recent = 0;
1025                for (i = 0; i < entries; i++) {
1026                        struct cache_entry *ce = cache[i];
1027                        time_t entry_time = (time_t) ntohl(ce->ce_mtime.sec);
1028                        if (!ce->ce_mode)
1029                                continue;
1030                        if (now && now <= entry_time)
1031                                recent++;
1032                }
1033                if (20 < recent && entries <= recent * 4) {
1034#if 0
1035                        fprintf(stderr, "entries    %d\n", entries);
1036                        fprintf(stderr, "recent     %d\n", recent);
1037                        fprintf(stderr, "now        %lu\n", now);
1038#endif
1039                        while (!fstat(newfd, &st) && st.st_mtime <= now) {
1040                                struct timespec rq, rm;
1041                                off_t where = lseek(newfd, 0, SEEK_CUR);
1042                                rq.tv_sec = 0;
1043                                rq.tv_nsec = 250000000;
1044                                nanosleep(&rq, &rm);
1045                                if ((where == (off_t) -1) ||
1046                                    (write(newfd, "", 1) != 1) ||
1047                                    (lseek(newfd, -1, SEEK_CUR) != where) ||
1048                                    ftruncate(newfd, where))
1049                                        break;
1050                        }
1051                }
1052        }
1053        return ce_flush(&c, newfd);
1054}