/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */
#include "cache.h"
#include "cache-tree.h"

/* Index extensions.
 *
 * The first letter should be 'A'..'Z' for extensions that are not
 * necessary for a correct operation (i.e. optimization data).
 * When new extensions are added that _need_ to be understood in
 * order to correctly interpret the index file, pick a character that
 * is outside the range, to cause the reader to abort.
 */

#define CACHE_EXT(s) ( (s[0]<<24)|(s[1]<<16)|(s[2]<<8)|(s[3]) )
#define CACHE_EXT_TREE 0x54524545	/* "TREE" */

struct cache_entry **active_cache = NULL;
static time_t index_file_timestamp;
unsigned int active_nr = 0, active_alloc = 0, active_cache_changed = 0;

struct cache_tree *active_cache_tree = NULL;

/*
 * This only updates the "non-critical" parts of the directory
 * cache, i.e. the parts that aren't tracked by GIT, and are only
 * used to validate the cache.
 */
void fill_stat_cache_info(struct cache_entry *ce, struct stat *st)
{
	ce->ce_ctime.sec = htonl(st->st_ctime);
	ce->ce_mtime.sec = htonl(st->st_mtime);
#ifdef USE_NSEC
	ce->ce_ctime.nsec = htonl(st->st_ctim.tv_nsec);
	ce->ce_mtime.nsec = htonl(st->st_mtim.tv_nsec);
#endif
	ce->ce_dev = htonl(st->st_dev);
	ce->ce_ino = htonl(st->st_ino);
	ce->ce_uid = htonl(st->st_uid);
	ce->ce_gid = htonl(st->st_gid);
	ce->ce_size = htonl(st->st_size);

	if (assume_unchanged)
		ce->ce_flags |= htons(CE_VALID);
}

static int ce_compare_data(struct cache_entry *ce, struct stat *st)
{
	int match = -1;
	int fd = open(ce->name, O_RDONLY);

	if (fd >= 0) {
		unsigned char sha1[20];
		if (!index_fd(sha1, fd, st, 0, NULL))
			match = memcmp(sha1, ce->sha1, 20);
		close(fd);
	}
	return match;
}

static int ce_compare_link(struct cache_entry *ce, unsigned long expected_size)
{
	int match = -1;
	char *target;
	void *buffer;
	unsigned long size;
	char type[10];
	int len;

	target = xmalloc(expected_size);
	len = readlink(ce->name, target, expected_size);
	if (len != expected_size) {
		free(target);
		return -1;
	}
	buffer = read_sha1_file(ce->sha1, type, &size);
	if (!buffer) {
		free(target);
		return -1;
	}
	if (size == expected_size)
		match = memcmp(buffer, target, size);
	free(buffer);
	free(target);
	return match;
}

static int ce_modified_check_fs(struct cache_entry *ce, struct stat *st)
{
	switch (st->st_mode & S_IFMT) {
	case S_IFREG:
		if (ce_compare_data(ce, st))
			return DATA_CHANGED;
		break;
	case S_IFLNK:
		if (ce_compare_link(ce, st->st_size))
			return DATA_CHANGED;
		break;
	default:
		return TYPE_CHANGED;
	}
	return 0;
}

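/*
 * Compare the stat information cached in "ce" against "st" and
 * return a bitmask of *_CHANGED flags; zero means the cached data
 * still matches the file in the working tree.
 */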
static int ce_match_stat_basic(struct cache_entry *ce, struct stat *st)
{
	unsigned int changed = 0;

	switch (ntohl(ce->ce_mode) & S_IFMT) {
	case S_IFREG:
		changed |= !S_ISREG(st->st_mode) ? TYPE_CHANGED : 0;
		/* We consider only the owner x bit to be relevant for
		 * "mode changes"
		 */
		if (trust_executable_bit &&
		    (0100 & (ntohl(ce->ce_mode) ^ st->st_mode)))
			changed |= MODE_CHANGED;
		break;
	case S_IFLNK:
		changed |= !S_ISLNK(st->st_mode) ? TYPE_CHANGED : 0;
		break;
	default:
		die("internal error: ce_mode is %o", ntohl(ce->ce_mode));
	}
	if (ce->ce_mtime.sec != htonl(st->st_mtime))
		changed |= MTIME_CHANGED;
	if (ce->ce_ctime.sec != htonl(st->st_ctime))
		changed |= CTIME_CHANGED;

#ifdef USE_NSEC
	/*
	 * nsec seems unreliable - not all filesystems support it, so
	 * as long as it is in the inode cache you get the right nsec,
	 * but after it gets flushed, you get zero nsec.
	 */
	if (ce->ce_mtime.nsec != htonl(st->st_mtim.tv_nsec))
		changed |= MTIME_CHANGED;
	if (ce->ce_ctime.nsec != htonl(st->st_ctim.tv_nsec))
		changed |= CTIME_CHANGED;
#endif

	if (ce->ce_uid != htonl(st->st_uid) ||
	    ce->ce_gid != htonl(st->st_gid))
		changed |= OWNER_CHANGED;
	if (ce->ce_ino != htonl(st->st_ino))
		changed |= INODE_CHANGED;

#ifdef USE_STDEV
	/*
	 * st_dev breaks on network filesystems where different
	 * clients will have different views of what "device"
	 * the filesystem is on
	 */
	if (ce->ce_dev != htonl(st->st_dev))
		changed |= INODE_CHANGED;
#endif

	if (ce->ce_size != htonl(st->st_size))
		changed |= DATA_CHANGED;

	return changed;
}

int ce_match_stat(struct cache_entry *ce, struct stat *st, int ignore_valid)
{
	unsigned int changed;

	/*
	 * If it's marked as always valid in the index, it's
	 * valid whatever the checked-out copy says.
	 */
	if (!ignore_valid && (ce->ce_flags & htons(CE_VALID)))
		return 0;

	changed = ce_match_stat_basic(ce, st);

	/*
	 * Within 1 second of this sequence:
	 *	echo xyzzy >file && git-update-index --add file
	 * running this command:
	 *	echo frotz >file
	 * would give a falsely clean cache entry.  The mtime and
	 * length match the cache, and other stat fields do not change.
	 *
	 * We could detect this at update-index time (the cache entry
	 * being registered/updated records the same time as "now")
	 * and delay the return from git-update-index, but that would
	 * effectively mean we can make at most one commit per second,
	 * which is not acceptable.  Instead, we check cache entries
	 * whose mtime is the same as the index file timestamp more
	 * carefully than others.
	 */
	if (!changed &&
	    index_file_timestamp &&
	    index_file_timestamp <= ntohl(ce->ce_mtime.sec))
		changed |= ce_modified_check_fs(ce, st);

	return changed;
}

int ce_modified(struct cache_entry *ce, struct stat *st, int really)
{
	int changed, changed_fs;
	changed = ce_match_stat(ce, st, really);
	if (!changed)
		return 0;
	/*
	 * If the mode or type has changed, there's no point in trying
	 * to refresh the entry - it's not going to match
	 */
	if (changed & (MODE_CHANGED | TYPE_CHANGED))
		return changed;

	/* Immediately after read-tree or update-index --cacheinfo,
	 * the length field is zero.  For other cases the ce_size
	 * should match the SHA1 recorded in the index entry.
	 */
	if ((changed & DATA_CHANGED) && ce->ce_size != htonl(0))
		return changed;

	changed_fs = ce_modified_check_fs(ce, st);
	if (changed_fs)
		return changed | changed_fs;
	return 0;
}

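/*
 * Compare two path components the way tree objects order them: a
 * directory compares as if its name had a trailing '/', so "foo"
 * (a directory) sorts after "foo.c" but before "foo0".
 */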
int base_name_compare(const char *name1, int len1, int mode1,
		      const char *name2, int len2, int mode2)
{
	unsigned char c1, c2;
	int len = len1 < len2 ? len1 : len2;
	int cmp;

	cmp = memcmp(name1, name2, len);
	if (cmp)
		return cmp;
	c1 = name1[len];
	c2 = name2[len];
	if (!c1 && S_ISDIR(mode1))
		c1 = '/';
	if (!c2 && S_ISDIR(mode2))
		c2 = '/';
	return (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0;
}

int cache_name_compare(const char *name1, int flags1, const char *name2, int flags2)
{
	int len1 = flags1 & CE_NAMEMASK;
	int len2 = flags2 & CE_NAMEMASK;
	int len = len1 < len2 ? len1 : len2;
	int cmp;

	cmp = memcmp(name1, name2, len);
	if (cmp)
		return cmp;
	if (len1 < len2)
		return -1;
	if (len1 > len2)
		return 1;

	/* Compare stages */
	flags1 &= CE_STAGEMASK;
	flags2 &= CE_STAGEMASK;

	if (flags1 < flags2)
		return -1;
	if (flags1 > flags2)
		return 1;
	return 0;
}

int cache_name_pos(const char *name, int namelen)
{
	int first, last;

	first = 0;
	last = active_nr;
	while (last > first) {
		int next = (last + first) >> 1;
		struct cache_entry *ce = active_cache[next];
		int cmp = cache_name_compare(name, namelen, ce->name, ntohs(ce->ce_flags));
		if (!cmp)
			return next;
		if (cmp < 0) {
			last = next;
			continue;
		}
		first = next+1;
	}
	return -first-1;
}

/* Remove entry, return true if there are more entries to go.. */
int remove_cache_entry_at(int pos)
{
	active_cache_changed = 1;
	active_nr--;
	if (pos >= active_nr)
		return 0;
	memmove(active_cache + pos, active_cache + pos + 1, (active_nr - pos) * sizeof(struct cache_entry *));
	return 1;
}

int remove_file_from_cache(const char *path)
{
	int pos = cache_name_pos(path, strlen(path));
	if (pos < 0)
		pos = -pos-1;
	while (pos < active_nr && !strcmp(active_cache[pos]->name, path))
		remove_cache_entry_at(pos);
	return 0;
}

int ce_same_name(struct cache_entry *a, struct cache_entry *b)
{
	int len = ce_namelen(a);
	return ce_namelen(b) == len && !memcmp(a->name, b->name, len);
}

int ce_path_match(const struct cache_entry *ce, const char **pathspec)
{
	const char *match, *name;
	int len;

	if (!pathspec)
		return 1;

	len = ce_namelen(ce);
	name = ce->name;
	while ((match = *pathspec++) != NULL) {
		int matchlen = strlen(match);
		if (matchlen > len)
			continue;
		if (memcmp(name, match, matchlen))
			continue;
		if (matchlen && name[matchlen-1] == '/')
			return 1;
		if (name[matchlen] == '/' || !name[matchlen])
			return 1;
		if (!matchlen)
			return 1;
	}
	return 0;
}

/*
 * Do we have another file that has the beginning components being a
 * proper superset of the name we're trying to add?
 */
static int has_file_name(const struct cache_entry *ce, int pos, int ok_to_replace)
{
	int retval = 0;
	int len = ce_namelen(ce);
	int stage = ce_stage(ce);
	const char *name = ce->name;

	while (pos < active_nr) {
		struct cache_entry *p = active_cache[pos++];

		if (len >= ce_namelen(p))
			break;
		if (memcmp(name, p->name, len))
			break;
		if (ce_stage(p) != stage)
			continue;
		if (p->name[len] != '/')
			continue;
		retval = -1;
		if (!ok_to_replace)
			break;
		remove_cache_entry_at(--pos);
	}
	return retval;
}

/*
 * Do we have another file with a pathname that is a proper
 * subset of the name we're trying to add?
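 * (For example, adding "path/file" when a file entry "path" is
 * already in the index.)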
 */
static int has_dir_name(const struct cache_entry *ce, int pos, int ok_to_replace)
{
	int retval = 0;
	int stage = ce_stage(ce);
	const char *name = ce->name;
	const char *slash = name + ce_namelen(ce);

	for (;;) {
		int len;

		for (;;) {
			if (*--slash == '/')
				break;
			if (slash <= ce->name)
				return retval;
		}
		len = slash - name;

		pos = cache_name_pos(name, ntohs(create_ce_flags(len, stage)));
		if (pos >= 0) {
			retval = -1;
			if (!ok_to_replace)
				break;
			remove_cache_entry_at(pos);
			continue;
		}

		/*
		 * Trivial optimization: if we find an entry that
		 * already matches the sub-directory, then we know
		 * we're ok, and we can exit.
		 */
		pos = -pos-1;
		while (pos < active_nr) {
			struct cache_entry *p = active_cache[pos];
			if ((ce_namelen(p) <= len) ||
			    (p->name[len] != '/') ||
			    memcmp(p->name, name, len))
				break; /* not our subdirectory */
			if (ce_stage(p) == stage)
				/* p is at the same stage as our entry, and
				 * is a subdirectory of what we are looking
				 * at, so we cannot have conflicts at our
				 * level or anything shorter.
				 */
				return retval;
			pos++;
		}
	}
	return retval;
}

/* We may be in a situation where we already have path/file and path
 * is being added, or we already have path and path/file is being
 * added.  Either one would result in a nonsense tree that has path
 * twice when git-write-tree tries to write it out.  Prevent it.
 *
 * If ok-to-replace is specified, we remove the conflicting entries
 * from the cache so the caller should recompute the insert position.
 * When this happens, we return non-zero.
 */
static int check_file_directory_conflict(const struct cache_entry *ce, int pos, int ok_to_replace)
{
	/*
	 * We check if the path is a sub-path of a subsequent pathname
	 * first, since removing those will not change the position
	 * in the array.
	 */
	int retval = has_file_name(ce, pos, ok_to_replace);
	/*
	 * Then check if the path might have a clashing sub-directory
	 * before it.
	 */
	return retval + has_dir_name(ce, pos, ok_to_replace);
}

int add_cache_entry(struct cache_entry *ce, int option)
{
	int pos;
	int ok_to_add = option & ADD_CACHE_OK_TO_ADD;
	int ok_to_replace = option & ADD_CACHE_OK_TO_REPLACE;
	int skip_df_check = option & ADD_CACHE_SKIP_DFCHECK;

	pos = cache_name_pos(ce->name, ntohs(ce->ce_flags));

	/* existing match? Just replace it. */
	if (pos >= 0) {
		active_cache_changed = 1;
		active_cache[pos] = ce;
		return 0;
	}
	pos = -pos-1;

	/*
	 * Inserting a merged entry ("stage 0") into the index
	 * will always replace all non-merged entries..
	 */
	if (pos < active_nr && ce_stage(ce) == 0) {
		while (ce_same_name(active_cache[pos], ce)) {
			ok_to_add = 1;
			if (!remove_cache_entry_at(pos))
				break;
		}
	}

	if (!ok_to_add)
		return -1;

	if (!skip_df_check &&
	    check_file_directory_conflict(ce, pos, ok_to_replace)) {
		if (!ok_to_replace)
			return -1;
		pos = cache_name_pos(ce->name, ntohs(ce->ce_flags));
		pos = -pos-1;
	}

	/* Make sure the array is big enough .. */
	if (active_nr == active_alloc) {
		active_alloc = alloc_nr(active_alloc);
		active_cache = xrealloc(active_cache, active_alloc * sizeof(struct cache_entry *));
	}

	/* Add it in.. */
	active_nr++;
	if (active_nr > pos)
		memmove(active_cache + pos + 1, active_cache + pos, (active_nr - pos - 1) * sizeof(ce));
	active_cache[pos] = ce;
	active_cache_changed = 1;
	return 0;
}

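/*
 * A sketch of typical add_cache_entry() use (hypothetical caller,
 * for illustration only): a command that has prepared a cache_entry
 * "ce" for a working tree file would do something like
 *
 *	if (add_cache_entry(ce, ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE))
 *		return error("unable to add %s to the index", ce->name);
 *
 * ADD_CACHE_SKIP_DFCHECK skips the file/directory conflict check
 * above and is meant for callers that already know their entries
 * cannot conflict.
 */
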
static int verify_hdr(struct cache_header *hdr, unsigned long size)
{
	SHA_CTX c;
	unsigned char sha1[20];

	if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
		return error("bad signature");
	if (hdr->hdr_version != htonl(2))
		return error("bad index version");
	SHA1_Init(&c);
	SHA1_Update(&c, hdr, size - 20);
	SHA1_Final(sha1, &c);
	if (memcmp(sha1, (void *)hdr + size - 20, 20))
		return error("bad index file sha1 signature");
	return 0;
}

static int read_index_extension(const char *ext, void *data, unsigned long sz)
{
	switch (CACHE_EXT(ext)) {
	case CACHE_EXT_TREE:
		active_cache_tree = cache_tree_read(data, sz);
		break;
	default:
		if (*ext < 'A' || 'Z' < *ext)
			return error("index uses %.4s extension, which we do not understand",
				     ext);
		fprintf(stderr, "ignoring %.4s extension\n", ext);
		break;
	}
	return 0;
}

int read_cache(void)
{
	int fd, i;
	struct stat st;
	unsigned long size, offset;
	void *map;
	struct cache_header *hdr;

	errno = EBUSY;
	if (active_cache)
		return active_nr;

	errno = ENOENT;
	index_file_timestamp = 0;
	fd = open(get_index_file(), O_RDONLY);
	if (fd < 0) {
		if (errno == ENOENT)
			return 0;
		die("index file open failed (%s)", strerror(errno));
	}

	size = 0; /* avoid gcc warning */
	map = MAP_FAILED;
	if (!fstat(fd, &st)) {
		size = st.st_size;
		errno = EINVAL;
		if (size >= sizeof(struct cache_header) + 20)
			map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	}
	close(fd);
	if (map == MAP_FAILED)
		die("index file mmap failed (%s)", strerror(errno));

	hdr = map;
	if (verify_hdr(hdr, size) < 0)
		goto unmap;

	active_nr = ntohl(hdr->hdr_entries);
	active_alloc = alloc_nr(active_nr);
	active_cache = calloc(active_alloc, sizeof(struct cache_entry *));

	offset = sizeof(*hdr);
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = map + offset;
		offset = offset + ce_size(ce);
		active_cache[i] = ce;
	}
	index_file_timestamp = st.st_mtime;
	while (offset <= size - 20 - 8) {
		/* After an array of active_nr index entries,
		 * there can be an arbitrary number of extended
		 * sections, each of which is prefixed with
		 * extension name (4-byte) and section length
		 * in 4-byte network byte order.
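		 *
		 * For example, the cache-tree data written by
		 * write_cache() below shows up here as the four
		 * bytes "TREE", a 4-byte length, and then the
		 * serialized cache-tree data itself.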
		 */
		unsigned long extsize;
		memcpy(&extsize, map + offset + 4, 4);
		extsize = ntohl(extsize);
		if (read_index_extension(map + offset,
					 map + offset + 8, extsize) < 0)
			goto unmap;
		offset += 8;
		offset += extsize;
	}
	return active_nr;

unmap:
	munmap(map, size);
	errno = EINVAL;
	die("index file corrupt");
}

#define WRITE_BUFFER_SIZE 8192
static unsigned char write_buffer[WRITE_BUFFER_SIZE];
static unsigned long write_buffer_len;

static int ce_write(SHA_CTX *context, int fd, void *data, unsigned int len)
{
	while (len) {
		unsigned int buffered = write_buffer_len;
		unsigned int partial = WRITE_BUFFER_SIZE - buffered;
		if (partial > len)
			partial = len;
		memcpy(write_buffer + buffered, data, partial);
		buffered += partial;
		if (buffered == WRITE_BUFFER_SIZE) {
			SHA1_Update(context, write_buffer, WRITE_BUFFER_SIZE);
			if (write(fd, write_buffer, WRITE_BUFFER_SIZE) != WRITE_BUFFER_SIZE)
				return -1;
			buffered = 0;
		}
		write_buffer_len = buffered;
		len -= partial;
		data += partial;
	}
	return 0;
}

static int write_index_ext_header(SHA_CTX *context, int fd,
				  unsigned long ext, unsigned long sz)
{
	ext = htonl(ext);
	sz = htonl(sz);
	if ((ce_write(context, fd, &ext, 4) < 0) ||
	    (ce_write(context, fd, &sz, 4) < 0))
		return -1;
	return 0;
}

static int ce_flush(SHA_CTX *context, int fd)
{
	unsigned int left = write_buffer_len;

	if (left) {
		write_buffer_len = 0;
		SHA1_Update(context, write_buffer, left);
	}

	/* Flush first if not enough space for SHA1 signature */
	if (left + 20 > WRITE_BUFFER_SIZE) {
		if (write(fd, write_buffer, left) != left)
			return -1;
		left = 0;
	}

	/* Append the SHA1 signature at the end */
	SHA1_Final(write_buffer + left, context);
	left += 20;
	if (write(fd, write_buffer, left) != left)
		return -1;
	return 0;
}

static void ce_smudge_racily_clean_entry(struct cache_entry *ce)
{
	/*
	 * The only thing we care about in this function is to smudge the
	 * falsely clean entry due to touch-update-touch race, so we leave
	 * everything else as it is.  We are called for entries whose
	 * ce_mtime match the index file mtime.
	 */
	struct stat st;

	if (lstat(ce->name, &st) < 0)
		return;
	if (ce_match_stat_basic(ce, &st))
		return;
	if (ce_modified_check_fs(ce, &st)) {
		/* This is "racily clean"; smudge it.  Note that this
		 * is tricky code.  At first glance, it may appear
		 * that it can break with this sequence:
		 *
		 * $ echo xyzzy >frotz
		 * $ git-update-index --add frotz
		 * $ : >frotz
		 * $ sleep 3
		 * $ echo filfre >nitfol
		 * $ git-update-index --add nitfol
		 *
		 * but it does not.  When the second update-index runs,
		 * it notices that the entry "frotz" has the same timestamp
		 * as the index, and if we were to smudge it by resetting
		 * its size to zero here, then the object name recorded
		 * in the index is the 6-byte file but the cached stat
		 * information becomes zero --- which would then match what
		 * we would obtain from the filesystem next time we
		 * stat("frotz").
		 *
		 * However, the second update-index, before calling
		 * this function, notices that the cached size is 6
		 * bytes and what is on the filesystem is an empty
		 * file, and never calls us, so the cached size information
		 * for "frotz" stays 6 which does not match the filesystem.
		 */
		ce->ce_size = htonl(0);
	}
}

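/*
 * Write the index out to "newfd".  Entries whose ce_mode is zero are
 * taken as removed and are skipped, racily clean entries are smudged
 * first (see above), and ce_flush() appends the SHA1 over the whole
 * file at the end.
 */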
int write_cache(int newfd, struct cache_entry **cache, int entries)
{
	SHA_CTX c;
	struct cache_header hdr;
	int i, removed;

	for (i = removed = 0; i < entries; i++)
		if (!cache[i]->ce_mode)
			removed++;

	hdr.hdr_signature = htonl(CACHE_SIGNATURE);
	hdr.hdr_version = htonl(2);
	hdr.hdr_entries = htonl(entries - removed);

	SHA1_Init(&c);
	if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
		return -1;

	for (i = 0; i < entries; i++) {
		struct cache_entry *ce = cache[i];
		if (!ce->ce_mode)
			continue;
		if (index_file_timestamp &&
		    index_file_timestamp <= ntohl(ce->ce_mtime.sec))
			ce_smudge_racily_clean_entry(ce);
		if (ce_write(&c, newfd, ce, ce_size(ce)) < 0)
			return -1;
	}

	/* Write extension data here */
	if (active_cache_tree) {
		unsigned long sz;
		void *data = cache_tree_write(active_cache_tree, &sz);
		if (data &&
		    !write_index_ext_header(&c, newfd, CACHE_EXT_TREE, sz) &&
		    !ce_write(&c, newfd, data, sz))
			;
		else {
			free(data);
			return -1;
		}
	}
	return ce_flush(&c, newfd);
}