1/*
2 * GIT - The information manager from hell
3 *
4 * Copyright (C) Linus Torvalds, 2005
5 */
6#include "cache.h"
7#include "cache-tree.h"
8
9/* Index extensions.
10 *
11 * The first letter should be 'A'..'Z' for extensions that are not
12 * necessary for a correct operation (i.e. optimization data).
 * When new extensions are added that _need_ to be understood in
14 * order to correctly interpret the index file, pick character that
15 * is outside the range, to cause the reader to abort.
16 */
17
/*
 * Fold the first four bytes of an extension name into a big-endian
 * 32-bit tag so it can be compared against the CACHE_EXT_* constants.
 * The argument is fully parenthesized so the macro also works with
 * pointer expressions (e.g. CACHE_EXT(p + 1)), not just identifiers.
 */
#define CACHE_EXT(s) ( ((s)[0]<<24)|((s)[1]<<16)|((s)[2]<<8)|((s)[3]) )
#define CACHE_EXT_TREE 0x54524545	/* "TREE" */
20
/* The in-core index: a sorted array of pointers to cache entries. */
struct cache_entry **active_cache = NULL;
/* mtime of the index file when last read; used for racy-clean detection */
static time_t index_file_timestamp;
/* entries in use, allocated slots, and a "needs writing back" flag */
unsigned int active_nr = 0, active_alloc = 0, active_cache_changed = 0;

/* Cache-tree data read from / written to the "TREE" index extension. */
struct cache_tree *active_cache_tree = NULL;
26
27/*
28 * This only updates the "non-critical" parts of the directory
29 * cache, ie the parts that aren't tracked by GIT, and only used
30 * to validate the cache.
31 */
32void fill_stat_cache_info(struct cache_entry *ce, struct stat *st)
33{
34 ce->ce_ctime.sec = htonl(st->st_ctime);
35 ce->ce_mtime.sec = htonl(st->st_mtime);
36#ifdef USE_NSEC
37 ce->ce_ctime.nsec = htonl(st->st_ctim.tv_nsec);
38 ce->ce_mtime.nsec = htonl(st->st_mtim.tv_nsec);
39#endif
40 ce->ce_dev = htonl(st->st_dev);
41 ce->ce_ino = htonl(st->st_ino);
42 ce->ce_uid = htonl(st->st_uid);
43 ce->ce_gid = htonl(st->st_gid);
44 ce->ce_size = htonl(st->st_size);
45
46 if (assume_unchanged)
47 ce->ce_flags |= htons(CE_VALID);
48}
49
50static int ce_compare_data(struct cache_entry *ce, struct stat *st)
51{
52 int match = -1;
53 int fd = open(ce->name, O_RDONLY);
54
55 if (fd >= 0) {
56 unsigned char sha1[20];
57 if (!index_fd(sha1, fd, st, 0, NULL))
58 match = memcmp(sha1, ce->sha1, 20);
59 close(fd);
60 }
61 return match;
62}
63
64static int ce_compare_link(struct cache_entry *ce, unsigned long expected_size)
65{
66 int match = -1;
67 char *target;
68 void *buffer;
69 unsigned long size;
70 char type[10];
71 int len;
72
73 target = xmalloc(expected_size);
74 len = readlink(ce->name, target, expected_size);
75 if (len != expected_size) {
76 free(target);
77 return -1;
78 }
79 buffer = read_sha1_file(ce->sha1, type, &size);
80 if (!buffer) {
81 free(target);
82 return -1;
83 }
84 if (size == expected_size)
85 match = memcmp(buffer, target, size);
86 free(buffer);
87 free(target);
88 return match;
89}
90
91static int ce_modified_check_fs(struct cache_entry *ce, struct stat *st)
92{
93 switch (st->st_mode & S_IFMT) {
94 case S_IFREG:
95 if (ce_compare_data(ce, st))
96 return DATA_CHANGED;
97 break;
98 case S_IFLNK:
99 if (ce_compare_link(ce, st->st_size))
100 return DATA_CHANGED;
101 break;
102 default:
103 return TYPE_CHANGED;
104 }
105 return 0;
106}
107
/*
 * Compare the cached stat information in "ce" against a fresh
 * lstat() result.  Returns a bitmask of *_CHANGED flags; 0 means
 * the stat data still matches the index.  Note that the ce_* fields
 * are stored in network byte order, so the fresh values are
 * converted with htonl() before comparing.
 */
static int ce_match_stat_basic(struct cache_entry *ce, struct stat *st)
{
	unsigned int changed = 0;

	switch (ntohl(ce->ce_mode) & S_IFMT) {
	case S_IFREG:
		changed |= !S_ISREG(st->st_mode) ? TYPE_CHANGED : 0;
		/* We consider only the owner x bit to be relevant for
		 * "mode changes"
		 */
		if (trust_executable_bit &&
		    (0100 & (ntohl(ce->ce_mode) ^ st->st_mode)))
			changed |= MODE_CHANGED;
		break;
	case S_IFLNK:
		changed |= !S_ISLNK(st->st_mode) ? TYPE_CHANGED : 0;
		break;
	default:
		/* only blobs and symlinks are ever stored in the index */
		die("internal error: ce_mode is %o", ntohl(ce->ce_mode));
	}
	if (ce->ce_mtime.sec != htonl(st->st_mtime))
		changed |= MTIME_CHANGED;
	if (ce->ce_ctime.sec != htonl(st->st_ctime))
		changed |= CTIME_CHANGED;

#ifdef USE_NSEC
	/*
	 * nsec seems unreliable - not all filesystems support it, so
	 * as long as it is in the inode cache you get right nsec
	 * but after it gets flushed, you get zero nsec.
	 */
	if (ce->ce_mtime.nsec != htonl(st->st_mtim.tv_nsec))
		changed |= MTIME_CHANGED;
	if (ce->ce_ctime.nsec != htonl(st->st_ctim.tv_nsec))
		changed |= CTIME_CHANGED;
#endif

	if (ce->ce_uid != htonl(st->st_uid) ||
	    ce->ce_gid != htonl(st->st_gid))
		changed |= OWNER_CHANGED;
	if (ce->ce_ino != htonl(st->st_ino))
		changed |= INODE_CHANGED;

#ifdef USE_STDEV
	/*
	 * st_dev breaks on network filesystems where different
	 * clients will have different views of what "device"
	 * the filesystem is on
	 */
	if (ce->ce_dev != htonl(st->st_dev))
		changed |= INODE_CHANGED;
#endif

	if (ce->ce_size != htonl(st->st_size))
		changed |= DATA_CHANGED;

	return changed;
}
166
/*
 * Return a bitmask of *_CHANGED flags describing how the work tree
 * file differs from the cached stat data; 0 means "clean".  With
 * ignore_valid set, the CE_VALID (assume-unchanged) bit is not
 * honored and the real comparison is always done.
 */
int ce_match_stat(struct cache_entry *ce, struct stat *st, int ignore_valid)
{
	unsigned int changed;

	/*
	 * If it's marked as always valid in the index, it's
	 * valid whatever the checked-out copy says.
	 */
	if (!ignore_valid && (ce->ce_flags & htons(CE_VALID)))
		return 0;

	changed = ce_match_stat_basic(ce, st);

	/*
	 * Within 1 second of this sequence:
	 * 	echo xyzzy >file && git-update-index --add file
	 * running this command:
	 * 	echo frotz >file
	 * would give a falsely clean cache entry.  The mtime and
	 * length match the cache, and other stat fields do not change.
	 *
	 * We could detect this at update-index time (the cache entry
	 * being registered/updated records the same time as "now")
	 * and delay the return from git-update-index, but that would
	 * effectively mean we can make at most one commit per second,
	 * which is not acceptable.  Instead, we check cache entries
	 * whose mtime are the same as the index file timestamp more
	 * carefully than others.
	 */
	if (!changed &&
	    index_file_timestamp &&
	    index_file_timestamp <= ntohl(ce->ce_mtime.sec))
		changed |= ce_modified_check_fs(ce, st);

	return changed;
}
203
204int ce_modified(struct cache_entry *ce, struct stat *st, int really)
205{
206 int changed, changed_fs;
207 changed = ce_match_stat(ce, st, really);
208 if (!changed)
209 return 0;
210 /*
211 * If the mode or type has changed, there's no point in trying
212 * to refresh the entry - it's not going to match
213 */
214 if (changed & (MODE_CHANGED | TYPE_CHANGED))
215 return changed;
216
217 /* Immediately after read-tree or update-index --cacheinfo,
218 * the length field is zero. For other cases the ce_size
219 * should match the SHA1 recorded in the index entry.
220 */
221 if ((changed & DATA_CHANGED) && ce->ce_size != htonl(0))
222 return changed;
223
224 changed_fs = ce_modified_check_fs(ce, st);
225 if (changed_fs)
226 return changed | changed_fs;
227 return 0;
228}
229
/*
 * Compare two path components the way tree objects sort them:
 * directories compare as if their name ended with '/'.  Returns
 * negative/zero/positive like memcmp().
 */
int base_name_compare(const char *name1, int len1, int mode1,
		      const char *name2, int len2, int mode2)
{
	int min_len = (len1 < len2) ? len1 : len2;
	int cmp = memcmp(name1, name2, min_len);
	unsigned char c1, c2;

	if (cmp)
		return cmp;
	/* common prefix identical: compare the next byte, treating a
	 * directory as if it were "name/" */
	c1 = name1[min_len];
	c2 = name2[min_len];
	if (!c1 && S_ISDIR(mode1))
		c1 = '/';
	if (!c2 && S_ISDIR(mode2))
		c2 = '/';
	if (c1 == c2)
		return 0;
	return (c1 < c2) ? -1 : 1;
}
248
249int cache_name_compare(const char *name1, int flags1, const char *name2, int flags2)
250{
251 int len1 = flags1 & CE_NAMEMASK;
252 int len2 = flags2 & CE_NAMEMASK;
253 int len = len1 < len2 ? len1 : len2;
254 int cmp;
255
256 cmp = memcmp(name1, name2, len);
257 if (cmp)
258 return cmp;
259 if (len1 < len2)
260 return -1;
261 if (len1 > len2)
262 return 1;
263
264 /* Compare stages */
265 flags1 &= CE_STAGEMASK;
266 flags2 &= CE_STAGEMASK;
267
268 if (flags1 < flags2)
269 return -1;
270 if (flags1 > flags2)
271 return 1;
272 return 0;
273}
274
275int cache_name_pos(const char *name, int namelen)
276{
277 int first, last;
278
279 first = 0;
280 last = active_nr;
281 while (last > first) {
282 int next = (last + first) >> 1;
283 struct cache_entry *ce = active_cache[next];
284 int cmp = cache_name_compare(name, namelen, ce->name, ntohs(ce->ce_flags));
285 if (!cmp)
286 return next;
287 if (cmp < 0) {
288 last = next;
289 continue;
290 }
291 first = next+1;
292 }
293 return -first-1;
294}
295
296/* Remove entry, return true if there are more entries to go.. */
297int remove_cache_entry_at(int pos)
298{
299 active_cache_changed = 1;
300 active_nr--;
301 if (pos >= active_nr)
302 return 0;
303 memmove(active_cache + pos, active_cache + pos + 1, (active_nr - pos) * sizeof(struct cache_entry *));
304 return 1;
305}
306
307int remove_file_from_cache(const char *path)
308{
309 int pos = cache_name_pos(path, strlen(path));
310 if (pos < 0)
311 pos = -pos-1;
312 while (pos < active_nr && !strcmp(active_cache[pos]->name, path))
313 remove_cache_entry_at(pos);
314 return 0;
315}
316
317int ce_same_name(struct cache_entry *a, struct cache_entry *b)
318{
319 int len = ce_namelen(a);
320 return ce_namelen(b) == len && !memcmp(a->name, b->name, len);
321}
322
/*
 * Does the entry match any pathspec in the NULL-terminated list?
 * A pathspec matches when it is a leading-path prefix of ce->name:
 * the whole name, a prefix that ends just before a '/' in the name,
 * or a prefix that itself ends with '/'.  An empty pathspec matches
 * everything, and a NULL pathspec list matches everything.
 * Returns 1 on match, 0 otherwise.
 */
int ce_path_match(const struct cache_entry *ce, const char **pathspec)
{
	const char *match, *name;
	int len;

	if (!pathspec)
		return 1;

	len = ce_namelen(ce);
	name = ce->name;
	while ((match = *pathspec++) != NULL) {
		int matchlen = strlen(match);
		if (matchlen > len)
			continue;
		if (memcmp(name, match, matchlen))
			continue;
		/* pathspec "dir/" matching "dir/file" */
		if (matchlen && name[matchlen-1] == '/')
			return 1;
		/* exact match, or pathspec "dir" matching "dir/file" */
		if (name[matchlen] == '/' || !name[matchlen])
			return 1;
		/* empty pathspec matches any name */
		if (!matchlen)
			return 1;
	}
	return 0;
}
348
349/*
350 * Do we have another file that has the beginning components being a
351 * proper superset of the name we're trying to add?
352 */
353static int has_file_name(const struct cache_entry *ce, int pos, int ok_to_replace)
354{
355 int retval = 0;
356 int len = ce_namelen(ce);
357 int stage = ce_stage(ce);
358 const char *name = ce->name;
359
360 while (pos < active_nr) {
361 struct cache_entry *p = active_cache[pos++];
362
363 if (len >= ce_namelen(p))
364 break;
365 if (memcmp(name, p->name, len))
366 break;
367 if (ce_stage(p) != stage)
368 continue;
369 if (p->name[len] != '/')
370 continue;
371 retval = -1;
372 if (!ok_to_replace)
373 break;
374 remove_cache_entry_at(--pos);
375 }
376 return retval;
377}
378
379/*
380 * Do we have another file with a pathname that is a proper
381 * subset of the name we're trying to add?
382 */
383static int has_dir_name(const struct cache_entry *ce, int pos, int ok_to_replace)
384{
385 int retval = 0;
386 int stage = ce_stage(ce);
387 const char *name = ce->name;
388 const char *slash = name + ce_namelen(ce);
389
390 for (;;) {
391 int len;
392
393 for (;;) {
394 if (*--slash == '/')
395 break;
396 if (slash <= ce->name)
397 return retval;
398 }
399 len = slash - name;
400
401 pos = cache_name_pos(name, ntohs(create_ce_flags(len, stage)));
402 if (pos >= 0) {
403 retval = -1;
404 if (ok_to_replace)
405 break;
406 remove_cache_entry_at(pos);
407 continue;
408 }
409
410 /*
411 * Trivial optimization: if we find an entry that
412 * already matches the sub-directory, then we know
413 * we're ok, and we can exit.
414 */
415 pos = -pos-1;
416 while (pos < active_nr) {
417 struct cache_entry *p = active_cache[pos];
418 if ((ce_namelen(p) <= len) ||
419 (p->name[len] != '/') ||
420 memcmp(p->name, name, len))
421 break; /* not our subdirectory */
422 if (ce_stage(p) == stage)
423 /* p is at the same stage as our entry, and
424 * is a subdirectory of what we are looking
425 * at, so we cannot have conflicts at our
426 * level or anything shorter.
427 */
428 return retval;
429 pos++;
430 }
431 }
432 return retval;
433}
434
/* We may be in a situation where we already have path/file and path
 * is being added, or we already have path and path/file is being
 * added.  Either one would result in a nonsense tree that has path
 * twice when git-write-tree tries to write it out.  Prevent it.
 *
 * If ok-to-replace is specified, we remove the conflicting entries
 * from the cache so the caller should recompute the insert position.
 * When this happens, we return non-zero.
 */
static int check_file_directory_conflict(const struct cache_entry *ce, int pos, int ok_to_replace)
{
	int retval;

	/*
	 * Look first for entries that would nest under our path as a
	 * directory; removing those does not shift our position in
	 * the array.
	 */
	retval = has_file_name(ce, pos, ok_to_replace);

	/*
	 * Then check whether one of our leading path components
	 * already exists as a regular entry.
	 */
	retval += has_dir_name(ce, pos, ok_to_replace);
	return retval;
}
458
/*
 * Insert "ce" into the active cache at its sorted position, or
 * replace an existing entry with the same name and stage.
 *
 * "option" is a bitmask of ADD_CACHE_* flags:
 *   OK_TO_ADD      - allowed to insert a genuinely new entry
 *   OK_TO_REPLACE  - allowed to evict entries that conflict in the
 *                    file-vs-directory sense
 *   SKIP_DFCHECK   - skip the file/directory conflict check
 *
 * Returns 0 on success, -1 when the addition is not allowed.
 */
int add_cache_entry(struct cache_entry *ce, int option)
{
	int pos;
	int ok_to_add = option & ADD_CACHE_OK_TO_ADD;
	int ok_to_replace = option & ADD_CACHE_OK_TO_REPLACE;
	int skip_df_check = option & ADD_CACHE_SKIP_DFCHECK;

	pos = cache_name_pos(ce->name, ntohs(ce->ce_flags));

	/* existing match? Just replace it. */
	if (pos >= 0) {
		active_cache_changed = 1;
		active_cache[pos] = ce;
		return 0;
	}
	/* a negative result encodes the insertion point as -pos-1 */
	pos = -pos-1;

	/*
	 * Inserting a merged entry ("stage 0") into the index
	 * will always replace all non-merged entries..
	 */
	if (pos < active_nr && ce_stage(ce) == 0) {
		while (ce_same_name(active_cache[pos], ce)) {
			ok_to_add = 1;
			if (!remove_cache_entry_at(pos))
				break;
		}
	}

	if (!ok_to_add)
		return -1;

	if (!skip_df_check &&
	    check_file_directory_conflict(ce, pos, ok_to_replace)) {
		if (!ok_to_replace)
			return -1;
		/* conflicting entries were removed above, so the
		 * insertion point must be recomputed */
		pos = cache_name_pos(ce->name, ntohs(ce->ce_flags));
		pos = -pos-1;
	}

	/* Make sure the array is big enough .. */
	if (active_nr == active_alloc) {
		active_alloc = alloc_nr(active_alloc);
		active_cache = xrealloc(active_cache, active_alloc * sizeof(struct cache_entry *));
	}

	/* Add it in.. */
	active_nr++;
	if (active_nr > pos)
		memmove(active_cache + pos + 1, active_cache + pos, (active_nr - pos - 1) * sizeof(ce));
	active_cache[pos] = ce;
	active_cache_changed = 1;
	return 0;
}
513
/*
 * Three helpers to overload an error code onto a pointer return
 * value, in the style of linux/err.h: small negative error numbers
 * are smuggled through as "pointer" bit patterns.
 */

/* Encode a (negative) error code as a pointer value. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

/* Recover the error code from a pointer made by ERR_PTR(). */
static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

/* Does this pointer value actually carry an error code? */
static inline long IS_ERR(const void *ptr)
{
	unsigned long value = (unsigned long)ptr;

	return value > (unsigned long)-1000L;
}
529
530/*
531 * "refresh" does not calculate a new sha1 file or bring the
532 * cache up-to-date for mode/content changes. But what it
533 * _does_ do is to "re-match" the stat information of a file
534 * with the cache, so that you can refresh the cache for a
535 * file that hasn't been changed but where the stat entry is
536 * out of date.
537 *
538 * For example, you'd want to do this after doing a "git-read-tree",
539 * to link up the stat cache details with the proper files.
540 */
541static struct cache_entry *refresh_entry(struct cache_entry *ce, int really)
542{
543 struct stat st;
544 struct cache_entry *updated;
545 int changed, size;
546
547 if (lstat(ce->name, &st) < 0)
548 return ERR_PTR(-errno);
549
550 changed = ce_match_stat(ce, &st, really);
551 if (!changed) {
552 if (really && assume_unchanged &&
553 !(ce->ce_flags & htons(CE_VALID)))
554 ; /* mark this one VALID again */
555 else
556 return NULL;
557 }
558
559 if (ce_modified(ce, &st, really))
560 return ERR_PTR(-EINVAL);
561
562 size = ce_size(ce);
563 updated = xmalloc(size);
564 memcpy(updated, ce, size);
565 fill_stat_cache_info(updated, &st);
566
567 /* In this case, if really is not set, we should leave
568 * CE_VALID bit alone. Otherwise, paths marked with
569 * --no-assume-unchanged (i.e. things to be edited) will
570 * reacquire CE_VALID bit automatically, which is not
571 * really what we want.
572 */
573 if (!really && assume_unchanged && !(ce->ce_flags & htons(CE_VALID)))
574 updated->ce_flags &= ~htons(CE_VALID);
575
576 return updated;
577}
578
/*
 * Walk the whole active cache and refresh the stat information of
 * entries that are unchanged on disk.  "flags" is a bitmask of
 * REFRESH_* options.  Returns non-zero when some entries could not
 * be refreshed (unmerged paths or really-modified files).
 */
int refresh_cache(unsigned int flags)
{
	int i;
	int has_errors = 0;
	int really = (flags & REFRESH_REALLY) != 0;
	int allow_unmerged = (flags & REFRESH_UNMERGED) != 0;
	int quiet = (flags & REFRESH_QUIET) != 0;
	int not_new = (flags & REFRESH_IGNORE_MISSING) != 0;

	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce, *new;
		ce = active_cache[i];
		if (ce_stage(ce)) {
			/* skip all remaining stages of this unmerged
			 * path; the final i-- compensates for the
			 * loop's own increment */
			while ((i < active_nr) &&
			       ! strcmp(active_cache[i]->name, ce->name))
				i++;
			i--;
			if (allow_unmerged)
				continue;
			printf("%s: needs merge\n", ce->name);
			has_errors = 1;
			continue;
		}

		new = refresh_entry(ce, really);
		if (!new)
			continue;	/* already up to date */
		if (IS_ERR(new)) {
			if (not_new && PTR_ERR(new) == -ENOENT)
				continue;
			if (really && PTR_ERR(new) == -EINVAL) {
				/* If we are doing --really-refresh that
				 * means the index is not valid anymore.
				 */
				ce->ce_flags &= ~htons(CE_VALID);
				active_cache_changed = 1;
			}
			if (quiet)
				continue;
			printf("%s: needs update\n", ce->name);
			has_errors = 1;
			continue;
		}
		active_cache_changed = 1;
		/* You can NOT just free active_cache[i] here, since it
		 * might not be necessarily malloc()ed but can also come
		 * from mmap(). */
		active_cache[i] = new;
	}
	return has_errors;
}
630
631static int verify_hdr(struct cache_header *hdr, unsigned long size)
632{
633 SHA_CTX c;
634 unsigned char sha1[20];
635
636 if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
637 return error("bad signature");
638 if (hdr->hdr_version != htonl(2))
639 return error("bad index version");
640 SHA1_Init(&c);
641 SHA1_Update(&c, hdr, size - 20);
642 SHA1_Final(sha1, &c);
643 if (memcmp(sha1, (void *)hdr + size - 20, 20))
644 return error("bad index file sha1 signature");
645 return 0;
646}
647
648static int read_index_extension(const char *ext, void *data, unsigned long sz)
649{
650 switch (CACHE_EXT(ext)) {
651 case CACHE_EXT_TREE:
652 active_cache_tree = cache_tree_read(data, sz);
653 break;
654 default:
655 if (*ext < 'A' || 'Z' < *ext)
656 return error("index uses %.4s extension, which we do not understand",
657 ext);
658 fprintf(stderr, "ignoring %.4s extension\n", ext);
659 break;
660 }
661 return 0;
662}
663
/*
 * Read the index file into the global active_cache array.  The
 * entries point straight into the mmap(2)ed file image, so they
 * must not be free()d individually (see refresh_cache()).  Returns
 * the number of entries, 0 when no index file exists; dies on a
 * corrupt or unreadable index.
 */
int read_cache(void)
{
	int fd, i;
	struct stat st;
	unsigned long size, offset;
	void *map;
	struct cache_header *hdr;

	/* already loaded? */
	errno = EBUSY;
	if (active_cache)
		return active_nr;

	errno = ENOENT;
	index_file_timestamp = 0;
	fd = open(get_index_file(), O_RDONLY);
	if (fd < 0) {
		if (errno == ENOENT)
			return 0;	/* no index yet: empty cache */
		die("index file open failed (%s)", strerror(errno));
	}

	size = 0; // avoid gcc warning
	map = MAP_FAILED;
	if (!fstat(fd, &st)) {
		size = st.st_size;
		errno = EINVAL;
		/* must hold at least a header plus the trailing SHA1 */
		if (size >= sizeof(struct cache_header) + 20)
			map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	}
	close(fd);
	if (map == MAP_FAILED)
		die("index file mmap failed (%s)", strerror(errno));

	hdr = map;
	if (verify_hdr(hdr, size) < 0)
		goto unmap;

	active_nr = ntohl(hdr->hdr_entries);
	active_alloc = alloc_nr(active_nr);
	active_cache = xcalloc(active_alloc, sizeof(struct cache_entry *));

	/* entries are variable-length, packed back to back */
	offset = sizeof(*hdr);
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = map + offset;
		offset = offset + ce_size(ce);
		active_cache[i] = ce;
	}
	index_file_timestamp = st.st_mtime;
	while (offset <= size - 20 - 8) {
		/* After an array of active_nr index entries,
		 * there can be arbitrary number of extended
		 * sections, each of which is prefixed with
		 * extension name (4-byte) and section length
		 * in 4-byte network byte order.
		 */
		unsigned long extsize;
		memcpy(&extsize, map + offset + 4, 4);
		extsize = ntohl(extsize);
		if (read_index_extension(map + offset,
					 map + offset + 8, extsize) < 0)
			goto unmap;
		offset += 8;
		offset += extsize;
	}
	return active_nr;

unmap:
	munmap(map, size);
	errno = EINVAL;
	die("index file corrupt");
}
735
/* Chunked output buffering for index writing: bytes are accumulated
 * here so SHA1 updates and write(2) calls happen in full chunks. */
#define WRITE_BUFFER_SIZE 8192
static unsigned char write_buffer[WRITE_BUFFER_SIZE];
static unsigned long write_buffer_len;	/* bytes currently buffered */
739
740static int ce_write(SHA_CTX *context, int fd, void *data, unsigned int len)
741{
742 while (len) {
743 unsigned int buffered = write_buffer_len;
744 unsigned int partial = WRITE_BUFFER_SIZE - buffered;
745 if (partial > len)
746 partial = len;
747 memcpy(write_buffer + buffered, data, partial);
748 buffered += partial;
749 if (buffered == WRITE_BUFFER_SIZE) {
750 SHA1_Update(context, write_buffer, WRITE_BUFFER_SIZE);
751 if (write(fd, write_buffer, WRITE_BUFFER_SIZE) != WRITE_BUFFER_SIZE)
752 return -1;
753 buffered = 0;
754 }
755 write_buffer_len = buffered;
756 len -= partial;
757 data += partial;
758 }
759 return 0;
760}
761
762static int write_index_ext_header(SHA_CTX *context, int fd,
763 unsigned long ext, unsigned long sz)
764{
765 ext = htonl(ext);
766 sz = htonl(sz);
767 if ((ce_write(context, fd, &ext, 4) < 0) ||
768 (ce_write(context, fd, &sz, 4) < 0))
769 return -1;
770 return 0;
771}
772
/*
 * Finish an index write: fold whatever is still buffered into the
 * SHA1, then append the final 20-byte SHA1 signature and write the
 * remainder out.  Returns 0 on success, -1 on write failure.
 */
static int ce_flush(SHA_CTX *context, int fd)
{
	unsigned int left = write_buffer_len;

	if (left) {
		write_buffer_len = 0;
		SHA1_Update(context, write_buffer, left);
	}

	/* Flush first if not enough space for SHA1 signature */
	if (left + 20 > WRITE_BUFFER_SIZE) {
		if (write(fd, write_buffer, left) != left)
			return -1;
		left = 0;
	}

	/* Append the SHA1 signature at the end */
	SHA1_Final(write_buffer + left, context);
	left += 20;
	if (write(fd, write_buffer, left) != left)
		return -1;
	return 0;
}
796
static void ce_smudge_racily_clean_entry(struct cache_entry *ce)
{
	/*
	 * The only thing we care about in this function is to smudge the
	 * falsely clean entry due to touch-update-touch race, so we leave
	 * everything else as they are.  We are called for entries whose
	 * ce_mtime match the index file mtime.
	 */
	struct stat st;

	if (lstat(ce->name, &st) < 0)
		return;
	if (ce_match_stat_basic(ce, &st))
		return;	/* stat data already differs; no smudge needed */
	if (ce_modified_check_fs(ce, &st)) {
		/* This is "racily clean"; smudge it.  Note that this
		 * is a tricky code.  At first glance, it may appear
		 * that it can break with this sequence:
		 *
		 * $ echo xyzzy >frotz
		 * $ git-update-index --add frotz
		 * $ : >frotz
		 * $ sleep 3
		 * $ echo filfre >nitfol
		 * $ git-update-index --add nitfol
		 *
		 * but it does not.  When the second update-index runs,
		 * it notices that the entry "frotz" has the same timestamp
		 * as index, and if we were to smudge it by resetting its
		 * size to zero here, then the object name recorded
		 * in index is the 6-byte file but the cached stat information
		 * becomes zero --- which would then match what we would
		 * obtain from the filesystem next time we stat("frotz").
		 *
		 * However, the second update-index, before calling
		 * this function, notices that the cached size is 6
		 * bytes and what is on the filesystem is an empty
		 * file, and never calls us, so the cached size information
		 * for "frotz" stays 6 which does not match the filesystem.
		 */
		ce->ce_size = htonl(0);
	}
}
840
841int write_cache(int newfd, struct cache_entry **cache, int entries)
842{
843 SHA_CTX c;
844 struct cache_header hdr;
845 int i, removed;
846
847 for (i = removed = 0; i < entries; i++)
848 if (!cache[i]->ce_mode)
849 removed++;
850
851 hdr.hdr_signature = htonl(CACHE_SIGNATURE);
852 hdr.hdr_version = htonl(2);
853 hdr.hdr_entries = htonl(entries - removed);
854
855 SHA1_Init(&c);
856 if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
857 return -1;
858
859 for (i = 0; i < entries; i++) {
860 struct cache_entry *ce = cache[i];
861 if (!ce->ce_mode)
862 continue;
863 if (index_file_timestamp &&
864 index_file_timestamp <= ntohl(ce->ce_mtime.sec))
865 ce_smudge_racily_clean_entry(ce);
866 if (ce_write(&c, newfd, ce, ce_size(ce)) < 0)
867 return -1;
868 }
869
870 /* Write extension data here */
871 if (active_cache_tree) {
872 unsigned long sz;
873 void *data = cache_tree_write(active_cache_tree, &sz);
874 if (data &&
875 !write_index_ext_header(&c, newfd, CACHE_EXT_TREE, sz) &&
876 !ce_write(&c, newfd, data, sz))
877 ;
878 else {
879 free(data);
880 return -1;
881 }
882 }
883 return ce_flush(&c, newfd);
884}