#include "cache.h"
#include "dir.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "unpack-trees.h"

#define DBRT_DEBUG 1

struct tree_entry_list {
	struct tree_entry_list *next;
	unsigned directory : 1;
	unsigned executable : 1;
	unsigned symlink : 1;
	unsigned int mode;
	const char *name;
	const unsigned char *sha1;
};

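/*
 * Flatten a (parsed) tree object into a singly linked list of its
 * entries, in the order they appear in the tree.  The nodes are
 * xmalloc()ed and are used as read-only cursors by the parallel walk
 * in unpack_trees_rec(); nothing in this file frees them.
 */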
static struct tree_entry_list *create_tree_entry_list(struct tree *tree)
{
	struct tree_desc desc;
	struct name_entry one;
	struct tree_entry_list *ret = NULL;
	struct tree_entry_list **list_p = &ret;

	if (!tree->object.parsed)
		parse_tree(tree);

	init_tree_desc(&desc, tree->buffer, tree->size);

	while (tree_entry(&desc, &one)) {
		struct tree_entry_list *entry;

		entry = xmalloc(sizeof(struct tree_entry_list));
		entry->name = one.path;
		entry->sha1 = one.sha1;
		entry->mode = one.mode;
		entry->directory = S_ISDIR(one.mode) != 0;
		entry->executable = (one.mode & S_IXUSR) != 0;
		entry->symlink = S_ISLNK(one.mode) != 0;
		entry->next = NULL;

		*list_p = entry;
		list_p = &entry->next;
	}
	return ret;
}

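/*
 * Compare two entry names the way git sorts tree entries: a directory
 * sorts as if its name had a trailing '/'.  For example, with dir1 set,
 * "foo" is compared as "foo/", so it sorts after "foo.c" ('.' is 0x2e,
 * '/' is 0x2f) but before "foo0" ('0' is 0x30).
 */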
static int entcmp(const char *name1, int dir1, const char *name2, int dir2)
{
	int len1 = strlen(name1);
	int len2 = strlen(name2);
	int len = len1 < len2 ? len1 : len2;
	int ret = memcmp(name1, name2, len);
	unsigned char c1, c2;
	if (ret)
		return ret;
	c1 = name1[len];
	c2 = name2[len];
	if (!c1 && dir1)
		c1 = '/';
	if (!c2 && dir2)
		c2 = '/';
	ret = (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0;
	if (c1 && c2 && !ret)
		ret = len1 - len2;
	return ret;
}

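/*
 * Walk the trees and the index in parallel, one path at a time.
 * posns[] holds one cursor per tree, pointing at the next unhandled
 * entry under "base".  df_conflict_list is a sentinel: when we recurse
 * into a directory, any tree that had a blob at that path is marked
 * with it, so every path underneath picks up o->df_conflict_entry.
 * For each path the collected stage entries are handed to o->fn() when
 * merging, or added to the index as-is otherwise, and subdirectories
 * are recursed into.
 */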
static int unpack_trees_rec(struct tree_entry_list **posns, int len,
			    const char *base, struct unpack_trees_options *o,
			    int *indpos,
			    struct tree_entry_list *df_conflict_list)
{
	int baselen = strlen(base);
	int src_size = len + 1;
	int i_stk = i_stk; /* self-init quiets gcc; only used when o->dir is set */
	int retval = 0;

	if (o->dir)
		i_stk = push_exclude_per_directory(o->dir, base, baselen);

	do {
		int i;
		const char *first;
		int firstdir = 0;
		int pathlen;
		unsigned ce_size;
		struct tree_entry_list **subposns;
		struct cache_entry **src;
		int any_files = 0;
		int any_dirs = 0;
		char *cache_name;
		int ce_stage;

		/* Find the first name in the input. */

		first = NULL;
		cache_name = NULL;

		/* Check the cache */
		if (o->merge && *indpos < active_nr) {
			/* This is a bit tricky: if the index has a
			 * subdirectory (with contents) as the first
			 * name, it will show up as a filename like
			 * "foo/bar".  But that sorts after "foo", so
			 * the tree entry gets handled first; we then
			 * descend into "foo" and deal with "bar" from
			 * the index there, because the base will be
			 * "foo/".  The only way "foo/bar" can come
			 * first of all the things is if the trees do
			 * not contain "foo" at all, in which case we
			 * handle "foo/bar" without going into the
			 * directory -- but that is fine (and will
			 * return an error anyway, as an added unknown
			 * file).
			 */

			cache_name = active_cache[*indpos]->name;
			if (strlen(cache_name) > baselen &&
			    !memcmp(cache_name, base, baselen)) {
				cache_name += baselen;
				first = cache_name;
			} else {
				cache_name = NULL;
			}
		}

#if DBRT_DEBUG > 1
		if (first)
			printf("index %s\n", first);
#endif
		for (i = 0; i < len; i++) {
			if (!posns[i] || posns[i] == df_conflict_list)
				continue;
#if DBRT_DEBUG > 1
			printf("%d %s\n", i + 1, posns[i]->name);
#endif
			if (!first || entcmp(first, firstdir,
					     posns[i]->name,
					     posns[i]->directory) > 0) {
				first = posns[i]->name;
				firstdir = posns[i]->directory;
			}
		}
		/* No name means we're done */
		if (!first)
			goto leave_directory;

		pathlen = strlen(first);
		ce_size = cache_entry_size(baselen + pathlen);

		src = xcalloc(src_size, sizeof(struct cache_entry *));

		subposns = xcalloc(len, sizeof(struct tree_entry_list *));

		if (cache_name && !strcmp(cache_name, first)) {
			any_files = 1;
			src[0] = active_cache[*indpos];
			remove_cache_entry_at(*indpos);
		}

		/*
		 * Collect this path's entry from each tree.  Slot 0 of
		 * src[] is reserved for the index entry when merging,
		 * hence the "i + o->merge" offset.
		 */
		for (i = 0; i < len; i++) {
			struct cache_entry *ce;

			if (!posns[i] ||
			    (posns[i] != df_conflict_list &&
			     strcmp(first, posns[i]->name))) {
				continue;
			}

			if (posns[i] == df_conflict_list) {
				src[i + o->merge] = o->df_conflict_entry;
				continue;
			}

			if (posns[i]->directory) {
				struct tree *tree = lookup_tree(posns[i]->sha1);
				any_dirs = 1;
				parse_tree(tree);
				subposns[i] = create_tree_entry_list(tree);
				posns[i] = posns[i]->next;
				src[i + o->merge] = o->df_conflict_entry;
				continue;
			}

			/*
			 * Trees before head_idx are ancestors (stage 1),
			 * head_idx itself is "ours" (stage 2), anything
			 * later is the other side (stage 3).
			 */
			if (!o->merge)
				ce_stage = 0;
			else if (i + 1 < o->head_idx)
				ce_stage = 1;
			else if (i + 1 > o->head_idx)
				ce_stage = 3;
			else
				ce_stage = 2;

			ce = xcalloc(1, ce_size);
			ce->ce_mode = create_ce_mode(posns[i]->mode);
			ce->ce_flags = create_ce_flags(baselen + pathlen,
						       ce_stage);
			memcpy(ce->name, base, baselen);
			memcpy(ce->name + baselen, first, pathlen + 1);

			any_files = 1;

			hashcpy(ce->sha1, posns[i]->sha1);
			src[i + o->merge] = ce;
			subposns[i] = df_conflict_list;
			posns[i] = posns[i]->next;
		}
		if (any_files) {
			if (o->merge) {
				int ret;

#if DBRT_DEBUG > 1
				printf("%s:\n", first);
				for (i = 0; i < src_size; i++) {
					printf(" %d ", i);
					if (src[i])
						printf("%s\n", sha1_to_hex(src[i]->sha1));
					else
						printf("\n");
				}
#endif
				ret = o->fn(src, o);

#if DBRT_DEBUG > 1
				printf("Added %d entries\n", ret);
#endif
				*indpos += ret;
			} else {
				for (i = 0; i < src_size; i++) {
					if (src[i]) {
						add_cache_entry(src[i], ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
					}
				}
			}
		}
		if (any_dirs) {
			char *newbase = xmalloc(baselen + 2 + pathlen);
			memcpy(newbase, base, baselen);
			memcpy(newbase + baselen, first, pathlen);
			newbase[baselen + pathlen] = '/';
			newbase[baselen + pathlen + 1] = '\0';
			if (unpack_trees_rec(subposns, len, newbase, o,
					     indpos, df_conflict_list)) {
				retval = -1;
				goto leave_directory;
			}
			free(newbase);
		}
		free(subposns);
		free(src);
	} while (1);

 leave_directory:
	if (o->dir)
		pop_exclude_per_directory(o->dir, i_stk);
	return retval;
}

/* Unlink the last component and attempt to remove leading
 * directories, in case this unlink is the removal of the
 * last entry in the directory -- empty directories are removed.
 */
static void unlink_entry(char *name)
{
	char *cp, *prev;

	if (unlink(name))
		return;
	prev = NULL;
	while (1) {
		int status;
		cp = strrchr(name, '/');
		if (prev)
			*prev = '/';
		if (!cp)
			break;

		*cp = 0;
		status = rmdir(name);
		if (status) {
			*cp = '/';
			break;
		}
		prev = cp;
	}
}

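/*
 * Checkout progress: setup_progress_signal() arranges for SIGALRM once
 * a second, the handler just sets progress_update, and check_updates()
 * repaints its progress line whenever the flag is set or the
 * percentage changes.
 */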
static volatile sig_atomic_t progress_update;

static void progress_interval(int signum)
{
	progress_update = 1;
}

static void setup_progress_signal(void)
{
	struct sigaction sa;
	struct itimerval v;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = progress_interval;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;
	sigaction(SIGALRM, &sa, NULL);

	v.it_interval.tv_sec = 1;
	v.it_interval.tv_usec = 0;
	v.it_value = v.it_interval;
	setitimer(ITIMER_REAL, &v, NULL);
}

static struct checkout state;
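/*
 * Apply the result of the merge to the working tree: entries whose
 * ce_mode was cleared are removed with unlink_entry(), and entries
 * flagged CE_UPDATE are checked out.  Progress is shown only when
 * o->update and o->verbose_update are set and at least 250 paths are
 * affected.
 */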
static void check_updates(struct cache_entry **src, int nr,
			  struct unpack_trees_options *o)
{
	unsigned short mask = htons(CE_UPDATE);
	unsigned last_percent = 200, cnt = 0, total = 0;

	if (o->update && o->verbose_update) {
		for (total = cnt = 0; cnt < nr; cnt++) {
			struct cache_entry *ce = src[cnt];
			if (!ce->ce_mode || ce->ce_flags & mask)
				total++;
		}

		/* Don't bother doing this for very small updates */
		if (total < 250)
			total = 0;

		if (total) {
			fprintf(stderr, "Checking files out...\n");
			setup_progress_signal();
			progress_update = 1;
		}
		cnt = 0;
	}

	while (nr--) {
		struct cache_entry *ce = *src++;

		if (total) {
			if (!ce->ce_mode || ce->ce_flags & mask) {
				unsigned percent;
				cnt++;
				percent = (cnt * 100) / total;
				if (percent != last_percent ||
				    progress_update) {
					fprintf(stderr, "%4u%% (%u/%u) done\r",
						percent, cnt, total);
					last_percent = percent;
					progress_update = 0;
				}
			}
		}
		if (!ce->ce_mode) {
			if (o->update)
				unlink_entry(ce->name);
			continue;
		}
		if (ce->ce_flags & mask) {
			ce->ce_flags &= ~mask;
			if (o->update)
				checkout_entry(ce, &state, NULL);
		}
	}
	if (total) {
		signal(SIGALRM, SIG_IGN);
		fputc('\n', stderr);
	}
}

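/*
 * Main entry point: read the given trees, merge them (and the index,
 * when o->merge is set) using o->fn, and update the index and, if
 * requested, the working tree.  Returns 0 on success and -1 on
 * failure.
 */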
int unpack_trees(struct object_list *trees, struct unpack_trees_options *o)
{
	int indpos = 0;
	unsigned len = object_list_length(trees);
	struct tree_entry_list **posns;
	int i;
	struct object_list *posn = trees;
	struct tree_entry_list df_conflict_list;
	static struct cache_entry *dfc;

	memset(&df_conflict_list, 0, sizeof(df_conflict_list));
	df_conflict_list.next = &df_conflict_list;
	memset(&state, 0, sizeof(state));
	state.base_dir = "";
	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;

	o->merge_size = len;

	if (!dfc)
		dfc = xcalloc(1, sizeof(struct cache_entry) + 1);
	o->df_conflict_entry = dfc;

	if (len) {
		posns = xmalloc(len * sizeof(struct tree_entry_list *));
		for (i = 0; i < len; i++) {
			posns[i] = create_tree_entry_list((struct tree *) posn->item);
			posn = posn->next;
		}
		if (unpack_trees_rec(posns, len, o->prefix ? o->prefix : "",
				     o, &indpos, &df_conflict_list))
			return -1;
	}

	if (o->trivial_merges_only && o->nontrivial_merge)
		die("Merge requires file-level merging");

	check_updates(active_cache, active_nr, o);
	return 0;
}

/* Here come the merge functions */
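
/*
 * The merge functions below are meant to be plugged into
 * unpack_trees_options.fn by callers such as read-tree.  A minimal
 * sketch of the expected wiring (the field names are the ones used in
 * this file; the surrounding setup is illustrative, not lifted from
 * any particular caller):
 *
 *	struct unpack_trees_options opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.head_idx = 1;	(which stage is "ours"; see unpack_trees_rec)
 *	opts.merge = 1;		(consult the index as stage 0)
 *	opts.update = 1;	(reflect the result in the working tree)
 *	opts.fn = twoway_merge;	(or oneway_merge, bind_merge, threeway_merge)
 *	if (unpack_trees(trees, &opts))
 *		die("unpack failed");
 */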

static void reject_merge(struct cache_entry *ce)
{
	die("Entry '%s' would be overwritten by merge. Cannot merge.",
	    ce->name);
}

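/*
 * Two cache entries are "the same" when both are present and agree in
 * mode and sha1; a missing entry only matches another missing entry.
 */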
static int same(struct cache_entry *a, struct cache_entry *b)
{
	if (!!a != !!b)
		return 0;
	if (!a && !b)
		return 1;
	return a->ce_mode == b->ce_mode &&
	       !hashcmp(a->sha1, b->sha1);
}

/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static void verify_uptodate(struct cache_entry *ce,
			    struct unpack_trees_options *o)
{
	struct stat st;

	if (o->index_only || o->reset)
		return;

	if (!lstat(ce->name, &st)) {
		unsigned changed = ce_match_stat(ce, &st, 1);
		if (!changed)
			return;
		errno = 0;
	}
	if (o->reset) {
		ce->ce_flags |= htons(CE_UPDATE);
		return;
	}
	if (errno == ENOENT)
		return;
	die("Entry '%s' not uptodate. Cannot merge.", ce->name);
}

static void invalidate_ce_path(struct cache_entry *ce)
{
	if (ce)
		cache_tree_invalidate_path(active_cache_tree, ce->name);
}

/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked, unless it is ignored.
 */
static void verify_absent(const char *path, const char *action,
			  struct unpack_trees_options *o)
{
	struct stat st;

	if (o->index_only || o->reset || !o->update)
		return;
	if (!lstat(path, &st) && !(o->dir && excluded(o->dir, path)))
		die("Untracked working tree file '%s' "
		    "would be %s by merge.", path, action);
}

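/*
 * Take "merge" as the result for this path.  An existing index entry
 * must either match the result (its stat info is then carried over) or
 * be up to date in the working tree; without an index entry, the path
 * must not exist as an untracked (and not ignored) working tree file.
 */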
485
486static int merged_entry(struct cache_entry *merge, struct cache_entry *old,
487 struct unpack_trees_options *o)
488{
489 merge->ce_flags |= htons(CE_UPDATE);
490 if (old) {
491 /*
492 * See if we can re-use the old CE directly?
493 * That way we get the uptodate stat info.
494 *
495 * This also removes the UPDATE flag on
496 * a match.
497 */
498 if (same(old, merge)) {
499 *merge = *old;
500 } else {
501 verify_uptodate(old, o);
502 invalidate_ce_path(old);
503 }
504 }
505 else {
506 verify_absent(merge->name, "overwritten", o);
507 invalidate_ce_path(merge);
508 }
509
510 merge->ce_flags &= ~htons(CE_STAGEMASK);
511 add_cache_entry(merge, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
512 return 1;
513}
514
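/*
 * Record the path as going away: ce_mode is cleared so that
 * check_updates() knows to unlink the working tree file.
 */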
static int deleted_entry(struct cache_entry *ce, struct cache_entry *old,
			 struct unpack_trees_options *o)
{
	if (old)
		verify_uptodate(old, o);
	else
		verify_absent(ce->name, "removed", o);
	ce->ce_mode = 0;
	add_cache_entry(ce, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
	invalidate_ce_path(ce);
	return 1;
}

static int keep_entry(struct cache_entry *ce)
{
	add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
	return 1;
}

#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ntohl(ce->ce_mode),
			sha1_to_hex(ce->sha1),
			ce_stage(ce),
			ce->name);
}
#endif

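/*
 * Three-way merge.  stages[0] is the index entry, stages[1..head_idx-1]
 * are the common ancestors, stages[head_idx] is HEAD and
 * stages[head_idx + 1] is the other tree being merged.  The #NN case
 * numbers in the comments refer to the 3-way merge cases described in
 * Documentation/git-read-tree.txt.
 */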
int threeway_merge(struct cache_entry **stages,
		   struct unpack_trees_options *o)
{
	struct cache_entry *index;
	struct cache_entry *head;
	struct cache_entry *remote = stages[o->head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;
	const char *path = NULL;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	for (i = 1; i < o->head_idx; i++) {
		if (!stages[i])
			any_anc_missing = 1;
		else {
			if (!path)
				path = stages[i]->name;
			no_anc_exists = 0;
		}
	}

	index = stages[0];
	head = stages[o->head_idx];

	if (head == o->df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == o->df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	if (!path && index)
		path = index->name;
	if (!path && head)
		path = head->name;
	if (!path && remote)
		path = remote->name;

	/* First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < o->head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/* We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			reject_merge(index);
		return merged_entry(remote, index, o);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head)) {
		reject_merge(index);
	}

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index, o);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index, o);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/* Under the new "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (o->aggressive) {
		int head_deleted = !head && !df_conflict_head;
		int remote_deleted = !remote && !df_conflict_remote;
		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index, o);
			else if (path && !head_deleted)
				verify_absent(path, "removed", o);
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index, o);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		verify_uptodate(index, o);
	}

	o->nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		for (i = 1; i < o->head_idx; i++) {
			if (stages[i]) {
				keep_entry(stages[i]);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head); }
	if (remote) { count += keep_entry(remote); }
	return count;
}

/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast forward", favoring a successful merge
 * over a merge failure when it makes sense.  For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 */
int twoway_merge(struct cache_entry **src,
		 struct unpack_trees_options *o)
{
	struct cache_entry *current = src[0];
	struct cache_entry *oldtree = src[1], *newtree = src[2];

	if (o->merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     o->merge_size);

	if (current) {
		if ((!oldtree && !newtree) || /* 4 and 5 */
		    (!oldtree && newtree &&
		     same(current, newtree)) || /* 6 and 7 */
		    (oldtree && newtree &&
		     same(oldtree, newtree)) || /* 14 and 15 */
		    (oldtree && newtree &&
		     !same(oldtree, newtree) && /* 18 and 19 */
		     same(current, newtree))) {
			return keep_entry(current);
		}
		else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current, o);
		}
		else if (oldtree && newtree &&
			 same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current, o);
		}
		else {
			/* all other failures */
			if (oldtree)
				reject_merge(oldtree);
			if (current)
				reject_merge(current);
			if (newtree)
				reject_merge(newtree);
			return -1;
		}
	}
	else if (newtree)
		return merged_entry(newtree, current, o);
	else
		return deleted_entry(oldtree, current, o);
}

/*
 * Bind merge.
 *
 * Keep the index entries at stage0 and collapse the stage1 entries
 * down to stage0, refusing the merge when both exist for the same path.
 */
int bind_merge(struct cache_entry **src,
	       struct unpack_trees_options *o)
{
	struct cache_entry *old = src[0];
	struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a bind merge of %d trees",
			     o->merge_size);
	if (a && old)
		die("Entry '%s' overlaps. Cannot bind.", a->name);
	if (!a)
		return keep_entry(old);
	else
		return merged_entry(a, NULL, o);
}

/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
int oneway_merge(struct cache_entry **src,
		 struct unpack_trees_options *o)
{
	struct cache_entry *old = src[0];
	struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     o->merge_size);

	if (!a)
		return deleted_entry(old, old, o);
	if (old && same(old, a)) {
		if (o->reset) {
			struct stat st;
			if (lstat(old->name, &st) ||
			    ce_match_stat(old, &st, 1))
				old->ce_flags |= htons(CE_UPDATE);
		}
		return keep_entry(old);
	}
	return merged_entry(a, old, o);
}