1/*
2 * GIT - The information manager from hell
3 *
4 * Copyright (C) Linus Torvalds, 2005
5 */
6#define DBRT_DEBUG 1
7
8#include "cache.h"
9
10#include "object.h"
11#include "tree.h"
12#include "tree-walk.h"
13#include "cache-tree.h"
14#include <sys/time.h>
15#include <signal.h>
16#include "builtin.h"
17
/* Command-line driven behavior flags (set up in cmd_read_tree). */
static int reset = 0;		/* --reset: discard unmerged state, force match */
static int merge = 0;		/* -m/--reset/--prefix: perform a merge */
static int update = 0;		/* -u: reflect the result in the working tree */
static int index_only = 0;	/* -i: never touch the working tree */
static int nontrivial_merge = 0;	/* set when file-level merging is needed */
static int trivial_merges_only = 0;	/* --trivial: refuse nontrivial merges */
static int aggressive = 0;	/* --aggressive: resolve more trivial cases */
static int verbose_update = 0;	/* -v: show checkout progress */
static volatile int progress_update = 0;	/* set by the SIGALRM handler */
static const char *prefix = NULL;	/* --prefix=<dir>/ for bind merges */

static int head_idx = -1;	/* stage number that represents HEAD */
static int merge_size = 0;	/* number of trees being merged */

static struct object_list *trees = NULL;	/* trees named on the command line */

/* Sentinel entry marking a directory/file conflict in a stage slot. */
static struct cache_entry df_conflict_entry = {
};
36
/*
 * One entry read out of a tree object, kept in a singly linked list
 * in tree order.  name/sha1 point into the tree's buffer, so the
 * tree object must stay alive while the list is in use.
 */
struct tree_entry_list {
	struct tree_entry_list *next;
	unsigned directory : 1;		/* S_ISDIR(mode) */
	unsigned executable : 1;	/* mode has S_IXUSR */
	unsigned symlink : 1;		/* S_ISLNK(mode) */
	unsigned int mode;
	const char *name;
	const unsigned char *sha1;
};
46
/*
 * Sentinel list marking a directory/file conflict: it links to
 * itself so walking it never advances past the marker.
 */
static struct tree_entry_list df_conflict_list = {
	.name = NULL,
	.next = &df_conflict_list
};

/* Callback deciding how one path's stage entries are merged. */
typedef int (*merge_fn_t)(struct cache_entry **src);
53
54static struct tree_entry_list *create_tree_entry_list(struct tree *tree)
55{
56 struct tree_desc desc;
57 struct name_entry one;
58 struct tree_entry_list *ret = NULL;
59 struct tree_entry_list **list_p = &ret;
60
61 desc.buf = tree->buffer;
62 desc.size = tree->size;
63
64 while (tree_entry(&desc, &one)) {
65 struct tree_entry_list *entry;
66
67 entry = xmalloc(sizeof(struct tree_entry_list));
68 entry->name = one.path;
69 entry->sha1 = one.sha1;
70 entry->mode = one.mode;
71 entry->directory = S_ISDIR(one.mode) != 0;
72 entry->executable = (one.mode & S_IXUSR) != 0;
73 entry->symlink = S_ISLNK(one.mode) != 0;
74 entry->next = NULL;
75
76 *list_p = entry;
77 list_p = &entry->next;
78 }
79 return ret;
80}
81
/*
 * Compare two entry names the way they sort inside a tree object:
 * a directory sorts as if its name had a trailing '/'.  Returns
 * <0, 0, >0 in the usual strcmp() sense.
 */
static int entcmp(const char *name1, int dir1, const char *name2, int dir2)
{
	size_t n1 = strlen(name1);
	size_t n2 = strlen(name2);
	size_t common = (n1 < n2) ? n1 : n2;
	int cmp = memcmp(name1, name2, common);
	unsigned char a, b;

	if (cmp)
		return cmp;
	a = name1[common];
	b = name2[common];
	/* a directory's terminating NUL compares like a '/' */
	if (dir1 && !a)
		a = '/';
	if (dir2 && !b)
		b = '/';
	if (a != b)
		return (a < b) ? -1 : 1;
	if (a && b)
		return (int)n1 - (int)n2;
	return 0;
}
102
103static int unpack_trees_rec(struct tree_entry_list **posns, int len,
104 const char *base, merge_fn_t fn, int *indpos)
105{
106 int baselen = strlen(base);
107 int src_size = len + 1;
108 do {
109 int i;
110 const char *first;
111 int firstdir = 0;
112 int pathlen;
113 unsigned ce_size;
114 struct tree_entry_list **subposns;
115 struct cache_entry **src;
116 int any_files = 0;
117 int any_dirs = 0;
118 char *cache_name;
119 int ce_stage;
120
121 /* Find the first name in the input. */
122
123 first = NULL;
124 cache_name = NULL;
125
126 /* Check the cache */
127 if (merge && *indpos < active_nr) {
128 /* This is a bit tricky: */
129 /* If the index has a subdirectory (with
130 * contents) as the first name, it'll get a
131 * filename like "foo/bar". But that's after
132 * "foo", so the entry in trees will get
133 * handled first, at which point we'll go into
134 * "foo", and deal with "bar" from the index,
135 * because the base will be "foo/". The only
136 * way we can actually have "foo/bar" first of
137 * all the things is if the trees don't
138 * contain "foo" at all, in which case we'll
139 * handle "foo/bar" without going into the
140 * directory, but that's fine (and will return
141 * an error anyway, with the added unknown
142 * file case.
143 */
144
145 cache_name = active_cache[*indpos]->name;
146 if (strlen(cache_name) > baselen &&
147 !memcmp(cache_name, base, baselen)) {
148 cache_name += baselen;
149 first = cache_name;
150 } else {
151 cache_name = NULL;
152 }
153 }
154
155#if DBRT_DEBUG > 1
156 if (first)
157 printf("index %s\n", first);
158#endif
159 for (i = 0; i < len; i++) {
160 if (!posns[i] || posns[i] == &df_conflict_list)
161 continue;
162#if DBRT_DEBUG > 1
163 printf("%d %s\n", i + 1, posns[i]->name);
164#endif
165 if (!first || entcmp(first, firstdir,
166 posns[i]->name,
167 posns[i]->directory) > 0) {
168 first = posns[i]->name;
169 firstdir = posns[i]->directory;
170 }
171 }
172 /* No name means we're done */
173 if (!first)
174 return 0;
175
176 pathlen = strlen(first);
177 ce_size = cache_entry_size(baselen + pathlen);
178
179 src = xcalloc(src_size, sizeof(struct cache_entry *));
180
181 subposns = xcalloc(len, sizeof(struct tree_list_entry *));
182
183 if (cache_name && !strcmp(cache_name, first)) {
184 any_files = 1;
185 src[0] = active_cache[*indpos];
186 remove_cache_entry_at(*indpos);
187 }
188
189 for (i = 0; i < len; i++) {
190 struct cache_entry *ce;
191
192 if (!posns[i] ||
193 (posns[i] != &df_conflict_list &&
194 strcmp(first, posns[i]->name))) {
195 continue;
196 }
197
198 if (posns[i] == &df_conflict_list) {
199 src[i + merge] = &df_conflict_entry;
200 continue;
201 }
202
203 if (posns[i]->directory) {
204 struct tree *tree = lookup_tree(posns[i]->sha1);
205 any_dirs = 1;
206 parse_tree(tree);
207 subposns[i] = create_tree_entry_list(tree);
208 posns[i] = posns[i]->next;
209 src[i + merge] = &df_conflict_entry;
210 continue;
211 }
212
213 if (!merge)
214 ce_stage = 0;
215 else if (i + 1 < head_idx)
216 ce_stage = 1;
217 else if (i + 1 > head_idx)
218 ce_stage = 3;
219 else
220 ce_stage = 2;
221
222 ce = xcalloc(1, ce_size);
223 ce->ce_mode = create_ce_mode(posns[i]->mode);
224 ce->ce_flags = create_ce_flags(baselen + pathlen,
225 ce_stage);
226 memcpy(ce->name, base, baselen);
227 memcpy(ce->name + baselen, first, pathlen + 1);
228
229 any_files = 1;
230
231 memcpy(ce->sha1, posns[i]->sha1, 20);
232 src[i + merge] = ce;
233 subposns[i] = &df_conflict_list;
234 posns[i] = posns[i]->next;
235 }
236 if (any_files) {
237 if (merge) {
238 int ret;
239
240#if DBRT_DEBUG > 1
241 printf("%s:\n", first);
242 for (i = 0; i < src_size; i++) {
243 printf(" %d ", i);
244 if (src[i])
245 printf("%s\n", sha1_to_hex(src[i]->sha1));
246 else
247 printf("\n");
248 }
249#endif
250 ret = fn(src);
251
252#if DBRT_DEBUG > 1
253 printf("Added %d entries\n", ret);
254#endif
255 *indpos += ret;
256 } else {
257 for (i = 0; i < src_size; i++) {
258 if (src[i]) {
259 add_cache_entry(src[i], ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
260 }
261 }
262 }
263 }
264 if (any_dirs) {
265 char *newbase = xmalloc(baselen + 2 + pathlen);
266 memcpy(newbase, base, baselen);
267 memcpy(newbase + baselen, first, pathlen);
268 newbase[baselen + pathlen] = '/';
269 newbase[baselen + pathlen + 1] = '\0';
270 if (unpack_trees_rec(subposns, len, newbase, fn,
271 indpos))
272 return -1;
273 free(newbase);
274 }
275 free(subposns);
276 free(src);
277 } while (1);
278}
279
280static void reject_merge(struct cache_entry *ce)
281{
282 die("Entry '%s' would be overwritten by merge. Cannot merge.",
283 ce->name);
284}
285
/* Unlink the last component and attempt to remove leading
 * directories, in case this unlink is the removal of the
 * last entry in the directory -- empty directories are removed.
 */
static void unlink_entry(char *name)
{
	char *slash, *restore;

	if (unlink(name))
		return;
	restore = NULL;
	for (;;) {
		/* find the next-shorter directory prefix */
		slash = strrchr(name, '/');
		/* put back the separator zeroed on the previous pass,
		 * so "name" leaves this function intact */
		if (restore)
			*restore = '/';
		if (!slash)
			break;
		*slash = '\0';
		if (rmdir(name)) {
			/* non-empty (or otherwise unremovable): stop */
			*slash = '/';
			break;
		}
		restore = slash;
	}
}
314
/* SIGALRM handler: ask the checkout loop to refresh its progress line. */
static void progress_interval(int signum)
{
	progress_update = 1;
}
319
320static void setup_progress_signal(void)
321{
322 struct sigaction sa;
323 struct itimerval v;
324
325 memset(&sa, 0, sizeof(sa));
326 sa.sa_handler = progress_interval;
327 sigemptyset(&sa.sa_mask);
328 sa.sa_flags = SA_RESTART;
329 sigaction(SIGALRM, &sa, NULL);
330
331 v.it_interval.tv_sec = 1;
332 v.it_interval.tv_usec = 0;
333 v.it_value = v.it_interval;
334 setitimer(ITIMER_REAL, &v, NULL);
335}
336
337static void check_updates(struct cache_entry **src, int nr)
338{
339 static struct checkout state = {
340 .base_dir = "",
341 .force = 1,
342 .quiet = 1,
343 .refresh_cache = 1,
344 };
345 unsigned short mask = htons(CE_UPDATE);
346 unsigned last_percent = 200, cnt = 0, total = 0;
347
348 if (update && verbose_update) {
349 for (total = cnt = 0; cnt < nr; cnt++) {
350 struct cache_entry *ce = src[cnt];
351 if (!ce->ce_mode || ce->ce_flags & mask)
352 total++;
353 }
354
355 /* Don't bother doing this for very small updates */
356 if (total < 250)
357 total = 0;
358
359 if (total) {
360 fprintf(stderr, "Checking files out...\n");
361 setup_progress_signal();
362 progress_update = 1;
363 }
364 cnt = 0;
365 }
366
367 while (nr--) {
368 struct cache_entry *ce = *src++;
369
370 if (total) {
371 if (!ce->ce_mode || ce->ce_flags & mask) {
372 unsigned percent;
373 cnt++;
374 percent = (cnt * 100) / total;
375 if (percent != last_percent ||
376 progress_update) {
377 fprintf(stderr, "%4u%% (%u/%u) done\r",
378 percent, cnt, total);
379 last_percent = percent;
380 }
381 }
382 }
383 if (!ce->ce_mode) {
384 if (update)
385 unlink_entry(ce->name);
386 continue;
387 }
388 if (ce->ce_flags & mask) {
389 ce->ce_flags &= ~mask;
390 if (update)
391 checkout_entry(ce, &state, NULL);
392 }
393 }
394 if (total) {
395 signal(SIGALRM, SIG_IGN);
396 fputc('\n', stderr);
397 }
398}
399
400static int unpack_trees(merge_fn_t fn)
401{
402 int indpos = 0;
403 unsigned len = object_list_length(trees);
404 struct tree_entry_list **posns;
405 int i;
406 struct object_list *posn = trees;
407 merge_size = len;
408
409 if (len) {
410 posns = xmalloc(len * sizeof(struct tree_entry_list *));
411 for (i = 0; i < len; i++) {
412 posns[i] = create_tree_entry_list((struct tree *) posn->item);
413 posn = posn->next;
414 }
415 if (unpack_trees_rec(posns, len, prefix ? prefix : "",
416 fn, &indpos))
417 return -1;
418 }
419
420 if (trivial_merges_only && nontrivial_merge)
421 die("Merge requires file-level merging");
422
423 check_updates(active_cache, active_nr);
424 return 0;
425}
426
427static int list_tree(unsigned char *sha1)
428{
429 struct tree *tree = parse_tree_indirect(sha1);
430 if (!tree)
431 return -1;
432 object_list_append(&tree->object, &trees);
433 return 0;
434}
435
436static int same(struct cache_entry *a, struct cache_entry *b)
437{
438 if (!!a != !!b)
439 return 0;
440 if (!a && !b)
441 return 1;
442 return a->ce_mode == b->ce_mode &&
443 !memcmp(a->sha1, b->sha1, 20);
444}
445
446
447/*
448 * When a CE gets turned into an unmerged entry, we
449 * want it to be up-to-date
450 */
451static void verify_uptodate(struct cache_entry *ce)
452{
453 struct stat st;
454
455 if (index_only || reset)
456 return;
457
458 if (!lstat(ce->name, &st)) {
459 unsigned changed = ce_match_stat(ce, &st, 1);
460 if (!changed)
461 return;
462 errno = 0;
463 }
464 if (reset) {
465 ce->ce_flags |= htons(CE_UPDATE);
466 return;
467 }
468 if (errno == ENOENT)
469 return;
470 die("Entry '%s' not uptodate. Cannot merge.", ce->name);
471}
472
473static void invalidate_ce_path(struct cache_entry *ce)
474{
475 if (ce)
476 cache_tree_invalidate_path(active_cache_tree, ce->name);
477}
478
479/*
480 * We do not want to remove or overwrite a working tree file that
481 * is not tracked.
482 */
483static void verify_absent(const char *path, const char *action)
484{
485 struct stat st;
486
487 if (index_only || reset || !update)
488 return;
489 if (!lstat(path, &st))
490 die("Untracked working tree file '%s' "
491 "would be %s by merge.", path, action);
492}
493
494static int merged_entry(struct cache_entry *merge, struct cache_entry *old)
495{
496 merge->ce_flags |= htons(CE_UPDATE);
497 if (old) {
498 /*
499 * See if we can re-use the old CE directly?
500 * That way we get the uptodate stat info.
501 *
502 * This also removes the UPDATE flag on
503 * a match.
504 */
505 if (same(old, merge)) {
506 *merge = *old;
507 } else {
508 verify_uptodate(old);
509 invalidate_ce_path(old);
510 }
511 }
512 else {
513 verify_absent(merge->name, "overwritten");
514 invalidate_ce_path(merge);
515 }
516
517 merge->ce_flags &= ~htons(CE_STAGEMASK);
518 add_cache_entry(merge, ADD_CACHE_OK_TO_ADD);
519 return 1;
520}
521
522static int deleted_entry(struct cache_entry *ce, struct cache_entry *old)
523{
524 if (old)
525 verify_uptodate(old);
526 else
527 verify_absent(ce->name, "removed");
528 ce->ce_mode = 0;
529 add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
530 invalidate_ce_path(ce);
531 return 1;
532}
533
/* Carry an existing entry into the result index unchanged. */
static int keep_entry(struct cache_entry *ce)
{
	add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
	return 1;
}
539
#if DBRT_DEBUG
/* Debug helper: dump one stage entry (mode, sha1, stage, path). */
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce) {
		fprintf(o, "%s (missing)\n", label);
		return;
	}
	fprintf(o, "%s%06o %s %d\t%s\n",
		label,
		ntohl(ce->ce_mode),
		sha1_to_hex(ce->sha1),
		ce_stage(ce),
		ce->name);
}
#endif
555
/*
 * Three-way merge.  stages[0] is the index entry, stages[1..head_idx-1]
 * the common ancestors, stages[head_idx] the head, stages[head_idx+1]
 * the remote.  The #N case numbers refer to the trivial-merge table in
 * the git-read-tree documentation.  Returns the number of entries
 * added to the result index.
 */
static int threeway_merge(struct cache_entry **stages)
{
	struct cache_entry *index;
	struct cache_entry *head;
	struct cache_entry *remote = stages[head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;
	const char *path = NULL;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	/* Scan the ancestor stages, remembering a path name for messages. */
	for (i = 1; i < head_idx; i++) {
		if (!stages[i])
			any_anc_missing = 1;
		else {
			if (!path)
				path = stages[i]->name;
			no_anc_exists = 0;
		}
	}

	index = stages[0];
	head = stages[head_idx];

	/* Translate the D/F conflict sentinels into flags + NULL. */
	if (head == &df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == &df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	if (!path && index)
		path = index->name;
	if (!path && head)
		path = head->name;
	if (!path && remote)
		path = remote->name;

	/* First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/* We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			reject_merge(index);
		return merged_entry(remote, index);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head)) {
		reject_merge(index);
	}

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/* Under the new "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (aggressive) {
		int head_deleted = !head && !df_conflict_head;
		int remote_deleted = !remote && !df_conflict_remote;
		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index);
			else if (path)
				verify_absent(path, "removed");
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		verify_uptodate(index);
	}
	else if (path)
		verify_absent(path, "overwritten");

	nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		/* keep one ancestor stage so the unmerged state survives */
		for (i = 1; i < head_idx; i++) {
			if (stages[i]) {
				keep_entry(stages[i]);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head); }
	if (remote) { count += keep_entry(remote); }
	return count;
}
709
710/*
711 * Two-way merge.
712 *
713 * The rule is to "carry forward" what is in the index without losing
714 * information across a "fast forward", favoring a successful merge
715 * over a merge failure when it makes sense. For details of the
716 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
717 *
718 */
719static int twoway_merge(struct cache_entry **src)
720{
721 struct cache_entry *current = src[0];
722 struct cache_entry *oldtree = src[1], *newtree = src[2];
723
724 if (merge_size != 2)
725 return error("Cannot do a twoway merge of %d trees",
726 merge_size);
727
728 if (current) {
729 if ((!oldtree && !newtree) || /* 4 and 5 */
730 (!oldtree && newtree &&
731 same(current, newtree)) || /* 6 and 7 */
732 (oldtree && newtree &&
733 same(oldtree, newtree)) || /* 14 and 15 */
734 (oldtree && newtree &&
735 !same(oldtree, newtree) && /* 18 and 19*/
736 same(current, newtree))) {
737 return keep_entry(current);
738 }
739 else if (oldtree && !newtree && same(current, oldtree)) {
740 /* 10 or 11 */
741 return deleted_entry(oldtree, current);
742 }
743 else if (oldtree && newtree &&
744 same(current, oldtree) && !same(current, newtree)) {
745 /* 20 or 21 */
746 return merged_entry(newtree, current);
747 }
748 else {
749 /* all other failures */
750 if (oldtree)
751 reject_merge(oldtree);
752 if (current)
753 reject_merge(current);
754 if (newtree)
755 reject_merge(newtree);
756 return -1;
757 }
758 }
759 else if (newtree)
760 return merged_entry(newtree, current);
761 else
762 return deleted_entry(oldtree, current);
763}
764
765/*
766 * Bind merge.
767 *
768 * Keep the index entries at stage0, collapse stage1 but make sure
769 * stage0 does not have anything there.
770 */
771static int bind_merge(struct cache_entry **src)
772{
773 struct cache_entry *old = src[0];
774 struct cache_entry *a = src[1];
775
776 if (merge_size != 1)
777 return error("Cannot do a bind merge of %d trees\n",
778 merge_size);
779 if (a && old)
780 die("Entry '%s' overlaps. Cannot bind.", a->name);
781 if (!a)
782 return keep_entry(old);
783 else
784 return merged_entry(a, NULL);
785}
786
787/*
788 * One-way merge.
789 *
790 * The rule is:
791 * - take the stat information from stage0, take the data from stage1
792 */
793static int oneway_merge(struct cache_entry **src)
794{
795 struct cache_entry *old = src[0];
796 struct cache_entry *a = src[1];
797
798 if (merge_size != 1)
799 return error("Cannot do a oneway merge of %d trees",
800 merge_size);
801
802 if (!a) {
803 invalidate_ce_path(old);
804 return deleted_entry(old, old);
805 }
806 if (old && same(old, a)) {
807 if (reset) {
808 struct stat st;
809 if (lstat(old->name, &st) ||
810 ce_match_stat(old, &st, 1))
811 old->ce_flags |= htons(CE_UPDATE);
812 }
813 return keep_entry(old);
814 }
815 return merged_entry(a, old);
816}
817
818static int read_cache_unmerged(void)
819{
820 int i;
821 struct cache_entry **dst;
822 struct cache_entry *last = NULL;
823
824 read_cache();
825 dst = active_cache;
826 for (i = 0; i < active_nr; i++) {
827 struct cache_entry *ce = active_cache[i];
828 if (ce_stage(ce)) {
829 if (last && !strcmp(ce->name, last->name))
830 continue;
831 invalidate_ce_path(ce);
832 last = ce;
833 ce->ce_mode = 0;
834 ce->ce_flags &= ~htons(CE_STAGEMASK);
835 }
836 *dst++ = ce;
837 }
838 active_nr = dst - active_cache;
839 return !!last;
840}
841
/*
 * Recursively populate the cache-tree node "it" from "tree": copy the
 * tree's sha1, create sub-nodes for subdirectories, and set
 * entry_count to the total number of non-directory entries below this
 * level, making the cache-tree fully valid.
 */
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
	struct tree_desc desc;
	struct name_entry entry;
	int cnt;

	memcpy(it->sha1, tree->object.sha1, 20);
	desc.buf = tree->buffer;
	desc.size = tree->size;
	cnt = 0;
	while (tree_entry(&desc, &entry)) {
		if (!S_ISDIR(entry.mode))
			cnt++;	/* blobs (and links) count directly */
		else {
			struct cache_tree_sub *sub;
			struct tree *subtree = lookup_tree(entry.sha1);
			if (!subtree->object.parsed)
				parse_tree(subtree);
			sub = cache_tree_sub(it, entry.path);
			sub->cache_tree = cache_tree();
			prime_cache_tree_rec(sub->cache_tree, subtree);
			/* roll the subtree's count into ours */
			cnt += sub->cache_tree->entry_count;
		}
	}
	it->entry_count = cnt;
}
868
869static void prime_cache_tree(void)
870{
871 struct tree *tree = (struct tree *)trees->item;
872 if (!tree)
873 return;
874 active_cache_tree = cache_tree();
875 prime_cache_tree_rec(active_cache_tree, tree);
876
877}
878
/* Usage string, and the lock-protected index file we will rewrite. */
static const char read_tree_usage[] = "git-read-tree (<sha> | [[-m [--aggressive] | --reset | --prefix=<prefix>] [-u | -i]] <sha1> [<sha2> [<sha3>]])";

static struct cache_file cache_file;
882
883int cmd_read_tree(int argc, const char **argv, char **envp)
884{
885 int i, newfd, stage = 0;
886 unsigned char sha1[20];
887 merge_fn_t fn = NULL;
888
889 setup_git_directory();
890 git_config(git_default_config);
891
892 newfd = hold_index_file_for_update(&cache_file, get_index_file());
893 if (newfd < 0)
894 die("unable to create new cachefile");
895
896 git_config(git_default_config);
897
898 merge = 0;
899 reset = 0;
900 for (i = 1; i < argc; i++) {
901 const char *arg = argv[i];
902
903 /* "-u" means "update", meaning that a merge will update
904 * the working tree.
905 */
906 if (!strcmp(arg, "-u")) {
907 update = 1;
908 continue;
909 }
910
911 if (!strcmp(arg, "-v")) {
912 verbose_update = 1;
913 continue;
914 }
915
916 /* "-i" means "index only", meaning that a merge will
917 * not even look at the working tree.
918 */
919 if (!strcmp(arg, "-i")) {
920 index_only = 1;
921 continue;
922 }
923
924 /* "--prefix=<subdirectory>/" means keep the current index
925 * entries and put the entries from the tree under the
926 * given subdirectory.
927 */
928 if (!strncmp(arg, "--prefix=", 9)) {
929 if (stage || merge || prefix)
930 usage(read_tree_usage);
931 prefix = arg + 9;
932 merge = 1;
933 stage = 1;
934 if (read_cache_unmerged())
935 die("you need to resolve your current index first");
936 continue;
937 }
938
939 /* This differs from "-m" in that we'll silently ignore
940 * unmerged entries and overwrite working tree files that
941 * correspond to them.
942 */
943 if (!strcmp(arg, "--reset")) {
944 if (stage || merge || prefix)
945 usage(read_tree_usage);
946 reset = 1;
947 merge = 1;
948 stage = 1;
949 read_cache_unmerged();
950 continue;
951 }
952
953 if (!strcmp(arg, "--trivial")) {
954 trivial_merges_only = 1;
955 continue;
956 }
957
958 if (!strcmp(arg, "--aggressive")) {
959 aggressive = 1;
960 continue;
961 }
962
963 /* "-m" stands for "merge", meaning we start in stage 1 */
964 if (!strcmp(arg, "-m")) {
965 if (stage || merge || prefix)
966 usage(read_tree_usage);
967 if (read_cache_unmerged())
968 die("you need to resolve your current index first");
969 stage = 1;
970 merge = 1;
971 continue;
972 }
973
974 /* using -u and -i at the same time makes no sense */
975 if (1 < index_only + update)
976 usage(read_tree_usage);
977
978 if (get_sha1(arg, sha1))
979 die("Not a valid object name %s", arg);
980 if (list_tree(sha1) < 0)
981 die("failed to unpack tree object %s", arg);
982 stage++;
983 }
984 if ((update||index_only) && !merge)
985 usage(read_tree_usage);
986
987 if (prefix) {
988 int pfxlen = strlen(prefix);
989 int pos;
990 if (prefix[pfxlen-1] != '/')
991 die("prefix must end with /");
992 if (stage != 2)
993 die("binding merge takes only one tree");
994 pos = cache_name_pos(prefix, pfxlen);
995 if (0 <= pos)
996 die("corrupt index file");
997 pos = -pos-1;
998 if (pos < active_nr &&
999 !strncmp(active_cache[pos]->name, prefix, pfxlen))
1000 die("subdirectory '%s' already exists.", prefix);
1001 pos = cache_name_pos(prefix, pfxlen-1);
1002 if (0 <= pos)
1003 die("file '%.*s' already exists.", pfxlen-1, prefix);
1004 }
1005
1006 if (merge) {
1007 if (stage < 2)
1008 die("just how do you expect me to merge %d trees?", stage-1);
1009 switch (stage - 1) {
1010 case 1:
1011 fn = prefix ? bind_merge : oneway_merge;
1012 break;
1013 case 2:
1014 fn = twoway_merge;
1015 break;
1016 case 3:
1017 default:
1018 fn = threeway_merge;
1019 cache_tree_free(&active_cache_tree);
1020 break;
1021 }
1022
1023 if (stage - 1 >= 3)
1024 head_idx = stage - 2;
1025 else
1026 head_idx = 1;
1027 }
1028
1029 unpack_trees(fn);
1030
1031 /*
1032 * When reading only one tree (either the most basic form,
1033 * "-m ent" or "--reset ent" form), we can obtain a fully
1034 * valid cache-tree because the index must match exactly
1035 * what came from the tree.
1036 */
1037 if (trees && trees->item && (!merge || (stage == 2))) {
1038 cache_tree_free(&active_cache_tree);
1039 prime_cache_tree();
1040 }
1041
1042 if (write_cache(newfd, active_cache, active_nr) ||
1043 commit_index_file(&cache_file))
1044 die("unable to write new index file");
1045 return 0;
1046}