/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */
#define DBRT_DEBUG 1

#include "cache.h"

#include "object.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include <sys/time.h>
#include <signal.h>
#include "builtin.h"

static int reset = 0;
static int merge = 0;
static int update = 0;
static int index_only = 0;
static int nontrivial_merge = 0;
static int trivial_merges_only = 0;
static int aggressive = 0;
static int verbose_update = 0;
static volatile int progress_update = 0;
static const char *prefix = NULL;

static int head_idx = -1;
static int merge_size = 0;

static struct object_list *trees = NULL;

static struct cache_entry df_conflict_entry = {
};

struct tree_entry_list {
	struct tree_entry_list *next;
	unsigned directory : 1;
	unsigned executable : 1;
	unsigned symlink : 1;
	unsigned int mode;
	const char *name;
	const unsigned char *sha1;
};

static struct tree_entry_list df_conflict_list = {
	.name = NULL,
	.next = &df_conflict_list
};

typedef int (*merge_fn_t)(struct cache_entry **src);

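/*
 * Convert the flat buffer of a (parsed) tree object into a singly
 * linked list of tree_entry_list nodes, in the order the entries
 * appear in the tree.
 */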
static struct tree_entry_list *create_tree_entry_list(struct tree *tree)
{
	struct tree_desc desc;
	struct name_entry one;
	struct tree_entry_list *ret = NULL;
	struct tree_entry_list **list_p = &ret;

	desc.buf = tree->buffer;
	desc.size = tree->size;

	while (tree_entry(&desc, &one)) {
		struct tree_entry_list *entry;

		entry = xmalloc(sizeof(struct tree_entry_list));
		entry->name = one.path;
		entry->sha1 = one.sha1;
		entry->mode = one.mode;
		entry->directory = S_ISDIR(one.mode) != 0;
		entry->executable = (one.mode & S_IXUSR) != 0;
		entry->symlink = S_ISLNK(one.mode) != 0;
		entry->next = NULL;

		*list_p = entry;
		list_p = &entry->next;
	}
	return ret;
}

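/*
 * Compare two entry names the way pathnames sort in the index: a
 * directory is compared as if its name ended with '/', so e.g. "foo"
 * (directory) sorts after "foo.c" but before "foo0".
 */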
static int entcmp(const char *name1, int dir1, const char *name2, int dir2)
{
	int len1 = strlen(name1);
	int len2 = strlen(name2);
	int len = len1 < len2 ? len1 : len2;
	int ret = memcmp(name1, name2, len);
	unsigned char c1, c2;
	if (ret)
		return ret;
	c1 = name1[len];
	c2 = name2[len];
	if (!c1 && dir1)
		c1 = '/';
	if (!c2 && dir2)
		c2 = '/';
	ret = (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0;
	if (c1 && c2 && !ret)
		ret = len1 - len2;
	return ret;
}

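/*
 * Walk the trees (and, when merging, the index) in parallel, one path
 * at a time.  Each iteration picks the lexically smallest remaining
 * name, gathers the matching entry from each tree and from the index
 * into "src", and either hands that set to the merge function or adds
 * the entries to the index as-is.  Directories recurse with "base"
 * extended by "name/".
 */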
static int unpack_trees_rec(struct tree_entry_list **posns, int len,
			    const char *base, merge_fn_t fn, int *indpos)
{
	int baselen = strlen(base);
	int src_size = len + 1;
	do {
		int i;
		const char *first;
		int firstdir = 0;
		int pathlen;
		unsigned ce_size;
		struct tree_entry_list **subposns;
		struct cache_entry **src;
		int any_files = 0;
		int any_dirs = 0;
		char *cache_name;
		int ce_stage;

		/* Find the first name in the input. */

		first = NULL;
		cache_name = NULL;

		/* Check the cache */
		if (merge && *indpos < active_nr) {
			/* This is a bit tricky: */
			/* If the index has a subdirectory (with
			 * contents) as the first name, it'll get a
			 * filename like "foo/bar". But that's after
			 * "foo", so the entry in trees will get
			 * handled first, at which point we'll go into
			 * "foo", and deal with "bar" from the index,
			 * because the base will be "foo/". The only
			 * way we can actually have "foo/bar" first of
			 * all the things is if the trees don't
			 * contain "foo" at all, in which case we'll
			 * handle "foo/bar" without going into the
			 * directory, but that's fine (and will return
			 * an error anyway, with the added unknown
			 * file case).
			 */

			cache_name = active_cache[*indpos]->name;
			if (strlen(cache_name) > baselen &&
			    !memcmp(cache_name, base, baselen)) {
				cache_name += baselen;
				first = cache_name;
			} else {
				cache_name = NULL;
			}
		}

#if DBRT_DEBUG > 1
		if (first)
			printf("index %s\n", first);
#endif
		for (i = 0; i < len; i++) {
			if (!posns[i] || posns[i] == &df_conflict_list)
				continue;
#if DBRT_DEBUG > 1
			printf("%d %s\n", i + 1, posns[i]->name);
#endif
			if (!first || entcmp(first, firstdir,
					     posns[i]->name,
					     posns[i]->directory) > 0) {
				first = posns[i]->name;
				firstdir = posns[i]->directory;
			}
		}
		/* No name means we're done */
		if (!first)
			return 0;

		pathlen = strlen(first);
		ce_size = cache_entry_size(baselen + pathlen);

		src = xcalloc(src_size, sizeof(struct cache_entry *));

		subposns = xcalloc(len, sizeof(struct tree_entry_list *));

		if (cache_name && !strcmp(cache_name, first)) {
			any_files = 1;
			src[0] = active_cache[*indpos];
			remove_cache_entry_at(*indpos);
		}

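		/*
		 * Pick up the entry named "first" from each tree: a
		 * directory/file conflict stand-in keeps df_conflict_entry,
		 * a subdirectory is queued for the recursive walk, and a
		 * regular entry becomes a staged cache entry.
		 */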
		for (i = 0; i < len; i++) {
			struct cache_entry *ce;

			if (!posns[i] ||
			    (posns[i] != &df_conflict_list &&
			     strcmp(first, posns[i]->name))) {
				continue;
			}

			if (posns[i] == &df_conflict_list) {
				src[i + merge] = &df_conflict_entry;
				continue;
			}

			if (posns[i]->directory) {
				struct tree *tree = lookup_tree(posns[i]->sha1);
				any_dirs = 1;
				parse_tree(tree);
				subposns[i] = create_tree_entry_list(tree);
				posns[i] = posns[i]->next;
				src[i + merge] = &df_conflict_entry;
				continue;
			}

			if (!merge)
				ce_stage = 0;
			else if (i + 1 < head_idx)
				ce_stage = 1;
			else if (i + 1 > head_idx)
				ce_stage = 3;
			else
				ce_stage = 2;

			ce = xcalloc(1, ce_size);
			ce->ce_mode = create_ce_mode(posns[i]->mode);
			ce->ce_flags = create_ce_flags(baselen + pathlen,
						       ce_stage);
			memcpy(ce->name, base, baselen);
			memcpy(ce->name + baselen, first, pathlen + 1);

			any_files = 1;

			memcpy(ce->sha1, posns[i]->sha1, 20);
			src[i + merge] = ce;
			subposns[i] = &df_conflict_list;
			posns[i] = posns[i]->next;
		}
		if (any_files) {
			if (merge) {
				int ret;

#if DBRT_DEBUG > 1
				printf("%s:\n", first);
				for (i = 0; i < src_size; i++) {
					printf(" %d ", i);
					if (src[i])
						printf("%s\n", sha1_to_hex(src[i]->sha1));
					else
						printf("\n");
				}
#endif
				ret = fn(src);

#if DBRT_DEBUG > 1
				printf("Added %d entries\n", ret);
#endif
				*indpos += ret;
			} else {
				for (i = 0; i < src_size; i++) {
					if (src[i]) {
						add_cache_entry(src[i], ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
					}
				}
			}
		}
		if (any_dirs) {
			char *newbase = xmalloc(baselen + 2 + pathlen);
			memcpy(newbase, base, baselen);
			memcpy(newbase + baselen, first, pathlen);
			newbase[baselen + pathlen] = '/';
			newbase[baselen + pathlen + 1] = '\0';
			if (unpack_trees_rec(subposns, len, newbase, fn,
					     indpos))
				return -1;
			free(newbase);
		}
		free(subposns);
		free(src);
	} while (1);
}

static void reject_merge(struct cache_entry *ce)
{
	die("Entry '%s' would be overwritten by merge. Cannot merge.",
	    ce->name);
}

/* Unlink the last component and attempt to remove leading
 * directories, in case this unlink is the removal of the
 * last entry in the directory -- empty directories are removed.
 */
static void unlink_entry(char *name)
{
	char *cp, *prev;

	if (unlink(name))
		return;
	prev = NULL;
	while (1) {
		int status;
		cp = strrchr(name, '/');
		if (prev)
			*prev = '/';
		if (!cp)
			break;

		*cp = 0;
		status = rmdir(name);
		if (status) {
			*cp = '/';
			break;
		}
		prev = cp;
	}
}

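/*
 * Progress reporting: a one-second interval timer delivers SIGALRM and
 * the handler only sets progress_update; check_updates() polls that
 * flag so the percentage line is refreshed at least once a second even
 * when the percentage itself has not changed.
 */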
static void progress_interval(int signum)
{
	progress_update = 1;
}

static void setup_progress_signal(void)
{
	struct sigaction sa;
	struct itimerval v;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = progress_interval;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;
	sigaction(SIGALRM, &sa, NULL);

	v.it_interval.tv_sec = 1;
	v.it_interval.tv_usec = 0;
	v.it_value = v.it_interval;
	setitimer(ITIMER_REAL, &v, NULL);
}

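/*
 * Apply the merge result to the working tree (when -u is in effect):
 * entries with ce_mode == 0 are removals, entries flagged CE_UPDATE
 * are checked out.  With -v, a percentage counter is shown for
 * updates touching 250 paths or more.
 */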
static void check_updates(struct cache_entry **src, int nr)
{
	static struct checkout state = {
		.base_dir = "",
		.force = 1,
		.quiet = 1,
		.refresh_cache = 1,
	};
	unsigned short mask = htons(CE_UPDATE);
	unsigned last_percent = 200, cnt = 0, total = 0;

	if (update && verbose_update) {
		for (total = cnt = 0; cnt < nr; cnt++) {
			struct cache_entry *ce = src[cnt];
			if (!ce->ce_mode || ce->ce_flags & mask)
				total++;
		}

		/* Don't bother doing this for very small updates */
		if (total < 250)
			total = 0;

		if (total) {
			fprintf(stderr, "Checking files out...\n");
			setup_progress_signal();
			progress_update = 1;
		}
		cnt = 0;
	}

	while (nr--) {
		struct cache_entry *ce = *src++;

		if (total) {
			if (!ce->ce_mode || ce->ce_flags & mask) {
				unsigned percent;
				cnt++;
				percent = (cnt * 100) / total;
				if (percent != last_percent ||
				    progress_update) {
					fprintf(stderr, "%4u%% (%u/%u) done\r",
						percent, cnt, total);
					last_percent = percent;
					progress_update = 0;
				}
			}
		}
		if (!ce->ce_mode) {
			if (update)
				unlink_entry(ce->name);
			continue;
		}
		if (ce->ce_flags & mask) {
			ce->ce_flags &= ~mask;
			if (update)
				checkout_entry(ce, &state, NULL);
		}
	}
	if (total) {
		signal(SIGALRM, SIG_IGN);
		fputc('\n', stderr);
	}
}

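/*
 * Build a tree_entry_list for each tree given on the command line,
 * run the recursive walk over them with the chosen merge function,
 * and then bring the working tree in sync with the resulting index.
 */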
static int unpack_trees(merge_fn_t fn)
{
	int indpos = 0;
	unsigned len = object_list_length(trees);
	struct tree_entry_list **posns;
	int i;
	struct object_list *posn = trees;
	merge_size = len;

	if (len) {
		posns = xmalloc(len * sizeof(struct tree_entry_list *));
		for (i = 0; i < len; i++) {
			posns[i] = create_tree_entry_list((struct tree *) posn->item);
			posn = posn->next;
		}
		if (unpack_trees_rec(posns, len, prefix ? prefix : "",
				     fn, &indpos))
			return -1;
	}

	if (trivial_merges_only && nontrivial_merge)
		die("Merge requires file-level merging");

	check_updates(active_cache, active_nr);
	return 0;
}

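/*
 * Turn the given object name into a tree (peeling commits and tags)
 * and append it to the global list of trees to unpack.
 */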
static int list_tree(unsigned char *sha1)
{
	struct tree *tree = parse_tree_indirect(sha1);
	if (!tree)
		return -1;
	object_list_append(&tree->object, &trees);
	return 0;
}

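/*
 * Two cache entries are "the same" when both are present with equal
 * mode and object name, or when both are absent.
 */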
static int same(struct cache_entry *a, struct cache_entry *b)
{
	if (!!a != !!b)
		return 0;
	if (!a && !b)
		return 1;
	return a->ce_mode == b->ce_mode &&
	       !memcmp(a->sha1, b->sha1, 20);
}


/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static void verify_uptodate(struct cache_entry *ce)
{
	struct stat st;

	if (index_only || reset)
		return;

	if (!lstat(ce->name, &st)) {
		unsigned changed = ce_match_stat(ce, &st, 1);
		if (!changed)
			return;
		errno = 0;
	}
	if (reset) {
		ce->ce_flags |= htons(CE_UPDATE);
		return;
	}
	if (errno == ENOENT)
		return;
	die("Entry '%s' not uptodate. Cannot merge.", ce->name);
}

static void invalidate_ce_path(struct cache_entry *ce)
{
	if (ce)
		cache_tree_invalidate_path(active_cache_tree, ce->name);
}

/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked.
 */
static void verify_absent(const char *path, const char *action)
{
	struct stat st;

	if (index_only || reset || !update)
		return;
	if (!lstat(path, &st))
		die("Untracked working tree file '%s' "
		    "would be %s by merge.", path, action);
}

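/*
 * Take "merge" as the result for this path: stage it at stage 0 and
 * flag it CE_UPDATE so check_updates() writes it out, unless the old
 * index entry already matches, in which case its stat information
 * (and cleared update flag) is carried over.
 */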
static int merged_entry(struct cache_entry *merge, struct cache_entry *old)
{
	merge->ce_flags |= htons(CE_UPDATE);
	if (old) {
		/*
		 * See if we can re-use the old CE directly?
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on
		 * a match.
		 */
		if (same(old, merge)) {
			*merge = *old;
		} else {
			verify_uptodate(old);
			invalidate_ce_path(old);
		}
	}
	else {
		verify_absent(merge->name, "overwritten");
		invalidate_ce_path(merge);
	}

	merge->ce_flags &= ~htons(CE_STAGEMASK);
	add_cache_entry(merge, ADD_CACHE_OK_TO_ADD);
	return 1;
}

static int deleted_entry(struct cache_entry *ce, struct cache_entry *old)
{
	if (old)
		verify_uptodate(old);
	else
		verify_absent(ce->name, "removed");
	ce->ce_mode = 0;
	add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
	invalidate_ce_path(ce);
	return 1;
}

static int keep_entry(struct cache_entry *ce)
{
	add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
	return 1;
}

#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ntohl(ce->ce_mode),
			sha1_to_hex(ce->sha1),
			ce_stage(ce),
			ce->name);
}
#endif

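/*
 * Three-way merge.  The case numbers (#1, #2ALT, #16, ...) in the
 * comments below refer to the trivial merge case table described in
 * the git-read-tree documentation.
 */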
static int threeway_merge(struct cache_entry **stages)
{
	struct cache_entry *index;
	struct cache_entry *head;
	struct cache_entry *remote = stages[head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;
	const char *path = NULL;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	for (i = 1; i < head_idx; i++) {
		if (!stages[i])
			any_anc_missing = 1;
		else {
			if (!path)
				path = stages[i]->name;
			no_anc_exists = 0;
		}
	}

	index = stages[0];
	head = stages[head_idx];

	if (head == &df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == &df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	if (!path && index)
		path = index->name;
	if (!path && head)
		path = head->name;
	if (!path && remote)
		path = remote->name;

	/* First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/* We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			reject_merge(index);
		return merged_entry(remote, index);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head)) {
		reject_merge(index);
	}

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/* Under the new "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (aggressive) {
		int head_deleted = !head && !df_conflict_head;
		int remote_deleted = !remote && !df_conflict_remote;
		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index);
			else if (path)
				verify_absent(path, "removed");
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		verify_uptodate(index);
	}
	else if (path)
		verify_absent(path, "overwritten");

	nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		for (i = 1; i < head_idx; i++) {
			if (stages[i]) {
				keep_entry(stages[i]);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head); }
	if (remote) { count += keep_entry(remote); }
	return count;
}

/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast forward", favoring a successful merge
 * over a merge failure when it makes sense. For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 *
 */
static int twoway_merge(struct cache_entry **src)
{
	struct cache_entry *current = src[0];
	struct cache_entry *oldtree = src[1], *newtree = src[2];

	if (merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     merge_size);

	if (current) {
		if ((!oldtree && !newtree) || /* 4 and 5 */
		    (!oldtree && newtree &&
		     same(current, newtree)) || /* 6 and 7 */
		    (oldtree && newtree &&
		     same(oldtree, newtree)) || /* 14 and 15 */
		    (oldtree && newtree &&
		     !same(oldtree, newtree) && /* 18 and 19 */
		     same(current, newtree))) {
			return keep_entry(current);
		}
		else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current);
		}
		else if (oldtree && newtree &&
			 same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current);
		}
		else {
			/* all other failures */
			if (oldtree)
				reject_merge(oldtree);
			if (current)
				reject_merge(current);
			if (newtree)
				reject_merge(newtree);
			return -1;
		}
	}
	else if (newtree)
		return merged_entry(newtree, current);
	else
		return deleted_entry(oldtree, current);
}

/*
 * Bind merge.
 *
 * Keep the index entries at stage0, collapse stage1 but make sure
 * stage0 does not have anything there.
 */
static int bind_merge(struct cache_entry **src)
{
	struct cache_entry *old = src[0];
	struct cache_entry *a = src[1];

	if (merge_size != 1)
		return error("Cannot do a bind merge of %d trees",
			     merge_size);
	if (a && old)
		die("Entry '%s' overlaps. Cannot bind.", a->name);
	if (!a)
		return keep_entry(old);
	else
		return merged_entry(a, NULL);
}

/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
static int oneway_merge(struct cache_entry **src)
{
	struct cache_entry *old = src[0];
	struct cache_entry *a = src[1];

	if (merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     merge_size);

	if (!a)
		return deleted_entry(old, old);
	if (old && same(old, a)) {
		if (reset) {
			struct stat st;
			if (lstat(old->name, &st) ||
			    ce_match_stat(old, &st, 1))
				old->ce_flags |= htons(CE_UPDATE);
		}
		return keep_entry(old);
	}
	return merged_entry(a, old);
}

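/*
 * Read the index, and for any path with unmerged (higher-stage)
 * entries keep only the first one, demoted to stage 0 and marked for
 * removal, so a subsequent merge starts from a clean slate.  Returns
 * non-zero if any unmerged entries were found.
 */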
static int read_cache_unmerged(void)
{
	int i;
	struct cache_entry **dst;
	struct cache_entry *last = NULL;

	read_cache();
	dst = active_cache;
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = active_cache[i];
		if (ce_stage(ce)) {
			if (last && !strcmp(ce->name, last->name))
				continue;
			invalidate_ce_path(ce);
			last = ce;
			ce->ce_mode = 0;
			ce->ce_flags &= ~htons(CE_STAGEMASK);
		}
		*dst++ = ce;
	}
	active_nr = dst - active_cache;
	return !!last;
}

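/*
 * Rebuild the cache-tree from the tree we just read, recursively
 * recording the object name and entry count of every subdirectory.
 * This is valid because after a one-tree read the index matches the
 * tree exactly.
 */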
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
	struct tree_desc desc;
	struct name_entry entry;
	int cnt;

	memcpy(it->sha1, tree->object.sha1, 20);
	desc.buf = tree->buffer;
	desc.size = tree->size;
	cnt = 0;
	while (tree_entry(&desc, &entry)) {
		if (!S_ISDIR(entry.mode))
			cnt++;
		else {
			struct cache_tree_sub *sub;
			struct tree *subtree = lookup_tree(entry.sha1);
			if (!subtree->object.parsed)
				parse_tree(subtree);
			sub = cache_tree_sub(it, entry.path);
			sub->cache_tree = cache_tree();
			prime_cache_tree_rec(sub->cache_tree, subtree);
			cnt += sub->cache_tree->entry_count;
		}
	}
	it->entry_count = cnt;
}

static void prime_cache_tree(void)
{
	struct tree *tree = (struct tree *)trees->item;
	if (!tree)
		return;
	active_cache_tree = cache_tree();
	prime_cache_tree_rec(active_cache_tree, tree);
}

static const char read_tree_usage[] = "git-read-tree (<sha> | [[-m [--aggressive] | --reset | --prefix=<prefix>] [-u | -i]] <sha1> [<sha2> [<sha3>]])";

static struct lock_file lock_file;

int cmd_read_tree(int argc, const char **argv, char **envp)
{
	int i, newfd, stage = 0;
	unsigned char sha1[20];
	merge_fn_t fn = NULL;

	setup_git_directory();
	git_config(git_default_config);

	newfd = hold_lock_file_for_update(&lock_file, get_index_file());
	if (newfd < 0)
		die("unable to create new index file");

	merge = 0;
	reset = 0;
	for (i = 1; i < argc; i++) {
		const char *arg = argv[i];

		/* "-u" means "update", meaning that a merge will update
		 * the working tree.
		 */
		if (!strcmp(arg, "-u")) {
			update = 1;
			continue;
		}

		if (!strcmp(arg, "-v")) {
			verbose_update = 1;
			continue;
		}

		/* "-i" means "index only", meaning that a merge will
		 * not even look at the working tree.
		 */
		if (!strcmp(arg, "-i")) {
			index_only = 1;
			continue;
		}

		/* "--prefix=<subdirectory>/" means keep the current index
		 * entries and put the entries from the tree under the
		 * given subdirectory.
		 */
		if (!strncmp(arg, "--prefix=", 9)) {
			if (stage || merge || prefix)
				usage(read_tree_usage);
			prefix = arg + 9;
			merge = 1;
			stage = 1;
			if (read_cache_unmerged())
				die("you need to resolve your current index first");
			continue;
		}

		/* This differs from "-m" in that we'll silently ignore
		 * unmerged entries and overwrite working tree files that
		 * correspond to them.
		 */
		if (!strcmp(arg, "--reset")) {
			if (stage || merge || prefix)
				usage(read_tree_usage);
			reset = 1;
			merge = 1;
			stage = 1;
			read_cache_unmerged();
			continue;
		}

		if (!strcmp(arg, "--trivial")) {
			trivial_merges_only = 1;
			continue;
		}

		if (!strcmp(arg, "--aggressive")) {
			aggressive = 1;
			continue;
		}

		/* "-m" stands for "merge", meaning we start in stage 1 */
		if (!strcmp(arg, "-m")) {
			if (stage || merge || prefix)
				usage(read_tree_usage);
			if (read_cache_unmerged())
				die("you need to resolve your current index first");
			stage = 1;
			merge = 1;
			continue;
		}

		/* using -u and -i at the same time makes no sense */
		if (1 < index_only + update)
			usage(read_tree_usage);

		if (get_sha1(arg, sha1))
			die("Not a valid object name %s", arg);
		if (list_tree(sha1) < 0)
			die("failed to unpack tree object %s", arg);
		stage++;
	}
	if ((update||index_only) && !merge)
		usage(read_tree_usage);

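	/*
	 * A bind merge ("--prefix=<dir>/") grafts exactly one tree under
	 * a subdirectory that must not already exist in the index, either
	 * as a directory or as a file.
	 */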
	if (prefix) {
		int pfxlen = strlen(prefix);
		int pos;
		if (prefix[pfxlen-1] != '/')
			die("prefix must end with /");
		if (stage != 2)
			die("binding merge takes only one tree");
		pos = cache_name_pos(prefix, pfxlen);
		if (0 <= pos)
			die("corrupt index file");
		pos = -pos-1;
		if (pos < active_nr &&
		    !strncmp(active_cache[pos]->name, prefix, pfxlen))
			die("subdirectory '%s' already exists.", prefix);
		pos = cache_name_pos(prefix, pfxlen-1);
		if (0 <= pos)
			die("file '%.*s' already exists.", pfxlen-1, prefix);
	}

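	/*
	 * Pick the merge function from the number of trees read: one tree
	 * is a one-way (or bind) merge, two trees a two-way merge, three
	 * or more a three-way merge where the last two trees are the head
	 * and the merged remote.
	 */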
	if (merge) {
		if (stage < 2)
			die("just how do you expect me to merge %d trees?", stage-1);
		switch (stage - 1) {
		case 1:
			fn = prefix ? bind_merge : oneway_merge;
			break;
		case 2:
			fn = twoway_merge;
			break;
		case 3:
		default:
			fn = threeway_merge;
			cache_tree_free(&active_cache_tree);
			break;
		}

		if (stage - 1 >= 3)
			head_idx = stage - 2;
		else
			head_idx = 1;
	}

	unpack_trees(fn);

	/*
	 * When reading only one tree (either the most basic form,
	 * "-m ent" or "--reset ent" form), we can obtain a fully
	 * valid cache-tree because the index must match exactly
	 * what came from the tree.
	 */
	if (trees && trees->item && !prefix && (!merge || (stage == 2))) {
		cache_tree_free(&active_cache_tree);
		prime_cache_tree();
	}

	if (write_cache(newfd, active_cache, active_nr) ||
	    commit_lock_file(&lock_file))
		die("unable to write new index file");
	return 0;
}