/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */
#define DBRT_DEBUG 1

#include "cache.h"

#include "object.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include <sys/time.h>
#include <signal.h>
#include "builtin.h"

static int reset = 0;
static int merge = 0;
static int update = 0;
static int index_only = 0;
static int nontrivial_merge = 0;
static int trivial_merges_only = 0;
static int aggressive = 0;
static int verbose_update = 0;
static volatile int progress_update = 0;
static const char *prefix = NULL;

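/*
 * head_idx is the stage number at which the "head" tree sits among
 * the trees being merged; merge_size is how many trees are being
 * read.  Both are set up in cmd_read_tree()/unpack_trees() before
 * any merge function runs.
 */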
static int head_idx = -1;
static int merge_size = 0;

static struct object_list *trees = NULL;

static struct cache_entry df_conflict_entry;

struct tree_entry_list {
	struct tree_entry_list *next;
	unsigned directory : 1;
	unsigned executable : 1;
	unsigned symlink : 1;
	unsigned int mode;
	const char *name;
	const unsigned char *sha1;
};

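/*
 * df_conflict_entry and df_conflict_list are sentinels used to mark a
 * slot where one tree has a directory while another has a file at the
 * same path (a D/F conflict); they carry no real entry data.
 */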
static struct tree_entry_list df_conflict_list;

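/*
 * A merge function is handed the candidate entries for one path,
 * indexed by stage (src[0] is the current index entry when merging),
 * and returns the number of entries it added for that path, which the
 * caller uses to advance its position in the index.
 */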
typedef int (*merge_fn_t)(struct cache_entry **src);

static struct tree_entry_list *create_tree_entry_list(struct tree *tree)
{
	struct tree_desc desc;
	struct name_entry one;
	struct tree_entry_list *ret = NULL;
	struct tree_entry_list **list_p = &ret;

	desc.buf = tree->buffer;
	desc.size = tree->size;

	while (tree_entry(&desc, &one)) {
		struct tree_entry_list *entry;

		entry = xmalloc(sizeof(struct tree_entry_list));
		entry->name = one.path;
		entry->sha1 = one.sha1;
		entry->mode = one.mode;
		entry->directory = S_ISDIR(one.mode) != 0;
		entry->executable = (one.mode & S_IXUSR) != 0;
		entry->symlink = S_ISLNK(one.mode) != 0;
		entry->next = NULL;

		*list_p = entry;
		list_p = &entry->next;
	}
	return ret;
}

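/*
 * Compare two entry names in tree order: a directory entry is compared
 * as if its name ended with '/'.
 */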
static int entcmp(const char *name1, int dir1, const char *name2, int dir2)
{
	int len1 = strlen(name1);
	int len2 = strlen(name2);
	int len = len1 < len2 ? len1 : len2;
	int ret = memcmp(name1, name2, len);
	unsigned char c1, c2;
	if (ret)
		return ret;
	c1 = name1[len];
	c2 = name2[len];
	if (!c1 && dir1)
		c1 = '/';
	if (!c2 && dir2)
		c2 = '/';
	ret = (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0;
	if (c1 && c2 && !ret)
		ret = len1 - len2;
	return ret;
}

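/*
 * Walk the given tree positions (and, when merging, the index) in
 * parallel under "base", one path at a time.  The candidate entries
 * for each path are collected into src[] and handed to the merge
 * function; subdirectories are recursed into with an extended base.
 */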
static int unpack_trees_rec(struct tree_entry_list **posns, int len,
			    const char *base, merge_fn_t fn, int *indpos)
{
	int baselen = strlen(base);
	int src_size = len + 1;
	do {
		int i;
		const char *first;
		int firstdir = 0;
		int pathlen;
		unsigned ce_size;
		struct tree_entry_list **subposns;
		struct cache_entry **src;
		int any_files = 0;
		int any_dirs = 0;
		char *cache_name;
		int ce_stage;

		/* Find the first name in the input. */

		first = NULL;
		cache_name = NULL;

		/* Check the cache */
		if (merge && *indpos < active_nr) {
			/* This is a bit tricky: */
			/* If the index has a subdirectory (with
			 * contents) as the first name, it'll get a
			 * filename like "foo/bar". But that's after
			 * "foo", so the entry in trees will get
			 * handled first, at which point we'll go into
			 * "foo", and deal with "bar" from the index,
			 * because the base will be "foo/". The only
			 * way we can actually have "foo/bar" first of
			 * all the things is if the trees don't
			 * contain "foo" at all, in which case we'll
			 * handle "foo/bar" without going into the
			 * directory, but that's fine (and will return
			 * an error anyway, with the added unknown
			 * file case).
			 */

			cache_name = active_cache[*indpos]->name;
			if (strlen(cache_name) > baselen &&
			    !memcmp(cache_name, base, baselen)) {
				cache_name += baselen;
				first = cache_name;
			} else {
				cache_name = NULL;
			}
		}

#if DBRT_DEBUG > 1
		if (first)
			printf("index %s\n", first);
#endif
		for (i = 0; i < len; i++) {
			if (!posns[i] || posns[i] == &df_conflict_list)
				continue;
#if DBRT_DEBUG > 1
			printf("%d %s\n", i + 1, posns[i]->name);
#endif
			if (!first || entcmp(first, firstdir,
					     posns[i]->name,
					     posns[i]->directory) > 0) {
				first = posns[i]->name;
				firstdir = posns[i]->directory;
			}
		}
		/* No name means we're done */
		if (!first)
			return 0;

		pathlen = strlen(first);
		ce_size = cache_entry_size(baselen + pathlen);

		src = xcalloc(src_size, sizeof(struct cache_entry *));

		subposns = xcalloc(len, sizeof(struct tree_entry_list *));

		if (cache_name && !strcmp(cache_name, first)) {
			any_files = 1;
			src[0] = active_cache[*indpos];
			remove_cache_entry_at(*indpos);
		}

		for (i = 0; i < len; i++) {
			struct cache_entry *ce;

			if (!posns[i] ||
			    (posns[i] != &df_conflict_list &&
			     strcmp(first, posns[i]->name))) {
				continue;
			}

			if (posns[i] == &df_conflict_list) {
				src[i + merge] = &df_conflict_entry;
				continue;
			}

			if (posns[i]->directory) {
				struct tree *tree = lookup_tree(posns[i]->sha1);
				any_dirs = 1;
				parse_tree(tree);
				subposns[i] = create_tree_entry_list(tree);
				posns[i] = posns[i]->next;
				src[i + merge] = &df_conflict_entry;
				continue;
			}

			if (!merge)
				ce_stage = 0;
			else if (i + 1 < head_idx)
				ce_stage = 1;
			else if (i + 1 > head_idx)
				ce_stage = 3;
			else
				ce_stage = 2;

			ce = xcalloc(1, ce_size);
			ce->ce_mode = create_ce_mode(posns[i]->mode);
			ce->ce_flags = create_ce_flags(baselen + pathlen,
						       ce_stage);
			memcpy(ce->name, base, baselen);
			memcpy(ce->name + baselen, first, pathlen + 1);

			any_files = 1;

			memcpy(ce->sha1, posns[i]->sha1, 20);
			src[i + merge] = ce;
			subposns[i] = &df_conflict_list;
			posns[i] = posns[i]->next;
		}
		if (any_files) {
			if (merge) {
				int ret;

#if DBRT_DEBUG > 1
				printf("%s:\n", first);
				for (i = 0; i < src_size; i++) {
					printf(" %d ", i);
					if (src[i])
						printf("%s\n", sha1_to_hex(src[i]->sha1));
					else
						printf("\n");
				}
#endif
				ret = fn(src);

#if DBRT_DEBUG > 1
				printf("Added %d entries\n", ret);
#endif
				*indpos += ret;
			} else {
				for (i = 0; i < src_size; i++) {
					if (src[i]) {
						add_cache_entry(src[i], ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
					}
				}
			}
		}
		if (any_dirs) {
			char *newbase = xmalloc(baselen + 2 + pathlen);
			memcpy(newbase, base, baselen);
			memcpy(newbase + baselen, first, pathlen);
			newbase[baselen + pathlen] = '/';
			newbase[baselen + pathlen + 1] = '\0';
			if (unpack_trees_rec(subposns, len, newbase, fn,
					     indpos))
				return -1;
			free(newbase);
		}
		free(subposns);
		free(src);
	} while (1);
}

static void reject_merge(struct cache_entry *ce)
{
	die("Entry '%s' would be overwritten by merge. Cannot merge.",
	    ce->name);
}

/* Unlink the last component and attempt to remove leading
 * directories, in case this unlink is the removal of the
 * last entry in the directory -- empty directories are removed.
 */
static void unlink_entry(char *name)
{
	char *cp, *prev;

	if (unlink(name))
		return;
	prev = NULL;
	while (1) {
		int status;
		cp = strrchr(name, '/');
		if (prev)
			*prev = '/';
		if (!cp)
			break;

		*cp = 0;
		status = rmdir(name);
		if (status) {
			*cp = '/';
			break;
		}
		prev = cp;
	}
}

static void progress_interval(int signum)
{
	progress_update = 1;
}

static void setup_progress_signal(void)
{
	struct sigaction sa;
	struct itimerval v;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = progress_interval;
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART;
	sigaction(SIGALRM, &sa, NULL);

	v.it_interval.tv_sec = 1;
	v.it_interval.tv_usec = 0;
	v.it_value = v.it_interval;
	setitimer(ITIMER_REAL, &v, NULL);
}

static struct checkout state;
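/*
 * Walk the resulting index: entries whose mode was cleared are removed
 * from the working tree and entries flagged CE_UPDATE are checked out
 * (both only with -u), with a progress display for larger updates.
 */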
static void check_updates(struct cache_entry **src, int nr)
{
	unsigned short mask = htons(CE_UPDATE);
	unsigned last_percent = 200, cnt = 0, total = 0;

	if (update && verbose_update) {
		for (total = cnt = 0; cnt < nr; cnt++) {
			struct cache_entry *ce = src[cnt];
			if (!ce->ce_mode || ce->ce_flags & mask)
				total++;
		}

		/* Don't bother doing this for very small updates */
		if (total < 250)
			total = 0;

		if (total) {
			fprintf(stderr, "Checking files out...\n");
			setup_progress_signal();
			progress_update = 1;
		}
		cnt = 0;
	}

	while (nr--) {
		struct cache_entry *ce = *src++;

		if (total) {
			if (!ce->ce_mode || ce->ce_flags & mask) {
				unsigned percent;
				cnt++;
				percent = (cnt * 100) / total;
				if (percent != last_percent ||
				    progress_update) {
					fprintf(stderr, "%4u%% (%u/%u) done\r",
						percent, cnt, total);
					last_percent = percent;
					progress_update = 0;
				}
			}
		}
		if (!ce->ce_mode) {
			if (update)
				unlink_entry(ce->name);
			continue;
		}
		if (ce->ce_flags & mask) {
			ce->ce_flags &= ~mask;
			if (update)
				checkout_entry(ce, &state, NULL);
		}
	}
	if (total) {
		signal(SIGALRM, SIG_IGN);
		fputc('\n', stderr);
	}
}

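/*
 * Build the entry list for each tree to be read, unpack them with the
 * given merge function, and apply the result to the working tree via
 * check_updates().
 */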
static int unpack_trees(merge_fn_t fn)
{
	int indpos = 0;
	unsigned len = object_list_length(trees);
	struct tree_entry_list **posns;
	int i;
	struct object_list *posn = trees;
	merge_size = len;

	if (len) {
		posns = xmalloc(len * sizeof(struct tree_entry_list *));
		for (i = 0; i < len; i++) {
			posns[i] = create_tree_entry_list((struct tree *) posn->item);
			posn = posn->next;
		}
		if (unpack_trees_rec(posns, len, prefix ? prefix : "",
				     fn, &indpos))
			return -1;
	}

	if (trivial_merges_only && nontrivial_merge)
		die("Merge requires file-level merging");

	check_updates(active_cache, active_nr);
	return 0;
}

static int list_tree(unsigned char *sha1)
{
	struct tree *tree = parse_tree_indirect(sha1);
	if (!tree)
		return -1;
	object_list_append(&tree->object, &trees);
	return 0;
}

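/*
 * Two entries are the "same" if both exist with equal mode and blob;
 * two missing entries also count as the same.
 */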
static int same(struct cache_entry *a, struct cache_entry *b)
{
	if (!!a != !!b)
		return 0;
	if (!a && !b)
		return 1;
	return a->ce_mode == b->ce_mode &&
	       !memcmp(a->sha1, b->sha1, 20);
}


/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static void verify_uptodate(struct cache_entry *ce)
{
	struct stat st;

	if (index_only || reset)
		return;

	if (!lstat(ce->name, &st)) {
		unsigned changed = ce_match_stat(ce, &st, 1);
		if (!changed)
			return;
		errno = 0;
	}
	if (reset) {
		ce->ce_flags |= htons(CE_UPDATE);
		return;
	}
	if (errno == ENOENT)
		return;
	die("Entry '%s' not uptodate. Cannot merge.", ce->name);
}

static void invalidate_ce_path(struct cache_entry *ce)
{
	if (ce)
		cache_tree_invalidate_path(active_cache_tree, ce->name);
}

/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked.
 */
static void verify_absent(const char *path, const char *action)
{
	struct stat st;

	if (index_only || reset || !update)
		return;
	if (!lstat(path, &st))
		die("Untracked working tree file '%s' "
		    "would be %s by merge.", path, action);
}

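/*
 * Record "merge" as the result for this path: it goes in at stage 0
 * and is flagged CE_UPDATE so that check_updates() writes it out,
 * unless the existing entry already matches and can be reused.
 */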
static int merged_entry(struct cache_entry *merge, struct cache_entry *old)
{
	merge->ce_flags |= htons(CE_UPDATE);
	if (old) {
		/*
		 * See if we can re-use the old CE directly?
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on
		 * a match.
		 */
		if (same(old, merge)) {
			*merge = *old;
		} else {
			verify_uptodate(old);
			invalidate_ce_path(old);
		}
	}
	else {
		verify_absent(merge->name, "overwritten");
		invalidate_ce_path(merge);
	}

	merge->ce_flags &= ~htons(CE_STAGEMASK);
	add_cache_entry(merge, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
	return 1;
}

static int deleted_entry(struct cache_entry *ce, struct cache_entry *old)
{
	if (old)
		verify_uptodate(old);
	else
		verify_absent(ce->name, "removed");
	ce->ce_mode = 0;
	add_cache_entry(ce, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
	invalidate_ce_path(ce);
	return 1;
}

static int keep_entry(struct cache_entry *ce)
{
	add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
	return 1;
}

#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ntohl(ce->ce_mode),
			sha1_to_hex(ce->sha1),
			ce_stage(ce),
			ce->name);
}
#endif

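/*
 * Three-way merge of a single path.  The #N numbers in the comments
 * below label the individual trivial-merge cases that the read-tree
 * tests and documentation enumerate.
 */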
static int threeway_merge(struct cache_entry **stages)
{
	struct cache_entry *index;
	struct cache_entry *head;
	struct cache_entry *remote = stages[head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;
	const char *path = NULL;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	for (i = 1; i < head_idx; i++) {
		if (!stages[i])
			any_anc_missing = 1;
		else {
			if (!path)
				path = stages[i]->name;
			no_anc_exists = 0;
		}
	}

	index = stages[0];
	head = stages[head_idx];

	if (head == &df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == &df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	if (!path && index)
		path = index->name;
	if (!path && head)
		path = head->name;
	if (!path && remote)
		path = remote->name;

	/* First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/* We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			reject_merge(index);
		return merged_entry(remote, index);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head)) {
		reject_merge(index);
	}

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/* Under the new "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (aggressive) {
		int head_deleted = !head && !df_conflict_head;
		int remote_deleted = !remote && !df_conflict_remote;
		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index);
			else if (path)
				verify_absent(path, "removed");
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		verify_uptodate(index);
	}
	else if (path)
		verify_absent(path, "overwritten");

	nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		for (i = 1; i < head_idx; i++) {
			if (stages[i]) {
				keep_entry(stages[i]);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head); }
	if (remote) { count += keep_entry(remote); }
	return count;
}

/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast forward", favoring a successful merge
 * over a merge failure when it makes sense. For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 *
 */
static int twoway_merge(struct cache_entry **src)
{
	struct cache_entry *current = src[0];
	struct cache_entry *oldtree = src[1], *newtree = src[2];

	if (merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     merge_size);

	if (current) {
		if ((!oldtree && !newtree) || /* 4 and 5 */
		    (!oldtree && newtree &&
		     same(current, newtree)) || /* 6 and 7 */
		    (oldtree && newtree &&
		     same(oldtree, newtree)) || /* 14 and 15 */
		    (oldtree && newtree &&
		     !same(oldtree, newtree) && /* 18 and 19 */
		     same(current, newtree))) {
			return keep_entry(current);
		}
		else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current);
		}
		else if (oldtree && newtree &&
			 same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current);
		}
		else {
			/* all other failures */
			if (oldtree)
				reject_merge(oldtree);
			if (current)
				reject_merge(current);
			if (newtree)
				reject_merge(newtree);
			return -1;
		}
	}
	else if (newtree)
		return merged_entry(newtree, current);
	else
		return deleted_entry(oldtree, current);
}

/*
 * Bind merge.
 *
 * Keep the index entries at stage0, collapse stage1 but make sure
 * stage0 does not have anything there.
 */
static int bind_merge(struct cache_entry **src)
{
	struct cache_entry *old = src[0];
	struct cache_entry *a = src[1];

	if (merge_size != 1)
		return error("Cannot do a bind merge of %d trees",
			     merge_size);
	if (a && old)
		die("Entry '%s' overlaps. Cannot bind.", a->name);
	if (!a)
		return keep_entry(old);
	else
		return merged_entry(a, NULL);
}

/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
static int oneway_merge(struct cache_entry **src)
{
	struct cache_entry *old = src[0];
	struct cache_entry *a = src[1];

	if (merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     merge_size);

	if (!a)
		return deleted_entry(old, old);
	if (old && same(old, a)) {
		if (reset) {
			struct stat st;
			if (lstat(old->name, &st) ||
			    ce_match_stat(old, &st, 1))
				old->ce_flags |= htons(CE_UPDATE);
		}
		return keep_entry(old);
	}
	return merged_entry(a, old);
}

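/*
 * Read the current index, collapsing unmerged entries: the staged
 * entries for a path are squashed into a single stage-0 entry with a
 * cleared mode, so the merge functions see the path as needing
 * resolution.  Returns non-zero if any unmerged entries were found.
 */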
static int read_cache_unmerged(void)
{
	int i;
	struct cache_entry **dst;
	struct cache_entry *last = NULL;

	read_cache();
	dst = active_cache;
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = active_cache[i];
		if (ce_stage(ce)) {
			if (last && !strcmp(ce->name, last->name))
				continue;
			invalidate_ce_path(ce);
			last = ce;
			ce->ce_mode = 0;
			ce->ce_flags &= ~htons(CE_STAGEMASK);
		}
		*dst++ = ce;
	}
	active_nr = dst - active_cache;
	return !!last;
}

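/*
 * Fill a cache-tree from a fully parsed tree object, recursing into
 * subtrees and recording the number of non-directory entries at each
 * level.  Used after reading a single tree, when the index is known
 * to match the tree exactly.
 */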
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
	struct tree_desc desc;
	struct name_entry entry;
	int cnt;

	memcpy(it->sha1, tree->object.sha1, 20);
	desc.buf = tree->buffer;
	desc.size = tree->size;
	cnt = 0;
	while (tree_entry(&desc, &entry)) {
		if (!S_ISDIR(entry.mode))
			cnt++;
		else {
			struct cache_tree_sub *sub;
			struct tree *subtree = lookup_tree(entry.sha1);
			if (!subtree->object.parsed)
				parse_tree(subtree);
			sub = cache_tree_sub(it, entry.path);
			sub->cache_tree = cache_tree();
			prime_cache_tree_rec(sub->cache_tree, subtree);
			cnt += sub->cache_tree->entry_count;
		}
	}
	it->entry_count = cnt;
}

static void prime_cache_tree(void)
{
	struct tree *tree = (struct tree *)trees->item;
	if (!tree)
		return;
	active_cache_tree = cache_tree();
	prime_cache_tree_rec(active_cache_tree, tree);
}

static const char read_tree_usage[] = "git-read-tree (<sha> | [[-m [--aggressive] | --reset | --prefix=<prefix>] [-u | -i]] <sha1> [<sha2> [<sha3>]])";

static struct lock_file lock_file;

int cmd_read_tree(int argc, const char **argv, const char *unused_prefix)
{
	int i, newfd, stage = 0;
	unsigned char sha1[20];
	merge_fn_t fn = NULL;

	df_conflict_list.next = &df_conflict_list;
	state.base_dir = "";
	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;

	git_config(git_default_config);

	newfd = hold_lock_file_for_update(&lock_file, get_index_file());
	if (newfd < 0)
		die("unable to create new index file");

	merge = 0;
	reset = 0;
	for (i = 1; i < argc; i++) {
		const char *arg = argv[i];

		/* "-u" means "update", meaning that a merge will update
		 * the working tree.
		 */
		if (!strcmp(arg, "-u")) {
			update = 1;
			continue;
		}

		if (!strcmp(arg, "-v")) {
			verbose_update = 1;
			continue;
		}

		/* "-i" means "index only", meaning that a merge will
		 * not even look at the working tree.
		 */
		if (!strcmp(arg, "-i")) {
			index_only = 1;
			continue;
		}

		/* "--prefix=<subdirectory>/" means keep the current index
		 * entries and put the entries from the tree under the
		 * given subdirectory.
		 */
		if (!strncmp(arg, "--prefix=", 9)) {
			if (stage || merge || prefix)
				usage(read_tree_usage);
			prefix = arg + 9;
			merge = 1;
			stage = 1;
			if (read_cache_unmerged())
				die("you need to resolve your current index first");
			continue;
		}

		/* This differs from "-m" in that we'll silently ignore
		 * unmerged entries and overwrite working tree files that
		 * correspond to them.
		 */
		if (!strcmp(arg, "--reset")) {
			if (stage || merge || prefix)
				usage(read_tree_usage);
			reset = 1;
			merge = 1;
			stage = 1;
			read_cache_unmerged();
			continue;
		}

		if (!strcmp(arg, "--trivial")) {
			trivial_merges_only = 1;
			continue;
		}

		if (!strcmp(arg, "--aggressive")) {
			aggressive = 1;
			continue;
		}

		/* "-m" stands for "merge", meaning we start in stage 1 */
		if (!strcmp(arg, "-m")) {
			if (stage || merge || prefix)
				usage(read_tree_usage);
			if (read_cache_unmerged())
				die("you need to resolve your current index first");
			stage = 1;
			merge = 1;
			continue;
		}

		/* using -u and -i at the same time makes no sense */
		if (1 < index_only + update)
			usage(read_tree_usage);

		if (get_sha1(arg, sha1))
			die("Not a valid object name %s", arg);
		if (list_tree(sha1) < 0)
			die("failed to unpack tree object %s", arg);
		stage++;
	}
	if ((update||index_only) && !merge)
		usage(read_tree_usage);

	if (prefix) {
		int pfxlen = strlen(prefix);
		int pos;
		if (prefix[pfxlen-1] != '/')
			die("prefix must end with /");
		if (stage != 2)
			die("binding merge takes only one tree");
		pos = cache_name_pos(prefix, pfxlen);
		if (0 <= pos)
			die("corrupt index file");
		pos = -pos-1;
		if (pos < active_nr &&
		    !strncmp(active_cache[pos]->name, prefix, pfxlen))
			die("subdirectory '%s' already exists.", prefix);
		pos = cache_name_pos(prefix, pfxlen-1);
		if (0 <= pos)
			die("file '%.*s' already exists.", pfxlen-1, prefix);
	}

	if (merge) {
		if (stage < 2)
			die("just how do you expect me to merge %d trees?", stage-1);
		switch (stage - 1) {
		case 1:
			fn = prefix ? bind_merge : oneway_merge;
			break;
		case 2:
			fn = twoway_merge;
			break;
		case 3:
		default:
			fn = threeway_merge;
			cache_tree_free(&active_cache_tree);
			break;
		}

		if (stage - 1 >= 3)
			head_idx = stage - 2;
		else
			head_idx = 1;
	}

	unpack_trees(fn);

	/*
	 * When reading only one tree (either the most basic form,
	 * "-m ent" or "--reset ent" form), we can obtain a fully
	 * valid cache-tree because the index must match exactly
	 * what came from the tree.
	 */
	if (trees && trees->item && !prefix && (!merge || (stage == 2))) {
		cache_tree_free(&active_cache_tree);
		prime_cache_tree();
	}

	if (write_cache(newfd, active_cache, active_nr) ||
	    close(newfd) || commit_lock_file(&lock_file))
		die("unable to write new index file");
	return 0;
}