1/*
2 * GIT - The information manager from hell
3 *
4 * Copyright (C) Linus Torvalds, 2005
5 */
6#define DBRT_DEBUG 1
7
8#include "cache.h"
9
10#include "object.h"
11#include "tree.h"
12#include "cache-tree.h"
13#include <sys/time.h>
14#include <signal.h>
15
/* Behavior flags, set while parsing the command line in main() */
static int merge = 0;		/* -m / --reset / --prefix: run a merge function */
static int update = 0;		/* -u: write merge results to the working tree */
static int index_only = 0;	/* -i: never touch the working tree */
static int nontrivial_merge = 0;	/* set when file-level merging would be needed */
static int trivial_merges_only = 0;	/* --trivial: refuse non-trivial merges */
static int aggressive = 0;	/* --aggressive: resolve more trivial delete/add cases */
static int verbose_update = 0;	/* -v: show checkout progress */
static volatile int progress_update = 0;	/* set from the SIGALRM handler */
static const char *prefix = NULL;	/* --prefix=<dir>/: bind-merge destination */

static int head_idx = -1;	/* stage number that holds the HEAD tree */
static int merge_size = 0;	/* number of trees participating in the merge */

static struct object_list *trees = NULL;	/* trees named on the command line */

/* Sentinel cache entry marking a stage occupied by a directory/file
 * conflict rather than a real entry.
 */
static struct cache_entry df_conflict_entry = {
};

/* Sentinel tree-entry list for a directory/file conflict; it points
 * at itself so walking it can never run off the end.
 */
static struct tree_entry_list df_conflict_list = {
	.name = NULL,
	.next = &df_conflict_list
};

/* A merge function receives the stage entries collected for one path
 * and returns the number of index entries it added (per the
 * "Added %d entries" accounting in unpack_trees_rec()).
 */
typedef int (*merge_fn_t)(struct cache_entry **src);
40
/*
 * Compare two path names in tree-object sort order, where a directory
 * entry sorts as if it had a trailing '/'.  dir1/dir2 are non-zero
 * when the corresponding name is a directory.
 *
 * Returns <0, 0 or >0 in the usual strcmp() sense.
 */
static int entcmp(const char *name1, int dir1, const char *name2, int dir2)
{
	int len1 = strlen(name1);
	int len2 = strlen(name2);
	int len = len1 < len2 ? len1 : len2;
	int ret = memcmp(name1, name2, len);
	unsigned char c1, c2;

	if (ret)
		return ret;
	/* The common prefix is equal: compare the first character past
	 * it, substituting the virtual '/' for a directory that ended.
	 */
	c1 = name1[len];
	c2 = name2[len];
	if (!c1 && dir1)
		c1 = '/';
	if (!c2 && dir2)
		c2 = '/';
	ret = (c1 < c2) ? -1 : (c1 > c2) ? 1 : 0;
	/* Both still have characters (e.g. two directories that only
	 * differ in length): fall back to the length difference.
	 */
	if (c1 && c2 && !ret)
		ret = len1 - len2;
	return ret;
}
61
62static int unpack_trees_rec(struct tree_entry_list **posns, int len,
63 const char *base, merge_fn_t fn, int *indpos)
64{
65 int baselen = strlen(base);
66 int src_size = len + 1;
67 do {
68 int i;
69 char *first;
70 int firstdir = 0;
71 int pathlen;
72 unsigned ce_size;
73 struct tree_entry_list **subposns;
74 struct cache_entry **src;
75 int any_files = 0;
76 int any_dirs = 0;
77 char *cache_name;
78 int ce_stage;
79
80 /* Find the first name in the input. */
81
82 first = NULL;
83 cache_name = NULL;
84
85 /* Check the cache */
86 if (merge && *indpos < active_nr) {
87 /* This is a bit tricky: */
88 /* If the index has a subdirectory (with
89 * contents) as the first name, it'll get a
90 * filename like "foo/bar". But that's after
91 * "foo", so the entry in trees will get
92 * handled first, at which point we'll go into
93 * "foo", and deal with "bar" from the index,
94 * because the base will be "foo/". The only
95 * way we can actually have "foo/bar" first of
96 * all the things is if the trees don't
97 * contain "foo" at all, in which case we'll
98 * handle "foo/bar" without going into the
99 * directory, but that's fine (and will return
100 * an error anyway, with the added unknown
101 * file case.
102 */
103
104 cache_name = active_cache[*indpos]->name;
105 if (strlen(cache_name) > baselen &&
106 !memcmp(cache_name, base, baselen)) {
107 cache_name += baselen;
108 first = cache_name;
109 } else {
110 cache_name = NULL;
111 }
112 }
113
114#if DBRT_DEBUG > 1
115 if (first)
116 printf("index %s\n", first);
117#endif
118 for (i = 0; i < len; i++) {
119 if (!posns[i] || posns[i] == &df_conflict_list)
120 continue;
121#if DBRT_DEBUG > 1
122 printf("%d %s\n", i + 1, posns[i]->name);
123#endif
124 if (!first || entcmp(first, firstdir,
125 posns[i]->name,
126 posns[i]->directory) > 0) {
127 first = posns[i]->name;
128 firstdir = posns[i]->directory;
129 }
130 }
131 /* No name means we're done */
132 if (!first)
133 return 0;
134
135 pathlen = strlen(first);
136 ce_size = cache_entry_size(baselen + pathlen);
137
138 src = xcalloc(src_size, sizeof(struct cache_entry *));
139
140 subposns = xcalloc(len, sizeof(struct tree_list_entry *));
141
142 if (cache_name && !strcmp(cache_name, first)) {
143 any_files = 1;
144 src[0] = active_cache[*indpos];
145 remove_cache_entry_at(*indpos);
146 }
147
148 for (i = 0; i < len; i++) {
149 struct cache_entry *ce;
150
151 if (!posns[i] ||
152 (posns[i] != &df_conflict_list &&
153 strcmp(first, posns[i]->name))) {
154 continue;
155 }
156
157 if (posns[i] == &df_conflict_list) {
158 src[i + merge] = &df_conflict_entry;
159 continue;
160 }
161
162 if (posns[i]->directory) {
163 any_dirs = 1;
164 parse_tree(posns[i]->item.tree);
165 subposns[i] = posns[i]->item.tree->entries;
166 posns[i] = posns[i]->next;
167 src[i + merge] = &df_conflict_entry;
168 continue;
169 }
170
171 if (!merge)
172 ce_stage = 0;
173 else if (i + 1 < head_idx)
174 ce_stage = 1;
175 else if (i + 1 > head_idx)
176 ce_stage = 3;
177 else
178 ce_stage = 2;
179
180 ce = xcalloc(1, ce_size);
181 ce->ce_mode = create_ce_mode(posns[i]->mode);
182 ce->ce_flags = create_ce_flags(baselen + pathlen,
183 ce_stage);
184 memcpy(ce->name, base, baselen);
185 memcpy(ce->name + baselen, first, pathlen + 1);
186
187 any_files = 1;
188
189 memcpy(ce->sha1, posns[i]->item.any->sha1, 20);
190 src[i + merge] = ce;
191 subposns[i] = &df_conflict_list;
192 posns[i] = posns[i]->next;
193 }
194 if (any_files) {
195 if (merge) {
196 int ret;
197
198#if DBRT_DEBUG > 1
199 printf("%s:\n", first);
200 for (i = 0; i < src_size; i++) {
201 printf(" %d ", i);
202 if (src[i])
203 printf("%s\n", sha1_to_hex(src[i]->sha1));
204 else
205 printf("\n");
206 }
207#endif
208 ret = fn(src);
209
210#if DBRT_DEBUG > 1
211 printf("Added %d entries\n", ret);
212#endif
213 *indpos += ret;
214 } else {
215 for (i = 0; i < src_size; i++) {
216 if (src[i]) {
217 add_cache_entry(src[i], ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
218 }
219 }
220 }
221 }
222 if (any_dirs) {
223 char *newbase = xmalloc(baselen + 2 + pathlen);
224 memcpy(newbase, base, baselen);
225 memcpy(newbase + baselen, first, pathlen);
226 newbase[baselen + pathlen] = '/';
227 newbase[baselen + pathlen + 1] = '\0';
228 if (unpack_trees_rec(subposns, len, newbase, fn,
229 indpos))
230 return -1;
231 free(newbase);
232 }
233 free(subposns);
234 free(src);
235 } while (1);
236}
237
/* Abort: merging would overwrite the state that the index records
 * for "ce".  Never returns.
 */
static void reject_merge(struct cache_entry *ce)
{
	die("Entry '%s' would be overwritten by merge. Cannot merge.",
	    ce->name);
}
243
/* Unlink the last component and attempt to remove leading
 * directories, in case this unlink is the removal of the
 * last entry in the directory -- empty directories are removed.
 * The path in "name" is restored to its original contents before
 * returning.
 */
static void unlink_entry(char *name)
{
	char *slash, *restore;

	if (unlink(name))
		return;
	restore = NULL;
	for (;;) {
		/* Find the last '/' of the (possibly truncated) path
		 * before restoring the slash cut in the previous round.
		 */
		slash = strrchr(name, '/');
		if (restore)
			*restore = '/';
		if (!slash)
			break;
		*slash = '\0';
		if (rmdir(name)) {
			/* Directory not empty (or otherwise busy):
			 * put the slash back and stop climbing. */
			*slash = '/';
			break;
		}
		restore = slash;
	}
}
272
/* SIGALRM handler: ask the checkout loop to refresh the progress
 * display on its next iteration.
 */
static void progress_interval(int signum)
{
	progress_update = 1;
}
277
278static void setup_progress_signal(void)
279{
280 struct sigaction sa;
281 struct itimerval v;
282
283 memset(&sa, 0, sizeof(sa));
284 sa.sa_handler = progress_interval;
285 sigemptyset(&sa.sa_mask);
286 sa.sa_flags = SA_RESTART;
287 sigaction(SIGALRM, &sa, NULL);
288
289 v.it_interval.tv_sec = 1;
290 v.it_interval.tv_usec = 0;
291 v.it_value = v.it_interval;
292 setitimer(ITIMER_REAL, &v, NULL);
293}
294
/* Apply the merge results to the working tree: entries whose mode was
 * zeroed (deletions) are unlinked and entries flagged CE_UPDATE are
 * checked out -- both only when "-u" (update) was given.  With "-v",
 * updates touching 250 or more files get a percentage display driven
 * by the SIGALRM timer.
 */
static void check_updates(struct cache_entry **src, int nr)
{
	static struct checkout state = {
		.base_dir = "",
		.force = 1,
		.quiet = 1,
		.refresh_cache = 1,
	};
	unsigned short mask = htons(CE_UPDATE);
	/* last_percent starts out of range (200) so the first report prints */
	unsigned last_percent = 200, cnt = 0, total = 0;

	if (update && verbose_update) {
		/* Count how many entries will actually be touched */
		for (total = cnt = 0; cnt < nr; cnt++) {
			struct cache_entry *ce = src[cnt];
			if (!ce->ce_mode || ce->ce_flags & mask)
				total++;
		}

		/* Don't bother doing this for very small updates */
		if (total < 250)
			total = 0;

		if (total) {
			fprintf(stderr, "Checking files out...\n");
			setup_progress_signal();
			progress_update = 1;
		}
		cnt = 0;
	}

	while (nr--) {
		struct cache_entry *ce = *src++;

		if (total) {
			if (!ce->ce_mode || ce->ce_flags & mask) {
				unsigned percent;
				cnt++;
				percent = (cnt * 100) / total;
				/* NOTE(review): progress_update is set to 1
				 * above and never cleared, so this condition
				 * is always true -- presumably it was meant
				 * to be reset after printing; confirm. */
				if (percent != last_percent ||
				    progress_update) {
					fprintf(stderr, "%4u%% (%u/%u) done\r",
						percent, cnt, total);
					last_percent = percent;
				}
			}
		}
		/* Mode 0 marks an entry deleted by the merge */
		if (!ce->ce_mode) {
			if (update)
				unlink_entry(ce->name);
			continue;
		}
		if (ce->ce_flags & mask) {
			ce->ce_flags &= ~mask;
			if (update)
				checkout_entry(ce, &state, NULL);
		}
	}
	if (total) {
		/* Stop the timer ticks and finish the progress line */
		signal(SIGALRM, SIG_IGN);
		fputc('\n', stderr);
	}
}
357
358static int unpack_trees(merge_fn_t fn)
359{
360 int indpos = 0;
361 unsigned len = object_list_length(trees);
362 struct tree_entry_list **posns;
363 int i;
364 struct object_list *posn = trees;
365 merge_size = len;
366
367 if (len) {
368 posns = xmalloc(len * sizeof(struct tree_entry_list *));
369 for (i = 0; i < len; i++) {
370 posns[i] = ((struct tree *) posn->item)->entries;
371 posn = posn->next;
372 }
373 if (unpack_trees_rec(posns, len, prefix ? prefix : "",
374 fn, &indpos))
375 return -1;
376 }
377
378 if (trivial_merges_only && nontrivial_merge)
379 die("Merge requires file-level merging");
380
381 check_updates(active_cache, active_nr);
382 return 0;
383}
384
385static int list_tree(unsigned char *sha1)
386{
387 struct tree *tree = parse_tree_indirect(sha1);
388 if (!tree)
389 return -1;
390 object_list_append(&tree->object, &trees);
391 return 0;
392}
393
394static int same(struct cache_entry *a, struct cache_entry *b)
395{
396 if (!!a != !!b)
397 return 0;
398 if (!a && !b)
399 return 1;
400 return a->ce_mode == b->ce_mode &&
401 !memcmp(a->sha1, b->sha1, 20);
402}
403
404
/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date.
 *
 * Dies unless the working tree file either matches the stat info
 * recorded in "ce" or is missing entirely (a missing file cannot
 * hold local modifications).  With -i (index_only) the working tree
 * is never consulted.
 */
static void verify_uptodate(struct cache_entry *ce)
{
	struct stat st;

	if (index_only)
		return;

	if (!lstat(ce->name, &st)) {
		unsigned changed = ce_match_stat(ce, &st, 1);
		if (!changed)
			return;
		/* lstat() succeeded, so clear errno: the ENOENT test
		 * below must only fire for a failed lstat(). */
		errno = 0;
	}
	if (errno == ENOENT)
		return;
	die("Entry '%s' not uptodate. Cannot merge.", ce->name);
}
426
427static void invalidate_ce_path(struct cache_entry *ce)
428{
429 if (ce)
430 cache_tree_invalidate_path(active_cache_tree, ce->name);
431}
432
/* Enter "merge" into the index as the result for its path, replacing
 * "old" (the previous stage-0 entry, may be NULL).  The new entry is
 * flagged CE_UPDATE so check_updates() writes it to the working tree,
 * unless it is identical to "old", in which case the old entry (with
 * its up-to-date stat info and without the flag) is reused.
 * Returns the number of index entries added (always 1).
 */
static int merged_entry(struct cache_entry *merge, struct cache_entry *old)
{
	merge->ce_flags |= htons(CE_UPDATE);
	if (old) {
		/*
		 * See if we can re-use the old CE directly?
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on
		 * a match.
		 */
		if (same(old, merge)) {
			*merge = *old;
		} else {
			/* The entry is changing: the old one must be
			 * free of local modifications, and any cached
			 * tree data for its path is now stale. */
			verify_uptodate(old);
			invalidate_ce_path(old);
		}
	}
	else
		invalidate_ce_path(merge);
	/* The merge result lives at stage 0 */
	merge->ce_flags &= ~htons(CE_STAGEMASK);
	add_cache_entry(merge, ADD_CACHE_OK_TO_ADD);
	return 1;
}
457
/* Record the path of "ce" as deleted by the merge.  "old" (the
 * previous index entry, may be NULL) must be free of local changes.
 * The zeroed mode makes check_updates() unlink the file later.
 * Returns the number of index entries added (always 1).
 */
static int deleted_entry(struct cache_entry *ce, struct cache_entry *old)
{
	if (old)
		verify_uptodate(old);
	ce->ce_mode = 0;
	add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
	invalidate_ce_path(ce);
	return 1;
}
467
/* Carry an existing entry into the result index unchanged.
 * Returns the number of index entries added (always 1).
 */
static int keep_entry(struct cache_entry *ce)
{
	add_cache_entry(ce, ADD_CACHE_OK_TO_ADD);
	return 1;
}
473
#if DBRT_DEBUG
/* Debug helper: print one stage entry, or note that it is missing. */
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (ce)
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ntohl(ce->ce_mode),
			sha1_to_hex(ce->sha1),
			ce_stage(ce),
			ce->name);
	else
		fprintf(o, "%s (missing)\n", label);
}
#endif
489
/* Three-way merge resolution for one path.  stages[0] is the current
 * index entry, stages[1..head_idx-1] the ancestor(s), stages[head_idx]
 * is HEAD and stages[head_idx + 1] the remote tree.  The "#N" labels
 * in the comments below appear to refer to the merge cases enumerated
 * in the git-read-tree documentation -- confirm against that document.
 * Returns the number of index entries added.
 */
static int threeway_merge(struct cache_entry **stages)
{
	struct cache_entry *index;
	struct cache_entry *head;
	struct cache_entry *remote = stages[head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	for (i = 1; i < head_idx; i++) {
		if (!stages[i])
			any_anc_missing = 1;
		else
			no_anc_exists = 0;
	}

	index = stages[0];
	head = stages[head_idx];

	/* The df_conflict sentinel means "a directory occupies this
	 * stage": treat it as missing but remember the conflict. */
	if (head == &df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == &df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	/* First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/* We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			reject_merge(index);
		return merged_entry(remote, index);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head)) {
		reject_merge(index);
	}

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/* Under the new "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (aggressive) {
		int head_deleted = !head && !df_conflict_head;
		int remote_deleted = !remote && !df_conflict_remote;
		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index);
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		verify_uptodate(index);
	}

	nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		/* Keep one ancestor stage so the unmerged state is
		 * visible in the resulting index. */
		for (i = 1; i < head_idx; i++) {
			if (stages[i]) {
				keep_entry(stages[i]);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head); }
	if (remote) { count += keep_entry(remote); }
	return count;
}
628
/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast forward", favoring a successful merge
 * over a merge failure when it makes sense.  For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 *
 * src[0] is the current index entry, src[1] the tree being switched
 * away from, src[2] the tree being switched to.  The numeric labels
 * in the comments are the case numbers from that document.
 */
static int twoway_merge(struct cache_entry **src)
{
	struct cache_entry *current = src[0];
	struct cache_entry *oldtree = src[1], *newtree = src[2];

	if (merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     merge_size);

	if (current) {
		if ((!oldtree && !newtree) || /* 4 and 5 */
		    (!oldtree && newtree &&
		     same(current, newtree)) || /* 6 and 7 */
		    (oldtree && newtree &&
		     same(oldtree, newtree)) || /* 14 and 15 */
		    (oldtree && newtree &&
		     !same(oldtree, newtree) && /* 18 and 19*/
		     same(current, newtree))) {
			return keep_entry(current);
		}
		else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current);
		}
		else if (oldtree && newtree &&
			 same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current);
		}
		else {
			/* all other failures */
			/* reject_merge() dies, so only the first entry
			 * that exists is ever reported. */
			if (oldtree)
				reject_merge(oldtree);
			if (current)
				reject_merge(current);
			if (newtree)
				reject_merge(newtree);
			return -1;
		}
	}
	else if (newtree)
		return merged_entry(newtree, current);
	else
		return deleted_entry(oldtree, current);
}
683
684/*
685 * Bind merge.
686 *
687 * Keep the index entries at stage0, collapse stage1 but make sure
688 * stage0 does not have anything in prefix.
689 */
690static int bind_merge(struct cache_entry **src)
691{
692 struct cache_entry *old = src[0];
693 struct cache_entry *a = src[1];
694
695 if (merge_size != 1)
696 return error("Cannot do a bind merge of %d trees\n",
697 merge_size);
698 if (!a)
699 return merged_entry(old, NULL);
700 if (old)
701 die("Entry '%s' overlaps. Cannot bind.", a->name);
702
703 return merged_entry(a, NULL);
704}
705
706/*
707 * One-way merge.
708 *
709 * The rule is:
710 * - take the stat information from stage0, take the data from stage1
711 */
712static int oneway_merge(struct cache_entry **src)
713{
714 struct cache_entry *old = src[0];
715 struct cache_entry *a = src[1];
716
717 if (merge_size != 1)
718 return error("Cannot do a oneway merge of %d trees",
719 merge_size);
720
721 if (!a) {
722 invalidate_ce_path(old);
723 return 0;
724 }
725 if (old && same(old, a)) {
726 return keep_entry(old);
727 }
728 return merged_entry(a, NULL);
729}
730
/* Read the index, dropping all unmerged (stage > 0) entries in place.
 * Surviving stage-0 entries are compacted towards the front of
 * active_cache.  Returns the number of entries dropped, letting the
 * caller detect an index with unresolved conflicts.
 */
static int read_cache_unmerged(void)
{
	int i, deleted;
	struct cache_entry **dst;

	read_cache();
	dst = active_cache;
	deleted = 0;
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = active_cache[i];
		if (ce_stage(ce)) {
			deleted++;
			invalidate_ce_path(ce);
			continue;
		}
		/* dst only lags behind i after something was dropped;
		 * until then the copy would be a no-op, so skip it. */
		if (deleted)
			*dst = ce;
		dst++;
	}
	active_nr -= deleted;
	return deleted;
}
753
/* Fill "it" from the (already parsed) tree object, recursing into
 * subtrees so the cache-tree mirrors the tree structure.  Each
 * level's entry_count becomes the number of non-directory entries it
 * covers, including those inside its subtrees.
 */
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
	struct tree_entry_list *ent;
	int cnt;

	memcpy(it->sha1, tree->object.sha1, 20);
	for (cnt = 0, ent = tree->entries; ent; ent = ent->next) {
		if (!ent->directory)
			cnt++;
		else {
			struct cache_tree_sub *sub;
			struct tree *subtree = (struct tree *)ent->item.tree;
			/* Subtrees may not have been read yet */
			if (!subtree->object.parsed)
				parse_tree(subtree);
			sub = cache_tree_sub(it, ent->name);
			sub->cache_tree = cache_tree();
			prime_cache_tree_rec(sub->cache_tree, subtree);
			cnt += sub->cache_tree->entry_count;
		}
	}
	it->entry_count = cnt;
}
776
/* Build a fresh cache-tree from the first tree that was read.  Only
 * called when the resulting index is known to match that tree exactly
 * (see the caller in main()).
 */
static void prime_cache_tree(void)
{
	struct tree *tree = (struct tree *)trees->item;
	if (!tree)
		return;
	active_cache_tree = cache_tree();
	prime_cache_tree_rec(active_cache_tree, tree);

}
786
/* Command-line synopsis printed by usage() */
static const char read_tree_usage[] = "git-read-tree (<sha> | [[-m [--aggressive] | --reset | --prefix=<prefix>] [-u | -i]] <sha1> [<sha2> [<sha3>]])";

/* Lockfile handle used while rewriting the index */
static struct cache_file cache_file;
790
791int main(int argc, char **argv)
792{
793 int i, newfd, reset, stage = 0;
794 unsigned char sha1[20];
795 merge_fn_t fn = NULL;
796
797 setup_git_directory();
798 git_config(git_default_config);
799
800 newfd = hold_index_file_for_update(&cache_file, get_index_file());
801 if (newfd < 0)
802 die("unable to create new cachefile");
803
804 git_config(git_default_config);
805
806 merge = 0;
807 reset = 0;
808 for (i = 1; i < argc; i++) {
809 const char *arg = argv[i];
810
811 /* "-u" means "update", meaning that a merge will update
812 * the working tree.
813 */
814 if (!strcmp(arg, "-u")) {
815 update = 1;
816 continue;
817 }
818
819 if (!strcmp(arg, "-v")) {
820 verbose_update = 1;
821 continue;
822 }
823
824 /* "-i" means "index only", meaning that a merge will
825 * not even look at the working tree.
826 */
827 if (!strcmp(arg, "-i")) {
828 index_only = 1;
829 continue;
830 }
831
832 /* "--prefix=<subdirectory>/" means keep the current index
833 * entries and put the entries from the tree under the
834 * given subdirectory.
835 */
836 if (!strncmp(arg, "--prefix=", 9)) {
837 if (stage || merge || prefix)
838 usage(read_tree_usage);
839 prefix = arg + 9;
840 merge = 1;
841 stage = 1;
842 if (read_cache_unmerged())
843 die("you need to resolve your current index first");
844 continue;
845 }
846
847 /* This differs from "-m" in that we'll silently ignore unmerged entries */
848 if (!strcmp(arg, "--reset")) {
849 if (stage || merge || prefix)
850 usage(read_tree_usage);
851 reset = 1;
852 merge = 1;
853 stage = 1;
854 read_cache_unmerged();
855 continue;
856 }
857
858 if (!strcmp(arg, "--trivial")) {
859 trivial_merges_only = 1;
860 continue;
861 }
862
863 if (!strcmp(arg, "--aggressive")) {
864 aggressive = 1;
865 continue;
866 }
867
868 /* "-m" stands for "merge", meaning we start in stage 1 */
869 if (!strcmp(arg, "-m")) {
870 if (stage || merge || prefix)
871 usage(read_tree_usage);
872 if (read_cache_unmerged())
873 die("you need to resolve your current index first");
874 stage = 1;
875 merge = 1;
876 continue;
877 }
878
879 /* using -u and -i at the same time makes no sense */
880 if (1 < index_only + update)
881 usage(read_tree_usage);
882
883 if (get_sha1(arg, sha1))
884 die("Not a valid object name %s", arg);
885 if (list_tree(sha1) < 0)
886 die("failed to unpack tree object %s", arg);
887 stage++;
888 }
889 if ((update||index_only) && !merge)
890 usage(read_tree_usage);
891
892 if (prefix) {
893 int pfxlen = strlen(prefix);
894 int pos;
895 if (prefix[pfxlen-1] != '/')
896 die("prefix must end with /");
897 if (stage != 2)
898 die("binding merge takes only one tree");
899 pos = cache_name_pos(prefix, pfxlen);
900 if (0 <= pos)
901 die("corrupt index file");
902 pos = -pos-1;
903 if (pos < active_nr &&
904 !strncmp(active_cache[pos]->name, prefix, pfxlen))
905 die("subdirectory '%s' already exists.", prefix);
906 pos = cache_name_pos(prefix, pfxlen-1);
907 if (0 <= pos)
908 die("file '%.*s' already exists.", pfxlen-1, prefix);
909 }
910
911 if (merge) {
912 if (stage < 2)
913 die("just how do you expect me to merge %d trees?", stage-1);
914 switch (stage - 1) {
915 case 1:
916 fn = prefix ? bind_merge : oneway_merge;
917 break;
918 case 2:
919 fn = twoway_merge;
920 break;
921 case 3:
922 default:
923 fn = threeway_merge;
924 cache_tree_free(&active_cache_tree);
925 break;
926 }
927
928 if (stage - 1 >= 3)
929 head_idx = stage - 2;
930 else
931 head_idx = 1;
932 }
933
934 unpack_trees(fn);
935
936 /*
937 * When reading only one tree (either the most basic form,
938 * "-m ent" or "--reset ent" form), we can obtain a fully
939 * valid cache-tree because the index must match exactly
940 * what came from the tree.
941 */
942 if (trees && trees->item && (!merge || (stage == 2))) {
943 cache_tree_free(&active_cache_tree);
944 prime_cache_tree();
945 }
946
947 if (write_cache(newfd, active_cache, active_nr) ||
948 commit_index_file(&cache_file))
949 die("unable to write new index file");
950 return 0;
951}