/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */

#include "cache.h"
#include "object.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "dir.h"
#include "builtin.h"

#define MAX_TREES 8
static int nr_trees;
static struct tree *trees[MAX_TREES];

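/*
 * Resolve a tree-ish name and remember the resulting tree object in
 * trees[]; at most MAX_TREES trees can be read in a single run.
 */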
static int list_tree(unsigned char *sha1)
{
	struct tree *tree;

	if (nr_trees >= MAX_TREES)
		die("I cannot read more than %d trees", MAX_TREES);
	tree = parse_tree_indirect(sha1);
	if (!tree)
		return -1;
	trees[nr_trees++] = tree;
	return 0;
}

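/*
 * Read the current index, dropping duplicate unmerged entries so that
 * each path appears only once; the surviving entry is collapsed to
 * stage 0 with its mode cleared, and the cache-tree covering it is
 * invalidated.  Returns non-zero if any unmerged entries were seen.
 */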
static int read_cache_unmerged(void)
{
	int i;
	struct cache_entry **dst;
	struct cache_entry *last = NULL;

	read_cache();
	dst = active_cache;
	for (i = 0; i < active_nr; i++) {
		struct cache_entry *ce = active_cache[i];
		if (ce_stage(ce)) {
			if (last && !strcmp(ce->name, last->name))
				continue;
			cache_tree_invalidate_path(active_cache_tree, ce->name);
			last = ce;
			ce->ce_mode = 0;
			ce->ce_flags &= ~htons(CE_STAGEMASK);
		}
		*dst++ = ce;
	}
	active_nr = dst - active_cache;
	return !!last;
}

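/*
 * Recursively fill in cache-tree information from the given tree, so
 * that a later write-tree can reuse it instead of recomputing tree
 * objects from the index.
 */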
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
	struct tree_desc desc;
	struct name_entry entry;
	int cnt;

	hashcpy(it->sha1, tree->object.sha1);
	init_tree_desc(&desc, tree->buffer, tree->size);
	cnt = 0;
	while (tree_entry(&desc, &entry)) {
		if (!S_ISDIR(entry.mode))
			cnt++;
		else {
			struct cache_tree_sub *sub;
			struct tree *subtree = lookup_tree(entry.sha1);
			if (!subtree->object.parsed)
				parse_tree(subtree);
			sub = cache_tree_sub(it, entry.path);
			sub->cache_tree = cache_tree();
			prime_cache_tree_rec(sub->cache_tree, subtree);
			cnt += sub->cache_tree->entry_count;
		}
	}
	it->entry_count = cnt;
}

static void prime_cache_tree(void)
{
	if (!nr_trees)
		return;
	active_cache_tree = cache_tree();
	prime_cache_tree_rec(active_cache_tree, trees[0]);
}

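/*
 * Typical invocations, for illustration:
 *
 *   git-read-tree <tree-ish>                    read one tree into the index
 *   git-read-tree -m <tree-ish>                 single tree merge
 *   git-read-tree -m -u <ours> <theirs>         two tree merge, updating the work tree
 *   git-read-tree -m <base> <ours> <theirs>     three-way merge
 *   git-read-tree --prefix=sub/ <tree-ish>      graft a tree under "sub/"
 */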
static const char read_tree_usage[] = "git-read-tree (<sha> | [[-m [--trivial] [--aggressive] | --reset | --prefix=<prefix>] [-u | -i]] [--exclude-per-directory=<gitignore>] [--index-output=<file>] <sha1> [<sha2> [<sha3>]])";

static struct lock_file lock_file;

int cmd_read_tree(int argc, const char **argv, const char *unused_prefix)
{
	int i, newfd, stage = 0;
	unsigned char sha1[20];
	struct tree_desc t[MAX_TREES];
	struct unpack_trees_options opts;

	memset(&opts, 0, sizeof(opts));
	opts.head_idx = -1;

	git_config(git_default_config);

	newfd = hold_locked_index(&lock_file, 1);

	for (i = 1; i < argc; i++) {
		const char *arg = argv[i];

		/* "-u" means "update", meaning that a merge will update
		 * the working tree.
		 */
		if (!strcmp(arg, "-u")) {
			opts.update = 1;
			continue;
		}

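		/* "-v" reports progress while the working tree is updated. */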
		if (!strcmp(arg, "-v")) {
			opts.verbose_update = 1;
			continue;
		}

		/* "-i" means "index only", meaning that a merge will
		 * not even look at the working tree.
		 */
		if (!strcmp(arg, "-i")) {
			opts.index_only = 1;
			continue;
		}

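		/* "--index-output=<file>" writes the resulting index to
		 * <file> instead of the locked index file.
		 */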
		if (!prefixcmp(arg, "--index-output=")) {
			set_alternate_index_output(arg + 15);
			continue;
		}

		/* "--prefix=<subdirectory>/" means keep the current index
		 * entries and put the entries from the tree under the
		 * given subdirectory.
		 */
		if (!prefixcmp(arg, "--prefix=")) {
			if (stage || opts.merge || opts.prefix)
				usage(read_tree_usage);
			opts.prefix = arg + 9;
			opts.merge = 1;
			stage = 1;
			if (read_cache_unmerged())
				die("you need to resolve your current index first");
			continue;
		}

		/* This differs from "-m" in that we'll silently ignore
		 * unmerged entries and overwrite working tree files that
		 * correspond to them.
		 */
		if (!strcmp(arg, "--reset")) {
			if (stage || opts.merge || opts.prefix)
				usage(read_tree_usage);
			opts.reset = 1;
			opts.merge = 1;
			stage = 1;
			read_cache_unmerged();
			continue;
		}

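		/* "--trivial" restricts "-m" to merges that resolve without
		 * file-level merging.
		 */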
		if (!strcmp(arg, "--trivial")) {
			opts.trivial_merges_only = 1;
			continue;
		}

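		/* "--aggressive" lets the three-way merge resolve a few more
		 * cases itself (e.g. identical additions, or both sides
		 * deleting a path) instead of leaving them to the caller.
		 */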
		if (!strcmp(arg, "--aggressive")) {
			opts.aggressive = 1;
			continue;
		}

		/* "-m" stands for "merge", meaning we start in stage 1 */
		if (!strcmp(arg, "-m")) {
			if (stage || opts.merge || opts.prefix)
				usage(read_tree_usage);
			if (read_cache_unmerged())
				die("you need to resolve your current index first");
			stage = 1;
			opts.merge = 1;
			continue;
		}

		if (!prefixcmp(arg, "--exclude-per-directory=")) {
			struct dir_struct *dir;

			if (opts.dir)
				die("more than one --exclude-per-directory given.");

			dir = xcalloc(1, sizeof(*opts.dir));
			dir->show_ignored = 1;
			dir->exclude_per_dir = arg + 24;
			opts.dir = dir;
			/* We do not need to nor want to do read-directory
			 * here; we are merely interested in reusing the
			 * per directory ignore stack mechanism.
			 */
			continue;
		}

		/* using -u and -i at the same time makes no sense */
		if (1 < opts.index_only + opts.update)
			usage(read_tree_usage);

		if (get_sha1(arg, sha1))
			die("Not a valid object name %s", arg);
		if (list_tree(sha1) < 0)
			die("failed to unpack tree object %s", arg);
		stage++;
	}
	if ((opts.update || opts.index_only) && !opts.merge)
		usage(read_tree_usage);
	if (opts.dir && !opts.update)
		die("--exclude-per-directory is meaningless unless -u");

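	/*
	 * A bind merge with "--prefix" grafts the tree into an otherwise
	 * unused spot: the index must not already contain anything at or
	 * under the prefix, nor a file whose name matches the prefix.
	 */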
	if (opts.prefix) {
		int pfxlen = strlen(opts.prefix);
		int pos;
		if (opts.prefix[pfxlen-1] != '/')
			die("prefix must end with /");
		if (stage != 2)
			die("binding merge takes only one tree");
		pos = cache_name_pos(opts.prefix, pfxlen);
		if (0 <= pos)
			die("corrupt index file");
		pos = -pos-1;
		if (pos < active_nr &&
		    !strncmp(active_cache[pos]->name, opts.prefix, pfxlen))
			die("subdirectory '%s' already exists.", opts.prefix);
		pos = cache_name_pos(opts.prefix, pfxlen-1);
		if (0 <= pos)
			die("file '%.*s' already exists.",
			    pfxlen-1, opts.prefix);
		opts.pos = -1 - pos;
	}

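	/*
	 * Pick the merge function from the number of trees given:
	 * one tree is a one-way merge (or a bind merge under --prefix),
	 * two trees is a two-way merge, and three or more go through
	 * the three-way merge machinery.
	 */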
	if (opts.merge) {
		if (stage < 2)
			die("just how do you expect me to merge %d trees?", stage-1);
		switch (stage - 1) {
		case 1:
			opts.fn = opts.prefix ? bind_merge : oneway_merge;
			break;
		case 2:
			opts.fn = twoway_merge;
			break;
		case 3:
		default:
			opts.fn = threeway_merge;
			cache_tree_free(&active_cache_tree);
			break;
		}

		if (stage - 1 >= 3)
			opts.head_idx = stage - 2;
		else
			opts.head_idx = 1;
	}

	for (i = 0; i < nr_trees; i++) {
		struct tree *tree = trees[i];
		parse_tree(tree);
		init_tree_desc(t+i, tree->buffer, tree->size);
	}
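	/*
	 * Let unpack_trees() apply the selected merge function across the
	 * trees and the current index, updating the working tree if "-u"
	 * was given.
	 */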
	unpack_trees(nr_trees, t, &opts);

	/*
	 * When reading only one tree (either the most basic form,
	 * "-m ent" or "--reset ent" form), we can obtain a fully
	 * valid cache-tree because the index must match exactly
	 * what came from the tree.
	 */
	if (nr_trees && !opts.prefix && (!opts.merge || (stage == 2))) {
		cache_tree_free(&active_cache_tree);
		prime_cache_tree();
	}

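	/*
	 * Write the updated index to the lock file and commit it into
	 * place (or to the alternate file given with "--index-output=").
	 */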
	if (write_cache(newfd, active_cache, active_nr) ||
	    close(newfd) || commit_locked_index(&lock_file))
		die("unable to write new index file");
	return 0;
}