/* builtin-read-tree.c — at commit "unpack-trees: allow Porcelain to give different error messages" (8ccba00) */
   1/*
   2 * GIT - The information manager from hell
   3 *
   4 * Copyright (C) Linus Torvalds, 2005
   5 */
   6
   7#include "cache.h"
   8#include "object.h"
   9#include "tree.h"
  10#include "tree-walk.h"
  11#include "cache-tree.h"
  12#include "unpack-trees.h"
  13#include "dir.h"
  14#include "builtin.h"
  15
  16static int nr_trees;
  17static struct tree *trees[MAX_UNPACK_TREES];
  18
  19static int list_tree(unsigned char *sha1)
  20{
  21        struct tree *tree;
  22
  23        if (nr_trees >= MAX_UNPACK_TREES)
  24                die("I cannot read more than %d trees", MAX_UNPACK_TREES);
  25        tree = parse_tree_indirect(sha1);
  26        if (!tree)
  27                return -1;
  28        trees[nr_trees++] = tree;
  29        return 0;
  30}
  31
  32static int read_cache_unmerged(void)
  33{
  34        int i;
  35        struct cache_entry **dst;
  36        struct cache_entry *last = NULL;
  37
  38        read_cache();
  39        dst = active_cache;
  40        for (i = 0; i < active_nr; i++) {
  41                struct cache_entry *ce = active_cache[i];
  42                if (ce_stage(ce)) {
  43                        remove_name_hash(ce);
  44                        if (last && !strcmp(ce->name, last->name))
  45                                continue;
  46                        cache_tree_invalidate_path(active_cache_tree, ce->name);
  47                        last = ce;
  48                        continue;
  49                }
  50                *dst++ = ce;
  51        }
  52        active_nr = dst - active_cache;
  53        return !!last;
  54}
  55
  56static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
  57{
  58        struct tree_desc desc;
  59        struct name_entry entry;
  60        int cnt;
  61
  62        hashcpy(it->sha1, tree->object.sha1);
  63        init_tree_desc(&desc, tree->buffer, tree->size);
  64        cnt = 0;
  65        while (tree_entry(&desc, &entry)) {
  66                if (!S_ISDIR(entry.mode))
  67                        cnt++;
  68                else {
  69                        struct cache_tree_sub *sub;
  70                        struct tree *subtree = lookup_tree(entry.sha1);
  71                        if (!subtree->object.parsed)
  72                                parse_tree(subtree);
  73                        sub = cache_tree_sub(it, entry.path);
  74                        sub->cache_tree = cache_tree();
  75                        prime_cache_tree_rec(sub->cache_tree, subtree);
  76                        cnt += sub->cache_tree->entry_count;
  77                }
  78        }
  79        it->entry_count = cnt;
  80}
  81
  82static void prime_cache_tree(void)
  83{
  84        if (!nr_trees)
  85                return;
  86        active_cache_tree = cache_tree();
  87        prime_cache_tree_rec(active_cache_tree, trees[0]);
  88
  89}
  90
  91static const char read_tree_usage[] = "git-read-tree (<sha> | [[-m [--trivial] [--aggressive] | --reset | --prefix=<prefix>] [-u | -i]] [--exclude-per-directory=<gitignore>] [--index-output=<file>] <sha1> [<sha2> [<sha3>]])";
  92
  93static struct lock_file lock_file;
  94
  95int cmd_read_tree(int argc, const char **argv, const char *unused_prefix)
  96{
  97        int i, newfd, stage = 0;
  98        unsigned char sha1[20];
  99        struct tree_desc t[MAX_UNPACK_TREES];
 100        struct unpack_trees_options opts;
 101
 102        memset(&opts, 0, sizeof(opts));
 103        opts.head_idx = -1;
 104        opts.src_index = &the_index;
 105        opts.dst_index = &the_index;
 106
 107        git_config(git_default_config);
 108
 109        newfd = hold_locked_index(&lock_file, 1);
 110
 111        git_config(git_default_config);
 112
 113        for (i = 1; i < argc; i++) {
 114                const char *arg = argv[i];
 115
 116                /* "-u" means "update", meaning that a merge will update
 117                 * the working tree.
 118                 */
 119                if (!strcmp(arg, "-u")) {
 120                        opts.update = 1;
 121                        continue;
 122                }
 123
 124                if (!strcmp(arg, "-v")) {
 125                        opts.verbose_update = 1;
 126                        continue;
 127                }
 128
 129                /* "-i" means "index only", meaning that a merge will
 130                 * not even look at the working tree.
 131                 */
 132                if (!strcmp(arg, "-i")) {
 133                        opts.index_only = 1;
 134                        continue;
 135                }
 136
 137                if (!prefixcmp(arg, "--index-output=")) {
 138                        set_alternate_index_output(arg + 15);
 139                        continue;
 140                }
 141
 142                /* "--prefix=<subdirectory>/" means keep the current index
 143                 *  entries and put the entries from the tree under the
 144                 * given subdirectory.
 145                 */
 146                if (!prefixcmp(arg, "--prefix=")) {
 147                        if (stage || opts.merge || opts.prefix)
 148                                usage(read_tree_usage);
 149                        opts.prefix = arg + 9;
 150                        opts.merge = 1;
 151                        stage = 1;
 152                        if (read_cache_unmerged())
 153                                die("you need to resolve your current index first");
 154                        continue;
 155                }
 156
 157                /* This differs from "-m" in that we'll silently ignore
 158                 * unmerged entries and overwrite working tree files that
 159                 * correspond to them.
 160                 */
 161                if (!strcmp(arg, "--reset")) {
 162                        if (stage || opts.merge || opts.prefix)
 163                                usage(read_tree_usage);
 164                        opts.reset = 1;
 165                        opts.merge = 1;
 166                        stage = 1;
 167                        read_cache_unmerged();
 168                        continue;
 169                }
 170
 171                if (!strcmp(arg, "--trivial")) {
 172                        opts.trivial_merges_only = 1;
 173                        continue;
 174                }
 175
 176                if (!strcmp(arg, "--aggressive")) {
 177                        opts.aggressive = 1;
 178                        continue;
 179                }
 180
 181                /* "-m" stands for "merge", meaning we start in stage 1 */
 182                if (!strcmp(arg, "-m")) {
 183                        if (stage || opts.merge || opts.prefix)
 184                                usage(read_tree_usage);
 185                        if (read_cache_unmerged())
 186                                die("you need to resolve your current index first");
 187                        stage = 1;
 188                        opts.merge = 1;
 189                        continue;
 190                }
 191
 192                if (!prefixcmp(arg, "--exclude-per-directory=")) {
 193                        struct dir_struct *dir;
 194
 195                        if (opts.dir)
 196                                die("more than one --exclude-per-directory are given.");
 197
 198                        dir = xcalloc(1, sizeof(*opts.dir));
 199                        dir->show_ignored = 1;
 200                        dir->exclude_per_dir = arg + 24;
 201                        opts.dir = dir;
 202                        /* We do not need to nor want to do read-directory
 203                         * here; we are merely interested in reusing the
 204                         * per directory ignore stack mechanism.
 205                         */
 206                        continue;
 207                }
 208
 209                /* using -u and -i at the same time makes no sense */
 210                if (1 < opts.index_only + opts.update)
 211                        usage(read_tree_usage);
 212
 213                if (get_sha1(arg, sha1))
 214                        die("Not a valid object name %s", arg);
 215                if (list_tree(sha1) < 0)
 216                        die("failed to unpack tree object %s", arg);
 217                stage++;
 218        }
 219        if ((opts.update||opts.index_only) && !opts.merge)
 220                usage(read_tree_usage);
 221        if ((opts.dir && !opts.update))
 222                die("--exclude-per-directory is meaningless unless -u");
 223
 224        if (opts.merge) {
 225                if (stage < 2)
 226                        die("just how do you expect me to merge %d trees?", stage-1);
 227                switch (stage - 1) {
 228                case 1:
 229                        opts.fn = opts.prefix ? bind_merge : oneway_merge;
 230                        break;
 231                case 2:
 232                        opts.fn = twoway_merge;
 233                        break;
 234                case 3:
 235                default:
 236                        opts.fn = threeway_merge;
 237                        cache_tree_free(&active_cache_tree);
 238                        break;
 239                }
 240
 241                if (stage - 1 >= 3)
 242                        opts.head_idx = stage - 2;
 243                else
 244                        opts.head_idx = 1;
 245        }
 246
 247        for (i = 0; i < nr_trees; i++) {
 248                struct tree *tree = trees[i];
 249                parse_tree(tree);
 250                init_tree_desc(t+i, tree->buffer, tree->size);
 251        }
 252        if (unpack_trees(nr_trees, t, &opts))
 253                return 128;
 254
 255        /*
 256         * When reading only one tree (either the most basic form,
 257         * "-m ent" or "--reset ent" form), we can obtain a fully
 258         * valid cache-tree because the index must match exactly
 259         * what came from the tree.
 260         */
 261        if (nr_trees && !opts.prefix && (!opts.merge || (stage == 2))) {
 262                cache_tree_free(&active_cache_tree);
 263                prime_cache_tree();
 264        }
 265
 266        if (write_cache(newfd, active_cache, active_nr) ||
 267            commit_locked_index(&lock_file))
 268                die("unable to write new index file");
 269        return 0;
 270}