builtin-read-tree.c on commit "Merge branch 'lt/racy-empty'" (159e639)
/*
 * GIT - The information manager from hell
 *
 * Copyright (C) Linus Torvalds, 2005
 */

#include "cache.h"
#include "object.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "dir.h"
#include "builtin.h"

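/* Trees named on the command line, collected by list_tree(). */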
static int nr_trees;
static struct tree *trees[MAX_UNPACK_TREES];

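/*
 * Resolve a tree-ish to its tree object and append it to trees[];
 * returns 0 on success, -1 if the object cannot be parsed as a tree.
 */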
static int list_tree(unsigned char *sha1)
{
        struct tree *tree;

        if (nr_trees >= MAX_UNPACK_TREES)
                die("I cannot read more than %d trees", MAX_UNPACK_TREES);
        tree = parse_tree_indirect(sha1);
        if (!tree)
                return -1;
        trees[nr_trees++] = tree;
        return 0;
}

static int read_cache_unmerged(void)
{
        int i;
        struct cache_entry **dst;
        struct cache_entry *last = NULL;

        read_cache();
        dst = active_cache;
        for (i = 0; i < active_nr; i++) {
                struct cache_entry *ce = active_cache[i];
                if (ce_stage(ce)) {
                        remove_name_hash(ce);
                        if (last && !strcmp(ce->name, last->name))
                                continue;
                        cache_tree_invalidate_path(active_cache_tree, ce->name);
                        last = ce;
                        continue;
                }
                *dst++ = ce;
        }
        active_nr = dst - active_cache;
        return !!last;
}

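/*
 * Fill a cache_tree node from the given tree object, recursing
 * into subtrees and recording how many non-directory entries
 * each node covers.
 */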
static void prime_cache_tree_rec(struct cache_tree *it, struct tree *tree)
{
        struct tree_desc desc;
        struct name_entry entry;
        int cnt;

        hashcpy(it->sha1, tree->object.sha1);
        init_tree_desc(&desc, tree->buffer, tree->size);
        cnt = 0;
        while (tree_entry(&desc, &entry)) {
                if (!S_ISDIR(entry.mode))
                        cnt++;
                else {
                        struct cache_tree_sub *sub;
                        struct tree *subtree = lookup_tree(entry.sha1);
                        if (!subtree->object.parsed)
                                parse_tree(subtree);
                        sub = cache_tree_sub(it, entry.path);
                        sub->cache_tree = cache_tree();
                        prime_cache_tree_rec(sub->cache_tree, subtree);
                        cnt += sub->cache_tree->entry_count;
                }
        }
        it->entry_count = cnt;
}

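/*
 * Seed active_cache_tree from the first tree that was read, so the
 * resulting index carries a fully valid cache-tree.
 */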
static void prime_cache_tree(void)
{
        if (!nr_trees)
                return;
        active_cache_tree = cache_tree();
        prime_cache_tree_rec(active_cache_tree, trees[0]);
}

static const char read_tree_usage[] = "git-read-tree (<sha> | [[-m [--trivial] [--aggressive] | --reset | --prefix=<prefix>] [-u | -i]] [--exclude-per-directory=<gitignore>] [--index-output=<file>] <sha1> [<sha2> [<sha3>]])";

static struct lock_file lock_file;

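/*
 * Parse the command line options, collect the tree-ish arguments,
 * merge them into the index with unpack_trees(), and write the
 * result out while holding the index lock.
 */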
int cmd_read_tree(int argc, const char **argv, const char *unused_prefix)
{
        int i, newfd, stage = 0;
        unsigned char sha1[20];
        struct tree_desc t[MAX_UNPACK_TREES];
        struct unpack_trees_options opts;

        memset(&opts, 0, sizeof(opts));
        opts.head_idx = -1;
        opts.src_index = &the_index;
        opts.dst_index = &the_index;

        git_config(git_default_config, NULL);

        newfd = hold_locked_index(&lock_file, 1);

        for (i = 1; i < argc; i++) {
                const char *arg = argv[i];

                /* "-u" means "update", meaning that a merge will update
                 * the working tree.
                 */
                if (!strcmp(arg, "-u")) {
                        opts.update = 1;
                        continue;
                }

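                /* "-v" means "verbose", reporting progress while the
                 * working tree is updated.
                 */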
                if (!strcmp(arg, "-v")) {
                        opts.verbose_update = 1;
                        continue;
                }

                /* "-i" means "index only", meaning that a merge will
                 * not even look at the working tree.
                 */
                if (!strcmp(arg, "-i")) {
                        opts.index_only = 1;
                        continue;
                }

                if (!prefixcmp(arg, "--index-output=")) {
                        set_alternate_index_output(arg + 15);
                        continue;
                }

                /* "--prefix=<subdirectory>/" means keep the current index
                 * entries and put the entries from the tree under the
                 * given subdirectory.
                 */
                if (!prefixcmp(arg, "--prefix=")) {
                        if (stage || opts.merge || opts.prefix)
                                usage(read_tree_usage);
                        opts.prefix = arg + 9;
                        opts.merge = 1;
                        stage = 1;
                        if (read_cache_unmerged())
                                die("you need to resolve your current index first");
                        continue;
                }

                /* This differs from "-m" in that we'll silently ignore
                 * unmerged entries and overwrite working tree files that
                 * correspond to them.
                 */
                if (!strcmp(arg, "--reset")) {
                        if (stage || opts.merge || opts.prefix)
                                usage(read_tree_usage);
                        opts.reset = 1;
                        opts.merge = 1;
                        stage = 1;
                        read_cache_unmerged();
                        continue;
                }

                if (!strcmp(arg, "--trivial")) {
                        opts.trivial_merges_only = 1;
                        continue;
                }

                if (!strcmp(arg, "--aggressive")) {
                        opts.aggressive = 1;
                        continue;
                }

                /* "-m" stands for "merge", meaning we start in stage 1 */
                if (!strcmp(arg, "-m")) {
                        if (stage || opts.merge || opts.prefix)
                                usage(read_tree_usage);
                        if (read_cache_unmerged())
                                die("you need to resolve your current index first");
                        stage = 1;
                        opts.merge = 1;
                        continue;
                }

                if (!prefixcmp(arg, "--exclude-per-directory=")) {
                        struct dir_struct *dir;

                        if (opts.dir)
                                die("more than one --exclude-per-directory given.");

                        dir = xcalloc(1, sizeof(*opts.dir));
                        dir->show_ignored = 1;
                        dir->exclude_per_dir = arg + 24;
                        opts.dir = dir;
                        /* We do not need to, nor want to, do read-directory
                         * here; we are merely interested in reusing the
                         * per-directory ignore stack mechanism.
                         */
                        continue;
                }

                /* using -u and -i at the same time makes no sense */
                if (1 < opts.index_only + opts.update)
                        usage(read_tree_usage);

                if (get_sha1(arg, sha1))
                        die("Not a valid object name %s", arg);
                if (list_tree(sha1) < 0)
                        die("failed to unpack tree object %s", arg);
                stage++;
        }
        if ((opts.update || opts.index_only) && !opts.merge)
                usage(read_tree_usage);
        if (opts.dir && !opts.update)
                die("--exclude-per-directory is meaningless unless -u");

        if (opts.merge) {
                if (stage < 2)
                        die("just how do you expect me to merge %d trees?", stage - 1);
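                /*
                 * Pick the merge function based on how many trees were
                 * read: one tree uses the one-way merge (or the bind
                 * merge with --prefix), two trees the two-way merge,
                 * and three or more the three-way merge.
                 */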
                switch (stage - 1) {
                case 1:
                        opts.fn = opts.prefix ? bind_merge : oneway_merge;
                        break;
                case 2:
                        opts.fn = twoway_merge;
                        break;
                case 3:
                default:
                        opts.fn = threeway_merge;
                        cache_tree_free(&active_cache_tree);
                        break;
                }

                if (stage - 1 >= 3)
                        opts.head_idx = stage - 2;
                else
                        opts.head_idx = 1;
        }

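        /*
         * Parse the trees collected by list_tree() and set up a tree
         * descriptor for each of them for unpack_trees().
         */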
        for (i = 0; i < nr_trees; i++) {
                struct tree *tree = trees[i];
                parse_tree(tree);
                init_tree_desc(t+i, tree->buffer, tree->size);
        }
        if (unpack_trees(nr_trees, t, &opts))
                return 128;

        /*
         * When reading only one tree (either the most basic form,
         * "-m ent" or "--reset ent" form), we can obtain a fully
         * valid cache-tree because the index must match exactly
         * what came from the tree.
         */
        if (nr_trees && !opts.prefix && (!opts.merge || (stage == 2))) {
                cache_tree_free(&active_cache_tree);
                prime_cache_tree();
        }

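        /*
         * Write the updated index to the lock file and rename it into
         * place, replacing the original index atomically.
         */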
        if (write_cache(newfd, active_cache, active_nr) ||
            commit_locked_index(&lock_file))
                die("unable to write new index file");
        return 0;
}