tree.c
#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "cache-tree.h"
#include "tree.h"
#include "object-store.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "alloc.h"
#include "tree-walk.h"

const char *tree_type = "tree";

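/*
 * Turn a single tree entry into a cache entry and add it to the
 * index with the given option flags.  Directories are not added
 * themselves; returning READ_TREE_RECURSIVE asks the tree walk to
 * descend into them instead.
 */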
static int read_one_entry_opt(struct index_state *istate,
                              const struct object_id *oid,
                              const char *base, int baselen,
                              const char *pathname,
                              unsigned mode, int stage, int opt)
{
        int len;
        unsigned int size;
        struct cache_entry *ce;

        if (S_ISDIR(mode))
                return READ_TREE_RECURSIVE;

        len = strlen(pathname);
        size = cache_entry_size(baselen + len);
        ce = xcalloc(1, size);

        ce->ce_mode = create_ce_mode(mode);
        ce->ce_flags = create_ce_flags(stage);
        ce->ce_namelen = baselen + len;
        memcpy(ce->name, base, baselen);
        memcpy(ce->name + baselen, pathname, len+1);
        oidcpy(&ce->oid, oid);
        return add_index_entry(istate, ce, opt);
}

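/*
 * read_tree_fn_t callback for the careful path: add the entry with
 * the normal checks, but skip the directory/file conflict check.
 */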
static int read_one_entry(const struct object_id *oid, struct strbuf *base,
                          const char *pathname, unsigned mode, int stage,
                          void *context)
{
        struct index_state *istate = context;
        return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
                                  mode, stage,
                                  ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
}

/*
 * This is used when the caller knows there are no existing entries
 * at the stage that will conflict with the entry being added.
 */
static int read_one_entry_quick(const struct object_id *oid, struct strbuf *base,
                                const char *pathname, unsigned mode, int stage,
                                void *context)
{
        struct index_state *istate = context;
        return read_one_entry_opt(istate, oid, base->buf, base->len, pathname,
                                  mode, stage,
                                  ADD_CACHE_JUST_APPEND);
}

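/*
 * Walk one tree level, invoking fn for every entry that the pathspec
 * considers interesting.  When fn returns READ_TREE_RECURSIVE, recurse
 * into subdirectories (and, for gitlinks, into the submodule commit's
 * tree), extending and then restoring the path prefix in *base.
 */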
static int read_tree_1(struct tree *tree, struct strbuf *base,
                       int stage, const struct pathspec *pathspec,
                       read_tree_fn_t fn, void *context)
{
        struct tree_desc desc;
        struct name_entry entry;
        struct object_id oid;
        int len, oldlen = base->len;
        enum interesting retval = entry_not_interesting;

        if (parse_tree(tree))
                return -1;

        init_tree_desc(&desc, tree->buffer, tree->size);

        while (tree_entry(&desc, &entry)) {
                if (retval != all_entries_interesting) {
                        retval = tree_entry_interesting(&entry, base, 0, pathspec);
                        if (retval == all_entries_not_interesting)
                                break;
                        if (retval == entry_not_interesting)
                                continue;
                }

                switch (fn(entry.oid, base,
                           entry.path, entry.mode, stage, context)) {
                case 0:
                        continue;
                case READ_TREE_RECURSIVE:
                        break;
                default:
                        return -1;
                }

                if (S_ISDIR(entry.mode))
                        oidcpy(&oid, entry.oid);
                else if (S_ISGITLINK(entry.mode)) {
                        struct commit *commit;

                        commit = lookup_commit(entry.oid);
                        if (!commit)
                                die("Commit %s in submodule path %s%s not found",
                                    oid_to_hex(entry.oid),
                                    base->buf, entry.path);

                        if (parse_commit(commit))
                                die("Invalid commit %s in submodule path %s%s",
                                    oid_to_hex(entry.oid),
                                    base->buf, entry.path);

                        oidcpy(&oid, get_commit_tree_oid(commit));
                }
                else
                        continue;

                len = tree_entry_len(&entry);
                strbuf_add(base, entry.path, len);
                strbuf_addch(base, '/');
                retval = read_tree_1(lookup_tree(&oid),
                                     base, stage, pathspec,
                                     fn, context);
                strbuf_setlen(base, oldlen);
                if (retval)
                        return -1;
        }
        return 0;
}

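/*
 * Public entry point for the recursive walk: seed the path prefix
 * with the given base and hand off to read_tree_1().
 */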
int read_tree_recursive(struct tree *tree,
                        const char *base, int baselen,
                        int stage, const struct pathspec *pathspec,
                        read_tree_fn_t fn, void *context)
{
        struct strbuf sb = STRBUF_INIT;
        int ret;

        strbuf_add(&sb, base, baselen);
        ret = read_tree_1(tree, &sb, stage, pathspec, fn, context);
        strbuf_release(&sb);
        return ret;
}

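/* QSORT callback: order cache entries by name and stage. */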
static int cmp_cache_name_compare(const void *a_, const void *b_)
{
        const struct cache_entry *ce1, *ce2;

        ce1 = *((const struct cache_entry **)a_);
        ce2 = *((const struct cache_entry **)b_);
        return cache_name_stage_compare(ce1->name, ce1->ce_namelen, ce_stage(ce1),
                                        ce2->name, ce2->ce_namelen, ce_stage(ce2));
}

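/*
 * Read a whole tree into the index at the given stage.  Entries are
 * normally just appended and sorted at the end; if something already
 * exists at that stage, fall back to adding each entry the slow,
 * checked way.
 */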
int read_tree(struct tree *tree, int stage, struct pathspec *match,
              struct index_state *istate)
{
        read_tree_fn_t fn = NULL;
        int i, err;

        /*
         * Currently the only existing callers of this function all
         * call it with stage=1 and after making sure there is nothing
         * at that stage; we could always use read_one_entry_quick().
         *
         * But when we decide to straighten out git-read-tree not to
         * use unpack_trees() in some cases, this will probably start
         * to matter.
         */

        /*
         * See if we have a cache entry at the stage.  If so,
         * do it the original slow way, otherwise, append and then
         * sort at the end.
         */
        for (i = 0; !fn && i < istate->cache_nr; i++) {
                const struct cache_entry *ce = istate->cache[i];
                if (ce_stage(ce) == stage)
                        fn = read_one_entry;
        }

        if (!fn)
                fn = read_one_entry_quick;
        err = read_tree_recursive(tree, "", 0, stage, match, fn, istate);
        if (fn == read_one_entry || err)
                return err;

        /*
         * Sort the cache entries -- we need to nuke the cache tree, though.
         */
        cache_tree_free(&istate->cache_tree);
        QSORT(istate->cache, istate->cache_nr, cmp_cache_name_compare);
        return 0;
}

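/*
 * Return the in-core object for the given tree id, allocating a new
 * (unparsed) tree object if none exists yet.
 */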
struct tree *lookup_tree(const struct object_id *oid)
{
        struct object *obj = lookup_object(oid->hash);
        if (!obj)
                return create_object(the_repository, oid->hash,
                                     alloc_tree_node(the_repository));
        return object_as_type(obj, OBJ_TREE, 0);
}

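/*
 * Attach an already-read buffer of tree data to the tree object and
 * mark it parsed; the tree keeps the buffer until free_tree_buffer().
 */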
int parse_tree_buffer(struct tree *item, void *buffer, unsigned long size)
{
        if (item->object.parsed)
                return 0;
        item->object.parsed = 1;
        item->buffer = buffer;
        item->size = size;

        return 0;
}

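/*
 * Read the tree's data from the object store and parse it.  A missing
 * object is reported as an error unless quiet_on_missing is set, in
 * which case -1 is returned silently.
 */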
int parse_tree_gently(struct tree *item, int quiet_on_missing)
{
        enum object_type type;
        void *buffer;
        unsigned long size;

        if (item->object.parsed)
                return 0;
        buffer = read_object_file(&item->object.oid, &type, &size);
        if (!buffer)
                return quiet_on_missing ? -1 :
                        error("Could not read %s",
                              oid_to_hex(&item->object.oid));
        if (type != OBJ_TREE) {
                free(buffer);
                return error("Object %s not a tree",
                             oid_to_hex(&item->object.oid));
        }
        return parse_tree_buffer(item, buffer, size);
}

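/* Drop the tree's buffer and mark it unparsed so it can be re-read. */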
void free_tree_buffer(struct tree *tree)
{
        FREE_AND_NULL(tree->buffer);
        tree->size = 0;
        tree->object.parsed = 0;
}

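/*
 * Peel commits and tags until a tree is reached; returns NULL if the
 * object cannot be peeled to a tree.
 */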
struct tree *parse_tree_indirect(const struct object_id *oid)
{
        struct object *obj = parse_object(oid);
        do {
                if (!obj)
                        return NULL;
                if (obj->type == OBJ_TREE)
                        return (struct tree *) obj;
                else if (obj->type == OBJ_COMMIT)
                        obj = &(get_commit_tree(((struct commit *)obj))->object);
                else if (obj->type == OBJ_TAG)
                        obj = ((struct tag *) obj)->tagged;
                else
                        return NULL;
                if (!obj->parsed)
                        parse_object(&obj->oid);
        } while (1);
}