#include "cache.h"
+#include "cache-tree.h"
#include "tree.h"
#include "blob.h"
#include "commit.h"
const char *tree_type = "tree";
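+/*
+ * Turn one tree entry into a cache entry (name, mode, sha1, stage)
+ * and add it to the index using the add_cache_entry() flags the
+ * caller passes in.
+ */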
-static int read_one_entry(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage)
+static int read_one_entry_opt(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage, int opt)
{
int len;
unsigned int size;
memcpy(ce->name, base, baselen);
memcpy(ce->name + baselen, pathname, len+1);
hashcpy(ce->sha1, sha1);
- return add_cache_entry(ce, ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
+ return add_cache_entry(ce, opt);
+}
+
+static int read_one_entry(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage)
+{
+ return read_one_entry_opt(sha1, base, baselen, pathname, mode, stage,
+ ADD_CACHE_OK_TO_ADD|ADD_CACHE_SKIP_DFCHECK);
+}
+
+/*
+ * This is used when the caller knows there are no existing entries at
+ * the stage that would conflict with the entry being added.
+ */
+static int read_one_entry_quick(const unsigned char *sha1, const char *base, int baselen, const char *pathname, unsigned mode, int stage)
+{
+ return read_one_entry_opt(sha1, base, baselen, pathname, mode, stage,
+ ADD_CACHE_JUST_APPEND);
}
static int match_tree_entry(const char *base, int baselen, const char *path, unsigned int mode, const char **paths)
return 0;
}
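+/*
+ * qsort() comparator for the append-then-sort path in read_tree():
+ * order cache entries by name and stage, the canonical index order.
+ */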
+static int cmp_cache_name_compare(const void *a_, const void *b_)
+{
+ const struct cache_entry *ce1, *ce2;
+
+ ce1 = *((const struct cache_entry **)a_);
+ ce2 = *((const struct cache_entry **)b_);
+ return cache_name_compare(ce1->name, ntohs(ce1->ce_flags),
+ ce2->name, ntohs(ce2->ce_flags));
+}
+
int read_tree(struct tree *tree, int stage, const char **match)
{
- return read_tree_recursive(tree, "", 0, stage, match, read_one_entry);
+ read_tree_fn_t fn = NULL;
+ int i, err;
+
+ /*
+ * Currently all existing callers of this function call it with
+ * stage == 1, and only after making sure there is nothing at that
+ * stage, so we could always use read_one_entry_quick().
+ *
+ * But when we decide to straighten out git-read-tree not to
+ * use unpack_trees() in some cases, this will probably start
+ * to matter.
+ */
+
+ /*
+ * See if we have a cache entry at the stage. If so,
+ * do it the original slow way; otherwise, append and then
+ * sort at the end.
+ */
+ for (i = 0; !fn && i < active_nr; i++) {
+ struct cache_entry *ce = active_cache[i];
+ if (ce_stage(ce) == stage)
+ fn = read_one_entry;
+ }
+
+ if (!fn)
+ fn = read_one_entry_quick;
+ err = read_tree_recursive(tree, "", 0, stage, match, fn);
+ if (fn == read_one_entry || err)
+ return err;
+
+ /*
+ * Sort the cache entries -- we need to nuke the cache tree, though.
+ */
+ cache_tree_free(&active_cache_tree);
+ qsort(active_cache, active_nr, sizeof(active_cache[0]),
+ cmp_cache_name_compare);
+ return 0;
}
struct tree *lookup_tree(const unsigned char *sha1)
{
struct object *obj = lookup_object(sha1);
- if (!obj) {
- struct tree *ret = alloc_tree_node();
- created_object(sha1, &ret->object);
- ret->object.type = OBJ_TREE;
- return ret;
- }
+ if (!obj)
+ return create_object(sha1, OBJ_TREE, alloc_tree_node());
if (!obj->type)
obj->type = OBJ_TREE;
if (obj->type != OBJ_TREE) {
/* Count how many entries there are.. */
init_tree_desc(&desc, item->buffer, item->size);
while (tree_entry(&desc, &entry)) {
- if (S_ISDIRLNK(entry.mode))
+ if (S_ISGITLINK(entry.mode))
continue;
n_refs++;
}
while (tree_entry(&desc, &entry)) {
struct object *obj;
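+ /*
+ * Gitlink entries point at commits in another repository
+ * (submodules); there is no local object to reference here.
+ */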
- if (S_ISDIRLNK(entry.mode))
+ if (S_ISGITLINK(entry.mode))
continue;
if (S_ISDIR(entry.mode))
obj = &lookup_tree(entry.sha1)->object;
- else
+ else if (S_ISREG(entry.mode) || S_ISLNK(entry.mode))
obj = &lookup_blob(entry.sha1)->object;
+ else {
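+ /*
+ * Bad mode: warn, but still record the object (of unknown type)
+ * so the ref list stays complete.
+ */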
+ warning("in tree %s: entry %s has bad mode %.6o\n",
+ sha1_to_hex(item->object.sha1), entry.path, entry.mode);
+ obj = lookup_unknown_object(entry.sha1);
+ }
refs->ref[i++] = obj;
}
set_object_refs(&item->object, refs);