githooks documentation: add a note about the +x mode
diff --git a/cache-tree.c b/cache-tree.c
index 50b35264fd0405a299700ef8bf4a61f416f30e46..3d8f218a5f9e838b15e1d56113a4dd56904ee544 100644
--- a/cache-tree.c
+++ b/cache-tree.c
@@ -155,13 +155,17 @@ static int verify_cache(struct cache_entry **cache,
        funny = 0;
        for (i = 0; i < entries; i++) {
                struct cache_entry *ce = cache[i];
-               if (ce_stage(ce)) {
+               if (ce_stage(ce) || (ce->ce_flags & CE_INTENT_TO_ADD)) {
                        if (10 < ++funny) {
                                fprintf(stderr, "...\n");
                                break;
                        }
-                       fprintf(stderr, "%s: unmerged (%s)\n",
-                               ce->name, sha1_to_hex(ce->sha1));
+                       if (ce_stage(ce))
+                               fprintf(stderr, "%s: unmerged (%s)\n",
+                                       ce->name, sha1_to_hex(ce->sha1));
+                       else
+                               fprintf(stderr, "%s: not added yet\n",
+                                       ce->name);
                }
        }
        if (funny)
@@ -320,13 +324,13 @@ static int update_one(struct cache_tree *it,
                }
                else {
                        sha1 = ce->sha1;
-                       mode = ntohl(ce->ce_mode);
+                       mode = ce->ce_mode;
                        entlen = pathlen - baselen;
                }
                if (mode != S_IFGITLINK && !missing_ok && !has_sha1_file(sha1))
                        return error("invalid object %s", sha1_to_hex(sha1));
 
-               if (!ce->ce_mode)
+               if (ce->ce_flags & CE_REMOVE)
                        continue; /* entry being removed */
 
                strbuf_grow(&buffer, entlen + 100);
@@ -341,8 +345,11 @@ static int update_one(struct cache_tree *it,
 
        if (dryrun)
                hash_sha1_file(buffer.buf, buffer.len, tree_type, it->sha1);
-       else
-               write_sha1_file(buffer.buf, buffer.len, tree_type, it->sha1);
+       else if (write_sha1_file(buffer.buf, buffer.len, tree_type, it->sha1)) {
+               strbuf_release(&buffer);
+               return -1;
+       }
+
        strbuf_release(&buffer);
        it->entry_count = i;
 #if DEBUG
@@ -504,7 +511,7 @@ struct cache_tree *cache_tree_read(const char *buffer, unsigned long size)
        return read_one(&buffer, &size);
 }
 
-struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
+static struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
 {
        while (*path) {
                const char *slash;
@@ -529,3 +536,58 @@ struct cache_tree *cache_tree_find(struct cache_tree *it, const char *path)
        }
        return it;
 }
+
+int write_cache_as_tree(unsigned char *sha1, int missing_ok, const char *prefix)
+{
+       int entries, was_valid, newfd;
+
+       /*
+        * We can't free this memory; it becomes part of a linked list
+        * that is walked at atexit() time.
+        */
+       struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
+
+       newfd = hold_locked_index(lock_file, 1);
+
+       entries = read_cache();
+       if (entries < 0)
+               return WRITE_TREE_UNREADABLE_INDEX;
+
+       if (!active_cache_tree)
+               active_cache_tree = cache_tree();
+
+       was_valid = cache_tree_fully_valid(active_cache_tree);
+
+       if (!was_valid) {
+               if (cache_tree_update(active_cache_tree,
+                                     active_cache, active_nr,
+                                     missing_ok, 0) < 0)
+                       return WRITE_TREE_UNMERGED_INDEX;
+               if (0 <= newfd) {
+                       if (!write_cache(newfd, active_cache, active_nr) &&
+                           !commit_lock_file(lock_file))
+                               newfd = -1;
+               }
+               /* Not being able to write is fine -- we are only interested
+                * in updating the cache-tree part, and if the next caller
+                * ends up using the old index with unupdated cache-tree part
+                * it misses the work we did here, but that is just a
+                * performance penalty and not a big deal.
+                */
+       }
+
+       if (prefix) {
+               struct cache_tree *subtree =
+                       cache_tree_find(active_cache_tree, prefix);
+               if (!subtree)
+                       return WRITE_TREE_PREFIX_ERROR;
+               hashcpy(sha1, subtree->sha1);
+       }
+       else
+               hashcpy(sha1, active_cache_tree->sha1);
+
+       if (0 <= newfd)
+               rollback_lock_file(lock_file);
+
+       return 0;
+}
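
For context, here is a minimal sketch of how a caller might use the write_cache_as_tree() helper introduced above. It is not part of this patch: the wrapper function name and the error-message wording are illustrative, and it assumes the WRITE_TREE_* return codes are declared alongside the write_cache_as_tree() prototype in cache-tree.h; only write_cache_as_tree() itself and the return codes it uses come from the change.

/*
 * Illustrative sketch only -- not part of the patch above.
 */
#include "cache.h"
#include "cache-tree.h"

static int show_tree_of_index(const char *prefix, int missing_ok)
{
        unsigned char sha1[20];

        switch (write_cache_as_tree(sha1, missing_ok, prefix)) {
        case 0:
                /* Success: print the name of the tree object we wrote. */
                printf("%s\n", sha1_to_hex(sha1));
                return 0;
        case WRITE_TREE_UNREADABLE_INDEX:
                return error("could not read the index");
        case WRITE_TREE_UNMERGED_INDEX:
                return error("index is unmerged; cannot write a tree");
        case WRITE_TREE_PREFIX_ERROR:
                return error("prefix '%s' not found in the cache-tree", prefix);
        default:
                return error("unexpected failure writing tree");
        }
}

As the patch itself shows, prefix selects a subtree of the cache-tree via cache_tree_find(), and missing_ok controls whether update_one() treats an object missing from the object database as a fatal error.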