upload-archive: allow user to turn off filters
diff --git a/sha1_file.c b/sha1_file.c
index e194f6a12862a4a4479fd714b550d426f8ee1313..064a33040812ba8782bf602c693abf08613d6ec7 100644
--- a/sha1_file.c
+++ b/sha1_file.c
 #include "pack.h"
 #include "blob.h"
 #include "commit.h"
+#include "run-command.h"
 #include "tag.h"
 #include "tree.h"
+#include "tree-walk.h"
 #include "refs.h"
 #include "pack-revindex.h"
 #include "sha1-lookup.h"
 #endif
 #endif
 
-#ifdef NO_C99_FORMAT
-#define SZ_FMT "lu"
-static unsigned long sz_fmt(size_t s) { return (unsigned long)s; }
-#else
-#define SZ_FMT "zu"
-static size_t sz_fmt(size_t s) { return s; }
-#endif
+#define SZ_FMT PRIuMAX
+static inline uintmax_t sz_fmt(size_t s) { return s; }
 
 const unsigned char null_sha1[20];
 
-static int git_open_noatime(const char *name, struct packed_git *p);
-
 /*
  * This is meant to hold a *small* number of objects that you would
  * want read_sha1_file() to be able to return, but yet you do not want
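
The NO_C99_FORMAT fallback above is replaced by PRIuMAX from <inttypes.h> plus a helper that widens size_t. A minimal standalone sketch of the same pattern, just to show the intended usage (hypothetical program, not part of this file):

#include <inttypes.h>
#include <stdio.h>

#define SZ_FMT PRIuMAX
static inline uintmax_t sz_fmt(size_t s) { return s; }

int main(void)
{
        size_t mapped = 32 * 1024;
        /* one portable conversion specifier; no NO_C99_FORMAT #ifdef needed */
        printf("mapped %" SZ_FMT " bytes\n", sz_fmt(mapped));
        return 0;
}
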
@@ -72,6 +67,35 @@ static struct cached_object *find_cached_object(const unsigned char *sha1)
        return NULL;
 }
 
+int mkdir_in_gitdir(const char *path)
+{
+       if (mkdir(path, 0777)) {
+               int saved_errno = errno;
+               struct stat st;
+               struct strbuf sb = STRBUF_INIT;
+
+               if (errno != EEXIST)
+                       return -1;
+               /*
+                * Are we looking at a path in a symlinked worktree
+                * whose original repository does not yet have it?
+                * e.g. .git/rr-cache pointing at its original
+                * repository in which the user hasn't performed any
+                * conflict resolution yet?
+                */
+               if (lstat(path, &st) || !S_ISLNK(st.st_mode) ||
+                   strbuf_readlink(&sb, path, st.st_size) ||
+                   !is_absolute_path(sb.buf) ||
+                   mkdir(sb.buf, 0777)) {
+                       strbuf_release(&sb);
+                       errno = saved_errno;
+                       return -1;
+               }
+               strbuf_release(&sb);
+       }
+       return adjust_shared_perm(path);
+}
+
 int safe_create_leading_directories(char *path)
 {
        char *pos = path + offset_1st_component(path);
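
The new mkdir_in_gitdir() above covers the case where an entry in $GIT_DIR is an absolute symlink into the original repository (the .git/rr-cache scenario from the comment). A standalone sketch of that fallback using plain readlink() instead of git's strbuf helpers (the function name here is hypothetical):

#include <errno.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

int mkdir_or_follow_symlink(const char *path)
{
        struct stat st;
        char target[PATH_MAX];
        ssize_t len;

        if (!mkdir(path, 0777))
                return 0;                       /* created directly */
        if (errno != EEXIST)
                return -1;
        if (lstat(path, &st) || !S_ISLNK(st.st_mode))
                return -1;                      /* exists but is not a symlink */
        len = readlink(path, target, sizeof(target) - 1);
        if (len < 0)
                return -1;
        target[len] = '\0';
        if (target[0] != '/')                   /* follow absolute targets only, as above */
                return -1;
        return mkdir(target, 0777);             /* create it in the original repository */
}

The real function additionally restores the saved errno on failure and finishes with adjust_shared_perm(), so shared-repository permissions are honoured.
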
@@ -202,6 +226,7 @@ struct alternate_object_database *alt_odb_list;
 static struct alternate_object_database **alt_odb_tail;
 
 static void read_info_alternates(const char * alternates, int depth);
+static int git_open_noatime(const char *name);
 
 /*
  * Prepare alternate object database registry.
@@ -335,7 +360,7 @@ static void read_info_alternates(const char * relative_base, int depth)
        int fd;
 
        sprintf(path, "%s/%s", relative_base, alt_file_name);
-       fd = git_open_noatime(path, NULL);
+       fd = git_open_noatime(path);
        if (fd < 0)
                return;
        if (fstat(fd, &st) || (st.st_size == 0)) {
@@ -450,7 +475,7 @@ static int check_packed_git_idx(const char *path,  struct packed_git *p)
        struct pack_idx_header *hdr;
        size_t idx_size;
        uint32_t version, nr, i, *index;
-       int fd = git_open_noatime(path, p);
+       int fd = git_open_noatime(path);
        struct stat st;
 
        if (fd < 0)
@@ -732,7 +757,7 @@ static int open_packed_git_1(struct packed_git *p)
        while (pack_max_fds <= pack_open_fds && unuse_one_window(NULL, -1))
                ; /* nothing */
 
-       p->pack_fd = git_open_noatime(p->pack_name, p);
+       p->pack_fd = git_open_noatime(p->pack_name);
        if (p->pack_fd < 0 || fstat(p->pack_fd, &st))
                return -1;
        pack_open_fds++;
@@ -1120,7 +1145,7 @@ int check_sha1_signature(const unsigned char *sha1, void *map, unsigned long siz
        return hashcmp(sha1, real_sha1) ? -1 : 0;
 }
 
-static int git_open_noatime(const char *name, struct packed_git *p)
+static int git_open_noatime(const char *name)
 {
        static int sha1_file_open_flag = O_NOATIME;
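
git_open_noatime() loses its unused packed_git parameter. Its body is not part of this hunk, but the static sha1_file_open_flag above suggests the usual pattern: try O_NOATIME first and drop the flag for good once the kernel refuses it. A hedged standalone sketch of that pattern (an assumption, not copied from this file):

#include <errno.h>
#include <fcntl.h>

#ifndef O_NOATIME
#define O_NOATIME 0
#endif

int open_readonly_noatime(const char *name)
{
        static int noatime = O_NOATIME;

        for (;;) {
                int fd = open(name, O_RDONLY | noatime);
                if (fd >= 0)
                        return fd;
                /* EPERM: we do not own the file, so O_NOATIME is refused */
                if (noatime && errno == EPERM) {
                        noatime = 0;
                        continue;
                }
                return -1;
        }
}
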
 
@@ -1145,7 +1170,7 @@ static int open_sha1_file(const unsigned char *sha1)
        char *name = sha1_file_name(sha1);
        struct alternate_object_database *alt;
 
-       fd = git_open_noatime(name, NULL);
+       fd = git_open_noatime(name);
        if (fd >= 0)
                return fd;
 
@@ -1154,7 +1179,7 @@ static int open_sha1_file(const unsigned char *sha1)
        for (alt = alt_odb_list; alt; alt = alt->next) {
                name = alt->name;
                fill_sha1_path(name, sha1);
-               fd = git_open_noatime(alt->base, NULL);
+               fd = git_open_noatime(alt->base);
                if (fd >= 0)
                        return fd;
        }
@@ -1283,7 +1308,7 @@ static void *unpack_sha1_rest(z_stream *stream, void *buffer, unsigned long size
                /*
                 * The above condition must be (bytes <= size), not
                 * (bytes < size).  In other words, even though we
-                * expect no more output and set avail_out to zer0,
+                * expect no more output and set avail_out to zero,
                 * the input zlib stream may have bytes that express
                 * "this concludes the stream", and we *do* want to
                 * eat that input.
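
The reworded comment is about zlib's end-of-stream handling: the trailing bytes that mean "this concludes the stream" still have to be fed to inflate() even when no further output is expected. A small self-contained illustration (hypothetical test program, needs -lz):

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
        const unsigned char plain[] = "this concludes the stream";
        unsigned char packed[128], out[sizeof(plain)];
        uLongf packed_len = sizeof(packed);
        z_stream s;
        int status;

        if (compress(packed, &packed_len, plain, sizeof(plain)) != Z_OK)
                return 1;

        memset(&s, 0, sizeof(s));
        if (inflateInit(&s) != Z_OK)
                return 1;
        s.next_in = packed;
        s.avail_in = packed_len;
        s.next_out = out;
        s.avail_out = sizeof(out);      /* exactly the decompressed size */
        do {
                status = inflate(&s, Z_NO_FLUSH);
        } while (status == Z_OK);
        /*
         * inflate() reports Z_STREAM_END only after it has also eaten
         * the end-of-stream trailer, even though avail_out is now 0;
         * that is the property unpack_sha1_rest() depends on.
         */
        printf("status=%d avail_in=%u avail_out=%u\n",
               status, s.avail_in, s.avail_out);
        inflateEnd(&s);
        return status == Z_STREAM_END ? 0 : 1;
}
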
@@ -1509,7 +1534,7 @@ static int unpack_object_header(struct packed_git *p,
        enum object_type type;
 
        /* use_pack() assures us we have [base, base + 20) available
-        * as a range that we can look at at.  (Its actually the hash
+        * as a range that we can look at.  (It's actually the hash
         * size that is assured.)  With our object header encoding
         * the maximum deflated object size is 2^137, which is just
         * insane, so we know won't exceed what we have been given.
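
For context, the header encoding this comment refers to packs the type and size into a little-endian base-128 varint, so 20 available bytes are far more than enough: 4 size bits from the first byte plus 7 bits from each of up to 19 continuation bytes gives the 2^137 ceiling mentioned above. A hypothetical standalone decoder sketch mirroring what unpack_object_header() parses:

#include <stddef.h>

/*
 * Decode an in-pack object header: bits 6-4 of the first byte are the
 * type, bit 7 is the continuation flag, and the remaining bits form
 * the size, least significant chunk first.  The caller must supply at
 * least one byte (cf. the use_pack() comment above).
 */
unsigned long decode_pack_object_header(const unsigned char *buf, size_t len,
                                        int *type)
{
        size_t used = 0;
        unsigned char c = buf[used++];
        unsigned long size = c & 0x0f;
        int shift = 4;

        *type = (c >> 4) & 0x07;
        while (c & 0x80) {
                if (used >= len || shift >= (int)(sizeof(size) * 8))
                        return 0;       /* truncated or absurdly large header */
                c = buf[used++];
                size += (unsigned long)(c & 0x7f) << shift;
                shift += 7;
        }
        return size;
}
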
@@ -2181,23 +2206,21 @@ static void *read_object(const unsigned char *sha1, enum object_type *type,
  * deal with them should arrange to call read_object() and give error
  * messages themselves.
  */
-void *read_sha1_file_repl(const unsigned char *sha1,
-                         enum object_type *type,
-                         unsigned long *size,
-                         const unsigned char **replacement)
+void *read_sha1_file_extended(const unsigned char *sha1,
+                             enum object_type *type,
+                             unsigned long *size,
+                             unsigned flag)
 {
-       const unsigned char *repl = lookup_replace_object(sha1);
        void *data;
        char *path;
        const struct packed_git *p;
+       const unsigned char *repl = (flag & READ_SHA1_FILE_REPLACE)
+               ? lookup_replace_object(sha1) : sha1;
 
        errno = 0;
        data = read_object(repl, type, size);
-       if (data) {
-               if (replacement)
-                       *replacement = repl;
+       if (data)
                return data;
-       }
 
        if (errno && errno != ENOENT)
                die_errno("failed to read object %s", sha1_to_hex(sha1));
@@ -2527,10 +2550,40 @@ int has_sha1_file(const unsigned char *sha1)
        return has_loose_object(sha1);
 }
 
+static void check_tree(const void *buf, size_t size)
+{
+       struct tree_desc desc;
+       struct name_entry entry;
+
+       init_tree_desc(&desc, buf, size);
+       while (tree_entry(&desc, &entry))
+               /* do nothing
+                * tree_entry() will die() on malformed entries */
+               ;
+}
+
+static void check_commit(const void *buf, size_t size)
+{
+       struct commit c;
+       memset(&c, 0, sizeof(c));
+       if (parse_commit_buffer(&c, buf, size))
+               die("corrupt commit");
+}
+
+static void check_tag(const void *buf, size_t size)
+{
+       struct tag t;
+       memset(&t, 0, sizeof(t));
+       if (parse_tag_buffer(&t, buf, size))
+               die("corrupt tag");
+}
+
 static int index_mem(unsigned char *sha1, void *buf, size_t size,
-                    int write_object, enum object_type type, const char *path)
+                    enum object_type type,
+                    const char *path, unsigned flags)
 {
        int ret, re_allocated = 0;
+       int write_object = flags & HASH_WRITE_OBJECT;
 
        if (!type)
                type = OBJ_BLOB;
@@ -2546,6 +2599,14 @@ static int index_mem(unsigned char *sha1, void *buf, size_t size,
                        re_allocated = 1;
                }
        }
+       if (flags & HASH_FORMAT_CHECK) {
+               if (type == OBJ_TREE)
+                       check_tree(buf, size);
+               if (type == OBJ_COMMIT)
+                       check_commit(buf, size);
+               if (type == OBJ_TAG)
+                       check_tag(buf, size);
+       }
 
        if (write_object)
                ret = write_sha1_file(buf, size, typename(type), sha1);
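
The new HASH_FORMAT_CHECK bit lets index_mem() run check_tree(), check_commit() or check_tag() on the buffer before it is written. A hypothetical hash-object-style caller, going through the reworked index_fd() further down in this patch (assuming the HASH_* flag bits from cache.h):

#include "cache.h"

/*
 * Hypothetical sketch: hash whatever is on fd as a tag object, write
 * it to the object database, and let check_tag() die if the buffer is
 * malformed.  Note that index_fd() closes fd for us.
 */
static int index_tag_fd(int fd, unsigned char sha1[20])
{
        struct stat st;

        if (fstat(fd, &st))
                return error("fstat: %s", strerror(errno));
        return index_fd(sha1, fd, &st, OBJ_TAG, NULL,
                        HASH_WRITE_OBJECT | HASH_FORMAT_CHECK);
}
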
@@ -2556,42 +2617,141 @@ static int index_mem(unsigned char *sha1, void *buf, size_t size,
        return ret;
 }
 
+static int index_pipe(unsigned char *sha1, int fd, enum object_type type,
+                     const char *path, unsigned flags)
+{
+       struct strbuf sbuf = STRBUF_INIT;
+       int ret;
+
+       if (strbuf_read(&sbuf, fd, 4096) >= 0)
+               ret = index_mem(sha1, sbuf.buf, sbuf.len, type, path, flags);
+       else
+               ret = -1;
+       strbuf_release(&sbuf);
+       return ret;
+}
+
 #define SMALL_FILE_SIZE (32*1024)
 
-int index_fd(unsigned char *sha1, int fd, struct stat *st, int write_object,
-            enum object_type type, const char *path)
+static int index_core(unsigned char *sha1, int fd, size_t size,
+                     enum object_type type, const char *path,
+                     unsigned flags)
 {
        int ret;
-       size_t size = xsize_t(st->st_size);
 
-       if (!S_ISREG(st->st_mode)) {
-               struct strbuf sbuf = STRBUF_INIT;
-               if (strbuf_read(&sbuf, fd, 4096) >= 0)
-                       ret = index_mem(sha1, sbuf.buf, sbuf.len, write_object,
-                                       type, path);
-               else
-                       ret = -1;
-               strbuf_release(&sbuf);
-       } else if (!size) {
-               ret = index_mem(sha1, NULL, size, write_object, type, path);
+       if (!size) {
+               ret = index_mem(sha1, NULL, size, type, path, flags);
        } else if (size <= SMALL_FILE_SIZE) {
                char *buf = xmalloc(size);
                if (size == read_in_full(fd, buf, size))
-                       ret = index_mem(sha1, buf, size, write_object, type,
-                                       path);
+                       ret = index_mem(sha1, buf, size, type, path, flags);
                else
                        ret = error("short read %s", strerror(errno));
                free(buf);
        } else {
                void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
-               ret = index_mem(sha1, buf, size, write_object, type, path);
+               ret = index_mem(sha1, buf, size, type, path, flags);
                munmap(buf, size);
        }
+       return ret;
+}
+
+/*
+ * This creates one packfile per large blob, because the caller
+ * immediately wants the result sha1, and fast-import can report the
+ * object name via marks mechanism only by closing the created
+ * packfile.
+ *
+ * This also bypasses the usual "convert-to-git" dance, and that is on
+ * purpose. We could write a streaming version of the converting
+ * functions and insert that before feeding the data to fast-import
+ * (or equivalent in-core API described above), but the primary
+ * motivation for trying to stream from the working tree file and to
+ * avoid mmaping it in core is to deal with large binary blobs, and
+ * by definition they do _not_ want to get any conversion.
+ */
+static int index_stream(unsigned char *sha1, int fd, size_t size,
+                       enum object_type type, const char *path,
+                       unsigned flags)
+{
+       struct child_process fast_import;
+       char export_marks[512];
+       const char *argv[] = { "fast-import", "--quiet", export_marks, NULL };
+       char tmpfile[512];
+       char fast_import_cmd[512];
+       char buf[512];
+       int len, tmpfd;
+
+       strcpy(tmpfile, git_path("hashstream_XXXXXX"));
+       tmpfd = git_mkstemp_mode(tmpfile, 0600);
+       if (tmpfd < 0)
+               die_errno("cannot create tempfile: %s", tmpfile);
+       if (close(tmpfd))
+               die_errno("cannot close tempfile: %s", tmpfile);
+       sprintf(export_marks, "--export-marks=%s", tmpfile);
+
+       memset(&fast_import, 0, sizeof(fast_import));
+       fast_import.in = -1;
+       fast_import.argv = argv;
+       fast_import.git_cmd = 1;
+       if (start_command(&fast_import))
+               die_errno("index-stream: git fast-import failed");
+
+       len = sprintf(fast_import_cmd, "blob\nmark :1\ndata %lu\n",
+                     (unsigned long) size);
+       write_or_whine(fast_import.in, fast_import_cmd, len,
+                      "index-stream: feeding fast-import");
+       while (size) {
+               char buf[10240];
+               size_t sz = size < sizeof(buf) ? size : sizeof(buf);
+               ssize_t actual;
+
+               actual = read_in_full(fd, buf, sz);
+               if (actual < 0)
+                       die_errno("index-stream: reading input");
+               if (write_in_full(fast_import.in, buf, actual) != actual)
+                       die_errno("index-stream: feeding fast-import");
+               size -= actual;
+       }
+       if (close(fast_import.in))
+               die_errno("index-stream: closing fast-import");
+       if (finish_command(&fast_import))
+               die_errno("index-stream: finishing fast-import");
+
+       tmpfd = open(tmpfile, O_RDONLY);
+       if (tmpfd < 0)
+               die_errno("index-stream: cannot open fast-import mark");
+       len = read(tmpfd, buf, sizeof(buf) - 1);
+       if (len < 0)
+               die_errno("index-stream: reading fast-import mark");
+       buf[len] = '\0';
+       if (close(tmpfd) < 0)
+               die_errno("index-stream: closing fast-import mark");
+       if (unlink(tmpfile))
+               die_errno("index-stream: unlinking fast-import mark");
+       if (len != 44 ||
+           memcmp(":1 ", buf, 3) ||
+           get_sha1_hex(buf + 3, sha1))
+               die("index-stream: unexpected fast-import mark: <%s>", buf);
+       return 0;
+}
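
For reference, the two small formats index_stream() relies on, worked out from the code above. What it writes to fast-import's stdin is

        blob
        mark :1
        data <size>
        <size raw bytes follow>

and --export-marks then leaves a single line of the form

        :1 <40-hex object name>

in the temporary file: ":1 " (3 bytes) plus 40 hex digits plus a newline is 44 bytes, which is exactly what the len != 44, memcmp(":1 ", ...) and get_sha1_hex(buf + 3, ...) checks verify.
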
+
+int index_fd(unsigned char *sha1, int fd, struct stat *st,
+            enum object_type type, const char *path, unsigned flags)
+{
+       int ret;
+       size_t size = xsize_t(st->st_size);
+
+       if (!S_ISREG(st->st_mode))
+               ret = index_pipe(sha1, fd, type, path, flags);
+       else if (size <= big_file_threshold || type != OBJ_BLOB)
+               ret = index_core(sha1, fd, size, type, path, flags);
+       else
+               ret = index_stream(sha1, fd, size, type, path, flags);
        close(fd);
        return ret;
 }
 
-int index_path(unsigned char *sha1, const char *path, struct stat *st, int write_object)
+int index_path(unsigned char *sha1, const char *path, struct stat *st, unsigned flags)
 {
        int fd;
        struct strbuf sb = STRBUF_INIT;
@@ -2602,7 +2762,7 @@ int index_path(unsigned char *sha1, const char *path, struct stat *st, int write
                if (fd < 0)
                        return error("open(\"%s\"): %s", path,
                                     strerror(errno));
-               if (index_fd(sha1, fd, st, write_object, OBJ_BLOB, path) < 0)
+               if (index_fd(sha1, fd, st, OBJ_BLOB, path, flags) < 0)
                        return error("%s: failed to insert into database",
                                     path);
                break;
@@ -2612,7 +2772,7 @@ int index_path(unsigned char *sha1, const char *path, struct stat *st, int write
                        return error("readlink(\"%s\"): %s", path,
                                     errstr);
                }
-               if (!write_object)
+               if (!(flags & HASH_WRITE_OBJECT))
                        hash_sha1_file(sb.buf, sb.len, blob_type, sha1);
                else if (write_sha1_file(sb.buf, sb.len, blob_type, sha1))
                        return error("%s: failed to insert into database",