#include "pack.h"
#include "blob.h"
#include "commit.h"
+#include "run-command.h"
#include "tag.h"
#include "tree.h"
#include "tree-walk.h"
}
static int index_mem(unsigned char *sha1, void *buf, size_t size,
- int write_object, enum object_type type,
- const char *path, int format_check)
+ enum object_type type,
+ const char *path, unsigned flags)
{
int ret, re_allocated = 0;
+ int write_object = flags & HASH_WRITE_OBJECT;
if (!type)
type = OBJ_BLOB;
re_allocated = 1;
}
}
- if (format_check) {
+ if (flags & HASH_FORMAT_CHECK) {
if (type == OBJ_TREE)
check_tree(buf, size);
if (type == OBJ_COMMIT)
return ret;
}
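+/*
+ * Read the whole input (e.g. a pipe or other non-regular file) into a
+ * strbuf and hash it in core with index_mem().
+ */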
+static int index_pipe(unsigned char *sha1, int fd, enum object_type type,
+ const char *path, unsigned flags)
+{
+ struct strbuf sbuf = STRBUF_INIT;
+ int ret;
+
+ if (strbuf_read(&sbuf, fd, 4096) >= 0)
+ ret = index_mem(sha1, sbuf.buf, sbuf.len, type, path, flags);
+ else
+ ret = -1;
+ strbuf_release(&sbuf);
+ return ret;
+}
+
#define SMALL_FILE_SIZE (32*1024)
-int index_fd(unsigned char *sha1, int fd, struct stat *st, int write_object,
- enum object_type type, const char *path, int format_check)
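+/*
+ * Hash a regular file entirely in core: an empty file needs no buffer,
+ * files up to SMALL_FILE_SIZE are read with read_in_full(), and anything
+ * larger is mmap()ed read-only.
+ */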
+static int index_core(unsigned char *sha1, int fd, size_t size,
+ enum object_type type, const char *path,
+ unsigned flags)
{
int ret;
- size_t size = xsize_t(st->st_size);
- if (!S_ISREG(st->st_mode)) {
- struct strbuf sbuf = STRBUF_INIT;
- if (strbuf_read(&sbuf, fd, 4096) >= 0)
- ret = index_mem(sha1, sbuf.buf, sbuf.len, write_object,
- type, path, format_check);
- else
- ret = -1;
- strbuf_release(&sbuf);
- } else if (!size) {
- ret = index_mem(sha1, NULL, size, write_object, type, path,
- format_check);
+ if (!size) {
+ ret = index_mem(sha1, NULL, size, type, path, flags);
} else if (size <= SMALL_FILE_SIZE) {
char *buf = xmalloc(size);
if (size == read_in_full(fd, buf, size))
- ret = index_mem(sha1, buf, size, write_object, type,
- path, format_check);
+ ret = index_mem(sha1, buf, size, type, path, flags);
else
ret = error("short read %s", strerror(errno));
free(buf);
} else {
void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
- ret = index_mem(sha1, buf, size, write_object, type, path,
- format_check);
+ ret = index_mem(sha1, buf, size, type, path, flags);
munmap(buf, size);
}
+ return ret;
+}
+
+/*
+ * This creates one packfile per large blob, because the caller
+ * immediately wants the result sha1, and fast-import can report the
+ * object name via the marks mechanism only by closing the created
+ * packfile.
+ *
+ * This also bypasses the usual "convert-to-git" dance, and that is on
+ * purpose. We could write a streaming version of the converting
+ * functions and insert that before feeding the data to fast-import
+ * (or equivalent in-core API described above), but the primary
+ * motivation for trying to stream from the working tree file and to
+ * avoid mmapping it in core is to deal with large binary blobs, and
+ * by definition they do _not_ want to get any conversion.
+ */
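+/*
+ * The stream fed to fast-import below is "blob\nmark :1\ndata <size>\n"
+ * followed by the raw file contents; the --export-marks file it writes
+ * maps mark :1 to the resulting object name.
+ */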
+static int index_stream(unsigned char *sha1, int fd, size_t size,
+ enum object_type type, const char *path,
+ unsigned flags)
+{
+ struct child_process fast_import;
+ char export_marks[512];
+ const char *argv[] = { "fast-import", "--quiet", export_marks, NULL };
+ char tmpfile[512];
+ char fast_import_cmd[512];
+ char buf[512];
+ int len, tmpfd;
+
+ strcpy(tmpfile, git_path("hashstream_XXXXXX"));
+ tmpfd = git_mkstemp_mode(tmpfile, 0600);
+ if (tmpfd < 0)
+ die_errno("cannot create tempfile: %s", tmpfile);
+ if (close(tmpfd))
+ die_errno("cannot close tempfile: %s", tmpfile);
+ sprintf(export_marks, "--export-marks=%s", tmpfile);
+
+ memset(&fast_import, 0, sizeof(fast_import));
+ fast_import.in = -1;
+ fast_import.argv = argv;
+ fast_import.git_cmd = 1;
+ if (start_command(&fast_import))
+ die_errno("index-stream: git fast-import failed");
+
+ len = sprintf(fast_import_cmd, "blob\nmark :1\ndata %lu\n",
+ (unsigned long) size);
+ write_or_whine(fast_import.in, fast_import_cmd, len,
+ "index-stream: feeding fast-import");
+ while (size) {
+ char buf[10240];
+ size_t sz = size < sizeof(buf) ? size : sizeof(buf);
+ ssize_t actual;
+
+ actual = read_in_full(fd, buf, sz);
+ if (actual < 0)
+ die_errno("index-stream: reading input");
+ if (write_in_full(fast_import.in, buf, actual) != actual)
+ die_errno("index-stream: feeding fast-import");
+ size -= actual;
+ }
+ if (close(fast_import.in))
+ die_errno("index-stream: closing fast-import");
+ if (finish_command(&fast_import))
+ die_errno("index-stream: finishing fast-import");
+
+ tmpfd = open(tmpfile, O_RDONLY);
+ if (tmpfd < 0)
+ die_errno("index-stream: cannot open fast-import mark");
+ len = read(tmpfd, buf, sizeof(buf));
+ if (len < 0)
+ die_errno("index-stream: reading fast-import mark");
+ if (close(tmpfd) < 0)
+ die_errno("index-stream: closing fast-import mark");
+ if (unlink(tmpfile))
+ die_errno("index-stream: unlinking fast-import mark");
+ if (len != 44 ||
+ memcmp(":1 ", buf, 3) ||
+ get_sha1_hex(buf + 3, sha1))
+ die_errno("index-stream: unexpected fast-import mark: <%s>", buf);
+ return 0;
+}
+
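+/*
+ * Dispatch on the kind of input: non-regular files are slurped through
+ * index_pipe(), blobs larger than big_file_threshold are streamed into
+ * their own pack with index_stream(), and everything else is hashed in
+ * core with index_core().
+ */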
+int index_fd(unsigned char *sha1, int fd, struct stat *st,
+ enum object_type type, const char *path, unsigned flags)
+{
+ int ret;
+ size_t size = xsize_t(st->st_size);
+
+ if (!S_ISREG(st->st_mode))
+ ret = index_pipe(sha1, fd, type, path, flags);
+ else if (size <= big_file_threshold || type != OBJ_BLOB)
+ ret = index_core(sha1, fd, size, type, path, flags);
+ else
+ ret = index_stream(sha1, fd, size, type, path, flags);
close(fd);
return ret;
}
-int index_path(unsigned char *sha1, const char *path, struct stat *st, int write_object)
+int index_path(unsigned char *sha1, const char *path, struct stat *st, unsigned flags)
{
int fd;
struct strbuf sb = STRBUF_INIT;
if (fd < 0)
return error("open(\"%s\"): %s", path,
strerror(errno));
- if (index_fd(sha1, fd, st, write_object, OBJ_BLOB, path, 0) < 0)
+ if (index_fd(sha1, fd, st, OBJ_BLOB, path, flags) < 0)
return error("%s: failed to insert into database",
path);
break;
return error("readlink(\"%s\"): %s", path,
errstr);
}
- if (!write_object)
+ if (!(flags & HASH_WRITE_OBJECT))
hash_sha1_file(sb.buf, sb.len, blob_type, sha1);
else if (write_sha1_file(sb.buf, sb.len, blob_type, sha1))
return error("%s: failed to insert into database",