#
# Define NO_STRLCPY if you don't have strlcpy.
#
-# Define NO_STRTOUMAX if you don't have strtoumax in the C library.
-# If your compiler also does not support long long or does not have
+# Define NO_STRTOUMAX if you don't have both strtoimax and strtoumax in the
+# C library. If your compiler also does not support long long or does not have
# strtoull, define NO_STRTOULL.
#
# Define NO_SETENV if you don't have setenv in the C library.
# DEFAULT_EDITOR='$GIT_FALLBACK_EDITOR',
# DEFAULT_EDITOR='"C:\Program Files\Vim\gvim.exe" --nofork'
#
+# Define COMPUTE_HEADER_DEPENDENCIES to "yes" if you want dependencies on
+# header files to be automatically computed, to avoid rebuilding objects when
+# an unrelated header file changes. Define it to "no" to use the hard-coded
+# dependency rules. The default is "auto", which means to use computed header
+# dependencies if your compiler is detected to support it.
+#
# Define CHECK_HEADER_DEPENDENCIES to check for problems in the hard-coded
# dependency rules.
#
LIB_H += attr.h
LIB_H += blob.h
LIB_H += builtin.h
+ LIB_H += bulk-checkin.h
LIB_H += cache.h
LIB_H += cache-tree.h
LIB_H += color.h
LIB_H += compat/obstack.h
LIB_H += compat/win32/pthread.h
LIB_H += compat/win32/syslog.h
-LIB_H += compat/win32/sys/poll.h
+LIB_H += compat/win32/poll.h
LIB_H += compat/win32/dirent.h
LIB_H += connected.h
+LIB_H += convert.h
LIB_H += csum-file.h
LIB_H += decorate.h
LIB_H += delta.h
LIB_H += diff.h
LIB_H += dir.h
LIB_H += exec_cmd.h
+LIB_H += fmt-merge-msg.h
LIB_H += fsck.h
LIB_H += gettext.h
LIB_H += git-compat-util.h
+LIB_H += gpg-interface.h
LIB_H += graph.h
LIB_H += grep.h
LIB_H += hash.h
LIB_OBJS += bisect.o
LIB_OBJS += blob.o
LIB_OBJS += branch.o
+ LIB_OBJS += bulk-checkin.o
LIB_OBJS += bundle.o
LIB_OBJS += cache-tree.o
LIB_OBJS += color.o
LIB_OBJS += environment.o
LIB_OBJS += exec_cmd.o
LIB_OBJS += fsck.o
+LIB_OBJS += gpg-interface.o
LIB_OBJS += graph.o
LIB_OBJS += grep.o
LIB_OBJS += hash.o
NO_PREAD = YesPlease
NEEDS_CRYPTO_WITH_SSL = YesPlease
NO_LIBGEN_H = YesPlease
+ NO_SYS_POLL_H = YesPlease
NO_SYMLINK_HEAD = YesPlease
NO_IPV6 = YesPlease
NO_SETENV = YesPlease
BASIC_CFLAGS = -nologo -I. -I../zlib -Icompat/vcbuild -Icompat/vcbuild/include -DWIN32 -D_CONSOLE -DHAVE_STRING_H -D_CRT_SECURE_NO_WARNINGS -D_CRT_NONSTDC_NO_DEPRECATE
COMPAT_OBJS = compat/msvc.o compat/winansi.o \
compat/win32/pthread.o compat/win32/syslog.o \
- compat/win32/sys/poll.o compat/win32/dirent.o
+ compat/win32/poll.o compat/win32/dirent.o
COMPAT_CFLAGS = -D__USE_MINGW_ACCESS -DNOGDI -DHAVE_STRING_H -DHAVE_ALLOCA_H -Icompat -Icompat/regex -Icompat/win32 -DSTRIP_EXTENSION=\".exe\"
BASIC_LDFLAGS = -IGNORE:4217 -IGNORE:4049 -NOLOGO -SUBSYSTEM:CONSOLE -NODEFAULTLIB:MSVCRT.lib
EXTLIBS = user32.lib advapi32.lib shell32.lib wininet.lib ws2_32.lib
NO_PREAD = YesPlease
NEEDS_CRYPTO_WITH_SSL = YesPlease
NO_LIBGEN_H = YesPlease
+ NO_SYS_POLL_H = YesPlease
NO_SYMLINK_HEAD = YesPlease
NO_SETENV = YesPlease
NO_UNSETENV = YesPlease
COMPAT_CFLAGS += -DSTRIP_EXTENSION=\".exe\"
COMPAT_OBJS += compat/mingw.o compat/winansi.o \
compat/win32/pthread.o compat/win32/syslog.o \
- compat/win32/sys/poll.o compat/win32/dirent.o
+ compat/win32/poll.o compat/win32/dirent.o
EXTLIBS += -lws2_32
PTHREAD_LIBS =
X = .exe
endif
ifdef CHECK_HEADER_DEPENDENCIES
-COMPUTE_HEADER_DEPENDENCIES =
+COMPUTE_HEADER_DEPENDENCIES = no
USE_COMPUTED_HEADER_DEPENDENCIES =
-else
+endif
+
ifndef COMPUTE_HEADER_DEPENDENCIES
+COMPUTE_HEADER_DEPENDENCIES = auto
+endif
+
+ifeq ($(COMPUTE_HEADER_DEPENDENCIES),auto)
dep_check = $(shell $(CC) $(ALL_CFLAGS) \
-c -MF /dev/null -MMD -MP -x c /dev/null -o /dev/null 2>&1; \
echo $$?)
ifeq ($(dep_check),0)
-COMPUTE_HEADER_DEPENDENCIES=YesPlease
-endif
+override COMPUTE_HEADER_DEPENDENCIES = yes
+else
+override COMPUTE_HEADER_DEPENDENCIES = no
endif
endif
-ifdef COMPUTE_HEADER_DEPENDENCIES
+ifeq ($(COMPUTE_HEADER_DEPENDENCIES),yes)
USE_COMPUTED_HEADER_DEPENDENCIES = YesPlease
+else
+ifneq ($(COMPUTE_HEADER_DEPENDENCIES),no)
+$(error please set COMPUTE_HEADER_DEPENDENCIES to yes, no, or auto \
+(not "$(COMPUTE_HEADER_DEPENDENCIES)"))
+endif
endif
ifdef SANE_TOOL_PATH
endif
ifdef NO_STRTOUMAX
COMPAT_CFLAGS += -DNO_STRTOUMAX
- COMPAT_OBJS += compat/strtoumax.o
+ COMPAT_OBJS += compat/strtoumax.o compat/strtoimax.o
endif
ifdef NO_STRTOULL
COMPAT_CFLAGS += -DNO_STRTOULL
dep_files := $(foreach f,$(OBJECTS),$(dir $f).depend/$(notdir $f).d)
dep_dirs := $(addsuffix .depend,$(sort $(dir $(OBJECTS))))
-ifdef COMPUTE_HEADER_DEPENDENCIES
+ifeq ($(COMPUTE_HEADER_DEPENDENCIES),yes)
$(dep_dirs):
@mkdir -p $@
endif
endif
-ifndef COMPUTE_HEADER_DEPENDENCIES
+ifneq ($(COMPUTE_HEADER_DEPENDENCIES),yes)
ifndef CHECK_HEADER_DEPENDENCIES
dep_dirs =
missing_dep_dirs =
builtin/bundle.o bundle.o transport.o: bundle.h
builtin/bisect--helper.o builtin/rev-list.o bisect.o: bisect.h
builtin/clone.o builtin/fetch-pack.o transport.o: fetch-pack.h
-builtin/grep.o builtin/pack-objects.o transport-helper.o: thread-utils.h
+builtin/grep.o builtin/pack-objects.o transport-helper.o thread-utils.o: thread-utils.h
builtin/send-pack.o transport.o: send-pack.h
builtin/log.o builtin/shortlog.o: shortlog.h
builtin/prune.o builtin/reflog.o reachable.o: reachable.h
builtin/commit.o builtin/revert.o wt-status.o: wt-status.h
builtin/tar-tree.o archive-tar.o: tar.h
-connect.o transport.o http-backend.o: url.h
+connect.o transport.o url.o http-backend.o: url.h
http-fetch.o http-walker.o remote-curl.o transport.o walker.o: walker.h
http.o http-walker.o http-push.o http-fetch.o remote-curl.o: http.h url.h
static const char *base_name;
static int progress = 1;
static int window = 10;
- static unsigned long pack_size_limit, pack_size_limit_cfg;
+ static unsigned long pack_size_limit;
static int depth = 50;
static int delta_search_threads;
static int pack_to_stdout;
return hdrlen + datalen;
}
-static int write_one(struct sha1file *f,
- struct object_entry *e,
- off_t *offset)
+enum write_one_status {
+ WRITE_ONE_SKIP = -1, /* already written */
+ WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
+ WRITE_ONE_WRITTEN = 1, /* normal */
+ WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
+};
+
+static enum write_one_status write_one(struct sha1file *f,
+ struct object_entry *e,
+ off_t *offset)
{
unsigned long size;
+ int recursing;
- /* offset is non zero if object is written already. */
- if (e->idx.offset || e->preferred_base)
- return -1;
+ /*
+ * we set offset to 1 (which is an impossible value) to mark
+ * the fact that this object is involved in "write its base
+ * first before writing a deltified object" recursion.
+ */
+ recursing = (e->idx.offset == 1);
+ if (recursing) {
+ warning("recursive delta detected for object %s",
+ sha1_to_hex(e->idx.sha1));
+ return WRITE_ONE_RECURSIVE;
+ } else if (e->idx.offset || e->preferred_base) {
+ /* offset is non zero if object is written already. */
+ return WRITE_ONE_SKIP;
+ }
/* if we are deltified, write out base object first. */
- if (e->delta && !write_one(f, e->delta, offset))
- return 0;
+ if (e->delta) {
+ e->idx.offset = 1; /* now recurse */
+ switch (write_one(f, e->delta, offset)) {
+ case WRITE_ONE_RECURSIVE:
+ /* we cannot depend on this one */
+ e->delta = NULL;
+ break;
+ default:
+ break;
+ case WRITE_ONE_BREAK:
+ e->idx.offset = recursing;
+ return WRITE_ONE_BREAK;
+ }
+ }
e->idx.offset = *offset;
size = write_object(f, e, *offset);
if (!size) {
- e->idx.offset = 0;
- return 0;
+ e->idx.offset = recursing;
+ return WRITE_ONE_BREAK;
}
written_list[nr_written++] = &e->idx;
if (signed_add_overflows(*offset, size))
die("pack too large for current definition of off_t");
*offset += size;
- return 1;
+ return WRITE_ONE_WRITTEN;
}
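
The loop that drives write_one() later in this patch stops only on WRITE_ONE_BREAK, so the other states exist to tell "skip", "written" and "cycle detected" apart. The sentinel trick of parking the impossible offset 1 in idx.offset while the base chain is walked is the interesting part; here is a minimal standalone sketch of the same pattern with made-up names (node, visit) — an illustration, not code from this series.

/* Sketch only: detect a cycle in a chain of base pointers by storing an
 * impossible "offset" while a node is being processed.  Hypothetical names. */
#include <stdio.h>

struct node {
	struct node *base;      /* plays the role of object_entry::delta */
	unsigned long offset;   /* 0 = not written, 1 = in progress (sentinel) */
};

static int visit(struct node *n, unsigned long *next_offset)
{
	if (n->offset == 1)
		return -1;                 /* recursion: already writing this one */
	if (n->offset)
		return 0;                  /* already written for real */
	n->offset = 1;                     /* mark "in progress" before descending */
	if (n->base && visit(n->base, next_offset) < 0)
		n->base = NULL;            /* cycle: write it without the base */
	n->offset = (*next_offset)++;      /* record a real offset (always >= 2) */
	return 1;
}

int main(void)
{
	struct node a = { NULL, 0 }, b = { &a, 0 };
	unsigned long off = 2;             /* keep 1 reserved for the sentinel */

	a.base = &b;                       /* artificial delta cycle a <-> b */
	visit(&a, &off);
	printf("a at %lu, b at %lu\n", a.offset, b.offset);
	return 0;
}

Because real offsets start past the sentinel, a stored value of 1 can never be mistaken for an object that has actually been written.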
static int mark_tagged(const char *path, const unsigned char *sha1, int flag,
return 0;
}
-static void add_to_write_order(struct object_entry **wo,
- int *endp,
+static inline void add_to_write_order(struct object_entry **wo,
+ unsigned int *endp,
struct object_entry *e)
{
if (e->filled)
}
static void add_descendants_to_write_order(struct object_entry **wo,
- int *endp,
+ unsigned int *endp,
struct object_entry *e)
{
- struct object_entry *child;
-
- for (child = e->delta_child; child; child = child->delta_sibling)
- add_to_write_order(wo, endp, child);
- for (child = e->delta_child; child; child = child->delta_sibling)
- add_descendants_to_write_order(wo, endp, child);
+ int add_to_order = 1;
+ while (e) {
+ if (add_to_order) {
+ struct object_entry *s;
+ /* add this node... */
+ add_to_write_order(wo, endp, e);
+ /* all its siblings... */
+ for (s = e->delta_sibling; s; s = s->delta_sibling) {
+ add_to_write_order(wo, endp, s);
+ }
+ }
+ /* drop down a level to add left subtree nodes if possible */
+ if (e->delta_child) {
+ add_to_order = 1;
+ e = e->delta_child;
+ } else {
+ add_to_order = 0;
+ /* our sibling might have some children, it is next */
+ if (e->delta_sibling) {
+ e = e->delta_sibling;
+ continue;
+ }
+ /* go back to our parent node */
+ e = e->delta;
+ while (e && !e->delta_sibling) {
+ /* we're on the right side of a subtree, keep
+ * going up until we can go right again */
+ e = e->delta;
+ }
+ if (!e) {
+ /* done- we hit our original root node */
+ return;
+ }
+ /* pass it off to sibling at this level */
+ e = e->delta_sibling;
+ }
+ };
}
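
The loop above replaces a per-child recursion that went as deep as the delta tree is tall. For readers untangling the child/sibling/parent shuffle, the following standalone sketch walks a toy tree the same way; tnode, child, sibling and parent are made-up stand-ins for object_entry, delta_child, delta_sibling and delta.

/* Standalone sketch of the same iterative walk over a toy tree. */
#include <stdio.h>

struct tnode { struct tnode *child, *sibling, *parent; const char *name; };

static void walk(struct tnode *root)
{
	struct tnode *e = root;
	int emit = 1;

	while (e) {
		if (emit) {
			struct tnode *s;
			printf("%s ", e->name);             /* this node... */
			for (s = e->sibling; s; s = s->sibling)
				printf("%s ", s->name);     /* ...and its siblings */
		}
		if (e->child) {
			emit = 1;                           /* descend one level */
			e = e->child;
		} else {
			emit = 0;                           /* siblings already emitted */
			if (e->sibling) {
				e = e->sibling;
				continue;
			}
			do
				e = e->parent;              /* climb until we can go right */
			while (e && e != root && !e->sibling);
			if (!e || e == root)
				return;                     /* back at the start: done */
			e = e->sibling;
		}
	}
}

int main(void)
{
	struct tnode r = { 0 }, a = { 0 }, b = { 0 }, c = { 0 };

	r.name = "R"; a.name = "A"; b.name = "B"; c.name = "C";
	r.child = &a; a.parent = &r; a.sibling = &b; b.parent = &r;
	a.child = &c; c.parent = &a;
	walk(&r);                                           /* prints: R A B C */
	printf("\n");
	return 0;
}
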
static void add_family_to_write_order(struct object_entry **wo,
- int *endp,
+ unsigned int *endp,
struct object_entry *e)
{
struct object_entry *root;
for (root = e; root->delta; root = root->delta)
; /* nothing */
- add_to_write_order(wo, endp, root);
add_descendants_to_write_order(wo, endp, root);
}
static struct object_entry **compute_write_order(void)
{
- int i, wo_end;
+ unsigned int i, wo_end, last_untagged;
struct object_entry **wo = xmalloc(nr_objects * sizeof(*wo));
* Make sure delta_sibling is sorted in the original
* recency order.
*/
- for (i = nr_objects - 1; 0 <= i; i--) {
- struct object_entry *e = &objects[i];
+ for (i = nr_objects; i > 0;) {
+ struct object_entry *e = &objects[--i];
if (!e->delta)
continue;
/* Mark me as the first child */
for_each_tag_ref(mark_tagged, NULL);
/*
- * Give the commits in the original recency order until
+ * Give the objects in the original recency order until
* we see a tagged tip.
*/
for (i = wo_end = 0; i < nr_objects; i++) {
break;
add_to_write_order(wo, &wo_end, &objects[i]);
}
+ last_untagged = i;
/*
* Then fill all the tagged tips.
/*
* And then all remaining commits and tags.
*/
- for (i = 0; i < nr_objects; i++) {
+ for (i = last_untagged; i < nr_objects; i++) {
if (objects[i].type != OBJ_COMMIT &&
objects[i].type != OBJ_TAG)
continue;
/*
* And then all the trees.
*/
- for (i = 0; i < nr_objects; i++) {
+ for (i = last_untagged; i < nr_objects; i++) {
if (objects[i].type != OBJ_TREE)
continue;
add_to_write_order(wo, &wo_end, &objects[i]);
/*
* Finally all the rest in really tight order
*/
- for (i = 0; i < nr_objects; i++)
- add_family_to_write_order(wo, &wo_end, &objects[i]);
+ for (i = last_untagged; i < nr_objects; i++) {
+ if (!objects[i].filled)
+ add_family_to_write_order(wo, &wo_end, &objects[i]);
+ }
+
+ if (wo_end != nr_objects)
+ die("ordered %u objects, expected %"PRIu32, wo_end, nr_objects);
return wo;
}
uint32_t i = 0, j;
struct sha1file *f;
off_t offset;
- struct pack_header hdr;
uint32_t nr_remaining = nr_result;
time_t last_mtime = 0;
struct object_entry **write_order;
unsigned char sha1[20];
char *pack_tmp_name = NULL;
- if (pack_to_stdout) {
+ if (pack_to_stdout)
f = sha1fd_throughput(1, "<stdout>", progress_state);
- } else {
- char tmpname[PATH_MAX];
- int fd;
- fd = odb_mkstemp(tmpname, sizeof(tmpname),
- "pack/tmp_pack_XXXXXX");
- pack_tmp_name = xstrdup(tmpname);
- f = sha1fd(fd, pack_tmp_name);
- }
+ else
+ f = create_tmp_packfile(&pack_tmp_name);
- hdr.hdr_signature = htonl(PACK_SIGNATURE);
- hdr.hdr_version = htonl(PACK_VERSION);
- hdr.hdr_entries = htonl(nr_remaining);
- sha1write(f, &hdr, sizeof(hdr));
- offset = sizeof(hdr);
+ offset = write_pack_header(f, nr_remaining);
+ if (!offset)
+ die_errno("unable to write pack header");
nr_written = 0;
for (; i < nr_objects; i++) {
struct object_entry *e = write_order[i];
- if (!write_one(f, e, &offset))
+ if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
break;
display_progress(progress_state, written);
}
if (!pack_to_stdout) {
struct stat st;
- const char *idx_tmp_name;
char tmpname[PATH_MAX];
- idx_tmp_name = write_idx_file(NULL, written_list, nr_written,
- &pack_idx_opts, sha1);
-
- snprintf(tmpname, sizeof(tmpname), "%s-%s.pack",
- base_name, sha1_to_hex(sha1));
- free_pack_by_name(tmpname);
- if (adjust_shared_perm(pack_tmp_name))
- die_errno("unable to make temporary pack file readable");
- if (rename(pack_tmp_name, tmpname))
- die_errno("unable to rename temporary pack file");
-
/*
* Packs are runtime accessed in their mtime
* order since newer packs are more likely to contain
* packs then we should modify the mtime of later ones
* to preserve this property.
*/
- if (stat(tmpname, &st) < 0) {
+ if (stat(pack_tmp_name, &st) < 0) {
warning("failed to stat %s: %s",
- tmpname, strerror(errno));
+ pack_tmp_name, strerror(errno));
} else if (!last_mtime) {
last_mtime = st.st_mtime;
} else {
struct utimbuf utb;
utb.actime = st.st_atime;
utb.modtime = --last_mtime;
- if (utime(tmpname, &utb) < 0)
+ if (utime(pack_tmp_name, &utb) < 0)
warning("failed utime() on %s: %s",
-					tmpname, strerror(errno));
+					pack_tmp_name, strerror(errno));
}
- snprintf(tmpname, sizeof(tmpname), "%s-%s.idx",
- base_name, sha1_to_hex(sha1));
- if (adjust_shared_perm(idx_tmp_name))
- die_errno("unable to make temporary index file readable");
- if (rename(idx_tmp_name, tmpname))
- die_errno("unable to rename temporary index file");
-
- free((void *) idx_tmp_name);
+ /* Enough space for "-<sha-1>.pack"? */
+ if (sizeof(tmpname) <= strlen(base_name) + 50)
+ die("pack base name '%s' too long", base_name);
+ snprintf(tmpname, sizeof(tmpname), "%s-", base_name);
+ finish_tmp_packfile(tmpname, pack_tmp_name,
+ written_list, nr_written,
+ &pack_idx_opts, sha1);
free(pack_tmp_name);
puts(sha1_to_hex(sha1));
}
while (tree_entry(tree,&entry)) {
if (S_ISGITLINK(entry.mode))
continue;
- cmp = tree_entry_len(entry.path, entry.sha1) != cmplen ? 1 :
+ cmp = tree_entry_len(&entry) != cmplen ? 1 :
memcmp(name, entry.path, cmplen);
if (cmp > 0)
continue;
pack_idx_opts.version);
return 0;
}
- if (!strcmp(k, "pack.packsizelimit")) {
- pack_size_limit_cfg = git_config_ulong(k, v);
- return 0;
- }
return git_default_config(k, v, cb);
}
void git_deflate_init(git_zstream *, int level);
void git_deflate_init_gzip(git_zstream *, int level);
void git_deflate_end(git_zstream *);
+ int git_deflate_abort(git_zstream *);
int git_deflate_end_gently(git_zstream *);
int git_deflate(git_zstream *, int flush);
unsigned long git_deflate_bound(git_zstream *, unsigned long);
}
#define flexible_size(STRUCT,len) ((offsetof(struct STRUCT,name) + (len) + 8) & ~7)
-#define cache_entry_size(len) flexible_size(cache_entry,len)
+#define cache_entry_size(len) (offsetof(struct cache_entry,name) + (len) + 1)
#define ondisk_cache_entry_size(len) flexible_size(ondisk_cache_entry,len)
#define ondisk_cache_entry_extended_size(len) flexible_size(ondisk_cache_entry_extended,len)
struct string_list *resolve_undo;
struct cache_tree *cache_tree;
struct cache_time timestamp;
- void *alloc;
unsigned name_hash_initialized : 1,
initialized : 1;
struct hash_table name_hash;
extern size_t packed_git_limit;
extern size_t delta_base_cache_limit;
extern unsigned long big_file_threshold;
+ extern unsigned long pack_size_limit_cfg;
extern int read_replace_refs;
extern int fsync_object_files;
extern int core_preload_index;
extern int get_sha1_hex(const char *hex, unsigned char *sha1);
extern char *sha1_to_hex(const unsigned char *sha1); /* static buffer result! */
+extern int read_ref_full(const char *filename, unsigned char *sha1,
+ int reading, int *flags);
extern int read_ref(const char *filename, unsigned char *sha1);
/*
extern int refname_match(const char *abbrev_name, const char *full_name, const char **rules);
extern const char *ref_rev_parse_rules[];
-extern const char *ref_fetch_rules[];
+#define ref_fetch_rules ref_rev_parse_rules
extern int create_symref(const char *ref, const char *refs_heads_master, const char *logmsg);
extern int validate_headref(const char *ref);
die("bad config file line %d in %s", cf->linenr, cf->name);
}
-static int parse_unit_factor(const char *end, unsigned long *val)
+static int parse_unit_factor(const char *end, uintmax_t *val)
{
if (!*end)
return 1;
{
if (value && *value) {
char *end;
- long val = strtol(value, &end, 0);
- unsigned long factor = 1;
+ intmax_t val;
+ uintmax_t uval;
+ uintmax_t factor = 1;
+
+ errno = 0;
+ val = strtoimax(value, &end, 0);
+ if (errno == ERANGE)
+ return 0;
if (!parse_unit_factor(end, &factor))
return 0;
- *ret = val * factor;
+ uval = abs(val);
+ uval *= factor;
+ if ((uval > maximum_signed_value_of_type(long)) ||
+ (abs(val) > uval))
+ return 0;
+ val *= factor;
+ *ret = val;
return 1;
}
return 0;
{
if (value && *value) {
char *end;
- unsigned long val = strtoul(value, &end, 0);
+ uintmax_t val;
+ uintmax_t oldval;
+
+ errno = 0;
+ val = strtoumax(value, &end, 0);
+ if (errno == ERANGE)
+ return 0;
+ oldval = val;
if (!parse_unit_factor(end, &val))
return 0;
+ if ((val > maximum_unsigned_value_of_type(long)) ||
+ (oldval > val))
+ return 0;
*ret = val;
return 1;
}
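
Both hunks above follow the same recipe: parse with the widest integer type, let errno report ERANGE, apply the unit suffix, then reject anything that no longer fits the narrower type the caller asked for. A condensed, self-contained rendering of the unsigned case, with a hypothetical helper name rather than git's own:

/* Hedged sketch of overflow-aware "<number>[kmg]" parsing; not git's helper. */
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdio.h>
#include <strings.h>

static int parse_size_ulong(const char *s, unsigned long *ret)
{
	char *end;
	uintmax_t val, unscaled, factor = 1;

	if (!s || !*s)
		return 0;
	errno = 0;
	val = strtoumax(s, &end, 0);
	if (errno == ERANGE)
		return 0;                       /* did not even fit uintmax_t */
	if (!strcasecmp(end, "k"))
		factor = 1024;
	else if (!strcasecmp(end, "m"))
		factor = 1024 * 1024;
	else if (!strcasecmp(end, "g"))
		factor = 1024 * 1024 * 1024;
	else if (*end)
		return 0;                       /* trailing junk */
	unscaled = val;
	val *= factor;
	if (val / factor != unscaled || val > (uintmax_t)ULONG_MAX)
		return 0;                       /* scaling wrapped, or too wide */
	*ret = (unsigned long)val;
	return 1;
}

int main(void)
{
	unsigned long v = 0;
	printf("%d %lu\n", parse_size_ulong("2g", &v), v);  /* 1 2147483648 */
	return 0;
}
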
if (!strcmp(var, "core.packedgitwindowsize")) {
int pgsz_x2 = getpagesize() * 2;
- packed_git_window_size = git_config_int(var, value);
+ packed_git_window_size = git_config_ulong(var, value);
/* This value must be multiple of (pagesize * 2) */
packed_git_window_size /= pgsz_x2;
}
if (!strcmp(var, "core.bigfilethreshold")) {
- long n = git_config_int(var, value);
- big_file_threshold = 0 < n ? n : 0;
+ big_file_threshold = git_config_ulong(var, value);
return 0;
}
if (!strcmp(var, "core.packedgitlimit")) {
- packed_git_limit = git_config_int(var, value);
+ packed_git_limit = git_config_ulong(var, value);
return 0;
}
if (!strcmp(var, "core.deltabasecachelimit")) {
- delta_base_cache_limit = git_config_int(var, value);
+ delta_base_cache_limit = git_config_ulong(var, value);
return 0;
}
return 0;
}
+ if (!strcmp(var, "pack.packsizelimit")) {
+ pack_size_limit_cfg = git_config_ulong(var, value);
+ return 0;
+ }
/* Add other config variables here and to Documentation/config.txt. */
return 0;
}
home = getenv("HOME");
if (home) {
- char *user_config = xstrdup(mkpath("%s/.gitconfig", home));
+ char buf[PATH_MAX];
+ char *user_config = mksnpath(buf, sizeof(buf), "%s/.gitconfig", home);
if (!access(user_config, R_OK)) {
ret += git_config_from_file(fn, user_config, data);
found += 1;
}
- free(user_config);
}
if (repo_config && !access(repo_config, R_OK)) {
*/
#include "cache.h"
#include "refs.h"
+#include "fmt-merge-msg.h"
char git_default_email[MAX_GITNAME];
char git_default_name[MAX_GITNAME];
char *notes_ref_name;
int grafts_replace_parents = 1;
int core_apply_sparse_checkout;
+int merge_log_config = -1;
struct startup_info *startup_info;
+ unsigned long pack_size_limit_cfg;
/* Parallel index stat data preload? */
int core_preload_index = 0;
return 0;
}
- static void truncate_pack(off_t to, git_SHA_CTX *ctx)
+ static void truncate_pack(struct sha1file_checkpoint *checkpoint)
{
- if (ftruncate(pack_data->pack_fd, to)
- || lseek(pack_data->pack_fd, to, SEEK_SET) != to)
+ if (sha1file_truncate(pack_file, checkpoint))
die_errno("cannot truncate pack to skip duplicate");
- pack_size = to;
-
- /* yes this is a layering violation */
- pack_file->total = to;
- pack_file->offset = 0;
- pack_file->ctx = *ctx;
+ pack_size = checkpoint->offset;
}
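
After this change fast-import no longer pokes at struct sha1file internals; it asks the csum-file layer to snapshot its state and, on a duplicate, to roll back to that snapshot. Judging by the code being removed, the saved state is at least the stream offset and the running SHA-1 context. The pattern in miniature, with made-up names:

/* Toy checkpoint/rollback writer; the real work is done by
 * sha1file_checkpoint() and sha1file_truncate() in csum-file.c. */
#include <stdio.h>
#include <string.h>

struct writer {
	char buf[4096];        /* stands in for the pack stream */
	size_t offset;         /* bytes written so far */
	unsigned checksum;     /* stands in for the running SHA-1 context */
};

struct checkpoint {
	size_t offset;
	unsigned checksum;
};

static void save(const struct writer *w, struct checkpoint *cp)
{
	cp->offset = w->offset;        /* snapshot position and hash state */
	cp->checksum = w->checksum;
}

static void rollback(struct writer *w, const struct checkpoint *cp)
{
	w->offset = cp->offset;        /* drop everything written since save() */
	w->checksum = cp->checksum;
}

static void put(struct writer *w, const char *data, size_t len)
{
	size_t i;

	memcpy(w->buf + w->offset, data, len);
	w->offset += len;
	for (i = 0; i < len; i++)
		w->checksum = w->checksum * 31 + (unsigned char)data[i];
}

int main(void)
{
	struct writer w = { "", 0, 0 };
	struct checkpoint cp;

	put(&w, "keep", 4);
	save(&w, &cp);
	put(&w, "duplicate blob", 14);  /* turned out to exist already... */
	rollback(&w, &cp);              /* ...so pretend it was never written */
	printf("%zu bytes kept\n", w.offset);
	return 0;
}
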
static void stream_blob(uintmax_t len, unsigned char *sha1out, uintmax_t mark)
unsigned long hdrlen;
off_t offset;
git_SHA_CTX c;
- git_SHA_CTX pack_file_ctx;
git_zstream s;
+ struct sha1file_checkpoint checkpoint;
int status = Z_OK;
/* Determine if we should auto-checkpoint. */
|| (pack_size + 60 + len) < pack_size)
cycle_packfile();
- offset = pack_size;
-
- /* preserve the pack_file SHA1 ctx in case we have to truncate later */
- sha1flush(pack_file);
- pack_file_ctx = pack_file->ctx;
+ sha1file_checkpoint(pack_file, &checkpoint);
+ offset = checkpoint.offset;
hdrlen = snprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;
if (out_sz <= hdrlen)
if (e->idx.offset) {
duplicate_count_by_type[OBJ_BLOB]++;
- truncate_pack(offset, &pack_file_ctx);
+ truncate_pack(&checkpoint);
} else if (find_sha1_pack(sha1, packed_git)) {
e->type = OBJ_BLOB;
e->pack_id = MAX_PACK_ID;
e->idx.offset = 1; /* just not zero! */
duplicate_count_by_type[OBJ_BLOB]++;
- truncate_pack(offset, &pack_file_ctx);
+ truncate_pack(&checkpoint);
} else {
e->depth = 0;
if (tmp_hex_sha1_len == 40 && !get_sha1_hex(hex_sha1, sha1)) {
/* This is a note entry */
+ if (fanout == 0xff) {
+ /* Counting mode, no rename */
+ num_notes++;
+ continue;
+ }
construct_path_with_fanout(hex_sha1, fanout, realpath);
if (!strcmp(fullpath, realpath)) {
/* Note entry is in correct location */
leaf.tree);
}
-static void note_change_n(struct branch *b, unsigned char old_fanout)
+static void note_change_n(struct branch *b, unsigned char *old_fanout)
{
const char *p = command_buf.buf + 2;
static struct strbuf uq = STRBUF_INIT;
uint16_t inline_data = 0;
unsigned char new_fanout;
+ /*
+ * When loading a branch, we don't traverse its tree to count the real
+ * number of notes (too expensive to do this for all non-note refs).
+ * This means that recently loaded notes refs might incorrectly have
+ * b->num_notes == 0, and consequently, old_fanout might be wrong.
+ *
+ * Fix this by traversing the tree and counting the number of notes
+ * when b->num_notes == 0. If the notes tree is truly empty, the
+ * calculation should not take long.
+ */
+ if (b->num_notes == 0 && *old_fanout == 0) {
+ /* Invoke change_note_fanout() in "counting mode". */
+ b->num_notes = change_note_fanout(&b->branch_tree, 0xff);
+ *old_fanout = convert_num_notes_to_fanout(b->num_notes);
+ }
+
+ /* Now parse the notemodify command. */
/* <dataref> or 'inline' */
if (*p == ':') {
char *x;
typename(type), command_buf.buf);
}
- construct_path_with_fanout(sha1_to_hex(commit_sha1), old_fanout, path);
+ construct_path_with_fanout(sha1_to_hex(commit_sha1), *old_fanout, path);
if (tree_content_remove(&b->branch_tree, path, NULL))
b->num_notes--;
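
Keeping b->num_notes honest matters because it determines the fanout, i.e. how many two-hex-digit directory levels a note path gets before the rest of the object name. The sketch below shows one plausible mapping (roughly 256 entries per directory level) and the matching path split; the helper names are made up and this is not fast-import's exact code.

/* Illustrative only: a fanout heuristic and the matching path split. */
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

static unsigned char fanout_for(uintmax_t num_notes)
{
	unsigned char fanout = 0;

	while ((num_notes >>= 8))       /* one more level per factor of 256 */
		fanout++;
	return fanout;
}

static void fanout_path(const char *hex40, unsigned char fanout, char *out)
{
	unsigned int i = 0;

	while (fanout--) {
		memcpy(out, hex40 + i, 2);      /* two hex digits per level */
		out[2] = '/';
		out += 3;
		i += 2;
	}
	strcpy(out, hex40 + i);                 /* the rest is the leaf name */
}

int main(void)
{
	char path[64];

	fanout_path("1234567890123456789012345678901234567890",
		    fanout_for(70000), path);
	printf("%s\n", path);   /* 12/34/5678...7890 with a fanout of 2 */
	return 0;
}
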
else if (!prefixcmp(command_buf.buf, "C "))
file_change_cr(b, 0);
else if (!prefixcmp(command_buf.buf, "N "))
- note_change_n(b, prev_fanout);
+ note_change_n(b, &prev_fanout);
else if (!strcmp("deleteall", command_buf.buf))
file_change_deleteall(b);
else if (!prefixcmp(command_buf.buf, "ls "))
}
sha1write(f, obj->sha1, 20);
git_SHA1_Update(&ctx, obj->sha1, 20);
+ if ((opts->flags & WRITE_IDX_STRICT) &&
+ (i && !hashcmp(list[-2]->sha1, obj->sha1)))
+ die("The same object %s appears twice in the pack",
+ sha1_to_hex(obj->sha1));
}
if (index_version >= 2) {
return index_name;
}
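
The strict check costs one comparison per object only because write_idx_file() emits entries in sorted object-name order, so any duplicate must sit next to its twin. The same idea in miniature, with hypothetical names:

/* Adjacent-compare duplicate check on a sorted array; a stand-in for the
 * WRITE_IDX_STRICT test above. */
#include <stdio.h>
#include <string.h>

static int has_duplicate(const char *const *sorted, size_t nr)
{
	size_t i;

	for (i = 1; i < nr; i++)
		if (!strcmp(sorted[i - 1], sorted[i]))
			return 1;       /* duplicates are neighbours when sorted */
	return 0;
}

int main(void)
{
	const char *names[] = { "aaaa", "bbbb", "bbbb", "cccc" };

	printf("%d\n", has_duplicate(names, 4));   /* prints 1 */
	return 0;
}
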
+ off_t write_pack_header(struct sha1file *f, uint32_t nr_entries)
+ {
+ struct pack_header hdr;
+
+ hdr.hdr_signature = htonl(PACK_SIGNATURE);
+ hdr.hdr_version = htonl(PACK_VERSION);
+ hdr.hdr_entries = htonl(nr_entries);
+ if (sha1write(f, &hdr, sizeof(hdr)))
+ return 0;
+ return sizeof(hdr);
+ }
+
/*
* Update pack header with object_count and compute new SHA1 for pack data
* associated to pack_fd, and write that SHA1 at the end. That new SHA1
*hdr = c;
return n;
}
+
+ struct sha1file *create_tmp_packfile(char **pack_tmp_name)
+ {
+ char tmpname[PATH_MAX];
+ int fd;
+
+ fd = odb_mkstemp(tmpname, sizeof(tmpname), "pack/tmp_pack_XXXXXX");
+ *pack_tmp_name = xstrdup(tmpname);
+ return sha1fd(fd, *pack_tmp_name);
+ }
+
+ void finish_tmp_packfile(char *name_buffer,
+ const char *pack_tmp_name,
+ struct pack_idx_entry **written_list,
+ uint32_t nr_written,
+ struct pack_idx_option *pack_idx_opts,
+ unsigned char sha1[])
+ {
+ const char *idx_tmp_name;
+ char *end_of_name_prefix = strrchr(name_buffer, 0);
+
+ if (adjust_shared_perm(pack_tmp_name))
+ die_errno("unable to make temporary pack file readable");
+
+ idx_tmp_name = write_idx_file(NULL, written_list, nr_written,
+ pack_idx_opts, sha1);
+ if (adjust_shared_perm(idx_tmp_name))
+ die_errno("unable to make temporary index file readable");
+
+ sprintf(end_of_name_prefix, "%s.pack", sha1_to_hex(sha1));
+ free_pack_by_name(name_buffer);
+
+ if (rename(pack_tmp_name, name_buffer))
+ die_errno("unable to rename temporary pack file");
+
+ sprintf(end_of_name_prefix, "%s.idx", sha1_to_hex(sha1));
+ if (rename(idx_tmp_name, name_buffer))
+ die_errno("unable to rename temporary index file");
+
+ free((void *)idx_tmp_name);
+ }
#define PACK_H
#include "object.h"
+ #include "csum-file.h"
/*
* Packed object header
struct pack_idx_option {
unsigned flags;
/* flag bits */
-#define WRITE_IDX_VERIFY 01
+#define WRITE_IDX_VERIFY 01 /* verify only, do not write the idx file */
+#define WRITE_IDX_STRICT 02
uint32_t version;
uint32_t off32_limit;
off_t offset;
};
+
+struct progress;
+typedef int (*verify_fn)(const unsigned char*, enum object_type, unsigned long, void*, int*);
+
extern const char *write_idx_file(const char *index_name, struct pack_idx_entry **objects, int nr_objects, const struct pack_idx_option *, unsigned char *sha1);
extern int check_pack_crc(struct packed_git *p, struct pack_window **w_curs, off_t offset, off_t len, unsigned int nr);
extern int verify_pack_index(struct packed_git *);
-extern int verify_pack(struct packed_git *);
+extern int verify_pack(struct packed_git *, verify_fn fn, struct progress *, uint32_t);
+ extern off_t write_pack_header(struct sha1file *f, uint32_t);
extern void fixup_pack_header_footer(int, unsigned char *, const char *, uint32_t, unsigned char *, off_t);
extern char *index_pack_lockfile(int fd);
extern int encode_in_pack_object_header(enum object_type, uintmax_t, unsigned char *);
#define PH_ERROR_PACK_SIGNATURE (-2)
#define PH_ERROR_PROTOCOL (-3)
extern int read_pack_header(int fd, struct pack_header *);
+
+ extern struct sha1file *create_tmp_packfile(char **pack_tmp_name);
+ extern void finish_tmp_packfile(char *name_buffer, const char *pack_tmp_name, struct pack_idx_entry **written_list, uint32_t nr_written, struct pack_idx_option *pack_idx_opts, unsigned char sha1[]);
+
#endif
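
verify_pack() now hands each unpacked object to a caller-supplied verify_fn instead of hard-coding the check (git-fsck being the natural caller). A skeletal callback matching the typedef could look like the sketch below; the parameter roles (object name, type, size, buffer, an "eaten" ownership flag) are my reading of the typedef rather than something this hunk spells out, and the function assumes git's own headers.

/* Hypothetical verify_fn implementation; assumes "cache.h" and "pack.h". */
static int count_blobs(const unsigned char *sha1, enum object_type type,
		       unsigned long size, void *buffer, int *eaten)
{
	static unsigned long blobs;

	(void)sha1; (void)size; (void)buffer;
	if (type == OBJ_BLOB)
		blobs++;
	*eaten = 0;     /* assumed: callback did not keep the buffer */
	return 0;       /* assumed: zero means the object checked out fine */
}

/* usage sketch: verify_pack(p, count_blobs, progress, 0); */
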
#include "refs.h"
#include "pack-revindex.h"
#include "sha1-lookup.h"
+ #include "bulk-checkin.h"
#ifndef O_NOATIME
#if defined(__linux__) && (defined(__i386__) || defined(__PPC__))
while (c & 0x80) {
if (len <= used || bitsizeof(long) <= shift) {
error("bad object header");
- return 0;
+ size = used = 0;
+ break;
}
c = buf[used++];
size += (c & 0x7f) << shift;
if ((type == OBJ_BLOB) && path) {
struct strbuf nbuf = STRBUF_INIT;
if (convert_to_git(path, buf, size, &nbuf,
- write_object ? safe_crlf : 0)) {
+ write_object ? safe_crlf : SAFE_CRLF_FALSE)) {
buf = strbuf_detach(&nbuf, &size);
re_allocated = 1;
}
}
/*
- * This creates one packfile per large blob, because the caller
- * immediately wants the result sha1, and fast-import can report the
- * object name via marks mechanism only by closing the created
- * packfile.
+ * This creates one packfile per large blob unless bulk-checkin
+ * machinery is "plugged".
*
* This also bypasses the usual "convert-to-git" dance, and that is on
* purpose. We could write a streaming version of the converting
enum object_type type, const char *path,
unsigned flags)
{
- struct child_process fast_import;
- char export_marks[512];
- const char *argv[] = { "fast-import", "--quiet", export_marks, NULL };
- char tmpfile[512];
- char fast_import_cmd[512];
- char buf[512];
- int len, tmpfd;
-
- strcpy(tmpfile, git_path("hashstream_XXXXXX"));
- tmpfd = git_mkstemp_mode(tmpfile, 0600);
- if (tmpfd < 0)
- die_errno("cannot create tempfile: %s", tmpfile);
- if (close(tmpfd))
- die_errno("cannot close tempfile: %s", tmpfile);
- sprintf(export_marks, "--export-marks=%s", tmpfile);
-
- memset(&fast_import, 0, sizeof(fast_import));
- fast_import.in = -1;
- fast_import.argv = argv;
- fast_import.git_cmd = 1;
- if (start_command(&fast_import))
- die_errno("index-stream: git fast-import failed");
-
- len = sprintf(fast_import_cmd, "blob\nmark :1\ndata %lu\n",
- (unsigned long) size);
- write_or_whine(fast_import.in, fast_import_cmd, len,
- "index-stream: feeding fast-import");
- while (size) {
- char buf[10240];
- size_t sz = size < sizeof(buf) ? size : sizeof(buf);
- ssize_t actual;
-
- actual = read_in_full(fd, buf, sz);
- if (actual < 0)
- die_errno("index-stream: reading input");
- if (write_in_full(fast_import.in, buf, actual) != actual)
- die_errno("index-stream: feeding fast-import");
- size -= actual;
- }
- if (close(fast_import.in))
- die_errno("index-stream: closing fast-import");
- if (finish_command(&fast_import))
- die_errno("index-stream: finishing fast-import");
-
- tmpfd = open(tmpfile, O_RDONLY);
- if (tmpfd < 0)
- die_errno("index-stream: cannot open fast-import mark");
- len = read(tmpfd, buf, sizeof(buf));
- if (len < 0)
- die_errno("index-stream: reading fast-import mark");
- if (close(tmpfd) < 0)
- die_errno("index-stream: closing fast-import mark");
- if (unlink(tmpfile))
- die_errno("index-stream: unlinking fast-import mark");
- if (len != 44 ||
- memcmp(":1 ", buf, 3) ||
- get_sha1_hex(buf + 3, sha1))
- die_errno("index-stream: unexpected fast-import mark: <%s>", buf);
- return 0;
+ return index_bulk_checkin(sha1, fd, size, type, path, flags);
}
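
index_stream() now defers to the bulk-checkin machinery, so consecutive large blobs can be appended to one temporary packfile while a caller keeps that machinery plugged, instead of producing one packfile each. Roughly the intended calling pattern, using the plug/unplug entry points this series adds; the surrounding loop is hypothetical.

/* Sketch of the intended calling pattern; assumes git's own headers.
 * The file-iteration loop itself is made up for illustration. */
#include "cache.h"
#include "bulk-checkin.h"

static void hash_many_large_files(const char **paths, int nr)
{
	int i;

	plug_bulk_checkin();            /* batch blobs into one temporary pack */
	for (i = 0; i < nr; i++) {
		unsigned char sha1[20];
		struct stat st;
		int fd = open(paths[i], O_RDONLY);

		if (fd < 0 || fstat(fd, &st) < 0)
			die_errno("cannot open %s", paths[i]);
		if (index_fd(sha1, fd, &st, OBJ_BLOB, paths[i], HASH_WRITE_OBJECT))
			die("failed to hash %s", paths[i]);
	}
	unplug_bulk_checkin();          /* flush and finalize that pack */
}
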
int index_fd(unsigned char *sha1, int fd, struct stat *st,