/git-upload-archive
/git-upload-pack
/git-var
+/git-verify-commit
/git-verify-pack
/git-verify-tag
/git-web--browse
/test-date
/test-delta
/test-dump-cache-tree
+/test-dump-split-index
/test-scrap-cache-tree
/test-genrandom
/test-hashmap
/config.mak.autogen
/config.mak.append
/configure
+/unicode
/tags
/TAGS
/cscope*
# Define LIBPCREDIR=/foo/bar if your libpcre header and library files are in
# /foo/bar/include and /foo/bar/lib directories.
#
+# Define HAVE_ALLOCA_H if you have a working alloca(3) defined in <alloca.h>.
+#
# Define NO_CURL if you do not have libcurl installed. git-http-fetch and
# git-http-push are not built, and you cannot use http:// and https://
# transports (neither smart nor dumb).
#
-# Define CURL_CONFIG to the path to a curl-config binary other than the
-# default 'curl-config'. If CURL_CONFIG is unset or points to a binary that
-# is not found, defaults to the CURLDIR behavior.
-#
-# Define CURL_STATIC to statically link libcurl. Only applies if
-# CURL_CONFIG is used.
-#
# Define CURLDIR=/foo/bar if your curl header and library files are in
-# /foo/bar/include and /foo/bar/lib directories. This overrides
-# CURL_CONFIG, but is less robust. If not set, and CURL_CONFIG is not set,
-# uses -lcurl with no additional library detection (other than
-# NEEDS_*_WITH_CURL).
+# /foo/bar/include and /foo/bar/lib directories.
#
# Define NO_EXPAT if you do not have expat installed. git-http-push is
# not built, and you cannot push using http:// and https:// transports (dumb).
#
# Define NEEDS_SSL_WITH_CRYPTO if you need -lssl when using -lcrypto (Darwin).
#
-# Define NEEDS_SSL_WITH_CURL if you need -lssl with -lcurl (Minix). Only used
-# if CURLDIR is set.
+# Define NEEDS_SSL_WITH_CURL if you need -lssl with -lcurl (Minix).
#
-# Define NEEDS_IDN_WITH_CURL if you need -lidn when using -lcurl (Minix). Only
-# used if CURLDIR is set.
+# Define NEEDS_IDN_WITH_CURL if you need -lidn when using -lcurl (Minix).
#
# Define NEEDS_LIBICONV if linking with libc is not enough (Darwin).
#
# Define NO_STRUCT_ITIMERVAL if you don't have struct itimerval
# This also implies NO_SETITIMER
#
-# Define NO_THREAD_SAFE_PREAD if your pread() implementation is not
-# thread-safe. (e.g. compat/pread.c or cygwin)
-#
# Define NO_FAST_WORKING_DIRECTORY if accessing objects in pack files is
# generally faster on your platform than accessing the working directory.
#
TEST_PROGRAMS_NEED_X += test-date
TEST_PROGRAMS_NEED_X += test-delta
TEST_PROGRAMS_NEED_X += test-dump-cache-tree
+TEST_PROGRAMS_NEED_X += test-dump-split-index
TEST_PROGRAMS_NEED_X += test-genrandom
TEST_PROGRAMS_NEED_X += test-hashmap
TEST_PROGRAMS_NEED_X += test-index-version
LIB_H += tree-walk.h
LIB_H += tree.h
LIB_H += unpack-trees.h
+LIB_H += unicode_width.h
LIB_H += url.h
LIB_H += urlmatch.h
LIB_H += userdiff.h
LIB_OBJS += shallow.o
LIB_OBJS += sideband.o
LIB_OBJS += sigchain.o
+LIB_OBJS += split-index.o
LIB_OBJS += strbuf.o
LIB_OBJS += streaming.o
LIB_OBJS += string-list.o
BUILTIN_OBJS += builtin/update-server-info.o
BUILTIN_OBJS += builtin/upload-archive.o
BUILTIN_OBJS += builtin/var.o
+BUILTIN_OBJS += builtin/verify-commit.o
BUILTIN_OBJS += builtin/verify-pack.o
BUILTIN_OBJS += builtin/verify-tag.o
BUILTIN_OBJS += builtin/write-tree.o
EXTLIBS += -lpcre
endif
+ifdef HAVE_ALLOCA_H
+ BASIC_CFLAGS += -DHAVE_ALLOCA_H
+endif
+
ifdef NO_CURL
BASIC_CFLAGS += -DNO_CURL
REMOTE_CURL_PRIMARY =
REMOTE_CURL_NAMES =
else
ifdef CURLDIR
- CURL_LIBCURL =
+ # Try "-Wl,-rpath=$(CURLDIR)/$(lib)" in such a case.
+ BASIC_CFLAGS += -I$(CURLDIR)/include
+ CURL_LIBCURL = -L$(CURLDIR)/$(lib) $(CC_LD_DYNPATH)$(CURLDIR)/$(lib) -lcurl
else
- CURL_CONFIG = curl-config
- ifeq "$(CURL_CONFIG)" ""
- CURL_LIBCURL =
- else
- CURL_LIBCURL := $(shell $(CURL_CONFIG) --libs)
- endif
+ CURL_LIBCURL = -lcurl
endif
-
- ifeq "$(CURL_LIBCURL)" ""
- ifdef CURL_STATIC
-$(error "CURL_STATIC must be used with CURL_CONFIG")
- endif
- ifdef CURLDIR
- # Try "-Wl,-rpath=$(CURLDIR)/$(lib)" in such a case.
- BASIC_CFLAGS += -I$(CURLDIR)/include
- CURL_LIBCURL = -L$(CURLDIR)/$(lib) $(CC_LD_DYNPATH)$(CURLDIR)/$(lib) -lcurl
- else
- CURL_LIBCURL = -lcurl
- endif
- ifdef NEEDS_SSL_WITH_CURL
- CURL_LIBCURL += -lssl
- ifdef NEEDS_CRYPTO_WITH_SSL
- CURL_LIBCURL += -lcrypto
- endif
- endif
- ifdef NEEDS_IDN_WITH_CURL
- CURL_LIBCURL += -lidn
- endif
- else
- BASIC_CFLAGS += $(shell $(CURL_CONFIG) --cflags)
- ifdef CURL_STATIC
- CURL_LIBCURL = $(shell $(CURL_CONFIG) --static-libs)
- ifeq "$(CURL_LIBCURL)" ""
-$(error libcurl not detected or not compiled with static support)
- endif
+ ifdef NEEDS_SSL_WITH_CURL
+ CURL_LIBCURL += -lssl
+ ifdef NEEDS_CRYPTO_WITH_SSL
+ CURL_LIBCURL += -lcrypto
endif
endif
+ ifdef NEEDS_IDN_WITH_CURL
+ CURL_LIBCURL += -lidn
+ endif
REMOTE_CURL_PRIMARY = git-remote-http$X
REMOTE_CURL_ALIASES = git-remote-https$X git-remote-ftp$X git-remote-ftps$X
ifdef NO_PREAD
COMPAT_CFLAGS += -DNO_PREAD
COMPAT_OBJS += compat/pread.o
- NO_THREAD_SAFE_PREAD = YesPlease
-endif
-ifdef NO_THREAD_SAFE_PREAD
- BASIC_CFLAGS += -DNO_THREAD_SAFE_PREAD
endif
ifdef NO_FAST_WORKING_DIRECTORY
BASIC_CFLAGS += -DNO_FAST_WORKING_DIRECTORY
while ((*last2 == '\r') || (*last2 == '\n'))
last2--;
- /* skip leading whitespace */
- while (isspace(*s1) && (s1 <= last1))
- s1++;
- while (isspace(*s2) && (s2 <= last2))
- s2++;
+ /* skip leading whitespace, if both sides begin with whitespace */
+ if (s1 <= last1 && s2 <= last2 && isspace(*s1) && isspace(*s2)) {
+ while (isspace(*s1) && (s1 <= last1))
+ s1++;
+ while (isspace(*s2) && (s2 <= last2))
+ s2++;
+ }
/* early return if both lines are empty */
if ((s1 > last1) && (s2 > last2))
return 1;
*/
patch->def_name = git_header_name(line, len);
if (patch->def_name && root) {
- char *s = xmalloc(root_len + strlen(patch->def_name) + 1);
- strcpy(s, root);
- strcpy(s + root_len, patch->def_name);
+ char *s = xstrfmt("%s%s", root, patch->def_name);
free(patch->def_name);
patch->def_name = s;
}
}
}
- static int checkout_target(struct cache_entry *ce, struct stat *st)
+ static int checkout_target(struct index_state *istate,
+ struct cache_entry *ce, struct stat *st)
{
struct checkout costate;
memset(&costate, 0, sizeof(costate));
costate.base_dir = "";
costate.refresh_cache = 1;
+ costate.istate = istate;
if (checkout_entry(ce, &costate, NULL) || lstat(ce->name, st))
return error(_("cannot checkout %s"), ce->name);
return 0;
if (lstat(name, &st)) {
if (errno != ENOENT)
return error(_("%s: %s"), name, strerror(errno));
- if (checkout_target(ce, &st))
+ if (checkout_target(&the_index, ce, &st))
return -1;
}
if (verify_index_match(ce, &st))
}
*ce = active_cache[pos];
if (stat_ret < 0) {
- if (checkout_target(*ce, st))
+ if (checkout_target(&the_index, *ce, st))
return -1;
}
if (!cached && verify_index_match(*ce, st))
{
struct patch *patch;
struct index_state result = { NULL };
- int fd;
+ static struct lock_file lock;
/* Once we start supporting the reverse patch, it may be
* worth showing the new sha1 prefix, but until then...
die ("Could not add %s to temporary index", name);
}
- fd = open(filename, O_WRONLY | O_CREAT, 0666);
- if (fd < 0 || write_index(&result, fd) || close(fd))
+ hold_lock_file_for_update(&lock, filename, LOCK_DIE_ON_ERROR);
+ if (write_locked_index(&result, &lock, COMMIT_LOCK))
die ("Could not write temporary index to %s", filename);
discard_index(&result);
ce->ce_flags = create_ce_flags(0);
ce->ce_namelen = namelen;
if (S_ISGITLINK(mode)) {
- const char *s = buf;
+ const char *s;
- if (get_sha1_hex(s + strlen("Subproject commit "), ce->sha1))
+ if (!skip_prefix(buf, "Subproject commit ", &s) ||
+ get_sha1_hex(s, ce->sha1))
die(_("corrupt patch for submodule %s"), path);
} else {
if (!cached) {
}
if (update_index) {
- if (write_cache(newfd, active_cache, active_nr) ||
- commit_locked_index(&lock_file))
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("Unable to write new index file"));
}
/*
* Blame
*
- * Copyright (c) 2006, Junio C Hamano
+ * Copyright (c) 2006, 2014 by its authors
+ * See COPYING for licensing conditions
*/
#include "cache.h"
#include "cache-tree.h"
#include "string-list.h"
#include "mailmap.h"
+#include "mergesort.h"
#include "parse-options.h"
+#include "prio-queue.h"
#include "utf8.h"
#include "userdiff.h"
#include "line-range.h"
*/
struct origin {
int refcnt;
+ /* Record preceding blame record for this blob */
struct origin *previous;
+ /* origins are put in a list linked via `next' hanging off the
+ * corresponding commit's util field in order to make finding
+ * them fast. The presence in this chain does not count
+ * towards the origin's reference count. It is tempting to
+ * let it count as long as the commit is pending examination,
+ * but even under circumstances where the commit will be
+ * present multiple times in the priority queue of unexamined
+ * commits, processing the first instance will not leave any
+ * work requiring the origin data for the second instance. An
+ * interspersed commit changing that would have to be
+ * preexisting with a different ancestry and with the same
+ * commit date in order to wedge itself between two instances
+ * of the same commit in the priority queue _and_ produce
+ * blame entries relevant for it. While we don't want to be
+ * tripped up by this case, it certainly does not seem worth
+ * optimizing for.
+ */
+ struct origin *next;
struct commit *commit;
+ /* `suspects' contains blame entries that may be attributed to
+ * this origin's commit or to parent commits. When a commit
+ * is being processed, all suspects will be moved, either by
+ * assigning them to an origin in a different commit, or by
+ * shipping them to the scoreboard's ent list because they
+ * cannot be attributed to a different commit.
+ */
+ struct blame_entry *suspects;
mmfile_t file;
unsigned char blob_sha1[20];
unsigned mode;
+ /* guilty gets set when shipping any suspects to the final
+ * blame list instead of other commits
+ */
+ char guilty;
char path[FLEX_ARRAY];
};
static void origin_decref(struct origin *o)
{
if (o && --o->refcnt <= 0) {
+ struct origin *p, *l = NULL;
if (o->previous)
origin_decref(o->previous);
free(o->file.ptr);
- free(o);
+ /* Should be present exactly once in commit chain */
+ for (p = o->commit->util; p; l = p, p = p->next) {
+ if (p == o) {
+ if (l)
+ l->next = p->next;
+ else
+ o->commit->util = p->next;
+ free(o);
+ return;
+ }
+ }
+ die("internal error in blame::origin_decref");
}
}
/*
* Each group of lines is described by a blame_entry; it can be split
- * as we pass blame to the parents. They form a linked list in the
- * scoreboard structure, sorted by the target line number.
+ * as we pass blame to the parents. They are arranged in linked lists
+ * kept as `suspects' of some unprocessed origin, or entered (when the
+ * blame origin has been finalized) into the scoreboard structure.
+ * While the scoreboard structure is only sorted at the end of
+ * processing (according to final image line number), the lists
+ * attached to an origin are sorted by the target line number.
*/
struct blame_entry {
struct blame_entry *next;
/* the commit that introduced this group into the final image */
struct origin *suspect;
- /* true if the suspect is truly guilty; false while we have not
- * checked if the group came from one of its parents.
- */
- char guilty;
-
- /* true if the entry has been scanned for copies in the current parent
- */
- char scanned;
-
/* the line number of the first line of this group in the
* suspect's file; internally all line numbers are 0 based.
*/
unsigned score;
};
+/*
+ * Any merge of blames happens on lists of blames that arrived via
+ * different parents in a single suspect. In this case, we want to
+ * sort according to the suspect line numbers as opposed to the final
+ * image line numbers. The function body is somewhat longish because
+ * it avoids unnecessary writes.
+ */
+
+static struct blame_entry *blame_merge(struct blame_entry *list1,
+ struct blame_entry *list2)
+{
+ struct blame_entry *p1 = list1, *p2 = list2,
+ **tail = &list1;
+
+ if (!p1)
+ return p2;
+ if (!p2)
+ return p1;
+
+ if (p1->s_lno <= p2->s_lno) {
+ do {
+ tail = &p1->next;
+ if ((p1 = *tail) == NULL) {
+ *tail = p2;
+ return list1;
+ }
+ } while (p1->s_lno <= p2->s_lno);
+ }
+ for (;;) {
+ *tail = p2;
+ do {
+ tail = &p2->next;
+ if ((p2 = *tail) == NULL) {
+ *tail = p1;
+ return list1;
+ }
+ } while (p1->s_lno > p2->s_lno);
+ *tail = p1;
+ do {
+ tail = &p1->next;
+ if ((p1 = *tail) == NULL) {
+ *tail = p2;
+ return list1;
+ }
+ } while (p1->s_lno <= p2->s_lno);
+ }
+}
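
For readers less used to list splicing, here is a minimal standalone sketch (plain C, no git data structures, names are illustrative) of merging two lists that are already sorted by a key; blame_merge() above applies the same idea to blame entries keyed by s_lno, with extra bookkeeping to avoid redundant pointer writes.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *next;
};

/* Merge two lists that are individually sorted by `key` into one sorted
 * list, reusing the existing nodes (no allocation, stable order). */
static struct node *merge_sorted(struct node *a, struct node *b)
{
	struct node head, *tail = &head;	/* dummy head simplifies splicing */

	while (a && b) {
		if (a->key <= b->key) {
			tail->next = a;
			a = a->next;
		} else {
			tail->next = b;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a ? a : b;			/* splice in the remaining run */
	return head.next;
}

static struct node *cons(int key, struct node *next)
{
	struct node *n = malloc(sizeof(*n));
	n->key = key;
	n->next = next;
	return n;
}

int main(void)
{
	struct node *merged = merge_sorted(cons(1, cons(4, cons(9, NULL))),
					   cons(2, cons(3, cons(8, NULL))));
	struct node *p;

	for (p = merged; p; p = p->next)
		printf("%d ", p->key);		/* prints: 1 2 3 4 8 9 */
	printf("\n");
	return 0;
}
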
+
+static void *get_next_blame(const void *p)
+{
+ return ((struct blame_entry *)p)->next;
+}
+
+static void set_next_blame(void *p1, void *p2)
+{
+ ((struct blame_entry *)p1)->next = p2;
+}
+
+/*
+ * Final image line numbers are all different, so we don't need a
+ * three-way comparison here.
+ */
+
+static int compare_blame_final(const void *p1, const void *p2)
+{
+ return ((struct blame_entry *)p1)->lno > ((struct blame_entry *)p2)->lno
+ ? 1 : -1;
+}
+
+static int compare_blame_suspect(const void *p1, const void *p2)
+{
+ const struct blame_entry *s1 = p1, *s2 = p2;
+ /*
+ * to allow for collating suspects, we sort according to the
+ * respective pointer value as the primary sorting criterion.
+ * The actual relation is pretty unimportant as long as it
+ * establishes a total order. Comparing as integers gives us
+ * that.
+ */
+ if (s1->suspect != s2->suspect)
+ return (intptr_t)s1->suspect > (intptr_t)s2->suspect ? 1 : -1;
+ if (s1->s_lno == s2->s_lno)
+ return 0;
+ return s1->s_lno > s2->s_lno ? 1 : -1;
+}
+
+static struct blame_entry *blame_sort(struct blame_entry *head,
+ int (*compare_fn)(const void *, const void *))
+{
+ return llist_mergesort(head, get_next_blame, set_next_blame, compare_fn);
+}
+
+static int compare_commits_by_reverse_commit_date(const void *a,
+ const void *b,
+ void *c)
+{
+ return -compare_commits_by_commit_date(a, b, c);
+}
+
/*
* The current state of the blame assignment.
*/
struct scoreboard {
/* the final commit (i.e. where we started digging from) */
struct commit *final;
+ /* Priority queue for commits with unassigned blame records */
+ struct prio_queue commits;
struct rev_info *revs;
const char *path;
for (ent = sb->ent; ent && (next = ent->next); ent = next) {
if (ent->suspect == next->suspect &&
- ent->guilty == next->guilty &&
ent->s_lno + ent->num_lines == next->s_lno) {
ent->num_lines += next->num_lines;
ent->next = next->next;
sanity_check_refcnt(sb);
}
+/*
+ * Merge the given sorted list of blames into a preexisting origin.
+ * If there were no previous blames to that commit, it is entered into
+ * the commit priority queue of the scoreboard.
+ */
+
+static void queue_blames(struct scoreboard *sb, struct origin *porigin,
+ struct blame_entry *sorted)
+{
+ if (porigin->suspects)
+ porigin->suspects = blame_merge(porigin->suspects, sorted);
+ else {
+ struct origin *o;
+ for (o = porigin->commit->util; o; o = o->next) {
+ if (o->suspects) {
+ porigin->suspects = sorted;
+ return;
+ }
+ }
+ porigin->suspects = sorted;
+ prio_queue_put(&sb->commits, porigin->commit);
+ }
+}
+
/*
* Given a commit and a path in it, create a new origin structure.
* The callers that add blame to the scoreboard should use
o = xcalloc(1, sizeof(*o) + strlen(path) + 1);
o->commit = commit;
o->refcnt = 1;
+ o->next = commit->util;
+ commit->util = o;
strcpy(o->path, path);
return o;
}
/*
* Locate an existing origin or create a new one.
+ * This moves the origin to the front of the commit's util list.
*/
static struct origin *get_origin(struct scoreboard *sb,
struct commit *commit,
const char *path)
{
- struct blame_entry *e;
+ struct origin *o, *l;
- for (e = sb->ent; e; e = e->next) {
- if (e->suspect->commit == commit &&
- !strcmp(e->suspect->path, path))
- return origin_incref(e->suspect);
+ for (o = commit->util, l = NULL; o; l = o, o = o->next) {
+ if (!strcmp(o->path, path)) {
+ /* bump to front */
+ if (l) {
+ l->next = o->next;
+ o->next = commit->util;
+ commit->util = o;
+ }
+ return origin_incref(o);
+ }
}
return make_origin(commit, path);
}
struct commit *parent,
struct origin *origin)
{
- struct origin *porigin = NULL;
+ struct origin *porigin;
struct diff_options diff_opts;
const char *paths[2];
- if (parent->util) {
- /*
- * Each commit object can cache one origin in that
- * commit. This is a freestanding copy of origin and
- * not refcounted.
- */
- struct origin *cached = parent->util;
- if (!strcmp(cached->path, origin->path)) {
+ /* First check any existing origins */
+ for (porigin = parent->util; porigin; porigin = porigin->next)
+ if (!strcmp(porigin->path, origin->path)) {
/*
* The same path between origin and its parent
* without renaming -- the most common case.
*/
- porigin = get_origin(sb, parent, cached->path);
-
- /*
- * If the origin was newly created (i.e. get_origin
- * would call make_origin if none is found in the
- * scoreboard), it does not know the blob_sha1/mode,
- * so copy it. Otherwise porigin was in the
- * scoreboard and already knows blob_sha1/mode.
- */
- if (porigin->refcnt == 1) {
- hashcpy(porigin->blob_sha1, cached->blob_sha1);
- porigin->mode = cached->mode;
- }
- return porigin;
+ return origin_incref(porigin);
}
- /* otherwise it was not very useful; free it */
- free(parent->util);
- parent->util = NULL;
- }
/* See if the origin->path is different between parent
* and origin first. Most of the time they are the
}
diff_flush(&diff_opts);
free_pathspec(&diff_opts.pathspec);
- if (porigin) {
- /*
- * Create a freestanding copy that is not part of
- * the refcounted origin found in the scoreboard, and
- * cache it in the commit.
- */
- struct origin *cached;
-
- cached = make_origin(porigin->commit, porigin->path);
- hashcpy(cached->blob_sha1, porigin->blob_sha1);
- cached->mode = porigin->mode;
- parent->util = cached;
- }
return porigin;
}
}
/*
- * Link in a new blame entry to the scoreboard. Entries that cover the
- * same line range have been removed from the scoreboard previously.
+ * Append a new blame entry to a given output queue.
*/
-static void add_blame_entry(struct scoreboard *sb, struct blame_entry *e)
+static void add_blame_entry(struct blame_entry ***queue, struct blame_entry *e)
{
- struct blame_entry *ent, *prev = NULL;
-
origin_incref(e->suspect);
- for (ent = sb->ent; ent && ent->lno < e->lno; ent = ent->next)
- prev = ent;
-
- /* prev, if not NULL, is the last one that is below e */
-
- if (prev) {
- e->next = prev->next;
- prev->next = e;
- }
- else {
- e->next = sb->ent;
- sb->ent = e;
- }
+ e->next = **queue;
+ **queue = e;
+ *queue = &e->next;
}
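
The pointer-to-pointer queue idiom used by add_blame_entry() and, below, dup_entry() keeps appends O(1): the caller passes the address of the queue's tail link, and each append advances that link. A minimal standalone sketch of the same idiom (hypothetical names, not git code):

#include <stdio.h>

struct item {
	int value;
	struct item *next;
};

/* Append `it` at the position addressed by *tailp and advance *tailp so the
 * next append lands after it -- the idiom behind the **queue parameters. */
static void append(struct item ***tailp, struct item *it)
{
	it->next = **tailp;	/* keep whatever already follows (usually NULL) */
	**tailp = it;		/* link the new item in */
	*tailp = &it->next;	/* the tail link is now the new item's next */
}

int main(void)
{
	struct item a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };
	struct item *head = NULL;
	struct item **queue = &head;
	struct item *p;

	append(&queue, &a);
	append(&queue, &b);
	append(&queue, &c);
	for (p = head; p; p = p->next)
		printf("%d ", p->value);	/* prints: 1 2 3 */
	printf("\n");
	return 0;
}
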
/*
* src typically is on-stack; we want to copy the information in it to
- * a malloced blame_entry that is already on the linked list of the
- * scoreboard. The origin of dst loses a refcnt while the origin of src
- * gains one.
+ * a malloced blame_entry that gets added to the given queue. The
+ * origin of dst loses a refcnt.
*/
-static void dup_entry(struct blame_entry *dst, struct blame_entry *src)
+static void dup_entry(struct blame_entry ***queue,
+ struct blame_entry *dst, struct blame_entry *src)
{
- struct blame_entry *n;
-
- n = dst->next;
origin_incref(src->suspect);
origin_decref(dst->suspect);
memcpy(dst, src, sizeof(*src));
- dst->next = n;
- dst->score = 0;
+ dst->next = **queue;
+ **queue = dst;
+ *queue = &dst->next;
}
static const char *nth_line(struct scoreboard *sb, long lno)
/*
* split_overlap() divided an existing blame e into up to three parts
- * in split. Adjust the linked list of blames in the scoreboard to
+ * in split. Any assigned blame is moved to the given queue to
* reflect the split.
*/
-static void split_blame(struct scoreboard *sb,
+static void split_blame(struct blame_entry ***blamed,
+ struct blame_entry ***unblamed,
struct blame_entry *split,
struct blame_entry *e)
{
if (split[0].suspect && split[2].suspect) {
/* The first part (reuse storage for the existing entry e) */
- dup_entry(e, &split[0]);
+ dup_entry(unblamed, e, &split[0]);
/* The last part -- me */
new_entry = xmalloc(sizeof(*new_entry));
memcpy(new_entry, &(split[2]), sizeof(struct blame_entry));
- add_blame_entry(sb, new_entry);
+ add_blame_entry(unblamed, new_entry);
/* ... and the middle part -- parent */
new_entry = xmalloc(sizeof(*new_entry));
memcpy(new_entry, &(split[1]), sizeof(struct blame_entry));
- add_blame_entry(sb, new_entry);
+ add_blame_entry(blamed, new_entry);
}
else if (!split[0].suspect && !split[2].suspect)
/*
* The parent covers the entire area; reuse storage for
* e and replace it with the parent.
*/
- dup_entry(e, &split[1]);
+ dup_entry(blamed, e, &split[1]);
else if (split[0].suspect) {
/* me and then parent */
- dup_entry(e, &split[0]);
+ dup_entry(unblamed, e, &split[0]);
new_entry = xmalloc(sizeof(*new_entry));
memcpy(new_entry, &(split[1]), sizeof(struct blame_entry));
- add_blame_entry(sb, new_entry);
+ add_blame_entry(blamed, new_entry);
}
else {
/* parent and then me */
- dup_entry(e, &split[1]);
+ dup_entry(blamed, e, &split[1]);
new_entry = xmalloc(sizeof(*new_entry));
memcpy(new_entry, &(split[2]), sizeof(struct blame_entry));
- add_blame_entry(sb, new_entry);
- }
-
- if (DEBUG) { /* sanity */
- struct blame_entry *ent;
- int lno = sb->ent->lno, corrupt = 0;
-
- for (ent = sb->ent; ent; ent = ent->next) {
- if (lno != ent->lno)
- corrupt = 1;
- if (ent->s_lno < 0)
- corrupt = 1;
- lno += ent->num_lines;
- }
- if (corrupt) {
- lno = sb->ent->lno;
- for (ent = sb->ent; ent; ent = ent->next) {
- printf("L %8d l %8d n %8d\n",
- lno, ent->lno, ent->num_lines);
- lno = ent->lno + ent->num_lines;
- }
- die("oops");
- }
+ add_blame_entry(unblamed, new_entry);
}
}
}
/*
- * Helper for blame_chunk(). blame_entry e is known to overlap with
- * the patch hunk; split it and pass blame to the parent.
+ * reverse_blame reverses the list given in head, appending tail.
+ * That allows us to build lists in reverse order, then reverse them
+ * afterwards. This can be faster than building the list in proper
+ * order right away. The reason is that building in proper order
+ * requires writing a link in the _previous_ element, while building
+ * in reverse order just requires placing the list head into the
+ * _current_ element.
*/
-static void blame_overlap(struct scoreboard *sb, struct blame_entry *e,
- int tlno, int plno, int same,
- struct origin *parent)
-{
- struct blame_entry split[3];
- split_overlap(split, e, tlno, plno, same, parent);
- if (split[1].suspect)
- split_blame(sb, split, e);
- decref_split(split);
-}
-
-/*
- * Find the line number of the last line the target is suspected for.
- */
-static int find_last_in_target(struct scoreboard *sb, struct origin *target)
+static struct blame_entry *reverse_blame(struct blame_entry *head,
+ struct blame_entry *tail)
{
- struct blame_entry *e;
- int last_in_target = -1;
-
- for (e = sb->ent; e; e = e->next) {
- if (e->guilty || e->suspect != target)
- continue;
- if (last_in_target < e->s_lno + e->num_lines)
- last_in_target = e->s_lno + e->num_lines;
+ while (head) {
+ struct blame_entry *next = head->next;
+ head->next = tail;
+ tail = head;
+ head = next;
}
- return last_in_target;
+ return tail;
}
/*
* Process one hunk from the patch between the current suspect for
- * blame_entry e and its parent. Find and split the overlap, and
- * pass blame to the overlapping part to the parent.
+ * blame_entry e and its parent. This first blames any unfinished
+ * entries before the chunk (which is where target and parent start
+ * differing) on the parent, and then splits blame entries at the
+ * start and at the end of the difference region. Since use of -M and
+ * -C options may lead to overlapping/duplicate source line number
+ * ranges, all we can rely on from sorting/merging is the order of the
+ * first suspect line number.
*/
-static void blame_chunk(struct scoreboard *sb,
- int tlno, int plno, int same,
- struct origin *target, struct origin *parent)
+static void blame_chunk(struct blame_entry ***dstq, struct blame_entry ***srcq,
+ int tlno, int offset, int same,
+ struct origin *parent)
{
- struct blame_entry *e;
+ struct blame_entry *e = **srcq;
+ struct blame_entry *samep = NULL, *diffp = NULL;
- for (e = sb->ent; e; e = e->next) {
- if (e->guilty || e->suspect != target)
- continue;
- if (same <= e->s_lno)
- continue;
- if (tlno < e->s_lno + e->num_lines)
- blame_overlap(sb, e, tlno, plno, same, parent);
+ while (e && e->s_lno < tlno) {
+ struct blame_entry *next = e->next;
+ /*
+ * current record starts before differing portion. If
+ * it reaches into it, we need to split it up and
+ * examine the second part separately.
+ */
+ if (e->s_lno + e->num_lines > tlno) {
+ /* Move second half to a new record */
+ int len = tlno - e->s_lno;
+ struct blame_entry *n = xcalloc(1, sizeof (struct blame_entry));
+ n->suspect = e->suspect;
+ n->lno = e->lno + len;
+ n->s_lno = e->s_lno + len;
+ n->num_lines = e->num_lines - len;
+ e->num_lines = len;
+ e->score = 0;
+ /* Push new record to diffp */
+ n->next = diffp;
+ diffp = n;
+ } else
+ origin_decref(e->suspect);
+ /* Pass blame for everything before the differing
+ * chunk to the parent */
+ e->suspect = origin_incref(parent);
+ e->s_lno += offset;
+ e->next = samep;
+ samep = e;
+ e = next;
}
+ /*
+ * As we don't know how much of a common stretch after this
+ * diff will occur, the currently blamed parts are all that we
+ * can assign to the parent for now.
+ */
+
+ if (samep) {
+ **dstq = reverse_blame(samep, **dstq);
+ *dstq = &samep->next;
+ }
+ /*
+ * Prepend the split off portions: everything after e starts
+ * after the blameable portion.
+ */
+ e = reverse_blame(diffp, e);
+
+ /*
+ * Now retain records on the target while parts are different
+ * from the parent.
+ */
+ samep = NULL;
+ diffp = NULL;
+ while (e && e->s_lno < same) {
+ struct blame_entry *next = e->next;
+
+ /*
+ * If current record extends into sameness, need to split.
+ */
+ if (e->s_lno + e->num_lines > same) {
+ /*
+ * Move second half to a new record to be
+ * processed by later chunks
+ */
+ int len = same - e->s_lno;
+ struct blame_entry *n = xcalloc(1, sizeof (struct blame_entry));
+ n->suspect = origin_incref(e->suspect);
+ n->lno = e->lno + len;
+ n->s_lno = e->s_lno + len;
+ n->num_lines = e->num_lines - len;
+ e->num_lines = len;
+ e->score = 0;
+ /* Push new record to samep */
+ n->next = samep;
+ samep = n;
+ }
+ e->next = diffp;
+ diffp = e;
+ e = next;
+ }
+ **srcq = reverse_blame(diffp, reverse_blame(samep, e));
+ /* Move across elements that are in the unblamable portion */
+ if (diffp)
+ *srcq = &diffp->next;
}
struct blame_chunk_cb_data {
- struct scoreboard *sb;
- struct origin *target;
struct origin *parent;
- long plno;
- long tlno;
+ long offset;
+ struct blame_entry **dstq;
+ struct blame_entry **srcq;
};
+/* diff chunks are from parent to target */
static int blame_chunk_cb(long start_a, long count_a,
long start_b, long count_b, void *data)
{
struct blame_chunk_cb_data *d = data;
- blame_chunk(d->sb, d->tlno, d->plno, start_b, d->target, d->parent);
- d->plno = start_a + count_a;
- d->tlno = start_b + count_b;
+ if (start_a - start_b != d->offset)
+ die("internal error in blame::blame_chunk_cb");
+ blame_chunk(&d->dstq, &d->srcq, start_b, start_a - start_b,
+ start_b + count_b, d->parent);
+ d->offset = start_a + count_a - (start_b + count_b);
return 0;
}
* for the lines it is suspected to its parent. Run diff to find
* which lines came from parent and pass blame for them.
*/
-static int pass_blame_to_parent(struct scoreboard *sb,
- struct origin *target,
- struct origin *parent)
+static void pass_blame_to_parent(struct scoreboard *sb,
+ struct origin *target,
+ struct origin *parent)
{
- int last_in_target;
mmfile_t file_p, file_o;
struct blame_chunk_cb_data d;
+ struct blame_entry *newdest = NULL;
- memset(&d, 0, sizeof(d));
- d.sb = sb; d.target = target; d.parent = parent;
- last_in_target = find_last_in_target(sb, target);
- if (last_in_target < 0)
- return 1; /* nothing remains for this target */
+ if (!target->suspects)
+ return; /* nothing remains for this target */
+
+ d.parent = parent;
+ d.offset = 0;
+ d.dstq = &newdest; d.srcq = &target->suspects;
fill_origin_blob(&sb->revs->diffopt, parent, &file_p);
fill_origin_blob(&sb->revs->diffopt, target, &file_o);
num_get_patch++;
diff_hunks(&file_p, &file_o, 0, blame_chunk_cb, &d);
- /* The rest (i.e. anything after tlno) are the same as the parent */
- blame_chunk(sb, d.tlno, d.plno, last_in_target, target, parent);
+ /* The rest are the same as the parent */
+ blame_chunk(&d.dstq, &d.srcq, INT_MAX, d.offset, INT_MAX, parent);
+ *d.dstq = NULL;
+ queue_blames(sb, parent, newdest);
- return 0;
+ return;
}
/*
handle_split(sb, ent, d.tlno, d.plno, ent->num_lines, parent, split);
}
+/* Move all blame entries from list *source that have a score no greater
+ * than score_min to the front of list *small.
+ * Returns a pointer to the link pointing to the old head of the small list.
+ */
+
+static struct blame_entry **filter_small(struct scoreboard *sb,
+ struct blame_entry **small,
+ struct blame_entry **source,
+ unsigned score_min)
+{
+ struct blame_entry *p = *source;
+ struct blame_entry *oldsmall = *small;
+ while (p) {
+ if (ent_score(sb, p) <= score_min) {
+ *small = p;
+ small = &p->next;
+ p = *small;
+ } else {
+ *source = p;
+ source = &p->next;
+ p = *source;
+ }
+ }
+ *small = oldsmall;
+ *source = NULL;
+ return small;
+}
+
/*
* See if the lines the target is currently suspected for can be
* attributed to the parent.
*/
-static int find_move_in_parent(struct scoreboard *sb,
- struct origin *target,
- struct origin *parent)
+static void find_move_in_parent(struct scoreboard *sb,
+ struct blame_entry ***blamed,
+ struct blame_entry **toosmall,
+ struct origin *target,
+ struct origin *parent)
{
- int last_in_target, made_progress;
struct blame_entry *e, split[3];
+ struct blame_entry *unblamed = target->suspects;
+ struct blame_entry *leftover = NULL;
mmfile_t file_p;
- last_in_target = find_last_in_target(sb, target);
- if (last_in_target < 0)
- return 1; /* nothing remains for this target */
+ if (!unblamed)
+ return; /* nothing remains for this target */
fill_origin_blob(&sb->revs->diffopt, parent, &file_p);
if (!file_p.ptr)
- return 0;
+ return;
- made_progress = 1;
- while (made_progress) {
- made_progress = 0;
- for (e = sb->ent; e; e = e->next) {
- if (e->guilty || e->suspect != target ||
- ent_score(sb, e) < blame_move_score)
- continue;
+ /* At each iteration, unblamed has a NULL-terminated list of
+ * entries that have not yet been tested for blame. leftover
+ * contains the reversed list of entries that have been tested
+ * without being assignable to the parent.
+ */
+ do {
+ struct blame_entry **unblamedtail = &unblamed;
+ struct blame_entry *next;
+ for (e = unblamed; e; e = next) {
+ next = e->next;
find_copy_in_blob(sb, e, parent, split, &file_p);
if (split[1].suspect &&
blame_move_score < ent_score(sb, &split[1])) {
- split_blame(sb, split, e);
- made_progress = 1;
+ split_blame(blamed, &unblamedtail, split, e);
+ } else {
+ e->next = leftover;
+ leftover = e;
}
decref_split(split);
}
- }
- return 0;
+ *unblamedtail = NULL;
+ toosmall = filter_small(sb, toosmall, &unblamed, blame_move_score);
+ } while (unblamed);
+ target->suspects = reverse_blame(leftover, NULL);
}
struct blame_list {
* Count the number of entries the target is suspected for,
* and prepare a list of entry and the best split.
*/
-static struct blame_list *setup_blame_list(struct scoreboard *sb,
- struct origin *target,
- int min_score,
+static struct blame_list *setup_blame_list(struct blame_entry *unblamed,
int *num_ents_p)
{
struct blame_entry *e;
int num_ents, i;
struct blame_list *blame_list = NULL;
- for (e = sb->ent, num_ents = 0; e; e = e->next)
- if (!e->scanned && !e->guilty &&
- e->suspect == target &&
- min_score < ent_score(sb, e))
- num_ents++;
+ for (e = unblamed, num_ents = 0; e; e = e->next)
+ num_ents++;
if (num_ents) {
blame_list = xcalloc(num_ents, sizeof(struct blame_list));
- for (e = sb->ent, i = 0; e; e = e->next)
- if (!e->scanned && !e->guilty &&
- e->suspect == target &&
- min_score < ent_score(sb, e))
- blame_list[i++].ent = e;
+ for (e = unblamed, i = 0; e; e = e->next)
+ blame_list[i++].ent = e;
}
*num_ents_p = num_ents;
return blame_list;
}
-/*
- * Reset the scanned status on all entries.
- */
-static void reset_scanned_flag(struct scoreboard *sb)
-{
- struct blame_entry *e;
- for (e = sb->ent; e; e = e->next)
- e->scanned = 0;
-}
-
/*
* For the lines the target is suspected for, see if we can find code movement
* across a file boundary from the parent commit. porigin is the path
* in the parent we already tried.
*/
-static int find_copy_in_parent(struct scoreboard *sb,
- struct origin *target,
- struct commit *parent,
- struct origin *porigin,
- int opt)
+static void find_copy_in_parent(struct scoreboard *sb,
+ struct blame_entry ***blamed,
+ struct blame_entry **toosmall,
+ struct origin *target,
+ struct commit *parent,
+ struct origin *porigin,
+ int opt)
{
struct diff_options diff_opts;
int i, j;
- int retval;
struct blame_list *blame_list;
int num_ents;
+ struct blame_entry *unblamed = target->suspects;
+ struct blame_entry *leftover = NULL;
- blame_list = setup_blame_list(sb, target, blame_copy_score, &num_ents);
- if (!blame_list)
- return 1; /* nothing remains for this target */
+ if (!unblamed)
+ return; /* nothing remains for this target */
diff_setup(&diff_opts);
DIFF_OPT_SET(&diff_opts, RECURSIVE);
if (!DIFF_OPT_TST(&diff_opts, FIND_COPIES_HARDER))
diffcore_std(&diff_opts);
- retval = 0;
- while (1) {
- int made_progress = 0;
+ do {
+ struct blame_entry **unblamedtail = &unblamed;
+ blame_list = setup_blame_list(unblamed, &num_ents);
for (i = 0; i < diff_queued_diff.nr; i++) {
struct diff_filepair *p = diff_queued_diff.queue[i];
struct blame_entry *split = blame_list[j].split;
if (split[1].suspect &&
blame_copy_score < ent_score(sb, &split[1])) {
- split_blame(sb, split, blame_list[j].ent);
- made_progress = 1;
+ split_blame(blamed, &unblamedtail, split,
+ blame_list[j].ent);
+ } else {
+ blame_list[j].ent->next = leftover;
+ leftover = blame_list[j].ent;
}
- else
- blame_list[j].ent->scanned = 1;
decref_split(split);
}
free(blame_list);
-
- if (!made_progress)
- break;
- blame_list = setup_blame_list(sb, target, blame_copy_score, &num_ents);
- if (!blame_list) {
- retval = 1;
- break;
- }
- }
- reset_scanned_flag(sb);
+ *unblamedtail = NULL;
+ toosmall = filter_small(sb, toosmall, &unblamed, blame_copy_score);
+ } while (unblamed);
+ target->suspects = reverse_blame(leftover, NULL);
diff_flush(&diff_opts);
free_pathspec(&diff_opts.pathspec);
- return retval;
}
/*
static void pass_whole_blame(struct scoreboard *sb,
struct origin *origin, struct origin *porigin)
{
- struct blame_entry *e;
+ struct blame_entry *e, *suspects;
if (!porigin->file.ptr && origin->file.ptr) {
/* Steal its file */
porigin->file = origin->file;
origin->file.ptr = NULL;
}
- for (e = sb->ent; e; e = e->next) {
- if (e->suspect != origin)
- continue;
+ suspects = origin->suspects;
+ origin->suspects = NULL;
+ for (e = suspects; e; e = e->next) {
origin_incref(porigin);
origin_decref(e->suspect);
e->suspect = porigin;
}
+ queue_blames(sb, porigin, suspects);
}
/*
return cnt;
}
+/* Distribute collected unsorted blames to the respective sorted lists
+ * in the various origins.
+ */
+static void distribute_blame(struct scoreboard *sb, struct blame_entry *blamed)
+{
+ blamed = blame_sort(blamed, compare_blame_suspect);
+ while (blamed) {
+ struct origin *porigin = blamed->suspect;
+ struct blame_entry *suspects = NULL;
+ do {
+ struct blame_entry *next = blamed->next;
+ blamed->next = suspects;
+ suspects = blamed;
+ blamed = next;
+ } while (blamed && blamed->suspect == porigin);
+ suspects = reverse_blame(suspects, NULL);
+ queue_blames(sb, porigin, suspects);
+ }
+}
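
distribute_blame() exploits the fact that after sorting by suspect, entries with the same suspect are adjacent, so each pass of the outer loop peels off one maximal run and hands it to queue_blames(). A simplified standalone sketch of that run-peeling on a key-sorted list (illustrative names only; the real code builds each run in reverse and flips it back with reverse_blame()):

#include <stdio.h>

struct entry {
	int key;
	struct entry *next;
};

/* Detach the leading run of entries that share (*list)->key and return it;
 * *list is advanced past the run. */
static struct entry *peel_run(struct entry **list)
{
	struct entry *run = *list, *p = run;

	while (p->next && p->next->key == run->key)
		p = p->next;
	*list = p->next;
	p->next = NULL;
	return run;
}

int main(void)
{
	struct entry e5 = { 7, NULL }, e4 = { 7, &e5 }, e3 = { 3, &e4 };
	struct entry e2 = { 3, &e3 }, e1 = { 3, &e2 };
	struct entry *list = &e1;

	while (list) {
		struct entry *run = peel_run(&list);
		int n = 0;

		for (; run; run = run->next)
			n++;
		printf("run of %d entries\n", n);	/* prints 3, then 2 */
	}
	return 0;
}
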
+
#define MAXSG 16
static void pass_blame(struct scoreboard *sb, struct origin *origin, int opt)
struct commit_list *sg;
struct origin *sg_buf[MAXSG];
struct origin *porigin, **sg_origin = sg_buf;
+ struct blame_entry *toosmall = NULL;
+ struct blame_entry *blames, **blametail = &blames;
num_sg = num_scapegoats(revs, commit);
if (!num_sg)
origin_incref(porigin);
origin->previous = porigin;
}
- if (pass_blame_to_parent(sb, origin, porigin))
+ pass_blame_to_parent(sb, origin, porigin);
+ if (!origin->suspects)
goto finish;
}
/*
* Optionally find moves in parents' files.
*/
- if (opt & PICKAXE_BLAME_MOVE)
- for (i = 0, sg = first_scapegoat(revs, commit);
- i < num_sg && sg;
- sg = sg->next, i++) {
- struct origin *porigin = sg_origin[i];
- if (!porigin)
- continue;
- if (find_move_in_parent(sb, origin, porigin))
- goto finish;
+ if (opt & PICKAXE_BLAME_MOVE) {
+ filter_small(sb, &toosmall, &origin->suspects, blame_move_score);
+ if (origin->suspects) {
+ for (i = 0, sg = first_scapegoat(revs, commit);
+ i < num_sg && sg;
+ sg = sg->next, i++) {
+ struct origin *porigin = sg_origin[i];
+ if (!porigin)
+ continue;
+ find_move_in_parent(sb, &blametail, &toosmall, origin, porigin);
+ if (!origin->suspects)
+ break;
+ }
}
+ }
/*
* Optionally find copies from parents' files.
*/
- if (opt & PICKAXE_BLAME_COPY)
+ if (opt & PICKAXE_BLAME_COPY) {
+ if (blame_copy_score > blame_move_score)
+ filter_small(sb, &toosmall, &origin->suspects, blame_copy_score);
+ else if (blame_copy_score < blame_move_score) {
+ origin->suspects = blame_merge(origin->suspects, toosmall);
+ toosmall = NULL;
+ filter_small(sb, &toosmall, &origin->suspects, blame_copy_score);
+ }
+ if (!origin->suspects)
+ goto finish;
+
for (i = 0, sg = first_scapegoat(revs, commit);
i < num_sg && sg;
sg = sg->next, i++) {
struct origin *porigin = sg_origin[i];
- if (find_copy_in_parent(sb, origin, sg->item,
- porigin, opt))
+ find_copy_in_parent(sb, &blametail, &toosmall,
+ origin, sg->item, porigin, opt);
+ if (!origin->suspects)
goto finish;
}
+ }
- finish:
+finish:
+ *blametail = NULL;
+ distribute_blame(sb, blames);
+ /*
+ * prepend toosmall to origin->suspects
+ *
+ * There is no point in sorting: this ends up on a big
+ * unsorted list in the caller anyway.
+ */
+ if (toosmall) {
+ struct blame_entry **tail = &toosmall;
+ while (*tail)
+ tail = &(*tail)->next;
+ *tail = origin->suspects;
+ origin->suspects = toosmall;
+ }
for (i = 0; i < num_sg; i++) {
if (sg_origin[i]) {
drop_origin_blob(sg_origin[i]);
{
int len;
const char *subject, *encoding;
- char *message;
+ const char *message;
commit_info_init(ret);
&ret->author_time, &ret->author_tz);
if (!detailed) {
- logmsg_free(message, commit);
+ unuse_commit_buffer(commit, message);
return;
}
else
strbuf_addf(&ret->summary, "(%s)", sha1_to_hex(commit->object.sha1));
- logmsg_free(message, commit);
+ unuse_commit_buffer(commit, message);
}
/*
}
/*
- * The blame_entry is found to be guilty for the range. Mark it
- * as such, and show it in incremental output.
+ * The blame_entry is found to be guilty for the range.
+ * Show it in incremental output.
*/
static void found_guilty_entry(struct blame_entry *ent)
{
- if (ent->guilty)
- return;
- ent->guilty = 1;
if (incremental) {
struct origin *suspect = ent->suspect;
}
/*
- * The main loop -- while the scoreboard has lines whose true origin
- * is still unknown, pick one blame_entry, and allow its current
- * suspect to pass blames to its parents.
- */
+ * The main loop -- while we have blobs with lines whose true origin
+ * is still unknown, pick one blob, and allow its lines to pass blames
+ * to its parents.
+ */
static void assign_blame(struct scoreboard *sb, int opt)
{
struct rev_info *revs = sb->revs;
+ struct commit *commit = prio_queue_get(&sb->commits);
- while (1) {
+ while (commit) {
struct blame_entry *ent;
- struct commit *commit;
- struct origin *suspect = NULL;
+ struct origin *suspect = commit->util;
/* find one suspect to break down */
- for (ent = sb->ent; !suspect && ent; ent = ent->next)
- if (!ent->guilty)
- suspect = ent->suspect;
- if (!suspect)
- return; /* all done */
+ while (suspect && !suspect->suspects)
+ suspect = suspect->next;
+
+ if (!suspect) {
+ commit = prio_queue_get(&sb->commits);
+ continue;
+ }
+
+ assert(commit == suspect->commit);
/*
* We will use this suspect later in the loop,
* so hold onto it in the meantime.
*/
origin_incref(suspect);
- commit = suspect->commit;
parse_commit(commit);
if (reverse ||
(!(commit->object.flags & UNINTERESTING) &&
commit->object.flags |= UNINTERESTING;
/* Take responsibility for the remaining entries */
- for (ent = sb->ent; ent; ent = ent->next)
- if (ent->suspect == suspect)
+ ent = suspect->suspects;
+ if (ent) {
+ suspect->guilty = 1;
+ for (;;) {
+ struct blame_entry *next = ent->next;
found_guilty_entry(ent);
+ if (next) {
+ ent = next;
+ continue;
+ }
+ ent->next = sb->ent;
+ sb->ent = suspect->suspects;
+ suspect->suspects = NULL;
+ break;
+ }
+ }
origin_decref(suspect);
if (DEBUG) /* sanity */
static const char *format_time(unsigned long time, const char *tz_str,
int show_raw_time)
{
- static char time_buf[128];
+ static struct strbuf time_buf = STRBUF_INIT;
+ strbuf_reset(&time_buf);
if (show_raw_time) {
- snprintf(time_buf, sizeof(time_buf), "%lu %s", time, tz_str);
+ strbuf_addf(&time_buf, "%lu %s", time, tz_str);
}
else {
const char *time_str;
- int time_len;
+ size_t time_width;
int tz;
tz = atoi(tz_str);
time_str = show_date(time, tz, blame_date_mode);
- time_len = strlen(time_str);
- memcpy(time_buf, time_str, time_len);
- memset(time_buf + time_len, ' ', blame_date_width - time_len);
+ strbuf_addstr(&time_buf, time_str);
+ /*
+ * Add space padding to time_buf to display a fixed-width
+ * string, and use time_width for display-width calibration.
+ */
+ for (time_width = utf8_strwidth(time_str);
+ time_width < blame_date_width;
+ time_width++)
+ strbuf_addch(&time_buf, ' ');
}
- return time_buf;
+ return time_buf.buf;
}
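
The padding loop above sizes the column by display width rather than byte length. As a rough standalone illustration (ASCII only, so plain strlen() stands in for utf8_strwidth(), which is exactly the simplification the real code cannot afford):

#include <stdio.h>
#include <string.h>

/* Copy `src` into `dst` and pad with trailing spaces so it fills at least
 * `width` columns.  strlen() only equals the display width for ASCII. */
static void pad_to_width(char *dst, size_t dstlen, const char *src, size_t width)
{
	size_t w, i;

	snprintf(dst, dstlen, "%s", src);
	i = strlen(dst);
	for (w = i; w < width && i + 1 < dstlen; w++, i++)
		dst[i] = ' ';
	dst[i] = '\0';
}

int main(void)
{
	char buf[64];

	pad_to_width(buf, sizeof(buf), "3 days ago", 22);
	printf("[%s]\n", buf);	/* the string is padded out to 22 columns */
	return 0;
}
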
#define OUTPUT_ANNOTATE_COMPAT 001
char hex[41];
strcpy(hex, sha1_to_hex(suspect->commit->object.sha1));
- printf("%s%c%d %d %d\n",
+ printf("%s %d %d %d\n",
hex,
- ent->guilty ? ' ' : '*', /* purely for debugging */
ent->s_lno + 1,
ent->lno + 1,
ent->num_lines);
if (option & OUTPUT_PORCELAIN) {
for (ent = sb->ent; ent; ent = ent->next) {
- struct blame_entry *oth;
- struct origin *suspect = ent->suspect;
- struct commit *commit = suspect->commit;
+ int count = 0;
+ struct origin *suspect;
+ struct commit *commit = ent->suspect->commit;
if (commit->object.flags & MORE_THAN_ONE_PATH)
continue;
- for (oth = ent->next; oth; oth = oth->next) {
- if ((oth->suspect->commit != commit) ||
- !strcmp(oth->suspect->path, suspect->path))
- continue;
- commit->object.flags |= MORE_THAN_ONE_PATH;
- break;
+ for (suspect = commit->util; suspect; suspect = suspect->next) {
+ if (suspect->guilty && count++) {
+ commit->object.flags |= MORE_THAN_ONE_PATH;
+ break;
+ }
}
}
}
}
}
+static const char *get_next_line(const char *start, const char *end)
+{
+ const char *nl = memchr(start, '\n', end - start);
+ return nl ? nl + 1 : end;
+}
+
/*
* To allow quick access to the contents of the nth line in the
* final image, prepare an index in the scoreboard.
const char *end = buf + len;
const char *p;
int *lineno;
- int num = 0, incomplete = 0;
+ int num = 0;
- for (p = buf;;) {
- p = memchr(p, '\n', end - p);
- if (p) {
- p++;
- num++;
- continue;
- }
- break;
- }
+ for (p = buf; p < end; p = get_next_line(p, end))
+ num++;
- if (len && end[-1] != '\n')
- incomplete++; /* incomplete line at the end */
+ sb->lineno = lineno = xmalloc(sizeof(*sb->lineno) * (num + 1));
- sb->lineno = xmalloc(sizeof(*sb->lineno) * (num + incomplete + 1));
- lineno = sb->lineno;
-
- *lineno++ = 0;
- for (p = buf;;) {
- p = memchr(p, '\n', end - p);
- if (p) {
- p++;
- *lineno++ = p - buf;
- continue;
- }
- break;
- }
+ for (p = buf; p < end; p = get_next_line(p, end))
+ *lineno++ = p - buf;
- if (incomplete)
- *lineno++ = len;
+ *lineno = len;
- sb->num_lines = num + incomplete;
+ sb->num_lines = num;
return sb->num_lines;
}
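
The rewritten prepare_lines() boils down to: count the lines, allocate num + 1 offsets, record where each line starts, and store len as a final sentinel, which also covers an incomplete last line. A standalone sketch of that indexing scheme with hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *next_line(const char *start, const char *end)
{
	const char *nl = memchr(start, '\n', end - start);
	return nl ? nl + 1 : end;
}

/* Build a table of line-start offsets for `buf`; offsets[num] == len is a
 * sentinel, so line i spans offsets[i] .. offsets[i + 1]. */
static size_t *index_lines(const char *buf, size_t len, size_t *num_lines)
{
	const char *p, *end = buf + len;
	size_t n = 0, i = 0;
	size_t *offsets;

	for (p = buf; p < end; p = next_line(p, end))
		n++;
	offsets = malloc((n + 1) * sizeof(*offsets));
	for (p = buf; p < end; p = next_line(p, end))
		offsets[i++] = p - buf;
	offsets[n] = len;	/* sentinel; also closes an incomplete last line */
	*num_lines = n;
	return offsets;
}

int main(void)
{
	const char *text = "one\ntwo\nlast line without newline";
	size_t n, i, *off = index_lines(text, strlen(text), &n);

	for (i = 0; i < n; i++)
		printf("line %zu starts at %zu, length %zu\n",
		       i, off[i], off[i + 1] - off[i]);
	free(off);
	return 0;
}
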
strbuf_release(&line);
}
+/*
+ * This isn't as simple as passing sb->buf and sb->len, because we
+ * want to transfer ownership of the buffer to the commit (so we
+ * must use detach).
+ */
+static void set_commit_buffer_from_strbuf(struct commit *c, struct strbuf *sb)
+{
+ size_t len;
+ void *buf = strbuf_detach(sb, &len);
+ set_commit_buffer(c, buf, len);
+}
+
/*
* Prepare a dummy commit that represents the work tree (or staged) item.
* Note that annotating work tree item never works in the reverse.
struct strbuf msg = STRBUF_INIT;
time(&now);
- commit = xcalloc(1, sizeof(*commit));
+ commit = alloc_commit_node();
commit->object.parsed = 1;
commit->date = now;
commit->object.type = OBJ_COMMIT;
ident, ident, path,
(!contents_from ? path :
(!strcmp(contents_from, "-") ? "standard input" : contents_from)));
- commit->buffer = strbuf_detach(&msg, NULL);
+ set_commit_buffer_from_strbuf(commit, &msg);
if (!contents_from || strcmp("-", contents_from)) {
struct stat st;
if (strbuf_read(&buf, 0, 0) < 0)
die_errno("failed to read from stdin");
}
- convert_to_git(path, buf.buf, buf.len, &buf, 0);
origin->file.ptr = buf.buf;
origin->file.size = buf.len;
pretend_sha1_file(buf.buf, buf.len, OBJ_BLOB, origin->blob_sha1);
- commit->util = origin;
/*
* Read the current index, replace the path entry with
* right now, but someday we might optimize diff-index --cached
* with cache-tree information.
*/
- cache_tree_invalidate_path(active_cache_tree, path);
+ cache_tree_invalidate_path(&the_index, path);
return commit;
}
blame_date_width = sizeof("2006-10-19");
break;
case DATE_RELATIVE:
- /* "normal" is used as the fallback for "relative" */
+ /* TRANSLATORS: This string is used to tell us the maximum
+ display width for a relative timestamp in "git blame"
+ output. For C locale, "4 years, 11 months ago", which
+ takes 22 places, is the longest among various forms of
+ relative timestamps, but your language may need more or
+ fewer display columns. */
+ blame_date_width = utf8_strwidth(_("4 years, 11 months ago")) + 1; /* add the null */
+ break;
case DATE_LOCAL:
case DATE_NORMAL:
blame_date_width = sizeof("Thu Oct 19 16:00:04 2006 -0700");
memset(&sb, 0, sizeof(sb));
sb.revs = &revs;
- if (!reverse)
+ if (!reverse) {
final_commit_name = prepare_final(&sb);
+ sb.commits.compare = compare_commits_by_commit_date;
+ }
else if (contents_from)
die("--contents and --children do not blend well.");
- else
+ else {
final_commit_name = prepare_initial(&sb);
+ sb.commits.compare = compare_commits_by_reverse_commit_date;
+ }
if (!sb.final) {
/*
ent->next = next;
origin_incref(o);
}
+
+ o->suspects = ent;
+ prio_queue_put(&sb.commits, o->commit);
+
origin_decref(o);
range_set_release(&ranges);
string_list_clear(&range_list, 0);
- sb.ent = ent;
+ sb.ent = NULL;
sb.path = path;
read_mailmap(&mailmap, NULL);
if (incremental)
return 0;
+ sb.ent = blame_sort(sb.ent, compare_blame_final);
+
coalesce(&sb);
if (!(output_option & OUTPUT_PORCELAIN))
int flag;
struct commit *head;
int errs = 0;
- int newfd;
struct lock_file *lock_file;
if (opts->track != BRANCH_TRACK_UNSPECIFIED)
lock_file = xcalloc(1, sizeof(struct lock_file));
- newfd = hold_locked_index(lock_file, 1);
+ hold_locked_index(lock_file, 1);
if (read_cache_preload(&opts->pathspec) < 0)
return error(_("corrupt index file"));
memset(&state, 0, sizeof(state));
state.force = 1;
state.refresh_cache = 1;
+ state.istate = &the_index;
for (pos = 0; pos < active_nr; pos++) {
struct cache_entry *ce = active_cache[pos];
if (ce->ce_flags & CE_MATCHED) {
}
}
- if (write_cache(newfd, active_cache, active_nr) ||
- commit_locked_index(lock_file))
+ if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
read_ref_full("HEAD", rev, 0, &flag);
{
int ret;
struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
- int newfd = hold_locked_index(lock_file, 1);
+ hold_locked_index(lock_file, 1);
if (read_cache_preload(NULL) < 0)
return error(_("corrupt index file"));
}
}
- if (write_cache(newfd, active_cache, active_nr) ||
- commit_locked_index(lock_file))
+ if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
if (!opts->force && !opts->quiet)
/* Nothing to do. */
} else if (opts->force_detach || !new->path) { /* No longer on any branch. */
update_ref(msg.buf, "HEAD", new->commit->object.sha1, NULL,
- REF_NODEREF, DIE_ON_ERR);
+ REF_NODEREF, UPDATE_REFS_DIE_ON_ERR);
if (!opts->quiet) {
if (old->path && advice_detached_head)
detach_advice(new->name);
}
}
if (old->path && old->name) {
- char log_file[PATH_MAX], ref_file[PATH_MAX];
-
- git_snpath(log_file, sizeof(log_file), "logs/%s", old->path);
- git_snpath(ref_file, sizeof(ref_file), "%s", old->path);
- if (!file_exists(ref_file) && file_exists(log_file))
- remove_path(log_file);
+ if (!ref_exists(old->path) && reflog_exists(old->path))
+ delete_reflog(old->path);
}
}
remove_branch_state();
if (!(flag & REF_ISSYMREF))
old.path = NULL;
- if (old.path && starts_with(old.path, "refs/heads/"))
- old.name = old.path + strlen("refs/heads/");
+ if (old.path)
+ skip_prefix(old.path, "refs/heads/", &old.name);
if (!new->name) {
new->name = "HEAD";
if (!has_sha1_file(ref->old_sha1))
continue;
update_ref(msg, ref->name, ref->old_sha1,
- NULL, 0, DIE_ON_ERR);
+ NULL, 0, UPDATE_REFS_DIE_ON_ERR);
}
}
static void update_head(const struct ref *our, const struct ref *remote,
const char *msg)
{
- if (our && starts_with(our->name, "refs/heads/")) {
+ const char *head;
+ if (our && skip_prefix(our->name, "refs/heads/", &head)) {
/* Local default branch link */
create_symref("HEAD", our->name, NULL);
if (!option_bare) {
- const char *head = skip_prefix(our->name, "refs/heads/");
- update_ref(msg, "HEAD", our->old_sha1, NULL, 0, DIE_ON_ERR);
+ update_ref(msg, "HEAD", our->old_sha1, NULL, 0,
+ UPDATE_REFS_DIE_ON_ERR);
install_branch_config(0, head, option_origin, our->name);
}
} else if (our) {
struct commit *c = lookup_commit_reference(our->old_sha1);
/* --branch specifies a non-branch (i.e. tags), detach HEAD */
update_ref(msg, "HEAD", c->object.sha1,
- NULL, REF_NODEREF, DIE_ON_ERR);
+ NULL, REF_NODEREF, UPDATE_REFS_DIE_ON_ERR);
} else if (remote) {
/*
* We know remote HEAD points to a non-branch, or
* Detach HEAD in all these cases.
*/
update_ref(msg, "HEAD", remote->old_sha1,
- NULL, REF_NODEREF, DIE_ON_ERR);
+ NULL, REF_NODEREF, UPDATE_REFS_DIE_ON_ERR);
}
}
struct unpack_trees_options opts;
struct tree *tree;
struct tree_desc t;
- int err = 0, fd;
+ int err = 0;
if (option_no_checkout)
return 0;
setup_work_tree();
lock_file = xcalloc(1, sizeof(struct lock_file));
- fd = hold_locked_index(lock_file, 1);
+ hold_locked_index(lock_file, 1);
memset(&opts, 0, sizeof opts);
opts.update = 1;
if (unpack_trees(1, &t, &opts) < 0)
die(_("unable to checkout working tree"));
- if (write_cache(fd, active_cache, active_nr) ||
- commit_locked_index(lock_file))
+ if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
err |= run_hook_le(NULL, "post-checkout", sha1_to_hex(null_sha1),
if (option_mirror || !option_bare) {
if (option_single_branch && !option_mirror) {
if (option_branch) {
- if (strstr(our_head_points_at->name, "refs/tags/"))
+ if (starts_with(our_head_points_at->name, "refs/tags/"))
strbuf_addf(&value, "+%s:%s", our_head_points_at->name,
our_head_points_at->name);
else
strbuf_addf(&value, "+%s:%s%s", our_head_points_at->name,
branch_top->buf, option_branch);
} else if (remote_head_points_at) {
+ const char *head = remote_head_points_at->name;
+ if (!skip_prefix(head, "refs/heads/", &head))
+ die("BUG: remote HEAD points at non-head?");
+
strbuf_addf(&value, "+%s:%s%s", remote_head_points_at->name,
- branch_top->buf,
- skip_prefix(remote_head_points_at->name, "refs/heads/"));
+ branch_top->buf, head);
}
/*
* otherwise, the next "git fetch" will
static char *prepare_index(int argc, const char **argv, const char *prefix,
const struct commit *current_head, int is_status)
{
- int fd;
struct string_list partial;
struct pathspec pathspec;
int refresh_flags = REFRESH_QUIET;
if (interactive) {
char *old_index_env = NULL;
- fd = hold_locked_index(&index_lock, 1);
+ hold_locked_index(&index_lock, 1);
refresh_cache_or_die(refresh_flags);
- if (write_cache(fd, active_cache, active_nr) ||
- close_lock_file(&index_lock))
+ if (write_locked_index(&the_index, &index_lock, CLOSE_LOCK))
die(_("unable to create temporary index"));
old_index_env = getenv(INDEX_ENVIRONMENT);
* (B) on failure, rollback the real index.
*/
if (all || (also && pathspec.nr)) {
- fd = hold_locked_index(&index_lock, 1);
+ hold_locked_index(&index_lock, 1);
add_files_to_cache(also ? prefix : NULL, &pathspec, 0);
refresh_cache_or_die(refresh_flags);
update_main_cache_tree(WRITE_TREE_SILENT);
- if (write_cache(fd, active_cache, active_nr) ||
- close_lock_file(&index_lock))
+ if (write_locked_index(&the_index, &index_lock, CLOSE_LOCK))
die(_("unable to write new_index file"));
commit_style = COMMIT_NORMAL;
return index_lock.filename;
* We still need to refresh the index here.
*/
if (!only && !pathspec.nr) {
- fd = hold_locked_index(&index_lock, 1);
+ hold_locked_index(&index_lock, 1);
refresh_cache_or_die(refresh_flags);
if (active_cache_changed) {
update_main_cache_tree(WRITE_TREE_SILENT);
- if (write_cache(fd, active_cache, active_nr) ||
- commit_locked_index(&index_lock))
+ if (write_locked_index(&the_index, &index_lock,
+ COMMIT_LOCK))
die(_("unable to write new_index file"));
} else {
rollback_lock_file(&index_lock);
if (read_cache() < 0)
die(_("cannot read the index"));
- fd = hold_locked_index(&index_lock, 1);
+ hold_locked_index(&index_lock, 1);
add_remove_files(&partial);
refresh_cache(REFRESH_QUIET);
- if (write_cache(fd, active_cache, active_nr) ||
- close_lock_file(&index_lock))
+ if (write_locked_index(&the_index, &index_lock, CLOSE_LOCK))
die(_("unable to write new_index file"));
- fd = hold_lock_file_for_update(&false_lock,
- git_path("next-index-%"PRIuMAX,
- (uintmax_t) getpid()),
- LOCK_DIE_ON_ERROR);
+ hold_lock_file_for_update(&false_lock,
+ git_path("next-index-%"PRIuMAX,
+ (uintmax_t) getpid()),
+ LOCK_DIE_ON_ERROR);
create_base_index(current_head);
add_remove_files(&partial);
refresh_cache(REFRESH_QUIET);
- if (write_cache(fd, active_cache, active_nr) ||
- close_lock_file(&false_lock))
+ if (write_locked_index(&the_index, &false_lock, CLOSE_LOCK))
die(_("unable to write temporary index file"));
discard_cache();
return 1;
}
+static int parse_force_date(const char *in, char *out, int len)
+{
+ if (len < 1)
+ return -1;
+ *out++ = '@';
+ len--;
+
+ if (parse_date(in, out, len) < 0) {
+ int errors = 0;
+ unsigned long t = approxidate_careful(in, &errors);
+ if (errors)
+ return -1;
+ snprintf(out, len, "%lu", t);
+ }
+
+ return 0;
+}
+
static void determine_author_info(struct strbuf *author_ident)
{
char *name, *email, *date;
struct ident_split author;
+ char date_buf[64];
name = getenv("GIT_AUTHOR_NAME");
email = getenv("GIT_AUTHOR_EMAIL");
email = xstrndup(lb + 2, rb - (lb + 2));
}
- if (force_date)
- date = force_date;
+ if (force_date) {
+ if (parse_force_date(force_date, date_buf, sizeof(date_buf)))
+ die(_("invalid date format: %s"), force_date);
+ date = date_buf;
+ }
+
strbuf_addstr(author_ident, fmt_ident(name, email, date, IDENT_STRICT));
if (!split_ident_line(&author, author_ident->buf, author_ident->len) &&
sane_ident_split(&author)) {
}
}
-static char *cut_ident_timestamp_part(char *string)
+static void split_ident_or_die(struct ident_split *id, const struct strbuf *buf)
+{
+ if (split_ident_line(id, buf->buf, buf->len) ||
+ !sane_ident_split(id))
+ die(_("Malformed ident string: '%s'"), buf->buf);
+}
+
+static int author_date_is_interesting(void)
{
- char *ket = strrchr(string, '>');
- if (!ket || ket[1] != ' ')
- die(_("Malformed ident string: '%s'"), string);
- *++ket = '\0';
- return ket;
+ return author_message || force_date;
+}
+
+static void adjust_comment_line_char(const struct strbuf *sb)
+{
+ char candidates[] = "#;@!$%^&|:";
+ char *candidate;
+ const char *p;
+
+ comment_line_char = candidates[0];
+ if (!memchr(sb->buf, comment_line_char, sb->len))
+ return;
+
+ p = sb->buf;
+ candidate = strchr(candidates, *p);
+ if (candidate)
+ *candidate = ' ';
+ for (p = sb->buf; *p; p++) {
+ if ((p[0] == '\n' || p[0] == '\r') && p[1]) {
+ candidate = strchr(candidates, p[1]);
+ if (candidate)
+ *candidate = ' ';
+ }
+ }
+
+ for (p = candidates; *p == ' '; p++)
+ ;
+ if (!*p)
+ die(_("unable to select a comment character that is not used\n"
+ "in the current commit message"));
+ comment_line_char = *p;
}
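
adjust_comment_line_char() crosses out every candidate character that already opens a line of the message and then takes the first survivor. A standalone sketch of that selection step (illustrative names, slightly simplified from the function above):

#include <stdio.h>
#include <string.h>

/* Return a character from the candidate set that does not begin any line of
 * `msg`, or '\0' if every candidate is already taken. */
static char pick_comment_char(const char *msg)
{
	char candidates[] = "#;@!$%^&|:";
	const char *p;
	char *c;

	for (p = msg; *p; p++) {
		int at_line_start = (p == msg || p[-1] == '\n' || p[-1] == '\r');

		if (at_line_start && (c = strchr(candidates, *p)) != NULL)
			*c = ' ';	/* candidate already in use; cross it out */
	}
	for (p = candidates; *p == ' '; p++)
		;			/* skip crossed-out candidates */
	return *p;
}

int main(void)
{
	const char *msg = "# leftover scissors line\n; another marker\nbody\n";

	printf("chosen comment char: %c\n", pick_comment_char(msg));	/* '@' */
	return 0;
}
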
static int prepare_to_commit(const char *index_file, const char *prefix,
} else if (use_message) {
char *buffer;
buffer = strstr(use_message_buffer, "\n\n");
- if (!use_editor && (!buffer || buffer[2] == '\0'))
- die(_("commit has empty message"));
- strbuf_add(&sb, buffer + 2, strlen(buffer + 2));
+ if (buffer)
+ strbuf_add(&sb, buffer + 2, strlen(buffer + 2));
hook_arg1 = "commit";
hook_arg2 = use_message;
} else if (fixup_message) {
if (fwrite(sb.buf, 1, sb.len, s->fp) < sb.len)
die_errno(_("could not write commit template"));
+ if (auto_comment_line_char)
+ adjust_comment_line_char(&sb);
strbuf_release(&sb);
/* This checks if committer ident is explicitly given */
if (use_editor && include_status) {
int ident_shown = 0;
int saved_color_setting;
- char *ai_tmp, *ci_tmp;
+ struct ident_split ci, ai;
+
if (whence != FROM_COMMIT) {
if (cleanup_mode == CLEANUP_SCISSORS)
wt_status_add_cut_line(s->fp);
status_printf_ln(s, GIT_COLOR_NORMAL,
"%s", only_include_assumed);
- ai_tmp = cut_ident_timestamp_part(author_ident->buf);
- ci_tmp = cut_ident_timestamp_part(committer_ident.buf);
- if (strcmp(author_ident->buf, committer_ident.buf))
+ split_ident_or_die(&ai, author_ident);
+ split_ident_or_die(&ci, &committer_ident);
+
+ if (ident_cmp(&ai, &ci))
+ status_printf_ln(s, GIT_COLOR_NORMAL,
+ _("%s"
+ "Author: %.*s <%.*s>"),
+ ident_shown++ ? "" : "\n",
+ (int)(ai.name_end - ai.name_begin), ai.name_begin,
+ (int)(ai.mail_end - ai.mail_begin), ai.mail_begin);
+
+ if (author_date_is_interesting())
status_printf_ln(s, GIT_COLOR_NORMAL,
_("%s"
- "Author: %s"),
+ "Date: %s"),
ident_shown++ ? "" : "\n",
- author_ident->buf);
+ show_ident_date(&ai, DATE_NORMAL));
if (!committer_ident_sufficiently_given())
status_printf_ln(s, GIT_COLOR_NORMAL,
_("%s"
- "Committer: %s"),
+ "Committer: %.*s <%.*s>"),
ident_shown++ ? "" : "\n",
- committer_ident.buf);
+ (int)(ci.name_end - ci.name_begin), ci.name_begin,
+ (int)(ci.mail_end - ci.mail_begin), ci.mail_begin);
if (ident_shown)
- status_printf_ln(s, GIT_COLOR_NORMAL, "");
+ status_printf_ln(s, GIT_COLOR_NORMAL, "%s", "");
saved_color_setting = s->use_color;
s->use_color = 0;
commitable = run_status(s->fp, index_file, prefix, 1, s);
s->use_color = saved_color_setting;
-
- *ai_tmp = ' ';
- *ci_tmp = ' ';
} else {
unsigned char sha1[20];
const char *parent = "HEAD";
if (get_sha1(parent, sha1))
commitable = !!active_nr;
- else
- commitable = index_differs_from(parent, 0);
+ else {
+ /*
+			 * Unless the user explicitly requested a submodule
+			 * ignore mode on the command line, we do not ignore
+			 * any changed submodule SHA-1s when comparing index
+			 * and parent, no matter what is configured. Otherwise
+			 * we would not commit manually staged submodules,
+			 * which would be really confusing.
+ */
+ int diff_flags = DIFF_OPT_OVERRIDE_SUBMODULE_CONFIG;
+ if (ignore_submodule_arg &&
+ !strcmp(ignore_submodule_arg, "all"))
+ diff_flags |= DIFF_OPT_IGNORE_SUBMODULES;
+ commitable = index_differs_from(parent, diff_flags);
+ }
}
strbuf_release(&committer_ident);
static int template_untouched(struct strbuf *sb)
{
struct strbuf tmpl = STRBUF_INIT;
- char *start;
+ const char *start;
if (cleanup_mode == CLEANUP_NONE && sb->len)
return 0;
return 0;
stripspace(&tmpl, cleanup_mode == CLEANUP_ALL);
- start = (char *)skip_prefix(sb->buf, tmpl.buf);
- if (!start)
+ if (!skip_prefix(sb->buf, tmpl.buf, &start))
start = sb->buf;
strbuf_release(&tmpl);
return rest_is_empty(sb, start - sb->buf);
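
This hunk (and the parse_merge_opt hunk further down) switches to the three-argument skip_prefix(), which reports whether the prefix matched and stores a pointer to the remainder, so callers no longer cast away const or NULL-check a returned pointer. A hedged sketch of the idiom; handle_subtree_opt() is a made-up caller:

#include "git-compat-util.h"

static void handle_subtree_opt(const char *s)
{
	const char *arg;

	if (skip_prefix(s, "subtree=", &arg))
		printf("subtree shift prefix: '%s'\n", arg); /* arg points past "subtree=" */
	else
		printf("'%s' has no subtree= prefix\n", s);
}
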
strbuf_addstr(&format, "\n Author: ");
strbuf_addbuf_percentquote(&format, &author_ident);
}
+ if (author_date_is_interesting()) {
+ struct strbuf date = STRBUF_INIT;
+ format_commit_message(commit, "%ad", &date, &pctx);
+ strbuf_addstr(&format, "\n Date: ");
+ strbuf_addbuf_percentquote(&format, &date);
+ strbuf_release(&date);
+ }
if (!committer_ident_sufficiently_given()) {
strbuf_addstr(&format, "\n Committer: ");
strbuf_addbuf_percentquote(&format, &committer_ident);
append_merge_tag_headers(parents, &tail);
}
- if (commit_tree_extended(&sb, active_cache_tree->sha1, parents, sha1,
- author_ident.buf, sign_commit, extra)) {
+ if (commit_tree_extended(sb.buf, sb.len, active_cache_tree->sha1,
+ parents, sha1, author_ident.buf, sign_commit, extra)) {
rollback_index_files();
die(_("failed to write commit object"));
}
? NULL
: current_head->object.sha1,
0, NULL);
+ if (!ref_lock) {
+ rollback_index_files();
+ die(_("cannot lock HEAD ref"));
+ }
nl = strchr(sb.buf, '\n');
if (nl)
strbuf_insert(&sb, 0, reflog_msg, strlen(reflog_msg));
strbuf_insert(&sb, strlen(reflog_msg), ": ", 2);
- if (!ref_lock) {
- rollback_index_files();
- die(_("cannot lock HEAD ref"));
- }
if (write_ref_sha1(ref_lock, sha1, sb.buf) < 0) {
rollback_index_files();
die(_("cannot update HEAD ref"));
static int allow_rerere_auto;
static int abort_current_merge;
static int show_progress = -1;
-static int default_to_upstream;
+static int default_to_upstream = 1;
static const char *sign_commit;
static struct strategy all_strategy[] = {
const char *argv_gc_auto[] = { "gc", "--auto", NULL };
update_ref(reflog_message.buf, "HEAD",
new_head, head, 0,
- DIE_ON_ERR);
+ UPDATE_REFS_DIE_ON_ERR);
/*
* We ignore errors in 'gc --auto', since the
* user should see them.
struct commit_list *remoteheads,
struct commit *head, const char *head_arg)
{
- int index_fd;
struct lock_file *lock = xcalloc(1, sizeof(struct lock_file));
- index_fd = hold_locked_index(lock, 1);
+ hold_locked_index(lock, 1);
refresh_cache(REFRESH_QUIET);
if (active_cache_changed &&
- (write_cache(index_fd, active_cache, active_nr) ||
- commit_locked_index(lock)))
+ write_locked_index(&the_index, lock, COMMIT_LOCK))
return error(_("Unable to write index."));
rollback_lock_file(lock);
int clean, x;
struct commit *result;
struct lock_file *lock = xcalloc(1, sizeof(struct lock_file));
- int index_fd;
struct commit_list *reversed = NULL;
struct merge_options o;
struct commit_list *j;
for (j = common; j; j = j->next)
commit_list_insert(j->item, &reversed);
- index_fd = hold_locked_index(lock, 1);
+ hold_locked_index(lock, 1);
clean = merge_recursive(&o, head,
remoteheads->item, reversed, &result);
if (active_cache_changed &&
- (write_cache(index_fd, active_cache, active_nr) ||
- commit_locked_index(lock)))
+ write_locked_index(&the_index, lock, COMMIT_LOCK))
die (_("unable to write %s"), get_index_file());
rollback_lock_file(lock);
return clean ? 0 : 1;
parent->next->item = remoteheads->item;
parent->next->next = NULL;
prepare_to_commit(remoteheads);
- if (commit_tree(&merge_msg, result_tree, parent, result_commit, NULL,
- sign_commit))
+ if (commit_tree(merge_msg.buf, merge_msg.len, result_tree, parent,
+ result_commit, NULL, sign_commit))
die(_("failed to write commit object"));
finish(head, remoteheads, result_commit, "In-index merge");
drop_save();
commit_list_insert(head, &parents);
strbuf_addch(&merge_msg, '\n');
prepare_to_commit(remoteheads);
- if (commit_tree(&merge_msg, result_tree, parents, result_commit,
- NULL, sign_commit))
+ if (commit_tree(merge_msg.buf, merge_msg.len, result_tree, parents,
+ result_commit, NULL, sign_commit))
die(_("failed to write commit object"));
strbuf_addf(&buf, "Merge made by the '%s' strategy.", wt_strategy);
finish(head, remoteheads, result_commit, buf.buf);
die(_("%s - not something we can merge"), argv[0]);
read_empty(remote_head->object.sha1, 0);
update_ref("initial pull", "HEAD", remote_head->object.sha1,
- NULL, 0, DIE_ON_ERR);
+ NULL, 0, UPDATE_REFS_DIE_ON_ERR);
goto done;
} else {
struct strbuf merge_names = STRBUF_INIT;
printf(_("Commit %s has a good GPG signature by %s\n"),
hex, signature_check.signer);
- free(signature_check.gpg_output);
- free(signature_check.gpg_status);
- free(signature_check.signer);
- free(signature_check.key);
+ signature_check_clear(&signature_check);
}
}
}
update_ref("updating ORIG_HEAD", "ORIG_HEAD", head_commit->object.sha1,
- NULL, 0, DIE_ON_ERR);
+ NULL, 0, UPDATE_REFS_DIE_ON_ERR);
if (remoteheads && !common)
; /* No common ancestors found. We need a real merge. */
int cmd_mv(int argc, const char **argv, const char *prefix)
{
- int i, newfd, gitmodules_modified = 0;
+ int i, gitmodules_modified = 0;
int verbose = 0, show_only = 0, force = 0, ignore_errors = 0;
struct option builtin_mv_options[] = {
OPT__VERBOSE(&verbose, N_("be verbose")),
if (--argc < 1)
usage_with_options(builtin_mv_usage, builtin_mv_options);
- newfd = hold_locked_index(&lock_file, 1);
+ hold_locked_index(&lock_file, 1);
if (read_cache() < 0)
die(_("index file corrupt"));
}
} else if (cache_name_pos(src, length) < 0)
bad = _("not under version control");
- else if (lstat(dst, &st) == 0) {
+ else if (lstat(dst, &st) == 0 &&
+ (!ignore_case || strcasecmp(src, dst))) {
bad = _("destination exists");
if (force) {
/*
stage_updated_gitmodules();
if (active_cache_changed) {
- if (write_cache(newfd, active_cache, active_nr) ||
- commit_locked_index(&lock_file))
+ if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
die(_("Unable to write new index file"));
}
if (reset_type == MIXED || reset_type == HARD) {
tree = parse_tree_indirect(sha1);
- prime_cache_tree(&active_cache_tree, tree);
+ prime_cache_tree(&the_index, tree);
}
return 0;
static void print_new_head_line(struct commit *commit)
{
const char *hex, *body;
- char *msg;
+ const char *msg;
hex = find_unique_abbrev(commit->object.sha1, DEFAULT_ABBREV);
printf(_("HEAD is now at %s"), hex);
}
else
printf("\n");
- logmsg_free(msg, commit);
+ unuse_commit_buffer(commit, msg);
}
static void update_index_from_diff(struct diff_queue_struct *q,
if (!get_sha1("HEAD", sha1_orig)) {
orig = sha1_orig;
set_reflog_message(&msg, "updating ORIG_HEAD", NULL);
- update_ref(msg.buf, "ORIG_HEAD", orig, old_orig, 0, MSG_ON_ERR);
+ update_ref(msg.buf, "ORIG_HEAD", orig, old_orig, 0,
+ UPDATE_REFS_MSG_ON_ERR);
} else if (old_orig)
delete_ref("ORIG_HEAD", old_orig, 0);
set_reflog_message(&msg, "updating HEAD", rev);
- update_ref_status = update_ref(msg.buf, "HEAD", sha1, orig, 0, MSG_ON_ERR);
+ update_ref_status = update_ref(msg.buf, "HEAD", sha1, orig, 0,
+ UPDATE_REFS_MSG_ON_ERR);
strbuf_release(&msg);
return update_ref_status;
}
if (reset_type != SOFT) {
struct lock_file *lock = xcalloc(1, sizeof(*lock));
- int newfd = hold_locked_index(lock, 1);
+ hold_locked_index(lock, 1);
if (reset_type == MIXED) {
int flags = quiet ? REFRESH_QUIET : REFRESH_IN_PORCELAIN;
if (read_from_tree(&pathspec, sha1, intent_to_add))
die(_("Could not reset index file to revision '%s'."), rev);
}
- if (write_cache(newfd, active_cache, active_nr) ||
- commit_locked_index(lock))
+ if (write_locked_index(&the_index, lock, COMMIT_LOCK))
die(_("Could not write new index file."));
}
#include "parse-options.h"
#include "pathspec.h"
#include "dir.h"
+ #include "split-index.h"
/*
* Default to not allowing changes to the list of files. The
active_cache[pos]->ce_flags |= flag;
else
active_cache[pos]->ce_flags &= ~flag;
- cache_tree_invalidate_path(active_cache_tree, path);
- active_cache_changed = 1;
+ active_cache[pos]->ce_flags |= CE_UPDATE_IN_BASE;
+ cache_tree_invalidate_path(&the_index, path);
+ active_cache_changed |= CE_ENTRY_CHANGED;
return 0;
}
return -1;
default:
goto fail;
}
- cache_tree_invalidate_path(active_cache_tree, path);
- active_cache_changed = 1;
+ cache_tree_invalidate_path(&the_index, path);
+ ce->ce_flags |= CE_UPDATE_IN_BASE;
+ active_cache_changed |= CE_ENTRY_CHANGED;
report("chmod %cx '%s'", flip, path);
return;
fail:
unsigned long ul;
char *endp;
+ if (!arg)
+ return -1;
+
errno = 0;
ul = strtoul(arg, &endp, 8);
if (errno || endp == arg || *endp != ',' || (unsigned int) ul != ul)
char set_executable_bit = 0;
struct refresh_params refresh_args = {0, &has_errors};
int lock_error = 0;
+ int split_index = -1;
struct lock_file *lock_file;
struct parse_opt_ctx_t ctx;
int parseopt_state = PARSE_OPT_UNKNOWN;
resolve_undo_clear_callback},
OPT_INTEGER(0, "index-version", &preferred_index_format,
N_("write index in this format")),
+ OPT_BOOL(0, "split-index", &split_index,
+ N_("enable or disable split index")),
OPT_END()
};
INDEX_FORMAT_LB, INDEX_FORMAT_UB);
if (the_index.version != preferred_index_format)
- active_cache_changed = 1;
+ active_cache_changed |= SOMETHING_CHANGED;
the_index.version = preferred_index_format;
}
strbuf_release(&buf);
}
+ if (split_index > 0) {
+ init_split_index(&the_index);
+ the_index.cache_changed |= SPLIT_INDEX_ORDERED;
+ } else if (!split_index && the_index.split_index) {
+ /*
+		/*
+		 * We can't call discard_split_index(&the_index) here,
+		 * because that would destroy split_index->base->cache[],
+		 * which may be shared with the_index.cache[]. So we
+		 * knowingly leak a little memory here.
+ */
+ the_index.split_index = NULL;
+ the_index.cache_changed |= SOMETHING_CHANGED;
+ }
+
if (active_cache_changed) {
if (newfd < 0) {
if (refresh_args.flags & REFRESH_QUIET)
exit(128);
unable_to_lock_index_die(get_index_file(), lock_error);
}
- if (write_cache(newfd, active_cache, active_nr) ||
- commit_locked_index(lock_file))
+ if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
die("Unable to write new index file");
}
#define S_IFGITLINK 0160000
#define S_ISGITLINK(m) (((m) & S_IFMT) == S_IFGITLINK)
+/*
+ * Some mode bits are also used internally for computations.
+ *
+ * They *must* not overlap with any valid modes, and they *must* not be emitted
+ * to the outside world, i.e. appear on disk or over the network. In other
+ * words, they are temporary in-core fields that we use internally and that
+ * have to stay in-house.
+ *
+ * (This approach is valid because the standard S_IF* values fit into 16 bits,
+ * and in the Git codebase a mode is an `unsigned int`, which is assumed to be
+ * at least 32 bits wide.)
+ */
+
+/* used internally in tree-diff */
+#define S_DIFFTREE_IFXMIN_NEQ 0x80000000
+
+
/*
* Intensive research over the course of many years has shown that
* port 9418 is totally unused by anything else. Or
unsigned int ce_mode;
unsigned int ce_flags;
unsigned int ce_namelen;
+ unsigned int index; /* for link extension */
unsigned char sha1[20];
char name[FLEX_ARRAY]; /* more */
};
#define CE_STAGESHIFT 12
/*
- * Range 0xFFFF0000 in ce_flags is divided into
+ * Range 0xFFFF0FFF in ce_flags is divided into
* two parts: in-memory flags and on-disk ones.
* Flags in CE_EXTENDED_FLAGS will get saved on-disk
* if you want to save a new flag, add it in
/* used to temporarily mark paths matched by pathspecs */
#define CE_MATCHED (1 << 26)
+ #define CE_UPDATE_IN_BASE (1 << 27)
+ #define CE_STRIP_NAME (1 << 28)
+
/*
* Extended on-disk flags
*/
#define cache_entry_size(len) (offsetof(struct cache_entry,name) + (len) + 1)
+ #define SOMETHING_CHANGED (1 << 0) /* unclassified changes go here */
+ #define CE_ENTRY_CHANGED (1 << 1)
+ #define CE_ENTRY_REMOVED (1 << 2)
+ #define CE_ENTRY_ADDED (1 << 3)
+ #define RESOLVE_UNDO_CHANGED (1 << 4)
+ #define CACHE_TREE_CHANGED (1 << 5)
+ #define SPLIT_INDEX_ORDERED (1 << 6)
+
+ struct split_index;
struct index_state {
struct cache_entry **cache;
unsigned int version;
unsigned int cache_nr, cache_alloc, cache_changed;
struct string_list *resolve_undo;
struct cache_tree *cache_tree;
+ struct split_index *split_index;
struct cache_time timestamp;
unsigned name_hash_initialized : 1,
initialized : 1;
#define read_cache_preload(pathspec) read_index_preload(&the_index, (pathspec))
#define is_cache_unborn() is_index_unborn(&the_index)
#define read_cache_unmerged() read_index_unmerged(&the_index)
- #define write_cache(newfd, cache, entries) write_index(&the_index, (newfd))
#define discard_cache() discard_index(&the_index)
#define unmerged_cache() unmerged_index(&the_index)
#define cache_name_pos(name, namelen) index_name_pos(&the_index,(name),(namelen))
} while (0)
/* Initialize and use the cache information */
+ struct lock_file;
extern int read_index(struct index_state *);
extern int read_index_preload(struct index_state *, const struct pathspec *pathspec);
+ extern int do_read_index(struct index_state *istate, const char *path,
+				int must_exist); /* for testing only! */
extern int read_index_from(struct index_state *, const char *path);
extern int is_index_unborn(struct index_state *);
extern int read_index_unmerged(struct index_state *);
- extern int write_index(struct index_state *, int newfd);
+ #define COMMIT_LOCK (1 << 0)
+ #define CLOSE_LOCK (1 << 1)
+ extern int write_locked_index(struct index_state *, struct lock_file *lock, unsigned flags);
extern int discard_index(struct index_state *);
extern int unmerged_index(const struct index_state *);
extern int verify_path(const char *path);
#define ADD_CACHE_SKIP_DFCHECK 4 /* Ok to skip DF conflict checks */
#define ADD_CACHE_JUST_APPEND 8 /* Append only; tree.c::read_tree() */
#define ADD_CACHE_NEW_ONLY 16 /* Do not replace existing ones */
+ #define ADD_CACHE_KEEP_CACHE_TREE 32 /* Do not invalidate cache-tree */
extern int add_index_entry(struct index_state *, struct cache_entry *ce, int option);
extern void rename_index_entry_at(struct index_state *, int pos, const char *new_name);
extern int remove_index_entry_at(struct index_state *, int pos);
extern void update_index_if_able(struct index_state *, struct lock_file *);
extern int hold_locked_index(struct lock_file *, int);
- extern int commit_locked_index(struct lock_file *);
extern void set_alternate_index_output(const char *);
extern int close_lock_file(struct lock_file *);
extern void rollback_lock_file(struct lock_file *);
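
Callers throughout this diff converge on the same shape with the new API: take the index lock, update the in-core index, then hand both to write_locked_index(), which writes and either commits (COMMIT_LOCK) or merely closes (CLOSE_LOCK) the lockfile. A hedged sketch of the common COMMIT_LOCK pattern, modeled on the builtin/merge.c hunks; update_and_write_index() is a hypothetical caller:

#include "cache.h"

static struct lock_file lock_file;

static void update_and_write_index(void)
{
	hold_locked_index(&lock_file, 1);	/* 1 = die if we cannot lock */
	if (read_cache() < 0)
		die(_("index file corrupt"));

	/* ... modify the in-core index here ... */
	refresh_cache(REFRESH_QUIET);

	if (active_cache_changed &&
	    write_locked_index(&the_index, &lock_file, COMMIT_LOCK))
		die(_("unable to write new index file"));
	/* a no-op if the lock was already committed above */
	rollback_lock_file(&lock_file);
}
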
* that is subject to stripspace.
*/
extern char comment_line_char;
+extern int auto_comment_line_char;
enum branch_track {
BRANCH_TRACK_UNSPECIFIED = -1,
int longest_ancestor_length(const char *path, struct string_list *prefixes);
char *strip_path_suffix(const char *path, const char *suffix);
int daemon_avoid_alias(const char *path);
-int offset_1st_component(const char *path);
/* object replacement */
#define LOOKUP_REPLACE_OBJECT 1
extern int base_name_compare(const char *name1, int len1, int mode1, const char *name2, int len2, int mode2);
extern int df_name_compare(const char *name1, int len1, int mode1, const char *name2, int len2, int mode2);
-extern int cache_name_compare(const char *name1, int len1, const char *name2, int len2);
+extern int name_compare(const char *name1, size_t len1, const char *name2, size_t len2);
extern int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2);
extern void *read_object_with_reference(const unsigned char *sha1,
*/
extern int split_ident_line(struct ident_split *, const char *, int);
+/*
+ * Like show_date, but pull the timestamp and tz parameters from
+ * the ident_split. It will also sanity-check the values and produce
+ * a well-known sentinel date if they appear bogus.
+ */
+const char *show_ident_date(const struct ident_split *id, enum date_mode mode);
+
/*
* Compare split idents for equality or strict ordering. Note that we
* compare only the ident part of the line, ignoring any timestamp.
extern int ident_cmp(const struct ident_split *, const struct ident_split *);
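
show_ident_date() is meant to pair with split_ident_line(): parse an ident line once, then print the name, mail, and a formatted timestamp from the same ident_split. A hedged sketch mirroring the status-template hunk above; print_author_line() is a hypothetical helper:

#include "cache.h"

static void print_author_line(const char *ident, int len)
{
	struct ident_split id;

	if (split_ident_line(&id, ident, len))
		die(_("Malformed ident string: '%s'"), ident);

	printf("Author: %.*s <%.*s>\nDate:   %s\n",
	       (int)(id.name_end - id.name_begin), id.name_begin,
	       (int)(id.mail_end - id.mail_begin), id.mail_begin,
	       show_ident_date(&id, DATE_NORMAL));
}
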
struct checkout {
+ struct index_state *istate;
const char *base_dir;
int base_dir_len;
unsigned force:1,
extern int checkout_entry(struct cache_entry *ce, const struct checkout *state, char *topath);
struct cache_def {
- char path[PATH_MAX + 1];
- int len;
+ struct strbuf path;
int flags;
int track_flags;
int prefix_len_stat_func;
};
+#define CACHE_DEF_INIT { STRBUF_INIT, 0, 0, 0 }
+static inline void cache_def_free(struct cache_def *cache)
+{
+ strbuf_release(&cache->path);
+}
extern int has_symlink_leading_path(const char *name, int len);
extern int threaded_has_symlink_leading_path(struct cache_def *, const char *, int);
extern int git_env_bool(const char *, int);
extern int git_config_system(void);
extern int config_error_nonbool(const char *);
-#if defined(__GNUC__) && ! defined(__clang__)
-#define config_error_nonbool(s) (config_error_nonbool(s), -1)
+#if defined(__GNUC__)
+#define config_error_nonbool(s) (config_error_nonbool(s), const_error())
#endif
extern const char *get_log_output_encoding(void);
extern const char *get_commit_output_encoding(void);
extern ssize_t read_in_full(int fd, void *buf, size_t count);
extern ssize_t write_in_full(int fd, const void *buf, size_t count);
+extern ssize_t pread_in_full(int fd, void *buf, size_t count, off_t offset);
+
static inline ssize_t write_str_in_full(int fd, const char *str)
{
return write_in_full(fd, str, strlen(str));
static struct commit *make_virtual_commit(struct tree *tree, const char *comment)
{
- struct commit *commit = xcalloc(1, sizeof(struct commit));
+ struct commit *commit = alloc_commit_node();
struct merge_remote_desc *desc = xmalloc(sizeof(*desc));
desc->name = comment;
printf(_("(bad commit)\n"));
else {
const char *title;
- int len = find_commit_subject(commit->buffer, &title);
+ const char *msg = get_commit_buffer(commit, NULL);
+ int len = find_commit_subject(msg, &title);
if (len)
printf("%.*s\n", len, title);
+ unuse_commit_buffer(commit, msg);
}
}
}
active_cache_tree = cache_tree();
if (!cache_tree_fully_valid(active_cache_tree) &&
- cache_tree_update(active_cache_tree,
- (const struct cache_entry * const *)active_cache,
- active_nr, 0) < 0)
+ cache_tree_update(&the_index, 0) < 0)
die(_("error building trees"));
result = lookup_tree(active_cache_tree->sha1);
return -1;
}
if (update_working_directory) {
+ if (ignore_case) {
+ struct cache_entry *ce;
+ ce = cache_file_exists(path, strlen(path), ignore_case);
+ if (ce && ce_stage(ce) == 0)
+ return 0;
+ }
if (remove_path(path))
return -1;
}
return 0;
}
+/* add a string to a strbuf, but converting "/" to "_" */
+static void add_flattened_path(struct strbuf *out, const char *s)
+{
+ size_t i = out->len;
+ strbuf_addstr(out, s);
+ for (; i < out->len; i++)
+ if (out->buf[i] == '/')
+ out->buf[i] = '_';
+}
+
static char *unique_path(struct merge_options *o, const char *path, const char *branch)
{
- char *newpath = xmalloc(strlen(path) + 1 + strlen(branch) + 8 + 1);
+ struct strbuf newpath = STRBUF_INIT;
int suffix = 0;
struct stat st;
- char *p = newpath + strlen(path);
- strcpy(newpath, path);
- *(p++) = '~';
- strcpy(p, branch);
- for (; *p; ++p)
- if ('/' == *p)
- *p = '_';
- while (string_list_has_string(&o->current_file_set, newpath) ||
- string_list_has_string(&o->current_directory_set, newpath) ||
- lstat(newpath, &st) == 0)
- sprintf(p, "_%d", suffix++);
-
- string_list_insert(&o->current_file_set, newpath);
- return newpath;
+ size_t base_len;
+
+ strbuf_addf(&newpath, "%s~", path);
+ add_flattened_path(&newpath, branch);
+
+ base_len = newpath.len;
+ while (string_list_has_string(&o->current_file_set, newpath.buf) ||
+ string_list_has_string(&o->current_directory_set, newpath.buf) ||
+ lstat(newpath.buf, &st) == 0) {
+ strbuf_setlen(&newpath, base_len);
+ strbuf_addf(&newpath, "_%d", suffix++);
+ }
+
+ string_list_insert(&o->current_file_set, newpath.buf);
+ return strbuf_detach(&newpath, NULL);
}
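
The rewrite above replaces manual malloc/strcpy/sprintf bookkeeping with strbuf, which grows as needed and can be detached once the final length is known. A minimal hedged sketch of the flattening step on its own; flattened_backup_name() is a made-up helper, not merge-recursive API:

#include "cache.h"

/* build "path~branch" with every '/' in branch rewritten to '_' */
static char *flattened_backup_name(const char *path, const char *branch)
{
	struct strbuf buf = STRBUF_INIT;
	size_t i;

	strbuf_addf(&buf, "%s~", path);
	i = buf.len;
	strbuf_addstr(&buf, branch);
	for (; i < buf.len; i++)
		if (buf.buf[i] == '/')
			buf.buf[i] = '_';

	return strbuf_detach(&buf, NULL);	/* caller frees the result */
}
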
static int dir_in_way(const char *path, int check_working_copy)
char *side2 = NULL;
struct merge_file_info mfi;
- if (filename1) {
- side1 = xmalloc(strlen(branch1) + strlen(filename1) + 2);
- sprintf(side1, "%s:%s", branch1, filename1);
- }
- if (filename2) {
- side2 = xmalloc(strlen(branch2) + strlen(filename2) + 2);
- sprintf(side2, "%s:%s", branch2, filename2);
- }
+ if (filename1)
+ side1 = xstrfmt("%s:%s", branch1, filename1);
+ if (filename2)
+ side2 = xstrfmt("%s:%s", branch2, filename2);
mfi = merge_file_1(o, one, a, b,
side1 ? side1 : branch1, side2 ? side2 : branch2);
const unsigned char **base_list,
struct commit **result)
{
- int clean, index_fd;
+ int clean;
struct lock_file *lock = xcalloc(1, sizeof(struct lock_file));
struct commit *head_commit = get_ref(head, o->branch1);
struct commit *next_commit = get_ref(merge, o->branch2);
}
}
- index_fd = hold_locked_index(lock, 1);
+ hold_locked_index(lock, 1);
clean = merge_recursive(o, head_commit, next_commit, ca,
result);
if (active_cache_changed &&
- (write_cache(index_fd, active_cache, active_nr) ||
- commit_locked_index(lock)))
+ write_locked_index(&the_index, lock, COMMIT_LOCK))
return error(_("Unable to write index."));
return clean ? 0 : 1;
int parse_merge_opt(struct merge_options *o, const char *s)
{
+ const char *arg;
+
if (!s || !*s)
return -1;
if (!strcmp(s, "ours"))
o->recursive_variant = MERGE_RECURSIVE_THEIRS;
else if (!strcmp(s, "subtree"))
o->subtree_shift = "";
- else if (starts_with(s, "subtree="))
- o->subtree_shift = s + strlen("subtree=");
+ else if (skip_prefix(s, "subtree=", &arg))
+ o->subtree_shift = arg;
else if (!strcmp(s, "patience"))
o->xdl_opts = DIFF_WITH_ALG(o, PATIENCE_DIFF);
else if (!strcmp(s, "histogram"))
o->xdl_opts = DIFF_WITH_ALG(o, HISTOGRAM_DIFF);
- else if (starts_with(s, "diff-algorithm=")) {
- long value = parse_algorithm_value(s + strlen("diff-algorithm="));
+ else if (skip_prefix(s, "diff-algorithm=", &arg)) {
+ long value = parse_algorithm_value(arg);
if (value < 0)
return -1;
/* clear out previous settings */
o->renormalize = 1;
else if (!strcmp(s, "no-renormalize"))
o->renormalize = 0;
- else if (starts_with(s, "rename-threshold=")) {
- const char *score = s + strlen("rename-threshold=");
- if ((o->rename_score = parse_rename_score(&score)) == -1 || *score != 0)
+ else if (skip_prefix(s, "rename-threshold=", &arg)) {
+ if ((o->rename_score = parse_rename_score(&arg)) == -1 || *arg != 0)
return -1;
}
else
const char **xopts, struct commit_list *common,
const char *head_arg, struct commit_list *remotes)
{
- const char **args;
- int i = 0, x = 0, ret;
+ struct argv_array args = ARGV_ARRAY_INIT;
+ int i, ret;
struct commit_list *j;
- struct strbuf buf = STRBUF_INIT;
- args = xmalloc((4 + xopts_nr + commit_list_count(common) +
- commit_list_count(remotes)) * sizeof(char *));
- strbuf_addf(&buf, "merge-%s", strategy);
- args[i++] = buf.buf;
- for (x = 0; x < xopts_nr; x++) {
- char *s = xmalloc(strlen(xopts[x])+2+1);
- strcpy(s, "--");
- strcpy(s+2, xopts[x]);
- args[i++] = s;
- }
+ argv_array_pushf(&args, "merge-%s", strategy);
+ for (i = 0; i < xopts_nr; i++)
+ argv_array_pushf(&args, "--%s", xopts[i]);
for (j = common; j; j = j->next)
- args[i++] = xstrdup(merge_argument(j->item));
- args[i++] = "--";
- args[i++] = head_arg;
+ argv_array_push(&args, merge_argument(j->item));
+ argv_array_push(&args, "--");
+ argv_array_push(&args, head_arg);
for (j = remotes; j; j = j->next)
- args[i++] = xstrdup(merge_argument(j->item));
- args[i] = NULL;
- ret = run_command_v_opt(args, RUN_GIT_CMD);
- strbuf_release(&buf);
- i = 1;
- for (x = 0; x < xopts_nr; x++)
- free((void *)args[i++]);
- for (j = common; j; j = j->next)
- free((void *)args[i++]);
- i += 2;
- for (j = remotes; j; j = j->next)
- free((void *)args[i++]);
- free(args);
+ argv_array_push(&args, merge_argument(j->item));
+
+ ret = run_command_v_opt(args.argv, RUN_GIT_CMD);
+ argv_array_clear(&args);
+
discard_cache();
if (read_cache() < 0)
die(_("failed to read the cache"));
struct tree *trees[MAX_UNPACK_TREES];
struct unpack_trees_options opts;
struct tree_desc t[MAX_UNPACK_TREES];
- int i, fd, nr_trees = 0;
+ int i, nr_trees = 0;
struct dir_struct dir;
struct lock_file *lock_file = xcalloc(1, sizeof(struct lock_file));
refresh_cache(REFRESH_QUIET);
- fd = hold_locked_index(lock_file, 1);
+ hold_locked_index(lock_file, 1);
memset(&trees, 0, sizeof(trees));
memset(&opts, 0, sizeof(opts));
}
if (unpack_trees(nr_trees, t, &opts))
return -1;
- if (write_cache(fd, active_cache, active_nr) ||
- commit_locked_index(lock_file))
+ if (write_locked_index(&the_index, lock_file, COMMIT_LOCK))
die(_("unable to write new index file"));
return 0;
}
#include "resolve-undo.h"
#include "strbuf.h"
#include "varint.h"
+ #include "split-index.h"
+ #include "sigchain.h"
static struct cache_entry *refresh_cache_entry(struct cache_entry *ce,
unsigned int options);
#define CACHE_EXT(s) ( (s[0]<<24)|(s[1]<<16)|(s[2]<<8)|(s[3]) )
#define CACHE_EXT_TREE 0x54524545 /* "TREE" */
#define CACHE_EXT_RESOLVE_UNDO 0x52455543 /* "REUC" */
+ #define CACHE_EXT_LINK 0x6c696e6b /* "link" */
+
+ /* changes that can be kept in $GIT_DIR/index (basically all extensions) */
+ #define EXTMASK (RESOLVE_UNDO_CHANGED | CACHE_TREE_CHANGED | \
+ CE_ENTRY_ADDED | CE_ENTRY_REMOVED | CE_ENTRY_CHANGED | \
+ SPLIT_INDEX_ORDERED)
struct index_state the_index;
+ static const char *alternate_index_output;
static void set_index_entry(struct index_state *istate, int nr, struct cache_entry *ce)
{
{
struct cache_entry *old = istate->cache[nr];
+ replace_index_entry_in_base(istate, old, ce);
remove_name_hash(istate, old);
free(old);
set_index_entry(istate, nr, ce);
- istate->cache_changed = 1;
+ ce->ce_flags |= CE_UPDATE_IN_BASE;
+ istate->cache_changed |= CE_ENTRY_CHANGED;
}
void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
copy_cache_entry(new, old);
new->ce_flags &= ~CE_HASHED;
new->ce_namelen = namelen;
+ new->index = 0;
memcpy(new->name, new_name, namelen + 1);
- cache_tree_invalidate_path(istate->cache_tree, old->name);
+ cache_tree_invalidate_path(istate, old->name);
remove_index_entry_at(istate, nr);
add_index_entry(istate, new, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
}
return c1 - c2;
}
-int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2)
+int name_compare(const char *name1, size_t len1, const char *name2, size_t len2)
{
- int len = len1 < len2 ? len1 : len2;
- int cmp;
-
- cmp = memcmp(name1, name2, len);
+ size_t min_len = (len1 < len2) ? len1 : len2;
+ int cmp = memcmp(name1, name2, min_len);
if (cmp)
return cmp;
if (len1 < len2)
return -1;
if (len1 > len2)
return 1;
+ return 0;
+}
+
+int cache_name_stage_compare(const char *name1, int len1, int stage1, const char *name2, int len2, int stage2)
+{
+ int cmp;
+
+ cmp = name_compare(name1, len1, name2, len2);
+ if (cmp)
+ return cmp;
if (stage1 < stage2)
return -1;
return 0;
}
-int cache_name_compare(const char *name1, int len1, const char *name2, int len2)
-{
- return cache_name_stage_compare(name1, len1, 0, name2, len2, 0);
-}
-
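
name_compare() factors the plain byte-wise comparison out of cache_name_stage_compare(): compare the common prefix with memcmp(), then let the shorter name sort first. A tiny standalone illustration of that ordering (name_compare_demo() is a local copy for the example, not the read-cache.c symbol):

#include <stdio.h>
#include <string.h>

static int name_compare_demo(const char *a, size_t la, const char *b, size_t lb)
{
	size_t min_len = (la < lb) ? la : lb;
	int cmp = memcmp(a, b, min_len);
	if (cmp)
		return cmp;
	/* equal prefix: the shorter name sorts first */
	return (la < lb) ? -1 : (la > lb) ? 1 : 0;
}

int main(void)
{
	/* "abc" < "abc/d" (prefix, shorter first) and "abc/d" < "abd" ('c' < 'd') */
	printf("%d %d\n",
	       name_compare_demo("abc", 3, "abc/d", 5) < 0,
	       name_compare_demo("abc/d", 5, "abd", 3) < 0);
	return 0;
}
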
static int index_name_stage_pos(const struct index_state *istate, const char *name, int namelen, int stage)
{
int first, last;
record_resolve_undo(istate, ce);
remove_name_hash(istate, ce);
- free(ce);
- istate->cache_changed = 1;
+ save_or_free_index_entry(istate, ce);
+ istate->cache_changed |= CE_ENTRY_REMOVED;
istate->cache_nr--;
if (pos >= istate->cache_nr)
return 0;
for (i = j = 0; i < istate->cache_nr; i++) {
if (ce_array[i]->ce_flags & CE_REMOVE) {
remove_name_hash(istate, ce_array[i]);
- free(ce_array[i]);
+ save_or_free_index_entry(istate, ce_array[i]);
}
else
ce_array[j++] = ce_array[i];
}
- istate->cache_changed = 1;
+ if (j == istate->cache_nr)
+ return;
+ istate->cache_changed |= CE_ENTRY_REMOVED;
istate->cache_nr = j;
}
int pos = index_name_pos(istate, path, strlen(path));
if (pos < 0)
pos = -pos-1;
- cache_tree_invalidate_path(istate->cache_tree, path);
+ cache_tree_invalidate_path(istate, path);
while (pos < istate->cache_nr && !strcmp(istate->cache[pos]->name, path))
remove_index_entry_at(istate, pos);
return 0;
* So we use the CE_ADDED flag to verify that the alias was an old
* one before we accept it as
*/
- static struct cache_entry *create_alias_ce(struct cache_entry *ce, struct cache_entry *alias)
+ static struct cache_entry *create_alias_ce(struct index_state *istate,
+ struct cache_entry *ce,
+ struct cache_entry *alias)
{
int len;
struct cache_entry *new;
new = xcalloc(1, cache_entry_size(len));
memcpy(new->name, alias->name, len);
copy_cache_entry(new, ce);
- free(ce);
+ save_or_free_index_entry(istate, ce);
return new;
}
set_object_name_for_intent_to_add_entry(ce);
if (ignore_case && alias && different_name(ce, alias))
- ce = create_alias_ce(ce, alias);
+ ce = create_alias_ce(istate, ce, alias);
ce->ce_flags |= CE_ADDED;
/* It was suspected to be racily clean, but it turns out to be Ok */
int skip_df_check = option & ADD_CACHE_SKIP_DFCHECK;
int new_only = option & ADD_CACHE_NEW_ONLY;
- cache_tree_invalidate_path(istate->cache_tree, ce->name);
+ if (!(option & ADD_CACHE_KEEP_CACHE_TREE))
+ cache_tree_invalidate_path(istate, ce->name);
pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce));
/* existing match? Just replace it. */
istate->cache + pos,
(istate->cache_nr - pos - 1) * sizeof(ce));
set_index_entry(istate, pos, ce);
- istate->cache_changed = 1;
+ istate->cache_changed |= CE_ENTRY_ADDED;
return 0;
}
!(ce->ce_flags & CE_VALID))
updated->ce_flags &= ~CE_VALID;
+ /* istate->cache_changed is updated in the caller */
return updated;
}
* means the index is not valid anymore.
*/
ce->ce_flags &= ~CE_VALID;
- istate->cache_changed = 1;
+ ce->ce_flags |= CE_UPDATE_IN_BASE;
+ istate->cache_changed |= CE_ENTRY_CHANGED;
}
if (quiet)
continue;
case CACHE_EXT_RESOLVE_UNDO:
istate->resolve_undo = resolve_undo_read(data, sz);
break;
+ case CACHE_EXT_LINK:
+ if (read_link_extension(istate, data, sz))
+ return -1;
+ break;
default:
if (*ext < 'A' || 'Z' < *ext)
return error("index uses %.4s extension, which we do not understand",
ce->ce_stat_data.sd_size = get_be32(&ondisk->size);
ce->ce_flags = flags & ~CE_NAMEMASK;
ce->ce_namelen = len;
+ ce->index = 0;
hashcpy(ce->sha1, ondisk->sha1);
memcpy(ce->name, name, len);
ce->name[len] = '\0';
}
/* remember to discard_cache() before reading a different cache! */
- int read_index_from(struct index_state *istate, const char *path)
+ int do_read_index(struct index_state *istate, const char *path, int must_exist)
{
int fd, i;
struct stat st;
istate->timestamp.nsec = 0;
fd = open(path, O_RDONLY);
if (fd < 0) {
- if (errno == ENOENT)
+ if (!must_exist && errno == ENOENT)
return 0;
- die_errno("index file open failed");
+ die_errno("%s: index file open failed", path);
}
if (fstat(fd, &st))
if (verify_hdr(hdr, mmap_size) < 0)
goto unmap;
- hashcpy(istate->sha1, (unsigned char *)hdr + mmap_size - 20);
+ hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - 20);
istate->version = ntohl(hdr->hdr_version);
istate->cache_nr = ntohl(hdr->hdr_entries);
istate->cache_alloc = alloc_nr(istate->cache_nr);
die("index file corrupt");
}
+ int read_index_from(struct index_state *istate, const char *path)
+ {
+ struct split_index *split_index;
+ int ret;
+
+ /* istate->initialized covers both .git/index and .git/sharedindex.xxx */
+ if (istate->initialized)
+ return istate->cache_nr;
+
+ ret = do_read_index(istate, path, 0);
+ split_index = istate->split_index;
+ if (!split_index)
+ return ret;
+
+ if (is_null_sha1(split_index->base_sha1))
+ return ret;
+
+ if (split_index->base)
+ discard_index(split_index->base);
+ else
+ split_index->base = xcalloc(1, sizeof(*split_index->base));
+ ret = do_read_index(split_index->base,
+ git_path("sharedindex.%s",
+ sha1_to_hex(split_index->base_sha1)), 1);
+ if (hashcmp(split_index->base_sha1, split_index->base->sha1))
+ die("broken index, expect %s in %s, got %s",
+ sha1_to_hex(split_index->base_sha1),
+ git_path("sharedindex.%s",
+ sha1_to_hex(split_index->base_sha1)),
+ sha1_to_hex(split_index->base->sha1));
+ merge_base_index(istate);
+ return ret;
+ }
+
int is_index_unborn(struct index_state *istate)
{
return (!istate->cache_nr && !istate->timestamp.sec);
{
int i;
- for (i = 0; i < istate->cache_nr; i++)
+ for (i = 0; i < istate->cache_nr; i++) {
+ if (istate->cache[i]->index &&
+ istate->split_index &&
+ istate->split_index->base &&
+ istate->cache[i]->index <= istate->split_index->base->cache_nr &&
+ istate->cache[i] == istate->split_index->base->cache[istate->cache[i]->index - 1])
+ continue;
free(istate->cache[i]);
+ }
resolve_undo_clear_index(istate);
istate->cache_nr = 0;
istate->cache_changed = 0;
free(istate->cache);
istate->cache = NULL;
istate->cache_alloc = 0;
+ discard_split_index(istate);
return 0;
}
(ce_write(context, fd, &sz, 4) < 0)) ? -1 : 0;
}
- static int ce_flush(git_SHA_CTX *context, int fd)
+ static int ce_flush(git_SHA_CTX *context, int fd, unsigned char *sha1)
{
unsigned int left = write_buffer_len;
/* Append the SHA1 signature at the end */
git_SHA1_Final(write_buffer + left, context);
+ hashcpy(sha1, write_buffer + left);
left += 20;
return (write_in_full(fd, write_buffer, left) != left) ? -1 : 0;
}
ondisk->size = htonl(ce->ce_stat_data.sd_size);
hashcpy(ondisk->sha1, ce->sha1);
- flags = ce->ce_flags;
+ flags = ce->ce_flags & ~CE_NAMEMASK;
flags |= (ce_namelen(ce) >= CE_NAMEMASK ? CE_NAMEMASK : ce_namelen(ce));
ondisk->flags = htons(flags);
if (ce->ce_flags & CE_EXTENDED) {
{
int size;
struct ondisk_cache_entry *ondisk;
+	int saved_namelen = saved_namelen; /* self-init to quiet a spurious "may be used uninitialized" warning */
char *name;
int result;
+ if (ce->ce_flags & CE_STRIP_NAME) {
+ saved_namelen = ce_namelen(ce);
+ ce->ce_namelen = 0;
+ }
+
if (!previous_name) {
size = ondisk_ce_size(ce);
ondisk = xcalloc(1, size);
strbuf_splice(previous_name, common, to_remove,
ce->name + common, ce_namelen(ce) - common);
}
+ if (ce->ce_flags & CE_STRIP_NAME) {
+ ce->ce_namelen = saved_namelen;
+ ce->ce_flags &= ~CE_STRIP_NAME;
+ }
result = ce_write(c, fd, ondisk, size);
free(ondisk);
return result;
}
+/*
+ * Verify that the index file at 'path' still ends with the SHA-1 recorded
+ * in this index_state. On any failure, do not die; just return 0.
+ */
+static int verify_index_from(const struct index_state *istate, const char *path)
+{
+ int fd;
+ ssize_t n;
+ struct stat st;
+ unsigned char sha1[20];
+
+ if (!istate->initialized)
+ return 0;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return 0;
+
+ if (fstat(fd, &st))
+ goto out;
+
+ if (st.st_size < sizeof(struct cache_header) + 20)
+ goto out;
+
+ n = pread_in_full(fd, sha1, 20, st.st_size - 20);
+ if (n != 20)
+ goto out;
+
+ if (hashcmp(istate->sha1, sha1))
+ goto out;
+
+ close(fd);
+ return 1;
+
+out:
+ close(fd);
+ return 0;
+}
+
+static int verify_index(const struct index_state *istate)
+{
+ return verify_index_from(istate, get_index_file());
+}
+
static int has_racy_timestamp(struct index_state *istate)
{
int entries = istate->cache_nr;
void update_index_if_able(struct index_state *istate, struct lock_file *lockfile)
{
if ((istate->cache_changed || has_racy_timestamp(istate)) &&
- verify_index(istate) && !write_index(istate, lockfile->fd))
- commit_locked_index(lockfile);
- else
+	    verify_index(istate) &&
+ write_locked_index(istate, lockfile, COMMIT_LOCK))
rollback_lock_file(lockfile);
}
- int write_index(struct index_state *istate, int newfd)
+ static int do_write_index(struct index_state *istate, int newfd,
+ int strip_extensions)
{
git_SHA_CTX c;
struct cache_header hdr;
}
}
- if (!istate->version)
+ if (!istate->version) {
istate->version = get_index_format_default();
+ if (getenv("GIT_TEST_SPLIT_INDEX"))
+ init_split_index(istate);
+ }
/* demote version 3 to version 2 when the latter suffices */
if (istate->version == 3 || istate->version == 2)
strbuf_release(&previous_name_buf);
/* Write extension data here */
- if (istate->cache_tree) {
+ if (!strip_extensions && istate->split_index) {
+ struct strbuf sb = STRBUF_INIT;
+
+ err = write_link_extension(&sb, istate) < 0 ||
+ write_index_ext_header(&c, newfd, CACHE_EXT_LINK,
+ sb.len) < 0 ||
+ ce_write(&c, newfd, sb.buf, sb.len) < 0;
+ strbuf_release(&sb);
+ if (err)
+ return -1;
+ }
+ if (!strip_extensions && istate->cache_tree) {
struct strbuf sb = STRBUF_INIT;
cache_tree_write(&sb, istate->cache_tree);
if (err)
return -1;
}
- if (istate->resolve_undo) {
+ if (!strip_extensions && istate->resolve_undo) {
struct strbuf sb = STRBUF_INIT;
resolve_undo_write(&sb, istate->resolve_undo);
return -1;
}
- if (ce_flush(&c, newfd) || fstat(newfd, &st))
+ if (ce_flush(&c, newfd, istate->sha1) || fstat(newfd, &st))
return -1;
istate->timestamp.sec = (unsigned int)st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
return 0;
}
+ void set_alternate_index_output(const char *name)
+ {
+ alternate_index_output = name;
+ }
+
+ static int commit_locked_index(struct lock_file *lk)
+ {
+ if (alternate_index_output) {
+ if (lk->fd >= 0 && close_lock_file(lk))
+ return -1;
+ if (rename(lk->filename, alternate_index_output))
+ return -1;
+ lk->filename[0] = 0;
+ return 0;
+ } else {
+ return commit_lock_file(lk);
+ }
+ }
+
+ static int do_write_locked_index(struct index_state *istate, struct lock_file *lock,
+ unsigned flags)
+ {
+ int ret = do_write_index(istate, lock->fd, 0);
+ if (ret)
+ return ret;
+ assert((flags & (COMMIT_LOCK | CLOSE_LOCK)) !=
+ (COMMIT_LOCK | CLOSE_LOCK));
+ if (flags & COMMIT_LOCK)
+ return commit_locked_index(lock);
+ else if (flags & CLOSE_LOCK)
+ return close_lock_file(lock);
+ else
+ return ret;
+ }
+
+ static int write_split_index(struct index_state *istate,
+ struct lock_file *lock,
+ unsigned flags)
+ {
+ int ret;
+ prepare_to_write_split_index(istate);
+ ret = do_write_locked_index(istate, lock, flags);
+ finish_writing_split_index(istate);
+ return ret;
+ }
+
+ static char *temporary_sharedindex;
+
+ static void remove_temporary_sharedindex(void)
+ {
+ if (temporary_sharedindex) {
+ unlink_or_warn(temporary_sharedindex);
+ free(temporary_sharedindex);
+ temporary_sharedindex = NULL;
+ }
+ }
+
+ static void remove_temporary_sharedindex_on_signal(int signo)
+ {
+ remove_temporary_sharedindex();
+ sigchain_pop(signo);
+ raise(signo);
+ }
+
+ static int write_shared_index(struct index_state *istate,
+ struct lock_file *lock, unsigned flags)
+ {
+ struct split_index *si = istate->split_index;
+ static int installed_handler;
+ int fd, ret;
+
+ temporary_sharedindex = git_pathdup("sharedindex_XXXXXX");
+ fd = mkstemp(temporary_sharedindex);
+ if (fd < 0) {
+ free(temporary_sharedindex);
+ temporary_sharedindex = NULL;
+ hashclr(si->base_sha1);
+ return do_write_locked_index(istate, lock, flags);
+ }
+ if (!installed_handler) {
+ atexit(remove_temporary_sharedindex);
+ sigchain_push_common(remove_temporary_sharedindex_on_signal);
+ }
+ move_cache_to_base_index(istate);
+ ret = do_write_index(si->base, fd, 1);
+ close(fd);
+ if (ret) {
+ remove_temporary_sharedindex();
+ return ret;
+ }
+ ret = rename(temporary_sharedindex,
+ git_path("sharedindex.%s", sha1_to_hex(si->base->sha1)));
+ free(temporary_sharedindex);
+ temporary_sharedindex = NULL;
+ if (!ret)
+ hashcpy(si->base_sha1, si->base->sha1);
+ return ret;
+ }
+
+ int write_locked_index(struct index_state *istate, struct lock_file *lock,
+ unsigned flags)
+ {
+ struct split_index *si = istate->split_index;
+
+ if (!si || alternate_index_output ||
+ (istate->cache_changed & ~EXTMASK)) {
+ if (si)
+ hashclr(si->base_sha1);
+ return do_write_locked_index(istate, lock, flags);
+ }
+
+ if (getenv("GIT_TEST_SPLIT_INDEX")) {
+ int v = si->base_sha1[0];
+ if ((v & 15) < 6)
+ istate->cache_changed |= SPLIT_INDEX_ORDERED;
+ }
+ if (istate->cache_changed & SPLIT_INDEX_ORDERED) {
+ int ret = write_shared_index(istate, lock, flags);
+ if (ret)
+ return ret;
+ }
+
+ return write_split_index(istate, lock, flags);
+ }
+
/*
* Read the index file that is potentially unmerged into given
* index_state, dropping any unmerged entries. Returns true if
return opts->action == REPLAY_REVERT ? "revert" : "cherry-pick";
}
-static char *get_encoding(const char *message);
-
struct commit_message {
char *parent_label;
const char *label;
const char *subject;
- char *reencoded_message;
const char *message;
};
static int get_message(struct commit *commit, struct commit_message *out)
{
- const char *encoding;
const char *abbrev, *subject;
int abbrev_len, subject_len;
char *q;
- if (!commit->buffer)
- return -1;
- encoding = get_encoding(commit->buffer);
- if (!encoding)
- encoding = "UTF-8";
if (!git_commit_encoding)
git_commit_encoding = "UTF-8";
- out->reencoded_message = NULL;
- out->message = commit->buffer;
- if (same_encoding(encoding, git_commit_encoding))
- out->reencoded_message = reencode_string(commit->buffer,
- git_commit_encoding, encoding);
- if (out->reencoded_message)
- out->message = out->reencoded_message;
-
+ out->message = logmsg_reencode(commit, NULL, git_commit_encoding);
abbrev = find_unique_abbrev(commit->object.sha1, DEFAULT_ABBREV);
abbrev_len = strlen(abbrev);
return 0;
}
-static void free_message(struct commit_message *msg)
+static void free_message(struct commit *commit, struct commit_message *msg)
{
free(msg->parent_label);
- free(msg->reencoded_message);
-}
-
-static char *get_encoding(const char *message)
-{
- const char *p = message, *eol;
-
- while (*p && *p != '\n') {
- for (eol = p + 1; *eol && *eol != '\n'; eol++)
- ; /* do nothing */
- if (starts_with(p, "encoding ")) {
- char *result = xmalloc(eol - 8 - p);
- strlcpy(result, p + 9, eol - 8 - p);
- return result;
- }
- p = eol;
- if (*p == '\n')
- p++;
- }
- return NULL;
+ unuse_commit_buffer(commit, msg->message);
}
static void write_cherry_pick_head(struct commit *commit, const char *pseudoref)
read_cache();
if (checkout_fast_forward(from, to, 1))
- exit(1); /* the callee should have complained already */
+ exit(128); /* the callee should have complained already */
ref_lock = lock_any_ref_for_update("HEAD", unborn ? null_sha1 : from,
0, NULL);
+ if (!ref_lock)
+ return error(_("Failed to lock HEAD during fast_forward_to"));
+
strbuf_addf(&sb, "%s: fast-forward", action_name(opts));
ret = write_ref_sha1(ref_lock, to, sb.buf);
+
strbuf_release(&sb);
return ret;
}
{
struct merge_options o;
struct tree *result, *next_tree, *base_tree, *head_tree;
- int clean, index_fd;
+ int clean;
const char **xopt;
static struct lock_file index_lock;
- index_fd = hold_locked_index(&index_lock, 1);
+ hold_locked_index(&index_lock, 1);
read_cache();
next_tree, base_tree, &result);
if (active_cache_changed &&
- (write_cache(index_fd, active_cache, active_nr) ||
- commit_locked_index(&index_lock)))
+ write_locked_index(&the_index, &index_lock, COMMIT_LOCK))
/* TRANSLATORS: %s will be "revert" or "cherry-pick" */
die(_("%s: Unable to write new index file"), action_name(opts));
rollback_lock_file(&index_lock);
active_cache_tree = cache_tree();
if (!cache_tree_fully_valid(active_cache_tree))
- if (cache_tree_update(active_cache_tree,
- (const struct cache_entry * const *)active_cache,
- active_nr, 0))
+ if (cache_tree_update(&the_index, 0))
return error(_("Unable to update cache tree\n"));
return !hashcmp(active_cache_tree->sha1, head_commit->tree->object.sha1);
{
struct argv_array array;
int rc;
- char *gpg_sign;
argv_array_init(&array);
argv_array_push(&array, "commit");
argv_array_push(&array, "-n");
- if (opts->gpg_sign) {
- gpg_sign = xmalloc(3 + strlen(opts->gpg_sign));
- sprintf(gpg_sign, "-S%s", opts->gpg_sign);
- argv_array_push(&array, gpg_sign);
- free(gpg_sign);
- }
+ if (opts->gpg_sign)
+ argv_array_pushf(&array, "-S%s", opts->gpg_sign);
if (opts->signoff)
argv_array_push(&array, "-s");
if (!opts->edit) {
unsigned char head[20];
struct commit *base, *next, *parent;
const char *base_label, *next_label;
- struct commit_message msg = { NULL, NULL, NULL, NULL, NULL };
+ struct commit_message msg = { NULL, NULL, NULL, NULL };
char *defmsg = NULL;
struct strbuf msgbuf = STRBUF_INIT;
int res, unborn = 0, allow;
res = run_git_commit(defmsg, opts, allow);
leave:
- free_message(&msg);
+ free_message(commit, &msg);
free(defmsg);
return res;
if (read_index_preload(&the_index, NULL) < 0)
die(_("git %s: failed to read the index"), action_name(opts));
refresh_index(&the_index, REFRESH_QUIET|REFRESH_UNMERGED, NULL, NULL, NULL);
- if (the_index.cache_changed) {
- if (write_index(&the_index, index_fd) ||
- commit_locked_index(&index_lock))
+ if (the_index.cache_changed && index_fd >= 0) {
+ if (write_locked_index(&the_index, &index_lock, COMMIT_LOCK))
die(_("git %s: failed to refresh the index"), action_name(opts));
}
rollback_lock_file(&index_lock);
int subject_len;
for (cur = todo_list; cur; cur = cur->next) {
+ const char *commit_buffer = get_commit_buffer(cur->item, NULL);
sha1_abbrev = find_unique_abbrev(cur->item->object.sha1, DEFAULT_ABBREV);
- subject_len = find_commit_subject(cur->item->buffer, &subject);
+ subject_len = find_commit_subject(commit_buffer, &subject);
strbuf_addf(buf, "%s %s %.*s\n", action_str, sha1_abbrev,
subject_len, subject);
+ unuse_commit_buffer(cur->item, commit_buffer);
}
return 0;
}
#include "progress.h"
#include "refs.h"
#include "attr.h"
+ #include "split-index.h"
/*
* Error messages expected by scripts out of plumbing commands such as
int i;
const char **msgs = opts->msgs;
const char *msg;
- char *tmp;
const char *cmd2 = strcmp(cmd, "checkout") ? cmd : "switch branches";
+
if (advice_commit_before_merge)
msg = "Your local changes to the following files would be overwritten by %s:\n%%s"
"Please, commit your changes or stash them before you can %s.";
else
msg = "Your local changes to the following files would be overwritten by %s:\n%%s";
- tmp = xmalloc(strlen(msg) + strlen(cmd) + strlen(cmd2) - 2);
- sprintf(tmp, msg, cmd, cmd2);
- msgs[ERROR_WOULD_OVERWRITE] = tmp;
- msgs[ERROR_NOT_UPTODATE_FILE] = tmp;
+ msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] =
+ xstrfmt(msg, cmd, cmd2);
msgs[ERROR_NOT_UPTODATE_DIR] =
"Updating the following directories would lose untracked files in it:\n%s";
"Please move or remove them before you can %s.";
else
msg = "The following untracked working tree files would be %s by %s:\n%%s";
- tmp = xmalloc(strlen(msg) + strlen(cmd) + strlen("removed") + strlen(cmd2) - 4);
- sprintf(tmp, msg, "removed", cmd, cmd2);
- msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = tmp;
- tmp = xmalloc(strlen(msg) + strlen(cmd) + strlen("overwritten") + strlen(cmd2) - 4);
- sprintf(tmp, msg, "overwritten", cmd, cmd2);
- msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = tmp;
+
+ msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = xstrfmt(msg, "removed", cmd, cmd2);
+ msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = xstrfmt(msg, "overwritten", cmd, cmd2);
/*
* Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we
enum unpack_trees_error_types,
struct unpack_trees_options *o);
- static int apply_sparse_checkout(struct cache_entry *ce, struct unpack_trees_options *o)
+ static int apply_sparse_checkout(struct index_state *istate,
+ struct cache_entry *ce,
+ struct unpack_trees_options *o)
{
int was_skip_worktree = ce_skip_worktree(ce);
ce->ce_flags |= CE_SKIP_WORKTREE;
else
ce->ce_flags &= ~CE_SKIP_WORKTREE;
+ if (was_skip_worktree != ce_skip_worktree(ce)) {
+ ce->ce_flags |= CE_UPDATE_IN_BASE;
+ istate->cache_changed |= CE_ENTRY_CHANGED;
+ }
/*
* if (!was_skip_worktree && !ce_skip_worktree()) {
return -1;
}
-/* NEEDSWORK: give this a better name and share with tree-walk.c */
-static int name_compare(const char *a, int a_len,
- const char *b, int b_len)
-{
- int len = (a_len < b_len) ? a_len : b_len;
- int cmp = memcmp(a, b, len);
- if (cmp)
- return cmp;
- return (a_len - b_len);
-}
-
/*
* The tree traversal is looking at name p. If we have a matching entry,
* return it. If name p is a directory in the index, do not return
state.force = 1;
state.quiet = 1;
state.refresh_cache = 1;
+ state.istate = &o->result;
memset(&el, 0, sizeof(el));
if (!core_apply_sparse_checkout || !o->update)
o->result.timestamp.sec = o->src_index->timestamp.sec;
o->result.timestamp.nsec = o->src_index->timestamp.nsec;
o->result.version = o->src_index->version;
+ o->result.split_index = o->src_index->split_index;
+ if (o->result.split_index)
+ o->result.split_index->refcount++;
+ hashcpy(o->result.sha1, o->src_index->sha1);
o->merge_size = len;
mark_all_ce_unused(o->src_index);
ret = -1;
}
- if (apply_sparse_checkout(ce, o)) {
+ if (apply_sparse_checkout(&o->result, ce, o)) {
if (!o->show_all_errors)
goto return_failed;
ret = -1;
struct unpack_trees_options *o)
{
if (ce)
- cache_tree_invalidate_path(o->src_index->cache_tree, ce->name);
+ cache_tree_invalidate_path(o->src_index, ce->name);
}
/*