Fixes since v1.7.10.2
---------------------
+ * The message file for German translation has been updated a bit.
+
* Running "git checkout" on an unborn branch used to corrupt HEAD.
* When checking out another commit from an already detached state, we
"checkout" phase; when run without any "--quiet" option, it should
give progress to the lengthy operation.
+ * The directory path used in "git diff --no-index", when it recurses
+ down, was broken with a recent update after the v1.7.10.1 release.
+
* "log -z --pretty=tformat:..." did not terminate each record with
NUL. The fix is not entirely correct when the output also asks for
--patch and/or --stat, though.
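
   For illustration (any repository with a few commits will do), the NUL
   record terminators can be made visible by converting them to newlines:

       git log -z --pretty=tformat:%h%x09%s | tr '\000' '\n'
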
* The DWIM behaviour for "log --pretty=format:%gd -g" was
broken and gave undue precedence to configured log.date, causing
"git stash list" to show "stash@{time stamp string}".
+ * "git status --porcelain" ignored "--branch" option by mistake. The
+ output for "git status --branch -z" was also incorrect and did not
+ terminate the record for the current branch name with NUL as asked.
+
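+ For illustration (branch and path names are hypothetical), the branch
+ header is emitted as an extra "##" record before the per-path entries:
+
+     $ git status --branch --porcelain
+     ## master...origin/master [ahead 1]
+      M builtin/commit.c
+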
+ * When a submodule repository uses the alternate object store mechanism,
+ some commands that were started from the superproject did not
+ notice it and failed with "No such object" errors. The subcommands
+ of "git submodule" command that recursed into the submodule in a
+ separate process were OK; only the ones that cheated and peeked
+ directly into the submodule's repository from the primary process
+ were affected.
+
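+ As a rough sketch of the setup involved (paths are hypothetical), a
+ submodule cloned with "--reference" borrows objects through its
+ objects/info/alternates file instead of keeping a full copy of them:
+
+     $ git clone --reference /path/to/cache.git git://example.com/sub.git sub
+     $ cat sub/.git/objects/info/alternates
+     /path/to/cache.git/objects
+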
Also contains minor fixes and documentation updates.
Performance and Internal Implementation (please report possible regressions)
+ * The bash completion script (in contrib/) has been cleaned up to make
+ future work on it simpler.
+
* An experimental "version 4" format of the index file has been
introduced to reduce on-disk footprint and I/O overhead.
releases are contained in this release (see release notes to them for
details).
- * "git status --porcelain" ignored "--branch" option by mistake. The
- output for "git status --branch -z" was also incorrect and did not
- terminate the record for the current branch name with NUL as asked.
- (merge d4a6bf1 jk/maint-status-porcelain-z-b later to maint).
+ * The progress indicator for a large "git checkout" was sent to
+ stderr even if it is not a terminal.
+ (merge e9fc64c ap/checkout-no-progress-for-non-tty later to maint).
+
+ * A name taken from mailmap was copied into an internal buffer
+ incorrectly and could overrun the buffer when the name is too long.
+ (merge c9b4e9e jk/format-person-part-buffer-limit later to maint).
+
+ * A malformed commit object that has a header line chomped in the
+ middle could kill git with a NULL pointer dereference.
+ (merge a9c7a8a jk/pretty-commit-header-incomplete-line later to maint).
+
+ * An author/committer name that is a single character was mishandled
+ as an invalid name by mistake.
+ (merge d9955fd jk/ident-split-fix later to maint).
+
+ * "git grep -e '$pattern'", unlike the case where the patterns are
+ read from a file, did not treat individual lines in the given
+ pattern argument as separate regular expressions as it should.
+ (merge ec83061 rs/maint-grep-F later to maint).
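+
+ A minimal illustration (file contents are hypothetical): a pattern
+ argument with an embedded newline now behaves as if each line had been
+ given with its own "-e", matching lines that contain either expression:
+
+     $ git grep -e "$(printf 'foo\nbar')" -- '*.c'
+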
* "git diff --stat" used to fully count a binary file with modified
execution bits whose contents is unmodified, which was not quite
'git config' will search for configuration options:
$GIT_DIR/config::
- Repository specific configuration file. (The filename is
- of course relative to the repository root, not the working
- directory.)
+ Repository specific configuration file.
~/.gitconfig::
User-specific configuration file. Also called "global"
configuration file.
DESCRIPTION
-----------
Look for specified patterns in the tracked files in the work tree, blobs
-registered in the index file, or blobs in given tree objects.
+registered in the index file, or blobs in given tree objects. Patterns
+are lists of one or more search expressions separated by newline
+characters. An empty string as search expression matches all lines.
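+
+For example (run in any repository with tracked files), an empty pattern
+lists every line of every tracked file, and a two-line pattern matches
+lines containing either expression:
+
+	git grep -e ""
+	git grep -e "$(printf 'TODO\nFIXME')"
+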
CONFIGURATION
--git-dir::
Show `$GIT_DIR` if defined. Otherwise show the path to
- the .git directory, relative to the current directory.
+ the .git directory. The path shown, when relative, is
+ relative to the current working directory.
+
	If `$GIT_DIR` is not defined and the current directory
	is not detected to lie in a git repository or work tree,
	print a message to standard error and exit with a nonzero status.
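
For example (paths are illustrative), from the top level of a repository
the command prints ".git", and an explicitly set `$GIT_DIR` is shown as-is:

	$ git rev-parse --git-dir
	.git
	$ GIT_DIR=/tmp/other/.git git rev-parse --git-dir
	/tmp/other/.git
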
#!/bin/sh
GVF=GIT-VERSION-FILE
-DEF_VER=v1.7.10.GIT
+DEF_VER=v1.7.11-rc0
LF='
'
static unsigned int ustar_header_chksum(const struct ustar_header *header)
{
- char *p = (char *)header;
+ const char *p = (const char *)header;
unsigned int chksum = 0;
while (p < header->chksum)
chksum += *p++;
chksum += sizeof(header->chksum) * ' ';
p += sizeof(header->chksum);
- while (p < (char *)header + sizeof(struct ustar_header))
+ while (p < (const char *)header + sizeof(struct ustar_header))
chksum += *p++;
return chksum;
}
/* Remotes are only allowed to fetch actual refs */
if (remote) {
char *ref = NULL;
- const char *refname, *colon = NULL;
-
- colon = strchr(name, ':');
- if (colon)
- refname = xstrndup(name, colon - name);
- else
- refname = name;
-
- if (!dwim_ref(refname, strlen(refname), sha1, &ref))
- die("no such ref: %s", refname);
- if (refname != name)
- free((void *)refname);
+ const char *colon = strchr(name, ':');
+ int refnamelen = colon ? colon - name : strlen(name);
+
+ if (!dwim_ref(name, refnamelen, sha1, &ref))
+ die("no such ref: %.*s", refnamelen, name);
free(ref);
}
opts.reset = 1;
opts.merge = 1;
opts.fn = oneway_merge;
- opts.verbose_update = !o->quiet;
+ opts.verbose_update = !o->quiet && isatty(2);
opts.src_index = &the_index;
opts.dst_index = &the_index;
parse_tree(tree);
topts.update = 1;
topts.merge = 1;
topts.gently = opts->merge && old->commit;
- topts.verbose_update = !opts->quiet;
+ topts.verbose_update = !opts->quiet && isatty(2);
topts.fn = twoway_merge;
if (opts->overwrite_ignore) {
topts.dir = xcalloc(1, sizeof(*topts.dir));
int cmd_fetch_pack(int argc, const char **argv, const char *prefix)
{
- int i, ret, nr_heads;
+ int i, ret;
struct ref *ref = NULL;
- char *dest = NULL, **heads;
+ const char *dest = NULL;
+ int alloc_heads = 0, nr_heads = 0;
+ char **heads = NULL;
int fd[2];
char *pack_lockfile = NULL;
char **pack_lockfile_ptr = NULL;
packet_trace_identity("fetch-pack");
- nr_heads = 0;
- heads = NULL;
- for (i = 1; i < argc; i++) {
+ for (i = 1; i < argc && *argv[i] == '-'; i++) {
const char *arg = argv[i];
- if (*arg == '-') {
- if (!prefixcmp(arg, "--upload-pack=")) {
- args.uploadpack = arg + 14;
- continue;
- }
- if (!prefixcmp(arg, "--exec=")) {
- args.uploadpack = arg + 7;
- continue;
- }
- if (!strcmp("--quiet", arg) || !strcmp("-q", arg)) {
- args.quiet = 1;
- continue;
- }
- if (!strcmp("--keep", arg) || !strcmp("-k", arg)) {
- args.lock_pack = args.keep_pack;
- args.keep_pack = 1;
- continue;
- }
- if (!strcmp("--thin", arg)) {
- args.use_thin_pack = 1;
- continue;
- }
- if (!strcmp("--include-tag", arg)) {
- args.include_tag = 1;
- continue;
- }
- if (!strcmp("--all", arg)) {
- args.fetch_all = 1;
- continue;
- }
- if (!strcmp("--stdin", arg)) {
- args.stdin_refs = 1;
- continue;
- }
- if (!strcmp("-v", arg)) {
- args.verbose = 1;
- continue;
- }
- if (!prefixcmp(arg, "--depth=")) {
- args.depth = strtol(arg + 8, NULL, 0);
- continue;
- }
- if (!strcmp("--no-progress", arg)) {
- args.no_progress = 1;
- continue;
- }
- if (!strcmp("--stateless-rpc", arg)) {
- args.stateless_rpc = 1;
- continue;
- }
- if (!strcmp("--lock-pack", arg)) {
- args.lock_pack = 1;
- pack_lockfile_ptr = &pack_lockfile;
- continue;
- }
- usage(fetch_pack_usage);
+ if (!prefixcmp(arg, "--upload-pack=")) {
+ args.uploadpack = arg + 14;
+ continue;
+ }
+ if (!prefixcmp(arg, "--exec=")) {
+ args.uploadpack = arg + 7;
+ continue;
}
- dest = (char *)arg;
- heads = (char **)(argv + i + 1);
- nr_heads = argc - i - 1;
- break;
+ if (!strcmp("--quiet", arg) || !strcmp("-q", arg)) {
+ args.quiet = 1;
+ continue;
+ }
+ if (!strcmp("--keep", arg) || !strcmp("-k", arg)) {
+ args.lock_pack = args.keep_pack;
+ args.keep_pack = 1;
+ continue;
+ }
+ if (!strcmp("--thin", arg)) {
+ args.use_thin_pack = 1;
+ continue;
+ }
+ if (!strcmp("--include-tag", arg)) {
+ args.include_tag = 1;
+ continue;
+ }
+ if (!strcmp("--all", arg)) {
+ args.fetch_all = 1;
+ continue;
+ }
+ if (!strcmp("--stdin", arg)) {
+ args.stdin_refs = 1;
+ continue;
+ }
+ if (!strcmp("-v", arg)) {
+ args.verbose = 1;
+ continue;
+ }
+ if (!prefixcmp(arg, "--depth=")) {
+ args.depth = strtol(arg + 8, NULL, 0);
+ continue;
+ }
+ if (!strcmp("--no-progress", arg)) {
+ args.no_progress = 1;
+ continue;
+ }
+ if (!strcmp("--stateless-rpc", arg)) {
+ args.stateless_rpc = 1;
+ continue;
+ }
+ if (!strcmp("--lock-pack", arg)) {
+ args.lock_pack = 1;
+ pack_lockfile_ptr = &pack_lockfile;
+ continue;
+ }
+ usage(fetch_pack_usage);
}
- if (!dest)
+
+ if (i < argc)
+ dest = argv[i++];
+ else
usage(fetch_pack_usage);
+ /*
+ * Copy refs from cmdline to growable list, then append any
+ * refs from the standard input:
+ */
+ ALLOC_GROW(heads, argc - i, alloc_heads);
+ for (; i < argc; i++)
+ heads[nr_heads++] = xstrdup(argv[i]);
if (args.stdin_refs) {
- /*
- * Copy refs from cmdline to new growable list, then
- * append the refs from the standard input.
- */
- int alloc_heads = nr_heads;
- int size = nr_heads * sizeof(*heads);
- heads = memcpy(xmalloc(size), heads, size);
if (args.stateless_rpc) {
/* in stateless RPC mode we use pkt-line to read
* from stdin, until we get a flush packet
fd[0] = 0;
fd[1] = 1;
} else {
- conn = git_connect(fd, (char *)dest, args.uploadpack,
+ conn = git_connect(fd, dest, args.uploadpack,
args.verbose ? CONNECT_VERBOSE : 0);
}
strbuf_add(tagbuf, tag_body, buf + len - tag_body);
}
strbuf_complete_line(tagbuf);
- strbuf_add_lines(tagbuf, "# ", sig->buf, sig->len);
+ if (sig->len) {
+ strbuf_addch(tagbuf, '\n');
+ strbuf_add_lines(tagbuf, "# ", sig->buf, sig->len);
+ }
}
static void fmt_merge_msg_sigs(struct strbuf *out)
rev.ignore_merges = 1;
rev.limited = 1;
- if (suffixcmp(out->buf, "\n"))
- strbuf_addch(out, '\n');
+ strbuf_complete_line(out);
for (i = 0; i < origins.nr; i++)
shortlog(origins.items[i].string,
if (!patterns)
die_errno(_("cannot open '%s'"), arg);
while (strbuf_getline(&sb, patterns, '\n') == 0) {
- char *s;
- size_t len;
-
/* ignore empty line like grep does */
if (sb.len == 0)
continue;
- s = strbuf_detach(&sb, &len);
- append_grep_pat(grep_opt, s, len, arg, ++lno, GREP_PATTERN);
+ append_grep_pat(grep_opt, sb.buf, sb.len, arg, ++lno,
+ GREP_PATTERN);
}
if (!from_stdin)
fclose(patterns);
}
/* Return 0 if we will bust the pack-size limit */
-static unsigned long write_object(struct sha1file *f,
- struct object_entry *entry,
- off_t write_offset)
+static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
+ unsigned long limit, int usable_delta)
{
- unsigned long size, limit, datalen;
- void *buf;
+ unsigned long size, datalen;
unsigned char header[10], dheader[10];
unsigned hdrlen;
enum object_type type;
+ void *buf;
+
+ if (!usable_delta) {
+ buf = read_sha1_file(entry->idx.sha1, &type, &size);
+ if (!buf)
+ die("unable to read %s", sha1_to_hex(entry->idx.sha1));
+ /*
+ * make sure no cached delta data remains from a
+ * previous attempt before a pack split occurred.
+ */
+ free(entry->delta_data);
+ entry->delta_data = NULL;
+ entry->z_delta_size = 0;
+ } else if (entry->delta_data) {
+ size = entry->delta_size;
+ buf = entry->delta_data;
+ entry->delta_data = NULL;
+ type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ OBJ_OFS_DELTA : OBJ_REF_DELTA;
+ } else {
+ buf = get_delta(entry);
+ size = entry->delta_size;
+ type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ OBJ_OFS_DELTA : OBJ_REF_DELTA;
+ }
+
+ if (entry->z_delta_size)
+ datalen = entry->z_delta_size;
+ else
+ datalen = do_compress(&buf, size);
+
+ /*
+ * The object header is a byte of 'type' followed by zero or
+ * more bytes of length.
+ */
+ hdrlen = encode_in_pack_object_header(type, size, header);
+
+ if (type == OBJ_OFS_DELTA) {
+ /*
+ * Deltas with relative base contain an additional
+ * encoding of the relative offset for the delta
+ * base from this object's position in the pack.
+ */
+ off_t ofs = entry->idx.offset - entry->delta->idx.offset;
+ unsigned pos = sizeof(dheader) - 1;
+ dheader[pos] = ofs & 127;
+ while (ofs >>= 7)
+ dheader[--pos] = 128 | (--ofs & 127);
+ if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
+ free(buf);
+ return 0;
+ }
+ sha1write(f, header, hdrlen);
+ sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hdrlen += sizeof(dheader) - pos;
+ } else if (type == OBJ_REF_DELTA) {
+ /*
+ * Deltas with a base reference contain
+ * an additional 20 bytes for the base sha1.
+ */
+ if (limit && hdrlen + 20 + datalen + 20 >= limit) {
+ free(buf);
+ return 0;
+ }
+ sha1write(f, header, hdrlen);
+ sha1write(f, entry->delta->idx.sha1, 20);
+ hdrlen += 20;
+ } else {
+ if (limit && hdrlen + datalen + 20 >= limit) {
+ free(buf);
+ return 0;
+ }
+ sha1write(f, header, hdrlen);
+ }
+ sha1write(f, buf, datalen);
+ free(buf);
+
+ return hdrlen + datalen;
+}
+
+/* Return 0 if we will bust the pack-size limit */
+static unsigned long write_reuse_object(struct sha1file *f, struct object_entry *entry,
+ unsigned long limit, int usable_delta)
+{
+ struct packed_git *p = entry->in_pack;
+ struct pack_window *w_curs = NULL;
+ struct revindex_entry *revidx;
+ off_t offset;
+ enum object_type type = entry->type;
+ unsigned long datalen;
+ unsigned char header[10], dheader[10];
+ unsigned hdrlen;
+
+ if (entry->delta)
+ type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ OBJ_OFS_DELTA : OBJ_REF_DELTA;
+ hdrlen = encode_in_pack_object_header(type, entry->size, header);
+
+ offset = entry->in_pack_offset;
+ revidx = find_pack_revindex(p, offset);
+ datalen = revidx[1].offset - offset;
+ if (!pack_to_stdout && p->index_version > 1 &&
+ check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
+ error("bad packed object CRC for %s", sha1_to_hex(entry->idx.sha1));
+ unuse_pack(&w_curs);
+ return write_no_reuse_object(f, entry, limit, usable_delta);
+ }
+
+ offset += entry->in_pack_header_size;
+ datalen -= entry->in_pack_header_size;
+
+ if (!pack_to_stdout && p->index_version == 1 &&
+ check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
+ error("corrupt packed object for %s", sha1_to_hex(entry->idx.sha1));
+ unuse_pack(&w_curs);
+ return write_no_reuse_object(f, entry, limit, usable_delta);
+ }
+
+ if (type == OBJ_OFS_DELTA) {
+ off_t ofs = entry->idx.offset - entry->delta->idx.offset;
+ unsigned pos = sizeof(dheader) - 1;
+ dheader[pos] = ofs & 127;
+ while (ofs >>= 7)
+ dheader[--pos] = 128 | (--ofs & 127);
+ if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
+ unuse_pack(&w_curs);
+ return 0;
+ }
+ sha1write(f, header, hdrlen);
+ sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hdrlen += sizeof(dheader) - pos;
+ reused_delta++;
+ } else if (type == OBJ_REF_DELTA) {
+ if (limit && hdrlen + 20 + datalen + 20 >= limit) {
+ unuse_pack(&w_curs);
+ return 0;
+ }
+ sha1write(f, header, hdrlen);
+ sha1write(f, entry->delta->idx.sha1, 20);
+ hdrlen += 20;
+ reused_delta++;
+ } else {
+ if (limit && hdrlen + datalen + 20 >= limit) {
+ unuse_pack(&w_curs);
+ return 0;
+ }
+ sha1write(f, header, hdrlen);
+ }
+ copy_pack_data(f, p, &w_curs, offset, datalen);
+ unuse_pack(&w_curs);
+ reused++;
+ return hdrlen + datalen;
+}
+
+/* Return 0 if we will bust the pack-size limit */
+static unsigned long write_object(struct sha1file *f,
+ struct object_entry *entry,
+ off_t write_offset)
+{
+ unsigned long limit, len;
int usable_delta, to_reuse;
if (!pack_to_stdout)
crc32_begin(f);
- type = entry->type;
-
/* apply size limit if limited packsize and not first object */
if (!pack_size_limit || !nr_written)
limit = 0;
to_reuse = 0; /* explicit */
else if (!entry->in_pack)
to_reuse = 0; /* can't reuse what we don't have */
- else if (type == OBJ_REF_DELTA || type == OBJ_OFS_DELTA)
+ else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
/* check_object() decided it for us ... */
to_reuse = usable_delta;
/* ... but pack split may override that */
- else if (type != entry->in_pack_type)
+ else if (entry->type != entry->in_pack_type)
to_reuse = 0; /* pack has delta which is unusable */
else if (entry->delta)
to_reuse = 0; /* we want to pack afresh */
* and we do not need to deltify it.
*/
- if (!to_reuse) {
- no_reuse:
- if (!usable_delta) {
- buf = read_sha1_file(entry->idx.sha1, &type, &size);
- if (!buf)
- die("unable to read %s", sha1_to_hex(entry->idx.sha1));
- /*
- * make sure no cached delta data remains from a
- * previous attempt before a pack split occurred.
- */
- free(entry->delta_data);
- entry->delta_data = NULL;
- entry->z_delta_size = 0;
- } else if (entry->delta_data) {
- size = entry->delta_size;
- buf = entry->delta_data;
- entry->delta_data = NULL;
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
- OBJ_OFS_DELTA : OBJ_REF_DELTA;
- } else {
- buf = get_delta(entry);
- size = entry->delta_size;
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
- OBJ_OFS_DELTA : OBJ_REF_DELTA;
- }
-
- if (entry->z_delta_size)
- datalen = entry->z_delta_size;
- else
- datalen = do_compress(&buf, size);
-
- /*
- * The object header is a byte of 'type' followed by zero or
- * more bytes of length.
- */
- hdrlen = encode_in_pack_object_header(type, size, header);
-
- if (type == OBJ_OFS_DELTA) {
- /*
- * Deltas with relative base contain an additional
- * encoding of the relative offset for the delta
- * base from this object's position in the pack.
- */
- off_t ofs = entry->idx.offset - entry->delta->idx.offset;
- unsigned pos = sizeof(dheader) - 1;
- dheader[pos] = ofs & 127;
- while (ofs >>= 7)
- dheader[--pos] = 128 | (--ofs & 127);
- if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
- free(buf);
- return 0;
- }
- sha1write(f, header, hdrlen);
- sha1write(f, dheader + pos, sizeof(dheader) - pos);
- hdrlen += sizeof(dheader) - pos;
- } else if (type == OBJ_REF_DELTA) {
- /*
- * Deltas with a base reference contain
- * an additional 20 bytes for the base sha1.
- */
- if (limit && hdrlen + 20 + datalen + 20 >= limit) {
- free(buf);
- return 0;
- }
- sha1write(f, header, hdrlen);
- sha1write(f, entry->delta->idx.sha1, 20);
- hdrlen += 20;
- } else {
- if (limit && hdrlen + datalen + 20 >= limit) {
- free(buf);
- return 0;
- }
- sha1write(f, header, hdrlen);
- }
- sha1write(f, buf, datalen);
- free(buf);
- }
- else {
- struct packed_git *p = entry->in_pack;
- struct pack_window *w_curs = NULL;
- struct revindex_entry *revidx;
- off_t offset;
-
- if (entry->delta)
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
- OBJ_OFS_DELTA : OBJ_REF_DELTA;
- hdrlen = encode_in_pack_object_header(type, entry->size, header);
-
- offset = entry->in_pack_offset;
- revidx = find_pack_revindex(p, offset);
- datalen = revidx[1].offset - offset;
- if (!pack_to_stdout && p->index_version > 1 &&
- check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
- error("bad packed object CRC for %s", sha1_to_hex(entry->idx.sha1));
- unuse_pack(&w_curs);
- goto no_reuse;
- }
-
- offset += entry->in_pack_header_size;
- datalen -= entry->in_pack_header_size;
- if (!pack_to_stdout && p->index_version == 1 &&
- check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
- error("corrupt packed object for %s", sha1_to_hex(entry->idx.sha1));
- unuse_pack(&w_curs);
- goto no_reuse;
- }
+ if (!to_reuse)
+ len = write_no_reuse_object(f, entry, limit, usable_delta);
+ else
+ len = write_reuse_object(f, entry, limit, usable_delta);
+ if (!len)
+ return 0;
- if (type == OBJ_OFS_DELTA) {
- off_t ofs = entry->idx.offset - entry->delta->idx.offset;
- unsigned pos = sizeof(dheader) - 1;
- dheader[pos] = ofs & 127;
- while (ofs >>= 7)
- dheader[--pos] = 128 | (--ofs & 127);
- if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
- unuse_pack(&w_curs);
- return 0;
- }
- sha1write(f, header, hdrlen);
- sha1write(f, dheader + pos, sizeof(dheader) - pos);
- hdrlen += sizeof(dheader) - pos;
- reused_delta++;
- } else if (type == OBJ_REF_DELTA) {
- if (limit && hdrlen + 20 + datalen + 20 >= limit) {
- unuse_pack(&w_curs);
- return 0;
- }
- sha1write(f, header, hdrlen);
- sha1write(f, entry->delta->idx.sha1, 20);
- hdrlen += 20;
- reused_delta++;
- } else {
- if (limit && hdrlen + datalen + 20 >= limit) {
- unuse_pack(&w_curs);
- return 0;
- }
- sha1write(f, header, hdrlen);
- }
- copy_pack_data(f, p, &w_curs, offset, datalen);
- unuse_pack(&w_curs);
- reused++;
- }
if (usable_delta)
written_delta++;
written++;
if (!pack_to_stdout)
entry->idx.crc32 = crc32_end(f);
- return hdrlen + datalen;
+ return len;
}
enum write_one_status {
for (i = 0; i < nr_objects; i++) {
struct object_entry *entry = sorted_by_offset[i];
check_object(entry);
- if (big_file_threshold <= entry->size)
+ if (big_file_threshold < entry->size)
entry->no_try_delta = 1;
}
char base[FLEX_ARRAY]; /* more */
} *alt_odb_list;
extern void prepare_alt_odb(void);
+extern void read_info_alternates(const char * relative_base, int depth);
extern void add_to_alternates_file(const char *reference);
typedef int alt_odb_fn(struct alternate_object_database *, void *);
extern void foreach_alt_odb(alt_odb_fn, void*);
_git_log
}
-_git ()
+_main_git ()
{
local i c=1 command __git_dir
fi
}
-_gitk ()
+_main_gitk ()
{
__git_has_doubledash && return
|| complete -o default -o nospace -F $wrapper $1
}
-__git_complete git _git
-__git_complete gitk _gitk
+# wrapper for backwards compatibility
+_git ()
+{
+ __git_wrap_main_git
+}
+
+# wrapper for backwards compatibility
+_gitk ()
+{
+ __git_wrap_main_gitk
+}
+
+__git_complete git _main_git
+__git_complete gitk _main_gitk
# The following are necessary only for Cygwin, and only are needed
# when the user has tab-completed the executable name and consequently
# included the '.exe' suffix.
#
if [ Cygwin = "$(uname -o 2>/dev/null)" ]; then
-__git_complete git.exe _git
+__git_complete git.exe _main_git
fi
CC = gcc
RM = rm -f
-CFLAGS = -g -Wall
+CFLAGS = -g -O2 -Wall
+
+-include ../../../config.mak.autogen
+-include ../../../config.mak
git-credential-osxkeychain: git-credential-osxkeychain.o
- $(CC) -o $@ $< -Wl,-framework -Wl,Security
+ $(CC) $(CFLAGS) -o $@ $< $(LDFLAGS) -Wl,-framework -Wl,Security
git-credential-osxkeychain.o: git-credential-osxkeychain.c
$(CC) -c $(CFLAGS) $<
struct string_list p1 = STRING_LIST_INIT_DUP;
struct string_list p2 = STRING_LIST_INIT_DUP;
int i1, i2, ret = 0;
+ size_t len1 = 0, len2 = 0;
if (name1 && read_directory(name1, &p1))
return -1;
strbuf_addstr(&buffer1, name1);
if (buffer1.len && buffer1.buf[buffer1.len - 1] != '/')
strbuf_addch(&buffer1, '/');
+ len1 = buffer1.len;
}
if (name2) {
strbuf_addstr(&buffer2, name2);
if (buffer2.len && buffer2.buf[buffer2.len - 1] != '/')
strbuf_addch(&buffer2, '/');
+ len2 = buffer2.len;
}
for (i1 = i2 = 0; !ret && (i1 < p1.nr || i2 < p2.nr); ) {
const char *n1, *n2;
int comp;
+ strbuf_setlen(&buffer1, len1);
+ strbuf_setlen(&buffer2, len2);
+
if (i1 == p1.nr)
comp = 1;
else if (i2 == p2.nr)
}
string_list_clear(&p1, 0);
string_list_clear(&p2, 0);
- strbuf_reset(&buffer1);
- strbuf_reset(&buffer2);
+ strbuf_release(&buffer1);
+ strbuf_release(&buffer2);
return ret;
} else {
int fill_directory(struct dir_struct *dir, const char **pathspec)
{
- const char *path;
size_t len;
/*
* use that to optimize the directory walk
*/
len = common_prefix_len(pathspec);
- path = "";
-
- if (len)
- path = xmemdupz(*pathspec, len);
/* Read the directory and prune it */
- read_directory(dir, path, len, pathspec);
- if (*path)
- free((char *)path);
+ read_directory(dir, pathspec ? *pathspec : "", len, pathspec);
return len;
}
int check_only,
const struct path_simplify *simplify)
{
- DIR *fdir = opendir(*base ? base : ".");
+ DIR *fdir;
int contents = 0;
struct dirent *de;
struct strbuf path = STRBUF_INIT;
- if (!fdir)
- return 0;
-
strbuf_add(&path, base, baselen);
+ fdir = opendir(path.len ? path.buf : ".");
+ if (!fdir)
+ goto out;
+
while ((de = readdir(fdir)) != NULL) {
switch (treat_path(dir, de, &path, baselen, simplify)) {
case path_recurse:
}
contents++;
if (check_only)
- goto exit_early;
- else
- dir_add_name(dir, path.buf, path.len);
+ break;
+ dir_add_name(dir, path.buf, path.len);
}
-exit_early:
closedir(fdir);
+ out:
strbuf_release(&path);
return contents;
unset $(git rev-parse --local-env-vars)
}
-# Make sure we are in a valid repository of a vintage we understand,
-# if we require to be in a git repository.
-if test -z "$NONGIT_OK"
-then
- GIT_DIR=$(git rev-parse --git-dir) || exit
- if [ -z "$SUBDIRECTORY_OK" ]
- then
- test -z "$(git rev-parse --show-cdup)" || {
- exit=$?
- echo >&2 "You need to run this command from the toplevel of the working tree."
- exit $exit
- }
- fi
- test -n "$GIT_DIR" && GIT_DIR=$(cd "$GIT_DIR" && pwd) || {
- echo >&2 "Unable to determine absolute path of git directory"
- exit 1
- }
- : ${GIT_OBJECT_DIRECTORY="$GIT_DIR/objects"}
-fi
-# Fix some commands on Windows
+# Platform specific tweaks to work around some commands
case $(uname -s) in
*MINGW*)
# Windows has its own (incompatible) sort and find
return 1
}
esac
+
+# Make sure we are in a valid repository of a vintage we understand,
+# if we require to be in a git repository.
+if test -z "$NONGIT_OK"
+then
+ GIT_DIR=$(git rev-parse --git-dir) || exit
+ if [ -z "$SUBDIRECTORY_OK" ]
+ then
+ test -z "$(git rev-parse --show-cdup)" || {
+ exit=$?
+ echo >&2 "You need to run this command from the toplevel of the working tree."
+ exit $exit
+ }
+ fi
+ test -n "$GIT_DIR" && GIT_DIR=$(cd "$GIT_DIR" && pwd) || {
+ echo >&2 "Unable to determine absolute path of git directory"
+ exit 1
+ }
+ : ${GIT_OBJECT_DIRECTORY="$GIT_DIR/objects"}
+fi
#include "userdiff.h"
#include "xdiff-interface.h"
-void append_header_grep_pattern(struct grep_opt *opt, enum grep_header_field field, const char *pat)
+static struct grep_pat *create_grep_pat(const char *pat, size_t patlen,
+ const char *origin, int no,
+ enum grep_pat_token t,
+ enum grep_header_field field)
{
struct grep_pat *p = xcalloc(1, sizeof(*p));
- p->pattern = pat;
- p->patternlen = strlen(pat);
- p->origin = "header";
- p->no = 0;
- p->token = GREP_PATTERN_HEAD;
+ p->pattern = xmemdupz(pat, patlen);
+ p->patternlen = patlen;
+ p->origin = origin;
+ p->no = no;
+ p->token = t;
p->field = field;
- *opt->header_tail = p;
- opt->header_tail = &p->next;
+ return p;
+}
+
+static void do_append_grep_pat(struct grep_pat ***tail, struct grep_pat *p)
+{
+ **tail = p;
+ *tail = &p->next;
p->next = NULL;
+
+ switch (p->token) {
+ case GREP_PATTERN: /* atom */
+ case GREP_PATTERN_HEAD:
+ case GREP_PATTERN_BODY:
+ for (;;) {
+ struct grep_pat *new_pat;
+ size_t len = 0;
+ char *cp = p->pattern + p->patternlen, *nl = NULL;
+ while (++len <= p->patternlen) {
+ if (*(--cp) == '\n') {
+ nl = cp;
+ break;
+ }
+ }
+ if (!nl)
+ break;
+ new_pat = create_grep_pat(nl + 1, len - 1, p->origin,
+ p->no, p->token, p->field);
+ new_pat->next = p->next;
+ if (!p->next)
+ *tail = &new_pat->next;
+ p->next = new_pat;
+ *nl = '\0';
+ p->patternlen -= len;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void append_header_grep_pattern(struct grep_opt *opt,
+ enum grep_header_field field, const char *pat)
+{
+ struct grep_pat *p = create_grep_pat(pat, strlen(pat), "header", 0,
+ GREP_PATTERN_HEAD, field);
+ do_append_grep_pat(&opt->header_tail, p);
}
void append_grep_pattern(struct grep_opt *opt, const char *pat,
void append_grep_pat(struct grep_opt *opt, const char *pat, size_t patlen,
const char *origin, int no, enum grep_pat_token t)
{
- struct grep_pat *p = xcalloc(1, sizeof(*p));
- p->pattern = pat;
- p->patternlen = patlen;
- p->origin = origin;
- p->no = no;
- p->token = t;
- *opt->pattern_tail = p;
- opt->pattern_tail = &p->next;
- p->next = NULL;
+ struct grep_pat *p = create_grep_pat(pat, patlen, origin, no, t, 0);
+ do_append_grep_pat(&opt->pattern_tail, p);
}
struct grep_opt *grep_opt_dup(const struct grep_opt *opt)
free_pcre_regexp(p);
else
regfree(&p->regexp);
+ free(p->pattern);
break;
default:
break;
const char *origin;
int no;
enum grep_pat_token token;
- const char *pattern;
+ char *pattern;
size_t patternlen;
enum grep_header_field field;
regex_t regexp;
if (!split->mail_begin)
return status;
- for (cp = split->mail_begin - 2; line < cp; cp--)
+ for (cp = split->mail_begin - 2; line <= cp; cp--)
if (!isspace(*cp)) {
split->name_end = cp + 1;
break;
int key_len = strlen(key);
const char *line = commit->buffer;
- for (;;) {
+ while (line) {
const char *eol = strchr(line, '\n'), *next;
if (line == eol)
return NULL;
if (!eol) {
+ warning("malformed commit (header is missing newline): %s",
+ sha1_to_hex(commit->object.sha1));
eol = line + strlen(line);
next = NULL;
} else
}
line = next;
}
+ return NULL;
}
static char *replace_encoding_header(char *buf, const char *encoding)
mail_end = s.mail_end;
if (part == 'N' || part == 'E') { /* mailmap lookup */
- strlcpy(person_name, name_start, name_end - name_start + 1);
- strlcpy(person_mail, mail_start, mail_end - mail_start + 1);
+ snprintf(person_name, sizeof(person_name), "%.*s",
+ (int)(name_end - name_start), name_start);
+ snprintf(person_mail, sizeof(person_mail), "%.*s",
+ (int)(mail_end - mail_start), mail_start);
mailmap_name(person_mail, sizeof(person_mail), person_name, sizeof(person_name));
name_start = person_name;
name_end = name_start + strlen(person_name);
static void free_ref_entry(struct ref_entry *entry)
{
- if (entry->flag & REF_DIR)
- clear_ref_dir(get_ref_dir(entry));
+ if (entry->flag & REF_DIR) {
+ /*
+ * Do not use get_ref_dir() here, as that might
+ * trigger the reading of loose refs.
+ */
+ clear_ref_dir(&entry->u.subdir);
+ }
free(entry);
}
{
ALLOC_GROW(dir->entries, dir->nr + 1, dir->alloc);
dir->entries[dir->nr++] = entry;
+ /* optimize for the case that entries are added in order */
+ if (dir->nr == 1 ||
+ (dir->nr == dir->sorted + 1 &&
+ strcmp(dir->entries[dir->nr - 2]->name,
+ dir->entries[dir->nr - 1]->name) < 0))
+ dir->sorted = dir->nr;
}
/*
struct alternate_object_database *alt_odb_list;
static struct alternate_object_database **alt_odb_tail;
-static void read_info_alternates(const char * alternates, int depth);
static int git_open_noatime(const char *name);
/*
}
}
-static void read_info_alternates(const char * relative_base, int depth)
+void read_info_alternates(const char * relative_base, int depth)
{
char *map;
size_t mapsz;
case OI_LOOSE:
return loose;
case OI_PACKED:
- if (!oi->u.packed.is_delta && big_file_threshold <= size)
+ if (!oi->u.packed.is_delta && big_file_threshold < size)
return pack_non_delta;
/* fallthru */
default:
alt_odb->name[40] = '\0';
alt_odb->name[41] = '\0';
alt_odb_list = alt_odb;
+
+ /* add possible alternates from the submodule */
+ read_info_alternates(objects_directory.buf, 0);
prepare_alt_odb();
done:
strbuf_release(&objects_directory);
prove: pre-clean $(TEST_LINT)
@echo "*** prove ***"; GIT_CONFIG=.git/config $(PROVE) --exec '$(SHELL_PATH_SQ)' $(GIT_PROVE_OPTS) $(T) :: $(GIT_TEST_OPTS)
- $(MAKE) clean
+ $(MAKE) clean-except-prove-cache
$(T):
@echo "*** $@ ***"; GIT_CONFIG=.git/config '$(SHELL_PATH_SQ)' $@ $(GIT_TEST_OPTS)
pre-clean:
$(RM) -r test-results
-clean:
+clean-except-prove-cache:
$(RM) -r 'trash directory'.* test-results
$(RM) -r valgrind/bin
+
+clean: clean-except-prove-cache
$(RM) .prove
test-lint: test-lint-duplicates test-lint-executable
# And rebase G1..M1 onto E2
test_expect_success 'rebase two levels of merge' '
+ git checkout A1 &&
test_commit G1 &&
test_commit H1 &&
test_commit I1 &&
test_cmp expected actual
'
+test_expect_success 'diff --submodule with objects referenced by alternates' '
+ mkdir sub_alt &&
+ (cd sub_alt &&
+ git init &&
+ echo a >a &&
+ git add a &&
+ git commit -m a
+ ) &&
+ mkdir super &&
+ (cd super &&
+ git clone -s ../sub_alt sub &&
+ git init &&
+ git add sub &&
+ git commit -m "sub a"
+ ) &&
+ (cd sub_alt &&
+ sha1_before=$(git rev-parse --short HEAD) &&
+ echo b >b &&
+ git add b &&
+ git commit -m b &&
+ sha1_after=$(git rev-parse --short HEAD) &&
+ echo "Submodule sub $sha1_before..$sha1_after:
+ > b" >../expected
+ ) &&
+ (cd super &&
+ (cd sub &&
+ git fetch &&
+ git checkout origin/master
+ ) &&
+ git diff --submodule > ../actual
+ ) &&
+ test_cmp expected actual
+'
+
test_done
--- /dev/null
+#!/bin/sh
+
+test_description='diff --no-index'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ mkdir a &&
+ mkdir b &&
+ echo 1 >a/1 &&
+ echo 2 >a/2
+'
+
+test_expect_success 'git diff --no-index directories' '
+ git diff --no-index a b >cnt
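+ # each file exists only in "a"; its deletion diff is 7 lines, so 2 files give 14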
+ test $? = 1 && test_line_count = 14 cnt
+'
+
+test_done
test_line_count = 5 testg.txt
'
+test_expect_success 'single-character name is parsed correctly' '
+ git commit --author="a <a@example.com>" --allow-empty -m foo &&
+ echo "a <a@example.com>" >expect &&
+ git log -1 --format="%an <%ae>" >actual &&
+ test_cmp expect actual
+'
+
test_done
test_cmp expected actual
'
+test_expect_success 'grep, multiple patterns' '
+ git grep "$(cat patterns)" >actual &&
+ test_cmp expected actual
+'
+
cat >expected <<EOF
file:foo mmap bar
file:foo_mmap bar
local _cword
_words=( $1 )
(( _cword = ${#_words[@]} - 1 ))
- __git_wrap_git && print_comp
+ __git_wrap_main_git && print_comp
}
test_completion ()
#ifdef XDL_FAST_HASH
-#define ONEBYTES 0x0101010101010101ul
-#define NEWLINEBYTES 0x0a0a0a0a0a0a0a0aul
-#define HIGHBITS 0x8080808080808080ul
+#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
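+/* e.g. with a 64-bit "unsigned long", REPEAT_BYTE(0x01) is 0x0101010101010101 */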
+
+#define ONEBYTES REPEAT_BYTE(0x01)
+#define NEWLINEBYTES REPEAT_BYTE(0x0a)
+#define HIGHBITS REPEAT_BYTE(0x80)
/* Return the high bit set in the first byte that is a zero */
static inline unsigned long has_zero(unsigned long a)
* that works for the bytemasks without having to
* mask them first.
*/
- return mask * 0x0001020304050608 >> 56;
- } else {
/*
- * Modified Carl Chatfield G+ version for 32-bit *
+ * return mask * 0x0001020304050608 >> 56;
*
- * (a) gives us
- * -1 (0, ff), 0 (ffff) or 1 (ffffff)
- * (b) gives us
- * 0 for 0, 1 for (ff ffff ffffff)
- * (a+b+1) gives us
- * correct 0-3 bytemask count result
+ * Doing it like this avoids warnings on 32-bit machines.
*/
- long a = (mask - 256) >> 23;
- long b = mask & 1;
- return a + b + 1;
+ long a = (REPEAT_BYTE(0x01) / 0xff + 1);
+ return mask * a >> (sizeof(long) * 7);
+ } else {
+ /* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001 + mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
}
}