user-manual.xml: user-manual.txt user-manual.conf
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
- $(ASCIIDOC) $(ASCIIDOC_EXTRA) -b docbook -d book -o $@+ $< && \
+ $(ASCIIDOC) $(ASCIIDOC_EXTRA) -b docbook -d article -o $@+ $< && \
mv $@+ $@
technical/api-index.txt: technical/api-index-skel.txt \
diff.noprefix::
If set, 'git diff' does not show any source or destination prefix.
+diff.orderfile::
+ File indicating how to order files within a diff, using
+ one shell glob pattern per line.
+ Can be overridden by the '-O' option to linkgit:git-diff[1].
+
diff.renameLimit::
The number of files to consider when performing the copy/rename
detection; equivalent to the 'git diff' option '-l'.
-O<orderfile>::
Output the patch in the order specified in the
<orderfile>, which has one shell glob pattern per line.
+ This overrides the `diff.orderfile` configuration variable
+ (see linkgit:git-config[1]). To cancel `diff.orderfile`,
+ use `-O/dev/null`.
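
For illustration (the file name `typical.order` and its glob patterns are
made up), an order file and the ways to activate or cancel it might look
like this:

    $ printf '%s\n' 'Documentation/*' '*.h' '*.c' >typical.order
    $ git diff -Otypical.order                  # use the order file once
    $ git config diff.orderfile typical.order   # make it the default
    $ git diff -O/dev/null                      # ignore the configured file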
ifndef::git-format-patch[]
-R::
The size, in bytes, that the object takes up on disk. See the
note about on-disk sizes in the `CAVEATS` section below.
+`deltabase`::
+ If the object is stored as a delta on-disk, this expands to the
+ 40-hex sha1 of the delta base object. Otherwise, expands to the
+ null sha1 (40 zeroes). See `CAVEATS` below.
+
`rest`::
If this atom is used in the output string, input lines are split
at the first whitespace boundary. All characters before that
responsible for disk usage. The size of a packed non-delta object may be
much larger than the size of objects which delta against it, but the
choice of which object is the base and which is the delta is arbitrary
-and is subject to change during a repack. Note also that multiple copies
-of an object may be present in the object database; in this case, it is
-undefined which copy's size will be reported.
+and is subject to change during a repack.
+Note also that multiple copies of an object may be present in the object
+database; in this case, it is undefined which copy's size or delta base
+will be reported.
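
A quick way to see this in practice (the path is illustrative; the all-zero
answer assumes the object is not stored as a delta):

    $ echo HEAD:Makefile | git cat-file --batch-check='%(deltabase)'
    0000000000000000000000000000000000000000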
GIT
---
Checklist for Shrinking a Repository
------------------------------------
-git-filter-branch is often used to get rid of a subset of files,
+git-filter-branch can be used to get rid of a subset of files,
usually with some combination of `--index-filter` and
`--subdirectory-filter`. People expect the resulting repository to
be smaller than the original, but you need a few more steps to
(or if your git-gc is not new enough to support arguments to
`--prune`, use `git repack -ad; git prune` instead).
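
A hedged sketch of the usual sequence, assuming the goal is to drop a
directory named `unwanted/` (the path is illustrative only):

    $ git filter-branch --index-filter \
          'git rm -r --cached --ignore-unmatch unwanted/' HEAD
    $ git for-each-ref --format='%(refname)' refs/original/ |
          xargs -n 1 git update-ref -d
    $ git reflog expire --expire=now --all
    $ git gc --prune=now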
+Notes
+-----
+
+git-filter-branch allows you to make complex shell-scripted rewrites
+of your Git history, but you probably don't need this flexibility if
+you're simply _removing unwanted data_ like large files or passwords.
+For those operations you may want to consider
+link:http://rtyley.github.io/bfg-repo-cleaner/[The BFG Repo-Cleaner],
+a JVM-based alternative to git-filter-branch, typically at least
+10-50x faster for those use-cases, and with quite different
+characteristics:
+
+* Any particular version of a file is cleaned exactly _once_. The BFG,
+ unlike git-filter-branch, does not give you the opportunity to
+ handle a file differently based on where or when it was committed
+ within your history. This constraint gives the core performance
+ benefit of The BFG, and is well-suited to the task of cleansing bad
+ data - you don't care _where_ the bad data is, you just want it
+ _gone_.
+
+* By default The BFG takes full advantage of multi-core machines,
+ cleansing commit file-trees in parallel. git-filter-branch cleans
+  commits sequentially (i.e. in a single-threaded manner), though it
+  _is_ possible to write filters that include their own parallelism,
+ in the scripts executed against each commit.
+
+* The link:http://rtyley.github.io/bfg-repo-cleaner/#examples[command options]
+  are much more restrictive than git-filter-branch, and dedicated just
+  to the tasks of removing unwanted data, e.g.:
+ `--strip-blobs-bigger-than 1M`.
+
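
For illustration only (the jar and repository names are hypothetical), a
typical BFG invocation looks like:

    $ java -jar bfg.jar --strip-blobs-bigger-than 1M my-repo.git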
GIT
---
Part of the linkgit:git[1] suite
[verse]
'git replace' [-f] <object> <replacement>
'git replace' -d <object>...
-'git replace' -l [<pattern>]
+'git replace' [--format=<format>] [-l [<pattern>]]
DESCRIPTION
-----------
Typing "git replace" without arguments also lists all replace
refs.
+--format=<format>::
+ When listing, use the specified <format>, which can be one of
+ 'short', 'medium' and 'long'. When omitted, the format
+ defaults to 'short'.
+
+FORMATS
+-------
+
+The following formats are available:
+
+* 'short':
+ <replaced sha1>
+* 'medium':
+ <replaced sha1> -> <replacement sha1>
+* 'long':
+ <replaced sha1> (<replaced type>) -> <replacement sha1> (<replacement type>)
+
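
A brief usage sketch (the glob pattern below is illustrative):

    $ git replace --format=long                  # list all replace refs with types
    $ git replace -l --format=medium 'deadbeef*' # medium format, limited to a pattern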
CREATING REPLACEMENT OBJECTS
----------------------------
linkgit:git-clone[1] or (if it begins with ./ or ../) a location
relative to the superproject's origin repository.
+In addition, there are a number of optional keys:
+
submodule.<name>.update::
Defines what to do when the submodule is updated by the superproject.
If 'checkout' (the default), the new commit specified in the
A pathspec that begins with a colon `:` has special meaning. In the
short form, the leading colon `:` is followed by zero or more "magic
signature" letters (which optionally is terminated by another colon `:`),
-and the remainder is the pattern to match against the path. The optional
-colon that terminates the "magic signature" can be omitted if the pattern
-begins with a character that cannot be a "magic signature" and is not a
-colon.
+and the remainder is the pattern to match against the path.
+The "magic signature" consists of ASCII symbols that are neither
+alphanumeric, glob, regex special characters nor colon.
+The optional colon that terminates the "magic signature" can be
+omitted if the pattern begins with a character that does not belong to
+the "magic signature" symbol set and is not a colon.
+
In the long form, the leading colon `:` is followed by an open
parenthesis `(`, a comma-separated list of zero or more "magic words",
and a close parenthesis `)`, and the remainder is the pattern to match
against the path.
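
For example, the following two pathspecs are equivalent; the first uses the
short form with the `/` magic signature, the second the long form with the
corresponding magic word (the path is illustrative):

    $ git log -- ':/Makefile'
    $ git log -- ':(top)Makefile'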
+
-The "magic signature" consists of an ASCII symbol that is not
-alphanumeric.
+A pathspec with only a colon means "there is no pathspec". This form
+should not be combined with other pathspecs.
+
--
-top `/`;;
- The magic word `top` (mnemonic: `/`) makes the pattern match
- from the root of the working tree, even when you are running
- the command from inside a subdirectory.
+top;;
+ The magic word `top` (magic signature: `/`) makes the pattern
+ match from the root of the working tree, even when you are
+ running the command from inside a subdirectory.
literal;;
Wildcards in the pattern such as `*` or `?` are treated
- Other consecutive asterisks are considered invalid.
+
Glob magic is incompatible with literal magic.
+
+exclude;;
+ After a path matches any non-exclude pathspec, it will be run
+	through all exclude pathspecs (magic signature: `!`). If it
+	matches, the path is ignored (as illustrated below).
--
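
A short example of the exclude magic (the paths are made up); both commands
list commits touching anything except the `Documentation/` directory:

    $ git log --oneline -- . ':(exclude)Documentation'
    $ git log --oneline -- . ':!Documentation'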
-+
-Currently only the slash `/` is recognized as the "magic signature",
-but it is envisioned that we will support more types of magic in later
-versions of Git.
-+
-A pathspec with only a colon means "there is no pathspec". This form
-should not be combined with other pathspec.
[[def_parent]]parent::
A <<def_commit_object,commit object>> contains a (possibly empty) list
Git User Manual
-_______________
+===============
Git is a fast distributed revision control system.
*/
const char *prefix_filename(const char *pfx, int pfx_len, const char *arg)
{
- static char path[PATH_MAX];
+ static struct strbuf path = STRBUF_INIT;
#ifndef GIT_WINDOWS_NATIVE
if (!pfx_len || is_absolute_path(arg))
return arg;
- memcpy(path, pfx, pfx_len);
- strcpy(path + pfx_len, arg);
+ strbuf_reset(&path);
+ strbuf_add(&path, pfx, pfx_len);
+ strbuf_addstr(&path, arg);
#else
char *p;
/* don't add prefix to absolute paths, but still replace '\' by '/' */
+ strbuf_reset(&path);
if (is_absolute_path(arg))
pfx_len = 0;
else if (pfx_len)
- memcpy(path, pfx, pfx_len);
- strcpy(path + pfx_len, arg);
- for (p = path + pfx_len; *p; p++)
+ strbuf_add(&path, pfx, pfx_len);
+ strbuf_addstr(&path, arg);
+ for (p = path.buf + pfx_len; *p; p++)
if (*p == '\\')
*p = '/';
#endif
- return path;
+ return path.buf;
}
PATHSPEC_FROMTOP |
PATHSPEC_LITERAL |
PATHSPEC_GLOB |
- PATHSPEC_ICASE);
+ PATHSPEC_ICASE |
+ PATHSPEC_EXCLUDE);
for (i = 0; i < pathspec.nr; i++) {
const char *path = pathspec.items[i].match;
- if (!seen[i] &&
+ if (pathspec.items[i].magic & PATHSPEC_EXCLUDE)
+ continue;
+ if (!seen[i] && path[0] &&
((pathspec.items[i].magic &
(PATHSPEC_GLOB | PATHSPEC_ICASE)) ||
!file_exists(path))) {
static int read_ancestry(const char *graft_file)
{
FILE *fp = fopen(graft_file, "r");
- char buf[1024];
+ struct strbuf buf = STRBUF_INIT;
if (!fp)
return -1;
- while (fgets(buf, sizeof(buf), fp)) {
+ while (!strbuf_getwholeline(&buf, fp, '\n')) {
/* The format is just "Commit Parent1 Parent2 ...\n" */
- int len = strlen(buf);
- struct commit_graft *graft = read_graft_line(buf, len);
+ struct commit_graft *graft = read_graft_line(buf.buf, buf.len);
if (graft)
register_commit_graft(graft, 0);
}
fclose(fp);
+ strbuf_release(&buf);
return 0;
}
unsigned long size;
unsigned long disk_size;
const char *rest;
+ unsigned char delta_base_sha1[20];
/*
* If mark_query is true, we do not expand anything, but rather
data->split_on_whitespace = 1;
else if (data->rest)
strbuf_addstr(sb, data->rest);
+ } else if (is_atom("deltabase", atom, len)) {
+ if (data->mark_query)
+ data->info.delta_base_sha1 = data->delta_base_sha1;
+ else
+ strbuf_addstr(sb, sha1_to_hex(data->delta_base_sha1));
} else
die("unknown format element: %.*s", len, atom);
}
return 0;
}
- if (sha1_object_info_extended(data->sha1, &data->info) < 0) {
+ if (sha1_object_info_extended(data->sha1, &data->info, LOOKUP_REPLACE_OBJECT) < 0) {
printf("%s missing\n", obj_name);
fflush(stdout);
return 0;
static struct transport *gsecondary;
static const char *submodule_prefix = "";
static const char *recurse_submodules_default;
+static int shown_url = 0;
static int option_parse_recurse_submodules(const struct option *opt,
const char *arg, int unset)
{
FILE *fp;
struct commit *commit;
- int url_len, i, shown_url = 0, rc = 0;
+ int url_len, i, rc = 0;
struct strbuf note = STRBUF_INIT;
const char *what, *kind;
struct ref *rm;
return ret;
}
-static int prune_refs(struct refspec *refs, int ref_count, struct ref *ref_map)
+static int prune_refs(struct refspec *refs, int ref_count, struct ref *ref_map,
+ const char *raw_url)
{
- int result = 0;
+ int url_len, i, result = 0;
struct ref *ref, *stale_refs = get_stale_heads(refs, ref_count, ref_map);
+ char *url;
const char *dangling_msg = dry_run
? _(" (%s will become dangling)")
: _(" (%s has become dangling)");
+ if (raw_url)
+ url = transport_anonymize_url(raw_url);
+ else
+ url = xstrdup("foreign");
+
+ url_len = strlen(url);
+	for (i = url_len - 1; 0 <= i && url[i] == '/'; i--)
+ ;
+
+ url_len = i + 1;
+ if (4 < i && !strncmp(".git", url + i - 3, 4))
+ url_len = i - 3;
+
for (ref = stale_refs; ref; ref = ref->next) {
if (!dry_run)
result |= delete_ref(ref->name, NULL, 0);
+ if (verbosity >= 0 && !shown_url) {
+ fprintf(stderr, _("From %.*s\n"), url_len, url);
+ shown_url = 1;
+ }
if (verbosity >= 0) {
fprintf(stderr, " x %-*s %-*s -> %s\n",
TRANSPORT_SUMMARY(_("[deleted]")),
warn_dangling_symref(stderr, dangling_msg, ref->name);
}
}
+ free(url);
free_refs(stale_refs);
return result;
}
if (tags == TAGS_DEFAULT && autotags)
transport_set_option(transport, TRANS_OPT_FOLLOWTAGS, "1");
- if (fetch_refs(transport, ref_map)) {
- free_refs(ref_map);
- retcode = 1;
- goto cleanup;
- }
if (prune) {
/*
* We only prune based on refspecs specified
* don't care whether --tags was specified.
*/
if (ref_count) {
- prune_refs(refs, ref_count, ref_map);
+ prune_refs(refs, ref_count, ref_map, transport->url);
} else {
prune_refs(transport->remote->fetch,
transport->remote->fetch_refspec_nr,
- ref_map);
+ ref_map,
+ transport->url);
}
}
+ if (fetch_refs(transport, ref_map)) {
+ free_refs(ref_map);
+ retcode = 1;
+ goto cleanup;
+ }
free_refs(ref_map);
/* if neither --no-tags nor --tags was specified, do automated tag
time(NULL) - st.st_mtime <= 12 * 3600 &&
fscanf(fp, "%"PRIuMAX" %127c", &pid, locking_host) == 2 &&
/* be gentle to concurrent "gc" on remote hosts */
- (strcmp(locking_host, my_host) || !kill(pid, 0));
+ (strcmp(locking_host, my_host) || !kill(pid, 0) || errno == EPERM);
if (fp != NULL)
fclose(fp);
if (should_exit) {
return r;
}
-static int handle_octopus(int count, const char **args, int reduce, int show_all)
+static int handle_independent(int count, const char **args)
{
struct commit_list *revs = NULL;
struct commit_list *result;
int i;
- if (reduce)
- show_all = 1;
+ for (i = count - 1; i >= 0; i--)
+ commit_list_insert(get_commit_reference(args[i]), &revs);
+
+ result = reduce_heads(revs);
+ if (!result)
+ return 1;
+
+ while (result) {
+ printf("%s\n", sha1_to_hex(result->item->object.sha1));
+ result = result->next;
+ }
+ return 0;
+}
+
+static int handle_octopus(int count, const char **args, int show_all)
+{
+ struct commit_list *revs = NULL;
+ struct commit_list *result;
+ int i;
for (i = count - 1; i >= 0; i--)
commit_list_insert(get_commit_reference(args[i]), &revs);
- result = reduce ? reduce_heads(revs) : get_octopus_merge_bases(revs);
+ result = reduce_heads(get_octopus_merge_bases(revs));
if (!result)
return 1;
if (cmdmode == 'r' && show_all)
die("--independent cannot be used with --all");
- if (cmdmode == 'r' || cmdmode == 'o')
- return handle_octopus(argc, argv, cmdmode == 'r', show_all);
+ if (cmdmode == 'o')
+ return handle_octopus(argc, argv, show_all);
+
+ if (cmdmode == 'r')
+ return handle_independent(argc, argv);
if (cmdmode == 'f') {
if (argc < 1 || 2 < argc)
f = create_tmp_packfile(&pack_tmp_name);
offset = write_pack_header(f, nr_remaining);
- if (!offset)
- die_errno("unable to write pack header");
nr_written = 0;
for (; i < nr_objects; i++) {
struct object_entry *e = write_order[i];
static struct progress *progress;
-static void prune_dir(int i, DIR *dir, char *pathname, int len, int opts)
+static void prune_dir(int i, DIR *dir, struct strbuf *pathname, int opts)
{
struct dirent *de;
char hex[40];
+ int top_len = pathname->len;
sprintf(hex, "%02x", i);
while ((de = readdir(dir)) != NULL) {
unsigned char sha1[20];
if (strlen(de->d_name) != 38)
continue;
- memcpy(hex+2, de->d_name, 38);
+ memcpy(hex + 2, de->d_name, 38);
if (get_sha1_hex(hex, sha1))
continue;
if (!has_sha1_pack(sha1))
continue;
- memcpy(pathname + len, de->d_name, 38);
+
+ strbuf_add(pathname, de->d_name, 38);
if (opts & PRUNE_PACKED_DRY_RUN)
- printf("rm -f %s\n", pathname);
+ printf("rm -f %s\n", pathname->buf);
else
- unlink_or_warn(pathname);
+ unlink_or_warn(pathname->buf);
display_progress(progress, i + 1);
+ strbuf_setlen(pathname, top_len);
}
}
void prune_packed_objects(int opts)
{
int i;
- static char pathname[PATH_MAX];
const char *dir = get_object_directory();
- int len = strlen(dir);
+ struct strbuf pathname = STRBUF_INIT;
+ int top_len;
+ strbuf_addstr(&pathname, dir);
if (opts & PRUNE_PACKED_VERBOSE)
progress = start_progress_delay("Removing duplicate objects",
256, 95, 2);
- if (len > PATH_MAX - 42)
- die("impossible object directory");
- memcpy(pathname, dir, len);
- if (len && pathname[len-1] != '/')
- pathname[len++] = '/';
+ if (pathname.len && pathname.buf[pathname.len - 1] != '/')
+ strbuf_addch(&pathname, '/');
+
+ top_len = pathname.len;
for (i = 0; i < 256; i++) {
DIR *d;
display_progress(progress, i + 1);
- sprintf(pathname + len, "%02x/", i);
- d = opendir(pathname);
+ strbuf_setlen(&pathname, top_len);
+ strbuf_addf(&pathname, "%02x/", i);
+ d = opendir(pathname.buf);
if (!d)
continue;
- prune_dir(i, d, pathname, len + 3, opts);
+ prune_dir(i, d, &pathname, opts);
closedir(d);
- pathname[len + 2] = '\0';
- rmdir(pathname);
+ strbuf_setlen(&pathname, top_len + 2);
+ rmdir(pathname.buf);
}
stop_progress(&progress);
}
static unsigned long expire;
static int show_progress = -1;
-static int prune_tmp_object(const char *path, const char *filename)
+static int prune_tmp_file(const char *fullpath)
{
- const char *fullpath = mkpath("%s/%s", path, filename);
struct stat st;
if (lstat(fullpath, &st))
return error("Could not stat '%s'", fullpath);
return 0;
}
-static int prune_object(char *path, const char *filename, const unsigned char *sha1)
+static int prune_object(const char *fullpath, const unsigned char *sha1)
{
- const char *fullpath = mkpath("%s/%s", path, filename);
struct stat st;
if (lstat(fullpath, &st))
return error("Could not stat '%s'", fullpath);
return 0;
}
-static int prune_dir(int i, char *path)
+static int prune_dir(int i, struct strbuf *path)
{
- DIR *dir = opendir(path);
+ size_t baselen = path->len;
+ DIR *dir = opendir(path->buf);
struct dirent *de;
if (!dir)
if (lookup_object(sha1))
continue;
- prune_object(path, de->d_name, sha1);
+ strbuf_addf(path, "/%s", de->d_name);
+ prune_object(path->buf, sha1);
+ strbuf_setlen(path, baselen);
continue;
}
if (starts_with(de->d_name, "tmp_obj_")) {
- prune_tmp_object(path, de->d_name);
+ strbuf_addf(path, "/%s", de->d_name);
+ prune_tmp_file(path->buf);
+ strbuf_setlen(path, baselen);
continue;
}
- fprintf(stderr, "bad sha1 file: %s/%s\n", path, de->d_name);
+ fprintf(stderr, "bad sha1 file: %s/%s\n", path->buf, de->d_name);
}
closedir(dir);
if (!show_only)
- rmdir(path);
+ rmdir(path->buf);
return 0;
}
static void prune_object_dir(const char *path)
{
+ struct strbuf buf = STRBUF_INIT;
+ size_t baselen;
int i;
+
+ strbuf_addstr(&buf, path);
+ strbuf_addch(&buf, '/');
+ baselen = buf.len;
+
for (i = 0; i < 256; i++) {
- static char dir[4096];
- sprintf(dir, "%s/%02x", path, i);
- prune_dir(i, dir);
+ strbuf_addf(&buf, "%02x", i);
+ prune_dir(i, &buf);
+ strbuf_setlen(&buf, baselen);
}
}
}
while ((de = readdir(dir)) != NULL)
if (starts_with(de->d_name, "tmp_"))
- prune_tmp_object(path, de->d_name);
+ prune_tmp_file(mkpath("%s/%s", path, de->d_name));
closedir(dir);
}
static const char * const git_replace_usage[] = {
N_("git replace [-f] <object> <replacement>"),
N_("git replace -d <object>..."),
- N_("git replace -l [<pattern>]"),
+ N_("git replace [--format=<format>] [-l [<pattern>]]"),
NULL
};
+enum replace_format {
+ REPLACE_FORMAT_SHORT,
+ REPLACE_FORMAT_MEDIUM,
+ REPLACE_FORMAT_LONG
+};
+
+struct show_data {
+ const char *pattern;
+ enum replace_format format;
+};
+
static int show_reference(const char *refname, const unsigned char *sha1,
int flag, void *cb_data)
{
- const char *pattern = cb_data;
+ struct show_data *data = cb_data;
- if (!fnmatch(pattern, refname, 0))
- printf("%s\n", refname);
+ if (!fnmatch(data->pattern, refname, 0)) {
+ if (data->format == REPLACE_FORMAT_SHORT)
+ printf("%s\n", refname);
+ else if (data->format == REPLACE_FORMAT_MEDIUM)
+ printf("%s -> %s\n", refname, sha1_to_hex(sha1));
+ else { /* data->format == REPLACE_FORMAT_LONG */
+ unsigned char object[20];
+ enum object_type obj_type, repl_type;
+
+ if (get_sha1(refname, object))
+ return error("Failed to resolve '%s' as a valid ref.", refname);
+
+ obj_type = sha1_object_info(object, NULL);
+ repl_type = sha1_object_info(sha1, NULL);
+
+ printf("%s (%s) -> %s (%s)\n", refname, typename(obj_type),
+ sha1_to_hex(sha1), typename(repl_type));
+ }
+ }
return 0;
}
-static int list_replace_refs(const char *pattern)
+static int list_replace_refs(const char *pattern, const char *format)
{
+ struct show_data data;
+
if (pattern == NULL)
pattern = "*";
+ data.pattern = pattern;
- for_each_replace_ref(show_reference, (void *) pattern);
+ if (format == NULL || *format == '\0' || !strcmp(format, "short"))
+ data.format = REPLACE_FORMAT_SHORT;
+ else if (!strcmp(format, "medium"))
+ data.format = REPLACE_FORMAT_MEDIUM;
+ else if (!strcmp(format, "long"))
+ data.format = REPLACE_FORMAT_LONG;
+ else
+ die("invalid replace format '%s'\n"
+ "valid formats are 'short', 'medium' and 'long'\n",
+ format);
+
+ for_each_replace_ref(show_reference, (void *) &data);
return 0;
}
int cmd_replace(int argc, const char **argv, const char *prefix)
{
int list = 0, delete = 0, force = 0;
+ const char *format = NULL;
struct option options[] = {
OPT_BOOL('l', "list", &list, N_("list replace refs")),
OPT_BOOL('d', "delete", &delete, N_("delete replace refs")),
OPT_BOOL('f', "force", &force, N_("replace the ref if it exists")),
+ OPT_STRING(0, "format", &format, N_("format"), N_("use this format")),
OPT_END()
};
+ read_replace_refs = 0;
+
argc = parse_options(argc, argv, prefix, options, git_replace_usage, 0);
if (list && delete)
usage_msg_opt("-l and -d cannot be used together",
git_replace_usage, options);
+ if (format && delete)
+ usage_msg_opt("--format and -d cannot be used together",
+ git_replace_usage, options);
+
if (force && (list || delete))
usage_msg_opt("-f cannot be used with -d or -l",
git_replace_usage, options);
if (argc != 2)
usage_msg_opt("bad number of arguments",
git_replace_usage, options);
+ if (format)
+ usage_msg_opt("--format cannot be used when not listing",
+ git_replace_usage, options);
return replace_object(argv[0], argv[1], force);
}
usage_msg_opt("-f needs some arguments",
git_replace_usage, options);
- return list_replace_refs(argv[0]);
+ return list_replace_refs(argv[0], format);
}
int offset_1st_component(const char *path);
/* object replacement */
-#define READ_SHA1_FILE_REPLACE 1
+#define LOOKUP_REPLACE_OBJECT 1
extern void *read_sha1_file_extended(const unsigned char *sha1, enum object_type *type, unsigned long *size, unsigned flag);
static inline void *read_sha1_file(const unsigned char *sha1, enum object_type *type, unsigned long *size)
{
- return read_sha1_file_extended(sha1, type, size, READ_SHA1_FILE_REPLACE);
+ return read_sha1_file_extended(sha1, type, size, LOOKUP_REPLACE_OBJECT);
}
extern const unsigned char *do_lookup_replace_object(const unsigned char *sha1);
static inline const unsigned char *lookup_replace_object(const unsigned char *sha1)
return sha1;
return do_lookup_replace_object(sha1);
}
+static inline const unsigned char *lookup_replace_object_extended(const unsigned char *sha1, unsigned flag)
+{
+ if (!(flag & LOOKUP_REPLACE_OBJECT))
+ return sha1;
+ return lookup_replace_object(sha1);
+}
/* Read and unpack a sha1 file into memory, write memory to a sha1 file */
extern int sha1_object_info(const unsigned char *, unsigned long *);
enum object_type *typep;
unsigned long *sizep;
unsigned long *disk_sizep;
+ unsigned char *delta_base_sha1;
/* Response */
enum {
} packed;
} u;
};
-extern int sha1_object_info_extended(const unsigned char *, struct object_info *);
+extern int sha1_object_info_extended(const unsigned char *, struct object_info *, unsigned flags);
/* Dumb servers support */
extern int update_server_info(int);
static int read_graft_file(const char *graft_file)
{
FILE *fp = fopen(graft_file, "r");
- char buf[1024];
+ struct strbuf buf = STRBUF_INIT;
if (!fp)
return -1;
- while (fgets(buf, sizeof(buf), fp)) {
+ while (!strbuf_getwholeline(&buf, fp, '\n')) {
/* The format is just "Commit Parent1 Parent2 ...\n" */
- int len = strlen(buf);
- struct commit_graft *graft = read_graft_line(buf, len);
+ struct commit_graft *graft = read_graft_line(buf.buf, buf.len);
if (!graft)
continue;
if (register_commit_graft(graft, 1))
- error("duplicate graft data: %s", buf);
+ error("duplicate graft data: %s", buf.buf);
}
fclose(fp);
+ strbuf_release(&buf);
return 0;
}
struct commit_list *get_octopus_merge_bases(struct commit_list *in)
{
struct commit_list *i, *j, *k, *ret = NULL;
- struct commit_list **pptr = &ret;
- for (i = in; i; i = i->next) {
- if (!ret)
- pptr = &commit_list_insert(i->item, pptr)->next;
- else {
- struct commit_list *new = NULL, *end = NULL;
-
- for (j = ret; j; j = j->next) {
- struct commit_list *bases;
- bases = get_merge_bases(i->item, j->item, 1);
- if (!new)
- new = bases;
- else
- end->next = bases;
- for (k = bases; k; k = k->next)
- end = k;
- }
- ret = new;
+ if (!in)
+ return ret;
+
+ commit_list_insert(in->item, &ret);
+
+ for (i = in->next; i; i = i->next) {
+ struct commit_list *new = NULL, *end = NULL;
+
+ for (j = ret; j; j = j->next) {
+ struct commit_list *bases;
+ bases = get_merge_bases(i->item, j->item, 1);
+ if (!new)
+ new = bases;
+ else
+ end->next = bases;
+ for (k = bases; k; k = k->next)
+ end = k;
}
+ ret = new;
}
return ret;
}
}
}
-int commit_tree(const struct strbuf *msg, unsigned char *tree,
+int commit_tree(const struct strbuf *msg, const unsigned char *tree,
struct commit_list *parents, unsigned char *ret,
const char *author, const char *sign_commit)
{
"You may want to amend it after fixing the message, or set the config\n"
"variable i18n.commitencoding to the encoding your project uses.\n";
-int commit_tree_extended(const struct strbuf *msg, unsigned char *tree,
+int commit_tree_extended(const struct strbuf *msg, const unsigned char *tree,
struct commit_list *parents, unsigned char *ret,
const char *author, const char *sign_commit,
struct commit_extra_header *extra)
extern void append_merge_tag_headers(struct commit_list *parents,
struct commit_extra_header ***tail);
-extern int commit_tree(const struct strbuf *msg, unsigned char *tree,
+extern int commit_tree(const struct strbuf *msg, const unsigned char *tree,
struct commit_list *parents, unsigned char *ret,
const char *author, const char *sign_commit);
-extern int commit_tree_extended(const struct strbuf *msg, unsigned char *tree,
+extern int commit_tree_extended(const struct strbuf *msg, const unsigned char *tree,
struct commit_list *parents, unsigned char *ret,
const char *author, const char *sign_commit,
struct commit_extra_header *);
BASIC_LDFLAGS += -L/usr/local/lib
HAVE_PATHS_H = YesPlease
endif
+ifeq ($(uname_S),MirBSD)
+ NO_STRCASESTR = YesPlease
+ NO_MEMMEM = YesPlease
+ USE_ST_TIMESPEC = YesPlease
+ NEEDS_LIBICONV = YesPlease
+ HAVE_PATHS_H = YesPlease
+endif
ifeq ($(uname_S),NetBSD)
ifeq ($(shell expr "$(uname_R)" : '[01]\.'),2)
NEEDS_LIBICONV = YesPlease
}
fi
-__gitcompadd ()
+__gitcompappend ()
{
- local i=0
+ local i=${#COMPREPLY[@]}
for x in $1; do
if [[ "$x" == "$3"* ]]; then
COMPREPLY[i++]="$2$x$4"
done
}
+__gitcompadd ()
+{
+ COMPREPLY=()
+ __gitcompappend "$@"
+}
+
# Generates completion reply, appending a space to possible completion words,
# if necessary.
# It accepts 1 to 4 arguments:
esac
}
+# Variation of __gitcomp_nl () that appends to the existing list of
+# completion candidates, COMPREPLY.
+__gitcomp_nl_append ()
+{
+ local IFS=$'\n'
+ __gitcompappend "$1" "${2-}" "${3-$cur}" "${4- }"
+}
+
# Generates completion reply from newline-separated possible completion words
# by appending a space to all of them.
# It accepts 1 to 4 arguments:
# appended.
__gitcomp_nl ()
{
- local IFS=$'\n'
- __gitcompadd "$1" "${2-}" "${3-$cur}" "${4- }"
+ COMPREPLY=()
+ __gitcomp_nl_append "$@"
}
# Generates completion reply with compgen from newline-separated possible
branch.*)
local pfx="${cur%.*}." cur_="${cur#*.}"
__gitcomp_nl "$(__git_heads)" "$pfx" "$cur_" "."
+ __gitcomp_nl_append $'autosetupmerge\nautosetuprebase\n' "$pfx" "$cur_"
return
;;
guitool.*.*)
remote.*)
local pfx="${cur%.*}." cur_="${cur#*.}"
__gitcomp_nl "$(__git_remotes)" "$pfx" "$cur_" "."
+ __gitcomp_nl_append "pushdefault" "$pfx" "$cur_"
return
;;
url.*.*)
local -a locations
local e
locations=(
+ $(dirname ${funcsourcetrace[1]%:*})/git-completion.bash
'/etc/bash_completion.d/git' # fedora, old debian
'/usr/share/bash-completion/completions/git' # arch, ubuntu, new debian
'/usr/share/bash-completion/git' # gentoo
- $(dirname ${funcsourcetrace[1]%:*})/git-completion.bash
)
for e in $locations; do
test -f $e && script="$e" && break
compadd -Q -S "${4- }" -p "${2-}" -- ${=1} && _ret=0
}
+__gitcomp_nl_append ()
+{
+ emulate -L zsh
+
+ local IFS=$'\n'
+ compadd -Q -S "${4- }" -p "${2-}" -- ${=1} && _ret=0
+}
+
__gitcomp_file ()
{
emulate -L zsh
return fd;
}
-int sha1write(struct sha1file *f, const void *buf, unsigned int count)
+void sha1write(struct sha1file *f, const void *buf, unsigned int count)
{
while (count) {
unsigned offset = f->offset;
}
f->offset = offset;
}
- return 0;
}
struct sha1file *sha1fd(int fd, const char *name)
extern struct sha1file *sha1fd_check(const char *name);
extern struct sha1file *sha1fd_throughput(int fd, const char *name, struct progress *tp);
extern int sha1close(struct sha1file *, unsigned char *, unsigned int);
-extern int sha1write(struct sha1file *, const void *, unsigned int);
+extern void sha1write(struct sha1file *, const void *, unsigned int);
extern void sha1flush(struct sha1file *f);
extern void crc32_begin(struct sha1file *);
extern uint32_t crc32_end(struct sha1file *);
make_service_overridable(arg + 18, 0);
continue;
}
- if (starts_with(arg, "--informative-errors")) {
+ if (!strcmp(arg, "--informative-errors")) {
informative_errors = 1;
continue;
}
- if (starts_with(arg, "--no-informative-errors")) {
+ if (!strcmp(arg, "--no-informative-errors")) {
informative_errors = 0;
continue;
}
static int diff_context_default = 3;
static const char *diff_word_regex_cfg;
static const char *external_diff_cmd_cfg;
+static const char *diff_order_file_cfg;
int diff_auto_refresh_index = 1;
static int diff_mnemonic_prefix;
static int diff_no_prefix;
return git_config_string(&external_diff_cmd_cfg, var, value);
if (!strcmp(var, "diff.wordregex"))
return git_config_string(&diff_word_regex_cfg, var, value);
+ if (!strcmp(var, "diff.orderfile"))
+ return git_config_pathname(&diff_order_file_cfg, var, value);
if (!strcmp(var, "diff.ignoresubmodules"))
handle_ignore_submodules_arg(&default_diff_options, value);
options->detect_rename = diff_detect_rename_default;
options->xdl_opts |= diff_algorithm;
+ options->orderfile = diff_order_file_cfg;
+
if (diff_no_prefix) {
options->a_prefix = options->b_prefix = "";
} else if (!diff_mnemonic_prefix) {
static void prepare_order(const char *orderfile)
{
- int fd, cnt, pass;
+ int cnt, pass;
+ struct strbuf sb = STRBUF_INIT;
void *map;
char *cp, *endp;
- struct stat st;
- size_t sz;
+ ssize_t sz;
if (order)
return;
- fd = open(orderfile, O_RDONLY);
- if (fd < 0)
- return;
- if (fstat(fd, &st)) {
- close(fd);
- return;
- }
- sz = xsize_t(st.st_size);
- map = mmap(NULL, sz, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
- close(fd);
- if (map == MAP_FAILED)
- return;
+ sz = strbuf_read_file(&sb, orderfile, 0);
+ if (sz < 0)
+ die_errno(_("failed to read orderfile '%s'"), orderfile);
+ map = strbuf_detach(&sb, NULL);
endp = (char *) map + sz;
+
for (pass = 0; pass < 2; pass++) {
cnt = 0;
cp = map;
static int match_order(const char *path)
{
int i;
- char p[PATH_MAX];
+ static struct strbuf p = STRBUF_INIT;
for (i = 0; i < order_cnt; i++) {
- strcpy(p, path);
- while (p[0]) {
+ strbuf_reset(&p);
+ strbuf_addstr(&p, path);
+ while (p.buf[0]) {
char *cp;
- if (!fnmatch(order[i], p, 0))
+ if (!fnmatch(order[i], p.buf, 0))
return i;
- cp = strrchr(p, '/');
+ cp = strrchr(p.buf, '/');
if (!cp)
break;
*cp = 0;
PATHSPEC_MAXDEPTH |
PATHSPEC_LITERAL |
PATHSPEC_GLOB |
- PATHSPEC_ICASE);
+ PATHSPEC_ICASE |
+ PATHSPEC_EXCLUDE);
for (n = 0; n < pathspec->nr; n++) {
size_t i = 0, len = 0, item_len;
+ if (pathspec->items[n].magic & PATHSPEC_EXCLUDE)
+ continue;
if (pathspec->items[n].magic & PATHSPEC_ICASE)
item_len = pathspec->items[n].prefix;
else
* pathspec did not match any names, which could indicate that the
* user mistyped the nth pathspec.
*/
-int match_pathspec_depth(const struct pathspec *ps,
- const char *name, int namelen,
- int prefix, char *seen)
+static int match_pathspec_depth_1(const struct pathspec *ps,
+ const char *name, int namelen,
+ int prefix, char *seen,
+ int exclude)
{
int i, retval = 0;
PATHSPEC_MAXDEPTH |
PATHSPEC_LITERAL |
PATHSPEC_GLOB |
- PATHSPEC_ICASE);
+ PATHSPEC_ICASE |
+ PATHSPEC_EXCLUDE);
if (!ps->nr) {
if (!ps->recursive ||
for (i = ps->nr - 1; i >= 0; i--) {
int how;
+
+ if ((!exclude && ps->items[i].magic & PATHSPEC_EXCLUDE) ||
+ ( exclude && !(ps->items[i].magic & PATHSPEC_EXCLUDE)))
+ continue;
+
if (seen && seen[i] == MATCHED_EXACTLY)
continue;
+ /*
+ * Make exclude patterns optional and never report
+ * "pathspec ':(exclude)foo' matches no files"
+ */
+ if (seen && ps->items[i].magic & PATHSPEC_EXCLUDE)
+ seen[i] = MATCHED_FNMATCH;
how = match_pathspec_item(ps->items+i, prefix, name, namelen);
if (ps->recursive &&
(ps->magic & PATHSPEC_MAXDEPTH) &&
return retval;
}
+int match_pathspec_depth(const struct pathspec *ps,
+ const char *name, int namelen,
+ int prefix, char *seen)
+{
+ int positive, negative;
+ positive = match_pathspec_depth_1(ps, name, namelen, prefix, seen, 0);
+ if (!(ps->magic & PATHSPEC_EXCLUDE) || !positive)
+ return positive;
+ negative = match_pathspec_depth_1(ps, name, namelen, prefix, seen, 1);
+ return negative ? 0 : positive;
+}
+
/*
* Return the length of the "simple" part of a path match limiter.
*/
PATHSPEC_MAXDEPTH |
PATHSPEC_LITERAL |
PATHSPEC_GLOB |
- PATHSPEC_ICASE);
+ PATHSPEC_ICASE |
+ PATHSPEC_EXCLUDE);
if (has_symlink_leading_path(path, len))
return dir->nr;
+ /*
+ * exclude patterns are treated like positive ones in
+ * create_simplify. Usually exclude patterns should be a
+	 * subset of positive ones, which has no impact on
+ * create_simplify().
+ */
simplify = create_simplify(pathspec ? pathspec->_raw : NULL);
if (!len || treat_leading_path(dir, path, len, simplify))
read_directory_recursive(dir, path, len, 0, simplify);
# endif
#elif !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__USLC__) && \
!defined(_M_UNIX) && !defined(__sgi) && !defined(__DragonFly__) && \
- !defined(__TANDEM) && !defined(__QNX__)
+ !defined(__TANDEM) && !defined(__QNX__) && !defined(__MirBSD__)
#define _XOPEN_SOURCE 600 /* glibc2 and AIX 5.3L need 500, OpenBSD needs 600 for S_ISLNK() */
#define _XOPEN_SOURCE_EXTENDED 1 /* AIX 5.3L needs this */
#endif
obj = ((struct tag *)obj)->tagged;
if (!obj)
break;
+ if (!obj->parsed)
+ parse_object(obj->sha1);
add_name_decoration(DECORATION_REF_TAG, refname, obj);
}
return 0;
hdr.hdr_signature = htonl(PACK_SIGNATURE);
hdr.hdr_version = htonl(PACK_VERSION);
hdr.hdr_entries = htonl(nr_entries);
- if (sha1write(f, &hdr, sizeof(hdr)))
- return 0;
+ sha1write(f, &hdr, sizeof(hdr));
return sizeof(hdr);
}
{ PATHSPEC_LITERAL, 0, "literal" },
{ PATHSPEC_GLOB, '\0', "glob" },
{ PATHSPEC_ICASE, '\0', "icase" },
+ { PATHSPEC_EXCLUDE, '!', "exclude" },
};
+static void prefix_short_magic(struct strbuf *sb, int prefixlen,
+ unsigned short_magic)
+{
+ int i;
+ strbuf_addstr(sb, ":(");
+ for (i = 0; i < ARRAY_SIZE(pathspec_magic); i++)
+ if (short_magic & pathspec_magic[i].bit) {
+ if (sb->buf[sb->len - 1] != '(')
+ strbuf_addch(sb, ',');
+ strbuf_addstr(sb, pathspec_magic[i].name);
+ }
+ strbuf_addf(sb, ",prefix:%d)", prefixlen);
+}
+
/*
* Take an element of a pathspec and check for magic signatures.
* Append the result to the prefix. Return the magic bitmap.
*/
if (flags & PATHSPEC_PREFIX_ORIGIN) {
struct strbuf sb = STRBUF_INIT;
- const char *start = elt;
if (prefixlen && !literal_global) {
/* Preserve the actual prefix length of each pattern */
if (short_magic)
- die("BUG: prefixing on short magic is not supported");
+ prefix_short_magic(&sb, prefixlen, short_magic);
else if (long_magic_end) {
- strbuf_add(&sb, start, long_magic_end - start);
- strbuf_addf(&sb, ",prefix:%d", prefixlen);
- start = long_magic_end;
- } else {
- if (*start == ':')
- start++;
+ strbuf_add(&sb, elt, long_magic_end - elt);
+ strbuf_addf(&sb, ",prefix:%d)", prefixlen);
+ } else
strbuf_addf(&sb, ":(prefix:%d)", prefixlen);
- }
}
- strbuf_add(&sb, start, copyfrom - start);
strbuf_addstr(&sb, match);
item->original = strbuf_detach(&sb, NULL);
} else
{
struct pathspec_item *item;
const char *entry = argv ? *argv : NULL;
- int i, n, prefixlen;
+ int i, n, prefixlen, nr_exclude = 0;
memset(pathspec, 0, sizeof(*pathspec));
if ((flags & PATHSPEC_LITERAL_PATH) &&
!(magic_mask & PATHSPEC_LITERAL))
item[i].magic |= PATHSPEC_LITERAL;
+ if (item[i].magic & PATHSPEC_EXCLUDE)
+ nr_exclude++;
if (item[i].magic & magic_mask)
unsupported_magic(entry,
item[i].magic & magic_mask,
pathspec->magic |= item[i].magic;
}
+ if (nr_exclude == n)
+ die(_("There is nothing to exclude from by :(exclude) patterns.\n"
+ "Perhaps you forgot to add either ':/' or '.' ?"));
+
if (pathspec->magic & PATHSPEC_MAXDEPTH) {
if (flags & PATHSPEC_KEEP_ORDER)
#define PATHSPEC_LITERAL (1<<2)
#define PATHSPEC_GLOB (1<<3)
#define PATHSPEC_ICASE (1<<4)
+#define PATHSPEC_EXCLUDE (1<<5)
#define PATHSPEC_ALL_MAGIC \
(PATHSPEC_FROMTOP | \
PATHSPEC_MAXDEPTH | \
PATHSPEC_LITERAL | \
PATHSPEC_GLOB | \
- PATHSPEC_ICASE)
+ PATHSPEC_ICASE | \
+ PATHSPEC_EXCLUDE)
#define PATHSPEC_ONESTAR 1 /* the pathspec pattern satisfies GFNM_ONESTAR */
#: archive.c:328 builtin/log.c:1193
msgid "prefix"
-msgstr "Prefix"
+msgstr "Präfix"
#: archive.c:329
msgid "prepend prefix to each pathname in the archive"
msgstr ""
"Eingabehilfe:\n"
"1 - nummeriertes Element auswählen\n"
-"foo - Element anhand eines eindeutigen Prefix auswählen\n"
+"foo - Element anhand eines eindeutigen Präfix auswählen\n"
" - (leer) nichts auswählen"
#: builtin/clean.c:298
"1 - einzelnes Element auswählen\n"
"3-5 - Bereich von Elementen auswählen\n"
"2-3,6-9 - mehrere Bereiche auswählen\n"
-"foo - Element anhand eines eindeutigen Prefix auswählen\n"
+"foo - Element anhand eines eindeutigen Präfix auswählen\n"
"-... - angegebenes Element abwählen\n"
"* - alle Elemente auswählen\n"
" - (leer) Auswahl beenden"
#: builtin/log.c:1194
msgid "Use [<prefix>] instead of [PATCH]"
-msgstr "verwendet [<Prefix>] anstatt [PATCH]"
+msgstr "verwendet [<Präfix>] anstatt [PATCH]"
#: builtin/log.c:1197
msgid "store resulting files in <dir>"
"[-u [--exclude-per-directory=<gitignore>] | -i]] [--no-sparse-checkout] [--"
"index-output=<file>] (--empty | <tree-ish1> [<tree-ish2> [<tree-ish3>]])"
msgstr ""
-"git read-tree [[-m [--trivial] [--aggressive] | --reset | --prefix=<Prefix>] "
+"git read-tree [[-m [--trivial] [--aggressive] | --reset | --prefix=<Präfix>] "
"[-u [--exclude-per-directory=<gitignore>] | -i]] [--no-sparse-checkout] [--"
"index-output=<Datei>] (--empty | <Commit-Referenz1> [<Commit-Referenz2> "
"[<Commit-Referenz3>]])"
#: builtin/write-tree.c:13
msgid "git write-tree [--missing-ok] [--prefix=<prefix>/]"
-msgstr "git write-tree [--missing-ok] [--prefix=<Prefix>/]"
+msgstr "git write-tree [--missing-ok] [--prefix=<Präfix>/]"
#: builtin/write-tree.c:26
msgid "<prefix>/"
-msgstr "<Prefix>/"
+msgstr "<Präfix>/"
#: builtin/write-tree.c:27
msgid "write tree object for a subdirectory <prefix>"
-msgstr "schreibt das \"Tree\"-Objekt für ein Unterverzeichnis <Prefix>"
+msgstr "schreibt das \"Tree\"-Objekt für ein Unterverzeichnis <Präfix>"
#: builtin/write-tree.c:30
msgid "only useful for debugging"
if (start_command(&pass))
return NULL;
+ strbuf_reset(&buffer);
if (strbuf_read(&buffer, pass.out, 20) < 0)
err = 1;
strbuf_setlen(&buffer, strcspn(buffer.buf, "\r\n"));
- return strbuf_detach(&buffer, NULL);
+ return buffer.buf;
}
char *git_prompt(const char *prompt, int flags)
int pos, depth = MAXREPLACEDEPTH;
const unsigned char *cur = sha1;
- if (!read_replace_refs)
- return sha1;
-
prepare_replace_object();
/* Try to recursively replace the object */
static unsigned int get_max_fd_limit(void)
{
#ifdef RLIMIT_NOFILE
- struct rlimit lim;
+ {
+ struct rlimit lim;
- if (getrlimit(RLIMIT_NOFILE, &lim))
- die_errno("cannot get RLIMIT_NOFILE");
+ if (!getrlimit(RLIMIT_NOFILE, &lim))
+ return lim.rlim_cur;
+ }
+#endif
+
+#ifdef _SC_OPEN_MAX
+ {
+ long open_max = sysconf(_SC_OPEN_MAX);
+ if (0 < open_max)
+ return open_max;
+ /*
+ * Otherwise, we got -1 for one of the two
+ * reasons:
+ *
+ * (1) sysconf() did not understand _SC_OPEN_MAX
+ * and signaled an error with -1; or
+ * (2) sysconf() said there is no limit.
+ *
+ * We _could_ clear errno before calling sysconf() to
+ * tell these two cases apart and return a huge number
+ * in the latter case to let the caller cap it to a
+ * value that is not so selfish, but letting the
+ * fallback OPEN_MAX codepath take care of these cases
+ * is a lot simpler.
+ */
+ }
+#endif
- return lim.rlim_cur;
-#elif defined(_SC_OPEN_MAX)
- return sysconf(_SC_OPEN_MAX);
-#elif defined(OPEN_MAX)
+#ifdef OPEN_MAX
return OPEN_MAX;
#else
return 1; /* see the caller ;-) */
return base_offset;
}
+/*
+ * Like get_delta_base above, but we return the sha1 instead of the pack
+ * offset. This means it is cheaper for REF deltas (we do not have to do
+ * the final object lookup), but more expensive for OFS deltas (we
+ * have to load the revidx to convert the offset back into a sha1).
+ */
+static const unsigned char *get_delta_base_sha1(struct packed_git *p,
+ struct pack_window **w_curs,
+ off_t curpos,
+ enum object_type type,
+ off_t delta_obj_offset)
+{
+ if (type == OBJ_REF_DELTA) {
+ unsigned char *base = use_pack(p, w_curs, curpos, NULL);
+ return base;
+ } else if (type == OBJ_OFS_DELTA) {
+ struct revindex_entry *revidx;
+ off_t base_offset = get_delta_base(p, w_curs, &curpos,
+ type, delta_obj_offset);
+
+ if (!base_offset)
+ return NULL;
+
+ revidx = find_pack_revindex(p, base_offset);
+ if (!revidx)
+ return NULL;
+
+ return nth_packed_object_sha1(p, revidx->nr);
+ } else
+ return NULL;
+}
+
int unpack_object_header(struct packed_git *p,
struct pack_window **w_curs,
off_t *curpos,
}
}
+ if (oi->delta_base_sha1) {
+ if (type == OBJ_OFS_DELTA || type == OBJ_REF_DELTA) {
+ const unsigned char *base;
+
+ base = get_delta_base_sha1(p, &w_curs, curpos,
+ type, obj_offset);
+ if (!base) {
+ type = OBJ_BAD;
+ goto out;
+ }
+
+ hashcpy(oi->delta_base_sha1, base);
+ } else
+ hashclr(oi->delta_base_sha1);
+ }
+
out:
unuse_pack(&w_curs);
return type;
git_zstream stream;
char hdr[32];
+ if (oi->delta_base_sha1)
+ hashclr(oi->delta_base_sha1);
+
/*
* If we don't care about type or size, then we don't
* need to look inside the object at all. Note that we
return 0;
}
-int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi)
+int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, unsigned flags)
{
struct cached_object *co;
struct pack_entry e;
int rtype;
+ const unsigned char *real = lookup_replace_object_extended(sha1, flags);
- co = find_cached_object(sha1);
+ co = find_cached_object(real);
if (co) {
if (oi->typep)
*(oi->typep) = co->type;
*(oi->sizep) = co->size;
if (oi->disk_sizep)
*(oi->disk_sizep) = 0;
+ if (oi->delta_base_sha1)
+ hashclr(oi->delta_base_sha1);
oi->whence = OI_CACHED;
return 0;
}
- if (!find_pack_entry(sha1, &e)) {
+ if (!find_pack_entry(real, &e)) {
/* Most likely it's a loose object. */
- if (!sha1_loose_object_info(sha1, oi)) {
+ if (!sha1_loose_object_info(real, oi)) {
oi->whence = OI_LOOSE;
return 0;
}
/* Not a loose object; someone else may have just packed it. */
reprepare_packed_git();
- if (!find_pack_entry(sha1, &e))
+ if (!find_pack_entry(real, &e))
return -1;
}
rtype = packed_object_info(e.p, e.offset, oi);
if (rtype < 0) {
- mark_bad_packed_object(e.p, sha1);
- return sha1_object_info_extended(sha1, oi);
+ mark_bad_packed_object(e.p, real);
+ return sha1_object_info_extended(real, oi, 0);
} else if (in_delta_base_cache(e.p, e.offset)) {
oi->whence = OI_DBCACHED;
} else {
oi.typep = &type;
oi.sizep = sizep;
- if (sha1_object_info_extended(sha1, &oi) < 0)
+ if (sha1_object_info_extended(sha1, &oi, LOOKUP_REPLACE_OBJECT) < 0)
return -1;
return type;
}
void *data;
char *path;
const struct packed_git *p;
- const unsigned char *repl = (flag & READ_SHA1_FILE_REPLACE)
- ? lookup_replace_object(sha1) : sha1;
+ const unsigned char *repl = lookup_replace_object_extended(sha1, flag);
errno = 0;
data = read_object(repl, type, size);
oi->typep = type;
oi->sizep = &size;
- status = sha1_object_info_extended(sha1, oi);
+ status = sha1_object_info_extended(sha1, oi, 0);
if (status < 0)
return stream_error;
check_count A 2 B 1 B1 2 B2 1 "A U Thor" 1
'
+test_expect_success 'blame huge graft' '
+ test_when_finished "git checkout branch2" &&
+ test_when_finished "rm -f .git/info/grafts" &&
+ graft= &&
+ for i in 0 1 2
+ do
+ for j in 0 1 2 3 4 5 6 7 8 9
+ do
+ git checkout --orphan "$i$j" &&
+ printf "%s\n" "$i" "$j" >file &&
+ test_tick &&
+ GIT_AUTHOR_NAME=$i$j GIT_AUTHOR_EMAIL=$i$j@test.git \
+ git commit -a -m "$i$j" &&
+ commit=$(git rev-parse --verify HEAD) &&
+ graft="$graft$commit "
+ done
+ done &&
+ printf "%s " $graft >.git/info/grafts &&
+ check_count -h 00 01 1 10 1
+'
+
test_expect_success 'setup incomplete line' '
echo "incomplete" | tr -d "\\012" >>file &&
GIT_AUTHOR_NAME="C" GIT_AUTHOR_EMAIL="C@test.git" \
HTTPD_DEST=127.0.0.1:$LIB_HTTPD_PORT
HTTPD_URL=$HTTPD_PROTO://$HTTPD_DEST
HTTPD_URL_USER=$HTTPD_PROTO://user%40host@$HTTPD_DEST
- HTTPD_URL_USER_PASS=$HTTPD_PROTO://user%40host:user%40host@$HTTPD_DEST
+ HTTPD_URL_USER_PASS=$HTTPD_PROTO://user%40host:pass%40host@$HTTPD_DEST
if test -n "$LIB_HTTPD_DAV" -o -n "$LIB_HTTPD_SVN"
then
test_expect_success 'setup askpass helper' '
write_script "$TRASH_DIRECTORY/askpass" <<-\EOF &&
echo >>"$TRASH_DIRECTORY/askpass-query" "askpass: $*" &&
- cat "$TRASH_DIRECTORY/askpass-response"
+ case "$*" in
+ *Username*)
+ what=user
+ ;;
+ *Password*)
+ what=pass
+ ;;
+ esac &&
+ cat "$TRASH_DIRECTORY/askpass-$what"
EOF
GIT_ASKPASS="$TRASH_DIRECTORY/askpass" &&
export GIT_ASKPASS &&
set_askpass() {
>"$TRASH_DIRECTORY/askpass-query" &&
- echo "$*" >"$TRASH_DIRECTORY/askpass-response"
+ echo "$1" >"$TRASH_DIRECTORY/askpass-user" &&
+ echo "$2" >"$TRASH_DIRECTORY/askpass-pass"
}
expect_askpass() {
-user@host:nKpa8pZUHx/ic
+user@host:xb4E8pqD81KQs
test_expect_success 'success is reported like this' '
:
'
-test_expect_failure 'pretend we have a known breakage' '
- false
-'
run_sub_test_lib_test () {
name="$1" descr="$2" # stdin is the body of the test code
shift 2
mkdir "$name" &&
(
- # Pretend we're a test harness. This prevents
- # test-lib from writing the counts to a file that will
- # later be summarized, showing spurious "failed" tests
- HARNESS_ACTIVE=t &&
- export HARNESS_ACTIVE &&
+ # Pretend we're not running under a test harness, whether we
+ # are or not. The test-lib output depends on the setting of
+ # this variable, so we need a stable setting under which to run
+ # the sub-test.
+ sane_unset HARNESS_ACTIVE &&
cd "$name" &&
cat >"$name.sh" <<-EOF &&
#!$SHELL_PATH
cat >>"$name.sh" &&
chmod +x "$name.sh" &&
export TEST_DIRECTORY &&
+ TEST_OUTPUT_DIRECTORY=$(pwd) &&
+ export TEST_OUTPUT_DIRECTORY &&
./"$name.sh" "$@" >out 2>err
)
}
grep -v "^Initialized empty" test-verbose/out+ >test-verbose/out &&
check_sub_test_lib_test test-verbose <<-\EOF
> expecting success: true
- > Z
> ok 1 - passing test
> Z
> expecting success: echo foo
> foo
- > Z
> ok 2 - test with output
> Z
> expecting success: false
- > Z
> not ok 3 - failing test
> # false
> Z
> Z
> expecting success: echo foo
> foo
- > Z
> ok 2 - test with output
> Z
> not ok 3 - failing test
"$(echo_without_newline "$batch_check_input" | git cat-file --batch-check)"
'
+test_expect_success 'setup blobs which are likely to delta' '
+ test-genrandom foo 10240 >foo &&
+ { cat foo; echo plus; } >foo-plus &&
+ git add foo foo-plus &&
+ git commit -m foo &&
+ cat >blobs <<-\EOF
+ HEAD:foo
+ HEAD:foo-plus
+ EOF
+'
+
+test_expect_success 'confirm that neither loose blob is a delta' '
+ cat >expect <<-EOF
+ $_z40
+ $_z40
+ EOF
+ git cat-file --batch-check="%(deltabase)" <blobs >actual &&
+ test_cmp expect actual
+'
+
+# To avoid relying too much on the current delta heuristics,
+# we will check only that one of the two objects is a delta
+# against the other, but not the order. We can do so by just
+# asking for the base of both, and checking whether either
+# sha1 appears in the output.
+test_expect_success '%(deltabase) reports packed delta bases' '
+ git repack -ad &&
+ git cat-file --batch-check="%(deltabase)" <blobs >actual &&
+ {
+ grep "$(git rev-parse HEAD:foo)" actual ||
+ grep "$(git rev-parse HEAD:foo-plus)" actual
+ }
+'
+
test_done
! (git ls-files | grep "non-existent")
'
+test_expect_success 'git add -A on empty repo does not error out' '
+ rm -fr empty &&
+ git init empty &&
+ (
+ cd empty &&
+ git add -A . &&
+ git add -A
+ )
+'
+
+test_expect_success '"git add ." in empty repo' '
+ rm -fr empty &&
+ git init empty &&
+ (
+ cd empty &&
+ git add .
+ )
+'
+
test_expect_success 'git add --dry-run of existing changed file' "
echo new >>track-this &&
git add --dry-run track-this >actual 2>&1 &&
--- /dev/null
+#!/bin/sh
+
+test_description='diff order'
+
+. ./test-lib.sh
+
+create_files () {
+ echo "$1" >a.h &&
+ echo "$1" >b.c &&
+ echo "$1" >c/Makefile &&
+ echo "$1" >d.txt &&
+ git add a.h b.c c/Makefile d.txt &&
+ git commit -m"$1"
+}
+
+test_expect_success 'setup' '
+ mkdir c &&
+ create_files 1 &&
+ create_files 2 &&
+
+ cat >order_file_1 <<-\EOF &&
+ *Makefile
+ *.txt
+ *.h
+ EOF
+
+ cat >order_file_2 <<-\EOF &&
+ *Makefile
+ *.h
+ *.c
+ EOF
+
+ cat >expect_none <<-\EOF &&
+ a.h
+ b.c
+ c/Makefile
+ d.txt
+ EOF
+
+ cat >expect_1 <<-\EOF &&
+ c/Makefile
+ d.txt
+ a.h
+ b.c
+ EOF
+
+ cat >expect_2 <<-\EOF
+ c/Makefile
+ a.h
+ b.c
+ d.txt
+ EOF
+'
+
+test_expect_success "no order (=tree object order)" '
+ git diff --name-only HEAD^..HEAD >actual &&
+ test_cmp expect_none actual
+'
+
+test_expect_success 'missing orderfile' '
+ rm -f bogus_file &&
+ test_must_fail git diff -Obogus_file --name-only HEAD^..HEAD
+'
+
+test_expect_success POSIXPERM,SANITY 'unreadable orderfile' '
+ >unreadable_file &&
+ chmod -r unreadable_file &&
+ test_must_fail git diff -Ounreadable_file --name-only HEAD^..HEAD
+'
+
+test_expect_success 'orderfile is a directory' '
+ test_must_fail git diff -O/ --name-only HEAD^..HEAD
+'
+
+for i in 1 2
+do
+ test_expect_success "orderfile using option ($i)" '
+ git diff -Oorder_file_$i --name-only HEAD^..HEAD >actual &&
+ test_cmp expect_$i actual
+ '
+
+ test_expect_success PIPE "orderfile is fifo ($i)" '
+ rm -f order_fifo &&
+ mkfifo order_fifo &&
+ {
+ cat order_file_$i >order_fifo &
+ } &&
+ git diff -O order_fifo --name-only HEAD^..HEAD >actual &&
+ wait &&
+ test_cmp expect_$i actual
+ '
+
+ test_expect_success "orderfile using config ($i)" '
+ git -c diff.orderfile=order_file_$i diff --name-only HEAD^..HEAD >actual &&
+ test_cmp expect_$i actual
+ '
+
+ test_expect_success "cancelling configured orderfile ($i)" '
+ git -c diff.orderfile=order_file_$i diff -O/dev/null --name-only HEAD^..HEAD >actual &&
+ test_cmp expect_none actual
+ '
+done
+
+test_done
test_cmp expected actual
'
+test_expect_success 'log decoration properly follows tag chain' '
+ git tag -a tag1 -m tag1 &&
+ git tag -a tag2 -m tag2 tag1 &&
+ git tag -d tag1 &&
+ git commit --amend -m shorter &&
+ git log --no-walk --tags --pretty="%H %d" --decorate=full >actual &&
+ cat <<EOF >expected &&
+6a908c10688b2503073c39c9ba26322c73902bb5 (tag: refs/tags/tag2)
+9f716384d92283fb915a4eee5073f030638e05f9 (tag: refs/tags/message-one)
+b87e4cccdb77336ea79d89224737be7ea8e95367 (tag: refs/tags/message-two)
+EOF
+ sort actual >actual1 &&
+ test_cmp expected actual1
+'
+
test_done
test_bundle_object_count .git/objects/pack/pack-${pack##pack }.pack 3
'
+test_expect_success 'fetch --prune prints the remotes url' '
+ git branch goodbye &&
+ git clone . only-prunes &&
+ git branch -D goodbye &&
+ (
+ cd only-prunes &&
+ git fetch --prune origin 2>&1 | head -n1 >../actual
+ ) &&
+ echo "From ${D}/." >expect &&
+ test_cmp expect actual
+'
+
+test_expect_success 'branchname D/F conflict resolved by --prune' '
+ git branch dir/file &&
+ git clone . prune-df-conflict &&
+ git branch -D dir/file &&
+ git branch dir &&
+ (
+ cd prune-df-conflict &&
+ git fetch --prune &&
+ git rev-parse origin/dir >../actual
+ ) &&
+ git rev-parse dir >expect &&
+ test_cmp expect actual
+'
+
test_done
test_expect_success 'push to password-protected repository (user in URL)' '
test_commit pw-user &&
- set_askpass user@host &&
+ set_askpass user@host pass@host &&
git push "$HTTPD_URL_USER/auth/dumb/test_repo.git" HEAD &&
git rev-parse --verify HEAD >expect &&
git --git-dir="$HTTPD_DOCUMENT_ROOT_PATH/auth/dumb/test_repo.git" \
test_expect_failure 'push to password-protected repository (no user in URL)' '
test_commit pw-nouser &&
- set_askpass user@host &&
+ set_askpass user@host pass@host &&
git push "$HTTPD_URL/auth/dumb/test_repo.git" HEAD &&
expect_askpass both user@host
git rev-parse --verify HEAD >expect &&
cd "$ROOT_PATH/test_repo_clone" &&
echo push-auth-test >expect &&
test_commit push-auth-test &&
- set_askpass user@host &&
+ set_askpass user@host pass@host &&
git push "$HTTPD_URL"/auth/smart/test_repo.git &&
git --git-dir="$HTTPD_DOCUMENT_ROOT_PATH/test_repo.git" \
log -1 --format=%s >actual &&
cd "$ROOT_PATH/test_repo_clone" &&
echo push-half-auth >expect &&
test_commit push-half-auth &&
- set_askpass user@host &&
+ set_askpass user@host pass@host &&
git push "$HTTPD_URL"/auth-push/smart/test_repo.git &&
git --git-dir="$HTTPD_DOCUMENT_ROOT_PATH/test_repo.git" \
log -1 --format=%s >actual &&
cd "$ROOT_PATH/half-auth-clone" &&
echo two >expect &&
test_commit two &&
- set_askpass user@host &&
+ set_askpass user@host pass@host &&
git push "$HTTPD_URL/half-auth-complete/smart/half-auth.git" &&
git --git-dir="$HTTPD_DOCUMENT_ROOT_PATH/half-auth.git" \
log -1 --format=%s >actual &&
'
test_expect_success 'http auth can use just user in URL' '
- set_askpass user@host &&
+ set_askpass wrong pass@host &&
git clone "$HTTPD_URL_USER/auth/dumb/repo.git" clone-auth-pass &&
expect_askpass pass user@host
'
test_expect_success 'http auth can request both user and pass' '
- set_askpass user@host &&
+ set_askpass user@host pass@host &&
git clone "$HTTPD_URL/auth/dumb/repo.git" clone-auth-both &&
expect_askpass both user@host
'
test_config_global credential.helper "!f() {
cat >/dev/null
echo username=user@host
- echo password=user@host
+ echo password=pass@host
}; f" &&
set_askpass wrong &&
git clone "$HTTPD_URL/auth/dumb/repo.git" clone-auth-helper &&
test_expect_success 'http auth can get username from config' '
test_config_global "credential.$HTTPD_URL.username" user@host &&
- set_askpass user@host &&
+ set_askpass wrong pass@host &&
git clone "$HTTPD_URL/auth/dumb/repo.git" clone-auth-user &&
expect_askpass pass user@host
'
test_expect_success 'configured username does not override URL' '
test_config_global "credential.$HTTPD_URL.username" wrong &&
- set_askpass user@host &&
+ set_askpass wrong pass@host &&
git clone "$HTTPD_URL_USER/auth/dumb/repo.git" clone-auth-user2 &&
expect_askpass pass user@host
'
test_expect_success 'clone from password-protected repository' '
echo two >expect &&
- set_askpass user@host &&
+ set_askpass user@host pass@host &&
git clone --bare "$HTTPD_URL/auth/smart/repo.git" smart-auth &&
expect_askpass both user@host &&
git --git-dir=smart-auth log -1 --format=%s >actual &&
test_expect_success 'clone from auth-only-for-objects repository' '
echo two >expect &&
- set_askpass user@host &&
+ set_askpass user@host pass@host &&
git clone --bare "$HTTPD_URL/auth-fetch/smart/repo.git" half-auth &&
expect_askpass both user@host &&
git --git-dir=half-auth log -1 --format=%s >actual &&
'
test_expect_success 'redirects send auth to new location' '
- set_askpass user@host &&
+ set_askpass user@host pass@host &&
git -c credential.useHttpPath=true \
clone $HTTPD_URL/smart-redir-auth/repo.git repo-redir-auth &&
expect_askpass both user@host auth/smart/repo.git
test_cmp expect3 actual
'
+test_expect_success 'merge-base --octopus --all for complex tree' '
+ # Best common ancestor for JE, JAA and JDD is JC
+ # JE
+ # / |
+ # / |
+ # / |
+ # JAA / |
+ # |\ / |
+ # | \ | JDD |
+ # | \ |/ | |
+ # | JC JD |
+ # | | /| |
+ # | |/ | |
+ # JA | | |
+ # |\ /| | |
+ # X JB | X X
+ # \ \ | / /
+ # \__\|/___/
+ # J
+ test_commit J &&
+ test_commit JB &&
+ git reset --hard J &&
+ test_commit JC &&
+ git reset --hard J &&
+ test_commit JTEMP1 &&
+ test_merge JA JB &&
+ test_merge JAA JC &&
+ git reset --hard J &&
+ test_commit JTEMP2 &&
+ test_merge JD JB &&
+ test_merge JDD JC &&
+ git reset --hard J &&
+ test_commit JTEMP3 &&
+ test_merge JE JC &&
+ git rev-parse JC >expected &&
+ git merge-base --all --octopus JAA JDD JE >actual &&
+ test_cmp expected actual
+'
+
test_done
git replace -f HEAD^ $BLOB
'
+test_expect_success 'git cat-file --batch works on replace objects' '
+ git replace | grep $PARA3 &&
+ echo $PARA3 | git cat-file --batch
+'
+
+test_expect_success 'test --format bogus' '
+ test_must_fail git replace --format bogus >/dev/null 2>&1
+'
+
+test_expect_success 'test --format short' '
+ git replace --format=short >actual &&
+ git replace >expected &&
+ test_cmp expected actual
+'
+
+test_expect_success 'test --format medium' '
+ H1=$(git --no-replace-objects rev-parse HEAD~1) &&
+ HT=$(git --no-replace-objects rev-parse HEAD^{tree}) &&
+ MYTAG=$(git --no-replace-objects rev-parse mytag) &&
+ {
+ echo "$H1 -> $BLOB" &&
+ echo "$BLOB -> $REPLACED" &&
+ echo "$HT -> $H1" &&
+ echo "$PARA3 -> $S" &&
+ echo "$MYTAG -> $HASH1"
+ } | sort >expected &&
+ git replace -l --format medium | sort >actual &&
+ test_cmp expected actual
+'
+
+test_expect_success 'test --format long' '
+ {
+ echo "$H1 (commit) -> $BLOB (blob)" &&
+ echo "$BLOB (blob) -> $REPLACED (blob)" &&
+ echo "$HT (tree) -> $H1 (commit)" &&
+ echo "$PARA3 (commit) -> $S (commit)" &&
+ echo "$MYTAG (tag) -> $HASH1 (commit)"
+ } | sort >expected &&
+ git replace --format=long | sort >actual &&
+ test_cmp expected actual
+'
+
test_expect_success 'replace ref cleanup' '
test -n "$(git replace)" &&
git replace -d $(git replace) &&
test_commit start2 &&
git checkout master &&
git merge -m next start2 &&
- test_commit final
+ test_commit final &&
+
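+ # Build one long graft line: the commit on b1 is grafted to have
+ # the commits on b2..b40 as its parents (a 39-parent octopus).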
+ test_seq 40 |
+ while read i
+ do
+ git checkout --orphan "b$i" &&
+ test_tick &&
+ git commit --allow-empty -m "$i" &&
+ commit=$(git rev-parse --verify HEAD) &&
+ printf "$commit " >>.git/info/grafts
+ done
'
test_expect_success 'start is valid' '
test_cmp expect actual
'
+test_expect_success 'large graft octopus' '
+ test_cmp_rev_output b31 "git rev-parse --verify b1^30"
+'
+
test_expect_success 'repack for next test' '
git repack -a -d
'
--- /dev/null
+#!/bin/sh
+
+test_description='test case exclude pathspec'
+
+. ./test-lib.sh
+
+test_expect_success 'setup' '
+ for p in file sub/file sub/sub/file sub/file2 sub/sub/sub/file sub2/file; do
+ if echo "$p" | grep -q /; then
+ mkdir -p "$(dirname "$p")"
+ fi &&
+ : >$p &&
+ git add $p &&
+ git commit -m $p
+ done &&
+ git log --oneline --format=%s >actual &&
+ cat <<EOF >expect &&
+sub2/file
+sub/sub/sub/file
+sub/file2
+sub/sub/file
+sub/file
+file
+EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'exclude only should error out' '
+ test_must_fail git log --oneline --format=%s -- ":(exclude)sub"
+'
+
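+# "t_e_i" and "m_p_d" in the test titles name the two code paths
+# being exercised: tree_entry_interesting() (git log) and
+# match_pathspec_depth() (git ls-files).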
+test_expect_success 't_e_i() exclude sub' '
+ git log --oneline --format=%s -- . ":(exclude)sub" >actual &&
+ cat <<EOF >expect &&
+sub2/file
+file
+EOF
+ test_cmp expect actual
+'
+
+test_expect_success 't_e_i() exclude sub/sub/file' '
+ git log --oneline --format=%s -- . ":(exclude)sub/sub/file" >actual &&
+ cat <<EOF >expect &&
+sub2/file
+sub/sub/sub/file
+sub/file2
+sub/file
+file
+EOF
+ test_cmp expect actual
+'
+
+test_expect_success 't_e_i() exclude sub using mnemonic' '
+ git log --oneline --format=%s -- . ":!sub" >actual &&
+ cat <<EOF >expect &&
+sub2/file
+file
+EOF
+ test_cmp expect actual
+'
+
+test_expect_success 't_e_i() exclude :(icase)SUB' '
+ git log --oneline --format=%s -- . ":(exclude,icase)SUB" >actual &&
+ cat <<EOF >expect &&
+sub2/file
+file
+EOF
+ test_cmp expect actual
+'
+
+test_expect_success 't_e_i() exclude sub2 from sub' '
+ (
+ cd sub &&
+ git log --oneline --format=%s -- :/ ":/!sub2" >actual &&
+ cat <<EOF >expect &&
+sub/sub/sub/file
+sub/file2
+sub/sub/file
+sub/file
+file
+EOF
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 't_e_i() exclude sub/*file' '
+ git log --oneline --format=%s -- . ":(exclude)sub/*file" >actual &&
+ cat <<EOF >expect &&
+sub2/file
+sub/file2
+file
+EOF
+ test_cmp expect actual
+'
+
+test_expect_success 't_e_i() exclude :(glob)sub/*/file' '
+ git log --oneline --format=%s -- . ":(exclude,glob)sub/*/file" >actual &&
+ cat <<EOF >expect &&
+sub2/file
+sub/sub/sub/file
+sub/file2
+sub/file
+file
+EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'm_p_d() exclude sub' '
+ git ls-files -- . ":(exclude)sub" >actual
+ cat <<EOF >expect &&
+file
+sub2/file
+EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'm_p_d() exclude sub/sub/file' '
+ git ls-files -- . ":(exclude)sub/sub/file" >actual
+ cat <<EOF >expect &&
+file
+sub/file
+sub/file2
+sub/sub/sub/file
+sub2/file
+EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'm_p_d() exclude sub using mnemonic' '
+ git ls-files -- . ":!sub" >actual
+ cat <<EOF >expect &&
+file
+sub2/file
+EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'm_p_d() exclude :(icase)SUB' '
+ git ls-files -- . ":(exclude,icase)SUB" >actual
+ cat <<EOF >expect &&
+file
+sub2/file
+EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'm_p_d() exclude sub2 from sub' '
+ (
+ cd sub &&
+ git ls-files -- :/ ":/!sub2" >actual
+ cat <<EOF >expect &&
+../file
+file
+file2
+sub/file
+sub/sub/file
+EOF
+ test_cmp expect actual
+ )
+'
+
+test_expect_success 'm_p_d() exclude sub/*file' '
+ git ls-files -- . ":(exclude)sub/*file" >actual
+ cat <<EOF >expect &&
+file
+sub/file2
+sub2/file
+EOF
+ test_cmp expect actual
+'
+
+test_expect_success 'm_p_d() exclude :(glob)sub/*/file' '
+ git ls-files -- . ":(exclude,glob)sub/*/file" >actual
+ cat <<EOF >expect &&
+file
+sub/file
+sub/file2
+sub/sub/sub/file
+sub2/file
+EOF
+ test_cmp expect actual
+'
+
+test_done
test_done () {
GIT_EXIT_OK=t
- # Note: t0000 relies on $HARNESS_ACTIVE disabling the .counts
- # output file
if test -z "$HARNESS_ACTIVE"
then
test_results_dir="$TEST_OUTPUT_DIRECTORY/test-results"
* Pre-condition: either baselen == base_offset (i.e. empty path)
* or base[baselen-1] == '/' (i.e. with trailing slash).
*/
-enum interesting tree_entry_interesting(const struct name_entry *entry,
- struct strbuf *base, int base_offset,
- const struct pathspec *ps)
+static enum interesting do_match(const struct name_entry *entry,
+ struct strbuf *base, int base_offset,
+ const struct pathspec *ps,
+ int exclude)
{
int i;
int pathlen, baselen = base->len - base_offset;
PATHSPEC_MAXDEPTH |
PATHSPEC_LITERAL |
PATHSPEC_GLOB |
- PATHSPEC_ICASE);
+ PATHSPEC_ICASE |
+ PATHSPEC_EXCLUDE);
if (!ps->nr) {
if (!ps->recursive ||
const char *base_str = base->buf + base_offset;
int matchlen = item->len, matched = 0;
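+ /*
+ * Each pass considers only one kind of pathspec item: positive
+ * ones when "exclude" is 0, :(exclude) ones when it is 1.
+ * Items of the other kind are skipped.
+ */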
+ if ((!exclude && item->magic & PATHSPEC_EXCLUDE) ||
+ ( exclude && !(item->magic & PATHSPEC_EXCLUDE)))
+ continue;
+
if (baselen >= matchlen) {
/* If it doesn't match, move along... */
if (!match_dir_prefix(item, base_str, match, matchlen))
}
return never_interesting; /* No matches */
}
+
+/*
+ * Is a tree entry interesting given the pathspec we have?
+ *
+ * Pre-condition: either baselen == base_offset (i.e. empty path)
+ * or base[baselen-1] == '/' (i.e. with trailing slash).
+ */
+enum interesting tree_entry_interesting(const struct name_entry *entry,
+ struct strbuf *base, int base_offset,
+ const struct pathspec *ps)
+{
+ enum interesting positive, negative;
+ positive = do_match(entry, base, base_offset, ps, 0);
+
+ /*
+ * case | entry | positive | negative | result
+ * -----+-------+----------+----------+-------
+ * 1 | file | -1 | -1..2 | -1
+ * 2 | file | 0 | -1..2 | 0
+ * 3 | file | 1 | -1 | 1
+ * 4 | file | 1 | 0 | 1
+ * 5 | file | 1 | 1 | 0
+ * 6 | file | 1 | 2 | 0
+ * 7 | file | 2 | -1 | 2
+ * 8 | file | 2 | 0 | 2
+ * 9 | file | 2 | 1 | 0
+ * 10 | file | 2 | 2 | -1
+ * -----+-------+----------+----------+-------
+ * 11 | dir | -1 | -1..2 | -1
+ * 12 | dir | 0 | -1..2 | 0
+ * 13 | dir | 1 | -1 | 1
+ * 14 | dir | 1 | 0 | 1
+ * 15 | dir | 1 | 1 | 1 (*)
+ * 16 | dir | 1 | 2 | 0
+ * 17 | dir | 2 | -1 | 2
+ * 18 | dir | 2 | 0 | 2
+ * 19 | dir | 2 | 1 | 1 (*)
+ * 20 | dir | 2 | 2 | -1
+ *
+ * (*) An exclude pattern that is interested in a directory does
+ * not necessarily exclude everything inside it.  In the wildcard
+ * case the decision cannot be made until the individual files
+ * inside are examined.  So don't write such directories off yet.
+ */
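+
+ /*
+ * Example (with a pathspec like ". :(exclude)sub"): once the walk
+ * is inside "sub/", every entry sees positive == 2 and
+ * negative == 2 (cases #10 and #20), so the rest of that subtree
+ * is culled.
+ */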
+
+ if (!(ps->magic & PATHSPEC_EXCLUDE) ||
+ positive <= entry_not_interesting) /* #1, #2, #11, #12 */
+ return positive;
+
+ negative = do_match(entry, base, base_offset, ps, 1);
+
+ /* #3, #4, #7, #8, #13, #14, #17, #18 */
+ if (negative <= entry_not_interesting)
+ return positive;
+
+ /* #15, #19 */
+ if (S_ISDIR(entry->mode) &&
+ positive >= entry_interesting &&
+ negative == entry_interesting)
+ return entry_interesting;
+
+ if ((positive == entry_interesting &&
+ negative >= entry_interesting) || /* #5, #6, #16 */
+ (positive == all_entries_interesting &&
+ negative == entry_interesting)) /* #9 */
+ return entry_not_interesting;
+
+ return all_entries_not_interesting; /* #10, #20 */
+}
}
static int clear_ce_flags_1(struct cache_entry **cache, int nr,
- char *prefix, int prefix_len,
+ struct strbuf *prefix,
int select_mask, int clear_mask,
struct exclude_list *el, int defval);
/* Whole directory matching */
static int clear_ce_flags_dir(struct cache_entry **cache, int nr,
- char *prefix, int prefix_len,
+ struct strbuf *prefix,
char *basename,
int select_mask, int clear_mask,
struct exclude_list *el, int defval)
{
struct cache_entry **cache_end;
int dtype = DT_DIR;
- int ret = is_excluded_from_list(prefix, prefix_len,
+ int ret = is_excluded_from_list(prefix->buf, prefix->len,
basename, &dtype, el);
+ int rc;
- prefix[prefix_len++] = '/';
+ strbuf_addch(prefix, '/');
/* If undecided, use matching result of parent dir in defval */
if (ret < 0)
for (cache_end = cache; cache_end != cache + nr; cache_end++) {
struct cache_entry *ce = *cache_end;
- if (strncmp(ce->name, prefix, prefix_len))
+ if (strncmp(ce->name, prefix->buf, prefix->len))
break;
}
* calling clear_ce_flags_1(). That function will call
* the expensive is_excluded_from_list() on every entry.
*/
- return clear_ce_flags_1(cache, cache_end - cache,
- prefix, prefix_len,
- select_mask, clear_mask,
- el, ret);
+ rc = clear_ce_flags_1(cache, cache_end - cache,
+ prefix,
+ select_mask, clear_mask,
+ el, ret);
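+ /* drop the trailing '/' added to the prefix above */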
+ strbuf_setlen(prefix, prefix->len - 1);
+ return rc;
}
/*
* Top level path has prefix_len zero.
*/
static int clear_ce_flags_1(struct cache_entry **cache, int nr,
- char *prefix, int prefix_len,
+ struct strbuf *prefix,
int select_mask, int clear_mask,
struct exclude_list *el, int defval)
{
continue;
}
- if (prefix_len && strncmp(ce->name, prefix, prefix_len))
+ if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len))
break;
- name = ce->name + prefix_len;
+ name = ce->name + prefix->len;
slash = strchr(name, '/');
/* If it's a directory, try whole directory match first */
int processed;
len = slash - name;
- memcpy(prefix + prefix_len, name, len);
+ strbuf_add(prefix, name, len);
- /*
- * terminate the string (no trailing slash),
- * clear_c_f_dir needs it
- */
- prefix[prefix_len + len] = '\0';
processed = clear_ce_flags_dir(cache, cache_end - cache,
- prefix, prefix_len + len,
- prefix + prefix_len,
+ prefix,
+ prefix->buf + prefix->len - len,
select_mask, clear_mask,
el, defval);
/* clear_c_f_dir eats a whole dir already? */
if (processed) {
cache += processed;
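+ /* remove the directory name appended to the prefix */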
+ strbuf_setlen(prefix, prefix->len - len);
continue;
}
- prefix[prefix_len + len++] = '/';
+ strbuf_addch(prefix, '/');
cache += clear_ce_flags_1(cache, cache_end - cache,
- prefix, prefix_len + len,
+ prefix,
select_mask, clear_mask, el, defval);
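+ /* drop both the name and the '/' appended to the prefix */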
+ strbuf_setlen(prefix, prefix->len - len - 1);
continue;
}
int select_mask, int clear_mask,
struct exclude_list *el)
{
- char prefix[PATH_MAX];
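+ /*
+ * A reusable strbuf (reset on entry) replaces the fixed PATH_MAX
+ * buffer, so the prefix is no longer limited in length.
+ */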
+ static struct strbuf prefix = STRBUF_INIT;
+
+ strbuf_reset(&prefix);
+
return clear_ce_flags_1(cache, nr,
- prefix, 0,
+ &prefix,
select_mask, clear_mask,
el, 0);
}