More _("i18n") markings.
* nd/i18n:
fsck: mark strings for translation
fsck: reduce word legos to help i18n
parse-options.c: mark more strings for translation
parse-options.c: turn some die() to BUG()
parse-options: replace opterror() with optname()
repack: mark more strings for translation
remote.c: mark messages for translation
remote.c: turn some error() or die() to BUG()
reflog: mark strings for translation
read-cache.c: add missing colon separators
read-cache.c: mark more strings for translation
read-cache.c: turn die("internal error") to BUG()
attr.c: mark more strings for translation
archive.c: mark more strings for translation
alias.c: mark split_cmdline_strerror() strings for translation
git.c: mark more strings for translation
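
The series follows git's usual gettext conventions; below is a minimal sketch of the pattern, assuming git's gettext.h wrappers and the usage()/die()/BUG() helpers (the command, function, and messages here are hypothetical and not part of the diff): user-facing runtime messages are wrapped in _(), static strings such as usage text are marked with N_() and translated with _() at the point where they are printed, and messages that can only be reached through a programming error are converted from die() to BUG(), which is intentionally left untranslated.

#include "git-compat-util.h"
#include "gettext.h"

/* hypothetical usage string: N_() only marks it for extraction */
static const char frotz_usage[] =
	N_("git frotz [--quiet] <ref>");

static void check_args(int argc, const char *path, unsigned mode)
{
	if (!path)
		/* programming error, not user error: BUG() stays untranslated */
		BUG("check_args() called with NULL path");
	if (argc < 2)
		/* the N_()-marked string is translated with _() where it is used */
		usage(_(frotz_usage));
	if (mode != 0100644 && mode != 0100755)
		/* user-facing message: wrap the format string in _() */
		die(_("unsupported mode %o for '%s'"), mode, path);
}
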
archivers[nr_archivers++] = ar;
}
+void init_archivers(void)
+{
+ init_tar_archiver();
+ init_zip_archiver();
+}
+
static void format_subst(const struct commit *commit,
const char *src, size_t len,
struct strbuf *buf)
int refnamelen = colon - name;
if (!dwim_ref(name, refnamelen, &oid, &ref))
- die("no such ref: %.*s", refnamelen, name);
+ die(_("no such ref: %.*s"), refnamelen, name);
free(ref);
}
if (get_oid(name, &oid))
- die("Not a valid object name");
+ die(_("not a valid object name: %s"), name);
commit = lookup_commit_reference_gently(ar_args->repo, &oid, 1);
if (commit) {
tree = parse_tree_indirect(&oid);
if (tree == NULL)
- die("not a tree object");
+ die(_("not a tree object: %s"), oid_to_hex(&oid));
if (prefix) {
struct object_id tree_oid;
err = get_tree_entry(&tree->object.oid, prefix, &tree_oid,
&mode);
if (err || !S_ISDIR(mode))
- die("current working directory is untracked");
+ die(_("current working directory is untracked"));
tree = parse_tree_indirect(&tree_oid);
}
git_config_get_bool("uploadarchive.allowunreachable", &remote_allow_unreachable);
git_config(git_default_config, NULL);
- init_tar_archiver();
- init_zip_archiver();
-
args.repo = repo;
argc = parse_archive_args(argc, argv, &ar, &args, name_hint, remote);
if (!startup_info->have_repository) {
struct attr_hashmap {
struct hashmap map;
-#ifndef NO_PTHREADS
pthread_mutex_t mutex;
-#endif
};
static inline void hashmap_lock(struct attr_hashmap *map)
{
-#ifndef NO_PTHREADS
pthread_mutex_lock(&map->mutex);
-#endif
}
static inline void hashmap_unlock(struct attr_hashmap *map)
{
-#ifndef NO_PTHREADS
pthread_mutex_unlock(&map->mutex);
-#endif
}
/*
if (strlen(ATTRIBUTE_MACRO_PREFIX) < namelen &&
starts_with(name, ATTRIBUTE_MACRO_PREFIX)) {
if (!macro_ok) {
- fprintf(stderr, "%s not allowed: %s:%d\n",
- name, src, lineno);
+ fprintf_ln(stderr, _("%s not allowed: %s:%d"),
+ name, src, lineno);
goto fail_return;
}
is_macro = 1;
size_t nr;
size_t alloc;
struct attr_check **checks;
-#ifndef NO_PTHREADS
pthread_mutex_t mutex;
-#endif
} check_vector;
static inline void vector_lock(void)
{
-#ifndef NO_PTHREADS
pthread_mutex_lock(&check_vector.mutex);
-#endif
}
static inline void vector_unlock(void)
{
-#ifndef NO_PTHREADS
pthread_mutex_unlock(&check_vector.mutex);
-#endif
}
static void check_vector_add(struct attr_check *c)
void attr_start(void)
{
-#ifndef NO_PTHREADS
pthread_mutex_init(&g_attr_hashmap.mutex, NULL);
pthread_mutex_init(&check_vector.mutex, NULL);
-#endif
}
#include "packfile.h"
#include "object-store.h"
#include "run-command.h"
+#include "worktree.h"
#define REACHABLE 0x0001
#define SEEN 0x0002
static int keep_cache_objects;
static struct fsck_options fsck_walk_options = FSCK_OPTIONS_DEFAULT;
static struct fsck_options fsck_obj_options = FSCK_OPTIONS_DEFAULT;
-static struct object_id head_oid;
-static const char *head_points_at;
static int errors_found;
static int write_lost_and_found;
static int verbose;
static const char *describe_object(struct object *obj)
{
- static struct strbuf buf = STRBUF_INIT;
- char *name = name_objects ?
- lookup_decoration(fsck_walk_options.object_names, obj) : NULL;
+ static struct strbuf bufs[] = {
+ STRBUF_INIT, STRBUF_INIT, STRBUF_INIT, STRBUF_INIT
+ };
+ static int b = 0;
+ struct strbuf *buf;
+ char *name = NULL;
- strbuf_reset(&buf);
- strbuf_addstr(&buf, oid_to_hex(&obj->oid));
+ if (name_objects)
+ name = lookup_decoration(fsck_walk_options.object_names, obj);
+
+ buf = bufs + b;
+ b = (b + 1) % ARRAY_SIZE(bufs);
+ strbuf_reset(buf);
+ strbuf_addstr(buf, oid_to_hex(&obj->oid));
if (name)
- strbuf_addf(&buf, " (%s)", name);
+ strbuf_addf(buf, " (%s)", name);
- return buf.buf;
+ return buf->buf;
}
static const char *printable_type(struct object *obj)
ret = type_name(obj->type);
if (!ret)
- ret = "unknown";
+ ret = _("unknown");
return ret;
}
return git_default_config(var, value, cb);
}
- static void objreport(struct object *obj, const char *msg_type,
- const char *err)
- {
- fprintf(stderr, "%s in %s %s: %s\n",
- msg_type, printable_type(obj), describe_object(obj), err);
- }
-
static int objerror(struct object *obj, const char *err)
{
errors_found |= ERROR_OBJECT;
- objreport(obj, "error", err);
+ /* TRANSLATORS: e.g. error in tree 01bfda: <more explanation> */
+ fprintf_ln(stderr, _("error in %s %s: %s"),
+ printable_type(obj), describe_object(obj), err);
return -1;
}
static int fsck_error_func(struct fsck_options *o,
struct object *obj, int type, const char *message)
{
- objreport(obj, (type == FSCK_WARN) ? "warning" : "error", message);
- return (type == FSCK_WARN) ? 0 : 1;
+ switch (type) {
+ case FSCK_WARN:
+ /* TRANSLATORS: e.g. warning in tree 01bfda: <more explanation> */
+ fprintf_ln(stderr, _("warning in %s %s: %s"),
+ printable_type(obj), describe_object(obj), message);
+ return 0;
+ case FSCK_ERROR:
+ /* TRANSLATORS: e.g. error in tree 01bfda: <more explanation> */
+ fprintf_ln(stderr, _("error in %s %s: %s"),
+ printable_type(obj), describe_object(obj), message);
+ return 1;
+ default:
+ BUG("%d (FSCK_IGNORE?) should never trigger this callback", type);
+ }
}
static struct object_array pending;
*/
if (!obj) {
/* ... these references to parent->fld are safe here */
- printf("broken link from %7s %s\n",
- printable_type(parent), describe_object(parent));
- printf("broken link from %7s %s\n",
- (type == OBJ_ANY ? "unknown" : type_name(type)), "unknown");
+ printf_ln(_("broken link from %7s %s"),
+ printable_type(parent), describe_object(parent));
+ printf_ln(_("broken link from %7s %s"),
+ (type == OBJ_ANY ? _("unknown") : type_name(type)),
+ _("unknown"));
errors_found |= ERROR_REACHABLE;
return 1;
}
if (type != OBJ_ANY && obj->type != type)
/* ... and the reference to parent is safe here */
- objerror(parent, "wrong object type in link");
+ objerror(parent, _("wrong object type in link"));
if (obj->flags & REACHABLE)
return 0;
if (!(obj->flags & HAS_OBJ)) {
if (parent && !has_object_file(&obj->oid)) {
- printf("broken link from %7s %s\n",
- printable_type(parent), describe_object(parent));
- printf(" to %7s %s\n",
- printable_type(obj), describe_object(obj));
+ printf_ln(_("broken link from %7s %s\n"
+ " to %7s %s"),
+ printable_type(parent),
+ describe_object(parent),
+ printable_type(obj),
+ describe_object(obj));
errors_found |= ERROR_REACHABLE;
}
return 1;
return;
if (has_object_pack(&obj->oid))
return; /* it is in pack - forget about it */
- printf("missing %s %s\n", printable_type(obj),
- describe_object(obj));
+ printf_ln(_("missing %s %s"), printable_type(obj),
+ describe_object(obj));
errors_found |= ERROR_REACHABLE;
return;
}
* since this is something that is prunable.
*/
if (show_unreachable) {
- printf("unreachable %s %s\n", printable_type(obj),
- describe_object(obj));
+ printf_ln(_("unreachable %s %s"), printable_type(obj),
+ describe_object(obj));
return;
}
*/
if (!(obj->flags & USED)) {
if (show_dangling)
- printf("dangling %s %s\n", printable_type(obj),
- describe_object(obj));
+ printf_ln(_("dangling %s %s"), printable_type(obj),
+ describe_object(obj));
if (write_lost_and_found) {
char *filename = git_pathdup("lost-found/%s/%s",
obj->type == OBJ_COMMIT ? "commit" : "other",
FILE *f;
if (safe_create_leading_directories_const(filename)) {
- error("Could not create lost-found");
+ error(_("could not create lost-found"));
free(filename);
return;
}
f = xfopen(filename, "w");
if (obj->type == OBJ_BLOB) {
if (stream_blob_to_fd(fileno(f), &obj->oid, NULL, 1))
- die_errno("Could not write '%s'", filename);
+ die_errno(_("could not write '%s'"), filename);
} else
fprintf(f, "%s\n", describe_object(obj));
if (fclose(f))
- die_errno("Could not finish '%s'",
+ die_errno(_("could not finish '%s'"),
filename);
free(filename);
}
static void check_object(struct object *obj)
{
if (verbose)
- fprintf(stderr, "Checking %s\n", describe_object(obj));
+ fprintf_ln(stderr, _("Checking %s"), describe_object(obj));
if (obj->flags & REACHABLE)
check_reachable_object(obj);
/* Look up all the requirements, warn about missing objects.. */
max = get_max_object_index();
if (verbose)
- fprintf(stderr, "Checking connectivity (%d objects)\n", max);
+ fprintf_ln(stderr, _("Checking connectivity (%d objects)"), max);
for (i = 0; i < max; i++) {
struct object *obj = get_indexed_object(i);
obj->flags |= SEEN;
if (verbose)
- fprintf(stderr, "Checking %s %s\n",
- printable_type(obj), describe_object(obj));
+ fprintf_ln(stderr, _("Checking %s %s"),
+ printable_type(obj), describe_object(obj));
if (fsck_walk(obj, NULL, &fsck_obj_options))
- objerror(obj, "broken links");
+ objerror(obj, _("broken links"));
err = fsck_object(obj, buffer, size, &fsck_obj_options);
if (err)
goto out;
struct commit *commit = (struct commit *) obj;
if (!commit->parents && show_root)
- printf("root %s\n", describe_object(&commit->object));
+ printf_ln(_("root %s"),
+ describe_object(&commit->object));
}
if (obj->type == OBJ_TAG) {
struct tag *tag = (struct tag *) obj;
if (show_tags && tag->tagged) {
- printf("tagged %s %s", printable_type(tag->tagged),
- describe_object(tag->tagged));
- printf(" (%s) in %s\n", tag->tag,
- describe_object(&tag->object));
+ printf_ln(_("tagged %s %s (%s) in %s"),
+ printable_type(tag->tagged),
+ describe_object(tag->tagged),
+ tag->tag,
+ describe_object(&tag->object));
}
}
eaten);
if (!obj) {
errors_found |= ERROR_OBJECT;
- return error("%s: object corrupt or missing", oid_to_hex(oid));
+ return error(_("%s: object corrupt or missing"),
+ oid_to_hex(oid));
}
obj->flags &= ~(REACHABLE | SEEN);
obj->flags |= HAS_OBJ;
obj->flags |= USED;
mark_object_reachable(obj);
} else if (!is_promisor_object(oid)) {
- error("%s: invalid reflog entry %s", refname, oid_to_hex(oid));
+ error(_("%s: invalid reflog entry %s"),
+ refname, oid_to_hex(oid));
errors_found |= ERROR_REACHABLE;
}
}
const char *refname = cb_data;
if (verbose)
- fprintf(stderr, "Checking reflog %s->%s\n",
- oid_to_hex(ooid), oid_to_hex(noid));
+ fprintf_ln(stderr, _("Checking reflog %s->%s"),
+ oid_to_hex(ooid), oid_to_hex(noid));
fsck_handle_reflog_oid(refname, ooid, 0);
fsck_handle_reflog_oid(refname, noid, timestamp);
static int fsck_handle_reflog(const char *logname, const struct object_id *oid,
int flag, void *cb_data)
{
- for_each_reflog_ent(logname, fsck_handle_reflog_ent, (void *)logname);
+ struct strbuf refname = STRBUF_INIT;
+
+ strbuf_worktree_ref(cb_data, &refname, logname);
+ for_each_reflog_ent(refname.buf, fsck_handle_reflog_ent, refname.buf);
+ strbuf_release(&refname);
return 0;
}
default_refs++;
return 0;
}
- error("%s: invalid sha1 pointer %s", refname, oid_to_hex(oid));
+ error(_("%s: invalid sha1 pointer %s"),
+ refname, oid_to_hex(oid));
errors_found |= ERROR_REACHABLE;
/* We'll continue with the rest despite the error.. */
return 0;
}
if (obj->type != OBJ_COMMIT && is_branch(refname)) {
- error("%s: not a commit", refname);
+ error(_("%s: not a commit"), refname);
errors_found |= ERROR_REFS;
}
default_refs++;
return 0;
}
+static int fsck_head_link(const char *head_ref_name,
+ const char **head_points_at,
+ struct object_id *head_oid);
+
static void get_default_heads(void)
{
- if (head_points_at && !is_null_oid(&head_oid))
- fsck_handle_ref("HEAD", &head_oid, 0, NULL);
+ struct worktree **worktrees, **p;
+ const char *head_points_at;
+ struct object_id head_oid;
+
for_each_rawref(fsck_handle_ref, NULL);
- if (include_reflogs)
- for_each_reflog(fsck_handle_reflog, NULL);
+
+ worktrees = get_worktrees(0);
+ for (p = worktrees; *p; p++) {
+ struct worktree *wt = *p;
+ struct strbuf ref = STRBUF_INIT;
+
+ strbuf_worktree_ref(wt, &ref, "HEAD");
+ fsck_head_link(ref.buf, &head_points_at, &head_oid);
+ if (head_points_at && !is_null_oid(&head_oid))
+ fsck_handle_ref(ref.buf, &head_oid, 0, NULL);
+ strbuf_release(&ref);
+
+ if (include_reflogs)
+ refs_for_each_reflog(get_worktree_ref_store(wt),
+ fsck_handle_reflog, wt);
+ }
+ free_worktrees(worktrees);
/*
* Not having any default heads isn't really fatal, but
* "show_unreachable" flag.
*/
if (!default_refs) {
- fprintf(stderr, "notice: No default references\n");
+ fprintf_ln(stderr, _("notice: No default references"));
show_unreachable = 0;
}
}
if (read_loose_object(path, oid, &type, &size, &contents) < 0) {
errors_found |= ERROR_OBJECT;
- error("%s: object corrupt or missing: %s",
+ error(_("%s: object corrupt or missing: %s"),
oid_to_hex(oid), path);
return 0; /* keep checking other objects */
}
if (!obj) {
errors_found |= ERROR_OBJECT;
- error("%s: object could not be parsed: %s",
+ error(_("%s: object could not be parsed: %s"),
oid_to_hex(oid), path);
if (!eaten)
free(contents);
static int fsck_cruft(const char *basename, const char *path, void *data)
{
if (!starts_with(basename, "tmp_obj_"))
- fprintf(stderr, "bad sha1 file: %s\n", path);
+ fprintf_ln(stderr, _("bad sha1 file: %s"), path);
return 0;
}
struct progress *progress = NULL;
if (verbose)
- fprintf(stderr, "Checking object directory\n");
+ fprintf_ln(stderr, _("Checking object directory"));
if (show_progress)
progress = start_progress(_("Checking object directories"), 256);
stop_progress(&progress);
}
-static int fsck_head_link(void)
+static int fsck_head_link(const char *head_ref_name,
+ const char **head_points_at,
+ struct object_id *head_oid)
{
int null_is_error = 0;
if (verbose)
- fprintf(stderr, "Checking %s link\n", head_ref_name);
- fprintf_ln(stderr, _("Checking HEAD link"));
++ fprintf_ln(stderr, _("Checking %s link"), head_ref_name);
- head_points_at = resolve_ref_unsafe("HEAD", 0, &head_oid, NULL);
- if (!head_points_at) {
+ *head_points_at = resolve_ref_unsafe(head_ref_name, 0, head_oid, NULL);
+ if (!*head_points_at) {
errors_found |= ERROR_REFS;
- return error("Invalid %s", head_ref_name);
- return error(_("invalid HEAD"));
++ return error(_("invalid %s"), head_ref_name);
}
- if (!strcmp(head_points_at, "HEAD"))
+ if (!strcmp(*head_points_at, head_ref_name))
/* detached HEAD */
null_is_error = 1;
- else if (!starts_with(head_points_at, "refs/heads/")) {
+ else if (!starts_with(*head_points_at, "refs/heads/")) {
errors_found |= ERROR_REFS;
- return error("%s points to something strange (%s)",
- return error(_("HEAD points to something strange (%s)"),
- head_points_at);
++ return error(_("%s points to something strange (%s)"),
+ head_ref_name, *head_points_at);
}
- if (is_null_oid(&head_oid)) {
+ if (is_null_oid(head_oid)) {
if (null_is_error) {
errors_found |= ERROR_REFS;
- return error("%s: detached HEAD points at nothing",
- return error(_("HEAD: detached HEAD points at nothing"));
++ return error(_("%s: detached HEAD points at nothing"),
+ head_ref_name);
}
- fprintf(stderr, "notice: %s points to an unborn branch (%s)\n",
- head_ref_name, *head_points_at + 11);
- fprintf_ln(stderr, _("notice: HEAD points to an unborn branch (%s)"),
- head_points_at + 11);
++ fprintf_ln(stderr,
++ _("notice: %s points to an unborn branch (%s)"),
++ head_ref_name, *head_points_at + 11);
}
return 0;
}
int err = 0;
if (verbose)
- fprintf(stderr, "Checking cache tree\n");
+ fprintf_ln(stderr, _("Checking cache tree"));
if (0 <= it->entry_count) {
struct object *obj = parse_object(the_repository, &it->oid);
if (!obj) {
- error("%s: invalid sha1 pointer in cache-tree",
+ error(_("%s: invalid sha1 pointer in cache-tree"),
oid_to_hex(&it->oid));
errors_found |= ERROR_REFS;
return 1;
obj, xstrdup(":"));
mark_object_reachable(obj);
if (obj->type != OBJ_TREE)
- err |= objerror(obj, "non-tree in cache-tree");
+ err |= objerror(obj, _("non-tree in cache-tree"));
}
for (i = 0; i < it->subtree_nr; i++)
err |= fsck_cache_tree(it->down[i]->cache_tree);
git_config(fsck_config, NULL);
- fsck_head_link();
if (connectivity_only) {
for_each_loose_object(mark_loose_for_connectivity, NULL, 0);
for_each_packed_object(mark_packed_for_connectivity, NULL, 0);
if (!obj || !(obj->flags & HAS_OBJ)) {
if (is_promisor_object(&oid))
continue;
- error("%s: object missing", oid_to_hex(&oid));
+ error(_("%s: object missing"), oid_to_hex(&oid));
errors_found |= ERROR_OBJECT;
continue;
}
mark_object_reachable(obj);
continue;
}
- error("invalid parameter: expected sha1, got '%s'", arg);
+ error(_("invalid parameter: expected sha1, got '%s'"), arg);
errors_found |= ERROR_OBJECT;
}
ctx->argc--;
arg = *++ctx->argv;
} else
- return opterror(opt, "requires a value", 0);
+ return error(_("option `%s' requires a value"), opt->long_name);
if (buf->len)
strbuf_addch(buf, '\n');
static int option_parse_n(const struct option *opt,
const char *arg, int unset)
{
+ BUG_ON_OPT_ARG(arg);
show_diffstat = unset;
return 0;
}
argc = split_cmdline(bmo, &argv);
if (argc < 0)
die(_("Bad branch.%s.mergeoptions string: %s"), branch,
- split_cmdline_strerror(argc));
+ _(split_cmdline_strerror(argc)));
REALLOC_ARRAY(argv, argc + 2);
MOVE_ARRAY(argv + 1, argv, argc + 1);
argc++;
die(_("%s - not something we can merge"), argv[0]);
if (remoteheads->next)
die(_("Can merge only exactly one commit into empty head"));
+
+ if (verify_signatures)
+ verify_merge_signature(remoteheads->item, verbosity);
+
remote_head_oid = &remoteheads->item->object.oid;
read_empty(remote_head_oid, 0);
update_ref("initial pull", "HEAD", remote_head_oid, NULL, 0,
if (verify_signatures) {
for (p = remoteheads; p; p = p->next) {
- struct commit *commit = p->item;
- char hex[GIT_MAX_HEXSZ + 1];
- struct signature_check signature_check;
- memset(&signature_check, 0, sizeof(signature_check));
-
- check_commit_signature(commit, &signature_check);
-
- find_unique_abbrev_r(hex, &commit->object.oid, DEFAULT_ABBREV);
- switch (signature_check.result) {
- case 'G':
- break;
- case 'U':
- die(_("Commit %s has an untrusted GPG signature, "
- "allegedly by %s."), hex, signature_check.signer);
- case 'B':
- die(_("Commit %s has a bad GPG signature "
- "allegedly by %s."), hex, signature_check.signer);
- default: /* 'N' */
- die(_("Commit %s does not have a GPG signature."), hex);
- }
- if (verbosity >= 0 && signature_check.result == 'G')
- printf(_("Commit %s has a good GPG signature by %s\n"),
- hex, signature_check.signer);
-
- signature_check_clear(&signature_check);
+ verify_merge_signature(p->item, verbosity);
}
}
#include "diff.h"
#include "revision.h"
#include "reachable.h"
+#include "worktree.h"
/* NEEDSWORK: switch to using parse_options */
static const char reflog_expire_usage[] =
- "git reflog expire [--expire=<time>] [--expire-unreachable=<time>] [--rewrite] [--updateref] [--stale-fix] [--dry-run | -n] [--verbose] [--all] <refs>...";
+ N_("git reflog expire [--expire=<time>] "
+ "[--expire-unreachable=<time>] "
+ "[--rewrite] [--updateref] [--stale-fix] [--dry-run | -n] "
+ "[--verbose] [--all] <refs>...");
static const char reflog_delete_usage[] =
- "git reflog delete [--rewrite] [--updateref] [--dry-run | -n] [--verbose] <refs>...";
+ N_("git reflog delete [--rewrite] [--updateref] "
+ "[--dry-run | -n] [--verbose] <refs>...");
static const char reflog_exists_usage[] =
- "git reflog exists <ref>";
+ N_("git reflog exists <ref>");
static timestamp_t default_reflog_expire;
static timestamp_t default_reflog_expire_unreachable;
struct collected_reflog **e;
int alloc;
int nr;
+ struct worktree *wt;
};
/* Remember to update object flag allocation in object.h */
return 0;
}
+static int is_head(const char *refname)
+{
+ switch (ref_type(refname)) {
+ case REF_TYPE_OTHER_PSEUDOREF:
+ case REF_TYPE_MAIN_PSEUDOREF:
+ if (parse_worktree_ref(refname, NULL, NULL, &refname))
+ BUG("not a worktree ref: %s", refname);
+ break;
+ default:
+ break;
+ }
+ return !strcmp(refname, "HEAD");
+}
+
static void reflog_expiry_prepare(const char *refname,
const struct object_id *oid,
void *cb_data)
{
struct expire_reflog_policy_cb *cb = cb_data;
- if (!cb->cmd.expire_unreachable || !strcmp(refname, "HEAD")) {
+ if (!cb->cmd.expire_unreachable || is_head(refname)) {
cb->tip_commit = NULL;
cb->unreachable_expire_kind = UE_HEAD;
} else {
{
struct collected_reflog *e;
struct collect_reflog_cb *cb = cb_data;
+ struct strbuf newref = STRBUF_INIT;
+
+ /*
+ * Avoid collecting the same shared ref multiple times because
+ * they are available via all worktrees.
+ */
+ if (!cb->wt->is_current && ref_type(ref) == REF_TYPE_NORMAL)
+ return 0;
+
+ strbuf_worktree_ref(cb->wt, &newref, ref);
+ FLEX_ALLOC_STR(e, reflog, newref.buf);
+ strbuf_release(&newref);
- FLEX_ALLOC_STR(e, reflog, ref);
oidcpy(&e->oid, oid);
ALLOC_GROW(cb->e, cb->nr + 1, cb->alloc);
cb->e[cb->nr++] = e;
{
struct expire_reflog_policy_cb cb;
timestamp_t now = time(NULL);
- int i, status, do_all;
+ int i, status, do_all, all_worktrees = 1;
int explicit_expiry = 0;
unsigned int flags = 0;
flags |= EXPIRE_REFLOGS_UPDATE_REF;
else if (!strcmp(arg, "--all"))
do_all = 1;
+ else if (!strcmp(arg, "--single-worktree"))
+ all_worktrees = 0;
else if (!strcmp(arg, "--verbose"))
flags |= EXPIRE_REFLOGS_VERBOSE;
else if (!strcmp(arg, "--")) {
break;
}
else if (arg[0] == '-')
- usage(reflog_expire_usage);
+ usage(_(reflog_expire_usage));
else
break;
}
if (cb.cmd.stalefix) {
repo_init_revisions(the_repository, &cb.cmd.revs, prefix);
if (flags & EXPIRE_REFLOGS_VERBOSE)
- printf("Marking reachable objects...");
+ printf(_("Marking reachable objects..."));
mark_reachable_objects(&cb.cmd.revs, 0, 0, NULL);
if (flags & EXPIRE_REFLOGS_VERBOSE)
putchar('\n');
if (do_all) {
struct collect_reflog_cb collected;
+ struct worktree **worktrees, **p;
int i;
memset(&collected, 0, sizeof(collected));
- for_each_reflog(collect_reflog, &collected);
+ worktrees = get_worktrees(0);
+ for (p = worktrees; *p; p++) {
+ if (!all_worktrees && !(*p)->is_current)
+ continue;
+ collected.wt = *p;
+ refs_for_each_reflog(get_worktree_ref_store(*p),
+ collect_reflog, &collected);
+ }
+ free_worktrees(worktrees);
for (i = 0; i < collected.nr; i++) {
struct collected_reflog *e = collected.e[i];
set_reflog_expiry_param(&cb.cmd, explicit_expiry, e->reflog);
char *ref;
struct object_id oid;
if (!dwim_log(argv[i], strlen(argv[i]), &oid, &ref)) {
- status |= error("%s points nowhere!", argv[i]);
+ status |= error(_("%s points nowhere!"), argv[i]);
continue;
}
set_reflog_expiry_param(&cb.cmd, explicit_expiry, ref);
break;
}
else if (arg[0] == '-')
- usage(reflog_delete_usage);
+ usage(_(reflog_delete_usage));
else
break;
}
if (argc - i < 1)
- return error("Nothing to delete?");
+ return error(_("no reflog specified to delete"));
for ( ; i < argc; i++) {
const char *spec = strstr(argv[i], "@{");
int recno;
if (!spec) {
- status |= error("Not a reflog: %s", argv[i]);
+ status |= error(_("not a reflog: %s"), argv[i]);
continue;
}
if (!dwim_log(argv[i], spec - argv[i], &oid, &ref)) {
- status |= error("no reflog for '%s'", argv[i]);
+ status |= error(_("no reflog for '%s'"), argv[i]);
continue;
}
break;
}
else if (arg[0] == '-')
- usage(reflog_exists_usage);
+ usage(_(reflog_exists_usage));
else
break;
}
start = i;
if (argc - start != 1)
- usage(reflog_exists_usage);
+ usage(_(reflog_exists_usage));
if (check_refname_format(argv[start], REFNAME_ALLOW_ONELEVEL))
- die("invalid ref format: %s", argv[start]);
+ die(_("invalid ref format: %s"), argv[start]);
return !reflog_exists(argv[start]);
}
*/
static const char reflog_usage[] =
- "git reflog [ show | expire | delete | exists ]";
+ N_("git reflog [ show | expire | delete | exists ]");
int cmd_reflog(int argc, const char **argv, const char *prefix)
{
if (argc > 1 && !strcmp(argv[1], "-h"))
- usage(reflog_usage);
+ usage(_(reflog_usage));
/* With no command, we default to showing it. */
if (argc < 2 || *argv[1] == '-')
if (cmd->in == -1) {
if (start_command(cmd))
- die("Could not start pack-objects to repack promisor objects");
+ die(_("could not start pack-objects to repack promisor objects"));
}
xwrite(cmd->in, oid_to_hex(oid), GIT_SHA1_HEXSZ);
while (strbuf_getline_lf(&line, out) != EOF) {
char *promisor_name;
int fd;
- if (line.len != 40)
- die(_("repack: Expecting 40 character sha1 lines only from pack-objects."));
+ if (line.len != the_hash_algo->hexsz)
- die("repack: Expecting full hex object ID lines only from pack-objects.");
++ die(_("repack: Expecting full hex object ID lines only from pack-objects."));
string_list_append(names, line.buf);
/*
line.buf);
fd = open(promisor_name, O_CREAT|O_EXCL|O_WRONLY, 0600);
if (fd < 0)
- die_errno("unable to create '%s'", promisor_name);
+ die_errno(_("unable to create '%s'"), promisor_name);
close(fd);
free(promisor_name);
}
fclose(out);
if (finish_command(&cmd))
- die("Could not finish pack-objects to repack promisor objects");
+ die(_("could not finish pack-objects to repack promisor objects"));
}
#define ALL_INTO_ONE 1
out = xfdopen(cmd.out, "r");
while (strbuf_getline_lf(&line, out) != EOF) {
- if (line.len != 40)
- die(_("repack: Expecting 40 character sha1 lines only from pack-objects"));
+ if (line.len != the_hash_algo->hexsz)
- die("repack: Expecting full hex object ID lines only from pack-objects.");
++ die(_("repack: Expecting full hex object ID lines only from pack-objects."));
string_list_append(&names, line.buf);
}
fclose(out);
return ret;
if (!names.nr && !po_args.quiet)
- printf("Nothing new to pack.\n");
+ printf_ln(_("Nothing new to pack."));
/*
* Ok we have prepared all new packfiles.
char *fname, *fname_old;
if (!midx_cleared) {
- /* if we move a packfile, it will invalidated the midx */
- clear_midx_file(get_object_directory());
+ clear_midx_file(the_repository);
midx_cleared = 1;
}
if (rollback_failure.nr) {
int i;
fprintf(stderr,
- "WARNING: Some packs in use have been renamed by\n"
- "WARNING: prefixing old- to their name, in order to\n"
- "WARNING: replace them with the new version of the\n"
- "WARNING: file. But the operation failed, and the\n"
- "WARNING: attempt to rename them back to their\n"
- "WARNING: original names also failed.\n"
- "WARNING: Please rename them in %s manually:\n", packdir);
+ _("WARNING: Some packs in use have been renamed by\n"
+ "WARNING: prefixing old- to their name, in order to\n"
+ "WARNING: replace them with the new version of the\n"
+ "WARNING: file. But the operation failed, and the\n"
+ "WARNING: attempt to rename them back to their\n"
+ "WARNING: original names also failed.\n"
+ "WARNING: Please rename them in %s manually:\n"), packdir);
for (i = 0; i < rollback_failure.nr; i++)
fprintf(stderr, "WARNING: old-%s -> %s\n",
rollback_failure.items[i].string,
reprepare_packed_git(the_repository);
if (delete_redundant) {
+ const int hexsz = the_hash_algo->hexsz;
int opts = 0;
string_list_sort(&names);
for_each_string_list_item(item, &existing_packs) {
char *sha1;
size_t len = strlen(item->string);
- if (len < 40)
+ if (len < hexsz)
continue;
- sha1 = item->string + len - 40;
+ sha1 = item->string + len - hexsz;
if (!string_list_has_string(&names, sha1))
remove_redundant_pack(packdir, item->string);
}
if (!po_args.quiet && isatty(2))
opts |= PRUNE_PACKED_VERBOSE;
prune_packed_objects(opts);
+
+ if (!keep_unreachable &&
+ (!(pack_everything & LOOSEN_UNREACHABLE) ||
+ unpack_unreachable) &&
+ is_repository_shallow(the_repository))
+ prune_shallow(PRUNE_QUICK);
}
if (!no_update_server_info)
update_server_info(0);
remove_temporary_files();
+
+ if (git_env_bool(GIT_TEST_MULTI_PACK_INDEX, 0))
+ write_midx_file(get_object_directory());
+
string_list_clear(&names, 0);
string_list_clear(&rollback, 0);
string_list_clear(&existing_packs, 0);
if (ret >= 0) /* normal exit */
exit(ret);
- die_errno("while expanding alias '%s': '%s'",
- alias_command, alias_string + 1);
+ die_errno(_("while expanding alias '%s': '%s'"),
+ alias_command, alias_string + 1);
}
count = split_cmdline(alias_string, &new_argv);
if (count < 0)
- die("Bad alias.%s string: %s", alias_command,
- split_cmdline_strerror(count));
+ die(_("bad alias.%s string: %s"), alias_command,
+ _(split_cmdline_strerror(count)));
option_count = handle_options(&new_argv, &count, &envchanged);
if (envchanged)
- die("alias '%s' changes environment variables.\n"
- "You can use '!git' in the alias to do this",
- alias_command);
+ die(_("alias '%s' changes environment variables.\n"
+ "You can use '!git' in the alias to do this"),
+ alias_command);
memmove(new_argv - option_count, new_argv,
count * sizeof(char *));
new_argv -= option_count;
if (count < 1)
- die("empty alias for %s", alias_command);
+ die(_("empty alias for %s"), alias_command);
if (!strcmp(alias_command, new_argv[0]))
- die("recursive alias: %s", alias_command);
+ die(_("recursive alias: %s"), alias_command);
trace_argv_printf(new_argv,
"trace: alias expansion: %s =>",
if (!help && get_super_prefix()) {
if (!(p->option & SUPPORT_SUPER_PREFIX))
- die("%s doesn't support --super-prefix", p->cmd);
+ die(_("%s doesn't support --super-prefix"), p->cmd);
}
if (!help && p->option & NEED_WORK_TREE)
/* Check for ENOSPC and EIO errors.. */
if (fflush(stdout))
- die_errno("write failure on standard output");
+ die_errno(_("write failure on standard output"));
if (ferror(stdout))
- die("unknown write failure on standard output");
+ die(_("unknown write failure on standard output"));
if (fclose(stdout))
- die_errno("close failed on standard output");
+ die_errno(_("close failed on standard output"));
return 0;
}
{ "push", cmd_push, RUN_SETUP },
{ "range-diff", cmd_range_diff, RUN_SETUP | USE_PAGER },
{ "read-tree", cmd_read_tree, RUN_SETUP | SUPPORT_SUPER_PREFIX},
- { "rebase--helper", cmd_rebase__helper, RUN_SETUP | NEED_WORK_TREE },
+ /*
+ * NEEDSWORK: Until the rebase is independent and needs no redirection
+ * to rebase shell script this is kept as is, then should be changed to
+ * RUN_SETUP | NEED_WORK_TREE
+ */
+ { "rebase", cmd_rebase },
+ { "rebase--interactive", cmd_rebase__interactive, RUN_SETUP | NEED_WORK_TREE },
{ "receive-pack", cmd_receive_pack },
{ "reflog", cmd_reflog, RUN_SETUP },
{ "remote", cmd_remote, RUN_SETUP },
int status;
if (get_super_prefix())
- die("%s doesn't support --super-prefix", argv[0]);
+ die(_("%s doesn't support --super-prefix"), argv[0]);
if (use_pager == -1 && !is_builtin(argv[0]))
use_pager = check_pager_config(argv[0]);
if (skip_prefix(cmd, "git-", &cmd)) {
argv[0] = cmd;
handle_builtin(argc, argv);
- die("cannot handle %s as a builtin", cmd);
+ die(_("cannot handle %s as a builtin"), cmd);
}
/* Look for flags.. */
} else {
/* The user didn't specify a command; give them help */
commit_pager_choice();
- printf("usage: %s\n\n", git_usage_string);
+ printf(_("usage: %s\n\n"), git_usage_string);
list_common_cmds_help();
printf("\n%s\n", _(git_more_info_string));
exit(1);
} else {
v = strtol(arg, (char **)&arg, 10);
if (*arg)
- return opterror(opt, "expects a numerical value", 0);
+ return error(_("option `%s' expects a numerical value"),
+ opt->long_name);
if (v && v < MINIMUM_ABBREV)
v = MINIMUM_ABBREV;
else if (v > 40)
return 0;
}
-int parse_opt_approxidate_cb(const struct option *opt, const char *arg,
- int unset)
-{
- *(timestamp_t *)(opt->value) = approxidate(arg);
- return 0;
-}
-
int parse_opt_expiry_date_cb(const struct option *opt, const char *arg,
int unset)
{
arg = unset ? "never" : (const char *)opt->defval;
value = git_config_colorbool(NULL, arg);
if (value < 0)
- return opterror(opt,
- "expects \"always\", \"auto\", or \"never\"", 0);
+ return error(_("option `%s' expects \"always\", \"auto\", or \"never\""),
+ opt->long_name);
*(int *)opt->value = value;
return 0;
}
{
int *target = opt->value;
+ BUG_ON_OPT_ARG(arg);
+
if (unset)
/* --no-quiet, --no-verbose */
*target = 0;
struct object_id oid;
struct commit *commit;
+ BUG_ON_OPT_NEG(unset);
+
if (!arg)
return -1;
if (get_oid(arg, &oid))
int parse_opt_tertiary(const struct option *opt, const char *arg, int unset)
{
int *target = opt->value;
+
+ BUG_ON_OPT_ARG(arg);
+
*target = unset ? 2 : 1;
return 0;
}
p->argc--;
*arg = *++p->argv;
} else
- return opterror(opt, "requires a value", flags);
+ return error(_("%s requires a value"), optname(opt, flags));
return 0;
}
int flags)
{
const struct option *that;
- struct strbuf message = STRBUF_INIT;
struct strbuf that_name = STRBUF_INIT;
/*
strbuf_addf(&that_name, "--%s", that->long_name);
else
strbuf_addf(&that_name, "-%c", that->short_name);
- strbuf_addf(&message, ": incompatible with %s", that_name.buf);
+ error(_("%s is incompatible with %s"),
+ optname(opt, flags), that_name.buf);
strbuf_release(&that_name);
- opterror(opt, message.buf, flags);
- strbuf_release(&message);
return -1;
}
- return opterror(opt, ": incompatible with something else", flags);
+ return error(_("%s : incompatible with something else"),
+ optname(opt, flags));
}
static int get_value(struct parse_opt_ctx_t *p,
int err;
if (unset && p->opt)
- return opterror(opt, "takes no value", flags);
+ return error(_("%s takes no value"), optname(opt, flags));
if (unset && (opt->flags & PARSE_OPT_NONEG))
- return opterror(opt, "isn't available", flags);
+ return error(_("%s isn't available"), optname(opt, flags));
if (!(flags & OPT_SHORT) && p->opt && (opt->flags & PARSE_OPT_NOARG))
- return opterror(opt, "takes no value", flags);
+ return error(_("%s takes no value"), optname(opt, flags));
switch (opt->type) {
case OPTION_LOWLEVEL_CALLBACK:
return -1;
*(int *)opt->value = strtol(arg, (char **)&s, 10);
if (*s)
- return opterror(opt, "expects a numerical value", flags);
+ return error(_("%s expects a numerical value"),
+ optname(opt, flags));
return 0;
case OPTION_MAGNITUDE:
if (get_arg(p, opt, flags, &arg))
return -1;
if (!git_parse_ulong(arg, opt->value))
- return opterror(opt,
- "expects a non-negative integer value with an optional k/m/g suffix",
- flags);
+ return error(_("%s expects a non-negative integer value"
+ " with an optional k/m/g suffix"),
+ optname(opt, flags));
return 0;
default:
- die("should not happen, someone must be hit on the forehead");
+ BUG("opt->type %d should not happen", opt->type);
}
}
if (!rest)
continue;
if (*rest == '=')
- return opterror(options, "takes no value", flags);
+ return error(_("%s takes no value"),
+ optname(options, flags));
if (*rest)
continue;
p->out[p->cpidx++] = arg - 2;
}
if (ambiguous_option) {
- error("Ambiguous option: %s "
- "(could be --%s%s or --%s%s)",
+ error(_("ambiguous option: %s "
+ "(could be --%s%s or --%s%s)"),
arg,
(ambiguous_flags & OPT_UNSET) ? "no-" : "",
ambiguous_option->long_name,
return;
if (starts_with(arg, "no-")) {
- error ("did you mean `--%s` (with two dashes ?)", arg);
+ error(_("did you mean `--%s` (with two dashes ?)"), arg);
exit(129);
}
if (!options->long_name)
continue;
if (starts_with(options->long_name, arg)) {
- error ("did you mean `--%s` (with two dashes ?)", arg);
+ error(_("did you mean `--%s` (with two dashes ?)"), arg);
exit(129);
}
}
ctx->flags = flags;
if ((flags & PARSE_OPT_KEEP_UNKNOWN) &&
(flags & PARSE_OPT_STOP_AT_NON_OPTION))
- die("STOP_AT_NON_OPTION and KEEP_UNKNOWN don't go together");
+ BUG("STOP_AT_NON_OPTION and KEEP_UNKNOWN don't go together");
parse_options_check(options);
}
show_negated_gitcomp(original_opts, -1);
show_negated_gitcomp(original_opts, nr_noopts);
fputc('\n', stdout);
- exit(0);
+ return PARSE_OPT_COMPLETE;
}
static int usage_with_options_internal(struct parse_opt_ctx_t *,
case PARSE_OPT_HELP:
case PARSE_OPT_ERROR:
exit(129);
+ case PARSE_OPT_COMPLETE:
+ exit(0);
case PARSE_OPT_NON_OPTION:
case PARSE_OPT_DONE:
break;
default: /* PARSE_OPT_UNKNOWN */
if (ctx.argv[0][1] == '-') {
- error("unknown option `%s'", ctx.argv[0] + 2);
+ error(_("unknown option `%s'"), ctx.argv[0] + 2);
} else if (isascii(*ctx.opt)) {
- error("unknown switch `%c'", *ctx.opt);
+ error(_("unknown switch `%c'"), *ctx.opt);
} else {
- error("unknown non-ascii option in string: `%s'",
+ error(_("unknown non-ascii option in string: `%s'"),
ctx.argv[0]);
}
usage_with_options(usagestr, options);
usage_with_options(usagestr, options);
}
- #undef opterror
- int opterror(const struct option *opt, const char *reason, int flags)
+ const char *optname(const struct option *opt, int flags)
{
+ static struct strbuf sb = STRBUF_INIT;
+
+ strbuf_reset(&sb);
if (flags & OPT_SHORT)
- return error("switch `%c' %s", opt->short_name, reason);
- if (flags & OPT_UNSET)
- return error("option `no-%s' %s", opt->long_name, reason);
- return error("option `%s' %s", opt->long_name, reason);
+ strbuf_addf(&sb, "switch `%c'", opt->short_name);
+ else if (flags & OPT_UNSET)
+ strbuf_addf(&sb, "option `no-%s'", opt->long_name);
+ else
+ strbuf_addf(&sb, "option `%s'", opt->long_name);
+
+ return sb.buf;
}
(h), 0, &parse_opt_string_list }
#define OPT_UYN(s, l, v, h) { OPTION_CALLBACK, (s), (l), (v), NULL, \
(h), PARSE_OPT_NOARG, &parse_opt_tertiary }
-#define OPT_DATE(s, l, v, h) \
- { OPTION_CALLBACK, (s), (l), (v), N_("time"),(h), 0, \
- parse_opt_approxidate_cb }
#define OPT_EXPIRY_DATE(s, l, v, h) \
{ OPTION_CALLBACK, (s), (l), (v), N_("expiry-date"),(h), 0, \
parse_opt_expiry_date_cb }
const struct option *options);
extern int optbug(const struct option *opt, const char *reason);
- extern int opterror(const struct option *opt, const char *reason, int flags);
- #if defined(__GNUC__)
- #define opterror(o,r,f) (opterror((o),(r),(f)), const_error())
- #endif
+ const char *optname(const struct option *opt, int flags);
+/*
+ * Use these assertions for callbacks that expect to be called with NONEG and
+ * NOARG respectively, and do not otherwise handle the "unset" and "arg"
+ * parameters.
+ */
+#define BUG_ON_OPT_NEG(unset) do { \
+ if ((unset)) \
+ BUG("option callback does not expect negation"); \
+} while (0)
+#define BUG_ON_OPT_ARG(arg) do { \
+ if ((arg)) \
+ BUG("option callback does not expect an argument"); \
+} while (0)
+
/*----- incremental advanced APIs -----*/
enum {
+ PARSE_OPT_COMPLETE = -2,
PARSE_OPT_HELP = -1,
PARSE_OPT_DONE,
PARSE_OPT_NON_OPTION,
/*----- some often used options -----*/
extern int parse_opt_abbrev_cb(const struct option *, const char *, int);
-extern int parse_opt_approxidate_cb(const struct option *, const char *, int);
extern int parse_opt_expiry_date_cb(const struct option *, const char *, int);
extern int parse_opt_color_flag_cb(const struct option *, const char *, int);
extern int parse_opt_verbosity_cb(const struct option *, const char *, int);
changed |= DATA_CHANGED;
return changed;
default:
- die("internal error: ce_mode is %o", ce->ce_mode);
+ BUG("unsupported ce_mode: %o", ce->ce_mode);
}
changed |= match_stat_data(&ce->ce_stat_data, st);
struct cache_entry *new_entry;
if (alias->ce_flags & CE_ADDED)
- die("Will not add file alias '%s' ('%s' already exists in index)", ce->name, alias->name);
+ die(_("will not add file alias '%s' ('%s' already exists in index)"),
+ ce->name, alias->name);
/* Ok, create the new entry using the name of the existing alias */
len = ce_namelen(alias);
{
struct object_id oid;
if (write_object_file("", 0, blob_type, &oid))
- die("cannot create an empty blob in the object database");
+ die(_("cannot create an empty blob in the object database"));
oidcpy(&ce->oid, &oid);
}
newflags |= HASH_RENORMALIZE;
if (!S_ISREG(st_mode) && !S_ISLNK(st_mode) && !S_ISDIR(st_mode))
- return error("%s: can only add regular files, symbolic links or git-directories", path);
+ return error(_("%s: can only add regular files, symbolic links or git-directories"), path);
namelen = strlen(path);
if (S_ISDIR(st_mode)) {
if (!intent_only) {
if (index_path(istate, &ce->oid, path, st, newflags)) {
discard_cache_entry(ce);
- return error("unable to index file %s", path);
+ return error(_("unable to index file '%s'"), path);
}
} else
set_object_name_for_intent_to_add_entry(ce);
discard_cache_entry(ce);
else if (add_index_entry(istate, ce, add_option)) {
discard_cache_entry(ce);
- return error("unable to add %s to index", path);
+ return error(_("unable to add '%s' to index"), path);
}
if (verbose && !was_same)
printf("add '%s'\n", path);
{
struct stat st;
if (lstat(path, &st))
- die_errno("unable to stat '%s'", path);
+ die_errno(_("unable to stat '%s'"), path);
return add_to_index(istate, path, &st, flags);
}
int len;
if (!verify_path(path, mode)) {
- error("Invalid path '%s'", path);
+ error(_("invalid path '%s'"), path);
return NULL;
}
int len;
if (!verify_path(path, mode)) {
- error("Invalid path '%s'", path);
+ error(_("invalid path '%s'"), path);
return NULL;
}
if (!ok_to_add)
return -1;
if (!verify_path(ce->name, ce->ce_mode))
- return error("Invalid path '%s'", ce->name);
+ return error(_("invalid path '%s'"), ce->name);
if (!skip_df_check &&
check_file_directory_conflict(istate, ce, pos, ok_to_replace)) {
if (!ok_to_replace)
- return error("'%s' appears as both a file and as a directory",
+ return error(_("'%s' appears as both a file and as a directory"),
ce->name);
pos = index_name_stage_pos(istate, ce->name, ce_namelen(ce), ce_stage(ce));
pos = -pos-1;
istate->cache_nr);
trace_performance_enter();
- modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
- deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n");
- typechange_fmt = (in_porcelain ? "T\t%s\n" : "%s needs update\n");
- added_fmt = (in_porcelain ? "A\t%s\n" : "%s needs update\n");
- unmerged_fmt = (in_porcelain ? "U\t%s\n" : "%s: needs merge\n");
+ modified_fmt = in_porcelain ? "M\t%s\n" : "%s: needs update\n";
+ deleted_fmt = in_porcelain ? "D\t%s\n" : "%s: needs update\n";
+ typechange_fmt = in_porcelain ? "T\t%s\n" : "%s: needs update\n";
+ added_fmt = in_porcelain ? "A\t%s\n" : "%s: needs update\n";
+ unmerged_fmt = in_porcelain ? "U\t%s\n" : "%s: needs merge\n";
+ /*
+ * Use the multi-threaded preload_index() to refresh most of the
+ * cache entries quickly then in the single threaded loop below,
+ * we only have to do the special cases that are left.
+ */
+ preload_index(istate, pathspec, 0);
for (i = 0; i < istate->cache_nr; i++) {
struct cache_entry *ce, *new_entry;
int cache_errno = 0;
int hdr_version;
if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
- return error("bad signature");
+ return error(_("bad signature 0x%08x"), hdr->hdr_signature);
hdr_version = ntohl(hdr->hdr_version);
if (hdr_version < INDEX_FORMAT_LB || INDEX_FORMAT_UB < hdr_version)
- return error("bad index version %d", hdr_version);
+ return error(_("bad index version %d"), hdr_version);
if (!verify_index_checksum)
return 0;
the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
the_hash_algo->final_fn(hash, &c);
if (!hasheq(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
- return error("bad index file sha1 signature");
+ return error(_("bad index file sha1 signature"));
return 0;
}
break;
default:
if (*ext < 'A' || 'Z' < *ext)
- return error("index uses %.4s extension, which we do not understand",
+ return error(_("index uses %.4s extension, which we do not understand"),
ext);
- fprintf(stderr, "ignoring %.4s extension\n", ext);
+ fprintf_ln(stderr, _("ignoring %.4s extension"), ext);
break;
}
return 0;
size_t len;
const char *name;
unsigned int flags;
- size_t copy_len;
+ size_t copy_len = 0;
/*
* Adjacent cache entries tend to share the leading paths, so it makes
* sense to only store the differences in later entries. In the v4
extended_flags = get_be16(&ondisk2->flags2) << 16;
/* We do not yet understand any bit out of CE_EXTENDED_FLAGS */
if (extended_flags & ~CE_EXTENDED_FLAGS)
- die("Unknown index entry format %08x", extended_flags);
+ die(_("unknown index entry format 0x%08x"), extended_flags);
flags |= extended_flags;
name = ondisk2->name;
}
die(_("malformed name field in the index, near path '%s'"),
previous_ce->name);
copy_len = previous_len - strip_len;
- } else {
- copy_len = 0;
}
name = (const char *)cp;
}
int name_compare = strcmp(ce->name, next_ce->name);
if (0 < name_compare)
- die("unordered stage entries in index");
+ die(_("unordered stage entries in index"));
if (!name_compare) {
if (!ce_stage(ce))
- die("multiple stage entries for merged file '%s'",
+ die(_("multiple stage entries for merged file '%s'"),
ce->name);
if (ce_stage(ce) > ce_stage(next_ce))
- die("unordered stage entries for '%s'",
+ die(_("unordered stage entries for '%s'"),
ce->name);
}
}
struct index_entry_offset entries[FLEX_ARRAY];
};
-#ifndef NO_PTHREADS
static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset);
static void write_ieot_extension(struct strbuf *sb, struct index_entry_offset_table *ieot);
-#endif
static size_t read_eoie_extension(const char *mmap, size_t mmap_size);
static void write_eoie_extension(struct strbuf *sb, git_hash_ctx *eoie_context, size_t offset);
struct load_index_extensions
{
-#ifndef NO_PTHREADS
pthread_t pthread;
-#endif
struct index_state *istate;
const char *mmap;
size_t mmap_size;
return consumed;
}
-#ifndef NO_PTHREADS
-
/*
* Mostly randomly chosen maximum thread counts: we
* cap the parallelism to online_cpus() threads, and we want
return consumed;
}
-#endif
/* remember to discard_cache() before reading a different cache! */
int do_read_index(struct index_state *istate, const char *path, int must_exist)
size_t mmap_size;
struct load_index_extensions p;
size_t extension_offset = 0;
-#ifndef NO_PTHREADS
int nr_threads, cpus;
struct index_entry_offset_table *ieot = NULL;
-#endif
if (istate->initialized)
return istate->cache_nr;
if (fd < 0) {
if (!must_exist && errno == ENOENT)
return 0;
- die_errno("%s: index file open failed", path);
+ die_errno(_("%s: index file open failed"), path);
}
if (fstat(fd, &st))
- die_errno("cannot stat the open index");
+ die_errno(_("%s: cannot stat the open index"), path);
mmap_size = xsize_t(st.st_size);
if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
- die("index file smaller than expected");
+ die(_("%s: index file smaller than expected"), path);
mmap = xmmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (mmap == MAP_FAILED)
- die_errno("unable to map index file");
+ die_errno(_("%s: unable to map index file"), path);
close(fd);
hdr = (const struct cache_header *)mmap;
src_offset = sizeof(*hdr);
-#ifndef NO_PTHREADS
- nr_threads = git_config_get_index_threads();
+ if (git_config_get_index_threads(&nr_threads))
+ nr_threads = 1;
/* TODO: does creating more threads than cores help? */
if (!nr_threads) {
nr_threads = cpus;
}
+ if (!HAVE_THREADS)
+ nr_threads = 1;
+
if (nr_threads > 1) {
extension_offset = read_eoie_extension(mmap, mmap_size);
if (extension_offset) {
} else {
src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
}
-#else
- src_offset += load_all_cache_entries(istate, mmap, mmap_size, src_offset);
-#endif
istate->timestamp.sec = st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
/* if we created a thread, join it otherwise load the extensions on the primary thread */
-#ifndef NO_PTHREADS
if (extension_offset) {
int ret = pthread_join(p.pthread, NULL);
if (ret)
die(_("unable to join load_index_extensions thread: %s"), strerror(ret));
- }
-#endif
- if (!extension_offset) {
+ } else {
p.src_offset = src_offset;
load_index_extensions(&p);
}
unmap:
munmap((void *)mmap, mmap_size);
- die("index file corrupt");
+ die(_("index file corrupt"));
}
/*
static void freshen_shared_index(const char *shared_index, int warn)
{
if (!check_and_freshen_file(shared_index, 1) && warn)
- warning("could not freshen shared index '%s'", shared_index);
+ warning(_("could not freshen shared index '%s'"), shared_index);
}
int read_index_from(struct index_state *istate, const char *path,
base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
ret = do_read_index(split_index->base, base_path, 1);
if (!oideq(&split_index->base_oid, &split_index->base->oid))
- die("broken index, expect %s in %s, got %s",
+ die(_("broken index, expect %s in %s, got %s"),
base_oid_hex, base_path,
oid_to_hex(&split_index->base->oid));
freshen_shared_index(base_path, 0);
merge_base_index(istate);
post_read_index_from(istate);
- free(base_path);
trace_performance_leave("read cache %s", base_path);
+ free(base_path);
return ret;
}
for (i = 0; i < istate->cache_nr; i++) {
if (!istate) {
- die("internal error: cache entry is not allocated from expected memory pool");
+ BUG("cache entry is not allocated from expected memory pool");
} else if (!istate->ce_mem_pool ||
!mem_pool_contains(istate->ce_mem_pool, istate->cache[i])) {
if (!istate->split_index ||
!istate->split_index->base ||
!istate->split_index->base->ce_mem_pool ||
!mem_pool_contains(istate->split_index->base->ce_mem_pool, istate->cache[i])) {
- die("internal error: cache entry is not allocated from expected memory pool");
+ BUG("cache entry is not allocated from expected memory pool");
}
}
}
rollback_lock_file(lockfile);
}
+static int record_eoie(void)
+{
+ int val;
+
+ if (!git_config_get_bool("index.recordendofindexentries", &val))
+ return val;
+
+ /*
+ * As a convenience, the end of index entries extension
+ * used for threading is written by default if the user
+ * explicitly requested threaded index reads.
+ */
+ return !git_config_get_index_threads(&val) && val != 1;
+}
+
+static int record_ieot(void)
+{
+ int val;
+
+ if (!git_config_get_bool("index.recordoffsettable", &val))
+ return val;
+
+ /*
+ * As a convenience, the offset table used for threading is
+ * written by default if the user explicitly requested
+ * threaded index reads.
+ */
+ return !git_config_get_index_threads(&val) && val != 1;
+}
+
/*
* On success, `tempfile` is closed. If it is the temporary file
* of a `struct lock_file`, we will therefore effectively perform
if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
return -1;
-#ifndef NO_PTHREADS
- nr_threads = git_config_get_index_threads();
- if (nr_threads != 1) {
+ if (!HAVE_THREADS || git_config_get_index_threads(&nr_threads))
+ nr_threads = 1;
+
+ if (nr_threads != 1 && record_ieot()) {
int ieot_blocks, cpus;
/*
ieot_entries = DIV_ROUND_UP(entries, ieot_blocks);
}
}
-#endif
offset = lseek(newfd, 0, SEEK_CUR);
if (offset < 0) {
* strip_extensions parameter as we need it when loading the shared
* index.
*/
-#ifndef NO_PTHREADS
if (ieot) {
struct strbuf sb = STRBUF_INIT;
if (err)
return -1;
}
-#endif
if (!strip_extensions && istate->split_index) {
struct strbuf sb = STRBUF_INIT;
* read. Write it out regardless of the strip_extensions parameter as we need it
* when loading the shared index.
*/
- if (offset) {
+ if (offset && record_eoie()) {
struct strbuf sb = STRBUF_INIT;
write_eoie_extension(&sb, &eoie_c, offset);
return ret;
ret = adjust_shared_perm(get_tempfile_path(*temp));
if (ret) {
- error("cannot fix permission bits on %s", get_tempfile_path(*temp));
+ error(_("cannot fix permission bits on '%s'"), get_tempfile_path(*temp));
return ret;
}
ret = rename_tempfile(temp,
struct tempfile *temp;
int saved_errno;
- temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
+ /* Same initial permissions as the main .git/index file */
+ temp = mks_tempfile_sm(git_path("sharedindex_XXXXXX"), 0, 0666);
if (!temp) {
oidclr(&si->base_oid);
ret = do_write_locked_index(istate, lock, flags);
new_ce->ce_namelen = len;
new_ce->ce_mode = ce->ce_mode;
if (add_index_entry(istate, new_ce, ADD_CACHE_SKIP_DFCHECK))
- return error("%s: cannot drop to stage #0",
+ return error(_("%s: cannot drop to stage #0"),
new_ce->name);
}
return unmerged;
strbuf_add(sb, hash, the_hash_algo->rawsz);
}
-#ifndef NO_PTHREADS
#define IEOT_VERSION (1)
static struct index_entry_offset_table *read_ieot_extension(const char *mmap, size_t mmap_size, size_t offset)
strbuf_add(sb, &buffer, sizeof(uint32_t));
}
}
-#endif
if (ARRAY_SIZE(valid_atom) <= i)
return strbuf_addf_ret(err, -1, _("unknown field name: %.*s"),
(int)(ep-atom), atom);
+ if (valid_atom[i].source != SOURCE_NONE && !have_git_dir())
+ return strbuf_addf_ret(err, -1,
+ _("not a git repository, but the field '%.*s' requires access to object data"),
+ (int)(ep-atom), atom);
/* Add it in, including the deref prefix */
at = used_atom_cnt;
if (deref)
name++;
if (!strcmp(name, "objecttype"))
- v->s = type_name(oi->type);
+ v->s = xstrdup(type_name(oi->type));
else if (!strcmp(name, "objectsize")) {
v->value = oi->size;
- v->s = xstrfmt("%lu", oi->size);
+ v->s = xstrfmt("%"PRIuMAX , (uintmax_t)oi->size);
}
else if (deref)
grab_objectname(name, &oi->oid, v, &used_atom[i]);
if (deref)
name++;
if (!strcmp(name, "tag"))
- v->s = tag->tag;
+ v->s = xstrdup(tag->tag);
else if (!strcmp(name, "type") && tag->tagged)
- v->s = type_name(tag->tagged->type);
+ v->s = xstrdup(type_name(tag->tagged->type));
else if (!strcmp(name, "object") && tag->tagged)
v->s = xstrdup(oid_to_hex(&tag->tagged->oid));
}
v->value = timestamp;
return;
bad:
- v->s = "";
+ v->s = xstrdup("");
v->value = 0;
}
for (i = 0; i < used_atom_cnt; i++) {
struct atom_value *v = &val[i];
if (v->s == NULL)
- v->s = "";
+ v->s = xstrdup("");
}
}
static const char *lstrip_ref_components(const char *refname, int len)
{
long remaining = len;
- const char *start = refname;
+ const char *start = xstrdup(refname);
+ const char *to_free = start;
if (len < 0) {
int i;
while (remaining > 0) {
switch (*start++) {
case '\0':
- return "";
+ free((char *)to_free);
+ return xstrdup("");
case '/':
remaining--;
break;
}
}
+ start = xstrdup(start);
+ free((char *)to_free);
return start;
}
static const char *rstrip_ref_components(const char *refname, int len)
{
long remaining = len;
- char *start = xstrdup(refname);
+ const char *start = xstrdup(refname);
+ const char *to_free = start;
if (len < 0) {
int i;
while (remaining-- > 0) {
char *p = strrchr(start, '/');
- if (p == NULL)
- return "";
- else
+ if (p == NULL) {
+ free((char *)to_free);
+ return xstrdup("");
+ } else
p[0] = '\0';
}
return start;
else if (atom->option == R_RSTRIP)
return rstrip_ref_components(refname, atom->rstrip);
else
- return refname;
+ return xstrdup(refname);
}
static void fill_remote_ref_details(struct used_atom *atom, const char *refname,
NULL, AHEAD_BEHIND_FULL) < 0) {
*s = xstrdup(msgs.gone);
} else if (!num_ours && !num_theirs)
- *s = "";
+ *s = xstrdup("");
else if (!num_ours)
*s = xstrfmt(msgs.behind, num_theirs);
else if (!num_theirs)
}
} else if (atom->u.remote_ref.option == RR_TRACKSHORT) {
if (stat_tracking_info(branch, &num_ours, &num_theirs,
- NULL, AHEAD_BEHIND_FULL) < 0)
+ NULL, AHEAD_BEHIND_FULL) < 0) {
+ *s = xstrdup("");
return;
-
+ }
if (!num_ours && !num_theirs)
- *s = "=";
+ *s = xstrdup("=");
else if (!num_ours)
- *s = "<";
+ *s = xstrdup("<");
else if (!num_theirs)
- *s = ">";
+ *s = xstrdup(">");
else
- *s = "<>";
+ *s = xstrdup("<>");
} else if (atom->u.remote_ref.option == RR_REMOTE_NAME) {
int explicit;
const char *remote = atom->u.remote_ref.push ?
pushremote_for_branch(branch, &explicit) :
remote_for_branch(branch, &explicit);
- if (explicit)
- *s = xstrdup(remote);
- else
- *s = "";
+ *s = xstrdup(explicit ? remote : "");
} else if (atom->u.remote_ref.option == RR_REMOTE_REF) {
int explicit;
const char *merge;
merge = remote_ref_for_branch(branch, atom->u.remote_ref.push,
&explicit);
- if (explicit)
- *s = xstrdup(merge);
- else
- *s = "";
+ *s = xstrdup(explicit ? merge : "");
} else
BUG("unhandled RR_* enum");
}
static const char *get_symref(struct used_atom *atom, struct ref_array_item *ref)
{
if (!ref->symref)
- return "";
+ return xstrdup("");
else
return show_ref(&atom->u.refname, ref->symref);
}
ref->symref = resolve_refdup(ref->refname, RESOLVE_REF_READING,
NULL, NULL);
if (!ref->symref)
- ref->symref = "";
+ ref->symref = xstrdup("");
}
/* Fill in specials first */
refname = get_symref(atom, ref);
else if (starts_with(name, "upstream")) {
const char *branch_name;
- v->s = "";
/* only local branches may have an upstream */
if (!skip_prefix(ref->refname, "refs/heads/",
- &branch_name))
+ &branch_name)) {
+ v->s = xstrdup("");
continue;
+ }
branch = branch_get(branch_name);
refname = branch_get_upstream(branch, NULL);
if (refname)
fill_remote_ref_details(atom, refname, branch, &v->s);
+ else
+ v->s = xstrdup("");
continue;
} else if (atom->u.remote_ref.push) {
const char *branch_name;
- v->s = "";
+ v->s = xstrdup("");
if (!skip_prefix(ref->refname, "refs/heads/",
&branch_name))
continue;
if (!refname)
continue;
}
+ /* We will definitely re-init v->s on the next line. */
+ free((char *)v->s);
fill_remote_ref_details(atom, refname, branch, &v->s);
continue;
} else if (starts_with(name, "color:")) {
- v->s = atom->u.color;
+ v->s = xstrdup(atom->u.color);
continue;
} else if (!strcmp(name, "flag")) {
char buf[256], *cp = buf;
if (ref->flag & REF_ISPACKED)
cp = copy_advance(cp, ",packed");
if (cp == buf)
- v->s = "";
+ v->s = xstrdup("");
else {
*cp = '\0';
v->s = xstrdup(buf + 1);
continue;
} else if (!strcmp(name, "HEAD")) {
if (atom->u.head && !strcmp(ref->refname, atom->u.head))
- v->s = "*";
+ v->s = xstrdup("*");
else
- v->s = " ";
+ v->s = xstrdup(" ");
continue;
} else if (starts_with(name, "align")) {
v->handler = align_atom_handler;
- v->s = "";
+ v->s = xstrdup("");
continue;
} else if (!strcmp(name, "end")) {
v->handler = end_atom_handler;
- v->s = "";
+ v->s = xstrdup("");
continue;
} else if (starts_with(name, "if")) {
const char *s;
- v->s = "";
if (skip_prefix(name, "if:", &s))
v->s = xstrdup(s);
+ else
+ v->s = xstrdup("");
v->handler = if_atom_handler;
continue;
} else if (!strcmp(name, "then")) {
v->handler = then_atom_handler;
- v->s = "";
+ v->s = xstrdup("");
continue;
} else if (!strcmp(name, "else")) {
v->handler = else_atom_handler;
- v->s = "";
+ v->s = xstrdup("");
continue;
} else
continue;
if (!deref)
- v->s = refname;
+ v->s = xstrdup(refname);
else
v->s = xstrfmt("%s^{}", refname);
+ free((char *)refname);
}
for (i = 0; i < used_atom_cnt; i++) {
static void free_array_item(struct ref_array_item *item)
{
free((char *)item->symref);
+ if (item->value) {
+ free((char *)item->value->s);
+ free(item->value);
+ }
free(item);
}
{
int i;
+ for (i = 0; i < used_atom_cnt; i++)
+ free((char *)used_atom[i].name);
+ FREE_AND_NULL(used_atom);
+ used_atom_cnt = 0;
for (i = 0; i < array->nr; i++)
free_array_item(array->items[i]);
FREE_AND_NULL(array->items);
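Aside: the common thread in the ref-filter.c hunks above is an ownership convention. Every string stored in an atom value (v->s, refname, symref) is now heap-allocated via xstrdup()/xstrfmt(), so free_array_item() and ref_array_clear() can release them unconditionally instead of guessing which entries point at string literals. A minimal stand-alone sketch of that pattern follows; it is plain C written for illustration only, not git's actual structures (struct value, set_value, clear_values and checked_strdup are invented names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct value {
	const char *s;	/* always owned by the struct, never a literal */
};

static char *checked_strdup(const char *s)
{
	char *r = strdup(s);
	if (!r) {
		perror("strdup");
		exit(1);
	}
	return r;
}

static void set_value(struct value *v, const char *s)
{
	/* store a private copy even for "", so cleanup needs no special cases */
	v->s = checked_strdup(s ? s : "");
}

static void clear_values(struct value *v, size_t nr)
{
	size_t i;
	for (i = 0; i < nr; i++)
		free((char *)v[i].s);	/* safe: every entry was malloc'ed */
}

int main(void)
{
	struct value vals[2];
	set_value(&vals[0], "refs/heads/master");
	set_value(&vals[1], NULL);
	clear_values(vals, 2);
	return 0;
}

The cost is a few extra xstrdup("") calls; the benefit, as in the diff above, is that the free() in the cleanup path never has to distinguish "constant" from "allocated" values.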
struct object_id oid;
int no_merged = starts_with(opt->long_name, "no");
+ BUG_ON_OPT_NEG(unset);
+
if (rf->merge) {
if (no_merged) {
- return opterror(opt, "is incompatible with --merged", 0);
+ return error(_("option `%s' is incompatible with --merged"),
+ opt->long_name);
} else {
- return opterror(opt, "is incompatible with --no-merged", 0);
+ return error(_("option `%s' is incompatible with --no-merged"),
+ opt->long_name);
}
}
rf->merge_commit = lookup_commit_reference_gently(the_repository,
&oid, 0);
if (!rf->merge_commit)
- return opterror(opt, "must point to a commit", 0);
+ return error(_("option `%s' must point to a commit"), opt->long_name);
return 0;
}
return 0;
/* Handle remote.<name>.* variables */
if (*name == '/') {
- warning("Config remote shorthand cannot begin with '/': %s",
+ warning(_("config remote shorthand cannot begin with '/': %s"),
name);
return 0;
}
if (!remote->receivepack)
remote->receivepack = v;
else
- error("more than one receivepack given, using the first");
+ error(_("more than one receivepack given, using the first"));
} else if (!strcmp(subkey, "uploadpack")) {
const char *v;
if (git_config_string(&v, key, value))
if (!remote->uploadpack)
remote->uploadpack = v;
else
- error("more than one uploadpack given, using the first");
+ error(_("more than one uploadpack given, using the first"));
} else if (!strcmp(subkey, "tagopt")) {
if (!strcmp(value, "--no-tags"))
remote->fetch_tags = -1;
* FETCH_HEAD_IGNORE entries always appear at
* the end of the list.
*/
- die(_("Internal error"));
+ BUG("Internal error");
}
}
free(ref2->peer_ref);
size_t namelen;
int ret;
if (!kstar)
- die("Key '%s' of pattern had no '*'", key);
+ die(_("key '%s' of pattern had no '*'"), key);
klen = kstar - key;
ksuffixlen = strlen(kstar + 1);
namelen = strlen(name);
struct strbuf sb = STRBUF_INIT;
const char *vstar = strchr(value, '*');
if (!vstar)
- die("Value '%s' of pattern has no '*'", value);
+ die(_("value '%s' of pattern has no '*'"), value);
strbuf_add(&sb, value, vstar - value);
strbuf_add(&sb, name + klen, namelen - klen - ksuffixlen);
strbuf_addstr(&sb, vstar + 1);
int find_src = !query->src;
if (find_src && !query->dst)
- error("query_refspecs_multiple: need either src or dst");
+ BUG("query_refspecs_multiple: need either src or dst");
for (i = 0; i < rs->nr; i++) {
struct refspec_item *refspec = &rs->items[i];
char **result = find_src ? &query->src : &query->dst;
if (find_src && !query->dst)
- return error("query_refspecs: need either src or dst");
+ BUG("query_refspecs: need either src or dst");
for (i = 0; i < rs->nr; i++) {
struct refspec_item *refspec = &rs->items[i];
* way to delete 'other' ref at the remote end.
*/
if (try_explicit_object_name(rs->src, match) < 0)
- return error("src refspec %s does not match any.", rs->src);
+ return error(_("src refspec %s does not match any"), rs->src);
if (allocated_match)
*allocated_match = 1;
return 0;
default:
- return error("src refspec %s matches more than one.", rs->src);
+ return error(_("src refspec %s matches more than one"), rs->src);
}
}
if (!dst_value ||
((flag & REF_ISSYMREF) &&
!starts_with(dst_value, "refs/heads/")))
- die("%s cannot be resolved to branch.",
+ die(_("%s cannot be resolved to branch"),
matched_src->name);
}
if (starts_with(dst_value, "refs/"))
matched_dst = make_linked_ref(dst_value, dst_tail);
else if (is_null_oid(&matched_src->new_oid))
- error("unable to delete '%s': remote ref does not exist",
+ error(_("unable to delete '%s': remote ref does not exist"),
dst_value);
else if ((dst_guess = guess_ref(dst_value, matched_src))) {
matched_dst = make_linked_ref(dst_guess, dst_tail);
free(dst_guess);
} else
- error("unable to push to unqualified destination: %s\n"
- "The destination refspec neither matches an "
- "existing ref on the remote nor\n"
- "begins with refs/, and we are unable to "
- "guess a prefix based on the source ref.",
+ error(_("unable to push to unqualified destination: %s\n"
+ "The destination refspec neither matches an "
+ "existing ref on the remote nor\n"
+ "begins with refs/, and we are unable to "
+ "guess a prefix based on the source ref."),
dst_value);
break;
default:
matched_dst = NULL;
- error("dst refspec %s matches more than one.",
+ error(_("dst refspec %s matches more than one"),
dst_value);
break;
}
if (!matched_dst)
return -1;
if (matched_dst->peer_ref)
- return error("dst ref %s receives from more than one src.",
- matched_dst->name);
+ return error(_("dst ref %s receives from more than one src"),
+ matched_dst->name);
else {
matched_dst->peer_ref = allocated_src ?
matched_src :
* sent to the other side.
*/
if (sent_tips.nr) {
+ const int reachable_flag = 1;
+ struct commit_list *found_commits;
+ struct commit **src_commits;
+ int nr_src_commits = 0, alloc_src_commits = 16;
+ ALLOC_ARRAY(src_commits, alloc_src_commits);
+
for_each_string_list_item(item, &src_tag) {
struct ref *ref = item->util;
+ struct commit *commit;
+
+ if (is_null_oid(&ref->new_oid))
+ continue;
+ commit = lookup_commit_reference_gently(the_repository,
+ &ref->new_oid,
+ 1);
+ if (!commit)
+ /* not pushing a commit, which is not an error */
+ continue;
+
+ ALLOC_GROW(src_commits, nr_src_commits + 1, alloc_src_commits);
+ src_commits[nr_src_commits++] = commit;
+ }
+
+ found_commits = get_reachable_subset(sent_tips.tip, sent_tips.nr,
+ src_commits, nr_src_commits,
+ reachable_flag);
+
+ for_each_string_list_item(item, &src_tag) {
struct ref *dst_ref;
+ struct ref *ref = item->util;
struct commit *commit;
if (is_null_oid(&ref->new_oid))
* Is this tag, which they do not have, reachable from
* any of the commits we are sending?
*/
- if (!in_merge_bases_many(commit, sent_tips.nr, sent_tips.tip))
+ if (!(commit->object.flags & reachable_flag))
continue;
/* Add it in */
oidcpy(&dst_ref->new_oid, &ref->new_oid);
dst_ref->peer_ref = copy_ref(ref);
}
+
+ clear_commit_marks_many(nr_src_commits, src_commits, reachable_flag);
+ free(src_commits);
+ free_commit_list(found_commits);
}
+
string_list_clear(&src_tag, 0);
free(sent_tips.tip);
}
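Aside: the add_missing_tags() hunk above trades one reachability walk per candidate tag (the old in_merge_bases_many() call) for a single get_reachable_subset() traversal from all sent tips, after which each tag's commit is tested with a cheap flag check. Reduced to a generic graph, the shape of that optimization looks roughly like the sketch below; node, mark_reachable and is_reachable are invented for illustration and are not git APIs:

#include <stdlib.h>

#define REACHABLE 1u

struct node {
	unsigned flags;
	size_t nr_parents;
	struct node **parents;
};

/* One walk from all tips marks everything they can reach. */
static void mark_reachable(struct node **tips, size_t nr_tips)
{
	size_t i;

	for (i = 0; i < nr_tips; i++) {
		struct node *n = tips[i];

		if (n->flags & REACHABLE)
			continue;	/* already visited via another tip */
		n->flags |= REACHABLE;
		mark_reachable(n->parents, n->nr_parents);
	}
}

/* After that single walk, each query is a flag test, not a new walk. */
static int is_reachable(const struct node *n)
{
	return !!(n->flags & REACHABLE);
}

In the hunk above, sent_tips.tip[] plays the role of the tips, the candidate tag commits are the queried nodes, and clear_commit_marks_many() resets the temporary flag when the check is done.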
ref_map = get_remote_ref(remote_refs, name);
}
if (!missing_ok && !ref_map)
- die("Couldn't find remote ref %s", name);
+ die(_("couldn't find remote ref %s"), name);
if (ref_map) {
ref_map->peer_ref = get_local_ref(refspec->dst);
if (ref_map->peer_ref && refspec->force)
if (!starts_with((*rmp)->peer_ref->name, "refs/") ||
check_refname_format((*rmp)->peer_ref->name, 0)) {
struct ref *ignore = *rmp;
- error("* Ignoring funny ref '%s' locally",
+ error(_("* Ignoring funny ref '%s' locally"),
(*rmp)->peer_ref->name);
*rmp = (*rmp)->next;
free(ignore->peer_ref);
repo_init_revisions(the_repository, &revs, NULL);
setup_revisions(argv.argc, argv.argv, &revs, NULL);
if (prepare_revision_walk(&revs))
- die("revision walk setup failed");
+ die(_("revision walk setup failed"));
/* ... and count the commits on each side. */
while (1) {
else if (!colon[1])
oidclr(&entry->expect);
else if (get_oid(colon + 1, &entry->expect))
- return error("cannot parse expected object name '%s'", colon + 1);
+ return error(_("cannot parse expected object name '%s'"),
+ colon + 1);
return 0;
}
-j <n> get a integer, too
-m, --magnitude <n> get a magnitude
--set23 set integer to 23
- -t <time> get timestamp of <time>
-L, --length <str> get length of <str>
-F, --file <file> set file to <file>
test_expect_success 'detect possible typos' '
test_must_fail test-tool parse-options -boolean >output 2>output.err &&
test_must_be_empty output &&
- test_cmp typo.err output.err
+ test_i18ncmp typo.err output.err
'
cat >typo.err <<\EOF
test_expect_success 'detect possible typos' '
test_must_fail test-tool parse-options -ambiguous >output 2>output.err &&
test_must_be_empty output &&
- test_cmp typo.err output.err
+ test_i18ncmp typo.err output.err
'
test_expect_success 'keep some options as arguments' '
test-tool parse-options --expect="arg 00: --quux" --quux
'
-cat >expect <<\EOF
-boolean: 0
-integer: 0
-magnitude: 0
-timestamp: 1
-string: (not set)
-abbrev: 7
-verbose: -1
-quiet: 1
-dry run: no
-file: (not set)
-arg 00: foo
-EOF
-
-test_expect_success 'OPT_DATE() works' '
- test-tool parse-options -t "1970-01-01 00:00:01 +0000" \
- foo -q >output 2>output.err &&
- test_must_be_empty output.err &&
- test_cmp expect output
-'
-
cat >expect <<\EOF
Callback: "four", 0
boolean: 5
}
check_fsck () {
- output=$(git fsck --full)
+ git fsck --full >fsck.output
case "$1" in
'')
- test -z "$output" ;;
+ test_must_be_empty fsck.output ;;
*)
- echo "$output" | grep "$1" ;;
+ test_i18ngrep "$1" fsck.output ;;
esac
}
)
'
+test_expect_success 'expire with multiple worktrees' '
+ git init main-wt &&
+ (
+ cd main-wt &&
+ test_tick &&
+ test_commit foo &&
+ git worktree add link-wt &&
+ test_tick &&
+ test_commit -C link-wt foobar &&
+ test_tick &&
+ git reflog expire --verbose --all --expire=$test_tick &&
+ test_must_be_empty .git/worktrees/link-wt/logs/HEAD
+ )
+'
+
test_done
test_must_fail git fsck 2>out &&
cat out &&
- grep "$sha.*corrupt" out
+ test_i18ngrep "$sha.*corrupt" out
'
test_expect_success 'branch pointing to non-commit' '
test_when_finished "git update-ref -d refs/heads/invalid" &&
test_must_fail git fsck 2>out &&
cat out &&
- grep "not a commit" out
+ test_i18ngrep "not a commit" out
'
test_expect_success 'HEAD link pointing at a funny object' '
# avoid corrupt/broken HEAD from interfering with repo discovery
test_must_fail env GIT_DIR=.git git fsck 2>out &&
cat out &&
- grep "detached HEAD points" out
+ test_i18ngrep "detached HEAD points" out
'
test_expect_success 'HEAD link pointing at a funny place' '
# avoid corrupt/broken HEAD from interfering with repo discovery
test_must_fail env GIT_DIR=.git git fsck 2>out &&
cat out &&
- grep "HEAD points to something strange" out
+ test_i18ngrep "HEAD points to something strange" out
'
- grep "main-worktree/HEAD: detached HEAD points" out
+test_expect_success 'HEAD link pointing at a funny object (from different wt)' '
+ test_when_finished "mv .git/SAVED_HEAD .git/HEAD" &&
+ test_when_finished "rm -rf .git/worktrees wt" &&
+ git worktree add wt &&
+ mv .git/HEAD .git/SAVED_HEAD &&
+ echo $ZERO_OID >.git/HEAD &&
+ # avoid corrupt/broken HEAD from interfering with repo discovery
+ test_must_fail git -C wt fsck 2>out &&
- grep "worktrees/other/HEAD: detached HEAD points" out
++ test_i18ngrep "main-worktree/HEAD: detached HEAD points" out
+'
+
+test_expect_success 'other worktree HEAD link pointing at a funny object' '
+ test_when_finished "rm -rf .git/worktrees other" &&
+ git worktree add other &&
+ echo $ZERO_OID >.git/worktrees/other/HEAD &&
+ test_must_fail git fsck 2>out &&
- grep "worktrees/other/HEAD: invalid sha1 pointer" out
++ test_i18ngrep "worktrees/other/HEAD: detached HEAD points" out
+'
+
+test_expect_success 'other worktree HEAD link pointing at missing object' '
+ test_when_finished "rm -rf .git/worktrees other" &&
+ git worktree add other &&
+ echo "Contents missing from repo" | git hash-object --stdin >.git/worktrees/other/HEAD &&
+ test_must_fail git fsck 2>out &&
- grep "worktrees/other/HEAD points to something strange" out
++ test_i18ngrep "worktrees/other/HEAD: invalid sha1 pointer" out
+'
+
+test_expect_success 'other worktree HEAD link pointing at a funny place' '
+ test_when_finished "rm -rf .git/worktrees other" &&
+ git worktree add other &&
+ echo "ref: refs/funny/place" >.git/worktrees/other/HEAD &&
+ test_must_fail git fsck 2>out &&
++ test_i18ngrep "worktrees/other/HEAD points to something strange" out
+'
+
test_expect_success 'email without @ is okay' '
git cat-file commit HEAD >basis &&
sed "s/@/AT/" basis >okay &&
test_when_finished "git update-ref -d refs/heads/bogus" &&
test_must_fail git fsck 2>out &&
cat out &&
- grep "error in commit $new" out
+ test_i18ngrep "error in commit $new" out
'
test_expect_success 'missing < email delimiter is reported nicely' '
test_when_finished "git update-ref -d refs/heads/bogus" &&
test_must_fail git fsck 2>out &&
cat out &&
- grep "error in commit $new.* - bad name" out
+ test_i18ngrep "error in commit $new.* - bad name" out
'
test_expect_success 'missing email is reported nicely' '
test_when_finished "git update-ref -d refs/heads/bogus" &&
test_must_fail git fsck 2>out &&
cat out &&
- grep "error in commit $new.* - missing email" out
+ test_i18ngrep "error in commit $new.* - missing email" out
'
test_expect_success '> in name is reported' '
test_when_finished "git update-ref -d refs/heads/bogus" &&
test_must_fail git fsck 2>out &&
cat out &&
- grep "error in commit $new" out
+ test_i18ngrep "error in commit $new" out
'
# date is 2^64 + 1
test_when_finished "git update-ref -d refs/heads/bogus" &&
test_must_fail git fsck 2>out &&
cat out &&
- grep "error in commit $new.*integer overflow" out
+ test_i18ngrep "error in commit $new.*integer overflow" out
'
test_expect_success 'commit with NUL in header' '
test_when_finished "git update-ref -d refs/heads/bogus" &&
test_must_fail git fsck 2>out &&
cat out &&
- grep "error in commit $new.*unterminated header: NUL at offset" out
+ test_i18ngrep "error in commit $new.*unterminated header: NUL at offset" out
'
test_expect_success 'tree object with duplicate entries' '
git hash-object -w -t tree --stdin
) &&
test_must_fail git fsck 2>out &&
- grep "error in tree .*contains duplicate file entries" out
+ test_i18ngrep "error in tree .*contains duplicate file entries" out
'
test_expect_success 'unparseable tree object' '
test_when_finished "git update-ref -d refs/tags/invalid" &&
test_must_fail git fsck --tags >out &&
cat out &&
- grep "broken link" out
+ test_i18ngrep "broken link" out
'
test_expect_success 'tag pointing to something else than its type' '
warning in tag $tag: badTagName: invalid '\''tag'\'' name: wrong name format
warning in tag $tag: missingTaggerEntry: invalid format - expected '\''tagger'\'' line
EOF
- test_cmp expect out
+ test_i18ncmp expect out
'
test_expect_success 'tag with bad tagger' '
echo $tag >.git/refs/tags/wrong &&
test_when_finished "git update-ref -d refs/tags/wrong" &&
test_must_fail git fsck --tags 2>out &&
- grep "error in tag .*: invalid author/committer" out
+ test_i18ngrep "error in tag .*: invalid author/committer" out
'
test_expect_success 'tag with NUL in header' '
test_when_finished "git update-ref -d refs/tags/wrong" &&
test_must_fail git fsck --tags 2>out &&
cat out &&
- grep "error in tag $tag.*unterminated header: NUL at offset" out
+ test_i18ngrep "error in tag $tag.*unterminated header: NUL at offset" out
'
test_expect_success 'cleaned up' '
git hash-object -w --stdin -t tree) &&
git fsck 2>out &&
cat out &&
- grep "warning.*null sha1" out
+ test_i18ngrep "warning.*null sha1" out
)
'
git hash-object -w --stdin -t tree) &&
git fsck 2>out &&
cat out &&
- grep "warning.*null sha1" out
+ test_i18ngrep "warning.*null sha1" out
)
'
bad_tree=$(git mktree <bad) &&
git fsck 2>out &&
cat out &&
- grep "warning.*tree $bad_tree" out
+ test_i18ngrep "warning.*tree $bad_tree" out
)'
done <<-\EOF
100644 blob
git branch bad $(cat name) &&
test_must_fail git -c fsck.nulInCommit=error fsck 2>warn.1 &&
- grep nulInCommit warn.1 &&
+ test_i18ngrep nulInCommit warn.1 &&
git fsck 2>warn.2 &&
- grep nulInCommit warn.2
+ test_i18ngrep nulInCommit warn.2
)
'
remove_object $(git rev-parse julius:caesar.t) &&
test_must_fail git fsck --name-objects >out &&
tree=$(git rev-parse --verify julius:) &&
- egrep "$tree \((refs/heads/master|HEAD)@\{[0-9]*\}:" out
+ test_i18ngrep -E "$tree \((refs/heads/master|HEAD)@\{[0-9]*\}:" out
)
'
mkdir alt.git/objects/12 &&
>alt.git/objects/12/34567890123456789012345678901234567890 &&
test_must_fail git fsck >out 2>&1 &&
- grep alt.git out
+ test_i18ngrep alt.git out
'
test_expect_success 'fsck errors in packed objects' '
remove_object $one &&
remove_object $two &&
test_must_fail git fsck 2>out &&
- grep "error in commit $one.* - bad name" out &&
- grep "error in commit $two.* - bad name" out &&
+ test_i18ngrep "error in commit $one.* - bad name" out &&
+ test_i18ngrep "error in commit $two.* - bad name" out &&
! grep corrupt out
'
test_i18ngrep "garbage.*$commit" out
'
-test_expect_success 'fsck detects trailing loose garbage (blob)' '
+test_expect_success 'fsck detects trailing loose garbage (large blob)' '
blob=$(echo trailing | git hash-object -w --stdin) &&
file=$(sha1_file $blob) &&
test_when_finished "remove_object $blob" &&
chmod +w "$file" &&
echo garbage >>"$file" &&
- test_must_fail git fsck 2>out &&
+ test_must_fail git -c core.bigfilethreshold=5 fsck 2>out &&
test_i18ngrep "garbage.*$blob" out
'
+test_expect_success 'fsck detects truncated loose object' '
+ # make it big enough that we know we will truncate in the data
+ # portion, not the header
+ test-tool genrandom truncate 4096 >file &&
+ blob=$(git hash-object -w file) &&
+ file=$(sha1_file $blob) &&
+ test_when_finished "remove_object $blob" &&
+ test_copy_bytes 1024 <"$file" >tmp &&
+ rm "$file" &&
+ mv -f tmp "$file" &&
+
+ # check both regular and streaming code paths
+ test_must_fail git fsck 2>out &&
+ test_i18ngrep corrupt.*$blob out &&
+
+ test_must_fail git -c core.bigfilethreshold=128 fsck 2>out &&
+ test_i18ngrep corrupt.*$blob out
+'
+
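Aside: the new t1450 test above truncates a loose object on disk (keeping only the first 1024 bytes of a ~4KB blob) and expects fsck to report corruption through both the in-core and the streaming code path, the latter forced with core.bigfilethreshold=128. Loose objects are zlib-deflated, and at that level truncation shows up as an inflate that runs out of input before reaching Z_STREAM_END. The following stand-alone illustration of that check is a hedged sketch, not git's object-reading code:

#include <string.h>
#include <zlib.h>

/* Returns 1 if buf holds a complete zlib stream, 0 if it is truncated/corrupt. */
static int stream_is_complete(const unsigned char *buf, size_t len)
{
	unsigned char out[4096];
	z_stream zs;
	int ret;

	memset(&zs, 0, sizeof(zs));
	if (inflateInit(&zs) != Z_OK)
		return 0;
	zs.next_in = (unsigned char *)buf;
	zs.avail_in = len;
	do {
		zs.next_out = out;
		zs.avail_out = sizeof(out);
		ret = inflate(&zs, Z_NO_FLUSH);
	} while (ret == Z_OK);	/* keep draining while progress is made */
	inflateEnd(&zs);
	/* Z_STREAM_END means all data was present; anything else means not. */
	return ret == Z_STREAM_END;
}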
# for each of type, we have one version which is referenced by another object
# (and so while unreachable, not dangling), and another variant which really is
# dangling.
git fsck >actual &&
# the output order is non-deterministic, as it comes from a hash
sort <actual >actual.sorted &&
- test_cmp expect actual.sorted
+ test_i18ncmp expect actual.sorted
)
'
test_when_finished "mv .git/index.backup .git/index" &&
corrupt_index_checksum &&
test_must_fail git fsck --cache 2>errors &&
- grep "bad index file" errors
+ test_i18ngrep "bad index file" errors
'
test_done
grep "git index-pack.*--fsck-objects" trace
'
+test_expect_success 'use fsck before and after manually fetching a missing subtree' '
+ # push new commit so server has a subtree
+ mkdir src/dir &&
+ echo "in dir" >src/dir/file.txt &&
+ git -C src add dir/file.txt &&
+ git -C src commit -m "file in dir" &&
+ git -C src push -u srv master &&
+ SUBTREE=$(git -C src rev-parse HEAD:dir) &&
+
+ rm -rf dst &&
+ git clone --no-checkout --filter=tree:0 "file://$(pwd)/srv.bare" dst &&
+ git -C dst fsck &&
+
+ # Make sure we only have commits, and all trees and blobs are missing.
+ git -C dst rev-list --missing=allow-any --objects master \
+ >fetched_objects &&
+ awk -f print_1.awk fetched_objects |
+ xargs -n1 git -C dst cat-file -t >fetched_types &&
+
+ sort -u fetched_types >unique_types.observed &&
+ echo commit >unique_types.expected &&
+ test_cmp unique_types.expected unique_types.observed &&
+
+ # Auto-fetch a tree with cat-file.
+ git -C dst cat-file -p $SUBTREE >tree_contents &&
+ grep file.txt tree_contents &&
+
+ # fsck still works after an auto-fetch of a tree.
+ git -C dst fsck &&
+
+ # Auto-fetch all remaining trees and blobs with --missing=error
+ git -C dst rev-list --missing=error --objects master >fetched_objects &&
+ test_line_count = 70 fetched_objects &&
+
+ awk -f print_1.awk fetched_objects |
+ xargs -n1 git -C dst cat-file -t >fetched_types &&
+
+ sort -u fetched_types >unique_types.observed &&
+ test_write_lines blob commit tree >unique_types.expected &&
+ test_cmp unique_types.expected unique_types.observed
+'
+
test_expect_success 'partial clone fetches blobs pointed to by refs even if normally filtered out' '
rm -rf src dst &&
git init src &&
test_must_fail git -c protocol.version=2 clone \
--filter=blob:none $HTTPD_URL/one_time_sed/server repo 2>err &&
-- grep "did not send all necessary objects" err &&
++ test_i18ngrep "did not send all necessary objects" err &&
# Ensure that the one-time-sed script was used.
! test -e "$HTTPD_ROOT_PATH/one-time-sed"
cp -r "$LOCAL_PRISTINE" local &&
inconsistency master 1234567890123456789012345678901234567890 &&
test_must_fail git -C local fetch 2>err &&
-- grep "ERR upload-pack: not our ref" err
++ test_i18ngrep "ERR upload-pack: not our ref" err
'
test_expect_success 'server is initially ahead - ref in want' '
echo "s/master/raster/" >"$HTTPD_ROOT_PATH/one-time-sed" &&
test_must_fail git -C local fetch 2>err &&
-- grep "ERR unknown ref refs/heads/raster" err
++ test_i18ngrep "ERR unknown ref refs/heads/raster" err
'
stop_httpd
test_expect_success '"git fsck" works' '
git fsck master >fsck_master.out &&
- grep "dangling commit $R" fsck_master.out &&
- grep "dangling tag $(cat .git/refs/tags/mytag)" fsck_master.out &&
+ test_i18ngrep "dangling commit $R" fsck_master.out &&
+ test_i18ngrep "dangling tag $(cat .git/refs/tags/mytag)" fsck_master.out &&
test -z "$(git fsck)"
'
printf "%s\n%s %s\n\n# comment\n%s\n" \
$(git rev-parse HEAD^^ HEAD^ HEAD^^ HEAD^2) \
>.git/info/grafts &&
- git replace --convert-graft-file &&
+ git status 2>stderr &&
+ test_i18ngrep "hint:.*grafts is deprecated" stderr &&
+ git replace --convert-graft-file 2>stderr &&
+ test_i18ngrep ! "hint:.*grafts is deprecated" stderr &&
test_path_is_missing .git/info/grafts &&
: verify that the history is now "grafted" &&