st = open_istream(oid, &type, &sz, NULL);
if (!st)
- return error("cannot stream blob %s", oid_to_hex(oid));
+ return error(_("cannot stream blob %s"), oid_to_hex(oid));
for (;;) {
readlen = read_istream(st, buf, sizeof(buf));
if (readlen <= 0)
*header.typeflag = TYPEFLAG_REG;
mode = (mode | ((mode & 0100) ? 0777 : 0666)) & ~tar_umask;
} else {
- return error("unsupported file mode: 0%o (SHA1: %s)",
+ return error(_("unsupported file mode: 0%o (SHA1: %s)"),
mode, oid_to_hex(oid));
}
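For illustration (not part of the patch): the normalization above keeps only the executable bit of the original mode, then applies the tar umask (002 by default), so:

	0100644 -> (0100644 | 0666) & ~0002 = 0100664	/* non-executable */
	0100755 -> (0100755 | 0777) & ~0002 = 0100775	/* executable */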
if (pathlen > sizeof(header.name)) {
memcpy(header.name, path, pathlen);
if (S_ISREG(mode) && !args->convert &&
- oid_object_info(the_repository, oid, &size) == OBJ_BLOB &&
+ oid_object_info(args->repo, oid, &size) == OBJ_BLOB &&
size > big_file_threshold)
buffer = NULL;
else if (S_ISLNK(mode) || S_ISREG(mode)) {
enum object_type type;
buffer = object_file_to_archive(args, path, oid, old_mode, &type, &size);
if (!buffer)
- return error("cannot read %s", oid_to_hex(oid));
+ return error(_("cannot read %s"), oid_to_hex(oid));
} else {
buffer = NULL;
size = 0;
filter.in = -1;
if (start_command(&filter) < 0)
- die_errno("unable to start '%s' filter", argv[0]);
+ die_errno(_("unable to start '%s' filter"), argv[0]);
close(1);
if (dup2(filter.in, 1) < 0)
- die_errno("unable to redirect descriptor");
+ die_errno(_("unable to redirect descriptor"));
close(filter.in);
r = write_tar_archive(ar, args);
close(1);
if (finish_command(&filter) != 0)
- die("'%s' filter reported error", argv[0]);
+ die(_("'%s' filter reported error"), argv[0]);
strbuf_release(&cmd);
return r;
if (is_utf8(path))
flags |= ZIP_UTF8;
else
- warning("Path is not valid UTF-8: %s", path);
+ warning(_("path is not valid UTF-8: %s"), path);
}
if (pathlen > 0xffff) {
- return error("path too long (%d chars, SHA1: %s): %s",
+ return error(_("path too long (%d chars, SHA1: %s): %s"),
(int)pathlen, oid_to_hex(oid), path);
}
compressed_size = 0;
buffer = NULL;
} else if (S_ISREG(mode) || S_ISLNK(mode)) {
- enum object_type type = oid_object_info(the_repository, oid,
+ enum object_type type = oid_object_info(args->repo, oid,
&size);
method = 0;
size > big_file_threshold) {
stream = open_istream(oid, &type, &size, NULL);
if (!stream)
- return error("cannot stream blob %s",
+ return error(_("cannot stream blob %s"),
oid_to_hex(oid));
flags |= ZIP_STREAM;
out = buffer = NULL;
buffer = object_file_to_archive(args, path, oid, mode,
&type, &size);
if (!buffer)
- return error("cannot read %s",
+ return error(_("cannot read %s"),
oid_to_hex(oid));
crc = crc32(crc, buffer, size);
is_binary = entry_is_binary(path_without_prefix,
}
compressed_size = (method == 0) ? size : 0;
} else {
- return error("unsupported file mode: 0%o (SHA1: %s)", mode,
+ return error(_("unsupported file mode: 0%o (SHA1: %s)"), mode,
oid_to_hex(oid));
}
zstream.avail_in = readlen;
result = git_deflate(&zstream, 0);
if (result != Z_OK)
- die("deflate error (%d)", result);
+ die(_("deflate error (%d)"), result);
out_len = zstream.next_out - compressed;
if (out_len > 0) {
struct tm *t;
if (date_overflows(*timestamp))
- die("timestamp too large for this system: %"PRItime,
+ die(_("timestamp too large for this system: %"PRItime),
*timestamp);
time = (time_t)*timestamp;
t = localtime(&time);
for (i = 0; i < active_nr; i++) {
struct cache_entry *ce = active_cache[i];
- if (pathspec && !ce_path_match(ce, pathspec, NULL))
+ if (pathspec && !ce_path_match(&the_index, ce, pathspec, NULL))
continue;
if (chmod_cache_entry(ce, flip) < 0)
continue; /* do not touch unmerged paths */
if (!S_ISREG(ce->ce_mode) && !S_ISLNK(ce->ce_mode))
continue; /* do not touch non blobs */
- if (pathspec && !ce_path_match(ce, pathspec, NULL))
+ if (pathspec && !ce_path_match(&the_index, ce, pathspec, NULL))
continue;
retval |= add_file_to_cache(ce->name, flags | HASH_RENORMALIZE);
}
i = dir->nr;
while (--i >= 0) {
struct dir_entry *entry = *src++;
- if (dir_path_match(entry, pathspec, prefix, seen))
+ if (dir_path_match(&the_index, entry, pathspec, prefix, seen))
*dst++ = entry;
}
dir->nr = dst - dir->entries;
OPT_BOOL( 0 , "refresh", &refresh_only, N_("don't add, only refresh the index")),
OPT_BOOL( 0 , "ignore-errors", &ignore_add_errors, N_("just skip files which cannot be added because of errors")),
OPT_BOOL( 0 , "ignore-missing", &ignore_missing, N_("check if - even missing - files are ignored in dry run")),
- OPT_STRING( 0 , "chmod", &chmod_arg, N_("(+/-)x"), N_("override the executable bit of the listed files")),
+ OPT_STRING(0, "chmod", &chmod_arg, "(+|-)x",
+ N_("override the executable bit of the listed files")),
OPT_HIDDEN_BOOL(0, "warn-embedded-repo", &warn_on_embedded_repo,
N_("warn when adding an embedded repository")),
OPT_END(),
}
if (next == EXPECT_COLOR)
- die (_("must end with a color"));
+ die(_("must end with a color"));
colorfield[colorfield_nr].hop = TIME_MAX;
string_list_clear(&l, 0);
sb.revs = &revs;
sb.contents_from = contents_from;
sb.reverse = reverse;
+ sb.repo = the_repository;
setup_scoreboard(&sb, path, &o);
lno = sb.num_lines;
int print_contents;
int buffer_output;
int all_objects;
+ int unordered;
int cmdmode; /* may be 'w' or 'c' for --filters or --textconv */
const char *format;
};
oid_to_hex(oid), path);
if ((type == OBJ_BLOB) && S_ISREG(mode)) {
struct strbuf strbuf = STRBUF_INIT;
- if (convert_to_working_tree(path, *buf, *size, &strbuf)) {
+ if (convert_to_working_tree(&the_index, path, *buf, *size, &strbuf)) {
free(*buf);
*size = strbuf.len;
*buf = strbuf_detach(&strbuf, NULL);
}
}
-static void batch_object_write(const char *obj_name, struct batch_options *opt,
+static void batch_object_write(const char *obj_name,
+ struct strbuf *scratch,
+ struct batch_options *opt,
struct expand_data *data)
{
- struct strbuf buf = STRBUF_INIT;
-
if (!data->skip_object_info &&
oid_object_info_extended(the_repository, &data->oid, &data->info,
OBJECT_INFO_LOOKUP_REPLACE) < 0) {
return;
}
- strbuf_expand(&buf, opt->format, expand_format, data);
- strbuf_addch(&buf, '\n');
- batch_write(opt, buf.buf, buf.len);
- strbuf_release(&buf);
+ strbuf_reset(scratch);
+ strbuf_expand(scratch, opt->format, expand_format, data);
+ strbuf_addch(scratch, '\n');
+ batch_write(opt, scratch->buf, scratch->len);
if (opt->print_contents) {
print_object_or_die(opt, data);
}
}
-static void batch_one_object(const char *obj_name, struct batch_options *opt,
+static void batch_one_object(const char *obj_name,
+ struct strbuf *scratch,
+ struct batch_options *opt,
struct expand_data *data)
{
struct object_context ctx;
return;
}
- batch_object_write(obj_name, opt, data);
+ batch_object_write(obj_name, scratch, opt, data);
}
struct object_cb_data {
struct batch_options *opt;
struct expand_data *expand;
+ struct oidset *seen;
+ struct strbuf *scratch;
};
static int batch_object_cb(const struct object_id *oid, void *vdata)
{
struct object_cb_data *data = vdata;
oidcpy(&data->expand->oid, oid);
- batch_object_write(NULL, data->opt, data->expand);
+ batch_object_write(NULL, data->scratch, data->opt, data->expand);
return 0;
}
-static int batch_loose_object(const struct object_id *oid,
- const char *path,
- void *data)
+static int collect_loose_object(const struct object_id *oid,
+ const char *path,
+ void *data)
{
oid_array_append(data, oid);
return 0;
}
-static int batch_packed_object(const struct object_id *oid,
- struct packed_git *pack,
- uint32_t pos,
- void *data)
+static int collect_packed_object(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *data)
{
oid_array_append(data, oid);
return 0;
}
+static int batch_unordered_object(const struct object_id *oid, void *vdata)
+{
+ struct object_cb_data *data = vdata;
+
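+	/* oidset_insert() returns 1 if the oid was already in the set */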
+ if (oidset_insert(data->seen, oid))
+ return 0;
+
+ return batch_object_cb(oid, data);
+}
+
+static int batch_unordered_loose(const struct object_id *oid,
+ const char *path,
+ void *data)
+{
+ return batch_unordered_object(oid, data);
+}
+
+static int batch_unordered_packed(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *data)
+{
+ return batch_unordered_object(oid, data);
+}
+
static int batch_objects(struct batch_options *opt)
{
- struct strbuf buf = STRBUF_INIT;
+ struct strbuf input = STRBUF_INIT;
+ struct strbuf output = STRBUF_INIT;
struct expand_data data;
int save_warning;
int retval = 0;
*/
memset(&data, 0, sizeof(data));
data.mark_query = 1;
- strbuf_expand(&buf, opt->format, expand_format, &data);
+ strbuf_expand(&output, opt->format, expand_format, &data);
data.mark_query = 0;
+ strbuf_release(&output);
if (opt->cmdmode)
data.split_on_whitespace = 1;
data.info.typep = &data.type;
if (opt->all_objects) {
- struct oid_array sa = OID_ARRAY_INIT;
struct object_cb_data cb;
- for_each_loose_object(batch_loose_object, &sa, 0);
- for_each_packed_object(batch_packed_object, &sa, 0);
if (repository_format_partial_clone)
warning("This repository has extensions.partialClone set. Some objects may not be loaded.");
cb.opt = opt;
cb.expand = &data;
- oid_array_for_each_unique(&sa, batch_object_cb, &cb);
+ cb.scratch = &output;
+
+ if (opt->unordered) {
+ struct oidset seen = OIDSET_INIT;
+
+ cb.seen = &seen;
+
+ for_each_loose_object(batch_unordered_loose, &cb, 0);
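+			/* pack order (not .idx order) gives better locality while streaming */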
+ for_each_packed_object(batch_unordered_packed, &cb,
+ FOR_EACH_OBJECT_PACK_ORDER);
+
+ oidset_clear(&seen);
+ } else {
+ struct oid_array sa = OID_ARRAY_INIT;
+
+ for_each_loose_object(collect_loose_object, &sa, 0);
+ for_each_packed_object(collect_packed_object, &sa, 0);
+
+ oid_array_for_each_unique(&sa, batch_object_cb, &cb);
+
+ oid_array_clear(&sa);
+ }
- oid_array_clear(&sa);
+ strbuf_release(&output);
return 0;
}
save_warning = warn_on_object_refname_ambiguity;
warn_on_object_refname_ambiguity = 0;
- while (strbuf_getline(&buf, stdin) != EOF) {
+ while (strbuf_getline(&input, stdin) != EOF) {
if (data.split_on_whitespace) {
/*
* Split at first whitespace, tying off the beginning
* of the string and saving the remainder (or NULL) in
* data.rest.
*/
- char *p = strpbrk(buf.buf, " \t");
+ char *p = strpbrk(input.buf, " \t");
if (p) {
while (*p && strchr(" \t", *p))
*p++ = '\0';
data.rest = p;
}
- batch_one_object(buf.buf, opt, &data);
+ batch_one_object(input.buf, &output, opt, &data);
}
- strbuf_release(&buf);
+ strbuf_release(&input);
+ strbuf_release(&output);
warn_on_object_refname_ambiguity = save_warning;
return retval;
}
N_("follow in-tree symlinks (used with --batch or --batch-check)")),
OPT_BOOL(0, "batch-all-objects", &batch.all_objects,
N_("show all objects with --batch or --batch-check")),
+ OPT_BOOL(0, "unordered", &batch.unordered,
+ N_("do not order --batch-all-objects output")),
OPT_END()
};
* match_pathspec() for _all_ entries when
* opts->source_tree != NULL.
*/
- if (ce_path_match(ce, &opts->pathspec, ps_matched))
+ if (ce_path_match(&the_index, ce, &opts->pathspec, ps_matched))
ce->ce_flags |= CE_MATCHED;
}
if (opts.track != BRANCH_TRACK_UNSPECIFIED && !opts.new_branch) {
const char *argv0 = argv[0];
if (!argc || !strcmp(argv0, "--"))
- die (_("--track needs a branch name"));
+ die(_("--track needs a branch name"));
skip_prefix(argv0, "refs/", &argv0);
skip_prefix(argv0, "remotes/", &argv0);
argv0 = strchr(argv0, '/');
if (!argv0 || !argv0[1])
- die (_("Missing branch name; try -b"));
+ die(_("missing branch name; try -b"));
opts.new_branch = argv0 + 1;
}
if (ce->ce_flags & CE_UPDATE)
continue;
- if (!ce_path_match(ce, pattern, m))
+ if (!ce_path_match(&the_index, ce, pattern, m))
continue;
item = string_list_insert(list, ce->name);
if (ce_skip_worktree(ce))
unlink(git_path_squash_msg(the_repository));
if (commit_index_files())
- die (_("Repository has been updated, but unable to write\n"
- "new_index file. Check that disk is not full and quota is\n"
- "not exceeded, and then \"git reset HEAD\" to recover."));
+ die(_("repository has been updated, but unable to write\n"
+ "new_index file. Check that disk is not full and quota is\n"
+ "not exceeded, and then \"git reset HEAD\" to recover."));
rerere(0);
run_command_v_opt(argv_gc_auto, RUN_GIT_CMD);
}
if (repo_read_index(repo) < 0)
- die("index file corrupt");
+ die(_("index file corrupt"));
for (nr = 0; nr < repo->index->cache_nr; nr++) {
const struct cache_entry *ce = repo->index->cache[nr];
strbuf_addstr(&name, ce->name);
if (S_ISREG(ce->ce_mode) &&
- match_pathspec(pathspec, name.buf, name.len, 0, NULL,
+ match_pathspec(repo->index, pathspec, name.buf, name.len, 0, NULL,
S_ISDIR(ce->ce_mode) ||
S_ISGITLINK(ce->ce_mode))) {
/*
hit |= grep_file(opt, name.buf);
}
} else if (recurse_submodules && S_ISGITLINK(ce->ce_mode) &&
- submodule_path_match(pathspec, name.buf, NULL)) {
+ submodule_path_match(repo->index, pathspec, name.buf, NULL)) {
hit |= grep_submodule(opt, repo, pathspec, NULL, ce->name, ce->name);
} else {
continue;
fill_directory(&dir, &the_index, pathspec);
for (i = 0; i < dir.nr; i++) {
- if (!dir_path_match(dir.entries[i], pathspec, 0, NULL))
+ if (!dir_path_match(&the_index, dir.entries[i], pathspec, 0, NULL))
continue;
hit |= grep_file(opt, dir.entries[i]->name);
if (hit && opt->status_only)
}
if (!opt.pattern_list)
- die(_("no pattern given."));
+ die(_("no pattern given"));
/* --only-matching has no effect with --invert. */
if (opt.invert)
}
if (recurse_submodules && (!use_index || untracked))
- die(_("option not supported with --recurse-submodules."));
+ die(_("option not supported with --recurse-submodules"));
if (!show_in_pager && !opt.status_only)
setup_pager();
if (!use_index && (untracked || cached))
- die(_("--cached or --untracked cannot be used with --no-index."));
+ die(_("--cached or --untracked cannot be used with --no-index"));
if (!use_index || untracked) {
int use_exclude = (opt_exclude < 0) ? use_index : !!opt_exclude;
hit = grep_directory(&opt, &pathspec, use_exclude, use_index);
} else if (0 <= opt_exclude) {
- die(_("--[no-]exclude-standard cannot be used for tracked contents."));
+ die(_("--[no-]exclude-standard cannot be used for tracked contents"));
} else if (!list.nr) {
if (!cached)
setup_work_tree();
hit = grep_cache(&opt, the_repository, &pathspec, cached);
} else {
if (cached)
- die(_("both --cached and trees are given."));
+ die(_("both --cached and trees are given"));
hit = grep_objects(&opt, &pathspec, &list);
}
buf = read_object_file(&entry->idx.oid, &type, &size);
if (!buf)
- die("unable to read %s", oid_to_hex(&entry->idx.oid));
+ die(_("unable to read %s"), oid_to_hex(&entry->idx.oid));
base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
&base_size);
if (!base_buf)
oid_to_hex(&DELTA(entry)->idx.oid));
delta_buf = diff_delta(base_buf, base_size,
buf, size, &delta_size, 0);
+ /*
+	 * We successfully computed this delta once but dropped it for
+ * memory reasons. Something is very wrong if this time we
+ * recompute and create a different delta.
+ */
if (!delta_buf || delta_size != DELTA_SIZE(entry))
- die("delta size changed");
+ BUG("delta size changed");
free(buf);
free(base_buf);
return delta_buf;
datalen = revidx[1].offset - offset;
if (!pack_to_stdout && p->index_version > 1 &&
check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
- error("bad packed object CRC for %s",
+ error(_("bad packed object CRC for %s"),
oid_to_hex(&entry->idx.oid));
unuse_pack(&w_curs);
return write_no_reuse_object(f, entry, limit, usable_delta);
if (!pack_to_stdout && p->index_version == 1 &&
check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
- error("corrupt packed object for %s",
+ error(_("corrupt packed object for %s"),
oid_to_hex(&entry->idx.oid));
unuse_pack(&w_curs);
return write_no_reuse_object(f, entry, limit, usable_delta);
*/
recursing = (e->idx.offset == 1);
if (recursing) {
- warning("recursive delta detected for object %s",
+ warning(_("recursive delta detected for object %s"),
oid_to_hex(&e->idx.oid));
return WRITE_ONE_RECURSIVE;
} else if (e->idx.offset || e->preferred_base) {
/* make sure off_t is sufficiently large not to wrap */
if (signed_add_overflows(*offset, size))
- die("pack too large for current definition of off_t");
+ die(_("pack too large for current definition of off_t"));
*offset += size;
return WRITE_ONE_WRITTEN;
}
}
if (wo_end != to_pack.nr_objects)
- die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);
+ die(_("ordered %u objects, expected %"PRIu32),
+ wo_end, to_pack.nr_objects);
return wo;
}
int fd;
if (!is_pack_valid(reuse_packfile))
- die("packfile is invalid: %s", reuse_packfile->pack_name);
+ die(_("packfile is invalid: %s"), reuse_packfile->pack_name);
fd = git_open(reuse_packfile->pack_name);
if (fd < 0)
- die_errno("unable to open packfile for reuse: %s",
+ die_errno(_("unable to open packfile for reuse: %s"),
reuse_packfile->pack_name);
if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
- die_errno("unable to seek in reused packfile");
+ die_errno(_("unable to seek in reused packfile"));
if (reuse_packfile_offset < 0)
reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz;
int read_pack = xread(fd, buffer, sizeof(buffer));
if (read_pack <= 0)
- die_errno("unable to read from reused packfile");
+ die_errno(_("unable to read from reused packfile"));
if (read_pack > to_write)
read_pack = to_write;
* to preserve this property.
*/
if (stat(pack_tmp_name, &st) < 0) {
- warning_errno("failed to stat %s", pack_tmp_name);
+ warning_errno(_("failed to stat %s"), pack_tmp_name);
} else if (!last_mtime) {
last_mtime = st.st_mtime;
} else {
utb.actime = st.st_atime;
utb.modtime = --last_mtime;
if (utime(pack_tmp_name, &utb) < 0)
- warning_errno("failed utime() on %s", pack_tmp_name);
+ warning_errno(_("failed utime() on %s"), pack_tmp_name);
}
strbuf_addf(&tmpname, "%s-", base_name);
free(write_order);
stop_progress(&progress_state);
if (written != nr_result)
- die("wrote %"PRIu32" objects while expecting %"PRIu32,
- written, nr_result);
+ die(_("wrote %"PRIu32" objects while expecting %"PRIu32),
+ written, nr_result);
}
static int no_try_delta(const char *path)
if (!check)
check = attr_check_initl("delta", NULL);
- if (git_check_attr(path, check))
+ if (git_check_attr(&the_index, path, check))
return 0;
if (ATTR_FALSE(check->items[0].value))
return 1;
while (c & 128) {
ofs += 1;
if (!ofs || MSB(ofs, 7)) {
- error("delta base offset overflow in pack for %s",
+ error(_("delta base offset overflow in pack for %s"),
oid_to_hex(&entry->idx.oid));
goto give_up;
}
}
ofs = entry->in_pack_offset - ofs;
if (ofs <= 0 || ofs >= entry->in_pack_offset) {
- error("delta base offset out of bound for %s",
+ error(_("delta base offset out of bound for %s"),
oid_to_hex(&entry->idx.oid));
goto give_up;
}
#ifndef NO_PTHREADS
+/* Protect access to object database */
static pthread_mutex_t read_mutex;
#define read_lock() pthread_mutex_lock(&read_mutex)
#define read_unlock() pthread_mutex_unlock(&read_mutex)
+/* Protect delta_cache_size */
static pthread_mutex_t cache_mutex;
#define cache_lock() pthread_mutex_lock(&cache_mutex)
#define cache_unlock() pthread_mutex_unlock(&cache_mutex)
+/*
+ * Protect object list partitioning (e.g. struct thread_param) and
+ * progress_state
+ */
static pthread_mutex_t progress_mutex;
#define progress_lock() pthread_mutex_lock(&progress_mutex)
#define progress_unlock() pthread_mutex_unlock(&progress_mutex)
+/*
+ * Access to struct object_entry is unprotected since each thread owns
+ * a portion of the main object list. Just don't access object entries
+ * ahead in the list because they can be stolen and would need
+ * progress_mutex for protection.
+ */
#else
#define read_lock() (void)0
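A minimal sketch (not taken from the patch) of the cache_mutex discipline described above; every update of the shared counter is bracketed by the lock macros:

	cache_lock();
	delta_cache_size -= DELTA_SIZE(entry);	/* shared across threads */
	cache_unlock();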
trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
read_unlock();
if (!trg->data)
- die("object %s cannot be read",
+ die(_("object %s cannot be read"),
oid_to_hex(&trg_entry->idx.oid));
if (sz != trg_size)
- die("object %s inconsistent object length (%lu vs %lu)",
+ die(_("object %s inconsistent object length (%lu vs %lu)"),
oid_to_hex(&trg_entry->idx.oid), sz,
trg_size);
*mem_usage += sz;
if (src_entry->preferred_base) {
static int warned = 0;
if (!warned++)
- warning("object %s cannot be read",
+ warning(_("object %s cannot be read"),
oid_to_hex(&src_entry->idx.oid));
/*
* Those objects are not included in the
*/
return 0;
}
- die("object %s cannot be read",
+ die(_("object %s cannot be read"),
oid_to_hex(&src_entry->idx.oid));
}
if (sz != src_size)
- die("object %s inconsistent object length (%lu vs %lu)",
+ die(_("object %s inconsistent object length (%lu vs %lu)"),
oid_to_hex(&src_entry->idx.oid), sz,
src_size);
*mem_usage += sz;
if (!src->index) {
static int warned = 0;
if (!warned++)
- warning("suboptimal pack - out of memory");
+ warning(_("suboptimal pack - out of memory"));
return 0;
}
*mem_usage += sizeof_delta_index(src->index);
static try_to_free_t old_try_to_free_routine;
/*
+ * The main object list is split into smaller lists, each is handed to
+ * one worker.
+ *
* The main thread waits on the condition that (at least) one of the workers
* has stopped working (which is indicated in the .working member of
* struct thread_params).
+ *
* When a work thread has completed its work, it sets .working to 0 and
* signals the main thread and waits on the condition that .data_ready
* becomes 1.
+ *
+ * The main thread steals half of the work from the worker that has
+ * most work left to hand it to the idle worker.
*/
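Condensed to its essentials, the worker's side of that handshake looks roughly like this (a sketch; the real loop also re-checks me->remaining before parking):

	progress_lock();
	me->working = 0;
	pthread_cond_signal(&progress_cond);	/* wake the main thread */
	progress_unlock();

	pthread_mutex_lock(&me->mutex);
	while (!me->data_ready)			/* wait for stolen work */
		pthread_cond_wait(&me->cond, &me->mutex);
	me->data_ready = 0;
	pthread_mutex_unlock(&me->mutex);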
struct thread_params {
return;
}
if (progress > pack_to_stdout)
- fprintf(stderr, "Delta compression using up to %d threads.\n",
- delta_search_threads);
+ fprintf_ln(stderr, _("Delta compression using up to %d threads"),
+ delta_search_threads);
p = xcalloc(delta_search_threads, sizeof(*p));
/* Partition the work amongst work threads. */
ret = pthread_create(&p[i].thread, NULL,
threaded_find_deltas, &p[i]);
if (ret)
- die("unable to create thread: %s", strerror(ret));
+ die(_("unable to create thread: %s"), strerror(ret));
active_threads++;
}
tag = lookup_tag(the_repository, oid);
while (1) {
if (!tag || parse_tag(tag) || !tag->tagged)
- die("unable to pack objects reachable from tag %s",
+ die(_("unable to pack objects reachable from tag %s"),
oid_to_hex(oid));
add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0);
if (!entry->preferred_base) {
nr_deltas++;
if (oe_type(entry) < 0)
- die("unable to get type of object %s",
+ die(_("unable to get type of object %s"),
oid_to_hex(&entry->idx.oid));
} else {
if (oe_type(entry) < 0) {
ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
stop_progress(&progress_state);
if (nr_done != nr_deltas)
- die("inconsistency with delta count");
+ die(_("inconsistency with delta count"));
}
free(delta_list);
}
if (!strcmp(k, "pack.threads")) {
delta_search_threads = git_config_int(k, v);
if (delta_search_threads < 0)
- die("invalid number of threads specified (%d)",
+ die(_("invalid number of threads specified (%d)"),
delta_search_threads);
#ifdef NO_PTHREADS
if (delta_search_threads != 1) {
- warning("no threads support, ignoring %s", k);
+ warning(_("no threads support, ignoring %s"), k);
delta_search_threads = 0;
}
#endif
if (!strcmp(k, "pack.indexversion")) {
pack_idx_opts.version = git_config_int(k, v);
if (pack_idx_opts.version > 2)
- die("bad pack.indexversion=%"PRIu32,
+ die(_("bad pack.indexversion=%"PRIu32),
pack_idx_opts.version);
return 0;
}
if (feof(stdin))
break;
if (!ferror(stdin))
- die("fgets returned NULL, not EOF, not error!");
+ die("BUG: fgets returned NULL, not EOF, not error!");
if (errno != EINTR)
die_errno("fgets");
clearerr(stdin);
}
if (line[0] == '-') {
if (get_oid_hex(line+1, &oid))
- die("expected edge object ID, got garbage:\n %s",
+ die(_("expected edge object ID, got garbage:\n %s"),
line);
add_preferred_base(&oid);
continue;
}
if (parse_oid_hex(line, &oid, &p))
- die("expected object ID, got garbage:\n %s", line);
+ die(_("expected object ID, got garbage:\n %s"), line);
add_preferred_base_object(p + 1);
add_object_entry(&oid, OBJ_NONE, p + 1, 0);
if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
if (open_pack_index(p))
- die("cannot open pack index");
+ die(_("cannot open pack index"));
ALLOC_GROW(in_pack.array,
in_pack.nr + p->num_objects,
enum object_type type = oid_object_info(the_repository, oid, NULL);
if (type < 0) {
- warning("loose object at %s could not be examined", path);
+ warning(_("loose object at %s could not be examined"), path);
return 0;
}
continue;
if (open_pack_index(p))
- die("cannot open pack index");
+ die(_("cannot open pack index"));
for (i = 0; i < p->num_objects; i++) {
nth_packed_object_oid(&oid, p, i);
!has_sha1_pack_kept_or_nonlocal(&oid) &&
!loosened_object_can_be_discarded(&oid, p->mtime))
if (force_object_loose(&oid, p->mtime))
- die("unable to force loose object");
+ die(_("unable to force loose object"));
}
}
}
use_bitmap_index = 0;
continue;
}
- die("not a rev '%s'", line);
+ die(_("not a rev '%s'"), line);
}
if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
- die("bad revision '%s'", line);
+ die(_("bad revision '%s'"), line);
}
if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
return;
if (prepare_revision_walk(&revs))
- die("revision walk setup failed");
+ die(_("revision walk setup failed"));
mark_edges_uninteresting(&revs, show_edge);
if (!fn_show_object)
revs.ignore_missing_links = 1;
if (add_unseen_recent_objects_to_traversal(&revs,
unpack_unreachable_expiration))
- die("unable to add recent objects");
+ die(_("unable to add recent objects"));
if (prepare_revision_walk(&revs))
- die("revision walk setup failed");
+ die(_("revision walk setup failed"));
traverse_commit_list(&revs, record_recent_commit,
record_recent_object, NULL);
}
OPT_BOOL(0, "all-progress-implied",
&all_progress_implied,
N_("similar to --all-progress when progress meter is shown")),
- { OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
+ { OPTION_CALLBACK, 0, "index-version", NULL, N_("<version>[,<offset>]"),
N_("write the pack index file in the specified idx format version"),
0, option_parse_index_version },
OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
BUG("too many dfs states, increase OE_DFS_STATE_BITS");
- check_replace_refs = 0;
+ read_replace_refs = 0;
reset_pack_idx_option(&pack_idx_opts);
git_config(git_pack_config, NULL);
if (pack_compression_level == -1)
pack_compression_level = Z_DEFAULT_COMPRESSION;
else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
- die("bad pack compression level %d", pack_compression_level);
+ die(_("bad pack compression level %d"), pack_compression_level);
if (!delta_search_threads) /* --threads=0 means autodetect */
delta_search_threads = online_cpus();
#ifdef NO_PTHREADS
if (delta_search_threads != 1)
- warning("no threads support, ignoring --threads");
+ warning(_("no threads support, ignoring --threads"));
#endif
if (!pack_to_stdout && !pack_size_limit)
pack_size_limit = pack_size_limit_cfg;
if (pack_to_stdout && pack_size_limit)
- die("--max-pack-size cannot be used to build a pack for transfer.");
+ die(_("--max-pack-size cannot be used to build a pack for transfer"));
if (pack_size_limit && pack_size_limit < 1024*1024) {
- warning("minimum pack size limit is 1 MiB");
+ warning(_("minimum pack size limit is 1 MiB"));
pack_size_limit = 1024*1024;
}
if (!pack_to_stdout && thin)
- die("--thin cannot be used to build an indexable pack.");
+ die(_("--thin cannot be used to build an indexable pack"));
if (keep_unreachable && unpack_unreachable)
- die("--keep-unreachable and --unpack-unreachable are incompatible.");
+ die(_("--keep-unreachable and --unpack-unreachable are incompatible"));
if (!rev_list_all || !rev_list_reflog || !rev_list_index)
unpack_unreachable_expiration = 0;
if (filter_options.choice) {
if (!pack_to_stdout)
- die("cannot use --filter without --stdout.");
+ die(_("cannot use --filter without --stdout"));
use_bitmap_index = 0;
}
prepare_pack(window, depth);
write_pack_file();
if (progress)
- fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
- " reused %"PRIu32" (delta %"PRIu32")\n",
- written, written_delta, reused, reused_delta);
+ fprintf_ln(stderr,
+ _("Total %"PRIu32" (delta %"PRIu32"),"
+ " reused %"PRIu32" (delta %"PRIu32")"),
+ written, written_delta, reused, reused_delta);
return 0;
}
for (i = 0; i < active_nr; i++) {
const struct cache_entry *ce = active_cache[i];
- if (!ce_path_match(ce, &pathspec, seen))
+ if (!ce_path_match(&the_index, ce, &pathspec, seen))
continue;
ALLOC_GROW(list.entry, list.nr + 1, list.alloc);
list.entry[list.nr].name = xstrdup(ce->name);
list.entry[list.nr].is_submodule = S_ISGITLINK(ce->ce_mode);
if (list.entry[list.nr++].is_submodule &&
!is_staging_gitmodules_ok(&the_index))
- die (_("Please stage your changes to .gitmodules or stash them to proceed"));
+ die(_("please stage your changes to .gitmodules or stash them to proceed"));
}
if (pathspec.nr) {
int save_nr;
char *path;
- if (ce_stage(ce) || !ce_path_match(ce, &pathspec, NULL))
+ if (ce_stage(ce) || !ce_path_match(&the_index, ce, &pathspec, NULL))
continue;
if (has_head)
old = read_one_ent(NULL, &head_oid,
PARSE_OPT_NOARG | /* disallow --cacheinfo=<mode> form */
PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP,
(parse_opt_cb *) cacheinfo_callback},
- {OPTION_CALLBACK, 0, "chmod", &set_executable_bit, N_("(+/-)x"),
+ {OPTION_CALLBACK, 0, "chmod", &set_executable_bit, "(+|-)x",
N_("override the executable bit of the listed files"),
- PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP,
+ PARSE_OPT_NONEG,
chmod_callback},
{OPTION_SET_INT, 0, "assume-unchanged", &mark_valid_only, NULL,
N_("mark files as \"not changing\""),
/* fall through */
return text_eol_is_crlf() ? EOL_CRLF : EOL_LF;
}
- warning("Illegal crlf_action %d\n", (int)crlf_action);
+ warning(_("illegal crlf_action %d"), (int)crlf_action);
return core_eol;
}
* CRLFs would not be restored by checkout
*/
if (conv_flags & CONV_EOL_RNDTRP_DIE)
- die(_("CRLF would be replaced by LF in %s."), path);
+ die(_("CRLF would be replaced by LF in %s"), path);
else if (conv_flags & CONV_EOL_RNDTRP_WARN)
warning(_("CRLF will be replaced by LF in %s.\n"
"The file will have its original line"
- " endings in your working directory."), path);
+ " endings in your working directory"), path);
} else if (old_stats->lonelf && !new_stats->lonelf ) {
/*
* CRLFs would be added by checkout
else if (conv_flags & CONV_EOL_RNDTRP_WARN)
warning(_("LF will be replaced by CRLF in %s.\n"
"The file will have its original line"
- " endings in your working directory."), path);
+ " endings in your working directory"), path);
}
}
struct strbuf *buf, const char *enc, int conv_flags)
{
char *dst;
- int dst_len;
+ size_t dst_len;
int die_on_error = conv_flags & CONV_WRITE_OBJECT;
/*
*/
if (die_on_error && check_roundtrip(enc)) {
char *re_src;
- int re_src_len;
+ size_t re_src_len;
re_src = reencode_string_len(dst, dst_len,
enc, default_encoding,
struct strbuf *buf, const char *enc)
{
char *dst;
- int dst_len;
+ size_t dst_len;
/*
* No encoding is specified or there is nothing to encode.
dst = reencode_string_len(src, src_len, enc, default_encoding,
&dst_len);
if (!dst) {
- error("failed to encode '%s' from %s to %s",
- path, default_encoding, enc);
+ error(_("failed to encode '%s' from %s to %s"),
+ path, default_encoding, enc);
return 0;
}
if (start_command(&child_process)) {
strbuf_release(&cmd);
- return error("cannot fork to run external filter '%s'", params->cmd);
+ return error(_("cannot fork to run external filter '%s'"),
+ params->cmd);
}
sigchain_push(SIGPIPE, SIG_IGN);
if (close(child_process.in))
write_err = 1;
if (write_err)
- error("cannot feed the input to external filter '%s'", params->cmd);
+ error(_("cannot feed the input to external filter '%s'"),
+ params->cmd);
sigchain_pop(SIGPIPE);
status = finish_command(&child_process);
if (status)
- error("external filter '%s' failed %d", params->cmd, status);
+ error(_("external filter '%s' failed %d"), params->cmd, status);
strbuf_release(&cmd);
return (write_err || status);
return 0; /* error was already reported */
if (strbuf_read(&nbuf, async.out, len) < 0) {
- err = error("read from external filter '%s' failed", cmd);
+ err = error(_("read from external filter '%s' failed"), cmd);
}
if (close(async.out)) {
- err = error("read from external filter '%s' failed", cmd);
+ err = error(_("read from external filter '%s' failed"), cmd);
}
if (finish_async(&async)) {
- err = error("external filter '%s' failed", cmd);
+ err = error(_("external filter '%s' failed"), cmd);
}
if (!err) {
* Something went wrong with the protocol filter.
* Force shutdown and restart if another blob requires filtering.
*/
- error("external filter '%s' failed", entry->subprocess.cmd);
+ error(_("external filter '%s' failed"), entry->subprocess.cmd);
subprocess_stop(&subprocess_map, &entry->subprocess);
free(entry);
}
else if (wanted_capability & CAP_SMUDGE)
filter_type = "smudge";
else
- die("unexpected filter type");
+ die(_("unexpected filter type"));
sigchain_push(SIGPIPE, SIG_IGN);
err = strlen(path) > LARGE_PACKET_DATA_MAX - strlen("pathname=\n");
if (err) {
- error("path name too long for external filter");
+ error(_("path name too long for external filter"));
goto done;
}
assert(subprocess_map_initialized);
entry = (struct cmd2process *)subprocess_find_entry(&subprocess_map, cmd);
if (!entry) {
- error("external filter '%s' is not available anymore although "
- "not all paths have been filtered", cmd);
+ error(_("external filter '%s' is not available anymore although "
+ "not all paths have been filtered"), cmd);
return 0;
}
process = &entry->subprocess.process;
const char *working_tree_encoding; /* Supported encoding or default encoding if NULL */
};
- static void convert_attrs(struct conv_attrs *ca, const char *path)
+ static void convert_attrs(const struct index_state *istate,
+ struct conv_attrs *ca, const char *path)
{
static struct attr_check *check;
git_config(read_convert_config, NULL);
}
- if (!git_check_attr(path, check)) {
+ if (!git_check_attr(istate, path, check)) {
struct attr_check_item *ccheck = check->items;
ca->crlf_action = git_path_check_crlf(ccheck + 4);
if (ca->crlf_action == CRLF_UNDEFINED)
ca->crlf_action = CRLF_AUTO_INPUT;
}
- int would_convert_to_git_filter_fd(const char *path)
+ int would_convert_to_git_filter_fd(const struct index_state *istate, const char *path)
{
struct conv_attrs ca;
- convert_attrs(&ca, path);
+ convert_attrs(istate, &ca, path);
if (!ca.drv)
return 0;
return apply_filter(path, NULL, 0, -1, NULL, ca.drv, CAP_CLEAN, NULL);
}
- const char *get_convert_attr_ascii(const char *path)
+ const char *get_convert_attr_ascii(const struct index_state *istate, const char *path)
{
struct conv_attrs ca;
- convert_attrs(&ca, path);
+ convert_attrs(istate, &ca, path);
switch (ca.attr_action) {
case CRLF_UNDEFINED:
return "";
int ret = 0;
struct conv_attrs ca;
- convert_attrs(&ca, path);
+ convert_attrs(istate, &ca, path);
ret |= apply_filter(path, src, len, -1, dst, ca.drv, CAP_CLEAN, NULL);
if (!ret && ca.drv && ca.drv->required)
- die("%s: clean filter '%s' failed", path, ca.drv->name);
+ die(_("%s: clean filter '%s' failed"), path, ca.drv->name);
if (ret && dst) {
src = dst->buf;
int conv_flags)
{
struct conv_attrs ca;
- convert_attrs(&ca, path);
+ convert_attrs(istate, &ca, path);
assert(ca.drv);
assert(ca.drv->clean || ca.drv->process);
if (!apply_filter(path, NULL, 0, fd, dst, ca.drv, CAP_CLEAN, NULL))
- die("%s: clean filter '%s' failed", path, ca.drv->name);
+ die(_("%s: clean filter '%s' failed"), path, ca.drv->name);
encode_to_git(path, dst->buf, dst->len, dst, ca.working_tree_encoding, conv_flags);
crlf_to_git(istate, path, dst->buf, dst->len, dst, ca.crlf_action, conv_flags);
ident_to_git(path, dst->buf, dst->len, dst, ca.ident);
}
- static int convert_to_working_tree_internal(const char *path, const char *src,
+ static int convert_to_working_tree_internal(const struct index_state *istate,
+ const char *path, const char *src,
size_t len, struct strbuf *dst,
int normalizing, struct delayed_checkout *dco)
{
int ret = 0, ret_filter = 0;
struct conv_attrs ca;
- convert_attrs(&ca, path);
+ convert_attrs(istate, &ca, path);
ret |= ident_to_worktree(path, src, len, dst, ca.ident);
if (ret) {
ret_filter = apply_filter(
path, src, len, -1, dst, ca.drv, CAP_SMUDGE, dco);
if (!ret_filter && ca.drv && ca.drv->required)
- die("%s: smudge filter %s failed", path, ca.drv->name);
+ die(_("%s: smudge filter %s failed"), path, ca.drv->name);
return ret | ret_filter;
}
- int async_convert_to_working_tree(const char *path, const char *src,
+ int async_convert_to_working_tree(const struct index_state *istate,
+ const char *path, const char *src,
size_t len, struct strbuf *dst,
void *dco)
{
- return convert_to_working_tree_internal(path, src, len, dst, 0, dco);
+ return convert_to_working_tree_internal(istate, path, src, len, dst, 0, dco);
}
- int convert_to_working_tree(const char *path, const char *src, size_t len, struct strbuf *dst)
+ int convert_to_working_tree(const struct index_state *istate,
+ const char *path, const char *src,
+ size_t len, struct strbuf *dst)
{
- return convert_to_working_tree_internal(path, src, len, dst, 0, NULL);
+ return convert_to_working_tree_internal(istate, path, src, len, dst, 0, NULL);
}
int renormalize_buffer(const struct index_state *istate, const char *path,
const char *src, size_t len, struct strbuf *dst)
{
- int ret = convert_to_working_tree_internal(path, src, len, dst, 1, NULL);
+ int ret = convert_to_working_tree_internal(istate, path, src, len, dst, 1, NULL);
if (ret) {
src = dst->buf;
len = dst->len;
* Note that you would be crazy to set CRLF, smudge/clean or ident to a
* large binary blob you would want us not to slurp into the memory!
*/
- struct stream_filter *get_stream_filter(const char *path, const struct object_id *oid)
+ struct stream_filter *get_stream_filter(const struct index_state *istate,
+ const char *path,
+ const struct object_id *oid)
{
struct conv_attrs ca;
struct stream_filter *filter = NULL;
- convert_attrs(&ca, path);
+ convert_attrs(istate, &ca, path);
if (ca.drv && (ca.drv->process || ca.drv->smudge || ca.drv->clean))
return NULL;
return COLOR_MOVED_ZEBRA;
else if (!strcmp(arg, "default"))
return COLOR_MOVED_DEFAULT;
+ else if (!strcmp(arg, "dimmed-zebra"))
+ return COLOR_MOVED_ZEBRA_DIM;
else if (!strcmp(arg, "dimmed_zebra"))
return COLOR_MOVED_ZEBRA_DIM;
else
- return error(_("color moved setting must be one of 'no', 'default', 'blocks', 'zebra', 'dimmed_zebra', 'plain'"));
+ return error(_("color moved setting must be one of 'no', 'default', 'blocks', 'zebra', 'dimmed-zebra', 'plain'"));
}
static int parse_color_moved_ws(const char *arg)
if (regcomp(ecbdata->diff_words->word_regex,
o->word_regex,
REG_EXTENDED | REG_NEWLINE))
- die ("Invalid regular expression: %s",
- o->word_regex);
+ die("invalid regular expression: %s",
+ o->word_regex);
}
for (i = 0; i < ARRAY_SIZE(diff_words_styles); i++) {
if (o->word_diff == diff_words_styles[i].type) {
temp->tempfile = mks_tempfile_ts(tempfile.buf, strlen(base) + 1);
if (!temp->tempfile)
die_errno("unable to create temp-file");
- if (convert_to_working_tree(path,
+ if (convert_to_working_tree(&the_index, path,
(const char *)blob, (size_t)size, &buf)) {
blob = buf.buf;
size = buf.len;
if (options->detect_rename && options->rename_limit < 0)
options->rename_limit = diff_rename_limit_default;
- if (options->setup & DIFF_SETUP_USE_CACHE) {
- if (!active_cache)
- /* read-cache does not die even when it fails
- * so it is safe for us to do this here. Also
- * it does not smudge active_cache or active_nr
- * when it fails, so we do not have to worry about
- * cleaning it up ourselves either.
- */
- read_cache();
- }
if (hexsz < options->abbrev)
options->abbrev = hexsz; /* full */
#define DO_MATCH_DIRECTORY (1<<1)
#define DO_MATCH_SUBMODULE (1<<2)
- static int match_attrs(const char *name, int namelen,
+ static int match_attrs(const struct index_state *istate,
+ const char *name, int namelen,
const struct pathspec_item *item)
{
int i;
- git_check_attr(name, item->attr_check);
+ git_check_attr(istate, name, item->attr_check);
for (i = 0; i < item->attr_match_nr; i++) {
const char *value;
int matched;
*
* It returns 0 when there is no match.
*/
- static int match_pathspec_item(const struct pathspec_item *item, int prefix,
+ static int match_pathspec_item(const struct index_state *istate,
+ const struct pathspec_item *item, int prefix,
const char *name, int namelen, unsigned flags)
{
/* name/namelen has prefix cut off by caller */
strncmp(item->match, name - prefix, item->prefix))
return 0;
- if (item->attr_match_nr && !match_attrs(name, namelen, item))
+ if (item->attr_match_nr && !match_attrs(istate, name, namelen, item))
return 0;
/* If the match was just the prefix, we matched */
* pathspec did not match any names, which could indicate that the
* user mistyped the nth pathspec.
*/
- static int do_match_pathspec(const struct pathspec *ps,
+ static int do_match_pathspec(const struct index_state *istate,
+ const struct pathspec *ps,
const char *name, int namelen,
int prefix, char *seen,
unsigned flags)
*/
if (seen && ps->items[i].magic & PATHSPEC_EXCLUDE)
seen[i] = MATCHED_FNMATCH;
- how = match_pathspec_item(ps->items+i, prefix, name,
+ how = match_pathspec_item(istate, ps->items+i, prefix, name,
namelen, flags);
if (ps->recursive &&
(ps->magic & PATHSPEC_MAXDEPTH) &&
return retval;
}
- int match_pathspec(const struct pathspec *ps,
+ int match_pathspec(const struct index_state *istate,
+ const struct pathspec *ps,
const char *name, int namelen,
int prefix, char *seen, int is_dir)
{
int positive, negative;
unsigned flags = is_dir ? DO_MATCH_DIRECTORY : 0;
- positive = do_match_pathspec(ps, name, namelen,
+ positive = do_match_pathspec(istate, ps, name, namelen,
prefix, seen, flags);
if (!(ps->magic & PATHSPEC_EXCLUDE) || !positive)
return positive;
- negative = do_match_pathspec(ps, name, namelen,
+ negative = do_match_pathspec(istate, ps, name, namelen,
prefix, seen,
flags | DO_MATCH_EXCLUDE);
return negative ? 0 : positive;
/**
* Check if a submodule is a superset of the pathspec
*/
- int submodule_path_match(const struct pathspec *ps,
+ int submodule_path_match(const struct index_state *istate,
+ const struct pathspec *ps,
const char *submodule_name,
char *seen)
{
- int matched = do_match_pathspec(ps, submodule_name,
+ int matched = do_match_pathspec(istate, ps, submodule_name,
strlen(submodule_name),
0, seen,
DO_MATCH_DIRECTORY |
if (found_dup)
continue;
- error("pathspec '%s' did not match any file(s) known to git.",
+ error(_("pathspec '%s' did not match any file(s) known to git"),
pathspec->items[num].original);
errors++;
}
dir->unmanaged_exclude_files++;
el = add_exclude_list(dir, EXC_FILE, fname);
if (add_excludes(fname, "", 0, el, NULL, oid_stat) < 0)
- die("cannot use %s as an exclude file", fname);
+ die(_("cannot use %s as an exclude file"), fname);
}
void add_excludes_from_file(struct dir_struct *dir, const char *fname)
return NULL;
if (!ident_in_untracked(dir->untracked)) {
- warning(_("Untracked cache is disabled on this system or location."));
+ warning(_("untracked cache is disabled on this system or location"));
return NULL;
}
return;
if (repo_read_index(&subrepo) < 0)
- die("index file corrupt in repo %s", subrepo.gitdir);
+ die(_("index file corrupt in repo %s"), subrepo.gitdir);
for (i = 0; i < subrepo.index->cache_nr; i++) {
const struct cache_entry *ce = subrepo.index->cache[i];
}
if (S_ISREG(mode)) {
struct strbuf strbuf = STRBUF_INIT;
- if (convert_to_working_tree(path, buf, size, &strbuf)) {
+ if (convert_to_working_tree(&the_index, path, buf, size, &strbuf)) {
free(buf);
size = strbuf.len;
buf = strbuf_detach(&strbuf, NULL);
if (mfi.clean &&
was_tracked_and_matches(o, path, &mfi.oid, mfi.mode) &&
!df_conflict_remains) {
+ int pos;
+ struct cache_entry *ce;
+
output(o, 3, _("Skipped %s (merged same as existing)"), path);
if (add_cacheinfo(o, mfi.mode, &mfi.oid, path,
0, (!o->call_depth && !is_dirty), 0))
return -1;
+ /*
+ * However, add_cacheinfo() will delete the old cache entry
+ * and add a new one. We need to copy over any skip_worktree
+ * flag to avoid making the file appear as if it were
+ * deleted by the user.
+ */
+ pos = index_name_pos(&o->orig_index, path, strlen(path));
+ ce = o->orig_index.cache[pos];
+ if (ce_skip_worktree(ce)) {
+ pos = index_name_pos(&the_index, path, strlen(path));
+ ce = the_index.cache[pos];
+ ce->ce_flags |= CE_SKIP_WORKTREE;
+ }
return mfi.clean;
}
if (ignore_submodules && S_ISGITLINK(ce->ce_mode))
continue;
- if (pathspec && !ce_path_match(ce, pathspec, seen))
+ if (pathspec && !ce_path_match(&the_index, ce, pathspec, seen))
filtered = 1;
if (ce_stage(ce)) {
/*
* Read the index file that is potentially unmerged into given
- * index_state, dropping any unmerged entries. Returns true if
- * the index is unmerged. Callers who want to refuse to work
- * from an unmerged state can call this and check its return value,
- * instead of calling read_cache().
+ * index_state, dropping any unmerged entries to stage #0 (potentially
+ * resulting in a path appearing as both a file and a directory in the
+ * index; the caller is responsible to clear out the extra entries
+ * before writing the index to a tree). Returns true if the index is
+ * unmerged. Callers who want to refuse to work from an unmerged
+ * state can call this and check its return value, instead of calling
+ * read_cache().
*/
int read_index_unmerged(struct index_state *istate)
{
new_ce->ce_flags = create_ce_flags(0) | CE_CONFLICTED;
new_ce->ce_namelen = len;
new_ce->ce_mode = ce->ce_mode;
- if (add_index_entry(istate, new_ce, 0))
+ if (add_index_entry(istate, new_ce, ADD_CACHE_SKIP_DFCHECK))
return error("%s: cannot drop to stage #0",
new_ce->name);
}
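An illustrative caller, in the spirit of existing read_cache_unmerged() users:

	if (read_index_unmerged(&the_index))
		die(_("you need to resolve your current index first"));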
case REPLAY_INTERACTIVE_REBASE:
return N_("rebase -i");
}
- die(_("Unknown action: %d"), opts->action);
+ die(_("unknown action: %d"), opts->action);
}
struct commit_message {
strbuf_addch(&buf, *(message++));
else
strbuf_addf(&buf, "'\\\\%c'", *(message++));
+ strbuf_addch(&buf, '\'');
res = write_message(buf.buf, buf.len, rebase_path_author_script(), 1);
strbuf_release(&buf);
return res;
const char *keys[] = {
"GIT_AUTHOR_NAME=", "GIT_AUTHOR_EMAIL=", "GIT_AUTHOR_DATE="
};
- char *in, *out, *eol;
- int i = 0, len;
+ struct strbuf out = STRBUF_INIT;
+ char *in, *eol;
+ const char *val[3];
+ int i = 0;
if (strbuf_read_file(buf, rebase_path_author_script(), 256) <= 0)
return NULL;
/* dequote values and construct ident line in-place */
- for (in = out = buf->buf; i < 3 && in - buf->buf < buf->len; i++) {
+ for (in = buf->buf; i < 3 && in - buf->buf < buf->len; i++) {
if (!skip_prefix(in, keys[i], (const char **)&in)) {
- warning("could not parse '%s' (looking for '%s'",
+			warning(_("could not parse '%s' (looking for '%s')"),
rebase_path_author_script(), keys[i]);
return NULL;
}
eol = strchrnul(in, '\n');
*eol = '\0';
- sq_dequote(in);
- len = strlen(in);
-
- if (i > 0) /* separate values by spaces */
- *(out++) = ' ';
- if (i == 1) /* email needs to be surrounded by <...> */
- *(out++) = '<';
- memmove(out, in, len);
- out += len;
- if (i == 1) /* email needs to be surrounded by <...> */
- *(out++) = '>';
+ if (!sq_dequote(in)) {
+ warning(_("bad quoting on %s value in '%s'"),
+ keys[i], rebase_path_author_script());
+ return NULL;
+ }
+ val[i] = in;
in = eol + 1;
}
if (i < 3) {
- warning("could not parse '%s' (looking for '%s')",
+ warning(_("could not parse '%s' (looking for '%s')"),
rebase_path_author_script(), keys[i]);
return NULL;
}
- buf->len = out - buf->buf;
+ /* validate date since fmt_ident() will die() on bad value */
+	if (parse_date(val[2], &out)) {
+ warning(_("invalid date format '%s' in '%s'"),
+ val[2], rebase_path_author_script());
+ strbuf_release(&out);
+ return NULL;
+ }
+
+ strbuf_reset(&out);
+ strbuf_addstr(&out, fmt_ident(val[0], val[1], val[2], 0));
+ strbuf_swap(buf, &out);
+ strbuf_release(&out);
return buf->buf;
}
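For reference, the author script parsed above contains three sq-quoted assignments (values here are made up), which fmt_ident() folds into one ident line:

	GIT_AUTHOR_NAME='A U Thor'
	GIT_AUTHOR_EMAIL='author@example.com'
	GIT_AUTHOR_DATE='@1234567890 +0000'

	=> A U Thor <author@example.com> 1234567890 +0000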
commit_list_insert(current_head, &parents);
}
- if (write_cache_as_tree(&tree, 0, NULL)) {
+ if (write_index_as_tree(&tree, &the_index, get_index_file(), 0, NULL)) {
res = error(_("git write-tree failed to write a tree"));
goto out;
}
{
if (command < TODO_COMMENT)
return todo_command_info[command].str;
- die("Unknown command: %d", command);
+ die(_("unknown command: %d"), command);
}
static char command_to_char(const enum todo_command command)
* that represents the "current" state for merge-recursive
* to work on.
*/
- if (write_cache_as_tree(&head, 0, NULL))
+ if (write_index_as_tree(&head, &the_index, get_index_file(), 0, NULL))
return error(_("your index file is unmerged."));
} else {
unborn = get_oid("HEAD", &head);
if (intend_to_amend())
return -1;
- fprintf(stderr, "You can amend the commit now, with\n"
- "\n"
- " git commit --amend %s\n"
- "\n"
- "Once you are satisfied with your changes, run\n"
- "\n"
- " git rebase --continue\n", gpg_sign_opt_quoted(opts));
+ fprintf(stderr,
+ _("You can amend the commit now, with\n"
+ "\n"
+ " git commit --amend %s\n"
+ "\n"
+ "Once you are satisfied with your changes, run\n"
+ "\n"
+ " git rebase --continue\n"),
+ gpg_sign_opt_quoted(opts));
} else if (exit_code)
- fprintf(stderr, "Could not apply %s... %.*s\n",
+ fprintf_ln(stderr, _("Could not apply %s... %.*s"),
short_commit_name(commit), subject_len, subject);
return exit_code;
struct object_id head_oid;
if (len == 1 && *name == '#')
- return error("Illegal label name: '%.*s'", len, name);
+ return error(_("illegal label name: '%.*s'"), len, name);
strbuf_addf(&ref_name, "refs/rewritten/%.*s", len, name);
strbuf_addf(&msg, "rebase -i (label) '%.*s'", len, name);
{
const char *todo_file = rebase_path_todo();
struct todo_list todo_list = TODO_LIST_INIT;
- struct todo_item *item;
struct strbuf *buf = &todo_list.buf;
size_t offset = 0, commands_len = strlen(commands);
- int i, first;
+ int i, insert;
if (strbuf_read_file(&todo_list.buf, todo_file, 0) < 0)
return error(_("could not read '%s'."), todo_file);
return error(_("unusable todo list: '%s'"), todo_file);
}
- first = 1;
- /* insert <commands> before every pick except the first one */
- for (item = todo_list.items, i = 0; i < todo_list.nr; i++, item++) {
- if (item->command == TODO_PICK && !first) {
- strbuf_insert(buf, item->offset_in_buf + offset,
- commands, commands_len);
+ /*
+ * Insert <commands> after every pick. Here, fixup/squash chains
+ * are considered part of the pick, so we insert the commands *after*
+ * those chains if there are any.
+ */
+ insert = -1;
+ for (i = 0; i < todo_list.nr; i++) {
+ enum todo_command command = todo_list.items[i].command;
+
+ if (insert >= 0) {
+ /* skip fixup/squash chains */
+ if (command == TODO_COMMENT)
+ continue;
+ else if (is_fixup(command)) {
+ insert = i + 1;
+ continue;
+ }
+ strbuf_insert(buf,
+ todo_list.items[insert].offset_in_buf +
+ offset, commands, commands_len);
offset += commands_len;
+ insert = -1;
}
- first = 0;
+
+ if (command == TODO_PICK || command == TODO_MERGE)
+ insert = i + 1;
}
- /* append final <commands> */
- strbuf_add(buf, commands, commands_len);
+ /* insert or append final <commands> */
+ if (insert >= 0 && insert < todo_list.nr)
+ strbuf_insert(buf, todo_list.items[insert].offset_in_buf +
+ offset, commands, commands_len);
+ else if (insert >= 0 || !offset)
+ strbuf_add(buf, commands, commands_len);
i = write_message(buf->buf, buf->len, todo_file, 0);
todo_list_release(&todo_list);
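The effect of the new rule on a hypothetical todo list, with 'exec make test' as the command to insert: the exec now lands after each pick together with its fixup chain, instead of before every pick but the first:

	pick aaaaaaa first commit
	fixup bbbbbbb fixup! first commit
	exec make test
	pick ccccccc second commit
	exec make test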
static void git_hash_unknown_init(git_hash_ctx *ctx)
{
- die("trying to init unknown hash");
+ BUG("trying to init unknown hash");
}
static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
{
- die("trying to update unknown hash");
+ BUG("trying to update unknown hash");
}
static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
{
- die("trying to finalize unknown hash");
+ BUG("trying to finalize unknown hash");
}
const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
/* Detect cases where alternate disappeared */
if (!is_directory(path->buf)) {
- error("object directory %s does not exist; "
- "check .git/objects/info/alternates.",
+ error(_("object directory %s does not exist; "
+ "check .git/objects/info/alternates"),
path->buf);
return 0;
}
strbuf_addstr(&pathbuf, entry);
if (strbuf_normalize_path(&pathbuf) < 0 && relative_base) {
- error("unable to normalize alternate object path: %s",
+ error(_("unable to normalize alternate object path: %s"),
pathbuf.buf);
strbuf_release(&pathbuf);
return -1;
return;
if (depth > 5) {
- error("%s: ignoring alternate object stores, nesting too deep.",
+ error(_("%s: ignoring alternate object stores, nesting too deep"),
relative_base);
return;
}
strbuf_add_absolute_path(&objdirbuf, r->objects->objectdir);
if (strbuf_normalize_path(&objdirbuf) < 0)
- die("unable to normalize object directory: %s",
+ die(_("unable to normalize object directory: %s"),
objdirbuf.buf);
while (*alt) {
hold_lock_file_for_update(&lock, alts, LOCK_DIE_ON_ERROR);
out = fdopen_lock_file(&lock, "w");
if (!out)
- die_errno("unable to fdopen alternates lockfile");
+ die_errno(_("unable to fdopen alternates lockfile"));
in = fopen(alts, "r");
if (in) {
fclose(in);
}
else if (errno != ENOENT)
- die_errno("unable to read alternates file");
+ die_errno(_("unable to read alternates file"));
if (found) {
rollback_lock_file(&lock);
} else {
fprintf_or_die(out, "%s\n", reference);
if (commit_lock_file(&lock))
- die_errno("unable to move new alternates file into place");
+ die_errno(_("unable to move new alternates file into place"));
if (the_repository->objects->alt_odb_tail)
link_alt_odb_entries(the_repository, reference,
'\n', NULL, 0);
limit = SIZE_MAX;
}
if (length > limit)
- die("attempting to mmap %"PRIuMAX" over limit %"PRIuMAX,
+ die(_("attempting to mmap %"PRIuMAX" over limit %"PRIuMAX),
(uintmax_t)length, (uintmax_t)limit);
}
{
void *ret = xmmap_gently(start, length, prot, flags, fd, offset);
if (ret == MAP_FAILED)
- die_errno("mmap failed");
+ die_errno(_("mmap failed"));
return ret;
}
*size = xsize_t(st.st_size);
if (!*size) {
/* mmap() is forbidden on empty files */
- error("object file %s is empty", path);
+ error(_("object file %s is empty"), path);
return NULL;
}
map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
}
if (status < 0)
- error("corrupt loose object '%s'", sha1_to_hex(sha1));
+ error(_("corrupt loose object '%s'"), sha1_to_hex(sha1));
else if (stream->avail_in)
- error("garbage at end of loose object '%s'",
+ error(_("garbage at end of loose object '%s'"),
sha1_to_hex(sha1));
free(buf);
return NULL;
if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE) && (type < 0))
type = 0;
else if (type < 0)
- die("invalid object type");
+ die(_("invalid object type"));
if (oi->typep)
*oi->typep = type;
*oi->disk_sizep = mapsize;
if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE)) {
if (unpack_sha1_header_to_strbuf(&stream, map, mapsize, hdr, sizeof(hdr), &hdrbuf) < 0)
- status = error("unable to unpack %s header with --allow-unknown-type",
+ status = error(_("unable to unpack %s header with --allow-unknown-type"),
sha1_to_hex(sha1));
} else if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
- status = error("unable to unpack %s header",
+ status = error(_("unable to unpack %s header"),
sha1_to_hex(sha1));
if (status < 0)
; /* Do nothing */
else if (hdrbuf.len) {
if ((status = parse_sha1_header_extended(hdrbuf.buf, oi, flags)) < 0)
- status = error("unable to parse %s header with --allow-unknown-type",
+ status = error(_("unable to parse %s header with --allow-unknown-type"),
sha1_to_hex(sha1));
} else if ((status = parse_sha1_header_extended(hdr, oi, flags)) < 0)
- status = error("unable to parse %s header", sha1_to_hex(sha1));
+ status = error(_("unable to parse %s header"), sha1_to_hex(sha1));
if (status >= 0 && oi->contentp) {
*oi->contentp = unpack_sha1_rest(&stream, hdr,
return data;
if (errno && errno != ENOENT)
- die_errno("failed to read object %s", oid_to_hex(oid));
+ die_errno(_("failed to read object %s"), oid_to_hex(oid));
/* die if we replaced an object with one that does not exist */
if (repl != oid)
- die("replacement %s not found for %s",
+ die(_("replacement %s not found for %s"),
oid_to_hex(repl), oid_to_hex(oid));
if (!stat_sha1_file(the_repository, repl->hash, &st, &path))
- die("loose object %s (stored in %s) is corrupt",
+ die(_("loose object %s (stored in %s) is corrupt"),
oid_to_hex(repl), path);
if ((p = has_packed_and_bad(repl->hash)) != NULL)
- die("packed object %s (stored in %s) is corrupt",
+ die(_("packed object %s (stored in %s) is corrupt"),
oid_to_hex(repl), p->pack_name);
return NULL;
unlink_or_warn(tmpfile);
if (ret) {
if (ret != EEXIST) {
- return error_errno("unable to write sha1 filename %s", filename);
+ return error_errno(_("unable to write sha1 filename %s"), filename);
}
/* FIXME!!! Collision check here ? */
}
out:
if (adjust_shared_perm(filename))
- return error("unable to set permission to '%s'", filename);
+ return error(_("unable to set permission to '%s'"), filename);
return 0;
}
static int write_buffer(int fd, const void *buf, size_t len)
{
if (write_in_full(fd, buf, len) < 0)
- return error_errno("file write error");
+ return error_errno(_("file write error"));
return 0;
}
if (fsync_object_files)
fsync_or_die(fd, "sha1 file");
if (close(fd) != 0)
- die_errno("error when closing sha1 file");
+ die_errno(_("error when closing sha1 file"));
}
/* Size of directory component, including the ending '/' */
fd = create_tmpfile(&tmp_file, filename.buf);
if (fd < 0) {
if (errno == EACCES)
- return error("insufficient permission for adding an object to repository database %s", get_object_directory());
+ return error(_("insufficient permission for adding an object to repository database %s"), get_object_directory());
else
- return error_errno("unable to create temporary file");
+ return error_errno(_("unable to create temporary file"));
}
/* Set it up */
ret = git_deflate(&stream, Z_FINISH);
the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
- die("unable to write sha1 file");
+ die(_("unable to write sha1 file"));
stream.next_out = compressed;
stream.avail_out = sizeof(compressed);
} while (ret == Z_OK);
if (ret != Z_STREAM_END)
- die("unable to deflate new object %s (%d)", oid_to_hex(oid),
+ die(_("unable to deflate new object %s (%d)"), oid_to_hex(oid),
ret);
ret = git_deflate_end_gently(&stream);
if (ret != Z_OK)
- die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
+ die(_("deflateEnd on object %s failed (%d)"), oid_to_hex(oid),
ret);
the_hash_algo->final_fn(parano_oid.hash, &c);
if (oidcmp(oid, ¶no_oid) != 0)
- die("confused by unstable object source data for %s",
+ die(_("confused by unstable object source data for %s"),
oid_to_hex(oid));
close_sha1_file(fd);
utb.actime = mtime;
utb.modtime = mtime;
if (utime(tmp_file.buf, &utb) < 0)
- warning_errno("failed utime() on %s", tmp_file.buf);
+ warning_errno(_("failed utime() on %s"), tmp_file.buf);
}
return finalize_object_file(tmp_file.buf, filename.buf);
return 0;
buf = read_object(oid->hash, &type, &len);
if (!buf)
- return error("cannot read sha1_file for %s", oid_to_hex(oid));
+ return error(_("cannot read sha1_file for %s"), oid_to_hex(oid));
hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
free(buf);
struct commit c;
memset(&c, 0, sizeof(c));
if (parse_commit_buffer(the_repository, &c, buf, size, 0))
- die("corrupt commit");
+ die(_("corrupt commit"));
}
static void check_tag(const void *buf, size_t size)
struct tag t;
memset(&t, 0, sizeof(t));
if (parse_tag_buffer(the_repository, &t, buf, size))
- die("corrupt tag");
+ die(_("corrupt tag"));
}
static int index_mem(struct object_id *oid, void *buf, size_t size,
struct strbuf sbuf = STRBUF_INIT;
assert(path);
- assert(would_convert_to_git_filter_fd(path));
+ assert(would_convert_to_git_filter_fd(&the_index, path));
convert_to_git_filter_fd(&the_index, path, fd, &sbuf,
get_conv_flags(flags));
char *buf = xmalloc(size);
ssize_t read_result = read_in_full(fd, buf, size);
if (read_result < 0)
- ret = error_errno("read error while indexing %s",
+ ret = error_errno(_("read error while indexing %s"),
path ? path : "<unknown>");
else if (read_result != size)
- ret = error("short read while indexing %s",
+ ret = error(_("short read while indexing %s"),
path ? path : "<unknown>");
else
ret = index_mem(oid, buf, size, type, path, flags);
* Call xsize_t() only when needed to avoid potentially unnecessary
* die() for large files.
*/
- if (type == OBJ_BLOB && path && would_convert_to_git_filter_fd(path))
+ if (type == OBJ_BLOB && path && would_convert_to_git_filter_fd(&the_index, path))
ret = index_stream_convert_blob(oid, fd, path, flags);
else if (!S_ISREG(st->st_mode))
ret = index_pipe(oid, fd, type, path, flags);
if (fd < 0)
return error_errno("open(\"%s\")", path);
if (index_fd(oid, fd, st, OBJ_BLOB, path, flags) < 0)
- return error("%s: failed to insert into database",
+ return error(_("%s: failed to insert into database"),
path);
break;
case S_IFLNK:
if (!(flags & HASH_WRITE_OBJECT))
hash_object_file(sb.buf, sb.len, blob_type, oid);
else if (write_object_file(sb.buf, sb.len, blob_type, oid))
- rc = error("%s: failed to insert into database", path);
+ rc = error(_("%s: failed to insert into database"), path);
strbuf_release(&sb);
break;
case S_IFDIR:
return resolve_gitlink_ref(path, "HEAD", oid);
default:
- return error("%s: unsupported file type", path);
+ return error(_("%s: unsupported file type"), path);
}
return rc;
}
{
enum object_type type = oid_object_info(the_repository, oid, NULL);
if (type < 0)
- die("%s is not a valid object", oid_to_hex(oid));
+ die(_("%s is not a valid object"), oid_to_hex(oid));
if (type != expect)
- die("%s is not a valid '%s' object", oid_to_hex(oid),
+ die(_("%s is not a valid '%s' object"), oid_to_hex(oid),
type_name(expect));
}
dir = opendir(path->buf);
if (!dir) {
if (errno != ENOENT)
- r = error_errno("unable to open %s", path->buf);
+ r = error_errno(_("unable to open %s"), path->buf);
strbuf_setlen(path, origlen);
return r;
}
return r;
}
-int for_each_loose_object(each_loose_object_fn cb, void *data, unsigned flags)
+int for_each_loose_object(each_loose_object_fn cb, void *data,
+ enum for_each_object_flags flags)
{
struct loose_alt_odb_data alt;
int r;
git_inflate_end(stream);
if (status != Z_STREAM_END) {
- error("corrupt loose object '%s'", sha1_to_hex(expected_sha1));
+ error(_("corrupt loose object '%s'"), sha1_to_hex(expected_sha1));
return -1;
}
if (stream->avail_in) {
- error("garbage at end of loose object '%s'",
+ error(_("garbage at end of loose object '%s'"),
sha1_to_hex(expected_sha1));
return -1;
}
the_hash_algo->final_fn(real_sha1, &c);
if (hashcmp(expected_sha1, real_sha1)) {
- error("sha1 mismatch for %s (expected %s)", path,
+ error(_("sha1 mismatch for %s (expected %s)"), path,
sha1_to_hex(expected_sha1));
return -1;
}
map = map_sha1_file_1(the_repository, path, NULL, &mapsize);
if (!map) {
- error_errno("unable to mmap %s", path);
+ error_errno(_("unable to mmap %s"), path);
goto out;
}
if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) {
- error("unable to unpack header of %s", path);
+ error(_("unable to unpack header of %s"), path);
goto out;
}
*type = parse_sha1_header(hdr, size);
if (*type < 0) {
- error("unable to parse header of %s", path);
+ error(_("unable to parse header of %s"), path);
git_inflate_end(&stream);
goto out;
}
} else {
*contents = unpack_sha1_rest(&stream, hdr, *size, expected_oid->hash);
if (!*contents) {
- error("unable to unpack contents of %s", path);
+ error(_("unable to unpack contents of %s"), path);
git_inflate_end(&stream);
goto out;
}
if (check_object_signature(expected_oid, *contents,
*size, type_name(*type))) {
- error("sha1 mismatch for %s (expected %s)", path,
+ error(_("sha1 mismatch for %s (expected %s)"), path,
oid_to_hex(expected_oid));
free(*contents);
goto out;