Avoid using identifiers that clash with C++ keywords. Even though
it is not a goal to compile Git with C++ compilers, changes like
these help the use of code analysis tools that target C++ on our
codebase.
* bw/c-plus-plus: (37 commits)
replace: rename 'new' variables
trailer: rename 'template' variables
tempfile: rename 'template' variables
wrapper: rename 'template' variables
environment: rename 'namespace' variables
diff: rename 'template' variables
environment: rename 'template' variables
init-db: rename 'template' variables
unpack-trees: rename 'new' variables
trailer: rename 'new' variables
submodule: rename 'new' variables
split-index: rename 'new' variables
remote: rename 'new' variables
ref-filter: rename 'new' variables
read-cache: rename 'new' variables
line-log: rename 'new' variables
imap-send: rename 'new' variables
http: rename 'new' variables
entry: rename 'new' variables
diffcore-delta: rename 'new' variables
...
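A minimal sketch (not code from the series; the struct and function
names here are made up) of the kind of rename these commits perform:
with the parameter called "new" the file is valid C but unparseable by
C++-based tooling, because "new" is a C++ keyword; calling it
"new_entry" changes nothing for C compilers while letting such tools
read it.

#include <stdio.h>

struct entry {
	struct entry *next;
	int value;
};

/*
 * In C, "new" is an ordinary identifier; C++ front ends reject it
 * because "new" is a keyword there.  Renaming the parameter to
 * "new_entry" keeps the function valid C and lets C++-targeting
 * analyzers parse it as well.
 */
static void prepend_entry(struct entry **list, struct entry *new_entry /* was "new" */)
{
	new_entry->next = *list;
	*list = new_entry;
}

int main(void)
{
	struct entry a = { NULL, 1 }, b = { NULL, 2 };
	struct entry *list = NULL;

	prepend_entry(&list, &a);
	prepend_entry(&list, &b);
	printf("head value: %d\n", list->value); /* prints 2 */
	return 0;
}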
}
free(another);
} else {
- if (!starts_with(line, "/dev/null\n"))
+ if (!is_dev_null(line))
return error(_("git apply: bad git-diff - expected /dev/null on line %d"), state->linenr);
}
static int read_old_data(struct stat *st, struct patch *patch,
const char *path, struct strbuf *buf)
{
- enum safe_crlf safe_crlf = patch->crlf_in_old ?
- SAFE_CRLF_KEEP_CRLF : SAFE_CRLF_RENORMALIZE;
+ int conv_flags = patch->crlf_in_old ?
+ CONV_EOL_KEEP_CRLF : CONV_EOL_RENORMALIZE;
switch (st->st_mode & S_IFMT) {
case S_IFLNK:
if (strbuf_readlink(buf, path, st->st_size) < 0)
* should never look at the index when explicit crlf option
* is given.
*/
- convert_to_git(NULL, path, buf->buf, buf->len, buf, safe_crlf);
+ convert_to_git(NULL, path, buf->buf, buf->len, buf, conv_flags);
return 0;
default:
return -1;
size_t len, size_t postlen)
{
int i, ctx, reduced;
- char *new, *old, *fixed;
+ char *new_buf, *old_buf, *fixed;
struct image fixed_preimage;
/*
* We trust the caller to tell us if the update can be done
* in place (postlen==0) or not.
*/
- old = postimage->buf;
+ old_buf = postimage->buf;
if (postlen)
- new = postimage->buf = xmalloc(postlen);
+ new_buf = postimage->buf = xmalloc(postlen);
else
- new = old;
+ new_buf = old_buf;
fixed = preimage->buf;
for (i = reduced = ctx = 0; i < postimage->nr; i++) {
size_t l_len = postimage->line[i].len;
if (!(postimage->line[i].flag & LINE_COMMON)) {
/* an added line -- no counterparts in preimage */
- memmove(new, old, l_len);
- old += l_len;
- new += l_len;
+ memmove(new_buf, old_buf, l_len);
+ old_buf += l_len;
+ new_buf += l_len;
continue;
}
/* a common context -- skip it in the original postimage */
- old += l_len;
+ old_buf += l_len;
/* and find the corresponding one in the fixed preimage */
while (ctx < preimage->nr &&
/* and copy it in, while fixing the line length */
l_len = preimage->line[ctx].len;
- memcpy(new, fixed, l_len);
- new += l_len;
+ memcpy(new_buf, fixed, l_len);
+ new_buf += l_len;
fixed += l_len;
postimage->line[i].len = l_len;
ctx++;
}
if (postlen
- ? postlen < new - postimage->buf
- : postimage->len < new - postimage->buf)
+ ? postlen < new_buf - postimage->buf
+ : postimage->len < new_buf - postimage->buf)
die("BUG: caller miscounted postlen: asked %d, orig = %d, used = %d",
- (int)postlen, (int) postimage->len, (int)(new - postimage->buf));
+ (int)postlen, (int) postimage->len, (int)(new_buf - postimage->buf));
/* Fix the length of the whole thing */
- postimage->len = new - postimage->buf;
+ postimage->len = new_buf - postimage->buf;
postimage->nr -= reduced;
}
static int line_by_line_fuzzy_match(struct image *img,
struct image *preimage,
struct image *postimage,
- unsigned long try,
- int try_lno,
+ unsigned long current,
+ int current_lno,
int preimage_limit)
{
int i;
for (i = 0; i < preimage_limit; i++) {
size_t prelen = preimage->line[i].len;
- size_t imglen = img->line[try_lno+i].len;
+ size_t imglen = img->line[current_lno+i].len;
- if (!fuzzy_matchlines(img->buf + try + imgoff, imglen,
+ if (!fuzzy_matchlines(img->buf + current + imgoff, imglen,
preimage->buf + preoff, prelen))
return 0;
if (preimage->line[i].flag & LINE_COMMON)
*/
extra_chars = preimage_end - preimage_eof;
strbuf_init(&fixed, imgoff + extra_chars);
- strbuf_add(&fixed, img->buf + try, imgoff);
+ strbuf_add(&fixed, img->buf + current, imgoff);
strbuf_add(&fixed, preimage_eof, extra_chars);
fixed_buf = strbuf_detach(&fixed, &fixed_len);
update_pre_post_images(preimage, postimage,
struct image *img,
struct image *preimage,
struct image *postimage,
- unsigned long try,
- int try_lno,
+ unsigned long current,
+ int current_lno,
unsigned ws_rule,
int match_beginning, int match_end)
{
size_t fixed_len, postlen;
int preimage_limit;
- if (preimage->nr + try_lno <= img->nr) {
+ if (preimage->nr + current_lno <= img->nr) {
/*
* The hunk falls within the boundaries of img.
*/
preimage_limit = preimage->nr;
- if (match_end && (preimage->nr + try_lno != img->nr))
+ if (match_end && (preimage->nr + current_lno != img->nr))
return 0;
} else if (state->ws_error_action == correct_ws_error &&
(ws_rule & WS_BLANK_AT_EOF)) {
* match with img, and the remainder of the preimage
* must be blank.
*/
- preimage_limit = img->nr - try_lno;
+ preimage_limit = img->nr - current_lno;
} else {
/*
* The hunk extends beyond the end of the img and
return 0;
}
- if (match_beginning && try_lno)
+ if (match_beginning && current_lno)
return 0;
/* Quick hash check */
for (i = 0; i < preimage_limit; i++)
- if ((img->line[try_lno + i].flag & LINE_PATCHED) ||
- (preimage->line[i].hash != img->line[try_lno + i].hash))
+ if ((img->line[current_lno + i].flag & LINE_PATCHED) ||
+ (preimage->line[i].hash != img->line[current_lno + i].hash))
return 0;
if (preimage_limit == preimage->nr) {
/*
* Do we have an exact match? If we were told to match
- * at the end, size must be exactly at try+fragsize,
- * otherwise try+fragsize must be still within the preimage,
+ * at the end, size must be exactly at current+fragsize,
+ * otherwise current+fragsize must be still within the preimage,
* and either case, the old piece should match the preimage
* exactly.
*/
if ((match_end
- ? (try + preimage->len == img->len)
- : (try + preimage->len <= img->len)) &&
- !memcmp(img->buf + try, preimage->buf, preimage->len))
+ ? (current + preimage->len == img->len)
+ : (current + preimage->len <= img->len)) &&
+ !memcmp(img->buf + current, preimage->buf, preimage->len))
return 1;
} else {
/*
*/
if (state->ws_ignore_action == ignore_ws_change)
return line_by_line_fuzzy_match(img, preimage, postimage,
- try, try_lno, preimage_limit);
+ current, current_lno, preimage_limit);
if (state->ws_error_action != correct_ws_error)
return 0;
*/
strbuf_init(&fixed, preimage->len + 1);
orig = preimage->buf;
- target = img->buf + try;
+ target = img->buf + current;
for (i = 0; i < preimage_limit; i++) {
size_t oldlen = preimage->line[i].len;
- size_t tgtlen = img->line[try_lno + i].len;
+ size_t tgtlen = img->line[current_lno + i].len;
size_t fixstart = fixed.len;
struct strbuf tgtfix;
int match;
int match_beginning, int match_end)
{
int i;
- unsigned long backwards, forwards, try;
- int backwards_lno, forwards_lno, try_lno;
+ unsigned long backwards, forwards, current;
+ int backwards_lno, forwards_lno, current_lno;
/*
* If match_beginning or match_end is specified, there is no
if ((size_t) line > img->nr)
line = img->nr;
- try = 0;
+ current = 0;
for (i = 0; i < line; i++)
- try += img->line[i].len;
+ current += img->line[i].len;
/*
* There's probably some smart way to do this, but I'll leave
* that to the smart and beautiful people. I'm simple and stupid.
*/
- backwards = try;
+ backwards = current;
backwards_lno = line;
- forwards = try;
+ forwards = current;
forwards_lno = line;
- try_lno = line;
+ current_lno = line;
for (i = 0; ; i++) {
if (match_fragment(state, img, preimage, postimage,
- try, try_lno, ws_rule,
+ current, current_lno, ws_rule,
match_beginning, match_end))
- return try_lno;
+ return current_lno;
again:
if (backwards_lno == 0 && forwards_lno == img->nr)
}
backwards_lno--;
backwards -= img->line[backwards_lno].len;
- try = backwards;
- try_lno = backwards_lno;
+ current = backwards;
+ current_lno = backwards_lno;
} else {
if (forwards_lno == img->nr) {
i++;
}
forwards += img->line[forwards_lno].len;
forwards_lno++;
- try = forwards;
- try_lno = forwards_lno;
+ current = forwards;
+ current_lno = forwards_lno;
}
}
* See if the old one matches what the patch
* applies to.
*/
- hash_sha1_file(img->buf, img->len, blob_type, oid.hash);
+ hash_object_file(img->buf, img->len, blob_type, &oid);
if (strcmp(oid_to_hex(&oid), patch->old_sha1_prefix))
return error(_("the patch applies to '%s' (%s), "
"which does not match the "
name);
/* verify that the result matches */
- hash_sha1_file(img->buf, img->len, blob_type, oid.hash);
+ hash_object_file(img->buf, img->len, blob_type, &oid);
if (strcmp(oid_to_hex(&oid), patch->new_sha1_prefix))
return error(_("binary patch to '%s' creates incorrect result (expecting %s, got %s)"),
name, patch->new_sha1_prefix, oid_to_hex(&oid));
/* Preimage the patch was prepared for */
if (patch->is_new)
- write_sha1_file("", 0, blob_type, pre_oid.hash);
+ write_object_file("", 0, blob_type, &pre_oid);
else if (get_oid(patch->old_sha1_prefix, &pre_oid) ||
read_blob_object(&buf, &pre_oid, patch->old_mode))
return error(_("repository lacks the necessary blob to fall back on 3-way merge."));
return -1;
}
/* post_oid is theirs */
- write_sha1_file(tmp_image.buf, tmp_image.len, blob_type, post_oid.hash);
+ write_object_file(tmp_image.buf, tmp_image.len, blob_type, &post_oid);
clear_image(&tmp_image);
/* our_oid is ours */
return error(_("cannot read the current contents of '%s'"),
patch->old_name);
}
- write_sha1_file(tmp_image.buf, tmp_image.len, blob_type, our_oid.hash);
+ write_object_file(tmp_image.buf, tmp_image.len, blob_type, &our_oid);
clear_image(&tmp_image);
/* in-core three-way merge between post and our using pre as base */
static void show_rename_copy(struct patch *p)
{
const char *renamecopy = p->is_rename ? "rename" : "copy";
- const char *old, *new;
+ const char *old_name, *new_name;
/* Find common prefix */
- old = p->old_name;
- new = p->new_name;
+ old_name = p->old_name;
+ new_name = p->new_name;
while (1) {
const char *slash_old, *slash_new;
- slash_old = strchr(old, '/');
- slash_new = strchr(new, '/');
+ slash_old = strchr(old_name, '/');
+ slash_new = strchr(new_name, '/');
if (!slash_old ||
!slash_new ||
- slash_old - old != slash_new - new ||
- memcmp(old, new, slash_new - new))
+ slash_old - old_name != slash_new - new_name ||
+ memcmp(old_name, new_name, slash_new - new_name))
break;
- old = slash_old + 1;
- new = slash_new + 1;
+ old_name = slash_old + 1;
+ new_name = slash_new + 1;
}
- /* p->old_name thru old is the common prefix, and old and new
+ /* p->old_name thru old_name is the common prefix, and old_name and new_name
* through the end of names are renames
*/
- if (old != p->old_name)
+ if (old_name != p->old_name)
printf(" %s %.*s{%s => %s} (%d%%)\n", renamecopy,
- (int)(old - p->old_name), p->old_name,
- old, new, p->score);
+ (int)(old_name - p->old_name), p->old_name,
+ old_name, new_name, p->score);
else
printf(" %s %s => %s (%d%%)\n", renamecopy,
p->old_name, p->new_name, p->score);
}
fill_stat_cache_info(ce, &st);
}
- if (write_sha1_file(buf, size, blob_type, ce->oid.hash) < 0) {
+ if (write_object_file(buf, size, blob_type, &ce->oid) < 0) {
free(ce);
return error(_("unable to create backing store "
"for newly created file %s"), path);
convert_to_git(&the_index, path, buf.buf, buf.len, &buf, 0);
origin->file.ptr = buf.buf;
origin->file.size = buf.len;
- pretend_sha1_file(buf.buf, buf.len, OBJ_BLOB, origin->blob_oid.hash);
+ pretend_object_file(buf.buf, buf.len, OBJ_BLOB, &origin->blob_oid);
/*
* Read the current index, replace the path entry with
}
/*
- * best_so_far[] and this[] are both a split of an existing blame_entry
- * that passes blame to the parent. Maintain best_so_far the best split
- * so far, by comparing this and best_so_far and copying this into
+ * best_so_far[] and potential[] are both a split of an existing blame_entry
+ * that passes blame to the parent. Maintain best_so_far the best split so
+ * far, by comparing potential and best_so_far and copying potential into
* best_so_far as needed.
*/
static void copy_split_if_better(struct blame_scoreboard *sb,
struct blame_entry *best_so_far,
- struct blame_entry *this)
+ struct blame_entry *potential)
{
int i;
- if (!this[1].suspect)
+ if (!potential[1].suspect)
return;
if (best_so_far[1].suspect) {
- if (blame_entry_score(sb, &this[1]) < blame_entry_score(sb, &best_so_far[1]))
+ if (blame_entry_score(sb, &potential[1]) <
+ blame_entry_score(sb, &best_so_far[1]))
return;
}
for (i = 0; i < 3; i++)
- blame_origin_incref(this[i].suspect);
+ blame_origin_incref(potential[i].suspect);
decref_split(best_so_far);
- memcpy(best_so_far, this, sizeof(struct blame_entry [3]));
+ memcpy(best_so_far, potential, sizeof(struct blame_entry[3]));
}
/*
if (ent->num_lines <= tlno)
return;
if (tlno < same) {
- struct blame_entry this[3];
+ struct blame_entry potential[3];
tlno += ent->s_lno;
same += ent->s_lno;
- split_overlap(this, ent, tlno, plno, same, parent);
- copy_split_if_better(sb, split, this);
- decref_split(this);
+ split_overlap(potential, ent, tlno, plno, same, parent);
+ copy_split_if_better(sb, split, potential);
+ decref_split(potential);
}
}
struct diff_filepair *p = diff_queued_diff.queue[i];
struct blame_origin *norigin;
mmfile_t file_p;
- struct blame_entry this[3];
+ struct blame_entry potential[3];
if (!DIFF_FILE_VALID(p->one))
continue; /* does not exist in parent */
for (j = 0; j < num_ents; j++) {
find_copy_in_blob(sb, blame_list[j].ent,
- norigin, this, &file_p);
+ norigin, potential, &file_p);
copy_split_if_better(sb, blame_list[j].split,
- this);
- decref_split(this);
+ potential);
+ decref_split(potential);
}
blame_origin_decref(norigin);
}
buf = NULL;
switch (opt) {
case 't':
- oi.typename = &sb;
+ oi.type_name = &sb;
if (sha1_object_info_extended(oid.hash, &oi, flags) < 0)
die("git cat-file: could not get object info");
if (sb.len) {
if (data->mark_query)
data->info.typep = &data->type;
else
- strbuf_addstr(sb, typename(data->type));
+ strbuf_addstr(sb, type_name(data->type));
} else if (is_atom("objectsize", atom, len)) {
if (data->mark_query)
data->info.sizep = &data->size;
for_each_loose_object(batch_loose_object, &sa, 0);
for_each_packed_object(batch_packed_object, &sa, 0);
+ if (repository_format_partial_clone)
+ warning("This repository has extensions.partialClone set. Some objects may not be loaded.");
cb.opt = opt;
cb.expand = &data;
struct tree *source_tree;
};
- static int post_checkout_hook(struct commit *old, struct commit *new,
+ static int post_checkout_hook(struct commit *old_commit, struct commit *new_commit,
int changed)
{
return run_hook_le(NULL, "post-checkout",
- oid_to_hex(old ? &old->object.oid : &null_oid),
- oid_to_hex(new ? &new->object.oid : &null_oid),
+ oid_to_hex(old_commit ? &old_commit->object.oid : &null_oid),
+ oid_to_hex(new_commit ? &new_commit->object.oid : &null_oid),
changed ? "1" : "0", NULL);
- /* "new" can be NULL when checking out from the index before
+ /* "new_commit" can be NULL when checking out from the index before
a commit exists. */
}
* (it also writes the merge result to the object database even
* when it may contain conflicts).
*/
- if (write_sha1_file(result_buf.ptr, result_buf.size,
- blob_type, oid.hash))
+ if (write_object_file(result_buf.ptr, result_buf.size, blob_type, &oid))
die(_("Unable to add merge result for '%s'"), path);
free(result_buf.ptr);
ce = make_cache_entry(mode, oid.hash, path, 2, 0);
}
static int merge_working_tree(const struct checkout_opts *opts,
- struct branch_info *old,
- struct branch_info *new,
+ struct branch_info *old_branch_info,
+ struct branch_info *new_branch_info,
int *writeout_error)
{
int ret;
resolve_undo_clear();
if (opts->force) {
- ret = reset_tree(new->commit->tree, opts, 1, writeout_error);
+ ret = reset_tree(new_branch_info->commit->tree, opts, 1, writeout_error);
if (ret)
return ret;
} else {
topts.initial_checkout = is_cache_unborn();
topts.update = 1;
topts.merge = 1;
- topts.gently = opts->merge && old->commit;
+ topts.gently = opts->merge && old_branch_info->commit;
topts.verbose_update = opts->show_progress;
topts.fn = twoway_merge;
if (opts->overwrite_ignore) {
topts.dir->flags |= DIR_SHOW_IGNORED;
setup_standard_excludes(topts.dir);
}
- tree = parse_tree_indirect(old->commit ?
- &old->commit->object.oid :
+ tree = parse_tree_indirect(old_branch_info->commit ?
+ &old_branch_info->commit->object.oid :
the_hash_algo->empty_tree);
init_tree_desc(&trees[0], tree->buffer, tree->size);
- tree = parse_tree_indirect(&new->commit->object.oid);
+ tree = parse_tree_indirect(&new_branch_info->commit->object.oid);
init_tree_desc(&trees[1], tree->buffer, tree->size);
ret = unpack_trees(2, trees, &topts);
return 1;
/*
- * Without old->commit, the below is the same as
+ * Without old_branch_info->commit, the below is the same as
* the two-tree unpack we already tried and failed.
*/
- if (!old->commit)
+ if (!old_branch_info->commit)
return 1;
/* Do more real merge */
o.verbosity = 0;
work = write_tree_from_memory(&o);
- ret = reset_tree(new->commit->tree, opts, 1,
+ ret = reset_tree(new_branch_info->commit->tree, opts, 1,
writeout_error);
if (ret)
return ret;
- o.ancestor = old->name;
- o.branch1 = new->name;
+ o.ancestor = old_branch_info->name;
+ o.branch1 = new_branch_info->name;
o.branch2 = "local";
- ret = merge_trees(&o, new->commit->tree, work,
- old->commit->tree, &result);
+ ret = merge_trees(&o, new_branch_info->commit->tree, work,
+ old_branch_info->commit->tree, &result);
if (ret < 0)
exit(128);
- ret = reset_tree(new->commit->tree, opts, 0,
+ ret = reset_tree(new_branch_info->commit->tree, opts, 0,
writeout_error);
strbuf_release(&o.obuf);
if (ret)
die(_("unable to write new index file"));
if (!opts->force && !opts->quiet)
- show_local_changes(&new->commit->object, &opts->diff_options);
+ show_local_changes(&new_branch_info->commit->object, &opts->diff_options);
return 0;
}
- static void report_tracking(struct branch_info *new)
+ static void report_tracking(struct branch_info *new_branch_info)
{
struct strbuf sb = STRBUF_INIT;
- struct branch *branch = branch_get(new->name);
+ struct branch *branch = branch_get(new_branch_info->name);
if (!format_tracking_info(branch, &sb))
return;
}
static void update_refs_for_switch(const struct checkout_opts *opts,
- struct branch_info *old,
- struct branch_info *new)
+ struct branch_info *old_branch_info,
+ struct branch_info *new_branch_info)
{
struct strbuf msg = STRBUF_INIT;
const char *old_desc, *reflog_msg;
free(refname);
}
else
- create_branch(opts->new_branch, new->name,
+ create_branch(opts->new_branch, new_branch_info->name,
opts->new_branch_force ? 1 : 0,
opts->new_branch_force ? 1 : 0,
opts->new_branch_log,
opts->quiet,
opts->track);
- new->name = opts->new_branch;
- setup_branch_path(new);
+ new_branch_info->name = opts->new_branch;
+ setup_branch_path(new_branch_info);
}
- old_desc = old->name;
- if (!old_desc && old->commit)
- old_desc = oid_to_hex(&old->commit->object.oid);
+ old_desc = old_branch_info->name;
+ if (!old_desc && old_branch_info->commit)
+ old_desc = oid_to_hex(&old_branch_info->commit->object.oid);
reflog_msg = getenv("GIT_REFLOG_ACTION");
if (!reflog_msg)
strbuf_addf(&msg, "checkout: moving from %s to %s",
- old_desc ? old_desc : "(invalid)", new->name);
+ old_desc ? old_desc : "(invalid)", new_branch_info->name);
else
strbuf_insert(&msg, 0, reflog_msg, strlen(reflog_msg));
- if (!strcmp(new->name, "HEAD") && !new->path && !opts->force_detach) {
+ if (!strcmp(new_branch_info->name, "HEAD") && !new_branch_info->path && !opts->force_detach) {
/* Nothing to do. */
- } else if (opts->force_detach || !new->path) { /* No longer on any branch. */
- update_ref(msg.buf, "HEAD", &new->commit->object.oid, NULL,
+ } else if (opts->force_detach || !new_branch_info->path) { /* No longer on any branch. */
+ update_ref(msg.buf, "HEAD", &new_branch_info->commit->object.oid, NULL,
REF_NO_DEREF, UPDATE_REFS_DIE_ON_ERR);
if (!opts->quiet) {
- if (old->path &&
+ if (old_branch_info->path &&
advice_detached_head && !opts->force_detach)
- detach_advice(new->name);
- describe_detached_head(_("HEAD is now at"), new->commit);
+ detach_advice(new_branch_info->name);
+ describe_detached_head(_("HEAD is now at"), new_branch_info->commit);
}
- } else if (new->path) { /* Switch branches. */
- if (create_symref("HEAD", new->path, msg.buf) < 0)
+ } else if (new_branch_info->path) { /* Switch branches. */
+ if (create_symref("HEAD", new_branch_info->path, msg.buf) < 0)
die(_("unable to update HEAD"));
if (!opts->quiet) {
- if (old->path && !strcmp(new->path, old->path)) {
+ if (old_branch_info->path && !strcmp(new_branch_info->path, old_branch_info->path)) {
if (opts->new_branch_force)
fprintf(stderr, _("Reset branch '%s'\n"),
- new->name);
+ new_branch_info->name);
else
fprintf(stderr, _("Already on '%s'\n"),
- new->name);
+ new_branch_info->name);
} else if (opts->new_branch) {
if (opts->branch_exists)
- fprintf(stderr, _("Switched to and reset branch '%s'\n"), new->name);
+ fprintf(stderr, _("Switched to and reset branch '%s'\n"), new_branch_info->name);
else
- fprintf(stderr, _("Switched to a new branch '%s'\n"), new->name);
+ fprintf(stderr, _("Switched to a new branch '%s'\n"), new_branch_info->name);
} else {
fprintf(stderr, _("Switched to branch '%s'\n"),
- new->name);
+ new_branch_info->name);
}
}
- if (old->path && old->name) {
- if (!ref_exists(old->path) && reflog_exists(old->path))
- delete_reflog(old->path);
+ if (old_branch_info->path && old_branch_info->name) {
+ if (!ref_exists(old_branch_info->path) && reflog_exists(old_branch_info->path))
+ delete_reflog(old_branch_info->path);
}
}
remove_branch_state();
strbuf_release(&msg);
if (!opts->quiet &&
- (new->path || (!opts->force_detach && !strcmp(new->name, "HEAD"))))
- report_tracking(new);
+ (new_branch_info->path || (!opts->force_detach && !strcmp(new_branch_info->name, "HEAD"))))
+ report_tracking(new_branch_info);
}
static int add_pending_uninteresting_ref(const char *refname,
* HEAD. If it is not reachable from any ref, this is the last chance
* for the user to do so without resorting to reflog.
*/
- static void orphaned_commit_warning(struct commit *old, struct commit *new)
+ static void orphaned_commit_warning(struct commit *old_commit, struct commit *new_commit)
{
struct rev_info revs;
- struct object *object = &old->object;
+ struct object *object = &old_commit->object;
init_revisions(&revs, NULL);
setup_revisions(0, NULL, &revs, NULL);
add_pending_object(&revs, object, oid_to_hex(&object->oid));
for_each_ref(add_pending_uninteresting_ref, &revs);
- add_pending_oid(&revs, "HEAD", &new->object.oid, UNINTERESTING);
+ add_pending_oid(&revs, "HEAD", &new_commit->object.oid, UNINTERESTING);
if (prepare_revision_walk(&revs))
die(_("internal error in revision walk"));
- if (!(old->object.flags & UNINTERESTING))
- suggest_reattach(old, &revs);
+ if (!(old_commit->object.flags & UNINTERESTING))
+ suggest_reattach(old_commit, &revs);
else
- describe_detached_head(_("Previous HEAD position was"), old);
+ describe_detached_head(_("Previous HEAD position was"), old_commit);
/* Clean up objects used, as they will be reused. */
clear_commit_marks_all(ALL_REV_FLAGS);
}
static int switch_branches(const struct checkout_opts *opts,
- struct branch_info *new)
+ struct branch_info *new_branch_info)
{
int ret = 0;
- struct branch_info old;
+ struct branch_info old_branch_info;
void *path_to_free;
struct object_id rev;
int flag, writeout_error = 0;
- memset(&old, 0, sizeof(old));
- old.path = path_to_free = resolve_refdup("HEAD", 0, &rev, &flag);
- if (old.path)
- old.commit = lookup_commit_reference_gently(&rev, 1);
+ memset(&old_branch_info, 0, sizeof(old_branch_info));
+ old_branch_info.path = path_to_free = resolve_refdup("HEAD", 0, &rev, &flag);
+ if (old_branch_info.path)
+ old_branch_info.commit = lookup_commit_reference_gently(&rev, 1);
if (!(flag & REF_ISSYMREF))
- old.path = NULL;
+ old_branch_info.path = NULL;
- if (old.path)
- skip_prefix(old.path, "refs/heads/", &old.name);
+ if (old_branch_info.path)
+ skip_prefix(old_branch_info.path, "refs/heads/", &old_branch_info.name);
- if (!new->name) {
- new->name = "HEAD";
- new->commit = old.commit;
- if (!new->commit)
+ if (!new_branch_info->name) {
+ new_branch_info->name = "HEAD";
+ new_branch_info->commit = old_branch_info.commit;
+ if (!new_branch_info->commit)
die(_("You are on a branch yet to be born"));
- parse_commit_or_die(new->commit);
+ parse_commit_or_die(new_branch_info->commit);
}
- ret = merge_working_tree(opts, &old, new, &writeout_error);
+ ret = merge_working_tree(opts, &old_branch_info, new_branch_info, &writeout_error);
if (ret) {
free(path_to_free);
return ret;
}
- if (!opts->quiet && !old.path && old.commit && new->commit != old.commit)
- orphaned_commit_warning(old.commit, new->commit);
+ if (!opts->quiet && !old_branch_info.path && old_branch_info.commit && new_branch_info->commit != old_branch_info.commit)
+ orphaned_commit_warning(old_branch_info.commit, new_branch_info->commit);
- update_refs_for_switch(opts, &old, new);
+ update_refs_for_switch(opts, &old_branch_info, new_branch_info);
- ret = post_checkout_hook(old.commit, new->commit, 1);
+ ret = post_checkout_hook(old_branch_info.commit, new_branch_info->commit, 1);
free(path_to_free);
return ret || writeout_error;
}
static int parse_branchname_arg(int argc, const char **argv,
int dwim_new_local_branch_ok,
- struct branch_info *new,
+ struct branch_info *new_branch_info,
struct checkout_opts *opts,
struct object_id *rev)
{
argv++;
argc--;
- new->name = arg;
- setup_branch_path(new);
+ new_branch_info->name = arg;
+ setup_branch_path(new_branch_info);
- if (!check_refname_format(new->path, 0) &&
- !read_ref(new->path, &branch_rev))
+ if (!check_refname_format(new_branch_info->path, 0) &&
+ !read_ref(new_branch_info->path, &branch_rev))
oidcpy(rev, &branch_rev);
else
- new->path = NULL; /* not an existing branch */
+ new_branch_info->path = NULL; /* not an existing branch */
- new->commit = lookup_commit_reference_gently(rev, 1);
- if (!new->commit) {
+ new_branch_info->commit = lookup_commit_reference_gently(rev, 1);
+ if (!new_branch_info->commit) {
/* not a commit */
*source_tree = parse_tree_indirect(rev);
} else {
- parse_commit_or_die(new->commit);
- *source_tree = new->commit->tree;
+ parse_commit_or_die(new_branch_info->commit);
+ *source_tree = new_branch_info->commit->tree;
}
if (!*source_tree) /* case (1): want a tree */
}
static int checkout_branch(struct checkout_opts *opts,
- struct branch_info *new)
+ struct branch_info *new_branch_info)
{
if (opts->pathspec.nr)
die(_("paths cannot be used with switching branches"));
} else if (opts->track == BRANCH_TRACK_UNSPECIFIED)
opts->track = git_branch_track;
- if (new->name && !new->commit)
+ if (new_branch_info->name && !new_branch_info->commit)
die(_("Cannot switch branch to a non-commit '%s'"),
- new->name);
+ new_branch_info->name);
- if (new->path && !opts->force_detach && !opts->new_branch &&
+ if (new_branch_info->path && !opts->force_detach && !opts->new_branch &&
!opts->ignore_other_worktrees) {
int flag;
char *head_ref = resolve_refdup("HEAD", 0, NULL, &flag);
if (head_ref &&
- (!(flag & REF_ISSYMREF) || strcmp(head_ref, new->path)))
- die_if_checked_out(new->path, 1);
+ (!(flag & REF_ISSYMREF) || strcmp(head_ref, new_branch_info->path)))
+ die_if_checked_out(new_branch_info->path, 1);
free(head_ref);
}
- if (!new->commit && opts->new_branch) {
+ if (!new_branch_info->commit && opts->new_branch) {
struct object_id rev;
int flag;
(flag & REF_ISSYMREF) && is_null_oid(&rev))
return switch_unborn_to_new_branch(opts);
}
- return switch_branches(opts, new);
+ return switch_branches(opts, new_branch_info);
}
int cmd_checkout(int argc, const char **argv, const char *prefix)
{
struct checkout_opts opts;
- struct branch_info new;
+ struct branch_info new_branch_info;
char *conflict_style = NULL;
int dwim_new_local_branch = 1;
struct option options[] = {
};
memset(&opts, 0, sizeof(opts));
- memset(&new, 0, sizeof(new));
+ memset(&new_branch_info, 0, sizeof(new_branch_info));
opts.overwrite_ignore = 1;
opts.prefix = prefix;
opts.show_progress = -1;
opts.track == BRANCH_TRACK_UNSPECIFIED &&
!opts.new_branch;
int n = parse_branchname_arg(argc, argv, dwim_ok,
- &new, &opts, &rev);
+ &new_branch_info, &opts, &rev);
argv += n;
argc -= n;
}
UNLEAK(opts);
if (opts.patch_mode || opts.pathspec.nr)
- return checkout_paths(&opts, new.name);
+ return checkout_paths(&opts, new_branch_info.name);
else
- return checkout_branch(&opts, &new);
+ return checkout_branch(&opts, &new_branch_info);
}
object_as_type(obj, type, 0);
}
- ret = typename(obj->type);
+ ret = type_name(obj->type);
if (!ret)
ret = "unknown";
printf("broken link from %7s %s\n",
printable_type(parent), describe_object(parent));
printf("broken link from %7s %s\n",
- (type == OBJ_ANY ? "unknown" : typename(type)), "unknown");
+ (type == OBJ_ANY ? "unknown" : type_name(type)), "unknown");
errors_found |= ERROR_REACHABLE;
return 1;
}
if (obj->flags & REACHABLE)
return 0;
obj->flags |= REACHABLE;
+
+ if (is_promisor_object(&obj->oid))
+ /*
+ * Further recursion does not need to be performed on this
+ * object since it is a promisor object (so it does not need to
+ * be added to "pending").
+ */
+ return 0;
+
if (!(obj->flags & HAS_OBJ)) {
if (parent && !has_object_file(&obj->oid)) {
printf("broken link from %7s %s\n",
static int traverse_one_object(struct object *obj)
{
- return fsck_walk(obj, obj, &fsck_walk_options);
+ int result = fsck_walk(obj, obj, &fsck_walk_options);
+
+ if (obj->type == OBJ_TREE) {
+ struct tree *tree = (struct tree *)obj;
+ free_tree_buffer(tree);
+ }
+ return result;
}
static int traverse_reachable(void)
* do a full fsck
*/
if (!(obj->flags & HAS_OBJ)) {
+ if (is_promisor_object(&obj->oid))
+ return;
if (has_sha1_pack(obj->oid.hash))
return; /* it is in pack - forget about it */
printf("missing %s %s\n", printable_type(obj),
xstrfmt("%s@{%"PRItime"}", refname, timestamp));
obj->flags |= USED;
mark_object_reachable(obj);
- } else {
+ } else if (!is_promisor_object(oid)) {
error("%s: invalid reflog entry %s", refname, oid_to_hex(oid));
errors_found |= ERROR_REACHABLE;
}
obj = parse_object(oid);
if (!obj) {
+ if (is_promisor_object(oid)) {
+ /*
+ * Increment default_refs anyway, because this is a
+ * valid ref.
+ */
+ default_refs++;
+ return 0;
+ }
error("%s: invalid sha1 pointer %s", refname, oid_to_hex(oid));
errors_found |= ERROR_REACHABLE;
/* We'll continue with the rest despite the error.. */
int i;
struct alternate_object_database *alt;
+ /* fsck knows how to handle missing promisor objects */
+ fetch_if_missing = 0;
+
errors_found = 0;
check_replace_refs = 0;
struct object *obj = lookup_object(oid.hash);
if (!obj || !(obj->flags & HAS_OBJ)) {
+ if (is_promisor_object(&oid))
+ continue;
error("%s: object missing", oid_to_hex(&oid));
errors_found |= ERROR_OBJECT;
continue;
static off_t consumed_bytes;
static off_t max_input_size;
static unsigned deepest_delta;
-static git_SHA_CTX input_ctx;
+static git_hash_ctx input_ctx;
static uint32_t input_crc32;
static int input_fd, output_fd;
static const char *curr_pack;
if (type != obj->type)
die(_("object %s: expected type %s, found %s"),
oid_to_hex(&obj->oid),
- typename(obj->type), typename(type));
+ type_name(obj->type), type_name(type));
obj->flags |= FLAG_CHECKED;
return 1;
}
if (input_offset) {
if (output_fd >= 0)
write_or_die(output_fd, input_buffer, input_offset);
- git_SHA1_Update(&input_ctx, input_buffer, input_offset);
+ the_hash_algo->update_fn(&input_ctx, input_buffer, input_offset);
memmove(input_buffer, input_buffer + input_offset, input_len);
input_offset = 0;
}
output_fd = -1;
nothread_data.pack_fd = input_fd;
}
- git_SHA1_Init(&input_ctx);
+ the_hash_algo->init_fn(&input_ctx);
return pack_name;
}
}
static void *unpack_entry_data(off_t offset, unsigned long size,
- enum object_type type, unsigned char *sha1)
+ enum object_type type, struct object_id *oid)
{
static char fixed_buf[8192];
int status;
git_zstream stream;
void *buf;
- git_SHA_CTX c;
+ git_hash_ctx c;
char hdr[32];
int hdrlen;
if (!is_delta_type(type)) {
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), size) + 1;
+ hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), size) + 1;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, hdrlen);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
} else
- sha1 = NULL;
+ oid = NULL;
if (type == OBJ_BLOB && size > big_file_threshold)
buf = fixed_buf;
else
stream.avail_in = input_len;
status = git_inflate(&stream, 0);
use(input_len - stream.avail_in);
- if (sha1)
- git_SHA1_Update(&c, last_out, stream.next_out - last_out);
+ if (oid)
+ the_hash_algo->update_fn(&c, last_out, stream.next_out - last_out);
if (buf == fixed_buf) {
stream.next_out = buf;
stream.avail_out = sizeof(fixed_buf);
if (stream.total_out != size || status != Z_STREAM_END)
bad_object(offset, _("inflate returned %d"), status);
git_inflate_end(&stream);
- if (sha1)
- git_SHA1_Final(sha1, &c);
+ if (oid)
+ the_hash_algo->final_fn(oid->hash, &c);
return buf == fixed_buf ? NULL : buf;
}
static void *unpack_raw_entry(struct object_entry *obj,
off_t *ofs_offset,
- unsigned char *ref_sha1,
- unsigned char *sha1)
+ struct object_id *ref_oid,
+ struct object_id *oid)
{
unsigned char *p;
unsigned long size, c;
switch (obj->type) {
case OBJ_REF_DELTA:
- hashcpy(ref_sha1, fill(20));
- use(20);
+ hashcpy(ref_oid->hash, fill(the_hash_algo->rawsz));
+ use(the_hash_algo->rawsz);
break;
case OBJ_OFS_DELTA:
p = fill(1);
}
obj->hdr_size = consumed_bytes - obj->idx.offset;
- data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, sha1);
+ data = unpack_entry_data(obj->idx.offset, obj->size, obj->type, oid);
obj->idx.crc32 = input_crc32;
return data;
}
obj = parse_object_buffer(oid, type, size, buf,
&eaten);
if (!obj)
- die(_("invalid %s"), typename(type));
+ die(_("invalid %s"), type_name(type));
if (do_fsck_object &&
fsck_object(obj, buf, size, &fsck_options))
die(_("Error in object"));
free(delta_data);
if (!result->data)
bad_object(delta_obj->idx.offset, _("failed to apply delta"));
- hash_sha1_file(result->data, result->size,
- type_name(delta_obj->real_type),
- delta_obj->idx.oid.hash);
+ hash_object_file(result->data, result->size,
- typename(delta_obj->real_type), &delta_obj->idx.oid);
++ type_name(delta_obj->real_type), &delta_obj->idx.oid);
sha1_object(result->data, NULL, result->size, delta_obj->real_type,
&delta_obj->idx.oid);
counter_lock();
* - calculate SHA1 of all non-delta objects;
* - remember base (SHA1 or offset) for all deltas.
*/
-static void parse_pack_objects(unsigned char *sha1)
+static void parse_pack_objects(unsigned char *hash)
{
int i, nr_delays = 0;
struct ofs_delta_entry *ofs_delta = ofs_deltas;
- unsigned char ref_delta_sha1[20];
+ struct object_id ref_delta_oid;
struct stat st;
if (verbose)
for (i = 0; i < nr_objects; i++) {
struct object_entry *obj = &objects[i];
void *data = unpack_raw_entry(obj, &ofs_delta->offset,
- ref_delta_sha1,
- obj->idx.oid.hash);
+ &ref_delta_oid,
+ &obj->idx.oid);
obj->real_type = obj->type;
if (obj->type == OBJ_OFS_DELTA) {
nr_ofs_deltas++;
ofs_delta++;
} else if (obj->type == OBJ_REF_DELTA) {
ALLOC_GROW(ref_deltas, nr_ref_deltas + 1, ref_deltas_alloc);
- hashcpy(ref_deltas[nr_ref_deltas].sha1, ref_delta_sha1);
+ hashcpy(ref_deltas[nr_ref_deltas].sha1, ref_delta_oid.hash);
ref_deltas[nr_ref_deltas].obj_no = i;
nr_ref_deltas++;
} else if (!data) {
/* Check pack integrity */
flush();
- git_SHA1_Final(sha1, &input_ctx);
- if (hashcmp(fill(20), sha1))
+ the_hash_algo->final_fn(hash, &input_ctx);
+ if (hashcmp(fill(the_hash_algo->rawsz), hash))
die(_("pack is corrupted (SHA1 mismatch)"));
- use(20);
+ use(the_hash_algo->rawsz);
/* If input_fd is a file, we should have reached its end now. */
if (fstat(input_fd, &st))
/*
* Third pass:
* - append objects to convert thin pack to full pack if required
- * - write the final 20-byte SHA-1
+ * - write the final pack hash
*/
-static void fix_unresolved_deltas(struct sha1file *f);
-static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_sha1)
+static void fix_unresolved_deltas(struct hashfile *f);
+static void conclude_pack(int fix_thin_pack, const char *curr_pack, unsigned char *pack_hash)
{
if (nr_ref_deltas + nr_ofs_deltas == nr_resolved_deltas) {
stop_progress(&progress);
- /* Flush remaining pack final 20-byte SHA1. */
+ /* Flush remaining pack final hash. */
flush();
return;
}
if (fix_thin_pack) {
- struct sha1file *f;
- unsigned char read_sha1[20], tail_sha1[20];
+ struct hashfile *f;
+ unsigned char read_hash[GIT_MAX_RAWSZ], tail_hash[GIT_MAX_RAWSZ];
struct strbuf msg = STRBUF_INIT;
int nr_unresolved = nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas;
int nr_objects_initial = nr_objects;
REALLOC_ARRAY(objects, nr_objects + nr_unresolved + 1);
memset(objects + nr_objects + 1, 0,
nr_unresolved * sizeof(*objects));
- f = sha1fd(output_fd, curr_pack);
+ f = hashfd(output_fd, curr_pack);
fix_unresolved_deltas(f);
strbuf_addf(&msg, Q_("completed with %d local object",
"completed with %d local objects",
nr_objects - nr_objects_initial);
stop_progress_msg(&progress, msg.buf);
strbuf_release(&msg);
- sha1close(f, tail_sha1, 0);
- hashcpy(read_sha1, pack_sha1);
- fixup_pack_header_footer(output_fd, pack_sha1,
+ hashclose(f, tail_hash, 0);
+ hashcpy(read_hash, pack_hash);
+ fixup_pack_header_footer(output_fd, pack_hash,
curr_pack, nr_objects,
- read_sha1, consumed_bytes-20);
- if (hashcmp(read_sha1, tail_sha1) != 0)
+ read_hash, consumed_bytes-the_hash_algo->rawsz);
+ if (hashcmp(read_hash, tail_hash) != 0)
die(_("Unexpected tail checksum for %s "
"(disk corruption?)"), curr_pack);
}
nr_ofs_deltas + nr_ref_deltas - nr_resolved_deltas);
}
-static int write_compressed(struct sha1file *f, void *in, unsigned int size)
+static int write_compressed(struct hashfile *f, void *in, unsigned int size)
{
git_zstream stream;
int status;
stream.next_out = outbuf;
stream.avail_out = sizeof(outbuf);
status = git_deflate(&stream, Z_FINISH);
- sha1write(f, outbuf, sizeof(outbuf) - stream.avail_out);
+ hashwrite(f, outbuf, sizeof(outbuf) - stream.avail_out);
} while (status == Z_OK);
if (status != Z_STREAM_END)
return size;
}
-static struct object_entry *append_obj_to_pack(struct sha1file *f,
+static struct object_entry *append_obj_to_pack(struct hashfile *f,
const unsigned char *sha1, void *buf,
unsigned long size, enum object_type type)
{
}
header[n++] = c;
crc32_begin(f);
- sha1write(f, header, n);
+ hashwrite(f, header, n);
obj[0].size = size;
obj[0].hdr_size = n;
obj[0].type = type;
obj[1].idx.offset = obj[0].idx.offset + n;
obj[1].idx.offset += write_compressed(f, buf, size);
obj[0].idx.crc32 = crc32_end(f);
- sha1flush(f);
+ hashflush(f);
hashcpy(obj->idx.oid.hash, sha1);
return obj;
}
return a->obj_no - b->obj_no;
}
-static void fix_unresolved_deltas(struct sha1file *f)
+static void fix_unresolved_deltas(struct hashfile *f)
{
struct ref_delta_entry **sorted_by_pos;
int i;
continue;
if (check_sha1_signature(d->sha1, base_obj->data,
- base_obj->size, typename(type)))
+ base_obj->size, type_name(type)))
die(_("local object %s is corrupt"), sha1_to_hex(d->sha1));
base_obj->obj = append_obj_to_pack(f, d->sha1,
base_obj->data, base_obj->size, type);
free(sorted_by_pos);
}
+static const char *derive_filename(const char *pack_name, const char *suffix,
+ struct strbuf *buf)
+{
+ size_t len;
+ if (!strip_suffix(pack_name, ".pack", &len))
+ die(_("packfile name '%s' does not end with '.pack'"),
+ pack_name);
+ strbuf_add(buf, pack_name, len);
+ strbuf_addch(buf, '.');
+ strbuf_addstr(buf, suffix);
+ return buf->buf;
+}
+
+static void write_special_file(const char *suffix, const char *msg,
+ const char *pack_name, const unsigned char *hash,
+ const char **report)
+{
+ struct strbuf name_buf = STRBUF_INIT;
+ const char *filename;
+ int fd;
+ int msg_len = strlen(msg);
+
+ if (pack_name)
+ filename = derive_filename(pack_name, suffix, &name_buf);
+ else
+ filename = odb_pack_name(&name_buf, hash, suffix);
+
+ fd = odb_pack_keep(filename);
+ if (fd < 0) {
+ if (errno != EEXIST)
+ die_errno(_("cannot write %s file '%s'"),
+ suffix, filename);
+ } else {
+ if (msg_len > 0) {
+ write_or_die(fd, msg, msg_len);
+ write_or_die(fd, "\n", 1);
+ }
+ if (close(fd) != 0)
+ die_errno(_("cannot close written %s file '%s'"),
+ suffix, filename);
+ if (report)
+ *report = suffix;
+ }
+ strbuf_release(&name_buf);
+}
+
static void final(const char *final_pack_name, const char *curr_pack_name,
const char *final_index_name, const char *curr_index_name,
- const char *keep_name, const char *keep_msg,
- unsigned char *sha1)
+ const char *keep_msg, const char *promisor_msg,
+ unsigned char *hash)
{
const char *report = "pack";
struct strbuf pack_name = STRBUF_INIT;
struct strbuf index_name = STRBUF_INIT;
- struct strbuf keep_name_buf = STRBUF_INIT;
int err;
if (!from_stdin) {
die_errno(_("error while closing pack file"));
}
- if (keep_msg) {
- int keep_fd, keep_msg_len = strlen(keep_msg);
-
- if (!keep_name)
- keep_name = odb_pack_name(&keep_name_buf, sha1, "keep");
-
- keep_fd = odb_pack_keep(keep_name);
- if (keep_fd < 0) {
- if (errno != EEXIST)
- die_errno(_("cannot write keep file '%s'"),
- keep_name);
- } else {
- if (keep_msg_len > 0) {
- write_or_die(keep_fd, keep_msg, keep_msg_len);
- write_or_die(keep_fd, "\n", 1);
- }
- if (close(keep_fd) != 0)
- die_errno(_("cannot close written keep file '%s'"),
- keep_name);
- report = "keep";
- }
- }
+ if (keep_msg)
+ write_special_file("keep", keep_msg, final_pack_name, hash,
+ &report);
+ if (promisor_msg)
+ write_special_file("promisor", promisor_msg, final_pack_name,
+ hash, NULL);
if (final_pack_name != curr_pack_name) {
if (!final_pack_name)
- final_pack_name = odb_pack_name(&pack_name, sha1, "pack");
+ final_pack_name = odb_pack_name(&pack_name, hash, "pack");
if (finalize_object_file(curr_pack_name, final_pack_name))
die(_("cannot store pack file"));
} else if (from_stdin)
if (final_index_name != curr_index_name) {
if (!final_index_name)
- final_index_name = odb_pack_name(&index_name, sha1, "idx");
+ final_index_name = odb_pack_name(&index_name, hash, "idx");
if (finalize_object_file(curr_index_name, final_index_name))
die(_("cannot store index file"));
} else
chmod(final_index_name, 0444);
if (!from_stdin) {
- printf("%s\n", sha1_to_hex(sha1));
+ printf("%s\n", sha1_to_hex(hash));
} else {
struct strbuf buf = STRBUF_INIT;
- strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(sha1));
+ strbuf_addf(&buf, "%s\t%s\n", report, sha1_to_hex(hash));
write_or_die(1, buf.buf, buf.len);
strbuf_release(&buf);
strbuf_release(&index_name);
strbuf_release(&pack_name);
- strbuf_release(&keep_name_buf);
}
static int git_index_pack_config(const char *k, const char *v, void *cb)
continue;
printf("%s %-6s %lu %lu %"PRIuMAX,
oid_to_hex(&obj->idx.oid),
- typename(obj->real_type), obj->size,
+ type_name(obj->real_type), obj->size,
(unsigned long)(obj[1].idx.offset - obj->idx.offset),
(uintmax_t)obj->idx.offset);
if (is_delta_type(obj->type)) {
}
}
-static const char *derive_filename(const char *pack_name, const char *suffix,
- struct strbuf *buf)
-{
- size_t len;
- if (!strip_suffix(pack_name, ".pack", &len))
- die(_("packfile name '%s' does not end with '.pack'"),
- pack_name);
- strbuf_add(buf, pack_name, len);
- strbuf_addstr(buf, suffix);
- return buf->buf;
-}
-
int cmd_index_pack(int argc, const char **argv, const char *prefix)
{
int i, fix_thin_pack = 0, verify = 0, stat_only = 0;
const char *curr_index;
const char *index_name = NULL, *pack_name = NULL;
- const char *keep_name = NULL, *keep_msg = NULL;
- struct strbuf index_name_buf = STRBUF_INIT,
- keep_name_buf = STRBUF_INIT;
+ const char *keep_msg = NULL;
+ const char *promisor_msg = NULL;
+ struct strbuf index_name_buf = STRBUF_INIT;
struct pack_idx_entry **idx_objects;
struct pack_idx_option opts;
- unsigned char pack_sha1[20];
+ unsigned char pack_hash[GIT_MAX_RAWSZ];
unsigned foreign_nr = 1; /* zero is a "good" value, assume bad */
int report_end_of_input = 0;
+ /*
+ * index-pack never needs to fetch missing objects, since it only
+ * accesses the repo to do hash collision checks
+ */
+ fetch_if_missing = 0;
+
if (argc == 2 && !strcmp(argv[1], "-h"))
usage(index_pack_usage);
stat_only = 1;
} else if (skip_to_optional_arg(arg, "--keep", &keep_msg)) {
; /* nothing to do */
+ } else if (skip_to_optional_arg(arg, "--promisor", &promisor_msg)) {
+ ; /* already parsed */
} else if (starts_with(arg, "--threads=")) {
char *end;
nr_threads = strtoul(arg+10, &end, 0);
if (from_stdin && !startup_info->have_repository)
die(_("--stdin requires a git repository"));
if (!index_name && pack_name)
- index_name = derive_filename(pack_name, ".idx", &index_name_buf);
- if (keep_msg && !keep_name && pack_name)
- keep_name = derive_filename(pack_name, ".keep", &keep_name_buf);
+ index_name = derive_filename(pack_name, "idx", &index_name_buf);
if (verify) {
if (!index_name)
if (show_stat)
obj_stat = xcalloc(st_add(nr_objects, 1), sizeof(struct object_stat));
ofs_deltas = xcalloc(nr_objects, sizeof(struct ofs_delta_entry));
- parse_pack_objects(pack_sha1);
+ parse_pack_objects(pack_hash);
if (report_end_of_input)
write_in_full(2, "\0", 1);
resolve_deltas();
- conclude_pack(fix_thin_pack, curr_pack, pack_sha1);
+ conclude_pack(fix_thin_pack, curr_pack, pack_hash);
free(ofs_deltas);
free(ref_deltas);
if (strict)
ALLOC_ARRAY(idx_objects, nr_objects);
for (i = 0; i < nr_objects; i++)
idx_objects[i] = &objects[i].idx;
- curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_sha1);
+ curr_index = write_idx_file(index_name, idx_objects, nr_objects, &opts, pack_hash);
free(idx_objects);
if (!verify)
final(pack_name, curr_pack,
index_name, curr_index,
- keep_name, keep_msg,
- pack_sha1);
+ keep_msg, promisor_msg,
+ pack_hash);
else
close(input_fd);
free(objects);
strbuf_release(&index_name_buf);
- strbuf_release(&keep_name_buf);
if (pack_name == NULL)
free((void *) curr_pack);
if (index_name == NULL)
#include "sequencer.h"
#include "string-list.h"
#include "packfile.h"
+#include "tag.h"
#define DEFAULT_TWOHEAD (1<<0)
#define DEFAULT_OCTOPUS (1<<1)
if (desc && desc->obj && desc->obj->type == OBJ_TAG) {
strbuf_addf(msg, "%s\t\t%s '%s'\n",
oid_to_hex(&desc->obj->oid),
- typename(desc->obj->type),
+ type_name(desc->obj->type),
remote);
goto cleanup;
}
pptr = commit_list_append(head, pptr);
pptr = commit_list_append(remoteheads->item, pptr);
prepare_to_commit(remoteheads);
- if (commit_tree(merge_msg.buf, merge_msg.len, result_tree.hash, parents,
- result_commit.hash, NULL, sign_commit))
+ if (commit_tree(merge_msg.buf, merge_msg.len, &result_tree, parents,
+ &result_commit, NULL, sign_commit))
die(_("failed to write commit object"));
finish(head, remoteheads, &result_commit, "In-index merge");
drop_save();
commit_list_insert(head, &parents);
strbuf_addch(&merge_msg, '\n');
prepare_to_commit(remoteheads);
- if (commit_tree(merge_msg.buf, merge_msg.len, result_tree->hash, parents,
- result_commit.hash, NULL, sign_commit))
+ if (commit_tree(merge_msg.buf, merge_msg.len, result_tree, parents,
+ &result_commit, NULL, sign_commit))
die(_("failed to write commit object"));
strbuf_addf(&buf, "Merge made by the '%s' strategy.", wt_strategy);
finish(head, remoteheads, &result_commit, buf.buf);
return remoteheads;
}
+static int merging_a_throwaway_tag(struct commit *commit)
+{
+ char *tag_ref;
+ struct object_id oid;
+ int is_throwaway_tag = 0;
+
+ /* Are we merging a tag? */
+ if (!merge_remote_util(commit) ||
+ !merge_remote_util(commit)->obj ||
+ merge_remote_util(commit)->obj->type != OBJ_TAG)
+ return is_throwaway_tag;
+
+ /*
+ * Now we know we are merging a tag object. Are we downstream
+ * and following the tags from upstream? If so, we must have
+ * the tag object pointed at by "refs/tags/$T" where $T is the
+ * tagname recorded in the tag object. We want to allow such
+ * a "just to catch up" merge to fast-forward.
+ *
+ * Otherwise, we are playing an integrator's role, making a
+ * merge with a throw-away tag from a contributor with
+ * something like "git pull $contributor $signed_tag".
+ * We want to forbid such a merge from fast-forwarding
+ * by default; otherwise we would not keep the signature
+ * anywhere.
+ */
+ tag_ref = xstrfmt("refs/tags/%s",
+ ((struct tag *)merge_remote_util(commit)->obj)->tag);
+ if (!read_ref(tag_ref, &oid) &&
+ !oidcmp(&oid, &merge_remote_util(commit)->obj->oid))
+ is_throwaway_tag = 0;
+ else
+ is_throwaway_tag = 1;
+ free(tag_ref);
+ return is_throwaway_tag;
+}
+
int cmd_merge(int argc, const char **argv, const char *prefix)
{
struct object_id result_tree, stash, head_oid;
oid_to_hex(&commit->object.oid));
setenv(buf.buf, merge_remote_util(commit)->name, 1);
strbuf_reset(&buf);
- if (fast_forward != FF_ONLY &&
- merge_remote_util(commit) &&
- merge_remote_util(commit)->obj &&
- merge_remote_util(commit)->obj->type == OBJ_TAG)
+ if (fast_forward != FF_ONLY && merging_a_throwaway_tag(commit))
fast_forward = FF_NO;
}
b->name, b->len, b->mode);
}
-static void write_tree(unsigned char *sha1)
+static void write_tree(struct object_id *oid)
{
struct strbuf buf;
size_t size;
strbuf_add(&buf, ent->sha1, 20);
}
- write_sha1_file(buf.buf, buf.len, tree_type, sha1);
+ write_object_file(buf.buf, buf.len, tree_type, oid);
strbuf_release(&buf);
}
mode_type = object_type(mode);
if (mode_type != type_from_string(ptr)) {
die("entry '%s' object type (%s) doesn't match mode type (%s)",
- path, ptr, typename(mode_type));
+ path, ptr, type_name(mode_type));
}
/* Check the type of object identified by sha1 */
* because the new tree entry will never be correct.
*/
die("entry '%s' object %s is a %s but specified type was (%s)",
- path, sha1_to_hex(sha1), typename(obj_type), typename(mode_type));
+ path, sha1_to_hex(sha1), type_name(obj_type), type_name(mode_type));
}
}
int cmd_mktree(int ac, const char **av, const char *prefix)
{
struct strbuf sb = STRBUF_INIT;
- unsigned char sha1[20];
+ struct object_id oid;
int nul_term_line = 0;
int allow_missing = 0;
int is_batch_mode = 0;
*/
; /* skip creating an empty tree */
} else {
- write_tree(sha1);
- puts(sha1_to_hex(sha1));
+ write_tree(&oid);
+ puts(oid_to_hex(&oid));
fflush(stdout);
}
used=0; /* reset tree entry buffer for re-use in batch mode */
#include "reachable.h"
#include "sha1-array.h"
#include "argv-array.h"
-#include "mru.h"
+#include "list.h"
#include "packfile.h"
static const char *pack_usage[] = {
static int write_bitmap_index;
static uint16_t write_bitmap_options;
+static int exclude_promisor_objects;
+
static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
static unsigned long cache_max_small_delta_size = 1000;
static struct list_objects_filter_options filter_options;
enum missing_action {
- MA_ERROR = 0, /* fail if any missing objects are encountered */
- MA_ALLOW_ANY, /* silently allow ALL missing objects */
+ MA_ERROR = 0, /* fail if any missing objects are encountered */
+ MA_ALLOW_ANY, /* silently allow ALL missing objects */
+ MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */
};
static enum missing_action arg_missing_action;
static show_object_fn fn_show_object;
return stream.total_out;
}
-static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f,
+static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
const struct object_id *oid)
{
git_zstream stream;
stream.next_out = obuf;
stream.avail_out = sizeof(obuf);
zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
- sha1write(f, obuf, stream.next_out - obuf);
+ hashwrite(f, obuf, stream.next_out - obuf);
olen += stream.next_out - obuf;
}
if (stream.avail_in)
stream.total_in == len) ? 0 : -1;
}
-static void copy_pack_data(struct sha1file *f,
+static void copy_pack_data(struct hashfile *f,
struct packed_git *p,
struct pack_window **w_curs,
off_t offset,
in = use_pack(p, w_curs, offset, &avail);
if (avail > len)
avail = (unsigned long)len;
- sha1write(f, in, avail);
+ hashwrite(f, in, avail);
offset += avail;
len -= avail;
}
}
/* Return 0 if we will bust the pack-size limit */
-static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
+static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
unsigned long limit, int usable_delta)
{
unsigned long size, datalen;
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, dheader + pos, sizeof(dheader) - pos);
hdrlen += sizeof(dheader) - pos;
} else if (type == OBJ_REF_DELTA) {
/*
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, entry->delta->idx.oid.hash, 20);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, entry->delta->idx.oid.hash, 20);
hdrlen += 20;
} else {
if (limit && hdrlen + datalen + 20 >= limit) {
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
+ hashwrite(f, header, hdrlen);
}
if (st) {
datalen = write_large_blob_data(st, f, &entry->idx.oid);
close_istream(st);
} else {
- sha1write(f, buf, datalen);
+ hashwrite(f, buf, datalen);
free(buf);
}
}
/* Return 0 if we will bust the pack-size limit */
-static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
+static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
unsigned long limit, int usable_delta)
{
struct packed_git *p = entry->in_pack;
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, dheader + pos, sizeof(dheader) - pos);
hdrlen += sizeof(dheader) - pos;
reused_delta++;
} else if (type == OBJ_REF_DELTA) {
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, entry->delta->idx.oid.hash, 20);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, entry->delta->idx.oid.hash, 20);
hdrlen += 20;
reused_delta++;
} else {
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
+ hashwrite(f, header, hdrlen);
}
copy_pack_data(f, p, &w_curs, offset, datalen);
unuse_pack(&w_curs);
}
/* Return 0 if we will bust the pack-size limit */
-static off_t write_object(struct sha1file *f,
+static off_t write_object(struct hashfile *f,
struct object_entry *entry,
off_t write_offset)
{
WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};
-static enum write_one_status write_one(struct sha1file *f,
+static enum write_one_status write_one(struct hashfile *f,
struct object_entry *e,
off_t *offset)
{
return wo;
}
-static off_t write_reused_pack(struct sha1file *f)
+static off_t write_reused_pack(struct hashfile *f)
{
unsigned char buffer[8192];
off_t to_write, total;
if (read_pack > to_write)
read_pack = to_write;
- sha1write(f, buffer, read_pack);
+ hashwrite(f, buffer, read_pack);
to_write -= read_pack;
/*
static void write_pack_file(void)
{
uint32_t i = 0, j;
- struct sha1file *f;
+ struct hashfile *f;
off_t offset;
uint32_t nr_remaining = nr_result;
time_t last_mtime = 0;
char *pack_tmp_name = NULL;
if (pack_to_stdout)
- f = sha1fd_throughput(1, "<stdout>", progress_state);
+ f = hashfd_throughput(1, "<stdout>", progress_state);
else
f = create_tmp_packfile(&pack_tmp_name);
* If so, rewrite it like in fast-import
*/
if (pack_to_stdout) {
- sha1close(f, oid.hash, CSUM_CLOSE);
+ hashclose(f, oid.hash, CSUM_CLOSE);
} else if (nr_written == nr_remaining) {
- sha1close(f, oid.hash, CSUM_FSYNC);
+ hashclose(f, oid.hash, CSUM_FSYNC);
} else {
- int fd = sha1close(f, oid.hash, 0);
+ int fd = hashclose(f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
nr_written, oid.hash, offset);
close(fd);
struct packed_git **found_pack,
off_t *found_offset)
{
- struct mru_entry *entry;
int want;
+ struct list_head *pos;
if (!exclude && local && has_loose_object_nonlocal(oid->hash))
return 0;
return want;
}
- for (entry = packed_git_mru.head; entry; entry = entry->next) {
- struct packed_git *p = entry->item;
+ list_for_each(pos, &packed_git_mru) {
+ struct packed_git *p = list_entry(pos, struct packed_git, mru);
off_t offset;
if (p == *found_pack)
}
want = want_found_object(exclude, p);
if (!exclude && want > 0)
- mru_mark(&packed_git_mru, entry);
+ list_move(&p->mru, &packed_git_mru);
if (want != -1)
return want;
}
it = pbase_tree;
pbase_tree = NULL;
while (it) {
- struct pbase_tree *this = it;
- it = this->next;
- free(this->pcache.tree_data);
- free(this);
+ struct pbase_tree *tmp = it;
+ it = tmp->next;
+ free(tmp->pcache.tree_data);
+ free(tmp);
}
for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
show_object(obj, name, data);
}
+static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data)
+{
+ assert(arg_missing_action == MA_ALLOW_PROMISOR);
+
+ /*
+ * Quietly ignore EXPECTED missing objects. This avoids problems with
+ * staging them now and getting an odd error later.
+ */
+ if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid))
+ return;
+
+ show_object(obj, name, data);
+}
+
static int option_parse_missing_action(const struct option *opt,
const char *arg, int unset)
{
if (!strcmp(arg, "allow-any")) {
arg_missing_action = MA_ALLOW_ANY;
+ fetch_if_missing = 0;
fn_show_object = show_object__ma_allow_any;
return 0;
}
+ if (!strcmp(arg, "allow-promisor")) {
+ arg_missing_action = MA_ALLOW_PROMISOR;
+ fetch_if_missing = 0;
+ fn_show_object = show_object__ma_allow_promisor;
+ return 0;
+ }
+
die(_("invalid value for --missing"));
return 0;
}
if (!packlist_find(&to_pack, oid.hash, NULL) &&
!has_sha1_pack_kept_or_nonlocal(&oid) &&
!loosened_object_can_be_discarded(&oid, p->mtime))
- if (force_object_loose(oid.hash, p->mtime))
+ if (force_object_loose(&oid, p->mtime))
die("unable to force loose object");
}
}
{ OPTION_CALLBACK, 0, "missing", NULL, N_("action"),
N_("handling for missing objects"), PARSE_OPT_NONEG,
option_parse_missing_action },
+ OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
+ N_("do not pack objects in promisor packfiles")),
OPT_END(),
};
argv_array_push(&rp, "--unpacked");
}
+ if (exclude_promisor_objects) {
+ use_internal_rev_list = 1;
+ fetch_if_missing = 0;
+ argv_array_push(&rp, "--exclude-promisor-objects");
+ }
+
if (!reuse_object)
reuse_delta = 0;
if (pack_compression_level == -1)
if (show_only || verbose) {
enum object_type type = sha1_object_info(oid->hash, NULL);
printf("%s %s\n", oid_to_hex(oid),
- (type > 0) ? typename(type) : "unknown");
+ (type > 0) ? type_name(type) : "unknown");
}
if (!show_only)
unlink_or_warn(fullpath);
{
struct rev_info revs;
struct progress *progress = NULL;
+ int exclude_promisor_objects = 0;
const struct option options[] = {
OPT__DRY_RUN(&show_only, N_("do not remove, show only")),
OPT__VERBOSE(&verbose, N_("report pruned objects")),
OPT_BOOL(0, "progress", &show_progress, N_("show progress")),
OPT_EXPIRY_DATE(0, "expire", &expire,
N_("expire objects older than <time>")),
+ OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects,
+ N_("limit traversal to objects outside promisor packfiles")),
OPT_END()
};
char *s;
show_progress = isatty(2);
if (show_progress)
progress = start_delayed_progress(_("Checking connectivity"), 0);
+ if (exclude_promisor_objects) {
+ fetch_if_missing = 0;
+ revs.exclude_promisor_objects = 1;
+ }
mark_reachable_objects(&revs, 1, expire, progress);
stop_progress(&progress);
obj_type = sha1_object_info(object.hash, NULL);
repl_type = sha1_object_info(oid->hash, NULL);
- printf("%s (%s) -> %s (%s)\n", refname, typename(obj_type),
- oid_to_hex(oid), typename(repl_type));
+ printf("%s (%s) -> %s (%s)\n", refname, type_name(obj_type),
+ oid_to_hex(oid), type_name(repl_type));
}
}
die("Objects must be of the same type.\n"
"'%s' points to a replaced object of type '%s'\n"
"while '%s' points to a replacement object of type '%s'.",
- object_ref, typename(obj_type),
- replace_ref, typename(repl_type));
+ object_ref, type_name(obj_type),
+ replace_ref, type_name(repl_type));
check_ref_valid(object, &prev, &ref, force);
argv_array_push(&cmd.args, "--no-replace-objects");
argv_array_push(&cmd.args, "cat-file");
if (raw)
- argv_array_push(&cmd.args, typename(type));
+ argv_array_push(&cmd.args, type_name(type));
else
argv_array_push(&cmd.args, "-p");
argv_array_push(&cmd.args, oid_to_hex(oid));
{
char *tmpfile = git_pathdup("REPLACE_EDITOBJ");
enum object_type type;
- struct object_id old, new, prev;
+ struct object_id old_oid, new_oid, prev;
struct strbuf ref = STRBUF_INIT;
- if (get_oid(object_ref, &old) < 0)
+ if (get_oid(object_ref, &old_oid) < 0)
die("Not a valid object name: '%s'", object_ref);
- type = sha1_object_info(old.hash, NULL);
+ type = sha1_object_info(old_oid.hash, NULL);
if (type < 0)
- die("unable to get object type for %s", oid_to_hex(&old));
+ die("unable to get object type for %s", oid_to_hex(&old_oid));
- check_ref_valid(&old, &prev, &ref, force);
+ check_ref_valid(&old_oid, &prev, &ref, force);
strbuf_release(&ref);
- export_object(&old, type, raw, tmpfile);
+ export_object(&old_oid, type, raw, tmpfile);
if (launch_editor(tmpfile, NULL, NULL) < 0)
die("editing object file failed");
- import_object(&new, type, raw, tmpfile);
+ import_object(&new_oid, type, raw, tmpfile);
free(tmpfile);
- if (!oidcmp(&old, &new))
- return error("new object is the same as the old one: '%s'", oid_to_hex(&old));
+ if (!oidcmp(&old_oid, &new_oid))
+ return error("new object is the same as the old one: '%s'", oid_to_hex(&old_oid));
- return replace_object_oid(object_ref, &old, "replacement", &new, force);
+ return replace_object_oid(object_ref, &old_oid, "replacement", &new_oid, force);
}
static void replace_parents(struct strbuf *buf, int argc, const char **argv)
struct tag *tag;
int i;
- hash_object_file(extra->value, extra->len, typename(OBJ_TAG), &tag_oid);
- hash_sha1_file(extra->value, extra->len, type_name(OBJ_TAG), tag_oid.hash);
++ hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &tag_oid);
tag = lookup_tag(&tag_oid);
if (!tag)
die(_("bad mergetag in commit '%s'"), ref);
static int create_graft(int argc, const char **argv, int force)
{
- struct object_id old, new;
+ struct object_id old_oid, new_oid;
const char *old_ref = argv[0];
struct commit *commit;
struct strbuf buf = STRBUF_INIT;
const char *buffer;
unsigned long size;
- if (get_oid(old_ref, &old) < 0)
+ if (get_oid(old_ref, &old_oid) < 0)
die(_("Not a valid object name: '%s'"), old_ref);
- commit = lookup_commit_or_die(&old, old_ref);
+ commit = lookup_commit_or_die(&old_oid, old_ref);
buffer = get_commit_buffer(commit, &size);
strbuf_add(&buf, buffer, size);
check_mergetags(commit, argc, argv);
- if (write_object_file(buf.buf, buf.len, commit_type, &new))
- if (write_sha1_file(buf.buf, buf.len, commit_type, new_oid.hash))
++ if (write_object_file(buf.buf, buf.len, commit_type, &new_oid))
die(_("could not write replacement commit for: '%s'"), old_ref);
strbuf_release(&buf);
- if (!oidcmp(&old, &new))
- return error("new commit is the same as the old one: '%s'", oid_to_hex(&old));
+ if (!oidcmp(&old_oid, &new_oid))
+ return error("new commit is the same as the old one: '%s'", oid_to_hex(&old_oid));
- return replace_object_oid(old_ref, &old, "replacement", &new, force);
+ return replace_object_oid(old_ref, &old_oid, "replacement", &new_oid, force);
}
int cmd_replace(int argc, const char **argv, const char *prefix)
static int try_difference(const char *arg)
{
char *dotdot;
- struct object_id oid;
- struct object_id end;
- const char *next;
- const char *this;
+ struct object_id start_oid;
+ struct object_id end_oid;
+ const char *end;
+ const char *start;
int symmetric;
static const char head_by_default[] = "HEAD";
if (!(dotdot = strstr(arg, "..")))
return 0;
- next = dotdot + 2;
- this = arg;
- symmetric = (*next == '.');
+ end = dotdot + 2;
+ start = arg;
+ symmetric = (*end == '.');
*dotdot = 0;
- next += symmetric;
+ end += symmetric;
- if (!*next)
- next = head_by_default;
+ if (!*end)
+ end = head_by_default;
if (dotdot == arg)
- this = head_by_default;
+ start = head_by_default;
- if (this == head_by_default && next == head_by_default &&
+ if (start == head_by_default && end == head_by_default &&
!symmetric) {
/*
* Just ".."? That is not a range but the
return 0;
}
- if (!get_oid_committish(this, &oid) && !get_oid_committish(next, &end)) {
- show_rev(NORMAL, &end, next);
- show_rev(symmetric ? NORMAL : REVERSED, &oid, this);
+ if (!get_oid_committish(start, &start_oid) && !get_oid_committish(end, &end_oid)) {
+ show_rev(NORMAL, &end_oid, end);
+ show_rev(symmetric ? NORMAL : REVERSED, &start_oid, start);
if (symmetric) {
struct commit_list *exclude;
struct commit *a, *b;
- a = lookup_commit_reference(&oid);
- b = lookup_commit_reference(&end);
+ a = lookup_commit_reference(&start_oid);
+ b = lookup_commit_reference(&end_oid);
exclude = get_merge_bases(a, b);
while (exclude) {
struct commit *commit = pop_commit(&exclude);
PARSE_OPT_SHELL_EVAL);
strbuf_addstr(&parsed, " --");
- sq_quote_argv(&parsed, argv, 0);
+ sq_quote_argv(&parsed, argv);
puts(parsed.buf);
return 0;
}
struct strbuf buf = STRBUF_INIT;
if (argc)
- sq_quote_argv(&buf, argv, 0);
+ sq_quote_argv(&buf, argv);
printf("%s\n", buf.buf);
strbuf_release(&buf);
{
if (sign && do_sign(buf) < 0)
return error(_("unable to sign the tag"));
- if (write_sha1_file(buf->buf, buf->len, tag_type, result->hash) < 0)
+ if (write_object_file(buf->buf, buf->len, tag_type, result) < 0)
return error(_("unable to write tag file"));
return 0;
}
struct create_tag_options {
unsigned int message_given:1;
+ unsigned int use_editor:1;
unsigned int sign;
enum {
CLEANUP_NONE,
"tag %s\n"
"tagger %s\n\n",
oid_to_hex(object),
- typename(type),
+ type_name(type),
tag,
git_committer_info(IDENT_STRICT));
- if (!opt->message_given) {
+ if (!opt->message_given || opt->use_editor) {
int fd;
/* write the template message before editing: */
if (fd < 0)
die_errno(_("could not create file '%s'"), path);
- if (!is_null_oid(prev)) {
+ if (opt->message_given) {
+ write_or_die(fd, buf->buf, buf->len);
+ strbuf_reset(buf);
+ } else if (!is_null_oid(prev)) {
write_tag_body(fd, prev);
} else {
struct strbuf buf = STRBUF_INIT;
static struct ref_sorting *sorting = NULL, **sorting_tail = &sorting;
struct ref_format format = REF_FORMAT_INIT;
int icase = 0;
+ int edit_flag = 0;
struct option options[] = {
OPT_CMDMODE('l', "list", &cmdmode, N_("list tag names"), 'l'),
{ OPTION_INTEGER, 'n', NULL, &filter.lines, N_("n"),
OPT_CALLBACK('m', "message", &msg, N_("message"),
N_("tag message"), parse_msg_arg),
OPT_FILENAME('F', "file", &msgfile, N_("read message from file")),
+ OPT_BOOL('e', "edit", &edit_flag, N_("force edit of tag message")),
OPT_BOOL('s', "sign", &opt.sign, N_("annotated and GPG-signed tag")),
OPT_STRING(0, "cleanup", &cleanup_arg, N_("mode"),
N_("how to strip spaces and #comments from message")),
die(_("tag '%s' already exists"), tag);
opt.message_given = msg.given || msgfile;
+ opt.use_editor = edit_flag;
if (!cleanup_arg || !strcmp(cleanup_arg, "strip"))
opt.cleanup_mode = CLEANUP_ALL;
static unsigned int offset, len;
static off_t consumed_bytes;
static off_t max_input_size;
-static git_SHA_CTX ctx;
+static git_hash_ctx ctx;
static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
/*
if (min > sizeof(buffer))
die("cannot fill %d bytes", min);
if (offset) {
- git_SHA1_Update(&ctx, buffer, offset);
+ the_hash_algo->update_fn(&ctx, buffer, offset);
memmove(buffer, buffer + offset, len);
offset = 0;
}
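The fixed git_SHA1_* calls give way to the pluggable the_hash_algo function table (init_fn/update_fn/final_fn, plus rawsz/hexsz for sizes). A minimal sketch of hashing a buffer through it, assuming only the members used in these hunks:

    #include "cache.h"

    /* Hash an in-memory buffer with whatever algorithm the repository uses. */
    static void hash_buffer(const void *buf, size_t len, unsigned char *out)
    {
            git_hash_ctx ctx;

            the_hash_algo->init_fn(&ctx);
            the_hash_algo->update_fn(&ctx, buf, len);
            the_hash_algo->final_fn(out, &ctx); /* out must hold the_hash_algo->rawsz bytes */
    }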
{
struct object_id oid;
- if (write_sha1_file(obj_buf->buffer, obj_buf->size, type_name(obj->type), oid.hash) < 0)
+ if (write_object_file(obj_buf->buffer, obj_buf->size,
- typename(obj->type), &oid) < 0)
++ type_name(obj->type), &oid) < 0)
die("failed to write object %s", oid_to_hex(&obj->oid));
obj->flags |= FLAG_WRITTEN;
}
void *buf, unsigned long size)
{
if (!strict) {
- if (write_object_file(buf, size, typename(type),
- if (write_sha1_file(buf, size, type_name(type), obj_list[nr].oid.hash) < 0)
++ if (write_object_file(buf, size, type_name(type),
+ &obj_list[nr].oid) < 0)
die("failed to write object");
added_object(nr, type, buf, size);
free(buf);
obj_list[nr].obj = NULL;
} else if (type == OBJ_BLOB) {
struct blob *blob;
- if (write_object_file(buf, size, typename(type),
- if (write_sha1_file(buf, size, type_name(type), obj_list[nr].oid.hash) < 0)
++ if (write_object_file(buf, size, type_name(type),
+ &obj_list[nr].oid) < 0)
die("failed to write object");
added_object(nr, type, buf, size);
free(buf);
} else {
struct object *obj;
int eaten;
- hash_object_file(buf, size, typename(type), &obj_list[nr].oid);
- hash_sha1_file(buf, size, type_name(type), obj_list[nr].oid.hash);
++ hash_object_file(buf, size, type_name(type), &obj_list[nr].oid);
added_object(nr, type, buf, size);
obj = parse_object_buffer(&obj_list[nr].oid, type, size, buf,
&eaten);
if (!obj)
- die("invalid %s", typename(type));
+ die("invalid %s", type_name(type));
add_object_buffer(obj, buf, size);
obj->flags |= FLAG_OPEN;
obj_list[nr].obj = obj;
struct object_id base_oid;
if (type == OBJ_REF_DELTA) {
- hashcpy(base_oid.hash, fill(GIT_SHA1_RAWSZ));
- use(GIT_SHA1_RAWSZ);
+ hashcpy(base_oid.hash, fill(the_hash_algo->rawsz));
+ use(the_hash_algo->rawsz);
delta_data = get_data(delta_size);
if (dry_run || !delta_data) {
free(delta_data);
/* We don't take any non-flag arguments now.. Maybe some day */
usage(unpack_usage);
}
- git_SHA1_Init(&ctx);
+ the_hash_algo->init_fn(&ctx);
unpack_all();
- git_SHA1_Update(&ctx, buffer, offset);
- git_SHA1_Final(oid.hash, &ctx);
+ the_hash_algo->update_fn(&ctx, buffer, offset);
+ the_hash_algo->final_fn(oid.hash, &ctx);
if (strict)
write_rest();
- if (hashcmp(fill(GIT_SHA1_RAWSZ), oid.hash))
+ if (hashcmp(fill(the_hash_algo->rawsz), oid.hash))
die("final sha1 did not match");
- use(GIT_SHA1_RAWSZ);
+ use(the_hash_algo->rawsz);
/* Write the last part of the buffer to stdout */
while (len) {
unsigned plugged:1;
char *pack_tmp_name;
- struct sha1file *f;
+ struct hashfile *f;
off_t offset;
struct pack_idx_option pack_idx_opts;
unlink(state->pack_tmp_name);
goto clear_exit;
} else if (state->nr_written == 1) {
- sha1close(state->f, oid.hash, CSUM_FSYNC);
+ hashclose(state->f, oid.hash, CSUM_FSYNC);
} else {
- int fd = sha1close(state->f, oid.hash, 0);
+ int fd = hashclose(state->f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name,
state->nr_written, oid.hash,
state->offset);
* with a new pack.
*/
static int stream_to_pack(struct bulk_checkin_state *state,
- git_SHA_CTX *ctx, off_t *already_hashed_to,
+ git_hash_ctx *ctx, off_t *already_hashed_to,
int fd, size_t size, enum object_type type,
const char *path, unsigned flags)
{
if (rsize < hsize)
hsize = rsize;
if (hsize)
- git_SHA1_Update(ctx, ibuf, hsize);
+ the_hash_algo->update_fn(ctx, ibuf, hsize);
*already_hashed_to = offset;
}
s.next_in = ibuf;
return -1;
}
- sha1write(state->f, obuf, written);
+ hashwrite(state->f, obuf, written);
state->offset += written;
}
s.next_out = obuf;
unsigned flags)
{
off_t seekback, already_hashed_to;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
unsigned char obuf[16384];
unsigned header_len;
- struct sha1file_checkpoint checkpoint;
+ struct hashfile_checkpoint checkpoint;
struct pack_idx_entry *idx = NULL;
seekback = lseek(fd, 0, SEEK_CUR);
return error("cannot find the current offset");
header_len = xsnprintf((char *)obuf, sizeof(obuf), "%s %" PRIuMAX,
- typename(type), (uintmax_t)size) + 1;
+ type_name(type), (uintmax_t)size) + 1;
- git_SHA1_Init(&ctx);
- git_SHA1_Update(&ctx, obuf, header_len);
+ the_hash_algo->init_fn(&ctx);
+ the_hash_algo->update_fn(&ctx, obuf, header_len);
/* Note: idx is non-NULL when we are writing */
if ((flags & HASH_WRITE_OBJECT) != 0)
while (1) {
prepare_to_stream(state, flags);
if (idx) {
- sha1file_checkpoint(state->f, &checkpoint);
+ hashfile_checkpoint(state->f, &checkpoint);
idx->offset = state->offset;
crc32_begin(state->f);
}
*/
if (!idx)
die("BUG: should not happen");
- sha1file_truncate(state->f, &checkpoint);
+ hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
finish_bulk_checkin(state);
if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
return error("cannot seek back");
}
- git_SHA1_Final(result_sha1, &ctx);
+ the_hash_algo->final_fn(result_sha1, &ctx);
if (!idx)
return 0;
idx->crc32 = crc32_end(state->f);
if (already_written(state, result_sha1)) {
- sha1file_truncate(state->f, &checkpoint);
+ hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
free(idx);
} else {
#include "git-compat-util.h"
#include "strbuf.h"
#include "hashmap.h"
-#include "mru.h"
+#include "list.h"
#include "advice.h"
#include "gettext.h"
#include "convert.h"
#include "sha1-array.h"
#include "repository.h"
-#ifndef platform_SHA_CTX
-/*
- * platform's underlying implementation of SHA-1; could be OpenSSL,
- * blk_SHA, Apple CommonCrypto, etc... Note that including
- * SHA1_HEADER may have already defined platform_SHA_CTX for our
- * own implementations like block-sha1 and ppc-sha1, so we list
- * the default for OpenSSL compatible SHA-1 implementations here.
- */
-#define platform_SHA_CTX SHA_CTX
-#define platform_SHA1_Init SHA1_Init
-#define platform_SHA1_Update SHA1_Update
-#define platform_SHA1_Final SHA1_Final
-#endif
-
-#define git_SHA_CTX platform_SHA_CTX
-#define git_SHA1_Init platform_SHA1_Init
-#define git_SHA1_Update platform_SHA1_Update
-#define git_SHA1_Final platform_SHA1_Final
-
-#ifdef SHA1_MAX_BLOCK_SIZE
-#include "compat/sha1-chunked.h"
-#undef git_SHA1_Update
-#define git_SHA1_Update git_SHA1_Update_Chunked
-#endif
-
#include <zlib.h>
typedef struct git_zstream {
z_stream z;
struct split_index *split_index;
struct cache_time timestamp;
unsigned name_hash_initialized : 1,
- initialized : 1;
+ initialized : 1,
+ drop_cache_tree : 1;
struct hashmap name_hash;
struct hashmap dir_hash;
unsigned char sha1[20];
#define active_cache_tree (the_index.cache_tree)
#define read_cache() read_index(&the_index)
-#define read_cache_from(path) read_index_from(&the_index, (path))
+#define read_cache_from(path) read_index_from(&the_index, (path), (get_git_dir()))
#define read_cache_preload(pathspec) read_index_preload(&the_index, (pathspec))
#define is_cache_unborn() is_index_unborn(&the_index)
#define read_cache_unmerged() read_index_unmerged(&the_index)
extern int read_index_preload(struct index_state *, const struct pathspec *pathspec);
extern int do_read_index(struct index_state *istate, const char *path,
int must_exist); /* for testing only! */
-extern int read_index_from(struct index_state *, const char *path);
+extern int read_index_from(struct index_state *, const char *path,
+ const char *gitdir);
extern int is_index_unborn(struct index_state *);
extern int read_index_unmerged(struct index_state *);
#define GIT_REPO_VERSION 0
#define GIT_REPO_VERSION_READ 1
extern int repository_format_precious_objects;
+extern char *repository_format_partial_clone;
+extern const char *core_partial_clone_filter_default;
struct repository_format {
int version;
int precious_objects;
+ char *partial_clone; /* value of extensions.partialclone */
int is_bare;
int hash_algo;
char *work_tree;
#define TYPE_CHANGED 0x0040
/*
- * Return the name of the file in the local object database that would
- * be used to store a loose object with the specified sha1. The
- * return value is a pointer to a statically allocated buffer that is
- * overwritten each time the function is called.
+ * Put in `buf` the name of the file in the local object database that
+ * would be used to store a loose object with the specified sha1.
*/
-extern const char *sha1_file_name(const unsigned char *sha1);
+extern void sha1_file_name(struct strbuf *buf, const unsigned char *sha1);
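Callers of sha1_file_name() now own the buffer: allocate a strbuf, let the function fill it, then release it (the http-walker hunks further down follow exactly this shape). A small sketch with a hypothetical show_loose_path() helper:

    #include "cache.h"

    /* Print where a loose object with the given name would live on disk. */
    static void show_loose_path(const unsigned char *sha1)
    {
            struct strbuf path = STRBUF_INIT;

            sha1_file_name(&path, sha1);
            printf("%s\n", path.buf);
            strbuf_release(&path);
    }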
/*
* Return an abbreviated sha1 unique within this repository's object database.
static inline void oidclr(struct object_id *oid)
{
- hashclr(oid->hash);
+ memset(oid->hash, 0, GIT_MAX_RAWSZ);
}
"\xe6\x9d\xe2\x9b\xb2\xd1\xd6\x43\x4b\x8b" \
"\x29\xae\x77\x5a\xd8\xc2\xe4\x8c\x53\x91"
extern const struct object_id empty_blob_oid;
-#define EMPTY_BLOB_SHA1_BIN (empty_blob_oid.hash)
-
static inline int is_empty_blob_sha1(const unsigned char *sha1)
{
/* Read and unpack a sha1 file into memory, write memory to a sha1 file */
extern int sha1_object_info(const unsigned char *, unsigned long *);
-extern int hash_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1);
-extern int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *return_sha1);
-extern int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type, struct object_id *oid, unsigned flags);
-extern int pretend_sha1_file(void *, unsigned long, enum object_type, unsigned char *);
-extern int force_object_loose(const unsigned char *sha1, time_t mtime);
+
+extern int hash_object_file(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid);
+
+extern int write_object_file(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid);
+
+extern int hash_object_file_literally(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ unsigned flags);
+
+extern int pretend_object_file(void *, unsigned long, enum object_type,
+ struct object_id *oid);
+
+extern int force_object_loose(const struct object_id *oid, time_t mtime);
+
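The *_sha1_file() entry points are renamed to *_object_file() and traffic in struct object_id instead of raw hash buffers. A hedged sketch of storing a small blob with the new names (write_hello_blob() is hypothetical; error handling kept minimal):

    #include "cache.h"

    /* Store "hello\n" as a loose blob and print its object name. */
    static void write_hello_blob(void)
    {
            struct object_id oid;
            const char *data = "hello\n";

            if (write_object_file(data, strlen(data), "blob", &oid) < 0)
                    die("unable to write blob");
            printf("%s\n", oid_to_hex(&oid));
    }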
extern int git_open_cloexec(const char *name, int flags);
#define git_open(name) git_open_cloexec(name, O_RDONLY)
extern void *map_sha1_file(const unsigned char *sha1, unsigned long *size);
extern struct packed_git {
struct packed_git *next;
+ struct list_head mru;
struct pack_window *windows;
off_t pack_size;
const void *index_data;
unsigned pack_local:1,
pack_keep:1,
freshened:1,
- do_not_close:1;
+ do_not_close:1,
+ pack_promisor:1;
unsigned char sha1[20];
struct revindex_entry *revindex;
/* something like ".git/objects/pack/xxxxx.pack" */
} *packed_git;
/*
- * A most-recently-used ordered version of the packed_git list, which can
- * be iterated instead of packed_git (and marked via mru_mark).
+ * A most-recently-used ordered version of the packed_git list.
*/
-extern struct mru packed_git_mru;
+extern struct list_head packed_git_mru;
struct pack_entry {
off_t offset;
* usual "XXXXXX" trailer, and the resulting filename is written into the
* "template" buffer. Returns the open descriptor.
*/
- extern int odb_mkstemp(struct strbuf *template, const char *pattern);
+ extern int odb_mkstemp(struct strbuf *temp_filename, const char *pattern);
/*
* Create a pack .keep file named "name" (which should generally be the output
unsigned long *sizep;
off_t *disk_sizep;
unsigned char *delta_base_sha1;
- struct strbuf *typename;
+ struct strbuf *type_name;
void **contentp;
/* Response */
#define OBJECT_INFO_QUICK 8
extern int sha1_object_info_extended(const unsigned char *, struct object_info *, unsigned flags);
+/*
+ * Set this to 0 to prevent sha1_object_info_extended() from fetching missing
+ * blobs. This has an effect only if extensions.partialClone is set.
+ *
+ * Its default value is 1.
+ */
+extern int fetch_if_missing;
+
/* Dumb servers support */
extern int update_server_info(int);
/* Coalesce new lines into base by finding LCS */
static struct lline *coalesce_lines(struct lline *base, int *lenbase,
- struct lline *new, int lennew,
+ struct lline *newline, int lennew,
unsigned long parent, long flags)
{
int **lcs;
struct lline *baseend, *newend = NULL;
int i, j, origbaselen = *lenbase;
- if (new == NULL)
+ if (newline == NULL)
return base;
if (base == NULL) {
*lenbase = lennew;
- return new;
+ return newline;
}
/*
directions[0][j] = NEW;
for (i = 1, baseend = base; i < origbaselen + 1; i++) {
- for (j = 1, newend = new; j < lennew + 1; j++) {
+ for (j = 1, newend = newline; j < lennew + 1; j++) {
if (match_string_spaces(baseend->line, baseend->len,
newend->line, newend->len, flags)) {
lcs[i][j] = lcs[i - 1][j - 1] + 1;
if (lline->prev)
lline->prev->next = lline->next;
else
- new = lline->next;
+ newline = lline->next;
if (lline->next)
lline->next->prev = lline->prev;
}
}
- newend = new;
+ newend = newline;
while (newend) {
struct lline *lline = newend;
newend = newend->next;
if (is_file) {
struct strbuf buf = STRBUF_INIT;
- if (convert_to_git(&the_index, elem->path, result, len, &buf, safe_crlf)) {
+ if (convert_to_git(&the_index, elem->path, result, len, &buf, global_conv_flags_eol)) {
free(result);
result = strbuf_detach(&buf, &len);
result_size = len;
ALLOC_GROW(commit_graft, commit_graft_nr + 1, commit_graft_alloc);
commit_graft_nr++;
if (pos < commit_graft_nr)
- memmove(commit_graft + pos + 1,
- commit_graft + pos,
- (commit_graft_nr - pos - 1) *
- sizeof(*commit_graft));
+ MOVE_ARRAY(commit_graft + pos + 1, commit_graft + pos,
+ commit_graft_nr - pos - 1);
commit_graft[pos] = graft;
return 0;
}
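MOVE_ARRAY() (git-compat-util.h) wraps memmove() and derives the element size from the pointer type, so the sizeof arithmetic in the old code cannot silently go wrong. Roughly, for any element type (open_gap() is a hypothetical illustration; the caller must have room for nr + 1 entries):

    #include "git-compat-util.h"

    /* Shift entries [pos, nr) one slot right to make room at index "pos". */
    static void open_gap(int *arr, size_t nr, size_t pos)
    {
            MOVE_ARRAY(arr + pos + 1, arr + pos, nr - pos);
    }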
oid_to_hex(&commit->object.oid));
if (type != OBJ_COMMIT)
die("expected commit for %s, got %s",
- oid_to_hex(&commit->object.oid), typename(type));
+ oid_to_hex(&commit->object.oid), type_name(type));
if (sizep)
*sizep = size;
}
commit_list_insert(in->item, &ret);
for (i = in->next; i; i = i->next) {
- struct commit_list *new = NULL, *end = NULL;
+ struct commit_list *new_commits = NULL, *end = NULL;
for (j = ret; j; j = j->next) {
struct commit_list *bases;
bases = get_merge_bases(i->item, j->item);
- if (!new)
- new = bases;
+ if (!new_commits)
+ new_commits = bases;
else
end->next = bases;
for (k = bases; k; k = k->next)
end = k;
}
- ret = new;
+ ret = new_commits;
}
return ret;
}
}
}
-int commit_tree(const char *msg, size_t msg_len,
- const unsigned char *tree,
- struct commit_list *parents, unsigned char *ret,
+int commit_tree(const char *msg, size_t msg_len, const struct object_id *tree,
+ struct commit_list *parents, struct object_id *ret,
const char *author, const char *sign_commit)
{
struct commit_extra_header *extra = NULL, **tail = &extra;
"variable i18n.commitencoding to the encoding your project uses.\n");
int commit_tree_extended(const char *msg, size_t msg_len,
- const unsigned char *tree,
- struct commit_list *parents, unsigned char *ret,
+ const struct object_id *tree,
+ struct commit_list *parents, struct object_id *ret,
const char *author, const char *sign_commit,
struct commit_extra_header *extra)
{
int encoding_is_utf8;
struct strbuf buffer;
- assert_sha1_type(tree, OBJ_TREE);
+ assert_sha1_type(tree->hash, OBJ_TREE);
if (memchr(msg, '\0', msg_len))
return error("a NUL byte in commit log message not allowed.");
encoding_is_utf8 = is_encoding_utf8(git_commit_encoding);
strbuf_init(&buffer, 8192); /* should avoid reallocs for the headers */
- strbuf_addf(&buffer, "tree %s\n", sha1_to_hex(tree));
+ strbuf_addf(&buffer, "tree %s\n", oid_to_hex(tree));
/*
* NOTE! This ordering means that the same exact tree merged with a
goto out;
}
- result = write_sha1_file(buffer.buf, buffer.len, commit_type, ret);
+ result = write_object_file(buffer.buf, buffer.len, commit_type, ret);
out:
strbuf_release(&buffer);
return result;
struct commit_list **commit_list_append(struct commit *commit,
struct commit_list **next)
{
- struct commit_list *new = xmalloc(sizeof(struct commit_list));
- new->item = commit;
- *next = new;
- new->next = NULL;
- return &new->next;
+ struct commit_list *new_commit = xmalloc(sizeof(struct commit_list));
+ new_commit->item = commit;
+ *next = new_commit;
+ new_commit->next = NULL;
+ return &new_commit->next;
}
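commit_tree() and commit_tree_extended() now take the tree and return the new commit as struct object_id. A minimal sketch of creating a parentless commit from an existing tree, assuming the signature in the hunk above (make_root_commit() is hypothetical; NULL author and sign_commit fall back to the defaults):

    #include "cache.h"
    #include "commit.h"

    static int make_root_commit(const struct object_id *tree, struct object_id *result)
    {
            const char *msg = "initial commit\n";

            return commit_tree(msg, strlen(msg), tree,
                               NULL /* no parents */, result,
                               NULL /* default author */, NULL /* no signing */);
    }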
const char *find_commit_header(const char *msg, const char *key, size_t *out_len)
int diff_unmerged_stage = revs->max_count;
unsigned ce_option = ((option & DIFF_RACY_IS_MODIFIED)
? CE_MATCH_RACY_IS_DIRTY : 0);
+ uint64_t start = getnanotime();
diff_set_mnemonic_prefix(&revs->diffopt, "i/", "w/");
}
diffcore_std(&revs->diffopt);
diff_flush(&revs->diffopt);
+ trace_performance_since(start, "diff-files");
return 0;
}
}
static void show_new_file(struct rev_info *revs,
- const struct cache_entry *new,
+ const struct cache_entry *new_file,
int cached, int match_missing)
{
const struct object_id *oid;
* New file in the index: it might actually be different in
* the working tree.
*/
- if (get_stat_data(new, &oid, &mode, cached, match_missing,
+ if (get_stat_data(new_file, &oid, &mode, cached, match_missing,
&dirty_submodule, &revs->diffopt) < 0)
return;
- diff_index_show_file(revs, "+", new, oid, !is_null_oid(oid), mode, dirty_submodule);
+ diff_index_show_file(revs, "+", new_file, oid, !is_null_oid(oid), mode, dirty_submodule);
}
static int show_modified(struct rev_info *revs,
- const struct cache_entry *old,
- const struct cache_entry *new,
+ const struct cache_entry *old_entry,
+ const struct cache_entry *new_entry,
int report_missing,
int cached, int match_missing)
{
const struct object_id *oid;
unsigned dirty_submodule = 0;
- if (get_stat_data(new, &oid, &mode, cached, match_missing,
+ if (get_stat_data(new_entry, &oid, &mode, cached, match_missing,
&dirty_submodule, &revs->diffopt) < 0) {
if (report_missing)
- diff_index_show_file(revs, "-", old,
- &old->oid, 1, old->ce_mode,
+ diff_index_show_file(revs, "-", old_entry,
+ &old_entry->oid, 1, old_entry->ce_mode,
0);
return -1;
}
if (revs->combine_merges && !cached &&
- (oidcmp(oid, &old->oid) || oidcmp(&old->oid, &new->oid))) {
+ (oidcmp(oid, &old_entry->oid) || oidcmp(&old_entry->oid, &new_entry->oid))) {
struct combine_diff_path *p;
- int pathlen = ce_namelen(new);
+ int pathlen = ce_namelen(new_entry);
p = xmalloc(combine_diff_path_size(2, pathlen));
p->path = (char *) &p->parent[2];
p->next = NULL;
- memcpy(p->path, new->name, pathlen);
+ memcpy(p->path, new_entry->name, pathlen);
p->path[pathlen] = 0;
p->mode = mode;
oidclr(&p->oid);
memset(p->parent, 0, 2 * sizeof(struct combine_diff_parent));
p->parent[0].status = DIFF_STATUS_MODIFIED;
- p->parent[0].mode = new->ce_mode;
- oidcpy(&p->parent[0].oid, &new->oid);
+ p->parent[0].mode = new_entry->ce_mode;
+ oidcpy(&p->parent[0].oid, &new_entry->oid);
p->parent[1].status = DIFF_STATUS_MODIFIED;
- p->parent[1].mode = old->ce_mode;
- oidcpy(&p->parent[1].oid, &old->oid);
+ p->parent[1].mode = old_entry->ce_mode;
+ oidcpy(&p->parent[1].oid, &old_entry->oid);
show_combined_diff(p, 2, revs->dense_combined_merges, revs);
free(p);
return 0;
}
- oldmode = old->ce_mode;
- if (mode == oldmode && !oidcmp(oid, &old->oid) && !dirty_submodule &&
+ oldmode = old_entry->ce_mode;
+ if (mode == oldmode && !oidcmp(oid, &old_entry->oid) && !dirty_submodule &&
!revs->diffopt.flags.find_copies_harder)
return 0;
diff_change(&revs->diffopt, oldmode, mode,
- &old->oid, oid, 1, !is_null_oid(oid),
- old->name, 0, dirty_submodule);
+ &old_entry->oid, oid, 1, !is_null_oid(oid),
+ old_entry->name, 0, dirty_submodule);
return 0;
}
int run_diff_index(struct rev_info *revs, int cached)
{
struct object_array_entry *ent;
+ uint64_t start = getnanotime();
ent = revs->pending.objects;
if (diff_cache(revs, &ent->item->oid, ent->name, cached))
diffcore_fix_diff_index(&revs->diffopt);
diffcore_std(&revs->diffopt);
diff_flush(&revs->diffopt);
+ trace_performance_since(start, "diff-index");
return 0;
}
struct diff_words_style {
enum diff_words_type type;
- struct diff_words_style_elem new, old, ctx;
+ struct diff_words_style_elem new_word, old_word, ctx;
const char *newline;
};
}
if (minus_begin != minus_end) {
fn_out_diff_words_write_helper(diff_words->opt,
- &style->old, style->newline,
+ &style->old_word, style->newline,
minus_end - minus_begin, minus_begin);
}
if (plus_begin != plus_end) {
fn_out_diff_words_write_helper(diff_words->opt,
- &style->new, style->newline,
+ &style->new_word, style->newline,
plus_end - plus_begin, plus_begin);
}
emit_diff_symbol(diff_words->opt, DIFF_SYMBOL_WORD_DIFF,
line_prefix, strlen(line_prefix), 0);
fn_out_diff_words_write_helper(diff_words->opt,
- &style->old, style->newline,
+ &style->old_word, style->newline,
diff_words->minus.text.size,
diff_words->minus.text.ptr);
diff_words->minus.text.size = 0;
}
if (want_color(o->use_color)) {
struct diff_words_style *st = ecbdata->diff_words->style;
- st->old.color = diff_get_color_opt(o, DIFF_FILE_OLD);
- st->new.color = diff_get_color_opt(o, DIFF_FILE_NEW);
+ st->old_word.color = diff_get_color_opt(o, DIFF_FILE_OLD);
+ st->new_word.color = diff_get_color_opt(o, DIFF_FILE_NEW);
st->ctx.color = diff_get_color_opt(o, DIFF_CONTEXT);
}
}
static char *pprint_rename(const char *a, const char *b)
{
- const char *old = a;
- const char *new = b;
+ const char *old_name = a;
+ const char *new_name = b;
struct strbuf name = STRBUF_INIT;
int pfx_length, sfx_length;
int pfx_adjust_for_slash;
/* Find common prefix */
pfx_length = 0;
- while (*old && *new && *old == *new) {
- if (*old == '/')
- pfx_length = old - a + 1;
- old++;
- new++;
+ while (*old_name && *new_name && *old_name == *new_name) {
+ if (*old_name == '/')
+ pfx_length = old_name - a + 1;
+ old_name++;
+ new_name++;
}
/* Find common suffix */
- old = a + len_a;
- new = b + len_b;
+ old_name = a + len_a;
+ new_name = b + len_b;
sfx_length = 0;
/*
* If there is a common prefix, it must end in a slash. In
* underrun the input strings.
*/
pfx_adjust_for_slash = (pfx_length ? 1 : 0);
- while (a + pfx_length - pfx_adjust_for_slash <= old &&
- b + pfx_length - pfx_adjust_for_slash <= new &&
- *old == *new) {
- if (*old == '/')
- sfx_length = len_a - (old - a);
- old--;
- new--;
+ while (a + pfx_length - pfx_adjust_for_slash <= old_name &&
+ b + pfx_length - pfx_adjust_for_slash <= new_name &&
+ *old_name == *new_name) {
+ if (*old_name == '/')
+ sfx_length = len_a - (old_name - a);
+ old_name--;
+ new_name--;
}
/*
static long gather_dirstat(struct diff_options *opt, struct dirstat_dir *dir,
unsigned long changed, const char *base, int baselen)
{
- unsigned long this_dir = 0;
+ unsigned long sum_changes = 0;
unsigned int sources = 0;
const char *line_prefix = diff_line_prefix(opt);
while (dir->nr) {
struct dirstat_file *f = dir->files;
int namelen = strlen(f->name);
- unsigned long this;
+ unsigned long changes;
char *slash;
if (namelen < baselen)
slash = strchr(f->name + baselen, '/');
if (slash) {
int newbaselen = slash + 1 - f->name;
- this = gather_dirstat(opt, dir, changed, f->name, newbaselen);
+ changes = gather_dirstat(opt, dir, changed, f->name, newbaselen);
sources++;
} else {
- this = f->changed;
+ changes = f->changed;
dir->files++;
dir->nr--;
sources += 2;
}
- this_dir += this;
+ sum_changes += changes;
}
/*
* under this directory (sources == 1).
*/
if (baselen && sources != 1) {
- if (this_dir) {
- int permille = this_dir * 1000 / changed;
+ if (sum_changes) {
+ int permille = sum_changes * 1000 / changed;
if (permille >= dir->permille) {
fprintf(opt->file, "%s%4d.%01d%% %.*s\n", line_prefix,
permille / 10, permille % 10, baselen, base);
}
}
}
- return this_dir;
+ return sum_changes;
}
static int dirstat_compare(const void *_a, const void *_b)
{
int size_only = flags & CHECK_SIZE_ONLY;
int err = 0;
+ int conv_flags = global_conv_flags_eol;
/*
* demote FAIL to WARN to allow inspecting the situation
* instead of refusing.
*/
- enum safe_crlf crlf_warn = (safe_crlf == SAFE_CRLF_FAIL
- ? SAFE_CRLF_WARN
- : safe_crlf);
+ if (conv_flags & CONV_EOL_RNDTRP_DIE)
+ conv_flags = CONV_EOL_RNDTRP_WARN;
if (!DIFF_FILE_VALID(s))
die("internal error: asking to populate invalid file.");
/*
* Convert from working tree format to canonical git format
*/
- if (convert_to_git(&the_index, s->path, s->data, s->size, &buf, crlf_warn)) {
+ if (convert_to_git(&the_index, s->path, s->data, s->size, &buf, conv_flags)) {
size_t size = 0;
munmap(s->data, s->size);
s->should_munmap = 0;
int mode)
{
struct strbuf buf = STRBUF_INIT;
- struct strbuf template = STRBUF_INIT;
+ struct strbuf tempfile = STRBUF_INIT;
char *path_dup = xstrdup(path);
const char *base = basename(path_dup);
/* Generate "XXXXXX_basename.ext" */
- strbuf_addstr(&template, "XXXXXX_");
- strbuf_addstr(&template, base);
+ strbuf_addstr(&tempfile, "XXXXXX_");
+ strbuf_addstr(&tempfile, base);
- temp->tempfile = mks_tempfile_ts(template.buf, strlen(base) + 1);
+ temp->tempfile = mks_tempfile_ts(tempfile.buf, strlen(base) + 1);
if (!temp->tempfile)
die_errno("unable to create temp-file");
if (convert_to_working_tree(path,
oid_to_hex_r(temp->hex, oid);
xsnprintf(temp->mode, sizeof(temp->mode), "%06o", mode);
strbuf_release(&buf);
- strbuf_release(&template);
+ strbuf_release(&tempfile);
free(path_dup);
}
void diff_warn_rename_limit(const char *varname, int needed, int degraded_cc)
{
+ fflush(stdout);
if (degraded_cc)
warning(_(degrade_cc_to_c_warning));
else if (needed)
int warn_on_object_refname_ambiguity = 1;
int ref_paranoia = -1;
int repository_format_precious_objects;
+char *repository_format_partial_clone;
+const char *core_partial_clone_filter_default;
const char *git_commit_encoding;
const char *git_log_output_encoding;
const char *apply_default_whitespace;
int check_replace_refs = 1;
char *git_replace_ref_base;
enum eol core_eol = EOL_UNSET;
-enum safe_crlf safe_crlf = SAFE_CRLF_WARN;
+int global_conv_flags_eol = CONV_EOL_RNDTRP_WARN;
unsigned whitespace_rule_cfg = WS_DEFAULT_RULE;
enum branch_track git_branch_track = BRANCH_TRACK_REMOTE;
enum rebase_setup_type autorebase = AUTOREBASE_NEVER;
/* This is set by setup_git_dir_gently() and/or git_default_config() */
char *git_work_tree_cfg;
- static char *namespace;
+ static char *git_namespace;
static const char *super_prefix;
free(git_replace_ref_base);
git_replace_ref_base = xstrdup(replace_ref_base ? replace_ref_base
: "refs/replace/");
- free(namespace);
- namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT));
+ free(git_namespace);
+ git_namespace = expand_namespace(getenv(GIT_NAMESPACE_ENVIRONMENT));
shallow_file = getenv(GIT_SHALLOW_FILE_ENVIRONMENT);
if (shallow_file)
set_alternate_shallow_file(shallow_file, 0);
const char *get_git_namespace(void)
{
- if (!namespace)
+ if (!git_namespace)
BUG("git environment hasn't been setup");
- return namespace;
+ return git_namespace;
}
const char *strip_namespace(const char *namespaced_ref)
return the_repository->objectdir;
}
- int odb_mkstemp(struct strbuf *template, const char *pattern)
+ int odb_mkstemp(struct strbuf *temp_filename, const char *pattern)
{
int fd;
/*
* restrictive except to remove write permission.
*/
int mode = 0444;
- git_path_buf(template, "objects/%s", pattern);
- fd = git_mkstemp_mode(template->buf, mode);
+ git_path_buf(temp_filename, "objects/%s", pattern);
+ fd = git_mkstemp_mode(temp_filename->buf, mode);
if (0 <= fd)
return fd;
/* slow path */
- /* some mkstemp implementations erase template on failure */
- git_path_buf(template, "objects/%s", pattern);
- safe_create_leading_directories(template->buf);
- return xmkstemp_mode(template->buf, mode);
+ /* some mkstemp implementations erase temp_filename on failure */
+ git_path_buf(temp_filename, "objects/%s", pattern);
+ safe_create_leading_directories(temp_filename->buf);
+ return xmkstemp_mode(temp_filename->buf, mode);
}
int odb_pack_keep(const char *name)
/* The .pack file being generated */
static struct pack_idx_option pack_idx_opts;
static unsigned int pack_id;
-static struct sha1file *pack_file;
+static struct hashfile *pack_file;
static struct packed_git *pack_data;
static struct packed_git **all_packs;
static off_t pack_size;
p->pack_fd = pack_fd;
p->do_not_close = 1;
- pack_file = sha1fd(pack_fd, p->pack_name);
+ pack_file = hashfd(pack_fd, p->pack_name);
hdr.hdr_signature = htonl(PACK_SIGNATURE);
hdr.hdr_version = htonl(2);
hdr.hdr_entries = 0;
- sha1write(pack_file, &hdr, sizeof(hdr));
+ hashwrite(pack_file, &hdr, sizeof(hdr));
pack_data = p;
pack_size = sizeof(hdr);
struct tag *t;
close_pack_windows(pack_data);
- sha1close(pack_file, cur_pack_oid.hash, 0);
+ hashclose(pack_file, cur_pack_oid.hash, 0);
fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1,
pack_data->pack_name, object_count,
cur_pack_oid.hash, pack_size);
unsigned char hdr[96];
struct object_id oid;
unsigned long hdrlen, deltalen;
- git_SHA_CTX c;
+ git_hash_ctx c;
git_zstream s;
hdrlen = xsnprintf((char *)hdr, sizeof(hdr), "%s %lu",
- typename(type), (unsigned long)dat->len) + 1;
+ type_name(type), (unsigned long)dat->len) + 1;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, hdrlen);
- git_SHA1_Update(&c, dat->buf, dat->len);
- git_SHA1_Final(oid.hash, &c);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
+ the_hash_algo->update_fn(&c, dat->buf, dat->len);
+ the_hash_algo->final_fn(oid.hash, &c);
if (oidout)
oidcpy(oidout, &oid);
return 1;
}
- if (last && last->data.buf && last->depth < max_depth && dat->len > 20) {
+ if (last && last->data.buf && last->depth < max_depth
+ && dat->len > the_hash_algo->rawsz) {
+
delta_count_attempts_by_type[type]++;
delta = diff_delta(last->data.buf, last->data.len,
dat->buf, dat->len,
- &deltalen, dat->len - 20);
+ &deltalen, dat->len - the_hash_algo->rawsz);
} else
delta = NULL;
hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
OBJ_OFS_DELTA, deltalen);
- sha1write(pack_file, hdr, hdrlen);
+ hashwrite(pack_file, hdr, hdrlen);
pack_size += hdrlen;
hdr[pos] = ofs & 127;
while (ofs >>= 7)
hdr[--pos] = 128 | (--ofs & 127);
- sha1write(pack_file, hdr + pos, sizeof(hdr) - pos);
+ hashwrite(pack_file, hdr + pos, sizeof(hdr) - pos);
pack_size += sizeof(hdr) - pos;
} else {
e->depth = 0;
hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
type, dat->len);
- sha1write(pack_file, hdr, hdrlen);
+ hashwrite(pack_file, hdr, hdrlen);
pack_size += hdrlen;
}
- sha1write(pack_file, out, s.total_out);
+ hashwrite(pack_file, out, s.total_out);
pack_size += s.total_out;
e->idx.crc32 = crc32_end(pack_file);
return 0;
}
-static void truncate_pack(struct sha1file_checkpoint *checkpoint)
+static void truncate_pack(struct hashfile_checkpoint *checkpoint)
{
- if (sha1file_truncate(pack_file, checkpoint))
+ if (hashfile_truncate(pack_file, checkpoint))
die_errno("cannot truncate pack to skip duplicate");
pack_size = checkpoint->offset;
}
struct object_id oid;
unsigned long hdrlen;
off_t offset;
- git_SHA_CTX c;
+ git_hash_ctx c;
git_zstream s;
- struct sha1file_checkpoint checkpoint;
+ struct hashfile_checkpoint checkpoint;
int status = Z_OK;
/* Determine if we should auto-checkpoint. */
|| (pack_size + 60 + len) < pack_size)
cycle_packfile();
- sha1file_checkpoint(pack_file, &checkpoint);
+ hashfile_checkpoint(pack_file, &checkpoint);
offset = checkpoint.offset;
hdrlen = xsnprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, out_buf, hdrlen);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, out_buf, hdrlen);
crc32_begin(pack_file);
if (!n && feof(stdin))
die("EOF in data (%" PRIuMAX " bytes remaining)", len);
- git_SHA1_Update(&c, in_buf, n);
+ the_hash_algo->update_fn(&c, in_buf, n);
s.next_in = in_buf;
s.avail_in = n;
len -= n;
if (!s.avail_out || status == Z_STREAM_END) {
size_t n = s.next_out - out_buf;
- sha1write(pack_file, out_buf, n);
+ hashwrite(pack_file, out_buf, n);
pack_size += n;
s.next_out = out_buf;
s.avail_out = out_sz;
}
}
git_deflate_end(&s);
- git_SHA1_Final(oid.hash, &c);
+ the_hash_algo->final_fn(oid.hash, &c);
if (oidout)
oidcpy(oidout, &oid);
{
enum object_type type;
struct packed_git *p = all_packs[oe->pack_id];
- if (p == pack_data && p->pack_size < (pack_size + 20)) {
+ if (p == pack_data && p->pack_size < (pack_size + the_hash_algo->rawsz)) {
/* The object is stored in the packfile we are writing to
* and we have modified it since the last time we scanned
* back to read a previously written object. If an old
- * window covered [p->pack_size, p->pack_size + 20) its
+ * window covered [p->pack_size, p->pack_size + rawsz) its
* data is stale and is not valid. Closing all windows
* and updating the packfile length ensures we can read
* the newly written data.
*/
close_pack_windows(p);
- sha1flush(pack_file);
+ hashflush(pack_file);
- /* We have to offer 20 bytes additional on the end of
+ /* We have to offer rawsz bytes additional on the end of
* the packfile as the core unpacker code assumes the
* footer is present at the file end and must promise
- * at least 20 bytes within any window it maps. But
+ * at least rawsz bytes within any window it maps. But
* we don't actually create the footer here.
*/
- p->pack_size = pack_size + 20;
+ p->pack_size = pack_size + the_hash_algo->rawsz;
}
return unpack_entry(p, oe->idx.offset, &type, sizep);
}
unsigned char fanout, char *path)
{
unsigned int i = 0, j = 0;
- if (fanout >= 20)
+ if (fanout >= the_hash_algo->rawsz)
die("Too large fanout (%u)", fanout);
while (fanout) {
path[i++] = hex_sha1[j++];
path[i++] = '/';
fanout--;
}
- memcpy(path + i, hex_sha1 + j, GIT_SHA1_HEXSZ - j);
- path[i + GIT_SHA1_HEXSZ - j] = '\0';
+ memcpy(path + i, hex_sha1 + j, the_hash_algo->hexsz - j);
+ path[i + the_hash_algo->hexsz - j] = '\0';
}
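construct_path_with_fanout() keeps its layout and merely sizes things off the_hash_algo: each fanout level peels two hex digits into a directory component, so with fanout 2 the name "1234abcd..." becomes "12/34/abcd...". An illustrative (not the in-tree) re-implementation on top of strbuf:

    #include "cache.h"

    /* Split "hex" into "aa/bb/rest" with "fanout" two-digit directory levels. */
    static void fanout_path(struct strbuf *out, const char *hex, unsigned char fanout)
    {
            while (fanout--) {
                    strbuf_add(out, hex, 2);
                    strbuf_addch(out, '/');
                    hex += 2;
            }
            strbuf_addstr(out, hex);
    }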
static uintmax_t do_change_note_fanout(
else if (oe) {
if (oe->type != OBJ_COMMIT)
die("Not a commit (actually a %s): %s",
- typename(oe->type), command_buf.buf);
+ type_name(oe->type), command_buf.buf);
}
/*
* Accept the sha1 without checking; it is expected to be in
command_buf.buf);
if (type != expected)
die("Not a %s (actually a %s): %s",
- typename(expected), typename(type),
+ type_name(expected), type_name(type),
command_buf.buf);
}
} else if (oe) {
if (oe->type != OBJ_BLOB)
die("Not a blob (actually a %s): %s",
- typename(oe->type), command_buf.buf);
+ type_name(oe->type), command_buf.buf);
} else if (!is_null_oid(&oid)) {
enum object_type type = sha1_object_info(oid.hash, NULL);
if (type < 0)
die("Blob not found: %s", command_buf.buf);
if (type != OBJ_BLOB)
die("Not a blob (actually a %s): %s",
- typename(type), command_buf.buf);
+ type_name(type), command_buf.buf);
}
construct_path_with_fanout(oid_to_hex(&commit_oid), *old_fanout, path);
"object %s\n"
"type %s\n"
"tag %s\n",
- oid_to_hex(&oid), typename(type), t->name);
+ oid_to_hex(&oid), type_name(type), t->name);
if (tagger)
strbuf_addf(&new_data,
"tagger %s\n", tagger);
die("Can't read object %s", oid_to_hex(oid));
if (type != OBJ_BLOB)
die("Object %s is a %s but a blob was expected.",
- oid_to_hex(oid), typename(type));
+ oid_to_hex(oid), type_name(type));
strbuf_reset(&line);
strbuf_addf(&line, "%s %s %lu\n", oid_to_hex(oid),
- typename(type), size);
+ type_name(type), size);
cat_blob_write(line.buf, line.len);
strbuf_release(&line);
cat_blob_write(buf, size);
git_zstream stream;
unpacked = read_sha1_file(request->obj->oid.hash, &type, &len);
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1;
+ hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
/* Set it up */
git_deflate_init(&stream, zlib_compression_level);
lock->timeout = -1;
}
XML_ParserFree(parser);
+ } else {
+ fprintf(stderr,
+ "error: curl result=%d, HTTP code=%ld\n",
+ results.curl_result, results.http_code);
}
} else {
fprintf(stderr, "Unable to start LOCK request\n");
#include "transport.h"
#include "packfile.h"
#include "protocol.h"
+#include "string-list.h"
static struct trace_key trace_curl = TRACE_KEY_INIT(CURL);
+static int trace_curl_data = 1;
+static struct string_list cookies_to_redact = STRING_LIST_INIT_DUP;
#if LIBCURL_VERSION_NUM >= 0x070a08
long int git_curl_ipresolve = CURL_IPRESOLVE_WHATEVER;
#else
/* Everything else is opaque and possibly sensitive */
strbuf_setlen(header, sensitive_header - header->buf);
strbuf_addstr(header, " <redacted>");
+ } else if (cookies_to_redact.nr &&
+ skip_prefix(header->buf, "Cookie:", &sensitive_header)) {
+ struct strbuf redacted_header = STRBUF_INIT;
+ char *cookie;
+
+ while (isspace(*sensitive_header))
+ sensitive_header++;
+
+ /*
+ * The contents of header starting from sensitive_header will
+ * subsequently be overridden, so it is fine to mutate this
+ * string (hence the assignment to "char *").
+ */
+ cookie = (char *) sensitive_header;
+
+ while (cookie) {
+ char *equals;
+ char *semicolon = strstr(cookie, "; ");
+ if (semicolon)
+ *semicolon = 0;
+ equals = strchrnul(cookie, '=');
+ if (!equals) {
+ /* invalid cookie, just append and continue */
+ strbuf_addstr(&redacted_header, cookie);
+ continue;
+ }
+ *equals = 0; /* temporarily set to NUL for lookup */
+ if (string_list_lookup(&cookies_to_redact, cookie)) {
+ strbuf_addstr(&redacted_header, cookie);
+ strbuf_addstr(&redacted_header, "=<redacted>");
+ } else {
+ *equals = '=';
+ strbuf_addstr(&redacted_header, cookie);
+ }
+ if (semicolon) {
+ /*
+ * There are more cookies. (Or, for some
+ * reason, the input string ends in "; ".)
+ */
+ strbuf_addstr(&redacted_header, "; ");
+ cookie = semicolon + strlen("; ");
+ } else {
+ cookie = NULL;
+ }
+ }
+
+ strbuf_setlen(header, sensitive_header - header->buf);
+ strbuf_addbuf(header, &redacted_header);
}
}
curl_dump_header(text, (unsigned char *)data, size, DO_FILTER);
break;
case CURLINFO_DATA_OUT:
- text = "=> Send data";
- curl_dump_data(text, (unsigned char *)data, size);
+ if (trace_curl_data) {
+ text = "=> Send data";
+ curl_dump_data(text, (unsigned char *)data, size);
+ }
break;
case CURLINFO_SSL_DATA_OUT:
- text = "=> Send SSL data";
- curl_dump_data(text, (unsigned char *)data, size);
+ if (trace_curl_data) {
+ text = "=> Send SSL data";
+ curl_dump_data(text, (unsigned char *)data, size);
+ }
break;
case CURLINFO_HEADER_IN:
text = "<= Recv header";
curl_dump_header(text, (unsigned char *)data, size, NO_FILTER);
break;
case CURLINFO_DATA_IN:
- text = "<= Recv data";
- curl_dump_data(text, (unsigned char *)data, size);
+ if (trace_curl_data) {
+ text = "<= Recv data";
+ curl_dump_data(text, (unsigned char *)data, size);
+ }
break;
case CURLINFO_SSL_DATA_IN:
- text = "<= Recv SSL data";
- curl_dump_data(text, (unsigned char *)data, size);
+ if (trace_curl_data) {
+ text = "<= Recv SSL data";
+ curl_dump_data(text, (unsigned char *)data, size);
+ }
break;
default: /* we ignore unknown types by default */
if (getenv("GIT_CURL_VERBOSE"))
curl_easy_setopt(result, CURLOPT_VERBOSE, 1L);
setup_curl_trace(result);
+ if (getenv("GIT_TRACE_CURL_NO_DATA"))
+ trace_curl_data = 0;
+ if (getenv("GIT_REDACT_COOKIES")) {
+ string_list_split(&cookies_to_redact,
+ getenv("GIT_REDACT_COOKIES"), ',', -1);
+ string_list_sort(&cookies_to_redact);
+ }
curl_easy_setopt(result, CURLOPT_USERAGENT,
user_agent ? user_agent : git_user_agent());
void add_fill_function(void *data, int (*fill)(void *))
{
- struct fill_chain *new = xmalloc(sizeof(*new));
+ struct fill_chain *new_fill = xmalloc(sizeof(*new_fill));
struct fill_chain **linkp = &fill_cfg;
- new->data = data;
- new->fill = fill;
- new->next = NULL;
+ new_fill->data = data;
+ new_fill->fill = fill;
+ new_fill->next = NULL;
while (*linkp)
linkp = &(*linkp)->next;
- *linkp = new;
+ *linkp = new_fill;
}
void fill_active_slots(void)
unsigned char *sha1)
{
char *hex = sha1_to_hex(sha1);
- const char *filename;
+ struct strbuf filename = STRBUF_INIT;
char prevfile[PATH_MAX];
int prevlocal;
char prev_buf[PREV_BUF_SIZE];
hashcpy(freq->sha1, sha1);
freq->localfile = -1;
- filename = sha1_file_name(sha1);
+ sha1_file_name(&filename, sha1);
snprintf(freq->tmpfile, sizeof(freq->tmpfile),
- "%s.temp", filename);
+ "%s.temp", filename.buf);
- snprintf(prevfile, sizeof(prevfile), "%s.prev", filename);
+ snprintf(prevfile, sizeof(prevfile), "%s.prev", filename.buf);
unlink_or_warn(prevfile);
rename(freq->tmpfile, prevfile);
unlink_or_warn(freq->tmpfile);
+ strbuf_release(&filename);
if (freq->localfile != -1)
error("fd leakage in start: %d", freq->localfile);
int finish_http_object_request(struct http_object_request *freq)
{
struct stat st;
+ struct strbuf filename = STRBUF_INIT;
close(freq->localfile);
freq->localfile = -1;
unlink_or_warn(freq->tmpfile);
return -1;
}
- freq->rename =
- finalize_object_file(freq->tmpfile, sha1_file_name(freq->sha1));
+
+ sha1_file_name(&filename, freq->sha1);
+ freq->rename = finalize_object_file(freq->tmpfile, filename.buf);
+ strbuf_release(&filename);
return freq->rename;
}
int status, nth;
size_t payload_size, gpg_message_offset;
- hash_object_file(extra->value, extra->len, typename(OBJ_TAG), &oid);
- hash_sha1_file(extra->value, extra->len, type_name(OBJ_TAG), oid.hash);
++ hash_object_file(extra->value, extra->len, type_name(OBJ_TAG), &oid);
tag = lookup_tag(&oid);
if (!tag)
return; /* error message already given */
show_mergetag(opt, commit);
}
- if (!get_cached_commit_buffer(commit, NULL))
- return;
-
if (opt->show_notes) {
int raw;
struct strbuf notebuf = STRBUF_INIT;
"tag", /* OBJ_TAG = 4 */
};
- const char *typename(unsigned int type)
+ const char *type_name(unsigned int type)
{
if (type >= ARRAY_SIZE(object_type_strings))
return NULL;
if (!quiet)
error("object %s is a %s, not a %s",
oid_to_hex(&obj->oid),
- typename(obj->type), typename(type));
+ type_name(obj->type), type_name(type));
return NULL;
}
}
if (obj && obj->parsed)
return obj;
- if ((obj && obj->type == OBJ_BLOB) ||
+ if ((obj && obj->type == OBJ_BLOB && has_object_file(oid)) ||
(!obj && has_object_file(oid) &&
sha1_object_info(oid->hash, NULL) == OBJ_BLOB)) {
if (check_sha1_signature(repl, NULL, 0, NULL) < 0) {
buffer = read_sha1_file(oid->hash, &type, &size);
if (buffer) {
- if (check_sha1_signature(repl, buffer, size, typename(type)) < 0) {
+ if (check_sha1_signature(repl, buffer, size, type_name(type)) < 0) {
free(buffer);
error("sha1 mismatch %s", sha1_to_hex(repl));
return NULL;
} while (len);
index_crc = p->index_data;
- index_crc += 2 + 256 + p->num_objects * (20/4) + nr;
+ index_crc += 2 + 256 + p->num_objects * (the_hash_algo->rawsz/4) + nr;
return data_crc != ntohl(*index_crc);
}
{
off_t index_size = p->index_size;
const unsigned char *index_base = p->index_data;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
unsigned char hash[GIT_MAX_RAWSZ], *pack_sig;
off_t offset = 0, pack_sig_ofs = 0;
uint32_t nr_objects, i;
if (!is_pack_valid(p))
return error("packfile %s cannot be accessed", p->pack_name);
- git_SHA1_Init(&ctx);
+ the_hash_algo->init_fn(&ctx);
do {
unsigned long remaining;
unsigned char *in = use_pack(p, w_curs, offset, &remaining);
offset += remaining;
if (!pack_sig_ofs)
- pack_sig_ofs = p->pack_size - 20;
+ pack_sig_ofs = p->pack_size - the_hash_algo->rawsz;
if (offset > pack_sig_ofs)
remaining -= (unsigned int)(offset - pack_sig_ofs);
- git_SHA1_Update(&ctx, in, remaining);
+ the_hash_algo->update_fn(&ctx, in, remaining);
} while (offset < pack_sig_ofs);
- git_SHA1_Final(hash, &ctx);
+ the_hash_algo->final_fn(hash, &ctx);
pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL);
if (hashcmp(hash, pack_sig))
- err = error("%s SHA1 checksum mismatch",
+ err = error("%s pack checksum mismatch",
p->pack_name);
- if (hashcmp(index_base + index_size - 40, pack_sig))
- err = error("%s SHA1 does not match its index",
+ if (hashcmp(index_base + index_size - the_hash_algo->hexsz, pack_sig))
+ err = error("%s pack checksum does not match its index",
p->pack_name);
unuse_pack(w_curs);
err = error("cannot unpack %s from %s at offset %"PRIuMAX"",
oid_to_hex(entries[i].oid.oid), p->pack_name,
(uintmax_t)entries[i].offset);
- else if (check_sha1_signature(entries[i].oid.hash, data, size, typename(type)))
+ else if (check_sha1_signature(entries[i].oid.hash, data, size, type_name(type)))
err = error("packed %s from %s is corrupt",
oid_to_hex(entries[i].oid.oid), p->pack_name);
else if (fn) {
{
off_t index_size;
const unsigned char *index_base;
- git_SHA_CTX ctx;
- unsigned char sha1[20];
+ git_hash_ctx ctx;
+ unsigned char hash[GIT_MAX_RAWSZ];
int err = 0;
if (open_pack_index(p))
index_base = p->index_data;
/* Verify SHA1 sum of the index file */
- git_SHA1_Init(&ctx);
- git_SHA1_Update(&ctx, index_base, (unsigned int)(index_size - 20));
- git_SHA1_Final(sha1, &ctx);
- if (hashcmp(sha1, index_base + index_size - 20))
- err = error("Packfile index for %s SHA1 mismatch",
+ the_hash_algo->init_fn(&ctx);
+ the_hash_algo->update_fn(&ctx, index_base, (unsigned int)(index_size - the_hash_algo->rawsz));
+ the_hash_algo->final_fn(hash, &ctx);
+ if (hashcmp(hash, index_base + index_size - the_hash_algo->rawsz))
+ err = error("Packfile index for %s hash mismatch",
p->pack_name);
return err;
}
#include "cache.h"
-#include "mru.h"
+#include "list.h"
#include "pack.h"
#include "dir.h"
#include "mergesort.h"
#include "list.h"
#include "streaming.h"
#include "sha1-lookup.h"
+#include "commit.h"
+#include "object.h"
+#include "tag.h"
+#include "tree-walk.h"
+#include "tree.h"
char *odb_pack_name(struct strbuf *buf,
const unsigned char *sha1,
static size_t peak_pack_mapped;
static size_t pack_mapped;
struct packed_git *packed_git;
-struct mru packed_git_mru;
+LIST_HEAD(packed_git_mru);
#define SZ_FMT PRIuMAX
static inline uintmax_t sz_fmt(size_t s) { return s; }
return NULL;
/*
- * ".pack" is long enough to hold any suffix we're adding (and
+ * ".promisor" is long enough to hold any suffix we're adding (and
* the use of xsnprintf double-checks that)
*/
- alloc = st_add3(path_len, strlen(".pack"), 1);
+ alloc = st_add3(path_len, strlen(".promisor"), 1);
p = alloc_packed_git(alloc);
memcpy(p->pack_name, path, path_len);
if (!access(p->pack_name, F_OK))
p->pack_keep = 1;
+ xsnprintf(p->pack_name + path_len, alloc - path_len, ".promisor");
+ if (!access(p->pack_name, F_OK))
+ p->pack_promisor = 1;
+
xsnprintf(p->pack_name + path_len, alloc - path_len, ".pack");
if (stat(p->pack_name, &st) || !S_ISREG(st.st_mode)) {
free(p);
if (ends_with(de->d_name, ".idx") ||
ends_with(de->d_name, ".pack") ||
ends_with(de->d_name, ".bitmap") ||
- ends_with(de->d_name, ".keep"))
+ ends_with(de->d_name, ".keep") ||
+ ends_with(de->d_name, ".promisor"))
string_list_append(&garbage, path.buf);
else
report_garbage(PACKDIR_FILE_GARBAGE, path.buf);
{
struct packed_git *p;
- mru_clear(&packed_git_mru);
+ INIT_LIST_HEAD(&packed_git_mru);
+
for (p = packed_git; p; p = p->next)
- mru_append(&packed_git_mru, p);
+ list_add_tail(&p->mru, &packed_git_mru);
}
static int prepare_packed_git_run_once = 0;
*oi->disk_sizep = revidx[1].offset - obj_offset;
}
- if (oi->typep || oi->typename) {
+ if (oi->typep || oi->type_name) {
enum object_type ptot;
ptot = packed_to_object_type(p, obj_offset, type, &w_curs,
curpos);
if (oi->typep)
*oi->typep = ptot;
- if (oi->typename) {
- const char *tn = typename(ptot);
+ if (oi->type_name) {
+ const char *tn = type_name(ptot);
if (tn)
- strbuf_addstr(oi->typename, tn);
+ strbuf_addstr(oi->type_name, tn);
}
if (ptot < 0) {
type = OBJ_BAD;
return off;
index += p->num_objects * 4 + (off & 0x7fffffff) * 8;
check_pack_index_ptr(p, index);
- return (((uint64_t)ntohl(*((uint32_t *)(index + 0)))) << 32) |
- ntohl(*((uint32_t *)(index + 4)));
+ return get_be64(index);
}
}
{
const uint32_t *level1_ofs = p->index_data;
const unsigned char *index = p->index_data;
- unsigned hi, lo, stride;
- static int debug_lookup = -1;
-
- if (debug_lookup < 0)
- debug_lookup = !!getenv("GIT_DEBUG_LOOKUP");
+ unsigned stride;
+ uint32_t result;
if (!index) {
if (open_pack_index(p))
index += 8;
}
index += 4 * 256;
- hi = ntohl(level1_ofs[*sha1]);
- lo = ((*sha1 == 0x0) ? 0 : ntohl(level1_ofs[*sha1 - 1]));
if (p->index_version > 1) {
stride = 20;
} else {
index += 4;
}
- if (debug_lookup)
- printf("%02x%02x%02x... lo %u hi %u nr %"PRIu32"\n",
- sha1[0], sha1[1], sha1[2], lo, hi, p->num_objects);
-
- while (lo < hi) {
- unsigned mi = lo + (hi - lo) / 2;
- int cmp = hashcmp(index + mi * stride, sha1);
-
- if (debug_lookup)
- printf("lo %u hi %u rg %u mi %u\n",
- lo, hi, hi - lo, mi);
- if (!cmp)
- return nth_packed_object_offset(p, mi);
- if (cmp > 0)
- hi = mi;
- else
- lo = mi+1;
- }
+ if (bsearch_hash(sha1, level1_ofs, index, stride, &result))
+ return nth_packed_object_offset(p, result);
return 0;
}
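The open-coded binary search (and its GIT_DEBUG_LOOKUP tracing) is replaced by a single call to bsearch_hash(). As a rough illustration of what that helper does, reconstructed from the removed lo/hi computation, here is a fanout-bounded search over fixed-stride records; fanout_bsearch and its hard-coded 20-byte name length are assumptions of this sketch, not the upstream implementation:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>		/* ntohl() */

    /*
     * Sketch: a 256-entry fanout table of big-endian cumulative counts
     * narrows the range to names starting with sha1[0]; a binary search
     * over "stride"-byte records finishes the lookup.
     */
    static int fanout_bsearch(const unsigned char *sha1,
    			  const uint32_t *fanout_nbo,
    			  const unsigned char *table,
    			  size_t stride, uint32_t *result)
    {
    	uint32_t hi = ntohl(fanout_nbo[*sha1]);
    	uint32_t lo = *sha1 ? ntohl(fanout_nbo[*sha1 - 1]) : 0;

    	while (lo < hi) {
    		uint32_t mi = lo + (hi - lo) / 2;
    		int cmp = memcmp(table + mi * stride, sha1, 20);

    		if (!cmp) {
    			*result = mi;	/* found */
    			return 1;
    		}
    		if (cmp > 0)
    			hi = mi;
    		else
    			lo = mi + 1;
    	}
    	*result = lo;			/* not found; insertion point */
    	return 0;
    }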
*/
int find_pack_entry(const unsigned char *sha1, struct pack_entry *e)
{
- struct mru_entry *p;
+ struct list_head *pos;
prepare_packed_git();
if (!packed_git)
return 0;
- for (p = packed_git_mru.head; p; p = p->next) {
- if (fill_pack_entry(sha1, e, p->item)) {
- mru_mark(&packed_git_mru, p);
+ list_for_each(pos, &packed_git_mru) {
+ struct packed_git *p = list_entry(pos, struct packed_git, mru);
+ if (fill_pack_entry(sha1, e, p)) {
+ list_move(&p->mru, &packed_git_mru);
return 1;
}
}
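find_pack_entry() now keeps its most-recently-used ordering with the generic list.h macros instead of the dedicated mru API: each pack embeds a list_head, and a hit is promoted to the front with list_move(). A small sketch of the same pattern, assuming git's list.h; struct item and find_item are hypothetical names, while the list.h calls are the ones used above:

    #include "list.h"	/* LIST_HEAD, list_for_each, list_entry, list_move */

    struct item {
    	int key;
    	struct list_head mru;		/* embedded node, like packed_git::mru */
    };

    static LIST_HEAD(item_mru);		/* newest hits live at the front */

    static struct item *find_item(int key)
    {
    	struct list_head *pos;

    	list_for_each(pos, &item_mru) {
    		struct item *it = list_entry(pos, struct item, mru);

    		if (it->key != key)
    			continue;
    		list_move(&it->mru, &item_mru);	/* promote the hit */
    		return it;
    	}
    	return NULL;
    }

New items would be appended with list_add_tail(), as the INIT_LIST_HEAD hunk above does for packed_git_mru.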
for (p = packed_git; p; p = p->next) {
if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
continue;
+ if ((flags & FOR_EACH_OBJECT_PROMISOR_ONLY) &&
+ !p->pack_promisor)
+ continue;
if (open_pack_index(p)) {
pack_errors = 1;
continue;
}
return r ? r : pack_errors;
}
+
+static int add_promisor_object(const struct object_id *oid,
+ struct packed_git *pack,
+ uint32_t pos,
+ void *set_)
+{
+ struct oidset *set = set_;
+ struct object *obj = parse_object(oid);
+ if (!obj)
+ return 1;
+
+ oidset_insert(set, oid);
+
+ /*
+ * If this is a tree, commit, or tag, the objects it refers
+ * to are also promisor objects. (Blobs refer to no objects.)
+ */
+ if (obj->type == OBJ_TREE) {
+ struct tree *tree = (struct tree *)obj;
+ struct tree_desc desc;
+ struct name_entry entry;
+ if (init_tree_desc_gently(&desc, tree->buffer, tree->size))
+ /*
+ * Error messages are given when packs are
+ * verified, so do not print any here.
+ */
+ return 0;
+ while (tree_entry_gently(&desc, &entry))
+ oidset_insert(set, entry.oid);
+ } else if (obj->type == OBJ_COMMIT) {
+ struct commit *commit = (struct commit *) obj;
+ struct commit_list *parents = commit->parents;
+
+ oidset_insert(set, &commit->tree->object.oid);
+ for (; parents; parents = parents->next)
+ oidset_insert(set, &parents->item->object.oid);
+ } else if (obj->type == OBJ_TAG) {
+ struct tag *tag = (struct tag *) obj;
+ oidset_insert(set, &tag->tagged->oid);
+ }
+ return 0;
+}
+
+int is_promisor_object(const struct object_id *oid)
+{
+ static struct oidset promisor_objects;
+ static int promisor_objects_prepared;
+
+ if (!promisor_objects_prepared) {
+ if (repository_format_partial_clone) {
+ for_each_packed_object(add_promisor_object,
+ &promisor_objects,
+ FOR_EACH_OBJECT_PROMISOR_ONLY);
+ }
+ promisor_objects_prepared = 1;
+ }
+ return oidset_contains(&promisor_objects, oid);
+}
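is_promisor_object() lazily scans every pack that has a matching ".promisor" file, collects the objects those packs contain plus everything those objects directly reference, and answers membership queries from the resulting oidset. A hedged sketch of a typical caller, assuming git's cache.h; report_missing() and its message are illustrative only, while is_promisor_object() and repository_format_partial_clone come from the code above:

    #include "cache.h"	/* struct object_id, oid_to_hex(), error() */

    /* In a partial clone, a missing object is acceptable if it was promised. */
    static int report_missing(const struct object_id *oid)
    {
    	if (repository_format_partial_clone && is_promisor_object(oid))
    		return 0;	/* the promisor remote can supply it on demand */
    	return error("missing object %s", oid_to_hex(oid));
    }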
void rename_index_entry_at(struct index_state *istate, int nr, const char *new_name)
{
- struct cache_entry *old = istate->cache[nr], *new;
+ struct cache_entry *old_entry = istate->cache[nr], *new_entry;
int namelen = strlen(new_name);
- new = xmalloc(cache_entry_size(namelen));
- copy_cache_entry(new, old);
- new->ce_flags &= ~CE_HASHED;
- new->ce_namelen = namelen;
- new->index = 0;
- memcpy(new->name, new_name, namelen + 1);
+ new_entry = xmalloc(cache_entry_size(namelen));
+ copy_cache_entry(new_entry, old_entry);
+ new_entry->ce_flags &= ~CE_HASHED;
+ new_entry->ce_namelen = namelen;
+ new_entry->index = 0;
+ memcpy(new_entry->name, new_name, namelen + 1);
- cache_tree_invalidate_path(istate, old->name);
- untracked_cache_remove_from_index(istate, old->name);
+ cache_tree_invalidate_path(istate, old_entry->name);
+ untracked_cache_remove_from_index(istate, old_entry->name);
remove_index_entry_at(istate, nr);
- add_index_entry(istate, new, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
+ add_index_entry(istate, new_entry, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE);
}
void fill_stat_data(struct stat_data *sd, struct stat *st)
struct cache_entry *alias)
{
int len;
- struct cache_entry *new;
+ struct cache_entry *new_entry;
if (alias->ce_flags & CE_ADDED)
die("Will not add file alias '%s' ('%s' already exists in index)", ce->name, alias->name);
/* Ok, create the new entry using the name of the existing alias */
len = ce_namelen(alias);
- new = xcalloc(1, cache_entry_size(len));
- memcpy(new->name, alias->name, len);
- copy_cache_entry(new, ce);
+ new_entry = xcalloc(1, cache_entry_size(len));
+ memcpy(new_entry->name, alias->name, len);
+ copy_cache_entry(new_entry, ce);
save_or_free_index_entry(istate, ce);
- return new;
+ return new_entry;
}
void set_object_name_for_intent_to_add_entry(struct cache_entry *ce)
{
- unsigned char sha1[20];
- if (write_sha1_file("", 0, blob_type, sha1))
+ struct object_id oid;
+ if (write_object_file("", 0, blob_type, &oid))
die("cannot create an empty blob in the object database");
- hashcpy(ce->oid.hash, sha1);
+ oidcpy(&ce->oid, &oid);
}
int add_to_index(struct index_state *istate, const char *path, struct stat *st, int flags)
/* Add it in.. */
istate->cache_nr++;
if (istate->cache_nr > pos + 1)
- memmove(istate->cache + pos + 1,
- istate->cache + pos,
- (istate->cache_nr - pos - 1) * sizeof(ce));
+ MOVE_ARRAY(istate->cache + pos + 1, istate->cache + pos,
+ istate->cache_nr - pos - 1);
set_index_entry(istate, pos, ce);
istate->cache_changed |= CE_ENTRY_ADDED;
return 0;
const char *typechange_fmt;
const char *added_fmt;
const char *unmerged_fmt;
+ uint64_t start = getnanotime();
modified_fmt = (in_porcelain ? "M\t%s\n" : "%s: needs update\n");
deleted_fmt = (in_porcelain ? "D\t%s\n" : "%s: needs update\n");
added_fmt = (in_porcelain ? "A\t%s\n" : "%s needs update\n");
unmerged_fmt = (in_porcelain ? "U\t%s\n" : "%s: needs merge\n");
for (i = 0; i < istate->cache_nr; i++) {
- struct cache_entry *ce, *new;
+ struct cache_entry *ce, *new_entry;
int cache_errno = 0;
int changed = 0;
int filtered = 0;
if (filtered)
continue;
- new = refresh_cache_ent(istate, ce, options, &cache_errno, &changed);
- if (new == ce)
+ new_entry = refresh_cache_ent(istate, ce, options, &cache_errno, &changed);
+ if (new_entry == ce)
continue;
- if (!new) {
+ if (!new_entry) {
const char *fmt;
if (really && cache_errno == EINVAL) {
continue;
}
- replace_index_entry(istate, i, new);
+ replace_index_entry(istate, i, new_entry);
}
+ trace_performance_since(start, "refresh index");
return has_errors;
}
static int verify_hdr(struct cache_header *hdr, unsigned long size)
{
- git_SHA_CTX c;
- unsigned char sha1[20];
+ git_hash_ctx c;
+ unsigned char hash[GIT_MAX_RAWSZ];
int hdr_version;
if (hdr->hdr_signature != htonl(CACHE_SIGNATURE))
if (!verify_index_checksum)
return 0;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, size - 20);
- git_SHA1_Final(sha1, &c);
- if (hashcmp(sha1, (unsigned char *)hdr + size - 20))
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, size - the_hash_algo->rawsz);
+ the_hash_algo->final_fn(hash, &c);
+ if (hashcmp(hash, (unsigned char *)hdr + size - the_hash_algo->rawsz))
return error("bad index file sha1 signature");
return 0;
}
int read_index(struct index_state *istate)
{
- return read_index_from(istate, get_index_file());
+ return read_index_from(istate, get_index_file(), get_git_dir());
}
static struct cache_entry *cache_entry_from_ondisk(struct ondisk_cache_entry *ondisk,
die_errno("cannot stat the open index");
mmap_size = xsize_t(st.st_size);
- if (mmap_size < sizeof(struct cache_header) + 20)
+ if (mmap_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
die("index file smaller than expected");
mmap = xmmap(NULL, mmap_size, PROT_READ, MAP_PRIVATE, fd, 0);
if (verify_hdr(hdr, mmap_size) < 0)
goto unmap;
- hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - 20);
+ hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
istate->version = ntohl(hdr->hdr_version);
istate->cache_nr = ntohl(hdr->hdr_entries);
istate->cache_alloc = alloc_nr(istate->cache_nr);
istate->timestamp.sec = st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
- while (src_offset <= mmap_size - 20 - 8) {
+ while (src_offset <= mmap_size - the_hash_algo->rawsz - 8) {
/* After an array of active_nr index entries,
* there can be arbitrary number of extended
* sections, each of which is prefixed with
 * This way, shared indexes can be removed if they have not been used
* for some time.
*/
-static void freshen_shared_index(char *base_sha1_hex, int warn)
+static void freshen_shared_index(const char *shared_index, int warn)
{
- char *shared_index = git_pathdup("sharedindex.%s", base_sha1_hex);
if (!check_and_freshen_file(shared_index, 1) && warn)
warning("could not freshen shared index '%s'", shared_index);
- free(shared_index);
}
-int read_index_from(struct index_state *istate, const char *path)
+int read_index_from(struct index_state *istate, const char *path,
+ const char *gitdir)
{
+ uint64_t start = getnanotime();
struct split_index *split_index;
int ret;
char *base_sha1_hex;
- const char *base_path;
+ char *base_path;
/* istate->initialized covers both .git/index and .git/sharedindex.xxx */
if (istate->initialized)
return istate->cache_nr;
ret = do_read_index(istate, path, 0);
+ trace_performance_since(start, "read cache %s", path);
split_index = istate->split_index;
if (!split_index || is_null_sha1(split_index->base_sha1)) {
split_index->base = xcalloc(1, sizeof(*split_index->base));
base_sha1_hex = sha1_to_hex(split_index->base_sha1);
- base_path = git_path("sharedindex.%s", base_sha1_hex);
+ base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_sha1_hex);
ret = do_read_index(split_index->base, base_path, 1);
if (hashcmp(split_index->base_sha1, split_index->base->sha1))
die("broken index, expect %s in %s, got %s",
base_sha1_hex, base_path,
sha1_to_hex(split_index->base->sha1));
- freshen_shared_index(base_sha1_hex, 0);
+ freshen_shared_index(base_path, 0);
merge_base_index(istate);
post_read_index_from(istate);
+ trace_performance_since(start, "read cache %s", base_path);
+ free(base_path);
return ret;
}
static unsigned char write_buffer[WRITE_BUFFER_SIZE];
static unsigned long write_buffer_len;
-static int ce_write_flush(git_SHA_CTX *context, int fd)
+static int ce_write_flush(git_hash_ctx *context, int fd)
{
unsigned int buffered = write_buffer_len;
if (buffered) {
- git_SHA1_Update(context, write_buffer, buffered);
+ the_hash_algo->update_fn(context, write_buffer, buffered);
if (write_in_full(fd, write_buffer, buffered) < 0)
return -1;
write_buffer_len = 0;
return 0;
}
-static int ce_write(git_SHA_CTX *context, int fd, void *data, unsigned int len)
+static int ce_write(git_hash_ctx *context, int fd, void *data, unsigned int len)
{
while (len) {
unsigned int buffered = write_buffer_len;
return 0;
}
-static int write_index_ext_header(git_SHA_CTX *context, int fd,
+static int write_index_ext_header(git_hash_ctx *context, int fd,
unsigned int ext, unsigned int sz)
{
ext = htonl(ext);
(ce_write(context, fd, &sz, 4) < 0)) ? -1 : 0;
}
-static int ce_flush(git_SHA_CTX *context, int fd, unsigned char *sha1)
+static int ce_flush(git_hash_ctx *context, int fd, unsigned char *hash)
{
unsigned int left = write_buffer_len;
if (left) {
write_buffer_len = 0;
- git_SHA1_Update(context, write_buffer, left);
+ the_hash_algo->update_fn(context, write_buffer, left);
}
- /* Flush first if not enough space for SHA1 signature */
- if (left + 20 > WRITE_BUFFER_SIZE) {
+ /* Flush first if not enough space for hash signature */
+ if (left + the_hash_algo->rawsz > WRITE_BUFFER_SIZE) {
if (write_in_full(fd, write_buffer, left) < 0)
return -1;
left = 0;
}
- /* Append the SHA1 signature at the end */
- git_SHA1_Final(write_buffer + left, context);
- hashcpy(sha1, write_buffer + left);
- left += 20;
+ /* Append the hash signature at the end */
+ the_hash_algo->final_fn(write_buffer + left, context);
+ hashcpy(hash, write_buffer + left);
+ left += the_hash_algo->rawsz;
return (write_in_full(fd, write_buffer, left) < 0) ? -1 : 0;
}
}
}
-static int ce_write_entry(git_SHA_CTX *c, int fd, struct cache_entry *ce,
+static int ce_write_entry(git_hash_ctx *c, int fd, struct cache_entry *ce,
struct strbuf *previous_name, struct ondisk_cache_entry *ondisk)
{
int size;
int fd;
ssize_t n;
struct stat st;
- unsigned char sha1[20];
+ unsigned char hash[GIT_MAX_RAWSZ];
if (!istate->initialized)
return 0;
if (fstat(fd, &st))
goto out;
- if (st.st_size < sizeof(struct cache_header) + 20)
+ if (st.st_size < sizeof(struct cache_header) + the_hash_algo->rawsz)
goto out;
- n = pread_in_full(fd, sha1, 20, st.st_size - 20);
- if (n != 20)
+ n = pread_in_full(fd, hash, the_hash_algo->rawsz, st.st_size - the_hash_algo->rawsz);
+ if (n != the_hash_algo->rawsz)
goto out;
- if (hashcmp(istate->sha1, sha1))
+ if (hashcmp(istate->sha1, hash))
goto out;
close(fd);
static int do_write_index(struct index_state *istate, struct tempfile *tempfile,
int strip_extensions)
{
+ uint64_t start = getnanotime();
int newfd = tempfile->fd;
- git_SHA_CTX c;
+ git_hash_ctx c;
struct cache_header hdr;
int i, err = 0, removed, extended, hdr_version;
struct cache_entry **cache = istate->cache;
struct stat st;
struct ondisk_cache_entry_extended ondisk;
struct strbuf previous_name_buf = STRBUF_INIT, *previous_name;
- int drop_cache_tree = 0;
+ int drop_cache_tree = istate->drop_cache_tree;
for (i = removed = extended = 0; i < entries; i++) {
if (cache[i]->ce_flags & CE_REMOVE)
hdr.hdr_version = htonl(hdr_version);
hdr.hdr_entries = htonl(entries - removed);
- git_SHA1_Init(&c);
+ the_hash_algo->init_fn(&c);
if (ce_write(&c, newfd, &hdr, sizeof(hdr)) < 0)
return -1;
return -1;
istate->timestamp.sec = (unsigned int)st.st_mtime;
istate->timestamp.nsec = ST_MTIME_NSEC(st);
+ trace_performance_since(start, "write index, changed mask = %x", istate->cache_changed);
return 0;
}
}
static int write_shared_index(struct index_state *istate,
- struct lock_file *lock, unsigned flags)
+ struct tempfile **temp)
{
- struct tempfile *temp;
struct split_index *si = istate->split_index;
int ret;
- temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
- if (!temp) {
- hashclr(si->base_sha1);
- return do_write_locked_index(istate, lock, flags);
- }
move_cache_to_base_index(istate);
- ret = do_write_index(si->base, temp, 1);
- if (ret) {
- delete_tempfile(&temp);
+ ret = do_write_index(si->base, *temp, 1);
+ if (ret)
return ret;
- }
- ret = adjust_shared_perm(get_tempfile_path(temp));
+ ret = adjust_shared_perm(get_tempfile_path(*temp));
if (ret) {
- int save_errno = errno;
- error("cannot fix permission bits on %s", get_tempfile_path(temp));
- delete_tempfile(&temp);
- errno = save_errno;
+ error("cannot fix permission bits on %s", get_tempfile_path(*temp));
return ret;
}
- ret = rename_tempfile(&temp,
+ ret = rename_tempfile(temp,
git_path("sharedindex.%s", sha1_to_hex(si->base->sha1)));
if (!ret) {
hashcpy(si->base_sha1, si->base->sha1);
new_shared_index = istate->cache_changed & SPLIT_INDEX_ORDERED;
if (new_shared_index) {
- ret = write_shared_index(istate, lock, flags);
+ struct tempfile *temp;
+ int saved_errno;
+
+ temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
+ if (!temp) {
+ hashclr(si->base_sha1);
+ ret = do_write_locked_index(istate, lock, flags);
+ goto out;
+ }
+ ret = write_shared_index(istate, &temp);
+
+ saved_errno = errno;
+ if (is_tempfile_active(temp))
+ delete_tempfile(&temp);
+ errno = saved_errno;
+
if (ret)
goto out;
}
ret = write_split_index(istate, lock, flags);
/* Freshen the shared index only if the split-index was written */
- if (!ret && !new_shared_index)
- freshen_shared_index(sha1_to_hex(si->base_sha1), 1);
+ if (!ret && !new_shared_index) {
+ const char *shared_index = git_path("sharedindex.%s",
+ sha1_to_hex(si->base_sha1));
+ freshen_shared_index(shared_index, 1);
+ }
out:
if (flags & COMMIT_LOCK)
"refs/tags/*"
};
+/* See TAG_REFSPEC for the string version */
const struct refspec *tag_refspec = &s_tag_refspec;
struct counted_string {
remote->fetch_refspec[remote->fetch_refspec_nr++] = ref;
}
+void add_prune_tags_to_fetch_refspec(struct remote *remote)
+{
+ int nr = remote->fetch_refspec_nr;
+ int bufsize = nr + 1;
+ int size = sizeof(struct refspec);
+
+ remote->fetch = xrealloc(remote->fetch, size * bufsize);
+ memcpy(&remote->fetch[nr], tag_refspec, size);
+ add_fetch_refspec(remote, xstrdup(TAG_REFSPEC));
+}
+
static void add_url(struct remote *remote, const char *url)
{
ALLOC_GROW(remote->url, remote->url_nr + 1, remote->url_alloc);
ret = xcalloc(1, sizeof(struct remote));
ret->prune = -1; /* unspecified */
+ ret->prune_tags = -1; /* unspecified */
ALLOC_GROW(remotes, remotes_nr + 1, remotes_alloc);
remotes[remotes_nr++] = ret;
ret->name = xstrndup(name, len);
remote->skip_default_update = git_config_bool(key, value);
else if (!strcmp(subkey, "prune"))
remote->prune = git_config_bool(key, value);
+ else if (!strcmp(subkey, "prunetags"))
+ remote->prune_tags = git_config_bool(key, value);
else if (!strcmp(subkey, "url")) {
const char *v;
if (git_config_string(&v, key, value))
int ref_newer(const struct object_id *new_oid, const struct object_id *old_oid)
{
struct object *o;
- struct commit *old, *new;
+ struct commit *old_commit, *new_commit;
struct commit_list *list, *used;
int found = 0;
/*
- * Both new and old must be commit-ish and new is descendant of
- * old. Otherwise we require --force.
+ * Both new_commit and old_commit must be commit-ish and new_commit is a
+ * descendant of old_commit. Otherwise we require --force.
*/
o = deref_tag(parse_object(old_oid), NULL, 0);
if (!o || o->type != OBJ_COMMIT)
return 0;
- old = (struct commit *) o;
+ old_commit = (struct commit *) o;
o = deref_tag(parse_object(new_oid), NULL, 0);
if (!o || o->type != OBJ_COMMIT)
return 0;
- new = (struct commit *) o;
+ new_commit = (struct commit *) o;
- if (parse_commit(new) < 0)
+ if (parse_commit(new_commit) < 0)
return 0;
used = list = NULL;
- commit_list_insert(new, &list);
+ commit_list_insert(new_commit, &list);
while (list) {
- new = pop_most_recent_commit(&list, TMP_MARK);
- commit_list_insert(new, &used);
- if (new == old) {
+ new_commit = pop_most_recent_commit(&list, TMP_MARK);
+ commit_list_insert(new_commit, &used);
+ if (new_commit == old_commit) {
found = 1;
break;
}
#include "cache.h"
#include "config.h"
#include "lockfile.h"
-#include "sequencer.h"
#include "dir.h"
#include "object.h"
#include "commit.h"
+#include "sequencer.h"
#include "tag.h"
#include "run-command.h"
#include "exec_cmd.h"
#include "log-tree.h"
#include "wt-status.h"
#include "hashmap.h"
+#include "notes-utils.h"
+#include "sigchain.h"
#define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
const char sign_off_header[] = "Signed-off-by: ";
static const char cherry_picked_prefix[] = "(cherry picked from commit ";
+GIT_PATH_FUNC(git_path_commit_editmsg, "COMMIT_EDITMSG")
+
GIT_PATH_FUNC(git_path_seq_dir, "sequencer")
static GIT_PATH_FUNC(git_path_todo_file, "sequencer/todo")
static GIT_PATH_FUNC(rebase_path_strategy_opts, "rebase-merge/strategy_opts")
static GIT_PATH_FUNC(rebase_path_allow_rerere_autoupdate, "rebase-merge/allow_rerere_autoupdate")
+static int git_sequencer_config(const char *k, const char *v, void *cb)
+{
+ struct replay_opts *opts = cb;
+ int status;
+
+ if (!strcmp(k, "commit.cleanup")) {
+ const char *s;
+
+ status = git_config_string(&s, k, v);
+ if (status)
+ return status;
+
+ if (!strcmp(s, "verbatim"))
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_NONE;
+ else if (!strcmp(s, "whitespace"))
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SPACE;
+ else if (!strcmp(s, "strip"))
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_ALL;
+ else if (!strcmp(s, "scissors"))
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_SPACE;
+ else
+ warning(_("invalid commit message cleanup mode '%s'"),
+ s);
+
+ return status;
+ }
+
+ if (!strcmp(k, "commit.gpgsign")) {
+ opts->gpg_sign = git_config_bool(k, v) ? xstrdup("") : NULL;
+ return 0;
+ }
+
+ status = git_gpg_config(k, v, NULL);
+ if (status)
+ return status;
+
+ return git_diff_basic_config(k, v, NULL);
+}
+
+void sequencer_init_config(struct replay_opts *opts)
+{
+ opts->default_msg_cleanup = COMMIT_MSG_CLEANUP_NONE;
+ git_config(git_sequencer_config, opts);
+}
+
static inline int is_rebase_i(const struct replay_opts *opts)
{
return opts->action == REPLAY_INTERACTIVE_REBASE;
_(action_name(opts)));
rollback_lock_file(&index_lock);
- if (opts->signoff)
- append_signoff(msgbuf, 0, 0);
-
if (!clean)
append_conflicts_hint(msgbuf);
return 0;
}
+static char *get_author(const char *message)
+{
+ size_t len;
+ const char *a;
+
+ a = find_commit_header(message, "author", &len);
+ if (a)
+ return xmemdupz(a, len);
+
+ return NULL;
+}
+
static const char staged_changes_advice[] =
N_("you have staged changes in your working tree\n"
"If these changes are meant to be squashed into the previous commit, run:\n"
argv_array_push(&cmd.args, "--amend");
if (opts->gpg_sign)
argv_array_pushf(&cmd.args, "-S%s", opts->gpg_sign);
- if (opts->signoff)
- argv_array_push(&cmd.args, "-s");
if (defmsg)
argv_array_pushl(&cmd.args, "-F", defmsg, NULL);
if ((flags & CLEANUP_MSG))
return run_command(&cmd);
}
+static int rest_is_empty(const struct strbuf *sb, int start)
+{
+ int i, eol;
+ const char *nl;
+
+ /* Check if the rest is just whitespace and Signed-off-by's. */
+ for (i = start; i < sb->len; i++) {
+ nl = memchr(sb->buf + i, '\n', sb->len - i);
+ if (nl)
+ eol = nl - sb->buf;
+ else
+ eol = sb->len;
+
+ if (strlen(sign_off_header) <= eol - i &&
+ starts_with(sb->buf + i, sign_off_header)) {
+ i = eol;
+ continue;
+ }
+ while (i < eol)
+ if (!isspace(sb->buf[i++]))
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Find out if the message in the strbuf contains only whitespace and
+ * Signed-off-by lines.
+ */
+int message_is_empty(const struct strbuf *sb,
+ enum commit_msg_cleanup_mode cleanup_mode)
+{
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_NONE && sb->len)
+ return 0;
+ return rest_is_empty(sb, 0);
+}
+
+/*
+ * See if the user edited the message in the editor or left what
+ * was in the template intact.
+ */
+int template_untouched(const struct strbuf *sb, const char *template_file,
+ enum commit_msg_cleanup_mode cleanup_mode)
+{
+ struct strbuf tmpl = STRBUF_INIT;
+ const char *start;
+
+ if (cleanup_mode == COMMIT_MSG_CLEANUP_NONE && sb->len)
+ return 0;
+
+ if (!template_file || strbuf_read_file(&tmpl, template_file, 0) <= 0)
+ return 0;
+
+ strbuf_stripspace(&tmpl, cleanup_mode == COMMIT_MSG_CLEANUP_ALL);
+ if (!skip_prefix(sb->buf, tmpl.buf, &start))
+ start = sb->buf;
+ strbuf_release(&tmpl);
+ return rest_is_empty(sb, start - sb->buf);
+}
+
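Both helpers are meant to run right before the commit object is written, after any cleanup of the message buffer. A sketch of how a committing code path might combine them; the wrapper, its arguments, and the error strings are assumptions of this sketch, while message_is_empty(), template_untouched(), and the cleanup enum come from the hunks above (declarations assumed to live in sequencer.h):

    static int check_commit_message(struct strbuf *msg, const char *template_file,
    				enum commit_msg_cleanup_mode cleanup_mode,
    				int allow_empty_message)
    {
    	if (template_untouched(msg, template_file, cleanup_mode))
    		return error(_("commit message template was left unchanged"));
    	if (!allow_empty_message && message_is_empty(msg, cleanup_mode))
    		return error(_("refusing to commit with an empty message"));
    	return 0;
    }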
+int update_head_with_reflog(const struct commit *old_head,
+ const struct object_id *new_head,
+ const char *action, const struct strbuf *msg,
+ struct strbuf *err)
+{
+ struct ref_transaction *transaction;
+ struct strbuf sb = STRBUF_INIT;
+ const char *nl;
+ int ret = 0;
+
+ if (action) {
+ strbuf_addstr(&sb, action);
+ strbuf_addstr(&sb, ": ");
+ }
+
+ nl = strchr(msg->buf, '\n');
+ if (nl) {
+ strbuf_add(&sb, msg->buf, nl + 1 - msg->buf);
+ } else {
+ strbuf_addbuf(&sb, msg);
+ strbuf_addch(&sb, '\n');
+ }
+
+ transaction = ref_transaction_begin(err);
+ if (!transaction ||
+ ref_transaction_update(transaction, "HEAD", new_head,
+ old_head ? &old_head->object.oid : &null_oid,
+ 0, sb.buf, err) ||
+ ref_transaction_commit(transaction, err)) {
+ ret = -1;
+ }
+ ref_transaction_free(transaction);
+ strbuf_release(&sb);
+
+ return ret;
+}
+
+static int run_rewrite_hook(const struct object_id *oldoid,
+ const struct object_id *newoid)
+{
+ struct child_process proc = CHILD_PROCESS_INIT;
+ const char *argv[3];
+ int code;
+ struct strbuf sb = STRBUF_INIT;
+
+ argv[0] = find_hook("post-rewrite");
+ if (!argv[0])
+ return 0;
+
+ argv[1] = "amend";
+ argv[2] = NULL;
+
+ proc.argv = argv;
+ proc.in = -1;
+ proc.stdout_to_stderr = 1;
+
+ code = start_command(&proc);
+ if (code)
+ return code;
+ strbuf_addf(&sb, "%s %s\n", oid_to_hex(oldoid), oid_to_hex(newoid));
+ sigchain_push(SIGPIPE, SIG_IGN);
+ write_in_full(proc.in, sb.buf, sb.len);
+ close(proc.in);
+ strbuf_release(&sb);
+ sigchain_pop(SIGPIPE);
+ return finish_command(&proc);
+}
+
+void commit_post_rewrite(const struct commit *old_head,
+ const struct object_id *new_head)
+{
+ struct notes_rewrite_cfg *cfg;
+
+ cfg = init_copy_notes_for_rewrite("amend");
+ if (cfg) {
+ /* we are amending, so old_head is not NULL */
+ copy_note_for_rewrite(cfg, &old_head->object.oid, new_head);
+ finish_copy_notes_for_rewrite(cfg, "Notes added by 'git commit --amend'");
+ }
+ run_rewrite_hook(&old_head->object.oid, new_head);
+}
+
+static int run_prepare_commit_msg_hook(struct strbuf *msg, const char *commit)
+{
+ struct argv_array hook_env = ARGV_ARRAY_INIT;
+ int ret;
+ const char *name;
+
+ name = git_path_commit_editmsg();
+ if (write_message(msg->buf, msg->len, name, 0))
+ return -1;
+
+ argv_array_pushf(&hook_env, "GIT_INDEX_FILE=%s", get_index_file());
+ argv_array_push(&hook_env, "GIT_EDITOR=:");
+ if (commit)
+ ret = run_hook_le(hook_env.argv, "prepare-commit-msg", name,
+ "commit", commit, NULL);
+ else
+ ret = run_hook_le(hook_env.argv, "prepare-commit-msg", name,
+ "message", NULL);
+ if (ret)
+ ret = error(_("'prepare-commit-msg' hook failed"));
+ argv_array_clear(&hook_env);
+
+ return ret;
+}
+
+static const char implicit_ident_advice_noconfig[] =
+N_("Your name and email address were configured automatically based\n"
+"on your username and hostname. Please check that they are accurate.\n"
+"You can suppress this message by setting them explicitly. Run the\n"
+"following command and follow the instructions in your editor to edit\n"
+"your configuration file:\n"
+"\n"
+" git config --global --edit\n"
+"\n"
+"After doing this, you may fix the identity used for this commit with:\n"
+"\n"
+" git commit --amend --reset-author\n");
+
+static const char implicit_ident_advice_config[] =
+N_("Your name and email address were configured automatically based\n"
+"on your username and hostname. Please check that they are accurate.\n"
+"You can suppress this message by setting them explicitly:\n"
+"\n"
+" git config --global user.name \"Your Name\"\n"
+" git config --global user.email you@example.com\n"
+"\n"
+"After doing this, you may fix the identity used for this commit with:\n"
+"\n"
+" git commit --amend --reset-author\n");
+
+static const char *implicit_ident_advice(void)
+{
+ char *user_config = expand_user_path("~/.gitconfig", 0);
+ char *xdg_config = xdg_config_home("config");
+ int config_exists = file_exists(user_config) || file_exists(xdg_config);
+
+ free(user_config);
+ free(xdg_config);
+
+ if (config_exists)
+ return _(implicit_ident_advice_config);
+ else
+ return _(implicit_ident_advice_noconfig);
+
+}
+
+void print_commit_summary(const char *prefix, const struct object_id *oid,
+ unsigned int flags)
+{
+ struct rev_info rev;
+ struct commit *commit;
+ struct strbuf format = STRBUF_INIT;
+ const char *head;
+ struct pretty_print_context pctx = {0};
+ struct strbuf author_ident = STRBUF_INIT;
+ struct strbuf committer_ident = STRBUF_INIT;
+
+ commit = lookup_commit(oid);
+ if (!commit)
+ die(_("couldn't look up newly created commit"));
+ if (parse_commit(commit))
+ die(_("could not parse newly created commit"));
+
+ strbuf_addstr(&format, "format:%h] %s");
+
+ format_commit_message(commit, "%an <%ae>", &author_ident, &pctx);
+ format_commit_message(commit, "%cn <%ce>", &committer_ident, &pctx);
+ if (strbuf_cmp(&author_ident, &committer_ident)) {
+ strbuf_addstr(&format, "\n Author: ");
+ strbuf_addbuf_percentquote(&format, &author_ident);
+ }
+ if (flags & SUMMARY_SHOW_AUTHOR_DATE) {
+ struct strbuf date = STRBUF_INIT;
+
+ format_commit_message(commit, "%ad", &date, &pctx);
+ strbuf_addstr(&format, "\n Date: ");
+ strbuf_addbuf_percentquote(&format, &date);
+ strbuf_release(&date);
+ }
+ if (!committer_ident_sufficiently_given()) {
+ strbuf_addstr(&format, "\n Committer: ");
+ strbuf_addbuf_percentquote(&format, &committer_ident);
+ if (advice_implicit_identity) {
+ strbuf_addch(&format, '\n');
+ strbuf_addstr(&format, implicit_ident_advice());
+ }
+ }
+ strbuf_release(&author_ident);
+ strbuf_release(&committer_ident);
+
+ init_revisions(&rev, prefix);
+ setup_revisions(0, NULL, &rev, NULL);
+
+ rev.diff = 1;
+ rev.diffopt.output_format =
+ DIFF_FORMAT_SHORTSTAT | DIFF_FORMAT_SUMMARY;
+
+ rev.verbose_header = 1;
+ rev.show_root_diff = 1;
+ get_commit_format(format.buf, &rev);
+ rev.always_show_header = 0;
+ rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
+ rev.diffopt.break_opt = 0;
+ diff_setup_done(&rev.diffopt);
+
+ head = resolve_ref_unsafe("HEAD", 0, NULL, NULL);
+ if (!head)
+ die_errno(_("unable to resolve HEAD after creating commit"));
+ if (!strcmp(head, "HEAD"))
+ head = _("detached HEAD");
+ else
+ skip_prefix(head, "refs/heads/", &head);
+ printf("[%s%s ", head, (flags & SUMMARY_INITIAL_COMMIT) ?
+ _(" (root-commit)") : "");
+
+ if (!log_tree_commit(&rev, commit)) {
+ rev.always_show_header = 1;
+ rev.use_terminator = 1;
+ log_tree_commit(&rev, commit);
+ }
+
+ strbuf_release(&format);
+}
+
+static int parse_head(struct commit **head)
+{
+ struct commit *current_head;
+ struct object_id oid;
+
+ if (get_oid("HEAD", &oid)) {
+ current_head = NULL;
+ } else {
+ current_head = lookup_commit_reference(&oid);
+ if (!current_head)
+ return error(_("could not parse HEAD"));
+		if (oidcmp(&oid, &current_head->object.oid)) {
+ warning(_("HEAD %s is not a commit!"),
+ oid_to_hex(&oid));
+ }
+ if (parse_commit(current_head))
+ return error(_("could not parse HEAD commit"));
+ }
+ *head = current_head;
+
+ return 0;
+}
+
+/*
+ * Try to commit without forking 'git commit'. In some cases we need
+ * to run 'git commit' to display an error message.
+ *
+ * Returns:
+ * -1 - error unable to commit
+ * 0 - success
+ * 1 - run 'git commit'
+ */
+static int try_to_commit(struct strbuf *msg, const char *author,
+ struct replay_opts *opts, unsigned int flags,
+ struct object_id *oid)
+{
+ struct object_id tree;
+ struct commit *current_head;
+ struct commit_list *parents = NULL;
+ struct commit_extra_header *extra = NULL;
+ struct strbuf err = STRBUF_INIT;
+ struct strbuf commit_msg = STRBUF_INIT;
+ char *amend_author = NULL;
+ const char *hook_commit = NULL;
+ enum commit_msg_cleanup_mode cleanup;
+ int res = 0;
+
+	if (parse_head(&current_head))
+ return -1;
+
+ if (flags & AMEND_MSG) {
+ const char *exclude_gpgsig[] = { "gpgsig", NULL };
+ const char *out_enc = get_commit_output_encoding();
+ const char *message = logmsg_reencode(current_head, NULL,
+ out_enc);
+
+ if (!msg) {
+ const char *orig_message = NULL;
+
+ find_commit_subject(message, &orig_message);
+ msg = &commit_msg;
+ strbuf_addstr(msg, orig_message);
+ hook_commit = "HEAD";
+ }
+ author = amend_author = get_author(message);
+ unuse_commit_buffer(current_head, message);
+ if (!author) {
+ res = error(_("unable to parse commit author"));
+ goto out;
+ }
+ parents = copy_commit_list(current_head->parents);
+ extra = read_commit_extra_headers(current_head, exclude_gpgsig);
+ } else if (current_head) {
+ commit_list_insert(current_head, &parents);
+ }
+
+ if (write_cache_as_tree(tree.hash, 0, NULL)) {
+ res = error(_("git write-tree failed to write a tree"));
+ goto out;
+ }
+
+ if (!(flags & ALLOW_EMPTY) && !oidcmp(current_head ?
+			      &current_head->tree->object.oid :
+ &empty_tree_oid, &tree)) {
+ res = 1; /* run 'git commit' to display error message */
+ goto out;
+ }
+
+ if (find_hook("prepare-commit-msg")) {
+ res = run_prepare_commit_msg_hook(msg, hook_commit);
+ if (res)
+ goto out;
+ if (strbuf_read_file(&commit_msg, git_path_commit_editmsg(),
+ 2048) < 0) {
+ res = error_errno(_("unable to read commit message "
+ "from '%s'"),
+ git_path_commit_editmsg());
+ goto out;
+ }
+ msg = &commit_msg;
+ }
+
+ cleanup = (flags & CLEANUP_MSG) ? COMMIT_MSG_CLEANUP_ALL :
+ opts->default_msg_cleanup;
+
+ if (cleanup != COMMIT_MSG_CLEANUP_NONE)
+ strbuf_stripspace(msg, cleanup == COMMIT_MSG_CLEANUP_ALL);
+ if (!opts->allow_empty_message && message_is_empty(msg, cleanup)) {
+ res = 1; /* run 'git commit' to display error message */
+ goto out;
+ }
+
+ if (commit_tree_extended(msg->buf, msg->len, &tree, parents,
+ oid, author, opts->gpg_sign, extra)) {
+ res = error(_("failed to write commit object"));
+ goto out;
+ }
+
+ if (update_head_with_reflog(current_head, oid,
+ getenv("GIT_REFLOG_ACTION"), msg, &err)) {
+ res = error("%s", err.buf);
+ goto out;
+ }
+
+ if (flags & AMEND_MSG)
+ commit_post_rewrite(current_head, oid);
+
+out:
+ free_commit_extra_headers(extra);
+ strbuf_release(&err);
+ strbuf_release(&commit_msg);
+ free(amend_author);
+
+ return res;
+}
+
+static int do_commit(const char *msg_file, const char *author,
+ struct replay_opts *opts, unsigned int flags)
+{
+ int res = 1;
+
+ if (!(flags & EDIT_MSG) && !(flags & VERIFY_MSG)) {
+ struct object_id oid;
+ struct strbuf sb = STRBUF_INIT;
+
+ if (msg_file && strbuf_read_file(&sb, msg_file, 2048) < 0)
+ return error_errno(_("unable to read commit message "
+ "from '%s'"),
+ msg_file);
+
+ res = try_to_commit(msg_file ? &sb : NULL, author, opts, flags,
+ &oid);
+ strbuf_release(&sb);
+ if (!res) {
+ unlink(git_path_cherry_pick_head());
+ unlink(git_path_merge_msg());
+ if (!is_rebase_i(opts))
+ print_commit_summary(NULL, &oid,
+ SUMMARY_SHOW_AUTHOR_DATE);
+ return res;
+ }
+ }
+ if (res == 1)
+ return run_git_commit(msg_file, opts, flags);
+
+ return res;
+}
+
static int is_original_commit_empty(struct commit *commit)
{
const struct object_id *ptree_oid;
struct object_id head;
struct commit *base, *next, *parent;
const char *base_label, *next_label;
+ char *author = NULL;
struct commit_message msg = { NULL, NULL, NULL, NULL };
struct strbuf msgbuf = STRBUF_INIT;
int res, unborn = 0, allow;
strbuf_addstr(&msgbuf, oid_to_hex(&commit->object.oid));
strbuf_addstr(&msgbuf, ")\n");
}
+ if (!is_fixup(command))
+ author = get_author(msg.message);
}
if (command == TODO_REWORD)
}
}
+ if (opts->signoff)
+ append_signoff(&msgbuf, 0, 0);
+
if (is_rebase_i(opts) && write_author_script(msg.message) < 0)
res = -1;
else if (!opts->strategy || !strcmp(opts->strategy, "recursive") || command == TODO_REVERT) {
goto leave;
} else if (allow)
flags |= ALLOW_EMPTY;
- if (!opts->no_commit)
+ if (!opts->no_commit) {
fast_forward_edit:
- res = run_git_commit(msg_file, opts, flags);
+ if (author || command == TODO_REVERT || (flags & AMEND_MSG))
+ res = do_commit(msg_file, author, opts, flags);
+ else
+ res = error(_("unable to parse commit author"));
+ }
if (!res && final_fixup) {
unlink(rebase_path_fixup_msg());
leave:
free_message(commit, &msg);
+ free(author);
update_abort_safety_file();
return res;
return count;
}
+static ssize_t strbuf_read_file_or_whine(struct strbuf *sb, const char *path)
+{
+ int fd;
+ ssize_t len;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return error_errno(_("could not open '%s'"), path);
+ len = strbuf_read(sb, fd, 0);
+ close(fd);
+ if (len < 0)
+ return error(_("could not read '%s'."), path);
+ return len;
+}
+
static int read_populate_todo(struct todo_list *todo_list,
struct replay_opts *opts)
{
struct stat st;
const char *todo_file = get_todo_path(opts);
- int fd, res;
+ int res;
strbuf_reset(&todo_list->buf);
- fd = open(todo_file, O_RDONLY);
- if (fd < 0)
- return error_errno(_("could not open '%s'"), todo_file);
- if (strbuf_read(&todo_list->buf, fd, 0) < 0) {
- close(fd);
- return error(_("could not read '%s'."), todo_file);
- }
- close(fd);
+ if (strbuf_read_file_or_whine(&todo_list->buf, todo_file) < 0)
+ return -1;
res = stat(todo_file, &st);
if (res)
p = short_commit_name(commit);
if (write_message(p, strlen(p), rebase_path_stopped_sha(), 1) < 0)
return -1;
+ if (update_ref("rebase", "REBASE_HEAD", &commit->object.oid,
+ NULL, REF_NO_DEREF, UPDATE_REFS_MSG_ON_ERR))
+ res |= error(_("could not update %s"), "REBASE_HEAD");
strbuf_addf(&buf, "%s/patch", get_dir(opts));
memset(&log_tree_opt, 0, sizeof(log_tree_opt));
unlink(rebase_path_author_script());
unlink(rebase_path_stopped_sha());
unlink(rebase_path_amend());
+ delete_ref(NULL, "REBASE_HEAD", NULL, REF_NO_DEREF);
}
if (item->command <= TODO_SQUASH) {
if (is_rebase_i(opts))
if (!lookup_commit_reference_gently(&oid, 1)) {
enum object_type type = sha1_object_info(oid.hash, NULL);
return error(_("%s: can't cherry-pick a %s"),
- name, typename(type));
+ name, type_name(type));
}
} else
return error(_("%s: bad revision"), name);
struct strbuf todo_file = STRBUF_INIT;
struct todo_list todo_list = TODO_LIST_INIT;
struct strbuf missing = STRBUF_INIT;
- int advise_to_edit_todo = 0, res = 0, fd, i;
+ int advise_to_edit_todo = 0, res = 0, i;
strbuf_addstr(&todo_file, rebase_path_todo());
- fd = open(todo_file.buf, O_RDONLY);
- if (fd < 0) {
- res = error_errno(_("could not open '%s'"), todo_file.buf);
- goto leave_check;
- }
- if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
- close(fd);
- res = error(_("could not read '%s'."), todo_file.buf);
+ if (strbuf_read_file_or_whine(&todo_list.buf, todo_file.buf) < 0) {
+ res = -1;
goto leave_check;
}
- close(fd);
advise_to_edit_todo = res =
parse_insn_buffer(todo_list.buf.buf, &todo_list);
todo_list_release(&todo_list);
strbuf_addstr(&todo_file, ".backup");
- fd = open(todo_file.buf, O_RDONLY);
- if (fd < 0) {
- res = error_errno(_("could not open '%s'"), todo_file.buf);
- goto leave_check;
- }
- if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
- close(fd);
- res = error(_("could not read '%s'."), todo_file.buf);
+ if (strbuf_read_file_or_whine(&todo_list.buf, todo_file.buf) < 0) {
+ res = -1;
goto leave_check;
}
- close(fd);
strbuf_release(&todo_file);
res = !!parse_insn_buffer(todo_list.buf.buf, &todo_list);
}
strbuf_release(&buf);
- fd = open(todo_file, O_RDONLY);
- if (fd < 0) {
- return error_errno(_("could not open '%s'"), todo_file);
- }
- if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
- close(fd);
- return error(_("could not read '%s'."), todo_file);
- }
- close(fd);
+ if (strbuf_read_file_or_whine(&todo_list.buf, todo_file) < 0)
+ return -1;
if (parse_insn_buffer(todo_list.buf.buf, &todo_list) < 0) {
todo_list_release(&todo_list);
return -1;
const char *todo_file = rebase_path_todo();
struct todo_list todo_list = TODO_LIST_INIT;
struct hashmap subject2item;
- int res = 0, rearranged = 0, *next, *tail, fd, i;
+ int res = 0, rearranged = 0, *next, *tail, i;
char **subjects;
- fd = open(todo_file, O_RDONLY);
- if (fd < 0)
- return error_errno(_("could not open '%s'"), todo_file);
- if (strbuf_read(&todo_list.buf, fd, 0) < 0) {
- close(fd);
- return error(_("could not read '%s'."), todo_file);
- }
- close(fd);
+ if (strbuf_read_file_or_whine(&todo_list.buf, todo_file) < 0)
+ return -1;
if (parse_insn_buffer(todo_list.buf.buf, &todo_list) < 0) {
todo_list_release(&todo_list);
return -1;
#include "bulk-checkin.h"
#include "streaming.h"
#include "dir.h"
-#include "mru.h"
#include "list.h"
#include "mergesort.h"
#include "quote.h"
#include "packfile.h"
+#include "fetch-object.h"
const unsigned char null_sha1[GIT_MAX_RAWSZ];
const struct object_id null_oid;
EMPTY_BLOB_SHA1_BIN_LITERAL
};
-static void git_hash_sha1_init(void *ctx)
+static void git_hash_sha1_init(git_hash_ctx *ctx)
{
- git_SHA1_Init((git_SHA_CTX *)ctx);
+ git_SHA1_Init(&ctx->sha1);
}
-static void git_hash_sha1_update(void *ctx, const void *data, size_t len)
+static void git_hash_sha1_update(git_hash_ctx *ctx, const void *data, size_t len)
{
- git_SHA1_Update((git_SHA_CTX *)ctx, data, len);
+ git_SHA1_Update(&ctx->sha1, data, len);
}
-static void git_hash_sha1_final(unsigned char *hash, void *ctx)
+static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx)
{
- git_SHA1_Final(hash, (git_SHA_CTX *)ctx);
+ git_SHA1_Final(hash, &ctx->sha1);
}
-static void git_hash_unknown_init(void *ctx)
+static void git_hash_unknown_init(git_hash_ctx *ctx)
{
die("trying to init unknown hash");
}
-static void git_hash_unknown_update(void *ctx, const void *data, size_t len)
+static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
{
die("trying to update unknown hash");
}
-static void git_hash_unknown_final(unsigned char *hash, void *ctx)
+static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
{
die("trying to finalize unknown hash");
}
0x00000000,
0,
0,
- 0,
git_hash_unknown_init,
git_hash_unknown_update,
git_hash_unknown_final,
"sha-1",
/* "sha1", big-endian */
0x73686131,
- sizeof(git_SHA_CTX),
GIT_SHA1_RAWSZ,
GIT_SHA1_HEXSZ,
git_hash_sha1_init,
}
-static enum safe_crlf get_safe_crlf(unsigned flags)
+static int get_conv_flags(unsigned flags)
{
if (flags & HASH_RENORMALIZE)
- return SAFE_CRLF_RENORMALIZE;
+ return CONV_EOL_RENORMALIZE;
else if (flags & HASH_WRITE_OBJECT)
- return safe_crlf;
+ return global_conv_flags_eol;
else
- return SAFE_CRLF_FALSE;
+ return 0;
}
}
}
-const char *sha1_file_name(const unsigned char *sha1)
+void sha1_file_name(struct strbuf *buf, const unsigned char *sha1)
{
- static struct strbuf buf = STRBUF_INIT;
-
- strbuf_reset(&buf);
- strbuf_addf(&buf, "%s/", get_object_directory());
-
- fill_sha1_path(&buf, sha1);
- return buf.buf;
+ strbuf_addstr(buf, get_object_directory());
+ strbuf_addch(buf, '/');
+ fill_sha1_path(buf, sha1);
}
struct strbuf *alt_scratch_buf(struct alternate_object_database *alt)
static int check_and_freshen_local(const unsigned char *sha1, int freshen)
{
- return check_and_freshen_file(sha1_file_name(sha1), freshen);
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(&buf, sha1);
+
+ return check_and_freshen_file(buf.buf, freshen);
}
static int check_and_freshen_nonlocal(const unsigned char *sha1, int freshen)
int check_sha1_signature(const unsigned char *sha1, void *map,
unsigned long size, const char *type)
{
- unsigned char real_sha1[20];
+ struct object_id real_oid;
enum object_type obj_type;
struct git_istream *st;
- git_SHA_CTX c;
+ git_hash_ctx c;
char hdr[32];
int hdrlen;
if (map) {
- hash_sha1_file(map, size, type, real_sha1);
- return hashcmp(sha1, real_sha1) ? -1 : 0;
+ hash_object_file(map, size, type, &real_oid);
+ return hashcmp(sha1, real_oid.hash) ? -1 : 0;
}
st = open_istream(sha1, &obj_type, &size, NULL);
return -1;
/* Generate the header */
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(obj_type), size) + 1;
+ hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(obj_type), size) + 1;
/* Sha1.. */
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, hdrlen);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
for (;;) {
char buf[1024 * 16];
ssize_t readlen = read_istream(st, buf, sizeof(buf));
}
if (!readlen)
break;
- git_SHA1_Update(&c, buf, readlen);
+ the_hash_algo->update_fn(&c, buf, readlen);
}
- git_SHA1_Final(real_sha1, &c);
+ the_hash_algo->final_fn(real_oid.hash, &c);
close_istream(st);
- return hashcmp(sha1, real_sha1) ? -1 : 0;
+ return hashcmp(sha1, real_oid.hash) ? -1 : 0;
}
int git_open_cloexec(const char *name, int flags)
const char **path)
{
struct alternate_object_database *alt;
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(&buf, sha1);
+ *path = buf.buf;
- *path = sha1_file_name(sha1);
if (!lstat(*path, st))
return 0;
int fd;
struct alternate_object_database *alt;
int most_interesting_errno;
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(&buf, sha1);
+ *path = buf.buf;
- *path = sha1_file_name(sha1);
fd = git_open(*path);
if (fd >= 0)
return fd;
}
type = type_from_string_gently(type_buf, type_len, 1);
- if (oi->typename)
- strbuf_add(oi->typename, type_buf, type_len);
+ if (oi->type_name)
+ strbuf_add(oi->type_name, type_buf, type_len);
/*
* Set type to 0 if its an unknown object and
* we're obtaining the type using '--allow-unknown-type'
* return value implicitly indicates whether the
* object even exists.
*/
- if (!oi->typep && !oi->typename && !oi->sizep && !oi->contentp) {
+ if (!oi->typep && !oi->type_name && !oi->sizep && !oi->contentp) {
const char *path;
struct stat st;
if (stat_sha1_file(sha1, &st, &path) < 0)
return (status < 0) ? status : 0;
}
+int fetch_if_missing = 1;
+
int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, unsigned flags)
{
static struct object_info blank_oi = OBJECT_INFO_INIT;
const unsigned char *real = (flags & OBJECT_INFO_LOOKUP_REPLACE) ?
lookup_replace_object(sha1) :
sha1;
+ int already_retried = 0;
if (is_null_sha1(real))
return -1;
*(oi->disk_sizep) = 0;
if (oi->delta_base_sha1)
hashclr(oi->delta_base_sha1);
- if (oi->typename)
- strbuf_addstr(oi->typename, typename(co->type));
+ if (oi->type_name)
+ strbuf_addstr(oi->type_name, type_name(co->type));
if (oi->contentp)
*oi->contentp = xmemdupz(co->buf, co->size);
oi->whence = OI_CACHED;
}
}
- if (!find_pack_entry(real, &e)) {
+ while (1) {
+ if (find_pack_entry(real, &e))
+ break;
+
/* Most likely it's a loose object. */
if (!sha1_loose_object_info(real, oi, flags))
return 0;
/* Not a loose object; someone else may have just packed it. */
- if (flags & OBJECT_INFO_QUICK) {
- return -1;
- } else {
- reprepare_packed_git();
- if (!find_pack_entry(real, &e))
- return -1;
+ reprepare_packed_git();
+ if (find_pack_entry(real, &e))
+ break;
+
+ /* Check if it is a missing object */
+ if (fetch_if_missing && repository_format_partial_clone &&
+ !already_retried) {
+ /*
+			 * TODO Investigate having fetch_object() return
+ * TODO error/success and stopping the music here.
+ */
+ fetch_object(repository_format_partial_clone, real);
+ already_retried = 1;
+ continue;
}
+
+ return -1;
}
if (oi == &blank_oi)
* information below, so return early.
*/
return 0;
-
rtype = packed_object_info(e.p, e.offset, oi);
if (rtype < 0) {
mark_bad_packed_object(e.p, real);
return content;
}
-int pretend_sha1_file(void *buf, unsigned long len, enum object_type type,
- unsigned char *sha1)
+int pretend_object_file(void *buf, unsigned long len, enum object_type type,
+ struct object_id *oid)
{
struct cached_object *co;
- hash_object_file(buf, len, typename(type), oid);
- hash_sha1_file(buf, len, type_name(type), sha1);
- if (has_sha1_file(sha1) || find_cached_object(sha1))
++ hash_object_file(buf, len, type_name(type), oid);
+ if (has_sha1_file(oid->hash) || find_cached_object(oid->hash))
return 0;
ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc);
co = &cached_objects[cached_object_nr++];
co->type = type;
co->buf = xmalloc(len);
memcpy(co->buf, buf, len);
- hashcpy(co->sha1, sha1);
+ hashcpy(co->sha1, oid->hash);
return 0;
}
}
}
-static void write_sha1_file_prepare(const void *buf, unsigned long len,
- const char *type, unsigned char *sha1,
- char *hdr, int *hdrlen)
+static void write_object_file_prepare(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ char *hdr, int *hdrlen)
{
- git_SHA_CTX c;
+ git_hash_ctx c;
/* Generate the header */
*hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1;
/* Sha1.. */
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, *hdrlen);
- git_SHA1_Update(&c, buf, len);
- git_SHA1_Final(sha1, &c);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, *hdrlen);
+ the_hash_algo->update_fn(&c, buf, len);
+ the_hash_algo->final_fn(oid->hash, &c);
}
/*
return 0;
}
-int hash_sha1_file(const void *buf, unsigned long len, const char *type,
- unsigned char *sha1)
+int hash_object_file(const void *buf, unsigned long len, const char *type,
+ struct object_id *oid)
{
char hdr[32];
int hdrlen = sizeof(hdr);
- write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
+ write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
return 0;
}
return fd;
}
-static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
- const void *buf, unsigned long len, time_t mtime)
+static int write_loose_object(const struct object_id *oid, char *hdr,
+ int hdrlen, const void *buf, unsigned long len,
+ time_t mtime)
{
int fd, ret;
unsigned char compressed[4096];
git_zstream stream;
- git_SHA_CTX c;
- unsigned char parano_sha1[20];
+ git_hash_ctx c;
+ struct object_id parano_oid;
static struct strbuf tmp_file = STRBUF_INIT;
- const char *filename = sha1_file_name(sha1);
+ static struct strbuf filename = STRBUF_INIT;
+
+ strbuf_reset(&filename);
+ sha1_file_name(&filename, oid->hash);
- fd = create_tmpfile(&tmp_file, filename);
+ fd = create_tmpfile(&tmp_file, filename.buf);
if (fd < 0) {
if (errno == EACCES)
return error("insufficient permission for adding an object to repository database %s", get_object_directory());
git_deflate_init(&stream, zlib_compression_level);
stream.next_out = compressed;
stream.avail_out = sizeof(compressed);
- git_SHA1_Init(&c);
+ the_hash_algo->init_fn(&c);
/* First header.. */
stream.next_in = (unsigned char *)hdr;
stream.avail_in = hdrlen;
while (git_deflate(&stream, 0) == Z_OK)
; /* nothing */
- git_SHA1_Update(&c, hdr, hdrlen);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
/* Then the data itself.. */
stream.next_in = (void *)buf;
do {
unsigned char *in0 = stream.next_in;
ret = git_deflate(&stream, Z_FINISH);
- git_SHA1_Update(&c, in0, stream.next_in - in0);
+ the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
die("unable to write sha1 file");
stream.next_out = compressed;
} while (ret == Z_OK);
if (ret != Z_STREAM_END)
- die("unable to deflate new object %s (%d)", sha1_to_hex(sha1), ret);
+ die("unable to deflate new object %s (%d)", oid_to_hex(oid),
+ ret);
ret = git_deflate_end_gently(&stream);
if (ret != Z_OK)
- die("deflateEnd on object %s failed (%d)", sha1_to_hex(sha1), ret);
- git_SHA1_Final(parano_sha1, &c);
- if (hashcmp(sha1, parano_sha1) != 0)
- die("confused by unstable object source data for %s", sha1_to_hex(sha1));
+ die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
+ ret);
+ the_hash_algo->final_fn(parano_oid.hash, &c);
+ if (oidcmp(oid, ¶no_oid) != 0)
+ die("confused by unstable object source data for %s",
+ oid_to_hex(oid));
close_sha1_file(fd);
warning_errno("failed utime() on %s", tmp_file.buf);
}
- return finalize_object_file(tmp_file.buf, filename);
+ return finalize_object_file(tmp_file.buf, filename.buf);
}
static int freshen_loose_object(const unsigned char *sha1)
return 1;
}
-int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1)
+int write_object_file(const void *buf, unsigned long len, const char *type,
+ struct object_id *oid)
{
char hdr[32];
int hdrlen = sizeof(hdr);
/* Normally if we have it in the pack then we do not bother writing
* it out into .git/objects/??/?{38} file.
*/
- write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
- if (freshen_packed_object(sha1) || freshen_loose_object(sha1))
+ write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
+ if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
return 0;
- return write_loose_object(sha1, hdr, hdrlen, buf, len, 0);
+ return write_loose_object(oid, hdr, hdrlen, buf, len, 0);
}
-int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type,
- struct object_id *oid, unsigned flags)
+int hash_object_file_literally(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ unsigned flags)
{
char *header;
int hdrlen, status = 0;
/* type string, SP, %lu of the length plus NUL must fit this */
hdrlen = strlen(type) + 32;
header = xmalloc(hdrlen);
- write_sha1_file_prepare(buf, len, type, oid->hash, header, &hdrlen);
+ write_object_file_prepare(buf, len, type, oid, header, &hdrlen);
if (!(flags & HASH_WRITE_OBJECT))
goto cleanup;
if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
goto cleanup;
- status = write_loose_object(oid->hash, header, hdrlen, buf, len, 0);
+ status = write_loose_object(oid, header, hdrlen, buf, len, 0);
cleanup:
free(header);
return status;
}
-int force_object_loose(const unsigned char *sha1, time_t mtime)
+int force_object_loose(const struct object_id *oid, time_t mtime)
{
void *buf;
unsigned long len;
int hdrlen;
int ret;
- if (has_loose_object(sha1))
+ if (has_loose_object(oid->hash))
return 0;
- buf = read_object(sha1, &type, &len);
+ buf = read_object(oid->hash, &type, &len);
if (!buf)
- return error("cannot read sha1_file for %s", sha1_to_hex(sha1));
+ return error("cannot read sha1_file for %s", oid_to_hex(oid));
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1;
+ hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
- ret = write_loose_object(sha1, hdr, hdrlen, buf, len, mtime);
+ ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
free(buf);
return ret;
if ((type == OBJ_BLOB) && path) {
struct strbuf nbuf = STRBUF_INIT;
if (convert_to_git(&the_index, path, buf, size, &nbuf,
- get_safe_crlf(flags))) {
+ get_conv_flags(flags))) {
buf = strbuf_detach(&nbuf, &size);
re_allocated = 1;
}
}
if (write_object)
- ret = write_object_file(buf, size, typename(type), oid);
- ret = write_sha1_file(buf, size, type_name(type), oid->hash);
++ ret = write_object_file(buf, size, type_name(type), oid);
else
- ret = hash_object_file(buf, size, typename(type), oid);
- ret = hash_sha1_file(buf, size, type_name(type), oid->hash);
++ ret = hash_object_file(buf, size, type_name(type), oid);
if (re_allocated)
free(buf);
return ret;
assert(would_convert_to_git_filter_fd(path));
convert_to_git_filter_fd(&the_index, path, fd, &sbuf,
- get_safe_crlf(flags));
+ get_conv_flags(flags));
if (write_object)
- ret = write_object_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB),
- ret = write_sha1_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
- oid->hash);
++ ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+ oid);
else
- ret = hash_object_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB),
- ret = hash_sha1_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
- oid->hash);
++ ret = hash_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+ oid);
strbuf_release(&sbuf);
return ret;
}
if (strbuf_readlink(&sb, path, st->st_size))
return error_errno("readlink(\"%s\")", path);
if (!(flags & HASH_WRITE_OBJECT))
- hash_sha1_file(sb.buf, sb.len, blob_type, oid->hash);
- else if (write_sha1_file(sb.buf, sb.len, blob_type, oid->hash))
+ hash_object_file(sb.buf, sb.len, blob_type, oid);
+ else if (write_object_file(sb.buf, sb.len, blob_type, oid))
rc = error("%s: failed to insert into database", path);
strbuf_release(&sb);
break;
die("%s is not a valid object", sha1_to_hex(sha1));
if (type != expect)
die("%s is not a valid '%s' object", sha1_to_hex(sha1),
- typename(expect));
+ type_name(expect));
}
int for_each_file_in_obj_subdir(unsigned int subdir_nr,
const char *path,
const unsigned char *expected_sha1)
{
- git_SHA_CTX c;
+ git_hash_ctx c;
unsigned char real_sha1[GIT_MAX_RAWSZ];
unsigned char buf[4096];
unsigned long total_read;
int status = Z_OK;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, stream->total_out);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, stream->total_out);
/*
* We already read some bytes into hdr, but the ones up to the NUL
if (size - total_read < stream->avail_out)
stream->avail_out = size - total_read;
status = git_inflate(stream, Z_FINISH);
- git_SHA1_Update(&c, buf, stream->next_out - buf);
+ the_hash_algo->update_fn(&c, buf, stream->next_out - buf);
total_read += stream->next_out - buf;
}
git_inflate_end(stream);
return -1;
}
- git_SHA1_Final(real_sha1, &c);
+ the_hash_algo->final_fn(real_sha1, &c);
if (hashcmp(expected_sha1, real_sha1)) {
error("sha1 mismatch for %s (expected %s)", path,
sha1_to_hex(expected_sha1));
goto out;
}
if (check_sha1_signature(expected_sha1, *contents,
- *size, typename(*type))) {
+ *size, type_name(*type))) {
error("sha1 mismatch for %s (expected %s)", path,
sha1_to_hex(expected_sha1));
free(*contents);
ALLOC_GROW(entries, nr_entries+1, nr_alloc);
entries[nr_entries++] = ce;
}
+ if (is_null_oid(&ce->oid))
+ istate->drop_cache_tree = 1;
}
}
}
void replace_index_entry_in_base(struct index_state *istate,
- struct cache_entry *old,
- struct cache_entry *new)
+ struct cache_entry *old_entry,
+ struct cache_entry *new_entry)
{
- if (old->index &&
+ if (old_entry->index &&
istate->split_index &&
istate->split_index->base &&
- old->index <= istate->split_index->base->cache_nr) {
- new->index = old->index;
- if (old != istate->split_index->base->cache[new->index - 1])
- free(istate->split_index->base->cache[new->index - 1]);
- istate->split_index->base->cache[new->index - 1] = new;
+ old_entry->index <= istate->split_index->base->cache_nr) {
+ new_entry->index = old_entry->index;
+ if (old_entry != istate->split_index->base->cache[new_entry->index - 1])
+ free(istate->split_index->base->cache[new_entry->index - 1]);
+ istate->split_index->base->cache[new_entry->index - 1] = new_entry;
}
}
#include "submodule.h"
#include "submodule-config.h"
#include "fsmonitor.h"
+#include "fetch-object.h"
/*
* Error messages expected by scripts out of plumbing commands such as
static struct cache_entry *dup_entry(const struct cache_entry *ce)
{
unsigned int size = ce_size(ce);
- struct cache_entry *new = xmalloc(size);
+ struct cache_entry *new_entry = xmalloc(size);
- memcpy(new, ce, size);
- return new;
+ memcpy(new_entry, ce, size);
+ return new_entry;
}
static void add_entry(struct unpack_trees_options *o,
load_gitmodules_file(index, &state);
enable_delayed_checkout(&state);
+ if (repository_format_partial_clone && o->update && !o->dry_run) {
+ /*
+ * Prefetch the objects that are to be checked out in the loop
+ * below.
+ */
+ struct oid_array to_fetch = OID_ARRAY_INIT;
+ int fetch_if_missing_store = fetch_if_missing;
+ fetch_if_missing = 0;
+ for (i = 0; i < index->cache_nr; i++) {
+ struct cache_entry *ce = index->cache[i];
+ if ((ce->ce_flags & CE_UPDATE) &&
+ !S_ISGITLINK(ce->ce_mode)) {
+ if (!has_object_file(&ce->oid))
+ oid_array_append(&to_fetch, &ce->oid);
+ }
+ }
+ if (to_fetch.nr)
+ fetch_objects(repository_format_partial_clone,
+ &to_fetch);
+ fetch_if_missing = fetch_if_missing_store;
+ }
for (i = 0; i < index->cache_nr; i++) {
struct cache_entry *ce = index->cache[i];
if (!ce)
return;
cache_tree_invalidate_path(o->src_index, ce->name);
- untracked_cache_invalidate_path(o->src_index, ce->name);
+ untracked_cache_invalidate_path(o->src_index, ce->name, 1);
}
/*