* (it also writes the merge result to the object database even
* when it may contain conflicts).
*/
- if (write_sha1_file(result_buf.ptr, result_buf.size,
- blob_type, oid.hash))
+ if (write_object_file(result_buf.ptr, result_buf.size, blob_type, &oid))
die(_("Unable to add merge result for '%s'"), path);
free(result_buf.ptr);
ce = make_cache_entry(mode, oid.hash, path, 2, 0);
{
struct rev_info revs;
struct object *object = &old->object;
- struct object_array refs;
init_revisions(&revs, NULL);
setup_revisions(0, NULL, &revs, NULL);
for_each_ref(add_pending_uninteresting_ref, &revs);
add_pending_oid(&revs, "HEAD", &new->object.oid, UNINTERESTING);
- /* Save pending objects, so they can be cleaned up later. */
- refs = revs.pending;
- revs.leak_pending = 1;
-
- /*
- * prepare_revision_walk (together with .leak_pending = 1) makes us
- * the sole owner of the list of pending objects.
- */
if (prepare_revision_walk(&revs))
die(_("internal error in revision walk"));
if (!(old->object.flags & UNINTERESTING))
	suggest_reattach(old, &revs);
else
	describe_detached_head(_("Previous HEAD position was"), old);
/* Clean up objects used, as they will be reused. */
- clear_commit_marks_for_object_array(&refs, ALL_REV_FLAGS);
-
- object_array_clear(&refs);
+ clear_commit_marks_all(ALL_REV_FLAGS);
}
static int switch_branches(const struct checkout_opts *opts,