Conversion from uchar[20] to struct object_id continues.
* bc/object-id: (42 commits)
merge-one-file: compute empty blob object ID
add--interactive: compute the empty tree value
Update shell scripts to compute empty tree object ID
sha1_file: only expose empty object constants through git_hash_algo
dir: use the_hash_algo for empty blob object ID
sequencer: use the_hash_algo for empty tree object ID
cache-tree: use is_empty_tree_oid
sha1_file: convert cached object code to struct object_id
builtin/reset: convert use of EMPTY_TREE_SHA1_BIN
builtin/receive-pack: convert one use of EMPTY_TREE_SHA1_HEX
wt-status: convert two uses of EMPTY_TREE_SHA1_HEX
submodule: convert several uses of EMPTY_TREE_SHA1_HEX
sequencer: convert one use of EMPTY_TREE_SHA1_HEX
merge: convert empty tree constant to the_hash_algo
builtin/merge: switch tree functions to use object_id
builtin/am: convert uses of EMPTY_TREE_SHA1_BIN to the_hash_algo
sha1-file: add functions for hex empty tree and blob OIDs
builtin/receive-pack: avoid hard-coded constants for push certs
diff: specify abbreviation size in terms of the_hash_algo
upload-pack: replace use of several hard-coded constants
...
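The common pattern across the series: callers stop copying hard-coded SHA-1
constants and instead go through the_hash_algo, so the same code works
regardless of hash size. A minimal sketch of the pattern, using only the
get_oid()/oidcpy() API visible in the diffs below:

	/* fall back to the empty tree on an unborn branch, hash-agnostically */
	static void tree_or_empty(struct object_id *tree)
	{
		if (get_oid("HEAD", tree) < 0)
			oidcpy(tree, the_hash_algo->empty_tree);
	}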
#include "cache.h"
#include "config.h"
#include "builtin.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "parse-options.h"
#include "dir.h"
#include "run-command.h"
struct strbuf sb = STRBUF_INIT;
if (read_state_file(&sb, state, "next", 1) < 0)
- die("BUG: state file 'next' does not exist");
+ BUG("state file 'next' does not exist");
state->cur = strtol(sb.buf, NULL, 10);
if (read_state_file(&sb, state, "last", 1) < 0)
- die("BUG: state file 'last' does not exist");
+ BUG("state file 'last' does not exist");
state->last = strtol(sb.buf, NULL, 10);
if (read_author_script(state) < 0)
case PATCH_FORMAT_MBOXRD:
return split_mail_mbox(state, paths, keep_cr, 1);
default:
- die("BUG: invalid patch_format");
+ BUG("invalid patch_format");
}
return -1;
}
str = "b";
break;
default:
- die("BUG: invalid value for state->keep");
+ BUG("invalid value for state->keep");
}
write_state_text(state, "keep", str);
str = "t";
break;
default:
- die("BUG: invalid value for state->scissors");
+ BUG("invalid value for state->scissors");
}
write_state_text(state, "scissors", str);
mi.keep_non_patch_brackets_in_subject = 1;
break;
default:
- die("BUG: invalid value for state->keep");
+ BUG("invalid value for state->keep");
}
if (state->message_id)
mi.use_scissors = 1;
break;
default:
- die("BUG: invalid value for state->scissors");
+ BUG("invalid value for state->scissors");
}
mi.input = xfopen(mail, "r");
int options = 0;
if (init_apply_state(&apply_state, NULL))
- die("BUG: init_apply_state() failed");
+ BUG("init_apply_state() failed");
argv_array_push(&apply_opts, "apply");
argv_array_pushv(&apply_opts, state->git_apply_opts.argv);
apply_state.apply_verbosity = verbosity_silent;
if (check_apply_state(&apply_state, force_apply))
- die("BUG: check_apply_state() failed");
+ BUG("check_apply_state() failed");
argv_array_push(&apply_paths, am_path(state, "patch"));
char *their_tree_name;
if (get_oid("HEAD", &our_tree) < 0)
- hashcpy(our_tree.hash, EMPTY_TREE_SHA1_BIN);
+ oidcpy(&our_tree, the_hash_algo->empty_tree);
if (build_fake_ancestor(state, index_path))
return error("could not build fake ancestor");
am_rerere_clear();
if (get_oid("HEAD", &head))
- hashcpy(head.hash, EMPTY_TREE_SHA1_BIN);
+ oidcpy(&head, the_hash_algo->empty_tree);
if (clean_index(&head, &head))
die(_("failed to clean index"));
curr_branch = resolve_refdup("HEAD", 0, &curr_head, NULL);
has_curr_head = curr_branch && !is_null_oid(&curr_head);
if (!has_curr_head)
- hashcpy(curr_head.hash, EMPTY_TREE_SHA1_BIN);
+ oidcpy(&curr_head, the_hash_algo->empty_tree);
has_orig_head = !get_oid("ORIG_HEAD", &orig_head);
if (!has_orig_head)
- hashcpy(orig_head.hash, EMPTY_TREE_SHA1_BIN);
+ oidcpy(&orig_head, the_hash_algo->empty_tree);
clean_index(&curr_head, &orig_head);
ret = show_patch(&state);
break;
default:
- die("BUG: invalid resume value");
+ BUG("invalid resume value");
}
am_state_release(&state);
const char *ret;
if (obj->type == OBJ_NONE) {
- enum object_type type = oid_object_info(&obj->oid, NULL);
+ enum object_type type = oid_object_info(the_repository,
+ &obj->oid, NULL);
if (type > 0)
object_as_type(obj, type, 0);
}
if (!(obj->flags & HAS_OBJ)) {
if (is_promisor_object(&obj->oid))
return;
- if (has_sha1_pack(obj->oid.hash))
+ if (has_object_pack(&obj->oid))
return; /* it is in pack - forget about it */
printf("missing %s %s\n", printable_type(obj),
describe_object(obj));
}
}
-static int fsck_obj(struct object *obj)
+static int fsck_obj(struct object *obj, void *buffer, unsigned long size)
{
int err;
if (fsck_walk(obj, NULL, &fsck_obj_options))
objerror(obj, "broken links");
- err = fsck_object(obj, NULL, 0, &fsck_obj_options);
+ err = fsck_object(obj, buffer, size, &fsck_obj_options);
if (err)
goto out;
}
obj->flags &= ~(REACHABLE | SEEN);
obj->flags |= HAS_OBJ;
- return fsck_obj(obj);
+ return fsck_obj(obj, buffer, size);
}
static int default_refs;
}
}
-static struct object *parse_loose_object(const struct object_id *oid,
- const char *path)
+static int fsck_loose(const struct object_id *oid, const char *path, void *data)
{
struct object *obj;
- void *contents;
enum object_type type;
unsigned long size;
+ void *contents;
int eaten;
- if (read_loose_object(path, oid, &type, &size, &contents) < 0)
- return NULL;
+ if (read_loose_object(path, oid, &type, &size, &contents) < 0) {
+ errors_found |= ERROR_OBJECT;
+ error("%s: object corrupt or missing: %s",
+ oid_to_hex(oid), path);
+ return 0; /* keep checking other objects */
+ }
if (!contents && type != OBJ_BLOB)
- die("BUG: read_loose_object streamed a non-blob");
+ BUG("read_loose_object streamed a non-blob");
obj = parse_object_buffer(oid, type, size, contents, &eaten);
-
- if (!eaten)
- free(contents);
- return obj;
-}
-
-static int fsck_loose(const struct object_id *oid, const char *path, void *data)
-{
- struct object *obj = parse_loose_object(oid, path);
-
if (!obj) {
errors_found |= ERROR_OBJECT;
- error("%s: object corrupt or missing: %s",
+ error("%s: object could not be parsed: %s",
oid_to_hex(oid), path);
+ if (!eaten)
+ free(contents);
return 0; /* keep checking other objects */
}
obj->flags &= ~(REACHABLE | SEEN);
obj->flags |= HAS_OBJ;
- if (fsck_obj(obj))
+ if (fsck_obj(obj, contents, size))
errors_found |= ERROR_OBJECT;
- return 0;
+
+ if (!eaten)
+ free(contents);
+ return 0; /* keep checking other objects, even if we saw an error */
}
static int fsck_cruft(const char *basename, const char *path, void *data)
}
stop_progress(&progress);
}
+
+ if (fsck_finish(&fsck_obj_options))
+ errors_found |= ERROR_OBJECT;
}
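/*
 * (Not part of the patch: fsck_finish() drains the checks deferred
 * during the walk -- notably queued .gitmodules blobs, which can only
 * be verified once the blob contents themselves have been read,
 * whatever order the objects were visited in.)
 */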
for (i = 0; i < argc; i++) {
#include "tree.h"
#include "progress.h"
#include "fsck.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "streaming.h"
#include "thread-utils.h"
#include "packfile.h"
if (!(obj->flags & FLAG_CHECKED)) {
unsigned long size;
- int type = oid_object_info(&obj->oid, &size);
+ int type = oid_object_info(the_repository, &obj->oid, &size);
if (type <= 0)
die(_("did not receive expected object %s"),
oid_to_hex(&obj->oid));
enum object_type has_type;
unsigned long has_size;
read_lock();
- has_type = oid_object_info(oid, &has_size);
+ has_type = oid_object_info(the_repository, oid, &has_size);
if (has_type < 0)
die(_("cannot read existing object info %s"), oid_to_hex(oid));
if (has_type != type || has_size != size)
blob->object.flags |= FLAG_CHECKED;
else
die(_("invalid blob object %s"), oid_to_hex(oid));
+ if (do_fsck_object &&
+ fsck_object(&blob->object, (void *)data, size, &fsck_options))
+ die(_("fsck error in packed object"));
} else {
struct object *obj;
int eaten;
die(_("invalid %s"), type_name(type));
if (do_fsck_object &&
fsck_object(obj, buf, size, &fsck_options))
- die(_("Error in object"));
+ die(_("fsck error in packed object"));
if (strict && fsck_walk(obj, NULL, &fsck_options))
die(_("Not all child objects of %s are reachable"), oid_to_hex(&obj->oid));
if (obj->type == OBJ_COMMIT) {
struct commit *commit = (struct commit *) obj;
if (detach_commit_buffer(commit, NULL) != data)
- die("BUG: parse_object_buffer transmogrified our buffer");
+ BUG("parse_object_buffer transmogrified our buffer");
}
obj->flags |= FLAG_CHECKED;
}
if (!compare_and_swap_type(&child->real_type, OBJ_REF_DELTA,
base->obj->real_type))
- die("BUG: child->real_type != OBJ_REF_DELTA");
+ BUG("child->real_type != OBJ_REF_DELTA");
resolve_delta(child, base, result);
if (base->ref_first == base->ref_last && base->ofs_last == -1)
nr_objects - nr_objects_initial);
stop_progress_msg(&progress, msg.buf);
strbuf_release(&msg);
- hashclose(f, tail_hash, 0);
+ finalize_hashfile(f, tail_hash, 0);
hashcpy(read_hash, pack_hash);
fixup_pack_header_footer(output_fd, pack_hash,
curr_pack, nr_objects,
} else
chmod(final_index_name, 0444);
+ if (do_fsck_object)
+ add_packed_git(final_index_name, strlen(final_index_name), 0);
+
if (!from_stdin) {
printf("%s\n", sha1_to_hex(hash));
} else {
{
const uint32_t *idx1, *idx2;
uint32_t i;
+ const uint32_t hashwords = the_hash_algo->rawsz / sizeof(uint32_t);
/* The address of the 4-byte offset table */
idx1 = (((const uint32_t *)p->index_data)
+ 2 /* 8-byte header */
+ 256 /* fan out */
- + 5 * p->num_objects /* 20-byte SHA-1 table */
+ + hashwords * p->num_objects /* object ID table */
+ p->num_objects /* CRC32 table */
);
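/*
 * (Not part of the patch; a sketch of how the table computed above is
 * consumed: in a v2 idx, a 4-byte offset entry whose high bit is set
 * is an index into the 8-byte offset table that follows. get_be64()
 * is git's big-endian helper.)
 */
static off_t nth_offset_sketch(const uint32_t *ofs32,
			       const unsigned char *ofs64, uint32_t nth)
{
	uint32_t off = ntohl(ofs32[nth]);
	if (!(off & 0x80000000))
		return off;	/* fits in 31 bits, stored directly */
	/* high bit set: low 31 bits index the 64-bit table */
	return get_be64(ofs64 + 8 * (off & 0x7fffffff));
}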
/*
* Get rid of the idx file as we do not need it anymore.
* NEEDSWORK: extract this bit from free_pack_by_name() in
- * sha1_file.c, perhaps? It shouldn't matter very much as we
+ * sha1-file.c, perhaps? It shouldn't matter very much as we
* know we haven't installed this pack (hence we never have
* read anything from it).
*/
pack_hash);
else
close(input_fd);
+
+ if (do_fsck_object && fsck_finish(&fsck_options))
+ die(_("fsck error in pack objects"));
+
free(objects);
strbuf_release(&index_name_buf);
if (pack_name == NULL)
return rc;
}
- static void read_empty(unsigned const char *sha1, int verbose)
+ static void read_empty(const struct object_id *oid, int verbose)
{
int i = 0;
const char *args[7];
args[i++] = "-v";
args[i++] = "-m";
args[i++] = "-u";
- args[i++] = EMPTY_TREE_SHA1_HEX;
- args[i++] = sha1_to_hex(sha1);
+ args[i++] = empty_tree_oid_hex();
+ args[i++] = oid_to_hex(oid);
args[i] = NULL;
if (run_command_v_opt(args, RUN_GIT_CMD))
die(_("read-tree failed"));
}
- static void reset_hard(unsigned const char *sha1, int verbose)
+ static void reset_hard(const struct object_id *oid, int verbose)
{
int i = 0;
const char *args[6];
args[i++] = "-v";
args[i++] = "--reset";
args[i++] = "-u";
- args[i++] = sha1_to_hex(sha1);
+ args[i++] = oid_to_hex(oid);
args[i] = NULL;
if (run_command_v_opt(args, RUN_GIT_CMD))
if (is_null_oid(stash))
return;
- reset_hard(head->hash, 1);
+ reset_hard(head, 1);
args[2] = oid_to_hex(stash);
struct commit_list *remoteheads,
struct commit *head)
{
- static struct lock_file lock;
+ struct lock_file lock = LOCK_INIT;
const char *head_arg = "HEAD";
hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
{
struct object_id result_tree, result_commit;
struct commit_list *parents, **pptr = &parents;
- static struct lock_file lock;
+ struct lock_file lock = LOCK_INIT;
hold_locked_index(&lock, LOCK_DIE_ON_ERROR);
refresh_cache(REFRESH_QUIET);
if (remoteheads->next)
die(_("Can merge only exactly one commit into empty head"));
remote_head_oid = &remoteheads->item->object.oid;
- read_empty(remote_head_oid->hash, 0);
+ read_empty(remote_head_oid, 0);
update_ref("initial pull", "HEAD", remote_head_oid, NULL, 0,
UPDATE_REFS_DIE_ON_ERR);
goto done;
#include "list.h"
#include "packfile.h"
#include "object-store.h"
+#include "dir.h"
+
+#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
+#define SIZE(obj) oe_size(&to_pack, obj)
+#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size)
+#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj)
+#define DELTA(obj) oe_delta(&to_pack, obj)
+#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
+#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
+#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
+#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val)
+#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
+#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)
static const char *pack_usage[] = {
N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
static struct packing_data to_pack;
static struct pack_idx_entry **written_list;
-static uint32_t nr_result, nr_written;
+static uint32_t nr_result, nr_written, nr_seen;
static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int local;
static int have_non_local_packs;
static int incremental;
-static int ignore_packed_keep;
+static int ignore_packed_keep_on_disk;
+static int ignore_packed_keep_in_core;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
static const char *base_name;
static int exclude_promisor_objects;
static unsigned long delta_cache_size = 0;
-static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
+static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
static unsigned long cache_max_small_delta_size = 1000;
static unsigned long window_memory_limit = 0;
buf = read_object_file(&entry->idx.oid, &type, &size);
if (!buf)
die("unable to read %s", oid_to_hex(&entry->idx.oid));
- base_buf = read_object_file(&entry->delta->idx.oid, &type, &base_size);
+ base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
+ &base_size);
if (!base_buf)
die("unable to read %s",
- oid_to_hex(&entry->delta->idx.oid));
+ oid_to_hex(&DELTA(entry)->idx.oid));
delta_buf = diff_delta(base_buf, base_size,
buf, size, &delta_size, 0);
- if (!delta_buf || delta_size != entry->delta_size)
+ if (!delta_buf || delta_size != DELTA_SIZE(entry))
die("delta size changed");
free(buf);
free(base_buf);
enum object_type type;
void *buf;
struct git_istream *st = NULL;
+ const unsigned hashsz = the_hash_algo->rawsz;
if (!usable_delta) {
- if (entry->type == OBJ_BLOB &&
- entry->size > big_file_threshold &&
+ if (oe_type(entry) == OBJ_BLOB &&
+ oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
(st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
buf = NULL;
else {
FREE_AND_NULL(entry->delta_data);
entry->z_delta_size = 0;
} else if (entry->delta_data) {
- size = entry->delta_size;
+ size = DELTA_SIZE(entry);
buf = entry->delta_data;
entry->delta_data = NULL;
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
} else {
buf = get_delta(entry);
- size = entry->delta_size;
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ size = DELTA_SIZE(entry);
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
}
* encoding of the relative offset for the delta
* base from this object's position in the pack.
*/
- off_t ofs = entry->idx.offset - entry->delta->idx.offset;
+ off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
unsigned pos = sizeof(dheader) - 1;
dheader[pos] = ofs & 127;
while (ofs >>= 7)
dheader[--pos] = 128 | (--ofs & 127);
- if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
+ if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
if (st)
close_istream(st);
free(buf);
} else if (type == OBJ_REF_DELTA) {
/*
* Deltas with a base reference contain
- * an additional 20 bytes for the base sha1.
+ * additional bytes for the base object ID.
*/
- if (limit && hdrlen + 20 + datalen + 20 >= limit) {
+ if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
if (st)
close_istream(st);
free(buf);
return 0;
}
hashwrite(f, header, hdrlen);
- hashwrite(f, DELTA(entry)->idx.oid.hash, 20);
- hdrlen += 20;
- hashwrite(f, entry->delta->idx.oid.hash, hashsz);
++ hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
+ hdrlen += hashsz;
} else {
- if (limit && hdrlen + datalen + 20 >= limit) {
+ if (limit && hdrlen + datalen + hashsz >= limit) {
if (st)
close_istream(st);
free(buf);
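/*
 * (Not part of the patch; a sketch of the decoder matching the dheader
 * encoding used above. The "--ofs" while encoding, mirrored by the
 * "+ 1" here, keeps the varint minimal by excluding values that would
 * fit in fewer bytes.)
 */
static off_t base_offset_sketch(const unsigned char *buf)
{
	off_t ofs = *buf & 127;
	while (*buf++ & 128)
		ofs = ((ofs + 1) << 7) + (*buf & 127);
	return ofs;
}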
static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
unsigned long limit, int usable_delta)
{
- struct packed_git *p = entry->in_pack;
+ struct packed_git *p = IN_PACK(entry);
struct pack_window *w_curs = NULL;
struct revindex_entry *revidx;
off_t offset;
- enum object_type type = entry->type;
+ enum object_type type = oe_type(entry);
off_t datalen;
unsigned char header[MAX_PACK_OBJECT_HEADER],
dheader[MAX_PACK_OBJECT_HEADER];
unsigned hdrlen;
+ const unsigned hashsz = the_hash_algo->rawsz;
+ unsigned long entry_size = SIZE(entry);
- if (entry->delta)
- type = (allow_ofs_delta && entry->delta->idx.offset) ?
+ if (DELTA(entry))
+ type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
OBJ_OFS_DELTA : OBJ_REF_DELTA;
hdrlen = encode_in_pack_object_header(header, sizeof(header),
- type, entry->size);
+ type, entry_size);
offset = entry->in_pack_offset;
revidx = find_pack_revindex(p, offset);
datalen -= entry->in_pack_header_size;
if (!pack_to_stdout && p->index_version == 1 &&
- check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
+ check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
error("corrupt packed object for %s",
oid_to_hex(&entry->idx.oid));
unuse_pack(&w_curs);
}
if (type == OBJ_OFS_DELTA) {
- off_t ofs = entry->idx.offset - entry->delta->idx.offset;
+ off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
unsigned pos = sizeof(dheader) - 1;
dheader[pos] = ofs & 127;
while (ofs >>= 7)
dheader[--pos] = 128 | (--ofs & 127);
- if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
+ if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) {
unuse_pack(&w_curs);
return 0;
}
hdrlen += sizeof(dheader) - pos;
reused_delta++;
} else if (type == OBJ_REF_DELTA) {
- if (limit && hdrlen + 20 + datalen + 20 >= limit) {
+ if (limit && hdrlen + hashsz + datalen + hashsz >= limit) {
unuse_pack(&w_curs);
return 0;
}
hashwrite(f, header, hdrlen);
- hashwrite(f, DELTA(entry)->idx.oid.hash, 20);
- hdrlen += 20;
- hashwrite(f, entry->delta->idx.oid.hash, hashsz);
++ hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz);
+ hdrlen += hashsz;
reused_delta++;
} else {
- if (limit && hdrlen + datalen + 20 >= limit) {
+ if (limit && hdrlen + datalen + hashsz >= limit) {
unuse_pack(&w_curs);
return 0;
}
else
limit = pack_size_limit - write_offset;
- if (!entry->delta)
+ if (!DELTA(entry))
usable_delta = 0; /* no delta */
else if (!pack_size_limit)
usable_delta = 1; /* unlimited packfile */
- else if (entry->delta->idx.offset == (off_t)-1)
+ else if (DELTA(entry)->idx.offset == (off_t)-1)
usable_delta = 0; /* base was written to another pack */
- else if (entry->delta->idx.offset)
+ else if (DELTA(entry)->idx.offset)
usable_delta = 1; /* base already exists in this pack */
else
usable_delta = 0; /* base could end up in another pack */
if (!reuse_object)
to_reuse = 0; /* explicit */
- else if (!entry->in_pack)
+ else if (!IN_PACK(entry))
to_reuse = 0; /* can't reuse what we don't have */
- else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
+ else if (oe_type(entry) == OBJ_REF_DELTA ||
+ oe_type(entry) == OBJ_OFS_DELTA)
/* check_object() decided it for us ... */
to_reuse = usable_delta;
/* ... but pack split may override that */
- else if (entry->type != entry->in_pack_type)
+ else if (oe_type(entry) != entry->in_pack_type)
to_reuse = 0; /* pack has delta which is unusable */
- else if (entry->delta)
+ else if (DELTA(entry))
to_reuse = 0; /* we want to pack afresh */
else
to_reuse = 1; /* we have it in-pack undeltified,
}
/* if we are deltified, write out base object first. */
- if (e->delta) {
+ if (DELTA(e)) {
e->idx.offset = 1; /* now recurse */
- switch (write_one(f, e->delta, offset)) {
+ switch (write_one(f, DELTA(e), offset)) {
case WRITE_ONE_RECURSIVE:
/* we cannot depend on this one */
- e->delta = NULL;
+ SET_DELTA(e, NULL);
break;
default:
break;
/* add this node... */
add_to_write_order(wo, endp, e);
/* all its siblings... */
- for (s = e->delta_sibling; s; s = s->delta_sibling) {
+ for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) {
add_to_write_order(wo, endp, s);
}
}
/* drop down a level to add left subtree nodes if possible */
- if (e->delta_child) {
+ if (DELTA_CHILD(e)) {
add_to_order = 1;
- e = e->delta_child;
+ e = DELTA_CHILD(e);
} else {
add_to_order = 0;
/* our sibling might have some children, it is next */
- if (e->delta_sibling) {
- e = e->delta_sibling;
+ if (DELTA_SIBLING(e)) {
+ e = DELTA_SIBLING(e);
continue;
}
/* go back to our parent node */
- e = e->delta;
- while (e && !e->delta_sibling) {
+ e = DELTA(e);
+ while (e && !DELTA_SIBLING(e)) {
/* we're on the right side of a subtree, keep
* going up until we can go right again */
- e = e->delta;
+ e = DELTA(e);
}
if (!e) {
/* done- we hit our original root node */
return;
}
/* pass it off to sibling at this level */
- e = e->delta_sibling;
+ e = DELTA_SIBLING(e);
}
};
}
{
struct object_entry *root;
- for (root = e; root->delta; root = root->delta)
+ for (root = e; DELTA(root); root = DELTA(root))
; /* nothing */
add_descendants_to_write_order(wo, endp, root);
}
for (i = 0; i < to_pack.nr_objects; i++) {
objects[i].tagged = 0;
objects[i].filled = 0;
- objects[i].delta_child = NULL;
- objects[i].delta_sibling = NULL;
+ SET_DELTA_CHILD(&objects[i], NULL);
+ SET_DELTA_SIBLING(&objects[i], NULL);
}
/*
*/
for (i = to_pack.nr_objects; i > 0;) {
struct object_entry *e = &objects[--i];
- if (!e->delta)
+ if (!DELTA(e))
continue;
/* Mark me as the first child */
- e->delta_sibling = e->delta->delta_child;
- e->delta->delta_child = e;
+ e->delta_sibling_idx = DELTA(e)->delta_child_idx;
+ SET_DELTA_CHILD(DELTA(e), e);
}
/*
* And then all remaining commits and tags.
*/
for (i = last_untagged; i < to_pack.nr_objects; i++) {
- if (objects[i].type != OBJ_COMMIT &&
- objects[i].type != OBJ_TAG)
+ if (oe_type(&objects[i]) != OBJ_COMMIT &&
+ oe_type(&objects[i]) != OBJ_TAG)
continue;
add_to_write_order(wo, &wo_end, &objects[i]);
}
* And then all the trees.
*/
for (i = last_untagged; i < to_pack.nr_objects; i++) {
- if (objects[i].type != OBJ_TREE)
+ if (oe_type(&objects[i]) != OBJ_TREE)
continue;
add_to_write_order(wo, &wo_end, &objects[i]);
}
die_errno("unable to seek in reused packfile");
if (reuse_packfile_offset < 0)
- reuse_packfile_offset = reuse_packfile->pack_size - 20;
+ reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz;
total = to_write = reuse_packfile_offset - sizeof(struct pack_header);
* If so, rewrite it like in fast-import
*/
if (pack_to_stdout) {
- hashclose(f, oid.hash, CSUM_CLOSE);
+ finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
} else if (nr_written == nr_remaining) {
- hashclose(f, oid.hash, CSUM_FSYNC);
+ finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
} else {
- int fd = hashclose(f, oid.hash, 0);
+ int fd = finalize_hashfile(f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
nr_written, oid.hash, offset);
close(fd);
if (write_bitmap_index) {
bitmap_writer_set_checksum(oid.hash);
- bitmap_writer_build_type_index(written_list, nr_written);
+ bitmap_writer_build_type_index(
+ &to_pack, written_list, nr_written);
}
finish_tmp_packfile(&tmpname, pack_tmp_name,
* Otherwise, we signal "-1" at the end to tell the caller that we do
* not know either way, and it needs to check more packs.
*/
- if (!ignore_packed_keep &&
+ if (!ignore_packed_keep_on_disk &&
+ !ignore_packed_keep_in_core &&
(!local || !have_non_local_packs))
return 1;
if (local && !p->pack_local)
return 0;
- if (ignore_packed_keep && p->pack_local && p->pack_keep)
+ if (p->pack_local &&
+ ((ignore_packed_keep_on_disk && p->pack_keep) ||
+ (ignore_packed_keep_in_core && p->pack_keep_in_core)))
return 0;
/* we don't know yet; keep looking for more packs */
int want;
struct list_head *pos;
- if (!exclude && local && has_loose_object_nonlocal(oid->hash))
+ if (!exclude && local && has_loose_object_nonlocal(oid))
return 0;
/*
entry = packlist_alloc(&to_pack, oid->hash, index_pos);
entry->hash = hash;
- if (type)
- entry->type = type;
+ oe_set_type(entry, type);
if (exclude)
entry->preferred_base = 1;
else
nr_result++;
if (found_pack) {
- entry->in_pack = found_pack;
+ oe_set_in_pack(&to_pack, entry, found_pack);
entry->in_pack_offset = found_offset;
}
off_t found_offset = 0;
uint32_t index_pos;
+ display_progress(progress_state, ++nr_seen);
+
if (have_duplicate_entry(oid, exclude, &index_pos))
return 0;
create_object_entry(oid, type, pack_name_hash(name),
exclude, name && no_try_delta(name),
index_pos, found_pack, found_offset);
-
- display_progress(progress_state, nr_result);
return 1;
}
{
uint32_t index_pos;
+ display_progress(progress_state, ++nr_seen);
+
if (have_duplicate_entry(oid, 0, &index_pos))
return 0;
return 0;
create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);
-
- display_progress(progress_state, nr_result);
return 1;
}
static void check_object(struct object_entry *entry)
{
- if (entry->in_pack) {
- struct packed_git *p = entry->in_pack;
+ unsigned long canonical_size;
+
+ if (IN_PACK(entry)) {
+ struct packed_git *p = IN_PACK(entry);
struct pack_window *w_curs = NULL;
const unsigned char *base_ref = NULL;
struct object_entry *base_entry;
unsigned long avail;
off_t ofs;
unsigned char *buf, c;
+ enum object_type type;
+ unsigned long in_pack_size;
buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);
* since non-delta representations could still be reused.
*/
used = unpack_object_header_buffer(buf, avail,
- &entry->in_pack_type,
- &entry->size);
+ &type,
+ &in_pack_size);
if (used == 0)
goto give_up;
+ if (type < 0)
+ BUG("invalid type %d", type);
+ entry->in_pack_type = type;
+
/*
* Determine if this is a delta and if so whether we can
* reuse it or not. Otherwise let's find out as cheaply as
switch (entry->in_pack_type) {
default:
/* Not a delta hence we've already got all we need. */
- entry->type = entry->in_pack_type;
+ oe_set_type(entry, entry->in_pack_type);
+ SET_SIZE(entry, in_pack_size);
entry->in_pack_header_size = used;
- if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
+ if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)
goto give_up;
unuse_pack(&w_curs);
return;
if (reuse_delta && !entry->preferred_base)
base_ref = use_pack(p, &w_curs,
entry->in_pack_offset + used, NULL);
- entry->in_pack_header_size = used + 20;
+ entry->in_pack_header_size = used + the_hash_algo->rawsz;
break;
case OBJ_OFS_DELTA:
buf = use_pack(p, &w_curs,
* deltify other objects against, in order to avoid
* circular deltas.
*/
- entry->type = entry->in_pack_type;
- entry->delta = base_entry;
- entry->delta_size = entry->size;
- entry->delta_sibling = base_entry->delta_child;
- base_entry->delta_child = entry;
+ oe_set_type(entry, entry->in_pack_type);
+ SET_SIZE(entry, in_pack_size); /* delta size */
+ SET_DELTA(entry, base_entry);
+ SET_DELTA_SIZE(entry, in_pack_size);
+ entry->delta_sibling_idx = base_entry->delta_child_idx;
+ SET_DELTA_CHILD(base_entry, entry);
unuse_pack(&w_curs);
return;
}
- if (entry->type) {
+ if (oe_type(entry)) {
+ off_t delta_pos;
+
/*
* This must be a delta and we already know what the
* final object type is. Let's extract the actual
* object size from the delta header.
*/
- entry->size = get_size_from_delta(p, &w_curs,
- entry->in_pack_offset + entry->in_pack_header_size);
- if (entry->size == 0)
+ delta_pos = entry->in_pack_offset + entry->in_pack_header_size;
+ canonical_size = get_size_from_delta(p, &w_curs, delta_pos);
+ if (canonical_size == 0)
goto give_up;
+ SET_SIZE(entry, canonical_size);
unuse_pack(&w_curs);
return;
}
unuse_pack(&w_curs);
}
- entry->type = oid_object_info(&entry->idx.oid, &entry->size);
- /*
- * The error condition is checked in prepare_pack(). This is
- * to permit a missing preferred base object to be ignored
- * as a preferred base. Doing so can result in a larger
- * pack file, but the transfer will still take place.
- */
+ oe_set_type(entry,
+ oid_object_info(the_repository, &entry->idx.oid, &canonical_size));
+ if (entry->type_valid) {
+ SET_SIZE(entry, canonical_size);
+ } else {
+ /*
+ * Bad object type is checked in prepare_pack(). This is
+ * to permit a missing preferred base object to be ignored
+ * as a preferred base. Doing so can result in a larger
+ * pack file, but the transfer will still take place.
+ */
+ }
}
static int pack_offset_sort(const void *_a, const void *_b)
{
const struct object_entry *a = *(struct object_entry **)_a;
const struct object_entry *b = *(struct object_entry **)_b;
+ const struct packed_git *a_in_pack = IN_PACK(a);
+ const struct packed_git *b_in_pack = IN_PACK(b);
/* avoid filesystem thrashing with loose objects */
- if (!a->in_pack && !b->in_pack)
+ if (!a_in_pack && !b_in_pack)
return oidcmp(&a->idx.oid, &b->idx.oid);
- if (a->in_pack < b->in_pack)
+ if (a_in_pack < b_in_pack)
return -1;
- if (a->in_pack > b->in_pack)
+ if (a_in_pack > b_in_pack)
return 1;
return a->in_pack_offset < b->in_pack_offset ? -1 :
(a->in_pack_offset > b->in_pack_offset);
*/
static void drop_reused_delta(struct object_entry *entry)
{
- struct object_entry **p = &entry->delta->delta_child;
+ unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;
struct object_info oi = OBJECT_INFO_INIT;
+ enum object_type type;
+ unsigned long size;
+
+ while (*idx) {
+ struct object_entry *oe = &to_pack.objects[*idx - 1];
- while (*p) {
- if (*p == entry)
- *p = (*p)->delta_sibling;
+ if (oe == entry)
+ *idx = oe->delta_sibling_idx;
else
- p = &(*p)->delta_sibling;
+ idx = &oe->delta_sibling_idx;
}
- entry->delta = NULL;
+ SET_DELTA(entry, NULL);
entry->depth = 0;
- oi.sizep = &entry->size;
- oi.typep = &entry->type;
- if (packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) < 0) {
+ oi.sizep = &size;
+ oi.typep = &type;
+ if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
/*
* We failed to get the info from this pack for some reason;
* fall back to sha1_object_info, which may find another copy.
- * And if that fails, the error will be recorded in entry->type
+ * And if that fails, the error will be recorded in oe_type(entry)
* and dealt with in prepare_pack().
*/
- entry->type = oid_object_info(&entry->idx.oid, &entry->size);
+ oe_set_type(entry,
+ oid_object_info(the_repository, &entry->idx.oid, &size));
+ } else {
+ oe_set_type(entry, type);
}
+ SET_SIZE(entry, size);
}
/*
for (cur = entry, total_depth = 0;
cur;
- cur = cur->delta, total_depth++) {
+ cur = DELTA(cur), total_depth++) {
if (cur->dfs_state == DFS_DONE) {
/*
* We've already seen this object and know it isn't
* is a bug.
*/
if (cur->dfs_state != DFS_NONE)
- die("BUG: confusing delta dfs state in first pass: %d",
+ BUG("confusing delta dfs state in first pass: %d",
cur->dfs_state);
/*
* it's not a delta, we're done traversing, but we'll mark it
* done to save time on future traversals.
*/
- if (!cur->delta) {
+ if (!DELTA(cur)) {
cur->dfs_state = DFS_DONE;
break;
}
* We keep all commits in the chain that we examined.
*/
cur->dfs_state = DFS_ACTIVE;
- if (cur->delta->dfs_state == DFS_ACTIVE) {
+ if (DELTA(cur)->dfs_state == DFS_ACTIVE) {
drop_reused_delta(cur);
cur->dfs_state = DFS_DONE;
break;
* an extra "next" pointer to keep going after we reset cur->delta.
*/
for (cur = entry; cur; cur = next) {
- next = cur->delta;
+ next = DELTA(cur);
/*
* We should have a chain of zero or more ACTIVE states down to
if (cur->dfs_state == DFS_DONE)
break;
else if (cur->dfs_state != DFS_ACTIVE)
- die("BUG: confusing delta dfs state in second pass: %d",
+ BUG("confusing delta dfs state in second pass: %d",
cur->dfs_state);
/*
uint32_t i;
struct object_entry **sorted_by_offset;
+ if (progress)
+ progress_state = start_progress(_("Counting objects"),
+ to_pack.nr_objects);
+
sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
for (i = 0; i < to_pack.nr_objects; i++)
sorted_by_offset[i] = to_pack.objects + i;
for (i = 0; i < to_pack.nr_objects; i++) {
struct object_entry *entry = sorted_by_offset[i];
check_object(entry);
- if (big_file_threshold < entry->size)
+ if (entry->type_valid &&
+ oe_size_greater_than(&to_pack, entry, big_file_threshold))
entry->no_try_delta = 1;
+ display_progress(progress_state, i + 1);
}
+ stop_progress(&progress_state);
/*
* This must happen in a second pass, since we rely on the delta
{
const struct object_entry *a = *(struct object_entry **)_a;
const struct object_entry *b = *(struct object_entry **)_b;
+ enum object_type a_type = oe_type(a);
+ enum object_type b_type = oe_type(b);
+ unsigned long a_size = SIZE(a);
+ unsigned long b_size = SIZE(b);
- if (a->type > b->type)
+ if (a_type > b_type)
return -1;
- if (a->type < b->type)
+ if (a_type < b_type)
return 1;
if (a->hash > b->hash)
return -1;
return -1;
if (a->preferred_base < b->preferred_base)
return 1;
- if (a->size > b->size)
+ if (a_size > b_size)
return -1;
- if (a->size < b->size)
+ if (a_size < b_size)
return 1;
return a < b ? -1 : (a > b); /* newest first */
}
#endif
+/*
+ * Return the size of the object without doing any delta
+ * reconstruction (so non-deltas are true object sizes, but deltas
+ * return the size of the delta data).
+ */
+unsigned long oe_get_size_slow(struct packing_data *pack,
+ const struct object_entry *e)
+{
+ struct packed_git *p;
+ struct pack_window *w_curs;
+ unsigned char *buf;
+ enum object_type type;
+ unsigned long used, avail, size;
+
+ if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {
+ read_lock();
+ if (oid_object_info(the_repository, &e->idx.oid, &size) < 0)
+ die(_("unable to get size of %s"),
+ oid_to_hex(&e->idx.oid));
+ read_unlock();
+ return size;
+ }
+
+ p = oe_in_pack(pack, e);
+ if (!p)
+ BUG("when e->type is a delta, it must belong to a pack");
+
+ read_lock();
+ w_curs = NULL;
+ buf = use_pack(p, &w_curs, e->in_pack_offset, &avail);
+ used = unpack_object_header_buffer(buf, avail, &type, &size);
+ if (used == 0)
+ die(_("unable to parse object header of %s"),
+ oid_to_hex(&e->idx.oid));
+
+ unuse_pack(&w_curs);
+ read_unlock();
+ return size;
+}
+
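/*
 * (Not part of the patch; a sketch of the fast path this pairs with.
 * The series caches sizes in a narrow bitfield and only reparses when
 * the value did not fit; the field names size_valid and size_ follow
 * the pack-objects.h side of the series and are illustrative here.)
 */
static inline unsigned long oe_size_sketch(struct packing_data *pack,
					   const struct object_entry *e)
{
	if (e->size_valid)
		return e->size_;		/* cached in the bitfield */
	return oe_get_size_slow(pack, e);	/* reparse the object header */
}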
static int try_delta(struct unpacked *trg, struct unpacked *src,
unsigned max_depth, unsigned long *mem_usage)
{
void *delta_buf;
/* Don't bother doing diffs between different types */
- if (trg_entry->type != src_entry->type)
+ if (oe_type(trg_entry) != oe_type(src_entry))
return -1;
/*
* it, we will still save the transfer cost, as we already know
* the other side has it and we won't send src_entry at all.
*/
- if (reuse_delta && trg_entry->in_pack &&
- trg_entry->in_pack == src_entry->in_pack &&
+ if (reuse_delta && IN_PACK(trg_entry) &&
+ IN_PACK(trg_entry) == IN_PACK(src_entry) &&
!src_entry->preferred_base &&
trg_entry->in_pack_type != OBJ_REF_DELTA &&
trg_entry->in_pack_type != OBJ_OFS_DELTA)
return 0;
/* Now some size filtering heuristics. */
- trg_size = trg_entry->size;
- if (!trg_entry->delta) {
+ trg_size = SIZE(trg_entry);
+ if (!DELTA(trg_entry)) {
- max_size = trg_size/2 - 20;
+ max_size = trg_size/2 - the_hash_algo->rawsz;
ref_depth = 1;
} else {
- max_size = trg_entry->delta_size;
+ max_size = DELTA_SIZE(trg_entry);
ref_depth = trg->depth;
}
max_size = (uint64_t)max_size * (max_depth - src->depth) /
(max_depth - ref_depth + 1);
if (max_size == 0)
return 0;
- src_size = src_entry->size;
+ src_size = SIZE(src_entry);
sizediff = src_size < trg_size ? trg_size - src_size : 0;
if (sizediff >= max_size)
return 0;
delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
if (!delta_buf)
return 0;
+ if (delta_size >= (1U << OE_DELTA_SIZE_BITS)) {
+ free(delta_buf);
+ return 0;
+ }
- if (trg_entry->delta) {
+ if (DELTA(trg_entry)) {
/* Prefer only shallower same-sized deltas. */
- if (delta_size == trg_entry->delta_size &&
+ if (delta_size == DELTA_SIZE(trg_entry) &&
src->depth + 1 >= trg->depth) {
free(delta_buf);
return 0;
free(trg_entry->delta_data);
cache_lock();
if (trg_entry->delta_data) {
- delta_cache_size -= trg_entry->delta_size;
+ delta_cache_size -= DELTA_SIZE(trg_entry);
trg_entry->delta_data = NULL;
}
if (delta_cacheable(src_size, trg_size, delta_size)) {
free(delta_buf);
}
- trg_entry->delta = src_entry;
- trg_entry->delta_size = delta_size;
+ SET_DELTA(trg_entry, src_entry);
+ SET_DELTA_SIZE(trg_entry, delta_size);
trg->depth = src->depth + 1;
return 1;
static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
{
- struct object_entry *child = me->delta_child;
+ struct object_entry *child = DELTA_CHILD(me);
unsigned int m = n;
while (child) {
unsigned int c = check_delta_limit(child, n + 1);
if (m < c)
m = c;
- child = child->delta_sibling;
+ child = DELTA_SIBLING(child);
}
return m;
}
free_delta_index(n->index);
n->index = NULL;
if (n->data) {
- freed_mem += n->entry->size;
+ freed_mem += SIZE(n->entry);
FREE_AND_NULL(n->data);
}
n->entry = NULL;
* otherwise they would become too deep.
*/
max_depth = depth;
- if (entry->delta_child) {
+ if (DELTA_CHILD(entry)) {
max_depth -= check_delta_limit(entry, 0);
if (max_depth <= 0)
goto next;
* between writes at that moment.
*/
if (entry->delta_data && !pack_to_stdout) {
- entry->z_delta_size = do_compress(&entry->delta_data,
- entry->delta_size);
- cache_lock();
- delta_cache_size -= entry->delta_size;
- delta_cache_size += entry->z_delta_size;
- cache_unlock();
+ unsigned long size;
+
+ size = do_compress(&entry->delta_data, DELTA_SIZE(entry));
+ if (size < (1U << OE_Z_DELTA_BITS)) {
+ entry->z_delta_size = size;
+ cache_lock();
+ delta_cache_size -= DELTA_SIZE(entry);
+ delta_cache_size += entry->z_delta_size;
+ cache_unlock();
+ } else {
+ FREE_AND_NULL(entry->delta_data);
+ entry->z_delta_size = 0;
+ }
}
/* if we made n a delta, and if n is already at max
* depth, leaving it in the window is pointless. we
* should evict it first.
*/
- if (entry->delta && max_depth <= n->depth)
+ if (DELTA(entry) && max_depth <= n->depth)
continue;
/*
* currently deltified object, to keep it longer. It will
* be the first base object to be attempted next.
*/
- if (entry->delta) {
+ if (DELTA(entry)) {
struct unpacked swap = array[best_base];
int dist = (window + idx - best_base) % window;
int dst = best_base;
for (i = 0; i < to_pack.nr_objects; i++) {
struct object_entry *entry = to_pack.objects + i;
- if (entry->delta)
+ if (DELTA(entry))
/* This happens if we decided to reuse existing
* delta from a pack. "reuse_delta &&" is implied.
*/
continue;
- if (entry->size < 50)
+ if (!entry->type_valid ||
+ oe_size_less_than(&to_pack, entry, 50))
continue;
if (entry->no_try_delta)
if (!entry->preferred_base) {
nr_deltas++;
- if (entry->type < 0)
+ if (oe_type(entry) < 0)
die("unable to get type of object %s",
oid_to_hex(&entry->idx.oid));
} else {
- if (entry->type < 0) {
+ if (oe_type(entry) < 0) {
/*
* This object is not found, but we
* don't have to include it anyway.
die("expected object ID, got garbage:\n %s", line);
add_preferred_base_object(p + 1);
- add_object_entry(&oid, 0, p + 1, 0);
+ add_object_entry(&oid, OBJ_NONE, p + 1, 0);
}
}
struct object_id oid;
struct object *o;
- if (!p->pack_local || p->pack_keep)
+ if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
if (open_pack_index(p))
die("cannot open pack index");
static int add_loose_object(const struct object_id *oid, const char *path,
void *data)
{
- enum object_type type = oid_object_info(oid, NULL);
+ enum object_type type = oid_object_info(the_repository, oid, NULL);
if (type < 0) {
warning("loose object at %s could not be examined", path);
get_packed_git(the_repository);
while (p) {
- if ((!p->pack_local || p->pack_keep) &&
+ if ((!p->pack_local || p->pack_keep ||
+ p->pack_keep_in_core) &&
find_pack_entry_one(oid->hash, p)) {
last_found = p;
return 1;
struct object_id oid;
for (p = get_packed_git(the_repository); p; p = p->next) {
- if (!p->pack_local || p->pack_keep)
+ if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
continue;
if (open_pack_index(p))
{
return pack_to_stdout &&
allow_ofs_delta &&
- !ignore_packed_keep &&
+ !ignore_packed_keep_on_disk &&
+ !ignore_packed_keep_in_core &&
(!local || !have_non_local_packs) &&
!incremental;
}
oid_array_clear(&recent_objects);
}
+static void add_extra_kept_packs(const struct string_list *names)
+{
+ struct packed_git *p;
+
+ if (!names->nr)
+ return;
+
+ for (p = get_packed_git(the_repository); p; p = p->next) {
+ const char *name = basename(p->pack_name);
+ int i;
+
+ if (!p->pack_local)
+ continue;
+
+ for (i = 0; i < names->nr; i++)
+ if (!fspathcmp(name, names->items[i].string))
+ break;
+
+ if (i < names->nr) {
+ p->pack_keep_in_core = 1;
+ ignore_packed_keep_in_core = 1;
+ continue;
+ }
+ }
+}
+
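/*
 * (Not part of the patch: with add_extra_kept_packs() above, a caller
 * can pass --keep-pack=<pack-name> to treat a local pack as kept for
 * this run only, with no .keep file on disk; on-disk .keep files are
 * still controlled separately by --honor-pack-keep.)
 */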
static int option_parse_index_version(const struct option *opt,
const char *arg, int unset)
{
struct argv_array rp = ARGV_ARRAY_INIT;
int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
int rev_list_index = 0;
+ struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
struct option pack_objects_options[] = {
OPT_SET_INT('q', "quiet", &progress,
N_("do not show progress meter"), 0),
N_("create thin packs")),
OPT_BOOL(0, "shallow", &shallow,
N_("create packs suitable for shallow fetches")),
- OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
+ OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep_on_disk,
N_("ignore packs that have companion .keep file")),
+ OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
+ N_("ignore this pack")),
OPT_INTEGER(0, "compression", &pack_compression_level,
N_("pack compression level")),
OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
OPT_END(),
};
+ if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
+ BUG("too many dfs states, increase OE_DFS_STATE_BITS");
+
check_replace_refs = 0;
reset_pack_idx_option(&pack_idx_opts);
if (pack_to_stdout != !base_name || argc)
usage_with_options(pack_usage, pack_objects_options);
+ if (depth >= (1 << OE_DEPTH_BITS)) {
+ warning(_("delta chain depth %d is too deep, forcing %d"),
+ depth, (1 << OE_DEPTH_BITS) - 1);
+ depth = (1 << OE_DEPTH_BITS) - 1;
+ }
+ if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) {
+ warning(_("pack.deltaCacheLimit is too high, forcing %d"),
+ (1U << OE_Z_DELTA_BITS) - 1);
+ cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1;
+ }
+
argv_array_push(&rp, "pack-objects");
if (thin) {
use_internal_rev_list = 1;
fetch_if_missing = 0;
argv_array_push(&rp, "--exclude-promisor-objects");
}
+ if (unpack_unreachable || keep_unreachable || pack_loose_unreachable)
+ use_internal_rev_list = 1;
if (!reuse_object)
reuse_delta = 0;
if (progress && all_progress_implied)
progress = 2;
- if (ignore_packed_keep) {
+ add_extra_kept_packs(&keep_pack_list);
+ if (ignore_packed_keep_on_disk) {
struct packed_git *p;
for (p = get_packed_git(the_repository); p; p = p->next)
if (p->pack_local && p->pack_keep)
break;
if (!p) /* no keep-able packs found */
- ignore_packed_keep = 0;
+ ignore_packed_keep_on_disk = 0;
}
if (local) {
/*
- * unlike ignore_packed_keep above, we do not want to
- * unset "local" based on looking at packs, as it
- * also covers non-local objects
+ * unlike ignore_packed_keep_on_disk above, we do not
+ * want to unset "local" based on looking at packs, as
+ * it also covers non-local objects
*/
struct packed_git *p;
for (p = get_packed_git(the_repository); p; p = p->next) {
}
}
+ prepare_packing_data(&to_pack);
+
if (progress)
- progress_state = start_progress(_("Counting objects"), 0);
+ progress_state = start_progress(_("Enumerating objects"), 0);
if (!use_internal_rev_list)
read_object_list_from_stdin();
else {
#include "pkt-line.h"
#include "sideband.h"
#include "run-command.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "commit.h"
#include "object.h"
#include "remote.h"
/* RFC 2104 2. (6) & (7) */
git_SHA1_Init(&ctx);
git_SHA1_Update(&ctx, k_opad, sizeof(k_opad));
- git_SHA1_Update(&ctx, out, 20);
+ git_SHA1_Update(&ctx, out, GIT_SHA1_RAWSZ);
git_SHA1_Final(out, &ctx);
}
static char *prepare_push_cert_nonce(const char *path, timestamp_t stamp)
{
struct strbuf buf = STRBUF_INIT;
- unsigned char sha1[20];
+ unsigned char sha1[GIT_SHA1_RAWSZ];
strbuf_addf(&buf, "%s:%"PRItime, path, stamp);
hmac_sha1(sha1, buf.buf, buf.len, cert_nonce_seed, strlen(cert_nonce_seed));
strbuf_release(&buf);
/* RFC 2104 5. HMAC-SHA1-80 */
- strbuf_addf(&buf, "%"PRItime"-%.*s", stamp, 20, sha1_to_hex(sha1));
+ strbuf_addf(&buf, "%"PRItime"-%.*s", stamp, GIT_SHA1_HEXSZ, sha1_to_hex(sha1));
return strbuf_detach(&buf, NULL);
}
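/*
 * (Not part of the patch: the nonce produced above has the form
 * "<timestamp>-<HMAC hex>", so the server can validate a nonce the
 * client echoes back statelessly, by recomputing the HMAC over
 * "<path>:<timestamp>" with its seed and comparing the strings.)
 */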
static int command_singleton_iterator(void *cb_data, struct object_id *oid);
static int update_shallow_ref(struct command *cmd, struct shallow_info *si)
{
- static struct lock_file shallow_lock;
+ struct lock_file shallow_lock = LOCK_INIT;
struct oid_array extra = OID_ARRAY_INIT;
struct check_connected_options opt = CHECK_CONNECTED_INIT;
uint32_t mask = 1 << (cmd->index % 32);
return "Working directory has unstaged changes";
/* diff-index with either HEAD or an empty tree */
- diff_index[4] = head_has_history() ? "HEAD" : EMPTY_TREE_SHA1_HEX;
+ diff_index[4] = head_has_history() ? "HEAD" : empty_tree_oid_hex();
child_process_init(&child);
child.argv = diff_index;
}
}
if (!checked_connectivity)
- die("BUG: connectivity check skipped???");
+ BUG("connectivity check skipped???");
}
static void execute_commands_non_atomic(struct command *commands,
unpack_limit = receive_unpack_limit;
switch (determine_protocol_version_server()) {
+ case protocol_v2:
+ /*
+ * push support for protocol v2 has not been implemented yet,
+ * so ignore the request to use v2 and fall back to using v0.
+ */
+ break;
case protocol_v1:
/*
* v1 is just the original protocol with a version string,
drop_cache_tree : 1;
struct hashmap name_hash;
struct hashmap dir_hash;
- unsigned char sha1[20];
+ struct object_id oid;
struct untracked_cache *untracked;
uint64_t fsmonitor_last_update;
struct ewah_bitmap *fsmonitor_dirty;
#define read_blob_data_from_cache(path, sz) read_blob_data_from_index(&the_index, (path), (sz))
#endif
+#define TYPE_BITS 3
+
+/*
+ * Values in this enum (except those outside the 3 bit range) are part
+ * of the pack file format. See Documentation/technical/pack-format.txt
+ * for more information.
+ */
enum object_type {
OBJ_BAD = -1,
OBJ_NONE = 0,
#define GIT_ICASE_PATHSPECS_ENVIRONMENT "GIT_ICASE_PATHSPECS"
#define GIT_QUARANTINE_ENVIRONMENT "GIT_QUARANTINE_PATH"
#define GIT_OPTIONAL_LOCKS_ENVIRONMENT "GIT_OPTIONAL_LOCKS"
+#define GIT_TEXT_DOMAIN_DIR_ENVIRONMENT "GIT_TEXTDOMAINDIR"
/*
* Environment variable used in handshaking the wire protocol.
extern char *get_object_directory(void);
extern char *get_index_file(void);
extern char *get_graft_file(void);
-extern int set_git_dir(const char *path);
+extern void set_git_dir(const char *path);
extern int get_common_dir_noenv(struct strbuf *sb, const char *gitdir);
extern int get_common_dir(struct strbuf *sb, const char *gitdir);
extern const char *get_git_namespace(void);
*/
extern int index_has_changes(struct strbuf *sb);
-extern int verify_path(const char *path);
+extern int verify_path(const char *path, unsigned mode);
extern int strcmp_offset(const char *s1, const char *s2, size_t *first_change);
extern int index_dir_exists(struct index_state *istate, const char *name, int namelen);
extern void adjust_dirname_case(struct index_state *istate, char *name);
extern int fsync_object_files;
extern int core_preload_index;
+extern int core_commit_graph;
extern int core_apply_sparse_checkout;
extern int precomposed_unicode;
extern int protect_hfs;
memset(oid->hash, 0, GIT_MAX_RAWSZ);
}
-
- #define EMPTY_TREE_SHA1_HEX \
- "4b825dc642cb6eb9a060e54bf8d69288fbee4904"
- #define EMPTY_TREE_SHA1_BIN_LITERAL \
- "\x4b\x82\x5d\xc6\x42\xcb\x6e\xb9\xa0\x60" \
- "\xe5\x4b\xf8\xd6\x92\x88\xfb\xee\x49\x04"
- extern const struct object_id empty_tree_oid;
- #define EMPTY_TREE_SHA1_BIN (empty_tree_oid.hash)
-
- #define EMPTY_BLOB_SHA1_HEX \
- "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391"
- #define EMPTY_BLOB_SHA1_BIN_LITERAL \
- "\xe6\x9d\xe2\x9b\xb2\xd1\xd6\x43\x4b\x8b" \
- "\x29\xae\x77\x5a\xd8\xc2\xe4\x8c\x53\x91"
- extern const struct object_id empty_blob_oid;
+ static inline void oidread(struct object_id *oid, const unsigned char *hash)
+ {
+ memcpy(oid->hash, hash, the_hash_algo->rawsz);
+ }
static inline int is_empty_blob_sha1(const unsigned char *sha1)
{
return !oidcmp(oid, the_hash_algo->empty_tree);
}
+ const char *empty_tree_oid_hex(void);
+ const char *empty_blob_oid_hex(void);
+
/* set default permissions by passing mode arguments to open(2) */
int git_mkstemps_mode(char *pattern, int suffix_len, int mode);
int git_mkstemp_mode(char *pattern, int mode);
int longest_ancestor_length(const char *path, struct string_list *prefixes);
char *strip_path_suffix(const char *path, const char *suffix);
int daemon_avoid_alias(const char *path);
-extern int is_ntfs_dotgit(const char *name);
+
+/*
+ * These functions match their is_hfs_dotgit() counterparts; see utf8.h for
+ * details.
+ */
+int is_ntfs_dotgit(const char *name);
+int is_ntfs_dotgitmodules(const char *name);
+int is_ntfs_dotgitignore(const char *name);
+int is_ntfs_dotgitattributes(const char *name);
/*
* Returns true iff "str" could be confused as a command-line option when
return read_object_file_extended(oid, type, size, 1);
}
-/*
- * This internal function is only declared here for the benefit of
- * lookup_replace_object(). Please do not call it directly.
- */
-extern const struct object_id *do_lookup_replace_object(const struct object_id *oid);
-
-/*
- * If object sha1 should be replaced, return the replacement object's
- * name (replaced recursively, if necessary). The return value is
- * either sha1 or a pointer to a permanently-allocated value. When
- * object replacement is suppressed, always return sha1.
- */
-static inline const struct object_id *lookup_replace_object(const struct object_id *oid)
-{
- if (!check_replace_refs)
- return oid;
- return do_lookup_replace_object(oid);
-}
-
/* Read and unpack an object file into memory, write memory to an object file */
-extern int oid_object_info(const struct object_id *, unsigned long *);
+int oid_object_info(struct repository *r, const struct object_id *, unsigned long *);
extern int hash_object_file(const void *buf, unsigned long len,
const char *type, struct object_id *oid);
* with the specified name. This function does not respect replace
* references.
*/
- extern int has_loose_object_nonlocal(const unsigned char *sha1);
+ extern int has_loose_object_nonlocal(const struct object_id *oid);
extern void assert_oid_type(const struct object_id *oid, enum object_type expect);
#define FALLBACK_DEFAULT_ABBREV 7
struct object_context {
- unsigned char tree[20];
unsigned mode;
/*
* symlink_path is only used by get_tree_entry_follow_symlinks,
struct pack_entry {
off_t offset;
- unsigned char sha1[20];
struct packed_git *p;
};
#define OBJECT_INFO_QUICK 8
/* Do not check loose object */
#define OBJECT_INFO_IGNORE_LOOSE 16
-extern int oid_object_info_extended(const struct object_id *, struct object_info *, unsigned flags);
+
+int oid_object_info_extended(struct repository *r,
+ const struct object_id *,
+ struct object_info *, unsigned flags);
/*
* Set this to 0 to prevent sha1_object_info_extended() from fetching missing
#include "cache.h"
#include "tag.h"
#include "commit.h"
+#include "commit-graph.h"
#include "pkt-line.h"
#include "utf8.h"
#include "diff.h"
#include "prio-queue.h"
#include "sha1-lookup.h"
#include "wt-status.h"
+#include "advice.h"
static struct commit_extra_header *read_commit_extra_header_lines(const char *buf, size_t len, const char **);
struct strbuf buf = STRBUF_INIT;
if (!fp)
return -1;
+ if (advice_graft_file_deprecated)
+ advise(_("Support for <GIT_DIR>/info/grafts is deprecated\n"
+ "and will be removed in a future Git version.\n"
+ "\n"
+ "Please use \"git replace --convert-graft-file\"\n"
+ "to convert the grafts into replace refs.\n"
+ "\n"
+ "Turn this message off by running\n"
+ "\"git config advice.graftFileDeprecated false\""));
while (!strbuf_getwholeline(&buf, fp, '\n')) {
/* The format is just "Commit Parent1 Parent2 ...\n" */
struct commit_graft *graft = read_graft_line(&buf);
}
}
+struct tree *get_commit_tree(const struct commit *commit)
+{
+ if (commit->maybe_tree || !commit->object.parsed)
+ return commit->maybe_tree;
+
+ if (commit->graph_pos == COMMIT_NOT_FROM_GRAPH)
+ BUG("commit has NULL tree, but was not loaded from commit-graph");
+
+ return get_commit_tree_in_graph(commit);
+}
+
+struct object_id *get_commit_tree_oid(const struct commit *commit)
+{
+ return &get_commit_tree(commit)->object.oid;
+}
+
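/*
 * (Not part of the patch; a sketch of the caller-side change the
 * accessors above enable: code that used to dereference the tree
 * field directly now calls get_commit_tree(), which can load the
 * tree lazily from the commit-graph.)
 */
static void print_tree_oid_sketch(struct commit *commit)
{
	puts(oid_to_hex(get_commit_tree_oid(commit)));
}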
const void *detach_commit_buffer(struct commit *commit, unsigned long *sizep)
{
struct commit_buffer *v = buffer_slab_peek(&buffer_slab, commit);
if (tail <= bufptr + tree_entry_len + 1 || memcmp(bufptr, "tree ", 5) ||
bufptr[tree_entry_len] != '\n')
return error("bogus commit object %s", oid_to_hex(&item->object.oid));
- if (get_sha1_hex(bufptr + 5, parent.hash) < 0)
+ if (get_oid_hex(bufptr + 5, &parent) < 0)
return error("bad tree pointer in commit %s",
oid_to_hex(&item->object.oid));
- item->tree = lookup_tree(&parent);
+ item->maybe_tree = lookup_tree(&parent);
bufptr += tree_entry_len + 1; /* "tree " + "hex sha1" + "\n" */
pptr = &item->parents;
struct commit *new_parent;
if (tail <= bufptr + parent_entry_len + 1 ||
- get_sha1_hex(bufptr + 7, parent.hash) ||
+ get_oid_hex(bufptr + 7, &parent) ||
bufptr[parent_entry_len] != '\n')
return error("bad parents in commit %s", oid_to_hex(&item->object.oid));
bufptr += parent_entry_len + 1;
return -1;
if (item->object.parsed)
return 0;
+ if (parse_commit_in_graph(item))
+ return 0;
buffer = read_object_file(&item->object.oid, &type, &size);
if (!buffer)
return quiet_on_missing ? -1 :
return extra;
}
-void for_each_mergetag(each_mergetag_fn fn, struct commit *commit, void *data)
+int for_each_mergetag(each_mergetag_fn fn, struct commit *commit, void *data)
{
struct commit_extra_header *extra, *to_free;
+ int res = 0;
to_free = read_commit_extra_headers(commit, NULL);
- for (extra = to_free; extra; extra = extra->next) {
+ for (extra = to_free; !res && extra; extra = extra->next) {
if (strcmp(extra->key, "mergetag"))
continue; /* not a merge tag */
- fn(commit, extra, data);
+ res = fn(commit, extra, data);
}
free_commit_extra_headers(to_free);
+ return res;
}
static inline int standard_header_field(const char *field, size_t len)
return 0;
}
-static int git_config_rename(const char *var, const char *value)
+int git_config_rename(const char *var, const char *value)
{
if (!value)
return DIFF_DETECT_RENAME;
fputs(o->stat_sep, o->file);
break;
default:
- die("BUG: unknown diff symbol");
+ BUG("unknown diff symbol");
}
strbuf_release(&sb);
}
for (i = 0; i < ARRAY_SIZE(diff_temp); i++)
if (!diff_temp[i].name)
return diff_temp + i;
- die("BUG: diff is failing to clean up its tempfiles");
+ BUG("diff is failing to clean up its tempfiles");
}
static void remove_tempfile(void)
* objects, however, would tend to be slower as they need
* to be individually opened and inflated.
*/
- if (!FAST_WORKING_DIRECTORY && !want_file && has_sha1_pack(oid->hash))
+ if (!FAST_WORKING_DIRECTORY && !want_file && has_object_pack(oid))
return 0;
/*
else {
enum object_type type;
if (size_only || (flags & CHECK_BINARY)) {
- type = oid_object_info(&s->oid, &s->size);
+ type = oid_object_info(the_repository, &s->oid,
+ &s->size);
if (type < 0)
die("unable to read %s",
oid_to_hex(&s->oid));
if (abbrev < 0)
abbrev = FALLBACK_DEFAULT_ABBREV;
if (abbrev > GIT_SHA1_HEXSZ)
- die("BUG: oid abbreviation out of range: %d", abbrev);
+ BUG("oid abbreviation out of range: %d", abbrev);
if (abbrev)
hex[abbrev] = '\0';
return hex;
*must_show_header = 0;
}
if (one && two && oidcmp(&one->oid, &two->oid)) {
- int abbrev = o->flags.full_index ? 40 : DEFAULT_ABBREV;
+ const unsigned hexsz = the_hash_algo->hexsz;
+ int abbrev = o->flags.full_index ? hexsz : DEFAULT_ABBREV;
if (o->flags.binary) {
mmfile_t mf;
if ((!fill_mmfile(&mf, one) && diff_filespec_is_binary(one)) ||
(!fill_mmfile(&mf, two) && diff_filespec_is_binary(two)))
- abbrev = 40;
+ abbrev = hexsz;
}
strbuf_addf(msg, "%s%sindex %s..%s", line_prefix, set,
diff_abbrev_oid(&one->oid, abbrev),
DIFF_FORMAT_NAME_STATUS |
DIFF_FORMAT_CHECKDIFF |
DIFF_FORMAT_NO_OUTPUT;
+ /*
+ * This must be signed because we're comparing against a potentially
+ * negative value.
+ */
+ const int hexsz = the_hash_algo->hexsz;
if (options->set_default)
options->set_default(options);
*/
read_cache();
}
- if (40 < options->abbrev)
- options->abbrev = 40; /* full */
+ if (hexsz < options->abbrev)
+ options->abbrev = hexsz; /* full */
/*
* It does not make sense to show the first hit we happened
int argcount = 1;
if (!skip_prefix(arg, "--stat", &arg))
- die("BUG: stat option does not begin with --stat: %s", arg);
+ BUG("stat option does not begin with --stat: %s", arg);
end = (char *)arg;
switch (*arg) {
options->abbrev = strtoul(arg, NULL, 10);
if (options->abbrev < MINIMUM_ABBREV)
options->abbrev = MINIMUM_ABBREV;
- else if (40 < options->abbrev)
- options->abbrev = 40;
+ else if (the_hash_algo->hexsz < options->abbrev)
+ options->abbrev = the_hash_algo->hexsz;
}
else if ((argcount = parse_long_opt("src-prefix", av, &optarg))) {
options->a_prefix = optarg;
struct diff_queue_struct *q = &diff_queued_diff;
if (WSEH_NEW & WS_RULE_MASK)
- die("BUG: WS rules bit mask overlaps with diff symbol flags");
+ BUG("WS rules bit mask overlaps with diff symbol flags");
if (o->color_moved)
o->emitted_symbols = &esm;
}
if (!driver->textconv)
- die("BUG: fill_textconv called with non-textconv driver");
+ BUG("fill_textconv called with non-textconv driver");
if (driver->textconv_cache && df->oid_valid) {
*outbuf = notes_cache_get(driver->textconv_cache,
#include "varint.h"
#include "ewah/ewok.h"
#include "fsmonitor.h"
+#include "submodule-config.h"
/*
* Tells read_directory_recursive how a file or directory should be treated.
if (size == 0) {
if (oid_stat) {
fill_stat_data(&oid_stat->stat, &st);
- oidcpy(&oid_stat->oid, &empty_blob_oid);
+ oidcpy(&oid_stat->oid, the_hash_algo->empty_blob);
oid_stat->valid = 1;
}
close(fd);
(!untracked || !untracked->valid ||
/*
* .. and .gitignore does not exist before
- * (i.e. null exclude_sha1). Then we can skip
+ * (i.e. null exclude_oid). Then we can skip
* loading .gitignore, which would result in
* ENOENT anyway.
*/
- !is_null_sha1(untracked->exclude_sha1))) {
+ !is_null_oid(&untracked->exclude_oid))) {
/*
* dir->basebuf gets reused by the traversal, but we
* need fname to remain unchanged to ensure the src
* order, though, if you do that.
*/
if (untracked &&
- hashcmp(oid_stat.oid.hash, untracked->exclude_sha1)) {
+ oidcmp(&oid_stat.oid, &untracked->exclude_oid)) {
invalidate_gitignore(dir->untracked, untracked);
- hashcpy(untracked->exclude_sha1, oid_stat.oid.hash);
+ oidcpy(&untracked->exclude_oid, &oid_stat.oid);
}
dir->exclude_stack = stk;
current = stk->baselen;
stat_data_to_disk(&stat_data, &untracked->stat_data);
strbuf_add(&wd->sb_stat, &stat_data, sizeof(stat_data));
}
- if (!is_null_sha1(untracked->exclude_sha1)) {
+ if (!is_null_oid(&untracked->exclude_oid)) {
ewah_set(wd->sha1_valid, i);
- strbuf_add(&wd->sb_sha1, untracked->exclude_sha1, 20);
+ strbuf_add(&wd->sb_sha1, untracked->exclude_oid.hash,
+ the_hash_algo->rawsz);
}
intlen = encode_varint(untracked->untracked_nr, intbuf);
ud->valid = 1;
}
- static void read_sha1(size_t pos, void *cb)
+ static void read_oid(size_t pos, void *cb)
{
struct read_data *rd = cb;
struct untracked_cache_dir *ud = rd->ucd[pos];
- if (rd->data + 20 > rd->end) {
+ if (rd->data + the_hash_algo->rawsz > rd->end) {
rd->data = rd->end + 1;
return;
}
- hashcpy(ud->exclude_sha1, rd->data);
- rd->data += 20;
+ hashcpy(ud->exclude_oid.hash, rd->data);
+ rd->data += the_hash_algo->rawsz;
}
static void load_oid_stat(struct oid_stat *oid_stat, const unsigned char *data,
ewah_each_bit(rd.check_only, set_check_only, &rd);
rd.data = next + len;
ewah_each_bit(rd.valid, read_stat, &rd);
- ewah_each_bit(rd.sha1_valid, read_sha1, &rd);
+ ewah_each_bit(rd.sha1_valid, read_oid, &rd);
next = rd.data;
done:
{
if (!istate->untracked || !istate->untracked->root)
return;
- if (!safe_path && !verify_path(path))
+ if (!safe_path && !verify_path(path, 0))
return;
invalidate_one_component(istate->untracked, istate->untracked->root,
path, strlen(path));
untracked_cache_invalidate_path(istate, path, 1);
}
-/* Update gitfile and core.worktree setting to connect work tree and git dir */
-void connect_work_tree_and_git_dir(const char *work_tree_, const char *git_dir_)
+static void connect_wt_gitdir_in_nested(const char *sub_worktree,
+ const char *sub_gitdir)
+{
+ int i;
+ struct repository subrepo;
+ struct strbuf sub_wt = STRBUF_INIT;
+ struct strbuf sub_gd = STRBUF_INIT;
+
+ const struct submodule *sub;
+
+ /* If the submodule has no working tree, we can ignore it. */
+ if (repo_init(&subrepo, sub_gitdir, sub_worktree))
+ return;
+
+ if (repo_read_index(&subrepo) < 0)
+ die("index file corrupt in repo %s", subrepo.gitdir);
+
+ for (i = 0; i < subrepo.index->cache_nr; i++) {
+ const struct cache_entry *ce = subrepo.index->cache[i];
+
+ if (!S_ISGITLINK(ce->ce_mode))
+ continue;
+
+ while (i + 1 < subrepo.index->cache_nr &&
+ !strcmp(ce->name, subrepo.index->cache[i + 1]->name))
+ /*
+ * Skip entries with the same name in different stages
+ * to make sure an entry is returned only once.
+ */
+ i++;
+
+ sub = submodule_from_path(&subrepo, &null_oid, ce->name);
+ if (!sub || !is_submodule_active(&subrepo, ce->name))
+ /* .gitmodules broken or inactive sub */
+ continue;
+
+ strbuf_reset(&sub_wt);
+ strbuf_reset(&sub_gd);
+ strbuf_addf(&sub_wt, "%s/%s", sub_worktree, sub->path);
+ strbuf_addf(&sub_gd, "%s/modules/%s", sub_gitdir, sub->name);
+
+ connect_work_tree_and_git_dir(sub_wt.buf, sub_gd.buf, 1);
+ }
+ strbuf_release(&sub_wt);
+ strbuf_release(&sub_gd);
+ repo_clear(&subrepo);
+}
+
+void connect_work_tree_and_git_dir(const char *work_tree_,
+ const char *git_dir_,
+ int recurse_into_nested)
{
struct strbuf gitfile_sb = STRBUF_INIT;
struct strbuf cfg_sb = STRBUF_INIT;
strbuf_release(&gitfile_sb);
strbuf_release(&cfg_sb);
strbuf_release(&rel_path);
+
+ if (recurse_into_nested)
+ connect_wt_gitdir_in_nested(work_tree, git_dir);
+
free(work_tree);
free(git_dir);
}
die_errno(_("could not migrate git directory from '%s' to '%s'"),
old_git_dir, new_git_dir);
- connect_work_tree_and_git_dir(path, new_git_dir);
+ connect_work_tree_and_git_dir(path, new_git_dir, 0);
}
/* See Documentation/technical/api-directory-listing.txt */
+ #include "cache.h"
#include "strbuf.h"
struct dir_entry {
/* all data except 'dirs' in this struct are good */
unsigned int valid : 1;
unsigned int recurse : 1;
- /* null SHA-1 means this directory does not have .gitignore */
- unsigned char exclude_sha1[20];
+ /* null object ID means this directory does not have .gitignore */
+ struct object_id exclude_oid;
char name[FLEX_ARRAY];
};
void write_untracked_extension(struct strbuf *out, struct untracked_cache *untracked);
void add_untracked_cache(struct index_state *istate);
void remove_untracked_cache(struct index_state *istate);
-extern void connect_work_tree_and_git_dir(const char *work_tree, const char *git_dir);
+
+/*
+ * Connect a worktree to a git directory by creating (or overwriting) a
+ * '.git' file containing the location of the git directory. In the git
+ * directory set the core.worktree setting to indicate where the worktree is.
+ * When `recurse_into_nested` is set, recurse into any nested submodules,
+ * connecting them as well.
+ */
+extern void connect_work_tree_and_git_dir(const char *work_tree,
+ const char *git_dir,
+ int recurse_into_nested);
extern void relocate_gitdir(const char *path,
const char *old_git_dir,
const char *new_git_dir);
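
[Editor's note: for callers, the new third argument separates the two cases this series touches. A hedged usage sketch; the paths are illustrative.]

/* Absorbing a submodule's git dir: reconnect and recurse so that any
 * nested submodules get their gitfiles rewritten too. */
connect_work_tree_and_git_dir("sub", ".git/modules/sub", 1);

/* Relocating a gitdir (see the migrate hunk above): no recursion. */
connect_work_tree_and_git_dir(path, new_git_dir, 0);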
#include "utf8.h"
#include "sha1-array.h"
#include "decorate.h"
+#include "oidset.h"
+#include "packfile.h"
+#include "submodule-config.h"
+#include "config.h"
+
+static struct oidset gitmodules_found = OIDSET_INIT;
+static struct oidset gitmodules_done = OIDSET_INIT;
#define FSCK_FATAL -1
#define FSCK_INFO -2
FUNC(MISSING_TAG_ENTRY, ERROR) \
FUNC(MISSING_TAG_OBJECT, ERROR) \
FUNC(MISSING_TREE, ERROR) \
+ FUNC(MISSING_TREE_OBJECT, ERROR) \
FUNC(MISSING_TYPE, ERROR) \
FUNC(MISSING_TYPE_ENTRY, ERROR) \
FUNC(MULTIPLE_AUTHORS, ERROR) \
FUNC(TREE_NOT_SORTED, ERROR) \
FUNC(UNKNOWN_TYPE, ERROR) \
FUNC(ZERO_PADDED_DATE, ERROR) \
+ FUNC(GITMODULES_MISSING, ERROR) \
+ FUNC(GITMODULES_BLOB, ERROR) \
+ FUNC(GITMODULES_PARSE, ERROR) \
+ FUNC(GITMODULES_NAME, ERROR) \
+ FUNC(GITMODULES_SYMLINK, ERROR) \
/* warnings */ \
FUNC(BAD_FILEMODE, WARN) \
FUNC(EMPTY_NAME, WARN) \
name = get_object_name(options, &commit->object);
if (name)
- put_object_name(options, &commit->tree->object, "%s:", name);
+ put_object_name(options, &get_commit_tree(commit)->object,
+ "%s:", name);
- result = options->walk((struct object *)commit->tree, OBJ_TREE, data, options);
+ result = options->walk((struct object *)get_commit_tree(commit),
+ OBJ_TREE, data, options);
if (result < 0)
return result;
res = result;
has_empty_name |= !*name;
has_dot |= !strcmp(name, ".");
has_dotdot |= !strcmp(name, "..");
- has_dotgit |= (!strcmp(name, ".git") ||
- is_hfs_dotgit(name) ||
- is_ntfs_dotgit(name));
+ has_dotgit |= is_hfs_dotgit(name) || is_ntfs_dotgit(name);
has_zero_pad |= *(char *)desc.buffer == '0';
+
+ if (is_hfs_dotgitmodules(name) || is_ntfs_dotgitmodules(name)) {
+ if (!S_ISLNK(mode))
+ oidset_insert(&gitmodules_found, oid);
+ else
+ retval += report(options, &item->object,
+ FSCK_MSG_GITMODULES_SYMLINK,
+ ".gitmodules is a symbolic link");
+ }
+
if (update_tree_entry_gently(&desc)) {
retval += report(options, &item->object, FSCK_MSG_BAD_TREE, "cannot be parsed as a tree");
break;
static int fsck_commit_buffer(struct commit *commit, const char *buffer,
unsigned long size, struct fsck_options *options)
{
- unsigned char tree_sha1[20], sha1[20];
+ struct object_id tree_oid, oid;
struct commit_graft *graft;
unsigned parent_count, parent_line_count = 0, author_count;
int err;
const char *buffer_begin = buffer;
+ const char *p;
if (verify_headers(buffer, size, &commit->object, options))
return -1;
if (!skip_prefix(buffer, "tree ", &buffer))
return report(options, &commit->object, FSCK_MSG_MISSING_TREE, "invalid format - expected 'tree' line");
- if (get_sha1_hex(buffer, tree_sha1) || buffer[40] != '\n') {
+ if (parse_oid_hex(buffer, &tree_oid, &p) || *p != '\n') {
err = report(options, &commit->object, FSCK_MSG_BAD_TREE_SHA1, "invalid 'tree' line format - bad sha1");
if (err)
return err;
}
- buffer += 41;
+ buffer = p + 1;
while (skip_prefix(buffer, "parent ", &buffer)) {
- if (get_sha1_hex(buffer, sha1) || buffer[40] != '\n') {
+ if (parse_oid_hex(buffer, &oid, &p) || *p != '\n') {
err = report(options, &commit->object, FSCK_MSG_BAD_PARENT_SHA1, "invalid 'parent' line format - bad sha1");
if (err)
return err;
}
- buffer += 41;
+ buffer = p + 1;
parent_line_count++;
}
graft = lookup_commit_graft(&commit->object.oid);
err = fsck_ident(&buffer, &commit->object, options);
if (err)
return err;
- if (!commit->tree) {
+ if (!get_commit_tree(commit)) {
- err = report(options, &commit->object, FSCK_MSG_BAD_TREE, "could not load commit's tree %s", sha1_to_hex(tree_sha1));
+ err = report(options, &commit->object, FSCK_MSG_BAD_TREE, "could not load commit's tree %s", oid_to_hex(&tree_oid));
if (err)
return err;
}
static int fsck_tag_buffer(struct tag *tag, const char *data,
unsigned long size, struct fsck_options *options)
{
- unsigned char sha1[20];
+ struct object_id oid;
int ret = 0;
const char *buffer;
char *to_free = NULL, *eol;
struct strbuf sb = STRBUF_INIT;
+ const char *p;
if (data)
buffer = data;
ret = report(options, &tag->object, FSCK_MSG_MISSING_OBJECT, "invalid format - expected 'object' line");
goto done;
}
- if (get_sha1_hex(buffer, sha1) || buffer[40] != '\n') {
+ if (parse_oid_hex(buffer, &oid, &p) || *p != '\n') {
ret = report(options, &tag->object, FSCK_MSG_BAD_OBJECT_SHA1, "invalid 'object' line format - bad sha1");
if (ret)
goto done;
}
- buffer += 41;
+ buffer = p + 1;
if (!skip_prefix(buffer, "type ", &buffer)) {
ret = report(options, &tag->object, FSCK_MSG_MISSING_TYPE_ENTRY, "invalid format - expected 'type' line");
return fsck_tag_buffer(tag, data, size, options);
}
+struct fsck_gitmodules_data {
+ struct object *obj;
+ struct fsck_options *options;
+ int ret;
+};
+
+static int fsck_gitmodules_fn(const char *var, const char *value, void *vdata)
+{
+ struct fsck_gitmodules_data *data = vdata;
+ const char *subsection, *key;
+ int subsection_len;
+ char *name;
+
+ if (parse_config_key(var, "submodule", &subsection, &subsection_len, &key) < 0 ||
+ !subsection)
+ return 0;
+
+ name = xmemdupz(subsection, subsection_len);
+ if (check_submodule_name(name) < 0)
+ data->ret |= report(data->options, data->obj,
+ FSCK_MSG_GITMODULES_NAME,
+ "disallowed submodule name: %s",
+ name);
+ free(name);
+
+ return 0;
+}
+
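
[Editor's note: to make the key splitting concrete, a descriptive comment with illustrative .gitmodules content.]

/*
 * Example: a tree whose .gitmodules contains
 *
 *     [submodule "../../evil"]
 *         path = sub
 *
 * reaches this callback with var = "submodule.../../evil.path";
 * parse_config_key() extracts subsection "../../evil" and key "path",
 * and check_submodule_name() rejects the name (empty names and any
 * ".." path component are disallowed), triggering
 * FSCK_MSG_GITMODULES_NAME.
 */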
+static int fsck_blob(struct blob *blob, const char *buf,
+ unsigned long size, struct fsck_options *options)
+{
+ struct fsck_gitmodules_data data;
+
+ if (!oidset_contains(&gitmodules_found, &blob->object.oid))
+ return 0;
+ oidset_insert(&gitmodules_done, &blob->object.oid);
+
+ if (!buf) {
+ /*
+ * A missing buffer here is a sign that the caller found the
+ * blob too gigantic to load into memory. Let's just consider
+ * that an error.
+ */
+ return report(options, &blob->object,
+ FSCK_MSG_GITMODULES_PARSE,
+ ".gitmodules too large to parse");
+ }
+
+ data.obj = &blob->object;
+ data.options = options;
+ data.ret = 0;
+ if (git_config_from_mem(fsck_gitmodules_fn, CONFIG_ORIGIN_BLOB,
+ ".gitmodules", buf, size, &data))
+ data.ret |= report(options, &blob->object,
+ FSCK_MSG_GITMODULES_PARSE,
+ "could not parse gitmodules blob");
+
+ return data.ret;
+}
+
int fsck_object(struct object *obj, void *data, unsigned long size,
struct fsck_options *options)
{
return report(options, obj, FSCK_MSG_BAD_OBJECT_SHA1, "no valid object to fsck");
if (obj->type == OBJ_BLOB)
- return 0;
+ return fsck_blob((struct blob *)obj, data, size, options);
if (obj->type == OBJ_TREE)
return fsck_tree((struct tree *) obj, options);
if (obj->type == OBJ_COMMIT)
error("object %s: %s", describe_object(o, obj), message);
return 1;
}
+
+int fsck_finish(struct fsck_options *options)
+{
+ int ret = 0;
+ struct oidset_iter iter;
+ const struct object_id *oid;
+
+ oidset_iter_init(&gitmodules_found, &iter);
+ while ((oid = oidset_iter_next(&iter))) {
+ struct blob *blob;
+ enum object_type type;
+ unsigned long size;
+ char *buf;
+
+ if (oidset_contains(&gitmodules_done, oid))
+ continue;
+
+ blob = lookup_blob(oid);
+ if (!blob) {
+ /* cannot pass &blob->object here: blob is NULL */
+ ret |= report(options, lookup_unknown_object(oid->hash),
+ FSCK_MSG_GITMODULES_BLOB,
+ "non-blob found at .gitmodules");
+ continue;
+ }
+
+ buf = read_object_file(oid, &type, &size);
+ if (!buf) {
+ if (is_promisor_object(&blob->object.oid))
+ continue;
+ ret |= report(options, &blob->object,
+ FSCK_MSG_GITMODULES_MISSING,
+ "unable to read .gitmodules blob");
+ continue;
+ }
+
+ if (type == OBJ_BLOB)
+ ret |= fsck_blob(blob, buf, size, options);
+ else
+ ret |= report(options, &blob->object,
+ FSCK_MSG_GITMODULES_BLOB,
+ "non-blob found at .gitmodules");
+ free(buf);
+ }
+
+ oidset_clear(&gitmodules_found);
+ oidset_clear(&gitmodules_done);
+ return ret;
+}
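
[Editor's note: the two oidsets implement a two-pass protocol. A hedged driver sketch; the object arrays are hypothetical stand-ins for the real callers, fsck and index-pack.]

int err = 0;

/* Pass 1: fsck_tree() records every non-symlink .gitmodules OID in
 * gitmodules_found; fsck_blob() marks blobs it actually saw in
 * gitmodules_done. */
for (i = 0; i < nr_objects; i++)
	err |= fsck_object(objects[i], buffers[i], sizes[i], &options);

/* Pass 2: revisit any recorded .gitmodules blob that never came by. */
err |= fsck_finish(&options);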
# and leaves CR at the end instead.
cr=$(printf "\015")
+ empty_tree=$(git hash-object -t tree /dev/null)
+
strategy_args=${strategy:+--strategy=$strategy}
test -n "$strategy_opts" &&
eval '
append_todo_help () {
gettext "
Commands:
-p, pick = use commit
-r, reword = use commit, but edit the commit message
-e, edit = use commit, but stop for amending
-s, squash = use commit, but meld into previous commit
-f, fixup = like \"squash\", but discard this commit's log message
-x, exec = run command (the rest of the line) using shell
-d, drop = remove commit
+p, pick <commit> = use commit
+r, reword <commit> = use commit, but edit the commit message
+e, edit <commit> = use commit, but stop for amending
+s, squash <commit> = use commit, but meld into previous commit
+f, fixup <commit> = like \"squash\", but discard this commit's log message
+x, exec <command> = run command (the rest of the line) using shell
+d, drop <commit> = remove commit
+l, label <label> = label current HEAD with a name
+t, reset <label> = reset HEAD to a label
+m, merge [-C <commit> | -c <commit>] <label> [# <oneline>]
+. create a merge commit using the original merge commit's
+. message (or the oneline, if no original merge commit was
+. specified). Use -c <commit> to reword the commit message.
These lines can be re-ordered; they are executed from top to bottom.
" | git stripspace --comment-lines >>"$todo"
die "$(eval_gettext "\$sha1: not a commit that can be picked")"
}
ptree=$(git rev-parse -q --verify "$1"^^{tree} 2>/dev/null) ||
- ptree=4b825dc642cb6eb9a060e54bf8d69288fbee4904
+ ptree=$empty_tree
test "$tree" = "$ptree"
}
pick_one_preserving_merges "$@" && return
output eval git cherry-pick $allow_rerere_autoupdate $allow_empty_message \
${gpg_sign_opt:+$(git rev-parse --sq-quote "$gpg_sign_opt")} \
- "$strategy_args" $empty_args $ff "$@"
+ $signoff "$strategy_args" $empty_args $ff "$@"
# If cherry-pick dies it leaves the to-be-picked commit unrecorded. Reschedule
# previous task so this commit is not lost.
# resolve before manually running git commit --amend then git
# rebase --continue.
git commit --allow-empty --allow-empty-message --amend \
- --no-post-rewrite -n -q -C $sha1 &&
+ --no-post-rewrite -n -q -C $sha1 $signoff &&
pick_one -n $sha1 &&
git commit --allow-empty --allow-empty-message \
- --amend --no-post-rewrite -n -q -C $sha1 \
+ --amend --no-post-rewrite -n -q -C $sha1 $signoff \
${gpg_sign_opt:+"$gpg_sign_opt"} ||
die_with_patch $sha1 "$(eval_gettext "Could not apply \$sha1... \$rest")"
else
else
revisions=$onto...$orig_head
shortrevisions=$shorthead
+ test -z "$squash_onto" ||
+ echo "$squash_onto" >"$state_dir"/squash-onto
fi
}
die "Could not skip unnecessary pick commands"
checkout_onto
- if test -z "$rebase_root" && test ! -d "$rewritten"
+ if test ! -d "$rewritten"
then
require_clean_work_tree "rebase"
exec git rebase--helper ${force_rebase:+--no-ff} $allow_empty_message \
init_revisions_and_shortrevisions
git rebase--helper --make-script ${keep_empty:+--keep-empty} \
+ ${rebase_merges:+--rebase-merges} \
+ ${rebase_cousins:+--rebase-cousins} \
$revisions ${restrict_revision+^$restrict_revision} >"$todo" ||
die "$(gettext "Could not generate todo list")"
*var = val;
}
-static void protocol_http_header(void)
-{
- if (get_protocol_version_config() > 0) {
- struct strbuf protocol_header = STRBUF_INIT;
-
- strbuf_addf(&protocol_header, GIT_PROTOCOL_HEADER ": version=%d",
- get_protocol_version_config());
-
- extra_http_headers = curl_slist_append(extra_http_headers,
- protocol_header.buf);
- strbuf_release(&protocol_header);
- }
-}
-
void http_init(struct remote *remote, const char *url, int proactive_auth)
{
char *low_speed_limit;
if (remote)
var_override(&http_proxy_authmethod, remote->http_proxy_authmethod);
- protocol_http_header();
-
pragma_header = curl_slist_append(http_copy_default_headers(),
"Pragma: no-cache");
no_pragma_header = curl_slist_append(http_copy_default_headers(),
headers = curl_slist_append(headers, buf.buf);
+ /* Add additional headers here */
+ if (options && options->extra_headers) {
+ const struct string_list_item *item;
+ for_each_string_list_item(item, options->extra_headers) {
+ headers = curl_slist_append(headers, item->string);
+ }
+ }
+
curl_easy_setopt(slot->curl, CURLOPT_URL, url);
curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, headers);
curl_easy_setopt(slot->curl, CURLOPT_ENCODING, "gzip");
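
[Editor's note: a caller-side sketch of the new hook. Hedged: it assumes `extra_headers` is the string-list field added to `struct http_get_options` in this series, and the header value is illustrative.]

#include "http.h"

static int fetch_with_header(const char *url, struct strbuf *out)
{
	struct string_list extra = STRING_LIST_INIT_DUP;
	struct http_get_options options = { 0 };
	int ret;

	string_list_append(&extra, "Git-Protocol: version=2");
	options.extra_headers = &extra;

	ret = http_get_strbuf(url, out, &options); /* header rides along */

	string_list_clear(&extra, 0);
	return ret;
}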
return 0;
if (!skip_prefix(asked, base->buf, &tail))
- die("BUG: update_url_from_redirect: %s is not a superset of %s",
+ BUG("update_url_from_redirect: %s is not a superset of %s",
asked, base->buf);
new_len = got->len;
strbuf_reset(result);
break;
default:
- die("BUG: HTTP_KEEP_ERROR is only supported with strbufs");
+ BUG("HTTP_KEEP_ERROR is only supported with strbufs");
}
}
int ret = 0, i = 0;
char *url, *data;
struct strbuf buf = STRBUF_INIT;
- unsigned char sha1[20];
+ unsigned char hash[GIT_MAX_RAWSZ];
+ const unsigned hexsz = the_hash_algo->hexsz;
end_url_with_slash(&buf, base_url);
strbuf_addstr(&buf, "objects/info/packs");
switch (data[i]) {
case 'P':
i++;
- if (i + 52 <= buf.len &&
+ if (i + hexsz + 12 <= buf.len &&
starts_with(data + i, " pack-") &&
- starts_with(data + i + 46, ".pack\n")) {
- get_sha1_hex(data + i + 6, sha1);
- fetch_and_setup_pack_index(packs_head, sha1,
+ starts_with(data + i + hexsz + 6, ".pack\n")) {
+ get_sha1_hex(data + i + 6, hash);
+ fetch_and_setup_pack_index(packs_head, hash,
base_url);
- i += 51;
+ i += hexsz + 11;
break;
}
default:
*lst = (*lst)->next;
if (!strip_suffix(preq->tmpfile, ".pack.temp", &len))
- die("BUG: pack tmpfile does not end in .pack.temp?");
+ BUG("pack tmpfile does not end in .pack.temp?");
tmp_idx = xstrfmt("%.*s.idx.temp", (int)len, preq->tmpfile);
argv_array_push(&ip.args, "index-pack");
CURLcode c = curl_easy_getinfo(slot->curl, CURLINFO_HTTP_CODE,
&slot->http_code);
if (c != CURLE_OK)
- die("BUG: curl_easy_getinfo for HTTP code failed: %s",
+ BUG("curl_easy_getinfo for HTTP code failed: %s",
curl_easy_strerror(c));
if (slot->http_code >= 300)
return size;
uint32_t version, nr, i, *index;
int fd = git_open(path);
struct stat st;
+ const unsigned int hashsz = the_hash_algo->rawsz;
if (fd < 0)
return -1;
return -1;
}
idx_size = xsize_t(st.st_size);
- if (idx_size < 4 * 256 + 20 + 20) {
+ if (idx_size < 4 * 256 + hashsz + hashsz) {
close(fd);
return error("index file %s is too small", path);
}
/*
* Total size:
* - 256 index entries 4 bytes each
- * - 24-byte entries * nr (20-byte sha1 + 4-byte offset)
- * - 20-byte SHA1 of the packfile
- * - 20-byte SHA1 file checksum
+ * - (hashsz + 4)-byte entries * nr (object ID + 4-byte offset)
+ * - hash of the packfile
+ * - file checksum
*/
- if (idx_size != 4*256 + nr * 24 + 20 + 20) {
+ if (idx_size != 4*256 + nr * (hashsz + 4) + hashsz + hashsz) {
munmap(idx_map, idx_size);
return error("wrong index v1 file size in %s", path);
}
* Minimum size:
* - 8 bytes of header
* - 256 index entries 4 bytes each
- * - 20-byte sha1 entry * nr
+ * - object ID entry * nr
* - 4-byte crc entry * nr
* - 4-byte offset entry * nr
- * - 20-byte SHA1 of the packfile
- * - 20-byte SHA1 file checksum
+ * - hash of the packfile
+ * - file checksum
* And after the 4-byte offset table might be a
* variable sized table containing 8-byte entries
* for offsets larger than 2^31.
*/
- unsigned long min_size = 8 + 4*256 + nr*(20 + 4 + 4) + 20 + 20;
+ unsigned long min_size = 8 + 4*256 + nr*(hashsz + 4 + 4) + hashsz + hashsz;
unsigned long max_size = min_size;
if (nr)
max_size += (nr - 1)*8;
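
[Editor's note: a quick sanity check of the bounds above, assuming SHA-1 (hashsz = 20) and nr = 100.]

/*
 *   min_size = 8 + 4*256 + 100*(20 + 4 + 4) + 20 + 20 = 3872 bytes
 *   max_size = 3872 + (100 - 1)*8                     = 4664 bytes
 *
 * These match the old hard-coded 20-byte formula exactly, which is
 * what keeps this hunk a pure refactoring for SHA-1 repositories.
 */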
return 0;
if (!strip_suffix(p->pack_name, ".pack", &len))
- die("BUG: pack_name does not end in .pack");
+ BUG("pack_name does not end in .pack");
idx_name = xstrfmt("%.*s.idx", (int)len, p->pack_name);
ret = check_packed_git_idx(idx_name, p);
free(idx_name);
}
}
-static void close_pack(struct packed_git *p)
+void close_pack(struct packed_git *p)
{
close_pack_windows(p);
close_pack_fd(p);
for (p = o->packed_git; p; p = p->next)
if (p->do_not_close)
- die("BUG: want to close pack marked 'do-not-close'");
+ BUG("want to close pack marked 'do-not-close'");
else
close_pack(p);
}
{
struct stat st;
struct pack_header hdr;
- unsigned char sha1[20];
- unsigned char *idx_sha1;
+ unsigned char hash[GIT_MAX_RAWSZ];
+ unsigned char *idx_hash;
long fd_flag;
ssize_t read_result;
+ const unsigned hashsz = the_hash_algo->rawsz;
if (!p->index_data && open_pack_index(p))
return error("packfile %s index unavailable", p->pack_name);
" while index indicates %"PRIu32" objects",
p->pack_name, ntohl(hdr.hdr_entries),
p->num_objects);
- if (lseek(p->pack_fd, p->pack_size - sizeof(sha1), SEEK_SET) == -1)
+ if (lseek(p->pack_fd, p->pack_size - hashsz, SEEK_SET) == -1)
return error("end of packfile %s is unavailable", p->pack_name);
- read_result = read_in_full(p->pack_fd, sha1, sizeof(sha1));
+ read_result = read_in_full(p->pack_fd, hash, hashsz);
if (read_result < 0)
return error_errno("error reading from %s", p->pack_name);
- if (read_result != sizeof(sha1))
+ if (read_result != hashsz)
return error("packfile %s signature is unavailable", p->pack_name);
- idx_sha1 = ((unsigned char *)p->index_data) + p->index_size - 40;
- if (hashcmp(sha1, idx_sha1))
+ idx_hash = ((unsigned char *)p->index_data) + p->index_size - hashsz * 2;
+ if (hashcmp(hash, idx_hash))
return error("packfile %s does not match index", p->pack_name);
return 0;
}
static int in_window(struct pack_window *win, off_t offset)
{
- /* We must promise at least 20 bytes (one hash) after the
+ /* We must promise at least one full hash after the
* offset is available from this window, otherwise the offset
* is not actually in this window and a different window (which
* has that one hash excess) must be used. This is to support
*/
off_t win_off = win->offset;
return win_off <= offset
- && (offset + 20) <= (win_off + win->len);
+ && (offset + the_hash_algo->rawsz) <= (win_off + win->len);
}
unsigned char *use_pack(struct packed_git *p,
*/
if (!p->pack_size && p->pack_fd == -1 && open_packed_git(p))
die("packfile %s cannot be accessed", p->pack_name);
- if (offset > (p->pack_size - 20))
+ if (offset > (p->pack_size - the_hash_algo->rawsz))
die("offset beyond end of packfile (truncated pack?)");
if (offset < 0)
die(_("offset before end of packfile (broken .idx?)"));
p->pack_size = st.st_size;
p->pack_local = local;
p->mtime = st.st_mtime;
- if (path_len < 40 || get_sha1_hex(path + path_len - 40, p->sha1))
+ if (path_len < the_hash_algo->hexsz ||
+ get_sha1_hex(path + path_len - the_hash_algo->hexsz, p->sha1))
hashclr(p->sha1);
return p;
}
for (p = the_repository->objects->packed_git; p; p = p->next)
for (i = 0; i < p->num_bad_objects; i++)
- if (!hashcmp(sha1, p->bad_object_sha1 + 20 * i))
+ if (!hashcmp(sha1,
+ p->bad_object_sha1 + the_hash_algo->rawsz * i))
return p;
return NULL;
}
} else if (type == OBJ_REF_DELTA) {
/* The base entry _must_ be in the same pack */
base_offset = find_pack_entry_one(base_info, p);
- *curpos += 20;
+ *curpos += the_hash_algo->rawsz;
} else
die("I am totally screwed");
return base_offset;
return NULL;
}
-static int retry_bad_packed_offset(struct packed_git *p, off_t obj_offset)
+static int retry_bad_packed_offset(struct repository *r,
+ struct packed_git *p,
+ off_t obj_offset)
{
int type;
struct revindex_entry *revidx;
return OBJ_BAD;
nth_packed_object_oid(&oid, p, revidx->nr);
mark_bad_packed_object(p, oid.hash);
- type = oid_object_info(&oid, NULL);
+ type = oid_object_info(r, &oid, NULL);
if (type <= OBJ_NONE)
return OBJ_BAD;
return type;
#define POI_STACK_PREALLOC 64
-static enum object_type packed_to_object_type(struct packed_git *p,
+static enum object_type packed_to_object_type(struct repository *r,
+ struct packed_git *p,
off_t obj_offset,
enum object_type type,
struct pack_window **w_curs,
if (type <= OBJ_NONE) {
/* If getting the base itself fails, we first
* retry the base, otherwise unwind */
- type = retry_bad_packed_offset(p, base_offset);
+ type = retry_bad_packed_offset(r, p, base_offset);
if (type > OBJ_NONE)
goto out;
goto unwind;
unwind:
while (poi_stack_nr) {
obj_offset = poi_stack[--poi_stack_nr];
- type = retry_bad_packed_offset(p, obj_offset);
+ type = retry_bad_packed_offset(r, p, obj_offset);
if (type > OBJ_NONE)
goto out;
}
free(ent);
}
-static void *cache_or_unpack_entry(struct packed_git *p, off_t base_offset,
- unsigned long *base_size, enum object_type *type)
+static void *cache_or_unpack_entry(struct repository *r, struct packed_git *p,
+ off_t base_offset, unsigned long *base_size,
+ enum object_type *type)
{
struct delta_base_cache_entry *ent;
ent = get_delta_base_cache_entry(p, base_offset);
if (!ent)
- return unpack_entry(p, base_offset, type, base_size);
+ return unpack_entry(r, p, base_offset, type, base_size);
if (type)
*type = ent->type;
hashmap_add(&delta_base_cache, ent);
}
-int packed_object_info(struct packed_git *p, off_t obj_offset,
- struct object_info *oi)
+int packed_object_info(struct repository *r, struct packed_git *p,
+ off_t obj_offset, struct object_info *oi)
{
struct pack_window *w_curs = NULL;
unsigned long size;
* a "real" type later if the caller is interested.
*/
if (oi->contentp) {
- *oi->contentp = cache_or_unpack_entry(p, obj_offset, oi->sizep,
+ *oi->contentp = cache_or_unpack_entry(r, p, obj_offset, oi->sizep,
&type);
if (!*oi->contentp)
type = OBJ_BAD;
if (oi->typep || oi->type_name) {
enum object_type ptot;
- ptot = packed_to_object_type(p, obj_offset, type, &w_curs,
- curpos);
+ ptot = packed_to_object_type(r, p, obj_offset,
+ type, &w_curs, curpos);
if (oi->typep)
*oi->typep = ptot;
if (oi->type_name) {
unsigned long size;
};
-static void *read_object(const struct object_id *oid, enum object_type *type,
+static void *read_object(struct repository *r,
+ const struct object_id *oid,
+ enum object_type *type,
unsigned long *size)
{
struct object_info oi = OBJECT_INFO_INIT;
oi.sizep = size;
oi.contentp = &content;
- if (oid_object_info_extended(oid, &oi, 0) < 0)
+ if (oid_object_info_extended(r, oid, &oi, 0) < 0)
return NULL;
return content;
}
-void *unpack_entry(struct packed_git *p, off_t obj_offset,
+void *unpack_entry(struct repository *r, struct packed_git *p, off_t obj_offset,
enum object_type *final_type, unsigned long *final_size)
{
struct pack_window *w_curs = NULL;
case OBJ_OFS_DELTA:
case OBJ_REF_DELTA:
if (data)
- die("BUG: unpack_entry: left loop at a valid delta");
+ BUG("unpack_entry: left loop at a valid delta");
break;
case OBJ_COMMIT:
case OBJ_TREE:
oid_to_hex(&base_oid), (uintmax_t)obj_offset,
p->pack_name);
mark_bad_packed_object(p, base_oid.hash);
- base = read_object(&base_oid, &type, &base_size);
+ base = read_object(r, &base_oid, &type, &base_size);
external_base = base;
}
}
{
const unsigned char *index_fanout = p->index_data;
const unsigned char *index_lookup;
+ const unsigned int hashsz = the_hash_algo->rawsz;
int index_lookup_width;
if (!index_fanout)
index_lookup = index_fanout + 4 * 256;
if (p->index_version == 1) {
- index_lookup_width = 24;
+ index_lookup_width = hashsz + 4;
index_lookup += 4;
} else {
- index_lookup_width = 20;
+ index_lookup_width = hashsz;
index_fanout += 8;
index_lookup += 8;
}
uint32_t n)
{
const unsigned char *index = p->index_data;
+ const unsigned int hashsz = the_hash_algo->rawsz;
if (!index) {
if (open_pack_index(p))
return NULL;
return NULL;
index += 4 * 256;
if (p->index_version == 1) {
- return index + 24 * n + 4;
+ return index + (hashsz + 4) * n + 4;
} else {
index += 8;
- return index + 20 * n;
+ return index + hashsz * n;
}
}
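
[Editor's note: the two layouts this arithmetic encodes, spelled out for SHA-1 (hashsz = 20).]

/*
 * v1: after the 256-entry fanout, the table holds (4-byte offset,
 *     20-byte OID) pairs, so entry n's OID is at
 *     index + 4*256 + (20 + 4)*n + 4.
 * v2: after the 8-byte header and the fanout, OIDs are densely packed,
 *     so entry n's OID is at index + 4*256 + 8 + 20*n.
 */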
off_t nth_packed_object_offset(const struct packed_git *p, uint32_t n)
{
const unsigned char *index = p->index_data;
+ const unsigned int hashsz = the_hash_algo->rawsz;
index += 4 * 256;
if (p->index_version == 1) {
- return ntohl(*((uint32_t *)(index + 24 * n)));
+ return ntohl(*((uint32_t *)(index + (hashsz + 4) * n)));
} else {
uint32_t off;
- index += 8 + p->num_objects * (20 + 4);
+ index += 8 + p->num_objects * (hashsz + 4);
off = ntohl(*((uint32_t *)(index + 4 * n)));
if (!(off & 0x80000000))
return off;
}
- static int fill_pack_entry(const unsigned char *sha1,
+ static int fill_pack_entry(const struct object_id *oid,
struct pack_entry *e,
struct packed_git *p)
{
if (p->num_bad_objects) {
unsigned i;
for (i = 0; i < p->num_bad_objects; i++)
- if (!hashcmp(sha1, p->bad_object_sha1 + 20 * i))
+ if (!hashcmp(oid->hash,
+ p->bad_object_sha1 + the_hash_algo->rawsz * i))
return 0;
}
- offset = find_pack_entry_one(sha1, p);
+ offset = find_pack_entry_one(oid->hash, p);
if (!offset)
return 0;
return 0;
e->offset = offset;
e->p = p;
- hashcpy(e->sha1, sha1);
return 1;
}
- int find_pack_entry(struct repository *r, const unsigned char *sha1, struct pack_entry *e)
+ int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e)
{
struct list_head *pos;
list_for_each(pos, &r->objects->packed_git_mru) {
struct packed_git *p = list_entry(pos, struct packed_git, mru);
- if (fill_pack_entry(sha1, e, p)) {
+ if (fill_pack_entry(oid, e, p)) {
list_move(&p->mru, &r->objects->packed_git_mru);
return 1;
}
return 0;
}
- int has_sha1_pack(const unsigned char *sha1)
+ int has_object_pack(const struct object_id *oid)
{
struct pack_entry e;
- return find_pack_entry(the_repository, sha1, &e);
+ return find_pack_entry(the_repository, oid, &e);
}
int has_pack_index(const unsigned char *sha1)
return 1;
}
-static int for_each_object_in_pack(struct packed_git *p, each_packed_object_fn cb, void *data)
+int for_each_object_in_pack(struct packed_git *p, each_packed_object_fn cb, void *data)
{
uint32_t i;
int r = 0;
struct commit *commit = (struct commit *) obj;
struct commit_list *parents = commit->parents;
- oidset_insert(set, &commit->tree->object.oid);
+ oidset_insert(set, get_commit_tree_oid(commit));
for (; parents; parents = parents->next)
oidset_insert(set, &parents->item->object.oid);
} else if (obj->type == OBJ_TAG) {
extern unsigned char *use_pack(struct packed_git *, struct pack_window **, off_t, unsigned long *);
extern void close_pack_windows(struct packed_git *);
+extern void close_pack(struct packed_git *);
extern void close_all_packs(struct raw_object_store *o);
extern void unuse_pack(struct pack_window **);
extern void clear_delta_base_cache(void);
extern off_t find_pack_entry_one(const unsigned char *sha1, struct packed_git *);
extern int is_pack_valid(struct packed_git *);
-extern void *unpack_entry(struct packed_git *, off_t, enum object_type *, unsigned long *);
+extern void *unpack_entry(struct repository *r, struct packed_git *, off_t, enum object_type *, unsigned long *);
extern unsigned long unpack_object_header_buffer(const unsigned char *buf, unsigned long len, enum object_type *type, unsigned long *sizep);
extern unsigned long get_size_from_delta(struct packed_git *, struct pack_window **, off_t);
extern int unpack_object_header(struct packed_git *, struct pack_window **, off_t *, unsigned long *);
/* global flag to enable extra checks when accessing packed objects */
extern int do_check_packed_object_crc;
-extern int packed_object_info(struct packed_git *pack, off_t offset, struct object_info *);
+extern int packed_object_info(struct repository *r,
+ struct packed_git *pack,
+ off_t offset, struct object_info *);
extern void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1);
extern const struct packed_git *has_packed_and_bad(const unsigned char *sha1);
* Iff a pack file in the given repository contains the object named by sha1,
* return true and store its location to e.
*/
- extern int find_pack_entry(struct repository *r, const unsigned char *sha1, struct pack_entry *e);
+ extern int find_pack_entry(struct repository *r, const struct object_id *oid, struct pack_entry *e);
- extern int has_sha1_pack(const unsigned char *sha1);
+ extern int has_object_pack(const struct object_id *oid);
extern int has_pack_index(const unsigned char *sha1);
struct packed_git *pack,
uint32_t pos,
void *data);
+extern int for_each_object_in_pack(struct packed_git *p, each_packed_object_fn, void *data);
extern int for_each_packed_object(each_packed_object_fn, void *, unsigned flags);
/*
int size, len;
struct cache_entry *ce, *ret;
- if (!verify_path(path)) {
+ if (!verify_path(path, mode)) {
error("Invalid path '%s'", path);
return NULL;
}
* Also, we don't want double slashes or slashes at the
* end that can make pathnames ambiguous.
*/
-static int verify_dotfile(const char *rest)
+static int verify_dotfile(const char *rest, unsigned mode)
{
/*
* The first character was '.', but that
switch (*rest) {
/*
- * ".git" followed by NUL or slash is bad. This
- * shares the path end test with the ".." case.
+ * ".git" followed by NUL or slash is bad. Note that we match
+ * case-insensitively here, even if ignore_case is not set.
+ * This outlaws ".GIT" everywhere out of an abundance of caution,
+ * since there's really no good reason to allow it.
+ *
+ * Once we've seen ".git", we can also find ".gitmodules", etc (also
+ * case-insensitively).
*/
case 'g':
case 'G':
break;
if (rest[2] != 't' && rest[2] != 'T')
break;
- rest += 2;
- /* fallthrough */
+ if (rest[3] == '\0' || is_dir_sep(rest[3]))
+ return 0;
+ if (S_ISLNK(mode)) {
+ rest += 3;
+ if (skip_iprefix(rest, "modules", &rest) &&
+ (*rest == '\0' || is_dir_sep(*rest)))
+ return 0;
+ }
+ break;
case '.':
if (rest[1] == '\0' || is_dir_sep(rest[1]))
return 0;
return 1;
}
-int verify_path(const char *path)
+int verify_path(const char *path, unsigned mode)
{
char c;
return 1;
if (is_dir_sep(c)) {
inside:
- if (protect_hfs && is_hfs_dotgit(path))
- return 0;
- if (protect_ntfs && is_ntfs_dotgit(path))
- return 0;
+ if (protect_hfs) {
+ if (is_hfs_dotgit(path))
+ return 0;
+ if (S_ISLNK(mode)) {
+ if (is_hfs_dotgitmodules(path))
+ return 0;
+ }
+ }
+ if (protect_ntfs) {
+ if (is_ntfs_dotgit(path))
+ return 0;
+ if (S_ISLNK(mode)) {
+ if (is_ntfs_dotgitmodules(path))
+ return 0;
+ }
+ }
+
c = *path++;
- if ((c == '.' && !verify_dotfile(path)) ||
+ if ((c == '.' && !verify_dotfile(path, mode)) ||
is_dir_sep(c) || c == '\0')
return 0;
}
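
[Editor's note: taken together, the mode-aware checks behave like this. A hedged illustration assuming git's cache.h; the assertions hold with protect_hfs/protect_ntfs at their defaults.]

#include "cache.h"

static void verify_path_examples(void)
{
	/* a regular .gitmodules blob is still fine */
	assert(verify_path(".gitmodules", S_IFREG | 0644));

	/* ...but a symlinked .gitmodules is now rejected */
	assert(!verify_path(".gitmodules", S_IFLNK));

	/* ".git" components are rejected regardless of mode, as before */
	assert(!verify_path("sub/.git", S_IFREG | 0644));
}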
if (!ok_to_add)
return -1;
- if (!verify_path(ce->name))
+ if (!verify_path(ce->name, ce->ce_mode))
return error("Invalid path '%s'", ce->name);
if (!skip_df_check &&
if (verify_hdr(hdr, mmap_size) < 0)
goto unmap;
- hashcpy(istate->sha1, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
+ hashcpy(istate->oid.hash, (const unsigned char *)hdr + mmap_size - the_hash_algo->rawsz);
istate->version = ntohl(hdr->hdr_version);
istate->cache_nr = ntohl(hdr->hdr_entries);
istate->cache_alloc = alloc_nr(istate->cache_nr);
uint64_t start = getnanotime();
struct split_index *split_index;
int ret;
- char *base_sha1_hex;
+ char *base_oid_hex;
char *base_path;
/* istate->initialized covers both .git/index and .git/sharedindex.xxx */
trace_performance_since(start, "read cache %s", path);
split_index = istate->split_index;
- if (!split_index || is_null_sha1(split_index->base_sha1)) {
+ if (!split_index || is_null_oid(&split_index->base_oid)) {
post_read_index_from(istate);
return ret;
}
else
split_index->base = xcalloc(1, sizeof(*split_index->base));
- base_sha1_hex = sha1_to_hex(split_index->base_sha1);
- base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_sha1_hex);
+ base_oid_hex = oid_to_hex(&split_index->base_oid);
+ base_path = xstrfmt("%s/sharedindex.%s", gitdir, base_oid_hex);
ret = do_read_index(split_index->base, base_path, 1);
- if (hashcmp(split_index->base_sha1, split_index->base->sha1))
+ if (oidcmp(&split_index->base_oid, &split_index->base->oid))
die("broken index, expect %s in %s, got %s",
- base_sha1_hex, base_path,
- sha1_to_hex(split_index->base->sha1));
+ base_oid_hex, base_path,
+ oid_to_hex(&split_index->base->oid));
freshen_shared_index(base_path, 0);
merge_base_index(istate);
if (n != the_hash_algo->rawsz)
goto out;
- if (hashcmp(istate->sha1, hash))
+ if (hashcmp(istate->oid.hash, hash))
goto out;
close(fd);
if (!istate->version) {
istate->version = get_index_format_default();
- if (getenv("GIT_TEST_SPLIT_INDEX"))
+ if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0))
init_split_index(istate);
}
return -1;
}
- if (ce_flush(&c, newfd, istate->sha1))
+ if (ce_flush(&c, newfd, istate->oid.hash))
return -1;
if (close_tempfile_gently(tempfile)) {
error(_("could not close '%s'"), tempfile->filename.buf);
return ret;
}
ret = rename_tempfile(temp,
- git_path("sharedindex.%s", sha1_to_hex(si->base->sha1)));
+ git_path("sharedindex.%s", oid_to_hex(&si->base->oid)));
if (!ret) {
- hashcpy(si->base_sha1, si->base->sha1);
- clean_shared_index_files(sha1_to_hex(si->base->sha1));
+ oidcpy(&si->base_oid, &si->base->oid);
+ clean_shared_index_files(oid_to_hex(&si->base->oid));
}
return ret;
if (!si || alternate_index_output ||
(istate->cache_changed & ~EXTMASK)) {
if (si)
- hashclr(si->base_sha1);
+ oidclr(&si->base_oid);
ret = do_write_locked_index(istate, lock, flags);
goto out;
}
- if (getenv("GIT_TEST_SPLIT_INDEX")) {
+ if (git_env_bool("GIT_TEST_SPLIT_INDEX", 0)) {
- int v = si->base_sha1[0];
+ int v = si->base_oid.hash[0];
if ((v & 15) < 6)
istate->cache_changed |= SPLIT_INDEX_ORDERED;
}
temp = mks_tempfile(git_path("sharedindex_XXXXXX"));
if (!temp) {
- hashclr(si->base_sha1);
+ oidclr(&si->base_oid);
ret = do_write_locked_index(istate, lock, flags);
goto out;
}
/* Freshen the shared index only if the split-index was written */
if (!ret && !new_shared_index) {
const char *shared_index = git_path("sharedindex.%s",
- sha1_to_hex(si->base_sha1));
+ oid_to_hex(&si->base_oid));
freshen_shared_index(shared_index, 1);
}
#include "diff.h"
#include "refs.h"
#include "revision.h"
+#include "repository.h"
#include "graph.h"
#include "grep.h"
#include "reflog-walk.h"
{
struct tree_desc desc;
struct name_entry entry;
- struct object *obj = &tree->object;
- if (!has_object_file(&obj->oid))
+ if (parse_tree_gently(tree, 1) < 0)
return;
- if (parse_tree(tree) < 0)
- die("bad tree %s", oid_to_hex(&obj->oid));
init_tree_desc(&desc, tree->buffer, tree->size);
while (tree_entry(&desc, &entry)) {
mark_tree_contents_uninteresting(tree);
}
-void mark_parents_uninteresting(struct commit *commit)
+struct commit_stack {
+ struct commit **items;
+ size_t nr, alloc;
+};
+#define COMMIT_STACK_INIT { NULL, 0, 0 }
+
+static void commit_stack_push(struct commit_stack *stack, struct commit *commit)
{
- struct commit_list *parents = NULL, *l;
+ ALLOC_GROW(stack->items, stack->nr + 1, stack->alloc);
+ stack->items[stack->nr++] = commit;
+}
- for (l = commit->parents; l; l = l->next)
- commit_list_insert(l->item, &parents);
+static struct commit *commit_stack_pop(struct commit_stack *stack)
+{
+ return stack->nr ? stack->items[--stack->nr] : NULL;
+}
- while (parents) {
- struct commit *commit = pop_commit(&parents);
+static void commit_stack_clear(struct commit_stack *stack)
+{
+ FREE_AND_NULL(stack->items);
+ stack->nr = stack->alloc = 0;
+}
- while (commit) {
- /*
- * A missing commit is ok iff its parent is marked
- * uninteresting.
- *
- * We just mark such a thing parsed, so that when
- * it is popped next time around, we won't be trying
- * to parse it and get an error.
- */
- if (!commit->object.parsed &&
- !has_object_file(&commit->object.oid))
- commit->object.parsed = 1;
+static void mark_one_parent_uninteresting(struct commit *commit,
+ struct commit_stack *pending)
+{
+ struct commit_list *l;
- if (commit->object.flags & UNINTERESTING)
- break;
+ if (commit->object.flags & UNINTERESTING)
+ return;
+ commit->object.flags |= UNINTERESTING;
- commit->object.flags |= UNINTERESTING;
+ /*
+ * Normally we haven't parsed the parent
+ * yet, so we won't have a parent of a parent
+ * here. However, it may turn out that we've
+ * reached this commit some other way (where it
+ * wasn't uninteresting), in which case we need
+ * to mark its parents recursively too..
+ */
+ for (l = commit->parents; l; l = l->next)
+ commit_stack_push(pending, l->item);
+}
- /*
- * Normally we haven't parsed the parent
- * yet, so we won't have a parent of a parent
- * here. However, it may turn out that we've
- * reached this commit some other way (where it
- * wasn't uninteresting), in which case we need
- * to mark its parents recursively too..
- */
- if (!commit->parents)
- break;
+void mark_parents_uninteresting(struct commit *commit)
+{
+ struct commit_stack pending = COMMIT_STACK_INIT;
+ struct commit_list *l;
- for (l = commit->parents->next; l; l = l->next)
- commit_list_insert(l->item, &parents);
- commit = commit->parents->item;
- }
- }
+ for (l = commit->parents; l; l = l->next)
+ mark_one_parent_uninteresting(l->item, &pending);
+
+ while (pending.nr > 0)
+ mark_one_parent_uninteresting(commit_stack_pop(&pending),
+ &pending);
+
+ commit_stack_clear(&pending);
}
static void add_pending_object_with_path(struct rev_info *revs,
static int rev_compare_tree(struct rev_info *revs,
struct commit *parent, struct commit *commit)
{
- struct tree *t1 = parent->tree;
- struct tree *t2 = commit->tree;
+ struct tree *t1 = get_commit_tree(parent);
+ struct tree *t2 = get_commit_tree(commit);
if (!t1)
return REV_TREE_NEW;
static int rev_same_tree_as_empty(struct rev_info *revs, struct commit *commit)
{
int retval;
- struct tree *t1 = commit->tree;
+ struct tree *t1 = get_commit_tree(commit);
if (!t1)
return 0;
if (!revs->prune)
return;
- if (!commit->tree)
+ if (!get_commit_tree(commit))
return;
if (!commit->parents) {
cb.all_revs = revs;
cb.all_flags = flags;
- cb.refs = get_main_ref_store();
+ cb.refs = get_main_ref_store(the_repository);
for_each_reflog(handle_one_reflog, &cb);
if (!revs->single_worktree)
const char *arg = argv[0];
const char *optarg;
int argcount;
+ const unsigned hexsz = the_hash_algo->hexsz;
/* pseudo revision arguments */
if (!strcmp(arg, "--all") || !strcmp(arg, "--branches") ||
revs->abbrev = strtoul(optarg, NULL, 10);
if (revs->abbrev < MINIMUM_ABBREV)
revs->abbrev = MINIMUM_ABBREV;
- else if (revs->abbrev > 40)
- revs->abbrev = 40;
+ else if (revs->abbrev > hexsz)
+ revs->abbrev = hexsz;
} else if (!strcmp(arg, "--abbrev-commit")) {
revs->abbrev_commit = 1;
revs->abbrev_commit_given = 1;
revs->ignore_missing = 1;
} else if (!strcmp(arg, "--exclude-promisor-objects")) {
if (fetch_if_missing)
- die("BUG: exclude_promisor_objects can only be used when fetch_if_missing is 0");
+ BUG("exclude_promisor_objects can only be used when fetch_if_missing is 0");
revs->exclude_promisor_objects = 1;
} else {
int opts = diff_opt_parse(&revs->diffopt, argv, argc, revs->prefix);
* supported right now, so stick to single worktree.
*/
if (!revs->single_worktree)
- die("BUG: --single-worktree cannot be used together with submodule");
+ BUG("--single-worktree cannot be used together with submodule");
refs = get_submodule_ref_store(submodule);
} else
- refs = get_main_ref_store();
+ refs = get_main_ref_store(the_repository);
/*
* NOTE!
{
if (commit->object.flags & SHOWN)
return commit_ignore;
- if (revs->unpacked && has_sha1_pack(commit->object.oid.hash))
+ if (revs->unpacked && has_object_pack(&commit->object.oid))
return commit_ignore;
if (commit->object.flags & UNINTERESTING)
return commit_ignore;
#include "sequencer.h"
#include "tag.h"
#include "run-command.h"
-#include "exec_cmd.h"
+#include "exec-cmd.h"
#include "utf8.h"
#include "cache-tree.h"
#include "diff.h"
#include "hashmap.h"
#include "notes-utils.h"
#include "sigchain.h"
+#include "unpack-trees.h"
+#include "worktree.h"
+#include "oidmap.h"
+#include "oidset.h"
#define GIT_REFLOG_ACTION "GIT_REFLOG_ACTION"
* previous commit and from the first squash/fixup commit are written
* to it. The commit message for each subsequent squash/fixup commit
* is appended to the file as it is processed.
- *
- * The first line of the file is of the form
- * # This is a combination of $count commits.
- * where $count is the number of commits whose messages have been
- * written to the file so far (including the initial "pick" commit).
- * Each time that a commit message is processed, this line is read and
- * updated. It is deleted just before the combined commit is made.
*/
static GIT_PATH_FUNC(rebase_path_squash_msg, "rebase-merge/message-squash")
/*
* commit without opening the editor.)
*/
static GIT_PATH_FUNC(rebase_path_fixup_msg, "rebase-merge/message-fixup")
+/*
+ * This file contains the list fixup/squash commands that have been
+ * accumulated into message-fixup or message-squash so far.
+ */
+static GIT_PATH_FUNC(rebase_path_current_fixups, "rebase-merge/current-fixups")
/*
* A script to set the GIT_AUTHOR_NAME, GIT_AUTHOR_EMAIL, and
* GIT_AUTHOR_DATE that will be used for the commit that is currently
static GIT_PATH_FUNC(rebase_path_rewritten_list, "rebase-merge/rewritten-list")
static GIT_PATH_FUNC(rebase_path_rewritten_pending,
"rebase-merge/rewritten-pending")
+
+/*
+ * The path of the file containing the OID of the "squash onto" commit, i.e.
+ * the dummy commit used for `reset [new root]`.
+ */
+static GIT_PATH_FUNC(rebase_path_squash_onto, "rebase-merge/squash-onto")
+
+/*
+ * The path of the file listing refs that need to be deleted after the rebase
+ * finishes. This is used by the `label` command to record the need for cleanup.
+ */
+static GIT_PATH_FUNC(rebase_path_refs_to_delete, "rebase-merge/refs-to-delete")
+
/*
* The following files are written by git-rebase just after parsing the
* command-line (and are only consumed, not modified, by the sequencer).
static GIT_PATH_FUNC(rebase_path_gpg_sign_opt, "rebase-merge/gpg_sign_opt")
static GIT_PATH_FUNC(rebase_path_orig_head, "rebase-merge/orig-head")
static GIT_PATH_FUNC(rebase_path_verbose, "rebase-merge/verbose")
+static GIT_PATH_FUNC(rebase_path_signoff, "rebase-merge/signoff")
static GIT_PATH_FUNC(rebase_path_head_name, "rebase-merge/head-name")
static GIT_PATH_FUNC(rebase_path_onto, "rebase-merge/onto")
static GIT_PATH_FUNC(rebase_path_autostash, "rebase-merge/autostash")
int sequencer_remove_state(struct replay_opts *opts)
{
- struct strbuf dir = STRBUF_INIT;
+ struct strbuf buf = STRBUF_INIT;
int i;
+ if (is_rebase_i(opts) &&
+ strbuf_read_file(&buf, rebase_path_refs_to_delete(), 0) > 0) {
+ char *p = buf.buf;
+ while (*p) {
+ char *eol = strchr(p, '\n');
+ if (eol)
+ *eol = '\0';
+ if (delete_ref("(rebase -i) cleanup", p, NULL, 0) < 0)
+ warning(_("could not delete '%s'"), p);
+ if (!eol)
+ break;
+ p = eol + 1;
+ }
+ }
+
free(opts->gpg_sign);
free(opts->strategy);
for (i = 0; i < opts->xopts_nr; i++)
free(opts->xopts[i]);
free(opts->xopts);
+ strbuf_release(&opts->current_fixups);
- strbuf_addstr(&dir, get_dir(opts));
- remove_dir_recursively(&dir, 0);
- strbuf_release(&dir);
+ strbuf_reset(&buf);
+ strbuf_addstr(&buf, get_dir(opts));
+ remove_dir_recursively(&buf, 0);
+ strbuf_release(&buf);
return 0;
}
if (msg_fd < 0)
return error_errno(_("could not lock '%s'"), filename);
if (write_in_full(msg_fd, buf, len) < 0) {
+ error_errno(_("could not write to '%s'"), filename);
rollback_lock_file(&msg_file);
- return error_errno(_("could not write to '%s'"), filename);
+ return -1;
}
if (append_eol && write(msg_fd, "\n", 1) < 0) {
+ error_errno(_("could not write eol to '%s'"), filename);
rollback_lock_file(&msg_file);
- return error_errno(_("could not write eol to '%s'"), filename);
+ return -1;
}
if (commit_lock_file(&msg_file) < 0)
return error(_("failed to finalize '%s'"), filename);
transaction = ref_transaction_begin(&err);
if (!transaction ||
ref_transaction_update(transaction, "HEAD",
- to, unborn ? &null_oid : from,
+ to, unborn && !is_rebase_i(opts) ?
+ &null_oid : from,
0, sb.buf, &err) ||
ref_transaction_commit(transaction, &err)) {
ref_transaction_free(transaction);
o.show_rename_progress = 1;
head_tree = parse_tree_indirect(head);
- next_tree = next ? next->tree : empty_tree();
- base_tree = base ? base->tree : empty_tree();
+ next_tree = next ? get_commit_tree(next) : empty_tree();
+ base_tree = base ? get_commit_tree(base) : empty_tree();
for (xopt = opts->xopts; xopt != opts->xopts + opts->xopts_nr; xopt++)
parse_merge_opt(&o, *xopt);
return !clean;
}
+static struct object_id *get_cache_tree_oid(void)
+{
+ if (!active_cache_tree)
+ active_cache_tree = cache_tree();
+
+ if (!cache_tree_fully_valid(active_cache_tree))
+ if (cache_tree_update(&the_index, 0)) {
+ error(_("unable to update cache tree"));
+ return NULL;
+ }
+
+ return &active_cache_tree->oid;
+}
+
static int is_index_unchanged(void)
{
- struct object_id head_oid;
+ struct object_id head_oid, *cache_tree_oid;
struct commit *head_commit;
if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL))
if (parse_commit(head_commit))
return -1;
- if (!active_cache_tree)
- active_cache_tree = cache_tree();
-
- if (!cache_tree_fully_valid(active_cache_tree))
- if (cache_tree_update(&the_index, 0))
- return error(_("unable to update cache tree"));
+ if (!(cache_tree_oid = get_cache_tree_oid()))
+ return -1;
- return !oidcmp(&active_cache_tree->oid,
- &head_commit->tree->object.oid);
+ return !oidcmp(cache_tree_oid, get_commit_tree_oid(head_commit));
}
static int write_author_script(const char *message)
return NULL;
}
+/* Read author-script and return an ident line (author <email> timestamp) */
+static const char *read_author_ident(struct strbuf *buf)
+{
+ const char *keys[] = {
+ "GIT_AUTHOR_NAME=", "GIT_AUTHOR_EMAIL=", "GIT_AUTHOR_DATE="
+ };
+ char *in, *out, *eol;
+ int i = 0, len;
+
+ if (strbuf_read_file(buf, rebase_path_author_script(), 256) <= 0)
+ return NULL;
+
+ /* dequote values and construct ident line in-place */
+ for (in = out = buf->buf; i < 3 && in - buf->buf < buf->len; i++) {
+ if (!skip_prefix(in, keys[i], (const char **)&in)) {
+ warning("could not parse '%s' (looking for '%s'",
+ rebase_path_author_script(), keys[i]);
+ return NULL;
+ }
+
+ eol = strchrnul(in, '\n');
+ *eol = '\0';
+ sq_dequote(in);
+ len = strlen(in);
+
+ if (i > 0) /* separate values by spaces */
+ *(out++) = ' ';
+ if (i == 1) /* email needs to be surrounded by <...> */
+ *(out++) = '<';
+ memmove(out, in, len);
+ out += len;
+ if (i == 1) /* email needs to be surrounded by <...> */
+ *(out++) = '>';
+ in = eol + 1;
+ }
+
+ if (i < 3) {
+ warning("could not parse '%s' (looking for '%s')",
+ rebase_path_author_script(), keys[i]);
+ return NULL;
+ }
+
+ buf->len = out - buf->buf;
+ return buf->buf;
+}
+
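
[Editor's note: a worked example of the in-place rewrite, with hypothetical author-script contents.]

/*
 * rebase-merge/author-script:
 *
 *     GIT_AUTHOR_NAME='A U Thor'
 *     GIT_AUTHOR_EMAIL='author@example.com'
 *     GIT_AUTHOR_DATE='@1234567890 +0000'
 *
 * is dequoted and compacted in the same buffer into the ident line
 *
 *     A U Thor <author@example.com> @1234567890 +0000
 *
 * which is what commit_tree() below expects as its author argument.
 */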
static const char staged_changes_advice[] =
N_("you have staged changes in your working tree\n"
"If these changes are meant to be squashed into the previous commit, run:\n"
#define AMEND_MSG (1<<2)
#define CLEANUP_MSG (1<<3)
#define VERIFY_MSG (1<<4)
+#define CREATE_ROOT_COMMIT (1<<5)
/*
* If we are cherry-pick, and if the merge did not result in
struct child_process cmd = CHILD_PROCESS_INIT;
const char *value;
+ if (flags & CREATE_ROOT_COMMIT) {
+ struct strbuf msg = STRBUF_INIT, script = STRBUF_INIT;
+ const char *author = is_rebase_i(opts) ?
+ read_author_ident(&script) : NULL;
+ struct object_id root_commit, *cache_tree_oid;
+ int res = 0;
+
+ if (!defmsg)
+ BUG("root commit without message");
+
+ if (!(cache_tree_oid = get_cache_tree_oid()))
+ res = -1;
+
+ if (!res)
+ res = strbuf_read_file(&msg, defmsg, 0);
+
+ if (res <= 0)
+ res = error_errno(_("could not read '%s'"), defmsg);
+ else
+ res = commit_tree(msg.buf, msg.len, cache_tree_oid,
+ NULL, &root_commit, author,
+ opts->gpg_sign);
+
+ strbuf_release(&msg);
+ strbuf_release(&script);
+ if (!res) {
+ update_ref(NULL, "CHERRY_PICK_HEAD", &root_commit, NULL,
+ REF_NO_DEREF, UPDATE_REFS_MSG_ON_ERR);
+ res = update_ref(NULL, "HEAD", &root_commit, NULL, 0,
+ UPDATE_REFS_MSG_ON_ERR);
+ }
+ return res < 0 ? error(_("writing root commit")) : 0;
+ }
+
cmd.git_cmd = 1;
if (is_rebase_i(opts)) {
argv_array_pushf(&cmd.args, "-S%s", opts->gpg_sign);
if (defmsg)
argv_array_pushl(&cmd.args, "-F", defmsg, NULL);
+ else if (!(flags & EDIT_MSG))
+ argv_array_pushl(&cmd.args, "-C", "HEAD", NULL);
if ((flags & CLEANUP_MSG))
argv_array_push(&cmd.args, "--cleanup=strip");
if ((flags & EDIT_MSG))
}
if (!(flags & ALLOW_EMPTY) && !oidcmp(current_head ?
- ¤t_head->tree->object.oid :
+ get_commit_tree_oid(current_head) :
- &empty_tree_oid, &tree)) {
+ the_hash_algo->empty_tree, &tree)) {
res = 1; /* run 'git commit' to display error message */
goto out;
}
goto out;
}
+ reset_ident_date();
+
if (commit_tree_extended(msg->buf, msg->len, &tree, parents,
oid, author, opts->gpg_sign, extra)) {
res = error(_("failed to write commit object"));
{
int res = 1;
- if (!(flags & EDIT_MSG) && !(flags & VERIFY_MSG)) {
+ if (!(flags & EDIT_MSG) && !(flags & VERIFY_MSG) &&
+ !(flags & CREATE_ROOT_COMMIT)) {
struct object_id oid;
struct strbuf sb = STRBUF_INIT;
if (parse_commit(parent))
return error(_("could not parse parent commit %s"),
oid_to_hex(&parent->object.oid));
- ptree_oid = &parent->tree->object.oid;
+ ptree_oid = get_commit_tree_oid(parent);
} else {
ptree_oid = the_hash_algo->empty_tree; /* commit is root */
}
- return !oidcmp(ptree_oid, &commit->tree->object.oid);
+ return !oidcmp(ptree_oid, get_commit_tree_oid(commit));
}
/*
TODO_SQUASH,
/* commands that do something else than handling a single commit */
TODO_EXEC,
+ TODO_LABEL,
+ TODO_RESET,
+ TODO_MERGE,
/* commands that do nothing but are counted for reporting progress */
TODO_NOOP,
TODO_DROP,
{ 'f', "fixup" },
{ 's', "squash" },
{ 'x', "exec" },
+ { 'l', "label" },
+ { 't', "reset" },
+ { 'm', "merge" },
{ 0, "noop" },
{ 'd', "drop" },
{ 0, NULL }
return command == TODO_FIXUP || command == TODO_SQUASH;
}
+/* Does this command create a (non-merge) commit? */
+static int is_pick_or_similar(enum todo_command command)
+{
+ switch (command) {
+ case TODO_PICK:
+ case TODO_REVERT:
+ case TODO_EDIT:
+ case TODO_REWORD:
+ case TODO_FIXUP:
+ case TODO_SQUASH:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
static int update_squash_messages(enum todo_command command,
struct commit *commit, struct replay_opts *opts)
{
struct strbuf buf = STRBUF_INIT;
- int count, res;
+ int res;
const char *message, *body;
- if (file_exists(rebase_path_squash_msg())) {
+ if (opts->current_fixup_count > 0) {
struct strbuf header = STRBUF_INIT;
- char *eol, *p;
+ char *eol;
- if (strbuf_read_file(&buf, rebase_path_squash_msg(), 2048) <= 0)
+ if (strbuf_read_file(&buf, rebase_path_squash_msg(), 9) <= 0)
return error(_("could not read '%s'"),
rebase_path_squash_msg());
- p = buf.buf + 1;
- eol = strchrnul(buf.buf, '\n');
- if (buf.buf[0] != comment_line_char ||
- (p += strcspn(p, "0123456789\n")) == eol)
- return error(_("unexpected 1st line of squash message:"
- "\n\n\t%.*s"),
- (int)(eol - buf.buf), buf.buf);
- count = strtol(p, NULL, 10);
-
- if (count < 1)
- return error(_("invalid 1st line of squash message:\n"
- "\n\t%.*s"),
- (int)(eol - buf.buf), buf.buf);
+ eol = buf.buf[0] != comment_line_char ?
+ buf.buf : strchrnul(buf.buf, '\n');
strbuf_addf(&header, "%c ", comment_line_char);
- strbuf_addf(&header,
- _("This is a combination of %d commits."), ++count);
+ strbuf_addf(&header, _("This is a combination of %d commits."),
+ opts->current_fixup_count + 2);
strbuf_splice(&buf, 0, eol - buf.buf, header.buf, header.len);
strbuf_release(&header);
} else {
rebase_path_fixup_msg());
}
- count = 2;
strbuf_addf(&buf, "%c ", comment_line_char);
- strbuf_addf(&buf, _("This is a combination of %d commits."),
- count);
+ strbuf_addf(&buf, _("This is a combination of %d commits."), 2);
strbuf_addf(&buf, "\n%c ", comment_line_char);
strbuf_addstr(&buf, _("This is the 1st commit message:"));
strbuf_addstr(&buf, "\n\n");
if (command == TODO_SQUASH) {
unlink(rebase_path_fixup_msg());
strbuf_addf(&buf, "\n%c ", comment_line_char);
- strbuf_addf(&buf, _("This is the commit message #%d:"), count);
+ strbuf_addf(&buf, _("This is the commit message #%d:"),
+ ++opts->current_fixup_count);
strbuf_addstr(&buf, "\n\n");
strbuf_addstr(&buf, body);
} else if (command == TODO_FIXUP) {
strbuf_addf(&buf, "\n%c ", comment_line_char);
strbuf_addf(&buf, _("The commit message #%d will be skipped:"),
- count);
+ ++opts->current_fixup_count);
strbuf_addstr(&buf, "\n\n");
strbuf_add_commented_lines(&buf, body, strlen(body));
} else
res = write_message(buf.buf, buf.len, rebase_path_squash_msg(), 0);
strbuf_release(&buf);
+
+ if (!res) {
+ strbuf_addf(&opts->current_fixups, "%s%s %s",
+ opts->current_fixups.len ? "\n" : "",
+ command_to_string(command),
+ oid_to_hex(&commit->object.oid));
+ res = write_message(opts->current_fixups.buf,
+ opts->current_fixups.len,
+ rebase_path_current_fixups(), 0);
+ }
+
return res;
}
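A sketch of the "current-fixups" state file written above (object IDs hypothetical): one "<command> <oid>" line per queued fixup/squash, e.g.

    fixup 0123456789012345678901234567890123456789
    squash 89abcdef89abcdef89abcdef89abcdef89abcdef

read_populate_opts() below restores current_fixup_count by counting these lines.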
return error(_("your index file is unmerged."));
} else {
unborn = get_oid("HEAD", &head);
- if (unborn)
+ /* Do we want to generate a root commit? */
+ if (is_pick_or_similar(command) && opts->have_squash_onto &&
+ !oidcmp(&head, &opts->squash_onto)) {
+ if (is_fixup(command))
+ return error(_("cannot fixup root commit"));
+ flags |= CREATE_ROOT_COMMIT;
+ unborn = 1;
+ } else if (unborn)
oidcpy(&head, the_hash_algo->empty_tree);
- if (index_differs_from(unborn ? EMPTY_TREE_SHA1_HEX : "HEAD",
+ if (index_differs_from(unborn ? empty_tree_oid_hex() : "HEAD",
NULL, 0))
return error_dirty_index(opts);
}
}
}
- if (opts->signoff)
+ if (opts->signoff && !is_fixup(command))
append_signoff(&msgbuf, 0, 0);
if (is_rebase_i(opts) && write_author_script(msg.message) < 0)
if (!res && final_fixup) {
unlink(rebase_path_fixup_msg());
unlink(rebase_path_squash_msg());
+ unlink(rebase_path_current_fixups());
+ strbuf_reset(&opts->current_fixups);
+ opts->current_fixup_count = 0;
}
leave:
return 0;
}
+enum todo_item_flags {
+ TODO_EDIT_MERGE_MSG = 1
+};
+
struct todo_item {
enum todo_command command;
struct commit *commit;
+ unsigned int flags;
const char *arg;
int arg_len;
size_t offset_in_buf;
char *end_of_object_name;
int i, saved, status, padding;
+ item->flags = 0;
+
/* left-trim */
bol += strspn(bol, " \t");
return error(_("missing arguments for %s"),
command_to_string(item->command));
- if (item->command == TODO_EXEC) {
+ if (item->command == TODO_EXEC || item->command == TODO_LABEL ||
+ item->command == TODO_RESET) {
item->commit = NULL;
item->arg = bol;
item->arg_len = (int)(eol - bol);
return 0;
}
+ if (item->command == TODO_MERGE) {
+ if (skip_prefix(bol, "-C", &bol))
+ bol += strspn(bol, " \t");
+ else if (skip_prefix(bol, "-c", &bol)) {
+ bol += strspn(bol, " \t");
+ item->flags |= TODO_EDIT_MERGE_MSG;
+ } else {
+ item->flags |= TODO_EDIT_MERGE_MSG;
+ item->commit = NULL;
+ item->arg = bol;
+ item->arg_len = (int)(eol - bol);
+ return 0;
+ }
+ }
+
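For reference, a sketch of the todo-list forms this parser accepts ("deadbee" and "mybranch" are placeholders):

    merge -C deadbee mybranch # Merge branch 'mybranch'
    merge -c deadbee mybranch # Merge branch 'mybranch'
    merge mybranch

"-C" reuses the message of commit deadbee as-is, "-c" reuses it but opens the editor (TODO_EDIT_MERGE_MSG), and the bare form carries no original commit, so a message is synthesized (and edited) in do_merge() below.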
end_of_object_name = (char *) bol + strcspn(bol, " \t\n");
saved = *end_of_object_name;
*end_of_object_name = '\0';
return count;
}
+static int get_item_line_offset(struct todo_list *todo_list, int index)
+{
+ return index < todo_list->nr ?
+ todo_list->items[index].offset_in_buf : todo_list->buf.len;
+}
+
+static const char *get_item_line(struct todo_list *todo_list, int index)
+{
+ return todo_list->buf.buf + get_item_line_offset(todo_list, index);
+}
+
+static int get_item_line_length(struct todo_list *todo_list, int index)
+{
+ return get_item_line_offset(todo_list, index + 1)
+ - get_item_line_offset(todo_list, index);
+}
+
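Usage sketch: printf("%.*s", get_item_line_length(todo_list, i), get_item_line(todo_list, i)) prints todo line i verbatim (the length includes the trailing newline); the rescheduling advice below quotes the failing command exactly this way.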
static ssize_t strbuf_read_file_or_whine(struct strbuf *sb, const char *path)
{
int fd;
if (file_exists(rebase_path_verbose()))
opts->verbose = 1;
+ if (file_exists(rebase_path_signoff())) {
+ opts->allow_ff = 0;
+ opts->signoff = 1;
+ }
+
read_strategy_opts(opts, &buf);
strbuf_release(&buf);
+ if (read_oneliner(&opts->current_fixups,
+ rebase_path_current_fixups(), 1)) {
+ const char *p = opts->current_fixups.buf;
+ opts->current_fixup_count = 1;
+ while ((p = strchr(p, '\n'))) {
+ opts->current_fixup_count++;
+ p++;
+ }
+ }
+
+ if (read_oneliner(&buf, rebase_path_squash_onto(), 0)) {
+ if (get_oid_hex(buf.buf, &opts->squash_onto) < 0)
+ return error(_("unusable squash-onto"));
+ opts->have_squash_onto = 1;
+ }
+
return 0;
}
written = write_in_full(fd, buf.buf, buf.len);
strbuf_release(&buf);
if (written < 0) {
+ error_errno(_("could not write to '%s'"), git_path_head_file());
rollback_lock_file(&head_lock);
- return error_errno(_("could not write to '%s'"),
- git_path_head_file());
+ return -1;
}
if (commit_lock_file(&head_lock) < 0)
return error(_("failed to finalize '%s'"), git_path_head_file());
fd = hold_lock_file_for_update(&todo_lock, todo_path, 0);
if (fd < 0)
return error_errno(_("could not lock '%s'"), todo_path);
- offset = next < todo_list->nr ?
- todo_list->items[next].offset_in_buf : todo_list->buf.len;
+ offset = get_item_line_offset(todo_list, next);
if (write_in_full(fd, todo_list->buf.buf + offset,
todo_list->buf.len - offset) < 0)
return error_errno(_("could not write to '%s'"), todo_path);
if (commit_lock_file(&todo_lock) < 0)
return error(_("failed to finalize '%s'"), todo_path);
- if (is_rebase_i(opts)) {
- const char *done_path = rebase_path_done();
- int fd = open(done_path, O_CREAT | O_WRONLY | O_APPEND, 0666);
- int prev_offset = !next ? 0 :
- todo_list->items[next - 1].offset_in_buf;
+ if (is_rebase_i(opts) && next > 0) {
+ const char *done = rebase_path_done();
+ int fd = open(done, O_CREAT | O_WRONLY | O_APPEND, 0666);
+ int ret = 0;
- if (fd >= 0 && offset > prev_offset &&
- write_in_full(fd, todo_list->buf.buf + prev_offset,
- offset - prev_offset) < 0) {
- close(fd);
- return error_errno(_("could not write to '%s'"),
- done_path);
- }
- if (fd >= 0)
- close(fd);
+ if (fd < 0)
+ return 0;
+ if (write_in_full(fd, get_item_line(todo_list, next - 1),
+ get_item_line_length(todo_list, next - 1))
+ < 0)
+ ret = error_errno(_("could not write to '%s'"), done);
+ if (close(fd) < 0)
+ ret = error_errno(_("failed to finalize '%s'"), done);
+ return ret;
}
return 0;
}
static int error_failed_squash(struct commit *commit,
struct replay_opts *opts, int subject_len, const char *subject)
{
- if (rename(rebase_path_squash_msg(), rebase_path_message()))
- return error(_("could not rename '%s' to '%s'"),
+ if (copy_file(rebase_path_message(), rebase_path_squash_msg(), 0666))
+ return error(_("could not copy '%s' to '%s'"),
rebase_path_squash_msg(), rebase_path_message());
- unlink(rebase_path_fixup_msg());
unlink(git_path_merge_msg());
if (copy_file(git_path_merge_msg(), rebase_path_message(), 0666))
return error(_("could not copy '%s' to '%s'"),
return status;
}
+static int safe_append(const char *filename, const char *fmt, ...)
+{
+ va_list ap;
+ struct lock_file lock = LOCK_INIT;
+ int fd = hold_lock_file_for_update(&lock, filename,
+ LOCK_REPORT_ON_ERROR);
+ struct strbuf buf = STRBUF_INIT;
+
+ if (fd < 0)
+ return -1;
+
+ if (strbuf_read_file(&buf, filename, 0) < 0 && errno != ENOENT) {
+ error_errno(_("could not read '%s'"), filename);
+ rollback_lock_file(&lock);
+ return -1;
+ }
+ strbuf_complete(&buf, '\n');
+ va_start(ap, fmt);
+ strbuf_vaddf(&buf, fmt, ap);
+ va_end(ap);
+
+ if (write_in_full(fd, buf.buf, buf.len) < 0) {
+ error_errno(_("could not write to '%s'"), filename);
+ strbuf_release(&buf);
+ rollback_lock_file(&lock);
+ return -1;
+ }
+ if (commit_lock_file(&lock) < 0) {
+ strbuf_release(&buf);
+ rollback_lock_file(&lock);
+ return error(_("failed to finalize '%s'"), filename);
+ }
+
+ strbuf_release(&buf);
+ return 0;
+}
+
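Note on the pattern above: the existing content is re-read and the whole file is rewritten through the lockfile's temporary file, so a concurrent reader never sees a partially appended list, and hold_lock_file_for_update() serializes competing appenders.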
+static int do_label(const char *name, int len)
+{
+ struct ref_store *refs = get_main_ref_store(the_repository);
+ struct ref_transaction *transaction;
+ struct strbuf ref_name = STRBUF_INIT, err = STRBUF_INIT;
+ struct strbuf msg = STRBUF_INIT;
+ int ret = 0;
+ struct object_id head_oid;
+
+ if (len == 1 && *name == '#')
+ return error("Illegal label name: '%.*s'", len, name);
+
+ strbuf_addf(&ref_name, "refs/rewritten/%.*s", len, name);
+ strbuf_addf(&msg, "rebase -i (label) '%.*s'", len, name);
+
+ transaction = ref_store_transaction_begin(refs, &err);
+ if (!transaction) {
+ error("%s", err.buf);
+ ret = -1;
+ } else if (get_oid("HEAD", &head_oid)) {
+ error(_("could not read HEAD"));
+ ret = -1;
+ } else if (ref_transaction_update(transaction, ref_name.buf, &head_oid,
+ NULL, 0, msg.buf, &err) < 0 ||
+ ref_transaction_commit(transaction, &err)) {
+ error("%s", err.buf);
+ ret = -1;
+ }
+ ref_transaction_free(transaction);
+ strbuf_release(&err);
+ strbuf_release(&msg);
+
+ if (!ret)
+ ret = safe_append(rebase_path_refs_to_delete(),
+ "%s\n", ref_name.buf);
+ strbuf_release(&ref_name);
+
+ return ret;
+}
+
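A worked sketch (hypothetical label): "label mybranch" stores the current HEAD in refs/rewritten/mybranch via the ref transaction above, and records that ref name in the refs-to-delete file so the whole refs/rewritten/ namespace can be dropped once the rebase concludes.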
+static const char *reflog_message(struct replay_opts *opts,
+ const char *sub_action, const char *fmt, ...);
+
+static int do_reset(const char *name, int len, struct replay_opts *opts)
+{
+ struct strbuf ref_name = STRBUF_INIT;
+ struct object_id oid;
+ struct lock_file lock = LOCK_INIT;
+ struct tree_desc desc;
+ struct tree *tree;
+ struct unpack_trees_options unpack_tree_opts;
+ int ret = 0, i;
+
+ if (hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0)
+ return -1;
+
+ if (len == 10 && !strncmp("[new root]", name, len)) {
+ if (!opts->have_squash_onto) {
+ const char *hex;
+ if (commit_tree("", 0, the_hash_algo->empty_tree,
+ NULL, &opts->squash_onto,
+ NULL, NULL))
+ return error(_("writing fake root commit"));
+ opts->have_squash_onto = 1;
+ hex = oid_to_hex(&opts->squash_onto);
+ if (write_message(hex, strlen(hex),
+ rebase_path_squash_onto(), 0))
+ return error(_("writing squash-onto"));
+ }
+ oidcpy(&oid, &opts->squash_onto);
+ } else {
+ /* Determine the length of the label */
+ for (i = 0; i < len; i++)
+ if (isspace(name[i]))
+ len = i;
+
+ strbuf_addf(&ref_name, "refs/rewritten/%.*s", len, name);
+ if (get_oid(ref_name.buf, &oid) &&
+ get_oid(ref_name.buf + strlen("refs/rewritten/"), &oid)) {
+ error(_("could not read '%s'"), ref_name.buf);
+ rollback_lock_file(&lock);
+ strbuf_release(&ref_name);
+ return -1;
+ }
+ }
+
+ memset(&unpack_tree_opts, 0, sizeof(unpack_tree_opts));
+ setup_unpack_trees_porcelain(&unpack_tree_opts, "reset");
+ unpack_tree_opts.head_idx = 1;
+ unpack_tree_opts.src_index = &the_index;
+ unpack_tree_opts.dst_index = &the_index;
+ unpack_tree_opts.fn = oneway_merge;
+ unpack_tree_opts.merge = 1;
+ unpack_tree_opts.update = 1;
+
+ if (read_cache_unmerged()) {
+ rollback_lock_file(&lock);
+ strbuf_release(&ref_name);
+ return error_resolve_conflict(_(action_name(opts)));
+ }
+
+ if (!fill_tree_descriptor(&desc, &oid)) {
+ error(_("failed to find tree of %s"), oid_to_hex(&oid));
+ rollback_lock_file(&lock);
+ free((void *)desc.buffer);
+ strbuf_release(&ref_name);
+ return -1;
+ }
+
+ if (unpack_trees(1, &desc, &unpack_tree_opts)) {
+ rollback_lock_file(&lock);
+ free((void *)desc.buffer);
+ strbuf_release(&ref_name);
+ return -1;
+ }
+
+ tree = parse_tree_indirect(&oid);
+ prime_cache_tree(&the_index, tree);
+
+ if (write_locked_index(&the_index, &lock, COMMIT_LOCK) < 0)
+ ret = error(_("could not write index"));
+ free((void *)desc.buffer);
+
+ if (!ret)
+ ret = update_ref(reflog_message(opts, "reset", "'%.*s'",
+ len, name), "HEAD", &oid,
+ NULL, 0, UPDATE_REFS_MSG_ON_ERR);
+
+ strbuf_release(&ref_name);
+ return ret;
+}
+
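In short: "reset [new root]" synthesizes an empty root commit once (cached in the squash-onto file so reruns reuse it) and checks out its empty tree, while "reset <label>" performs a one-way merge of the tree behind refs/rewritten/<label> into the index and worktree, then points HEAD at it.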
+static int do_merge(struct commit *commit, const char *arg, int arg_len,
+ int flags, struct replay_opts *opts)
+{
+ int run_commit_flags = (flags & TODO_EDIT_MERGE_MSG) ?
+ EDIT_MSG | VERIFY_MSG : 0;
+ struct strbuf ref_name = STRBUF_INIT;
+ struct commit *head_commit, *merge_commit, *i;
+ struct commit_list *bases, *j, *reversed = NULL;
+ struct merge_options o;
+ int merge_arg_len, oneline_offset, can_fast_forward, ret;
+ static struct lock_file lock;
+ const char *p;
+
+ if (hold_locked_index(&lock, LOCK_REPORT_ON_ERROR) < 0) {
+ ret = -1;
+ goto leave_merge;
+ }
+
+ head_commit = lookup_commit_reference_by_name("HEAD");
+ if (!head_commit) {
+ ret = error(_("cannot merge without a current revision"));
+ goto leave_merge;
+ }
+
+ oneline_offset = arg_len;
+ merge_arg_len = strcspn(arg, " \t\n");
+ p = arg + merge_arg_len;
+ p += strspn(p, " \t\n");
+ if (*p == '#' && (!p[1] || isspace(p[1]))) {
+ p += 1 + strspn(p + 1, " \t\n");
+ oneline_offset = p - arg;
+ } else if (p - arg < arg_len)
+ BUG("octopus merges are not supported yet: '%s'", p);
+
+ strbuf_addf(&ref_name, "refs/rewritten/%.*s", merge_arg_len, arg);
+ merge_commit = lookup_commit_reference_by_name(ref_name.buf);
+ if (!merge_commit) {
+ /* fall back to non-rewritten ref or commit */
+ strbuf_splice(&ref_name, 0, strlen("refs/rewritten/"), "", 0);
+ merge_commit = lookup_commit_reference_by_name(ref_name.buf);
+ }
+
+ if (!merge_commit) {
+ ret = error(_("could not resolve '%s'"), ref_name.buf);
+ goto leave_merge;
+ }
+
+ if (opts->have_squash_onto &&
+ !oidcmp(&head_commit->object.oid, &opts->squash_onto)) {
+ /*
+ * When the user tells us to "merge" something into a
+ * "[new root]", let's simply fast-forward to the merge head.
+ */
+ rollback_lock_file(&lock);
+ ret = fast_forward_to(&merge_commit->object.oid,
+ &head_commit->object.oid, 0, opts);
+ goto leave_merge;
+ }
+
+ if (commit) {
+ const char *message = get_commit_buffer(commit, NULL);
+ const char *body;
+ int len;
+
+ if (!message) {
+ ret = error(_("could not get commit message of '%s'"),
+ oid_to_hex(&commit->object.oid));
+ goto leave_merge;
+ }
+ write_author_script(message);
+ find_commit_subject(message, &body);
+ len = strlen(body);
+ ret = write_message(body, len, git_path_merge_msg(), 0);
+ unuse_commit_buffer(commit, message);
+ if (ret) {
+ error_errno(_("could not write '%s'"),
+ git_path_merge_msg());
+ goto leave_merge;
+ }
+ } else {
+ struct strbuf buf = STRBUF_INIT;
+ int len;
+
+ strbuf_addf(&buf, "author %s", git_author_info(0));
+ write_author_script(buf.buf);
+ strbuf_reset(&buf);
+
+ if (oneline_offset < arg_len) {
+ p = arg + oneline_offset;
+ len = arg_len - oneline_offset;
+ } else {
+ strbuf_addf(&buf, "Merge branch '%.*s'",
+ merge_arg_len, arg);
+ p = buf.buf;
+ len = buf.len;
+ }
+
+ ret = write_message(p, len, git_path_merge_msg(), 0);
+ strbuf_release(&buf);
+ if (ret) {
+ error_errno(_("could not write '%s'"),
+ git_path_merge_msg());
+ goto leave_merge;
+ }
+ }
+
+ /*
+ * If HEAD is not identical to the first parent of the original merge
+ * commit, we cannot fast-forward.
+ */
+ can_fast_forward = opts->allow_ff && commit && commit->parents &&
+ !oidcmp(&commit->parents->item->object.oid,
+ &head_commit->object.oid);
+
+ /*
+ * If the merge head is different from the original one, we cannot
+ * fast-forward.
+ */
+ if (can_fast_forward) {
+ struct commit_list *second_parent = commit->parents->next;
+
+ if (second_parent && !second_parent->next &&
+ oidcmp(&merge_commit->object.oid,
+ &second_parent->item->object.oid))
+ can_fast_forward = 0;
+ }
+
+ if (can_fast_forward && commit->parents->next &&
+ !commit->parents->next->next &&
+ !oidcmp(&commit->parents->next->item->object.oid,
+ &merge_commit->object.oid)) {
+ rollback_lock_file(&lock);
+ ret = fast_forward_to(&commit->object.oid,
+ &head_commit->object.oid, 0, opts);
+ goto leave_merge;
+ }
+
+ write_message(oid_to_hex(&merge_commit->object.oid), GIT_SHA1_HEXSZ,
+ git_path_merge_head(), 0);
+ write_message("no-ff", 5, git_path_merge_mode(), 0);
+
+ bases = get_merge_bases(head_commit, merge_commit);
+ if (bases && !oidcmp(&merge_commit->object.oid,
+ &bases->item->object.oid)) {
+ ret = 0;
+ /* skip merging an ancestor of HEAD */
+ goto leave_merge;
+ }
+
+ for (j = bases; j; j = j->next)
+ commit_list_insert(j->item, &reversed);
+ free_commit_list(bases);
+
+ read_cache();
+ init_merge_options(&o);
+ o.branch1 = "HEAD";
+ o.branch2 = ref_name.buf;
+ o.buffer_output = 2;
+
+ ret = merge_recursive(&o, head_commit, merge_commit, reversed, &i);
+ if (ret <= 0)
+ fputs(o.obuf.buf, stdout);
+ strbuf_release(&o.obuf);
+ if (ret < 0) {
+ error(_("could not even attempt to merge '%.*s'"),
+ merge_arg_len, arg);
+ goto leave_merge;
+ }
+ /*
+ * The return value of merge_recursive() is 1 on clean, and 0 on
+ * unclean merge.
+ *
+ * Let's reverse that, so that do_merge() returns 0 upon success and
+ * 1 upon failed merge (keeping the return value -1 for the cases where
+ * we will want to reschedule the `merge` command).
+ */
+ ret = !ret;
+
+ if (active_cache_changed &&
+ write_locked_index(&the_index, &lock, COMMIT_LOCK)) {
+ ret = error(_("merge: Unable to write new index file"));
+ goto leave_merge;
+ }
+
+ rollback_lock_file(&lock);
+ if (ret)
+ rerere(opts->allow_rerere_auto);
+ else
+ /*
+ * In case of problems, we now want to return a positive
+ * value (a negative one would indicate that the `merge`
+ * command needs to be rescheduled).
+ */
+ ret = !!run_git_commit(git_path_merge_msg(), opts,
+ run_commit_flags);
+
+leave_merge:
+ strbuf_release(&ref_name);
+ rollback_lock_file(&lock);
+ return ret;
+}
+
static int is_final_fixup(struct todo_list *todo_list)
{
int i = todo_list->current;
return buf.buf;
}
+static const char rescheduled_advice[] =
+N_("Could not execute the todo command\n"
+"\n"
+" %.*s"
+"\n"
+"It has been rescheduled; To edit the command before continuing, please\n"
+"edit the todo list first:\n"
+"\n"
+" git rebase --edit-todo\n"
+" git rebase --continue\n");
+
static int pick_commits(struct todo_list *todo_list, struct replay_opts *opts)
{
- int res = 0;
+ int res = 0, reschedule = 0;
setenv(GIT_REFLOG_ACTION, action_name(opts), 0);
if (opts->allow_ff)
opts, is_final_fixup(todo_list));
if (is_rebase_i(opts) && res < 0) {
/* Reschedule */
+ advise(_(rescheduled_advice),
+ get_item_line_length(todo_list,
+ todo_list->current),
+ get_item_line(todo_list,
+ todo_list->current));
todo_list->current--;
if (save_todo(todo_list, opts))
return -1;
intend_to_amend();
return error_failed_squash(item->commit, opts,
item->arg_len, item->arg);
- } else if (res && is_rebase_i(opts))
+ } else if (res && is_rebase_i(opts) && item->commit)
return res | error_with_patch(item->commit,
item->arg, item->arg_len, opts, res,
item->command == TODO_REWORD);
/* `current` will be incremented below */
todo_list->current = -1;
}
+ } else if (item->command == TODO_LABEL) {
+ if ((res = do_label(item->arg, item->arg_len)))
+ reschedule = 1;
+ } else if (item->command == TODO_RESET) {
+ if ((res = do_reset(item->arg, item->arg_len, opts)))
+ reschedule = 1;
+ } else if (item->command == TODO_MERGE) {
+ if ((res = do_merge(item->commit,
+ item->arg, item->arg_len,
+ item->flags, opts)) < 0)
+ reschedule = 1;
+ else if (item->commit)
+ record_in_rewritten(&item->commit->object.oid,
+ peek_command(todo_list, 1));
+ if (res > 0)
+ /* failed with merge conflicts */
+ return error_with_patch(item->commit,
+ item->arg,
+ item->arg_len, opts,
+ res, 0);
} else if (!is_noop(item->command))
return error(_("unknown command %d"), item->command);
+ if (reschedule) {
+ advise(_(rescheduled_advice),
+ get_item_line_length(todo_list,
+ todo_list->current),
+ get_item_line(todo_list, todo_list->current));
+ todo_list->current--;
+ if (save_todo(todo_list, opts))
+ return -1;
+ if (item->commit)
+ return error_with_patch(item->commit,
+ item->arg,
+ item->arg_len, opts,
+ res, 0);
+ }
+
todo_list->current++;
if (res)
return res;
return run_command_v_opt(argv, RUN_GIT_CMD);
}
-static int commit_staged_changes(struct replay_opts *opts)
+static int commit_staged_changes(struct replay_opts *opts,
+ struct todo_list *todo_list)
{
unsigned int flags = ALLOW_EMPTY | EDIT_MSG;
+ unsigned int final_fixup = 0, is_clean;
if (has_unstaged_changes(1))
return error(_("cannot rebase: You have unstaged changes."));
- if (!has_uncommitted_changes(0)) {
- const char *cherry_pick_head = git_path_cherry_pick_head();
- if (file_exists(cherry_pick_head) && unlink(cherry_pick_head))
- return error(_("could not remove CHERRY_PICK_HEAD"));
- return 0;
- }
+ is_clean = !has_uncommitted_changes(0);
if (file_exists(rebase_path_amend())) {
struct strbuf rev = STRBUF_INIT;
if (get_oid_hex(rev.buf, &to_amend))
return error(_("invalid contents: '%s'"),
rebase_path_amend());
- if (oidcmp(&head, &to_amend))
+ if (!is_clean && oidcmp(&head, &to_amend))
return error(_("\nYou have uncommitted changes in your "
"working tree. Please, commit them\n"
"first and then run 'git rebase "
"--continue' again."));
+ /*
+ * When skipping a failed fixup/squash, we need to edit the
+ * commit message, the current fixup list and count, and if it
+ * was the last fixup/squash in the chain, we need to clean up
+ * the commit message and if there was a squash, let the user
+ * edit it.
+ */
+ if (is_clean && !oidcmp(&head, &to_amend) &&
+ opts->current_fixup_count > 0 &&
+ file_exists(rebase_path_stopped_sha())) {
+ const char *p = opts->current_fixups.buf;
+ int len = opts->current_fixups.len;
+
+ opts->current_fixup_count--;
+ if (!len)
+ BUG("Incorrect current_fixups:\n%s", p);
+ while (len && p[len - 1] != '\n')
+ len--;
+ strbuf_setlen(&opts->current_fixups, len);
+ if (write_message(p, len, rebase_path_current_fixups(),
+ 0) < 0)
+ return error(_("could not write file: '%s'"),
+ rebase_path_current_fixups());
+
+ /*
+ * If a fixup/squash in a fixup/squash chain failed, the
+ * commit message is already correct, no need to commit
+ * it again.
+ *
+ * Only if it is the final command in the fixup/squash
+ * chain, and only if the chain is longer than a single
+ * fixup/squash command (which was just skipped), do we
+ * actually need to re-commit with a cleaned up commit
+ * message.
+ */
+ if (opts->current_fixup_count > 0 &&
+ !is_fixup(peek_command(todo_list, 0))) {
+ final_fixup = 1;
+ /*
+ * If there was not a single "squash" in the
+ * chain, we only need to clean up the commit
+ * message, no need to bother the user with
+ * opening the commit message in the editor.
+ */
+ if (!starts_with(p, "squash ") &&
+ !strstr(p, "\nsquash "))
+ flags = (flags & ~EDIT_MSG) | CLEANUP_MSG;
+ } else if (is_fixup(peek_command(todo_list, 0))) {
+ /*
+ * We need to update the squash message to skip
+ * the latest commit message.
+ */
+ struct commit *commit;
+ const char *path = rebase_path_squash_msg();
+
+ if (parse_head(&commit) ||
+ !(p = get_commit_buffer(commit, NULL)) ||
+ write_message(p, strlen(p), path, 0)) {
+ unuse_commit_buffer(commit, p);
+ return error(_("could not write file: "
+ "'%s'"), path);
+ }
+ unuse_commit_buffer(commit, p);
+ }
+ }
strbuf_release(&rev);
flags |= AMEND_MSG;
}
- if (run_git_commit(rebase_path_message(), opts, flags))
+ if (is_clean) {
+ const char *cherry_pick_head = git_path_cherry_pick_head();
+
+ if (file_exists(cherry_pick_head) && unlink(cherry_pick_head))
+ return error(_("could not remove CHERRY_PICK_HEAD"));
+ if (!final_fixup)
+ return 0;
+ }
+
+ if (run_git_commit(final_fixup ? NULL : rebase_path_message(),
+ opts, flags))
return error(_("could not commit staged changes."));
unlink(rebase_path_amend());
+ if (final_fixup) {
+ unlink(rebase_path_fixup_msg());
+ unlink(rebase_path_squash_msg());
+ }
+ if (opts->current_fixup_count > 0) {
+ /*
+ * Whether final fixup or not, we just cleaned up the commit
+ * message...
+ */
+ unlink(rebase_path_current_fixups());
+ strbuf_reset(&opts->current_fixups);
+ opts->current_fixup_count = 0;
+ }
return 0;
}
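A concrete sketch of the skip logic above (hypothetical chain): if current-fixups reads "squash aaaa...\nsquash bbbb..." and the second squash failed, "git rebase --skip" drops its line and decrements the count; if the next todo command is not another fixup/squash, the amended commit is rewritten with the accumulated squash message, and the editor opens only because a "squash" remains in the truncated chain; a pure-"fixup" chain would just strip the commented lines via CLEANUP_MSG.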
if (read_and_refresh_cache(opts))
return -1;
+ if (read_populate_opts(opts))
+ return -1;
if (is_rebase_i(opts)) {
- if (commit_staged_changes(opts))
+ if ((res = read_populate_todo(&todo_list, opts)))
+ goto release_todo_list;
+ if (commit_staged_changes(opts, &todo_list))
return -1;
} else if (!file_exists(get_todo_path(opts)))
return continue_single_pick();
- if (read_populate_opts(opts))
- return -1;
- if ((res = read_populate_todo(&todo_list, opts)))
+ else if ((res = read_populate_todo(&todo_list, opts)))
goto release_todo_list;
if (!is_rebase_i(opts)) {
if (!get_oid(name, &oid)) {
if (!lookup_commit_reference_gently(&oid, 1)) {
- enum object_type type = oid_object_info(&oid,
+ enum object_type type = oid_object_info(the_repository,
+ &oid,
NULL);
return error(_("%s: can't cherry-pick a %s"),
name, type_name(type));
strbuf_release(&sob);
}
+struct labels_entry {
+ struct hashmap_entry entry;
+ char label[FLEX_ARRAY];
+};
+
+static int labels_cmp(const void *fndata, const struct labels_entry *a,
+ const struct labels_entry *b, const void *key)
+{
+ return key ? strcmp(a->label, key) : strcmp(a->label, b->label);
+}
+
+struct string_entry {
+ struct oidmap_entry entry;
+ char string[FLEX_ARRAY];
+};
+
+struct label_state {
+ struct oidmap commit2label;
+ struct hashmap labels;
+ struct strbuf buf;
+};
+
+static const char *label_oid(struct object_id *oid, const char *label,
+ struct label_state *state)
+{
+ struct labels_entry *labels_entry;
+ struct string_entry *string_entry;
+ struct object_id dummy;
+ size_t len;
+ int i;
+
+ string_entry = oidmap_get(&state->commit2label, oid);
+ if (string_entry)
+ return string_entry->string;
+
+ /*
+ * For "uninteresting" commits, i.e. commits that are not to be
+ * rebased, and which can therefore not be labeled, we use a unique
+ * abbreviation of the commit name. This is slightly more complicated
+ * than calling find_unique_abbrev() because we also need to make
+ * sure that the abbreviation does not conflict with any other
+ * label.
+ *
+ * We disallow "interesting" commits to be labeled by a string that
+ * is a valid full-length hash, to ensure that we always can find an
+ * abbreviation for any uninteresting commit's name that does not
+ * clash with any other label.
+ */
+ if (!label) {
+ char *p;
+
+ strbuf_reset(&state->buf);
+ strbuf_grow(&state->buf, GIT_SHA1_HEXSZ);
+ label = p = state->buf.buf;
+
+ find_unique_abbrev_r(p, oid, default_abbrev);
+
+ /*
+ * We may need to extend the abbreviated hash so that there is
+ * no conflicting label.
+ */
+ if (hashmap_get_from_hash(&state->labels, strihash(p), p)) {
+ size_t i = strlen(p) + 1;
+
+ oid_to_hex_r(p, oid);
+ for (; i < GIT_SHA1_HEXSZ; i++) {
+ char save = p[i];
+ p[i] = '\0';
+ if (!hashmap_get_from_hash(&state->labels,
+ strihash(p), p))
+ break;
+ p[i] = save;
+ }
+ }
+ } else if (((len = strlen(label)) == GIT_SHA1_HEXSZ &&
+ !get_oid_hex(label, &dummy)) ||
+ (len == 1 && *label == '#') ||
+ hashmap_get_from_hash(&state->labels,
+ strihash(label), label)) {
+ /*
+ * If the label already exists, or if the label is a valid full
+ * OID, or the label is a '#' (which we use as a separator
+ * between merge heads and oneline), we append a dash and a
+ * number to make it unique.
+ */
+ struct strbuf *buf = &state->buf;
+
+ strbuf_reset(buf);
+ strbuf_add(buf, label, len);
+
+ for (i = 2; ; i++) {
+ strbuf_setlen(buf, len);
+ strbuf_addf(buf, "-%d", i);
+ if (!hashmap_get_from_hash(&state->labels,
+ strihash(buf->buf),
+ buf->buf))
+ break;
+ }
+
+ label = buf->buf;
+ }
+
+ FLEX_ALLOC_STR(labels_entry, label, label);
+ hashmap_entry_init(labels_entry, strihash(label));
+ hashmap_add(&state->labels, labels_entry);
+
+ FLEX_ALLOC_STR(string_entry, string, label);
+ oidcpy(&string_entry->entry.oid, oid);
+ oidmap_put(&state->commit2label, string_entry);
+
+ return string_entry->string;
+}
+
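Worked example (hypothetical labels): if two merged branches both produce the label "refactor", the second becomes "refactor-2", the next "refactor-3", and so on; an uninteresting parent gets its unique hash abbreviation instead, extended one hex digit at a time until it stops colliding with existing labels.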
+static int make_script_with_merges(struct pretty_print_context *pp,
+ struct rev_info *revs, FILE *out,
+ unsigned flags)
+{
+ int keep_empty = flags & TODO_LIST_KEEP_EMPTY;
+ int rebase_cousins = flags & TODO_LIST_REBASE_COUSINS;
+ struct strbuf buf = STRBUF_INIT, oneline = STRBUF_INIT;
+ struct strbuf label = STRBUF_INIT;
+ struct commit_list *commits = NULL, **tail = &commits, *iter;
+ struct commit_list *tips = NULL, **tips_tail = &tips;
+ struct commit *commit;
+ struct oidmap commit2todo = OIDMAP_INIT;
+ struct string_entry *entry;
+ struct oidset interesting = OIDSET_INIT, child_seen = OIDSET_INIT,
+ shown = OIDSET_INIT;
+ struct label_state state = { OIDMAP_INIT, { NULL }, STRBUF_INIT };
+
+ int abbr = flags & TODO_LIST_ABBREVIATE_CMDS;
+ const char *cmd_pick = abbr ? "p" : "pick",
+ *cmd_label = abbr ? "l" : "label",
+ *cmd_reset = abbr ? "t" : "reset",
+ *cmd_merge = abbr ? "m" : "merge";
+
+ oidmap_init(&commit2todo, 0);
+ oidmap_init(&state.commit2label, 0);
+ hashmap_init(&state.labels, (hashmap_cmp_fn) labels_cmp, NULL, 0);
+ strbuf_init(&state.buf, 32);
+
+ if (revs->cmdline.nr && (revs->cmdline.rev[0].flags & BOTTOM)) {
+ struct object_id *oid = &revs->cmdline.rev[0].item->oid;
+ FLEX_ALLOC_STR(entry, string, "onto");
+ oidcpy(&entry->entry.oid, oid);
+ oidmap_put(&state.commit2label, entry);
+ }
+
+ /*
+ * First phase:
+ * - get onelines for all commits
+ * - gather all branch tips (i.e. 2nd or later parents of merges)
+ * - label all branch tips
+ */
+ while ((commit = get_revision(revs))) {
+ struct commit_list *to_merge;
+ int is_octopus;
+ const char *p1, *p2;
+ struct object_id *oid;
+ int is_empty;
+
+ tail = &commit_list_insert(commit, tail)->next;
+ oidset_insert(&interesting, &commit->object.oid);
+
+ is_empty = is_original_commit_empty(commit);
+ if (!is_empty && (commit->object.flags & PATCHSAME))
+ continue;
+
+ strbuf_reset(&oneline);
+ pretty_print_commit(pp, commit, &oneline);
+
+ to_merge = commit->parents ? commit->parents->next : NULL;
+ if (!to_merge) {
+ /* non-merge commit: easy case */
+ strbuf_reset(&buf);
+ if (!keep_empty && is_empty)
+ strbuf_addf(&buf, "%c ", comment_line_char);
+ strbuf_addf(&buf, "%s %s %s", cmd_pick,
+ oid_to_hex(&commit->object.oid),
+ oneline.buf);
+
+ FLEX_ALLOC_STR(entry, string, buf.buf);
+ oidcpy(&entry->entry.oid, &commit->object.oid);
+ oidmap_put(&commit2todo, entry);
+
+ continue;
+ }
+
+ is_octopus = to_merge && to_merge->next;
+
+ if (is_octopus)
+ BUG("Octopus merges not yet supported");
+
+ /* Create a label */
+ strbuf_reset(&label);
+ if (skip_prefix(oneline.buf, "Merge ", &p1) &&
+ (p1 = strchr(p1, '\'')) &&
+ (p2 = strchr(++p1, '\'')))
+ strbuf_add(&label, p1, p2 - p1);
+ else if (skip_prefix(oneline.buf, "Merge pull request ",
+ &p1) &&
+ (p1 = strstr(p1, " from ")))
+ strbuf_addstr(&label, p1 + strlen(" from "));
+ else
+ strbuf_addbuf(&label, &oneline);
+
+ for (p1 = label.buf; *p1; p1++)
+ if (isspace(*p1))
+ *(char *)p1 = '-';
+
+ strbuf_reset(&buf);
+ strbuf_addf(&buf, "%s -C %s",
+ cmd_merge, oid_to_hex(&commit->object.oid));
+
+ /* label the tip of merged branch */
+ oid = &to_merge->item->object.oid;
+ strbuf_addch(&buf, ' ');
+
+ if (!oidset_contains(&interesting, oid))
+ strbuf_addstr(&buf, label_oid(oid, NULL, &state));
+ else {
+ tips_tail = &commit_list_insert(to_merge->item,
+ tips_tail)->next;
+
+ strbuf_addstr(&buf, label_oid(oid, label.buf, &state));
+ }
+ strbuf_addf(&buf, " # %s", oneline.buf);
+
+ FLEX_ALLOC_STR(entry, string, buf.buf);
+ oidcpy(&entry->entry.oid, &commit->object.oid);
+ oidmap_put(&commit2todo, entry);
+ }
+
+ /*
+ * Second phase:
+ * - label branch points
+ * - add HEAD to the branch tips
+ */
+ for (iter = commits; iter; iter = iter->next) {
+ struct commit_list *parent = iter->item->parents;
+ for (; parent; parent = parent->next) {
+ struct object_id *oid = &parent->item->object.oid;
+ if (!oidset_contains(&interesting, oid))
+ continue;
+ if (!oidset_contains(&child_seen, oid))
+ oidset_insert(&child_seen, oid);
+ else
+ label_oid(oid, "branch-point", &state);
+ }
+
+ /* Add HEAD as implicit "tip of branch" */
+ if (!iter->next)
+ tips_tail = &commit_list_insert(iter->item,
+ tips_tail)->next;
+ }
+
+ /*
+ * Third phase: output the todo list. This is a bit tricky, as we
+ * want to avoid jumping back and forth between revisions. To
+ * accomplish that goal, we walk backwards from the branch tips,
+ * gathering commits not yet shown, reversing the list on the fly,
+ * then outputting that list (labeling revisions as needed).
+ */
+ fprintf(out, "%s onto\n", cmd_label);
+ for (iter = tips; iter; iter = iter->next) {
+ struct commit_list *list = NULL, *iter2;
+
+ commit = iter->item;
+ if (oidset_contains(&shown, &commit->object.oid))
+ continue;
+ entry = oidmap_get(&state.commit2label, &commit->object.oid);
+
+ if (entry)
+ fprintf(out, "\n# Branch %s\n", entry->string);
+ else
+ fprintf(out, "\n");
+
+ while (oidset_contains(&interesting, &commit->object.oid) &&
+ !oidset_contains(&shown, &commit->object.oid)) {
+ commit_list_insert(commit, &list);
+ if (!commit->parents) {
+ commit = NULL;
+ break;
+ }
+ commit = commit->parents->item;
+ }
+
+ if (!commit)
+ fprintf(out, "%s %s\n", cmd_reset,
+ rebase_cousins ? "onto" : "[new root]");
+ else {
+ const char *to = NULL;
+
+ entry = oidmap_get(&state.commit2label,
+ &commit->object.oid);
+ if (entry)
+ to = entry->string;
+ else if (!rebase_cousins)
+ to = label_oid(&commit->object.oid, NULL,
+ &state);
+
+ if (!to || !strcmp(to, "onto"))
+ fprintf(out, "%s onto\n", cmd_reset);
+ else {
+ strbuf_reset(&oneline);
+ pretty_print_commit(pp, commit, &oneline);
+ fprintf(out, "%s %s # %s\n",
+ cmd_reset, to, oneline.buf);
+ }
+ }
+
+ for (iter2 = list; iter2; iter2 = iter2->next) {
+ struct object_id *oid = &iter2->item->object.oid;
+ entry = oidmap_get(&commit2todo, oid);
+ /* only show if not already upstream */
+ if (entry)
+ fprintf(out, "%s\n", entry->string);
+ entry = oidmap_get(&state.commit2label, oid);
+ if (entry)
+ fprintf(out, "%s %s\n",
+ cmd_label, entry->string);
+ oidset_insert(&shown, oid);
+ }
+
+ free_commit_list(list);
+ }
+
+ free_commit_list(commits);
+ free_commit_list(tips);
+
+ strbuf_release(&label);
+ strbuf_release(&oneline);
+ strbuf_release(&buf);
+
+ oidmap_free(&commit2todo, 1);
+ oidmap_free(&state.commit2label, 1);
+ hashmap_free(&state.labels, 1);
+ strbuf_release(&state.buf);
+
+ return 0;
+}
+
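A sketch of the generated script for a history with one merged topic (labels hypothetical, object IDs shortened for readability):

    label onto

    # Branch refactor-topic
    reset onto
    pick 0123456 topic: first commit
    pick 89abcde topic: second commit
    label refactor-topic

    reset onto
    pick 456789a some commit on the first-parent line
    merge -C fedcba9 refactor-topic # Merge branch 'refactor-topic'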
int sequencer_make_script(FILE *out, int argc, const char **argv,
unsigned flags)
{
struct commit *commit;
int keep_empty = flags & TODO_LIST_KEEP_EMPTY;
const char *insn = flags & TODO_LIST_ABBREVIATE_CMDS ? "p" : "pick";
+ int rebase_merges = flags & TODO_LIST_REBASE_MERGES;
init_revisions(&revs, NULL);
revs.verbose_header = 1;
- revs.max_parents = 1;
- revs.cherry_pick = 1;
+ if (!rebase_merges)
+ revs.max_parents = 1;
+ revs.cherry_mark = 1;
revs.limited = 1;
revs.reverse = 1;
revs.right_only = 1;
if (prepare_revision_walk(&revs) < 0)
return error(_("make_script: error preparing revisions"));
+ if (rebase_merges)
+ return make_script_with_merges(&pp, &revs, out, flags);
+
while ((commit = get_revision(&revs))) {
+ int is_empty = is_original_commit_empty(commit);
+
+ if (!is_empty && (commit->object.flags & PATCHSAME))
+ continue;
strbuf_reset(&buf);
- if (!keep_empty && is_original_commit_empty(commit))
+ if (!keep_empty && is_empty)
strbuf_addf(&buf, "%c ", comment_line_char);
strbuf_addf(&buf, "%s %s ", insn,
oid_to_hex(&commit->object.oid));
short_commit_name(item->commit) :
oid_to_hex(&item->commit->object.oid);
+ if (item->command == TODO_MERGE) {
+ if (item->flags & TODO_EDIT_MERGE_MSG)
+ strbuf_addstr(&buf, " -c");
+ else
+ strbuf_addstr(&buf, " -C");
+ }
+
strbuf_addf(&buf, " %s", oid);
}
+
/* add all the rest */
if (!item->arg_len)
strbuf_addch(&buf, '\n');
oid = &item->commit->object.oid;
}
if (i > 0) {
- int offset = i < todo_list.nr ?
- todo_list.items[i].offset_in_buf : todo_list.buf.len;
+ int offset = get_item_line_offset(&todo_list, i);
const char *done_path = rebase_path_done();
fd = open(done_path, O_CREAT | O_WRONLY | O_APPEND, 0666);
struct subject2item_entry *entry;
next[i] = tail[i] = -1;
- if (item->command >= TODO_EXEC) {
+ if (!item->commit || item->command == TODO_DROP) {
subjects[i] = NULL;
continue;
}
continue;
while (cur >= 0) {
- int offset = todo_list.items[cur].offset_in_buf;
- int end_offset = cur + 1 < todo_list.nr ?
- todo_list.items[cur + 1].offset_in_buf :
- todo_list.buf.len;
- char *bol = todo_list.buf.buf + offset;
- char *eol = todo_list.buf.buf + end_offset;
+ const char *bol =
+ get_item_line(&todo_list, cur);
+ const char *eol =
+ get_item_line(&todo_list, cur + 1);
/* replace 'pick', by 'fixup' or 'squash' */
command = todo_list.items[cur].command;
--- /dev/null
- const struct object_id empty_tree_oid = {
+/*
+ * GIT - The information manager from hell
+ *
+ * Copyright (C) Linus Torvalds, 2005
+ *
+ * This handles basic git sha1 object files - packing, unpacking,
+ * creation etc.
+ */
+#include "cache.h"
+#include "config.h"
+#include "string-list.h"
+#include "lockfile.h"
+#include "delta.h"
+#include "pack.h"
+#include "blob.h"
+#include "commit.h"
+#include "run-command.h"
+#include "tag.h"
+#include "tree.h"
+#include "tree-walk.h"
+#include "refs.h"
+#include "pack-revindex.h"
+#include "sha1-lookup.h"
+#include "bulk-checkin.h"
+#include "repository.h"
+#include "replace-object.h"
+#include "streaming.h"
+#include "dir.h"
+#include "list.h"
+#include "mergesort.h"
+#include "quote.h"
+#include "packfile.h"
+#include "fetch-object.h"
+#include "object-store.h"
+
+/* The maximum size for an object header. */
+#define MAX_HEADER_LEN 32
+
++
++#define EMPTY_TREE_SHA1_BIN_LITERAL \
++ "\x4b\x82\x5d\xc6\x42\xcb\x6e\xb9\xa0\x60" \
++ "\xe5\x4b\xf8\xd6\x92\x88\xfb\xee\x49\x04"
++
++#define EMPTY_BLOB_SHA1_BIN_LITERAL \
++ "\xe6\x9d\xe2\x9b\xb2\xd1\xd6\x43\x4b\x8b" \
++ "\x29\xae\x77\x5a\xd8\xc2\xe4\x8c\x53\x91"
++
+const unsigned char null_sha1[GIT_MAX_RAWSZ];
+const struct object_id null_oid;
- const struct object_id empty_blob_oid = {
++static const struct object_id empty_tree_oid = {
+ EMPTY_TREE_SHA1_BIN_LITERAL
+};
- unsigned char sha1[20];
++static const struct object_id empty_blob_oid = {
+ EMPTY_BLOB_SHA1_BIN_LITERAL
+};
+
+static void git_hash_sha1_init(git_hash_ctx *ctx)
+{
+ git_SHA1_Init(&ctx->sha1);
+}
+
+static void git_hash_sha1_update(git_hash_ctx *ctx, const void *data, size_t len)
+{
+ git_SHA1_Update(&ctx->sha1, data, len);
+}
+
+static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx)
+{
+ git_SHA1_Final(hash, &ctx->sha1);
+}
+
+static void git_hash_unknown_init(git_hash_ctx *ctx)
+{
+ die("trying to init unknown hash");
+}
+
+static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
+{
+ die("trying to update unknown hash");
+}
+
+static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
+{
+ die("trying to finalize unknown hash");
+}
+
+const struct git_hash_algo hash_algos[GIT_HASH_NALGOS] = {
+ {
+ NULL,
+ 0x00000000,
+ 0,
+ 0,
+ git_hash_unknown_init,
+ git_hash_unknown_update,
+ git_hash_unknown_final,
+ NULL,
+ NULL,
+ },
+ {
+ "sha-1",
+ /* "sha1", big-endian */
+ 0x73686131,
+ GIT_SHA1_RAWSZ,
+ GIT_SHA1_HEXSZ,
+ git_hash_sha1_init,
+ git_hash_sha1_update,
+ git_hash_sha1_final,
+ &empty_tree_oid,
+ &empty_blob_oid,
+ },
+};
+
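A minimal sketch (not part of the patch) of driving the vtable above; hashing the loose-object header of an empty tree, "tree 0" plus its NUL terminator, must reproduce the empty_tree_oid constant:

    static void sketch_empty_tree_oid(struct object_id *oid)
    {
    	git_hash_ctx c;

    	the_hash_algo->init_fn(&c);
    	the_hash_algo->update_fn(&c, "tree 0", 7); /* 7 = strlen("tree 0") + NUL */
    	the_hash_algo->final_fn(oid->hash, &c);
    	/* oid->hash now holds 4b825dc642cb6eb9a060e54bf8d69288fbee4904 */
    }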
++const char *empty_tree_oid_hex(void)
++{
++ static char buf[GIT_MAX_HEXSZ + 1];
++ return oid_to_hex_r(buf, the_hash_algo->empty_tree);
++}
++
++const char *empty_blob_oid_hex(void)
++{
++ static char buf[GIT_MAX_HEXSZ + 1];
++ return oid_to_hex_r(buf, the_hash_algo->empty_blob);
++}
++
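Note that both helpers return a pointer to a function-local static buffer, so each call invalidates the previous result; callers such as index_differs_from(unborn ? empty_tree_oid_hex() : "HEAD", ...) in the sequencer consume the string immediately.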
+/*
+ * This is meant to hold a *small* number of objects that you would
+ * want read_sha1_file() to be able to return, but yet you do not want
+ * to write them into the object store (e.g. a browse-only
+ * application).
+ */
+static struct cached_object {
- EMPTY_TREE_SHA1_BIN_LITERAL,
++ struct object_id oid;
+ enum object_type type;
+ void *buf;
+ unsigned long size;
+} *cached_objects;
+static int cached_object_nr, cached_object_alloc;
+
+static struct cached_object empty_tree = {
- static struct cached_object *find_cached_object(const unsigned char *sha1)
++ { EMPTY_TREE_SHA1_BIN_LITERAL },
+ OBJ_TREE,
+ "",
+ 0
+};
+
- if (!hashcmp(co->sha1, sha1))
++static struct cached_object *find_cached_object(const struct object_id *oid)
+{
+ int i;
+ struct cached_object *co = cached_objects;
+
+ for (i = 0; i < cached_object_nr; i++, co++) {
- if (!hashcmp(sha1, empty_tree.sha1))
++ if (!oidcmp(&co->oid, oid))
+ return co;
+ }
- static int check_and_freshen_local(const unsigned char *sha1, int freshen)
++ if (!oidcmp(oid, the_hash_algo->empty_tree))
+ return &empty_tree;
+ return NULL;
+}
+
+
+static int get_conv_flags(unsigned flags)
+{
+ if (flags & HASH_RENORMALIZE)
+ return CONV_EOL_RENORMALIZE;
+ else if (flags & HASH_WRITE_OBJECT)
+ return global_conv_flags_eol | CONV_WRITE_OBJECT;
+ else
+ return 0;
+}
+
+
+int mkdir_in_gitdir(const char *path)
+{
+ if (mkdir(path, 0777)) {
+ int saved_errno = errno;
+ struct stat st;
+ struct strbuf sb = STRBUF_INIT;
+
+ if (errno != EEXIST)
+ return -1;
+ /*
+ * Are we looking at a path in a symlinked worktree
+ * whose original repository does not yet have it?
+ * e.g. .git/rr-cache pointing at its original
+ * repository in which the user hasn't performed any
+ * conflict resolution yet?
+ */
+ if (lstat(path, &st) || !S_ISLNK(st.st_mode) ||
+ strbuf_readlink(&sb, path, st.st_size) ||
+ !is_absolute_path(sb.buf) ||
+ mkdir(sb.buf, 0777)) {
+ strbuf_release(&sb);
+ errno = saved_errno;
+ return -1;
+ }
+ strbuf_release(&sb);
+ }
+ return adjust_shared_perm(path);
+}
+
+enum scld_error safe_create_leading_directories(char *path)
+{
+ char *next_component = path + offset_1st_component(path);
+ enum scld_error ret = SCLD_OK;
+
+ while (ret == SCLD_OK && next_component) {
+ struct stat st;
+ char *slash = next_component, slash_character;
+
+ while (*slash && !is_dir_sep(*slash))
+ slash++;
+
+ if (!*slash)
+ break;
+
+ next_component = slash + 1;
+ while (is_dir_sep(*next_component))
+ next_component++;
+ if (!*next_component)
+ break;
+
+ slash_character = *slash;
+ *slash = '\0';
+ if (!stat(path, &st)) {
+ /* path exists */
+ if (!S_ISDIR(st.st_mode)) {
+ errno = ENOTDIR;
+ ret = SCLD_EXISTS;
+ }
+ } else if (mkdir(path, 0777)) {
+ if (errno == EEXIST &&
+ !stat(path, &st) && S_ISDIR(st.st_mode))
+ ; /* somebody created it since we checked */
+ else if (errno == ENOENT)
+ /*
+ * Either mkdir() failed because
+ * somebody just pruned the containing
+ * directory, or stat() failed because
+ * the file that was in our way was
+ * just removed. Either way, inform
+ * the caller that it might be worth
+ * trying again:
+ */
+ ret = SCLD_VANISHED;
+ else
+ ret = SCLD_FAILED;
+ } else if (adjust_shared_perm(path)) {
+ ret = SCLD_PERMS;
+ }
+ *slash = slash_character;
+ }
+ return ret;
+}
+
+enum scld_error safe_create_leading_directories_const(const char *path)
+{
+ int save_errno;
+ /* path points to cache entries, so xstrdup before messing with it */
+ char *buf = xstrdup(path);
+ enum scld_error result = safe_create_leading_directories(buf);
+
+ save_errno = errno;
+ free(buf);
+ errno = save_errno;
+ return result;
+}
+
+int raceproof_create_file(const char *path, create_file_fn fn, void *cb)
+{
+ /*
+ * The number of times we will try to remove empty directories
+ * in the way of path. This is only 1 because if another
+ * process is racily creating directories that conflict with
+ * us, we don't want to fight against them.
+ */
+ int remove_directories_remaining = 1;
+
+ /*
+ * The number of times that we will try to create the
+ * directories containing path. We are willing to attempt this
+ * more than once, because another process could be trying to
+ * clean up empty directories at the same time as we are
+ * trying to create them.
+ */
+ int create_directories_remaining = 3;
+
+ /* A scratch copy of path, filled lazily if we need it: */
+ struct strbuf path_copy = STRBUF_INIT;
+
+ int ret, save_errno;
+
+ /* Sanity check: */
+ assert(*path);
+
+retry_fn:
+ ret = fn(path, cb);
+ save_errno = errno;
+ if (!ret)
+ goto out;
+
+ if (errno == EISDIR && remove_directories_remaining-- > 0) {
+ /*
+ * A directory is in the way. Maybe it is empty; try
+ * to remove it:
+ */
+ if (!path_copy.len)
+ strbuf_addstr(&path_copy, path);
+
+ if (!remove_dir_recursively(&path_copy, REMOVE_DIR_EMPTY_ONLY))
+ goto retry_fn;
+ } else if (errno == ENOENT && create_directories_remaining-- > 0) {
+ /*
+ * Maybe the containing directory didn't exist, or
+ * maybe it was just deleted by a process that is
+ * racing with us to clean up empty directories. Try
+ * to create it:
+ */
+ enum scld_error scld_result;
+
+ if (!path_copy.len)
+ strbuf_addstr(&path_copy, path);
+
+ do {
+ scld_result = safe_create_leading_directories(path_copy.buf);
+ if (scld_result == SCLD_OK)
+ goto retry_fn;
+ } while (scld_result == SCLD_VANISHED && create_directories_remaining-- > 0);
+ }
+
+out:
+ strbuf_release(&path_copy);
+ errno = save_errno;
+ return ret;
+}
+
+static void fill_sha1_path(struct strbuf *buf, const unsigned char *sha1)
+{
+ int i;
+ for (i = 0; i < 20; i++) {
+ static char hex[] = "0123456789abcdef";
+ unsigned int val = sha1[i];
+ strbuf_addch(buf, hex[val >> 4]);
+ strbuf_addch(buf, hex[val & 0xf]);
+ if (!i)
+ strbuf_addch(buf, '/');
+ }
+}
+
+void sha1_file_name(struct repository *r, struct strbuf *buf, const unsigned char *sha1)
+{
+ strbuf_addstr(buf, r->objects->objectdir);
+ strbuf_addch(buf, '/');
+ fill_sha1_path(buf, sha1);
+}
+
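For example, the empty tree 4b825dc642cb6eb9a060e54bf8d69288fbee4904 is stored as <objectdir>/4b/825dc642cb6eb9a060e54bf8d69288fbee4904: fill_sha1_path() emits two hex digits for the first byte, a '/', then the remaining 38 digits.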
+struct strbuf *alt_scratch_buf(struct alternate_object_database *alt)
+{
+ strbuf_setlen(&alt->scratch, alt->base_len);
+ return &alt->scratch;
+}
+
+static const char *alt_sha1_path(struct alternate_object_database *alt,
+ const unsigned char *sha1)
+{
+ struct strbuf *buf = alt_scratch_buf(alt);
+ fill_sha1_path(buf, sha1);
+ return buf->buf;
+}
+
+/*
+ * Return non-zero iff the path is usable as an alternate object database.
+ */
+static int alt_odb_usable(struct raw_object_store *o,
+ struct strbuf *path,
+ const char *normalized_objdir)
+{
+ struct alternate_object_database *alt;
+
+ /* Detect cases where alternate disappeared */
+ if (!is_directory(path->buf)) {
+ error("object directory %s does not exist; "
+ "check .git/objects/info/alternates.",
+ path->buf);
+ return 0;
+ }
+
+ /*
+ * Prevent the common mistake of listing the same
+ * thing twice, or object directory itself.
+ */
+ for (alt = o->alt_odb_list; alt; alt = alt->next) {
+ if (!fspathcmp(path->buf, alt->path))
+ return 0;
+ }
+ if (!fspathcmp(path->buf, normalized_objdir))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Prepare alternate object database registry.
+ *
+ * The variable alt_odb_list points at the list of struct
+ * alternate_object_database. The elements on this list come from
+ * non-empty elements from colon separated ALTERNATE_DB_ENVIRONMENT
+ * environment variable, and $GIT_OBJECT_DIRECTORY/info/alternates,
+ * whose contents is similar to that environment variable but can be
+ * LF separated. Its base points at a statically allocated buffer that
+ * contains "/the/directory/corresponding/to/.git/objects/...", while
+ * its name points just after the slash at the end of ".git/objects/"
+ * in the example above, and has enough space to hold 40-byte hex
+ * SHA1, an extra slash for the first level indirection, and the
+ * terminating NUL.
+ */
+static void read_info_alternates(struct repository *r,
+ const char *relative_base,
+ int depth);
+static int link_alt_odb_entry(struct repository *r, const char *entry,
+ const char *relative_base, int depth, const char *normalized_objdir)
+{
+ struct alternate_object_database *ent;
+ struct strbuf pathbuf = STRBUF_INIT;
+
+ if (!is_absolute_path(entry) && relative_base) {
+ strbuf_realpath(&pathbuf, relative_base, 1);
+ strbuf_addch(&pathbuf, '/');
+ }
+ strbuf_addstr(&pathbuf, entry);
+
+ if (strbuf_normalize_path(&pathbuf) < 0 && relative_base) {
+ error("unable to normalize alternate object path: %s",
+ pathbuf.buf);
+ strbuf_release(&pathbuf);
+ return -1;
+ }
+
+ /*
+ * The trailing slash after the directory name is given by
+ * this function at the end. Remove duplicates.
+ */
+ while (pathbuf.len && pathbuf.buf[pathbuf.len - 1] == '/')
+ strbuf_setlen(&pathbuf, pathbuf.len - 1);
+
+ if (!alt_odb_usable(r->objects, &pathbuf, normalized_objdir)) {
+ strbuf_release(&pathbuf);
+ return -1;
+ }
+
+ ent = alloc_alt_odb(pathbuf.buf);
+
+ /* add the alternate entry */
+ *r->objects->alt_odb_tail = ent;
+ r->objects->alt_odb_tail = &(ent->next);
+ ent->next = NULL;
+
+ /* recursively add alternates */
+ read_info_alternates(r, pathbuf.buf, depth + 1);
+
+ strbuf_release(&pathbuf);
+ return 0;
+}
+
+static const char *parse_alt_odb_entry(const char *string,
+ int sep,
+ struct strbuf *out)
+{
+ const char *end;
+
+ strbuf_reset(out);
+
+ if (*string == '#') {
+ /* comment; consume up to next separator */
+ end = strchrnul(string, sep);
+ } else if (*string == '"' && !unquote_c_style(out, string, &end)) {
+ /*
+ * quoted path; unquote_c_style has copied the
+ * data for us and set "end". Broken quoting (e.g.,
+ * an entry that doesn't end with a quote) falls
+ * back to the unquoted case below.
+ */
+ } else {
+ /* normal, unquoted path */
+ end = strchrnul(string, sep);
+ strbuf_add(out, string, end - string);
+ }
+
+ if (*end)
+ end++;
+ return end;
+}
+
+static void link_alt_odb_entries(struct repository *r, const char *alt,
+ int sep, const char *relative_base, int depth)
+{
+ struct strbuf objdirbuf = STRBUF_INIT;
+ struct strbuf entry = STRBUF_INIT;
+
+ if (!alt || !*alt)
+ return;
+
+ if (depth > 5) {
+ error("%s: ignoring alternate object stores, nesting too deep.",
+ relative_base);
+ return;
+ }
+
+ strbuf_add_absolute_path(&objdirbuf, r->objects->objectdir);
+ if (strbuf_normalize_path(&objdirbuf) < 0)
+ die("unable to normalize object directory: %s",
+ objdirbuf.buf);
+
+ while (*alt) {
+ alt = parse_alt_odb_entry(alt, sep, &entry);
+ if (!entry.len)
+ continue;
+ link_alt_odb_entry(r, entry.buf,
+ relative_base, depth, objdirbuf.buf);
+ }
+ strbuf_release(&entry);
+ strbuf_release(&objdirbuf);
+}
+
+static void read_info_alternates(struct repository *r,
+ const char *relative_base,
+ int depth)
+{
+ char *path;
+ struct strbuf buf = STRBUF_INIT;
+
+ path = xstrfmt("%s/info/alternates", relative_base);
+ if (strbuf_read_file(&buf, path, 1024) < 0) {
+ warn_on_fopen_errors(path);
+ free(path);
+ return;
+ }
+
+ link_alt_odb_entries(r, buf.buf, '\n', relative_base, depth);
+ strbuf_release(&buf);
+ free(path);
+}
+
+struct alternate_object_database *alloc_alt_odb(const char *dir)
+{
+ struct alternate_object_database *ent;
+
+ FLEX_ALLOC_STR(ent, path, dir);
+ strbuf_init(&ent->scratch, 0);
+ strbuf_addf(&ent->scratch, "%s/", dir);
+ ent->base_len = ent->scratch.len;
+
+ return ent;
+}
+
+void add_to_alternates_file(const char *reference)
+{
+ struct lock_file lock = LOCK_INIT;
+ char *alts = git_pathdup("objects/info/alternates");
+ FILE *in, *out;
+ int found = 0;
+
+ hold_lock_file_for_update(&lock, alts, LOCK_DIE_ON_ERROR);
+ out = fdopen_lock_file(&lock, "w");
+ if (!out)
+ die_errno("unable to fdopen alternates lockfile");
+
+ in = fopen(alts, "r");
+ if (in) {
+ struct strbuf line = STRBUF_INIT;
+
+ while (strbuf_getline(&line, in) != EOF) {
+ if (!strcmp(reference, line.buf)) {
+ found = 1;
+ break;
+ }
+ fprintf_or_die(out, "%s\n", line.buf);
+ }
+
+ strbuf_release(&line);
+ fclose(in);
+ }
+ else if (errno != ENOENT)
+ die_errno("unable to read alternates file");
+
+ if (found) {
+ rollback_lock_file(&lock);
+ } else {
+ fprintf_or_die(out, "%s\n", reference);
+ if (commit_lock_file(&lock))
+ die_errno("unable to move new alternates file into place");
+ if (the_repository->objects->alt_odb_tail)
+ link_alt_odb_entries(the_repository, reference,
+ '\n', NULL, 0);
+ }
+ free(alts);
+}
+
+void add_to_alternates_memory(const char *reference)
+{
+ /*
+ * Make sure alternates are initialized, or else our entry may be
+ * overwritten when they are.
+ */
+ prepare_alt_odb(the_repository);
+
+ link_alt_odb_entries(the_repository, reference,
+ '\n', NULL, 0);
+}
+
+/*
+ * Compute the exact path an alternate is at and returns it. In case of
+ * error NULL is returned and the human readable error is added to `err`
+ * `path` may be relative and should point to $GITDIR.
+ * `err` must not be null.
+ */
+char *compute_alternate_path(const char *path, struct strbuf *err)
+{
+ char *ref_git = NULL;
+ const char *repo, *ref_git_s;
+ int seen_error = 0;
+
+ ref_git_s = real_path_if_valid(path);
+ if (!ref_git_s) {
+ seen_error = 1;
+ strbuf_addf(err, _("path '%s' does not exist"), path);
+ goto out;
+ } else
+ /*
+ * Beware: read_gitfile(), real_path() and mkpath()
+ * return static buffer
+ */
+ ref_git = xstrdup(ref_git_s);
+
+ repo = read_gitfile(ref_git);
+ if (!repo)
+ repo = read_gitfile(mkpath("%s/.git", ref_git));
+ if (repo) {
+ free(ref_git);
+ ref_git = xstrdup(repo);
+ }
+
+ if (!repo && is_directory(mkpath("%s/.git/objects", ref_git))) {
+ char *ref_git_git = mkpathdup("%s/.git", ref_git);
+ free(ref_git);
+ ref_git = ref_git_git;
+ } else if (!is_directory(mkpath("%s/objects", ref_git))) {
+ struct strbuf sb = STRBUF_INIT;
+ seen_error = 1;
+ if (get_common_dir(&sb, ref_git)) {
+ strbuf_addf(err,
+ _("reference repository '%s' as a linked "
+ "checkout is not supported yet."),
+ path);
+ goto out;
+ }
+
+ strbuf_addf(err, _("reference repository '%s' is not a "
+ "local repository."), path);
+ goto out;
+ }
+
+ if (!access(mkpath("%s/shallow", ref_git), F_OK)) {
+ strbuf_addf(err, _("reference repository '%s' is shallow"),
+ path);
+ seen_error = 1;
+ goto out;
+ }
+
+ if (!access(mkpath("%s/info/grafts", ref_git), F_OK)) {
+ strbuf_addf(err,
+ _("reference repository '%s' is grafted"),
+ path);
+ seen_error = 1;
+ goto out;
+ }
+
+out:
+ if (seen_error) {
+ FREE_AND_NULL(ref_git);
+ }
+
+ return ref_git;
+}
+
+int foreach_alt_odb(alt_odb_fn fn, void *cb)
+{
+ struct alternate_object_database *ent;
+ int r = 0;
+
+ prepare_alt_odb(the_repository);
+ for (ent = the_repository->objects->alt_odb_list; ent; ent = ent->next) {
+ r = fn(ent, cb);
+ if (r)
+ break;
+ }
+ return r;
+}
+
+void prepare_alt_odb(struct repository *r)
+{
+ if (r->objects->alt_odb_tail)
+ return;
+
+ r->objects->alt_odb_tail = &r->objects->alt_odb_list;
+ link_alt_odb_entries(r, r->objects->alternate_db, PATH_SEP, NULL, 0);
+
+ read_info_alternates(r, r->objects->objectdir, 0);
+}
+
+/* Returns 1 if we have successfully freshened the file, 0 otherwise. */
+static int freshen_file(const char *fn)
+{
+ struct utimbuf t;
+ t.actime = t.modtime = time(NULL);
+ return !utime(fn, &t);
+}
+
+/*
+ * All of the check_and_freshen functions return 1 if the file exists and was
+ * freshened (if freshening was requested), 0 otherwise. If they return
+ * 0, you should not assume that it is safe to skip a write of the object (it
+ * either does not exist on disk, or has a stale mtime and may be subject to
+ * pruning).
+ */
+int check_and_freshen_file(const char *fn, int freshen)
+{
+ if (access(fn, F_OK))
+ return 0;
+ if (freshen && !freshen_file(fn))
+ return 0;
+ return 1;
+}
+
- static int check_and_freshen_local(const unsigned char *sha1, int freshen)
++static int check_and_freshen_local(const struct object_id *oid, int freshen)
+{
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
- sha1_file_name(the_repository, &buf, sha1);
++ sha1_file_name(the_repository, &buf, oid->hash);
+
+ return check_and_freshen_file(buf.buf, freshen);
+}
+
- static int check_and_freshen_nonlocal(const unsigned char *sha1, int freshen)
++static int check_and_freshen_nonlocal(const struct object_id *oid, int freshen)
+{
+ struct alternate_object_database *alt;
+ prepare_alt_odb(the_repository);
+ for (alt = the_repository->objects->alt_odb_list; alt; alt = alt->next) {
- const char *path = alt_sha1_path(alt, sha1);
++ const char *path = alt_sha1_path(alt, oid->hash);
+ if (check_and_freshen_file(path, freshen))
+ return 1;
+ }
+ return 0;
+}
+
- static int check_and_freshen(const unsigned char *sha1, int freshen)
++static int check_and_freshen(const struct object_id *oid, int freshen)
+{
- return check_and_freshen_local(sha1, freshen) ||
- check_and_freshen_nonlocal(sha1, freshen);
++ return check_and_freshen_local(oid, freshen) ||
++ check_and_freshen_nonlocal(oid, freshen);
+}
+
- int has_loose_object_nonlocal(const unsigned char *sha1)
++int has_loose_object_nonlocal(const struct object_id *oid)
+{
- return check_and_freshen_nonlocal(sha1, 0);
++ return check_and_freshen_nonlocal(oid, 0);
+}
+
- static int has_loose_object(const unsigned char *sha1)
++static int has_loose_object(const struct object_id *oid)
+{
- return check_and_freshen(sha1, 0);
++ return check_and_freshen(oid, 0);
+}
+
+static void mmap_limit_check(size_t length)
+{
+ static size_t limit = 0;
+ if (!limit) {
+ limit = git_env_ulong("GIT_MMAP_LIMIT", 0);
+ if (!limit)
+ limit = SIZE_MAX;
+ }
+ if (length > limit)
+ die("attempting to mmap %"PRIuMAX" over limit %"PRIuMAX,
+ (uintmax_t)length, (uintmax_t)limit);
+}
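+/*
+ * Note (annotation): git_env_ulong() accepts scaled values, so e.g.
+ * running a command with GIT_MMAP_LIMIT=32m in the environment makes
+ * any single mmap larger than 32 MiB die with the message above; the
+ * tests use this to force the streaming code paths.
+ */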
+
+void *xmmap_gently(void *start, size_t length,
+ int prot, int flags, int fd, off_t offset)
+{
+ void *ret;
+
+ mmap_limit_check(length);
+ ret = mmap(start, length, prot, flags, fd, offset);
+ if (ret == MAP_FAILED) {
+ if (!length)
+ return NULL;
+ release_pack_memory(length);
+ ret = mmap(start, length, prot, flags, fd, offset);
+ }
+ return ret;
+}
+
+void *xmmap(void *start, size_t length,
+ int prot, int flags, int fd, off_t offset)
+{
+ void *ret = xmmap_gently(start, length, prot, flags, fd, offset);
+ if (ret == MAP_FAILED)
+ die_errno("mmap failed");
+ return ret;
+}
+
+/*
+ * With the in-core object data in "map", rehash it to make sure the
+ * object name actually matches "oid" to detect object corruption.
+ * With "map" == NULL, try reading the object named with "oid" using
+ * the streaming interface and rehash it to do the same.
+ */
+int check_object_signature(const struct object_id *oid, void *map,
+ unsigned long size, const char *type)
+{
+ struct object_id real_oid;
+ enum object_type obj_type;
+ struct git_istream *st;
+ git_hash_ctx c;
+ char hdr[MAX_HEADER_LEN];
+ int hdrlen;
+
+ if (map) {
+ hash_object_file(map, size, type, &real_oid);
+ return oidcmp(oid, &real_oid) ? -1 : 0;
+ }
+
+ st = open_istream(oid, &obj_type, &size, NULL);
+ if (!st)
+ return -1;
+
+ /* Generate the header */
+ hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(obj_type), size) + 1;
+
+ /* Hash the header, then the streamed contents. */
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
+ for (;;) {
+ char buf[1024 * 16];
+ ssize_t readlen = read_istream(st, buf, sizeof(buf));
+
+ if (readlen < 0) {
+ close_istream(st);
+ return -1;
+ }
+ if (!readlen)
+ break;
+ the_hash_algo->update_fn(&c, buf, readlen);
+ }
+ the_hash_algo->final_fn(real_oid.hash, &c);
+ close_istream(st);
+ return oidcmp(oid, &real_oid) ? -1 : 0;
+}
+
+int git_open_cloexec(const char *name, int flags)
+{
+ int fd;
+ static int o_cloexec = O_CLOEXEC;
+
+ fd = open(name, flags | o_cloexec);
+ if ((o_cloexec & O_CLOEXEC) && fd < 0 && errno == EINVAL) {
+ /* Try again w/o O_CLOEXEC: the kernel might not support it */
+ o_cloexec &= ~O_CLOEXEC;
+ fd = open(name, flags | o_cloexec);
+ }
+
+#if defined(F_GETFD) && defined(F_SETFD) && defined(FD_CLOEXEC)
+ {
+ static int fd_cloexec = FD_CLOEXEC;
+
+ if (!o_cloexec && 0 <= fd && fd_cloexec) {
+ /* Opened w/o O_CLOEXEC? try with fcntl(2) to add it */
+ int flags = fcntl(fd, F_GETFD);
+ if (fcntl(fd, F_SETFD, flags | fd_cloexec))
+ fd_cloexec = 0;
+ }
+ }
+#endif
+ return fd;
+}
+
+/*
+ * Find "sha1" as a loose object in the local repository or in an alternate.
+ * Returns 0 on success, negative on failure.
+ *
+ * The "path" out-parameter will give the path of the object we found (if any).
+ * Note that it may point to static storage and is only valid until another
+ * call to sha1_file_name(), etc.
+ */
+static int stat_sha1_file(struct repository *r, const unsigned char *sha1,
+ struct stat *st, const char **path)
+{
+ struct alternate_object_database *alt;
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(r, &buf, sha1);
+ *path = buf.buf;
+
+ if (!lstat(*path, st))
+ return 0;
+
+ prepare_alt_odb(r);
+ errno = ENOENT;
+ for (alt = r->objects->alt_odb_list; alt; alt = alt->next) {
+ *path = alt_sha1_path(alt, sha1);
+ if (!lstat(*path, st))
+ return 0;
+ }
+
+ return -1;
+}
+
+/*
+ * Like stat_sha1_file(), but actually open the object and return the
+ * descriptor. See the caveats on the "path" parameter above.
+ */
+static int open_sha1_file(struct repository *r,
+ const unsigned char *sha1, const char **path)
+{
+ int fd;
+ struct alternate_object_database *alt;
+ int most_interesting_errno;
+ static struct strbuf buf = STRBUF_INIT;
+
+ strbuf_reset(&buf);
+ sha1_file_name(r, &buf, sha1);
+ *path = buf.buf;
+
+ fd = git_open(*path);
+ if (fd >= 0)
+ return fd;
+ most_interesting_errno = errno;
+
+ prepare_alt_odb(r);
+ for (alt = r->objects->alt_odb_list; alt; alt = alt->next) {
+ *path = alt_sha1_path(alt, sha1);
+ fd = git_open(*path);
+ if (fd >= 0)
+ return fd;
+ if (most_interesting_errno == ENOENT)
+ most_interesting_errno = errno;
+ }
+ errno = most_interesting_errno;
+ return -1;
+}
+
+/*
+ * Map the loose object at "path" if it is not NULL, or the path found by
+ * searching for a loose object named "sha1".
+ */
+static void *map_sha1_file_1(struct repository *r, const char *path,
+ const unsigned char *sha1, unsigned long *size)
+{
+ void *map;
+ int fd;
+
+ if (path)
+ fd = git_open(path);
+ else
+ fd = open_sha1_file(r, sha1, &path);
+ map = NULL;
+ if (fd >= 0) {
+ struct stat st;
+
+ if (!fstat(fd, &st)) {
+ *size = xsize_t(st.st_size);
+ if (!*size) {
+ /* mmap() is forbidden on empty files */
+ error("object file %s is empty", path);
+ return NULL;
+ }
+ map = xmmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
+ }
+ close(fd);
+ }
+ return map;
+}
+
+void *map_sha1_file(struct repository *r,
+ const unsigned char *sha1, unsigned long *size)
+{
+ return map_sha1_file_1(r, NULL, sha1, size);
+}
+
+static int unpack_sha1_short_header(git_zstream *stream,
+ unsigned char *map, unsigned long mapsize,
+ void *buffer, unsigned long bufsiz)
+{
+ /* Get the data stream */
+ memset(stream, 0, sizeof(*stream));
+ stream->next_in = map;
+ stream->avail_in = mapsize;
+ stream->next_out = buffer;
+ stream->avail_out = bufsiz;
+
+ git_inflate_init(stream);
+ return git_inflate(stream, 0);
+}
+
+int unpack_sha1_header(git_zstream *stream,
+ unsigned char *map, unsigned long mapsize,
+ void *buffer, unsigned long bufsiz)
+{
+ int status = unpack_sha1_short_header(stream, map, mapsize,
+ buffer, bufsiz);
+
+ if (status < Z_OK)
+ return status;
+
+ /* Make sure we have the terminating NUL */
+ if (!memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
+ return -1;
+ return 0;
+}
+
+static int unpack_sha1_header_to_strbuf(git_zstream *stream, unsigned char *map,
+ unsigned long mapsize, void *buffer,
+ unsigned long bufsiz, struct strbuf *header)
+{
+ int status;
+
+ status = unpack_sha1_short_header(stream, map, mapsize, buffer, bufsiz);
+ if (status < Z_OK)
+ return -1;
+
+ /*
+ * Check if the entire header was unpacked in the first iteration.
+ */
+ if (memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
+ return 0;
+
+ /*
+ * buffer[0..bufsiz] was not large enough. Copy the partial
+ * result out to header, and then append the result of further
+ * reading the stream.
+ */
+ strbuf_add(header, buffer, stream->next_out - (unsigned char *)buffer);
+ stream->next_out = buffer;
+ stream->avail_out = bufsiz;
+
+ do {
+ status = git_inflate(stream, 0);
+ strbuf_add(header, buffer, stream->next_out - (unsigned char *)buffer);
+ if (memchr(buffer, '\0', stream->next_out - (unsigned char *)buffer))
+ return 0;
+ stream->next_out = buffer;
+ stream->avail_out = bufsiz;
+ } while (status != Z_STREAM_END);
+ return -1;
+}
+
+static void *unpack_sha1_rest(git_zstream *stream, void *buffer, unsigned long size, const unsigned char *sha1)
+{
+ int bytes = strlen(buffer) + 1;
+ unsigned char *buf = xmallocz(size);
+ unsigned long n;
+ int status = Z_OK;
+
+ n = stream->total_out - bytes;
+ if (n > size)
+ n = size;
+ memcpy(buf, (char *) buffer + bytes, n);
+ bytes = n;
+ if (bytes <= size) {
+ /*
+ * The above condition must be (bytes <= size), not
+ * (bytes < size). In other words, even though we
+ * expect no more output and set avail_out to zero,
+ * the input zlib stream may have bytes that express
+ * "this concludes the stream", and we *do* want to
+ * eat that input.
+ *
+ * Otherwise we would not be able to test that we
+ * consumed all the input to reach the expected size;
+ * we also want to check that zlib tells us that all
+ * went well with status == Z_STREAM_END at the end.
+ */
+ stream->next_out = buf + bytes;
+ stream->avail_out = size - bytes;
+ while (status == Z_OK)
+ status = git_inflate(stream, Z_FINISH);
+ }
+ if (status == Z_STREAM_END && !stream->avail_in) {
+ git_inflate_end(stream);
+ return buf;
+ }
+
+ if (status < 0)
+ error("corrupt loose object '%s'", sha1_to_hex(sha1));
+ else if (stream->avail_in)
+ error("garbage at end of loose object '%s'",
+ sha1_to_hex(sha1));
+ free(buf);
+ return NULL;
+}
+
+/*
+ * We used to just use "sscanf()", but that's actually way
+ * too permissive for what we want to check. So do a strict
+ * object header parse by hand.
+ */
+static int parse_sha1_header_extended(const char *hdr, struct object_info *oi,
+ unsigned int flags)
+{
+ const char *type_buf = hdr;
+ unsigned long size;
+ int type, type_len = 0;
+
+ /*
+ * The type can be of any size but is followed by
+ * a space.
+ */
+ for (;;) {
+ char c = *hdr++;
+ if (!c)
+ return -1;
+ if (c == ' ')
+ break;
+ type_len++;
+ }
+
+ type = type_from_string_gently(type_buf, type_len, 1);
+ if (oi->type_name)
+ strbuf_add(oi->type_name, type_buf, type_len);
+ /*
+ * Set type to 0 if it's an unknown object and
+ * we're obtaining the type using '--allow-unknown-type'
+ * option.
+ */
+ if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE) && (type < 0))
+ type = 0;
+ else if (type < 0)
+ die("invalid object type");
+ if (oi->typep)
+ *oi->typep = type;
+
+ /*
+ * The length must follow immediately, and be in canonical
+ * decimal format (ie "010" is not valid).
+ */
+ size = *hdr++ - '0';
+ if (size > 9)
+ return -1;
+ if (size) {
+ for (;;) {
+ unsigned long c = *hdr - '0';
+ if (c > 9)
+ break;
+ hdr++;
+ size = size * 10 + c;
+ }
+ }
+
+ if (oi->sizep)
+ *oi->sizep = size;
+
+ /*
+ * The length must be followed by a zero byte
+ */
+ return *hdr ? -1 : type;
+}
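+/*
+ * Worked example (annotation): a loose object header is "<type> <size>"
+ * followed by a NUL, so for "blob 1234\0" the first loop stops at the
+ * SP with type_len == 4, the digit loop accumulates size == 1234, and
+ * hdr is left on the terminating NUL, so the type is returned.
+ */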
+
+int parse_sha1_header(const char *hdr, unsigned long *sizep)
+{
+ struct object_info oi = OBJECT_INFO_INIT;
+
+ oi.sizep = sizep;
+ return parse_sha1_header_extended(hdr, &oi, 0);
+}
+
+static int sha1_loose_object_info(struct repository *r,
+ const unsigned char *sha1,
+ struct object_info *oi, int flags)
+{
+ int status = 0;
+ unsigned long mapsize;
+ void *map;
+ git_zstream stream;
+ char hdr[MAX_HEADER_LEN];
+ struct strbuf hdrbuf = STRBUF_INIT;
+ unsigned long size_scratch;
+
+ if (oi->delta_base_sha1)
+ hashclr(oi->delta_base_sha1);
+
+ /*
+ * If we don't care about type or size, then we don't
+ * need to look inside the object at all. Note that we
+ * do not optimize out the stat call, even if the
+ * caller doesn't care about the disk-size, since our
+ * return value implicitly indicates whether the
+ * object even exists.
+ */
+ if (!oi->typep && !oi->type_name && !oi->sizep && !oi->contentp) {
+ const char *path;
+ struct stat st;
+ if (stat_sha1_file(r, sha1, &st, &path) < 0)
+ return -1;
+ if (oi->disk_sizep)
+ *oi->disk_sizep = st.st_size;
+ return 0;
+ }
+
+ map = map_sha1_file(r, sha1, &mapsize);
+ if (!map)
+ return -1;
+
+ if (!oi->sizep)
+ oi->sizep = &size_scratch;
+
+ if (oi->disk_sizep)
+ *oi->disk_sizep = mapsize;
+ if ((flags & OBJECT_INFO_ALLOW_UNKNOWN_TYPE)) {
+ if (unpack_sha1_header_to_strbuf(&stream, map, mapsize, hdr, sizeof(hdr), &hdrbuf) < 0)
+ status = error("unable to unpack %s header with --allow-unknown-type",
+ sha1_to_hex(sha1));
+ } else if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0)
+ status = error("unable to unpack %s header",
+ sha1_to_hex(sha1));
+ if (status < 0)
+ ; /* Do nothing */
+ else if (hdrbuf.len) {
+ if ((status = parse_sha1_header_extended(hdrbuf.buf, oi, flags)) < 0)
+ status = error("unable to parse %s header with --allow-unknown-type",
+ sha1_to_hex(sha1));
+ } else if ((status = parse_sha1_header_extended(hdr, oi, flags)) < 0)
+ status = error("unable to parse %s header", sha1_to_hex(sha1));
+
+ if (status >= 0 && oi->contentp) {
+ *oi->contentp = unpack_sha1_rest(&stream, hdr,
+ *oi->sizep, sha1);
+ if (!*oi->contentp) {
+ git_inflate_end(&stream);
+ status = -1;
+ }
+ } else
+ git_inflate_end(&stream);
+
+ munmap(map, mapsize);
+ if (status && oi->typep)
+ *oi->typep = status;
+ if (oi->sizep == &size_scratch)
+ oi->sizep = NULL;
+ strbuf_release(&hdrbuf);
+ oi->whence = OI_LOOSE;
+ return (status < 0) ? status : 0;
+}
+
+int fetch_if_missing = 1;
+
+int oid_object_info_extended(struct repository *r, const struct object_id *oid,
+ struct object_info *oi, unsigned flags)
+{
+ static struct object_info blank_oi = OBJECT_INFO_INIT;
+ struct pack_entry e;
+ int rtype;
+ const struct object_id *real = oid;
+ int already_retried = 0;
+
+ if (flags & OBJECT_INFO_LOOKUP_REPLACE)
+ real = lookup_replace_object(r, oid);
+
+ if (is_null_oid(real))
+ return -1;
+
+ if (!oi)
+ oi = &blank_oi;
+
+ if (!(flags & OBJECT_INFO_SKIP_CACHED)) {
- struct cached_object *co = find_cached_object(real->hash);
++ struct cached_object *co = find_cached_object(real);
+ if (co) {
+ if (oi->typep)
+ *(oi->typep) = co->type;
+ if (oi->sizep)
+ *(oi->sizep) = co->size;
+ if (oi->disk_sizep)
+ *(oi->disk_sizep) = 0;
+ if (oi->delta_base_sha1)
+ hashclr(oi->delta_base_sha1);
+ if (oi->type_name)
+ strbuf_addstr(oi->type_name, type_name(co->type));
+ if (oi->contentp)
+ *oi->contentp = xmemdupz(co->buf, co->size);
+ oi->whence = OI_CACHED;
+ return 0;
+ }
+ }
+
+ while (1) {
- if (find_pack_entry(r, real->hash, &e))
++ if (find_pack_entry(r, real, &e))
+ break;
+
+ if (flags & OBJECT_INFO_IGNORE_LOOSE)
+ return -1;
+
+ /* Most likely it's a loose object. */
+ if (!sha1_loose_object_info(r, real->hash, oi, flags))
+ return 0;
+
+ /* Not a loose object; someone else may have just packed it. */
+ if (!(flags & OBJECT_INFO_QUICK)) {
+ reprepare_packed_git(r);
- if (find_pack_entry(r, real->hash, &e))
++ if (find_pack_entry(r, real, &e))
+ break;
+ }
+
+ /* Check if it is a missing object */
+ if (fetch_if_missing && repository_format_partial_clone &&
+ !already_retried && r == the_repository) {
+ /*
+ * TODO Investigate having fetch_object() return
+ * TODO error/success and stopping the music here.
+ * TODO Pass a repository struct through fetch_object,
+ * such that arbitrary repositories work.
+ */
+ fetch_object(repository_format_partial_clone, real->hash);
+ already_retried = 1;
+ continue;
+ }
+
+ return -1;
+ }
+
+ if (oi == &blank_oi)
+ /*
+ * We know that the caller doesn't actually need the
+ * information below, so return early.
+ */
+ return 0;
+ rtype = packed_object_info(r, e.p, e.offset, oi);
+ if (rtype < 0) {
+ mark_bad_packed_object(e.p, real->hash);
+ return oid_object_info_extended(r, real, oi, 0);
+ } else if (oi->whence == OI_PACKED) {
+ oi->u.packed.offset = e.offset;
+ oi->u.packed.pack = e.p;
+ oi->u.packed.is_delta = (rtype == OBJ_REF_DELTA ||
+ rtype == OBJ_OFS_DELTA);
+ }
+
+ return 0;
+}
+
+/* returns enum object_type or negative */
+int oid_object_info(struct repository *r,
+ const struct object_id *oid,
+ unsigned long *sizep)
+{
+ enum object_type type;
+ struct object_info oi = OBJECT_INFO_INIT;
+
+ oi.typep = &type;
+ oi.sizep = sizep;
+ if (oid_object_info_extended(r, oid, &oi,
+ OBJECT_INFO_LOOKUP_REPLACE) < 0)
+ return -1;
+ return type;
+}
+
+static void *read_object(const unsigned char *sha1, enum object_type *type,
+ unsigned long *size)
+{
+ struct object_id oid;
+ struct object_info oi = OBJECT_INFO_INIT;
+ void *content;
+ oi.typep = type;
+ oi.sizep = size;
+ oi.contentp = &content;
+
+ hashcpy(oid.hash, sha1);
+
+ if (oid_object_info_extended(the_repository, &oid, &oi, 0) < 0)
+ return NULL;
+ return content;
+}
+
+int pretend_object_file(void *buf, unsigned long len, enum object_type type,
+ struct object_id *oid)
+{
+ struct cached_object *co;
+
+ hash_object_file(buf, len, type_name(type), oid);
- if (has_sha1_file(oid->hash) || find_cached_object(oid->hash))
++ if (has_sha1_file(oid->hash) || find_cached_object(oid))
+ return 0;
+ ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc);
+ co = &cached_objects[cached_object_nr++];
+ co->size = len;
+ co->type = type;
+ co->buf = xmalloc(len);
+ memcpy(co->buf, buf, len);
- hashcpy(co->sha1, oid->hash);
++ oidcpy(&co->oid, oid);
+ return 0;
+}
+
+/*
+ * This function dies on corrupt objects; the callers who want to
+ * deal with them should arrange to call read_object() and give error
+ * messages themselves.
+ */
+void *read_object_file_extended(const struct object_id *oid,
+ enum object_type *type,
+ unsigned long *size,
+ int lookup_replace)
+{
+ void *data;
+ const struct packed_git *p;
+ const char *path;
+ struct stat st;
+ const struct object_id *repl = lookup_replace ?
+ lookup_replace_object(the_repository, oid) : oid;
+
+ errno = 0;
+ data = read_object(repl->hash, type, size);
+ if (data)
+ return data;
+
+ if (errno && errno != ENOENT)
+ die_errno("failed to read object %s", oid_to_hex(oid));
+
+ /* die if we replaced an object with one that does not exist */
+ if (repl != oid)
+ die("replacement %s not found for %s",
+ oid_to_hex(repl), oid_to_hex(oid));
+
+ if (!stat_sha1_file(the_repository, repl->hash, &st, &path))
+ die("loose object %s (stored in %s) is corrupt",
+ oid_to_hex(repl), path);
+
+ if ((p = has_packed_and_bad(repl->hash)) != NULL)
+ die("packed object %s (stored in %s) is corrupt",
+ oid_to_hex(repl), p->pack_name);
+
+ return NULL;
+}
+
+void *read_object_with_reference(const struct object_id *oid,
+ const char *required_type_name,
+ unsigned long *size,
+ struct object_id *actual_oid_return)
+{
+ enum object_type type, required_type;
+ void *buffer;
+ unsigned long isize;
+ struct object_id actual_oid;
+
+ required_type = type_from_string(required_type_name);
+ oidcpy(&actual_oid, oid);
+ while (1) {
+ int ref_length = -1;
+ const char *ref_type = NULL;
+
+ buffer = read_object_file(&actual_oid, &type, &isize);
+ if (!buffer)
+ return NULL;
+ if (type == required_type) {
+ *size = isize;
+ if (actual_oid_return)
+ oidcpy(actual_oid_return, &actual_oid);
+ return buffer;
+ }
+ /* Handle references */
+ else if (type == OBJ_COMMIT)
+ ref_type = "tree ";
+ else if (type == OBJ_TAG)
+ ref_type = "object ";
+ else {
+ free(buffer);
+ return NULL;
+ }
+ ref_length = strlen(ref_type);
+
+ if (ref_length + GIT_SHA1_HEXSZ > isize ||
+ memcmp(buffer, ref_type, ref_length) ||
+ get_oid_hex((char *) buffer + ref_length, &actual_oid)) {
+ free(buffer);
+ return NULL;
+ }
+ free(buffer);
+ /* Now we have the ID of the referred-to object in
+ * actual_oid. Check again. */
+ }
+}
+
+static void write_object_file_prepare(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ char *hdr, int *hdrlen)
+{
+ git_hash_ctx c;
+
+ /* Generate the header */
+ *hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1;
+
+ /* Hash the header and the data. */
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, *hdrlen);
+ the_hash_algo->update_fn(&c, buf, len);
+ the_hash_algo->final_fn(oid->hash, &c);
+}
+
+/*
+ * Move the just written object into its final resting place.
+ */
+int finalize_object_file(const char *tmpfile, const char *filename)
+{
+ int ret = 0;
+
+ if (object_creation_mode == OBJECT_CREATION_USES_RENAMES)
+ goto try_rename;
+ else if (link(tmpfile, filename))
+ ret = errno;
+
+ /*
+ * Coda hack - coda doesn't like cross-directory links,
+ * so we fall back to a rename, which will mean that it
+ * won't be able to check collisions, but that's not a
+ * big deal.
+ *
+ * The same holds for FAT formatted media.
+ *
+ * When this succeeds, we just return. We have nothing
+ * left to unlink.
+ */
+ if (ret && ret != EEXIST) {
+ try_rename:
+ if (!rename(tmpfile, filename))
+ goto out;
+ ret = errno;
+ }
+ unlink_or_warn(tmpfile);
+ if (ret) {
+ if (ret != EEXIST) {
+ return error_errno("unable to write sha1 filename %s", filename);
+ }
+ /* FIXME!!! Collision check here ? */
+ }
+
+out:
+ if (adjust_shared_perm(filename))
+ return error("unable to set permission to '%s'", filename);
+ return 0;
+}
+
+static int write_buffer(int fd, const void *buf, size_t len)
+{
+ if (write_in_full(fd, buf, len) < 0)
+ return error_errno("file write error");
+ return 0;
+}
+
+int hash_object_file(const void *buf, unsigned long len, const char *type,
+ struct object_id *oid)
+{
+ char hdr[MAX_HEADER_LEN];
+ int hdrlen = sizeof(hdr);
+ write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
+ return 0;
+}
+
+/* Finalize a file on disk, and close it. */
+static void close_sha1_file(int fd)
+{
+ if (fsync_object_files)
+ fsync_or_die(fd, "sha1 file");
+ if (close(fd) != 0)
+ die_errno("error when closing sha1 file");
+}
+
+/* Size of directory component, including the ending '/' */
+static inline int directory_size(const char *filename)
+{
+ const char *s = strrchr(filename, '/');
+ if (!s)
+ return 0;
+ return s - filename + 1;
+}
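+/* Example (annotation): directory_size("objects/49/foo") == 11, the length of "objects/49/". */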
+
+/*
+ * This creates a temporary file in the same directory as the final
+ * 'filename'.
+ *
+ * We want to avoid cross-directory filename renames, because those
+ * can have problems on various filesystems (FAT, NFS, Coda).
+ */
+static int create_tmpfile(struct strbuf *tmp, const char *filename)
+{
+ int fd, dirlen = directory_size(filename);
+
+ strbuf_reset(tmp);
+ strbuf_add(tmp, filename, dirlen);
+ strbuf_addstr(tmp, "tmp_obj_XXXXXX");
+ fd = git_mkstemp_mode(tmp->buf, 0444);
+ if (fd < 0 && dirlen && errno == ENOENT) {
+ /*
+ * Make sure the directory exists; note that the contents
+ * of the buffer are undefined after mkstemp returns an
+ * error, so we have to rewrite the whole buffer from
+ * scratch.
+ */
+ strbuf_reset(tmp);
+ strbuf_add(tmp, filename, dirlen - 1);
+ if (mkdir(tmp->buf, 0777) && errno != EEXIST)
+ return -1;
+ if (adjust_shared_perm(tmp->buf))
+ return -1;
+
+ /* Try again */
+ strbuf_addstr(tmp, "/tmp_obj_XXXXXX");
+ fd = git_mkstemp_mode(tmp->buf, 0444);
+ }
+ return fd;
+}
+
+static int write_loose_object(const struct object_id *oid, char *hdr,
+ int hdrlen, const void *buf, unsigned long len,
+ time_t mtime)
+{
+ int fd, ret;
+ unsigned char compressed[4096];
+ git_zstream stream;
+ git_hash_ctx c;
+ struct object_id parano_oid;
+ static struct strbuf tmp_file = STRBUF_INIT;
+ static struct strbuf filename = STRBUF_INIT;
+
+ strbuf_reset(&filename);
+ sha1_file_name(the_repository, &filename, oid->hash);
+
+ fd = create_tmpfile(&tmp_file, filename.buf);
+ if (fd < 0) {
+ if (errno == EACCES)
+ return error("insufficient permission for adding an object to repository database %s", get_object_directory());
+ else
+ return error_errno("unable to create temporary file");
+ }
+
+ /* Set it up */
+ git_deflate_init(&stream, zlib_compression_level);
+ stream.next_out = compressed;
+ stream.avail_out = sizeof(compressed);
+ the_hash_algo->init_fn(&c);
+
+ /* First header.. */
+ stream.next_in = (unsigned char *)hdr;
+ stream.avail_in = hdrlen;
+ while (git_deflate(&stream, 0) == Z_OK)
+ ; /* nothing */
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
+
+ /* Then the data itself.. */
+ stream.next_in = (void *)buf;
+ stream.avail_in = len;
+ do {
+ unsigned char *in0 = stream.next_in;
+ ret = git_deflate(&stream, Z_FINISH);
+ the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
+ if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
+ die("unable to write sha1 file");
+ stream.next_out = compressed;
+ stream.avail_out = sizeof(compressed);
+ } while (ret == Z_OK);
+
+ if (ret != Z_STREAM_END)
+ die("unable to deflate new object %s (%d)", oid_to_hex(oid),
+ ret);
+ ret = git_deflate_end_gently(&stream);
+ if (ret != Z_OK)
+ die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
+ ret);
+ the_hash_algo->final_fn(parano_oid.hash, &c);
+ if (oidcmp(oid, &parano_oid) != 0)
+ die("confused by unstable object source data for %s",
+ oid_to_hex(oid));
+
+ close_sha1_file(fd);
+
+ if (mtime) {
+ struct utimbuf utb;
+ utb.actime = mtime;
+ utb.modtime = mtime;
+ if (utime(tmp_file.buf, &utb) < 0)
+ warning_errno("failed utime() on %s", tmp_file.buf);
+ }
+
+ return finalize_object_file(tmp_file.buf, filename.buf);
+}
+
- static int freshen_loose_object(const unsigned char *sha1)
++static int freshen_loose_object(const struct object_id *oid)
+{
- return check_and_freshen(sha1, 1);
++ return check_and_freshen(oid, 1);
+}
+
- static int freshen_packed_object(const unsigned char *sha1)
++static int freshen_packed_object(const struct object_id *oid)
+{
+ struct pack_entry e;
- if (!find_pack_entry(the_repository, sha1, &e))
++ if (!find_pack_entry(the_repository, oid, &e))
+ return 0;
+ if (e.p->freshened)
+ return 1;
+ if (!freshen_file(e.p->pack_name))
+ return 0;
+ e.p->freshened = 1;
+ return 1;
+}
+
+int write_object_file(const void *buf, unsigned long len, const char *type,
+ struct object_id *oid)
+{
+ char hdr[MAX_HEADER_LEN];
+ int hdrlen = sizeof(hdr);
+
+ /* Normally, if we already have it in the pack, we do not bother writing
+ * it out into the .git/objects/??/?{38} file.
+ */
+ write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
- if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
++ if (freshen_packed_object(oid) || freshen_loose_object(oid))
+ return 0;
+ return write_loose_object(oid, hdr, hdrlen, buf, len, 0);
+}
+
+int hash_object_file_literally(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ unsigned flags)
+{
+ char *header;
+ int hdrlen, status = 0;
+
+ /* type string, SP, %lu of the length plus NUL must fit this */
+ hdrlen = strlen(type) + MAX_HEADER_LEN;
+ header = xmalloc(hdrlen);
+ write_object_file_prepare(buf, len, type, oid, header, &hdrlen);
+
+ if (!(flags & HASH_WRITE_OBJECT))
+ goto cleanup;
- if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
++ if (freshen_packed_object(oid) || freshen_loose_object(oid))
+ goto cleanup;
+ status = write_loose_object(oid, header, hdrlen, buf, len, 0);
+
+cleanup:
+ free(header);
+ return status;
+}
+
+int force_object_loose(const struct object_id *oid, time_t mtime)
+{
+ void *buf;
+ unsigned long len;
+ enum object_type type;
+ char hdr[MAX_HEADER_LEN];
+ int hdrlen;
+ int ret;
+
- if (has_loose_object(oid->hash))
++ if (has_loose_object(oid))
+ return 0;
+ buf = read_object(oid->hash, &type, &len);
+ if (!buf)
+ return error("cannot read sha1_file for %s", oid_to_hex(oid));
+ hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
+ ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
+ free(buf);
+
+ return ret;
+}
+
+int has_sha1_file_with_flags(const unsigned char *sha1, int flags)
+{
+ struct object_id oid;
+ if (!startup_info->have_repository)
+ return 0;
+ hashcpy(oid.hash, sha1);
+ return oid_object_info_extended(the_repository, &oid, NULL,
+ flags | OBJECT_INFO_SKIP_CACHED) >= 0;
+}
+
+int has_object_file(const struct object_id *oid)
+{
+ return has_sha1_file(oid->hash);
+}
+
+int has_object_file_with_flags(const struct object_id *oid, int flags)
+{
+ return has_sha1_file_with_flags(oid->hash, flags);
+}
+
+static void check_tree(const void *buf, size_t size)
+{
+ struct tree_desc desc;
+ struct name_entry entry;
+
+ init_tree_desc(&desc, buf, size);
+ while (tree_entry(&desc, &entry))
+ /* do nothing
+ * tree_entry() will die() on malformed entries */
+ ;
+}
+
+static void check_commit(const void *buf, size_t size)
+{
+ struct commit c;
+ memset(&c, 0, sizeof(c));
+ if (parse_commit_buffer(&c, buf, size))
+ die("corrupt commit");
+}
+
+static void check_tag(const void *buf, size_t size)
+{
+ struct tag t;
+ memset(&t, 0, sizeof(t));
+ if (parse_tag_buffer(&t, buf, size))
+ die("corrupt tag");
+}
+
+static int index_mem(struct object_id *oid, void *buf, size_t size,
+ enum object_type type,
+ const char *path, unsigned flags)
+{
+ int ret, re_allocated = 0;
+ int write_object = flags & HASH_WRITE_OBJECT;
+
+ if (!type)
+ type = OBJ_BLOB;
+
+ /*
+ * Convert blobs to git internal format
+ */
+ if ((type == OBJ_BLOB) && path) {
+ struct strbuf nbuf = STRBUF_INIT;
+ if (convert_to_git(&the_index, path, buf, size, &nbuf,
+ get_conv_flags(flags))) {
+ buf = strbuf_detach(&nbuf, &size);
+ re_allocated = 1;
+ }
+ }
+ if (flags & HASH_FORMAT_CHECK) {
+ if (type == OBJ_TREE)
+ check_tree(buf, size);
+ if (type == OBJ_COMMIT)
+ check_commit(buf, size);
+ if (type == OBJ_TAG)
+ check_tag(buf, size);
+ }
+
+ if (write_object)
+ ret = write_object_file(buf, size, type_name(type), oid);
+ else
+ ret = hash_object_file(buf, size, type_name(type), oid);
+ if (re_allocated)
+ free(buf);
+ return ret;
+}
+
+static int index_stream_convert_blob(struct object_id *oid, int fd,
+ const char *path, unsigned flags)
+{
+ int ret;
+ const int write_object = flags & HASH_WRITE_OBJECT;
+ struct strbuf sbuf = STRBUF_INIT;
+
+ assert(path);
+ assert(would_convert_to_git_filter_fd(path));
+
+ convert_to_git_filter_fd(&the_index, path, fd, &sbuf,
+ get_conv_flags(flags));
+
+ if (write_object)
+ ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+ oid);
+ else
+ ret = hash_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+ oid);
+ strbuf_release(&sbuf);
+ return ret;
+}
+
+static int index_pipe(struct object_id *oid, int fd, enum object_type type,
+ const char *path, unsigned flags)
+{
+ struct strbuf sbuf = STRBUF_INIT;
+ int ret;
+
+ if (strbuf_read(&sbuf, fd, 4096) >= 0)
+ ret = index_mem(oid, sbuf.buf, sbuf.len, type, path, flags);
+ else
+ ret = -1;
+ strbuf_release(&sbuf);
+ return ret;
+}
+
+#define SMALL_FILE_SIZE (32*1024)
+
+static int index_core(struct object_id *oid, int fd, size_t size,
+ enum object_type type, const char *path,
+ unsigned flags)
+{
+ int ret;
+
+ if (!size) {
+ ret = index_mem(oid, "", size, type, path, flags);
+ } else if (size <= SMALL_FILE_SIZE) {
+ char *buf = xmalloc(size);
+ ssize_t read_result = read_in_full(fd, buf, size);
+ if (read_result < 0)
+ ret = error_errno("read error while indexing %s",
+ path ? path : "<unknown>");
+ else if (read_result != size)
+ ret = error("short read while indexing %s",
+ path ? path : "<unknown>");
+ else
+ ret = index_mem(oid, buf, size, type, path, flags);
+ free(buf);
+ } else {
+ void *buf = xmmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+ ret = index_mem(oid, buf, size, type, path, flags);
+ munmap(buf, size);
+ }
+ return ret;
+}
+
+/*
+ * This creates one packfile per large blob unless bulk-checkin
+ * machinery is "plugged".
+ *
+ * This also bypasses the usual "convert-to-git" dance, and that is on
+ * purpose. We could write a streaming version of the converting
+ * functions and insert that before feeding the data to fast-import
+ * (or equivalent in-core API described above). However, that is
+ * somewhat complicated, as we do not know the size of the filter
+ * result, which we need to know beforehand when writing a git object.
+ * Since the primary motivation for trying to stream from the working
+ * tree file and to avoid mmaping it in core is to deal with large
+ * binary blobs, they generally do not want to get any conversion, and
+ * callers should avoid this code path when filters are requested.
+ */
+static int index_stream(struct object_id *oid, int fd, size_t size,
+ enum object_type type, const char *path,
+ unsigned flags)
+{
+ return index_bulk_checkin(oid, fd, size, type, path, flags);
+}
+
+int index_fd(struct object_id *oid, int fd, struct stat *st,
+ enum object_type type, const char *path, unsigned flags)
+{
+ int ret;
+
+ /*
+ * Call xsize_t() only when needed to avoid potentially unnecessary
+ * die() for large files.
+ */
+ if (type == OBJ_BLOB && path && would_convert_to_git_filter_fd(path))
+ ret = index_stream_convert_blob(oid, fd, path, flags);
+ else if (!S_ISREG(st->st_mode))
+ ret = index_pipe(oid, fd, type, path, flags);
+ else if (st->st_size <= big_file_threshold || type != OBJ_BLOB ||
+ (path && would_convert_to_git(&the_index, path)))
+ ret = index_core(oid, fd, xsize_t(st->st_size), type, path,
+ flags);
+ else
+ ret = index_stream(oid, fd, xsize_t(st->st_size), type, path,
+ flags);
+ close(fd);
+ return ret;
+}
+
+int index_path(struct object_id *oid, const char *path, struct stat *st, unsigned flags)
+{
+ int fd;
+ struct strbuf sb = STRBUF_INIT;
+ int rc = 0;
+
+ switch (st->st_mode & S_IFMT) {
+ case S_IFREG:
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return error_errno("open(\"%s\")", path);
+ if (index_fd(oid, fd, st, OBJ_BLOB, path, flags) < 0)
+ return error("%s: failed to insert into database",
+ path);
+ break;
+ case S_IFLNK:
+ if (strbuf_readlink(&sb, path, st->st_size))
+ return error_errno("readlink(\"%s\")", path);
+ if (!(flags & HASH_WRITE_OBJECT))
+ hash_object_file(sb.buf, sb.len, blob_type, oid);
+ else if (write_object_file(sb.buf, sb.len, blob_type, oid))
+ rc = error("%s: failed to insert into database", path);
+ strbuf_release(&sb);
+ break;
+ case S_IFDIR:
+ return resolve_gitlink_ref(path, "HEAD", oid);
+ default:
+ return error("%s: unsupported file type", path);
+ }
+ return rc;
+}
+
+int read_pack_header(int fd, struct pack_header *header)
+{
+ if (read_in_full(fd, header, sizeof(*header)) != sizeof(*header))
+ /* "eof before pack header was fully read" */
+ return PH_ERROR_EOF;
+
+ if (header->hdr_signature != htonl(PACK_SIGNATURE))
+ /* "protocol error (pack signature mismatch detected)" */
+ return PH_ERROR_PACK_SIGNATURE;
+ if (!pack_version_ok(header->hdr_version))
+ /* "protocol error (pack version unsupported)" */
+ return PH_ERROR_PROTOCOL;
+ return 0;
+}
+
+void assert_oid_type(const struct object_id *oid, enum object_type expect)
+{
+ enum object_type type = oid_object_info(the_repository, oid, NULL);
+ if (type < 0)
+ die("%s is not a valid object", oid_to_hex(oid));
+ if (type != expect)
+ die("%s is not a valid '%s' object", oid_to_hex(oid),
+ type_name(expect));
+}
+
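+/*
+ * Annotation: loose objects are fanned out over 256 subdirectories
+ * named after the first byte of the object name, e.g. object 49ae...
+ * lives at "objects/49/ae<36 more hex chars>". The subdir number below
+ * therefore becomes the first byte of each reconstructed oid, and the
+ * 38-character filename supplies the remaining 19 bytes.
+ */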
+int for_each_file_in_obj_subdir(unsigned int subdir_nr,
+ struct strbuf *path,
+ each_loose_object_fn obj_cb,
+ each_loose_cruft_fn cruft_cb,
+ each_loose_subdir_fn subdir_cb,
+ void *data)
+{
+ size_t origlen, baselen;
+ DIR *dir;
+ struct dirent *de;
+ int r = 0;
+ struct object_id oid;
+
+ if (subdir_nr > 0xff)
+ BUG("invalid loose object subdirectory: %x", subdir_nr);
+
+ origlen = path->len;
+ strbuf_complete(path, '/');
+ strbuf_addf(path, "%02x", subdir_nr);
+
+ dir = opendir(path->buf);
+ if (!dir) {
+ if (errno != ENOENT)
+ r = error_errno("unable to open %s", path->buf);
+ strbuf_setlen(path, origlen);
+ return r;
+ }
+
+ oid.hash[0] = subdir_nr;
+ strbuf_addch(path, '/');
+ baselen = path->len;
+
+ while ((de = readdir(dir))) {
+ size_t namelen;
+ if (is_dot_or_dotdot(de->d_name))
+ continue;
+
+ namelen = strlen(de->d_name);
+ strbuf_setlen(path, baselen);
+ strbuf_add(path, de->d_name, namelen);
+ if (namelen == GIT_SHA1_HEXSZ - 2 &&
+ !hex_to_bytes(oid.hash + 1, de->d_name,
+ GIT_SHA1_RAWSZ - 1)) {
+ if (obj_cb) {
+ r = obj_cb(&oid, path->buf, data);
+ if (r)
+ break;
+ }
+ continue;
+ }
+
+ if (cruft_cb) {
+ r = cruft_cb(de->d_name, path->buf, data);
+ if (r)
+ break;
+ }
+ }
+ closedir(dir);
+
+ strbuf_setlen(path, baselen - 1);
+ if (!r && subdir_cb)
+ r = subdir_cb(subdir_nr, path->buf, data);
+
+ strbuf_setlen(path, origlen);
+
+ return r;
+}
+
+int for_each_loose_file_in_objdir_buf(struct strbuf *path,
+ each_loose_object_fn obj_cb,
+ each_loose_cruft_fn cruft_cb,
+ each_loose_subdir_fn subdir_cb,
+ void *data)
+{
+ int r = 0;
+ int i;
+
+ for (i = 0; i < 256; i++) {
+ r = for_each_file_in_obj_subdir(i, path, obj_cb, cruft_cb,
+ subdir_cb, data);
+ if (r)
+ break;
+ }
+
+ return r;
+}
+
+int for_each_loose_file_in_objdir(const char *path,
+ each_loose_object_fn obj_cb,
+ each_loose_cruft_fn cruft_cb,
+ each_loose_subdir_fn subdir_cb,
+ void *data)
+{
+ struct strbuf buf = STRBUF_INIT;
+ int r;
+
+ strbuf_addstr(&buf, path);
+ r = for_each_loose_file_in_objdir_buf(&buf, obj_cb, cruft_cb,
+ subdir_cb, data);
+ strbuf_release(&buf);
+
+ return r;
+}
+
+struct loose_alt_odb_data {
+ each_loose_object_fn *cb;
+ void *data;
+};
+
+static int loose_from_alt_odb(struct alternate_object_database *alt,
+ void *vdata)
+{
+ struct loose_alt_odb_data *data = vdata;
+ struct strbuf buf = STRBUF_INIT;
+ int r;
+
+ strbuf_addstr(&buf, alt->path);
+ r = for_each_loose_file_in_objdir_buf(&buf,
+ data->cb, NULL, NULL,
+ data->data);
+ strbuf_release(&buf);
+ return r;
+}
+
+int for_each_loose_object(each_loose_object_fn cb, void *data, unsigned flags)
+{
+ struct loose_alt_odb_data alt;
+ int r;
+
+ r = for_each_loose_file_in_objdir(get_object_directory(),
+ cb, NULL, NULL, data);
+ if (r)
+ return r;
+
+ if (flags & FOR_EACH_OBJECT_LOCAL_ONLY)
+ return 0;
+
+ alt.cb = cb;
+ alt.data = data;
+ return foreach_alt_odb(loose_from_alt_odb, &alt);
+}
+
+static int check_stream_sha1(git_zstream *stream,
+ const char *hdr,
+ unsigned long size,
+ const char *path,
+ const unsigned char *expected_sha1)
+{
+ git_hash_ctx c;
+ unsigned char real_sha1[GIT_MAX_RAWSZ];
+ unsigned char buf[4096];
+ unsigned long total_read;
+ int status = Z_OK;
+
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, stream->total_out);
+
+ /*
+ * We already read some bytes into hdr, but the ones up to the NUL
+ * do not count against the object's content size.
+ */
+ total_read = stream->total_out - strlen(hdr) - 1;
+
+ /*
+ * This size comparison must be "<=" to read the final zlib packets;
+ * see the comment in unpack_sha1_rest for details.
+ */
+ while (total_read <= size &&
+ (status == Z_OK || status == Z_BUF_ERROR)) {
+ stream->next_out = buf;
+ stream->avail_out = sizeof(buf);
+ if (size - total_read < stream->avail_out)
+ stream->avail_out = size - total_read;
+ status = git_inflate(stream, Z_FINISH);
+ the_hash_algo->update_fn(&c, buf, stream->next_out - buf);
+ total_read += stream->next_out - buf;
+ }
+ git_inflate_end(stream);
+
+ if (status != Z_STREAM_END) {
+ error("corrupt loose object '%s'", sha1_to_hex(expected_sha1));
+ return -1;
+ }
+ if (stream->avail_in) {
+ error("garbage at end of loose object '%s'",
+ sha1_to_hex(expected_sha1));
+ return -1;
+ }
+
+ the_hash_algo->final_fn(real_sha1, &c);
+ if (hashcmp(expected_sha1, real_sha1)) {
+ error("sha1 mismatch for %s (expected %s)", path,
+ sha1_to_hex(expected_sha1));
+ return -1;
+ }
+
+ return 0;
+}
+
+int read_loose_object(const char *path,
+ const struct object_id *expected_oid,
+ enum object_type *type,
+ unsigned long *size,
+ void **contents)
+{
+ int ret = -1;
+ void *map = NULL;
+ unsigned long mapsize;
+ git_zstream stream;
+ char hdr[MAX_HEADER_LEN];
+
+ *contents = NULL;
+
+ map = map_sha1_file_1(the_repository, path, NULL, &mapsize);
+ if (!map) {
+ error_errno("unable to mmap %s", path);
+ goto out;
+ }
+
+ if (unpack_sha1_header(&stream, map, mapsize, hdr, sizeof(hdr)) < 0) {
+ error("unable to unpack header of %s", path);
+ goto out;
+ }
+
+ *type = parse_sha1_header(hdr, size);
+ if (*type < 0) {
+ error("unable to parse header of %s", path);
+ git_inflate_end(&stream);
+ goto out;
+ }
+
+ if (*type == OBJ_BLOB && *size > big_file_threshold) {
+ if (check_stream_sha1(&stream, hdr, *size, path, expected_oid->hash) < 0)
+ goto out;
+ } else {
+ *contents = unpack_sha1_rest(&stream, hdr, *size, expected_oid->hash);
+ if (!*contents) {
+ error("unable to unpack contents of %s", path);
+ git_inflate_end(&stream);
+ goto out;
+ }
+ if (check_object_signature(expected_oid, *contents,
+ *size, type_name(*type))) {
+ error("sha1 mismatch for %s (expected %s)", path,
+ oid_to_hex(expected_oid));
+ free(*contents);
+ goto out;
+ }
+ }
+
+ ret = 0; /* everything checks out */
+
+out:
+ if (map)
+ munmap(map, mapsize);
+ return ret;
+}
--- /dev/null
- ret = get_tree_entry_follow_symlinks(tree_oid.hash,
- filename, oid->hash, &oc->symlink_path,
+#include "cache.h"
+#include "config.h"
+#include "tag.h"
+#include "commit.h"
+#include "tree.h"
+#include "blob.h"
+#include "tree-walk.h"
+#include "refs.h"
+#include "remote.h"
+#include "dir.h"
+#include "sha1-array.h"
+#include "packfile.h"
+#include "object-store.h"
+#include "repository.h"
+
+static int get_oid_oneline(const char *, struct object_id *, struct commit_list *);
+
+typedef int (*disambiguate_hint_fn)(const struct object_id *, void *);
+
+struct disambiguate_state {
+ int len; /* length of prefix in hex chars */
+ char hex_pfx[GIT_MAX_HEXSZ + 1];
+ struct object_id bin_pfx;
+
+ disambiguate_hint_fn fn;
+ void *cb_data;
+ struct object_id candidate;
+ unsigned candidate_exists:1;
+ unsigned candidate_checked:1;
+ unsigned candidate_ok:1;
+ unsigned disambiguate_fn_used:1;
+ unsigned ambiguous:1;
+ unsigned always_call_fn:1;
+};
+
+static void update_candidates(struct disambiguate_state *ds, const struct object_id *current)
+{
+ if (ds->always_call_fn) {
+ ds->ambiguous = ds->fn(current, ds->cb_data) ? 1 : 0;
+ return;
+ }
+ if (!ds->candidate_exists) {
+ /* this is the first candidate */
+ oidcpy(&ds->candidate, current);
+ ds->candidate_exists = 1;
+ return;
+ } else if (!oidcmp(&ds->candidate, current)) {
+ /* the same as what we already have seen */
+ return;
+ }
+
+ if (!ds->fn) {
+ /* cannot disambiguate between ds->candidate and current */
+ ds->ambiguous = 1;
+ return;
+ }
+
+ if (!ds->candidate_checked) {
+ ds->candidate_ok = ds->fn(&ds->candidate, ds->cb_data);
+ ds->disambiguate_fn_used = 1;
+ ds->candidate_checked = 1;
+ }
+
+ if (!ds->candidate_ok) {
+ /* discard the candidate; we know it does not satisfy fn */
+ oidcpy(&ds->candidate, current);
+ ds->candidate_checked = 0;
+ return;
+ }
+
+ /* if we reach this point, we know ds->candidate satisfies fn */
+ if (ds->fn(current, ds->cb_data)) {
+ /*
+ * if both current and candidate satisfy fn, we cannot
+ * disambiguate.
+ */
+ ds->candidate_ok = 0;
+ ds->ambiguous = 1;
+ }
+
+ /* otherwise, current can be discarded and candidate is still good */
+}
+
+static int append_loose_object(const struct object_id *oid, const char *path,
+ void *data)
+{
+ oid_array_append(data, oid);
+ return 0;
+}
+
+static int match_sha(unsigned, const unsigned char *, const unsigned char *);
+
+static void find_short_object_filename(struct disambiguate_state *ds)
+{
+ int subdir_nr = ds->bin_pfx.hash[0];
+ struct alternate_object_database *alt;
+ static struct alternate_object_database *fakeent;
+
+ if (!fakeent) {
+ /*
+ * Create a "fake" alternate object database that
+ * points to our own object database, to make it
+ * easier to get a temporary working space in
+ * alt->name/alt->base while iterating over the
+ * object databases including our own.
+ */
+ fakeent = alloc_alt_odb(get_object_directory());
+ }
+ fakeent->next = the_repository->objects->alt_odb_list;
+
+ for (alt = fakeent; alt && !ds->ambiguous; alt = alt->next) {
+ int pos;
+
+ if (!alt->loose_objects_subdir_seen[subdir_nr]) {
+ struct strbuf *buf = alt_scratch_buf(alt);
+ for_each_file_in_obj_subdir(subdir_nr, buf,
+ append_loose_object,
+ NULL, NULL,
+ &alt->loose_objects_cache);
+ alt->loose_objects_subdir_seen[subdir_nr] = 1;
+ }
+
+ pos = oid_array_lookup(&alt->loose_objects_cache, &ds->bin_pfx);
+ if (pos < 0)
+ pos = -1 - pos;
+ while (!ds->ambiguous && pos < alt->loose_objects_cache.nr) {
+ const struct object_id *oid;
+ oid = alt->loose_objects_cache.oid + pos;
+ if (!match_sha(ds->len, ds->bin_pfx.hash, oid->hash))
+ break;
+ update_candidates(ds, oid);
+ pos++;
+ }
+ }
+}
+
+static int match_sha(unsigned len, const unsigned char *a, const unsigned char *b)
+{
+ do {
+ if (*a != *b)
+ return 0;
+ a++;
+ b++;
+ len -= 2;
+ } while (len > 1);
+ if (len)
+ if ((*a ^ *b) & 0xf0)
+ return 0;
+ return 1;
+}
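+/*
+ * Worked example (annotation): for the 5-hex-char prefix "49ae8",
+ * len == 5, so the loop compares two full bytes (len drops 5 -> 3 -> 1)
+ * and the tail check compares only the high nibble of the third byte.
+ */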
+
+static void unique_in_pack(struct packed_git *p,
+ struct disambiguate_state *ds)
+{
+ uint32_t num, i, first = 0;
+ const struct object_id *current = NULL;
+
+ if (open_pack_index(p) || !p->num_objects)
+ return;
+
+ num = p->num_objects;
+ bsearch_pack(&ds->bin_pfx, p, &first);
+
+ /*
+ * At this point, "first" is the location of the lowest object
+ * with an object name that could match "bin_pfx". See if we have
+ * 0, 1 or more objects that actually match.
+ */
+ for (i = first; i < num && !ds->ambiguous; i++) {
+ struct object_id oid;
+ current = nth_packed_object_oid(&oid, p, i);
+ if (!match_sha(ds->len, ds->bin_pfx.hash, current->hash))
+ break;
+ update_candidates(ds, current);
+ }
+}
+
+static void find_short_packed_object(struct disambiguate_state *ds)
+{
+ struct packed_git *p;
+
+ for (p = get_packed_git(the_repository); p && !ds->ambiguous;
+ p = p->next)
+ unique_in_pack(p, ds);
+}
+
+#define SHORT_NAME_NOT_FOUND (-1)
+#define SHORT_NAME_AMBIGUOUS (-2)
+
+static int finish_object_disambiguation(struct disambiguate_state *ds,
+ struct object_id *oid)
+{
+ if (ds->ambiguous)
+ return SHORT_NAME_AMBIGUOUS;
+
+ if (!ds->candidate_exists)
+ return SHORT_NAME_NOT_FOUND;
+
+ if (!ds->candidate_checked)
+ /*
+ * If this is the only candidate, there is no point
+ * calling the disambiguation hint callback.
+ *
+ * On the other hand, if the current candidate
+ * replaced an earlier candidate that did _not_ pass
+ * the disambiguation hint callback, then we do have
+ * more than one object that matches the short name
+ * given, so we should make sure this one matches;
+ * otherwise, if we discovered this one and the one
+ * that we previously discarded in the reverse order,
+ * we would end up showing different results in the
+ * same repository!
+ */
+ ds->candidate_ok = (!ds->disambiguate_fn_used ||
+ ds->fn(&ds->candidate, ds->cb_data));
+
+ if (!ds->candidate_ok)
+ return SHORT_NAME_AMBIGUOUS;
+
+ oidcpy(oid, &ds->candidate);
+ return 0;
+}
+
+static int disambiguate_commit_only(const struct object_id *oid, void *cb_data_unused)
+{
+ int kind = oid_object_info(the_repository, oid, NULL);
+ return kind == OBJ_COMMIT;
+}
+
+static int disambiguate_committish_only(const struct object_id *oid, void *cb_data_unused)
+{
+ struct object *obj;
+ int kind;
+
+ kind = oid_object_info(the_repository, oid, NULL);
+ if (kind == OBJ_COMMIT)
+ return 1;
+ if (kind != OBJ_TAG)
+ return 0;
+
+ /* We need to do this the hard way... */
+ obj = deref_tag(parse_object(oid), NULL, 0);
+ if (obj && obj->type == OBJ_COMMIT)
+ return 1;
+ return 0;
+}
+
+static int disambiguate_tree_only(const struct object_id *oid, void *cb_data_unused)
+{
+ int kind = oid_object_info(the_repository, oid, NULL);
+ return kind == OBJ_TREE;
+}
+
+static int disambiguate_treeish_only(const struct object_id *oid, void *cb_data_unused)
+{
+ struct object *obj;
+ int kind;
+
+ kind = oid_object_info(the_repository, oid, NULL);
+ if (kind == OBJ_TREE || kind == OBJ_COMMIT)
+ return 1;
+ if (kind != OBJ_TAG)
+ return 0;
+
+ /* We need to do this the hard way... */
+ obj = deref_tag(parse_object(oid), NULL, 0);
+ if (obj && (obj->type == OBJ_TREE || obj->type == OBJ_COMMIT))
+ return 1;
+ return 0;
+}
+
+static int disambiguate_blob_only(const struct object_id *oid, void *cb_data_unused)
+{
+ int kind = oid_object_info(the_repository, oid, NULL);
+ return kind == OBJ_BLOB;
+}
+
+static disambiguate_hint_fn default_disambiguate_hint;
+
+int set_disambiguate_hint_config(const char *var, const char *value)
+{
+ static const struct {
+ const char *name;
+ disambiguate_hint_fn fn;
+ } hints[] = {
+ { "none", NULL },
+ { "commit", disambiguate_commit_only },
+ { "committish", disambiguate_committish_only },
+ { "tree", disambiguate_tree_only },
+ { "treeish", disambiguate_treeish_only },
+ { "blob", disambiguate_blob_only }
+ };
+ int i;
+
+ if (!value)
+ return config_error_nonbool(var);
+
+ for (i = 0; i < ARRAY_SIZE(hints); i++) {
+ if (!strcasecmp(value, hints[i].name)) {
+ default_disambiguate_hint = hints[i].fn;
+ return 0;
+ }
+ }
+
+ return error("unknown hint type for '%s': %s", var, value);
+}
+
+static int init_object_disambiguation(const char *name, int len,
+ struct disambiguate_state *ds)
+{
+ int i;
+
+ if (len < MINIMUM_ABBREV || len > GIT_SHA1_HEXSZ)
+ return -1;
+
+ memset(ds, 0, sizeof(*ds));
+
+ for (i = 0; i < len ;i++) {
+ unsigned char c = name[i];
+ unsigned char val;
+ if (c >= '0' && c <= '9')
+ val = c - '0';
+ else if (c >= 'a' && c <= 'f')
+ val = c - 'a' + 10;
+ else if (c >= 'A' && c <='F') {
+ val = c - 'A' + 10;
+ c -= 'A' - 'a';
+ }
+ else
+ return -1;
+ ds->hex_pfx[i] = c;
+ if (!(i & 1))
+ val <<= 4;
+ ds->bin_pfx.hash[i >> 1] |= val;
+ }
+
+ ds->len = len;
+ ds->hex_pfx[len] = '\0';
+ prepare_alt_odb(the_repository);
+ return 0;
+}
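+/*
+ * Worked example (annotation): for name = "49aE8", hex_pfx becomes the
+ * lower-cased "49ae8" and the nibbles are packed as bin_pfx.hash[0] =
+ * 0x49, hash[1] = 0xae, hash[2] = 0x80; an odd-length prefix leaves
+ * the low nibble of its last byte zero.
+ */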
+
+static int show_ambiguous_object(const struct object_id *oid, void *data)
+{
+ const struct disambiguate_state *ds = data;
+ struct strbuf desc = STRBUF_INIT;
+ int type;
+
+ if (ds->fn && !ds->fn(oid, ds->cb_data))
+ return 0;
+
+ type = oid_object_info(the_repository, oid, NULL);
+ if (type == OBJ_COMMIT) {
+ struct commit *commit = lookup_commit(oid);
+ if (commit) {
+ struct pretty_print_context pp = {0};
+ pp.date_mode.type = DATE_SHORT;
+ format_commit_message(commit, " %ad - %s", &desc, &pp);
+ }
+ } else if (type == OBJ_TAG) {
+ struct tag *tag = lookup_tag(oid);
+ if (!parse_tag(tag) && tag->tag)
+ strbuf_addf(&desc, " %s", tag->tag);
+ }
+
+ advise(" %s %s%s",
+ find_unique_abbrev(oid, DEFAULT_ABBREV),
+ type_name(type) ? type_name(type) : "unknown type",
+ desc.buf);
+
+ strbuf_release(&desc);
+ return 0;
+}
+
+static int get_short_oid(const char *name, int len, struct object_id *oid,
+ unsigned flags)
+{
+ int status;
+ struct disambiguate_state ds;
+ int quietly = !!(flags & GET_OID_QUIETLY);
+
+ if (init_object_disambiguation(name, len, &ds) < 0)
+ return -1;
+
+ if (HAS_MULTI_BITS(flags & GET_OID_DISAMBIGUATORS))
+ BUG("multiple get_short_oid disambiguator flags");
+
+ if (flags & GET_OID_COMMIT)
+ ds.fn = disambiguate_commit_only;
+ else if (flags & GET_OID_COMMITTISH)
+ ds.fn = disambiguate_committish_only;
+ else if (flags & GET_OID_TREE)
+ ds.fn = disambiguate_tree_only;
+ else if (flags & GET_OID_TREEISH)
+ ds.fn = disambiguate_treeish_only;
+ else if (flags & GET_OID_BLOB)
+ ds.fn = disambiguate_blob_only;
+ else
+ ds.fn = default_disambiguate_hint;
+
+ find_short_object_filename(&ds);
+ find_short_packed_object(&ds);
+ status = finish_object_disambiguation(&ds, oid);
+
+ if (!quietly && (status == SHORT_NAME_AMBIGUOUS)) {
+ error(_("short SHA1 %s is ambiguous"), ds.hex_pfx);
+
+ /*
+ * We may still have ambiguity if we simply saw a series of
+ * candidates that did not satisfy our hint function. In
+ * that case, we still want to show them, so disable the hint
+ * function entirely.
+ */
+ if (!ds.ambiguous)
+ ds.fn = NULL;
+
+ advise(_("The candidates are:"));
+ for_each_abbrev(ds.hex_pfx, show_ambiguous_object, &ds);
+ }
+
+ return status;
+}
+
+static int collect_ambiguous(const struct object_id *oid, void *data)
+{
+ oid_array_append(data, oid);
+ return 0;
+}
+
+int for_each_abbrev(const char *prefix, each_abbrev_fn fn, void *cb_data)
+{
+ struct oid_array collect = OID_ARRAY_INIT;
+ struct disambiguate_state ds;
+ int ret;
+
+ if (init_object_disambiguation(prefix, strlen(prefix), &ds) < 0)
+ return -1;
+
+ ds.always_call_fn = 1;
+ ds.fn = collect_ambiguous;
+ ds.cb_data = &collect;
+ find_short_object_filename(&ds);
+ find_short_packed_object(&ds);
+
+ ret = oid_array_for_each_unique(&collect, fn, cb_data);
+ oid_array_clear(&collect);
+ return ret;
+}
+
+/*
+ * Return the slot of the most-significant bit set in "val". There are various
+ * ways to do this quickly with fls() or __builtin_clzl(), but speed is
+ * probably not a big deal here.
+ */
+static unsigned msb(unsigned long val)
+{
+ unsigned r = 0;
+ while (val >>= 1)
+ r++;
+ return r;
+}
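+/* Examples (annotation): msb(15) == 3 (0b1111), msb(16) == 4 (0b10000). */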
+
+struct min_abbrev_data {
+ unsigned int init_len;
+ unsigned int cur_len;
+ char *hex;
+ const struct object_id *oid;
+};
+
+static inline char get_hex_char_from_oid(const struct object_id *oid,
+ unsigned int pos)
+{
+ static const char hex[] = "0123456789abcdef";
+
+ if ((pos & 1) == 0)
+ return hex[oid->hash[pos >> 1] >> 4];
+ else
+ return hex[oid->hash[pos >> 1] & 0xf];
+}
+
+static int extend_abbrev_len(const struct object_id *oid, void *cb_data)
+{
+ struct min_abbrev_data *mad = cb_data;
+
+ unsigned int i = mad->init_len;
+ while (mad->hex[i] && mad->hex[i] == get_hex_char_from_oid(oid, i))
+ i++;
+
+ if (i < GIT_MAX_RAWSZ && i >= mad->cur_len)
+ mad->cur_len = i + 1;
+
+ return 0;
+}
+
+static void find_abbrev_len_for_pack(struct packed_git *p,
+ struct min_abbrev_data *mad)
+{
+ int match = 0;
+ uint32_t num, first = 0;
+ struct object_id oid;
+ const struct object_id *mad_oid;
+
+ if (open_pack_index(p) || !p->num_objects)
+ return;
+
+ num = p->num_objects;
+ mad_oid = mad->oid;
+ match = bsearch_pack(mad_oid, p, &first);
+
+ /*
+ * first is now the position in the packfile where we would insert
+ * mad->oid if it does not exist (or the position of mad->oid if
+ * it does exist). Hence, we consider a maximum of two objects
+ * nearby for the abbreviation length.
+ */
+ mad->init_len = 0;
+ if (!match) {
+ if (nth_packed_object_oid(&oid, p, first))
+ extend_abbrev_len(&oid, mad);
+ } else if (first < num - 1) {
+ if (nth_packed_object_oid(&oid, p, first + 1))
+ extend_abbrev_len(&oid, mad);
+ }
+ if (first > 0) {
+ if (nth_packed_object_oid(&oid, p, first - 1))
+ extend_abbrev_len(&oid, mad);
+ }
+ mad->init_len = mad->cur_len;
+}
+
+static void find_abbrev_len_packed(struct min_abbrev_data *mad)
+{
+ struct packed_git *p;
+
+ for (p = get_packed_git(the_repository); p; p = p->next)
+ find_abbrev_len_for_pack(p, mad);
+}
+
+int find_unique_abbrev_r(char *hex, const struct object_id *oid, int len)
+{
+ struct disambiguate_state ds;
+ struct min_abbrev_data mad;
+ struct object_id oid_ret;
+ if (len < 0) {
+ unsigned long count = approximate_object_count();
+ /*
+ * Add one because the MSB only tells us the highest bit set,
+ * not including the value of all the _other_ bits (so "15"
+ * is only one off of 2^4, but the MSB is the 3rd bit).
+ */
+ len = msb(count) + 1;
+ /*
+ * We now know we have on the order of 2^len objects, which
+ * expects a collision at 2^(len/2). But we also care about hex
+ * chars, not bits, and there are 4 bits per hex. So all
+ * together we need to divide by 2 and round up.
+ */
+ len = DIV_ROUND_UP(len, 2);
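+ /*
+ * Worked example (annotation): a repository with a million objects
+ * has msb(count) == 19, so len == 20 bits and DIV_ROUND_UP(20, 2)
+ * == 10 hex chars, i.e. a 40-bit name space whose birthday bound
+ * is roughly the 2^20 objects we have.
+ */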
+ /*
+ * For very small repos, we stick with our regular fallback.
+ */
+ if (len < FALLBACK_DEFAULT_ABBREV)
+ len = FALLBACK_DEFAULT_ABBREV;
+ }
+
+ oid_to_hex_r(hex, oid);
+ if (len == GIT_SHA1_HEXSZ || !len)
+ return GIT_SHA1_HEXSZ;
+
+ mad.init_len = len;
+ mad.cur_len = len;
+ mad.hex = hex;
+ mad.oid = oid;
+
+ find_abbrev_len_packed(&mad);
+
+ if (init_object_disambiguation(hex, mad.cur_len, &ds) < 0)
+ return -1;
+
+ ds.fn = extend_abbrev_len;
+ ds.always_call_fn = 1;
+ ds.cb_data = (void *)&mad;
+
+ find_short_object_filename(&ds);
+ (void)finish_object_disambiguation(&ds, &oid_ret);
+
+ hex[mad.cur_len] = 0;
+ return mad.cur_len;
+}
+
+const char *find_unique_abbrev(const struct object_id *oid, int len)
+{
+ static int bufno;
+ static char hexbuffer[4][GIT_MAX_HEXSZ + 1];
+ char *hex = hexbuffer[bufno];
+ bufno = (bufno + 1) % ARRAY_SIZE(hexbuffer);
+ find_unique_abbrev_r(hex, oid, len);
+ return hex;
+}
+
+static int ambiguous_path(const char *path, int len)
+{
+ int slash = 1;
+ int cnt;
+
+ for (cnt = 0; cnt < len; cnt++) {
+ switch (*path++) {
+ case '\0':
+ break;
+ case '/':
+ if (slash)
+ break;
+ slash = 1;
+ continue;
+ case '.':
+ continue;
+ default:
+ slash = 0;
+ continue;
+ }
+ break;
+ }
+ return slash;
+}
+
+static inline int at_mark(const char *string, int len,
+ const char **suffix, int nr)
+{
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ int suffix_len = strlen(suffix[i]);
+ if (suffix_len <= len
+ && !strncasecmp(string, suffix[i], suffix_len))
+ return suffix_len;
+ }
+ return 0;
+}
+
+static inline int upstream_mark(const char *string, int len)
+{
+ const char *suffix[] = { "@{upstream}", "@{u}" };
+ return at_mark(string, len, suffix, ARRAY_SIZE(suffix));
+}
+
+static inline int push_mark(const char *string, int len)
+{
+ const char *suffix[] = { "@{push}" };
+ return at_mark(string, len, suffix, ARRAY_SIZE(suffix));
+}
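+/*
+ * Example (annotation): for str = "master@{u}", the caller passes
+ * string = str + 6 and upstream_mark() returns 4, the length of the
+ * matched "@{u}" suffix; a non-match returns 0.
+ */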
+
+static int get_oid_1(const char *name, int len, struct object_id *oid, unsigned lookup_flags);
+static int interpret_nth_prior_checkout(const char *name, int namelen, struct strbuf *buf);
+
+static int get_oid_basic(const char *str, int len, struct object_id *oid,
+ unsigned int flags)
+{
+ static const char *warn_msg = "refname '%.*s' is ambiguous.";
+ static const char *object_name_msg = N_(
+ "Git normally never creates a ref that ends with 40 hex characters\n"
+ "because it will be ignored when you just specify 40-hex. These refs\n"
+ "may be created by mistake. For example,\n"
+ "\n"
+ " git checkout -b $br $(git rev-parse ...)\n"
+ "\n"
+ "where \"$br\" is somehow empty and a 40-hex ref is created. Please\n"
+ "examine these refs and maybe delete them. Turn this message off by\n"
+ "running \"git config advice.objectNameWarning false\"");
+ struct object_id tmp_oid;
+ char *real_ref = NULL;
+ int refs_found = 0;
+ int at, reflog_len, nth_prior = 0;
+
+ if (len == GIT_SHA1_HEXSZ && !get_oid_hex(str, oid)) {
+ if (warn_ambiguous_refs && warn_on_object_refname_ambiguity) {
+ refs_found = dwim_ref(str, len, &tmp_oid, &real_ref);
+ if (refs_found > 0) {
+ warning(warn_msg, len, str);
+ if (advice_object_name_warning)
+ fprintf(stderr, "%s\n", _(object_name_msg));
+ }
+ free(real_ref);
+ }
+ return 0;
+ }
+
+ /* basic@{time or number or -number} format to query ref-log */
+ reflog_len = at = 0;
+ if (len && str[len-1] == '}') {
+ for (at = len-4; at >= 0; at--) {
+ if (str[at] == '@' && str[at+1] == '{') {
+ if (str[at+2] == '-') {
+ if (at != 0)
+ /* @{-N} not at start */
+ return -1;
+ nth_prior = 1;
+ continue;
+ }
+ if (!upstream_mark(str + at, len - at) &&
+ !push_mark(str + at, len - at)) {
+ reflog_len = (len-1) - (at+2);
+ len = at;
+ }
+ break;
+ }
+ }
+ }
+
+ /* Accept only unambiguous ref paths. */
+ if (len && ambiguous_path(str, len))
+ return -1;
+
+ if (nth_prior) {
+ struct strbuf buf = STRBUF_INIT;
+ int detached;
+
+ if (interpret_nth_prior_checkout(str, len, &buf) > 0) {
+ detached = (buf.len == GIT_SHA1_HEXSZ && !get_oid_hex(buf.buf, oid));
+ strbuf_release(&buf);
+ if (detached)
+ return 0;
+ }
+ }
+
+ if (!len && reflog_len)
+ /* allow "@{...}" to mean the current branch reflog */
+ refs_found = dwim_ref("HEAD", 4, oid, &real_ref);
+ else if (reflog_len)
+ refs_found = dwim_log(str, len, oid, &real_ref);
+ else
+ refs_found = dwim_ref(str, len, oid, &real_ref);
+
+ if (!refs_found)
+ return -1;
+
+ if (warn_ambiguous_refs && !(flags & GET_OID_QUIETLY) &&
+ (refs_found > 1 ||
+ !get_short_oid(str, len, &tmp_oid, GET_OID_QUIETLY)))
+ warning(warn_msg, len, str);
+
+ if (reflog_len) {
+ int nth, i;
+ timestamp_t at_time;
+ timestamp_t co_time;
+ int co_tz, co_cnt;
+
+ /* Is it asking for N-th entry, or approxidate? */
+ for (i = nth = 0; 0 <= nth && i < reflog_len; i++) {
+ char ch = str[at+2+i];
+ if ('0' <= ch && ch <= '9')
+ nth = nth * 10 + ch - '0';
+ else
+ nth = -1;
+ }
+ if (100000000 <= nth) {
+ at_time = nth;
+ nth = -1;
+ } else if (0 <= nth)
+ at_time = 0;
+ else {
+ int errors = 0;
+ char *tmp = xstrndup(str + at + 2, reflog_len);
+ at_time = approxidate_careful(tmp, &errors);
+ free(tmp);
+ if (errors) {
+ free(real_ref);
+ return -1;
+ }
+ }
+ if (read_ref_at(real_ref, flags, at_time, nth, oid, NULL,
+ &co_time, &co_tz, &co_cnt)) {
+ if (!len) {
+ if (starts_with(real_ref, "refs/heads/")) {
+ str = real_ref + 11;
+ len = strlen(real_ref + 11);
+ } else {
+ /* detached HEAD */
+ str = "HEAD";
+ len = 4;
+ }
+ }
+ if (at_time) {
+ if (!(flags & GET_OID_QUIETLY)) {
+ warning("Log for '%.*s' only goes "
+ "back to %s.", len, str,
+ show_date(co_time, co_tz, DATE_MODE(RFC2822)));
+ }
+ } else {
+ if (flags & GET_OID_QUIETLY) {
+ exit(128);
+ }
+ die("Log for '%.*s' only has %d entries.",
+ len, str, co_cnt);
+ }
+ }
+ }
+
+ free(real_ref);
+ return 0;
+}
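/*
 * Examples of the syntax handled above (editorial):
 *   "master@{2}"        - second prior value of refs/heads/master
 *   "HEAD@{yesterday}"  - value of HEAD at an approxidate
 *   "@{-1}"             - the branch checked out before the current one
 *   "master@{upstream}" - excluded here; handled as an @-mark instead
 */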
+
+static int get_parent(const char *name, int len,
+ struct object_id *result, int idx)
+{
+ struct object_id oid;
+ int ret = get_oid_1(name, len, &oid, GET_OID_COMMITTISH);
+ struct commit *commit;
+ struct commit_list *p;
+
+ if (ret)
+ return ret;
+ commit = lookup_commit_reference(&oid);
+ if (parse_commit(commit))
+ return -1;
+ if (!idx) {
+ oidcpy(result, &commit->object.oid);
+ return 0;
+ }
+ p = commit->parents;
+ while (p) {
+ if (!--idx) {
+ oidcpy(result, &p->item->object.oid);
+ return 0;
+ }
+ p = p->next;
+ }
+ return -1;
+}
+
+static int get_nth_ancestor(const char *name, int len,
+ struct object_id *result, int generation)
+{
+ struct object_id oid;
+ struct commit *commit;
+ int ret;
+
+ ret = get_oid_1(name, len, &oid, GET_OID_COMMITTISH);
+ if (ret)
+ return ret;
+ commit = lookup_commit_reference(&oid);
+ if (!commit)
+ return -1;
+
+ while (generation--) {
+ if (parse_commit(commit) || !commit->parents)
+ return -1;
+ commit = commit->parents->item;
+ }
+ oidcpy(result, &commit->object.oid);
+ return 0;
+}
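/*
 * Editorial note on the two helpers above:
 *   "HEAD^2" -> get_parent(..., 2):       the second parent of a merge
 *   "HEAD~2" -> get_nth_ancestor(..., 2): the grandparent, following
 *                                         first parents only
 */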
+
+struct object *peel_to_type(const char *name, int namelen,
+ struct object *o, enum object_type expected_type)
+{
+ if (name && !namelen)
+ namelen = strlen(name);
+ while (1) {
+ if (!o || (!o->parsed && !parse_object(&o->oid)))
+ return NULL;
+ if (expected_type == OBJ_ANY || o->type == expected_type)
+ return o;
+ if (o->type == OBJ_TAG)
+ o = ((struct tag*) o)->tagged;
+ else if (o->type == OBJ_COMMIT)
+ o = &(get_commit_tree(((struct commit *)o))->object);
+ else {
+ if (name)
+ error("%.*s: expected %s type, but the object "
+ "dereferences to %s type",
+ namelen, name, type_name(expected_type),
+ type_name(o->type));
+ return NULL;
+ }
+ }
+}
+
+static int peel_onion(const char *name, int len, struct object_id *oid,
+ unsigned lookup_flags)
+{
+ struct object_id outer;
+ const char *sp;
+ unsigned int expected_type = 0;
+ struct object *o;
+
+ /*
+ * "ref^{type}" dereferences ref repeatedly until you cannot
+ * dereference anymore, or you get an object of given type,
+ * whichever comes first. "ref^{}" means just dereference
+ * tags until you get a non-tag. "ref^0" is a shorthand for
+ * "ref^{commit}". "commit^{tree}" could be used to find the
+ * top-level tree of the given commit.
+ */
+ if (len < 4 || name[len-1] != '}')
+ return -1;
+
+ for (sp = name + len - 1; name <= sp; sp--) {
+ int ch = *sp;
+ if (ch == '{' && name < sp && sp[-1] == '^')
+ break;
+ }
+ if (sp <= name)
+ return -1;
+
+ sp++; /* beginning of type name, or closing brace for empty */
+ if (starts_with(sp, "commit}"))
+ expected_type = OBJ_COMMIT;
+ else if (starts_with(sp, "tag}"))
+ expected_type = OBJ_TAG;
+ else if (starts_with(sp, "tree}"))
+ expected_type = OBJ_TREE;
+ else if (starts_with(sp, "blob}"))
+ expected_type = OBJ_BLOB;
+ else if (starts_with(sp, "object}"))
+ expected_type = OBJ_ANY;
+ else if (sp[0] == '}')
+ expected_type = OBJ_NONE;
+ else if (sp[0] == '/')
+ expected_type = OBJ_COMMIT;
+ else
+ return -1;
+
+ lookup_flags &= ~GET_OID_DISAMBIGUATORS;
+ if (expected_type == OBJ_COMMIT)
+ lookup_flags |= GET_OID_COMMITTISH;
+ else if (expected_type == OBJ_TREE)
+ lookup_flags |= GET_OID_TREEISH;
+
+ if (get_oid_1(name, sp - name - 2, &outer, lookup_flags))
+ return -1;
+
+ o = parse_object(&outer);
+ if (!o)
+ return -1;
+ if (!expected_type) {
+ o = deref_tag(o, name, sp - name - 2);
+ if (!o || (!o->parsed && !parse_object(&o->oid)))
+ return -1;
+ oidcpy(oid, &o->oid);
+ return 0;
+ }
+
+ /*
+ * At this point, the syntax looks correct, so
+ * if we do not get the needed object, we should
+ * barf.
+ */
+ o = peel_to_type(name, len, o, expected_type);
+ if (!o)
+ return -1;
+
+ oidcpy(oid, &o->oid);
+ if (sp[0] == '/') {
+ /* "$commit^{/foo}" */
+ char *prefix;
+ int ret;
+ struct commit_list *list = NULL;
+
+ /*
+ * $commit^{/}. Some regex implementations may reject an
+ * empty pattern. We don't need regex anyway; '' always matches.
+ */
+ if (sp[1] == '}')
+ return 0;
+
+ prefix = xstrndup(sp + 1, name + len - 1 - (sp + 1));
+ commit_list_insert((struct commit *)o, &list);
+ ret = get_oid_oneline(prefix, oid, list);
+ free(prefix);
+ return ret;
+ }
+ return 0;
+}
+
+static int get_describe_name(const char *name, int len, struct object_id *oid)
+{
+ const char *cp;
+ unsigned flags = GET_OID_QUIETLY | GET_OID_COMMIT;
+
+ for (cp = name + len - 1; name + 2 <= cp; cp--) {
+ char ch = *cp;
+ if (!isxdigit(ch)) {
+ /* We must be looking at g in "SOMETHING-g"
+ * for it to be describe output.
+ */
+ if (ch == 'g' && cp[-1] == '-') {
+ cp++;
+ len -= cp - name;
+ return get_short_oid(cp, len, oid, flags);
+ }
+ }
+ }
+ return -1;
+}
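/*
 * Example (editorial): for describe output such as
 * "v2.16.0-rc0-21-g1234abc" the scan above locates the "-g" marker and
 * resolves the trailing "1234abc" as an abbreviated commit-ish name.
 */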
+
+static int get_oid_1(const char *name, int len, struct object_id *oid, unsigned lookup_flags)
+{
+ int ret, has_suffix;
+ const char *cp;
+
+ /*
+ * "name~3" is "name^^^", "name~" is "name~1", and "name^" is "name^1".
+ */
+ has_suffix = 0;
+ for (cp = name + len - 1; name <= cp; cp--) {
+ int ch = *cp;
+ if ('0' <= ch && ch <= '9')
+ continue;
+ if (ch == '~' || ch == '^')
+ has_suffix = ch;
+ break;
+ }
+
+ if (has_suffix) {
+ int num = 0;
+ int len1 = cp - name;
+ cp++;
+ while (cp < name + len)
+ num = num * 10 + *cp++ - '0';
+ if (!num && len1 == len - 1)
+ num = 1;
+ if (has_suffix == '^')
+ return get_parent(name, len1, oid, num);
+ /* else if (has_suffix == '~') -- goes without saying */
+ return get_nth_ancestor(name, len1, oid, num);
+ }
+
+ ret = peel_onion(name, len, oid, lookup_flags);
+ if (!ret)
+ return 0;
+
+ ret = get_oid_basic(name, len, oid, lookup_flags);
+ if (!ret)
+ return 0;
+
+ /* It could be describe output that is "SOMETHING-gXXXX" */
+ ret = get_describe_name(name, len, oid);
+ if (!ret)
+ return 0;
+
+ return get_short_oid(name, len, oid, lookup_flags);
+}
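/*
 * Editorial summary of the fallback chain above; a name with no ^/~
 * suffix is tried, in order, as:
 *   1. a peel expression  ("v2.16.0^{commit}")    -> peel_onion()
 *   2. a ref/reflog name  ("master", "HEAD@{1}")  -> get_oid_basic()
 *   3. describe output    ("v2.15.0-39-g1234abc") -> get_describe_name()
 *   4. an abbreviated OID ("1234abc")             -> get_short_oid()
 */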
+
+/*
+ * This interprets names like ':/Initial revision of "git"' by searching
+ * through history and returning the first commit whose message matches
+ * the given regular expression.
+ *
+ * For negative-matching, prefix the pattern-part with '!-', like: ':/!-WIP'.
+ *
+ * For a literal '!' character at the beginning of a pattern, you have to repeat
+ * that, like: ':/!!foo'
+ *
+ * For future extension, all other sequences beginning with ':/!' are reserved.
+ */
+
+/* Remember to update object flag allocation in object.h */
+#define ONELINE_SEEN (1u<<20)
+
+static int handle_one_ref(const char *path, const struct object_id *oid,
+ int flag, void *cb_data)
+{
+ struct commit_list **list = cb_data;
+ struct object *object = parse_object(oid);
+ if (!object)
+ return 0;
+ if (object->type == OBJ_TAG) {
+ object = deref_tag(object, path, strlen(path));
+ if (!object)
+ return 0;
+ }
+ if (object->type != OBJ_COMMIT)
+ return 0;
+ commit_list_insert((struct commit *)object, list);
+ return 0;
+}
+
+static int get_oid_oneline(const char *prefix, struct object_id *oid,
+ struct commit_list *list)
+{
+ struct commit_list *backup = NULL, *l;
+ int found = 0;
+ int negative = 0;
+ regex_t regex;
+
+ if (prefix[0] == '!') {
+ prefix++;
+
+ if (prefix[0] == '-') {
+ prefix++;
+ negative = 1;
+ } else if (prefix[0] != '!') {
+ return -1;
+ }
+ }
+
+ if (regcomp(&regex, prefix, REG_EXTENDED))
+ return -1;
+
+ for (l = list; l; l = l->next) {
+ l->item->object.flags |= ONELINE_SEEN;
+ commit_list_insert(l->item, &backup);
+ }
+ while (list) {
+ const char *p, *buf;
+ struct commit *commit;
+ int matches;
+
+ commit = pop_most_recent_commit(&list, ONELINE_SEEN);
+ if (!parse_object(&commit->object.oid))
+ continue;
+ buf = get_commit_buffer(commit, NULL);
+ p = strstr(buf, "\n\n");
+ matches = negative ^ (p && !regexec(&regex, p + 2, 0, NULL, 0));
+ unuse_commit_buffer(commit, buf);
+
+ if (matches) {
+ oidcpy(oid, &commit->object.oid);
+ found = 1;
+ break;
+ }
+ }
+ regfree(&regex);
+ free_commit_list(list);
+ for (l = backup; l; l = l->next)
+ clear_commit_marks(l->item, ONELINE_SEEN);
+ free_commit_list(backup);
+ return found ? 0 : -1;
+}
+
+struct grab_nth_branch_switch_cbdata {
+ int remaining;
+ struct strbuf buf;
+};
+
+static int grab_nth_branch_switch(struct object_id *ooid, struct object_id *noid,
+ const char *email, timestamp_t timestamp, int tz,
+ const char *message, void *cb_data)
+{
+ struct grab_nth_branch_switch_cbdata *cb = cb_data;
+ const char *match = NULL, *target = NULL;
+ size_t len;
+
+ if (skip_prefix(message, "checkout: moving from ", &match))
+ target = strstr(match, " to ");
+
+ if (!match || !target)
+ return 0;
+ if (--(cb->remaining) == 0) {
+ len = target - match;
+ strbuf_reset(&cb->buf);
+ strbuf_add(&cb->buf, match, len);
+ return 1; /* we are done */
+ }
+ return 0;
+}
+
+/*
+ * Parse @{-N} syntax, return the number of characters parsed
+ * if successful; otherwise signal an error with a negative value.
+ */
+static int interpret_nth_prior_checkout(const char *name, int namelen,
+ struct strbuf *buf)
+{
+ long nth;
+ int retval;
+ struct grab_nth_branch_switch_cbdata cb;
+ const char *brace;
+ char *num_end;
+
+ if (namelen < 4)
+ return -1;
+ if (name[0] != '@' || name[1] != '{' || name[2] != '-')
+ return -1;
+ brace = memchr(name, '}', namelen);
+ if (!brace)
+ return -1;
+ nth = strtol(name + 3, &num_end, 10);
+ if (num_end != brace)
+ return -1;
+ if (nth <= 0)
+ return -1;
+ cb.remaining = nth;
+ strbuf_init(&cb.buf, 20);
+
+ retval = 0;
+ if (0 < for_each_reflog_ent_reverse("HEAD", grab_nth_branch_switch, &cb)) {
+ strbuf_reset(buf);
+ strbuf_addbuf(buf, &cb.buf);
+ retval = brace - name + 1;
+ }
+
+ strbuf_release(&cb.buf);
+ return retval;
+}
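/*
 * Example (editorial): after "git checkout master" while on "topic",
 * interpret_nth_prior_checkout("@{-1}", 5, &buf) finds the reflog
 * entry "checkout: moving from topic to master", writes "topic" into
 * buf and returns 5, the number of characters consumed.
 */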
+
+int get_oid_mb(const char *name, struct object_id *oid)
+{
+ struct commit *one, *two;
+ struct commit_list *mbs;
+ struct object_id oid_tmp;
+ const char *dots;
+ int st;
+
+ dots = strstr(name, "...");
+ if (!dots)
+ return get_oid(name, oid);
+ if (dots == name)
+ st = get_oid("HEAD", &oid_tmp);
+ else {
+ struct strbuf sb;
+ strbuf_init(&sb, dots - name);
+ strbuf_add(&sb, name, dots - name);
+ st = get_oid_committish(sb.buf, &oid_tmp);
+ strbuf_release(&sb);
+ }
+ if (st)
+ return st;
+ one = lookup_commit_reference_gently(&oid_tmp, 0);
+ if (!one)
+ return -1;
+
+ if (get_oid_committish(dots[3] ? (dots + 3) : "HEAD", &oid_tmp))
+ return -1;
+ two = lookup_commit_reference_gently(&oid_tmp, 0);
+ if (!two)
+ return -1;
+ mbs = get_merge_bases(one, two);
+ if (!mbs || mbs->next)
+ st = -1;
+ else {
+ st = 0;
+ oidcpy(oid, &mbs->item->object.oid);
+ }
+ free_commit_list(mbs);
+ return st;
+}
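/*
 * Example (editorial): get_oid_mb("maint...master", &oid) resolves
 * both ends as committishes and succeeds only if they have exactly
 * one merge base; an empty side, as in "...master", defaults to HEAD.
 */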
+
+/* parse @something syntax, when 'something' is not {.*} */
+static int interpret_empty_at(const char *name, int namelen, int len, struct strbuf *buf)
+{
+ const char *next;
+
+ if (len || name[1] == '{')
+ return -1;
+
+ /* make sure it's a single @, or @@{.*}, not @foo */
+ next = memchr(name + len + 1, '@', namelen - len - 1);
+ if (next && next[1] != '{')
+ return -1;
+ if (!next)
+ next = name + namelen;
+ if (next != name + 1)
+ return -1;
+
+ strbuf_reset(buf);
+ strbuf_add(buf, "HEAD", 4);
+ return 1;
+}
+
+static int reinterpret(const char *name, int namelen, int len,
+ struct strbuf *buf, unsigned allowed)
+{
+ /* we have extra data, which might need further processing */
+ struct strbuf tmp = STRBUF_INIT;
+ int used = buf->len;
+ int ret;
+
+ strbuf_add(buf, name + len, namelen - len);
+ ret = interpret_branch_name(buf->buf, buf->len, &tmp, allowed);
+ /* that data was not interpreted, remove our cruft */
+ if (ret < 0) {
+ strbuf_setlen(buf, used);
+ return len;
+ }
+ strbuf_reset(buf);
+ strbuf_addbuf(buf, &tmp);
+ strbuf_release(&tmp);
+ /* tweak for size of {-N} versus expanded ref name */
+ return ret - used + len;
+}
+
+static void set_shortened_ref(struct strbuf *buf, const char *ref)
+{
+ char *s = shorten_unambiguous_ref(ref, 0);
+ strbuf_reset(buf);
+ strbuf_addstr(buf, s);
+ free(s);
+}
+
+static int branch_interpret_allowed(const char *refname, unsigned allowed)
+{
+ if (!allowed)
+ return 1;
+
+ if ((allowed & INTERPRET_BRANCH_LOCAL) &&
+ starts_with(refname, "refs/heads/"))
+ return 1;
+ if ((allowed & INTERPRET_BRANCH_REMOTE) &&
+ starts_with(refname, "refs/remotes/"))
+ return 1;
+
+ return 0;
+}
+
+static int interpret_branch_mark(const char *name, int namelen,
+ int at, struct strbuf *buf,
+ int (*get_mark)(const char *, int),
+ const char *(*get_data)(struct branch *,
+ struct strbuf *),
+ unsigned allowed)
+{
+ int len;
+ struct branch *branch;
+ struct strbuf err = STRBUF_INIT;
+ const char *value;
+
+ len = get_mark(name + at, namelen - at);
+ if (!len)
+ return -1;
+
+ if (memchr(name, ':', at))
+ return -1;
+
+ if (at) {
+ char *name_str = xmemdupz(name, at);
+ branch = branch_get(name_str);
+ free(name_str);
+ } else
+ branch = branch_get(NULL);
+
+ value = get_data(branch, &err);
+ if (!value)
+ die("%s", err.buf);
+
+ if (!branch_interpret_allowed(value, allowed))
+ return -1;
+
+ set_shortened_ref(buf, value);
+ return len + at;
+}
+
+int interpret_branch_name(const char *name, int namelen, struct strbuf *buf,
+ unsigned allowed)
+{
+ char *at;
+ const char *start;
+ int len;
+
+ if (!namelen)
+ namelen = strlen(name);
+
+ if (!allowed || (allowed & INTERPRET_BRANCH_LOCAL)) {
+ len = interpret_nth_prior_checkout(name, namelen, buf);
+ if (!len) {
+ return len; /* syntax Ok, not enough switches */
+ } else if (len > 0) {
+ if (len == namelen)
+ return len; /* consumed all */
+ else
+ return reinterpret(name, namelen, len, buf, allowed);
+ }
+ }
+
+ for (start = name;
+ (at = memchr(start, '@', namelen - (start - name)));
+ start = at + 1) {
+
+ if (!allowed || (allowed & INTERPRET_BRANCH_HEAD)) {
+ len = interpret_empty_at(name, namelen, at - name, buf);
+ if (len > 0)
+ return reinterpret(name, namelen, len, buf,
+ allowed);
+ }
+
+ len = interpret_branch_mark(name, namelen, at - name, buf,
+ upstream_mark, branch_get_upstream,
+ allowed);
+ if (len > 0)
+ return len;
+
+ len = interpret_branch_mark(name, namelen, at - name, buf,
+ push_mark, branch_get_push,
+ allowed);
+ if (len > 0)
+ return len;
+ }
+
+ return -1;
+}
+
+void strbuf_branchname(struct strbuf *sb, const char *name, unsigned allowed)
+{
+ int len = strlen(name);
+ int used = interpret_branch_name(name, len, sb, allowed);
+
+ if (used < 0)
+ used = 0;
+ strbuf_add(sb, name + used, len - used);
+}
+
+int strbuf_check_branch_ref(struct strbuf *sb, const char *name)
+{
+ if (startup_info->have_repository)
+ strbuf_branchname(sb, name, INTERPRET_BRANCH_LOCAL);
+ else
+ strbuf_addstr(sb, name);
+
+ /*
+ * This splice must be done even if we end up rejecting the
+ * name; builtin/branch.c::copy_or_rename_branch() still wants
+ * to see what the name expanded to so that "branch -m" can be
+ * used as a tool to correct earlier mistakes.
+ */
+ strbuf_splice(sb, 0, 0, "refs/heads/", 11);
+
+ if (*name == '-' ||
+ !strcmp(sb->buf, "refs/heads/HEAD"))
+ return -1;
+
+ return check_refname_format(sb->buf, 0);
+}
+
+/*
+ * This is like "get_oid_basic()", except it allows "object ID expressions",
+ * notably "xyz^" for "parent of xyz".
+ */
+int get_oid(const char *name, struct object_id *oid)
+{
+ struct object_context unused;
+ return get_oid_with_context(name, 0, oid, &unused);
+}
+
+
+/*
+ * Many callers know that the user meant to name a commit-ish by
+ * syntactical positions where the object name appears. Calling this
+ * function allows the machinery to disambiguate shorter-than-unique
+ * abbreviated object names between commit-ish and others.
+ *
+ * Note that this does NOT error out when the named object is not a
+ * commit-ish. It is merely to give a hint to the disambiguation
+ * machinery.
+ */
+int get_oid_committish(const char *name, struct object_id *oid)
+{
+ struct object_context unused;
+ return get_oid_with_context(name, GET_OID_COMMITTISH,
+ oid, &unused);
+}
+
+int get_oid_treeish(const char *name, struct object_id *oid)
+{
+ struct object_context unused;
+ return get_oid_with_context(name, GET_OID_TREEISH,
+ oid, &unused);
+}
+
+int get_oid_commit(const char *name, struct object_id *oid)
+{
+ struct object_context unused;
+ return get_oid_with_context(name, GET_OID_COMMIT,
+ oid, &unused);
+}
+
+int get_oid_tree(const char *name, struct object_id *oid)
+{
+ struct object_context unused;
+ return get_oid_with_context(name, GET_OID_TREE,
+ oid, &unused);
+}
+
+int get_oid_blob(const char *name, struct object_id *oid)
+{
+ struct object_context unused;
+ return get_oid_with_context(name, GET_OID_BLOB,
+ oid, &unused);
+}
+
+/* Must be called only when object_name:filename doesn't exist. */
+static void diagnose_invalid_oid_path(const char *prefix,
+ const char *filename,
+ const struct object_id *tree_oid,
+ const char *object_name,
+ int object_name_len)
+{
+ struct object_id oid;
+ unsigned mode;
+
+ if (!prefix)
+ prefix = "";
+
+ if (file_exists(filename))
+ die("Path '%s' exists on disk, but not in '%.*s'.",
+ filename, object_name_len, object_name);
+ if (is_missing_file_error(errno)) {
+ char *fullname = xstrfmt("%s%s", prefix, filename);
+
+ if (!get_tree_entry(tree_oid, fullname, &oid, &mode)) {
+ die("Path '%s' exists, but not '%s'.\n"
+ "Did you mean '%.*s:%s' aka '%.*s:./%s'?",
+ fullname,
+ filename,
+ object_name_len, object_name,
+ fullname,
+ object_name_len, object_name,
+ filename);
+ }
+ die("Path '%s' does not exist in '%.*s'",
+ filename, object_name_len, object_name);
+ }
+}
+
+/* Must be called only when :stage:filename doesn't exist. */
+static void diagnose_invalid_index_path(int stage,
+ const char *prefix,
+ const char *filename)
+{
+ const struct cache_entry *ce;
+ int pos;
+ unsigned namelen = strlen(filename);
+ struct strbuf fullname = STRBUF_INIT;
+
+ if (!prefix)
+ prefix = "";
+
+ /* Wrong stage number? */
+ pos = cache_name_pos(filename, namelen);
+ if (pos < 0)
+ pos = -pos - 1;
+ if (pos < active_nr) {
+ ce = active_cache[pos];
+ if (ce_namelen(ce) == namelen &&
+ !memcmp(ce->name, filename, namelen))
+ die("Path '%s' is in the index, but not at stage %d.\n"
+ "Did you mean ':%d:%s'?",
+ filename, stage,
+ ce_stage(ce), filename);
+ }
+
+ /* Confusion between relative and absolute filenames? */
+ strbuf_addstr(&fullname, prefix);
+ strbuf_addstr(&fullname, filename);
+ pos = cache_name_pos(fullname.buf, fullname.len);
+ if (pos < 0)
+ pos = -pos - 1;
+ if (pos < active_nr) {
+ ce = active_cache[pos];
+ if (ce_namelen(ce) == fullname.len &&
+ !memcmp(ce->name, fullname.buf, fullname.len))
+ die("Path '%s' is in the index, but not '%s'.\n"
+ "Did you mean ':%d:%s' aka ':%d:./%s'?",
+ fullname.buf, filename,
+ ce_stage(ce), fullname.buf,
+ ce_stage(ce), filename);
+ }
+
+ if (file_exists(filename))
+ die("Path '%s' exists on disk, but not in the index.", filename);
+ if (is_missing_file_error(errno))
+ die("Path '%s' does not exist (neither on disk nor in the index).",
+ filename);
+
+ strbuf_release(&fullname);
+}
+
+
+static char *resolve_relative_path(const char *rel)
+{
+ if (!starts_with(rel, "./") && !starts_with(rel, "../"))
+ return NULL;
+
+ if (!is_inside_work_tree())
+ die("relative path syntax can't be used outside working tree.");
+
+ /* die() inside prefix_path() if resolved path is outside worktree */
+ return prefix_path(startup_info->prefix,
+ startup_info->prefix ? strlen(startup_info->prefix) : 0,
+ rel);
+}
+
+static int get_oid_with_context_1(const char *name,
+ unsigned flags,
+ const char *prefix,
+ struct object_id *oid,
+ struct object_context *oc)
+{
+ int ret, bracket_depth;
+ int namelen = strlen(name);
+ const char *cp;
+ int only_to_die = flags & GET_OID_ONLY_TO_DIE;
+
+ if (only_to_die)
+ flags |= GET_OID_QUIETLY;
+
+ memset(oc, 0, sizeof(*oc));
+ oc->mode = S_IFINVALID;
+ strbuf_init(&oc->symlink_path, 0);
+ ret = get_oid_1(name, namelen, oid, flags);
+ if (!ret)
+ return ret;
+ /*
+ * sha1:path -> object name of path in the tree-ish sha1
+ * :path -> object name of absolute path in index
+ * :./path -> object name of path relative to cwd in index
+ * :[0-3]:path -> object name of path in index at stage
+ * :/foo -> recent commit matching foo
+ */
+ if (name[0] == ':') {
+ int stage = 0;
+ const struct cache_entry *ce;
+ char *new_path = NULL;
+ int pos;
+ if (!only_to_die && namelen > 2 && name[1] == '/') {
+ struct commit_list *list = NULL;
+
+ for_each_ref(handle_one_ref, &list);
+ commit_list_sort_by_date(&list);
+ return get_oid_oneline(name + 2, oid, list);
+ }
+ if (namelen < 3 ||
+ name[2] != ':' ||
+ name[1] < '0' || '3' < name[1])
+ cp = name + 1;
+ else {
+ stage = name[1] - '0';
+ cp = name + 3;
+ }
+ new_path = resolve_relative_path(cp);
+ if (!new_path) {
+ namelen = namelen - (cp - name);
+ } else {
+ cp = new_path;
+ namelen = strlen(cp);
+ }
+
+ if (flags & GET_OID_RECORD_PATH)
+ oc->path = xstrdup(cp);
+
+ if (!active_cache)
+ read_cache();
+ pos = cache_name_pos(cp, namelen);
+ if (pos < 0)
+ pos = -pos - 1;
+ while (pos < active_nr) {
+ ce = active_cache[pos];
+ if (ce_namelen(ce) != namelen ||
+ memcmp(ce->name, cp, namelen))
+ break;
+ if (ce_stage(ce) == stage) {
+ oidcpy(oid, &ce->oid);
+ oc->mode = ce->ce_mode;
+ free(new_path);
+ return 0;
+ }
+ pos++;
+ }
+ if (only_to_die && name[1] && name[1] != '/')
+ diagnose_invalid_index_path(stage, prefix, cp);
+ free(new_path);
+ return -1;
+ }
+ for (cp = name, bracket_depth = 0; *cp; cp++) {
+ if (*cp == '{')
+ bracket_depth++;
+ else if (bracket_depth && *cp == '}')
+ bracket_depth--;
+ else if (!bracket_depth && *cp == ':')
+ break;
+ }
+ if (*cp == ':') {
+ struct object_id tree_oid;
+ int len = cp - name;
+ unsigned sub_flags = flags;
+
+ sub_flags &= ~GET_OID_DISAMBIGUATORS;
+ sub_flags |= GET_OID_TREEISH;
+
+ if (!get_oid_1(name, len, &tree_oid, sub_flags)) {
+ const char *filename = cp+1;
+ char *new_filename = NULL;
+
+ new_filename = resolve_relative_path(filename);
+ if (new_filename)
+ filename = new_filename;
+ if (flags & GET_OID_FOLLOW_SYMLINKS) {
- hashcpy(oc->tree, tree_oid.hash);
+ ret = get_tree_entry_follow_symlinks(&tree_oid,
+ filename, oid, &oc->symlink_path,
+ &oc->mode);
+ } else {
+ ret = get_tree_entry(&tree_oid, filename, oid,
+ &oc->mode);
+ if (ret && only_to_die) {
+ diagnose_invalid_oid_path(prefix,
+ filename,
+ &tree_oid,
+ name, len);
+ }
+ }
+ if (flags & GET_OID_RECORD_PATH)
+ oc->path = xstrdup(filename);
+
+ free(new_filename);
+ return ret;
+ } else {
+ if (only_to_die)
+ die("Invalid object name '%.*s'.", len, name);
+ }
+ }
+ return ret;
+}
+
+/*
+ * Call this function when you know "name" given by the end user must
+ * name an object but it doesn't; the function _may_ die with a better
+ * diagnostic message than "no such object 'name'", e.g. "Path 'doc' does not
+ * exist in 'HEAD'" when given "HEAD:doc", or it may return, in which case
+ * you have a chance to diagnose the error further.
+ */
+void maybe_die_on_misspelt_object_name(const char *name, const char *prefix)
+{
+ struct object_context oc;
+ struct object_id oid;
+ get_oid_with_context_1(name, GET_OID_ONLY_TO_DIE, prefix, &oid, &oc);
+}
+
+int get_oid_with_context(const char *str, unsigned flags, struct object_id *oid, struct object_context *oc)
+{
+ if (flags & GET_OID_FOLLOW_SYMLINKS && flags & GET_OID_ONLY_TO_DIE)
+ BUG("incompatible flags for get_sha1_with_context");
+ return get_oid_with_context_1(str, flags, NULL, oid, oc);
+}
const struct submodule_entry *b = entry_or_key;
return strcmp(a->config->path, b->config->path) ||
- hashcmp(a->config->gitmodules_sha1, b->config->gitmodules_sha1);
+ oidcmp(&a->config->gitmodules_oid, &b->config->gitmodules_oid);
}
static int config_name_cmp(const void *unused_cmp_data,
const struct submodule_entry *b = entry_or_key;
return strcmp(a->config->name, b->config->name) ||
- hashcmp(a->config->gitmodules_sha1, b->config->gitmodules_sha1);
+ oidcmp(&a->config->gitmodules_oid, &b->config->gitmodules_oid);
}
static struct submodule_cache *submodule_cache_alloc(void)
free(cache);
}
- static unsigned int hash_sha1_string(const unsigned char *sha1,
- const char *string)
+ static unsigned int hash_oid_string(const struct object_id *oid,
+ const char *string)
{
- return memhash(sha1, 20) + strhash(string);
+ return memhash(oid->hash, the_hash_algo->rawsz) + strhash(string);
}
static void cache_put_path(struct submodule_cache *cache,
struct submodule *submodule)
{
- unsigned int hash = hash_sha1_string(submodule->gitmodules_sha1,
- submodule->path);
+ unsigned int hash = hash_oid_string(&submodule->gitmodules_oid,
+ submodule->path);
struct submodule_entry *e = xmalloc(sizeof(*e));
hashmap_entry_init(e, hash);
e->config = submodule;
static void cache_remove_path(struct submodule_cache *cache,
struct submodule *submodule)
{
- unsigned int hash = hash_sha1_string(submodule->gitmodules_sha1,
- submodule->path);
+ unsigned int hash = hash_oid_string(&submodule->gitmodules_oid,
+ submodule->path);
struct submodule_entry e;
struct submodule_entry *removed;
hashmap_entry_init(&e, hash);
static void cache_add(struct submodule_cache *cache,
struct submodule *submodule)
{
- unsigned int hash = hash_sha1_string(submodule->gitmodules_sha1,
- submodule->name);
+ unsigned int hash = hash_oid_string(&submodule->gitmodules_oid,
+ submodule->name);
struct submodule_entry *e = xmalloc(sizeof(*e));
hashmap_entry_init(e, hash);
e->config = submodule;
}
static const struct submodule *cache_lookup_path(struct submodule_cache *cache,
- const unsigned char *gitmodules_sha1, const char *path)
+ const struct object_id *gitmodules_oid, const char *path)
{
struct submodule_entry *entry;
- unsigned int hash = hash_sha1_string(gitmodules_sha1, path);
+ unsigned int hash = hash_oid_string(gitmodules_oid, path);
struct submodule_entry key;
struct submodule key_config;
- hashcpy(key_config.gitmodules_sha1, gitmodules_sha1);
+ oidcpy(&key_config.gitmodules_oid, gitmodules_oid);
key_config.path = path;
hashmap_entry_init(&key, hash);
}
static struct submodule *cache_lookup_name(struct submodule_cache *cache,
- const unsigned char *gitmodules_sha1, const char *name)
+ const struct object_id *gitmodules_oid, const char *name)
{
struct submodule_entry *entry;
- unsigned int hash = hash_sha1_string(gitmodules_sha1, name);
+ unsigned int hash = hash_oid_string(gitmodules_oid, name);
struct submodule_entry key;
struct submodule key_config;
- hashcpy(key_config.gitmodules_sha1, gitmodules_sha1);
+ oidcpy(&key_config.gitmodules_oid, gitmodules_oid);
key_config.name = name;
hashmap_entry_init(&key, hash);
return NULL;
}
+int check_submodule_name(const char *name)
+{
+ /* Disallow empty names */
+ if (!*name)
+ return -1;
+
+ /*
+ * Look for '..' as a path component. Check both '/' and '\\' as
+ * separators rather than is_dir_sep(), because we want the name rules
+ * to be consistent across platforms.
+ */
+ goto in_component; /* always start inside component */
+ while (*name) {
+ char c = *name++;
+ if (c == '/' || c == '\\') {
+in_component:
+ if (name[0] == '.' && name[1] == '.' &&
+ (!name[2] || name[2] == '/' || name[2] == '\\'))
+ return -1;
+ }
+ }
+
+ return 0;
+}
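/*
 * Examples (editorial) of names the check above rejects:
 *   ""           - empty
 *   ".."         - sole component is dot-dot
 *   "../foo"     - leading dot-dot component
 *   "foo/../bar" - embedded dot-dot, '/' separator
 *   "foo\..\bar" - embedded dot-dot, '\' separator
 * while ordinary names such as "foo.bar" or "foo..bar" pass.
 */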
+
static int name_and_item_from_var(const char *var, struct strbuf *name,
struct strbuf *item)
{
return 0;
strbuf_add(name, subsection, subsection_len);
+ if (check_submodule_name(name->buf) < 0) {
+ warning(_("ignoring suspicious submodule name: %s"), name->buf);
+ strbuf_release(name);
+ return 0;
+ }
+
strbuf_addstr(item, key);
return 1;
}
static struct submodule *lookup_or_create_by_name(struct submodule_cache *cache,
- const unsigned char *gitmodules_sha1, const char *name)
+ const struct object_id *gitmodules_oid, const char *name)
{
struct submodule *submodule;
struct strbuf name_buf = STRBUF_INIT;
- submodule = cache_lookup_name(cache, gitmodules_sha1, name);
+ submodule = cache_lookup_name(cache, gitmodules_oid, name);
if (submodule)
return submodule;
submodule->branch = NULL;
submodule->recommend_shallow = -1;
- hashcpy(submodule->gitmodules_sha1, gitmodules_sha1);
+ oidcpy(&submodule->gitmodules_oid, gitmodules_oid);
cache_add(cache, submodule);
return parse_push_recurse(opt, arg, 1);
}
- static void warn_multiple_config(const unsigned char *treeish_name,
+ static void warn_multiple_config(const struct object_id *treeish_name,
const char *name, const char *option)
{
const char *commit_string = "WORKTREE";
if (treeish_name)
- commit_string = sha1_to_hex(treeish_name);
+ commit_string = oid_to_hex(treeish_name);
warning("%s:.gitmodules, multiple configurations found for "
"'submodule.%s.%s'. Skipping second one!",
commit_string, name, option);
struct parse_config_parameter {
struct submodule_cache *cache;
- const unsigned char *treeish_name;
- const unsigned char *gitmodules_sha1;
+ const struct object_id *treeish_name;
+ const struct object_id *gitmodules_oid;
int overwrite;
};
return 0;
submodule = lookup_or_create_by_name(me->cache,
- me->gitmodules_sha1,
+ me->gitmodules_oid,
name.buf);
if (!strcmp(item.buf, "path")) {
}
} else if (!strcmp(item.buf, "fetchrecursesubmodules")) {
/* when parsing worktree configurations we can die early */
- int die_on_error = is_null_sha1(me->gitmodules_sha1);
+ int die_on_error = is_null_oid(me->gitmodules_oid);
if (!me->overwrite &&
submodule->fetch_recurse != RECURSE_SUBMODULES_NONE)
warn_multiple_config(me->treeish_name, submodule->name,
switch (lookup_type) {
case lookup_name:
- submodule = cache_lookup_name(cache, oid.hash, key);
+ submodule = cache_lookup_name(cache, &oid, key);
break;
case lookup_path:
- submodule = cache_lookup_path(cache, oid.hash, key);
+ submodule = cache_lookup_path(cache, &oid, key);
break;
}
if (submodule)
/* fill the submodule config into the cache */
parameter.cache = cache;
- parameter.treeish_name = treeish_name->hash;
- parameter.gitmodules_sha1 = oid.hash;
+ parameter.treeish_name = treeish_name;
+ parameter.gitmodules_oid = &oid;
parameter.overwrite = 0;
git_config_from_mem(parse_config, CONFIG_ORIGIN_SUBMODULE_BLOB, rev.buf,
config, config_size, &parameter);
switch (lookup_type) {
case lookup_name:
- return cache_lookup_name(cache, oid.hash, key);
+ return cache_lookup_name(cache, &oid, key);
case lookup_path:
- return cache_lookup_path(cache, oid.hash, key);
+ return cache_lookup_path(cache, &oid, key);
default:
return NULL;
}
parameter.cache = repo->submodule_cache;
parameter.treeish_name = NULL;
- parameter.gitmodules_sha1 = null_sha1;
+ parameter.gitmodules_oid = &null_oid;
parameter.overwrite = 1;
return parse_config(var, value, &parameter);
repo_read_gitmodules(repo);
}
-const struct submodule *submodule_from_name(const struct object_id *treeish_name,
+const struct submodule *submodule_from_name(struct repository *r,
+ const struct object_id *treeish_name,
const char *name)
{
- gitmodules_read_check(the_repository);
- return config_from(the_repository->submodule_cache, treeish_name, name, lookup_name);
+ gitmodules_read_check(r);
+ return config_from(r->submodule_cache, treeish_name, name, lookup_name);
}
-const struct submodule *submodule_from_path(const struct object_id *treeish_name,
+const struct submodule *submodule_from_path(struct repository *r,
+ const struct object_id *treeish_name,
const char *path)
{
- gitmodules_read_check(the_repository);
- return config_from(the_repository->submodule_cache, treeish_name, path, lookup_path);
-}
-
-const struct submodule *submodule_from_cache(struct repository *repo,
- const struct object_id *treeish_name,
- const char *key)
-{
- gitmodules_read_check(repo);
- return config_from(repo->submodule_cache, treeish_name,
- key, lookup_path);
+ gitmodules_read_check(r);
+ return config_from(r->submodule_cache, treeish_name, path, lookup_path);
}
-void submodule_free(void)
+void submodule_free(struct repository *r)
{
- if (the_repository->submodule_cache)
- submodule_cache_clear(the_repository->submodule_cache);
+ if (r->submodule_cache)
+ submodule_cache_clear(r->submodule_cache);
}
#ifndef SUBMODULE_CONFIG_CACHE_H
#define SUBMODULE_CONFIG_CACHE_H
+ #include "cache.h"
#include "hashmap.h"
#include "submodule.h"
#include "strbuf.h"
const char *ignore;
const char *branch;
struct submodule_update_strategy update_strategy;
- /* the sha1 blob id of the responsible .gitmodules file */
- unsigned char gitmodules_sha1[20];
+ /* the object id of the responsible .gitmodules file */
+ struct object_id gitmodules_oid;
int recommend_shallow;
};
#define SUBMODULE_INIT { NULL, NULL, NULL, RECURSE_SUBMODULES_NONE, \
- NULL, NULL, SUBMODULE_UPDATE_STRATEGY_INIT, {0}, -1 };
+ NULL, NULL, SUBMODULE_UPDATE_STRATEGY_INIT, { { 0 } }, -1 };
struct submodule_cache;
struct repository;
extern int parse_push_recurse_submodules_arg(const char *opt, const char *arg);
extern void repo_read_gitmodules(struct repository *repo);
extern void gitmodules_config_oid(const struct object_id *commit_oid);
-extern const struct submodule *submodule_from_name(
- const struct object_id *commit_or_tree, const char *name);
-extern const struct submodule *submodule_from_path(
- const struct object_id *commit_or_tree, const char *path);
-extern const struct submodule *submodule_from_cache(struct repository *repo,
- const struct object_id *treeish_name,
- const char *key);
-extern void submodule_free(void);
+const struct submodule *submodule_from_name(struct repository *r,
+ const struct object_id *commit_or_tree,
+ const char *name);
+const struct submodule *submodule_from_path(struct repository *r,
+ const struct object_id *commit_or_tree,
+ const char *path);
+void submodule_free(struct repository *r);
+
+/*
+ * Returns 0 if the name is syntactically acceptable as a submodule "name"
+ * (e.g., that may be found in the subsection of a .gitmodules file) and -1
+ * otherwise.
+ */
+int check_submodule_name(const char *name);
#endif /* SUBMODULE_CONFIG_H */
if (is_gitmodules_unmerged(&the_index))
die(_("Cannot change unmerged .gitmodules, resolve merge conflicts first"));
- submodule = submodule_from_path(&null_oid, oldpath);
+ submodule = submodule_from_path(the_repository, &null_oid, oldpath);
if (!submodule || !submodule->name) {
warning(_("Could not find section in .gitmodules where path=%s"), oldpath);
return -1;
if (is_gitmodules_unmerged(&the_index))
die(_("Cannot change unmerged .gitmodules, resolve merge conflicts first"));
- submodule = submodule_from_path(&null_oid, path);
+ submodule = submodule_from_path(the_repository, &null_oid, path);
if (!submodule || !submodule->name) {
warning(_("Could not find section in .gitmodules where path=%s"), path);
return -1;
void set_diffopt_flags_from_submodule_config(struct diff_options *diffopt,
const char *path)
{
- const struct submodule *submodule = submodule_from_path(&null_oid, path);
+ const struct submodule *submodule = submodule_from_path(the_repository,
+ &null_oid, path);
if (submodule) {
const char *ignore;
char *key;
const struct string_list *sl;
const struct submodule *module;
- module = submodule_from_cache(repo, &null_oid, path);
+ module = submodule_from_path(repo, &null_oid, path);
/* early return if there isn't a path->module mapping */
if (!module)
if (!should_update_submodules())
return NULL;
- return submodule_from_path(&null_oid, ce->name);
+ return submodule_from_path(the_repository, &null_oid, ce->name);
}
static struct oid_array *submodule_commits(struct string_list *submodules,
if (!S_ISGITLINK(p->two->mode))
continue;
- submodule = submodule_from_path(commit_oid, p->two->path);
+ submodule = submodule_from_path(the_repository,
+ commit_oid, p->two->path);
if (submodule)
name = submodule->name;
else {
name = default_name_or_path(p->two->path);
/* make sure name does not collide with existing one */
- submodule = submodule_from_name(commit_oid, name);
+ submodule = submodule_from_name(the_repository, commit_oid, name);
if (submodule) {
warning("Submodule in commit %s at path: "
"'%s' collides with a submodule named "
{
struct has_commit_data *cb = data;
- enum object_type type = oid_object_info(oid, NULL);
+ enum object_type type = oid_object_info(the_repository, oid, NULL);
switch (type) {
case OBJ_COMMIT:
const struct submodule *submodule;
const char *path = NULL;
- submodule = submodule_from_name(&null_oid, name->string);
+ submodule = submodule_from_name(the_repository, &null_oid, name->string);
if (submodule)
path = submodule->path;
else
const struct string_list_item *name;
/* No need to check if there are no submodules configured */
- if (!submodule_from_path(NULL, NULL))
+ if (!submodule_from_path(the_repository, NULL, NULL))
return;
argv_array_push(&argv, "--"); /* argv[0] program name */
const struct submodule *submodule;
const char *path = NULL;
- submodule = submodule_from_name(&null_oid, name->string);
+ submodule = submodule_from_name(the_repository, &null_oid, name->string);
if (submodule)
path = submodule->path;
else
int ret;
/* No need to check if there are no submodules configured */
- if (!submodule_from_path(NULL, NULL))
+ if (!submodule_from_path(the_repository, NULL, NULL))
return 0;
argv_array_push(&args, "--"); /* args[0] program name */
if (!S_ISGITLINK(ce->ce_mode))
continue;
- submodule = submodule_from_cache(spf->r, &null_oid, ce->name);
+ submodule = submodule_from_path(spf->r, &null_oid, ce->name);
if (!submodule) {
const char *name = default_name_or_path(ce->name);
if (name) {
buf.buf[0] == '2') {
/* T = line type, XY = status, SSSS = submodule state */
if (buf.len < strlen("T XY SSSS"))
- die("BUG: invalid status --porcelain=2 line %s",
+ BUG("invalid status --porcelain=2 line %s",
buf.buf);
if (buf.buf[5] == 'S' && buf.buf[8] == 'U')
get_super_prefix_or_empty(), path);
argv_array_pushl(&cp.args, "read-tree", "-u", "--reset", NULL);
- argv_array_push(&cp.args, EMPTY_TREE_SHA1_HEX);
+ argv_array_push(&cp.args, empty_tree_oid_hex());
if (run_command(&cp))
die("could not reset submodule index");
if (old_head && !is_submodule_populated_gently(path, error_code_ptr))
return 0;
- sub = submodule_from_path(&null_oid, path);
+ sub = submodule_from_path(the_repository, &null_oid, path);
if (!sub)
- die("BUG: could not get submodule information for '%s'", path);
+ BUG("could not get submodule information for '%s'", path);
if (old_head && !(flags & SUBMODULE_MOVE_HEAD_FORCE)) {
/* Check if the submodule has a dirty index. */
} else {
char *gitdir = xstrfmt("%s/modules/%s",
get_git_common_dir(), sub->name);
- connect_work_tree_and_git_dir(path, gitdir);
+ connect_work_tree_and_git_dir(path, gitdir, 0);
free(gitdir);
/* make sure the index is clean as well */
if (old_head && (flags & SUBMODULE_MOVE_HEAD_FORCE)) {
char *gitdir = xstrfmt("%s/modules/%s",
get_git_common_dir(), sub->name);
- connect_work_tree_and_git_dir(path, gitdir);
+ connect_work_tree_and_git_dir(path, gitdir, 1);
free(gitdir);
}
}
argv_array_push(&cp.args, "-m");
if (!(flags & SUBMODULE_MOVE_HEAD_FORCE))
- argv_array_push(&cp.args, old_head ? old_head : EMPTY_TREE_SHA1_HEX);
+ argv_array_push(&cp.args, old_head ? old_head : empty_tree_oid_hex());
- argv_array_push(&cp.args, new_head ? new_head : EMPTY_TREE_SHA1_HEX);
+ argv_array_push(&cp.args, new_head ? new_head : empty_tree_oid_hex());
if (run_command(&cp)) {
ret = -1;
real_old_git_dir = real_pathdup(old_git_dir, 1);
- sub = submodule_from_path(&null_oid, path);
+ sub = submodule_from_path(the_repository, &null_oid, path);
if (!sub)
die(_("could not lookup name for submodule '%s'"), path);
* superproject did not rewrite the git file links yet,
* fix it now.
*/
- sub = submodule_from_path(&null_oid, path);
+ sub = submodule_from_path(the_repository, &null_oid, path);
if (!sub)
die(_("could not lookup name for submodule '%s'"), path);
connect_work_tree_and_git_dir(path,
- git_path("modules/%s", sub->name));
+ git_path("modules/%s", sub->name), 0);
} else {
/* Is it already absorbed into the superprojects git dir? */
char *real_sub_git_dir = real_pathdup(sub_git_dir, 1);
struct strbuf sb = STRBUF_INIT;
if (flags & ~ABSORB_GITDIR_RECURSE_SUBMODULES)
- die("BUG: we don't know how to pass the flags down?");
+ BUG("we don't know how to pass the flags down?");
strbuf_addstr(&sb, get_super_prefix_or_empty());
strbuf_addstr(&sb, path);
if (super_sub_len > cwd_len ||
strcmp(&cwd[cwd_len - super_sub_len], super_sub))
- die (_("BUG: returned path string doesn't match cwd?"));
+ BUG("returned path string doesn't match cwd?");
super_wt = xstrdup(cwd);
super_wt[cwd_len - super_sub_len] = '\0';
strbuf_addstr(buf, git_dir);
}
if (!is_git_directory(buf->buf)) {
- sub = submodule_from_path(&null_oid, submodule);
+ sub = submodule_from_path(the_repository, &null_oid, submodule);
if (!sub) {
ret = -1;
goto cleanup;
if (!state && ce->ce_flags & CE_WT_REMOVE) {
repo_read_gitmodules(the_repository);
} else if (state && (ce->ce_flags & CE_UPDATE)) {
- submodule_free();
+ submodule_free(the_repository);
checkout_entry(ce, state, NULL);
repo_read_gitmodules(the_repository);
}
if (ce->ce_flags & CE_UPDATE) {
if (ce->ce_flags & CE_WT_REMOVE)
- die("BUG: both update and delete flags are set on %s",
+ BUG("both update and delete flags are set on %s",
ce->name);
display_progress(progress, ++cnt);
ce->ce_flags &= ~CE_UPDATE;
o->result.timestamp.sec = o->src_index->timestamp.sec;
o->result.timestamp.nsec = o->src_index->timestamp.nsec;
o->result.version = o->src_index->version;
- o->result.split_index = o->src_index->split_index;
- if (o->result.split_index)
+ if (!o->src_index->split_index) {
+ o->result.split_index = NULL;
+ } else if (o->src_index == o->dst_index) {
+ /*
+ * o->dst_index (and thus o->src_index) will be discarded
+ * and overwritten with o->result at the end of this function,
+ * so just use src_index's split_index to avoid having to
+ * create a new one.
+ */
+ o->result.split_index = o->src_index->split_index;
o->result.split_index->refcount++;
- hashcpy(o->result.sha1, o->src_index->sha1);
+ } else {
+ o->result.split_index = init_split_index(&o->result);
+ }
+ oidcpy(&o->result.oid, &o->src_index->oid);
o->merge_size = len;
mark_all_ce_unused(o->src_index);
}
}
- o->src_index = NULL;
ret = check_updates(o) ? (-2) : 0;
if (o->dst_index) {
if (!ret) {
WRITE_TREE_SILENT |
WRITE_TREE_REPAIR);
}
- move_index_extensions(&o->result, o->dst_index);
+ move_index_extensions(&o->result, o->src_index);
discard_index(o->dst_index);
*o->dst_index = o->result;
} else {
discard_index(&o->result);
}
+ o->src_index = NULL;
done:
clear_exclude_list(&el);
add_rejected_path(o, error_type, ce->name);
}
-static int verify_uptodate(const struct cache_entry *ce,
- struct unpack_trees_options *o)
+int verify_uptodate(const struct cache_entry *ce,
+ struct unpack_trees_options *o)
{
if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
return 0;
#include "tag.h"
#include "object.h"
#include "commit.h"
-#include "exec_cmd.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "sigchain.h"
#include "version.h"
#include "string-list.h"
-#include "parse-options.h"
#include "argv-array.h"
#include "prio-queue.h"
#include "protocol.h"
#include "quote.h"
-
-static const char * const upload_pack_usage[] = {
- N_("git upload-pack [<options>] <dir>"),
- NULL
-};
+#include "upload-pack.h"
+#include "serve.h"
/* Remember to update object flag allocation in object.h */
#define THEY_HAVE (1u << 11)
* otherwise maximum packet size (up to 65520 bytes).
*/
static int use_sideband;
-static int advertise_refs;
static int stateless_rpc;
static const char *pack_objects_hook;
break;
default:
got_common = 1;
- memcpy(last_hex, oid_to_hex(&oid), 41);
+ oid_to_hex_r(last_hex, &oid);
if (multi_ack == 2)
packet_write_fmt(1, "ACK %s common\n", last_hex);
else if (multi_ack)
"rev-list", "--stdin", NULL,
};
struct object *o;
- char namebuf[42]; /* ^ + SHA-1 + LF */
+ char namebuf[GIT_MAX_HEXSZ + 2]; /* ^ + hash + LF */
int i;
cmd->argv = argv;
struct child_process cmd = CHILD_PROCESS_INIT;
int i;
struct object *o;
- char namebuf[42]; /* ^ + SHA-1 + LF */
+ char namebuf[GIT_MAX_HEXSZ + 2]; /* ^ + hash + LF */
+ const unsigned hexsz = the_hash_algo->hexsz;
if (do_reachable_revlist(&cmd, src, reachable) < 0)
return -1;
- while ((i = read_in_full(cmd.out, namebuf, 41)) == 41) {
+ while ((i = read_in_full(cmd.out, namebuf, hexsz + 1)) == hexsz + 1) {
struct object_id sha1;
+ const char *p;
- if (namebuf[40] != '\n' || get_oid_hex(namebuf, &sha1))
+ if (parse_oid_hex(namebuf, &sha1, &p) || *p != '\n')
break;
o = lookup_object(sha1.hash);
}
send_unshallow(shallows);
- packet_flush(1);
}
static void deepen_by_rev_list(int ac, const char **av,
send_shallow(result);
free_commit_list(result);
send_unshallow(shallows);
- packet_flush(1);
+}
+
+/* Returns 1 if a shallow list is sent or 0 otherwise */
+static int send_shallow_list(int depth, int deepen_rev_list,
+ timestamp_t deepen_since,
+ struct string_list *deepen_not,
+ struct object_array *shallows)
+{
+ int ret = 0;
+
+ if (depth > 0 && deepen_rev_list)
+ die("git upload-pack: deepen and deepen-since (or deepen-not) cannot be used together");
+ if (depth > 0) {
+ deepen(depth, deepen_relative, shallows);
+ ret = 1;
+ } else if (deepen_rev_list) {
+ struct argv_array av = ARGV_ARRAY_INIT;
+ int i;
+
+ argv_array_push(&av, "rev-list");
+ if (deepen_since)
+ argv_array_pushf(&av, "--max-age=%"PRItime, deepen_since);
+ if (deepen_not->nr) {
+ argv_array_push(&av, "--not");
+ for (i = 0; i < deepen_not->nr; i++) {
+ struct string_list_item *s = deepen_not->items + i;
+ argv_array_push(&av, s->string);
+ }
+ argv_array_push(&av, "--not");
+ }
+ for (i = 0; i < want_obj.nr; i++) {
+ struct object *o = want_obj.objects[i].item;
+ argv_array_push(&av, oid_to_hex(&o->oid));
+ }
+ deepen_by_rev_list(av.argc, av.argv, shallows);
+ argv_array_clear(&av);
+ ret = 1;
+ } else {
+ if (shallows->nr > 0) {
+ int i;
+ for (i = 0; i < shallows->nr; i++)
+ register_shallow(&shallows->objects[i].item->oid);
+ }
+ }
+
+ shallow_nr += shallows->nr;
+ return ret;
+}
+
+static int process_shallow(const char *line, struct object_array *shallows)
+{
+ const char *arg;
+ if (skip_prefix(line, "shallow ", &arg)) {
+ struct object_id oid;
+ struct object *object;
+ if (get_oid_hex(arg, &oid))
+ die("invalid shallow line: %s", line);
+ object = parse_object(&oid);
+ if (!object)
+ return 1;
+ if (object->type != OBJ_COMMIT)
+ die("invalid shallow object %s", oid_to_hex(&oid));
+ if (!(object->flags & CLIENT_SHALLOW)) {
+ object->flags |= CLIENT_SHALLOW;
+ add_object_array(object, NULL, shallows);
+ }
+ return 1;
+ }
+
+ return 0;
+}
+
+static int process_deepen(const char *line, int *depth)
+{
+ const char *arg;
+ if (skip_prefix(line, "deepen ", &arg)) {
+ char *end = NULL;
+ *depth = (int)strtol(arg, &end, 0);
+ if (!end || *end || *depth <= 0)
+ die("Invalid deepen: %s", line);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int process_deepen_since(const char *line, timestamp_t *deepen_since, int *deepen_rev_list)
+{
+ const char *arg;
+ if (skip_prefix(line, "deepen-since ", &arg)) {
+ char *end = NULL;
+ *deepen_since = parse_timestamp(arg, &end, 0);
+ if (!end || *end || !*deepen_since ||
+ /* revisions.c's max_age -1 is special */
+ *deepen_since == -1)
+ die("Invalid deepen-since: %s", line);
+ *deepen_rev_list = 1;
+ return 1;
+ }
+ return 0;
+}
+
+static int process_deepen_not(const char *line, struct string_list *deepen_not, int *deepen_rev_list)
+{
+ const char *arg;
+ if (skip_prefix(line, "deepen-not ", &arg)) {
+ char *ref = NULL;
+ struct object_id oid;
+ if (expand_ref(arg, strlen(arg), &oid, &ref) != 1)
+ die("git upload-pack: ambiguous deepen-not: %s", line);
+ string_list_append(deepen_not, ref);
+ free(ref);
+ *deepen_rev_list = 1;
+ return 1;
+ }
+ return 0;
}
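/*
 * Example request lines accepted by the four helpers above (editorial):
 *   "shallow <object-id>"           -> process_shallow()
 *   "deepen 5"                      -> process_deepen()
 *   "deepen-since 1510116086"       -> process_deepen_since()
 *   "deepen-not refs/heads/master"  -> process_deepen_not()
 */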
static void receive_needs(void)
if (!line)
break;
- if (skip_prefix(line, "shallow ", &arg)) {
- struct object_id oid;
- struct object *object;
- if (get_oid_hex(arg, &oid))
- die("invalid shallow line: %s", line);
- object = parse_object(&oid);
- if (!object)
- continue;
- if (object->type != OBJ_COMMIT)
- die("invalid shallow object %s", oid_to_hex(&oid));
- if (!(object->flags & CLIENT_SHALLOW)) {
- object->flags |= CLIENT_SHALLOW;
- add_object_array(object, NULL, &shallows);
- }
+ if (process_shallow(line, &shallows))
continue;
- }
- if (skip_prefix(line, "deepen ", &arg)) {
- char *end = NULL;
- depth = strtol(arg, &end, 0);
- if (!end || *end || depth <= 0)
- die("Invalid deepen: %s", line);
+ if (process_deepen(line, &depth))
continue;
- }
- if (skip_prefix(line, "deepen-since ", &arg)) {
- char *end = NULL;
- deepen_since = parse_timestamp(arg, &end, 0);
- if (!end || *end || !deepen_since ||
- /* revisions.c's max_age -1 is special */
- deepen_since == -1)
- die("Invalid deepen-since: %s", line);
- deepen_rev_list = 1;
+ if (process_deepen_since(line, &deepen_since, &deepen_rev_list))
continue;
- }
- if (skip_prefix(line, "deepen-not ", &arg)) {
- char *ref = NULL;
- struct object_id oid;
- if (expand_ref(arg, strlen(arg), &oid, &ref) != 1)
- die("git upload-pack: ambiguous deepen-not: %s", line);
- string_list_append(&deepen_not, ref);
- free(ref);
- deepen_rev_list = 1;
+ if (process_deepen_not(line, &deepen_not, &deepen_rev_list))
continue;
- }
+
if (skip_prefix(line, "filter ", &arg)) {
if (!filter_capability_requested)
die("git upload-pack: filtering capability not negotiated");
parse_list_objects_filter(&filter_options, arg);
continue;
}
+
if (!skip_prefix(line, "want ", &arg) ||
- get_oid_hex(arg, &oid_buf))
+ parse_oid_hex(arg, &oid_buf, &features))
die("git upload-pack: protocol error, "
- "expected to get sha, not '%s'", line);
-
- features = arg + 40;
+ "expected to get object ID, not '%s'", line);
if (parse_feature_request(features, "deepen-relative"))
deepen_relative = 1;
if (depth == 0 && !deepen_rev_list && shallows.nr == 0)
return;
- if (depth > 0 && deepen_rev_list)
- die("git upload-pack: deepen and deepen-since (or deepen-not) cannot be used together");
- if (depth > 0)
- deepen(depth, deepen_relative, &shallows);
- else if (deepen_rev_list) {
- struct argv_array av = ARGV_ARRAY_INIT;
- int i;
- argv_array_push(&av, "rev-list");
- if (deepen_since)
- argv_array_pushf(&av, "--max-age=%"PRItime, deepen_since);
- if (deepen_not.nr) {
- argv_array_push(&av, "--not");
- for (i = 0; i < deepen_not.nr; i++) {
- struct string_list_item *s = deepen_not.items + i;
- argv_array_push(&av, s->string);
- }
- argv_array_push(&av, "--not");
- }
- for (i = 0; i < want_obj.nr; i++) {
- struct object *o = want_obj.objects[i].item;
- argv_array_push(&av, oid_to_hex(&o->oid));
- }
- deepen_by_rev_list(av.argc, av.argv, &shallows);
- argv_array_clear(&av);
- }
- else
- if (shallows.nr > 0) {
- int i;
- for (i = 0; i < shallows.nr; i++)
- register_shallow(&shallows.objects[i].item->oid);
- }
-
- shallow_nr += shallows.nr;
+ if (send_shallow_list(depth, deepen_rev_list, deepen_since,
+ &deepen_not, &shallows))
+ packet_flush(1);
object_array_clear(&shallows);
}
return 0;
}
-static void upload_pack(void)
-{
- struct string_list symref = STRING_LIST_INIT_DUP;
-
- head_ref_namespaced(find_symref, &symref);
-
- if (advertise_refs || !stateless_rpc) {
- reset_timeout();
- head_ref_namespaced(send_ref, &symref);
- for_each_namespaced_ref(send_ref, &symref);
- advertise_shallow_grafts(1);
- packet_flush(1);
- } else {
- head_ref_namespaced(check_ref, NULL);
- for_each_namespaced_ref(check_ref, NULL);
- }
- string_list_clear(&symref, 1);
- if (advertise_refs)
- return;
-
- receive_needs();
- if (want_obj.nr) {
- get_common_commits();
- create_pack_file();
- }
-}
-
static int upload_pack_config(const char *var, const char *value, void *unused)
{
if (!strcmp("uploadpack.allowtipsha1inwant", var)) {
return parse_hide_refs_config(var, value, "uploadpack");
}
-int cmd_main(int argc, const char **argv)
+void upload_pack(struct upload_pack_options *options)
{
- const char *dir;
- int strict = 0;
- struct option options[] = {
- OPT_BOOL(0, "stateless-rpc", &stateless_rpc,
- N_("quit after a single request/response exchange")),
- OPT_BOOL(0, "advertise-refs", &advertise_refs,
- N_("exit immediately after initial ref advertisement")),
- OPT_BOOL(0, "strict", &strict,
- N_("do not try <directory>/.git/ if <directory> is no Git directory")),
- OPT_INTEGER(0, "timeout", &timeout,
- N_("interrupt transfer after <n> seconds of inactivity")),
- OPT_END()
- };
+ struct string_list symref = STRING_LIST_INIT_DUP;
- packet_trace_identity("upload-pack");
- check_replace_refs = 0;
+ stateless_rpc = options->stateless_rpc;
+ timeout = options->timeout;
+ daemon_mode = options->daemon_mode;
- argc = parse_options(argc, argv, NULL, options, upload_pack_usage, 0);
+ git_config(upload_pack_config, NULL);
- if (argc != 1)
- usage_with_options(upload_pack_usage, options);
+ head_ref_namespaced(find_symref, &symref);
- if (timeout)
- daemon_mode = 1;
+ if (options->advertise_refs || !stateless_rpc) {
+ reset_timeout();
+ head_ref_namespaced(send_ref, &symref);
+ for_each_namespaced_ref(send_ref, &symref);
+ advertise_shallow_grafts(1);
+ packet_flush(1);
+ } else {
+ head_ref_namespaced(check_ref, NULL);
+ for_each_namespaced_ref(check_ref, NULL);
+ }
+ string_list_clear(&symref, 1);
+ if (options->advertise_refs)
+ return;
- setup_path();
+ receive_needs();
+ if (want_obj.nr) {
+ get_common_commits();
+ create_pack_file();
+ }
+}
- dir = argv[0];
+struct upload_pack_data {
+ struct object_array wants;
+ struct oid_array haves;
- if (!enter_repo(dir, strict))
- die("'%s' does not appear to be a git repository", dir);
+ struct object_array shallows;
+ struct string_list deepen_not;
+ int depth;
+ timestamp_t deepen_since;
+ int deepen_rev_list;
+ int deepen_relative;
- git_config(upload_pack_config, NULL);
+ unsigned stateless_rpc : 1;
- switch (determine_protocol_version_server()) {
- case protocol_v1:
- /*
- * v1 is just the original protocol with a version string,
- * so just fall through after writing the version string.
- */
- if (advertise_refs || !stateless_rpc)
- packet_write_fmt(1, "version 1\n");
-
- /* fallthrough */
- case protocol_v0:
- upload_pack();
- break;
- case protocol_unknown_version:
- BUG("unknown protocol version");
+ unsigned use_thin_pack : 1;
+ unsigned use_ofs_delta : 1;
+ unsigned no_progress : 1;
+ unsigned use_include_tag : 1;
+ unsigned done : 1;
+};
+
+static void upload_pack_data_init(struct upload_pack_data *data)
+{
+ struct object_array wants = OBJECT_ARRAY_INIT;
+ struct oid_array haves = OID_ARRAY_INIT;
+ struct object_array shallows = OBJECT_ARRAY_INIT;
+ struct string_list deepen_not = STRING_LIST_INIT_DUP;
+
+ memset(data, 0, sizeof(*data));
+ data->wants = wants;
+ data->haves = haves;
+ data->shallows = shallows;
+ data->deepen_not = deepen_not;
+}
+
+static void upload_pack_data_clear(struct upload_pack_data *data)
+{
+ object_array_clear(&data->wants);
+ oid_array_clear(&data->haves);
+ object_array_clear(&data->shallows);
+ string_list_clear(&data->deepen_not, 0);
+}
+
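upload_pack_data_init() above stages each *_INIT value in a local and copies it
in after the memset, because those macros are brace initializers and cannot be
assigned to an already-declared member directly. A minimal standalone sketch of
the same pattern (the intlist struct here is illustrative, not Git's):

	#include <string.h>

	struct intlist {
		int *items;
		int nr, alloc;
	};
	#define INTLIST_INIT { NULL, 0, 0 }

	struct request_data {
		struct intlist wants;
		int depth;
		unsigned done : 1;
	};

	static void request_data_init(struct request_data *data)
	{
		/* A brace initializer only works at declaration time... */
		struct intlist wants = INTLIST_INIT;

		/* ...so zero the struct, then copy the initialized member in. */
		memset(data, 0, sizeof(*data));
		data->wants = wants;
	}
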
+static int parse_want(const char *line)
+{
+ const char *arg;
+ if (skip_prefix(line, "want ", &arg)) {
+ struct object_id oid;
+ struct object *o;
+
+ if (get_oid_hex(arg, &oid))
+ die("git upload-pack: protocol error, "
+ "expected to get oid, not '%s'", line);
+
+ o = parse_object(&oid);
+ if (!o) {
+ packet_write_fmt(1,
+ "ERR upload-pack: not our ref %s",
+ oid_to_hex(&oid));
+ die("git upload-pack: not our ref %s",
+ oid_to_hex(&oid));
+ }
+
+ if (!(o->flags & WANTED)) {
+ o->flags |= WANTED;
+ add_object_array(o, NULL, &want_obj);
+ }
+
+ return 1;
+ }
+
+ return 0;
+}
+
+static int parse_have(const char *line, struct oid_array *haves)
+{
+ const char *arg;
+ if (skip_prefix(line, "have ", &arg)) {
+ struct object_id oid;
+
+ if (get_oid_hex(arg, &oid))
+ die("git upload-pack: expected SHA1 object, got '%s'", arg);
+ oid_array_append(haves, &oid);
+ return 1;
}
return 0;
}
+
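Both parsers lean on Git's skip_prefix() helper, which tests for a prefix and,
on a match, reports a pointer to the remainder of the string. A self-contained
approximation of the "have" parse, with skip_prefix() reimplemented locally for
illustration:

	#include <stdio.h>
	#include <string.h>

	/* Stand-in for Git's skip_prefix(): on match, *out points past the prefix. */
	static int skip_prefix(const char *str, const char *prefix, const char **out)
	{
		size_t len = strlen(prefix);
		if (strncmp(str, prefix, len))
			return 0;
		*out = str + len;
		return 1;
	}

	int main(void)
	{
		const char *line = "have 1234567890abcdef1234567890abcdef12345678";
		const char *arg;

		if (skip_prefix(line, "have ", &arg))
			printf("client claims to have %s\n", arg);
		return 0;
	}
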
+static void process_args(struct packet_reader *request,
+ struct upload_pack_data *data)
+{
+ while (packet_reader_read(request) != PACKET_READ_FLUSH) {
+ const char *arg = request->line;
+
+ /* process want */
+ if (parse_want(arg))
+ continue;
+ /* process have line */
+ if (parse_have(arg, &data->haves))
+ continue;
+
+ /* process args like thin-pack */
+ if (!strcmp(arg, "thin-pack")) {
+ use_thin_pack = 1;
+ continue;
+ }
+ if (!strcmp(arg, "ofs-delta")) {
+ use_ofs_delta = 1;
+ continue;
+ }
+ if (!strcmp(arg, "no-progress")) {
+ no_progress = 1;
+ continue;
+ }
+ if (!strcmp(arg, "include-tag")) {
+ use_include_tag = 1;
+ continue;
+ }
+ if (!strcmp(arg, "done")) {
+ data->done = 1;
+ continue;
+ }
+
+ /* Shallow related arguments */
+ if (process_shallow(arg, &data->shallows))
+ continue;
+ if (process_deepen(arg, &data->depth))
+ continue;
+ if (process_deepen_since(arg, &data->deepen_since,
+ &data->deepen_rev_list))
+ continue;
+ if (process_deepen_not(arg, &data->deepen_not,
+ &data->deepen_rev_list))
+ continue;
+ if (!strcmp(arg, "deepen-relative")) {
+ data->deepen_relative = 1;
+ continue;
+ }
+
+		/* any line we do not recognize is a protocol error */
+		die("unexpected line: '%s'", arg);
+ }
+}
+
+static int process_haves(struct oid_array *haves, struct oid_array *common)
+{
+ int i;
+
+ /* Process haves */
+ for (i = 0; i < haves->nr; i++) {
+ const struct object_id *oid = &haves->oid[i];
+ struct object *o;
+ int we_knew_they_have = 0;
+
+ if (!has_object_file(oid))
+ continue;
+
+ oid_array_append(common, oid);
+
+ o = parse_object(oid);
+ if (!o)
+ die("oops (%s)", oid_to_hex(oid));
+ if (o->type == OBJ_COMMIT) {
+ struct commit_list *parents;
+ struct commit *commit = (struct commit *)o;
+ if (o->flags & THEY_HAVE)
+ we_knew_they_have = 1;
+ else
+ o->flags |= THEY_HAVE;
+ if (!oldest_have || (commit->date < oldest_have))
+ oldest_have = commit->date;
+ for (parents = commit->parents;
+ parents;
+ parents = parents->next)
+ parents->item->object.flags |= THEY_HAVE;
+ }
+ if (!we_knew_they_have)
+ add_object_array(o, NULL, &have_obj);
+ }
+
+ return 0;
+}
+
+static int send_acks(struct oid_array *acks, struct strbuf *response)
+{
+ int i;
+
+ packet_buf_write(response, "acknowledgments\n");
+
+ /* Send Acks */
+ if (!acks->nr)
+ packet_buf_write(response, "NAK\n");
+
+ for (i = 0; i < acks->nr; i++) {
+ packet_buf_write(response, "ACK %s\n",
+ oid_to_hex(&acks->oid[i]));
+ }
+
+ if (ok_to_give_up()) {
+ /* Send Ready */
+ packet_buf_write(response, "ready\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static int process_haves_and_send_acks(struct upload_pack_data *data)
+{
+ struct oid_array common = OID_ARRAY_INIT;
+ struct strbuf response = STRBUF_INIT;
+ int ret = 0;
+
+ process_haves(&data->haves, &common);
+ if (data->done) {
+ ret = 1;
+ } else if (send_acks(&common, &response)) {
+ packet_buf_delim(&response);
+ ret = 1;
+ } else {
+ /* Add Flush */
+ packet_buf_flush(&response);
+ ret = 0;
+ }
+
+ /* Send response */
+ write_or_die(1, response.buf, response.len);
+ strbuf_release(&response);
+
+ oid_array_clear(&data->haves);
+ oid_array_clear(&common);
+ return ret;
+}
+
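On the wire, the acknowledgments section built above is a series of pkt-lines:
each payload is prefixed with its own length (payload plus the four length
bytes) in lowercase hex, with "0000" acting as a flush packet and "0001" as a
section delimiter. A minimal sketch of that framing for the "ready" path, just
the wire format rather than Git's pkt-line.c (the object ID is a placeholder):

	#include <stdio.h>
	#include <string.h>

	/* One pkt-line: four hex digits of length (payload + 4), then the payload. */
	static void packet_write(FILE *out, const char *payload)
	{
		fprintf(out, "%04x%s", (unsigned)(strlen(payload) + 4), payload);
	}

	int main(void)
	{
		packet_write(stdout, "acknowledgments\n");
		packet_write(stdout, "ACK 1234567890abcdef1234567890abcdef12345678\n");
		packet_write(stdout, "ready\n");
		fputs("0001", stdout);	/* delim-pkt: the packfile section follows */
		return 0;
	}
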
+static void send_shallow_info(struct upload_pack_data *data)
+{
+ /* No shallow info needs to be sent */
+ if (!data->depth && !data->deepen_rev_list && !data->shallows.nr &&
+ !is_repository_shallow())
+ return;
+
+ packet_write_fmt(1, "shallow-info\n");
+
+ if (!send_shallow_list(data->depth, data->deepen_rev_list,
+ data->deepen_since, &data->deepen_not,
+ &data->shallows) && is_repository_shallow())
+ deepen(INFINITE_DEPTH, data->deepen_relative, &data->shallows);
+
+ packet_delim(1);
+}
+
+enum fetch_state {
+ FETCH_PROCESS_ARGS = 0,
+ FETCH_SEND_ACKS,
+ FETCH_SEND_PACK,
+ FETCH_DONE,
+};
+
+int upload_pack_v2(struct repository *r, struct argv_array *keys,
+ struct packet_reader *request)
+{
+ enum fetch_state state = FETCH_PROCESS_ARGS;
+ struct upload_pack_data data;
+
+ upload_pack_data_init(&data);
+ use_sideband = LARGE_PACKET_MAX;
+
+ while (state != FETCH_DONE) {
+ switch (state) {
+ case FETCH_PROCESS_ARGS:
+ process_args(request, &data);
+
+ if (!want_obj.nr) {
+ /*
+				 * Request didn't contain any 'want' lines,
+				 * so the client doesn't want anything.
+ */
+ state = FETCH_DONE;
+ } else if (data.haves.nr) {
+ /*
+				 * Request had 'have' lines, so let's ACK them.
+ */
+ state = FETCH_SEND_ACKS;
+ } else {
+ /*
+				 * Request had 'want's but no 'have's, so we can
+				 * immediately construct and send a pack.
+ */
+ state = FETCH_SEND_PACK;
+ }
+ break;
+ case FETCH_SEND_ACKS:
+ if (process_haves_and_send_acks(&data))
+ state = FETCH_SEND_PACK;
+ else
+ state = FETCH_DONE;
+ break;
+ case FETCH_SEND_PACK:
+ send_shallow_info(&data);
+
+ packet_write_fmt(1, "packfile\n");
+ create_pack_file();
+ state = FETCH_DONE;
+ break;
+ case FETCH_DONE:
+ continue;
+ }
+ }
+
+ upload_pack_data_clear(&data);
+ return 0;
+}
+
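By the time upload_pack_v2() runs as a v2 command handler, the leading
"command=fetch" and capability pkt-lines should already have been consumed, so
process_args() sees only the argument section, terminated by a flush. A sketch
of the smallest request that drives the machine straight from
FETCH_PROCESS_ARGS to FETCH_SEND_PACK (object ID again a placeholder):

	#include <stdio.h>
	#include <string.h>

	static void packet_write(FILE *out, const char *payload)
	{
		fprintf(out, "%04x%s", (unsigned)(strlen(payload) + 4), payload);
	}

	int main(void)
	{
		/* One want, no haves, and "done": no negotiation round needed. */
		packet_write(stdout, "want 1234567890abcdef1234567890abcdef12345678\n");
		packet_write(stdout, "done\n");
		fputs("0000", stdout);	/* flush-pkt ends the argument section */
		return 0;
	}
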
+int upload_pack_advertise(struct repository *r,
+ struct strbuf *value)
+{
+ if (value)
+ strbuf_addstr(value, "shallow");
+ return 1;
+}
s->show_stash = 0;
s->ahead_behind_flags = AHEAD_BEHIND_UNSPECIFIED;
s->display_comment_prefix = 0;
+ s->detect_rename = -1;
+ s->rename_score = -1;
+ s->rename_limit = -1;
}
static void wt_longstatus_print_unmerged_header(struct wt_status *s)
case 7:
return _("both modified:");
default:
- die("BUG: unhandled unmerged status %x", stagemask);
+ BUG("unhandled unmerged status %x", stagemask);
}
}
status = d->worktree_status;
break;
default:
- die("BUG: unhandled change_type %d in wt_longstatus_print_change_data",
+ BUG("unhandled change_type %d in wt_longstatus_print_change_data",
change_type);
}
status_printf(s, color(WT_STATUS_HEADER, s), "\t");
what = wt_status_diff_status_string(status);
if (!what)
- die("BUG: unhandled diff status %c", status);
+ BUG("unhandled diff status %c", status);
len = label_width - utf8_strwidth(what);
assert(len >= 0);
if (one_name != two_name)
case DIFF_STATUS_COPIED:
case DIFF_STATUS_RENAMED:
if (d->rename_status)
- die("BUG: multiple renames on the same target? how?");
+ BUG("multiple renames on the same target? how?");
d->rename_source = xstrdup(p->one->path);
d->rename_score = p->score * 100 / MAX_SCORE;
d->rename_status = p->status;
break;
default:
- die("BUG: unhandled diff-files status '%c'", p->status);
+ BUG("unhandled diff-files status '%c'", p->status);
break;
}
case DIFF_STATUS_COPIED:
case DIFF_STATUS_RENAMED:
if (d->rename_status)
- die("BUG: multiple renames on the same target? how?");
+ BUG("multiple renames on the same target? how?");
d->rename_source = xstrdup(p->one->path);
d->rename_score = p->score * 100 / MAX_SCORE;
d->rename_status = p->status;
break;
default:
- die("BUG: unhandled diff-index status '%c'", p->status);
+ BUG("unhandled diff-index status '%c'", p->status);
break;
}
}
}
rev.diffopt.format_callback = wt_status_collect_changed_cb;
rev.diffopt.format_callback_data = s;
+ rev.diffopt.detect_rename = s->detect_rename >= 0 ? s->detect_rename : rev.diffopt.detect_rename;
+ rev.diffopt.rename_limit = s->rename_limit >= 0 ? s->rename_limit : rev.diffopt.rename_limit;
+ rev.diffopt.rename_score = s->rename_score >= 0 ? s->rename_score : rev.diffopt.rename_score;
copy_pathspec(&rev.prune_data, &s->pathspec);
run_diff_files(&rev, 0);
}
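The three new wt_status fields use -1 as an "unset" sentinel: a configured
value wins, while anything left at -1 falls through to whatever default the
diff machinery already set, which is what the repeated ternaries express. A
generic sketch of the idiom (struct and function names hypothetical):

	#include <stdio.h>

	struct diff_opts { int detect_rename; };
	struct status_opts { int detect_rename; };	/* -1 means "not configured" */

	/* Keep the existing default unless the caller configured a value. */
	static int resolve(int configured, int fallback)
	{
		return configured >= 0 ? configured : fallback;
	}

	int main(void)
	{
		struct diff_opts diff = { 1 };		/* machinery default */
		struct status_opts status = { -1 };	/* user left it unset */

		diff.detect_rename = resolve(status.detect_rename, diff.detect_rename);
		printf("detect_rename = %d\n", diff.detect_rename);	/* prints 1 */
		return 0;
	}
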
init_revisions(&rev, NULL);
memset(&opt, 0, sizeof(opt));
- opt.def = s->is_initial ? EMPTY_TREE_SHA1_HEX : s->reference;
+ opt.def = s->is_initial ? empty_tree_oid_hex() : s->reference;
setup_revisions(0, NULL, &rev, &opt);
rev.diffopt.flags.override_submodule_config = 1;
rev.diffopt.output_format |= DIFF_FORMAT_CALLBACK;
rev.diffopt.format_callback = wt_status_collect_updated_cb;
rev.diffopt.format_callback_data = s;
- rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
- rev.diffopt.rename_limit = 200;
- rev.diffopt.break_opt = 0;
+ rev.diffopt.detect_rename = s->detect_rename >= 0 ? s->detect_rename : rev.diffopt.detect_rename;
+ rev.diffopt.rename_limit = s->rename_limit >= 0 ? s->rename_limit : rev.diffopt.rename_limit;
+ rev.diffopt.rename_score = s->rename_score >= 0 ? s->rename_score : rev.diffopt.rename_score;
copy_pathspec(&rev.prune_data, &s->pathspec);
run_diff_index(&rev, 1);
}
rev.diffopt.ita_invisible_in_index = 1;
memset(&opt, 0, sizeof(opt));
- opt.def = s->is_initial ? EMPTY_TREE_SHA1_HEX : s->reference;
+ opt.def = s->is_initial ? empty_tree_oid_hex() : s->reference;
setup_revisions(0, NULL, &rev, &opt);
rev.diffopt.output_format |= DIFF_FORMAT_PATCH;
- rev.diffopt.detect_rename = DIFF_DETECT_RENAME;
+ rev.diffopt.detect_rename = s->detect_rename >= 0 ? s->detect_rename : rev.diffopt.detect_rename;
+ rev.diffopt.rename_limit = s->rename_limit >= 0 ? s->rename_limit : rev.diffopt.rename_limit;
+ rev.diffopt.rename_score = s->rename_score >= 0 ? s->rename_score : rev.diffopt.rename_score;
rev.diffopt.file = s->fp;
rev.diffopt.close_file = 0;
/*
case 6: key = "AA"; break; /* both added */
case 7: key = "UU"; break; /* both modified */
default:
- die("BUG: unhandled unmerged status %x", d->stagemask);
+ BUG("unhandled unmerged status %x", d->stagemask);
}
/*
sum |= (1 << (stage - 1));
}
if (sum != d->stagemask)
- die("BUG: observed stagemask 0x%x != expected stagemask 0x%x", sum, d->stagemask);
+ BUG("observed stagemask 0x%x != expected stagemask 0x%x", sum, d->stagemask);
if (s->null_termination)
path_index = it->string;
wt_porcelain_v2_print(s);
break;
case STATUS_FORMAT_UNSPECIFIED:
- die("BUG: finalize_deferred_config() should have been called");
+ BUG("finalize_deferred_config() should have been called");
break;
case STATUS_FORMAT_NONE:
case STATUS_FORMAT_LONG: