Refactoring of the internal global data structure continues: the packed-git machinery in packfile.c now takes an explicit repository argument instead of relying on the_repository, and prepare_packed_git() becomes a private detail invoked lazily by the accessors.
* sb/packfiles-in-repository:
packfile: keep prepare_packed_git() private
packfile: allow find_pack_entry to handle arbitrary repositories
packfile: add repository argument to find_pack_entry
packfile: allow reprepare_packed_git to handle arbitrary repositories
packfile: allow prepare_packed_git to handle arbitrary repositories
packfile: allow prepare_packed_git_one to handle arbitrary repositories
packfile: add repository argument to reprepare_packed_git
packfile: add repository argument to prepare_packed_git
packfile: add repository argument to prepare_packed_git_one
packfile: allow install_packed_git to handle arbitrary repositories
packfile: allow rearrange_packed_git to handle arbitrary repositories
packfile: allow prepare_packed_git_mru to handle arbitrary repositories
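The practical effect on the internal API: callers name the repository explicitly instead of the code reaching for the_repository, and the prepare step turns into a lazy, private detail of packfile.c. A minimal before/after sketch (find_pack_entry(), reprepare_packed_git() and get_packed_git() are the functions touched by this series; oid and the surrounding setup are illustrative):

	struct object_id oid;	/* assume this names an object of interest */
	struct pack_entry e;
	struct packed_git *p;

	/* before: implicit global state, callers prepared it by hand */
	prepare_packed_git();
	find_pack_entry(oid.hash, &e);

	/* after: explicit repository; the accessors prepare lazily */
	find_pack_entry(the_repository, oid.hash, &e);
	reprepare_packed_git(the_repository);
	for (p = get_packed_git(the_repository); p; p = p->next)
		; /* walk that repository's packs */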
const char *ret;
if (obj->type == OBJ_NONE) {
- enum object_type type = sha1_object_info(obj->oid.hash, NULL);
+ enum object_type type = oid_object_info(&obj->oid, NULL);
if (type > 0)
object_as_type(obj, type, 0);
}
- ret = typename(obj->type);
+ ret = type_name(obj->type);
if (!ret)
ret = "unknown";
printf("broken link from %7s %s\n",
printable_type(parent), describe_object(parent));
printf("broken link from %7s %s\n",
- (type == OBJ_ANY ? "unknown" : typename(type)), "unknown");
+ (type == OBJ_ANY ? "unknown" : type_name(type)), "unknown");
errors_found |= ERROR_REACHABLE;
return 1;
}
static int traverse_one_object(struct object *obj)
{
- return fsck_walk(obj, obj, &fsck_walk_options);
+ int result = fsck_walk(obj, obj, &fsck_walk_options);
+
+ if (obj->type == OBJ_TREE) {
+ struct tree *tree = (struct tree *)obj;
+ free_tree_buffer(tree);
+ }
+ return result;
}
static int traverse_reachable(void)
unsigned long size;
int eaten;
- if (read_loose_object(path, oid->hash, &type, &size, &contents) < 0)
+ if (read_loose_object(path, oid, &type, &size, &contents) < 0)
return NULL;
if (!contents && type != OBJ_BLOB)
uint32_t total = 0, count = 0;
struct progress *progress = NULL;
- prepare_packed_git();
-
if (show_progress) {
for (p = get_packed_git(the_repository); p;
p = p->next) {
if (gc_auto_pack_limit <= 0)
return 0;
- prepare_packed_git();
for (cnt = 0, p = get_packed_git(the_repository); p; p = p->next) {
if (!p->pack_local)
continue;
N_("prune unreferenced objects"),
PARSE_OPT_OPTARG, NULL, (intptr_t)prune_expire },
OPT_BOOL(0, "aggressive", &aggressive, N_("be more thorough (increased runtime)")),
- OPT_BOOL(0, "auto", &auto_gc, N_("enable auto-gc mode")),
- OPT_BOOL(0, "force", &force, N_("force running gc even if there may be another gc running")),
+ OPT_BOOL_F(0, "auto", &auto_gc, N_("enable auto-gc mode"),
+ PARSE_OPT_NOCOMPLETE),
+ OPT_BOOL_F(0, "force", &force,
+ N_("force running gc even if there may be another gc running"),
+ PARSE_OPT_NOCOMPLETE),
OPT_END()
};
return error(FAILED_RUN, rerere.argv[0]);
report_garbage = report_pack_garbage;
- reprepare_packed_git();
+ reprepare_packed_git(the_repository);
if (pack_garbage.nr > 0)
clean_pack_garbage();
void *buf, *base_buf, *delta_buf;
enum object_type type;
- buf = read_sha1_file(entry->idx.oid.hash, &type, &size);
+ buf = read_object_file(&entry->idx.oid, &type, &size);
if (!buf)
die("unable to read %s", oid_to_hex(&entry->idx.oid));
- base_buf = read_sha1_file(entry->delta->idx.oid.hash, &type,
- &base_size);
+ base_buf = read_object_file(&entry->delta->idx.oid, &type, &base_size);
if (!base_buf)
die("unable to read %s",
oid_to_hex(&entry->delta->idx.oid));
return stream.total_out;
}
-static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f,
+static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f,
const struct object_id *oid)
{
git_zstream stream;
stream.next_out = obuf;
stream.avail_out = sizeof(obuf);
zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
- sha1write(f, obuf, stream.next_out - obuf);
+ hashwrite(f, obuf, stream.next_out - obuf);
olen += stream.next_out - obuf;
}
if (stream.avail_in)
stream.total_in == len) ? 0 : -1;
}
-static void copy_pack_data(struct sha1file *f,
+static void copy_pack_data(struct hashfile *f,
struct packed_git *p,
struct pack_window **w_curs,
off_t offset,
in = use_pack(p, w_curs, offset, &avail);
if (avail > len)
avail = (unsigned long)len;
- sha1write(f, in, avail);
+ hashwrite(f, in, avail);
offset += avail;
len -= avail;
}
}
/* Return 0 if we will bust the pack-size limit */
-static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
+static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry,
unsigned long limit, int usable_delta)
{
unsigned long size, datalen;
if (!usable_delta) {
if (entry->type == OBJ_BLOB &&
entry->size > big_file_threshold &&
- (st = open_istream(entry->idx.oid.hash, &type, &size, NULL)) != NULL)
+ (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
buf = NULL;
else {
- buf = read_sha1_file(entry->idx.oid.hash, &type,
- &size);
+ buf = read_object_file(&entry->idx.oid, &type, &size);
if (!buf)
die(_("unable to read %s"),
oid_to_hex(&entry->idx.oid));
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, dheader + pos, sizeof(dheader) - pos);
hdrlen += sizeof(dheader) - pos;
} else if (type == OBJ_REF_DELTA) {
/*
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, entry->delta->idx.oid.hash, 20);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, entry->delta->idx.oid.hash, 20);
hdrlen += 20;
} else {
if (limit && hdrlen + datalen + 20 >= limit) {
free(buf);
return 0;
}
- sha1write(f, header, hdrlen);
+ hashwrite(f, header, hdrlen);
}
if (st) {
datalen = write_large_blob_data(st, f, &entry->idx.oid);
close_istream(st);
} else {
- sha1write(f, buf, datalen);
+ hashwrite(f, buf, datalen);
free(buf);
}
}
/* Return 0 if we will bust the pack-size limit */
-static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
+static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
unsigned long limit, int usable_delta)
{
struct packed_git *p = entry->in_pack;
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, dheader + pos, sizeof(dheader) - pos);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, dheader + pos, sizeof(dheader) - pos);
hdrlen += sizeof(dheader) - pos;
reused_delta++;
} else if (type == OBJ_REF_DELTA) {
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
- sha1write(f, entry->delta->idx.oid.hash, 20);
+ hashwrite(f, header, hdrlen);
+ hashwrite(f, entry->delta->idx.oid.hash, 20);
hdrlen += 20;
reused_delta++;
} else {
unuse_pack(&w_curs);
return 0;
}
- sha1write(f, header, hdrlen);
+ hashwrite(f, header, hdrlen);
}
copy_pack_data(f, p, &w_curs, offset, datalen);
unuse_pack(&w_curs);
}
/* Return 0 if we will bust the pack-size limit */
-static off_t write_object(struct sha1file *f,
+static off_t write_object(struct hashfile *f,
struct object_entry *entry,
off_t write_offset)
{
WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};
-static enum write_one_status write_one(struct sha1file *f,
+static enum write_one_status write_one(struct hashfile *f,
struct object_entry *e,
off_t *offset)
{
return wo;
}
-static off_t write_reused_pack(struct sha1file *f)
+static off_t write_reused_pack(struct hashfile *f)
{
unsigned char buffer[8192];
off_t to_write, total;
if (read_pack > to_write)
read_pack = to_write;
- sha1write(f, buffer, read_pack);
+ hashwrite(f, buffer, read_pack);
to_write -= read_pack;
/*
static void write_pack_file(void)
{
uint32_t i = 0, j;
- struct sha1file *f;
+ struct hashfile *f;
off_t offset;
uint32_t nr_remaining = nr_result;
time_t last_mtime = 0;
char *pack_tmp_name = NULL;
if (pack_to_stdout)
- f = sha1fd_throughput(1, "<stdout>", progress_state);
+ f = hashfd_throughput(1, "<stdout>", progress_state);
else
f = create_tmp_packfile(&pack_tmp_name);
* If so, rewrite it like in fast-import
*/
if (pack_to_stdout) {
- sha1close(f, oid.hash, CSUM_CLOSE);
+ hashclose(f, oid.hash, CSUM_CLOSE);
} else if (nr_written == nr_remaining) {
- sha1close(f, oid.hash, CSUM_FSYNC);
+ hashclose(f, oid.hash, CSUM_FSYNC);
} else {
- int fd = sha1close(f, oid.hash, 0);
+ int fd = hashclose(f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
nr_written, oid.hash, offset);
close(fd);
/* Did not find one. Either we got a bogus request or
* we need to read and perhaps cache.
*/
- data = read_sha1_file(oid->hash, &type, &size);
+ data = read_object_file(oid, &type, &size);
if (!data)
return NULL;
if (type != OBJ_TREE) {
if (window <= num_preferred_base++)
return;
- data = read_object_with_reference(oid->hash, tree_type, &size, tree_oid.hash);
+ data = read_object_with_reference(oid, tree_type, &size, &tree_oid);
if (!data)
return;
it = pbase_tree;
pbase_tree = NULL;
while (it) {
- struct pbase_tree *this = it;
- it = this->next;
- free(this->pcache.tree_data);
- free(this);
+ struct pbase_tree *tmp = it;
+ it = tmp->next;
+ free(tmp->pcache.tree_data);
+ free(tmp);
}
for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
unuse_pack(&w_curs);
}
- entry->type = sha1_object_info(entry->idx.oid.hash, &entry->size);
+ entry->type = oid_object_info(&entry->idx.oid, &entry->size);
/*
* The error condition is checked in prepare_pack(). This is
* to permit a missing preferred base object to be ignored
* And if that fails, the error will be recorded in entry->type
* and dealt with in prepare_pack().
*/
- entry->type = sha1_object_info(entry->idx.oid.hash,
- &entry->size);
+ entry->type = oid_object_info(&entry->idx.oid, &entry->size);
}
}
/* Load data if not already done */
if (!trg->data) {
read_lock();
- trg->data = read_sha1_file(trg_entry->idx.oid.hash, &type,
- &sz);
+ trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
read_unlock();
if (!trg->data)
die("object %s cannot be read",
}
if (!src->data) {
read_lock();
- src->data = read_sha1_file(src_entry->idx.oid.hash, &type,
- &sz);
+ src->data = read_object_file(&src_entry->idx.oid, &type, &sz);
read_unlock();
if (!src->data) {
if (src_entry->preferred_base) {
}
}
+/* Remember to update object flag allocation in object.h */
#define OBJECT_ADDED (1u<<20)
static void show_commit(struct commit *commit, void *data)
static int add_loose_object(const struct object_id *oid, const char *path,
void *data)
{
- enum object_type type = sha1_object_info(oid->hash, NULL);
+ enum object_type type = oid_object_info(oid, NULL);
if (type < 0) {
warning("loose object at %s could not be examined", path);
if (!packlist_find(&to_pack, oid.hash, NULL) &&
!has_sha1_pack_kept_or_nonlocal(&oid) &&
!loosened_object_can_be_discarded(&oid, p->mtime))
- if (force_object_loose(oid.hash, p->mtime))
+ if (force_object_loose(&oid, p->mtime))
die("unable to force loose object");
}
}
if (progress && all_progress_implied)
progress = 2;
- prepare_packed_git();
if (ignore_packed_keep) {
struct packed_git *p;
for (p = get_packed_git(the_repository); p; p = p->next)
static inline struct llist_item *llist_item_get(void)
{
- struct llist_item *new;
+ struct llist_item *new_item;
if ( free_nodes ) {
- new = free_nodes;
+ new_item = free_nodes;
free_nodes = free_nodes->next;
} else {
int i = 1;
- ALLOC_ARRAY(new, BLKSIZE);
+ ALLOC_ARRAY(new_item, BLKSIZE);
for (; i < BLKSIZE; i++)
- llist_item_put(&new[i]);
+ llist_item_put(&new_item[i]);
}
- return new;
+ return new_item;
}
static void llist_free(struct llist *list)
static struct llist * llist_copy(struct llist *list)
{
struct llist *ret;
- struct llist_item *new, *old, *prev;
+ struct llist_item *new_item, *old_item, *prev;
llist_init(&ret);
if ((ret->size = list->size) == 0)
return ret;
- new = ret->front = llist_item_get();
- new->sha1 = list->front->sha1;
+ new_item = ret->front = llist_item_get();
+ new_item->sha1 = list->front->sha1;
- old = list->front->next;
- while (old) {
- prev = new;
- new = llist_item_get();
- prev->next = new;
- new->sha1 = old->sha1;
- old = old->next;
+ old_item = list->front->next;
+ while (old_item) {
+ prev = new_item;
+ new_item = llist_item_get();
+ prev->next = new_item;
+ new_item->sha1 = old_item->sha1;
+ old_item = old_item->next;
}
- new->next = NULL;
- ret->back = new;
+ new_item->next = NULL;
+ ret->back = new_item;
return ret;
}
struct llist_item *after,
const unsigned char *sha1)
{
- struct llist_item *new = llist_item_get();
- new->sha1 = sha1;
- new->next = NULL;
+ struct llist_item *new_item = llist_item_get();
+ new_item->sha1 = sha1;
+ new_item->next = NULL;
if (after != NULL) {
- new->next = after->next;
- after->next = new;
+ new_item->next = after->next;
+ after->next = new_item;
if (after == list->back)
- list->back = new;
+ list->back = new_item;
} else {/* insert in front */
if (list->size == 0)
- list->back = new;
+ list->back = new_item;
else
- new->next = list->front;
- list->front = new;
+ new_item->next = list->front;
+ list->front = new_item;
}
list->size++;
- return new;
+ return new_item;
}
static inline struct llist_item *llist_insert_back(struct llist *list,
break;
}
- prepare_packed_git();
-
if (load_all_packs)
load_all();
else
#include "builtin.h"
+ #include "repository.h"
#include "config.h"
#include "lockfile.h"
#include "pack.h"
static int shallow_update;
static const char *alt_shallow_file;
static struct strbuf push_cert = STRBUF_INIT;
-static unsigned char push_cert_sha1[20];
+static struct object_id push_cert_oid;
static struct signature_check sigcheck;
static const char *push_cert_nonce;
static const char *cert_nonce_seed;
int bogs /* beginning_of_gpg_sig */;
already_done = 1;
- if (write_sha1_file(push_cert.buf, push_cert.len, "blob", push_cert_sha1))
- hashclr(push_cert_sha1);
+ if (write_object_file(push_cert.buf, push_cert.len, "blob",
+ &push_cert_oid))
+ oidclr(&push_cert_oid);
memset(&sigcheck, '\0', sizeof(sigcheck));
sigcheck.result = 'N';
strbuf_release(&gpg_status);
nonce_status = check_nonce(push_cert.buf, bogs);
}
- if (!is_null_sha1(push_cert_sha1)) {
+ if (!is_null_oid(&push_cert_oid)) {
argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT=%s",
- sha1_to_hex(push_cert_sha1));
+ oid_to_hex(&push_cert_oid));
argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_SIGNER=%s",
sigcheck.signer ? sigcheck.signer : "");
argv_array_pushf(&proc->env_array, "GIT_PUSH_CERT_KEY=%s",
rp_error("refusing inconsistent update between symref '%s' (%s..%s) and"
" its target '%s' (%s..%s)",
cmd->ref_name,
- find_unique_abbrev(cmd->old_oid.hash, DEFAULT_ABBREV),
- find_unique_abbrev(cmd->new_oid.hash, DEFAULT_ABBREV),
+ find_unique_abbrev(&cmd->old_oid, DEFAULT_ABBREV),
+ find_unique_abbrev(&cmd->new_oid, DEFAULT_ABBREV),
dst_cmd->ref_name,
- find_unique_abbrev(dst_cmd->old_oid.hash, DEFAULT_ABBREV),
- find_unique_abbrev(dst_cmd->new_oid.hash, DEFAULT_ABBREV));
+ find_unique_abbrev(&dst_cmd->old_oid, DEFAULT_ABBREV),
+ find_unique_abbrev(&dst_cmd->new_oid, DEFAULT_ABBREV));
cmd->error_string = dst_cmd->error_string =
"inconsistent aliased update";
status = finish_command(&child);
if (status)
return "index-pack abnormal exit";
- reprepare_packed_git();
+ reprepare_packed_git(the_repository);
}
return NULL;
}
unpack_limit = receive_unpack_limit;
switch (determine_protocol_version_server()) {
+ case protocol_v2:
+ /*
+ * push support for protocol v2 has not been implemented yet,
+ * so ignore the request to use v2 and fall back to using v0.
+ */
+ break;
case protocol_v1:
/*
* v1 is just the original protocol with a version string,
*/
#include "cache.h"
#include "bulk-checkin.h"
+ #include "repository.h"
#include "csum-file.h"
#include "pack.h"
#include "strbuf.h"
unsigned plugged:1;
char *pack_tmp_name;
- struct sha1file *f;
+ struct hashfile *f;
off_t offset;
struct pack_idx_option pack_idx_opts;
unlink(state->pack_tmp_name);
goto clear_exit;
} else if (state->nr_written == 1) {
- sha1close(state->f, oid.hash, CSUM_FSYNC);
+ hashclose(state->f, oid.hash, CSUM_FSYNC);
} else {
- int fd = sha1close(state->f, oid.hash, 0);
+ int fd = hashclose(state->f, oid.hash, 0);
fixup_pack_header_footer(fd, oid.hash, state->pack_tmp_name,
state->nr_written, oid.hash,
state->offset);
strbuf_release(&packname);
/* Make objects we just wrote available to ourselves */
- reprepare_packed_git();
+ reprepare_packed_git(the_repository);
}
-static int already_written(struct bulk_checkin_state *state, unsigned char sha1[])
+static int already_written(struct bulk_checkin_state *state, struct object_id *oid)
{
int i;
/* The object may already exist in the repository */
- if (has_sha1_file(sha1))
+ if (has_sha1_file(oid->hash))
return 1;
/* Might want to keep the list sorted */
for (i = 0; i < state->nr_written; i++)
- if (!hashcmp(state->written[i]->oid.hash, sha1))
+ if (!oidcmp(&state->written[i]->oid, oid))
return 1;
/* This is a new object we need to keep */
* with a new pack.
*/
static int stream_to_pack(struct bulk_checkin_state *state,
- git_SHA_CTX *ctx, off_t *already_hashed_to,
+ git_hash_ctx *ctx, off_t *already_hashed_to,
int fd, size_t size, enum object_type type,
const char *path, unsigned flags)
{
if (rsize < hsize)
hsize = rsize;
if (hsize)
- git_SHA1_Update(ctx, ibuf, hsize);
+ the_hash_algo->update_fn(ctx, ibuf, hsize);
*already_hashed_to = offset;
}
s.next_in = ibuf;
return -1;
}
- sha1write(state->f, obuf, written);
+ hashwrite(state->f, obuf, written);
state->offset += written;
}
s.next_out = obuf;
}
static int deflate_to_pack(struct bulk_checkin_state *state,
- unsigned char result_sha1[],
+ struct object_id *result_oid,
int fd, size_t size,
enum object_type type, const char *path,
unsigned flags)
{
off_t seekback, already_hashed_to;
- git_SHA_CTX ctx;
+ git_hash_ctx ctx;
unsigned char obuf[16384];
unsigned header_len;
- struct sha1file_checkpoint checkpoint;
+ struct hashfile_checkpoint checkpoint;
struct pack_idx_entry *idx = NULL;
seekback = lseek(fd, 0, SEEK_CUR);
return error("cannot find the current offset");
header_len = xsnprintf((char *)obuf, sizeof(obuf), "%s %" PRIuMAX,
- typename(type), (uintmax_t)size) + 1;
- git_SHA1_Init(&ctx);
- git_SHA1_Update(&ctx, obuf, header_len);
+ type_name(type), (uintmax_t)size) + 1;
+ the_hash_algo->init_fn(&ctx);
+ the_hash_algo->update_fn(&ctx, obuf, header_len);
/* Note: idx is non-NULL when we are writing */
if ((flags & HASH_WRITE_OBJECT) != 0)
while (1) {
prepare_to_stream(state, flags);
if (idx) {
- sha1file_checkpoint(state->f, &checkpoint);
+ hashfile_checkpoint(state->f, &checkpoint);
idx->offset = state->offset;
crc32_begin(state->f);
}
*/
if (!idx)
die("BUG: should not happen");
- sha1file_truncate(state->f, &checkpoint);
+ hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
finish_bulk_checkin(state);
if (lseek(fd, seekback, SEEK_SET) == (off_t) -1)
return error("cannot seek back");
}
- git_SHA1_Final(result_sha1, &ctx);
+ the_hash_algo->final_fn(result_oid->hash, &ctx);
if (!idx)
return 0;
idx->crc32 = crc32_end(state->f);
- if (already_written(state, result_sha1)) {
- sha1file_truncate(state->f, &checkpoint);
+ if (already_written(state, result_oid)) {
+ hashfile_truncate(state->f, &checkpoint);
state->offset = checkpoint.offset;
free(idx);
} else {
- hashcpy(idx->oid.hash, result_sha1);
+ oidcpy(&idx->oid, result_oid);
ALLOC_GROW(state->written,
state->nr_written + 1,
state->alloc_written);
return 0;
}
-int index_bulk_checkin(unsigned char *sha1,
+int index_bulk_checkin(struct object_id *oid,
int fd, size_t size, enum object_type type,
const char *path, unsigned flags)
{
- int status = deflate_to_pack(&state, sha1, fd, size, type,
+ int status = deflate_to_pack(&state, oid, fd, size, type,
path, flags);
if (!state.plugged)
finish_bulk_checkin(&state);
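The git_SHA1_* to the_hash_algo conversion above routes bulk-checkin through the struct git_hash_algo vtable, so the code no longer hard-codes SHA-1. A minimal sketch of hashing a buffer via that interface (init_fn/update_fn/final_fn and GIT_MAX_RAWSZ are the real names from the hash abstraction; buf and len are illustrative):

	git_hash_ctx ctx;
	unsigned char hash[GIT_MAX_RAWSZ];

	the_hash_algo->init_fn(&ctx);
	the_hash_algo->update_fn(&ctx, buf, len);
	the_hash_algo->final_fn(hash, &ctx);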
#include "run-command.h"
#include "packfile.h"
#include "object-store.h"
+#include "mem-pool.h"
#define PACK_ID_BITS 16
#define MAX_PACK_ID ((1<<PACK_ID_BITS)-1)
unsigned no_swap : 1;
};
-struct mem_pool {
- struct mem_pool *next_pool;
- char *next_free;
- char *end;
- uintmax_t space[FLEX_ARRAY]; /* more */
-};
-
struct atom_str {
struct atom_str *next_atom;
unsigned short str_len;
static const char **global_argv;
/* Memory pools */
-static size_t mem_pool_alloc = 2*1024*1024 - sizeof(struct mem_pool);
-static size_t total_allocd;
-static struct mem_pool *mem_pool;
+static struct mem_pool fi_mem_pool = {0, 2*1024*1024 - sizeof(struct mp_block), 0 };
/* Atom management */
static unsigned int atom_table_sz = 4451;
/* The .pack file being generated */
static struct pack_idx_option pack_idx_opts;
static unsigned int pack_id;
-static struct sha1file *pack_file;
+static struct hashfile *pack_file;
static struct packed_git *pack_data;
static struct packed_git **all_packs;
static off_t pack_size;
/* Table of objects we've written. */
static unsigned int object_entry_alloc = 5000;
static struct object_entry_pool *blocks;
+static size_t total_allocd;
static struct object_entry *object_table[1 << 16];
static struct mark_set *marks;
static const char *export_marks_file;
return r;
}
-static void *pool_alloc(size_t len)
-{
- struct mem_pool *p;
- void *r;
-
- /* round up to a 'uintmax_t' alignment */
- if (len & (sizeof(uintmax_t) - 1))
- len += sizeof(uintmax_t) - (len & (sizeof(uintmax_t) - 1));
-
- for (p = mem_pool; p; p = p->next_pool)
- if ((p->end - p->next_free >= len))
- break;
-
- if (!p) {
- if (len >= (mem_pool_alloc/2)) {
- total_allocd += len;
- return xmalloc(len);
- }
- total_allocd += sizeof(struct mem_pool) + mem_pool_alloc;
- p = xmalloc(st_add(sizeof(struct mem_pool), mem_pool_alloc));
- p->next_pool = mem_pool;
- p->next_free = (char *) p->space;
- p->end = p->next_free + mem_pool_alloc;
- mem_pool = p;
- }
-
- r = p->next_free;
- p->next_free += len;
- return r;
-}
-
-static void *pool_calloc(size_t count, size_t size)
-{
- size_t len = count * size;
- void *r = pool_alloc(len);
- memset(r, 0, len);
- return r;
-}
-
static char *pool_strdup(const char *s)
{
size_t len = strlen(s) + 1;
- char *r = pool_alloc(len);
+ char *r = mem_pool_alloc(&fi_mem_pool, len);
memcpy(r, s, len);
return r;
}
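fast-import's private pool_alloc()/pool_calloc() are dropped in favor of the shared mem-pool allocator; the converted call sites above assume roughly this interface (signatures inferred from the calls in this diff — see mem-pool.h for the authoritative declarations):

	struct mem_pool;	/* chunked allocator backed by struct mp_block */

	void *mem_pool_alloc(struct mem_pool *pool, size_t len);
	void *mem_pool_calloc(struct mem_pool *pool, size_t count, size_t size);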
{
struct mark_set *s = marks;
while ((idnum >> s->shift) >= 1024) {
- s = pool_calloc(1, sizeof(struct mark_set));
+ s = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
s->shift = marks->shift + 10;
s->data.sets[0] = marks;
marks = s;
uintmax_t i = idnum >> s->shift;
idnum -= i << s->shift;
if (!s->data.sets[i]) {
- s->data.sets[i] = pool_calloc(1, sizeof(struct mark_set));
+ s->data.sets[i] = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
s->data.sets[i]->shift = s->shift - 10;
}
s = s->data.sets[i];
if (c->str_len == len && !strncmp(s, c->str_dat, len))
return c;
- c = pool_alloc(sizeof(struct atom_str) + len + 1);
+ c = mem_pool_alloc(&fi_mem_pool, sizeof(struct atom_str) + len + 1);
c->str_len = len;
memcpy(c->str_dat, s, len);
c->str_dat[len] = 0;
if (check_refname_format(name, REFNAME_ALLOW_ONELEVEL))
die("Branch name doesn't conform to GIT standards: %s", name);
- b = pool_calloc(1, sizeof(struct branch));
+ b = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct branch));
b->name = pool_strdup(name);
b->table_next_branch = branch_table[hc];
b->branch_tree.versions[0].mode = S_IFDIR;
avail_tree_table[hc] = f->next_avail;
} else {
cnt = cnt & 7 ? ((cnt / 8) + 1) * 8 : cnt;
- f = pool_alloc(sizeof(*t) + sizeof(t->entries[0]) * cnt);
+ f = mem_pool_alloc(&fi_mem_pool, sizeof(*t) + sizeof(t->entries[0]) * cnt);
f->entry_capacity = cnt;
}
p->pack_fd = pack_fd;
p->do_not_close = 1;
- pack_file = sha1fd(pack_fd, p->pack_name);
+ pack_file = hashfd(pack_fd, p->pack_name);
hdr.hdr_signature = htonl(PACK_SIGNATURE);
hdr.hdr_version = htonl(2);
hdr.hdr_entries = 0;
- sha1write(pack_file, &hdr, sizeof(hdr));
+ hashwrite(pack_file, &hdr, sizeof(hdr));
pack_data = p;
pack_size = sizeof(hdr);
struct tag *t;
close_pack_windows(pack_data);
- sha1close(pack_file, cur_pack_oid.hash, 0);
+ hashclose(pack_file, cur_pack_oid.hash, 0);
fixup_pack_header_footer(pack_data->pack_fd, pack_data->sha1,
pack_data->pack_name, object_count,
cur_pack_oid.hash, pack_size);
if (!new_p)
die("core git rejected index %s", idx_name);
all_packs[pack_id] = new_p;
- install_packed_git(new_p);
+ install_packed_git(the_repository, new_p);
free(idx_name);
/* Print the boundary */
unsigned char hdr[96];
struct object_id oid;
unsigned long hdrlen, deltalen;
- git_SHA_CTX c;
+ git_hash_ctx c;
git_zstream s;
hdrlen = xsnprintf((char *)hdr, sizeof(hdr), "%s %lu",
- typename(type), (unsigned long)dat->len) + 1;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, hdrlen);
- git_SHA1_Update(&c, dat->buf, dat->len);
- git_SHA1_Final(oid.hash, &c);
+ type_name(type), (unsigned long)dat->len) + 1;
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
+ the_hash_algo->update_fn(&c, dat->buf, dat->len);
+ the_hash_algo->final_fn(oid.hash, &c);
if (oidout)
oidcpy(oidout, &oid);
return 1;
}
- if (last && last->data.buf && last->depth < max_depth && dat->len > 20) {
+ if (last && last->data.buf && last->depth < max_depth
+ && dat->len > the_hash_algo->rawsz) {
+
delta_count_attempts_by_type[type]++;
delta = diff_delta(last->data.buf, last->data.len,
dat->buf, dat->len,
- &deltalen, dat->len - 20);
+ &deltalen, dat->len - the_hash_algo->rawsz);
} else
delta = NULL;
hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
OBJ_OFS_DELTA, deltalen);
- sha1write(pack_file, hdr, hdrlen);
+ hashwrite(pack_file, hdr, hdrlen);
pack_size += hdrlen;
hdr[pos] = ofs & 127;
while (ofs >>= 7)
hdr[--pos] = 128 | (--ofs & 127);
- sha1write(pack_file, hdr + pos, sizeof(hdr) - pos);
+ hashwrite(pack_file, hdr + pos, sizeof(hdr) - pos);
pack_size += sizeof(hdr) - pos;
} else {
e->depth = 0;
hdrlen = encode_in_pack_object_header(hdr, sizeof(hdr),
type, dat->len);
- sha1write(pack_file, hdr, hdrlen);
+ hashwrite(pack_file, hdr, hdrlen);
pack_size += hdrlen;
}
- sha1write(pack_file, out, s.total_out);
+ hashwrite(pack_file, out, s.total_out);
pack_size += s.total_out;
e->idx.crc32 = crc32_end(pack_file);
return 0;
}
-static void truncate_pack(struct sha1file_checkpoint *checkpoint)
+static void truncate_pack(struct hashfile_checkpoint *checkpoint)
{
- if (sha1file_truncate(pack_file, checkpoint))
+ if (hashfile_truncate(pack_file, checkpoint))
die_errno("cannot truncate pack to skip duplicate");
pack_size = checkpoint->offset;
}
struct object_id oid;
unsigned long hdrlen;
off_t offset;
- git_SHA_CTX c;
+ git_hash_ctx c;
git_zstream s;
- struct sha1file_checkpoint checkpoint;
+ struct hashfile_checkpoint checkpoint;
int status = Z_OK;
/* Determine if we should auto-checkpoint. */
|| (pack_size + 60 + len) < pack_size)
cycle_packfile();
- sha1file_checkpoint(pack_file, &checkpoint);
+ hashfile_checkpoint(pack_file, &checkpoint);
offset = checkpoint.offset;
hdrlen = xsnprintf((char *)out_buf, out_sz, "blob %" PRIuMAX, len) + 1;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, out_buf, hdrlen);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, out_buf, hdrlen);
crc32_begin(pack_file);
if (!n && feof(stdin))
die("EOF in data (%" PRIuMAX " bytes remaining)", len);
- git_SHA1_Update(&c, in_buf, n);
+ the_hash_algo->update_fn(&c, in_buf, n);
s.next_in = in_buf;
s.avail_in = n;
len -= n;
if (!s.avail_out || status == Z_STREAM_END) {
size_t n = s.next_out - out_buf;
- sha1write(pack_file, out_buf, n);
+ hashwrite(pack_file, out_buf, n);
pack_size += n;
s.next_out = out_buf;
s.avail_out = out_sz;
}
}
git_deflate_end(&s);
- git_SHA1_Final(oid.hash, &c);
+ the_hash_algo->final_fn(oid.hash, &c);
if (oidout)
oidcpy(oidout, &oid);
{
enum object_type type;
struct packed_git *p = all_packs[oe->pack_id];
- if (p == pack_data && p->pack_size < (pack_size + 20)) {
+ if (p == pack_data && p->pack_size < (pack_size + the_hash_algo->rawsz)) {
/* The object is stored in the packfile we are writing to
* and we have modified it since the last time we scanned
* back to read a previously written object. If an old
- * window covered [p->pack_size, p->pack_size + 20) its
+ * window covered [p->pack_size, p->pack_size + rawsz) its
* data is stale and is not valid. Closing all windows
* and updating the packfile length ensures we can read
* the newly written data.
*/
close_pack_windows(p);
- sha1flush(pack_file);
+ hashflush(pack_file);
- /* We have to offer 20 bytes additional on the end of
+ /* We have to offer rawsz bytes additional on the end of
* the packfile as the core unpacker code assumes the
* footer is present at the file end and must promise
- * at least 20 bytes within any window it maps. But
+ * at least rawsz bytes within any window it maps. But
* we don't actually create the footer here.
*/
- p->pack_size = pack_size + 20;
+ p->pack_size = pack_size + the_hash_algo->rawsz;
}
return unpack_entry(p, oe->idx.offset, &type, sizep);
}
die("Can't load tree %s", oid_to_hex(oid));
} else {
enum object_type type;
- buf = read_sha1_file(oid->hash, &type, &size);
+ buf = read_object_file(oid, &type, &size);
if (!buf || type != OBJ_TREE)
die("Can't load tree %s", oid_to_hex(oid));
}
die("corrupt mark line: %s", line);
e = find_object(&oid);
if (!e) {
- enum object_type type = sha1_object_info(oid.hash, NULL);
+ enum object_type type = oid_object_info(&oid, NULL);
if (type < 0)
die("object not found: %s", oid_to_hex(&oid));
e = insert_object(&oid);
unsigned char fanout, char *path)
{
unsigned int i = 0, j = 0;
- if (fanout >= 20)
+ if (fanout >= the_hash_algo->rawsz)
die("Too large fanout (%u)", fanout);
while (fanout) {
path[i++] = hex_sha1[j++];
path[i++] = hex_sha1[j++];
path[i++] = '/';
fanout--;
}
- memcpy(path + i, hex_sha1 + j, GIT_SHA1_HEXSZ - j);
- path[i + GIT_SHA1_HEXSZ - j] = '\0';
+ memcpy(path + i, hex_sha1 + j, the_hash_algo->hexsz - j);
+ path[i + the_hash_algo->hexsz - j] = '\0';
}
static uintmax_t do_change_note_fanout(
else if (oe) {
if (oe->type != OBJ_COMMIT)
die("Not a commit (actually a %s): %s",
- typename(oe->type), command_buf.buf);
+ type_name(oe->type), command_buf.buf);
}
/*
* Accept the sha1 without checking; it expected to be in
enum object_type expected = S_ISDIR(mode) ?
OBJ_TREE: OBJ_BLOB;
enum object_type type = oe ? oe->type :
- sha1_object_info(oid.hash, NULL);
+ oid_object_info(&oid, NULL);
if (type < 0)
die("%s not found: %s",
S_ISDIR(mode) ? "Tree" : "Blob",
command_buf.buf);
if (type != expected)
die("Not a %s (actually a %s): %s",
- typename(expected), typename(type),
+ type_name(expected), type_name(type),
command_buf.buf);
}
oidcpy(&commit_oid, &commit_oe->idx.oid);
} else if (!get_oid(p, &commit_oid)) {
unsigned long size;
- char *buf = read_object_with_reference(commit_oid.hash,
- commit_type, &size, commit_oid.hash);
+ char *buf = read_object_with_reference(&commit_oid,
+ commit_type, &size,
+ &commit_oid);
if (!buf || size < 46)
die("Not a valid commit: %s", p);
free(buf);
} else if (oe) {
if (oe->type != OBJ_BLOB)
die("Not a blob (actually a %s): %s",
- typename(oe->type), command_buf.buf);
+ type_name(oe->type), command_buf.buf);
} else if (!is_null_oid(&oid)) {
- enum object_type type = sha1_object_info(oid.hash, NULL);
+ enum object_type type = oid_object_info(&oid, NULL);
if (type < 0)
die("Blob not found: %s", command_buf.buf);
if (type != OBJ_BLOB)
die("Not a blob (actually a %s): %s",
- typename(type), command_buf.buf);
+ type_name(type), command_buf.buf);
}
construct_path_with_fanout(oid_to_hex(&commit_oid), *old_fanout, path);
unsigned long size;
char *buf;
- buf = read_object_with_reference(b->oid.hash,
- commit_type, &size,
- b->oid.hash);
+ buf = read_object_with_reference(&b->oid, commit_type, &size,
+ &b->oid);
parse_from_commit(b, buf, size);
free(buf);
}
oidcpy(&n->oid, &oe->idx.oid);
} else if (!get_oid(from, &n->oid)) {
unsigned long size;
- char *buf = read_object_with_reference(n->oid.hash,
- commit_type, &size, n->oid.hash);
+ char *buf = read_object_with_reference(&n->oid,
+ commit_type,
+ &size, &n->oid);
if (!buf || size < 46)
die("Not a valid commit: %s", from);
free(buf);
enum object_type type;
const char *v;
- t = pool_alloc(sizeof(struct tag));
+ t = mem_pool_alloc(&fi_mem_pool, sizeof(struct tag));
memset(t, 0, sizeof(struct tag));
t->name = pool_strdup(arg);
if (last_tag)
} else if (!get_oid(from, &oid)) {
struct object_entry *oe = find_object(&oid);
if (!oe) {
- type = sha1_object_info(oid.hash, NULL);
+ type = oid_object_info(&oid, NULL);
if (type < 0)
die("Not a valid object: %s", from);
} else
"object %s\n"
"type %s\n"
"tag %s\n",
- oid_to_hex(&oid), typename(type), t->name);
+ oid_to_hex(&oid), type_name(type), t->name);
if (tagger)
strbuf_addf(&new_data,
"tagger %s\n", tagger);
char *buf;
if (!oe || oe->pack_id == MAX_PACK_ID) {
- buf = read_sha1_file(oid->hash, &type, &size);
+ buf = read_object_file(oid, &type, &size);
} else {
type = oe->type;
buf = gfi_unpack_entry(oe, &size);
die("Can't read object %s", oid_to_hex(oid));
if (type != OBJ_BLOB)
die("Object %s is a %s but a blob was expected.",
- oid_to_hex(oid), typename(type));
+ oid_to_hex(oid), type_name(type));
strbuf_reset(&line);
strbuf_addf(&line, "%s %s %lu\n", oid_to_hex(oid),
- typename(type), size);
+ type_name(type), size);
cat_blob_write(line.buf, line.len);
strbuf_release(&line);
cat_blob_write(buf, size);
static void parse_get_mark(const char *p)
{
- struct object_entry *oe = oe;
+ struct object_entry *oe;
char output[GIT_MAX_HEXSZ + 2];
/* get-mark SP <object> LF */
static void parse_cat_blob(const char *p)
{
- struct object_entry *oe = oe;
+ struct object_entry *oe;
struct object_id oid;
/* cat-blob SP <object> LF */
unsigned long size;
char *buf = NULL;
if (!oe) {
- enum object_type type = sha1_object_info(oid->hash, NULL);
+ enum object_type type = oid_object_info(oid, NULL);
if (type < 0)
die("object not found: %s", oid_to_hex(oid));
/* cache it! */
buf = gfi_unpack_entry(oe, &size);
} else {
enum object_type unused;
- buf = read_sha1_file(oid->hash, &unused, &size);
+ buf = read_object_file(oid, &unused, &size);
}
if (!buf)
die("Can't load object %s", oid_to_hex(oid));
atom_table = xcalloc(atom_table_sz, sizeof(struct atom_str*));
branch_table = xcalloc(branch_table_sz, sizeof(struct branch*));
avail_tree_table = xcalloc(avail_tree_table_sz, sizeof(struct avail_tree_content*));
- marks = pool_calloc(1, sizeof(struct mark_set));
+ marks = mem_pool_calloc(&fi_mem_pool, 1, sizeof(struct mark_set));
global_argc = argc;
global_argv = argv;
- rc_free = pool_alloc(cmd_save * sizeof(*rc_free));
+ rc_free = mem_pool_alloc(&fi_mem_pool, cmd_save * sizeof(*rc_free));
for (i = 0; i < (cmd_save - 1); i++)
rc_free[i].next = &rc_free[i + 1];
rc_free[cmd_save - 1].next = NULL;
- prepare_packed_git();
start_packfile();
set_die_routine(die_nicely);
set_checkpoint_signal();
fprintf(stderr, "Total branches: %10lu (%10lu loads )\n", branch_count, branch_load_count);
fprintf(stderr, " marks: %10" PRIuMAX " (%10" PRIuMAX " unique )\n", (((uintmax_t)1) << marks->shift) * 1024, marks_set_count);
fprintf(stderr, " atoms: %10u\n", atom_cnt);
- fprintf(stderr, "Memory total: %10" PRIuMAX " KiB\n", (total_allocd + alloc_count*sizeof(struct object_entry))/1024);
- fprintf(stderr, " pools: %10lu KiB\n", (unsigned long)(total_allocd/1024));
+ fprintf(stderr, "Memory total: %10" PRIuMAX " KiB\n", (total_allocd + fi_mem_pool.pool_alloc + alloc_count*sizeof(struct object_entry))/1024);
+ fprintf(stderr, " pools: %10lu KiB\n", (unsigned long)((total_allocd + fi_mem_pool.pool_alloc) /1024));
fprintf(stderr, " objects: %10" PRIuMAX " KiB\n", (alloc_count*sizeof(struct object_entry))/1024);
fprintf(stderr, "---------------------------------------------------------------------\n");
pack_report();
#include "cache.h"
+ #include "repository.h"
#include "config.h"
#include "lockfile.h"
#include "refs.h"
char *line = packet_read_line(fd, &len);
const char *arg;
- if (!len)
- die(_("git fetch-pack: expected ACK/NAK, got EOF"));
+ if (!line)
+ die(_("git fetch-pack: expected ACK/NAK, got a flush packet"));
if (!strcmp(line, "NAK"))
return NAK;
if (skip_prefix(line, "ACK ", &arg)) {
#define PIPESAFE_FLUSH 32
#define LARGE_FLUSH 16384
-static int next_flush(struct fetch_pack_args *args, int count)
+static int next_flush(int stateless_rpc, int count)
{
- if (args->stateless_rpc) {
+ if (stateless_rpc) {
if (count < LARGE_FLUSH)
count <<= 1;
else
send_request(args, fd[1], &req_buf);
strbuf_setlen(&req_buf, state_len);
flushes++;
- flush_at = next_flush(args, count);
+ flush_at = next_flush(args->stateless_rpc, count);
/*
* We keep one window "ahead" of the other side, and
mark_complete(&obj->oid);
}
+struct loose_object_iter {
+ struct oidset *loose_object_set;
+ struct ref *refs;
+};
+
+/*
+ * If the number of refs is not larger than the number of loose objects,
+ * this function stops inserting.
+ */
+static int add_loose_objects_to_set(const struct object_id *oid,
+ const char *path,
+ void *data)
+{
+ struct loose_object_iter *iter = data;
+ oidset_insert(iter->loose_object_set, oid);
+ if (iter->refs == NULL)
+ return 1;
+
+ iter->refs = iter->refs->next;
+ return 0;
+}
+
static int everything_local(struct fetch_pack_args *args,
struct ref **refs,
struct ref **sought, int nr_sought)
int retval;
int old_save_commit_buffer = save_commit_buffer;
timestamp_t cutoff = 0;
+ struct oidset loose_oid_set = OIDSET_INIT;
+ int use_oidset = 0;
+ struct loose_object_iter iter = {&loose_oid_set, *refs};
+
+ /* Enumerate all loose objects, or stop early if the refs run out first. */
+ use_oidset = !for_each_loose_object(add_loose_objects_to_set,
+ &iter, 0);
save_commit_buffer = 0;
for (ref = *refs; ref; ref = ref->next) {
struct object *o;
+ unsigned int flags = OBJECT_INFO_QUICK;
- if (!has_object_file_with_flags(&ref->old_oid,
- OBJECT_INFO_QUICK))
- continue;
+ if (use_oidset &&
+ !oidset_contains(&loose_oid_set, &ref->old_oid)) {
+ /*
+ * I know this does not exist in the loose form,
+ * so check if it exists in a non-loose form.
+ */
+ flags |= OBJECT_INFO_IGNORE_LOOSE;
+ }
+ if (!has_object_file_with_flags(&ref->old_oid, flags))
+ continue;
o = parse_object(&ref->old_oid);
if (!o)
continue;
}
}
+ oidset_clear(&loose_oid_set);
+
if (!args->no_dependents) {
if (!args->deepen) {
for_each_ref(mark_complete_oid, NULL);
? fetch_fsck_objects
: transfer_fsck_objects >= 0
? transfer_fsck_objects
- : 0)
- argv_array_push(&cmd.args, "--strict");
+ : 0) {
+ if (args->from_promisor)
+ /*
+ * We cannot use --strict in index-pack because it
+ * checks both broken objects and links, but we only
+ * want to check for broken objects.
+ */
+ argv_array_push(&cmd.args, "--fsck-objects");
+ else
+ argv_array_push(&cmd.args, "--strict");
+ }
cmd.in = demux.out;
cmd.git_cmd = 1;
return ref;
}
+static void add_shallow_requests(struct strbuf *req_buf,
+ const struct fetch_pack_args *args)
+{
+ if (is_repository_shallow())
+ write_shallow_commits(req_buf, 1, NULL);
+ if (args->depth > 0)
+ packet_buf_write(req_buf, "deepen %d", args->depth);
+ if (args->deepen_since) {
+ timestamp_t max_age = approxidate(args->deepen_since);
+ packet_buf_write(req_buf, "deepen-since %"PRItime, max_age);
+ }
+ if (args->deepen_not) {
+ int i;
+ for (i = 0; i < args->deepen_not->nr; i++) {
+ struct string_list_item *s = args->deepen_not->items + i;
+ packet_buf_write(req_buf, "deepen-not %s", s->string);
+ }
+ }
+}
+
+static void add_wants(const struct ref *wants, struct strbuf *req_buf)
+{
+ for ( ; wants ; wants = wants->next) {
+ const struct object_id *remote = &wants->old_oid;
+ const char *remote_hex;
+ struct object *o;
+
+ /*
+ * If that object is complete (i.e. it is an ancestor of a
+ * local ref), we tell them we have it but do not have to
+ * tell them about its ancestors, which they already know
+ * about.
+ *
+ * We use lookup_object here because we are only
+ * interested in the case we *know* the object is
+ * reachable and we have already scanned it.
+ */
+ if (((o = lookup_object(remote->hash)) != NULL) &&
+ (o->flags & COMPLETE)) {
+ continue;
+ }
+
+ remote_hex = oid_to_hex(remote);
+ packet_buf_write(req_buf, "want %s\n", remote_hex);
+ }
+}
+
+static void add_common(struct strbuf *req_buf, struct oidset *common)
+{
+ struct oidset_iter iter;
+ const struct object_id *oid;
+ oidset_iter_init(common, &iter);
+
+ while ((oid = oidset_iter_next(&iter))) {
+ packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
+ }
+}
+
+static int add_haves(struct strbuf *req_buf, int *haves_to_send, int *in_vain)
+{
+ int ret = 0;
+ int haves_added = 0;
+ const struct object_id *oid;
+
+ while ((oid = get_rev())) {
+ packet_buf_write(req_buf, "have %s\n", oid_to_hex(oid));
+ if (++haves_added >= *haves_to_send)
+ break;
+ }
+
+ *in_vain += haves_added;
+ if (!haves_added || *in_vain >= MAX_IN_VAIN) {
+ /* Send Done */
+ packet_buf_write(req_buf, "done\n");
+ ret = 1;
+ }
+
+ /* Increase haves to send on next round */
+ *haves_to_send = next_flush(1, *haves_to_send);
+
+ return ret;
+}
+
+static int send_fetch_request(int fd_out, const struct fetch_pack_args *args,
+ const struct ref *wants, struct oidset *common,
+ int *haves_to_send, int *in_vain)
+{
+ int ret = 0;
+ struct strbuf req_buf = STRBUF_INIT;
+
+ if (server_supports_v2("fetch", 1))
+ packet_buf_write(&req_buf, "command=fetch");
+ if (server_supports_v2("agent", 0))
+ packet_buf_write(&req_buf, "agent=%s", git_user_agent_sanitized());
+
+ packet_buf_delim(&req_buf);
+ if (args->use_thin_pack)
+ packet_buf_write(&req_buf, "thin-pack");
+ if (args->no_progress)
+ packet_buf_write(&req_buf, "no-progress");
+ if (args->include_tag)
+ packet_buf_write(&req_buf, "include-tag");
+ if (prefer_ofs_delta)
+ packet_buf_write(&req_buf, "ofs-delta");
+
+ /* Add shallow-info and deepen request */
+ if (server_supports_feature("fetch", "shallow", 0))
+ add_shallow_requests(&req_buf, args);
+ else if (is_repository_shallow() || args->deepen)
+ die(_("Server does not support shallow requests"));
+
+ /* add wants */
+ add_wants(wants, &req_buf);
+
+ /* Add all of the common commits we've found in previous rounds */
+ add_common(&req_buf, common);
+
+ /* Add initial haves */
+ ret = add_haves(&req_buf, haves_to_send, in_vain);
+
+ /* Send request */
+ packet_buf_flush(&req_buf);
+ write_or_die(fd_out, req_buf.buf, req_buf.len);
+
+ strbuf_release(&req_buf);
+ return ret;
+}
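For reference, the request that send_fetch_request() emits is a pkt-line stream along these lines (an illustrative transcript assembled from the packet_buf_write() calls above, not captured wire data; <oid> stands for a full hex object name):

	command=fetch
	agent=git/2.x
	0001			(delim-pkt, ends the capability section)
	thin-pack
	ofs-delta
	want <oid>
	have <oid>
	done			(only once add_haves() decides to stop)
	0000			(flush-pkt)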
+
+/*
+ * Processes a section header in a server's response and checks if it matches
+ * `section`. If the value of `peek` is 1, the header line will be peeked (and
+ * not consumed); if 0, the line will be consumed and the function will die if
+ * the section header doesn't match what was expected.
+ */
+static int process_section_header(struct packet_reader *reader,
+ const char *section, int peek)
+{
+ int ret;
+
+ if (packet_reader_peek(reader) != PACKET_READ_NORMAL)
+ die("error reading section header '%s'", section);
+
+ ret = !strcmp(reader->line, section);
+
+ if (!peek) {
+ if (!ret)
+ die("expected '%s', received '%s'",
+ section, reader->line);
+ packet_reader_read(reader);
+ }
+
+ return ret;
+}
+
+static int process_acks(struct packet_reader *reader, struct oidset *common)
+{
+ /* received */
+ int received_ready = 0;
+ int received_ack = 0;
+
+ process_section_header(reader, "acknowledgments", 0);
+ while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+ const char *arg;
+
+ if (!strcmp(reader->line, "NAK"))
+ continue;
+
+ if (skip_prefix(reader->line, "ACK ", &arg)) {
+ struct object_id oid;
+ received_ack = 1;
+ if (!get_oid_hex(arg, &oid)) {
+ struct commit *commit;
+ oidset_insert(common, &oid);
+ commit = lookup_commit(&oid);
+ mark_common(commit, 0, 1);
+ }
+ continue;
+ }
+
+ if (!strcmp(reader->line, "ready")) {
+ clear_prio_queue(&rev_list);
+ received_ready = 1;
+ continue;
+ }
+
+ die("unexpected acknowledgment line: '%s'", reader->line);
+ }
+
+ if (reader->status != PACKET_READ_FLUSH &&
+ reader->status != PACKET_READ_DELIM)
+ die("error processing acks: %d", reader->status);
+
+ /* return 0 if no common, 1 if there are common, or 2 if ready */
+ return received_ready ? 2 : (received_ack ? 1 : 0);
+}
+
+static void receive_shallow_info(struct fetch_pack_args *args,
+ struct packet_reader *reader)
+{
+ process_section_header(reader, "shallow-info", 0);
+ while (packet_reader_read(reader) == PACKET_READ_NORMAL) {
+ const char *arg;
+ struct object_id oid;
+
+ if (skip_prefix(reader->line, "shallow ", &arg)) {
+ if (get_oid_hex(arg, &oid))
+ die(_("invalid shallow line: %s"), reader->line);
+ register_shallow(&oid);
+ continue;
+ }
+ if (skip_prefix(reader->line, "unshallow ", &arg)) {
+ if (get_oid_hex(arg, &oid))
+ die(_("invalid unshallow line: %s"), reader->line);
+ if (!lookup_object(oid.hash))
+ die(_("object not found: %s"), reader->line);
+ /* make sure that it is parsed as shallow */
+ if (!parse_object(&oid))
+ die(_("error in object: %s"), reader->line);
+ if (unregister_shallow(&oid))
+ die(_("no shallow found: %s"), reader->line);
+ continue;
+ }
+ die(_("expected shallow/unshallow, got %s"), reader->line);
+ }
+
+ if (reader->status != PACKET_READ_FLUSH &&
+ reader->status != PACKET_READ_DELIM)
+ die("error processing shallow info: %d", reader->status);
+
+ setup_alternate_shallow(&shallow_lock, &alternate_shallow_file, NULL);
+ args->deepen = 1;
+}
+
+enum fetch_state {
+ FETCH_CHECK_LOCAL = 0,
+ FETCH_SEND_REQUEST,
+ FETCH_PROCESS_ACKS,
+ FETCH_GET_PACK,
+ FETCH_DONE,
+};
+
+static struct ref *do_fetch_pack_v2(struct fetch_pack_args *args,
+ int fd[2],
+ const struct ref *orig_ref,
+ struct ref **sought, int nr_sought,
+ char **pack_lockfile)
+{
+ struct ref *ref = copy_ref_list(orig_ref);
+ enum fetch_state state = FETCH_CHECK_LOCAL;
+ struct oidset common = OIDSET_INIT;
+ struct packet_reader reader;
+ int in_vain = 0;
+ int haves_to_send = INITIAL_FLUSH;
+ packet_reader_init(&reader, fd[0], NULL, 0,
+ PACKET_READ_CHOMP_NEWLINE);
+
+ while (state != FETCH_DONE) {
+ switch (state) {
+ case FETCH_CHECK_LOCAL:
+ sort_ref_list(&ref, ref_compare_name);
+ QSORT(sought, nr_sought, cmp_ref_by_name);
+
+ /* v2 supports these by default */
+ allow_unadvertised_object_request |= ALLOW_REACHABLE_SHA1;
+ use_sideband = 2;
+ if (args->depth > 0 || args->deepen_since || args->deepen_not)
+ args->deepen = 1;
+
+ if (marked)
+ for_each_ref(clear_marks, NULL);
+ marked = 1;
+
+ for_each_ref(rev_list_insert_ref_oid, NULL);
+ for_each_cached_alternate(insert_one_alternate_object);
+
+ /* Filter 'ref' by 'sought' and those that aren't local */
+ if (everything_local(args, &ref, sought, nr_sought))
+ state = FETCH_DONE;
+ else
+ state = FETCH_SEND_REQUEST;
+ break;
+ case FETCH_SEND_REQUEST:
+ if (send_fetch_request(fd[1], args, ref, &common,
+ &haves_to_send, &in_vain))
+ state = FETCH_GET_PACK;
+ else
+ state = FETCH_PROCESS_ACKS;
+ break;
+ case FETCH_PROCESS_ACKS:
+ /* Process ACKs/NAKs */
+ switch (process_acks(&reader, &common)) {
+ case 2:
+ state = FETCH_GET_PACK;
+ break;
+ case 1:
+ in_vain = 0;
+ /* fallthrough */
+ default:
+ state = FETCH_SEND_REQUEST;
+ break;
+ }
+ break;
+ case FETCH_GET_PACK:
+ /* Check for shallow-info section */
+ if (process_section_header(&reader, "shallow-info", 1))
+ receive_shallow_info(args, &reader);
+
+ /* get the pack */
+ process_section_header(&reader, "packfile", 0);
+ if (get_pack(args, fd, pack_lockfile))
+ die(_("git fetch-pack: fetch failed."));
+
+ state = FETCH_DONE;
+ break;
+ case FETCH_DONE:
+ continue;
+ }
+ }
+
+ oidset_clear(&common);
+ return ref;
+}
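The loop above is easiest to read as a small state machine; a summary in comment form (derived purely from the switch statement, nothing beyond it):

	/*
	 * FETCH_CHECK_LOCAL  -> FETCH_DONE          everything local already
	 *                    -> FETCH_SEND_REQUEST  otherwise
	 * FETCH_SEND_REQUEST -> FETCH_GET_PACK      request ended with "done"
	 *                    -> FETCH_PROCESS_ACKS  otherwise
	 * FETCH_PROCESS_ACKS -> FETCH_GET_PACK      server said "ready"
	 *                    -> FETCH_SEND_REQUEST  another round of haves
	 * FETCH_GET_PACK     -> FETCH_DONE          pack consumed
	 */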
+
static void fetch_pack_config(void)
{
git_config_get_int("fetch.unpacklimit", &fetch_unpack_limit);
const char *dest,
struct ref **sought, int nr_sought,
struct oid_array *shallow,
- char **pack_lockfile)
+ char **pack_lockfile,
+ enum protocol_version version)
{
struct ref *ref_cpy;
struct shallow_info si;
die(_("no matching remote head"));
}
prepare_shallow_info(&si, shallow);
- ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
- &si, pack_lockfile);
+ if (version == protocol_v2)
+ ref_cpy = do_fetch_pack_v2(args, fd, ref, sought, nr_sought,
+ pack_lockfile);
+ else
+ ref_cpy = do_fetch_pack(args, fd, ref, sought, nr_sought,
+ &si, pack_lockfile);
- reprepare_packed_git();
+ reprepare_packed_git(the_repository);
update_shallow(args, sought, nr_sought, &si);
clear_shallow_info(&si);
return ref_cpy;
#include "argv-array.h"
#include "packfile.h"
#include "object-store.h"
+#include "protocol.h"
static const char content_type[] = "Content-Type";
static const char content_length[] = "Content-Length";
hdr_str(hdr, content_type, buf.buf);
end_headers(hdr);
- packet_write_fmt(1, "# service=git-%s\n", svc->name);
- packet_flush(1);
+
+ if (determine_protocol_version_server() != protocol_v2) {
+ packet_write_fmt(1, "# service=git-%s\n", svc->name);
+ packet_flush(1);
+ }
argv[0] = svc->name;
run_service(argv, 0);
size_t cnt = 0;
select_getanyfile(hdr);
- prepare_packed_git();
for (p = get_packed_git(the_repository); p; p = p->next) {
if (p->pack_local)
cnt++;
{ "tlsv1.1", CURL_SSLVERSION_TLSv1_1 },
{ "tlsv1.2", CURL_SSLVERSION_TLSv1_2 },
#endif
+#if LIBCURL_VERSION_NUM >= 0x073400
+ { "tlsv1.3", CURL_SSLVERSION_TLSv1_3 },
+#endif
};
#if LIBCURL_VERSION_NUM >= 0x070903
static const char *ssl_key;
#if LIBCURL_VERSION_NUM >= 0x070908
static const char *ssl_capath;
#endif
+#if LIBCURL_VERSION_NUM >= 0x071304
+static const char *curl_no_proxy;
+#endif
#if LIBCURL_VERSION_NUM >= 0x072c00
static const char *ssl_pinnedkey;
#endif
static long curl_low_speed_time = -1;
static int curl_ftp_no_epsv;
static const char *curl_http_proxy;
-static const char *curl_no_proxy;
static const char *http_proxy_authmethod;
static struct {
const char *name;
*var = val;
}
-static void protocol_http_header(void)
-{
- if (get_protocol_version_config() > 0) {
- struct strbuf protocol_header = STRBUF_INIT;
-
- strbuf_addf(&protocol_header, GIT_PROTOCOL_HEADER ": version=%d",
- get_protocol_version_config());
-
-
- extra_http_headers = curl_slist_append(extra_http_headers,
- protocol_header.buf);
- strbuf_release(&protocol_header);
- }
-}
-
void http_init(struct remote *remote, const char *url, int proactive_auth)
{
char *low_speed_limit;
if (remote)
var_override(&http_proxy_authmethod, remote->http_proxy_authmethod);
- protocol_http_header();
-
pragma_header = curl_slist_append(http_copy_default_headers(),
"Pragma: no-cache");
no_pragma_header = curl_slist_append(http_copy_default_headers(),
void add_fill_function(void *data, int (*fill)(void *))
{
- struct fill_chain *new = xmalloc(sizeof(*new));
+ struct fill_chain *new_fill = xmalloc(sizeof(*new_fill));
struct fill_chain **linkp = &fill_cfg;
- new->data = data;
- new->fill = fill;
- new->next = NULL;
+ new_fill->data = data;
+ new_fill->fill = fill;
+ new_fill->next = NULL;
while (*linkp)
linkp = &(*linkp)->next;
- *linkp = new;
+ *linkp = new_fill;
}
void fill_active_slots(void)
headers = curl_slist_append(headers, buf.buf);
+ /* Add additional headers here */
+ if (options && options->extra_headers) {
+ const struct string_list_item *item;
+ for_each_string_list_item(item, options->extra_headers) {
+ headers = curl_slist_append(headers, item->string);
+ }
+ }
+
curl_easy_setopt(slot->curl, CURLOPT_URL, url);
curl_easy_setopt(slot->curl, CURLOPT_HTTPHEADER, headers);
curl_easy_setopt(slot->curl, CURLOPT_ENCODING, "gzip");
return -1;
}
- install_packed_git(p);
+ install_packed_git(the_repository, p);
free(tmp_idx);
return 0;
}
return p;
}
- void install_packed_git(struct packed_git *pack)
+ void install_packed_git(struct repository *r, struct packed_git *pack)
{
if (pack->pack_fd != -1)
pack_open_fds++;
- pack->next = the_repository->objects->packed_git;
- the_repository->objects->packed_git = pack;
+ pack->next = r->objects->packed_git;
+ r->objects->packed_git = pack;
}
void (*report_garbage)(unsigned seen_bits, const char *path);
report_helper(list, seen_bits, first, list->nr);
}
- static void prepare_packed_git_one(char *objdir, int local)
+ static void prepare_packed_git_one(struct repository *r, char *objdir, int local)
{
struct strbuf path = STRBUF_INIT;
size_t dirnamelen;
base_len = path.len;
if (strip_suffix_mem(path.buf, &base_len, ".idx")) {
/* Don't reopen a pack we already have. */
- for (p = the_repository->objects->packed_git; p;
+ for (p = r->objects->packed_git; p;
p = p->next) {
size_t len;
if (strip_suffix(p->pack_name, ".pack", &len) &&
* corresponding .pack file that we can map.
*/
(p = add_packed_git(path.buf, path.len, local)) != NULL)
- install_packed_git(p);
+ install_packed_git(r, p);
}
if (!report_garbage)
strbuf_release(&path);
}
+ static void prepare_packed_git(struct repository *r);
/*
* Give a fast, rough count of the number of objects in the repository. This
* ignores loose objects completely. If you have a lot of them, then either
unsigned long count;
struct packed_git *p;
- prepare_packed_git();
+ prepare_packed_git(the_repository);
count = 0;
for (p = the_repository->objects->packed_git; p; p = p->next) {
if (open_pack_index(p))
return -1;
}
- static void rearrange_packed_git(void)
+ static void rearrange_packed_git(struct repository *r)
{
- the_repository->objects->packed_git = llist_mergesort(
- the_repository->objects->packed_git, get_next_packed_git,
+ r->objects->packed_git = llist_mergesort(
+ r->objects->packed_git, get_next_packed_git,
set_next_packed_git, sort_pack);
}
- static void prepare_packed_git_mru(void)
+ static void prepare_packed_git_mru(struct repository *r)
{
struct packed_git *p;
- INIT_LIST_HEAD(&the_repository->objects->packed_git_mru);
+ INIT_LIST_HEAD(&r->objects->packed_git_mru);
- for (p = the_repository->objects->packed_git; p; p = p->next)
- list_add_tail(&p->mru, &the_repository->objects->packed_git_mru);
+ for (p = r->objects->packed_git; p; p = p->next)
+ list_add_tail(&p->mru, &r->objects->packed_git_mru);
}
- void prepare_packed_git(void)
+ static void prepare_packed_git(struct repository *r)
{
struct alternate_object_database *alt;
- if (the_repository->objects->packed_git_initialized)
+ if (r->objects->packed_git_initialized)
return;
- prepare_packed_git_one(get_object_directory(), 1);
- prepare_alt_odb(the_repository);
- for (alt = the_repository->objects->alt_odb_list; alt; alt = alt->next)
- prepare_packed_git_one(alt->path, 0);
- rearrange_packed_git();
- prepare_packed_git_mru();
- the_repository->objects->packed_git_initialized = 1;
+ prepare_packed_git_one(r, r->objects->objectdir, 1);
+ prepare_alt_odb(r);
+ for (alt = r->objects->alt_odb_list; alt; alt = alt->next)
+ prepare_packed_git_one(r, alt->path, 0);
+ rearrange_packed_git(r);
+ prepare_packed_git_mru(r);
+ r->objects->packed_git_initialized = 1;
}
- void reprepare_packed_git(void)
+ void reprepare_packed_git(struct repository *r)
{
- the_repository->objects->approximate_object_count_valid = 0;
- the_repository->objects->packed_git_initialized = 0;
- prepare_packed_git();
+ r->objects->approximate_object_count_valid = 0;
+ r->objects->packed_git_initialized = 0;
+ prepare_packed_git(r);
}
struct packed_git *get_packed_git(struct repository *r)
{
+ prepare_packed_git(r);
return r->objects->packed_git;
}
struct list_head *get_packed_git_mru(struct repository *r)
{
+ prepare_packed_git(r);
return &r->objects->packed_git_mru;
}
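With prepare_packed_git() now static and invoked lazily by the accessors, callers outside packfile.c neither need nor are able to prepare anything up front; the prepare_packed_git() call sites deleted throughout this series reduce to the plain iteration pattern already visible in the gc and http-backend hunks above:

	struct packed_git *p;

	for (p = get_packed_git(the_repository); p; p = p->next)
		if (p->pack_local)
			; /* consume the local pack */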
{
int type;
struct revindex_entry *revidx;
- const unsigned char *sha1;
+ struct object_id oid;
revidx = find_pack_revindex(p, obj_offset);
if (!revidx)
return OBJ_BAD;
- sha1 = nth_packed_object_sha1(p, revidx->nr);
- mark_bad_packed_object(p, sha1);
- type = sha1_object_info(sha1, NULL);
+ nth_packed_object_oid(&oid, p, revidx->nr);
+ mark_bad_packed_object(p, oid.hash);
+ type = oid_object_info(&oid, NULL);
if (type <= OBJ_NONE)
return OBJ_BAD;
return type;
*oi->disk_sizep = revidx[1].offset - obj_offset;
}
- if (oi->typep || oi->typename) {
+ if (oi->typep || oi->type_name) {
enum object_type ptot;
ptot = packed_to_object_type(p, obj_offset, type, &w_curs,
curpos);
if (oi->typep)
*oi->typep = ptot;
- if (oi->typename) {
- const char *tn = typename(ptot);
+ if (oi->type_name) {
+ const char *tn = type_name(ptot);
if (tn)
- strbuf_addstr(oi->typename, tn);
+ strbuf_addstr(oi->type_name, tn);
}
if (ptot < 0) {
type = OBJ_BAD;
unsigned long size;
};
-static void *read_object(const unsigned char *sha1, enum object_type *type,
+static void *read_object(const struct object_id *oid, enum object_type *type,
unsigned long *size)
{
struct object_info oi = OBJECT_INFO_INIT;
oi.sizep = size;
oi.contentp = &content;
- if (sha1_object_info_extended(sha1, &oi, 0) < 0)
+ if (oid_object_info_extended(oid, &oi, 0) < 0)
return NULL;
return content;
}
struct revindex_entry *revidx = find_pack_revindex(p, obj_offset);
off_t len = revidx[1].offset - obj_offset;
if (check_pack_crc(p, &w_curs, obj_offset, len, revidx->nr)) {
- const unsigned char *sha1 =
- nth_packed_object_sha1(p, revidx->nr);
+ struct object_id oid;
+ nth_packed_object_oid(&oid, p, revidx->nr);
error("bad packed object CRC for %s",
- sha1_to_hex(sha1));
- mark_bad_packed_object(p, sha1);
+ oid_to_hex(&oid));
+ mark_bad_packed_object(p, oid.hash);
data = NULL;
goto out;
}
* of a corrupted pack, and is better than failing outright.
*/
struct revindex_entry *revidx;
- const unsigned char *base_sha1;
+ struct object_id base_oid;
revidx = find_pack_revindex(p, obj_offset);
if (revidx) {
- base_sha1 = nth_packed_object_sha1(p, revidx->nr);
+ nth_packed_object_oid(&base_oid, p, revidx->nr);
error("failed to read delta base object %s"
" at offset %"PRIuMAX" from %s",
- sha1_to_hex(base_sha1), (uintmax_t)obj_offset,
+ oid_to_hex(&base_oid), (uintmax_t)obj_offset,
p->pack_name);
- mark_bad_packed_object(p, base_sha1);
- base = read_object(base_sha1, &type, &base_size);
+ mark_bad_packed_object(p, base_oid.hash);
+ base = read_object(&base_oid, &type, &base_size);
external_base = base;
}
}
return data;
}
+int bsearch_pack(const struct object_id *oid, const struct packed_git *p, uint32_t *result)
+{
+ const unsigned char *index_fanout = p->index_data;
+ const unsigned char *index_lookup;
+ int index_lookup_width;
+
+ if (!index_fanout)
+ BUG("bsearch_pack called without a valid pack-index");
+
+ index_lookup = index_fanout + 4 * 256;
+ if (p->index_version == 1) {
+ index_lookup_width = 24;
+ index_lookup += 4;
+ } else {
+ index_lookup_width = 20;
+ index_fanout += 8;
+ index_lookup += 8;
+ }
+
+ return bsearch_hash(oid->hash, (const uint32_t*)index_fanout,
+ index_lookup, index_lookup_width, result);
+}
+
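The offsets above follow from the on-disk formats: a v1 .idx begins
directly with the 256-entry fanout and stores 24-byte records (a 4-byte
offset, then the 20-byte name, hence the extra 4-byte skip into each
record), while a v2 .idx puts an 8-byte header before the fanout and
keeps the names in a plain 20-byte-stride table. A sketch of driving the
new helper directly (hypothetical caller; real lookups normally go
through find_pack_entry_one()):

/*
 * Report where an oid sits in a pack's index. bsearch_pack() returns
 * nonzero on an exact hit; otherwise "pos" is the insertion point.
 */
static void show_position(struct packed_git *p, const struct object_id *oid)
{
	uint32_t pos;

	if (open_pack_index(p))
		return; /* no usable .idx */
	if (bsearch_pack(oid, p, &pos))
		printf("%s is object #%"PRIu32" in %s\n",
		       oid_to_hex(oid), pos, p->pack_name);
	else
		printf("%s would sort at index %"PRIu32"\n",
		       oid_to_hex(oid), pos);
}
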
const unsigned char *nth_packed_object_sha1(struct packed_git *p,
uint32_t n)
{
off_t find_pack_entry_one(const unsigned char *sha1,
struct packed_git *p)
{
- const uint32_t *level1_ofs = p->index_data;
const unsigned char *index = p->index_data;
- unsigned hi, lo, stride;
- static int debug_lookup = -1;
-
- if (debug_lookup < 0)
- debug_lookup = !!getenv("GIT_DEBUG_LOOKUP");
+ struct object_id oid;
+ uint32_t result;
if (!index) {
if (open_pack_index(p))
return 0;
- level1_ofs = p->index_data;
- index = p->index_data;
- }
- if (p->index_version > 1) {
- level1_ofs += 2;
- index += 8;
- }
- index += 4 * 256;
- hi = ntohl(level1_ofs[*sha1]);
- lo = ((*sha1 == 0x0) ? 0 : ntohl(level1_ofs[*sha1 - 1]));
- if (p->index_version > 1) {
- stride = 20;
- } else {
- stride = 24;
- index += 4;
}
- if (debug_lookup)
- printf("%02x%02x%02x... lo %u hi %u nr %"PRIu32"\n",
- sha1[0], sha1[1], sha1[2], lo, hi, p->num_objects);
-
- while (lo < hi) {
- unsigned mi = lo + (hi - lo) / 2;
- int cmp = hashcmp(index + mi * stride, sha1);
-
- if (debug_lookup)
- printf("lo %u hi %u rg %u mi %u\n",
- lo, hi, hi - lo, mi);
- if (!cmp)
- return nth_packed_object_offset(p, mi);
- if (cmp > 0)
- hi = mi;
- else
- lo = mi+1;
- }
+ hashcpy(oid.hash, sha1);
+ if (bsearch_pack(&oid, p, &result))
+ return nth_packed_object_offset(p, result);
return 0;
}
return 1;
}
- /*
- * Iff a pack file contains the object named by sha1, return true and
- * store its location to e.
- */
- int find_pack_entry(const unsigned char *sha1, struct pack_entry *e)
+ int find_pack_entry(struct repository *r, const unsigned char *sha1, struct pack_entry *e)
{
struct list_head *pos;
- prepare_packed_git();
- if (!the_repository->objects->packed_git)
+ prepare_packed_git(r);
+ if (!r->objects->packed_git)
return 0;
- list_for_each(pos, &the_repository->objects->packed_git_mru) {
+ list_for_each(pos, &r->objects->packed_git_mru) {
struct packed_git *p = list_entry(pos, struct packed_git, mru);
if (fill_pack_entry(sha1, e, p)) {
- list_move(&p->mru,
- &the_repository->objects->packed_git_mru);
+ list_move(&p->mru, &r->objects->packed_git_mru);
return 1;
}
}
int has_sha1_pack(const unsigned char *sha1)
{
struct pack_entry e;
- return find_pack_entry(sha1, &e);
+ return find_pack_entry(the_repository, sha1, &e);
}
int has_pack_index(const unsigned char *sha1)
int r = 0;
int pack_errors = 0;
- prepare_packed_git();
+ prepare_packed_git(the_repository);
for (p = the_repository->objects->packed_git; p; p = p->next) {
if ((flags & FOR_EACH_OBJECT_LOCAL_ONLY) && !p->pack_local)
continue;
#define PACKDIR_FILE_GARBAGE 4
extern void (*report_garbage)(unsigned seen_bits, const char *path);
- extern void prepare_packed_git(void);
- extern void reprepare_packed_git(void);
- extern void install_packed_git(struct packed_git *pack);
+ extern void reprepare_packed_git(struct repository *r);
+ extern void install_packed_git(struct repository *r, struct packed_git *pack);
struct packed_git *get_packed_git(struct repository *r);
struct list_head *get_packed_git_mru(struct repository *r);
*/
extern void check_pack_index_ptr(const struct packed_git *p, const void *ptr);
+/*
+ * Perform a binary search on a pack-index for a given oid. The packfile
+ * is expected to have a valid pack-index.
+ *
+ * See 'bsearch_hash' for more information.
+ */
+int bsearch_pack(const struct object_id *oid, const struct packed_git *p, uint32_t *result);
+
/*
* Return the SHA-1 of the nth object within the specified packfile.
* Open the index if it is not already open. The return value points
extern void mark_bad_packed_object(struct packed_git *p, const unsigned char *sha1);
extern const struct packed_git *has_packed_and_bad(const unsigned char *sha1);
- extern int find_pack_entry(const unsigned char *sha1, struct pack_entry *e);
+ /*
+ * Iff a pack file in the given repository contains the object named by sha1,
+ * return true and store its location in e.
+ */
+ extern int find_pack_entry(struct repository *r, const unsigned char *sha1, struct pack_entry *e);
extern int has_sha1_pack(const unsigned char *sha1);
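
To illustrate the new calling convention, a sketch of a packed-object
probe against an arbitrary repository (hypothetical helper):

/*
 * Return nonzero if the object is in some pack of repository "r".
 * On success, e.p and e.offset identify the containing pack.
 */
static int is_packed(struct repository *r, const struct object_id *oid)
{
	struct pack_entry e;

	return find_pack_entry(r, oid->hash, &e);
}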
#include "fetch-object.h"
#include "object-store.h"
+/* The maximum size for an object header. */
+#define MAX_HEADER_LEN 32
+
const unsigned char null_sha1[GIT_MAX_RAWSZ];
const struct object_id null_oid;
const struct object_id empty_tree_oid = {
EMPTY_BLOB_SHA1_BIN_LITERAL
};
-static void git_hash_sha1_init(void *ctx)
+static void git_hash_sha1_init(git_hash_ctx *ctx)
{
- git_SHA1_Init((git_SHA_CTX *)ctx);
+ git_SHA1_Init(&ctx->sha1);
}
-static void git_hash_sha1_update(void *ctx, const void *data, size_t len)
+static void git_hash_sha1_update(git_hash_ctx *ctx, const void *data, size_t len)
{
- git_SHA1_Update((git_SHA_CTX *)ctx, data, len);
+ git_SHA1_Update(&ctx->sha1, data, len);
}
-static void git_hash_sha1_final(unsigned char *hash, void *ctx)
+static void git_hash_sha1_final(unsigned char *hash, git_hash_ctx *ctx)
{
- git_SHA1_Final(hash, (git_SHA_CTX *)ctx);
+ git_SHA1_Final(hash, &ctx->sha1);
}
-static void git_hash_unknown_init(void *ctx)
+static void git_hash_unknown_init(git_hash_ctx *ctx)
{
die("trying to init unknown hash");
}
-static void git_hash_unknown_update(void *ctx, const void *data, size_t len)
+static void git_hash_unknown_update(git_hash_ctx *ctx, const void *data, size_t len)
{
die("trying to update unknown hash");
}
-static void git_hash_unknown_final(unsigned char *hash, void *ctx)
+static void git_hash_unknown_final(unsigned char *hash, git_hash_ctx *ctx)
{
die("trying to finalize unknown hash");
}
0x00000000,
0,
0,
- 0,
git_hash_unknown_init,
git_hash_unknown_update,
git_hash_unknown_final,
"sha-1",
/* "sha1", big-endian */
0x73686131,
- sizeof(git_SHA_CTX),
GIT_SHA1_RAWSZ,
GIT_SHA1_HEXSZ,
git_hash_sha1_init,
* With "map" == NULL, try reading the object named with "sha1" using
* the streaming interface and rehash it to do the same.
*/
-int check_sha1_signature(const unsigned char *sha1, void *map,
- unsigned long size, const char *type)
+int check_object_signature(const struct object_id *oid, void *map,
+ unsigned long size, const char *type)
{
- unsigned char real_sha1[20];
+ struct object_id real_oid;
enum object_type obj_type;
struct git_istream *st;
- git_SHA_CTX c;
- char hdr[32];
+ git_hash_ctx c;
+ char hdr[MAX_HEADER_LEN];
int hdrlen;
if (map) {
- hash_sha1_file(map, size, type, real_sha1);
- return hashcmp(sha1, real_sha1) ? -1 : 0;
+ hash_object_file(map, size, type, &real_oid);
+ return oidcmp(oid, &real_oid) ? -1 : 0;
}
- st = open_istream(sha1, &obj_type, &size, NULL);
+ st = open_istream(oid, &obj_type, &size, NULL);
if (!st)
return -1;
/* Generate the header */
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(obj_type), size) + 1;
+ hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(obj_type), size) + 1;
/* Sha1.. */
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, hdrlen);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
for (;;) {
char buf[1024 * 16];
ssize_t readlen = read_istream(st, buf, sizeof(buf));
}
if (!readlen)
break;
- git_SHA1_Update(&c, buf, readlen);
+ the_hash_algo->update_fn(&c, buf, readlen);
}
- git_SHA1_Final(real_sha1, &c);
+ the_hash_algo->final_fn(real_oid.hash, &c);
close_istream(st);
- return hashcmp(sha1, real_sha1) ? -1 : 0;
+ return oidcmp(oid, &real_oid) ? -1 : 0;
}
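
The same indirection generalizes any hashing site: instead of hard-coded
git_SHA1_* calls, data is streamed through the_hash_algo's function
pointers. A minimal sketch (hypothetical helper):

/* Hash an in-core buffer with the repository's configured hash. */
static void hash_buf(const void *buf, size_t len, unsigned char *out)
{
	git_hash_ctx c;

	the_hash_algo->init_fn(&c);
	the_hash_algo->update_fn(&c, buf, len);
	the_hash_algo->final_fn(out, &c);
}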
int git_open_cloexec(const char *name, int flags)
}
type = type_from_string_gently(type_buf, type_len, 1);
- if (oi->typename)
- strbuf_add(oi->typename, type_buf, type_len);
+ if (oi->type_name)
+ strbuf_add(oi->type_name, type_buf, type_len);
/*
* Set type to 0 if its an unknown object and
* we're obtaining the type using '--allow-unknown-type'
unsigned long mapsize;
void *map;
git_zstream stream;
- char hdr[32];
+ char hdr[MAX_HEADER_LEN];
struct strbuf hdrbuf = STRBUF_INIT;
unsigned long size_scratch;
* return value implicitly indicates whether the
* object even exists.
*/
- if (!oi->typep && !oi->typename && !oi->sizep && !oi->contentp) {
+ if (!oi->typep && !oi->type_name && !oi->sizep && !oi->contentp) {
const char *path;
struct stat st;
if (stat_sha1_file(r, sha1, &st, &path) < 0)
int fetch_if_missing = 1;
-int sha1_object_info_extended(const unsigned char *sha1, struct object_info *oi, unsigned flags)
+int oid_object_info_extended(const struct object_id *oid, struct object_info *oi, unsigned flags)
{
static struct object_info blank_oi = OBJECT_INFO_INIT;
struct pack_entry e;
int rtype;
- const unsigned char *real = (flags & OBJECT_INFO_LOOKUP_REPLACE) ?
- lookup_replace_object(sha1) :
- sha1;
+ const struct object_id *real = oid;
int already_retried = 0;
- if (is_null_sha1(real))
+ if (flags & OBJECT_INFO_LOOKUP_REPLACE)
+ real = lookup_replace_object(oid);
+
+ if (is_null_oid(real))
return -1;
if (!oi)
oi = &blank_oi;
if (!(flags & OBJECT_INFO_SKIP_CACHED)) {
- struct cached_object *co = find_cached_object(real);
+ struct cached_object *co = find_cached_object(real->hash);
if (co) {
if (oi->typep)
*(oi->typep) = co->type;
*(oi->disk_sizep) = 0;
if (oi->delta_base_sha1)
hashclr(oi->delta_base_sha1);
- if (oi->typename)
- strbuf_addstr(oi->typename, typename(co->type));
+ if (oi->type_name)
+ strbuf_addstr(oi->type_name, type_name(co->type));
if (oi->contentp)
*oi->contentp = xmemdupz(co->buf, co->size);
oi->whence = OI_CACHED;
}
while (1) {
- if (find_pack_entry(real->hash, &e))
+ if (find_pack_entry(the_repository, real->hash, &e))
break;
+ if (flags & OBJECT_INFO_IGNORE_LOOSE)
+ return -1;
+
/* Most likely it's a loose object. */
- if (!sha1_loose_object_info(the_repository, real, oi, flags))
+ if (!sha1_loose_object_info(the_repository, real->hash, oi, flags))
return 0;
/* Not a loose object; someone else may have just packed it. */
- reprepare_packed_git();
- if (find_pack_entry(real->hash, &e))
- break;
+ if (!(flags & OBJECT_INFO_QUICK)) {
+ reprepare_packed_git(the_repository);
+ if (find_pack_entry(the_repository, real->hash, &e))
+ break;
+ }
/* Check if it is a missing object */
if (fetch_if_missing && repository_format_partial_clone &&
* TODO Investigate having fetch_object() return
* TODO error/success and stopping the music here.
*/
- fetch_object(repository_format_partial_clone, real);
+ fetch_object(repository_format_partial_clone, real->hash);
already_retried = 1;
continue;
}
return 0;
rtype = packed_object_info(e.p, e.offset, oi);
if (rtype < 0) {
- mark_bad_packed_object(e.p, real);
- return sha1_object_info_extended(real, oi, 0);
+ mark_bad_packed_object(e.p, real->hash);
+ return oid_object_info_extended(real, oi, 0);
} else if (oi->whence == OI_PACKED) {
oi->u.packed.offset = e.offset;
oi->u.packed.pack = e.p;
}
/* returns enum object_type or negative */
-int sha1_object_info(const unsigned char *sha1, unsigned long *sizep)
+int oid_object_info(const struct object_id *oid, unsigned long *sizep)
{
enum object_type type;
struct object_info oi = OBJECT_INFO_INIT;
oi.typep = &type;
oi.sizep = sizep;
- if (sha1_object_info_extended(sha1, &oi,
- OBJECT_INFO_LOOKUP_REPLACE) < 0)
+ if (oid_object_info_extended(oid, &oi,
+ OBJECT_INFO_LOOKUP_REPLACE) < 0)
return -1;
return type;
}
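
For callers that only need metadata, the converted entry point keeps the
cheap path cheap; a sketch (hypothetical helper; no object body is
inflated when only type and size are requested):

/* Print "<oid> <type> <size>" without reading the object body. */
static void describe_oid(const struct object_id *oid)
{
	unsigned long size;
	enum object_type type = oid_object_info(oid, &size);

	if (type < 0)
		die("%s is not a valid object", oid_to_hex(oid));
	printf("%s %s %lu\n", oid_to_hex(oid), type_name(type), size);
}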
static void *read_object(const unsigned char *sha1, enum object_type *type,
unsigned long *size)
{
+ struct object_id oid;
struct object_info oi = OBJECT_INFO_INIT;
void *content;
oi.typep = type;
oi.sizep = size;
oi.contentp = &content;
- if (sha1_object_info_extended(sha1, &oi, 0) < 0)
+ hashcpy(oid.hash, sha1);
+
+ if (oid_object_info_extended(&oid, &oi, 0) < 0)
return NULL;
return content;
}
-int pretend_sha1_file(void *buf, unsigned long len, enum object_type type,
- unsigned char *sha1)
+int pretend_object_file(void *buf, unsigned long len, enum object_type type,
+ struct object_id *oid)
{
struct cached_object *co;
- hash_sha1_file(buf, len, typename(type), sha1);
- if (has_sha1_file(sha1) || find_cached_object(sha1))
+ hash_object_file(buf, len, type_name(type), oid);
+ if (has_sha1_file(oid->hash) || find_cached_object(oid->hash))
return 0;
ALLOC_GROW(cached_objects, cached_object_nr + 1, cached_object_alloc);
co = &cached_objects[cached_object_nr++];
co->type = type;
co->buf = xmalloc(len);
memcpy(co->buf, buf, len);
- hashcpy(co->sha1, sha1);
+ hashcpy(co->sha1, oid->hash);
return 0;
}
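
Apart from the types, usage is unchanged; a sketch of registering an
in-memory object (hypothetical helper):

/* Register an in-memory blob; nothing is written to disk. */
static void add_fake_blob(struct object_id *oid)
{
	static const char data[] = "hello\n";

	if (pretend_object_file((void *)data, sizeof(data) - 1,
				OBJ_BLOB, oid))
		die("pretend_object_file failed");
	/* read_object_file(oid, ...) now sees the cached object. */
}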
* deal with them should arrange to call read_object() and give error
* messages themselves.
*/
-void *read_sha1_file_extended(const unsigned char *sha1,
- enum object_type *type,
- unsigned long *size,
- int lookup_replace)
+void *read_object_file_extended(const struct object_id *oid,
+ enum object_type *type,
+ unsigned long *size,
+ int lookup_replace)
{
void *data;
const struct packed_git *p;
const char *path;
struct stat st;
- const unsigned char *repl = lookup_replace ? lookup_replace_object(sha1)
- : sha1;
+ const struct object_id *repl = lookup_replace ? lookup_replace_object(oid)
+ : oid;
errno = 0;
- data = read_object(repl, type, size);
+ data = read_object(repl->hash, type, size);
if (data)
return data;
if (errno && errno != ENOENT)
- die_errno("failed to read object %s", sha1_to_hex(sha1));
+ die_errno("failed to read object %s", oid_to_hex(oid));
/* die if we replaced an object with one that does not exist */
- if (repl != sha1)
+ if (repl != oid)
die("replacement %s not found for %s",
- sha1_to_hex(repl), sha1_to_hex(sha1));
+ oid_to_hex(repl), oid_to_hex(oid));
- if (!stat_sha1_file(the_repository, repl, &st, &path))
+ if (!stat_sha1_file(the_repository, repl->hash, &st, &path))
die("loose object %s (stored in %s) is corrupt",
- sha1_to_hex(repl), path);
+ oid_to_hex(repl), path);
- if ((p = has_packed_and_bad(repl)) != NULL)
+ if ((p = has_packed_and_bad(repl->hash)) != NULL)
die("packed object %s (stored in %s) is corrupt",
- sha1_to_hex(repl), p->pack_name);
+ oid_to_hex(repl), p->pack_name);
return NULL;
}
-void *read_object_with_reference(const unsigned char *sha1,
+void *read_object_with_reference(const struct object_id *oid,
const char *required_type_name,
unsigned long *size,
- unsigned char *actual_sha1_return)
+ struct object_id *actual_oid_return)
{
enum object_type type, required_type;
void *buffer;
unsigned long isize;
- unsigned char actual_sha1[20];
+ struct object_id actual_oid;
required_type = type_from_string(required_type_name);
- hashcpy(actual_sha1, sha1);
+ oidcpy(&actual_oid, oid);
while (1) {
int ref_length = -1;
const char *ref_type = NULL;
- buffer = read_sha1_file(actual_sha1, &type, &isize);
+ buffer = read_object_file(&actual_oid, &type, &isize);
if (!buffer)
return NULL;
if (type == required_type) {
*size = isize;
- if (actual_sha1_return)
- hashcpy(actual_sha1_return, actual_sha1);
+ if (actual_oid_return)
+ oidcpy(actual_oid_return, &actual_oid);
return buffer;
}
/* Handle references */
}
ref_length = strlen(ref_type);
- if (ref_length + 40 > isize ||
+ if (ref_length + GIT_SHA1_HEXSZ > isize ||
memcmp(buffer, ref_type, ref_length) ||
- get_sha1_hex((char *) buffer + ref_length, actual_sha1)) {
+ get_oid_hex((char *) buffer + ref_length, &actual_oid)) {
free(buffer);
return NULL;
}
free(buffer);
/* Now we have the ID of the referred-to object in
- * actual_sha1. Check again. */
+ * actual_oid. Check again. */
}
}
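
A sketch of the typical use, peeling a tag or commit down to the tree it
refers to (hypothetical helper; tree_type is git's existing "tree" type
string):

/* Read the tree an oid ultimately refers to, following references. */
static void *tree_buffer_for(const struct object_id *oid,
			     unsigned long *sizep,
			     struct object_id *tree_oid)
{
	return read_object_with_reference(oid, tree_type, sizep, tree_oid);
}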
-static void write_sha1_file_prepare(const void *buf, unsigned long len,
- const char *type, unsigned char *sha1,
- char *hdr, int *hdrlen)
+static void write_object_file_prepare(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ char *hdr, int *hdrlen)
{
- git_SHA_CTX c;
+ git_hash_ctx c;
/* Generate the header */
*hdrlen = xsnprintf(hdr, *hdrlen, "%s %lu", type, len)+1;
/* Sha1.. */
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, *hdrlen);
- git_SHA1_Update(&c, buf, len);
- git_SHA1_Final(sha1, &c);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, *hdrlen);
+ the_hash_algo->update_fn(&c, buf, len);
+ the_hash_algo->final_fn(oid->hash, &c);
}
/*
return 0;
}
-int hash_sha1_file(const void *buf, unsigned long len, const char *type,
- unsigned char *sha1)
+int hash_object_file(const void *buf, unsigned long len, const char *type,
+ struct object_id *oid)
{
- char hdr[32];
+ char hdr[MAX_HEADER_LEN];
int hdrlen = sizeof(hdr);
- write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
+ write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
return 0;
}
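
A sketch of the hash-only path, roughly the library counterpart of
'git hash-object' without '-w' (hypothetical helper):

/* Compute the object name of a would-be blob without storing it. */
static void blob_oid_of(const void *data, unsigned long len,
			struct object_id *oid)
{
	hash_object_file(data, len, blob_type, oid);
}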
return fd;
}
-static int write_loose_object(const unsigned char *sha1, char *hdr, int hdrlen,
- const void *buf, unsigned long len, time_t mtime)
+static int write_loose_object(const struct object_id *oid, char *hdr,
+ int hdrlen, const void *buf, unsigned long len,
+ time_t mtime)
{
int fd, ret;
unsigned char compressed[4096];
git_zstream stream;
- git_SHA_CTX c;
- unsigned char parano_sha1[20];
+ git_hash_ctx c;
+ struct object_id parano_oid;
static struct strbuf tmp_file = STRBUF_INIT;
static struct strbuf filename = STRBUF_INIT;
strbuf_reset(&filename);
- sha1_file_name(the_repository, &filename, sha1);
+ sha1_file_name(the_repository, &filename, oid->hash);
fd = create_tmpfile(&tmp_file, filename.buf);
if (fd < 0) {
git_deflate_init(&stream, zlib_compression_level);
stream.next_out = compressed;
stream.avail_out = sizeof(compressed);
- git_SHA1_Init(&c);
+ the_hash_algo->init_fn(&c);
/* First header.. */
stream.next_in = (unsigned char *)hdr;
stream.avail_in = hdrlen;
while (git_deflate(&stream, 0) == Z_OK)
; /* nothing */
- git_SHA1_Update(&c, hdr, hdrlen);
+ the_hash_algo->update_fn(&c, hdr, hdrlen);
/* Then the data itself.. */
stream.next_in = (void *)buf;
do {
unsigned char *in0 = stream.next_in;
ret = git_deflate(&stream, Z_FINISH);
- git_SHA1_Update(&c, in0, stream.next_in - in0);
+ the_hash_algo->update_fn(&c, in0, stream.next_in - in0);
if (write_buffer(fd, compressed, stream.next_out - compressed) < 0)
die("unable to write sha1 file");
stream.next_out = compressed;
} while (ret == Z_OK);
if (ret != Z_STREAM_END)
- die("unable to deflate new object %s (%d)", sha1_to_hex(sha1), ret);
+ die("unable to deflate new object %s (%d)", oid_to_hex(oid),
+ ret);
ret = git_deflate_end_gently(&stream);
if (ret != Z_OK)
- die("deflateEnd on object %s failed (%d)", sha1_to_hex(sha1), ret);
- git_SHA1_Final(parano_sha1, &c);
- if (hashcmp(sha1, parano_sha1) != 0)
- die("confused by unstable object source data for %s", sha1_to_hex(sha1));
+ die("deflateEnd on object %s failed (%d)", oid_to_hex(oid),
+ ret);
+ the_hash_algo->final_fn(parano_oid.hash, &c);
+ if (oidcmp(oid, &parano_oid) != 0)
+ die("confused by unstable object source data for %s",
+ oid_to_hex(oid));
close_sha1_file(fd);
static int freshen_packed_object(const unsigned char *sha1)
{
struct pack_entry e;
- if (!find_pack_entry(sha1, &e))
+ if (!find_pack_entry(the_repository, sha1, &e))
return 0;
if (e.p->freshened)
return 1;
return 1;
}
-int write_sha1_file(const void *buf, unsigned long len, const char *type, unsigned char *sha1)
+int write_object_file(const void *buf, unsigned long len, const char *type,
+ struct object_id *oid)
{
- char hdr[32];
+ char hdr[MAX_HEADER_LEN];
int hdrlen = sizeof(hdr);
/* Normally if we have it in the pack then we do not bother writing
* it out into .git/objects/??/?{38} file.
*/
- write_sha1_file_prepare(buf, len, type, sha1, hdr, &hdrlen);
- if (freshen_packed_object(sha1) || freshen_loose_object(sha1))
+ write_object_file_prepare(buf, len, type, oid, hdr, &hdrlen);
+ if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
return 0;
- return write_loose_object(sha1, hdr, hdrlen, buf, len, 0);
+ return write_loose_object(oid, hdr, hdrlen, buf, len, 0);
}
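
And its writing counterpart (hypothetical helper; per the freshen_*
checks above, an object that already exists loose or packed is only
freshened, not rewritten):

/* Store a blob loose unless some copy of it already exists. */
static void store_blob(const void *data, unsigned long len,
		       struct object_id *oid)
{
	if (write_object_file(data, len, blob_type, oid))
		die("unable to write blob object");
}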
-int hash_sha1_file_literally(const void *buf, unsigned long len, const char *type,
- struct object_id *oid, unsigned flags)
+int hash_object_file_literally(const void *buf, unsigned long len,
+ const char *type, struct object_id *oid,
+ unsigned flags)
{
char *header;
int hdrlen, status = 0;
/* type string, SP, %lu of the length plus NUL must fit this */
- hdrlen = strlen(type) + 32;
+ hdrlen = strlen(type) + MAX_HEADER_LEN;
header = xmalloc(hdrlen);
- write_sha1_file_prepare(buf, len, type, oid->hash, header, &hdrlen);
+ write_object_file_prepare(buf, len, type, oid, header, &hdrlen);
if (!(flags & HASH_WRITE_OBJECT))
goto cleanup;
if (freshen_packed_object(oid->hash) || freshen_loose_object(oid->hash))
goto cleanup;
- status = write_loose_object(oid->hash, header, hdrlen, buf, len, 0);
+ status = write_loose_object(oid, header, hdrlen, buf, len, 0);
cleanup:
free(header);
return status;
}
-int force_object_loose(const unsigned char *sha1, time_t mtime)
+int force_object_loose(const struct object_id *oid, time_t mtime)
{
void *buf;
unsigned long len;
enum object_type type;
- char hdr[32];
+ char hdr[MAX_HEADER_LEN];
int hdrlen;
int ret;
- if (has_loose_object(sha1))
+ if (has_loose_object(oid->hash))
return 0;
- buf = read_object(sha1, &type, &len);
+ buf = read_object(oid->hash, &type, &len);
if (!buf)
- return error("cannot read sha1_file for %s", sha1_to_hex(sha1));
- hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", typename(type), len) + 1;
- ret = write_loose_object(sha1, hdr, hdrlen, buf, len, mtime);
+ return error("cannot read sha1_file for %s", oid_to_hex(oid));
+ hdrlen = xsnprintf(hdr, sizeof(hdr), "%s %lu", type_name(type), len) + 1;
+ ret = write_loose_object(oid, hdr, hdrlen, buf, len, mtime);
free(buf);
return ret;
int has_sha1_file_with_flags(const unsigned char *sha1, int flags)
{
+ struct object_id oid;
if (!startup_info->have_repository)
return 0;
- return sha1_object_info_extended(sha1, NULL,
- flags | OBJECT_INFO_SKIP_CACHED) >= 0;
+ hashcpy(oid.hash, sha1);
+ return oid_object_info_extended(&oid, NULL,
+ flags | OBJECT_INFO_SKIP_CACHED) >= 0;
}
int has_object_file(const struct object_id *oid)
}
if (write_object)
- ret = write_sha1_file(buf, size, typename(type), oid->hash);
+ ret = write_object_file(buf, size, type_name(type), oid);
else
- ret = hash_sha1_file(buf, size, typename(type), oid->hash);
+ ret = hash_object_file(buf, size, type_name(type), oid);
if (re_allocated)
free(buf);
return ret;
get_conv_flags(flags));
if (write_object)
- ret = write_sha1_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB),
- oid->hash);
+ ret = write_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+ oid);
else
- ret = hash_sha1_file(sbuf.buf, sbuf.len, typename(OBJ_BLOB),
- oid->hash);
+ ret = hash_object_file(sbuf.buf, sbuf.len, type_name(OBJ_BLOB),
+ oid);
strbuf_release(&sbuf);
return ret;
}
enum object_type type, const char *path,
unsigned flags)
{
- return index_bulk_checkin(oid->hash, fd, size, type, path, flags);
+ return index_bulk_checkin(oid, fd, size, type, path, flags);
}
int index_fd(struct object_id *oid, int fd, struct stat *st,
if (strbuf_readlink(&sb, path, st->st_size))
return error_errno("readlink(\"%s\")", path);
if (!(flags & HASH_WRITE_OBJECT))
- hash_sha1_file(sb.buf, sb.len, blob_type, oid->hash);
- else if (write_sha1_file(sb.buf, sb.len, blob_type, oid->hash))
+ hash_object_file(sb.buf, sb.len, blob_type, oid);
+ else if (write_object_file(sb.buf, sb.len, blob_type, oid))
rc = error("%s: failed to insert into database", path);
strbuf_release(&sb);
break;
return 0;
}
-void assert_sha1_type(const unsigned char *sha1, enum object_type expect)
+void assert_oid_type(const struct object_id *oid, enum object_type expect)
{
- enum object_type type = sha1_object_info(sha1, NULL);
+ enum object_type type = oid_object_info(oid, NULL);
if (type < 0)
- die("%s is not a valid object", sha1_to_hex(sha1));
+ die("%s is not a valid object", oid_to_hex(oid));
if (type != expect)
- die("%s is not a valid '%s' object", sha1_to_hex(sha1),
- typename(expect));
+ die("%s is not a valid '%s' object", oid_to_hex(oid),
+ type_name(expect));
}
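
A sketch of the converted assertion in use (hypothetical helper;
lookup_commit() already takes an object_id at this point):

/* Die unless the oid names a commit, then look it up. */
static struct commit *must_be_commit(const struct object_id *oid)
{
	assert_oid_type(oid, OBJ_COMMIT);
	return lookup_commit(oid);
}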
int for_each_file_in_obj_subdir(unsigned int subdir_nr,
const char *path,
const unsigned char *expected_sha1)
{
- git_SHA_CTX c;
+ git_hash_ctx c;
unsigned char real_sha1[GIT_MAX_RAWSZ];
unsigned char buf[4096];
unsigned long total_read;
int status = Z_OK;
- git_SHA1_Init(&c);
- git_SHA1_Update(&c, hdr, stream->total_out);
+ the_hash_algo->init_fn(&c);
+ the_hash_algo->update_fn(&c, hdr, stream->total_out);
/*
* We already read some bytes into hdr, but the ones up to the NUL
if (size - total_read < stream->avail_out)
stream->avail_out = size - total_read;
status = git_inflate(stream, Z_FINISH);
- git_SHA1_Update(&c, buf, stream->next_out - buf);
+ the_hash_algo->update_fn(&c, buf, stream->next_out - buf);
total_read += stream->next_out - buf;
}
git_inflate_end(stream);
return -1;
}
- git_SHA1_Final(real_sha1, &c);
+ the_hash_algo->final_fn(real_sha1, &c);
if (hashcmp(expected_sha1, real_sha1)) {
error("sha1 mismatch for %s (expected %s)", path,
sha1_to_hex(expected_sha1));
}
int read_loose_object(const char *path,
- const unsigned char *expected_sha1,
+ const struct object_id *expected_oid,
enum object_type *type,
unsigned long *size,
void **contents)
void *map = NULL;
unsigned long mapsize;
git_zstream stream;
- char hdr[32];
+ char hdr[MAX_HEADER_LEN];
*contents = NULL;
}
if (*type == OBJ_BLOB) {
- if (check_stream_sha1(&stream, hdr, *size, path, expected_sha1) < 0)
+ if (check_stream_sha1(&stream, hdr, *size, path, expected_oid->hash) < 0)
goto out;
} else {
- *contents = unpack_sha1_rest(&stream, hdr, *size, expected_sha1);
+ *contents = unpack_sha1_rest(&stream, hdr, *size, expected_oid->hash);
if (!*contents) {
error("unable to unpack contents of %s", path);
git_inflate_end(&stream);
goto out;
}
- if (check_sha1_signature(expected_sha1, *contents,
- *size, typename(*type))) {
+ if (check_object_signature(expected_oid, *contents,
+ *size, type_name(*type))) {
error("sha1 mismatch for %s (expected %s)", path,
- sha1_to_hex(expected_sha1));
+ oid_to_hex(expected_oid));
free(*contents);
goto out;
}
static void unique_in_pack(struct packed_git *p,
struct disambiguate_state *ds)
{
- uint32_t num, last, i, first = 0;
+ uint32_t num, i, first = 0;
const struct object_id *current = NULL;
if (open_pack_index(p) || !p->num_objects)
return;
num = p->num_objects;
- last = num;
- while (first < last) {
- uint32_t mid = first + (last - first) / 2;
- const unsigned char *current;
- int cmp;
-
- current = nth_packed_object_sha1(p, mid);
- cmp = hashcmp(ds->bin_pfx.hash, current);
- if (!cmp) {
- first = mid;
- break;
- }
- if (cmp > 0) {
- first = mid+1;
- continue;
- }
- last = mid;
- }
+ bsearch_pack(&ds->bin_pfx, p, &first);
/*
* At this point, "first" is the location of the lowest object
{
struct packed_git *p;
- prepare_packed_git();
for (p = get_packed_git(the_repository); p && !ds->ambiguous;
p = p->next)
unique_in_pack(p, ds);
static int disambiguate_commit_only(const struct object_id *oid, void *cb_data_unused)
{
- int kind = sha1_object_info(oid->hash, NULL);
+ int kind = oid_object_info(oid, NULL);
return kind == OBJ_COMMIT;
}
struct object *obj;
int kind;
- kind = sha1_object_info(oid->hash, NULL);
+ kind = oid_object_info(oid, NULL);
if (kind == OBJ_COMMIT)
return 1;
if (kind != OBJ_TAG)
static int disambiguate_tree_only(const struct object_id *oid, void *cb_data_unused)
{
- int kind = sha1_object_info(oid->hash, NULL);
+ int kind = oid_object_info(oid, NULL);
return kind == OBJ_TREE;
}
struct object *obj;
int kind;
- kind = sha1_object_info(oid->hash, NULL);
+ kind = oid_object_info(oid, NULL);
if (kind == OBJ_TREE || kind == OBJ_COMMIT)
return 1;
if (kind != OBJ_TAG)
static int disambiguate_blob_only(const struct object_id *oid, void *cb_data_unused)
{
- int kind = sha1_object_info(oid->hash, NULL);
+ int kind = oid_object_info(oid, NULL);
return kind == OBJ_BLOB;
}
if (ds->fn && !ds->fn(oid, ds->cb_data))
return 0;
- type = sha1_object_info(oid->hash, NULL);
+ type = oid_object_info(oid, NULL);
if (type == OBJ_COMMIT) {
struct commit *commit = lookup_commit(oid);
if (commit) {
}
advise(" %s %s%s",
- find_unique_abbrev(oid->hash, DEFAULT_ABBREV),
- typename(type) ? typename(type) : "unknown type",
+ find_unique_abbrev(oid, DEFAULT_ABBREV),
+ type_name(type) ? type_name(type) : "unknown type",
desc.buf);
strbuf_release(&desc);
unsigned int init_len;
unsigned int cur_len;
char *hex;
- const unsigned char *hash;
+ const struct object_id *oid;
};
static inline char get_hex_char_from_oid(const struct object_id *oid,
struct min_abbrev_data *mad)
{
int match = 0;
- uint32_t num, last, first = 0;
+ uint32_t num, first = 0;
struct object_id oid;
+ const struct object_id *mad_oid;
if (open_pack_index(p) || !p->num_objects)
return;
num = p->num_objects;
- last = num;
- while (first < last) {
- uint32_t mid = first + (last - first) / 2;
- const unsigned char *current;
- int cmp;
-
- current = nth_packed_object_sha1(p, mid);
- cmp = hashcmp(mad->hash, current);
- if (!cmp) {
- match = 1;
- first = mid;
- break;
- }
- if (cmp > 0) {
- first = mid + 1;
- continue;
- }
- last = mid;
- }
+ mad_oid = mad->oid;
+ match = bsearch_pack(mad_oid, p, &first);
/*
* first is now the position in the packfile where we would insert
* mad->hash if it does not exist (or the position of mad->hash if
- * it does exist). Hence, we consider a maximum of three objects
+ * it does exist). Hence, we consider a maximum of two objects
* nearby for the abbreviation length.
*/
mad->init_len = 0;
if (!match) {
- nth_packed_object_oid(&oid, p, first);
- extend_abbrev_len(&oid, mad);
+ if (nth_packed_object_oid(&oid, p, first))
+ extend_abbrev_len(&oid, mad);
} else if (first < num - 1) {
- nth_packed_object_oid(&oid, p, first + 1);
- extend_abbrev_len(&oid, mad);
+ if (nth_packed_object_oid(&oid, p, first + 1))
+ extend_abbrev_len(&oid, mad);
}
if (first > 0) {
- nth_packed_object_oid(&oid, p, first - 1);
- extend_abbrev_len(&oid, mad);
+ if (nth_packed_object_oid(&oid, p, first - 1))
+ extend_abbrev_len(&oid, mad);
}
mad->init_len = mad->cur_len;
}
{
struct packed_git *p;
- prepare_packed_git();
for (p = get_packed_git(the_repository); p; p = p->next)
find_abbrev_len_for_pack(p, mad);
}
-int find_unique_abbrev_r(char *hex, const unsigned char *sha1, int len)
+int find_unique_abbrev_r(char *hex, const struct object_id *oid, int len)
{
struct disambiguate_state ds;
struct min_abbrev_data mad;
len = FALLBACK_DEFAULT_ABBREV;
}
- sha1_to_hex_r(hex, sha1);
+ oid_to_hex_r(hex, oid);
if (len == GIT_SHA1_HEXSZ || !len)
return GIT_SHA1_HEXSZ;
mad.init_len = len;
mad.cur_len = len;
mad.hex = hex;
- mad.hash = sha1;
+ mad.oid = oid;
find_abbrev_len_packed(&mad);
return mad.cur_len;
}
-const char *find_unique_abbrev(const unsigned char *sha1, int len)
+const char *find_unique_abbrev(const struct object_id *oid, int len)
{
static int bufno;
static char hexbuffer[4][GIT_MAX_HEXSZ + 1];
char *hex = hexbuffer[bufno];
bufno = (bufno + 1) % ARRAY_SIZE(hexbuffer);
- find_unique_abbrev_r(hex, sha1, len);
+ find_unique_abbrev_r(hex, oid, len);
return hex;
}
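
Seen from the caller's side (hypothetical helper):

/* Print the shortest unambiguous abbreviation of an object name. */
static void show_abbrev(const struct object_id *oid)
{
	puts(find_unique_abbrev(oid, DEFAULT_ABBREV));
}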
if (name)
error("%.*s: expected %s type, but the object "
"dereferences to %s type",
- namelen, name, typename(expected_type),
- typename(o->type));
+ namelen, name, type_name(expected_type),
+ type_name(o->type));
return NULL;
}
}
if (is_missing_file_error(errno)) {
char *fullname = xstrfmt("%s%s", prefix, filename);
- if (!get_tree_entry(tree_oid->hash, fullname,
- oid.hash, &mode)) {
+ if (!get_tree_entry(tree_oid, fullname, &oid, &mode)) {
die("Path '%s' exists, but not '%s'.\n"
"Did you mean '%.*s:%s' aka '%.*s:./%s'?",
fullname,
filename, oid->hash, &oc->symlink_path,
&oc->mode);
} else {
- ret = get_tree_entry(tree_oid.hash, filename,
- oid->hash, &oc->mode);
+ ret = get_tree_entry(&tree_oid, filename, oid,
+ &oc->mode);
if (ret && only_to_die) {
diagnose_invalid_oid_path(prefix,
filename,