From: Junio C Hamano
Date: Fri, 17 Aug 2018 20:09:56 +0000 (-0700)
Subject: Merge branch 'rs/parse-opt-lithelp'
X-Git-Tag: v2.19.0-rc0~50
X-Git-Url: https://git.lorimer.id.au/gitweb.git/diff_plain/8963bb0c2d023738f8ea021b512ed83cd79ee754?ds=inline;hp=-c

Merge branch 'rs/parse-opt-lithelp'

The parse-options machinery learned to refrain from enclosing a
placeholder string inside a "<>" pair automatically, without requiring
PARSE_OPT_LITERAL_ARGHELP. Existing help texts for option arguments
that were not formatted correctly have been identified and fixed.

* rs/parse-opt-lithelp:
  parse-options: automatically infer PARSE_OPT_LITERAL_ARGHELP
  shortlog: correct option help for -w
  send-pack: specify --force-with-lease argument help explicitly
  pack-objects: specify --index-version argument help explicitly
  difftool: remove angular brackets from argument help
  add, update-index: fix --chmod argument help
  push: use PARSE_OPT_LITERAL_ARGHELP instead of unbalanced brackets
---
8963bb0c2d023738f8ea021b512ed83cd79ee754
diff --combined builtin/add.c
index 8a155dd41e,8352bdf71f..ba1ff5689d
--- a/builtin/add.c
+++ b/builtin/add.c
@@@ -9,7 -9,7 +9,7 @@@ #include "lockfile.h" #include "dir.h" #include "pathspec.h" -#include "exec_cmd.h" +#include "exec-cmd.h" #include "cache-tree.h" #include "run-command.h" #include "parse-options.h"
@@@ -265,6 -265,8 +265,6 @@@ static int edit_patch(int argc, const c return 0; } -static struct lock_file lock_file; - static const char ignore_error[] = N_("The following paths are ignored by one of your .gitignore files:\n");
@@@ -292,7 -294,7 +292,7 @@@ static struct option builtin_add_option OPT_BOOL('i', "interactive", &add_interactive, N_("interactive picking")), OPT_BOOL('p', "patch", &patch_interactive, N_("select hunks interactively")), OPT_BOOL('e', "edit", &edit_interactive, N_("edit current diff and apply")), - OPT__FORCE(&ignored_too, N_("allow adding otherwise ignored files")), + OPT__FORCE(&ignored_too, N_("allow adding otherwise ignored files"), 0), OPT_BOOL('u', "update", &take_worktree_changes, N_("update tracked files")), OPT_BOOL(0, "renormalize", &add_renormalize, N_("renormalize EOL of tracked files (implies -u)")), OPT_BOOL('N', "intent-to-add", &intent_to_add, N_("record only the fact that the path will be added later")),
@@@ -304,7 -306,8 +304,8 @@@ OPT_BOOL( 0 , "refresh", &refresh_only, N_("don't add, only refresh the index")), OPT_BOOL( 0 , "ignore-errors", &ignore_add_errors, N_("just skip files which cannot be added because of errors")), OPT_BOOL( 0 , "ignore-missing", &ignore_missing, N_("check if - even missing - files are ignored in dry run")), - OPT_STRING( 0 , "chmod", &chmod_arg, N_("(+/-)x"), N_("override the executable bit of the listed files")), + OPT_STRING(0, "chmod", &chmod_arg, "(+|-)x", + N_("override the executable bit of the listed files")), OPT_HIDDEN_BOOL(0, "warn-embedded-repo", &warn_on_embedded_repo, N_("warn when adding an embedded repository")), OPT_END(),
@@@ -391,7 -394,6 +392,7 @@@ int cmd_add(int argc, const char **argv int add_new_files; int require_pathspec; char *seen = NULL; + struct lock_file lock_file = LOCK_INIT; git_config(add_config, NULL);
@@@ -533,9 -535,10 +534,9 @@@ unplug_bulk_checkin(); finish: - if (active_cache_changed) { - if (write_locked_index(&the_index, &lock_file, COMMIT_LOCK)) - die(_("Unable to write new index file")); - } + if (write_locked_index(&the_index, &lock_file, + COMMIT_LOCK | SKIP_IF_UNCHANGED)) + die(_("Unable to write new index file")); UNLEAK(pathspec); UNLEAK(dir);
diff --combined builtin/difftool.c
index 3018e61d04,91dbef8f49..cdd585ca76 --- a/builtin/difftool.c +++ b/builtin/difftool.c @@@ -15,12 -15,11 +15,12 @@@ #include "config.h" #include "builtin.h" #include "run-command.h" -#include "exec_cmd.h" +#include "exec-cmd.h" #include "parse-options.h" #include "argv-array.h" #include "strbuf.h" #include "lockfile.h" +#include "object-store.h" #include "dir.h" static char *diff_gui_tool; @@@ -307,7 -306,7 +307,7 @@@ static char *get_symlink(const struct o } else { enum object_type type; unsigned long size; - data = read_sha1_file(oid->hash, &type, &size); + data = read_object_file(oid, &type, &size); if (!data) die(_("could not read object %s for symlink %s"), oid_to_hex(oid), path); @@@ -322,10 -321,10 +322,10 @@@ static int checkout_path(unsigned mode struct cache_entry *ce; int ret; - ce = make_cache_entry(mode, oid->hash, path, 0, 0); + ce = make_transient_cache_entry(mode, oid, path, 0); ret = checkout_entry(ce, state, NULL); - free(ce); + discard_cache_entry(ce); return ret; } @@@ -489,7 -488,7 +489,7 @@@ static int run_dir_diff(const char *ext * index. */ struct cache_entry *ce2 = - make_cache_entry(rmode, roid.hash, + make_cache_entry(&wtindex, rmode, &roid, dst_path, 0, 0); add_index_entry(&wtindex, ce2, @@@ -611,7 -610,7 +611,7 @@@ continue; if (!indices_loaded) { - static struct lock_file lock; + struct lock_file lock = LOCK_INIT; strbuf_reset(&buf); strbuf_addf(&buf, "%s/wtindex", tmpdir); if (hold_lock_file_for_update(&lock, buf.buf, 0) < 0 || @@@ -696,14 -695,15 +696,14 @@@ int cmd_difftool(int argc, const char * N_("use `diff.guitool` instead of `diff.tool`")), OPT_BOOL('d', "dir-diff", &dir_diff, N_("perform a full-directory diff")), - { OPTION_SET_INT, 'y', "no-prompt", &prompt, NULL, + OPT_SET_INT_F('y', "no-prompt", &prompt, N_("do not prompt before launching a diff tool"), - PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 0}, - { OPTION_SET_INT, 0, "prompt", &prompt, NULL, NULL, - PARSE_OPT_NOARG | PARSE_OPT_NONEG | PARSE_OPT_HIDDEN, - NULL, 1 }, + 0, PARSE_OPT_NONEG), + OPT_SET_INT_F(0, "prompt", &prompt, NULL, + 1, PARSE_OPT_NONEG | PARSE_OPT_HIDDEN), OPT_BOOL(0, "symlinks", &symlinks, N_("use symlinks in dir-diff mode")), - OPT_STRING('t', "tool", &difftool_cmd, N_(""), + OPT_STRING('t', "tool", &difftool_cmd, N_("tool"), N_("use the specified diff tool")), OPT_BOOL(0, "tool-help", &tool_help, N_("print a list of diff tools that may be used with " @@@ -711,7 -711,7 +711,7 @@@ OPT_BOOL(0, "trust-exit-code", &trust_exit_code, N_("make 'git-difftool' exit when an invoked diff " "tool returns a non - zero exit code")), - OPT_STRING('x', "extcmd", &extcmd, N_(""), + OPT_STRING('x', "extcmd", &extcmd, N_("command"), N_("specify a custom command for viewing diffs")), OPT_END() }; diff --combined builtin/pack-objects.c index 80c880e9ad,1529a75e03..c0741baa8b --- a/builtin/pack-objects.c +++ b/builtin/pack-objects.c @@@ -1,6 -1,5 +1,6 @@@ #include "builtin.h" #include "cache.h" +#include "repository.h" #include "config.h" #include "attr.h" #include "object.h" @@@ -27,22 -26,8 +27,22 @@@ #include "reachable.h" #include "sha1-array.h" #include "argv-array.h" -#include "mru.h" +#include "list.h" #include "packfile.h" +#include "object-store.h" +#include "dir.h" + +#define IN_PACK(obj) oe_in_pack(&to_pack, obj) +#define SIZE(obj) oe_size(&to_pack, obj) +#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size) +#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj) +#define DELTA(obj) oe_delta(&to_pack, obj) +#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj) +#define 
DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj) +#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val) +#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val) +#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val) +#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val) static const char *pack_usage[] = { N_("git pack-objects --stdout [...] [< | < ]"), @@@ -58,7 -43,7 +58,7 @@@ static struct packing_data to_pack; static struct pack_idx_entry **written_list; -static uint32_t nr_result, nr_written; +static uint32_t nr_result, nr_written, nr_seen; static int non_empty; static int reuse_delta = 1, reuse_object = 1; @@@ -68,8 -53,7 +68,8 @@@ static int pack_loose_unreachable static int local; static int have_non_local_packs; static int incremental; -static int ignore_packed_keep; +static int ignore_packed_keep_on_disk; +static int ignore_packed_keep_in_core; static int allow_ofs_delta; static struct pack_idx_option pack_idx_opts; static const char *base_name; @@@ -91,10 -75,8 +91,10 @@@ static int use_bitmap_index = -1 static int write_bitmap_index; static uint16_t write_bitmap_options; +static int exclude_promisor_objects; + static unsigned long delta_cache_size = 0; -static unsigned long max_delta_cache_size = 256 * 1024 * 1024; +static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE; static unsigned long cache_max_small_delta_size = 1000; static unsigned long window_memory_limit = 0; @@@ -102,9 -84,8 +102,9 @@@ static struct list_objects_filter_options filter_options; enum missing_action { - MA_ERROR = 0, /* fail if any missing objects are encountered */ - MA_ALLOW_ANY, /* silently allow ALL missing objects */ + MA_ERROR = 0, /* fail if any missing objects are encountered */ + MA_ALLOW_ANY, /* silently allow ALL missing objects */ + MA_ALLOW_PROMISOR, /* silently allow all missing PROMISOR objects */ }; static enum missing_action arg_missing_action; static show_object_fn fn_show_object; @@@ -138,23 -119,18 +138,23 @@@ static void *get_delta(struct object_en void *buf, *base_buf, *delta_buf; enum object_type type; - buf = read_sha1_file(entry->idx.oid.hash, &type, &size); + buf = read_object_file(&entry->idx.oid, &type, &size); if (!buf) - die("unable to read %s", oid_to_hex(&entry->idx.oid)); - base_buf = read_sha1_file(entry->delta->idx.oid.hash, &type, - &base_size); + die(_("unable to read %s"), oid_to_hex(&entry->idx.oid)); + base_buf = read_object_file(&DELTA(entry)->idx.oid, &type, + &base_size); if (!base_buf) die("unable to read %s", - oid_to_hex(&entry->delta->idx.oid)); + oid_to_hex(&DELTA(entry)->idx.oid)); delta_buf = diff_delta(base_buf, base_size, buf, size, &delta_size, 0); - if (!delta_buf || delta_size != entry->delta_size) - die("delta size changed"); + /* + * We succesfully computed this delta once but dropped it for + * memory reasons. Something is very wrong if this time we + * recompute and create a different delta. + */ + if (!delta_buf || delta_size != DELTA_SIZE(entry)) + BUG("delta size changed"); free(buf); free(base_buf); return delta_buf; @@@ -185,7 -161,7 +185,7 @@@ static unsigned long do_compress(void * return stream.total_out; } -static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f, +static unsigned long write_large_blob_data(struct git_istream *st, struct hashfile *f, const struct object_id *oid) { git_zstream stream; @@@ -209,7 -185,7 +209,7 @@@ stream.next_out = obuf; stream.avail_out = sizeof(obuf); zret = git_deflate(&stream, readlen ? 
0 : Z_FINISH); - sha1write(f, obuf, stream.next_out - obuf); + hashwrite(f, obuf, stream.next_out - obuf); olen += stream.next_out - obuf; } if (stream.avail_in) @@@ -254,7 -230,7 +254,7 @@@ static int check_pack_inflate(struct pa stream.total_in == len) ? 0 : -1; } -static void copy_pack_data(struct sha1file *f, +static void copy_pack_data(struct hashfile *f, struct packed_git *p, struct pack_window **w_curs, off_t offset, @@@ -267,14 -243,14 +267,14 @@@ in = use_pack(p, w_curs, offset, &avail); if (avail > len) avail = (unsigned long)len; - sha1write(f, in, avail); + hashwrite(f, in, avail); offset += avail; len -= avail; } } /* Return 0 if we will bust the pack-size limit */ -static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry, +static unsigned long write_no_reuse_object(struct hashfile *f, struct object_entry *entry, unsigned long limit, int usable_delta) { unsigned long size, datalen; @@@ -284,15 -260,15 +284,15 @@@ enum object_type type; void *buf; struct git_istream *st = NULL; + const unsigned hashsz = the_hash_algo->rawsz; if (!usable_delta) { - if (entry->type == OBJ_BLOB && - entry->size > big_file_threshold && - (st = open_istream(entry->idx.oid.hash, &type, &size, NULL)) != NULL) + if (oe_type(entry) == OBJ_BLOB && + oe_size_greater_than(&to_pack, entry, big_file_threshold) && + (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL) buf = NULL; else { - buf = read_sha1_file(entry->idx.oid.hash, &type, - &size); + buf = read_object_file(&entry->idx.oid, &type, &size); if (!buf) die(_("unable to read %s"), oid_to_hex(&entry->idx.oid)); @@@ -304,15 -280,15 +304,15 @@@ FREE_AND_NULL(entry->delta_data); entry->z_delta_size = 0; } else if (entry->delta_data) { - size = entry->delta_size; + size = DELTA_SIZE(entry); buf = entry->delta_data; entry->delta_data = NULL; - type = (allow_ofs_delta && entry->delta->idx.offset) ? + type = (allow_ofs_delta && DELTA(entry)->idx.offset) ? OBJ_OFS_DELTA : OBJ_REF_DELTA; } else { buf = get_delta(entry); - size = entry->delta_size; - type = (allow_ofs_delta && entry->delta->idx.offset) ? + size = DELTA_SIZE(entry); + type = (allow_ofs_delta && DELTA(entry)->idx.offset) ? OBJ_OFS_DELTA : OBJ_REF_DELTA; } @@@ -336,48 -312,48 +336,48 @@@ * encoding of the relative offset for the delta * base from this object's position in the pack. */ - off_t ofs = entry->idx.offset - entry->delta->idx.offset; + off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset; unsigned pos = sizeof(dheader) - 1; dheader[pos] = ofs & 127; while (ofs >>= 7) dheader[--pos] = 128 | (--ofs & 127); - if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) { + if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) { if (st) close_istream(st); free(buf); return 0; } - sha1write(f, header, hdrlen); - sha1write(f, dheader + pos, sizeof(dheader) - pos); + hashwrite(f, header, hdrlen); + hashwrite(f, dheader + pos, sizeof(dheader) - pos); hdrlen += sizeof(dheader) - pos; } else if (type == OBJ_REF_DELTA) { /* * Deltas with a base reference contain - * an additional 20 bytes for the base sha1. + * additional bytes for the base object ID. 
*/ - if (limit && hdrlen + 20 + datalen + 20 >= limit) { + if (limit && hdrlen + hashsz + datalen + hashsz >= limit) { if (st) close_istream(st); free(buf); return 0; } - sha1write(f, header, hdrlen); - sha1write(f, entry->delta->idx.oid.hash, 20); - hdrlen += 20; + hashwrite(f, header, hdrlen); + hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz); + hdrlen += hashsz; } else { - if (limit && hdrlen + datalen + 20 >= limit) { + if (limit && hdrlen + datalen + hashsz >= limit) { if (st) close_istream(st); free(buf); return 0; } - sha1write(f, header, hdrlen); + hashwrite(f, header, hdrlen); } if (st) { datalen = write_large_blob_data(st, f, &entry->idx.oid); close_istream(st); } else { - sha1write(f, buf, datalen); + hashwrite(f, buf, datalen); free(buf); } @@@ -385,33 -361,31 +385,33 @@@ } /* Return 0 if we will bust the pack-size limit */ -static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry, +static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry, unsigned long limit, int usable_delta) { - struct packed_git *p = entry->in_pack; + struct packed_git *p = IN_PACK(entry); struct pack_window *w_curs = NULL; struct revindex_entry *revidx; off_t offset; - enum object_type type = entry->type; + enum object_type type = oe_type(entry); off_t datalen; unsigned char header[MAX_PACK_OBJECT_HEADER], dheader[MAX_PACK_OBJECT_HEADER]; unsigned hdrlen; + const unsigned hashsz = the_hash_algo->rawsz; + unsigned long entry_size = SIZE(entry); - if (entry->delta) - type = (allow_ofs_delta && entry->delta->idx.offset) ? + if (DELTA(entry)) + type = (allow_ofs_delta && DELTA(entry)->idx.offset) ? OBJ_OFS_DELTA : OBJ_REF_DELTA; hdrlen = encode_in_pack_object_header(header, sizeof(header), - type, entry->size); + type, entry_size); offset = entry->in_pack_offset; revidx = find_pack_revindex(p, offset); datalen = revidx[1].offset - offset; if (!pack_to_stdout && p->index_version > 1 && check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) { - error("bad packed object CRC for %s", + error(_("bad packed object CRC for %s"), oid_to_hex(&entry->idx.oid)); unuse_pack(&w_curs); return write_no_reuse_object(f, entry, limit, usable_delta); @@@ -421,42 -395,42 +421,42 @@@ datalen -= entry->in_pack_header_size; if (!pack_to_stdout && p->index_version == 1 && - check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) { - error("corrupt packed object for %s", + check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) { + error(_("corrupt packed object for %s"), oid_to_hex(&entry->idx.oid)); unuse_pack(&w_curs); return write_no_reuse_object(f, entry, limit, usable_delta); } if (type == OBJ_OFS_DELTA) { - off_t ofs = entry->idx.offset - entry->delta->idx.offset; + off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset; unsigned pos = sizeof(dheader) - 1; dheader[pos] = ofs & 127; while (ofs >>= 7) dheader[--pos] = 128 | (--ofs & 127); - if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) { + if (limit && hdrlen + sizeof(dheader) - pos + datalen + hashsz >= limit) { unuse_pack(&w_curs); return 0; } - sha1write(f, header, hdrlen); - sha1write(f, dheader + pos, sizeof(dheader) - pos); + hashwrite(f, header, hdrlen); + hashwrite(f, dheader + pos, sizeof(dheader) - pos); hdrlen += sizeof(dheader) - pos; reused_delta++; } else if (type == OBJ_REF_DELTA) { - if (limit && hdrlen + 20 + datalen + 20 >= limit) { + if (limit && hdrlen + hashsz + datalen + hashsz >= limit) { unuse_pack(&w_curs); return 0; } - sha1write(f, header, hdrlen); - sha1write(f, 
entry->delta->idx.oid.hash, 20); - hdrlen += 20; + hashwrite(f, header, hdrlen); + hashwrite(f, DELTA(entry)->idx.oid.hash, hashsz); + hdrlen += hashsz; reused_delta++; } else { - if (limit && hdrlen + datalen + 20 >= limit) { + if (limit && hdrlen + datalen + hashsz >= limit) { unuse_pack(&w_curs); return 0; } - sha1write(f, header, hdrlen); + hashwrite(f, header, hdrlen); } copy_pack_data(f, p, &w_curs, offset, datalen); unuse_pack(&w_curs); @@@ -465,7 -439,7 +465,7 @@@ } /* Return 0 if we will bust the pack-size limit */ -static off_t write_object(struct sha1file *f, +static off_t write_object(struct hashfile *f, struct object_entry *entry, off_t write_offset) { @@@ -488,29 -462,28 +488,29 @@@ else limit = pack_size_limit - write_offset; - if (!entry->delta) + if (!DELTA(entry)) usable_delta = 0; /* no delta */ else if (!pack_size_limit) usable_delta = 1; /* unlimited packfile */ - else if (entry->delta->idx.offset == (off_t)-1) + else if (DELTA(entry)->idx.offset == (off_t)-1) usable_delta = 0; /* base was written to another pack */ - else if (entry->delta->idx.offset) + else if (DELTA(entry)->idx.offset) usable_delta = 1; /* base already exists in this pack */ else usable_delta = 0; /* base could end up in another pack */ if (!reuse_object) to_reuse = 0; /* explicit */ - else if (!entry->in_pack) + else if (!IN_PACK(entry)) to_reuse = 0; /* can't reuse what we don't have */ - else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA) + else if (oe_type(entry) == OBJ_REF_DELTA || + oe_type(entry) == OBJ_OFS_DELTA) /* check_object() decided it for us ... */ to_reuse = usable_delta; /* ... but pack split may override that */ - else if (entry->type != entry->in_pack_type) + else if (oe_type(entry) != entry->in_pack_type) to_reuse = 0; /* pack has delta which is unusable */ - else if (entry->delta) + else if (DELTA(entry)) to_reuse = 0; /* we want to pack afresh */ else to_reuse = 1; /* we have it in-pack undeltified, @@@ -539,7 -512,7 +539,7 @@@ enum write_one_status WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */ }; -static enum write_one_status write_one(struct sha1file *f, +static enum write_one_status write_one(struct hashfile *f, struct object_entry *e, off_t *offset) { @@@ -553,7 -526,7 +553,7 @@@ */ recursing = (e->idx.offset == 1); if (recursing) { - warning("recursive delta detected for object %s", + warning(_("recursive delta detected for object %s"), oid_to_hex(&e->idx.oid)); return WRITE_ONE_RECURSIVE; } else if (e->idx.offset || e->preferred_base) { @@@ -562,12 -535,12 +562,12 @@@ } /* if we are deltified, write out base object first. */ - if (e->delta) { + if (DELTA(e)) { e->idx.offset = 1; /* now recurse */ - switch (write_one(f, e->delta, offset)) { + switch (write_one(f, DELTA(e), offset)) { case WRITE_ONE_RECURSIVE: /* we cannot depend on this one */ - e->delta = NULL; + SET_DELTA(e, NULL); break; default: break; @@@ -587,7 -560,7 +587,7 @@@ /* make sure off_t is sufficiently large not to wrap */ if (signed_add_overflows(*offset, size)) - die("pack too large for current definition of off_t"); + die(_("pack too large for current definition of off_t")); *offset += size; return WRITE_ONE_WRITTEN; } @@@ -629,34 -602,34 +629,34 @@@ static void add_descendants_to_write_or /* add this node... */ add_to_write_order(wo, endp, e); /* all its siblings... 
*/ - for (s = e->delta_sibling; s; s = s->delta_sibling) { + for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) { add_to_write_order(wo, endp, s); } } /* drop down a level to add left subtree nodes if possible */ - if (e->delta_child) { + if (DELTA_CHILD(e)) { add_to_order = 1; - e = e->delta_child; + e = DELTA_CHILD(e); } else { add_to_order = 0; /* our sibling might have some children, it is next */ - if (e->delta_sibling) { - e = e->delta_sibling; + if (DELTA_SIBLING(e)) { + e = DELTA_SIBLING(e); continue; } /* go back to our parent node */ - e = e->delta; - while (e && !e->delta_sibling) { + e = DELTA(e); + while (e && !DELTA_SIBLING(e)) { /* we're on the right side of a subtree, keep * going up until we can go right again */ - e = e->delta; + e = DELTA(e); } if (!e) { /* done- we hit our original root node */ return; } /* pass it off to sibling at this level */ - e = e->delta_sibling; + e = DELTA_SIBLING(e); } }; } @@@ -667,7 -640,7 +667,7 @@@ static void add_family_to_write_order(s { struct object_entry *root; - for (root = e; root->delta; root = root->delta) + for (root = e; DELTA(root); root = DELTA(root)) ; /* nothing */ add_descendants_to_write_order(wo, endp, root); } @@@ -682,8 -655,8 +682,8 @@@ static struct object_entry **compute_wr for (i = 0; i < to_pack.nr_objects; i++) { objects[i].tagged = 0; objects[i].filled = 0; - objects[i].delta_child = NULL; - objects[i].delta_sibling = NULL; + SET_DELTA_CHILD(&objects[i], NULL); + SET_DELTA_SIBLING(&objects[i], NULL); } /* @@@ -693,11 -666,11 +693,11 @@@ */ for (i = to_pack.nr_objects; i > 0;) { struct object_entry *e = &objects[--i]; - if (!e->delta) + if (!DELTA(e)) continue; /* Mark me as the first child */ - e->delta_sibling = e->delta->delta_child; - e->delta->delta_child = e; + e->delta_sibling_idx = DELTA(e)->delta_child_idx; + SET_DELTA_CHILD(DELTA(e), e); } /* @@@ -729,8 -702,8 +729,8 @@@ * And then all remaining commits and tags. */ for (i = last_untagged; i < to_pack.nr_objects; i++) { - if (objects[i].type != OBJ_COMMIT && - objects[i].type != OBJ_TAG) + if (oe_type(&objects[i]) != OBJ_COMMIT && + oe_type(&objects[i]) != OBJ_TAG) continue; add_to_write_order(wo, &wo_end, &objects[i]); } @@@ -739,7 -712,7 +739,7 @@@ * And then all the trees. 
*/ for (i = last_untagged; i < to_pack.nr_objects; i++) { - if (objects[i].type != OBJ_TREE) + if (oe_type(&objects[i]) != OBJ_TREE) continue; add_to_write_order(wo, &wo_end, &objects[i]); } @@@ -753,31 -726,30 +753,31 @@@ } if (wo_end != to_pack.nr_objects) - die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects); + die(_("ordered %u objects, expected %"PRIu32), + wo_end, to_pack.nr_objects); return wo; } -static off_t write_reused_pack(struct sha1file *f) +static off_t write_reused_pack(struct hashfile *f) { unsigned char buffer[8192]; off_t to_write, total; int fd; if (!is_pack_valid(reuse_packfile)) - die("packfile is invalid: %s", reuse_packfile->pack_name); + die(_("packfile is invalid: %s"), reuse_packfile->pack_name); fd = git_open(reuse_packfile->pack_name); if (fd < 0) - die_errno("unable to open packfile for reuse: %s", + die_errno(_("unable to open packfile for reuse: %s"), reuse_packfile->pack_name); if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1) - die_errno("unable to seek in reused packfile"); + die_errno(_("unable to seek in reused packfile")); if (reuse_packfile_offset < 0) - reuse_packfile_offset = reuse_packfile->pack_size - 20; + reuse_packfile_offset = reuse_packfile->pack_size - the_hash_algo->rawsz; total = to_write = reuse_packfile_offset - sizeof(struct pack_header); @@@ -785,12 -757,12 +785,12 @@@ int read_pack = xread(fd, buffer, sizeof(buffer)); if (read_pack <= 0) - die_errno("unable to read from reused packfile"); + die_errno(_("unable to read from reused packfile")); if (read_pack > to_write) read_pack = to_write; - sha1write(f, buffer, read_pack); + hashwrite(f, buffer, read_pack); to_write -= read_pack; /* @@@ -819,7 -791,7 +819,7 @@@ static const char no_split_warning[] = static void write_pack_file(void) { uint32_t i = 0, j; - struct sha1file *f; + struct hashfile *f; off_t offset; uint32_t nr_remaining = nr_result; time_t last_mtime = 0; @@@ -835,7 -807,7 +835,7 @@@ char *pack_tmp_name = NULL; if (pack_to_stdout) - f = sha1fd_throughput(1, "", progress_state); + f = hashfd_throughput(1, "", progress_state); else f = create_tmp_packfile(&pack_tmp_name); @@@ -862,11 -834,11 +862,11 @@@ * If so, rewrite it like in fast-import */ if (pack_to_stdout) { - sha1close(f, oid.hash, CSUM_CLOSE); + finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE); } else if (nr_written == nr_remaining) { - sha1close(f, oid.hash, CSUM_FSYNC); + finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE); } else { - int fd = sha1close(f, oid.hash, 0); + int fd = finalize_hashfile(f, oid.hash, 0); fixup_pack_header_footer(fd, oid.hash, pack_tmp_name, nr_written, oid.hash, offset); close(fd); @@@ -888,7 -860,7 +888,7 @@@ * to preserve this property. 
*/ if (stat(pack_tmp_name, &st) < 0) { - warning_errno("failed to stat %s", pack_tmp_name); + warning_errno(_("failed to stat %s"), pack_tmp_name); } else if (!last_mtime) { last_mtime = st.st_mtime; } else { @@@ -896,15 -868,14 +896,15 @@@ utb.actime = st.st_atime; utb.modtime = --last_mtime; if (utime(pack_tmp_name, &utb) < 0) - warning_errno("failed utime() on %s", pack_tmp_name); + warning_errno(_("failed utime() on %s"), pack_tmp_name); } strbuf_addf(&tmpname, "%s-", base_name); if (write_bitmap_index) { bitmap_writer_set_checksum(oid.hash); - bitmap_writer_build_type_index(written_list, nr_written); + bitmap_writer_build_type_index( + &to_pack, written_list, nr_written); } finish_tmp_packfile(&tmpname, pack_tmp_name, @@@ -941,8 -912,8 +941,8 @@@ free(write_order); stop_progress(&progress_state); if (written != nr_result) - die("wrote %"PRIu32" objects while expecting %"PRIu32, - written, nr_result); + die(_("wrote %"PRIu32" objects while expecting %"PRIu32), + written, nr_result); } static int no_try_delta(const char *path) @@@ -1008,16 -979,13 +1008,16 @@@ static int want_found_object(int exclud * Otherwise, we signal "-1" at the end to tell the caller that we do * not know either way, and it needs to check more packs. */ - if (!ignore_packed_keep && + if (!ignore_packed_keep_on_disk && + !ignore_packed_keep_in_core && (!local || !have_non_local_packs)) return 1; if (local && !p->pack_local) return 0; - if (ignore_packed_keep && p->pack_local && p->pack_keep) + if (p->pack_local && + ((ignore_packed_keep_on_disk && p->pack_keep) || + (ignore_packed_keep_in_core && p->pack_keep_in_core))) return 0; /* we don't know yet; keep looking for more packs */ @@@ -1038,10 -1006,10 +1038,10 @@@ static int want_object_in_pack(const st struct packed_git **found_pack, off_t *found_offset) { - struct mru_entry *entry; int want; + struct list_head *pos; - if (!exclude && local && has_loose_object_nonlocal(oid->hash)) + if (!exclude && local && has_loose_object_nonlocal(oid)) return 0; /* @@@ -1054,8 -1022,9 +1054,8 @@@ if (want != -1) return want; } - - for (entry = packed_git_mru.head; entry; entry = entry->next) { - struct packed_git *p = entry->item; + list_for_each(pos, get_packed_git_mru(the_repository)) { + struct packed_git *p = list_entry(pos, struct packed_git, mru); off_t offset; if (p == *found_pack) @@@ -1072,8 -1041,7 +1072,8 @@@ } want = want_found_object(exclude, p); if (!exclude && want > 0) - mru_mark(&packed_git_mru, entry); + list_move(&p->mru, + get_packed_git_mru(the_repository)); if (want != -1) return want; } @@@ -1095,13 -1063,14 +1095,13 @@@ static void create_object_entry(const s entry = packlist_alloc(&to_pack, oid->hash, index_pos); entry->hash = hash; - if (type) - entry->type = type; + oe_set_type(entry, type); if (exclude) entry->preferred_base = 1; else nr_result++; if (found_pack) { - entry->in_pack = found_pack; + oe_set_in_pack(&to_pack, entry, found_pack); entry->in_pack_offset = found_offset; } @@@ -1119,8 -1088,6 +1119,8 @@@ static int add_object_entry(const struc off_t found_offset = 0; uint32_t index_pos; + display_progress(progress_state, ++nr_seen); + if (have_duplicate_entry(oid, exclude, &index_pos)) return 0; @@@ -1136,6 -1103,8 +1136,6 @@@ create_object_entry(oid, type, pack_name_hash(name), exclude, name && no_try_delta(name), index_pos, found_pack, found_offset); - - display_progress(progress_state, nr_result); return 1; } @@@ -1146,8 -1115,6 +1146,8 @@@ static int add_object_entry_from_bitmap { uint32_t index_pos; + display_progress(progress_state, 
++nr_seen); + if (have_duplicate_entry(oid, 0, &index_pos)) return 0; @@@ -1155,6 -1122,8 +1155,6 @@@ return 0; create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset); - - display_progress(progress_state, nr_result); return 1; } @@@ -1218,7 -1187,7 +1218,7 @@@ static struct pbase_tree_cache *pbase_t /* Did not find one. Either we got a bogus request or * we need to read and perhaps cache. */ - data = read_sha1_file(oid->hash, &type, &size); + data = read_object_file(oid, &type, &size); if (!data) return NULL; if (type != OBJ_TREE) { @@@ -1379,7 -1348,7 +1379,7 @@@ static void add_preferred_base(struct o if (window <= num_preferred_base++) return; - data = read_object_with_reference(oid->hash, tree_type, &size, tree_oid.hash); + data = read_object_with_reference(oid, tree_type, &size, &tree_oid); if (!data) return; @@@ -1407,10 -1376,10 +1407,10 @@@ static void cleanup_preferred_base(void it = pbase_tree; pbase_tree = NULL; while (it) { - struct pbase_tree *this = it; - it = this->next; - free(this->pcache.tree_data); - free(this); + struct pbase_tree *tmp = it; + it = tmp->next; + free(tmp->pcache.tree_data); + free(tmp); } for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) { @@@ -1426,10 -1395,8 +1426,10 @@@ static void check_object(struct object_entry *entry) { - if (entry->in_pack) { - struct packed_git *p = entry->in_pack; + unsigned long canonical_size; + + if (IN_PACK(entry)) { + struct packed_git *p = IN_PACK(entry); struct pack_window *w_curs = NULL; const unsigned char *base_ref = NULL; struct object_entry *base_entry; @@@ -1437,8 -1404,6 +1437,8 @@@ unsigned long avail; off_t ofs; unsigned char *buf, c; + enum object_type type; + unsigned long in_pack_size; buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail); @@@ -1447,15 -1412,11 +1447,15 @@@ * since non-delta representations could still be reused. */ used = unpack_object_header_buffer(buf, avail, - &entry->in_pack_type, - &entry->size); + &type, + &in_pack_size); if (used == 0) goto give_up; + if (type < 0) + BUG("invalid type %d", type); + entry->in_pack_type = type; + /* * Determine if this is a delta and if so whether we can * reuse it or not. Otherwise let's find out as cheaply as @@@ -1464,10 -1425,9 +1464,10 @@@ switch (entry->in_pack_type) { default: /* Not a delta hence we've already got all we need. 
*/ - entry->type = entry->in_pack_type; + oe_set_type(entry, entry->in_pack_type); + SET_SIZE(entry, in_pack_size); entry->in_pack_header_size = used; - if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB) + if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB) goto give_up; unuse_pack(&w_curs); return; @@@ -1475,7 -1435,7 +1475,7 @@@ if (reuse_delta && !entry->preferred_base) base_ref = use_pack(p, &w_curs, entry->in_pack_offset + used, NULL); - entry->in_pack_header_size = used + 20; + entry->in_pack_header_size = used + the_hash_algo->rawsz; break; case OBJ_OFS_DELTA: buf = use_pack(p, &w_curs, @@@ -1486,7 -1446,7 +1486,7 @@@ while (c & 128) { ofs += 1; if (!ofs || MSB(ofs, 7)) { - error("delta base offset overflow in pack for %s", + error(_("delta base offset overflow in pack for %s"), oid_to_hex(&entry->idx.oid)); goto give_up; } @@@ -1495,7 -1455,7 +1495,7 @@@ } ofs = entry->in_pack_offset - ofs; if (ofs <= 0 || ofs >= entry->in_pack_offset) { - error("delta base offset out of bound for %s", + error(_("delta base offset out of bound for %s"), oid_to_hex(&entry->idx.oid)); goto give_up; } @@@ -1521,29 -1481,25 +1521,29 @@@ * deltify other objects against, in order to avoid * circular deltas. */ - entry->type = entry->in_pack_type; - entry->delta = base_entry; - entry->delta_size = entry->size; - entry->delta_sibling = base_entry->delta_child; - base_entry->delta_child = entry; + oe_set_type(entry, entry->in_pack_type); + SET_SIZE(entry, in_pack_size); /* delta size */ + SET_DELTA(entry, base_entry); + SET_DELTA_SIZE(entry, in_pack_size); + entry->delta_sibling_idx = base_entry->delta_child_idx; + SET_DELTA_CHILD(base_entry, entry); unuse_pack(&w_curs); return; } - if (entry->type) { + if (oe_type(entry)) { + off_t delta_pos; + /* * This must be a delta and we already know what the * final object type is. Let's extract the actual * object size from the delta header. */ - entry->size = get_size_from_delta(p, &w_curs, - entry->in_pack_offset + entry->in_pack_header_size); - if (entry->size == 0) + delta_pos = entry->in_pack_offset + entry->in_pack_header_size; + canonical_size = get_size_from_delta(p, &w_curs, delta_pos); + if (canonical_size == 0) goto give_up; + SET_SIZE(entry, canonical_size); unuse_pack(&w_curs); return; } @@@ -1557,34 -1513,27 +1557,34 @@@ unuse_pack(&w_curs); } - entry->type = sha1_object_info(entry->idx.oid.hash, &entry->size); - /* - * The error condition is checked in prepare_pack(). This is - * to permit a missing preferred base object to be ignored - * as a preferred base. Doing so can result in a larger - * pack file, but the transfer will still take place. - */ + oe_set_type(entry, + oid_object_info(the_repository, &entry->idx.oid, &canonical_size)); + if (entry->type_valid) { + SET_SIZE(entry, canonical_size); + } else { + /* + * Bad object type is checked in prepare_pack(). This is + * to permit a missing preferred base object to be ignored + * as a preferred base. Doing so can result in a larger + * pack file, but the transfer will still take place. 
+ */ + } } static int pack_offset_sort(const void *_a, const void *_b) { const struct object_entry *a = *(struct object_entry **)_a; const struct object_entry *b = *(struct object_entry **)_b; + const struct packed_git *a_in_pack = IN_PACK(a); + const struct packed_git *b_in_pack = IN_PACK(b); /* avoid filesystem trashing with loose objects */ - if (!a->in_pack && !b->in_pack) + if (!a_in_pack && !b_in_pack) return oidcmp(&a->idx.oid, &b->idx.oid); - if (a->in_pack < b->in_pack) + if (a_in_pack < b_in_pack) return -1; - if (a->in_pack > b->in_pack) + if (a_in_pack > b_in_pack) return 1; return a->in_pack_offset < b->in_pack_offset ? -1 : (a->in_pack_offset > b->in_pack_offset); @@@ -1605,37 -1554,30 +1605,37 @@@ */ static void drop_reused_delta(struct object_entry *entry) { - struct object_entry **p = &entry->delta->delta_child; + unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx; struct object_info oi = OBJECT_INFO_INIT; + enum object_type type; + unsigned long size; + + while (*idx) { + struct object_entry *oe = &to_pack.objects[*idx - 1]; - while (*p) { - if (*p == entry) - *p = (*p)->delta_sibling; + if (oe == entry) + *idx = oe->delta_sibling_idx; else - p = &(*p)->delta_sibling; + idx = &oe->delta_sibling_idx; } - entry->delta = NULL; + SET_DELTA(entry, NULL); entry->depth = 0; - oi.sizep = &entry->size; - oi.typep = &entry->type; - if (packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) < 0) { + oi.sizep = &size; + oi.typep = &type; + if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) { /* * We failed to get the info from this pack for some reason; * fall back to sha1_object_info, which may find another copy. - * And if that fails, the error will be recorded in entry->type + * And if that fails, the error will be recorded in oe_type(entry) * and dealt with in prepare_pack(). */ - entry->type = sha1_object_info(entry->idx.oid.hash, - &entry->size); + oe_set_type(entry, + oid_object_info(the_repository, &entry->idx.oid, &size)); + } else { + oe_set_type(entry, type); } + SET_SIZE(entry, size); } /* @@@ -1659,7 -1601,7 +1659,7 @@@ static void break_delta_chains(struct o for (cur = entry, total_depth = 0; cur; - cur = cur->delta, total_depth++) { + cur = DELTA(cur), total_depth++) { if (cur->dfs_state == DFS_DONE) { /* * We've already seen this object and know it isn't @@@ -1676,7 -1618,7 +1676,7 @@@ * is a bug. */ if (cur->dfs_state != DFS_NONE) - die("BUG: confusing delta dfs state in first pass: %d", + BUG("confusing delta dfs state in first pass: %d", cur->dfs_state); /* @@@ -1684,7 -1626,7 +1684,7 @@@ * it's not a delta, we're done traversing, but we'll mark it * done to save time on future traversals. */ - if (!cur->delta) { + if (!DELTA(cur)) { cur->dfs_state = DFS_DONE; break; } @@@ -1707,7 -1649,7 +1707,7 @@@ * We keep all commits in the chain that we examined. */ cur->dfs_state = DFS_ACTIVE; - if (cur->delta->dfs_state == DFS_ACTIVE) { + if (DELTA(cur)->dfs_state == DFS_ACTIVE) { drop_reused_delta(cur); cur->dfs_state = DFS_DONE; break; @@@ -1722,7 -1664,7 +1722,7 @@@ * an extra "next" pointer to keep going after we reset cur->delta. 
*/ for (cur = entry; cur; cur = next) { - next = cur->delta; + next = DELTA(cur); /* * We should have a chain of zero or more ACTIVE states down to @@@ -1733,7 -1675,7 +1733,7 @@@ if (cur->dfs_state == DFS_DONE) break; else if (cur->dfs_state != DFS_ACTIVE) - die("BUG: confusing delta dfs state in second pass: %d", + BUG("confusing delta dfs state in second pass: %d", cur->dfs_state); /* @@@ -1767,10 -1709,6 +1767,10 @@@ static void get_object_details(void uint32_t i; struct object_entry **sorted_by_offset; + if (progress) + progress_state = start_progress(_("Counting objects"), + to_pack.nr_objects); + sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *)); for (i = 0; i < to_pack.nr_objects; i++) sorted_by_offset[i] = to_pack.objects + i; @@@ -1779,12 -1717,9 +1779,12 @@@ for (i = 0; i < to_pack.nr_objects; i++) { struct object_entry *entry = sorted_by_offset[i]; check_object(entry); - if (big_file_threshold < entry->size) + if (entry->type_valid && + oe_size_greater_than(&to_pack, entry, big_file_threshold)) entry->no_try_delta = 1; + display_progress(progress_state, i + 1); } + stop_progress(&progress_state); /* * This must happen in a second pass, since we rely on the delta @@@ -1809,14 -1744,10 +1809,14 @@@ static int type_size_sort(const void *_ { const struct object_entry *a = *(struct object_entry **)_a; const struct object_entry *b = *(struct object_entry **)_b; + enum object_type a_type = oe_type(a); + enum object_type b_type = oe_type(b); + unsigned long a_size = SIZE(a); + unsigned long b_size = SIZE(b); - if (a->type > b->type) + if (a_type > b_type) return -1; - if (a->type < b->type) + if (a_type < b_type) return 1; if (a->hash > b->hash) return -1; @@@ -1826,9 -1757,9 +1826,9 @@@ return -1; if (a->preferred_base < b->preferred_base) return 1; - if (a->size > b->size) + if (a_size > b_size) return -1; - if (a->size < b->size) + if (a_size < b_size) return 1; return a < b ? -1 : (a > b); /* newest first */ } @@@ -1858,30 -1789,18 +1858,30 @@@ static int delta_cacheable(unsigned lon #ifndef NO_PTHREADS +/* Protect access to object database */ static pthread_mutex_t read_mutex; #define read_lock() pthread_mutex_lock(&read_mutex) #define read_unlock() pthread_mutex_unlock(&read_mutex) +/* Protect delta_cache_size */ static pthread_mutex_t cache_mutex; #define cache_lock() pthread_mutex_lock(&cache_mutex) #define cache_unlock() pthread_mutex_unlock(&cache_mutex) +/* + * Protect object list partitioning (e.g. struct thread_param) and + * progress_state + */ static pthread_mutex_t progress_mutex; #define progress_lock() pthread_mutex_lock(&progress_mutex) #define progress_unlock() pthread_mutex_unlock(&progress_mutex) +/* + * Access to struct object_entry is unprotected since each thread owns + * a portion of the main object list. Just don't access object entries + * ahead in the list because they can be stolen and would need + * progress_mutex for protection. + */ #else #define read_lock() (void)0 @@@ -1893,46 -1812,6 +1893,46 @@@ #endif +/* + * Return the size of the object without doing any delta + * reconstruction (so non-deltas are true object sizes, but deltas + * return the size of the delta data). 
+ */ +unsigned long oe_get_size_slow(struct packing_data *pack, + const struct object_entry *e) +{ + struct packed_git *p; + struct pack_window *w_curs; + unsigned char *buf; + enum object_type type; + unsigned long used, avail, size; + + if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) { + read_lock(); + if (oid_object_info(the_repository, &e->idx.oid, &size) < 0) + die(_("unable to get size of %s"), + oid_to_hex(&e->idx.oid)); + read_unlock(); + return size; + } + + p = oe_in_pack(pack, e); + if (!p) + BUG("when e->type is a delta, it must belong to a pack"); + + read_lock(); + w_curs = NULL; + buf = use_pack(p, &w_curs, e->in_pack_offset, &avail); + used = unpack_object_header_buffer(buf, avail, &type, &size); + if (used == 0) + die(_("unable to parse object header of %s"), + oid_to_hex(&e->idx.oid)); + + unuse_pack(&w_curs); + read_unlock(); + return size; +} + static int try_delta(struct unpacked *trg, struct unpacked *src, unsigned max_depth, unsigned long *mem_usage) { @@@ -1944,7 -1823,7 +1944,7 @@@ void *delta_buf; /* Don't bother doing diffs between different types */ - if (trg_entry->type != src_entry->type) + if (oe_type(trg_entry) != oe_type(src_entry)) return -1; /* @@@ -1955,8 -1834,8 +1955,8 @@@ * it, we will still save the transfer cost, as we already know * the other side has it and we won't send src_entry at all. */ - if (reuse_delta && trg_entry->in_pack && - trg_entry->in_pack == src_entry->in_pack && + if (reuse_delta && IN_PACK(trg_entry) && + IN_PACK(trg_entry) == IN_PACK(src_entry) && !src_entry->preferred_base && trg_entry->in_pack_type != OBJ_REF_DELTA && trg_entry->in_pack_type != OBJ_OFS_DELTA) @@@ -1967,19 -1846,19 +1967,19 @@@ return 0; /* Now some size filtering heuristics. */ - trg_size = trg_entry->size; - if (!trg_entry->delta) { - max_size = trg_size/2 - 20; + trg_size = SIZE(trg_entry); + if (!DELTA(trg_entry)) { + max_size = trg_size/2 - the_hash_algo->rawsz; ref_depth = 1; } else { - max_size = trg_entry->delta_size; + max_size = DELTA_SIZE(trg_entry); ref_depth = trg->depth; } max_size = (uint64_t)max_size * (max_depth - src->depth) / (max_depth - ref_depth + 1); if (max_size == 0) return 0; - src_size = src_entry->size; + src_size = SIZE(src_entry); sizediff = src_size < trg_size ? 
trg_size - src_size : 0; if (sizediff >= max_size) return 0; @@@ -1989,26 -1868,28 +1989,26 @@@ /* Load data if not already done */ if (!trg->data) { read_lock(); - trg->data = read_sha1_file(trg_entry->idx.oid.hash, &type, - &sz); + trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz); read_unlock(); if (!trg->data) - die("object %s cannot be read", + die(_("object %s cannot be read"), oid_to_hex(&trg_entry->idx.oid)); if (sz != trg_size) - die("object %s inconsistent object length (%lu vs %lu)", + die(_("object %s inconsistent object length (%lu vs %lu)"), oid_to_hex(&trg_entry->idx.oid), sz, trg_size); *mem_usage += sz; } if (!src->data) { read_lock(); - src->data = read_sha1_file(src_entry->idx.oid.hash, &type, - &sz); + src->data = read_object_file(&src_entry->idx.oid, &type, &sz); read_unlock(); if (!src->data) { if (src_entry->preferred_base) { static int warned = 0; if (!warned++) - warning("object %s cannot be read", + warning(_("object %s cannot be read"), oid_to_hex(&src_entry->idx.oid)); /* * Those objects are not included in the @@@ -2018,11 -1899,11 +2018,11 @@@ */ return 0; } - die("object %s cannot be read", + die(_("object %s cannot be read"), oid_to_hex(&src_entry->idx.oid)); } if (sz != src_size) - die("object %s inconsistent object length (%lu vs %lu)", + die(_("object %s inconsistent object length (%lu vs %lu)"), oid_to_hex(&src_entry->idx.oid), sz, src_size); *mem_usage += sz; @@@ -2032,7 -1913,7 +2032,7 @@@ if (!src->index) { static int warned = 0; if (!warned++) - warning("suboptimal pack - out of memory"); + warning(_("suboptimal pack - out of memory")); return 0; } *mem_usage += sizeof_delta_index(src->index); @@@ -2041,14 -1922,10 +2041,14 @@@ delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size); if (!delta_buf) return 0; + if (delta_size >= (1U << OE_DELTA_SIZE_BITS)) { + free(delta_buf); + return 0; + } - if (trg_entry->delta) { + if (DELTA(trg_entry)) { /* Prefer only shallower same-sized deltas. */ - if (delta_size == trg_entry->delta_size && + if (delta_size == DELTA_SIZE(trg_entry) && src->depth + 1 >= trg->depth) { free(delta_buf); return 0; @@@ -2063,7 -1940,7 +2063,7 @@@ free(trg_entry->delta_data); cache_lock(); if (trg_entry->delta_data) { - delta_cache_size -= trg_entry->delta_size; + delta_cache_size -= DELTA_SIZE(trg_entry); trg_entry->delta_data = NULL; } if (delta_cacheable(src_size, trg_size, delta_size)) { @@@ -2075,8 -1952,8 +2075,8 @@@ free(delta_buf); } - trg_entry->delta = src_entry; - trg_entry->delta_size = delta_size; + SET_DELTA(trg_entry, src_entry); + SET_DELTA_SIZE(trg_entry, delta_size); trg->depth = src->depth + 1; return 1; @@@ -2084,13 -1961,13 +2084,13 @@@ static unsigned int check_delta_limit(struct object_entry *me, unsigned int n) { - struct object_entry *child = me->delta_child; + struct object_entry *child = DELTA_CHILD(me); unsigned int m = n; while (child) { unsigned int c = check_delta_limit(child, n + 1); if (m < c) m = c; - child = child->delta_sibling; + child = DELTA_SIBLING(child); } return m; } @@@ -2101,7 -1978,7 +2101,7 @@@ static unsigned long free_unpacked(stru free_delta_index(n->index); n->index = NULL; if (n->data) { - freed_mem += n->entry->size; + freed_mem += SIZE(n->entry); FREE_AND_NULL(n->data); } n->entry = NULL; @@@ -2159,7 -2036,7 +2159,7 @@@ static void find_deltas(struct object_e * otherwise they would become too deep. 
*/ max_depth = depth; - if (entry->delta_child) { + if (DELTA_CHILD(entry)) { max_depth -= check_delta_limit(entry, 0); if (max_depth <= 0) goto next; @@@ -2197,26 -2074,19 +2197,26 @@@ * between writes at that moment. */ if (entry->delta_data && !pack_to_stdout) { - entry->z_delta_size = do_compress(&entry->delta_data, - entry->delta_size); - cache_lock(); - delta_cache_size -= entry->delta_size; - delta_cache_size += entry->z_delta_size; - cache_unlock(); + unsigned long size; + + size = do_compress(&entry->delta_data, DELTA_SIZE(entry)); + if (size < (1U << OE_Z_DELTA_BITS)) { + entry->z_delta_size = size; + cache_lock(); + delta_cache_size -= DELTA_SIZE(entry); + delta_cache_size += entry->z_delta_size; + cache_unlock(); + } else { + FREE_AND_NULL(entry->delta_data); + entry->z_delta_size = 0; + } } /* if we made n a delta, and if n is already at max * depth, leaving it in the window is pointless. we * should evict it first. */ - if (entry->delta && max_depth <= n->depth) + if (DELTA(entry) && max_depth <= n->depth) continue; /* @@@ -2224,7 -2094,7 +2224,7 @@@ * currently deltified object, to keep it longer. It will * be the first base object to be attempted next. */ - if (entry->delta) { + if (DELTA(entry)) { struct unpacked swap = array[best_base]; int dist = (window + idx - best_base) % window; int dst = best_base; @@@ -2263,19 -2133,12 +2263,19 @@@ static void try_to_free_from_threads(si static try_to_free_t old_try_to_free_routine; /* + * The main object list is split into smaller lists, each is handed to + * one worker. + * * The main thread waits on the condition that (at least) one of the workers * has stopped working (which is indicated in the .working member of * struct thread_params). + * * When a work thread has completed its work, it sets .working to 0 and * signals the main thread and waits on the condition that .data_ready * becomes 1. + * + * The main thread steals half of the work from the worker that has + * most work left to hand it to the idle worker. */ struct thread_params { @@@ -2366,8 -2229,8 +2366,8 @@@ static void ll_find_deltas(struct objec return; } if (progress > pack_to_stdout) - fprintf(stderr, "Delta compression using up to %d threads.\n", - delta_search_threads); + fprintf_ln(stderr, _("Delta compression using up to %d threads"), + delta_search_threads); p = xcalloc(delta_search_threads, sizeof(*p)); /* Partition the work amongst work threads. */ @@@ -2407,7 -2270,7 +2407,7 @@@ ret = pthread_create(&p[i].thread, NULL, threaded_find_deltas, &p[i]); if (ret) - die("unable to create thread: %s", strerror(ret)); + die(_("unable to create thread: %s"), strerror(ret)); active_threads++; } @@@ -2499,10 -2362,10 +2499,10 @@@ static void add_tag_chain(const struct if (packlist_find(&to_pack, oid->hash, NULL)) return; - tag = lookup_tag(oid); + tag = lookup_tag(the_repository, oid); while (1) { if (!tag || parse_tag(tag) || !tag->tagged) - die("unable to pack objects reachable from tag %s", + die(_("unable to pack objects reachable from tag %s"), oid_to_hex(oid)); add_object_entry(&tag->object.oid, OBJ_TAG, NULL, 0); @@@ -2552,14 -2415,13 +2552,14 @@@ static void prepare_pack(int window, in for (i = 0; i < to_pack.nr_objects; i++) { struct object_entry *entry = to_pack.objects + i; - if (entry->delta) + if (DELTA(entry)) /* This happens if we decided to reuse existing * delta from a pack. "reuse_delta &&" is implied. 
*/ continue; - if (entry->size < 50) + if (!entry->type_valid || + oe_size_less_than(&to_pack, entry, 50)) continue; if (entry->no_try_delta) @@@ -2567,11 -2429,11 +2567,11 @@@ if (!entry->preferred_base) { nr_deltas++; - if (entry->type < 0) - die("unable to get type of object %s", + if (oe_type(entry) < 0) + die(_("unable to get type of object %s"), oid_to_hex(&entry->idx.oid)); } else { - if (entry->type < 0) { + if (oe_type(entry) < 0) { /* * This object is not found, but we * don't have to include it anyway. @@@ -2592,7 -2454,7 +2592,7 @@@ ll_find_deltas(delta_list, n, window+1, depth, &nr_done); stop_progress(&progress_state); if (nr_done != nr_deltas) - die("inconsistency with delta count"); + die(_("inconsistency with delta count")); } free(delta_list); } @@@ -2632,11 -2494,11 +2632,11 @@@ static int git_pack_config(const char * if (!strcmp(k, "pack.threads")) { delta_search_threads = git_config_int(k, v); if (delta_search_threads < 0) - die("invalid number of threads specified (%d)", + die(_("invalid number of threads specified (%d)"), delta_search_threads); #ifdef NO_PTHREADS if (delta_search_threads != 1) { - warning("no threads support, ignoring %s", k); + warning(_("no threads support, ignoring %s"), k); delta_search_threads = 0; } #endif @@@ -2645,7 -2507,7 +2645,7 @@@ if (!strcmp(k, "pack.indexversion")) { pack_idx_opts.version = git_config_int(k, v); if (pack_idx_opts.version > 2) - die("bad pack.indexversion=%"PRIu32, + die(_("bad pack.indexversion=%"PRIu32), pack_idx_opts.version); return 0; } @@@ -2663,7 -2525,7 +2663,7 @@@ static void read_object_list_from_stdin if (feof(stdin)) break; if (!ferror(stdin)) - die("fgets returned NULL, not EOF, not error!"); + die("BUG: fgets returned NULL, not EOF, not error!"); if (errno != EINTR) die_errno("fgets"); clearerr(stdin); @@@ -2671,20 -2533,19 +2671,20 @@@ } if (line[0] == '-') { if (get_oid_hex(line+1, &oid)) - die("expected edge object ID, got garbage:\n %s", + die(_("expected edge object ID, got garbage:\n %s"), line); add_preferred_base(&oid); continue; } if (parse_oid_hex(line, &oid, &p)) - die("expected object ID, got garbage:\n %s", line); + die(_("expected object ID, got garbage:\n %s"), line); add_preferred_base_object(p + 1); - add_object_entry(&oid, 0, p + 1, 0); + add_object_entry(&oid, OBJ_NONE, p + 1, 0); } } +/* Remember to update object flag allocation in object.h */ #define OBJECT_ADDED (1u<<20) static void show_commit(struct commit *commit, void *data) @@@ -2717,20 -2578,6 +2717,20 @@@ static void show_object__ma_allow_any(s show_object(obj, name, data); } +static void show_object__ma_allow_promisor(struct object *obj, const char *name, void *data) +{ + assert(arg_missing_action == MA_ALLOW_PROMISOR); + + /* + * Quietly ignore EXPECTED missing objects. This avoids problems with + * staging them now and getting an odd error later. 
+ */ + if (!has_object_file(&obj->oid) && is_promisor_object(&obj->oid)) + return; + + show_object(obj, name, data); +} + static int option_parse_missing_action(const struct option *opt, const char *arg, int unset) { @@@ -2745,18 -2592,10 +2745,18 @@@ if (!strcmp(arg, "allow-any")) { arg_missing_action = MA_ALLOW_ANY; + fetch_if_missing = 0; fn_show_object = show_object__ma_allow_any; return 0; } + if (!strcmp(arg, "allow-promisor")) { + arg_missing_action = MA_ALLOW_PROMISOR; + fetch_if_missing = 0; + fn_show_object = show_object__ma_allow_promisor; + return 0; + } + die(_("invalid value for --missing")); return 0; } @@@ -2809,14 -2648,14 +2809,14 @@@ static void add_objects_in_unpacked_pac memset(&in_pack, 0, sizeof(in_pack)); - for (p = packed_git; p; p = p->next) { + for (p = get_packed_git(the_repository); p; p = p->next) { struct object_id oid; struct object *o; - if (!p->pack_local || p->pack_keep) + if (!p->pack_local || p->pack_keep || p->pack_keep_in_core) continue; if (open_pack_index(p)) - die("cannot open pack index"); + die(_("cannot open pack index")); ALLOC_GROW(in_pack.array, in_pack.nr + p->num_objects, @@@ -2844,10 -2683,10 +2844,10 @@@ static int add_loose_object(const struct object_id *oid, const char *path, void *data) { - enum object_type type = sha1_object_info(oid->hash, NULL); + enum object_type type = oid_object_info(the_repository, oid, NULL); if (type < 0) { - warning("loose object at %s could not be examined", path); + warning(_("loose object at %s could not be examined"), path); return 0; } @@@ -2872,18 -2711,16 +2872,18 @@@ static int has_sha1_pack_kept_or_nonloc static struct packed_git *last_found = (void *)1; struct packed_git *p; - p = (last_found != (void *)1) ? last_found : packed_git; + p = (last_found != (void *)1) ? 
last_found : + get_packed_git(the_repository); while (p) { - if ((!p->pack_local || p->pack_keep) && + if ((!p->pack_local || p->pack_keep || + p->pack_keep_in_core) && find_pack_entry_one(oid->hash, p)) { last_found = p; return 1; } if (p == last_found) - p = packed_git; + p = get_packed_git(the_repository); else p = p->next; if (p == last_found) @@@ -2919,20 -2756,20 +2919,20 @@@ static void loosen_unused_packed_object uint32_t i; struct object_id oid; - for (p = packed_git; p; p = p->next) { - if (!p->pack_local || p->pack_keep) + for (p = get_packed_git(the_repository); p; p = p->next) { + if (!p->pack_local || p->pack_keep || p->pack_keep_in_core) continue; if (open_pack_index(p)) - die("cannot open pack index"); + die(_("cannot open pack index")); for (i = 0; i < p->num_objects; i++) { nth_packed_object_oid(&oid, p, i); if (!packlist_find(&to_pack, oid.hash, NULL) && !has_sha1_pack_kept_or_nonlocal(&oid) && !loosened_object_can_be_discarded(&oid, p->mtime)) - if (force_object_loose(oid.hash, p->mtime)) - die("unable to force loose object"); + if (force_object_loose(&oid, p->mtime)) + die(_("unable to force loose object")); } } } @@@ -2946,21 -2783,18 +2946,21 @@@ static int pack_options_allow_reuse(voi { return pack_to_stdout && allow_ofs_delta && - !ignore_packed_keep && + !ignore_packed_keep_on_disk && + !ignore_packed_keep_in_core && (!local || !have_non_local_packs) && !incremental; } static int get_object_list_from_bitmap(struct rev_info *revs) { - if (prepare_bitmap_walk(revs) < 0) + struct bitmap_index *bitmap_git; + if (!(bitmap_git = prepare_bitmap_walk(revs))) return -1; if (pack_options_allow_reuse() && !reuse_partial_packfile_from_bitmap( + bitmap_git, &reuse_packfile, &reuse_packfile_objects, &reuse_packfile_offset)) { @@@ -2969,8 -2803,7 +2969,8 @@@ display_progress(progress_state, nr_result); } - traverse_bitmap_commit_list(&add_object_entry_from_bitmap); + traverse_bitmap_commit_list(bitmap_git, &add_object_entry_from_bitmap); + free_bitmap_index(bitmap_git); return 0; } @@@ -2997,7 -2830,7 +2997,7 @@@ static void get_object_list(int ac, con setup_revisions(ac, av, &revs, NULL); /* make sure shallows are read */ - is_repository_shallow(); + is_repository_shallow(the_repository); while (fgets(line, sizeof(line), stdin) != NULL) { int len = strlen(line); @@@ -3015,21 -2848,21 +3015,21 @@@ struct object_id oid; if (get_oid_hex(line + 10, &oid)) die("not an SHA-1 '%s'", line + 10); - register_shallow(&oid); + register_shallow(the_repository, &oid); use_bitmap_index = 0; continue; } - die("not a rev '%s'", line); + die(_("not a rev '%s'"), line); } if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME)) - die("bad revision '%s'", line); + die(_("bad revision '%s'"), line); } if (use_bitmap_index && !get_object_list_from_bitmap(&revs)) return; if (prepare_revision_walk(&revs)) - die("revision walk setup failed"); + die(_("revision walk setup failed")); mark_edges_uninteresting(&revs, show_edge); if (!fn_show_object) @@@ -3042,9 -2875,9 +3042,9 @@@ revs.ignore_missing_links = 1; if (add_unseen_recent_objects_to_traversal(&revs, unpack_unreachable_expiration)) - die("unable to add recent objects"); + die(_("unable to add recent objects")); if (prepare_revision_walk(&revs)) - die("revision walk setup failed"); + die(_("revision walk setup failed")); traverse_commit_list(&revs, record_recent_commit, record_recent_object, NULL); } @@@ -3059,32 -2892,6 +3059,32 @@@ oid_array_clear(&recent_objects); } +static void add_extra_kept_packs(const struct string_list 
*names) +{ + struct packed_git *p; + + if (!names->nr) + return; + + for (p = get_packed_git(the_repository); p; p = p->next) { + const char *name = basename(p->pack_name); + int i; + + if (!p->pack_local) + continue; + + for (i = 0; i < names->nr; i++) + if (!fspathcmp(name, names->items[i].string)) + break; + + if (i < names->nr) { + p->pack_keep_in_core = 1; + ignore_packed_keep_in_core = 1; + continue; + } + } +} + static int option_parse_index_version(const struct option *opt, const char *arg, int unset) { @@@ -3124,7 -2931,6 +3124,7 @@@ int cmd_pack_objects(int argc, const ch struct argv_array rp = ARGV_ARRAY_INIT; int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0; int rev_list_index = 0; + struct string_list keep_pack_list = STRING_LIST_INIT_NODUP; struct option pack_objects_options[] = { OPT_SET_INT('q', "quiet", &progress, N_("do not show progress meter"), 0), @@@ -3135,7 -2941,7 +3135,7 @@@ OPT_BOOL(0, "all-progress-implied", &all_progress_implied, N_("similar to --all-progress when progress meter is shown")), - { OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"), + { OPTION_CALLBACK, 0, "index-version", NULL, N_("[,]"), N_("write the pack index file in the specified idx format version"), 0, option_parse_index_version }, OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit, @@@ -3162,18 -2968,18 +3162,18 @@@ N_("do not create an empty pack output")), OPT_BOOL(0, "revs", &use_internal_rev_list, N_("read revision arguments from standard input")), - { OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL, - N_("limit the objects to those that are not yet packed"), - PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 }, - { OPTION_SET_INT, 0, "all", &rev_list_all, NULL, - N_("include objects reachable from any reference"), - PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 }, - { OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL, - N_("include objects referred by reflog entries"), - PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 }, - { OPTION_SET_INT, 0, "indexed-objects", &rev_list_index, NULL, - N_("include objects referred to by the index"), - PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 }, + OPT_SET_INT_F(0, "unpacked", &rev_list_unpacked, + N_("limit the objects to those that are not yet packed"), + 1, PARSE_OPT_NONEG), + OPT_SET_INT_F(0, "all", &rev_list_all, + N_("include objects reachable from any reference"), + 1, PARSE_OPT_NONEG), + OPT_SET_INT_F(0, "reflog", &rev_list_reflog, + N_("include objects referred by reflog entries"), + 1, PARSE_OPT_NONEG), + OPT_SET_INT_F(0, "indexed-objects", &rev_list_index, + N_("include objects referred to by the index"), + 1, PARSE_OPT_NONEG), OPT_BOOL(0, "stdout", &pack_to_stdout, N_("output pack to stdout")), OPT_BOOL(0, "include-tag", &include_tag, @@@ -3189,10 -2995,8 +3189,10 @@@ N_("create thin packs")), OPT_BOOL(0, "shallow", &shallow, N_("create packs suitable for shallow fetches")), - OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep, + OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep_on_disk, N_("ignore packs that have companion .keep file")), + OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"), + N_("ignore this pack")), OPT_INTEGER(0, "compression", &pack_compression_level, N_("pack compression level")), OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents, @@@ -3205,15 -3009,10 +3205,15 @@@ { OPTION_CALLBACK, 0, "missing", NULL, N_("action"), N_("handling for missing objects"), PARSE_OPT_NONEG, option_parse_missing_action }, + OPT_BOOL(0, "exclude-promisor-objects", &exclude_promisor_objects, + 
N_("do not pack objects in promisor packfiles")), OPT_END(), }; - check_replace_refs = 0; + if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS)) + BUG("too many dfs states, increase OE_DFS_STATE_BITS"); + + read_replace_refs = 0; reset_pack_idx_option(&pack_idx_opts); git_config(git_pack_config, NULL); @@@ -3229,17 -3028,6 +3229,17 @@@ if (pack_to_stdout != !base_name || argc) usage_with_options(pack_usage, pack_objects_options); + if (depth >= (1 << OE_DEPTH_BITS)) { + warning(_("delta chain depth %d is too deep, forcing %d"), + depth, (1 << OE_DEPTH_BITS) - 1); + depth = (1 << OE_DEPTH_BITS) - 1; + } + if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) { + warning(_("pack.deltaCacheLimit is too high, forcing %d"), + (1U << OE_Z_DELTA_BITS) - 1); + cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1; + } + argv_array_push(&rp, "pack-objects"); if (thin) { use_internal_rev_list = 1; @@@ -3266,48 -3054,40 +3266,48 @@@ argv_array_push(&rp, "--unpacked"); } + if (exclude_promisor_objects) { + use_internal_rev_list = 1; + fetch_if_missing = 0; + argv_array_push(&rp, "--exclude-promisor-objects"); + } + if (unpack_unreachable || keep_unreachable || pack_loose_unreachable) + use_internal_rev_list = 1; + if (!reuse_object) reuse_delta = 0; if (pack_compression_level == -1) pack_compression_level = Z_DEFAULT_COMPRESSION; else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION) - die("bad pack compression level %d", pack_compression_level); + die(_("bad pack compression level %d"), pack_compression_level); if (!delta_search_threads) /* --threads=0 means autodetect */ delta_search_threads = online_cpus(); #ifdef NO_PTHREADS if (delta_search_threads != 1) - warning("no threads support, ignoring --threads"); + warning(_("no threads support, ignoring --threads")); #endif if (!pack_to_stdout && !pack_size_limit) pack_size_limit = pack_size_limit_cfg; if (pack_to_stdout && pack_size_limit) - die("--max-pack-size cannot be used to build a pack for transfer."); + die(_("--max-pack-size cannot be used to build a pack for transfer")); if (pack_size_limit && pack_size_limit < 1024*1024) { - warning("minimum pack size limit is 1 MiB"); + warning(_("minimum pack size limit is 1 MiB")); pack_size_limit = 1024*1024; } if (!pack_to_stdout && thin) - die("--thin cannot be used to build an indexable pack."); + die(_("--thin cannot be used to build an indexable pack")); if (keep_unreachable && unpack_unreachable) - die("--keep-unreachable and --unpack-unreachable are incompatible."); + die(_("--keep-unreachable and --unpack-unreachable are incompatible")); if (!rev_list_all || !rev_list_reflog || !rev_list_index) unpack_unreachable_expiration = 0; if (filter_options.choice) { if (!pack_to_stdout) - die("cannot use --filter without --stdout."); + die(_("cannot use --filter without --stdout")); use_bitmap_index = 0; } @@@ -3327,7 -3107,7 +3327,7 @@@ use_bitmap_index = use_bitmap_index_default; /* "hard" reasons not to use bitmaps; these just won't work at all */ - if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow()) + if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow(the_repository)) use_bitmap_index = 0; if (pack_to_stdout || !rev_list_all) @@@ -3336,23 -3116,23 +3336,23 @@@ if (progress && all_progress_implied) progress = 2; - prepare_packed_git(); - if (ignore_packed_keep) { + add_extra_kept_packs(&keep_pack_list); + if (ignore_packed_keep_on_disk) { struct packed_git *p; - for (p = packed_git; 
p; p = p->next) + for (p = get_packed_git(the_repository); p; p = p->next) if (p->pack_local && p->pack_keep) break; if (!p) /* no keep-able packs found */ - ignore_packed_keep = 0; + ignore_packed_keep_on_disk = 0; } if (local) { /* - * unlike ignore_packed_keep above, we do not want to - * unset "local" based on looking at packs, as it - * also covers non-local objects + * unlike ignore_packed_keep_on_disk above, we do not + * want to unset "local" based on looking at packs, as + * it also covers non-local objects */ struct packed_git *p; - for (p = packed_git; p; p = p->next) { + for (p = get_packed_git(the_repository); p; p = p->next) { if (!p->pack_local) { have_non_local_packs = 1; break; @@@ -3360,10 -3140,8 +3360,10 @@@ } } + prepare_packing_data(&to_pack); + if (progress) - progress_state = start_progress(_("Counting objects"), 0); + progress_state = start_progress(_("Enumerating objects"), 0); if (!use_internal_rev_list) read_object_list_from_stdin(); else { @@@ -3381,9 -3159,8 +3381,9 @@@ prepare_pack(window, depth); write_pack_file(); if (progress) - fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32")," - " reused %"PRIu32" (delta %"PRIu32")\n", - written, written_delta, reused, reused_delta); + fprintf_ln(stderr, + _("Total %"PRIu32" (delta %"PRIu32")," + " reused %"PRIu32" (delta %"PRIu32")"), + written, written_delta, reused, reused_delta); return 0; } diff --combined builtin/push.c index 9cd8e8cd56,ef4032a9ef..ef4c188895 --- a/builtin/push.c +++ b/builtin/push.c @@@ -4,7 -4,6 +4,7 @@@ #include "cache.h" #include "config.h" #include "refs.h" +#include "refspec.h" #include "run-command.h" #include "builtin.h" #include "remote.h" @@@ -13,40 -12,12 +13,40 @@@ #include "submodule.h" #include "submodule-config.h" #include "send-pack.h" +#include "color.h" static const char * const push_usage[] = { N_("git push [] [ [...]]"), NULL, }; +static int push_use_color = -1; +static char push_colors[][COLOR_MAXLEN] = { + GIT_COLOR_RESET, + GIT_COLOR_RED, /* ERROR */ +}; + +enum color_push { + PUSH_COLOR_RESET = 0, + PUSH_COLOR_ERROR = 1 +}; + +static int parse_push_color_slot(const char *slot) +{ + if (!strcasecmp(slot, "reset")) + return PUSH_COLOR_RESET; + if (!strcasecmp(slot, "error")) + return PUSH_COLOR_ERROR; + return -1; +} + +static const char *push_get_color(enum color_push ix) +{ + if (want_color_stderr(push_use_color)) + return push_colors[ix]; + return ""; +} + static int thin = 1; static int deleterefs; static const char *receivepack; @@@ -57,10 -28,19 +57,10 @@@ static enum transport_family family static struct push_cas_option cas; -static const char **refspec; -static int refspec_nr; -static int refspec_alloc; +static struct refspec rs = REFSPEC_INIT_PUSH; static struct string_list push_options_config = STRING_LIST_INIT_DUP; -static void add_refspec(const char *ref) -{ - refspec_nr++; - ALLOC_GROW(refspec, refspec_nr, refspec_alloc); - refspec[refspec_nr-1] = ref; -} - static const char *map_refspec(const char *ref, struct remote *remote, struct ref *local_refs) { @@@ -70,11 -50,12 +70,11 @@@ if (count_refspec_match(ref, local_refs, &matched) != 1) return ref; - if (remote->push) { - struct refspec query; - memset(&query, 0, sizeof(struct refspec)); + if (remote->push.nr) { + struct refspec_item query; + memset(&query, 0, sizeof(struct refspec_item)); query.src = matched->name; - if (!query_refspecs(remote->push, remote->push_refspec_nr, &query) && - query.dst) { + if (!query_refspecs(&remote->push, &query) && query.dst) { struct strbuf buf = STRBUF_INIT; strbuf_addf(&buf, 
"%s%s:%s", query.force ? "+" : "", @@@ -129,7 -110,7 +129,7 @@@ static void set_refspecs(const char **r } ref = map_refspec(ref, remote, local_refs); } - add_refspec(ref); + refspec_append(&rs, ref); } } @@@ -217,7 -198,7 +217,7 @@@ static void setup_push_upstream(struct } strbuf_addf(&refspec, "%s:%s", branch->refname, branch->merge[0]->src); - add_refspec(refspec.buf); + refspec_append(&rs, refspec.buf); } static void setup_push_current(struct remote *remote, struct branch *branch) @@@ -227,7 -208,7 +227,7 @@@ if (!branch) die(_(message_detached_head_die), remote->name); strbuf_addf(&refspec, "%s:%s", branch->refname, branch->refname); - add_refspec(refspec.buf); + refspec_append(&rs, refspec.buf); } static int is_workflow_triangular(struct remote *remote) @@@ -244,7 -225,7 +244,7 @@@ static void setup_default_push_refspecs switch (push_default) { default: case PUSH_DEFAULT_MATCHING: - add_refspec(":"); + refspec_append(&rs, ":"); break; case PUSH_DEFAULT_UNSPECIFIED: @@@ -332,8 -313,7 +332,8 @@@ static void advise_ref_needs_force(void advise(_(message_advice_ref_needs_force)); } -static int push_with_options(struct transport *transport, int flags) +static int push_with_options(struct transport *transport, struct refspec *rs, + int flags) { int err; unsigned int reject_reasons; @@@ -355,12 -335,10 +355,12 @@@ if (verbosity > 0) fprintf(stderr, _("Pushing to %s\n"), transport->url); - err = transport_push(transport, refspec_nr, refspec, flags, - &reject_reasons); - if (err != 0) + err = transport_push(transport, rs, flags, &reject_reasons); + if (err != 0) { + fprintf(stderr, "%s", push_get_color(PUSH_COLOR_ERROR)); error(_("failed to push some refs to '%s'"), transport->url); + fprintf(stderr, "%s", push_get_color(PUSH_COLOR_RESET)); + } err |= transport_disconnect(transport); if (!err) @@@ -388,7 -366,6 +388,7 @@@ static int do_push(const char *repo, in struct remote *remote = pushremote_get(repo); const char **url; int url_nr; + struct refspec *push_refspec = &rs; if (!remote) { if (repo) @@@ -409,9 -386,27 +409,9 @@@ if (push_options->nr) flags |= TRANSPORT_PUSH_OPTIONS; - if ((flags & TRANSPORT_PUSH_ALL) && refspec) { - if (!strcmp(*refspec, "refs/tags/*")) - return error(_("--all and --tags are incompatible")); - return error(_("--all can't be combined with refspecs")); - } - - if ((flags & TRANSPORT_PUSH_MIRROR) && refspec) { - if (!strcmp(*refspec, "refs/tags/*")) - return error(_("--mirror and --tags are incompatible")); - return error(_("--mirror can't be combined with refspecs")); - } - - if ((flags & (TRANSPORT_PUSH_ALL|TRANSPORT_PUSH_MIRROR)) == - (TRANSPORT_PUSH_ALL|TRANSPORT_PUSH_MIRROR)) { - return error(_("--all and --mirror are incompatible")); - } - - if (!refspec && !(flags & TRANSPORT_PUSH_ALL)) { - if (remote->push_refspec_nr) { - refspec = remote->push_refspec; - refspec_nr = remote->push_refspec_nr; + if (!push_refspec->nr && !(flags & TRANSPORT_PUSH_ALL)) { + if (remote->push.nr) { + push_refspec = &remote->push; } else if (!(flags & TRANSPORT_PUSH_MIRROR)) setup_default_push_refspecs(remote); } @@@ -423,7 -418,7 +423,7 @@@ transport_get(remote, url[i]); if (flags & TRANSPORT_PUSH_OPTIONS) transport->push_options = push_options; - if (push_with_options(transport, flags)) + if (push_with_options(transport, push_refspec, flags)) errs++; } } else { @@@ -431,7 -426,7 +431,7 @@@ transport_get(remote, NULL); if (flags & TRANSPORT_PUSH_OPTIONS) transport->push_options = push_options; - if (push_with_options(transport, flags)) + if (push_with_options(transport, 
push_refspec, flags)) errs++; } return !!errs; @@@ -472,7 -467,6 +472,7 @@@ static void set_push_cert_flags(int *fl static int git_push_config(const char *k, const char *v, void *cb) { + const char *slot_name; int *flags = cb; int status; @@@ -520,16 -514,6 +520,16 @@@ else string_list_append(&push_options_config, v); return 0; + } else if (!strcmp(k, "color.push")) { + push_use_color = git_config_colorbool(k, v); + return 0; + } else if (skip_prefix(k, "color.push.", &slot_name)) { + int slot = parse_push_color_slot(slot_name); + if (slot < 0) + return 0; + if (!v) + return config_error_nonbool(k); + return color_parse(v, push_colors[slot]); } return git_default_config(k, v, NULL); @@@ -558,13 -542,13 +558,13 @@@ int cmd_push(int argc, const char **arg OPT_BIT( 0, "porcelain", &flags, N_("machine-readable output"), TRANSPORT_PUSH_PORCELAIN), OPT_BIT('f', "force", &flags, N_("force updates"), TRANSPORT_PUSH_FORCE), { OPTION_CALLBACK, - 0, CAS_OPT_NAME, &cas, N_("refname>::"), N_("require old value of ref to be at this value"), - PARSE_OPT_OPTARG, parseopt_push_cas_option }, + PARSE_OPT_OPTARG | PARSE_OPT_LITERAL_ARGHELP, parseopt_push_cas_option }, { OPTION_CALLBACK, 0, "recurse-submodules", &recurse_submodules, "check|on-demand|no", N_("control recursive pushing of submodules"), PARSE_OPT_OPTARG, option_parse_recurse_submodules }, - OPT_BOOL( 0 , "thin", &thin, N_("use thin pack")), + OPT_BOOL_F( 0 , "thin", &thin, N_("use thin pack"), PARSE_OPT_NOCOMPLETE), OPT_STRING( 0 , "receive-pack", &receivepack, "receive-pack", N_("receive pack program")), OPT_STRING( 0 , "exec", &receivepack, "receive-pack", N_("receive pack program")), OPT_BIT('u', "set-upstream", &flags, N_("set upstream for git pull/status"), @@@ -599,20 -583,6 +599,20 @@@ die(_("--delete is incompatible with --all, --mirror and --tags")); if (deleterefs && argc < 2) die(_("--delete doesn't make sense without any refs")); + if (flags & TRANSPORT_PUSH_ALL) { + if (tags) + die(_("--all and --tags are incompatible")); + if (argc >= 2) + die(_("--all can't be combined with refspecs")); + } + if (flags & TRANSPORT_PUSH_MIRROR) { + if (tags) + die(_("--mirror and --tags are incompatible")); + if (argc >= 2) + die(_("--mirror can't be combined with refspecs")); + } + if ((flags & TRANSPORT_PUSH_ALL) && (flags & TRANSPORT_PUSH_MIRROR)) + die(_("--all and --mirror are incompatible")); if (recurse_submodules == RECURSE_SUBMODULES_CHECK) flags |= TRANSPORT_RECURSE_SUBMODULES_CHECK; @@@ -622,7 -592,7 +622,7 @@@ flags |= TRANSPORT_RECURSE_SUBMODULES_ONLY; if (tags) - add_refspec("refs/tags/*"); + refspec_append(&rs, "refs/tags/*"); if (argc > 0) { repo = argv[0]; diff --combined builtin/read-tree.c index ebc43eb805,3a7fd6394e..fbbc98e516 --- a/builtin/read-tree.c +++ b/builtin/read-tree.c @@@ -107,6 -107,8 +107,6 @@@ static int git_read_tree_config(const c return git_default_config(var, value, cb); } -static struct lock_file lock_file; - int cmd_read_tree(int argc, const char **argv, const char *unused_prefix) { int i, stage = 0; @@@ -114,7 -116,6 +114,7 @@@ struct tree_desc t[MAX_UNPACK_TREES]; struct unpack_trees_options opts; int prefix_set = 0; + struct lock_file lock_file = LOCK_INIT; const struct option read_tree_options[] = { { OPTION_CALLBACK, 0, "index-output", NULL, N_("file"), N_("write resulting index to "), @@@ -133,7 -134,7 +133,7 @@@ N_("same as -m, but discard unmerged entries")), { OPTION_STRING, 0, "prefix", &opts.prefix, N_("/"), N_("read the tree into the index under /"), - PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP }, 
+ PARSE_OPT_NONEG }, OPT_BOOL('u', NULL, &opts.update, N_("update working tree with merge result")), { OPTION_CALLBACK, 0, "exclude-per-directory", &opts, diff --combined builtin/send-pack.c index 42fd8d1a35,a4f7dea115..724b484850 --- a/builtin/send-pack.c +++ b/builtin/send-pack.c @@@ -14,7 -14,6 +14,7 @@@ #include "sha1-array.h" #include "gpg-interface.h" #include "gettext.h" +#include "protocol.h" static const char * const send_pack_usage[] = { N_("git send-pack [--all | --mirror] [--dry-run] [--force] " @@@ -121,12 -120,13 +121,12 @@@ static int send_pack_config(const char } } } - return 0; + return git_default_config(k, v, cb); } int cmd_send_pack(int argc, const char **argv, const char *prefix) { - int i, nr_refspecs = 0; - const char **refspecs = NULL; + struct refspec rs = REFSPEC_INIT_PUSH; const char *remote_name = NULL; struct remote *remote = NULL; const char *dest = NULL; @@@ -154,7 -154,6 +154,7 @@@ int progress = -1; int from_stdin = 0; struct push_cas_option cas = {0}; + struct packet_reader reader; struct option options[] = { OPT__VERBOSITY(&verbose), @@@ -178,7 -177,7 +178,7 @@@ OPT_BOOL(0, "stdin", &from_stdin, N_("read refs from stdin")), OPT_BOOL(0, "helper-status", &helper_status, N_("print status from remote helper")), { OPTION_CALLBACK, - 0, CAS_OPT_NAME, &cas, N_("refname>::"), N_("require old value of ref to be at this value"), PARSE_OPT_OPTARG, parseopt_push_cas_option }, OPT_END() @@@ -188,7 -187,8 +188,7 @@@ argc = parse_options(argc, argv, prefix, options, send_pack_usage, 0); if (argc > 0) { dest = argv[0]; - refspecs = (const char **)(argv + 1); - nr_refspecs = argc - 1; + refspec_appendn(&rs, argv + 1, argc - 1); } if (!dest) @@@ -207,23 -207,31 +207,23 @@@ args.push_options = push_options.nr ? &push_options : NULL; if (from_stdin) { - struct argv_array all_refspecs = ARGV_ARRAY_INIT; - - for (i = 0; i < nr_refspecs; i++) - argv_array_push(&all_refspecs, refspecs[i]); - if (args.stateless_rpc) { const char *buf; while ((buf = packet_read_line(0, NULL))) - argv_array_push(&all_refspecs, buf); + refspec_append(&rs, buf); } else { struct strbuf line = STRBUF_INIT; while (strbuf_getline(&line, stdin) != EOF) - argv_array_push(&all_refspecs, line.buf); + refspec_append(&rs, line.buf); strbuf_release(&line); } - - refspecs = all_refspecs.argv; - nr_refspecs = all_refspecs.argc; } /* * --all and --mirror are incompatible; neither makes sense * with any refspecs. */ - if ((nr_refspecs > 0 && (send_all || args.send_mirror)) || + if ((rs.nr > 0 && (send_all || args.send_mirror)) || (send_all && args.send_mirror)) usage_with_options(send_pack_usage, options); @@@ -248,22 -256,10 +248,22 @@@ args.verbose ? 
CONNECT_VERBOSE : 0); } - get_remote_heads(fd[0], NULL, 0, &remote_refs, REF_NORMAL, - &extra_have, &shallow); - - transport_verify_remote_names(nr_refspecs, refspecs); + packet_reader_init(&reader, fd[0], NULL, 0, + PACKET_READ_CHOMP_NEWLINE | + PACKET_READ_GENTLE_ON_EOF); + + switch (discover_version(&reader)) { + case protocol_v2: + die("support for protocol v2 not implemented yet"); + break; + case protocol_v1: + case protocol_v0: + get_remote_heads(&reader, &remote_refs, REF_NORMAL, + &extra_have, &shallow); + break; + case protocol_unknown_version: + BUG("unknown protocol version"); + } local_refs = get_local_heads(); @@@ -275,7 -271,7 +275,7 @@@ flags |= MATCH_REFS_MIRROR; /* match them up */ - if (match_push_refs(local_refs, &remote_refs, nr_refspecs, refspecs, flags)) + if (match_push_refs(local_refs, &remote_refs, &rs, flags)) return -1; if (!is_empty_cas(&cas)) diff --combined builtin/shortlog.c index 608d6ba77b,581293ee7b..3898a2c9c4 --- a/builtin/shortlog.c +++ b/builtin/shortlog.c @@@ -11,8 -11,7 +11,8 @@@ #include "parse-options.h" static char const * const shortlog_usage[] = { - N_("git shortlog [] [] [[--] [...]]"), + N_("git shortlog [] [] [[--] ...]"), + N_("git log --pretty=short | git shortlog []"), NULL }; @@@ -268,8 -267,9 +268,9 @@@ int cmd_shortlog(int argc, const char * N_("Suppress commit descriptions, only provides commit count")), OPT_BOOL('e', "email", &log.email, N_("Show the email address of each author")), - { OPTION_CALLBACK, 'w', NULL, &log, N_("w[,i1[,i2]]"), - N_("Linewrap output"), PARSE_OPT_OPTARG, &parse_wrap_args }, + { OPTION_CALLBACK, 'w', NULL, &log, N_("[,[,]]"), + N_("Linewrap output"), PARSE_OPT_OPTARG, + &parse_wrap_args }, OPT_END(), }; @@@ -284,7 -284,6 +285,7 @@@ for (;;) { switch (parse_options_step(&ctx, options, shortlog_usage)) { case PARSE_OPT_HELP: + case PARSE_OPT_ERROR: exit(129); case PARSE_OPT_DONE: goto parse_done; @@@ -294,11 -293,6 +295,11 @@@ parse_done: argc = parse_options_end(&ctx); + if (nongit && argc > 1) { + error(_("too many arguments given outside repository")); + usage_with_options(shortlog_usage, options); + } + if (setup_revisions(argc, argv, &rev, NULL) != 1) { error(_("unrecognized argument: %s"), argv[1]); usage_with_options(shortlog_usage, options); diff --combined builtin/show-branch.c index 4b9d3c0059,371b8c8b84..363cf8509a --- a/builtin/show-branch.c +++ b/builtin/show-branch.c @@@ -7,7 -7,6 +7,7 @@@ #include "argv-array.h" #include "parse-options.h" #include "dir.h" +#include "commit-slab.h" static const char* show_branch_usage[] = { N_("git show-branch [-a | --all] [-r | --remotes] [--topo-order | --date-order]\n" @@@ -22,11 -21,6 +22,11 @@@ static int showbranch_use_color = -1 static struct argv_array default_args = ARGV_ARRAY_INIT; +/* + * TODO: convert this use of commit->object.flags to commit-slab + * instead to store a pointer to ref name directly. Then use the same + * UNINTERESTING definition from revision.h here. + */ #define UNINTERESTING 01 #define REV_SHIFT 2 @@@ -65,27 -59,15 +65,27 @@@ struct commit_name int generation; /* how many parents away from head_name */ }; +define_commit_slab(commit_name_slab, struct commit_name *); +static struct commit_name_slab name_slab; + +static struct commit_name *commit_to_name(struct commit *commit) +{ + return *commit_name_slab_at(&name_slab, commit); +} + + /* Name the commit as nth generation ancestor of head_name; * we count only the first-parent relationship for naming purposes. 
*/ static void name_commit(struct commit *commit, const char *head_name, int nth) { struct commit_name *name; - if (!commit->util) - commit->util = xmalloc(sizeof(struct commit_name)); - name = commit->util; + + name = *commit_name_slab_at(&name_slab, commit); + if (!name) { + name = xmalloc(sizeof(*name)); + *commit_name_slab_at(&name_slab, commit) = name; + } name->head_name = head_name; name->generation = nth; } @@@ -97,8 -79,8 +97,8 @@@ */ static void name_parent(struct commit *commit, struct commit *parent) { - struct commit_name *commit_name = commit->util; - struct commit_name *parent_name = parent->util; + struct commit_name *commit_name = commit_to_name(commit); + struct commit_name *parent_name = commit_to_name(parent); if (!commit_name) return; if (!parent_name || @@@ -112,12 -94,12 +112,12 @@@ static int name_first_parent_chain(stru int i = 0; while (c) { struct commit *p; - if (!c->util) + if (!commit_to_name(c)) break; if (!c->parents) break; p = c->parents->item; - if (!p->util) { + if (!commit_to_name(p)) { name_parent(c, p); i++; } @@@ -140,7 -122,7 +140,7 @@@ static void name_commits(struct commit_ /* First give names to the given heads */ for (cl = list; cl; cl = cl->next) { c = cl->item; - if (c->util) + if (commit_to_name(c)) continue; for (i = 0; i < num_rev; i++) { if (rev[i] == c) { @@@ -166,9 -148,9 +166,9 @@@ struct commit_name *n; int nth; c = cl->item; - if (!c->util) + if (!commit_to_name(c)) continue; - n = c->util; + n = commit_to_name(c); parents = c->parents; nth = 0; while (parents) { @@@ -176,7 -158,7 +176,7 @@@ struct strbuf newname = STRBUF_INIT; parents = parents->next; nth++; - if (p->util) + if (commit_to_name(p)) continue; switch (n->generation) { case 0: @@@ -289,7 -271,7 +289,7 @@@ static void show_one_commit(struct comm { struct strbuf pretty = STRBUF_INIT; const char *pretty_str = "(unavailable)"; - struct commit_name *name = commit->util; + struct commit_name *name = commit_to_name(commit); if (commit->object.parsed) { pp_commit_easy(CMIT_FMT_ONELINE, commit, &pretty); @@@ -310,7 -292,7 +310,7 @@@ } else printf("[%s] ", - find_unique_abbrev(commit->object.oid.hash, + find_unique_abbrev(&commit->object.oid, DEFAULT_ABBREV)); } puts(pretty_str); @@@ -378,8 -360,7 +378,8 @@@ static void sort_ref_range(int bottom, static int append_ref(const char *refname, const struct object_id *oid, int allow_dups) { - struct commit *commit = lookup_commit_reference_gently(oid, 1); + struct commit *commit = lookup_commit_reference_gently(the_repository, + oid, 1); int i; if (!commit) @@@ -674,13 -655,11 +674,13 @@@ int cmd_show_branch(int ac, const char { OPTION_CALLBACK, 'g', "reflog", &reflog_base, N_("[,]"), N_("show most recent ref-log entries starting at " "base"), - PARSE_OPT_OPTARG | PARSE_OPT_LITERAL_ARGHELP, + PARSE_OPT_OPTARG, parse_reflog_param }, OPT_END() }; + init_commit_name_slab(&name_slab); + git_config(git_show_branch_config, NULL); /* If nothing is specified, try the default first */ @@@ -831,7 -810,7 +831,7 @@@ MAX_REVS), MAX_REVS); if (get_oid(ref_name[num_rev], &revkey)) die(_("'%s' is not a valid ref."), ref_name[num_rev]); - commit = lookup_commit_reference(&revkey); + commit = lookup_commit_reference(the_repository, &revkey); if (!commit) die(_("cannot find commit %s (%s)"), ref_name[num_rev], oid_to_hex(&revkey)); diff --combined builtin/update-index.c index f5c0b6a1d2,6700a632be..5aee2eaa66 --- a/builtin/update-index.c +++ b/builtin/update-index.c @@@ -268,14 -268,15 +268,14 @@@ static int process_lstat_error(const ch static int 
add_one_path(const struct cache_entry *old, const char *path, int len, struct stat *st) { - int option, size; + int option; struct cache_entry *ce; /* Was the old index entry already up-to-date? */ if (old && !ce_stage(old) && !ce_match_stat(old, st, 0)) return 0; - size = cache_entry_size(len); - ce = xcalloc(1, size); + ce = make_empty_cache_entry(&the_index, len); memcpy(ce->name, path, len); ce->ce_flags = create_ce_flags(0); ce->ce_namelen = len; @@@ -284,13 -285,13 +284,13 @@@ if (index_path(&ce->oid, path, st, info_only ? 0 : HASH_WRITE_OBJECT)) { - free(ce); + discard_cache_entry(ce); return -1; } option = allow_add ? ADD_CACHE_OK_TO_ADD : 0; option |= allow_replace ? ADD_CACHE_OK_TO_REPLACE : 0; if (add_cache_entry(ce, option)) { - free(ce); + discard_cache_entry(ce); return error("%s: cannot add to the index - missing --add option?", path); } return 0; @@@ -401,14 -402,15 +401,14 @@@ static int process_path(const char *pat static int add_cacheinfo(unsigned int mode, const struct object_id *oid, const char *path, int stage) { - int size, len, option; + int len, option; struct cache_entry *ce; if (!verify_path(path, mode)) return error("Invalid path '%s'", path); len = strlen(path); - size = cache_entry_size(len); - ce = xcalloc(1, size); + ce = make_empty_cache_entry(&the_index, len); oidcpy(&ce->oid, oid); memcpy(ce->name, path, len); @@@ -449,8 -451,7 +449,8 @@@ static void update_one(const char *path int stat_errno = 0; struct stat st; - if (mark_valid_only || mark_skip_worktree_only || force_remove) + if (mark_valid_only || mark_skip_worktree_only || force_remove || + mark_fsmonitor_only) st.st_mode = 0; else if (lstat(path, &st) < 0) { st.st_mode = 0; @@@ -490,7 -491,6 +490,7 @@@ static void read_index_info(int nul_term_line) { + const int hexsz = the_hash_algo->hexsz; struct strbuf buf = STRBUF_INIT; struct strbuf uq = STRBUF_INIT; strbuf_getline_fn getline_fn; @@@ -528,7 -528,7 +528,7 @@@ mode = ul; tab = strchr(ptr, '\t'); - if (!tab || tab - ptr < GIT_SHA1_HEXSZ + 1) + if (!tab || tab - ptr < hexsz + 1) goto bad_line; if (tab[-2] == ' ' && '0' <= tab[-1] && tab[-1] <= '3') { @@@ -541,8 -541,8 +541,8 @@@ ptr = tab + 1; /* point at the head of path */ } - if (get_oid_hex(tab - GIT_SHA1_HEXSZ, &oid) || - tab[-(GIT_SHA1_HEXSZ + 1)] != ' ') + if (get_oid_hex(tab - hexsz, &oid) || + tab[-(hexsz + 1)] != ' ') goto bad_line; path_name = ptr; @@@ -570,7 -570,7 +570,7 @@@ * ptr[-1] points at tab, * ptr[-41] is at the beginning of sha1 */ - ptr[-(GIT_SHA1_HEXSZ + 2)] = ptr[-1] = 0; + ptr[-(hexsz + 2)] = ptr[-1] = 0; if (add_cacheinfo(mode, &oid, path_name, stage)) die("git update-index: unable to update %s", path_name); @@@ -598,9 -598,10 +598,9 @@@ static struct cache_entry *read_one_ent { unsigned mode; struct object_id oid; - int size; struct cache_entry *ce; - if (get_tree_entry(ent->hash, path, oid.hash, &mode)) { + if (get_tree_entry(ent, path, &oid, &mode)) { if (which) error("%s: not in %s branch.", path, which); return NULL; @@@ -610,7 -611,8 +610,7 @@@ error("%s: not a blob in %s branch.", path, which); return NULL; } - size = cache_entry_size(namelen); - ce = xcalloc(1, size); + ce = make_empty_cache_entry(&the_index, namelen); oidcpy(&ce->oid, &oid); memcpy(ce->name, path, namelen); @@@ -687,8 -689,8 +687,8 @@@ static int unresolve_one(const char *pa error("%s: cannot add their version to the index.", path); ret = -1; free_return: - free(ce_2); - free(ce_3); + discard_cache_entry(ce_2); + discard_cache_entry(ce_3); return ret; } @@@ -755,7 -757,7 +755,7 @@@ static int 
do_reupdate(int ac, const ch ce->name, ce_namelen(ce), 0); if (old && ce->ce_mode == old->ce_mode && !oidcmp(&ce->oid, &old->oid)) { - free(old); + discard_cache_entry(old); continue; /* unchanged */ } /* Be careful. The working tree may not have the @@@ -766,7 -768,7 +766,7 @@@ path = xstrdup(ce->name); update_one(path); free(path); - free(old); + discard_cache_entry(old); if (save_nr != active_nr) goto redo; } @@@ -823,7 -825,6 +823,7 @@@ static int parse_new_style_cacheinfo(co { unsigned long ul; char *endp; + const char *p; if (!arg) return -1; @@@ -834,9 -835,9 +834,9 @@@ return -1; /* not a new-style cacheinfo */ *mode = ul; endp++; - if (get_oid_hex(endp, oid) || endp[GIT_SHA1_HEXSZ] != ',') + if (parse_oid_hex(endp, oid, &p) || *p != ',') return -1; - *path = endp + GIT_SHA1_HEXSZ + 1; + *path = p + 1; return 0; } @@@ -969,9 -970,9 +969,9 @@@ int cmd_update_index(int argc, const ch PARSE_OPT_NOARG | /* disallow --cacheinfo= form */ PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP, (parse_opt_cb *) cacheinfo_callback}, - {OPTION_CALLBACK, 0, "chmod", &set_executable_bit, N_("(+/-)x"), + {OPTION_CALLBACK, 0, "chmod", &set_executable_bit, "(+|-)x", N_("override the executable bit of the listed files"), - PARSE_OPT_NONEG | PARSE_OPT_LITERAL_ARGHELP, + PARSE_OPT_NONEG, chmod_callback}, {OPTION_SET_INT, 0, "assume-unchanged", &mark_valid_only, NULL, N_("mark files as \"not changing\""), @@@ -1067,7 -1068,6 +1067,7 @@@ break; switch (parseopt_state) { case PARSE_OPT_HELP: + case PARSE_OPT_ERROR: exit(129); case PARSE_OPT_NON_OPTION: case PARSE_OPT_DONE: @@@ -1172,7 -1172,7 +1172,7 @@@ report(_("Untracked cache enabled for '%s'"), get_git_work_tree()); break; default: - die("BUG: bad untracked_cache value: %d", untracked_cache); + BUG("bad untracked_cache value: %d", untracked_cache); } if (fsmonitor > 0) { diff --combined builtin/write-tree.c index c9d3c544e7,e636419122..cdcbf8264e --- a/builtin/write-tree.c +++ b/builtin/write-tree.c @@@ -19,14 -19,13 +19,13 @@@ int cmd_write_tree(int argc, const cha { int flags = 0, ret; const char *prefix = NULL; - unsigned char sha1[20]; + struct object_id oid; const char *me = "git-write-tree"; struct option write_tree_options[] = { OPT_BIT(0, "missing-ok", &flags, N_("allow missing objects"), WRITE_TREE_MISSING_OK), - { OPTION_STRING, 0, "prefix", &prefix, N_("/"), - N_("write tree object for a subdirectory ") , - PARSE_OPT_LITERAL_ARGHELP }, + OPT_STRING(0, "prefix", &prefix, N_("/"), + N_("write tree object for a subdirectory ")), { OPTION_BIT, 0, "ignore-cache-tree", &flags, NULL, N_("only useful for debugging"), PARSE_OPT_HIDDEN | PARSE_OPT_NOARG, NULL, @@@ -38,10 -37,10 +37,10 @@@ argc = parse_options(argc, argv, unused_prefix, write_tree_options, write_tree_usage, 0); - ret = write_cache_as_tree(sha1, flags, prefix); + ret = write_cache_as_tree(&oid, flags, prefix); switch (ret) { case 0: - printf("%s\n", sha1_to_hex(sha1)); + printf("%s\n", oid_to_hex(&oid)); break; case WRITE_TREE_UNREADABLE_INDEX: die("%s: error reading the index", me); diff --combined parse-options.c index 7db84227ab,2a2b97aec9..3b874a83a0 --- a/parse-options.c +++ b/parse-options.c @@@ -317,16 -317,14 +317,16 @@@ is_abbreviated return get_value(p, options, all_opts, flags ^ opt_flags); } - if (ambiguous_option) - return error("Ambiguous option: %s " + if (ambiguous_option) { + error("Ambiguous option: %s " "(could be --%s%s or --%s%s)", arg, (ambiguous_flags & OPT_UNSET) ? "no-" : "", ambiguous_option->long_name, (abbrev_flags & OPT_UNSET) ? 
"no-" : "", abbrev_option->long_name); + return -3; + } if (abbrev_option) return get_value(p, abbrev_option, all_opts, abbrev_flags); return -2; @@@ -427,98 -425,6 +427,98 @@@ void parse_options_start(struct parse_o parse_options_check(options); } +static void show_negated_gitcomp(const struct option *opts, int nr_noopts) +{ + int printed_dashdash = 0; + + for (; opts->type != OPTION_END; opts++) { + int has_unset_form = 0; + const char *name; + + if (!opts->long_name) + continue; + if (opts->flags & (PARSE_OPT_HIDDEN | PARSE_OPT_NOCOMPLETE)) + continue; + if (opts->flags & PARSE_OPT_NONEG) + continue; + + switch (opts->type) { + case OPTION_STRING: + case OPTION_FILENAME: + case OPTION_INTEGER: + case OPTION_MAGNITUDE: + case OPTION_CALLBACK: + case OPTION_BIT: + case OPTION_NEGBIT: + case OPTION_COUNTUP: + case OPTION_SET_INT: + has_unset_form = 1; + break; + default: + break; + } + if (!has_unset_form) + continue; + + if (skip_prefix(opts->long_name, "no-", &name)) { + if (nr_noopts < 0) + printf(" --%s", name); + } else if (nr_noopts >= 0) { + if (nr_noopts && !printed_dashdash) { + printf(" --"); + printed_dashdash = 1; + } + printf(" --no-%s", opts->long_name); + nr_noopts++; + } + } +} + +static int show_gitcomp(struct parse_opt_ctx_t *ctx, + const struct option *opts) +{ + const struct option *original_opts = opts; + int nr_noopts = 0; + + for (; opts->type != OPTION_END; opts++) { + const char *suffix = ""; + + if (!opts->long_name) + continue; + if (opts->flags & (PARSE_OPT_HIDDEN | PARSE_OPT_NOCOMPLETE)) + continue; + + switch (opts->type) { + case OPTION_GROUP: + continue; + case OPTION_STRING: + case OPTION_FILENAME: + case OPTION_INTEGER: + case OPTION_MAGNITUDE: + case OPTION_CALLBACK: + if (opts->flags & PARSE_OPT_NOARG) + break; + if (opts->flags & PARSE_OPT_OPTARG) + break; + if (opts->flags & PARSE_OPT_LASTARG_DEFAULT) + break; + suffix = "="; + break; + default: + break; + } + if (opts->flags & PARSE_OPT_COMP_ARG) + suffix = "="; + if (starts_with(opts->long_name, "no-")) + nr_noopts++; + printf(" --%s%s", opts->long_name, suffix); + } + show_negated_gitcomp(original_opts, -1); + show_negated_gitcomp(original_opts, nr_noopts); + fputc('\n', stdout); + exit(0); +} + static int usage_with_options_internal(struct parse_opt_ctx_t *, const char * const *, const struct option *, int, int); @@@ -528,6 -434,7 +528,6 @@@ int parse_options_step(struct parse_opt const char * const usagestr[]) { int internal_help = !(ctx->flags & PARSE_OPT_NO_INTERNAL_HELP); - int err = 0; /* we must reset ->opt, unknown short option leave it dangling */ ctx->opt = NULL; @@@ -548,15 -455,11 +548,15 @@@ if (internal_help && ctx->total == 1 && !strcmp(arg + 1, "h")) goto show_usage; + /* lone --git-completion-helper is asked by git-completion.bash */ + if (ctx->total == 1 && !strcmp(arg + 1, "-git-completion-helper")) + return show_gitcomp(ctx, options); + if (arg[1] != '-') { ctx->opt = arg + 1; switch (parse_short_opt(ctx, options)) { case -1: - goto show_usage_error; + return PARSE_OPT_ERROR; case -2: if (ctx->opt) check_typos(arg + 1, options); @@@ -569,7 -472,7 +569,7 @@@ while (ctx->opt) { switch (parse_short_opt(ctx, options)) { case -1: - goto show_usage_error; + return PARSE_OPT_ERROR; case -2: if (internal_help && *ctx->opt == 'h') goto show_usage; @@@ -601,11 -504,9 +601,11 @@@ goto show_usage; switch (parse_long_opt(ctx, arg + 2, options)) { case -1: - goto show_usage_error; + return PARSE_OPT_ERROR; case -2: goto unknown; + case -3: + goto show_usage; } continue; unknown: @@@ -616,13 
-517,15 +616,13 @@@ } return PARSE_OPT_DONE; - show_usage_error: - err = 1; show_usage: - return usage_with_options_internal(ctx, usagestr, options, 0, err); + return usage_with_options_internal(ctx, usagestr, options, 0, 0); } int parse_options_end(struct parse_opt_ctx_t *ctx) { - memmove(ctx->out + ctx->cpidx, ctx->argv, ctx->argc * sizeof(*ctx->out)); + MOVE_ARRAY(ctx->out + ctx->cpidx, ctx->argv, ctx->argc); ctx->out[ctx->cpidx + ctx->argc] = NULL; return ctx->cpidx + ctx->argc; } @@@ -636,7 -539,6 +636,7 @@@ int parse_options(int argc, const char parse_options_start(&ctx, argc, argv, prefix, options, flags); switch (parse_options_step(&ctx, options, usagestr)) { case PARSE_OPT_HELP: + case PARSE_OPT_ERROR: exit(129); case PARSE_OPT_NON_OPTION: case PARSE_OPT_DONE: @@@ -660,7 -562,8 +660,8 @@@ static int usage_argh(const struct option *opts, FILE *outfile) { const char *s; - int literal = (opts->flags & PARSE_OPT_LITERAL_ARGHELP) || !opts->argh; + int literal = (opts->flags & PARSE_OPT_LITERAL_ARGHELP) || + !opts->argh || !!strpbrk(opts->argh, "()<>[]|"); if (opts->flags & PARSE_OPT_OPTARG) if (opts->long_name) s = literal ? "[=%s]" : "[=<%s>]";
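
The usage_argh() hunk directly above changes when an argument help string is printed verbatim: it is now treated as literal not only when PARSE_OPT_LITERAL_ARGHELP is set, but also when the string already contains one of "()<>[]|" (or is missing entirely). The option tables touched earlier in this diff (pack-objects, push, send-pack, shortlog, show-branch, read-tree, update-index, write-tree) rely on that inference, while an option whose help deliberately keeps unbalanced brackets, such as push's --force-with-lease, still passes PARSE_OPT_LITERAL_ARGHELP explicitly. The following caller is a minimal sketch of the resulting behaviour only; the command name, variables and option table are hypothetical and are not part of this patch:

    #include "builtin.h"
    #include "parse-options.h"

    static const char * const demo_usage[] = {
        N_("git demo [<options>] [<file>...]"),
        NULL
    };

    int cmd_demo(int argc, const char **argv, const char *prefix)
    {
        const char *chmod_arg = NULL;
        const char *output = NULL;
        struct option options[] = {
            /*
             * "(+|-)x" contains '(' and '|', so usage_argh() now
             * prints it as-is: "--chmod (+|-)x".  Before this series
             * getting that output required an explicit
             * PARSE_OPT_LITERAL_ARGHELP, as the old update-index
             * hunk above shows.
             */
            OPT_STRING(0, "chmod", &chmod_arg, "(+|-)x",
                       N_("override the executable bit of the listed files")),
            /*
             * A plain placeholder word is still wrapped automatically,
             * so this hypothetical option is shown as "--output <file>".
             */
            OPT_STRING('o', "output", &output, N_("file"),
                       N_("write the result to <file>")),
            OPT_END(),
        };

        argc = parse_options(argc, argv, prefix, options, demo_usage, 0);
        return 0;
    }

Invoking such a command with -h would list the two options as "--chmod (+|-)x" and "--output <file>" without any hand-written PARSE_OPT_LITERAL_ARGHELP, which is the simplification the individual option-table hunks in this merge apply to the real builtins.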