Merge branch 'nd/pack-unreachable-objects-doc'
author Junio C Hamano <gitster@pobox.com>
Wed, 23 May 2018 05:38:24 +0000 (14:38 +0900)
committer Junio C Hamano <gitster@pobox.com>
Wed, 23 May 2018 05:38:24 +0000 (14:38 +0900)
Doc update.

* nd/pack-unreachable-objects-doc:
pack-objects: validation and documentation about unreachable options

Documentation/git-pack-objects.txt
builtin/pack-objects.c
index 6bfac6b7ff2c4213da4be27ac63d31d76913800d,44245e5815e7b6bf8b2b090a1ec2832dd28aa215..d95b472d16828b2bea304727e7c5daaa2b75ae89
@@@ -12,7 -12,7 +12,7 @@@ SYNOPSI
  'git pack-objects' [-q | --progress | --all-progress] [--all-progress-implied]
        [--no-reuse-delta] [--delta-base-offset] [--non-empty]
        [--local] [--incremental] [--window=<n>] [--depth=<n>]
 -      [--revs [--unpacked | --all]]
 +      [--revs [--unpacked | --all]] [--keep-pack=<pack-name>]
        [--stdout [--filter=<filter-spec>] | base-name]
        [--shallow] [--keep-true-parents] < object-list
  
@@@ -96,9 -96,7 +96,9 @@@ base-name:
        it too deep affects the performance on the unpacker
        side, because delta data needs to be applied that many
        times to get to the necessary object.
 -      The default value for --window is 10 and --depth is 50.
 ++
 +The default value for --window is 10 and --depth is 50. The maximum
 +depth is 4095.
  
  --window-memory=<n>::
        This option provides an additional limit on top of `--window`;
        has a .keep file to be ignored, even if it would have
        otherwise been packed.
  
 +--keep-pack=<pack-name>::
 +      This flag causes an object already in the given pack to be
 +      ignored, even if it would have otherwise been
 +      packed. `<pack-name>` is the pack file name without
 +      leading directory (e.g. `pack-123.pack`). The option can be
 +      specified multiple times to keep multiple packs.
 +
  --incremental::
        This flag causes an object already in a pack to be ignored
        even if it would have otherwise been packed.
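
The hunk above documents a hard maximum of 4095 for --depth. That number is not arbitrary: near the end of this page, cmd_pack_objects() clamps the option to (1 << OE_DEPTH_BITS) - 1, so the documented maximum corresponds to a 12-bit depth field. A minimal, self-contained sketch of that clamp, with the bit width hard-coded here purely for illustration (the real constant lives in pack-objects.h, which is not part of this diff):

    #include <stdio.h>

    /* Stand-in for the constant from pack-objects.h; 12 bits gives the
     * documented maximum of (1 << 12) - 1 = 4095. */
    #define OE_DEPTH_BITS 12

    int main(void)
    {
            int depth = 10000;      /* as if the user had passed --depth=10000 */

            /* The same clamp that cmd_pack_objects() applies further down. */
            if (depth >= (1 << OE_DEPTH_BITS)) {
                    fprintf(stderr, "delta chain depth %d is too deep, forcing %d\n",
                            depth, (1 << OE_DEPTH_BITS) - 1);
                    depth = (1 << OE_DEPTH_BITS) - 1;
            }

            printf("effective --depth: %d\n", depth);   /* 4095 */
            return 0;
    }
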
@@@ -276,6 -267,19 +276,19 @@@ Unexpected missing object will raise a
        locally created objects [without .promisor] and objects from the
        promisor remote [with .promisor].)  This is used with partial clone.
  
+ --keep-unreachable::
+       Objects unreachable from the refs in packs named with
+       --unpacked= option are added to the resulting pack, in
+       addition to the reachable objects that are not in packs marked
+       with *.keep files. This implies `--revs`.
+
+ --pack-loose-unreachable::
+       Pack unreachable loose objects (and their loose counterparts
+       removed). This implies `--revs`.
+
+ --unpack-unreachable::
+       Keep unreachable objects in loose form. This implies `--revs`.
+
  SEE ALSO
  --------
  linkgit:git-rev-list[1]
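
Each of the three options above ends with "This implies `--revs`". On the C side of this merge (further down in this diff) that promise is backed by a one-line validation: giving any of the unreachable options switches pack-objects to its internal rev-list walk, exactly as if --revs had been passed. A condensed, self-contained restatement of that check, using plain ints in place of the real option flags:

    #include <stdio.h>

    /* Plain stand-ins for the option flags parsed in cmd_pack_objects(). */
    static int use_internal_rev_list;
    static int unpack_unreachable, keep_unreachable, pack_loose_unreachable;

    int main(void)
    {
            keep_unreachable = 1;   /* e.g. --keep-unreachable was given */

            /* Mirrors the validation added in builtin/pack-objects.c below:
             * any unreachable option behaves as if --revs had been given. */
            if (unpack_unreachable || keep_unreachable || pack_loose_unreachable)
                    use_internal_rev_list = 1;

            printf("use_internal_rev_list = %d\n", use_internal_rev_list);
            return 0;
    }
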
diff --combined builtin/pack-objects.c
index 8552d7e42e12d2729ae76a6cbf316e1669f1b8f4,b0f3e462f377b590341ef05715e36b8e7ff788b7..3df0bf0f6f7a7eec077485f7027e0c551a6d5343
@@@ -1,6 -1,5 +1,6 @@@
  #include "builtin.h"
  #include "cache.h"
 +#include "repository.h"
  #include "config.h"
  #include "attr.h"
  #include "object.h"
  #include "argv-array.h"
  #include "list.h"
  #include "packfile.h"
 +#include "object-store.h"
 +#include "dir.h"
 +
 +#define IN_PACK(obj) oe_in_pack(&to_pack, obj)
 +#define SIZE(obj) oe_size(&to_pack, obj)
 +#define SET_SIZE(obj,size) oe_set_size(&to_pack, obj, size)
 +#define DELTA_SIZE(obj) oe_delta_size(&to_pack, obj)
 +#define DELTA(obj) oe_delta(&to_pack, obj)
 +#define DELTA_CHILD(obj) oe_delta_child(&to_pack, obj)
 +#define DELTA_SIBLING(obj) oe_delta_sibling(&to_pack, obj)
 +#define SET_DELTA(obj, val) oe_set_delta(&to_pack, obj, val)
 +#define SET_DELTA_SIZE(obj, val) oe_set_delta_size(&to_pack, obj, val)
 +#define SET_DELTA_CHILD(obj, val) oe_set_delta_child(&to_pack, obj, val)
 +#define SET_DELTA_SIBLING(obj, val) oe_set_delta_sibling(&to_pack, obj, val)
  
  static const char *pack_usage[] = {
        N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
@@@ -58,7 -43,7 +58,7 @@@
  static struct packing_data to_pack;
  
  static struct pack_idx_entry **written_list;
 -static uint32_t nr_result, nr_written;
 +static uint32_t nr_result, nr_written, nr_seen;
  
  static int non_empty;
  static int reuse_delta = 1, reuse_object = 1;
@@@ -68,8 -53,7 +68,8 @@@ static int pack_loose_unreachable
  static int local;
  static int have_non_local_packs;
  static int incremental;
 -static int ignore_packed_keep;
 +static int ignore_packed_keep_on_disk;
 +static int ignore_packed_keep_in_core;
  static int allow_ofs_delta;
  static struct pack_idx_option pack_idx_opts;
  static const char *base_name;
@@@ -94,7 -78,7 +94,7 @@@ static uint16_t write_bitmap_options
  static int exclude_promisor_objects;
  
  static unsigned long delta_cache_size = 0;
 -static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
 +static unsigned long max_delta_cache_size = DEFAULT_DELTA_CACHE_SIZE;
  static unsigned long cache_max_small_delta_size = 1000;
  
  static unsigned long window_memory_limit = 0;
@@@ -138,17 -122,17 +138,17 @@@ static void *get_delta(struct object_en
        void *buf, *base_buf, *delta_buf;
        enum object_type type;
  
 -      buf = read_sha1_file(entry->idx.oid.hash, &type, &size);
 +      buf = read_object_file(&entry->idx.oid, &type, &size);
        if (!buf)
                die("unable to read %s", oid_to_hex(&entry->idx.oid));
 -      base_buf = read_sha1_file(entry->delta->idx.oid.hash, &type,
 -                                &base_size);
 +      base_buf = read_object_file(&DELTA(entry)->idx.oid, &type,
 +                                  &base_size);
        if (!base_buf)
                die("unable to read %s",
 -                  oid_to_hex(&entry->delta->idx.oid));
 +                  oid_to_hex(&DELTA(entry)->idx.oid));
        delta_buf = diff_delta(base_buf, base_size,
                               buf, size, &delta_size, 0);
 -      if (!delta_buf || delta_size != entry->delta_size)
 +      if (!delta_buf || delta_size != DELTA_SIZE(entry))
                die("delta size changed");
        free(buf);
        free(base_buf);
@@@ -281,12 -265,13 +281,12 @@@ static unsigned long write_no_reuse_obj
        struct git_istream *st = NULL;
  
        if (!usable_delta) {
 -              if (entry->type == OBJ_BLOB &&
 -                  entry->size > big_file_threshold &&
 -                  (st = open_istream(entry->idx.oid.hash, &type, &size, NULL)) != NULL)
 +              if (oe_type(entry) == OBJ_BLOB &&
 +                  oe_size_greater_than(&to_pack, entry, big_file_threshold) &&
 +                  (st = open_istream(&entry->idx.oid, &type, &size, NULL)) != NULL)
                        buf = NULL;
                else {
 -                      buf = read_sha1_file(entry->idx.oid.hash, &type,
 -                                           &size);
 +                      buf = read_object_file(&entry->idx.oid, &type, &size);
                        if (!buf)
                                die(_("unable to read %s"),
                                    oid_to_hex(&entry->idx.oid));
                FREE_AND_NULL(entry->delta_data);
                entry->z_delta_size = 0;
        } else if (entry->delta_data) {
 -              size = entry->delta_size;
 +              size = DELTA_SIZE(entry);
                buf = entry->delta_data;
                entry->delta_data = NULL;
 -              type = (allow_ofs_delta && entry->delta->idx.offset) ?
 +              type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
        } else {
                buf = get_delta(entry);
 -              size = entry->delta_size;
 -              type = (allow_ofs_delta && entry->delta->idx.offset) ?
 +              size = DELTA_SIZE(entry);
 +              type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
        }
  
                 * encoding of the relative offset for the delta
                 * base from this object's position in the pack.
                 */
 -              off_t ofs = entry->idx.offset - entry->delta->idx.offset;
 +              off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
                unsigned pos = sizeof(dheader) - 1;
                dheader[pos] = ofs & 127;
                while (ofs >>= 7)
                        return 0;
                }
                hashwrite(f, header, hdrlen);
 -              hashwrite(f, entry->delta->idx.oid.hash, 20);
 +              hashwrite(f, DELTA(entry)->idx.oid.hash, 20);
                hdrlen += 20;
        } else {
                if (limit && hdrlen + datalen + 20 >= limit) {
  static off_t write_reuse_object(struct hashfile *f, struct object_entry *entry,
                                unsigned long limit, int usable_delta)
  {
 -      struct packed_git *p = entry->in_pack;
 +      struct packed_git *p = IN_PACK(entry);
        struct pack_window *w_curs = NULL;
        struct revindex_entry *revidx;
        off_t offset;
 -      enum object_type type = entry->type;
 +      enum object_type type = oe_type(entry);
        off_t datalen;
        unsigned char header[MAX_PACK_OBJECT_HEADER],
                      dheader[MAX_PACK_OBJECT_HEADER];
        unsigned hdrlen;
 +      unsigned long entry_size = SIZE(entry);
  
 -      if (entry->delta)
 -              type = (allow_ofs_delta && entry->delta->idx.offset) ?
 +      if (DELTA(entry))
 +              type = (allow_ofs_delta && DELTA(entry)->idx.offset) ?
                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
        hdrlen = encode_in_pack_object_header(header, sizeof(header),
 -                                            type, entry->size);
 +                                            type, entry_size);
  
        offset = entry->in_pack_offset;
        revidx = find_pack_revindex(p, offset);
        datalen -= entry->in_pack_header_size;
  
        if (!pack_to_stdout && p->index_version == 1 &&
 -          check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
 +          check_pack_inflate(p, &w_curs, offset, datalen, entry_size)) {
                error("corrupt packed object for %s",
                      oid_to_hex(&entry->idx.oid));
                unuse_pack(&w_curs);
        }
  
        if (type == OBJ_OFS_DELTA) {
 -              off_t ofs = entry->idx.offset - entry->delta->idx.offset;
 +              off_t ofs = entry->idx.offset - DELTA(entry)->idx.offset;
                unsigned pos = sizeof(dheader) - 1;
                dheader[pos] = ofs & 127;
                while (ofs >>= 7)
                        return 0;
                }
                hashwrite(f, header, hdrlen);
 -              hashwrite(f, entry->delta->idx.oid.hash, 20);
 +              hashwrite(f, DELTA(entry)->idx.oid.hash, 20);
                hdrlen += 20;
                reused_delta++;
        } else {
@@@ -481,29 -465,28 +481,29 @@@ static off_t write_object(struct hashfi
        else
                limit = pack_size_limit - write_offset;
  
 -      if (!entry->delta)
 +      if (!DELTA(entry))
                usable_delta = 0;       /* no delta */
        else if (!pack_size_limit)
               usable_delta = 1;        /* unlimited packfile */
 -      else if (entry->delta->idx.offset == (off_t)-1)
 +      else if (DELTA(entry)->idx.offset == (off_t)-1)
                usable_delta = 0;       /* base was written to another pack */
 -      else if (entry->delta->idx.offset)
 +      else if (DELTA(entry)->idx.offset)
                usable_delta = 1;       /* base already exists in this pack */
        else
                usable_delta = 0;       /* base could end up in another pack */
  
        if (!reuse_object)
                to_reuse = 0;   /* explicit */
 -      else if (!entry->in_pack)
 +      else if (!IN_PACK(entry))
                to_reuse = 0;   /* can't reuse what we don't have */
 -      else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
 +      else if (oe_type(entry) == OBJ_REF_DELTA ||
 +               oe_type(entry) == OBJ_OFS_DELTA)
                                /* check_object() decided it for us ... */
                to_reuse = usable_delta;
                                /* ... but pack split may override that */
 -      else if (entry->type != entry->in_pack_type)
 +      else if (oe_type(entry) != entry->in_pack_type)
                to_reuse = 0;   /* pack has delta which is unusable */
 -      else if (entry->delta)
 +      else if (DELTA(entry))
                to_reuse = 0;   /* we want to pack afresh */
        else
                to_reuse = 1;   /* we have it in-pack undeltified,
@@@ -555,12 -538,12 +555,12 @@@ static enum write_one_status write_one(
        }
  
        /* if we are deltified, write out base object first. */
 -      if (e->delta) {
 +      if (DELTA(e)) {
                e->idx.offset = 1; /* now recurse */
 -              switch (write_one(f, e->delta, offset)) {
 +              switch (write_one(f, DELTA(e), offset)) {
                case WRITE_ONE_RECURSIVE:
                        /* we cannot depend on this one */
 -                      e->delta = NULL;
 +                      SET_DELTA(e, NULL);
                        break;
                default:
                        break;
@@@ -622,34 -605,34 +622,34 @@@ static void add_descendants_to_write_or
                        /* add this node... */
                        add_to_write_order(wo, endp, e);
                        /* all its siblings... */
 -                      for (s = e->delta_sibling; s; s = s->delta_sibling) {
 +                      for (s = DELTA_SIBLING(e); s; s = DELTA_SIBLING(s)) {
                                add_to_write_order(wo, endp, s);
                        }
                }
                /* drop down a level to add left subtree nodes if possible */
 -              if (e->delta_child) {
 +              if (DELTA_CHILD(e)) {
                        add_to_order = 1;
 -                      e = e->delta_child;
 +                      e = DELTA_CHILD(e);
                } else {
                        add_to_order = 0;
                        /* our sibling might have some children, it is next */
 -                      if (e->delta_sibling) {
 -                              e = e->delta_sibling;
 +                      if (DELTA_SIBLING(e)) {
 +                              e = DELTA_SIBLING(e);
                                continue;
                        }
                        /* go back to our parent node */
 -                      e = e->delta;
 -                      while (e && !e->delta_sibling) {
 +                      e = DELTA(e);
 +                      while (e && !DELTA_SIBLING(e)) {
                                /* we're on the right side of a subtree, keep
                                 * going up until we can go right again */
 -                              e = e->delta;
 +                              e = DELTA(e);
                        }
                        if (!e) {
                                /* done- we hit our original root node */
                                return;
                        }
                        /* pass it off to sibling at this level */
 -                      e = e->delta_sibling;
 +                      e = DELTA_SIBLING(e);
                }
        };
  }
@@@ -660,7 -643,7 +660,7 @@@ static void add_family_to_write_order(s
  {
        struct object_entry *root;
  
 -      for (root = e; root->delta; root = root->delta)
 +      for (root = e; DELTA(root); root = DELTA(root))
                ; /* nothing */
        add_descendants_to_write_order(wo, endp, root);
  }
@@@ -675,8 -658,8 +675,8 @@@ static struct object_entry **compute_wr
        for (i = 0; i < to_pack.nr_objects; i++) {
                objects[i].tagged = 0;
                objects[i].filled = 0;
 -              objects[i].delta_child = NULL;
 -              objects[i].delta_sibling = NULL;
 +              SET_DELTA_CHILD(&objects[i], NULL);
 +              SET_DELTA_SIBLING(&objects[i], NULL);
        }
  
        /*
         */
        for (i = to_pack.nr_objects; i > 0;) {
                struct object_entry *e = &objects[--i];
 -              if (!e->delta)
 +              if (!DELTA(e))
                        continue;
                /* Mark me as the first child */
 -              e->delta_sibling = e->delta->delta_child;
 -              e->delta->delta_child = e;
 +              e->delta_sibling_idx = DELTA(e)->delta_child_idx;
 +              SET_DELTA_CHILD(DELTA(e), e);
        }
  
        /*
         * And then all remaining commits and tags.
         */
        for (i = last_untagged; i < to_pack.nr_objects; i++) {
 -              if (objects[i].type != OBJ_COMMIT &&
 -                  objects[i].type != OBJ_TAG)
 +              if (oe_type(&objects[i]) != OBJ_COMMIT &&
 +                  oe_type(&objects[i]) != OBJ_TAG)
                        continue;
                add_to_write_order(wo, &wo_end, &objects[i]);
        }
         * And then all the trees.
         */
        for (i = last_untagged; i < to_pack.nr_objects; i++) {
 -              if (objects[i].type != OBJ_TREE)
 +              if (oe_type(&objects[i]) != OBJ_TREE)
                        continue;
                add_to_write_order(wo, &wo_end, &objects[i]);
        }
@@@ -854,11 -837,11 +854,11 @@@ static void write_pack_file(void
                 * If so, rewrite it like in fast-import
                 */
                if (pack_to_stdout) {
 -                      hashclose(f, oid.hash, CSUM_CLOSE);
 +                      finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_CLOSE);
                } else if (nr_written == nr_remaining) {
 -                      hashclose(f, oid.hash, CSUM_FSYNC);
 +                      finalize_hashfile(f, oid.hash, CSUM_HASH_IN_STREAM | CSUM_FSYNC | CSUM_CLOSE);
                } else {
 -                      int fd = hashclose(f, oid.hash, 0);
 +                      int fd = finalize_hashfile(f, oid.hash, 0);
                        fixup_pack_header_footer(fd, oid.hash, pack_tmp_name,
                                                 nr_written, oid.hash, offset);
                        close(fd);
  
                        if (write_bitmap_index) {
                                bitmap_writer_set_checksum(oid.hash);
 -                              bitmap_writer_build_type_index(written_list, nr_written);
 +                              bitmap_writer_build_type_index(
 +                                      &to_pack, written_list, nr_written);
                        }
  
                        finish_tmp_packfile(&tmpname, pack_tmp_name,
@@@ -1000,16 -982,13 +1000,16 @@@ static int want_found_object(int exclud
         * Otherwise, we signal "-1" at the end to tell the caller that we do
         * not know either way, and it needs to check more packs.
         */
 -      if (!ignore_packed_keep &&
 +      if (!ignore_packed_keep_on_disk &&
 +          !ignore_packed_keep_in_core &&
            (!local || !have_non_local_packs))
                return 1;
  
        if (local && !p->pack_local)
                return 0;
 -      if (ignore_packed_keep && p->pack_local && p->pack_keep)
 +      if (p->pack_local &&
 +          ((ignore_packed_keep_on_disk && p->pack_keep) ||
 +           (ignore_packed_keep_in_core && p->pack_keep_in_core)))
                return 0;
  
        /* we don't know yet; keep looking for more packs */
@@@ -1046,7 -1025,8 +1046,7 @@@ static int want_object_in_pack(const st
                if (want != -1)
                        return want;
        }
 -
 -      list_for_each(pos, &packed_git_mru) {
 +      list_for_each(pos, get_packed_git_mru(the_repository)) {
                struct packed_git *p = list_entry(pos, struct packed_git, mru);
                off_t offset;
  
                        }
                        want = want_found_object(exclude, p);
                        if (!exclude && want > 0)
 -                              list_move(&p->mru, &packed_git_mru);
 +                              list_move(&p->mru,
 +                                        get_packed_git_mru(the_repository));
                        if (want != -1)
                                return want;
                }
@@@ -1087,13 -1066,14 +1087,13 @@@ static void create_object_entry(const s
  
        entry = packlist_alloc(&to_pack, oid->hash, index_pos);
        entry->hash = hash;
 -      if (type)
 -              entry->type = type;
 +      oe_set_type(entry, type);
        if (exclude)
                entry->preferred_base = 1;
        else
                nr_result++;
        if (found_pack) {
 -              entry->in_pack = found_pack;
 +              oe_set_in_pack(&to_pack, entry, found_pack);
                entry->in_pack_offset = found_offset;
        }
  
@@@ -1111,8 -1091,6 +1111,8 @@@ static int add_object_entry(const struc
        off_t found_offset = 0;
        uint32_t index_pos;
  
 +      display_progress(progress_state, ++nr_seen);
 +
        if (have_duplicate_entry(oid, exclude, &index_pos))
                return 0;
  
        create_object_entry(oid, type, pack_name_hash(name),
                            exclude, name && no_try_delta(name),
                            index_pos, found_pack, found_offset);
 -
 -      display_progress(progress_state, nr_result);
        return 1;
  }
  
@@@ -1138,8 -1118,6 +1138,8 @@@ static int add_object_entry_from_bitmap
  {
        uint32_t index_pos;
  
 +      display_progress(progress_state, ++nr_seen);
 +
        if (have_duplicate_entry(oid, 0, &index_pos))
                return 0;
  
                return 0;
  
        create_object_entry(oid, type, name_hash, 0, 0, index_pos, pack, offset);
 -
 -      display_progress(progress_state, nr_result);
        return 1;
  }
  
@@@ -1210,7 -1190,7 +1210,7 @@@ static struct pbase_tree_cache *pbase_t
        /* Did not find one.  Either we got a bogus request or
         * we need to read and perhaps cache.
         */
 -      data = read_sha1_file(oid->hash, &type, &size);
 +      data = read_object_file(oid, &type, &size);
        if (!data)
                return NULL;
        if (type != OBJ_TREE) {
@@@ -1371,7 -1351,7 +1371,7 @@@ static void add_preferred_base(struct o
        if (window <= num_preferred_base++)
                return;
  
 -      data = read_object_with_reference(oid->hash, tree_type, &size, tree_oid.hash);
 +      data = read_object_with_reference(oid, tree_type, &size, &tree_oid);
        if (!data)
                return;
  
@@@ -1418,10 -1398,8 +1418,10 @@@ static void cleanup_preferred_base(void
  
  static void check_object(struct object_entry *entry)
  {
 -      if (entry->in_pack) {
 -              struct packed_git *p = entry->in_pack;
 +      unsigned long canonical_size;
 +
 +      if (IN_PACK(entry)) {
 +              struct packed_git *p = IN_PACK(entry);
                struct pack_window *w_curs = NULL;
                const unsigned char *base_ref = NULL;
                struct object_entry *base_entry;
                unsigned long avail;
                off_t ofs;
                unsigned char *buf, c;
 +              enum object_type type;
 +              unsigned long in_pack_size;
  
                buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);
  
                 * since non-delta representations could still be reused.
                 */
                used = unpack_object_header_buffer(buf, avail,
 -                                                 &entry->in_pack_type,
 -                                                 &entry->size);
 +                                                 &type,
 +                                                 &in_pack_size);
                if (used == 0)
                        goto give_up;
  
 +              if (type < 0)
 +                      BUG("invalid type %d", type);
 +              entry->in_pack_type = type;
 +
                /*
                 * Determine if this is a delta and if so whether we can
                 * reuse it or not.  Otherwise let's find out as cheaply as
                switch (entry->in_pack_type) {
                default:
                        /* Not a delta hence we've already got all we need. */
 -                      entry->type = entry->in_pack_type;
 +                      oe_set_type(entry, entry->in_pack_type);
 +                      SET_SIZE(entry, in_pack_size);
                        entry->in_pack_header_size = used;
 -                      if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
 +                      if (oe_type(entry) < OBJ_COMMIT || oe_type(entry) > OBJ_BLOB)
                                goto give_up;
                        unuse_pack(&w_curs);
                        return;
                         * deltify other objects against, in order to avoid
                         * circular deltas.
                         */
 -                      entry->type = entry->in_pack_type;
 -                      entry->delta = base_entry;
 -                      entry->delta_size = entry->size;
 -                      entry->delta_sibling = base_entry->delta_child;
 -                      base_entry->delta_child = entry;
 +                      oe_set_type(entry, entry->in_pack_type);
 +                      SET_SIZE(entry, in_pack_size); /* delta size */
 +                      SET_DELTA(entry, base_entry);
 +                      SET_DELTA_SIZE(entry, in_pack_size);
 +                      entry->delta_sibling_idx = base_entry->delta_child_idx;
 +                      SET_DELTA_CHILD(base_entry, entry);
                        unuse_pack(&w_curs);
                        return;
                }
  
 -              if (entry->type) {
 +              if (oe_type(entry)) {
 +                      off_t delta_pos;
 +
                        /*
                         * This must be a delta and we already know what the
                         * final object type is.  Let's extract the actual
                         * object size from the delta header.
                         */
 -                      entry->size = get_size_from_delta(p, &w_curs,
 -                                      entry->in_pack_offset + entry->in_pack_header_size);
 -                      if (entry->size == 0)
 +                      delta_pos = entry->in_pack_offset + entry->in_pack_header_size;
 +                      canonical_size = get_size_from_delta(p, &w_curs, delta_pos);
 +                      if (canonical_size == 0)
                                goto give_up;
 +                      SET_SIZE(entry, canonical_size);
                        unuse_pack(&w_curs);
                        return;
                }
                unuse_pack(&w_curs);
        }
  
 -      entry->type = sha1_object_info(entry->idx.oid.hash, &entry->size);
 -      /*
 -       * The error condition is checked in prepare_pack().  This is
 -       * to permit a missing preferred base object to be ignored
 -       * as a preferred base.  Doing so can result in a larger
 -       * pack file, but the transfer will still take place.
 -       */
 +      oe_set_type(entry,
 +                  oid_object_info(the_repository, &entry->idx.oid, &canonical_size));
 +      if (entry->type_valid) {
 +              SET_SIZE(entry, canonical_size);
 +      } else {
 +              /*
 +               * Bad object type is checked in prepare_pack().  This is
 +               * to permit a missing preferred base object to be ignored
 +               * as a preferred base.  Doing so can result in a larger
 +               * pack file, but the transfer will still take place.
 +               */
 +      }
  }
  
  static int pack_offset_sort(const void *_a, const void *_b)
  {
        const struct object_entry *a = *(struct object_entry **)_a;
        const struct object_entry *b = *(struct object_entry **)_b;
 +      const struct packed_git *a_in_pack = IN_PACK(a);
 +      const struct packed_git *b_in_pack = IN_PACK(b);
  
        /* avoid filesystem trashing with loose objects */
 -      if (!a->in_pack && !b->in_pack)
 +      if (!a_in_pack && !b_in_pack)
                return oidcmp(&a->idx.oid, &b->idx.oid);
  
 -      if (a->in_pack < b->in_pack)
 +      if (a_in_pack < b_in_pack)
                return -1;
 -      if (a->in_pack > b->in_pack)
 +      if (a_in_pack > b_in_pack)
                return 1;
        return a->in_pack_offset < b->in_pack_offset ? -1 :
                        (a->in_pack_offset > b->in_pack_offset);
   */
  static void drop_reused_delta(struct object_entry *entry)
  {
 -      struct object_entry **p = &entry->delta->delta_child;
 +      unsigned *idx = &to_pack.objects[entry->delta_idx - 1].delta_child_idx;
        struct object_info oi = OBJECT_INFO_INIT;
 +      enum object_type type;
 +      unsigned long size;
  
 -      while (*p) {
 -              if (*p == entry)
 -                      *p = (*p)->delta_sibling;
 +      while (*idx) {
 +              struct object_entry *oe = &to_pack.objects[*idx - 1];
 +
 +              if (oe == entry)
 +                      *idx = oe->delta_sibling_idx;
                else
 -                      p = &(*p)->delta_sibling;
 +                      idx = &oe->delta_sibling_idx;
        }
 -      entry->delta = NULL;
 +      SET_DELTA(entry, NULL);
        entry->depth = 0;
  
 -      oi.sizep = &entry->size;
 -      oi.typep = &entry->type;
 -      if (packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) < 0) {
 +      oi.sizep = &size;
 +      oi.typep = &type;
 +      if (packed_object_info(the_repository, IN_PACK(entry), entry->in_pack_offset, &oi) < 0) {
                /*
                 * We failed to get the info from this pack for some reason;
                 * fall back to sha1_object_info, which may find another copy.
 -               * And if that fails, the error will be recorded in entry->type
 +               * And if that fails, the error will be recorded in oe_type(entry)
                 * and dealt with in prepare_pack().
                 */
 -              entry->type = sha1_object_info(entry->idx.oid.hash,
 -                                             &entry->size);
 +              oe_set_type(entry,
 +                          oid_object_info(the_repository, &entry->idx.oid, &size));
 +      } else {
 +              oe_set_type(entry, type);
        }
 +      SET_SIZE(entry, size);
  }
  
  /*
@@@ -1651,7 -1604,7 +1651,7 @@@ static void break_delta_chains(struct o
  
        for (cur = entry, total_depth = 0;
             cur;
 -           cur = cur->delta, total_depth++) {
 +           cur = DELTA(cur), total_depth++) {
                if (cur->dfs_state == DFS_DONE) {
                        /*
                         * We've already seen this object and know it isn't
                 * it's not a delta, we're done traversing, but we'll mark it
                 * done to save time on future traversals.
                 */
 -              if (!cur->delta) {
 +              if (!DELTA(cur)) {
                        cur->dfs_state = DFS_DONE;
                        break;
                }
                 * We keep all commits in the chain that we examined.
                 */
                cur->dfs_state = DFS_ACTIVE;
 -              if (cur->delta->dfs_state == DFS_ACTIVE) {
 +              if (DELTA(cur)->dfs_state == DFS_ACTIVE) {
                        drop_reused_delta(cur);
                        cur->dfs_state = DFS_DONE;
                        break;
         * an extra "next" pointer to keep going after we reset cur->delta.
         */
        for (cur = entry; cur; cur = next) {
 -              next = cur->delta;
 +              next = DELTA(cur);
  
                /*
                 * We should have a chain of zero or more ACTIVE states down to
@@@ -1759,10 -1712,6 +1759,10 @@@ static void get_object_details(void
        uint32_t i;
        struct object_entry **sorted_by_offset;
  
 +      if (progress)
 +              progress_state = start_progress(_("Counting objects"),
 +                                              to_pack.nr_objects);
 +
        sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
        for (i = 0; i < to_pack.nr_objects; i++)
                sorted_by_offset[i] = to_pack.objects + i;
        for (i = 0; i < to_pack.nr_objects; i++) {
                struct object_entry *entry = sorted_by_offset[i];
                check_object(entry);
 -              if (big_file_threshold < entry->size)
 +              if (entry->type_valid &&
 +                  oe_size_greater_than(&to_pack, entry, big_file_threshold))
                        entry->no_try_delta = 1;
 +              display_progress(progress_state, i + 1);
        }
 +      stop_progress(&progress_state);
  
        /*
         * This must happen in a second pass, since we rely on the delta
@@@ -1801,14 -1747,10 +1801,14 @@@ static int type_size_sort(const void *_
  {
        const struct object_entry *a = *(struct object_entry **)_a;
        const struct object_entry *b = *(struct object_entry **)_b;
 +      enum object_type a_type = oe_type(a);
 +      enum object_type b_type = oe_type(b);
 +      unsigned long a_size = SIZE(a);
 +      unsigned long b_size = SIZE(b);
  
 -      if (a->type > b->type)
 +      if (a_type > b_type)
                return -1;
 -      if (a->type < b->type)
 +      if (a_type < b_type)
                return 1;
        if (a->hash > b->hash)
                return -1;
                return -1;
        if (a->preferred_base < b->preferred_base)
                return 1;
 -      if (a->size > b->size)
 +      if (a_size > b_size)
                return -1;
 -      if (a->size < b->size)
 +      if (a_size < b_size)
                return 1;
        return a < b ? -1 : (a > b);  /* newest first */
  }
@@@ -1873,46 -1815,6 +1873,46 @@@ static pthread_mutex_t progress_mutex
  
  #endif
  
 +/*
 + * Return the size of the object without doing any delta
 + * reconstruction (so non-deltas are true object sizes, but deltas
 + * return the size of the delta data).
 + */
 +unsigned long oe_get_size_slow(struct packing_data *pack,
 +                             const struct object_entry *e)
 +{
 +      struct packed_git *p;
 +      struct pack_window *w_curs;
 +      unsigned char *buf;
 +      enum object_type type;
 +      unsigned long used, avail, size;
 +
 +      if (e->type_ != OBJ_OFS_DELTA && e->type_ != OBJ_REF_DELTA) {
 +              read_lock();
 +              if (oid_object_info(the_repository, &e->idx.oid, &size) < 0)
 +                      die(_("unable to get size of %s"),
 +                          oid_to_hex(&e->idx.oid));
 +              read_unlock();
 +              return size;
 +      }
 +
 +      p = oe_in_pack(pack, e);
 +      if (!p)
 +              BUG("when e->type is a delta, it must belong to a pack");
 +
 +      read_lock();
 +      w_curs = NULL;
 +      buf = use_pack(p, &w_curs, e->in_pack_offset, &avail);
 +      used = unpack_object_header_buffer(buf, avail, &type, &size);
 +      if (used == 0)
 +              die(_("unable to parse object header of %s"),
 +                  oid_to_hex(&e->idx.oid));
 +
 +      unuse_pack(&w_curs);
 +      read_unlock();
 +      return size;
 +}
 +
  static int try_delta(struct unpacked *trg, struct unpacked *src,
                     unsigned max_depth, unsigned long *mem_usage)
  {
        void *delta_buf;
  
        /* Don't bother doing diffs between different types */
 -      if (trg_entry->type != src_entry->type)
 +      if (oe_type(trg_entry) != oe_type(src_entry))
                return -1;
  
        /*
         * it, we will still save the transfer cost, as we already know
         * the other side has it and we won't send src_entry at all.
         */
 -      if (reuse_delta && trg_entry->in_pack &&
 -          trg_entry->in_pack == src_entry->in_pack &&
 +      if (reuse_delta && IN_PACK(trg_entry) &&
 +          IN_PACK(trg_entry) == IN_PACK(src_entry) &&
            !src_entry->preferred_base &&
            trg_entry->in_pack_type != OBJ_REF_DELTA &&
            trg_entry->in_pack_type != OBJ_OFS_DELTA)
                return 0;
  
        /* Now some size filtering heuristics. */
 -      trg_size = trg_entry->size;
 -      if (!trg_entry->delta) {
 +      trg_size = SIZE(trg_entry);
 +      if (!DELTA(trg_entry)) {
                max_size = trg_size/2 - 20;
                ref_depth = 1;
        } else {
 -              max_size = trg_entry->delta_size;
 +              max_size = DELTA_SIZE(trg_entry);
                ref_depth = trg->depth;
        }
        max_size = (uint64_t)max_size * (max_depth - src->depth) /
                                                (max_depth - ref_depth + 1);
        if (max_size == 0)
                return 0;
 -      src_size = src_entry->size;
 +      src_size = SIZE(src_entry);
        sizediff = src_size < trg_size ? trg_size - src_size : 0;
        if (sizediff >= max_size)
                return 0;
        /* Load data if not already done */
        if (!trg->data) {
                read_lock();
 -              trg->data = read_sha1_file(trg_entry->idx.oid.hash, &type,
 -                                         &sz);
 +              trg->data = read_object_file(&trg_entry->idx.oid, &type, &sz);
                read_unlock();
                if (!trg->data)
                        die("object %s cannot be read",
        }
        if (!src->data) {
                read_lock();
 -              src->data = read_sha1_file(src_entry->idx.oid.hash, &type,
 -                                         &sz);
 +              src->data = read_object_file(&src_entry->idx.oid, &type, &sz);
                read_unlock();
                if (!src->data) {
                        if (src_entry->preferred_base) {
        delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
        if (!delta_buf)
                return 0;
 +      if (delta_size >= (1U << OE_DELTA_SIZE_BITS)) {
 +              free(delta_buf);
 +              return 0;
 +      }
  
 -      if (trg_entry->delta) {
 +      if (DELTA(trg_entry)) {
                /* Prefer only shallower same-sized deltas. */
 -              if (delta_size == trg_entry->delta_size &&
 +              if (delta_size == DELTA_SIZE(trg_entry) &&
                    src->depth + 1 >= trg->depth) {
                        free(delta_buf);
                        return 0;
        free(trg_entry->delta_data);
        cache_lock();
        if (trg_entry->delta_data) {
 -              delta_cache_size -= trg_entry->delta_size;
 +              delta_cache_size -= DELTA_SIZE(trg_entry);
                trg_entry->delta_data = NULL;
        }
        if (delta_cacheable(src_size, trg_size, delta_size)) {
                free(delta_buf);
        }
  
 -      trg_entry->delta = src_entry;
 -      trg_entry->delta_size = delta_size;
 +      SET_DELTA(trg_entry, src_entry);
 +      SET_DELTA_SIZE(trg_entry, delta_size);
        trg->depth = src->depth + 1;
  
        return 1;
  
  static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
  {
 -      struct object_entry *child = me->delta_child;
 +      struct object_entry *child = DELTA_CHILD(me);
        unsigned int m = n;
        while (child) {
                unsigned int c = check_delta_limit(child, n + 1);
                if (m < c)
                        m = c;
 -              child = child->delta_sibling;
 +              child = DELTA_SIBLING(child);
        }
        return m;
  }
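
One consequence of those bit-limited fields shows up in try_delta() above: a freshly created delta whose size does not fit in the delta-size field is freed and the candidate pair is rejected (the object can still be stored whole or deltified against another base), and a similar guard in find_deltas() further down refuses to cache a compressed delta whose size would overflow z_delta_size, recomputing it at write time instead. A small, self-contained sketch of that guard pattern, with a placeholder bit width (the real OE_DELTA_SIZE_BITS value is defined in pack-objects.h and does not appear in this diff):

    #include <stdio.h>

    /* Placeholder width purely for illustration; the real limit comes from
     * OE_DELTA_SIZE_BITS in pack-objects.h. */
    #define DELTA_SIZE_BITS 20

    /* Keep a delta only if its size fits the bit-limited field; otherwise
     * the caller frees the buffer and proceeds as if no delta existed. */
    static int delta_fits(unsigned long delta_size)
    {
            return delta_size < (1UL << DELTA_SIZE_BITS);
    }

    int main(void)
    {
            unsigned long sizes[] = { 512, (1UL << DELTA_SIZE_BITS) + 1 };
            size_t i;

            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("delta of %lu bytes: %s\n", sizes[i],
                           delta_fits(sizes[i]) ? "keep" : "discard");
            return 0;
    }
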
@@@ -2081,7 -1981,7 +2081,7 @@@ static unsigned long free_unpacked(stru
        free_delta_index(n->index);
        n->index = NULL;
        if (n->data) {
 -              freed_mem += n->entry->size;
 +              freed_mem += SIZE(n->entry);
                FREE_AND_NULL(n->data);
        }
        n->entry = NULL;
@@@ -2139,7 -2039,7 +2139,7 @@@ static void find_deltas(struct object_e
                 * otherwise they would become too deep.
                 */
                max_depth = depth;
 -              if (entry->delta_child) {
 +              if (DELTA_CHILD(entry)) {
                        max_depth -= check_delta_limit(entry, 0);
                        if (max_depth <= 0)
                                goto next;
                 * between writes at that moment.
                 */
                if (entry->delta_data && !pack_to_stdout) {
 -                      entry->z_delta_size = do_compress(&entry->delta_data,
 -                                                        entry->delta_size);
 -                      cache_lock();
 -                      delta_cache_size -= entry->delta_size;
 -                      delta_cache_size += entry->z_delta_size;
 -                      cache_unlock();
 +                      unsigned long size;
 +
 +                      size = do_compress(&entry->delta_data, DELTA_SIZE(entry));
 +                      if (size < (1U << OE_Z_DELTA_BITS)) {
 +                              entry->z_delta_size = size;
 +                              cache_lock();
 +                              delta_cache_size -= DELTA_SIZE(entry);
 +                              delta_cache_size += entry->z_delta_size;
 +                              cache_unlock();
 +                      } else {
 +                              FREE_AND_NULL(entry->delta_data);
 +                              entry->z_delta_size = 0;
 +                      }
                }
  
                /* if we made n a delta, and if n is already at max
                 * depth, leaving it in the window is pointless.  we
                 * should evict it first.
                 */
 -              if (entry->delta && max_depth <= n->depth)
 +              if (DELTA(entry) && max_depth <= n->depth)
                        continue;
  
                /*
                 * currently deltified object, to keep it longer.  It will
                 * be the first base object to be attempted next.
                 */
 -              if (entry->delta) {
 +              if (DELTA(entry)) {
                        struct unpacked swap = array[best_base];
                        int dist = (window + idx - best_base) % window;
                        int dst = best_base;
@@@ -2525,14 -2418,13 +2525,14 @@@ static void prepare_pack(int window, in
        for (i = 0; i < to_pack.nr_objects; i++) {
                struct object_entry *entry = to_pack.objects + i;
  
 -              if (entry->delta)
 +              if (DELTA(entry))
                        /* This happens if we decided to reuse existing
                         * delta from a pack.  "reuse_delta &&" is implied.
                         */
                        continue;
  
 -              if (entry->size < 50)
 +              if (!entry->type_valid ||
 +                  oe_size_less_than(&to_pack, entry, 50))
                        continue;
  
                if (entry->no_try_delta)
  
                if (!entry->preferred_base) {
                        nr_deltas++;
 -                      if (entry->type < 0)
 +                      if (oe_type(entry) < 0)
                                die("unable to get type of object %s",
                                    oid_to_hex(&entry->idx.oid));
                } else {
 -                      if (entry->type < 0) {
 +                      if (oe_type(entry) < 0) {
                                /*
                                 * This object is not found, but we
                                 * don't have to include it anyway.
@@@ -2653,7 -2545,7 +2653,7 @@@ static void read_object_list_from_stdin
                        die("expected object ID, got garbage:\n %s", line);
  
                add_preferred_base_object(p + 1);
 -              add_object_entry(&oid, 0, p + 1, 0);
 +              add_object_entry(&oid, OBJ_NONE, p + 1, 0);
        }
  }
  
@@@ -2782,11 -2674,11 +2782,11 @@@ static void add_objects_in_unpacked_pac
  
        memset(&in_pack, 0, sizeof(in_pack));
  
 -      for (p = packed_git; p; p = p->next) {
 +      for (p = get_packed_git(the_repository); p; p = p->next) {
                struct object_id oid;
                struct object *o;
  
 -              if (!p->pack_local || p->pack_keep)
 +              if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
                        continue;
                if (open_pack_index(p))
                        die("cannot open pack index");
  static int add_loose_object(const struct object_id *oid, const char *path,
                            void *data)
  {
 -      enum object_type type = sha1_object_info(oid->hash, NULL);
 +      enum object_type type = oid_object_info(the_repository, oid, NULL);
  
        if (type < 0) {
                warning("loose object at %s could not be examined", path);
@@@ -2845,18 -2737,16 +2845,18 @@@ static int has_sha1_pack_kept_or_nonloc
        static struct packed_git *last_found = (void *)1;
        struct packed_git *p;
  
 -      p = (last_found != (void *)1) ? last_found : packed_git;
 +      p = (last_found != (void *)1) ? last_found :
 +                                      get_packed_git(the_repository);
  
        while (p) {
 -              if ((!p->pack_local || p->pack_keep) &&
 +              if ((!p->pack_local || p->pack_keep ||
 +                              p->pack_keep_in_core) &&
                        find_pack_entry_one(oid->hash, p)) {
                        last_found = p;
                        return 1;
                }
                if (p == last_found)
 -                      p = packed_git;
 +                      p = get_packed_git(the_repository);
                else
                        p = p->next;
                if (p == last_found)
@@@ -2892,8 -2782,8 +2892,8 @@@ static void loosen_unused_packed_object
        uint32_t i;
        struct object_id oid;
  
 -      for (p = packed_git; p; p = p->next) {
 -              if (!p->pack_local || p->pack_keep)
 +      for (p = get_packed_git(the_repository); p; p = p->next) {
 +              if (!p->pack_local || p->pack_keep || p->pack_keep_in_core)
                        continue;
  
                if (open_pack_index(p))
@@@ -2919,8 -2809,7 +2919,8 @@@ static int pack_options_allow_reuse(voi
  {
        return pack_to_stdout &&
               allow_ofs_delta &&
 -             !ignore_packed_keep &&
 +             !ignore_packed_keep_on_disk &&
 +             !ignore_packed_keep_in_core &&
               (!local || !have_non_local_packs) &&
               !incremental;
  }
@@@ -3029,32 -2918,6 +3029,32 @@@ static void get_object_list(int ac, con
        oid_array_clear(&recent_objects);
  }
  
 +static void add_extra_kept_packs(const struct string_list *names)
 +{
 +      struct packed_git *p;
 +
 +      if (!names->nr)
 +              return;
 +
 +      for (p = get_packed_git(the_repository); p; p = p->next) {
 +              const char *name = basename(p->pack_name);
 +              int i;
 +
 +              if (!p->pack_local)
 +                      continue;
 +
 +              for (i = 0; i < names->nr; i++)
 +                      if (!fspathcmp(name, names->items[i].string))
 +                              break;
 +
 +              if (i < names->nr) {
 +                      p->pack_keep_in_core = 1;
 +                      ignore_packed_keep_in_core = 1;
 +                      continue;
 +              }
 +      }
 +}
 +
  static int option_parse_index_version(const struct option *opt,
                                      const char *arg, int unset)
  {
@@@ -3094,7 -2957,6 +3094,7 @@@ int cmd_pack_objects(int argc, const ch
        struct argv_array rp = ARGV_ARRAY_INIT;
        int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
        int rev_list_index = 0;
 +      struct string_list keep_pack_list = STRING_LIST_INIT_NODUP;
        struct option pack_objects_options[] = {
                OPT_SET_INT('q', "quiet", &progress,
                            N_("do not show progress meter"), 0),
                         N_("create thin packs")),
                OPT_BOOL(0, "shallow", &shallow,
                         N_("create packs suitable for shallow fetches")),
 -              OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
 +              OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep_on_disk,
                         N_("ignore packs that have companion .keep file")),
 +              OPT_STRING_LIST(0, "keep-pack", &keep_pack_list, N_("name"),
 +                              N_("ignore this pack")),
                OPT_INTEGER(0, "compression", &pack_compression_level,
                            N_("pack compression level")),
                OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
                OPT_END(),
        };
  
 +      if (DFS_NUM_STATES > (1 << OE_DFS_STATE_BITS))
 +              BUG("too many dfs states, increase OE_DFS_STATE_BITS");
 +
        check_replace_refs = 0;
  
        reset_pack_idx_option(&pack_idx_opts);
        if (pack_to_stdout != !base_name || argc)
                usage_with_options(pack_usage, pack_objects_options);
  
 +      if (depth >= (1 << OE_DEPTH_BITS)) {
 +              warning(_("delta chain depth %d is too deep, forcing %d"),
 +                      depth, (1 << OE_DEPTH_BITS) - 1);
 +              depth = (1 << OE_DEPTH_BITS) - 1;
 +      }
 +      if (cache_max_small_delta_size >= (1U << OE_Z_DELTA_BITS)) {
 +              warning(_("pack.deltaCacheLimit is too high, forcing %d"),
 +                      (1U << OE_Z_DELTA_BITS) - 1);
 +              cache_max_small_delta_size = (1U << OE_Z_DELTA_BITS) - 1;
 +      }
 +
        argv_array_push(&rp, "pack-objects");
        if (thin) {
                use_internal_rev_list = 1;
                fetch_if_missing = 0;
                argv_array_push(&rp, "--exclude-promisor-objects");
        }
+       if (unpack_unreachable || keep_unreachable || pack_loose_unreachable)
+               use_internal_rev_list = 1;
  
        if (!reuse_object)
                reuse_delta = 0;
        if (progress && all_progress_implied)
                progress = 2;
  
 -      prepare_packed_git();
 -      if (ignore_packed_keep) {
 +      add_extra_kept_packs(&keep_pack_list);
 +      if (ignore_packed_keep_on_disk) {
                struct packed_git *p;
 -              for (p = packed_git; p; p = p->next)
 +              for (p = get_packed_git(the_repository); p; p = p->next)
                        if (p->pack_local && p->pack_keep)
                                break;
                if (!p) /* no keep-able packs found */
 -                      ignore_packed_keep = 0;
 +                      ignore_packed_keep_on_disk = 0;
        }
        if (local) {
                /*
 -               * unlike ignore_packed_keep above, we do not want to
 -               * unset "local" based on looking at packs, as it
 -               * also covers non-local objects
 +               * unlike ignore_packed_keep_on_disk above, we do not
 +               * want to unset "local" based on looking at packs, as
 +               * it also covers non-local objects
                 */
                struct packed_git *p;
 -              for (p = packed_git; p; p = p->next) {
 +              for (p = get_packed_git(the_repository); p; p = p->next) {
                        if (!p->pack_local) {
                                have_non_local_packs = 1;
                                break;
                }
        }
  
 +      prepare_packing_data(&to_pack);
 +
        if (progress)
 -              progress_state = start_progress(_("Counting objects"), 0);
 +              progress_state = start_progress(_("Enumerating objects"), 0);
        if (!use_internal_rev_list)
                read_object_list_from_stdin();
        else {