builtin/pack-objects.c, on commit "Merge branch 'jk/describe-omit-some-refs'" (1b32498)
#include "builtin.h"
#include "cache.h"
#include "attr.h"
#include "object.h"
#include "blob.h"
#include "commit.h"
#include "tag.h"
#include "tree.h"
#include "delta.h"
#include "pack.h"
#include "pack-revindex.h"
#include "csum-file.h"
#include "tree-walk.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "pack-objects.h"
#include "progress.h"
#include "refs.h"
#include "streaming.h"
#include "thread-utils.h"
#include "pack-bitmap.h"
#include "reachable.h"
#include "sha1-array.h"
#include "argv-array.h"
#include "mru.h"

static const char *pack_usage[] = {
        N_("git pack-objects --stdout [<options>...] [< <ref-list> | < <object-list>]"),
        N_("git pack-objects [<options>...] <base-name> [< <ref-list> | < <object-list>]"),
        NULL
};

/*
 * Objects we are going to pack are collected in the `to_pack` structure.
 * It contains an array (dynamically expanded) of the object data, and a map
 * that can resolve SHA1s to their position in the array.
 */
static struct packing_data to_pack;

static struct pack_idx_entry **written_list;
static uint32_t nr_result, nr_written;

static int non_empty;
static int reuse_delta = 1, reuse_object = 1;
static int keep_unreachable, unpack_unreachable, include_tag;
static unsigned long unpack_unreachable_expiration;
static int pack_loose_unreachable;
static int local;
static int have_non_local_packs;
static int incremental;
static int ignore_packed_keep;
static int allow_ofs_delta;
static struct pack_idx_option pack_idx_opts;
static const char *base_name;
static int progress = 1;
static int window = 10;
static unsigned long pack_size_limit;
static int depth = 50;
static int delta_search_threads;
static int pack_to_stdout;
static int num_preferred_base;
static struct progress *progress_state;

static struct packed_git *reuse_packfile;
static uint32_t reuse_packfile_objects;
static off_t reuse_packfile_offset;

static int use_bitmap_index_default = 1;
static int use_bitmap_index = -1;
static int write_bitmap_index;
static uint16_t write_bitmap_options;

static unsigned long delta_cache_size = 0;
static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
static unsigned long cache_max_small_delta_size = 1000;

static unsigned long window_memory_limit = 0;

/*
 * stats
 */
static uint32_t written, written_delta;
static uint32_t reused, reused_delta;

/*
 * Indexed commits
 */
static struct commit **indexed_commits;
static unsigned int indexed_commits_nr;
static unsigned int indexed_commits_alloc;

static void index_commit_for_bitmap(struct commit *commit)
{
        if (indexed_commits_nr >= indexed_commits_alloc) {
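                /*
                 * amortized doubling: 0 -> 64 -> 192 -> 448 -> ...;
                 * the "+ 32" seeds the first growth step
                 */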
                indexed_commits_alloc = (indexed_commits_alloc + 32) * 2;
                REALLOC_ARRAY(indexed_commits, indexed_commits_alloc);
        }

        indexed_commits[indexed_commits_nr++] = commit;
}

static void *get_delta(struct object_entry *entry)
{
        unsigned long size, base_size, delta_size;
        void *buf, *base_buf, *delta_buf;
        enum object_type type;

        buf = read_sha1_file(entry->idx.sha1, &type, &size);
        if (!buf)
                die("unable to read %s", sha1_to_hex(entry->idx.sha1));
        base_buf = read_sha1_file(entry->delta->idx.sha1, &type, &base_size);
        if (!base_buf)
                die("unable to read %s", sha1_to_hex(entry->delta->idx.sha1));
        delta_buf = diff_delta(base_buf, base_size,
                               buf, size, &delta_size, 0);
        if (!delta_buf || delta_size != entry->delta_size)
                die("delta size changed");
        free(buf);
        free(base_buf);
        return delta_buf;
}

static unsigned long do_compress(void **pptr, unsigned long size)
{
        git_zstream stream;
        void *in, *out;
        unsigned long maxsize;

        git_deflate_init(&stream, pack_compression_level);
        maxsize = git_deflate_bound(&stream, size);

        in = *pptr;
        out = xmalloc(maxsize);
        *pptr = out;

        stream.next_in = in;
        stream.avail_in = size;
        stream.next_out = out;
        stream.avail_out = maxsize;
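        /*
         * The output buffer was sized with git_deflate_bound(), so
         * deflate cannot run out of space; a single Z_FINISH loop
         * suffices and ends once the stream reports Z_STREAM_END.
         */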
        while (git_deflate(&stream, Z_FINISH) == Z_OK)
                ; /* nothing */
        git_deflate_end(&stream);

        free(in);
        return stream.total_out;
}

static unsigned long write_large_blob_data(struct git_istream *st, struct sha1file *f,
                                           const unsigned char *sha1)
{
        git_zstream stream;
        unsigned char ibuf[1024 * 16];
        unsigned char obuf[1024 * 16];
        unsigned long olen = 0;

        git_deflate_init(&stream, pack_compression_level);

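        /*
         * Stream the blob through zlib in 16KB chunks so a huge blob
         * never has to sit in memory whole; readlen == 0 marks EOF, at
         * which point Z_FINISH flushes the stream and the loop exits
         * only on Z_STREAM_END.
         */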
        for (;;) {
                ssize_t readlen;
                int zret = Z_OK;
                readlen = read_istream(st, ibuf, sizeof(ibuf));
                if (readlen == -1)
                        die(_("unable to read %s"), sha1_to_hex(sha1));

                stream.next_in = ibuf;
                stream.avail_in = readlen;
                while ((stream.avail_in || readlen == 0) &&
                       (zret == Z_OK || zret == Z_BUF_ERROR)) {
                        stream.next_out = obuf;
                        stream.avail_out = sizeof(obuf);
                        zret = git_deflate(&stream, readlen ? 0 : Z_FINISH);
                        sha1write(f, obuf, stream.next_out - obuf);
                        olen += stream.next_out - obuf;
                }
                if (stream.avail_in)
                        die(_("deflate error (%d)"), zret);
                if (readlen == 0) {
                        if (zret != Z_STREAM_END)
                                die(_("deflate error (%d)"), zret);
                        break;
                }
        }
        git_deflate_end(&stream);
        return olen;
}

/*
 * We are going to reuse the existing object data as-is; make
 * sure it is not corrupt.
 */
static int check_pack_inflate(struct packed_git *p,
                struct pack_window **w_curs,
                off_t offset,
                off_t len,
                unsigned long expect)
{
        git_zstream stream;
        unsigned char fakebuf[4096], *in;
        int st;

        memset(&stream, 0, sizeof(stream));
        git_inflate_init(&stream);
        do {
                in = use_pack(p, w_curs, offset, &stream.avail_in);
                stream.next_in = in;
                stream.next_out = fakebuf;
                stream.avail_out = sizeof(fakebuf);
                st = git_inflate(&stream, Z_FINISH);
                offset += stream.next_in - in;
        } while (st == Z_OK || st == Z_BUF_ERROR);
        git_inflate_end(&stream);
        return (st == Z_STREAM_END &&
                stream.total_out == expect &&
                stream.total_in == len) ? 0 : -1;
}

static void copy_pack_data(struct sha1file *f,
                struct packed_git *p,
                struct pack_window **w_curs,
                off_t offset,
                off_t len)
{
        unsigned char *in;
        unsigned long avail;

        while (len) {
                in = use_pack(p, w_curs, offset, &avail);
                if (avail > len)
                        avail = (unsigned long)len;
                sha1write(f, in, avail);
                offset += avail;
                len -= avail;
        }
}

/* Return 0 if we will bust the pack-size limit */
static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
                                           unsigned long limit, int usable_delta)
{
        unsigned long size, datalen;
        unsigned char header[10], dheader[10];
        unsigned hdrlen;
        enum object_type type;
        void *buf;
        struct git_istream *st = NULL;

        if (!usable_delta) {
                if (entry->type == OBJ_BLOB &&
                    entry->size > big_file_threshold &&
                    (st = open_istream(entry->idx.sha1, &type, &size, NULL)) != NULL)
                        buf = NULL;
                else {
                        buf = read_sha1_file(entry->idx.sha1, &type, &size);
                        if (!buf)
                                die(_("unable to read %s"), sha1_to_hex(entry->idx.sha1));
                }
                /*
                 * make sure no cached delta data remains from a
                 * previous attempt before a pack split occurred.
                 */
                free(entry->delta_data);
                entry->delta_data = NULL;
                entry->z_delta_size = 0;
        } else if (entry->delta_data) {
                size = entry->delta_size;
                buf = entry->delta_data;
                entry->delta_data = NULL;
                type = (allow_ofs_delta && entry->delta->idx.offset) ?
                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
        } else {
                buf = get_delta(entry);
                size = entry->delta_size;
                type = (allow_ofs_delta && entry->delta->idx.offset) ?
                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
        }

        if (st) /* large blob case, just assume we don't compress well */
                datalen = size;
        else if (entry->z_delta_size)
                datalen = entry->z_delta_size;
        else
                datalen = do_compress(&buf, size);

        /*
         * The object header is a byte of 'type' followed by zero or
         * more bytes of length.
         */
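        /*
         * Illustrative example (values invented for this comment): a
         * blob of size 100 encodes as 0xb4 0x06 -- bits 6-4 of the
         * first byte carry the type, bits 3-0 the low size bits, and
         * the MSB says another byte with the next 7 size bits follows.
         */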
        hdrlen = encode_in_pack_object_header(type, size, header);

        if (type == OBJ_OFS_DELTA) {
                /*
                 * Deltas with relative base contain an additional
                 * encoding of the relative offset for the delta
                 * base from this object's position in the pack.
                 */
                off_t ofs = entry->idx.offset - entry->delta->idx.offset;
                unsigned pos = sizeof(dheader) - 1;
                dheader[pos] = ofs & 127;
                while (ofs >>= 7)
                        dheader[--pos] = 128 | (--ofs & 127);
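                /*
                 * Worked example: ofs = 128 becomes { 0x80, 0x00 }.
                 * The "--ofs" bias keeps encodings unique: without it,
                 * { 0x80, 0x00 } and { 0x00 } would both decode to 0.
                 */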
                if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
                        if (st)
                                close_istream(st);
                        free(buf);
                        return 0;
                }
                sha1write(f, header, hdrlen);
                sha1write(f, dheader + pos, sizeof(dheader) - pos);
                hdrlen += sizeof(dheader) - pos;
        } else if (type == OBJ_REF_DELTA) {
                /*
                 * Deltas with a base reference contain
                 * an additional 20 bytes for the base sha1.
                 */
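                /*
                 * Note: in these limit checks the trailing "+ 20"
                 * reserves room for the 20-byte SHA-1 trailer that
                 * closes every packfile.
                 */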
                if (limit && hdrlen + 20 + datalen + 20 >= limit) {
                        if (st)
                                close_istream(st);
                        free(buf);
                        return 0;
                }
                sha1write(f, header, hdrlen);
                sha1write(f, entry->delta->idx.sha1, 20);
                hdrlen += 20;
        } else {
                if (limit && hdrlen + datalen + 20 >= limit) {
                        if (st)
                                close_istream(st);
                        free(buf);
                        return 0;
                }
                sha1write(f, header, hdrlen);
        }
        if (st) {
                datalen = write_large_blob_data(st, f, entry->idx.sha1);
                close_istream(st);
        } else {
                sha1write(f, buf, datalen);
                free(buf);
        }

        return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_reuse_object(struct sha1file *f, struct object_entry *entry,
                                unsigned long limit, int usable_delta)
{
        struct packed_git *p = entry->in_pack;
        struct pack_window *w_curs = NULL;
        struct revindex_entry *revidx;
        off_t offset;
        enum object_type type = entry->type;
        off_t datalen;
        unsigned char header[10], dheader[10];
        unsigned hdrlen;

        if (entry->delta)
                type = (allow_ofs_delta && entry->delta->idx.offset) ?
                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
        hdrlen = encode_in_pack_object_header(type, entry->size, header);

        offset = entry->in_pack_offset;
        revidx = find_pack_revindex(p, offset);
        datalen = revidx[1].offset - offset;
        if (!pack_to_stdout && p->index_version > 1 &&
            check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
                error("bad packed object CRC for %s", sha1_to_hex(entry->idx.sha1));
                unuse_pack(&w_curs);
                return write_no_reuse_object(f, entry, limit, usable_delta);
        }

        offset += entry->in_pack_header_size;
        datalen -= entry->in_pack_header_size;

        if (!pack_to_stdout && p->index_version == 1 &&
            check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
                error("corrupt packed object for %s", sha1_to_hex(entry->idx.sha1));
                unuse_pack(&w_curs);
                return write_no_reuse_object(f, entry, limit, usable_delta);
        }

        if (type == OBJ_OFS_DELTA) {
                off_t ofs = entry->idx.offset - entry->delta->idx.offset;
                unsigned pos = sizeof(dheader) - 1;
                dheader[pos] = ofs & 127;
                while (ofs >>= 7)
                        dheader[--pos] = 128 | (--ofs & 127);
                if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
                        unuse_pack(&w_curs);
                        return 0;
                }
                sha1write(f, header, hdrlen);
                sha1write(f, dheader + pos, sizeof(dheader) - pos);
                hdrlen += sizeof(dheader) - pos;
                reused_delta++;
        } else if (type == OBJ_REF_DELTA) {
                if (limit && hdrlen + 20 + datalen + 20 >= limit) {
                        unuse_pack(&w_curs);
                        return 0;
                }
                sha1write(f, header, hdrlen);
                sha1write(f, entry->delta->idx.sha1, 20);
                hdrlen += 20;
                reused_delta++;
        } else {
                if (limit && hdrlen + datalen + 20 >= limit) {
                        unuse_pack(&w_curs);
                        return 0;
                }
                sha1write(f, header, hdrlen);
        }
        copy_pack_data(f, p, &w_curs, offset, datalen);
        unuse_pack(&w_curs);
        reused++;
        return hdrlen + datalen;
}

/* Return 0 if we will bust the pack-size limit */
static off_t write_object(struct sha1file *f,
                          struct object_entry *entry,
                          off_t write_offset)
{
        unsigned long limit;
        off_t len;
        int usable_delta, to_reuse;

        if (!pack_to_stdout)
                crc32_begin(f);

        /* apply size limit if limited packsize and not first object */
        if (!pack_size_limit || !nr_written)
                limit = 0;
        else if (pack_size_limit <= write_offset)
                /*
                 * the earlier object did not fit the limit; avoid
                 * mistaking this for unlimited (i.e. limit = 0).
                 */
                limit = 1;
        else
                limit = pack_size_limit - write_offset;

        if (!entry->delta)
                usable_delta = 0;       /* no delta */
        else if (!pack_size_limit)
                usable_delta = 1;       /* unlimited packfile */
        else if (entry->delta->idx.offset == (off_t)-1)
                usable_delta = 0;       /* base was written to another pack */
        else if (entry->delta->idx.offset)
                usable_delta = 1;       /* base already exists in this pack */
        else
                usable_delta = 0;       /* base could end up in another pack */

        if (!reuse_object)
                to_reuse = 0;   /* explicit */
        else if (!entry->in_pack)
                to_reuse = 0;   /* can't reuse what we don't have */
        else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
                                /* check_object() decided it for us ... */
                to_reuse = usable_delta;
                                /* ... but pack split may override that */
        else if (entry->type != entry->in_pack_type)
                to_reuse = 0;   /* pack has delta which is unusable */
        else if (entry->delta)
                to_reuse = 0;   /* we want to pack afresh */
        else
                to_reuse = 1;   /* we have it in-pack undeltified,
                                 * and we do not need to deltify it.
                                 */

        if (!to_reuse)
                len = write_no_reuse_object(f, entry, limit, usable_delta);
        else
                len = write_reuse_object(f, entry, limit, usable_delta);
        if (!len)
                return 0;

        if (usable_delta)
                written_delta++;
        written++;
        if (!pack_to_stdout)
                entry->idx.crc32 = crc32_end(f);
        return len;
}

enum write_one_status {
        WRITE_ONE_SKIP = -1, /* already written */
        WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
        WRITE_ONE_WRITTEN = 1, /* normal */
        WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
};

static enum write_one_status write_one(struct sha1file *f,
                                       struct object_entry *e,
                                       off_t *offset)
{
        off_t size;
        int recursing;

        /*
         * we set offset to 1 (which is an impossible value) to mark
         * the fact that this object is involved in "write its base
         * first before writing a deltified object" recursion.
         */
        recursing = (e->idx.offset == 1);
        if (recursing) {
                warning("recursive delta detected for object %s",
                        sha1_to_hex(e->idx.sha1));
                return WRITE_ONE_RECURSIVE;
        } else if (e->idx.offset || e->preferred_base) {
                /* offset is non zero if object is written already. */
                return WRITE_ONE_SKIP;
        }

        /* if we are deltified, write out base object first. */
        if (e->delta) {
                e->idx.offset = 1; /* now recurse */
                switch (write_one(f, e->delta, offset)) {
                case WRITE_ONE_RECURSIVE:
                        /* we cannot depend on this one */
                        e->delta = NULL;
                        break;
                default:
                        break;
                case WRITE_ONE_BREAK:
                        e->idx.offset = recursing;
                        return WRITE_ONE_BREAK;
                }
        }

        e->idx.offset = *offset;
        size = write_object(f, e, *offset);
        if (!size) {
                e->idx.offset = recursing;
                return WRITE_ONE_BREAK;
        }
        written_list[nr_written++] = &e->idx;

        /* make sure off_t is sufficiently large not to wrap */
        if (signed_add_overflows(*offset, size))
                die("pack too large for current definition of off_t");
        *offset += size;
        return WRITE_ONE_WRITTEN;
}

static int mark_tagged(const char *path, const struct object_id *oid, int flag,
                       void *cb_data)
{
        unsigned char peeled[20];
        struct object_entry *entry = packlist_find(&to_pack, oid->hash, NULL);

        if (entry)
                entry->tagged = 1;
        if (!peel_ref(path, peeled)) {
                entry = packlist_find(&to_pack, peeled, NULL);
                if (entry)
                        entry->tagged = 1;
        }
        return 0;
}

static inline void add_to_write_order(struct object_entry **wo,
                               unsigned int *endp,
                               struct object_entry *e)
{
        if (e->filled)
                return;
        wo[(*endp)++] = e;
        e->filled = 1;
}

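/*
 * Iterative depth-first walk over one delta family: when a node is
 * visited for the first time it is emitted together with all of its
 * siblings, then the walk descends into children; e->delta acts as the
 * parent pointer on the way back up, so no explicit stack is needed.
 */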
static void add_descendants_to_write_order(struct object_entry **wo,
                                           unsigned int *endp,
                                           struct object_entry *e)
{
        int add_to_order = 1;
        while (e) {
                if (add_to_order) {
                        struct object_entry *s;
                        /* add this node... */
                        add_to_write_order(wo, endp, e);
                        /* all its siblings... */
                        for (s = e->delta_sibling; s; s = s->delta_sibling) {
                                add_to_write_order(wo, endp, s);
                        }
                }
                /* drop down a level to add left subtree nodes if possible */
                if (e->delta_child) {
                        add_to_order = 1;
                        e = e->delta_child;
                } else {
                        add_to_order = 0;
                        /* our sibling might have some children, it is next */
                        if (e->delta_sibling) {
                                e = e->delta_sibling;
                                continue;
                        }
                        /* go back to our parent node */
                        e = e->delta;
                        while (e && !e->delta_sibling) {
                                /*
                                 * we're on the right side of a subtree, keep
                                 * going up until we can go right again
                                 */
                                e = e->delta;
                        }
                        if (!e) {
                                /* done: we hit our original root node */
                                return;
                        }
                        /* pass it off to sibling at this level */
                        e = e->delta_sibling;
                }
        }
}

static void add_family_to_write_order(struct object_entry **wo,
                                      unsigned int *endp,
                                      struct object_entry *e)
{
        struct object_entry *root;

        for (root = e; root->delta; root = root->delta)
                ; /* nothing */
        add_descendants_to_write_order(wo, endp, root);
}

static struct object_entry **compute_write_order(void)
{
        unsigned int i, wo_end, last_untagged;

        struct object_entry **wo;
        struct object_entry *objects = to_pack.objects;

        for (i = 0; i < to_pack.nr_objects; i++) {
                objects[i].tagged = 0;
                objects[i].filled = 0;
                objects[i].delta_child = NULL;
                objects[i].delta_sibling = NULL;
        }

        /*
         * Fully connect delta_child/delta_sibling network.
         * Make sure delta_sibling is sorted in the original
         * recency order.
         */
        for (i = to_pack.nr_objects; i > 0;) {
                struct object_entry *e = &objects[--i];
                if (!e->delta)
                        continue;
                /* Mark me as the first child */
                e->delta_sibling = e->delta->delta_child;
                e->delta->delta_child = e;
        }

        /*
         * Mark objects that are at the tip of tags.
         */
        for_each_tag_ref(mark_tagged, NULL);

        /*
         * Give the objects in the original recency order until
         * we see a tagged tip.
         */
        ALLOC_ARRAY(wo, to_pack.nr_objects);
        for (i = wo_end = 0; i < to_pack.nr_objects; i++) {
                if (objects[i].tagged)
                        break;
                add_to_write_order(wo, &wo_end, &objects[i]);
        }
        last_untagged = i;

        /*
         * Then fill all the tagged tips.
         */
        for (; i < to_pack.nr_objects; i++) {
                if (objects[i].tagged)
                        add_to_write_order(wo, &wo_end, &objects[i]);
        }

        /*
         * And then all remaining commits and tags.
         */
        for (i = last_untagged; i < to_pack.nr_objects; i++) {
                if (objects[i].type != OBJ_COMMIT &&
                    objects[i].type != OBJ_TAG)
                        continue;
                add_to_write_order(wo, &wo_end, &objects[i]);
        }

        /*
         * And then all the trees.
         */
        for (i = last_untagged; i < to_pack.nr_objects; i++) {
                if (objects[i].type != OBJ_TREE)
                        continue;
                add_to_write_order(wo, &wo_end, &objects[i]);
        }

        /*
         * Finally all the rest in really tight order
         */
        for (i = last_untagged; i < to_pack.nr_objects; i++) {
                if (!objects[i].filled)
                        add_family_to_write_order(wo, &wo_end, &objects[i]);
        }

        if (wo_end != to_pack.nr_objects)
                die("ordered %u objects, expected %"PRIu32, wo_end, to_pack.nr_objects);

        return wo;
}

static off_t write_reused_pack(struct sha1file *f)
{
        unsigned char buffer[8192];
        off_t to_write, total;
        int fd;

        if (!is_pack_valid(reuse_packfile))
                die("packfile is invalid: %s", reuse_packfile->pack_name);

        fd = git_open(reuse_packfile->pack_name);
        if (fd < 0)
                die_errno("unable to open packfile for reuse: %s",
                          reuse_packfile->pack_name);

        if (lseek(fd, sizeof(struct pack_header), SEEK_SET) == -1)
                die_errno("unable to seek in reused packfile");

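        /*
         * A negative offset appears to be the "reuse everything"
         * signal; the reusable byte range then ends just before the
         * pack's 20-byte SHA-1 trailer.
         */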
        if (reuse_packfile_offset < 0)
                reuse_packfile_offset = reuse_packfile->pack_size - 20;

        total = to_write = reuse_packfile_offset - sizeof(struct pack_header);

        while (to_write) {
                int read_pack = xread(fd, buffer, sizeof(buffer));

                if (read_pack <= 0)
                        die_errno("unable to read from reused packfile");

                if (read_pack > to_write)
                        read_pack = to_write;

                sha1write(f, buffer, read_pack);
                to_write -= read_pack;

                /*
                 * We don't know the actual number of objects written,
                 * only how many bytes written, how many bytes total, and
                 * how many objects total. So we can fake it by pretending all
                 * objects we are writing are the same size. This gives us a
                 * smooth progress meter, and at the end it matches the true
                 * answer.
                 */
                written = reuse_packfile_objects *
                                (((double)(total - to_write)) / total);
                display_progress(progress_state, written);
        }

        close(fd);
        written = reuse_packfile_objects;
        display_progress(progress_state, written);
        return reuse_packfile_offset - sizeof(struct pack_header);
}

static const char no_split_warning[] = N_(
"disabling bitmap writing, packs are split due to pack.packSizeLimit"
);

static void write_pack_file(void)
{
        uint32_t i = 0, j;
        struct sha1file *f;
        off_t offset;
        uint32_t nr_remaining = nr_result;
        time_t last_mtime = 0;
        struct object_entry **write_order;

        if (progress > pack_to_stdout)
                progress_state = start_progress(_("Writing objects"), nr_result);
        ALLOC_ARRAY(written_list, to_pack.nr_objects);
        write_order = compute_write_order();

        do {
                unsigned char sha1[20];
                char *pack_tmp_name = NULL;

                if (pack_to_stdout)
                        f = sha1fd_throughput(1, "<stdout>", progress_state);
                else
                        f = create_tmp_packfile(&pack_tmp_name);

                offset = write_pack_header(f, nr_remaining);

                if (reuse_packfile) {
                        off_t packfile_size;
                        assert(pack_to_stdout);

                        packfile_size = write_reused_pack(f);
                        offset += packfile_size;
                }

                nr_written = 0;
                for (; i < to_pack.nr_objects; i++) {
                        struct object_entry *e = write_order[i];
                        if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
                                break;
                        display_progress(progress_state, written);
                }

                /*
                 * Did we write the wrong # entries in the header?
                 * If so, rewrite it like in fast-import
                 */
                if (pack_to_stdout) {
                        sha1close(f, sha1, CSUM_CLOSE);
                } else if (nr_written == nr_remaining) {
                        sha1close(f, sha1, CSUM_FSYNC);
                } else {
                        int fd = sha1close(f, sha1, 0);
                        fixup_pack_header_footer(fd, sha1, pack_tmp_name,
                                                 nr_written, sha1, offset);
                        close(fd);
                        if (write_bitmap_index) {
                                warning(_(no_split_warning));
                                write_bitmap_index = 0;
                        }
                }

                if (!pack_to_stdout) {
                        struct stat st;
                        struct strbuf tmpname = STRBUF_INIT;

                        /*
                         * Packs are runtime accessed in their mtime
                         * order since newer packs are more likely to contain
                         * younger objects.  So if we are creating multiple
                         * packs then we should modify the mtime of later ones
                         * to preserve this property.
                         */
                        if (stat(pack_tmp_name, &st) < 0) {
                                warning_errno("failed to stat %s", pack_tmp_name);
                        } else if (!last_mtime) {
                                last_mtime = st.st_mtime;
                        } else {
                                struct utimbuf utb;
                                utb.actime = st.st_atime;
                                utb.modtime = --last_mtime;
                                if (utime(pack_tmp_name, &utb) < 0)
                                        warning_errno("failed utime() on %s", pack_tmp_name);
                        }

                        strbuf_addf(&tmpname, "%s-", base_name);

                        if (write_bitmap_index) {
                                bitmap_writer_set_checksum(sha1);
                                bitmap_writer_build_type_index(written_list, nr_written);
                        }

                        finish_tmp_packfile(&tmpname, pack_tmp_name,
                                            written_list, nr_written,
                                            &pack_idx_opts, sha1);

                        if (write_bitmap_index) {
                                strbuf_addf(&tmpname, "%s.bitmap", sha1_to_hex(sha1));

                                stop_progress(&progress_state);

                                bitmap_writer_show_progress(progress);
                                bitmap_writer_reuse_bitmaps(&to_pack);
                                bitmap_writer_select_commits(indexed_commits, indexed_commits_nr, -1);
                                bitmap_writer_build(&to_pack);
                                bitmap_writer_finish(written_list, nr_written,
                                                     tmpname.buf, write_bitmap_options);
                                write_bitmap_index = 0;
                        }

                        strbuf_release(&tmpname);
                        free(pack_tmp_name);
                        puts(sha1_to_hex(sha1));
                }

                /* mark written objects as written to previous pack */
                for (j = 0; j < nr_written; j++) {
                        written_list[j]->offset = (off_t)-1;
                }
                nr_remaining -= nr_written;
        } while (nr_remaining && i < to_pack.nr_objects);

        free(written_list);
        free(write_order);
        stop_progress(&progress_state);
        if (written != nr_result)
                die("wrote %"PRIu32" objects while expecting %"PRIu32,
                        written, nr_result);
}

static void setup_delta_attr_check(struct git_attr_check *check)
{
        static struct git_attr *attr_delta;

        if (!attr_delta)
                attr_delta = git_attr("delta");

        check[0].attr = attr_delta;
}

static int no_try_delta(const char *path)
{
        struct git_attr_check check[1];

        setup_delta_attr_check(check);
        if (git_check_attr(path, ARRAY_SIZE(check), check))
                return 0;
        if (ATTR_FALSE(check->value))
                return 1;
        return 0;
}

/*
 * When adding an object, check whether we have already added it
 * to our packing list. If so, we can skip. However, if we are
 * being asked to exclude it, but the previous mention was to include
 * it, make sure to adjust its flags and tweak our numbers accordingly.
 *
 * As an optimization, we pass out the index position where we would have
 * found the item, since that saves us from having to look it up again a
 * few lines later when we want to add the new entry.
 */
static int have_duplicate_entry(const unsigned char *sha1,
                                int exclude,
                                uint32_t *index_pos)
{
        struct object_entry *entry;

        entry = packlist_find(&to_pack, sha1, index_pos);
        if (!entry)
                return 0;

        if (exclude) {
                if (!entry->preferred_base)
                        nr_result--;
                entry->preferred_base = 1;
        }

        return 1;
}

static int want_found_object(int exclude, struct packed_git *p)
{
        if (exclude)
                return 1;
        if (incremental)
                return 0;

        /*
         * When asked to do --local (do not include an object that appears in a
         * pack we borrow from elsewhere) or --honor-pack-keep (do not include
         * an object that appears in a pack marked with .keep), finding a pack
         * that matches the criteria is sufficient for us to decide to omit it.
         * However, even if this pack does not satisfy the criteria, we need to
         * make sure no copy of this object appears in _any_ pack that would
         * make us omit the object, so we need to check all the packs.
         *
         * We can however first check whether these options can possibly
         * matter; if they do not matter we know we want the object in the
         * generated pack. Otherwise, we signal "-1" at the end to tell the
         * caller that we do not know either way, and it needs to check more
         * packs.
         */
        if (!ignore_packed_keep &&
            (!local || !have_non_local_packs))
                return 1;

        if (local && !p->pack_local)
                return 0;
        if (ignore_packed_keep && p->pack_local && p->pack_keep)
                return 0;

        /* we don't know yet; keep looking for more packs */
        return -1;
}

/*
 * Check whether we want the object in the pack (e.g., we do not want
 * objects found in non-local stores if the "--local" option was used).
 *
 * If the caller already knows an existing pack it wants to take the object
 * from, that is passed in *found_pack and *found_offset; otherwise this
 * function finds if there is any pack that has the object and returns the pack
 * and its offset in these variables.
 */
static int want_object_in_pack(const unsigned char *sha1,
                               int exclude,
                               struct packed_git **found_pack,
                               off_t *found_offset)
{
        struct mru_entry *entry;
        int want;

        if (!exclude && local && has_loose_object_nonlocal(sha1))
                return 0;

        /*
         * If we already know the pack the object lives in, start checks from
         * that pack - in the usual case when neither --local was given nor
         * .keep files are present we will determine the answer right now.
         */
        if (*found_pack) {
                want = want_found_object(exclude, *found_pack);
                if (want != -1)
                        return want;
        }

        for (entry = packed_git_mru->head; entry; entry = entry->next) {
                struct packed_git *p = entry->item;
                off_t offset;

                if (p == *found_pack)
                        offset = *found_offset;
                else
                        offset = find_pack_entry_one(sha1, p);

                if (offset) {
                        if (!*found_pack) {
                                if (!is_pack_valid(p))
                                        continue;
                                *found_offset = offset;
                                *found_pack = p;
                        }
                        want = want_found_object(exclude, p);
                        if (!exclude && want > 0)
                                mru_mark(packed_git_mru, entry);
                        if (want != -1)
                                return want;
                }
        }

        return 1;
}

static void create_object_entry(const unsigned char *sha1,
                                enum object_type type,
                                uint32_t hash,
                                int exclude,
                                int no_try_delta,
                                uint32_t index_pos,
                                struct packed_git *found_pack,
                                off_t found_offset)
{
        struct object_entry *entry;

        entry = packlist_alloc(&to_pack, sha1, index_pos);
        entry->hash = hash;
        if (type)
                entry->type = type;
        if (exclude)
                entry->preferred_base = 1;
        else
                nr_result++;
        if (found_pack) {
                entry->in_pack = found_pack;
                entry->in_pack_offset = found_offset;
        }

        entry->no_try_delta = no_try_delta;
}

static const char no_closure_warning[] = N_(
"disabling bitmap writing, as some objects are not being packed"
);

static int add_object_entry(const unsigned char *sha1, enum object_type type,
                            const char *name, int exclude)
{
        struct packed_git *found_pack = NULL;
        off_t found_offset = 0;
        uint32_t index_pos;

        if (have_duplicate_entry(sha1, exclude, &index_pos))
                return 0;

        if (!want_object_in_pack(sha1, exclude, &found_pack, &found_offset)) {
                /* The pack is missing an object, so it will not have closure */
                if (write_bitmap_index) {
                        warning(_(no_closure_warning));
                        write_bitmap_index = 0;
                }
                return 0;
        }

        create_object_entry(sha1, type, pack_name_hash(name),
                            exclude, name && no_try_delta(name),
                            index_pos, found_pack, found_offset);

        display_progress(progress_state, nr_result);
        return 1;
}

static int add_object_entry_from_bitmap(const unsigned char *sha1,
                                        enum object_type type,
                                        int flags, uint32_t name_hash,
                                        struct packed_git *pack, off_t offset)
{
        uint32_t index_pos;

        if (have_duplicate_entry(sha1, 0, &index_pos))
                return 0;

        if (!want_object_in_pack(sha1, 0, &pack, &offset))
                return 0;

        create_object_entry(sha1, type, name_hash, 0, 0, index_pos, pack, offset);

        display_progress(progress_state, nr_result);
        return 1;
}

struct pbase_tree_cache {
        unsigned char sha1[20];
        int ref;
        int temporary;
        void *tree_data;
        unsigned long tree_size;
};

static struct pbase_tree_cache *(pbase_tree_cache[256]);
static int pbase_tree_cache_ix(const unsigned char *sha1)
{
        return sha1[0] % ARRAY_SIZE(pbase_tree_cache);
}
static int pbase_tree_cache_ix_incr(int ix)
{
        return (ix+1) % ARRAY_SIZE(pbase_tree_cache);
}
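
/*
 * Together these implement a small open-addressing hash table: lookups
 * start at slot sha1[0] % 256 and probe at most 8 consecutive slots
 * (see pbase_tree_get() below).
 */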

static struct pbase_tree {
        struct pbase_tree *next;
        /* This is a phony "cache" entry; we are not
         * going to evict it or find it through _get()
         * mechanism -- this is for the toplevel node that
         * would almost always change with any commit.
         */
        struct pbase_tree_cache pcache;
} *pbase_tree;

static struct pbase_tree_cache *pbase_tree_get(const unsigned char *sha1)
{
        struct pbase_tree_cache *ent, *nent;
        void *data;
        unsigned long size;
        enum object_type type;
        int neigh;
        int my_ix = pbase_tree_cache_ix(sha1);
        int available_ix = -1;

        /* pbase_tree_cache acts as a limited hashtable.
         * An object will be found at its index, or within a few
         * slots after that slot, if it is cached.
         */
        for (neigh = 0; neigh < 8; neigh++) {
                ent = pbase_tree_cache[my_ix];
                if (ent && !hashcmp(ent->sha1, sha1)) {
                        ent->ref++;
                        return ent;
                }
                else if (((available_ix < 0) && (!ent || !ent->ref)) ||
                         ((0 <= available_ix) &&
                          (!ent && pbase_tree_cache[available_ix])))
                        available_ix = my_ix;
                if (!ent)
                        break;
                my_ix = pbase_tree_cache_ix_incr(my_ix);
        }

        /* Did not find one.  Either we got a bogus request or
         * we need to read and perhaps cache.
         */
        data = read_sha1_file(sha1, &type, &size);
        if (!data)
                return NULL;
        if (type != OBJ_TREE) {
                free(data);
                return NULL;
        }

        /* We need to either cache or return a throwaway copy */

        if (available_ix < 0)
                ent = NULL;
        else {
                ent = pbase_tree_cache[available_ix];
                my_ix = available_ix;
        }

        if (!ent) {
                nent = xmalloc(sizeof(*nent));
                nent->temporary = (available_ix < 0);
        }
        else {
                /* evict and reuse */
                free(ent->tree_data);
                nent = ent;
        }
        hashcpy(nent->sha1, sha1);
        nent->tree_data = data;
        nent->tree_size = size;
        nent->ref = 1;
        if (!nent->temporary)
                pbase_tree_cache[my_ix] = nent;
        return nent;
}

static void pbase_tree_put(struct pbase_tree_cache *cache)
{
        if (!cache->temporary) {
                cache->ref--;
                return;
        }
        free(cache->tree_data);
        free(cache);
}

static int name_cmp_len(const char *name)
{
        int i;
        for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
                ;
        return i;
}

static void add_pbase_object(struct tree_desc *tree,
                             const char *name,
                             int cmplen,
                             const char *fullname)
{
        struct name_entry entry;
        int cmp;

        while (tree_entry(tree, &entry)) {
                if (S_ISGITLINK(entry.mode))
                        continue;
                cmp = tree_entry_len(&entry) != cmplen ? 1 :
                      memcmp(name, entry.path, cmplen);
                if (cmp > 0)
                        continue;
                if (cmp < 0)
                        return;
                if (name[cmplen] != '/') {
                        add_object_entry(entry.oid->hash,
                                         object_type(entry.mode),
                                         fullname, 1);
                        return;
                }
                if (S_ISDIR(entry.mode)) {
                        struct tree_desc sub;
                        struct pbase_tree_cache *tree;
                        const char *down = name+cmplen+1;
                        int downlen = name_cmp_len(down);

                        tree = pbase_tree_get(entry.oid->hash);
                        if (!tree)
                                return;
                        init_tree_desc(&sub, tree->tree_data, tree->tree_size);

                        add_pbase_object(&sub, down, downlen, fullname);
                        pbase_tree_put(tree);
                }
        }
}

static unsigned *done_pbase_paths;
static int done_pbase_paths_num;
static int done_pbase_paths_alloc;
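/*
 * Binary search over done_pbase_paths[], which is kept sorted in
 * descending hash order; returns the index of a match, or
 * -(insertion point)-1 when absent, a convention check_pbase_path()
 * undoes with "pos = -pos - 1" before inserting.
 */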
static int done_pbase_path_pos(unsigned hash)
{
        int lo = 0;
        int hi = done_pbase_paths_num;
        while (lo < hi) {
                int mi = (hi + lo) / 2;
                if (done_pbase_paths[mi] == hash)
                        return mi;
                if (done_pbase_paths[mi] < hash)
                        hi = mi;
                else
                        lo = mi + 1;
        }
        return -lo-1;
}

static int check_pbase_path(unsigned hash)
{
        int pos = (!done_pbase_paths) ? -1 : done_pbase_path_pos(hash);
        if (0 <= pos)
                return 1;
        pos = -pos - 1;
        ALLOC_GROW(done_pbase_paths,
                   done_pbase_paths_num + 1,
                   done_pbase_paths_alloc);
        done_pbase_paths_num++;
        if (pos < done_pbase_paths_num)
                memmove(done_pbase_paths + pos + 1,
                        done_pbase_paths + pos,
                        (done_pbase_paths_num - pos - 1) * sizeof(unsigned));
        done_pbase_paths[pos] = hash;
        return 0;
}

static void add_preferred_base_object(const char *name)
{
        struct pbase_tree *it;
        int cmplen;
        unsigned hash = pack_name_hash(name);

        if (!num_preferred_base || check_pbase_path(hash))
                return;

        cmplen = name_cmp_len(name);
        for (it = pbase_tree; it; it = it->next) {
                if (cmplen == 0) {
                        add_object_entry(it->pcache.sha1, OBJ_TREE, NULL, 1);
                }
                else {
                        struct tree_desc tree;
                        init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
                        add_pbase_object(&tree, name, cmplen, name);
                }
        }
}

static void add_preferred_base(unsigned char *sha1)
{
        struct pbase_tree *it;
        void *data;
        unsigned long size;
        unsigned char tree_sha1[20];

        if (window <= num_preferred_base++)
                return;

        data = read_object_with_reference(sha1, tree_type, &size, tree_sha1);
        if (!data)
                return;

        for (it = pbase_tree; it; it = it->next) {
                if (!hashcmp(it->pcache.sha1, tree_sha1)) {
                        free(data);
                        return;
                }
        }

        it = xcalloc(1, sizeof(*it));
        it->next = pbase_tree;
        pbase_tree = it;

        hashcpy(it->pcache.sha1, tree_sha1);
        it->pcache.tree_data = data;
        it->pcache.tree_size = size;
}

static void cleanup_preferred_base(void)
{
        struct pbase_tree *it;
        unsigned i;

        it = pbase_tree;
        pbase_tree = NULL;
        while (it) {
                struct pbase_tree *this = it;
                it = this->next;
                free(this->pcache.tree_data);
                free(this);
        }

        for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
                if (!pbase_tree_cache[i])
                        continue;
                free(pbase_tree_cache[i]->tree_data);
                free(pbase_tree_cache[i]);
                pbase_tree_cache[i] = NULL;
        }

        free(done_pbase_paths);
        done_pbase_paths = NULL;
        done_pbase_paths_num = done_pbase_paths_alloc = 0;
}

static void check_object(struct object_entry *entry)
{
        if (entry->in_pack) {
                struct packed_git *p = entry->in_pack;
                struct pack_window *w_curs = NULL;
                const unsigned char *base_ref = NULL;
                struct object_entry *base_entry;
                unsigned long used, used_0;
                unsigned long avail;
                off_t ofs;
                unsigned char *buf, c;

                buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);

                /*
                 * We want in_pack_type even if we do not reuse delta
                 * since non-delta representations could still be reused.
                 */
                used = unpack_object_header_buffer(buf, avail,
                                                   &entry->in_pack_type,
                                                   &entry->size);
                if (used == 0)
                        goto give_up;

                /*
                 * Determine if this is a delta and if so whether we can
                 * reuse it or not.  Otherwise let's find out as cheaply as
                 * possible what the actual type and size for this object is.
                 */
                switch (entry->in_pack_type) {
                default:
                        /* Not a delta hence we've already got all we need. */
                        entry->type = entry->in_pack_type;
                        entry->in_pack_header_size = used;
                        if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
                                goto give_up;
                        unuse_pack(&w_curs);
                        return;
                case OBJ_REF_DELTA:
                        if (reuse_delta && !entry->preferred_base)
                                base_ref = use_pack(p, &w_curs,
                                                entry->in_pack_offset + used, NULL);
                        entry->in_pack_header_size = used + 20;
                        break;
                case OBJ_OFS_DELTA:
                        buf = use_pack(p, &w_curs,
                                       entry->in_pack_offset + used, NULL);
                        used_0 = 0;
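                        /*
                         * Decode the delta base offset varint: the
                         * inverse of the encoder in
                         * write_no_reuse_object(), re-adding the +1
                         * bias on each continuation byte.
                         */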
1434                        c = buf[used_0++];
1435                        ofs = c & 127;
1436                        while (c & 128) {
1437                                ofs += 1;
1438                                if (!ofs || MSB(ofs, 7)) {
1439                                        error("delta base offset overflow in pack for %s",
1440                                              sha1_to_hex(entry->idx.sha1));
1441                                        goto give_up;
1442                                }
1443                                c = buf[used_0++];
1444                                ofs = (ofs << 7) + (c & 127);
1445                        }
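                            /*
                             * For illustration (added note, not in the original
                             * source): the base offset is a big-endian base-128
                             * varint where each continuation adds one, so the
                             * bytes 0x91 0x2e decode to
                             * ((0x11 + 1) << 7) | 0x2e = 2350, i.e. the base
                             * object starts 2350 bytes before this entry.
                             */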
1446                        ofs = entry->in_pack_offset - ofs;
1447                        if (ofs <= 0 || ofs >= entry->in_pack_offset) {
1448                                error("delta base offset out of bounds for %s",
1449                                      sha1_to_hex(entry->idx.sha1));
1450                                goto give_up;
1451                        }
1452                        if (reuse_delta && !entry->preferred_base) {
1453                                struct revindex_entry *revidx;
1454                                revidx = find_pack_revindex(p, ofs);
1455                                if (!revidx)
1456                                        goto give_up;
1457                                base_ref = nth_packed_object_sha1(p, revidx->nr);
1458                        }
1459                        entry->in_pack_header_size = used + used_0;
1460                        break;
1461                }
1462
1463                if (base_ref && (base_entry = packlist_find(&to_pack, base_ref, NULL))) {
1464                        /*
1465                         * If base_ref was set above that means we wish to
1466                         * reuse delta data, and we even found that base
1467                         * in the list of objects we want to pack. Goodie!
1468                         *
1469                         * Depth value does not matter - find_deltas() will
1470                         * never consider reused delta as the base object to
1471                         * deltify other objects against, in order to avoid
1472                         * circular deltas.
1473                         */
1474                        entry->type = entry->in_pack_type;
1475                        entry->delta = base_entry;
1476                        entry->delta_size = entry->size;
1477                        entry->delta_sibling = base_entry->delta_child;
1478                        base_entry->delta_child = entry;
1479                        unuse_pack(&w_curs);
1480                        return;
1481                }
1482
1483                if (entry->type) {
1484                        /*
1485                         * This must be a delta and we already know what the
1486                         * final object type is.  Let's extract the actual
1487                         * object size from the delta header.
1488                         */
1489                        entry->size = get_size_from_delta(p, &w_curs,
1490                                        entry->in_pack_offset + entry->in_pack_header_size);
1491                        if (entry->size == 0)
1492                                goto give_up;
1493                        unuse_pack(&w_curs);
1494                        return;
1495                }
1496
1497                /*
1498                 * No choice but to fall back to the recursive delta walk
1499                 * with sha1_object_info() to find out the object type
1500                 * at this point...
1501                 */
1502                give_up:
1503                unuse_pack(&w_curs);
1504        }
1505
1506        entry->type = sha1_object_info(entry->idx.sha1, &entry->size);
1507        /*
1508         * The error condition is checked in prepare_pack().  This is
1509         * to permit a missing preferred base object to be ignored
1510         * as a preferred base.  Doing so can result in a larger
1511         * pack file, but the transfer will still take place.
1512         */
1513}
1514
1515static int pack_offset_sort(const void *_a, const void *_b)
1516{
1517        const struct object_entry *a = *(struct object_entry **)_a;
1518        const struct object_entry *b = *(struct object_entry **)_b;
1519
1520        /* avoid filesystem thrashing with loose objects */
1521        if (!a->in_pack && !b->in_pack)
1522                return hashcmp(a->idx.sha1, b->idx.sha1);
1523
1524        if (a->in_pack < b->in_pack)
1525                return -1;
1526        if (a->in_pack > b->in_pack)
1527                return 1;
1528        return a->in_pack_offset < b->in_pack_offset ? -1 :
1529                        (a->in_pack_offset > b->in_pack_offset);
1530}
1531
1532/*
1533 * Drop an on-disk delta we were planning to reuse. Naively, this would
1534 * just involve blanking out the "delta" field, but we have to deal
1535 * with some extra book-keeping:
1536 *
1537 *   1. Removing ourselves from the delta_sibling linked list.
1538 *
1539 *   2. Updating our size/type to the non-delta representation. These were
1540 *      either not recorded initially (size) or overwritten with the delta type
1541 *      (type) when check_object() decided to reuse the delta.
1542 */
1543static void drop_reused_delta(struct object_entry *entry)
1544{
1545        struct object_entry **p = &entry->delta->delta_child;
1546        struct object_info oi = OBJECT_INFO_INIT;
1547
1548        while (*p) {
1549                if (*p == entry)
1550                        *p = (*p)->delta_sibling;
1551                else
1552                        p = &(*p)->delta_sibling;
1553        }
1554        entry->delta = NULL;
1555
1556        oi.sizep = &entry->size;
1557        oi.typep = &entry->type;
1558        if (packed_object_info(entry->in_pack, entry->in_pack_offset, &oi) < 0) {
1559                /*
1560                 * We failed to get the info from this pack for some reason;
1561                 * fall back to sha1_object_info, which may find another copy.
1562                 * And if that fails, the error will be recorded in entry->type
1563                 * and dealt with in prepare_pack().
1564                 */
1565                entry->type = sha1_object_info(entry->idx.sha1, &entry->size);
1566        }
1567}
1568
1569/*
1570 * Follow the chain of deltas from this entry onward, throwing away any links
1571 * that cause us to hit a cycle (as determined by the DFS state flags in
1572 * the entries).
1573 */
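    /*
     * A concrete illustration (added note): if A's on-disk delta base is
     * B, B's base is C, and C's base is A, the walk marks A, B and C
     * DFS_ACTIVE in turn, re-reaches A while it is still active, and
     * drops only A's reused delta; B and C keep theirs.
     */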
1574static void break_delta_chains(struct object_entry *entry)
1575{
1576        /* If it's not a delta, it can't be part of a cycle. */
1577        if (!entry->delta) {
1578                entry->dfs_state = DFS_DONE;
1579                return;
1580        }
1581
1582        switch (entry->dfs_state) {
1583        case DFS_NONE:
1584                /*
1585                 * This is the first time we've seen the object. We mark it as
1586                 * part of the active potential cycle and recurse.
1587                 */
1588                entry->dfs_state = DFS_ACTIVE;
1589                break_delta_chains(entry->delta);
1590                entry->dfs_state = DFS_DONE;
1591                break;
1592
1593        case DFS_DONE:
1594                /* object already examined, and not part of a cycle */
1595                break;
1596
1597        case DFS_ACTIVE:
1598                /*
1599                 * We found a cycle that needs to be broken. It would be
1600                 * correct to break any link in the chain, but it is
1601                 * convenient to break this one.
1602                 */
1603                drop_reused_delta(entry);
1604                entry->dfs_state = DFS_DONE;
1605                break;
1606        }
1607}
1608
1609static void get_object_details(void)
1610{
1611        uint32_t i;
1612        struct object_entry **sorted_by_offset;
1613
1614        sorted_by_offset = xcalloc(to_pack.nr_objects, sizeof(struct object_entry *));
1615        for (i = 0; i < to_pack.nr_objects; i++)
1616                sorted_by_offset[i] = to_pack.objects + i;
1617        QSORT(sorted_by_offset, to_pack.nr_objects, pack_offset_sort);
1618
1619        for (i = 0; i < to_pack.nr_objects; i++) {
1620                struct object_entry *entry = sorted_by_offset[i];
1621                check_object(entry);
1622                if (big_file_threshold < entry->size)
1623                        entry->no_try_delta = 1;
1624        }
1625
1626        /*
1627         * This must happen in a second pass, since we rely on the delta
1628         * information for the whole list being completed.
1629         */
1630        for (i = 0; i < to_pack.nr_objects; i++)
1631                break_delta_chains(&to_pack.objects[i]);
1632
1633        free(sorted_by_offset);
1634}
1635
1636/*
1637 * We search for deltas in a list sorted by type, by filename hash, and then
1638 * by size, so that we see progressively smaller and smaller files.
1639 * That's because we prefer deltas to be from the bigger file
1640 * to the smaller -- deletes are potentially cheaper, but perhaps
1641 * more importantly, the bigger file is likely the more recent
1642 * one.  The deepest deltas are therefore the oldest objects, which are
1643 * the least likely to be accessed often.
1644 */
1645static int type_size_sort(const void *_a, const void *_b)
1646{
1647        const struct object_entry *a = *(struct object_entry **)_a;
1648        const struct object_entry *b = *(struct object_entry **)_b;
1649
1650        if (a->type > b->type)
1651                return -1;
1652        if (a->type < b->type)
1653                return 1;
1654        if (a->hash > b->hash)
1655                return -1;
1656        if (a->hash < b->hash)
1657                return 1;
1658        if (a->preferred_base > b->preferred_base)
1659                return -1;
1660        if (a->preferred_base < b->preferred_base)
1661                return 1;
1662        if (a->size > b->size)
1663                return -1;
1664        if (a->size < b->size)
1665                return 1;
1666        return a < b ? -1 : (a > b);  /* newest first */
1667}
1668
1669struct unpacked {
1670        struct object_entry *entry;
1671        void *data;
1672        struct delta_index *index;
1673        unsigned depth;
1674};
1675
1676static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
1677                           unsigned long delta_size)
1678{
1679        if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)
1680                return 0;
1681
1682        if (delta_size < cache_max_small_delta_size)
1683                return 1;
1684
1685        /* cache delta, if objects are large enough compared to delta size */
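            /*
             * Worked example with illustrative sizes: for a 4 MiB source
             * and a 4 MiB target the left-hand side is 4 + 2 = 6, so
             * deltas up to roughly 6 KiB are considered worth caching.
             */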
1686        if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))
1687                return 1;
1688
1689        return 0;
1690}
1691
1692#ifndef NO_PTHREADS
1693
1694static pthread_mutex_t read_mutex;
1695#define read_lock()             pthread_mutex_lock(&read_mutex)
1696#define read_unlock()           pthread_mutex_unlock(&read_mutex)
1697
1698static pthread_mutex_t cache_mutex;
1699#define cache_lock()            pthread_mutex_lock(&cache_mutex)
1700#define cache_unlock()          pthread_mutex_unlock(&cache_mutex)
1701
1702static pthread_mutex_t progress_mutex;
1703#define progress_lock()         pthread_mutex_lock(&progress_mutex)
1704#define progress_unlock()       pthread_mutex_unlock(&progress_mutex)
1705
1706#else
1707
1708#define read_lock()             (void)0
1709#define read_unlock()           (void)0
1710#define cache_lock()            (void)0
1711#define cache_unlock()          (void)0
1712#define progress_lock()         (void)0
1713#define progress_unlock()       (void)0
1714
1715#endif
1716
1717static int try_delta(struct unpacked *trg, struct unpacked *src,
1718                     unsigned max_depth, unsigned long *mem_usage)
1719{
1720        struct object_entry *trg_entry = trg->entry;
1721        struct object_entry *src_entry = src->entry;
1722        unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;
1723        unsigned ref_depth;
1724        enum object_type type;
1725        void *delta_buf;
1726
1727        /* Don't bother doing diffs between different types */
1728        if (trg_entry->type != src_entry->type)
1729                return -1;
1730
1731        /*
1732         * We do not bother to try a delta that we discarded on an
1733         * earlier try, but only when reusing delta data.  Note that a
1734         * src_entry that is marked as a preferred base should always
1735         * be considered, as even if we produce a suboptimal delta against
1736         * it, we will still save the transfer cost, as we already know
1737         * the other side has it and we won't send src_entry at all.
1738         */
1739        if (reuse_delta && trg_entry->in_pack &&
1740            trg_entry->in_pack == src_entry->in_pack &&
1741            !src_entry->preferred_base &&
1742            trg_entry->in_pack_type != OBJ_REF_DELTA &&
1743            trg_entry->in_pack_type != OBJ_OFS_DELTA)
1744                return 0;
1745
1746        /* Let's not bust the allowed depth. */
1747        if (src->depth >= max_depth)
1748                return 0;
1749
1750        /* Now some size filtering heuristics. */
1751        trg_size = trg_entry->size;
1752        if (!trg_entry->delta) {
1753                max_size = trg_size/2 - 20;
1754                ref_depth = 1;
1755        } else {
1756                max_size = trg_entry->delta_size;
1757                ref_depth = trg->depth;
1758        }
1759        max_size = (uint64_t)max_size * (max_depth - src->depth) /
1760                                                (max_depth - ref_depth + 1);
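            /*
             * Illustrative numbers (not from the original source): a
             * 1000-byte target with no existing delta starts with
             * max_size = 480; against a base at depth 25, with
             * max_depth = 50 and ref_depth = 1, this scales down to
             * 480 * 25 / 50 = 240, so deeper bases must yield
             * proportionally smaller deltas to be accepted.
             */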
1761        if (max_size == 0)
1762                return 0;
1763        src_size = src_entry->size;
1764        sizediff = src_size < trg_size ? trg_size - src_size : 0;
1765        if (sizediff >= max_size)
1766                return 0;
1767        if (trg_size < src_size / 32)
1768                return 0;
1769
1770        /* Load data if not already done */
1771        if (!trg->data) {
1772                read_lock();
1773                trg->data = read_sha1_file(trg_entry->idx.sha1, &type, &sz);
1774                read_unlock();
1775                if (!trg->data)
1776                        die("object %s cannot be read",
1777                            sha1_to_hex(trg_entry->idx.sha1));
1778                if (sz != trg_size)
1779                        die("object %s inconsistent object length (%lu vs %lu)",
1780                            sha1_to_hex(trg_entry->idx.sha1), sz, trg_size);
1781                *mem_usage += sz;
1782        }
1783        if (!src->data) {
1784                read_lock();
1785                src->data = read_sha1_file(src_entry->idx.sha1, &type, &sz);
1786                read_unlock();
1787                if (!src->data) {
1788                        if (src_entry->preferred_base) {
1789                                static int warned = 0;
1790                                if (!warned++)
1791                                        warning("object %s cannot be read",
1792                                                sha1_to_hex(src_entry->idx.sha1));
1793                                /*
1794                                 * Those objects are not included in the
1795                                 * resulting pack.  Be resilient and ignore
1796                                 * them if they can't be read, in case the
1797                                 * pack could be created nevertheless.
1798                                 */
1799                                return 0;
1800                        }
1801                        die("object %s cannot be read",
1802                            sha1_to_hex(src_entry->idx.sha1));
1803                }
1804                if (sz != src_size)
1805                        die("object %s inconsistent object length (%lu vs %lu)",
1806                            sha1_to_hex(src_entry->idx.sha1), sz, src_size);
1807                *mem_usage += sz;
1808        }
1809        if (!src->index) {
1810                src->index = create_delta_index(src->data, src_size);
1811                if (!src->index) {
1812                        static int warned = 0;
1813                        if (!warned++)
1814                                warning("suboptimal pack - out of memory");
1815                        return 0;
1816                }
1817                *mem_usage += sizeof_delta_index(src->index);
1818        }
1819
1820        delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
1821        if (!delta_buf)
1822                return 0;
1823
1824        if (trg_entry->delta) {
1825                /* Prefer only shallower same-sized deltas. */
1826                if (delta_size == trg_entry->delta_size &&
1827                    src->depth + 1 >= trg->depth) {
1828                        free(delta_buf);
1829                        return 0;
1830                }
1831        }
1832
1833        /*
1834         * Handle memory allocation outside of the cache
1835         * accounting lock.  The compiler will optimize the strangeness
1836         * away when NO_PTHREADS is defined.
1837         */
1838        free(trg_entry->delta_data);
1839        cache_lock();
1840        if (trg_entry->delta_data) {
1841                delta_cache_size -= trg_entry->delta_size;
1842                trg_entry->delta_data = NULL;
1843        }
1844        if (delta_cacheable(src_size, trg_size, delta_size)) {
1845                delta_cache_size += delta_size;
1846                cache_unlock();
1847                trg_entry->delta_data = xrealloc(delta_buf, delta_size);
1848        } else {
1849                cache_unlock();
1850                free(delta_buf);
1851        }
1852
1853        trg_entry->delta = src_entry;
1854        trg_entry->delta_size = delta_size;
1855        trg->depth = src->depth + 1;
1856
1857        return 1;
1858}
1859
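    /*
     * Return the depth of the deepest delta chain hanging off "me",
     * counting "me" itself as depth n.  find_deltas() below uses this
     * to shrink the depth budget of an object that already has delta
     * children, so that those chains stay within the --depth limit.
     */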
1860static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
1861{
1862        struct object_entry *child = me->delta_child;
1863        unsigned int m = n;
1864        while (child) {
1865                unsigned int c = check_delta_limit(child, n + 1);
1866                if (m < c)
1867                        m = c;
1868                child = child->delta_sibling;
1869        }
1870        return m;
1871}
1872
1873static unsigned long free_unpacked(struct unpacked *n)
1874{
1875        unsigned long freed_mem = sizeof_delta_index(n->index);
1876        free_delta_index(n->index);
1877        n->index = NULL;
1878        if (n->data) {
1879                freed_mem += n->entry->size;
1880                free(n->data);
1881                n->data = NULL;
1882        }
1883        n->entry = NULL;
1884        n->depth = 0;
1885        return freed_mem;
1886}
1887
1888static void find_deltas(struct object_entry **list, unsigned *list_size,
1889                        int window, int depth, unsigned *processed)
1890{
1891        uint32_t i, idx = 0, count = 0;
1892        struct unpacked *array;
1893        unsigned long mem_usage = 0;
1894
1895        array = xcalloc(window, sizeof(struct unpacked));
1896
1897        for (;;) {
1898                struct object_entry *entry;
1899                struct unpacked *n = array + idx;
1900                int j, max_depth, best_base = -1;
1901
1902                progress_lock();
1903                if (!*list_size) {
1904                        progress_unlock();
1905                        break;
1906                }
1907                entry = *list++;
1908                (*list_size)--;
1909                if (!entry->preferred_base) {
1910                        (*processed)++;
1911                        display_progress(progress_state, *processed);
1912                }
1913                progress_unlock();
1914
1915                mem_usage -= free_unpacked(n);
1916                n->entry = entry;
1917
1918                while (window_memory_limit &&
1919                       mem_usage > window_memory_limit &&
1920                       count > 1) {
1921                        uint32_t tail = (idx + window - count) % window;
1922                        mem_usage -= free_unpacked(array + tail);
1923                        count--;
1924                }
1925
1926                /* We do not compute deltas to *create* objects we are not
1927                 * going to pack.
1928                 */
1929                if (entry->preferred_base)
1930                        goto next;
1931
1932                /*
1933                 * If the current object is at the pack edge, take the depth of
1934                 * the objects that depend on it into account; otherwise they
1935                 * would become too deep.
1936                 */
1937                max_depth = depth;
1938                if (entry->delta_child) {
1939                        max_depth -= check_delta_limit(entry, 0);
1940                        if (max_depth <= 0)
1941                                goto next;
1942                }
1943
1944                j = window;
1945                while (--j > 0) {
1946                        int ret;
1947                        uint32_t other_idx = idx + j;
1948                        struct unpacked *m;
1949                        if (other_idx >= window)
1950                                other_idx -= window;
1951                        m = array + other_idx;
1952                        if (!m->entry)
1953                                break;
1954                        ret = try_delta(n, m, max_depth, &mem_usage);
1955                        if (ret < 0)
1956                                break;
1957                        else if (ret > 0)
1958                                best_base = other_idx;
1959                }
1960
1961                /*
1962                 * If we decided to cache the delta data, then it is best
1963                 * to compress it right away.  First because we have to do
1964                 * it anyway, and doing it here while we're threaded will
1965                 * save a lot of time in the non-threaded write phase,
1966                 * as well as allow for caching more deltas within
1967                 * the same cache size limit.
1968                 * ...
1969                 * But only if not writing to stdout, since in that case
1970                 * the network is most likely throttling writes anyway,
1971                 * and therefore it is best to go to the write phase ASAP
1972                 * instead, as we can afford spending more time compressing
1973                 * between writes at that moment.
1974                 */
1975                if (entry->delta_data && !pack_to_stdout) {
1976                        entry->z_delta_size = do_compress(&entry->delta_data,
1977                                                          entry->delta_size);
1978                        cache_lock();
1979                        delta_cache_size -= entry->delta_size;
1980                        delta_cache_size += entry->z_delta_size;
1981                        cache_unlock();
1982                }
1983
1984                /* If we made n a delta, and if n is already at max
1985                 * depth, leaving it in the window is pointless.  We
1986                 * should evict it first.
1987                 */
1988                if (entry->delta && max_depth <= n->depth)
1989                        continue;
1990
1991                /*
1992                 * Move the best delta base up in the window, after the
1993                 * currently deltified object, to keep it longer.  It will
1994                 * be the first base object to be attempted next.
1995                 */
1996                if (entry->delta) {
1997                        struct unpacked swap = array[best_base];
1998                        int dist = (window + idx - best_base) % window;
1999                        int dst = best_base;
2000                        while (dist--) {
2001                                int src = (dst + 1) % window;
2002                                array[dst] = array[src];
2003                                dst = src;
2004                        }
2005                        array[dst] = swap;
2006                }
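                    /*
                     * Illustration with a hypothetical window state: with
                     * window = 4, idx = 3 and best_base = 1, dist is 2; the
                     * two entries after the base each shift down one slot
                     * and the base lands in slot 3, right behind the
                     * current object in scan order.
                     */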
2007
2008                next:
2009                idx++;
2010                if (count + 1 < window)
2011                        count++;
2012                if (idx >= window)
2013                        idx = 0;
2014        }
2015
2016        for (i = 0; i < window; ++i) {
2017                free_delta_index(array[i].index);
2018                free(array[i].data);
2019        }
2020        free(array);
2021}
2022
2023#ifndef NO_PTHREADS
2024
2025static void try_to_free_from_threads(size_t size)
2026{
2027        read_lock();
2028        release_pack_memory(size);
2029        read_unlock();
2030}
2031
2032static try_to_free_t old_try_to_free_routine;
2033
2034/*
2035 * The main thread waits on the condition that (at least) one of the workers
2036 * has stopped working (which is indicated in the .working member of
2037 * struct thread_params).
2038 * When a worker thread has completed its work, it sets .working to 0,
2039 * signals the main thread, and waits on the condition that .data_ready
2040 * becomes 1.
2041 */
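    /*
     * Illustrative sequence, pieced together from the code below: a worker
     * finishes, sets .working to 0 and signals progress_cond; the main
     * thread hands it a new list segment, sets .data_ready to 1 and
     * signals the worker's cond; the worker resets .data_ready and loops.
     */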
2042
2043struct thread_params {
2044        pthread_t thread;
2045        struct object_entry **list;
2046        unsigned list_size;
2047        unsigned remaining;
2048        int window;
2049        int depth;
2050        int working;
2051        int data_ready;
2052        pthread_mutex_t mutex;
2053        pthread_cond_t cond;
2054        unsigned *processed;
2055};
2056
2057static pthread_cond_t progress_cond;
2058
2059/*
2060 * Mutexes and condition variables can't be statically initialized on Windows.
2061 */
2062static void init_threaded_search(void)
2063{
2064        init_recursive_mutex(&read_mutex);
2065        pthread_mutex_init(&cache_mutex, NULL);
2066        pthread_mutex_init(&progress_mutex, NULL);
2067        pthread_cond_init(&progress_cond, NULL);
2068        old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
2069}
2070
2071static void cleanup_threaded_search(void)
2072{
2073        set_try_to_free_routine(old_try_to_free_routine);
2074        pthread_cond_destroy(&progress_cond);
2075        pthread_mutex_destroy(&read_mutex);
2076        pthread_mutex_destroy(&cache_mutex);
2077        pthread_mutex_destroy(&progress_mutex);
2078}
2079
2080static void *threaded_find_deltas(void *arg)
2081{
2082        struct thread_params *me = arg;
2083
2084        while (me->remaining) {
2085                find_deltas(me->list, &me->remaining,
2086                            me->window, me->depth, me->processed);
2087
2088                progress_lock();
2089                me->working = 0;
2090                pthread_cond_signal(&progress_cond);
2091                progress_unlock();
2092
2093                /*
2094                 * We must not set ->data_ready before we wait on the
2095                 * condition because the main thread may have set it to 1
2096                 * before we get here. In order to be sure that new
2097                 * work is available if we see 1 in ->data_ready, it
2098                 * was initialized to 0 before this thread was spawned
2099                 * and we reset it to 0 right away.
2100                 */
2101                pthread_mutex_lock(&me->mutex);
2102                while (!me->data_ready)
2103                        pthread_cond_wait(&me->cond, &me->mutex);
2104                me->data_ready = 0;
2105                pthread_mutex_unlock(&me->mutex);
2106        }
2107        /* leave ->working 1 so that this doesn't get more work assigned */
2108        return NULL;
2109}
2110
2111static void ll_find_deltas(struct object_entry **list, unsigned list_size,
2112                           int window, int depth, unsigned *processed)
2113{
2114        struct thread_params *p;
2115        int i, ret, active_threads = 0;
2116
2117        init_threaded_search();
2118
2119        if (delta_search_threads <= 1) {
2120                find_deltas(list, &list_size, window, depth, processed);
2121                cleanup_threaded_search();
2122                return;
2123        }
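            /*
             * Note (added for clarity): with --stdout this message appears
             * only under --all-progress (progress == 2); otherwise any
             * enabled progress level prints it.
             */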
2124        if (progress > pack_to_stdout)
2125                fprintf(stderr, "Delta compression using up to %d threads.\n",
2126                                delta_search_threads);
2127        p = xcalloc(delta_search_threads, sizeof(*p));
2128
2129        /* Partition the work amongst work threads. */
2130        for (i = 0; i < delta_search_threads; i++) {
2131                unsigned sub_size = list_size / (delta_search_threads - i);
2132
2133                /* don't use segments that are too small, or no deltas will be found */
2134                if (sub_size < 2*window && i+1 < delta_search_threads)
2135                        sub_size = 0;
2136
2137                p[i].window = window;
2138                p[i].depth = depth;
2139                p[i].processed = processed;
2140                p[i].working = 1;
2141                p[i].data_ready = 0;
2142
2143                /* try to split chunks on "path" boundaries */
2144                while (sub_size && sub_size < list_size &&
2145                       list[sub_size]->hash &&
2146                       list[sub_size]->hash == list[sub_size-1]->hash)
2147                        sub_size++;
2148
2149                p[i].list = list;
2150                p[i].list_size = sub_size;
2151                p[i].remaining = sub_size;
2152
2153                list += sub_size;
2154                list_size -= sub_size;
2155        }
2156
2157        /* Start work threads. */
2158        for (i = 0; i < delta_search_threads; i++) {
2159                if (!p[i].list_size)
2160                        continue;
2161                pthread_mutex_init(&p[i].mutex, NULL);
2162                pthread_cond_init(&p[i].cond, NULL);
2163                ret = pthread_create(&p[i].thread, NULL,
2164                                     threaded_find_deltas, &p[i]);
2165                if (ret)
2166                        die("unable to create thread: %s", strerror(ret));
2167                active_threads++;
2168        }
2169
2170        /*
2171         * Now let's wait for work completion.  Each time a thread is done
2172         * with its work, we steal half of the remaining work from the
2173         * thread with the largest number of unprocessed objects and give
2174         * it to that newly idle thread.  This ensures good load balancing
2175         * until the remaining object list segments are simply too short
2176         * to be worth splitting anymore.
2177         */
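            /*
             * E.g. (illustrative): if an idle thread appears while another
             * thread still has 1000 objects remaining, the idle one takes
             * over roughly 500 of them, with the split point nudged so it
             * does not separate objects sharing the same name hash.
             */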
2178        while (active_threads) {
2179                struct thread_params *target = NULL;
2180                struct thread_params *victim = NULL;
2181                unsigned sub_size = 0;
2182
2183                progress_lock();
2184                for (;;) {
2185                        for (i = 0; !target && i < delta_search_threads; i++)
2186                                if (!p[i].working)
2187                                        target = &p[i];
2188                        if (target)
2189                                break;
2190                        pthread_cond_wait(&progress_cond, &progress_mutex);
2191                }
2192
2193                for (i = 0; i < delta_search_threads; i++)
2194                        if (p[i].remaining > 2*window &&
2195                            (!victim || victim->remaining < p[i].remaining))
2196                                victim = &p[i];
2197                if (victim) {
2198                        sub_size = victim->remaining / 2;
2199                        list = victim->list + victim->list_size - sub_size;
2200                        while (sub_size && list[0]->hash &&
2201                               list[0]->hash == list[-1]->hash) {
2202                                list++;
2203                                sub_size--;
2204                        }
2205                        if (!sub_size) {
2206                                /*
2207                                 * It is possible for some "paths" to have
2208                                 * so many objects that no hash boundary
2209                                 * can be found.  Let's just steal the
2210                                 * exact half in that case.
2211                                 */
2212                                sub_size = victim->remaining / 2;
2213                                list -= sub_size;
2214                        }
2215                        target->list = list;
2216                        victim->list_size -= sub_size;
2217                        victim->remaining -= sub_size;
2218                }
2219                target->list_size = sub_size;
2220                target->remaining = sub_size;
2221                target->working = 1;
2222                progress_unlock();
2223
2224                pthread_mutex_lock(&target->mutex);
2225                target->data_ready = 1;
2226                pthread_cond_signal(&target->cond);
2227                pthread_mutex_unlock(&target->mutex);
2228
2229                if (!sub_size) {
2230                        pthread_join(target->thread, NULL);
2231                        pthread_cond_destroy(&target->cond);
2232                        pthread_mutex_destroy(&target->mutex);
2233                        active_threads--;
2234                }
2235        }
2236        cleanup_threaded_search();
2237        free(p);
2238}
2239
2240#else
2241#define ll_find_deltas(l, s, w, d, p)   find_deltas(l, &s, w, d, p)
2242#endif
2243
2244static void add_tag_chain(const struct object_id *oid)
2245{
2246        struct tag *tag;
2247
2248        /*
2249         * We catch duplicates already in add_object_entry(), but we'd
2250         * prefer to do this extra check to avoid having to parse the
2251         * tag at all if we already know that it's being packed (e.g., if
2252         * it was included via bitmaps, we would not have parsed it
2253         * previously).
2254         */
2255        if (packlist_find(&to_pack, oid->hash, NULL))
2256                return;
2257
2258        tag = lookup_tag(oid->hash);
2259        while (1) {
2260                if (!tag || parse_tag(tag) || !tag->tagged)
2261                        die("unable to pack objects reachable from tag %s",
2262                            oid_to_hex(oid));
2263
2264                add_object_entry(tag->object.oid.hash, OBJ_TAG, NULL, 0);
2265
2266                if (tag->tagged->type != OBJ_TAG)
2267                        return;
2268
2269                tag = (struct tag *)tag->tagged;
2270        }
2271}
2272
2273static int add_ref_tag(const char *path, const struct object_id *oid, int flag, void *cb_data)
2274{
2275        struct object_id peeled;
2276
2277        if (starts_with(path, "refs/tags/") && /* is a tag? */
2278            !peel_ref(path, peeled.hash)    && /* peelable? */
2279            packlist_find(&to_pack, peeled.hash, NULL))      /* object packed? */
2280                add_tag_chain(oid);
2281        return 0;
2282}
2283
2284static void prepare_pack(int window, int depth)
2285{
2286        struct object_entry **delta_list;
2287        uint32_t i, nr_deltas;
2288        unsigned n;
2289
2290        get_object_details();
2291
2292        /*
2293         * If we're locally repacking then we need to be doubly careful
2294         * from now on in order to make sure no stealth corruption gets
2295         * propagated to the new pack.  Clients receiving streamed packs
2296         * should validate everything they get anyway so no need to incur
2297         * the additional cost here in that case.
2298         */
2299        if (!pack_to_stdout)
2300                do_check_packed_object_crc = 1;
2301
2302        if (!to_pack.nr_objects || !window || !depth)
2303                return;
2304
2305        ALLOC_ARRAY(delta_list, to_pack.nr_objects);
2306        nr_deltas = n = 0;
2307
2308        for (i = 0; i < to_pack.nr_objects; i++) {
2309                struct object_entry *entry = to_pack.objects + i;
2310
2311                if (entry->delta)
2312                        /* This happens if we decided to reuse existing
2313                         * delta from a pack.  "reuse_delta &&" is implied.
2314                         */
2315                        continue;
2316
2317                if (entry->size < 50)
2318                        continue;
2319
2320                if (entry->no_try_delta)
2321                        continue;
2322
2323                if (!entry->preferred_base) {
2324                        nr_deltas++;
2325                        if (entry->type < 0)
2326                                die("unable to get type of object %s",
2327                                    sha1_to_hex(entry->idx.sha1));
2328                } else {
2329                        if (entry->type < 0) {
2330                                /*
2331                                 * This object is not found, but we
2332                                 * don't have to include it anyway.
2333                                 */
2334                                continue;
2335                        }
2336                }
2337
2338                delta_list[n++] = entry;
2339        }
2340
2341        if (nr_deltas && n > 1) {
2342                unsigned nr_done = 0;
2343                if (progress)
2344                        progress_state = start_progress(_("Compressing objects"),
2345                                                        nr_deltas);
2346                QSORT(delta_list, n, type_size_sort);
2347                ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
2348                stop_progress(&progress_state);
2349                if (nr_done != nr_deltas)
2350                        die("inconsistency with delta count");
2351        }
2352        free(delta_list);
2353}
2354
2355static int git_pack_config(const char *k, const char *v, void *cb)
2356{
2357        if (!strcmp(k, "pack.window")) {
2358                window = git_config_int(k, v);
2359                return 0;
2360        }
2361        if (!strcmp(k, "pack.windowmemory")) {
2362                window_memory_limit = git_config_ulong(k, v);
2363                return 0;
2364        }
2365        if (!strcmp(k, "pack.depth")) {
2366                depth = git_config_int(k, v);
2367                return 0;
2368        }
2369        if (!strcmp(k, "pack.deltacachesize")) {
2370                max_delta_cache_size = git_config_int(k, v);
2371                return 0;
2372        }
2373        if (!strcmp(k, "pack.deltacachelimit")) {
2374                cache_max_small_delta_size = git_config_int(k, v);
2375                return 0;
2376        }
2377        if (!strcmp(k, "pack.writebitmaphashcache")) {
2378                if (git_config_bool(k, v))
2379                        write_bitmap_options |= BITMAP_OPT_HASH_CACHE;
2380                else
2381                        write_bitmap_options &= ~BITMAP_OPT_HASH_CACHE;
                    return 0;
2382        }
2383        if (!strcmp(k, "pack.usebitmaps")) {
2384                use_bitmap_index_default = git_config_bool(k, v);
2385                return 0;
2386        }
2387        if (!strcmp(k, "pack.threads")) {
2388                delta_search_threads = git_config_int(k, v);
2389                if (delta_search_threads < 0)
2390                        die("invalid number of threads specified (%d)",
2391                            delta_search_threads);
2392#ifdef NO_PTHREADS
2393                if (delta_search_threads != 1)
2394                        warning("no threads support, ignoring %s", k);
2395#endif
2396                return 0;
2397        }
2398        if (!strcmp(k, "pack.indexversion")) {
2399                pack_idx_opts.version = git_config_int(k, v);
2400                if (pack_idx_opts.version > 2)
2401                        die("bad pack.indexversion=%"PRIu32,
2402                            pack_idx_opts.version);
2403                return 0;
2404        }
2405        return git_default_config(k, v, cb);
2406}
2407
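    /*
     * Sketch of the input this parser accepts (the sha1s are made up for
     * illustration): one object per line, a 40-hex sha1 followed by a
     * separator byte and an optional pathname hint; a leading '-' marks
     * an edge object the receiver is assumed to already have:
     *
     *   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa Makefile
     *   -bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
     */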
2408static void read_object_list_from_stdin(void)
2409{
2410        char line[40 + 1 + PATH_MAX + 2];
2411        unsigned char sha1[20];
2412
2413        for (;;) {
2414                if (!fgets(line, sizeof(line), stdin)) {
2415                        if (feof(stdin))
2416                                break;
2417                        if (!ferror(stdin))
2418                                die("fgets returned NULL, not EOF, not error!");
2419                        if (errno != EINTR)
2420                                die_errno("fgets");
2421                        clearerr(stdin);
2422                        continue;
2423                }
2424                if (line[0] == '-') {
2425                        if (get_sha1_hex(line+1, sha1))
2426                                die("expected edge sha1, got garbage:\n %s",
2427                                    line);
2428                        add_preferred_base(sha1);
2429                        continue;
2430                }
2431                if (get_sha1_hex(line, sha1))
2432                        die("expected sha1, got garbage:\n %s", line);
2433
2434                add_preferred_base_object(line+41);
2435                add_object_entry(sha1, 0, line+41, 0);
2436        }
2437}
2438
2439#define OBJECT_ADDED (1u<<20)
2440
2441static void show_commit(struct commit *commit, void *data)
2442{
2443        add_object_entry(commit->object.oid.hash, OBJ_COMMIT, NULL, 0);
2444        commit->object.flags |= OBJECT_ADDED;
2445
2446        if (write_bitmap_index)
2447                index_commit_for_bitmap(commit);
2448}
2449
2450static void show_object(struct object *obj, const char *name, void *data)
2451{
2452        add_preferred_base_object(name);
2453        add_object_entry(obj->oid.hash, obj->type, name, 0);
2454        obj->flags |= OBJECT_ADDED;
2455}
2456
2457static void show_edge(struct commit *commit)
2458{
2459        add_preferred_base(commit->object.oid.hash);
2460}
2461
2462struct in_pack_object {
2463        off_t offset;
2464        struct object *object;
2465};
2466
2467struct in_pack {
2468        int alloc;
2469        int nr;
2470        struct in_pack_object *array;
2471};
2472
2473static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
2474{
2475        in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->oid.hash, p);
2476        in_pack->array[in_pack->nr].object = object;
2477        in_pack->nr++;
2478}
2479
2480/*
2481 * Compare the objects in offset order, to emulate the
2482 * "git rev-list --objects" output that produced the pack originally.
2483 */
2484static int ofscmp(const void *a_, const void *b_)
2485{
2486        struct in_pack_object *a = (struct in_pack_object *)a_;
2487        struct in_pack_object *b = (struct in_pack_object *)b_;
2488
2489        if (a->offset < b->offset)
2490                return -1;
2491        else if (a->offset > b->offset)
2492                return 1;
2493        else
2494                return oidcmp(&a->object->oid, &b->object->oid);
2495}
2496
2497static void add_objects_in_unpacked_packs(struct rev_info *revs)
2498{
2499        struct packed_git *p;
2500        struct in_pack in_pack;
2501        uint32_t i;
2502
2503        memset(&in_pack, 0, sizeof(in_pack));
2504
2505        for (p = packed_git; p; p = p->next) {
2506                const unsigned char *sha1;
2507                struct object *o;
2508
2509                if (!p->pack_local || p->pack_keep)
2510                        continue;
2511                if (open_pack_index(p))
2512                        die("cannot open pack index");
2513
2514                ALLOC_GROW(in_pack.array,
2515                           in_pack.nr + p->num_objects,
2516                           in_pack.alloc);
2517
2518                for (i = 0; i < p->num_objects; i++) {
2519                        sha1 = nth_packed_object_sha1(p, i);
2520                        o = lookup_unknown_object(sha1);
2521                        if (!(o->flags & OBJECT_ADDED))
2522                                mark_in_pack_object(o, p, &in_pack);
2523                        o->flags |= OBJECT_ADDED;
2524                }
2525        }
2526
2527        if (in_pack.nr) {
2528                QSORT(in_pack.array, in_pack.nr, ofscmp);
2529                for (i = 0; i < in_pack.nr; i++) {
2530                        struct object *o = in_pack.array[i].object;
2531                        add_object_entry(o->oid.hash, o->type, "", 0);
2532                }
2533        }
2534        free(in_pack.array);
2535}
2536
2537static int add_loose_object(const unsigned char *sha1, const char *path,
2538                            void *data)
2539{
2540        enum object_type type = sha1_object_info(sha1, NULL);
2541
2542        if (type < 0) {
2543                warning("loose object at %s could not be examined", path);
2544                return 0;
2545        }
2546
2547        add_object_entry(sha1, type, "", 0);
2548        return 0;
2549}
2550
2551/*
2552 * We actually don't even have to worry about reachability here.
2553 * add_object_entry will weed out duplicates, so we just add every
2554 * loose object we find.
2555 */
2556static void add_unreachable_loose_objects(void)
2557{
2558        for_each_loose_file_in_objdir(get_object_directory(),
2559                                      add_loose_object,
2560                                      NULL, NULL, NULL);
2561}
2562
2563static int has_sha1_pack_kept_or_nonlocal(const unsigned char *sha1)
2564{
2565        static struct packed_git *last_found = (void *)1;
2566        struct packed_git *p;
2567
2568        p = (last_found != (void *)1) ? last_found : packed_git;
2569
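            /*
             * Scan from the pack that matched most recently first, then
             * wrap around to the head of the list, taking care not to
             * visit last_found a second time.
             */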
2570        while (p) {
2571                if ((!p->pack_local || p->pack_keep) &&
2572                        find_pack_entry_one(sha1, p)) {
2573                        last_found = p;
2574                        return 1;
2575                }
2576                if (p == last_found)
2577                        p = packed_git;
2578                else
2579                        p = p->next;
2580                if (p == last_found)
2581                        p = p->next;
2582        }
2583        return 0;
2584}
2585
2586/*
2587 * Store a list of sha1s that should not be discarded,
2588 * because they were either written too recently, or are
2589 * reachable from another object that was.
2590 *
2591 * This is filled by get_object_list.
2592 */
2593static struct sha1_array recent_objects;
2594
2595static int loosened_object_can_be_discarded(const unsigned char *sha1,
2596                                            unsigned long mtime)
2597{
2598        if (!unpack_unreachable_expiration)
2599                return 0;
2600        if (mtime > unpack_unreachable_expiration)
2601                return 0;
2602        if (sha1_array_lookup(&recent_objects, sha1) >= 0)
2603                return 0;
2604        return 1;
2605}
2606
2607static void loosen_unused_packed_objects(struct rev_info *revs)
2608{
2609        struct packed_git *p;
2610        uint32_t i;
2611        const unsigned char *sha1;
2612
2613        for (p = packed_git; p; p = p->next) {
2614                if (!p->pack_local || p->pack_keep)
2615                        continue;
2616
2617                if (open_pack_index(p))
2618                        die("cannot open pack index");
2619
2620                for (i = 0; i < p->num_objects; i++) {
2621                        sha1 = nth_packed_object_sha1(p, i);
2622                        if (!packlist_find(&to_pack, sha1, NULL) &&
2623                            !has_sha1_pack_kept_or_nonlocal(sha1) &&
2624                            !loosened_object_can_be_discarded(sha1, p->mtime))
2625                                if (force_object_loose(sha1, p->mtime))
2626                                        die("unable to force loose object");
2627                }
2628        }
2629}
2630
2631/*
2632 * This tracks any options which pack-reuse code expects to be on, or which a
2633 * reader of the pack might not understand, and which would therefore prevent
2634 * blind reuse of what we have on disk.
2635 */
2636static int pack_options_allow_reuse(void)
2637{
2638        return pack_to_stdout && allow_ofs_delta;
2639}
2640
2641static int get_object_list_from_bitmap(struct rev_info *revs)
2642{
2643        if (prepare_bitmap_walk(revs) < 0)
2644                return -1;
2645
2646        if (pack_options_allow_reuse() &&
2647            !reuse_partial_packfile_from_bitmap(
2648                        &reuse_packfile,
2649                        &reuse_packfile_objects,
2650                        &reuse_packfile_offset)) {
2651                assert(reuse_packfile_objects);
2652                nr_result += reuse_packfile_objects;
2653                display_progress(progress_state, nr_result);
2654        }
2655
2656        traverse_bitmap_commit_list(&add_object_entry_from_bitmap);
2657        return 0;
2658}
2659
2660static void record_recent_object(struct object *obj,
2661                                 const char *name,
2662                                 void *data)
2663{
2664        sha1_array_append(&recent_objects, obj->oid.hash);
2665}
2666
2667static void record_recent_commit(struct commit *commit, void *data)
2668{
2669        sha1_array_append(&recent_objects, commit->object.oid.hash);
2670}
2671
2672static void get_object_list(int ac, const char **av)
2673{
2674        struct rev_info revs;
2675        char line[1000];
2676        int flags = 0;
2677
2678        init_revisions(&revs, NULL);
2679        save_commit_buffer = 0;
2680        setup_revisions(ac, av, &revs, NULL);
2681
2682        /* make sure shallows are read */
2683        is_repository_shallow();
2684
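            /*
             * Sketch of the input read here (revision names are
             * illustrative): one revision argument per line, terminated by
             * an empty line, e.g.
             *
             *   HEAD
             *   --not
             *   v1.0
             *
             * "--shallow <sha1>" lines register shallow boundaries and
             * disable bitmap use.
             */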
2685        while (fgets(line, sizeof(line), stdin) != NULL) {
2686                int len = strlen(line);
2687                if (len && line[len - 1] == '\n')
2688                        line[--len] = 0;
2689                if (!len)
2690                        break;
2691                if (*line == '-') {
2692                        if (!strcmp(line, "--not")) {
2693                                flags ^= UNINTERESTING;
2694                                write_bitmap_index = 0;
2695                                continue;
2696                        }
2697                        if (starts_with(line, "--shallow ")) {
2698                                unsigned char sha1[20];
2699                                if (get_sha1_hex(line + 10, sha1))
2700                                        die("not an SHA-1 '%s'", line + 10);
2701                                register_shallow(sha1);
2702                                use_bitmap_index = 0;
2703                                continue;
2704                        }
2705                        die("not a rev '%s'", line);
2706                }
2707                if (handle_revision_arg(line, &revs, flags, REVARG_CANNOT_BE_FILENAME))
2708                        die("bad revision '%s'", line);
2709        }
2710
2711        if (use_bitmap_index && !get_object_list_from_bitmap(&revs))
2712                return;
2713
2714        if (prepare_revision_walk(&revs))
2715                die("revision walk setup failed");
2716        mark_edges_uninteresting(&revs, show_edge);
2717        traverse_commit_list(&revs, show_commit, show_object, NULL);
2718
2719        if (unpack_unreachable_expiration) {
2720                revs.ignore_missing_links = 1;
2721                if (add_unseen_recent_objects_to_traversal(&revs,
2722                                unpack_unreachable_expiration))
2723                        die("unable to add recent objects");
2724                if (prepare_revision_walk(&revs))
2725                        die("revision walk setup failed");
2726                traverse_commit_list(&revs, record_recent_commit,
2727                                     record_recent_object, NULL);
2728        }
2729
2730        if (keep_unreachable)
2731                add_objects_in_unpacked_packs(&revs);
2732        if (pack_loose_unreachable)
2733                add_unreachable_loose_objects();
2734        if (unpack_unreachable)
2735                loosen_unused_packed_objects(&revs);
2736
2737        sha1_array_clear(&recent_objects);
2738}
2739
2740static int option_parse_index_version(const struct option *opt,
2741                                      const char *arg, int unset)
2742{
2743        char *c;
2744        const char *val = arg;
2745        pack_idx_opts.version = strtoul(val, &c, 10);
2746        if (pack_idx_opts.version > 2)
2747                die(_("unsupported index version %s"), val);
2748        if (*c == ',' && c[1])
2749                pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);
2750        if (*c || pack_idx_opts.off32_limit & 0x80000000)
2751                die(_("bad index version '%s'"), val);
2752        return 0;
2753}
2754
2755static int option_parse_unpack_unreachable(const struct option *opt,
2756                                           const char *arg, int unset)
2757{
2758        if (unset) {
2759                unpack_unreachable = 0;
2760                unpack_unreachable_expiration = 0;
2761        }
2762        else {
2763                unpack_unreachable = 1;
2764                if (arg)
2765                        unpack_unreachable_expiration = approxidate(arg);
2766        }
2767        return 0;
2768}
2769
2770int cmd_pack_objects(int argc, const char **argv, const char *prefix)
2771{
2772        int use_internal_rev_list = 0;
2773        int thin = 0;
2774        int shallow = 0;
2775        int all_progress_implied = 0;
2776        struct argv_array rp = ARGV_ARRAY_INIT;
2777        int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
2778        int rev_list_index = 0;
2779        struct option pack_objects_options[] = {
		OPT_SET_INT('q', "quiet", &progress,
			    N_("do not show progress meter"), 0),
		OPT_SET_INT(0, "progress", &progress,
			    N_("show progress meter"), 1),
		OPT_SET_INT(0, "all-progress", &progress,
			    N_("show progress meter during object writing phase"), 2),
		OPT_BOOL(0, "all-progress-implied",
			 &all_progress_implied,
			 N_("similar to --all-progress when progress meter is shown")),
		{ OPTION_CALLBACK, 0, "index-version", NULL, N_("version[,offset]"),
		  N_("write the pack index file in the specified idx format version"),
		  0, option_parse_index_version },
		OPT_MAGNITUDE(0, "max-pack-size", &pack_size_limit,
			      N_("maximum size of each output pack file")),
		OPT_BOOL(0, "local", &local,
			 N_("ignore borrowed objects from alternate object store")),
		OPT_BOOL(0, "incremental", &incremental,
			 N_("ignore packed objects")),
		OPT_INTEGER(0, "window", &window,
			    N_("limit pack window by objects")),
		OPT_MAGNITUDE(0, "window-memory", &window_memory_limit,
			      N_("limit pack window by memory in addition to object limit")),
		OPT_INTEGER(0, "depth", &depth,
			    N_("maximum length of delta chain allowed in the resulting pack")),
		OPT_BOOL(0, "reuse-delta", &reuse_delta,
			 N_("reuse existing deltas")),
		OPT_BOOL(0, "reuse-object", &reuse_object,
			 N_("reuse existing objects")),
		OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
			 N_("use OFS_DELTA objects")),
		OPT_INTEGER(0, "threads", &delta_search_threads,
			    N_("use threads when searching for best delta matches")),
		OPT_BOOL(0, "non-empty", &non_empty,
			 N_("do not create an empty pack output")),
		OPT_BOOL(0, "revs", &use_internal_rev_list,
			 N_("read revision arguments from standard input")),
		{ OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,
		  N_("limit the objects to those that are not yet packed"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "all", &rev_list_all, NULL,
		  N_("include objects reachable from any reference"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,
		  N_("include objects referred to by reflog entries"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		{ OPTION_SET_INT, 0, "indexed-objects", &rev_list_index, NULL,
		  N_("include objects referred to by the index"),
		  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
		OPT_BOOL(0, "stdout", &pack_to_stdout,
			 N_("output pack to stdout")),
		OPT_BOOL(0, "include-tag", &include_tag,
			 N_("include tag objects that refer to objects to be packed")),
		OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
			 N_("keep unreachable objects")),
		OPT_BOOL(0, "pack-loose-unreachable", &pack_loose_unreachable,
			 N_("pack loose unreachable objects")),
		{ OPTION_CALLBACK, 0, "unpack-unreachable", NULL, N_("time"),
		  N_("unpack unreachable objects newer than <time>"),
		  PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
		OPT_BOOL(0, "thin", &thin,
			 N_("create thin packs")),
		OPT_BOOL(0, "shallow", &shallow,
			 N_("create packs suitable for shallow fetches")),
		OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
			 N_("ignore packs that have a companion .keep file")),
		OPT_INTEGER(0, "compression", &pack_compression_level,
			    N_("pack compression level")),
		OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
			    N_("do not hide commits by grafts"), 0),
		OPT_BOOL(0, "use-bitmap-index", &use_bitmap_index,
			 N_("use a bitmap index if available to speed up counting objects")),
		OPT_BOOL(0, "write-bitmap-index", &write_bitmap_index,
			 N_("write a bitmap index together with the pack index")),
		OPT_END(),
	};

	check_replace_refs = 0;

	reset_pack_idx_option(&pack_idx_opts);
	git_config(git_pack_config, NULL);

	progress = isatty(2);
	argc = parse_options(argc, argv, prefix, pack_objects_options,
			     pack_usage, 0);

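	/*
	 * A <base-name> argument and --stdout are mutually exclusive,
	 * and exactly one of the two must be given; any leftover
	 * argument is an error.  Illustrative invocations (not
	 * exhaustive):
	 *
	 *	git pack-objects --stdout --revs <revs.txt >out.pack
	 *	git pack-objects --all /tmp/mypack
	 */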
	if (argc) {
		base_name = argv[0];
		argc--;
	}
	if (pack_to_stdout != !base_name || argc)
		usage_with_options(pack_usage, pack_objects_options);

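	/*
	 * Assemble the argument vector for the internal revision walk;
	 * it is consumed by get_object_list() further down.
	 */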
	argv_array_push(&rp, "pack-objects");
	if (thin) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, shallow
				? "--objects-edge-aggressive"
				: "--objects-edge");
	} else
		argv_array_push(&rp, "--objects");

	if (rev_list_all) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--all");
	}
	if (rev_list_reflog) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--reflog");
	}
	if (rev_list_index) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--indexed-objects");
	}
	if (rev_list_unpacked) {
		use_internal_rev_list = 1;
		argv_array_push(&rp, "--unpacked");
	}

	if (!reuse_object)
		reuse_delta = 0;
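	/*
	 * A compression level of -1 means "not configured"; hand it to
	 * zlib as Z_DEFAULT_COMPRESSION (which zlib currently treats as
	 * level 6).  Anything outside 0..Z_BEST_COMPRESSION is rejected.
	 */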
	if (pack_compression_level == -1)
		pack_compression_level = Z_DEFAULT_COMPRESSION;
	else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
		die("bad pack compression level %d", pack_compression_level);

	if (!delta_search_threads)	/* --threads=0 means autodetect */
		delta_search_threads = online_cpus();

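	/*
	 * Without pthread support the delta search runs single-threaded,
	 * so warn about any other requested (or autodetected) count.
	 */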
#ifdef NO_PTHREADS
	if (delta_search_threads != 1)
		warning("no threads support, ignoring --threads");
#endif
	if (!pack_to_stdout && !pack_size_limit)
		pack_size_limit = pack_size_limit_cfg;
	if (pack_to_stdout && pack_size_limit)
		die("--max-pack-size cannot be used to build a pack for transfer");
	if (pack_size_limit && pack_size_limit < 1024*1024) {
		warning("minimum pack size limit is 1 MiB");
		pack_size_limit = 1024*1024;
	}

	if (!pack_to_stdout && thin)
		die("--thin cannot be used to build an indexable pack");

	if (keep_unreachable && unpack_unreachable)
		die("--keep-unreachable and --unpack-unreachable are incompatible");
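	/*
	 * The expiration cutoff is only meaningful when the walk has
	 * seen every source of reachability (--all --reflog
	 * --indexed-objects); otherwise we cannot tell what is truly
	 * unreachable, so clear the cutoff and loosen everything rather
	 * than risk dropping objects.
	 */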
	if (!rev_list_all || !rev_list_reflog || !rev_list_index)
		unpack_unreachable_expiration = 0;

	/*
	 * "Soft" reasons not to use bitmaps; for an on-disk repack we
	 * want by default:
	 *
	 * - to produce a good pack (with a bitmap index, not-yet-packed
	 *   objects end up packed in a suboptimal order).
	 *
	 * - to use the more robust pack-generation codepath (avoiding
	 *   possible bugs in the bitmap code and possible bitmap index
	 *   corruption).
	 */
	if (!pack_to_stdout)
		use_bitmap_index_default = 0;

	if (use_bitmap_index < 0)
		use_bitmap_index = use_bitmap_index_default;

	/* "hard" reasons not to use bitmaps; these just won't work at all */
	if (!use_internal_rev_list || (!pack_to_stdout && write_bitmap_index) || is_repository_shallow())
		use_bitmap_index = 0;

	if (pack_to_stdout || !rev_list_all)
		write_bitmap_index = 0;

	if (progress && all_progress_implied)
		progress = 2;

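	/*
	 * Load the list of existing packfiles; the scans just below
	 * (and object lookups later on) rely on it.
	 */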
	prepare_packed_git();
	if (ignore_packed_keep) {
		struct packed_git *p;
		for (p = packed_git; p; p = p->next)
			if (p->pack_local && p->pack_keep)
				break;
		if (!p) /* no keep-able packs found */
			ignore_packed_keep = 0;
	}
	if (local) {
		/*
		 * unlike ignore_packed_keep above, we do not want to
		 * unset "local" based on looking at packs, as it
		 * also covers non-local objects
		 */
		struct packed_git *p;
		for (p = packed_git; p; p = p->next) {
			if (!p->pack_local) {
				have_non_local_packs = 1;
				break;
			}
		}
	}

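	/*
	 * Main driver: enumerate the objects to pack (from stdin or via
	 * the internal rev-list walk), search for deltas, then write
	 * the pack and print a summary.
	 */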
	if (progress)
		progress_state = start_progress(_("Counting objects"), 0);
	if (!use_internal_rev_list) {
		read_object_list_from_stdin();
	} else {
		get_object_list(rp.argc, rp.argv);
		argv_array_clear(&rp);
	}
	cleanup_preferred_base();
	if (include_tag && nr_result)
		for_each_ref(add_ref_tag, NULL);
	stop_progress(&progress_state);

	if (non_empty && !nr_result)
		return 0;
	if (nr_result)
		prepare_pack(window, depth);
	write_pack_file();
	if (progress)
		fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
			" reused %"PRIu32" (delta %"PRIu32")\n",
			written, written_delta, reused, reused_delta);
	return 0;
}