/* builtin/pack-objects.c, as of commit c4800a3 ("Merge branch 'tr/mergetool-valgrind'") */
   1#include "builtin.h"
   2#include "cache.h"
   3#include "attr.h"
   4#include "object.h"
   5#include "blob.h"
   6#include "commit.h"
   7#include "tag.h"
   8#include "tree.h"
   9#include "delta.h"
  10#include "pack.h"
  11#include "pack-revindex.h"
  12#include "csum-file.h"
  13#include "tree-walk.h"
  14#include "diff.h"
  15#include "revision.h"
  16#include "list-objects.h"
  17#include "progress.h"
  18#include "refs.h"
  19#include "thread-utils.h"
  20
  21static const char pack_usage[] =
  22  "git pack-objects [ -q | --progress | --all-progress ]\n"
  23  "        [--all-progress-implied]\n"
  24  "        [--max-pack-size=<n>] [--local] [--incremental]\n"
  25  "        [--window=<n>] [--window-memory=<n>] [--depth=<n>]\n"
  26  "        [--no-reuse-delta] [--no-reuse-object] [--delta-base-offset]\n"
  27  "        [--threads=<n>] [--non-empty] [--revs [--unpacked | --all]]\n"
  28  "        [--reflog] [--stdout | base-name] [--include-tag]\n"
  29  "        [--keep-unreachable | --unpack-unreachable]\n"
  30  "        [< ref-list | < object-list]";
  31
  32struct object_entry {
  33        struct pack_idx_entry idx;
  34        unsigned long size;     /* uncompressed size */
  35        struct packed_git *in_pack;     /* already in pack */
  36        off_t in_pack_offset;
  37        struct object_entry *delta;     /* delta base object */
  38        struct object_entry *delta_child; /* deltified objects that use me as their base */
  39        struct object_entry *delta_sibling; /* other deltified objects that
  40                                             * use the same base as me
  41                                             */
  42        void *delta_data;       /* cached delta (uncompressed) */
  43        unsigned long delta_size;       /* delta data size (uncompressed) */
  44        unsigned long z_delta_size;     /* delta data size (compressed) */
  45        unsigned int hash;      /* name hint hash */
  46        enum object_type type;
  47        enum object_type in_pack_type;  /* could be delta */
  48        unsigned char in_pack_header_size;
  49        unsigned char preferred_base; /* we do not pack this object, but it is
  50                                       * available to be used as a base to
  51                                       * delta other objects against.
  52                                       */
  53        unsigned char no_try_delta;
  54        unsigned char tagged; /* near the very tip of refs */
  55        unsigned char filled; /* assigned write-order */
  56};
  57
  58/*
  59 * Objects we are going to pack are collected in the objects array (dynamically
  60 * expanded).  nr_objects and nr_alloc control this array.  Entries are stored
  61 * in the order we see them -- typically rev-list --objects order, which gives
  62 * us a nice "minimum seek" order.
  63 */
  64static struct object_entry *objects;
  65static struct pack_idx_entry **written_list;
  66static uint32_t nr_objects, nr_alloc, nr_result, nr_written;
  67
  68static int non_empty;
  69static int reuse_delta = 1, reuse_object = 1;
  70static int keep_unreachable, unpack_unreachable, include_tag;
  71static int local;
  72static int incremental;
  73static int ignore_packed_keep;
  74static int allow_ofs_delta;
  75static struct pack_idx_option pack_idx_opts;
  76static const char *base_name;
  77static int progress = 1;
  78static int window = 10;
  79static unsigned long pack_size_limit, pack_size_limit_cfg;
  80static int depth = 50;
  81static int delta_search_threads;
  82static int pack_to_stdout;
  83static int num_preferred_base;
  84static struct progress *progress_state;
  85static int pack_compression_level = Z_DEFAULT_COMPRESSION;
  86static int pack_compression_seen;
  87
  88static unsigned long delta_cache_size = 0;
  89static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
  90static unsigned long cache_max_small_delta_size = 1000;
  91
  92static unsigned long window_memory_limit = 0;
  93
  94/*
  95 * The object names in the objects array are hashed with this hashtable,
  96 * to help look up an entry by object name.
  97 * This hashtable is built after all the objects are seen.
  98 */
  99static int *object_ix;
 100static int object_ix_hashsz;
 101static struct object_entry *locate_object_entry(const unsigned char *sha1);
 102
 103/*
 104 * stats
 105 */
 106static uint32_t written, written_delta;
 107static uint32_t reused, reused_delta;
 108
 109
 110static void *get_delta(struct object_entry *entry)
 111{
 112        unsigned long size, base_size, delta_size;
 113        void *buf, *base_buf, *delta_buf;
 114        enum object_type type;
 115
 116        buf = read_sha1_file(entry->idx.sha1, &type, &size);
 117        if (!buf)
 118                die("unable to read %s", sha1_to_hex(entry->idx.sha1));
 119        base_buf = read_sha1_file(entry->delta->idx.sha1, &type, &base_size);
 120        if (!base_buf)
 121                die("unable to read %s", sha1_to_hex(entry->delta->idx.sha1));
 122        delta_buf = diff_delta(base_buf, base_size,
 123                               buf, size, &delta_size, 0);
 124        if (!delta_buf || delta_size != entry->delta_size)
 125                die("delta size changed");
 126        free(buf);
 127        free(base_buf);
 128        return delta_buf;
 129}
 130
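    /*
     * Deflate the buffer at *pptr using the configured pack compression
     * level.  On return *pptr points at a freshly allocated buffer holding
     * the compressed data, the original buffer has been freed, and the
     * compressed size is returned.
     */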
 131static unsigned long do_compress(void **pptr, unsigned long size)
 132{
 133        git_zstream stream;
 134        void *in, *out;
 135        unsigned long maxsize;
 136
 137        memset(&stream, 0, sizeof(stream));
 138        git_deflate_init(&stream, pack_compression_level);
 139        maxsize = git_deflate_bound(&stream, size);
 140
 141        in = *pptr;
 142        out = xmalloc(maxsize);
 143        *pptr = out;
 144
 145        stream.next_in = in;
 146        stream.avail_in = size;
 147        stream.next_out = out;
 148        stream.avail_out = maxsize;
 149        while (git_deflate(&stream, Z_FINISH) == Z_OK)
 150                ; /* nothing */
 151        git_deflate_end(&stream);
 152
 153        free(in);
 154        return stream.total_out;
 155}
 156
 157/*
 158 * We are going to reuse the existing object data as-is.  Make
 159 * sure it is not corrupt.
 160 */
 161static int check_pack_inflate(struct packed_git *p,
 162                struct pack_window **w_curs,
 163                off_t offset,
 164                off_t len,
 165                unsigned long expect)
 166{
 167        git_zstream stream;
 168        unsigned char fakebuf[4096], *in;
 169        int st;
 170
 171        memset(&stream, 0, sizeof(stream));
 172        git_inflate_init(&stream);
 173        do {
 174                in = use_pack(p, w_curs, offset, &stream.avail_in);
 175                stream.next_in = in;
 176                stream.next_out = fakebuf;
 177                stream.avail_out = sizeof(fakebuf);
 178                st = git_inflate(&stream, Z_FINISH);
 179                offset += stream.next_in - in;
 180        } while (st == Z_OK || st == Z_BUF_ERROR);
 181        git_inflate_end(&stream);
 182        return (st == Z_STREAM_END &&
 183                stream.total_out == expect &&
 184                stream.total_in == len) ? 0 : -1;
 185}
 186
 187static void copy_pack_data(struct sha1file *f,
 188                struct packed_git *p,
 189                struct pack_window **w_curs,
 190                off_t offset,
 191                off_t len)
 192{
 193        unsigned char *in;
 194        unsigned long avail;
 195
 196        while (len) {
 197                in = use_pack(p, w_curs, offset, &avail);
 198                if (avail > len)
 199                        avail = (unsigned long)len;
 200                sha1write(f, in, avail);
 201                offset += avail;
 202                len -= avail;
 203        }
 204}
 205
 206/* Return 0 if we will bust the pack-size limit */
 207static unsigned long write_object(struct sha1file *f,
 208                                  struct object_entry *entry,
 209                                  off_t write_offset)
 210{
 211        unsigned long size, limit, datalen;
 212        void *buf;
 213        unsigned char header[10], dheader[10];
 214        unsigned hdrlen;
 215        enum object_type type;
 216        int usable_delta, to_reuse;
 217
 218        if (!pack_to_stdout)
 219                crc32_begin(f);
 220
 221        type = entry->type;
 222
 223        /* apply the size limit only if the pack size is limited and this is not the first object */
 224        if (!pack_size_limit || !nr_written)
 225                limit = 0;
 226        else if (pack_size_limit <= write_offset)
 227                /*
 228                 * the earlier object did not fit the limit; avoid
 229                 * mistaking this for unlimited (i.e. limit = 0).
 230                 */
 231                limit = 1;
 232        else
 233                limit = pack_size_limit - write_offset;
 234
 235        if (!entry->delta)
 236                usable_delta = 0;       /* no delta */
 237        else if (!pack_size_limit)
 238                usable_delta = 1;       /* unlimited packfile */
 239        else if (entry->delta->idx.offset == (off_t)-1)
 240                usable_delta = 0;       /* base was written to another pack */
 241        else if (entry->delta->idx.offset)
 242                usable_delta = 1;       /* base already exists in this pack */
 243        else
 244                usable_delta = 0;       /* base could end up in another pack */
 245
 246        if (!reuse_object)
 247                to_reuse = 0;   /* explicit */
 248        else if (!entry->in_pack)
 249                to_reuse = 0;   /* can't reuse what we don't have */
 250        else if (type == OBJ_REF_DELTA || type == OBJ_OFS_DELTA)
 251                                /* check_object() decided it for us ... */
 252                to_reuse = usable_delta;
 253                                /* ... but pack split may override that */
 254        else if (type != entry->in_pack_type)
 255                to_reuse = 0;   /* pack has delta which is unusable */
 256        else if (entry->delta)
 257                to_reuse = 0;   /* we want to pack afresh */
 258        else
 259                to_reuse = 1;   /* we have it in-pack undeltified,
 260                                 * and we do not need to deltify it.
 261                                 */
 262
 263        if (!to_reuse) {
 264                no_reuse:
 265                if (!usable_delta) {
 266                        buf = read_sha1_file(entry->idx.sha1, &type, &size);
 267                        if (!buf)
 268                                die("unable to read %s", sha1_to_hex(entry->idx.sha1));
 269                        /*
 270                         * make sure no cached delta data remains from a
 271                         * previous attempt before a pack split occurred.
 272                         */
 273                        free(entry->delta_data);
 274                        entry->delta_data = NULL;
 275                        entry->z_delta_size = 0;
 276                } else if (entry->delta_data) {
 277                        size = entry->delta_size;
 278                        buf = entry->delta_data;
 279                        entry->delta_data = NULL;
 280                        type = (allow_ofs_delta && entry->delta->idx.offset) ?
 281                                OBJ_OFS_DELTA : OBJ_REF_DELTA;
 282                } else {
 283                        buf = get_delta(entry);
 284                        size = entry->delta_size;
 285                        type = (allow_ofs_delta && entry->delta->idx.offset) ?
 286                                OBJ_OFS_DELTA : OBJ_REF_DELTA;
 287                }
 288
 289                if (entry->z_delta_size)
 290                        datalen = entry->z_delta_size;
 291                else
 292                        datalen = do_compress(&buf, size);
 293
 294                /*
 295                 * The object header is a byte of 'type' followed by zero or
 296                 * more bytes of length.
 297                 */
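                    /*
                     * In the on-disk encoding, bits 4-6 of the first byte hold
                     * the type and its low four bits hold the size; each later
                     * byte carries seven more size bits, and the high bit is set
                     * on every byte except the last.  For example, a 100-byte
                     * blob gets the two-byte header 0xb4 0x06.
                     */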
 298                hdrlen = encode_in_pack_object_header(type, size, header);
 299
 300                if (type == OBJ_OFS_DELTA) {
 301                        /*
 302                         * Deltas with relative base contain an additional
 303                         * encoding of the relative offset for the delta
 304                         * base from this object's position in the pack.
 305                         */
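                            /*
                             * The offset is stored big-endian, seven bits per byte,
                             * with the high bit set on every byte except the last;
                             * each continuation byte also adds one to the running
                             * value, so no offset has two encodings.  For example,
                             * an offset of 1000 becomes 0x86 0x68, since
                             * (0x06 + 1) * 128 + 0x68 = 1000.
                             */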
 306                        off_t ofs = entry->idx.offset - entry->delta->idx.offset;
 307                        unsigned pos = sizeof(dheader) - 1;
 308                        dheader[pos] = ofs & 127;
 309                        while (ofs >>= 7)
 310                                dheader[--pos] = 128 | (--ofs & 127);
 311                        if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
 312                                free(buf);
 313                                return 0;
 314                        }
 315                        sha1write(f, header, hdrlen);
 316                        sha1write(f, dheader + pos, sizeof(dheader) - pos);
 317                        hdrlen += sizeof(dheader) - pos;
 318                } else if (type == OBJ_REF_DELTA) {
 319                        /*
 320                         * Deltas with a base reference contain
 321                         * an additional 20 bytes for the base sha1.
 322                         */
 323                        if (limit && hdrlen + 20 + datalen + 20 >= limit) {
 324                                free(buf);
 325                                return 0;
 326                        }
 327                        sha1write(f, header, hdrlen);
 328                        sha1write(f, entry->delta->idx.sha1, 20);
 329                        hdrlen += 20;
 330                } else {
 331                        if (limit && hdrlen + datalen + 20 >= limit) {
 332                                free(buf);
 333                                return 0;
 334                        }
 335                        sha1write(f, header, hdrlen);
 336                }
 337                sha1write(f, buf, datalen);
 338                free(buf);
 339        }
 340        else {
 341                struct packed_git *p = entry->in_pack;
 342                struct pack_window *w_curs = NULL;
 343                struct revindex_entry *revidx;
 344                off_t offset;
 345
 346                if (entry->delta)
 347                        type = (allow_ofs_delta && entry->delta->idx.offset) ?
 348                                OBJ_OFS_DELTA : OBJ_REF_DELTA;
 349                hdrlen = encode_in_pack_object_header(type, entry->size, header);
 350
 351                offset = entry->in_pack_offset;
 352                revidx = find_pack_revindex(p, offset);
 353                datalen = revidx[1].offset - offset;
 354                if (!pack_to_stdout && p->index_version > 1 &&
 355                    check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
 356                        error("bad packed object CRC for %s", sha1_to_hex(entry->idx.sha1));
 357                        unuse_pack(&w_curs);
 358                        goto no_reuse;
 359                }
 360
 361                offset += entry->in_pack_header_size;
 362                datalen -= entry->in_pack_header_size;
 363                if (!pack_to_stdout && p->index_version == 1 &&
 364                    check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
 365                        error("corrupt packed object for %s", sha1_to_hex(entry->idx.sha1));
 366                        unuse_pack(&w_curs);
 367                        goto no_reuse;
 368                }
 369
 370                if (type == OBJ_OFS_DELTA) {
 371                        off_t ofs = entry->idx.offset - entry->delta->idx.offset;
 372                        unsigned pos = sizeof(dheader) - 1;
 373                        dheader[pos] = ofs & 127;
 374                        while (ofs >>= 7)
 375                                dheader[--pos] = 128 | (--ofs & 127);
 376                        if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
 377                                unuse_pack(&w_curs);
 378                                return 0;
 379                        }
 380                        sha1write(f, header, hdrlen);
 381                        sha1write(f, dheader + pos, sizeof(dheader) - pos);
 382                        hdrlen += sizeof(dheader) - pos;
 383                        reused_delta++;
 384                } else if (type == OBJ_REF_DELTA) {
 385                        if (limit && hdrlen + 20 + datalen + 20 >= limit) {
 386                                unuse_pack(&w_curs);
 387                                return 0;
 388                        }
 389                        sha1write(f, header, hdrlen);
 390                        sha1write(f, entry->delta->idx.sha1, 20);
 391                        hdrlen += 20;
 392                        reused_delta++;
 393                } else {
 394                        if (limit && hdrlen + datalen + 20 >= limit) {
 395                                unuse_pack(&w_curs);
 396                                return 0;
 397                        }
 398                        sha1write(f, header, hdrlen);
 399                }
 400                copy_pack_data(f, p, &w_curs, offset, datalen);
 401                unuse_pack(&w_curs);
 402                reused++;
 403        }
 404        if (usable_delta)
 405                written_delta++;
 406        written++;
 407        if (!pack_to_stdout)
 408                entry->idx.crc32 = crc32_end(f);
 409        return hdrlen + datalen;
 410}
 411
 412static int write_one(struct sha1file *f,
 413                               struct object_entry *e,
 414                               off_t *offset)
 415{
 416        unsigned long size;
 417
 418        /* offset is non-zero if the object has already been written. */
 419        if (e->idx.offset || e->preferred_base)
 420                return -1;
 421
 422        /* if we are deltified, write out base object first. */
 423        if (e->delta && !write_one(f, e->delta, offset))
 424                return 0;
 425
 426        e->idx.offset = *offset;
 427        size = write_object(f, e, *offset);
 428        if (!size) {
 429                e->idx.offset = 0;
 430                return 0;
 431        }
 432        written_list[nr_written++] = &e->idx;
 433
 434        /* make sure off_t is sufficiently large not to wrap */
 435        if (signed_add_overflows(*offset, size))
 436                die("pack too large for current definition of off_t");
 437        *offset += size;
 438        return 1;
 439}
 440
 441static int mark_tagged(const char *path, const unsigned char *sha1, int flag,
 442                       void *cb_data)
 443{
 444        unsigned char peeled[20];
 445        struct object_entry *entry = locate_object_entry(sha1);
 446
 447        if (entry)
 448                entry->tagged = 1;
 449        if (!peel_ref(path, peeled)) {
 450                entry = locate_object_entry(peeled);
 451                if (entry)
 452                        entry->tagged = 1;
 453        }
 454        return 0;
 455}
 456
 457static void add_to_write_order(struct object_entry **wo,
 458                               int *endp,
 459                               struct object_entry *e)
 460{
 461        if (e->filled)
 462                return;
 463        wo[(*endp)++] = e;
 464        e->filled = 1;
 465}
 466
 467static void add_descendants_to_write_order(struct object_entry **wo,
 468                                           int *endp,
 469                                           struct object_entry *e)
 470{
 471        struct object_entry *child;
 472
 473        for (child = e->delta_child; child; child = child->delta_sibling)
 474                add_to_write_order(wo, endp, child);
 475        for (child = e->delta_child; child; child = child->delta_sibling)
 476                add_descendants_to_write_order(wo, endp, child);
 477}
 478
 479static void add_family_to_write_order(struct object_entry **wo,
 480                                      int *endp,
 481                                      struct object_entry *e)
 482{
 483        struct object_entry *root;
 484
 485        for (root = e; root->delta; root = root->delta)
 486                ; /* nothing */
 487        add_to_write_order(wo, endp, root);
 488        add_descendants_to_write_order(wo, endp, root);
 489}
 490
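    /*
     * Decide the order in which objects are written out: everything before
     * the first tagged tip in the original recency order, then the tagged
     * tips themselves, then the remaining commits and tags, then trees,
     * and finally the rest, grouped so that a delta base comes before the
     * objects that are deltified against it.
     */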
 491static struct object_entry **compute_write_order(void)
 492{
 493        int i, wo_end;
 494
 495        struct object_entry **wo = xmalloc(nr_objects * sizeof(*wo));
 496
 497        for (i = 0; i < nr_objects; i++) {
 498                objects[i].tagged = 0;
 499                objects[i].filled = 0;
 500                objects[i].delta_child = NULL;
 501                objects[i].delta_sibling = NULL;
 502        }
 503
 504        /*
 505         * Fully connect delta_child/delta_sibling network.
 506         * Make sure delta_sibling is sorted in the original
 507         * recency order.
 508         */
 509        for (i = nr_objects - 1; 0 <= i; i--) {
 510                struct object_entry *e = &objects[i];
 511                if (!e->delta)
 512                        continue;
 513                /* Mark me as the first child */
 514                e->delta_sibling = e->delta->delta_child;
 515                e->delta->delta_child = e;
 516        }
 517
 518        /*
 519         * Mark objects that are at the tip of tags.
 520         */
 521        for_each_tag_ref(mark_tagged, NULL);
 522
 523        /*
 524         * Give the commits in the original recency order until
 525         * we see a tagged tip.
 526         */
 527        for (i = wo_end = 0; i < nr_objects; i++) {
 528                if (objects[i].tagged)
 529                        break;
 530                add_to_write_order(wo, &wo_end, &objects[i]);
 531        }
 532
 533        /*
 534         * Then fill all the tagged tips.
 535         */
 536        for (; i < nr_objects; i++) {
 537                if (objects[i].tagged)
 538                        add_to_write_order(wo, &wo_end, &objects[i]);
 539        }
 540
 541        /*
 542         * And then all remaining commits and tags.
 543         */
 544        for (i = 0; i < nr_objects; i++) {
 545                if (objects[i].type != OBJ_COMMIT &&
 546                    objects[i].type != OBJ_TAG)
 547                        continue;
 548                add_to_write_order(wo, &wo_end, &objects[i]);
 549        }
 550
 551        /*
 552         * And then all the trees.
 553         */
 554        for (i = 0; i < nr_objects; i++) {
 555                if (objects[i].type != OBJ_TREE)
 556                        continue;
 557                add_to_write_order(wo, &wo_end, &objects[i]);
 558        }
 559
 560        /*
 561         * Finally all the rest in really tight order
 562         */
 563        for (i = 0; i < nr_objects; i++)
 564                add_family_to_write_order(wo, &wo_end, &objects[i]);
 565
 566        return wo;
 567}
 568
 569static void write_pack_file(void)
 570{
 571        uint32_t i = 0, j;
 572        struct sha1file *f;
 573        off_t offset;
 574        struct pack_header hdr;
 575        uint32_t nr_remaining = nr_result;
 576        time_t last_mtime = 0;
 577        struct object_entry **write_order;
 578
 579        if (progress > pack_to_stdout)
 580                progress_state = start_progress("Writing objects", nr_result);
 581        written_list = xmalloc(nr_objects * sizeof(*written_list));
 582        write_order = compute_write_order();
 583
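            /*
             * Each iteration of this loop writes one packfile.  When
             * --max-pack-size is in effect a single run may split the
             * output into several packs; objects that did not fit are
             * carried over to the next iteration.
             */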
 584        do {
 585                unsigned char sha1[20];
 586                char *pack_tmp_name = NULL;
 587
 588                if (pack_to_stdout) {
 589                        f = sha1fd_throughput(1, "<stdout>", progress_state);
 590                } else {
 591                        char tmpname[PATH_MAX];
 592                        int fd;
 593                        fd = odb_mkstemp(tmpname, sizeof(tmpname),
 594                                         "pack/tmp_pack_XXXXXX");
 595                        pack_tmp_name = xstrdup(tmpname);
 596                        f = sha1fd(fd, pack_tmp_name);
 597                }
 598
 599                hdr.hdr_signature = htonl(PACK_SIGNATURE);
 600                hdr.hdr_version = htonl(PACK_VERSION);
 601                hdr.hdr_entries = htonl(nr_remaining);
 602                sha1write(f, &hdr, sizeof(hdr));
 603                offset = sizeof(hdr);
 604                nr_written = 0;
 605                for (; i < nr_objects; i++) {
 606                        struct object_entry *e = write_order[i];
 607                        if (!write_one(f, e, &offset))
 608                                break;
 609                        display_progress(progress_state, written);
 610                }
 611
 612                /*
 613                 * Did we write the wrong number of entries in the header?
 614                 * If so, rewrite it like fast-import does.
 615                 */
 616                if (pack_to_stdout) {
 617                        sha1close(f, sha1, CSUM_CLOSE);
 618                } else if (nr_written == nr_remaining) {
 619                        sha1close(f, sha1, CSUM_FSYNC);
 620                } else {
 621                        int fd = sha1close(f, sha1, 0);
 622                        fixup_pack_header_footer(fd, sha1, pack_tmp_name,
 623                                                 nr_written, sha1, offset);
 624                        close(fd);
 625                }
 626
 627                if (!pack_to_stdout) {
 628                        struct stat st;
 629                        const char *idx_tmp_name;
 630                        char tmpname[PATH_MAX];
 631
 632                        idx_tmp_name = write_idx_file(NULL, written_list, nr_written,
 633                                                      &pack_idx_opts, sha1);
 634
 635                        snprintf(tmpname, sizeof(tmpname), "%s-%s.pack",
 636                                 base_name, sha1_to_hex(sha1));
 637                        free_pack_by_name(tmpname);
 638                        if (adjust_shared_perm(pack_tmp_name))
 639                                die_errno("unable to make temporary pack file readable");
 640                        if (rename(pack_tmp_name, tmpname))
 641                                die_errno("unable to rename temporary pack file");
 642
 643                        /*
 644                         * Packs are accessed at runtime in their mtime
 645                         * order, since newer packs are more likely to contain
 646                         * younger objects.  So if we are creating multiple
 647                         * packs then we should modify the mtime of later ones
 648                         * to preserve this property.
 649                         */
 650                        if (stat(tmpname, &st) < 0) {
 651                                warning("failed to stat %s: %s",
 652                                        tmpname, strerror(errno));
 653                        } else if (!last_mtime) {
 654                                last_mtime = st.st_mtime;
 655                        } else {
 656                                struct utimbuf utb;
 657                                utb.actime = st.st_atime;
 658                                utb.modtime = --last_mtime;
 659                                if (utime(tmpname, &utb) < 0)
 660                                        warning("failed utime() on %s: %s",
 661                                                tmpname, strerror(errno));
 662                        }
 663
 664                        snprintf(tmpname, sizeof(tmpname), "%s-%s.idx",
 665                                 base_name, sha1_to_hex(sha1));
 666                        if (adjust_shared_perm(idx_tmp_name))
 667                                die_errno("unable to make temporary index file readable");
 668                        if (rename(idx_tmp_name, tmpname))
 669                                die_errno("unable to rename temporary index file");
 670
 671                        free((void *) idx_tmp_name);
 672                        free(pack_tmp_name);
 673                        puts(sha1_to_hex(sha1));
 674                }
 675
 676                /* mark written objects as written to previous pack */
 677                for (j = 0; j < nr_written; j++) {
 678                        written_list[j]->offset = (off_t)-1;
 679                }
 680                nr_remaining -= nr_written;
 681        } while (nr_remaining && i < nr_objects);
 682
 683        free(written_list);
 684        free(write_order);
 685        stop_progress(&progress_state);
 686        if (written != nr_result)
 687                die("wrote %"PRIu32" objects while expecting %"PRIu32,
 688                        written, nr_result);
 689}
 690
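    /*
     * Look up sha1 in the object_ix hash table (open addressing with
     * linear probing; a slot stores the object index plus one, so zero
     * means the slot is empty).  Returns the slot index when found,
     * or -1 minus the first free slot encountered when not.
     */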
 691static int locate_object_entry_hash(const unsigned char *sha1)
 692{
 693        int i;
 694        unsigned int ui;
 695        memcpy(&ui, sha1, sizeof(unsigned int));
 696        i = ui % object_ix_hashsz;
 697        while (0 < object_ix[i]) {
 698                if (!hashcmp(sha1, objects[object_ix[i] - 1].idx.sha1))
 699                        return i;
 700                if (++i == object_ix_hashsz)
 701                        i = 0;
 702        }
 703        return -1 - i;
 704}
 705
 706static struct object_entry *locate_object_entry(const unsigned char *sha1)
 707{
 708        int i;
 709
 710        if (!object_ix_hashsz)
 711                return NULL;
 712
 713        i = locate_object_entry_hash(sha1);
 714        if (0 <= i)
 715                return &objects[object_ix[i]-1];
 716        return NULL;
 717}
 718
 719static void rehash_objects(void)
 720{
 721        uint32_t i;
 722        struct object_entry *oe;
 723
 724        object_ix_hashsz = nr_objects * 3;
 725        if (object_ix_hashsz < 1024)
 726                object_ix_hashsz = 1024;
 727        object_ix = xrealloc(object_ix, sizeof(int) * object_ix_hashsz);
 728        memset(object_ix, 0, sizeof(int) * object_ix_hashsz);
 729        for (i = 0, oe = objects; i < nr_objects; i++, oe++) {
 730                int ix = locate_object_entry_hash(oe->idx.sha1);
 731                if (0 <= ix)
 732                        continue;
 733                ix = -1 - ix;
 734                object_ix[ix] = i + 1;
 735        }
 736}
 737
 738static unsigned name_hash(const char *name)
 739{
 740        unsigned c, hash = 0;
 741
 742        if (!name)
 743                return 0;
 744
 745        /*
 746         * This effectively just creates a sortable number from the
 747         * last sixteen non-whitespace characters. Last characters
 748         * count "most", so things that end in ".c" sort together.
 749         */
 750        while ((c = *name++) != 0) {
 751                if (isspace(c))
 752                        continue;
 753                hash = (hash >> 2) + (c << 24);
 754        }
 755        return hash;
 756}
 757
 758static void setup_delta_attr_check(struct git_attr_check *check)
 759{
 760        static struct git_attr *attr_delta;
 761
 762        if (!attr_delta)
 763                attr_delta = git_attr("delta");
 764
 765        check[0].attr = attr_delta;
 766}
 767
 768static int no_try_delta(const char *path)
 769{
 770        struct git_attr_check check[1];
 771
 772        setup_delta_attr_check(check);
 773        if (git_check_attr(path, ARRAY_SIZE(check), check))
 774                return 0;
 775        if (ATTR_FALSE(check->value))
 776                return 1;
 777        return 0;
 778}
 779
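    /*
     * Add an object to the list of objects to pack.  Returns 1 if a new
     * entry was created, 0 if the object was already known or is being
     * skipped (e.g. because of --local, --incremental or .keep packs).
     * With exclude set, the object is only recorded as a preferred base.
     */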
 780static int add_object_entry(const unsigned char *sha1, enum object_type type,
 781                            const char *name, int exclude)
 782{
 783        struct object_entry *entry;
 784        struct packed_git *p, *found_pack = NULL;
 785        off_t found_offset = 0;
 786        int ix;
 787        unsigned hash = name_hash(name);
 788
 789        ix = nr_objects ? locate_object_entry_hash(sha1) : -1;
 790        if (ix >= 0) {
 791                if (exclude) {
 792                        entry = objects + object_ix[ix] - 1;
 793                        if (!entry->preferred_base)
 794                                nr_result--;
 795                        entry->preferred_base = 1;
 796                }
 797                return 0;
 798        }
 799
 800        if (!exclude && local && has_loose_object_nonlocal(sha1))
 801                return 0;
 802
 803        for (p = packed_git; p; p = p->next) {
 804                off_t offset = find_pack_entry_one(sha1, p);
 805                if (offset) {
 806                        if (!found_pack) {
 807                                found_offset = offset;
 808                                found_pack = p;
 809                        }
 810                        if (exclude)
 811                                break;
 812                        if (incremental)
 813                                return 0;
 814                        if (local && !p->pack_local)
 815                                return 0;
 816                        if (ignore_packed_keep && p->pack_local && p->pack_keep)
 817                                return 0;
 818                }
 819        }
 820
 821        if (nr_objects >= nr_alloc) {
 822                nr_alloc = (nr_alloc  + 1024) * 3 / 2;
 823                objects = xrealloc(objects, nr_alloc * sizeof(*entry));
 824        }
 825
 826        entry = objects + nr_objects++;
 827        memset(entry, 0, sizeof(*entry));
 828        hashcpy(entry->idx.sha1, sha1);
 829        entry->hash = hash;
 830        if (type)
 831                entry->type = type;
 832        if (exclude)
 833                entry->preferred_base = 1;
 834        else
 835                nr_result++;
 836        if (found_pack) {
 837                entry->in_pack = found_pack;
 838                entry->in_pack_offset = found_offset;
 839        }
 840
 841        if (object_ix_hashsz * 3 <= nr_objects * 4)
 842                rehash_objects();
 843        else
 844                object_ix[-1 - ix] = nr_objects;
 845
 846        display_progress(progress_state, nr_objects);
 847
 848        if (name && no_try_delta(name))
 849                entry->no_try_delta = 1;
 850
 851        return 1;
 852}
 853
 854struct pbase_tree_cache {
 855        unsigned char sha1[20];
 856        int ref;
 857        int temporary;
 858        void *tree_data;
 859        unsigned long tree_size;
 860};
 861
 862static struct pbase_tree_cache *(pbase_tree_cache[256]);
 863static int pbase_tree_cache_ix(const unsigned char *sha1)
 864{
 865        return sha1[0] % ARRAY_SIZE(pbase_tree_cache);
 866}
 867static int pbase_tree_cache_ix_incr(int ix)
 868{
 869        return (ix+1) % ARRAY_SIZE(pbase_tree_cache);
 870}
 871
 872static struct pbase_tree {
 873        struct pbase_tree *next;
 874        /* This is a phony "cache" entry; we are not
 875         * going to evict it nor find it through the _get()
 876         * mechanism -- this is for the toplevel node that
 877         * would almost always change with any commit.
 878         */
 879        struct pbase_tree_cache pcache;
 880} *pbase_tree;
 881
 882static struct pbase_tree_cache *pbase_tree_get(const unsigned char *sha1)
 883{
 884        struct pbase_tree_cache *ent, *nent;
 885        void *data;
 886        unsigned long size;
 887        enum object_type type;
 888        int neigh;
 889        int my_ix = pbase_tree_cache_ix(sha1);
 890        int available_ix = -1;
 891
 892        /* pbase_tree_cache acts as a limited hashtable.
 893         * An object will be found at its index, or within a few
 894         * slots after that slot, if it is cached.
 895         */
 896        for (neigh = 0; neigh < 8; neigh++) {
 897                ent = pbase_tree_cache[my_ix];
 898                if (ent && !hashcmp(ent->sha1, sha1)) {
 899                        ent->ref++;
 900                        return ent;
 901                }
 902                else if (((available_ix < 0) && (!ent || !ent->ref)) ||
 903                         ((0 <= available_ix) &&
 904                          (!ent && pbase_tree_cache[available_ix])))
 905                        available_ix = my_ix;
 906                if (!ent)
 907                        break;
 908                my_ix = pbase_tree_cache_ix_incr(my_ix);
 909        }
 910
 911        /* Did not find one.  Either we got a bogus request or
 912         * we need to read and perhaps cache.
 913         */
 914        data = read_sha1_file(sha1, &type, &size);
 915        if (!data)
 916                return NULL;
 917        if (type != OBJ_TREE) {
 918                free(data);
 919                return NULL;
 920        }
 921
 922        /* We need to either cache or return a throwaway copy */
 923
 924        if (available_ix < 0)
 925                ent = NULL;
 926        else {
 927                ent = pbase_tree_cache[available_ix];
 928                my_ix = available_ix;
 929        }
 930
 931        if (!ent) {
 932                nent = xmalloc(sizeof(*nent));
 933                nent->temporary = (available_ix < 0);
 934        }
 935        else {
 936                /* evict and reuse */
 937                free(ent->tree_data);
 938                nent = ent;
 939        }
 940        hashcpy(nent->sha1, sha1);
 941        nent->tree_data = data;
 942        nent->tree_size = size;
 943        nent->ref = 1;
 944        if (!nent->temporary)
 945                pbase_tree_cache[my_ix] = nent;
 946        return nent;
 947}
 948
 949static void pbase_tree_put(struct pbase_tree_cache *cache)
 950{
 951        if (!cache->temporary) {
 952                cache->ref--;
 953                return;
 954        }
 955        free(cache->tree_data);
 956        free(cache);
 957}
 958
 959static int name_cmp_len(const char *name)
 960{
 961        int i;
 962        for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
 963                ;
 964        return i;
 965}
 966
 967static void add_pbase_object(struct tree_desc *tree,
 968                             const char *name,
 969                             int cmplen,
 970                             const char *fullname)
 971{
 972        struct name_entry entry;
 973        int cmp;
 974
 975        while (tree_entry(tree,&entry)) {
 976                if (S_ISGITLINK(entry.mode))
 977                        continue;
 978                cmp = tree_entry_len(entry.path, entry.sha1) != cmplen ? 1 :
 979                      memcmp(name, entry.path, cmplen);
 980                if (cmp > 0)
 981                        continue;
 982                if (cmp < 0)
 983                        return;
 984                if (name[cmplen] != '/') {
 985                        add_object_entry(entry.sha1,
 986                                         object_type(entry.mode),
 987                                         fullname, 1);
 988                        return;
 989                }
 990                if (S_ISDIR(entry.mode)) {
 991                        struct tree_desc sub;
 992                        struct pbase_tree_cache *tree;
 993                        const char *down = name+cmplen+1;
 994                        int downlen = name_cmp_len(down);
 995
 996                        tree = pbase_tree_get(entry.sha1);
 997                        if (!tree)
 998                                return;
 999                        init_tree_desc(&sub, tree->tree_data, tree->tree_size);
1000
1001                        add_pbase_object(&sub, down, downlen, fullname);
1002                        pbase_tree_put(tree);
1003                }
1004        }
1005}
1006
1007static unsigned *done_pbase_paths;
1008static int done_pbase_paths_num;
1009static int done_pbase_paths_alloc;
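    /*
     * done_pbase_paths is kept sorted in descending hash order.  Return
     * the index of hash when it is present, otherwise -1 minus the
     * position where it should be inserted.
     */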
1010static int done_pbase_path_pos(unsigned hash)
1011{
1012        int lo = 0;
1013        int hi = done_pbase_paths_num;
1014        while (lo < hi) {
1015                int mi = (hi + lo) / 2;
1016                if (done_pbase_paths[mi] == hash)
1017                        return mi;
1018                if (done_pbase_paths[mi] < hash)
1019                        hi = mi;
1020                else
1021                        lo = mi + 1;
1022        }
1023        return -lo-1;
1024}
1025
1026static int check_pbase_path(unsigned hash)
1027{
1028        int pos = (!done_pbase_paths) ? -1 : done_pbase_path_pos(hash);
1029        if (0 <= pos)
1030                return 1;
1031        pos = -pos - 1;
1032        if (done_pbase_paths_alloc <= done_pbase_paths_num) {
1033                done_pbase_paths_alloc = alloc_nr(done_pbase_paths_alloc);
1034                done_pbase_paths = xrealloc(done_pbase_paths,
1035                                            done_pbase_paths_alloc *
1036                                            sizeof(unsigned));
1037        }
1038        done_pbase_paths_num++;
1039        if (pos < done_pbase_paths_num)
1040                memmove(done_pbase_paths + pos + 1,
1041                        done_pbase_paths + pos,
1042                        (done_pbase_paths_num - pos - 1) * sizeof(unsigned));
1043        done_pbase_paths[pos] = hash;
1044        return 0;
1045}
1046
1047static void add_preferred_base_object(const char *name)
1048{
1049        struct pbase_tree *it;
1050        int cmplen;
1051        unsigned hash = name_hash(name);
1052
1053        if (!num_preferred_base || check_pbase_path(hash))
1054                return;
1055
1056        cmplen = name_cmp_len(name);
1057        for (it = pbase_tree; it; it = it->next) {
1058                if (cmplen == 0) {
1059                        add_object_entry(it->pcache.sha1, OBJ_TREE, NULL, 1);
1060                }
1061                else {
1062                        struct tree_desc tree;
1063                        init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
1064                        add_pbase_object(&tree, name, cmplen, name);
1065                }
1066        }
1067}
1068
1069static void add_preferred_base(unsigned char *sha1)
1070{
1071        struct pbase_tree *it;
1072        void *data;
1073        unsigned long size;
1074        unsigned char tree_sha1[20];
1075
1076        if (window <= num_preferred_base++)
1077                return;
1078
1079        data = read_object_with_reference(sha1, tree_type, &size, tree_sha1);
1080        if (!data)
1081                return;
1082
1083        for (it = pbase_tree; it; it = it->next) {
1084                if (!hashcmp(it->pcache.sha1, tree_sha1)) {
1085                        free(data);
1086                        return;
1087                }
1088        }
1089
1090        it = xcalloc(1, sizeof(*it));
1091        it->next = pbase_tree;
1092        pbase_tree = it;
1093
1094        hashcpy(it->pcache.sha1, tree_sha1);
1095        it->pcache.tree_data = data;
1096        it->pcache.tree_size = size;
1097}
1098
1099static void cleanup_preferred_base(void)
1100{
1101        struct pbase_tree *it;
1102        unsigned i;
1103
1104        it = pbase_tree;
1105        pbase_tree = NULL;
1106        while (it) {
1107                struct pbase_tree *this = it;
1108                it = this->next;
1109                free(this->pcache.tree_data);
1110                free(this);
1111        }
1112
1113        for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
1114                if (!pbase_tree_cache[i])
1115                        continue;
1116                free(pbase_tree_cache[i]->tree_data);
1117                free(pbase_tree_cache[i]);
1118                pbase_tree_cache[i] = NULL;
1119        }
1120
1121        free(done_pbase_paths);
1122        done_pbase_paths = NULL;
1123        done_pbase_paths_num = done_pbase_paths_alloc = 0;
1124}
1125
1126static void check_object(struct object_entry *entry)
1127{
1128        if (entry->in_pack) {
1129                struct packed_git *p = entry->in_pack;
1130                struct pack_window *w_curs = NULL;
1131                const unsigned char *base_ref = NULL;
1132                struct object_entry *base_entry;
1133                unsigned long used, used_0;
1134                unsigned long avail;
1135                off_t ofs;
1136                unsigned char *buf, c;
1137
1138                buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);
1139
1140                /*
1141                 * We want in_pack_type even if we do not reuse delta
1142                 * since non-delta representations could still be reused.
1143                 */
1144                used = unpack_object_header_buffer(buf, avail,
1145                                                   &entry->in_pack_type,
1146                                                   &entry->size);
1147                if (used == 0)
1148                        goto give_up;
1149
1150                /*
1151                 * Determine if this is a delta and if so whether we can
1152                 * reuse it or not.  Otherwise let's find out as cheaply as
1153                 * possible what the actual type and size of this object are.
1154                 */
1155                switch (entry->in_pack_type) {
1156                default:
1157                        /* Not a delta hence we've already got all we need. */
1158                        entry->type = entry->in_pack_type;
1159                        entry->in_pack_header_size = used;
1160                        if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
1161                                goto give_up;
1162                        unuse_pack(&w_curs);
1163                        return;
1164                case OBJ_REF_DELTA:
1165                        if (reuse_delta && !entry->preferred_base)
1166                                base_ref = use_pack(p, &w_curs,
1167                                                entry->in_pack_offset + used, NULL);
1168                        entry->in_pack_header_size = used + 20;
1169                        break;
1170                case OBJ_OFS_DELTA:
1171                        buf = use_pack(p, &w_curs,
1172                                       entry->in_pack_offset + used, NULL);
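                            /*
                             * Decode the delta base offset: the inverse of the
                             * encoding written in write_object(), seven bits per
                             * byte with a +1 bias at each continuation byte.
                             */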
1173                        used_0 = 0;
1174                        c = buf[used_0++];
1175                        ofs = c & 127;
1176                        while (c & 128) {
1177                                ofs += 1;
1178                                if (!ofs || MSB(ofs, 7)) {
1179                                        error("delta base offset overflow in pack for %s",
1180                                              sha1_to_hex(entry->idx.sha1));
1181                                        goto give_up;
1182                                }
1183                                c = buf[used_0++];
1184                                ofs = (ofs << 7) + (c & 127);
1185                        }
1186                        ofs = entry->in_pack_offset - ofs;
1187                        if (ofs <= 0 || ofs >= entry->in_pack_offset) {
1188                                error("delta base offset out of bound for %s",
1189                                      sha1_to_hex(entry->idx.sha1));
1190                                goto give_up;
1191                        }
1192                        if (reuse_delta && !entry->preferred_base) {
1193                                struct revindex_entry *revidx;
1194                                revidx = find_pack_revindex(p, ofs);
1195                                if (!revidx)
1196                                        goto give_up;
1197                                base_ref = nth_packed_object_sha1(p, revidx->nr);
1198                        }
1199                        entry->in_pack_header_size = used + used_0;
1200                        break;
1201                }
1202
1203                if (base_ref && (base_entry = locate_object_entry(base_ref))) {
1204                        /*
1205                         * If base_ref was set above that means we wish to
1206                         * reuse delta data, and we even found that base
1207                         * in the list of objects we want to pack. Goodie!
1208                         *
1209                         * Depth value does not matter - find_deltas() will
1210                         * never consider reused delta as the base object to
1211                         * deltify other objects against, in order to avoid
1212                         * circular deltas.
1213                         */
1214                        entry->type = entry->in_pack_type;
1215                        entry->delta = base_entry;
1216                        entry->delta_size = entry->size;
1217                        entry->delta_sibling = base_entry->delta_child;
1218                        base_entry->delta_child = entry;
1219                        unuse_pack(&w_curs);
1220                        return;
1221                }
1222
1223                if (entry->type) {
1224                        /*
1225                         * This must be a delta and we already know what the
1226                         * final object type is.  Let's extract the actual
1227                         * object size from the delta header.
1228                         */
1229                        entry->size = get_size_from_delta(p, &w_curs,
1230                                        entry->in_pack_offset + entry->in_pack_header_size);
1231                        if (entry->size == 0)
1232                                goto give_up;
1233                        unuse_pack(&w_curs);
1234                        return;
1235                }
1236
1237                /*
1238                 * No choice but to fall back to the recursive delta walk
1239                 * with sha1_object_info() to find out the object type
1240                 * at this point...
1241                 */
1242                give_up:
1243                unuse_pack(&w_curs);
1244        }
1245
1246        entry->type = sha1_object_info(entry->idx.sha1, &entry->size);
1247        /*
1248         * The error condition is checked in prepare_pack().  This is
1249         * to permit a missing preferred base object to be ignored
1250         * as a preferred base.  Doing so can result in a larger
1251         * pack file, but the transfer will still take place.
1252         */
1253}
1254
1255static int pack_offset_sort(const void *_a, const void *_b)
1256{
1257        const struct object_entry *a = *(struct object_entry **)_a;
1258        const struct object_entry *b = *(struct object_entry **)_b;
1259
1260        /* avoid filesystem thrashing with loose objects */
1261        if (!a->in_pack && !b->in_pack)
1262                return hashcmp(a->idx.sha1, b->idx.sha1);
1263
1264        if (a->in_pack < b->in_pack)
1265                return -1;
1266        if (a->in_pack > b->in_pack)
1267                return 1;
1268        return a->in_pack_offset < b->in_pack_offset ? -1 :
1269                        (a->in_pack_offset > b->in_pack_offset);
1270}
1271
1272static void get_object_details(void)
1273{
1274        uint32_t i;
1275        struct object_entry **sorted_by_offset;
1276
1277        sorted_by_offset = xcalloc(nr_objects, sizeof(struct object_entry *));
1278        for (i = 0; i < nr_objects; i++)
1279                sorted_by_offset[i] = objects + i;
1280        qsort(sorted_by_offset, nr_objects, sizeof(*sorted_by_offset), pack_offset_sort);
1281
1282        for (i = 0; i < nr_objects; i++) {
1283                struct object_entry *entry = sorted_by_offset[i];
1284                check_object(entry);
1285                if (big_file_threshold <= entry->size)
1286                        entry->no_try_delta = 1;
1287        }
1288
1289        free(sorted_by_offset);
1290}
1291
1292/*
1293 * We search for deltas in a list sorted by type, by filename hash, and then
1294 * by size, so that we see progressively smaller and smaller files.
1295 * That's because we prefer deltas to be from the bigger file
1296 * to the smaller -- deletes are potentially cheaper, but perhaps
1297 * more importantly, the bigger file is likely the more recent
1298 * one.  The deepest deltas are therefore the oldest objects, which are
1299 * less likely to be accessed often.
1300 */
1301static int type_size_sort(const void *_a, const void *_b)
1302{
1303        const struct object_entry *a = *(struct object_entry **)_a;
1304        const struct object_entry *b = *(struct object_entry **)_b;
1305
1306        if (a->type > b->type)
1307                return -1;
1308        if (a->type < b->type)
1309                return 1;
1310        if (a->hash > b->hash)
1311                return -1;
1312        if (a->hash < b->hash)
1313                return 1;
1314        if (a->preferred_base > b->preferred_base)
1315                return -1;
1316        if (a->preferred_base < b->preferred_base)
1317                return 1;
1318        if (a->size > b->size)
1319                return -1;
1320        if (a->size < b->size)
1321                return 1;
1322        return a < b ? -1 : (a > b);  /* newest first */
1323}
1324
1325struct unpacked {
1326        struct object_entry *entry;
1327        void *data;
1328        struct delta_index *index;
1329        unsigned depth;
1330};
1331
1332static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
1333                           unsigned long delta_size)
1334{
1335        if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)
1336                return 0;
1337
1338        if (delta_size < cache_max_small_delta_size)
1339                return 1;
1340
1341        /* cache delta, if objects are large enough compared to delta size */
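            /* roughly: src_size/1MB + trg_size/2MB must exceed delta_size/1KB */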
1342        if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))
1343                return 1;
1344
1345        return 0;
1346}
1347
1348#ifndef NO_PTHREADS
1349
1350static pthread_mutex_t read_mutex;
1351#define read_lock()             pthread_mutex_lock(&read_mutex)
1352#define read_unlock()           pthread_mutex_unlock(&read_mutex)
1353
1354static pthread_mutex_t cache_mutex;
1355#define cache_lock()            pthread_mutex_lock(&cache_mutex)
1356#define cache_unlock()          pthread_mutex_unlock(&cache_mutex)
1357
1358static pthread_mutex_t progress_mutex;
1359#define progress_lock()         pthread_mutex_lock(&progress_mutex)
1360#define progress_unlock()       pthread_mutex_unlock(&progress_mutex)
1361
1362#else
1363
1364#define read_lock()             (void)0
1365#define read_unlock()           (void)0
1366#define cache_lock()            (void)0
1367#define cache_unlock()          (void)0
1368#define progress_lock()         (void)0
1369#define progress_unlock()       (void)0
1370
1371#endif
1372
1373static int try_delta(struct unpacked *trg, struct unpacked *src,
1374                     unsigned max_depth, unsigned long *mem_usage)
1375{
1376        struct object_entry *trg_entry = trg->entry;
1377        struct object_entry *src_entry = src->entry;
1378        unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;
1379        unsigned ref_depth;
1380        enum object_type type;
1381        void *delta_buf;
1382
1383        /* Don't bother doing diffs between different types */
1384        if (trg_entry->type != src_entry->type)
1385                return -1;
1386
1387        /*
1388         * When reusing delta data, we do not bother to retry a delta
1389         * that we already discarded on an earlier try.
1390         */
1391        if (reuse_delta && trg_entry->in_pack &&
1392            trg_entry->in_pack == src_entry->in_pack &&
1393            trg_entry->in_pack_type != OBJ_REF_DELTA &&
1394            trg_entry->in_pack_type != OBJ_OFS_DELTA)
1395                return 0;
1396
1397        /* Let's not bust the allowed depth. */
1398        if (src->depth >= max_depth)
1399                return 0;
1400
1401        /* Now some size filtering heuristics. */
1402        trg_size = trg_entry->size;
1403        if (!trg_entry->delta) {
1404                max_size = trg_size/2 - 20;
1405                ref_depth = 1;
1406        } else {
1407                max_size = trg_entry->delta_size;
1408                ref_depth = trg->depth;
1409        }
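        /*
         * Scale the allowed delta size by the depth budget left below the
         * source: a delta that would sit deeper in a chain must be
         * proportionally smaller to be worth keeping.
         */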
1410        max_size = (uint64_t)max_size * (max_depth - src->depth) /
1411                                                (max_depth - ref_depth + 1);
1412        if (max_size == 0)
1413                return 0;
1414        src_size = src_entry->size;
1415        sizediff = src_size < trg_size ? trg_size - src_size : 0;
1416        if (sizediff >= max_size)
1417                return 0;
1418        if (trg_size < src_size / 32)
1419                return 0;
1420
1421        /* Load data if not already done */
1422        if (!trg->data) {
1423                read_lock();
1424                trg->data = read_sha1_file(trg_entry->idx.sha1, &type, &sz);
1425                read_unlock();
1426                if (!trg->data)
1427                        die("object %s cannot be read",
1428                            sha1_to_hex(trg_entry->idx.sha1));
1429                if (sz != trg_size)
1430                        die("object %s inconsistent object length (%lu vs %lu)",
1431                            sha1_to_hex(trg_entry->idx.sha1), sz, trg_size);
1432                *mem_usage += sz;
1433        }
1434        if (!src->data) {
1435                read_lock();
1436                src->data = read_sha1_file(src_entry->idx.sha1, &type, &sz);
1437                read_unlock();
1438                if (!src->data) {
1439                        if (src_entry->preferred_base) {
1440                                static int warned = 0;
1441                                if (!warned++)
1442                                        warning("object %s cannot be read",
1443                                                sha1_to_hex(src_entry->idx.sha1));
1444                                /*
1445                                 * Those objects are not included in the
1446                                 * resulting pack.  Be resilient and ignore
1447                                 * them if they can't be read, so that the
1448                                 * pack can still be created.
1449                                 */
1450                                return 0;
1451                        }
1452                        die("object %s cannot be read",
1453                            sha1_to_hex(src_entry->idx.sha1));
1454                }
1455                if (sz != src_size)
1456                        die("object %s inconsistent object length (%lu vs %lu)",
1457                            sha1_to_hex(src_entry->idx.sha1), sz, src_size);
1458                *mem_usage += sz;
1459        }
1460        if (!src->index) {
1461                src->index = create_delta_index(src->data, src_size);
1462                if (!src->index) {
1463                        static int warned = 0;
1464                        if (!warned++)
1465                                warning("suboptimal pack - out of memory");
1466                        return 0;
1467                }
1468                *mem_usage += sizeof_delta_index(src->index);
1469        }
1470
1471        delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
1472        if (!delta_buf)
1473                return 0;
1474
1475        if (trg_entry->delta) {
1476                /* Prefer only shallower same-sized deltas. */
1477                if (delta_size == trg_entry->delta_size &&
1478                    src->depth + 1 >= trg->depth) {
1479                        free(delta_buf);
1480                        return 0;
1481                }
1482        }
1483
1484        /*
1485         * Handle memory allocation outside of the cache
1486         * accounting lock.  The compiler will optimize the strangeness
1487         * away when NO_PTHREADS is defined.
1488         */
1489        free(trg_entry->delta_data);
1490        cache_lock();
1491        if (trg_entry->delta_data) {
1492                delta_cache_size -= trg_entry->delta_size;
1493                trg_entry->delta_data = NULL;
1494        }
1495        if (delta_cacheable(src_size, trg_size, delta_size)) {
1496                delta_cache_size += delta_size;
1497                cache_unlock();
1498                trg_entry->delta_data = xrealloc(delta_buf, delta_size);
1499        } else {
1500                cache_unlock();
1501                free(delta_buf);
1502        }
1503
1504        trg_entry->delta = src_entry;
1505        trg_entry->delta_size = delta_size;
1506        trg->depth = src->depth + 1;
1507
1508        return 1;
1509}
1510
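/*
 * Return the length of the longest delta chain hanging off "me", with
 * "me" itself counted as depth n.  find_deltas() uses this to lower the
 * allowed depth so that objects already deltified against "me" do not
 * end up deeper than --depth.
 */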
1511static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
1512{
1513        struct object_entry *child = me->delta_child;
1514        unsigned int m = n;
1515        while (child) {
1516                unsigned int c = check_delta_limit(child, n + 1);
1517                if (m < c)
1518                        m = c;
1519                child = child->delta_sibling;
1520        }
1521        return m;
1522}
1523
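/*
 * Release the cached object data and delta index held by a window slot,
 * clear the slot, and return an estimate of how much memory was freed.
 */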
1524static unsigned long free_unpacked(struct unpacked *n)
1525{
1526        unsigned long freed_mem = sizeof_delta_index(n->index);
1527        free_delta_index(n->index);
1528        n->index = NULL;
1529        if (n->data) {
1530                freed_mem += n->entry->size;
1531                free(n->data);
1532                n->data = NULL;
1533        }
1534        n->entry = NULL;
1535        n->depth = 0;
1536        return freed_mem;
1537}
1538
1539static void find_deltas(struct object_entry **list, unsigned *list_size,
1540                        int window, int depth, unsigned *processed)
1541{
1542        uint32_t i, idx = 0, count = 0;
1543        struct unpacked *array;
1544        unsigned long mem_usage = 0;
1545
1546        array = xcalloc(window, sizeof(struct unpacked));
1547
1548        for (;;) {
1549                struct object_entry *entry;
1550                struct unpacked *n = array + idx;
1551                int j, max_depth, best_base = -1;
1552
1553                progress_lock();
1554                if (!*list_size) {
1555                        progress_unlock();
1556                        break;
1557                }
1558                entry = *list++;
1559                (*list_size)--;
1560                if (!entry->preferred_base) {
1561                        (*processed)++;
1562                        display_progress(progress_state, *processed);
1563                }
1564                progress_unlock();
1565
1566                mem_usage -= free_unpacked(n);
1567                n->entry = entry;
1568
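                /*
                 * Enforce --window-memory: evict the oldest window entries
                 * until the cached data fits under the limit again.
                 */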
1569                while (window_memory_limit &&
1570                       mem_usage > window_memory_limit &&
1571                       count > 1) {
1572                        uint32_t tail = (idx + window - count) % window;
1573                        mem_usage -= free_unpacked(array + tail);
1574                        count--;
1575                }
1576
1577                /* We do not compute deltas to *create* objects we are not
1578                 * going to pack.
1579                 */
1580                if (entry->preferred_base)
1581                        goto next;
1582
1583                /*
1584                 * If the current object is at the pack edge, take the depth of
1585                 * the objects that depend on the current object into account;
1586                 * otherwise they would become too deep.
1587                 */
1588                max_depth = depth;
1589                if (entry->delta_child) {
1590                        max_depth -= check_delta_limit(entry, 0);
1591                        if (max_depth <= 0)
1592                                goto next;
1593                }
1594
1595                j = window;
1596                while (--j > 0) {
1597                        int ret;
1598                        uint32_t other_idx = idx + j;
1599                        struct unpacked *m;
1600                        if (other_idx >= window)
1601                                other_idx -= window;
1602                        m = array + other_idx;
1603                        if (!m->entry)
1604                                break;
1605                        ret = try_delta(n, m, max_depth, &mem_usage);
1606                        if (ret < 0)
1607                                break;
1608                        else if (ret > 0)
1609                                best_base = other_idx;
1610                }
1611
1612                /*
1613                 * If we decided to cache the delta data, then it is best
1614                 * to compress it right away.  First because we have to do
1615                 * it anyway, and doing it here while we're threaded will
1616                 * save a lot of time in the non-threaded write phase,
1617                 * as well as allow for caching more deltas within
1618                 * the same cache size limit.
1619                 * ...
1620                 * But only if not writing to stdout, since in that case
1621                 * the network is most likely throttling writes anyway,
1622                 * and therefore it is best to go to the write phase ASAP
1623                 * instead, as we can afford spending more time compressing
1624                 * between writes at that moment.
1625                 */
1626                if (entry->delta_data && !pack_to_stdout) {
1627                        entry->z_delta_size = do_compress(&entry->delta_data,
1628                                                          entry->delta_size);
1629                        cache_lock();
1630                        delta_cache_size -= entry->delta_size;
1631                        delta_cache_size += entry->z_delta_size;
1632                        cache_unlock();
1633                }
1634
1635                /* If we made n a delta, and n is already at the maximum
1636                 * depth, leaving it in the window is pointless; we
1637                 * should evict it first.
1638                 */
1639                if (entry->delta && max_depth <= n->depth)
1640                        continue;
1641
1642                /*
1643                 * Move the best delta base up in the window, after the
1644                 * currently deltified object, to keep it longer.  It will
1645                 * be the first base object to be attempted next.
1646                 */
1647                if (entry->delta) {
1648                        struct unpacked swap = array[best_base];
1649                        int dist = (window + idx - best_base) % window;
1650                        int dst = best_base;
1651                        while (dist--) {
1652                                int src = (dst + 1) % window;
1653                                array[dst] = array[src];
1654                                dst = src;
1655                        }
1656                        array[dst] = swap;
1657                }
1658
1659                next:
1660                idx++;
1661                if (count + 1 < window)
1662                        count++;
1663                if (idx >= window)
1664                        idx = 0;
1665        }
1666
1667        for (i = 0; i < window; ++i) {
1668                free_delta_index(array[i].index);
1669                free(array[i].data);
1670        }
1671        free(array);
1672}
1673
1674#ifndef NO_PTHREADS
1675
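/*
 * try_to_free handler installed while worker threads run: take the read
 * lock so that releasing pack memory cannot race with object reads in
 * other threads.
 */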
1676static void try_to_free_from_threads(size_t size)
1677{
1678        read_lock();
1679        release_pack_memory(size, -1);
1680        read_unlock();
1681}
1682
1683static try_to_free_t old_try_to_free_routine;
1684
1685/*
1686 * The main thread waits on the condition that (at least) one of the workers
1687 * has stopped working (which is indicated in the .working member of
1688 * struct thread_params).
1689 * When a work thread has completed its work, it sets .working to 0 and
1690 * signals the main thread and waits on the condition that .data_ready
1691 * becomes 1.
1692 */
1693
1694struct thread_params {
1695        pthread_t thread;
1696        struct object_entry **list;
1697        unsigned list_size;
1698        unsigned remaining;
1699        int window;
1700        int depth;
1701        int working;
1702        int data_ready;
1703        pthread_mutex_t mutex;
1704        pthread_cond_t cond;
1705        unsigned *processed;
1706};
1707
1708static pthread_cond_t progress_cond;
1709
1710/*
1711 * Mutexes and condition variables can't be statically initialized on Windows.
1712 */
1713static void init_threaded_search(void)
1714{
1715        init_recursive_mutex(&read_mutex);
1716        pthread_mutex_init(&cache_mutex, NULL);
1717        pthread_mutex_init(&progress_mutex, NULL);
1718        pthread_cond_init(&progress_cond, NULL);
1719        old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
1720}
1721
1722static void cleanup_threaded_search(void)
1723{
1724        set_try_to_free_routine(old_try_to_free_routine);
1725        pthread_cond_destroy(&progress_cond);
1726        pthread_mutex_destroy(&read_mutex);
1727        pthread_mutex_destroy(&cache_mutex);
1728        pthread_mutex_destroy(&progress_mutex);
1729}
1730
1731static void *threaded_find_deltas(void *arg)
1732{
1733        struct thread_params *me = arg;
1734
1735        while (me->remaining) {
1736                find_deltas(me->list, &me->remaining,
1737                            me->window, me->depth, me->processed);
1738
1739                progress_lock();
1740                me->working = 0;
1741                pthread_cond_signal(&progress_cond);
1742                progress_unlock();
1743
1744                /*
1745                 * We must not set ->data_ready before we wait on the
1746                 * condition because the main thread may have set it to 1
1747                 * before we get here. In order to be sure that new
1748                 * work is available if we see 1 in ->data_ready, it
1749                 * was initialized to 0 before this thread was spawned
1750                 * and we reset it to 0 right away.
1751                 */
1752                pthread_mutex_lock(&me->mutex);
1753                while (!me->data_ready)
1754                        pthread_cond_wait(&me->cond, &me->mutex);
1755                me->data_ready = 0;
1756                pthread_mutex_unlock(&me->mutex);
1757        }
1758        /* leave ->working 1 so that this doesn't get more work assigned */
1759        return NULL;
1760}
1761
1762static void ll_find_deltas(struct object_entry **list, unsigned list_size,
1763                           int window, int depth, unsigned *processed)
1764{
1765        struct thread_params *p;
1766        int i, ret, active_threads = 0;
1767
1768        init_threaded_search();
1769
1770        if (!delta_search_threads)      /* --threads=0 means autodetect */
1771                delta_search_threads = online_cpus();
1772        if (delta_search_threads <= 1) {
1773                find_deltas(list, &list_size, window, depth, processed);
1774                cleanup_threaded_search();
1775                return;
1776        }
1777        if (progress > pack_to_stdout)
1778                fprintf(stderr, "Delta compression using up to %d threads.\n",
1779                                delta_search_threads);
1780        p = xcalloc(delta_search_threads, sizeof(*p));
1781
1782        /* Partition the work amongst work threads. */
1783        for (i = 0; i < delta_search_threads; i++) {
1784                unsigned sub_size = list_size / (delta_search_threads - i);
1785
1786                /* Don't use segments that are too small, or no deltas will be found. */
1787                if (sub_size < 2*window && i+1 < delta_search_threads)
1788                        sub_size = 0;
1789
1790                p[i].window = window;
1791                p[i].depth = depth;
1792                p[i].processed = processed;
1793                p[i].working = 1;
1794                p[i].data_ready = 0;
1795
1796                /* try to split chunks on "path" boundaries */
1797                while (sub_size && sub_size < list_size &&
1798                       list[sub_size]->hash &&
1799                       list[sub_size]->hash == list[sub_size-1]->hash)
1800                        sub_size++;
1801
1802                p[i].list = list;
1803                p[i].list_size = sub_size;
1804                p[i].remaining = sub_size;
1805
1806                list += sub_size;
1807                list_size -= sub_size;
1808        }
1809
1810        /* Start work threads. */
1811        for (i = 0; i < delta_search_threads; i++) {
1812                if (!p[i].list_size)
1813                        continue;
1814                pthread_mutex_init(&p[i].mutex, NULL);
1815                pthread_cond_init(&p[i].cond, NULL);
1816                ret = pthread_create(&p[i].thread, NULL,
1817                                     threaded_find_deltas, &p[i]);
1818                if (ret)
1819                        die("unable to create thread: %s", strerror(ret));
1820                active_threads++;
1821        }
1822
1823        /*
1824         * Now let's wait for work completion.  Each time a thread is done
1825         * with its work, we steal half of the remaining work from the
1826         * thread with the largest number of unprocessed objects and give
1827         * it to that newly idle thread.  This ensures good load balancing
1828         * until the remaining object list segments are simply too short
1829         * to be worth splitting anymore.
1830         */
1831        while (active_threads) {
1832                struct thread_params *target = NULL;
1833                struct thread_params *victim = NULL;
1834                unsigned sub_size = 0;
1835
1836                progress_lock();
1837                for (;;) {
1838                        for (i = 0; !target && i < delta_search_threads; i++)
1839                                if (!p[i].working)
1840                                        target = &p[i];
1841                        if (target)
1842                                break;
1843                        pthread_cond_wait(&progress_cond, &progress_mutex);
1844                }
1845
1846                for (i = 0; i < delta_search_threads; i++)
1847                        if (p[i].remaining > 2*window &&
1848                            (!victim || victim->remaining < p[i].remaining))
1849                                victim = &p[i];
1850                if (victim) {
1851                        sub_size = victim->remaining / 2;
1852                        list = victim->list + victim->list_size - sub_size;
1853                        while (sub_size && list[0]->hash &&
1854                               list[0]->hash == list[-1]->hash) {
1855                                list++;
1856                                sub_size--;
1857                        }
1858                        if (!sub_size) {
1859                                /*
1860                                 * It is possible for some "paths" to have
1861                                 * so many objects that no hash boundary
1862                                 * might be found.  Let's just steal the
1863                                 * exact half in that case.
1864                                 */
1865                                sub_size = victim->remaining / 2;
1866                                list -= sub_size;
1867                        }
1868                        target->list = list;
1869                        victim->list_size -= sub_size;
1870                        victim->remaining -= sub_size;
1871                }
1872                target->list_size = sub_size;
1873                target->remaining = sub_size;
1874                target->working = 1;
1875                progress_unlock();
1876
1877                pthread_mutex_lock(&target->mutex);
1878                target->data_ready = 1;
1879                pthread_cond_signal(&target->cond);
1880                pthread_mutex_unlock(&target->mutex);
1881
1882                if (!sub_size) {
1883                        pthread_join(target->thread, NULL);
1884                        pthread_cond_destroy(&target->cond);
1885                        pthread_mutex_destroy(&target->mutex);
1886                        active_threads--;
1887                }
1888        }
1889        cleanup_threaded_search();
1890        free(p);
1891}
1892
1893#else
1894#define ll_find_deltas(l, s, w, d, p)   find_deltas(l, &s, w, d, p)
1895#endif
1896
1897static int add_ref_tag(const char *path, const unsigned char *sha1, int flag, void *cb_data)
1898{
1899        unsigned char peeled[20];
1900
1901        if (!prefixcmp(path, "refs/tags/") && /* is a tag? */
1902            !peel_ref(path, peeled)        && /* peelable? */
1903            !is_null_sha1(peeled)          && /* annotated tag? */
1904            locate_object_entry(peeled))      /* object packed? */
1905                add_object_entry(sha1, OBJ_TAG, NULL, 0);
1906        return 0;
1907}
1908
1909static void prepare_pack(int window, int depth)
1910{
1911        struct object_entry **delta_list;
1912        uint32_t i, nr_deltas;
1913        unsigned n;
1914
1915        get_object_details();
1916
1917        /*
1918         * If we're locally repacking then we need to be doubly careful
1919         * from now on in order to make sure no stealth corruption gets
1920         * propagated to the new pack.  Clients receiving streamed packs
1921         * should validate everything they get anyway so no need to incur
1922         * the additional cost here in that case.
1923         */
1924        if (!pack_to_stdout)
1925                do_check_packed_object_crc = 1;
1926
1927        if (!nr_objects || !window || !depth)
1928                return;
1929
1930        delta_list = xmalloc(nr_objects * sizeof(*delta_list));
1931        nr_deltas = n = 0;
1932
1933        for (i = 0; i < nr_objects; i++) {
1934                struct object_entry *entry = objects + i;
1935
1936                if (entry->delta)
1937                        /* This happens if we decided to reuse an existing
1938                         * delta from a pack.  "reuse_delta &&" is implied.
1939                         */
1940                        continue;
1941
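                /* Very small objects are not worth the overhead of a delta. */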
1942                if (entry->size < 50)
1943                        continue;
1944
1945                if (entry->no_try_delta)
1946                        continue;
1947
1948                if (!entry->preferred_base) {
1949                        nr_deltas++;
1950                        if (entry->type < 0)
1951                                die("unable to get type of object %s",
1952                                    sha1_to_hex(entry->idx.sha1));
1953                } else {
1954                        if (entry->type < 0) {
1955                                /*
1956                                 * This object was not found, but we
1957                                 * don't have to include it anyway.
1958                                 */
1959                                continue;
1960                        }
1961                }
1962
1963                delta_list[n++] = entry;
1964        }
1965
1966        if (nr_deltas && n > 1) {
1967                unsigned nr_done = 0;
1968                if (progress)
1969                        progress_state = start_progress("Compressing objects",
1970                                                        nr_deltas);
1971                qsort(delta_list, n, sizeof(*delta_list), type_size_sort);
1972                ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
1973                stop_progress(&progress_state);
1974                if (nr_done != nr_deltas)
1975                        die("inconsistency with delta count");
1976        }
1977        free(delta_list);
1978}
1979
1980static int git_pack_config(const char *k, const char *v, void *cb)
1981{
1982        if (!strcmp(k, "pack.window")) {
1983                window = git_config_int(k, v);
1984                return 0;
1985        }
1986        if (!strcmp(k, "pack.windowmemory")) {
1987                window_memory_limit = git_config_ulong(k, v);
1988                return 0;
1989        }
1990        if (!strcmp(k, "pack.depth")) {
1991                depth = git_config_int(k, v);
1992                return 0;
1993        }
1994        if (!strcmp(k, "pack.compression")) {
1995                int level = git_config_int(k, v);
1996                if (level == -1)
1997                        level = Z_DEFAULT_COMPRESSION;
1998                else if (level < 0 || level > Z_BEST_COMPRESSION)
1999                        die("bad pack compression level %d", level);
2000                pack_compression_level = level;
2001                pack_compression_seen = 1;
2002                return 0;
2003        }
2004        if (!strcmp(k, "pack.deltacachesize")) {
2005                max_delta_cache_size = git_config_int(k, v);
2006                return 0;
2007        }
2008        if (!strcmp(k, "pack.deltacachelimit")) {
2009                cache_max_small_delta_size = git_config_int(k, v);
2010                return 0;
2011        }
2012        if (!strcmp(k, "pack.threads")) {
2013                delta_search_threads = git_config_int(k, v);
2014                if (delta_search_threads < 0)
2015                        die("invalid number of threads specified (%d)",
2016                            delta_search_threads);
2017#ifdef NO_PTHREADS
2018                if (delta_search_threads != 1)
2019                        warning("no threads support, ignoring %s", k);
2020#endif
2021                return 0;
2022        }
2023        if (!strcmp(k, "pack.indexversion")) {
2024                pack_idx_opts.version = git_config_int(k, v);
2025                if (pack_idx_opts.version > 2)
2026                        die("bad pack.indexversion=%"PRIu32,
2027                            pack_idx_opts.version);
2028                return 0;
2029        }
2030        if (!strcmp(k, "pack.packsizelimit")) {
2031                pack_size_limit_cfg = git_config_ulong(k, v);
2032                return 0;
2033        }
2034        return git_default_config(k, v, cb);
2035}
2036
2037static void read_object_list_from_stdin(void)
2038{
2039        char line[40 + 1 + PATH_MAX + 2];
2040        unsigned char sha1[20];
2041
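        /*
         * Each input line is a 40-hex object name, optionally followed by
         * a space and a path hint.  A leading '-' marks an edge object
         * that may only serve as a preferred delta base and is not packed.
         */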
2042        for (;;) {
2043                if (!fgets(line, sizeof(line), stdin)) {
2044                        if (feof(stdin))
2045                                break;
2046                        if (!ferror(stdin))
2047                                die("fgets returned NULL, not EOF, not error!");
2048                        if (errno != EINTR)
2049                                die_errno("fgets");
2050                        clearerr(stdin);
2051                        continue;
2052                }
2053                if (line[0] == '-') {
2054                        if (get_sha1_hex(line+1, sha1))
2055                                die("expected edge sha1, got garbage:\n %s",
2056                                    line);
2057                        add_preferred_base(sha1);
2058                        continue;
2059                }
2060                if (get_sha1_hex(line, sha1))
2061                        die("expected sha1, got garbage:\n %s", line);
2062
2063                add_preferred_base_object(line+41);
2064                add_object_entry(sha1, 0, line+41, 0);
2065        }
2066}
2067
2068#define OBJECT_ADDED (1u<<20)
2069
2070static void show_commit(struct commit *commit, void *data)
2071{
2072        add_object_entry(commit->object.sha1, OBJ_COMMIT, NULL, 0);
2073        commit->object.flags |= OBJECT_ADDED;
2074}
2075
2076static void show_object(struct object *obj, const struct name_path *path, const char *last)
2077{
2078        char *name = path_name(path, last);
2079
2080        add_preferred_base_object(name);
2081        add_object_entry(obj->sha1, obj->type, name, 0);
2082        obj->flags |= OBJECT_ADDED;
2083
2084        /*
2085         * We will have generated the hash from the name,
2086         * but not saved a pointer to it, so we can free it.
2087         */
2088        free((char *)name);
2089}
2090
2091static void show_edge(struct commit *commit)
2092{
2093        add_preferred_base(commit->object.sha1);
2094}
2095
2096struct in_pack_object {
2097        off_t offset;
2098        struct object *object;
2099};
2100
2101struct in_pack {
2102        int alloc;
2103        int nr;
2104        struct in_pack_object *array;
2105};
2106
2107static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
2108{
2109        in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->sha1, p);
2110        in_pack->array[in_pack->nr].object = object;
2111        in_pack->nr++;
2112}
2113
2114/*
2115 * Compare the objects in offset order, to emulate the
2116 * "git rev-list --objects" output that produced the pack originally.
2117 */
2118static int ofscmp(const void *a_, const void *b_)
2119{
2120        struct in_pack_object *a = (struct in_pack_object *)a_;
2121        struct in_pack_object *b = (struct in_pack_object *)b_;
2122
2123        if (a->offset < b->offset)
2124                return -1;
2125        else if (a->offset > b->offset)
2126                return 1;
2127        else
2128                return hashcmp(a->object->sha1, b->object->sha1);
2129}
2130
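/*
 * For --keep-unreachable: add every object from local, non-kept packs
 * that the traversal did not already pick up, in pack offset order.
 */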
2131static void add_objects_in_unpacked_packs(struct rev_info *revs)
2132{
2133        struct packed_git *p;
2134        struct in_pack in_pack;
2135        uint32_t i;
2136
2137        memset(&in_pack, 0, sizeof(in_pack));
2138
2139        for (p = packed_git; p; p = p->next) {
2140                const unsigned char *sha1;
2141                struct object *o;
2142
2143                if (!p->pack_local || p->pack_keep)
2144                        continue;
2145                if (open_pack_index(p))
2146                        die("cannot open pack index");
2147
2148                ALLOC_GROW(in_pack.array,
2149                           in_pack.nr + p->num_objects,
2150                           in_pack.alloc);
2151
2152                for (i = 0; i < p->num_objects; i++) {
2153                        sha1 = nth_packed_object_sha1(p, i);
2154                        o = lookup_unknown_object(sha1);
2155                        if (!(o->flags & OBJECT_ADDED))
2156                                mark_in_pack_object(o, p, &in_pack);
2157                        o->flags |= OBJECT_ADDED;
2158                }
2159        }
2160
2161        if (in_pack.nr) {
2162                qsort(in_pack.array, in_pack.nr, sizeof(in_pack.array[0]),
2163                      ofscmp);
2164                for (i = 0; i < in_pack.nr; i++) {
2165                        struct object *o = in_pack.array[i].object;
2166                        add_object_entry(o->sha1, o->type, "", 0);
2167                }
2168        }
2169        free(in_pack.array);
2170}
2171
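/*
 * Does this object exist in a pack we are not repacking, i.e. a kept or
 * non-local pack?  Remember the last pack that had a hit, since
 * consecutive lookups tend to cluster in the same pack.
 */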
2172static int has_sha1_pack_kept_or_nonlocal(const unsigned char *sha1)
2173{
2174        static struct packed_git *last_found = (void *)1;
2175        struct packed_git *p;
2176
2177        p = (last_found != (void *)1) ? last_found : packed_git;
2178
2179        while (p) {
2180                if ((!p->pack_local || p->pack_keep) &&
2181                        find_pack_entry_one(sha1, p)) {
2182                        last_found = p;
2183                        return 1;
2184                }
2185                if (p == last_found)
2186                        p = packed_git;
2187                else
2188                        p = p->next;
2189                if (p == last_found)
2190                        p = p->next;
2191        }
2192        return 0;
2193}
2194
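/*
 * For --unpack-unreachable: write out, as loose objects, everything in
 * local non-kept packs that is neither going into the new pack nor
 * available from a kept or non-local pack.
 */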
2195static void loosen_unused_packed_objects(struct rev_info *revs)
2196{
2197        struct packed_git *p;
2198        uint32_t i;
2199        const unsigned char *sha1;
2200
2201        for (p = packed_git; p; p = p->next) {
2202                if (!p->pack_local || p->pack_keep)
2203                        continue;
2204
2205                if (open_pack_index(p))
2206                        die("cannot open pack index");
2207
2208                for (i = 0; i < p->num_objects; i++) {
2209                        sha1 = nth_packed_object_sha1(p, i);
2210                        if (!locate_object_entry(sha1) &&
2211                                !has_sha1_pack_kept_or_nonlocal(sha1))
2212                                if (force_object_loose(sha1, p->mtime))
2213                                        die("unable to force loose object");
2214                }
2215        }
2216}
2217
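/*
 * Drive the internal revision walker.  Additional revision arguments are
 * read from stdin, one per line, until a blank line or EOF.
 */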
2218static void get_object_list(int ac, const char **av)
2219{
2220        struct rev_info revs;
2221        char line[1000];
2222        int flags = 0;
2223
2224        init_revisions(&revs, NULL);
2225        save_commit_buffer = 0;
2226        setup_revisions(ac, av, &revs, NULL);
2227
2228        while (fgets(line, sizeof(line), stdin) != NULL) {
2229                int len = strlen(line);
2230                if (len && line[len - 1] == '\n')
2231                        line[--len] = 0;
2232                if (!len)
2233                        break;
2234                if (*line == '-') {
2235                        if (!strcmp(line, "--not")) {
2236                                flags ^= UNINTERESTING;
2237                                continue;
2238                        }
2239                        die("not a rev '%s'", line);
2240                }
2241                if (handle_revision_arg(line, &revs, flags, 1))
2242                        die("bad revision '%s'", line);
2243        }
2244
2245        if (prepare_revision_walk(&revs))
2246                die("revision walk setup failed");
2247        mark_edges_uninteresting(revs.commits, &revs, show_edge);
2248        traverse_commit_list(&revs, show_commit, show_object, NULL);
2249
2250        if (keep_unreachable)
2251                add_objects_in_unpacked_packs(&revs);
2252        if (unpack_unreachable)
2253                loosen_unused_packed_objects(&revs);
2254}
2255
2256int cmd_pack_objects(int argc, const char **argv, const char *prefix)
2257{
2258        int use_internal_rev_list = 0;
2259        int thin = 0;
2260        int all_progress_implied = 0;
2261        uint32_t i;
2262        const char **rp_av;
2263        int rp_ac_alloc = 64;
2264        int rp_ac;
2265
2266        read_replace_refs = 0;
2267
2268        rp_av = xcalloc(rp_ac_alloc, sizeof(*rp_av));
2269
2270        rp_av[0] = "pack-objects";
2271        rp_av[1] = "--objects"; /* --thin will make it --objects-edge */
2272        rp_ac = 2;
2273
2274        reset_pack_idx_option(&pack_idx_opts);
2275        git_config(git_pack_config, NULL);
2276        if (!pack_compression_seen && core_compression_seen)
2277                pack_compression_level = core_compression_level;
2278
2279        progress = isatty(2);
2280        for (i = 1; i < argc; i++) {
2281                const char *arg = argv[i];
2282
2283                if (*arg != '-')
2284                        break;
2285
2286                if (!strcmp("--non-empty", arg)) {
2287                        non_empty = 1;
2288                        continue;
2289                }
2290                if (!strcmp("--local", arg)) {
2291                        local = 1;
2292                        continue;
2293                }
2294                if (!strcmp("--incremental", arg)) {
2295                        incremental = 1;
2296                        continue;
2297                }
2298                if (!strcmp("--honor-pack-keep", arg)) {
2299                        ignore_packed_keep = 1;
2300                        continue;
2301                }
2302                if (!prefixcmp(arg, "--compression=")) {
2303                        char *end;
2304                        int level = strtoul(arg+14, &end, 0);
2305                        if (!arg[14] || *end)
2306                                usage(pack_usage);
2307                        if (level == -1)
2308                                level = Z_DEFAULT_COMPRESSION;
2309                        else if (level < 0 || level > Z_BEST_COMPRESSION)
2310                                die("bad pack compression level %d", level);
2311                        pack_compression_level = level;
2312                        continue;
2313                }
2314                if (!prefixcmp(arg, "--max-pack-size=")) {
2315                        pack_size_limit_cfg = 0;
2316                        if (!git_parse_ulong(arg+16, &pack_size_limit))
2317                                usage(pack_usage);
2318                        continue;
2319                }
2320                if (!prefixcmp(arg, "--window=")) {
2321                        char *end;
2322                        window = strtoul(arg+9, &end, 0);
2323                        if (!arg[9] || *end)
2324                                usage(pack_usage);
2325                        continue;
2326                }
2327                if (!prefixcmp(arg, "--window-memory=")) {
2328                        if (!git_parse_ulong(arg+16, &window_memory_limit))
2329                                usage(pack_usage);
2330                        continue;
2331                }
2332                if (!prefixcmp(arg, "--threads=")) {
2333                        char *end;
2334                        delta_search_threads = strtoul(arg+10, &end, 0);
2335                        if (!arg[10] || *end || delta_search_threads < 0)
2336                                usage(pack_usage);
2337#ifdef NO_PTHREADS
2338                        if (delta_search_threads != 1)
2339                                warning("no threads support, "
2340                                        "ignoring %s", arg);
2341#endif
2342                        continue;
2343                }
2344                if (!prefixcmp(arg, "--depth=")) {
2345                        char *end;
2346                        depth = strtoul(arg+8, &end, 0);
2347                        if (!arg[8] || *end)
2348                                usage(pack_usage);
2349                        continue;
2350                }
2351                if (!strcmp("--progress", arg)) {
2352                        progress = 1;
2353                        continue;
2354                }
2355                if (!strcmp("--all-progress", arg)) {
2356                        progress = 2;
2357                        continue;
2358                }
2359                if (!strcmp("--all-progress-implied", arg)) {
2360                        all_progress_implied = 1;
2361                        continue;
2362                }
2363                if (!strcmp("-q", arg)) {
2364                        progress = 0;
2365                        continue;
2366                }
2367                if (!strcmp("--no-reuse-delta", arg)) {
2368                        reuse_delta = 0;
2369                        continue;
2370                }
2371                if (!strcmp("--no-reuse-object", arg)) {
2372                        reuse_object = reuse_delta = 0;
2373                        continue;
2374                }
2375                if (!strcmp("--delta-base-offset", arg)) {
2376                        allow_ofs_delta = 1;
2377                        continue;
2378                }
2379                if (!strcmp("--stdout", arg)) {
2380                        pack_to_stdout = 1;
2381                        continue;
2382                }
2383                if (!strcmp("--revs", arg)) {
2384                        use_internal_rev_list = 1;
2385                        continue;
2386                }
2387                if (!strcmp("--keep-unreachable", arg)) {
2388                        keep_unreachable = 1;
2389                        continue;
2390                }
2391                if (!strcmp("--unpack-unreachable", arg)) {
2392                        unpack_unreachable = 1;
2393                        continue;
2394                }
2395                if (!strcmp("--include-tag", arg)) {
2396                        include_tag = 1;
2397                        continue;
2398                }
2399                if (!strcmp("--unpacked", arg) ||
2400                    !strcmp("--reflog", arg) ||
2401                    !strcmp("--all", arg)) {
2402                        use_internal_rev_list = 1;
2403                        if (rp_ac >= rp_ac_alloc - 1) {
2404                                rp_ac_alloc = alloc_nr(rp_ac_alloc);
2405                                rp_av = xrealloc(rp_av,
2406                                                 rp_ac_alloc * sizeof(*rp_av));
2407                        }
2408                        rp_av[rp_ac++] = arg;
2409                        continue;
2410                }
2411                if (!strcmp("--thin", arg)) {
2412                        use_internal_rev_list = 1;
2413                        thin = 1;
2414                        rp_av[1] = "--objects-edge";
2415                        continue;
2416                }
2417                if (!prefixcmp(arg, "--index-version=")) {
2418                        char *c;
2419                        pack_idx_opts.version = strtoul(arg + 16, &c, 10);
2420                        if (pack_idx_opts.version > 2)
2421                                die("bad %s", arg);
2422                        if (*c == ',')
2423                                pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);
2424                        if (*c || pack_idx_opts.off32_limit & 0x80000000)
2425                                die("bad %s", arg);
2426                        continue;
2427                }
2428                if (!strcmp(arg, "--keep-true-parents")) {
2429                        grafts_replace_parents = 0;
2430                        continue;
2431                }
2432                usage(pack_usage);
2433        }
2434
2435        /* Traditionally "pack-objects [options] base extra" failed;
2436         * we would, however, want to take the refs parameters that would
2437         * have been given to the upstream rev-list ourselves, which means
2438         * we somehow need to say what the base name is.  So the
2439         * syntax would be:
2440         *
2441         * pack-objects [options] base <refs...>
2442         *
2443         * In other words, we would treat the first non-option as the
2444         * base_name and send everything else to the internal revision
2445         * walker.
2446         */
2447
2448        if (!pack_to_stdout)
2449                base_name = argv[i++];
2450
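        /* Exactly one of --stdout and a base name must be given. */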
2451        if (pack_to_stdout != !base_name)
2452                usage(pack_usage);
2453
2454        if (!pack_to_stdout && !pack_size_limit)
2455                pack_size_limit = pack_size_limit_cfg;
2456        if (pack_to_stdout && pack_size_limit)
2457                die("--max-pack-size cannot be used to build a pack for transfer.");
2458        if (pack_size_limit && pack_size_limit < 1024*1024) {
2459                warning("minimum pack size limit is 1 MiB");
2460                pack_size_limit = 1024*1024;
2461        }
2462
2463        if (!pack_to_stdout && thin)
2464                die("--thin cannot be used to build an indexable pack.");
2465
2466        if (keep_unreachable && unpack_unreachable)
2467                die("--keep-unreachable and --unpack-unreachable are incompatible.");
2468
2469        if (progress && all_progress_implied)
2470                progress = 2;
2471
2472        prepare_packed_git();
2473
2474        if (progress)
2475                progress_state = start_progress("Counting objects", 0);
2476        if (!use_internal_rev_list)
2477                read_object_list_from_stdin();
2478        else {
2479                rp_av[rp_ac] = NULL;
2480                get_object_list(rp_ac, rp_av);
2481        }
2482        cleanup_preferred_base();
2483        if (include_tag && nr_result)
2484                for_each_ref(add_ref_tag, NULL);
2485        stop_progress(&progress_state);
2486
2487        if (non_empty && !nr_result)
2488                return 0;
2489        if (nr_result)
2490                prepare_pack(window, depth);
2491        write_pack_file();
2492        if (progress)
2493                fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
2494                        " reused %"PRIu32" (delta %"PRIu32")\n",
2495                        written, written_delta, reused, reused_delta);
2496        return 0;
2497}