builtin/pack-objects.c, at commit fb60f34 ("Merge branch 'jc/request-pull-match-tagname' into maint")
   1#include "builtin.h"
   2#include "cache.h"
   3#include "attr.h"
   4#include "object.h"
   5#include "blob.h"
   6#include "commit.h"
   7#include "tag.h"
   8#include "tree.h"
   9#include "delta.h"
  10#include "pack.h"
  11#include "pack-revindex.h"
  12#include "csum-file.h"
  13#include "tree-walk.h"
  14#include "diff.h"
  15#include "revision.h"
  16#include "list-objects.h"
  17#include "progress.h"
  18#include "refs.h"
  19#include "thread-utils.h"
  20
  21static const char *pack_usage[] = {
  22        "git pack-objects --stdout [options...] [< ref-list | < object-list]",
  23        "git pack-objects [options...] base-name [< ref-list | < object-list]",
  24        NULL
  25};
  26
  27struct object_entry {
  28        struct pack_idx_entry idx;
  29        unsigned long size;     /* uncompressed size */
  30        struct packed_git *in_pack;     /* already in pack */
  31        off_t in_pack_offset;
  32        struct object_entry *delta;     /* delta base object */
  33        struct object_entry *delta_child; /* delta objects that use me as their base */
  34        struct object_entry *delta_sibling; /* other delta objects that use
  35                                             * the same base as I do
  36                                             */
  37        void *delta_data;       /* cached delta (uncompressed) */
  38        unsigned long delta_size;       /* delta data size (uncompressed) */
  39        unsigned long z_delta_size;     /* delta data size (compressed) */
  40        unsigned int hash;      /* name hint hash */
  41        enum object_type type;
  42        enum object_type in_pack_type;  /* could be delta */
  43        unsigned char in_pack_header_size;
  44        unsigned char preferred_base; /* we do not pack this object, but it is
  45                                       * available to be used as a base against
  46                                       * which other objects can be deltified.
  47                                       */
  48        unsigned char no_try_delta;
  49        unsigned char tagged; /* near the very tip of refs */
  50        unsigned char filled; /* assigned write-order */
  51};
  52
  53/*
  54 * Objects we are going to pack are collected in the objects array
  55 * (dynamically expanded).  nr_objects & nr_alloc control this array.
  56 * Entries are stored in the order we see them -- typically the
  57 * rev-list --objects order, which gives us a nice "minimum seek" order.
  58 */
  59static struct object_entry *objects;
  60static struct pack_idx_entry **written_list;
  61static uint32_t nr_objects, nr_alloc, nr_result, nr_written;
  62
  63static int non_empty;
  64static int reuse_delta = 1, reuse_object = 1;
  65static int keep_unreachable, unpack_unreachable, include_tag;
  66static unsigned long unpack_unreachable_expiration;
  67static int local;
  68static int incremental;
  69static int ignore_packed_keep;
  70static int allow_ofs_delta;
  71static struct pack_idx_option pack_idx_opts;
  72static const char *base_name;
  73static int progress = 1;
  74static int window = 10;
  75static unsigned long pack_size_limit;
  76static int depth = 50;
  77static int delta_search_threads;
  78static int pack_to_stdout;
  79static int num_preferred_base;
  80static struct progress *progress_state;
  81static int pack_compression_level = Z_DEFAULT_COMPRESSION;
  82static int pack_compression_seen;
  83
  84static unsigned long delta_cache_size = 0;
  85static unsigned long max_delta_cache_size = 256 * 1024 * 1024;
  86static unsigned long cache_max_small_delta_size = 1000;
  87
  88static unsigned long window_memory_limit = 0;
  89
  90/*
  91 * The object names in the objects array are hashed with this hashtable,
  92 * to help look up an entry by object name.
  93 * This hashtable is built after all the objects are seen.
  94 */
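    /*
     * Convention used by locate_object_entry_hash() and rehash_objects()
     * below: object_ix[] holds 1-based indexes into objects[] (0 means an
     * empty slot), and the lookup helper returns either the slot that
     * matched or "-1 - slot" for the free slot where a new entry belongs.
     */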
  95static int *object_ix;
  96static int object_ix_hashsz;
  97static struct object_entry *locate_object_entry(const unsigned char *sha1);
  98
  99/*
 100 * stats
 101 */
 102static uint32_t written, written_delta;
 103static uint32_t reused, reused_delta;
 104
 105
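    /*
     * Re-create the delta for 'entry' against its chosen base by reading
     * both objects back and diffing them; the result must have exactly the
     * delta_size that was recorded when this delta pairing was chosen.
     */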
 106static void *get_delta(struct object_entry *entry)
 107{
 108        unsigned long size, base_size, delta_size;
 109        void *buf, *base_buf, *delta_buf;
 110        enum object_type type;
 111
 112        buf = read_sha1_file(entry->idx.sha1, &type, &size);
 113        if (!buf)
 114                die("unable to read %s", sha1_to_hex(entry->idx.sha1));
 115        base_buf = read_sha1_file(entry->delta->idx.sha1, &type, &base_size);
 116        if (!base_buf)
 117                die("unable to read %s", sha1_to_hex(entry->delta->idx.sha1));
 118        delta_buf = diff_delta(base_buf, base_size,
 119                               buf, size, &delta_size, 0);
 120        if (!delta_buf || delta_size != entry->delta_size)
 121                die("delta size changed");
 122        free(buf);
 123        free(base_buf);
 124        return delta_buf;
 125}
 126
 127static unsigned long do_compress(void **pptr, unsigned long size)
 128{
 129        git_zstream stream;
 130        void *in, *out;
 131        unsigned long maxsize;
 132
 133        memset(&stream, 0, sizeof(stream));
 134        git_deflate_init(&stream, pack_compression_level);
 135        maxsize = git_deflate_bound(&stream, size);
 136
 137        in = *pptr;
 138        out = xmalloc(maxsize);
 139        *pptr = out;
 140
 141        stream.next_in = in;
 142        stream.avail_in = size;
 143        stream.next_out = out;
 144        stream.avail_out = maxsize;
 145        while (git_deflate(&stream, Z_FINISH) == Z_OK)
 146                ; /* nothing */
 147        git_deflate_end(&stream);
 148
 149        free(in);
 150        return stream.total_out;
 151}
 152
 153/*
 154 * we are going to reuse the existing object data as is.  make
 155 * sure it is not corrupt.
 156 */
 157static int check_pack_inflate(struct packed_git *p,
 158                struct pack_window **w_curs,
 159                off_t offset,
 160                off_t len,
 161                unsigned long expect)
 162{
 163        git_zstream stream;
 164        unsigned char fakebuf[4096], *in;
 165        int st;
 166
 167        memset(&stream, 0, sizeof(stream));
 168        git_inflate_init(&stream);
 169        do {
 170                in = use_pack(p, w_curs, offset, &stream.avail_in);
 171                stream.next_in = in;
 172                stream.next_out = fakebuf;
 173                stream.avail_out = sizeof(fakebuf);
 174                st = git_inflate(&stream, Z_FINISH);
 175                offset += stream.next_in - in;
 176        } while (st == Z_OK || st == Z_BUF_ERROR);
 177        git_inflate_end(&stream);
 178        return (st == Z_STREAM_END &&
 179                stream.total_out == expect &&
 180                stream.total_in == len) ? 0 : -1;
 181}
 182
 183static void copy_pack_data(struct sha1file *f,
 184                struct packed_git *p,
 185                struct pack_window **w_curs,
 186                off_t offset,
 187                off_t len)
 188{
 189        unsigned char *in;
 190        unsigned long avail;
 191
 192        while (len) {
 193                in = use_pack(p, w_curs, offset, &avail);
 194                if (avail > len)
 195                        avail = (unsigned long)len;
 196                sha1write(f, in, avail);
 197                offset += avail;
 198                len -= avail;
 199        }
 200}
 201
 202/* Return 0 if we will bust the pack-size limit */
 203static unsigned long write_no_reuse_object(struct sha1file *f, struct object_entry *entry,
 204                                           unsigned long limit, int usable_delta)
 205{
 206        unsigned long size, datalen;
 207        unsigned char header[10], dheader[10];
 208        unsigned hdrlen;
 209        enum object_type type;
 210        void *buf;
 211
 212        if (!usable_delta) {
 213                buf = read_sha1_file(entry->idx.sha1, &type, &size);
 214                if (!buf)
 215                        die("unable to read %s", sha1_to_hex(entry->idx.sha1));
 216                /*
 217                 * make sure no cached delta data remains from a
 218                 * previous attempt before a pack split occurred.
 219                 */
 220                free(entry->delta_data);
 221                entry->delta_data = NULL;
 222                entry->z_delta_size = 0;
 223        } else if (entry->delta_data) {
 224                size = entry->delta_size;
 225                buf = entry->delta_data;
 226                entry->delta_data = NULL;
 227                type = (allow_ofs_delta && entry->delta->idx.offset) ?
 228                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
 229        } else {
 230                buf = get_delta(entry);
 231                size = entry->delta_size;
 232                type = (allow_ofs_delta && entry->delta->idx.offset) ?
 233                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
 234        }
 235
 236        if (entry->z_delta_size)
 237                datalen = entry->z_delta_size;
 238        else
 239                datalen = do_compress(&buf, size);
 240
 241        /*
 242         * The object header is one byte holding the type and the low 4 bits
 243         * of the size, followed by zero or more bytes with 7 more size bits each.
 244         */
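            /*
             * For example, a blob (type 3) of 100 bytes is encoded as the two
             * bytes 0xb4 0x06: 0xb4 = continuation bit, type 3, low size bits
             * (0x4); 0x06 = the remaining size bits (100 >> 4).  See
             * Documentation/technical/pack-format.txt for the full layout.
             */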
 245        hdrlen = encode_in_pack_object_header(type, size, header);
 246
 247        if (type == OBJ_OFS_DELTA) {
 248                /*
 249                 * Deltas with relative base contain an additional
 250                 * encoding of the relative offset for the delta
 251                 * base from this object's position in the pack.
 252                 */
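                    /*
                     * The offset is emitted most significant group first,
                     * 7 bits per byte with the high bit as a continuation
                     * flag, subtracting one at each step; e.g. ofs = 130
                     * becomes the bytes 0x80 0x02, which decode back as
                     * ((0 + 1) << 7) + 2 = 130.
                     */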
 253                off_t ofs = entry->idx.offset - entry->delta->idx.offset;
 254                unsigned pos = sizeof(dheader) - 1;
 255                dheader[pos] = ofs & 127;
 256                while (ofs >>= 7)
 257                        dheader[--pos] = 128 | (--ofs & 127);
 258                if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
 259                        free(buf);
 260                        return 0;
 261                }
 262                sha1write(f, header, hdrlen);
 263                sha1write(f, dheader + pos, sizeof(dheader) - pos);
 264                hdrlen += sizeof(dheader) - pos;
 265        } else if (type == OBJ_REF_DELTA) {
 266                /*
 267                 * Deltas with a base reference contain
 268                 * an additional 20 bytes for the base sha1.
 269                 */
 270                if (limit && hdrlen + 20 + datalen + 20 >= limit) {
 271                        free(buf);
 272                        return 0;
 273                }
 274                sha1write(f, header, hdrlen);
 275                sha1write(f, entry->delta->idx.sha1, 20);
 276                hdrlen += 20;
 277        } else {
 278                if (limit && hdrlen + datalen + 20 >= limit) {
 279                        free(buf);
 280                        return 0;
 281                }
 282                sha1write(f, header, hdrlen);
 283        }
 284        sha1write(f, buf, datalen);
 285        free(buf);
 286
 287        return hdrlen + datalen;
 288}
 289
 290/* Return 0 if we will bust the pack-size limit */
 291static unsigned long write_reuse_object(struct sha1file *f, struct object_entry *entry,
 292                                        unsigned long limit, int usable_delta)
 293{
 294        struct packed_git *p = entry->in_pack;
 295        struct pack_window *w_curs = NULL;
 296        struct revindex_entry *revidx;
 297        off_t offset;
 298        enum object_type type = entry->type;
 299        unsigned long datalen;
 300        unsigned char header[10], dheader[10];
 301        unsigned hdrlen;
 302
 303        if (entry->delta)
 304                type = (allow_ofs_delta && entry->delta->idx.offset) ?
 305                        OBJ_OFS_DELTA : OBJ_REF_DELTA;
 306        hdrlen = encode_in_pack_object_header(type, entry->size, header);
 307
 308        offset = entry->in_pack_offset;
 309        revidx = find_pack_revindex(p, offset);
 310        datalen = revidx[1].offset - offset;
 311        if (!pack_to_stdout && p->index_version > 1 &&
 312            check_pack_crc(p, &w_curs, offset, datalen, revidx->nr)) {
 313                error("bad packed object CRC for %s", sha1_to_hex(entry->idx.sha1));
 314                unuse_pack(&w_curs);
 315                return write_no_reuse_object(f, entry, limit, usable_delta);
 316        }
 317
 318        offset += entry->in_pack_header_size;
 319        datalen -= entry->in_pack_header_size;
 320
 321        if (!pack_to_stdout && p->index_version == 1 &&
 322            check_pack_inflate(p, &w_curs, offset, datalen, entry->size)) {
 323                error("corrupt packed object for %s", sha1_to_hex(entry->idx.sha1));
 324                unuse_pack(&w_curs);
 325                return write_no_reuse_object(f, entry, limit, usable_delta);
 326        }
 327
 328        if (type == OBJ_OFS_DELTA) {
 329                off_t ofs = entry->idx.offset - entry->delta->idx.offset;
 330                unsigned pos = sizeof(dheader) - 1;
 331                dheader[pos] = ofs & 127;
 332                while (ofs >>= 7)
 333                        dheader[--pos] = 128 | (--ofs & 127);
 334                if (limit && hdrlen + sizeof(dheader) - pos + datalen + 20 >= limit) {
 335                        unuse_pack(&w_curs);
 336                        return 0;
 337                }
 338                sha1write(f, header, hdrlen);
 339                sha1write(f, dheader + pos, sizeof(dheader) - pos);
 340                hdrlen += sizeof(dheader) - pos;
 341                reused_delta++;
 342        } else if (type == OBJ_REF_DELTA) {
 343                if (limit && hdrlen + 20 + datalen + 20 >= limit) {
 344                        unuse_pack(&w_curs);
 345                        return 0;
 346                }
 347                sha1write(f, header, hdrlen);
 348                sha1write(f, entry->delta->idx.sha1, 20);
 349                hdrlen += 20;
 350                reused_delta++;
 351        } else {
 352                if (limit && hdrlen + datalen + 20 >= limit) {
 353                        unuse_pack(&w_curs);
 354                        return 0;
 355                }
 356                sha1write(f, header, hdrlen);
 357        }
 358        copy_pack_data(f, p, &w_curs, offset, datalen);
 359        unuse_pack(&w_curs);
 360        reused++;
 361        return hdrlen + datalen;
 362}
 363
 364/* Return 0 if we will bust the pack-size limit */
 365static unsigned long write_object(struct sha1file *f,
 366                                  struct object_entry *entry,
 367                                  off_t write_offset)
 368{
 369        unsigned long limit, len;
 370        int usable_delta, to_reuse;
 371
 372        if (!pack_to_stdout)
 373                crc32_begin(f);
 374
 375        /* apply size limit if limited packsize and not first object */
 376        if (!pack_size_limit || !nr_written)
 377                limit = 0;
 378        else if (pack_size_limit <= write_offset)
 379                /*
 380                 * the earlier object did not fit the limit; avoid
 381                 * mistaking this for unlimited (i.e. limit = 0).
 382                 */
 383                limit = 1;
 384        else
 385                limit = pack_size_limit - write_offset;
 386
 387        if (!entry->delta)
 388                usable_delta = 0;       /* no delta */
 389        else if (!pack_size_limit)
 390               usable_delta = 1;        /* unlimited packfile */
 391        else if (entry->delta->idx.offset == (off_t)-1)
 392                usable_delta = 0;       /* base was written to another pack */
 393        else if (entry->delta->idx.offset)
 394                usable_delta = 1;       /* base already exists in this pack */
 395        else
 396                usable_delta = 0;       /* base could end up in another pack */
 397
 398        if (!reuse_object)
 399                to_reuse = 0;   /* explicit */
 400        else if (!entry->in_pack)
 401                to_reuse = 0;   /* can't reuse what we don't have */
 402        else if (entry->type == OBJ_REF_DELTA || entry->type == OBJ_OFS_DELTA)
 403                                /* check_object() decided it for us ... */
 404                to_reuse = usable_delta;
 405                                /* ... but pack split may override that */
 406        else if (entry->type != entry->in_pack_type)
 407                to_reuse = 0;   /* pack has delta which is unusable */
 408        else if (entry->delta)
 409                to_reuse = 0;   /* we want to pack afresh */
 410        else
 411                to_reuse = 1;   /* we have it in-pack undeltified,
 412                                 * and we do not need to deltify it.
 413                                 */
 414
 415        if (!to_reuse)
 416                len = write_no_reuse_object(f, entry, limit, usable_delta);
 417        else
 418                len = write_reuse_object(f, entry, limit, usable_delta);
 419        if (!len)
 420                return 0;
 421
 422        if (usable_delta)
 423                written_delta++;
 424        written++;
 425        if (!pack_to_stdout)
 426                entry->idx.crc32 = crc32_end(f);
 427        return len;
 428}
 429
 430enum write_one_status {
 431        WRITE_ONE_SKIP = -1, /* already written */
 432        WRITE_ONE_BREAK = 0, /* writing this will bust the limit; not written */
 433        WRITE_ONE_WRITTEN = 1, /* normal */
 434        WRITE_ONE_RECURSIVE = 2 /* already scheduled to be written */
 435};
 436
 437static enum write_one_status write_one(struct sha1file *f,
 438                                       struct object_entry *e,
 439                                       off_t *offset)
 440{
 441        unsigned long size;
 442        int recursing;
 443
 444        /*
 445         * we set offset to 1 (which is an impossible value) to mark
 446         * the fact that this object is involved in "write its base
 447         * first before writing a deltified object" recursion.
 448         */
 449        recursing = (e->idx.offset == 1);
 450        if (recursing) {
 451                warning("recursive delta detected for object %s",
 452                        sha1_to_hex(e->idx.sha1));
 453                return WRITE_ONE_RECURSIVE;
 454        } else if (e->idx.offset || e->preferred_base) {
 455                /* offset is non zero if object is written already. */
 456                return WRITE_ONE_SKIP;
 457        }
 458
 459        /* if we are deltified, write out base object first. */
 460        if (e->delta) {
 461                e->idx.offset = 1; /* now recurse */
 462                switch (write_one(f, e->delta, offset)) {
 463                case WRITE_ONE_RECURSIVE:
 464                        /* we cannot depend on this one */
 465                        e->delta = NULL;
 466                        break;
 467                default:
 468                        break;
 469                case WRITE_ONE_BREAK:
 470                        e->idx.offset = recursing;
 471                        return WRITE_ONE_BREAK;
 472                }
 473        }
 474
 475        e->idx.offset = *offset;
 476        size = write_object(f, e, *offset);
 477        if (!size) {
 478                e->idx.offset = recursing;
 479                return WRITE_ONE_BREAK;
 480        }
 481        written_list[nr_written++] = &e->idx;
 482
 483        /* make sure off_t is sufficiently large not to wrap */
 484        if (signed_add_overflows(*offset, size))
 485                die("pack too large for current definition of off_t");
 486        *offset += size;
 487        return WRITE_ONE_WRITTEN;
 488}
 489
 490static int mark_tagged(const char *path, const unsigned char *sha1, int flag,
 491                       void *cb_data)
 492{
 493        unsigned char peeled[20];
 494        struct object_entry *entry = locate_object_entry(sha1);
 495
 496        if (entry)
 497                entry->tagged = 1;
 498        if (!peel_ref(path, peeled)) {
 499                entry = locate_object_entry(peeled);
 500                if (entry)
 501                        entry->tagged = 1;
 502        }
 503        return 0;
 504}
 505
 506static inline void add_to_write_order(struct object_entry **wo,
 507                               unsigned int *endp,
 508                               struct object_entry *e)
 509{
 510        if (e->filled)
 511                return;
 512        wo[(*endp)++] = e;
 513        e->filled = 1;
 514}
 515
 516static void add_descendants_to_write_order(struct object_entry **wo,
 517                                           unsigned int *endp,
 518                                           struct object_entry *e)
 519{
 520        int add_to_order = 1;
 521        while (e) {
 522                if (add_to_order) {
 523                        struct object_entry *s;
 524                        /* add this node... */
 525                        add_to_write_order(wo, endp, e);
 526                        /* all its siblings... */
 527                        for (s = e->delta_sibling; s; s = s->delta_sibling) {
 528                                add_to_write_order(wo, endp, s);
 529                        }
 530                }
 531                /* drop down a level to add left subtree nodes if possible */
 532                if (e->delta_child) {
 533                        add_to_order = 1;
 534                        e = e->delta_child;
 535                } else {
 536                        add_to_order = 0;
 537                        /* our sibling might have some children, it is next */
 538                        if (e->delta_sibling) {
 539                                e = e->delta_sibling;
 540                                continue;
 541                        }
 542                        /* go back to our parent node */
 543                        e = e->delta;
 544                        while (e && !e->delta_sibling) {
 545                                /* we're on the right side of a subtree, keep
 546                                 * going up until we can go right again */
 547                                e = e->delta;
 548                        }
 549                        if (!e) {
 550                                /* done- we hit our original root node */
 551                                return;
 552                        }
 553                        /* pass it off to sibling at this level */
 554                        e = e->delta_sibling;
 555                }
 556        }
 557}
 558
 559static void add_family_to_write_order(struct object_entry **wo,
 560                                      unsigned int *endp,
 561                                      struct object_entry *e)
 562{
 563        struct object_entry *root;
 564
 565        for (root = e; root->delta; root = root->delta)
 566                ; /* nothing */
 567        add_descendants_to_write_order(wo, endp, root);
 568}
 569
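    /*
     * Decide the order in which objects are written: first the objects in
     * the original recency order up to the first tagged tip, then the
     * tagged tips themselves, then the remaining commits and tags, then
     * trees, and finally everything else, grouped so that each delta
     * family stays together.
     */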
 570static struct object_entry **compute_write_order(void)
 571{
 572        unsigned int i, wo_end, last_untagged;
 573
 574        struct object_entry **wo = xmalloc(nr_objects * sizeof(*wo));
 575
 576        for (i = 0; i < nr_objects; i++) {
 577                objects[i].tagged = 0;
 578                objects[i].filled = 0;
 579                objects[i].delta_child = NULL;
 580                objects[i].delta_sibling = NULL;
 581        }
 582
 583        /*
 584         * Fully connect delta_child/delta_sibling network.
 585         * Make sure delta_sibling is sorted in the original
 586         * recency order.
 587         */
 588        for (i = nr_objects; i > 0;) {
 589                struct object_entry *e = &objects[--i];
 590                if (!e->delta)
 591                        continue;
 592                /* Mark me as the first child */
 593                e->delta_sibling = e->delta->delta_child;
 594                e->delta->delta_child = e;
 595        }
 596
 597        /*
 598         * Mark objects that are at the tip of tags.
 599         */
 600        for_each_tag_ref(mark_tagged, NULL);
 601
 602        /*
 603         * Give the objects in the original recency order until
 604         * we see a tagged tip.
 605         */
 606        for (i = wo_end = 0; i < nr_objects; i++) {
 607                if (objects[i].tagged)
 608                        break;
 609                add_to_write_order(wo, &wo_end, &objects[i]);
 610        }
 611        last_untagged = i;
 612
 613        /*
 614         * Then fill all the tagged tips.
 615         */
 616        for (; i < nr_objects; i++) {
 617                if (objects[i].tagged)
 618                        add_to_write_order(wo, &wo_end, &objects[i]);
 619        }
 620
 621        /*
 622         * And then all remaining commits and tags.
 623         */
 624        for (i = last_untagged; i < nr_objects; i++) {
 625                if (objects[i].type != OBJ_COMMIT &&
 626                    objects[i].type != OBJ_TAG)
 627                        continue;
 628                add_to_write_order(wo, &wo_end, &objects[i]);
 629        }
 630
 631        /*
 632         * And then all the trees.
 633         */
 634        for (i = last_untagged; i < nr_objects; i++) {
 635                if (objects[i].type != OBJ_TREE)
 636                        continue;
 637                add_to_write_order(wo, &wo_end, &objects[i]);
 638        }
 639
 640        /*
 641         * Finally all the rest in really tight order
 642         */
 643        for (i = last_untagged; i < nr_objects; i++) {
 644                if (!objects[i].filled)
 645                        add_family_to_write_order(wo, &wo_end, &objects[i]);
 646        }
 647
 648        if (wo_end != nr_objects)
 649                die("ordered %u objects, expected %"PRIu32, wo_end, nr_objects);
 650
 651        return wo;
 652}
 653
 654static void write_pack_file(void)
 655{
 656        uint32_t i = 0, j;
 657        struct sha1file *f;
 658        off_t offset;
 659        uint32_t nr_remaining = nr_result;
 660        time_t last_mtime = 0;
 661        struct object_entry **write_order;
 662
 663        if (progress > pack_to_stdout)
 664                progress_state = start_progress("Writing objects", nr_result);
 665        written_list = xmalloc(nr_objects * sizeof(*written_list));
 666        write_order = compute_write_order();
 667
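            /*
             * Each iteration of this loop produces one packfile.  When a
             * pack size limit is in effect, write_one() reports
             * WRITE_ONE_BREAK before the limit would be busted; we then
             * close the current pack and loop again for the remaining
             * objects.
             */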
 668        do {
 669                unsigned char sha1[20];
 670                char *pack_tmp_name = NULL;
 671
 672                if (pack_to_stdout)
 673                        f = sha1fd_throughput(1, "<stdout>", progress_state);
 674                else
 675                        f = create_tmp_packfile(&pack_tmp_name);
 676
 677                offset = write_pack_header(f, nr_remaining);
 678                if (!offset)
 679                        die_errno("unable to write pack header");
 680                nr_written = 0;
 681                for (; i < nr_objects; i++) {
 682                        struct object_entry *e = write_order[i];
 683                        if (write_one(f, e, &offset) == WRITE_ONE_BREAK)
 684                                break;
 685                        display_progress(progress_state, written);
 686                }
 687
 688                /*
 689                 * Did we write the wrong # entries in the header?
 690                 * If so, rewrite it like in fast-import
 691                 */
 692                if (pack_to_stdout) {
 693                        sha1close(f, sha1, CSUM_CLOSE);
 694                } else if (nr_written == nr_remaining) {
 695                        sha1close(f, sha1, CSUM_FSYNC);
 696                } else {
 697                        int fd = sha1close(f, sha1, 0);
 698                        fixup_pack_header_footer(fd, sha1, pack_tmp_name,
 699                                                 nr_written, sha1, offset);
 700                        close(fd);
 701                }
 702
 703                if (!pack_to_stdout) {
 704                        struct stat st;
 705                        char tmpname[PATH_MAX];
 706
 707                        /*
 708                         * Packs are runtime accessed in their mtime
 709                         * order since newer packs are more likely to contain
 710                         * younger objects.  So if we are creating multiple
 711                         * packs then we should modify the mtime of later ones
 712                         * to preserve this property.
 713                         */
 714                        if (stat(pack_tmp_name, &st) < 0) {
 715                                warning("failed to stat %s: %s",
 716                                        pack_tmp_name, strerror(errno));
 717                        } else if (!last_mtime) {
 718                                last_mtime = st.st_mtime;
 719                        } else {
 720                                struct utimbuf utb;
 721                                utb.actime = st.st_atime;
 722                                utb.modtime = --last_mtime;
 723                                if (utime(pack_tmp_name, &utb) < 0)
 724                                        warning("failed utime() on %s: %s",
 725                                                pack_tmp_name, strerror(errno));
 726                        }
 727
 728                        /* Enough space for "-<sha-1>.pack"? */
 729                        if (sizeof(tmpname) <= strlen(base_name) + 50)
 730                                die("pack base name '%s' too long", base_name);
 731                        snprintf(tmpname, sizeof(tmpname), "%s-", base_name);
 732                        finish_tmp_packfile(tmpname, pack_tmp_name,
 733                                            written_list, nr_written,
 734                                            &pack_idx_opts, sha1);
 735                        free(pack_tmp_name);
 736                        puts(sha1_to_hex(sha1));
 737                }
 738
 739                /* mark written objects as written to previous pack */
 740                for (j = 0; j < nr_written; j++) {
 741                        written_list[j]->offset = (off_t)-1;
 742                }
 743                nr_remaining -= nr_written;
 744        } while (nr_remaining && i < nr_objects);
 745
 746        free(written_list);
 747        free(write_order);
 748        stop_progress(&progress_state);
 749        if (written != nr_result)
 750                die("wrote %"PRIu32" objects while expecting %"PRIu32,
 751                        written, nr_result);
 752}
 753
 754static int locate_object_entry_hash(const unsigned char *sha1)
 755{
 756        int i;
 757        unsigned int ui;
 758        memcpy(&ui, sha1, sizeof(unsigned int));
 759        i = ui % object_ix_hashsz;
 760        while (0 < object_ix[i]) {
 761                if (!hashcmp(sha1, objects[object_ix[i] - 1].idx.sha1))
 762                        return i;
 763                if (++i == object_ix_hashsz)
 764                        i = 0;
 765        }
 766        return -1 - i;
 767}
 768
 769static struct object_entry *locate_object_entry(const unsigned char *sha1)
 770{
 771        int i;
 772
 773        if (!object_ix_hashsz)
 774                return NULL;
 775
 776        i = locate_object_entry_hash(sha1);
 777        if (0 <= i)
 778                return &objects[object_ix[i]-1];
 779        return NULL;
 780}
 781
 782static void rehash_objects(void)
 783{
 784        uint32_t i;
 785        struct object_entry *oe;
 786
 787        object_ix_hashsz = nr_objects * 3;
 788        if (object_ix_hashsz < 1024)
 789                object_ix_hashsz = 1024;
 790        object_ix = xrealloc(object_ix, sizeof(int) * object_ix_hashsz);
 791        memset(object_ix, 0, sizeof(int) * object_ix_hashsz);
 792        for (i = 0, oe = objects; i < nr_objects; i++, oe++) {
 793                int ix = locate_object_entry_hash(oe->idx.sha1);
 794                if (0 <= ix)
 795                        continue;
 796                ix = -1 - ix;
 797                object_ix[ix] = i + 1;
 798        }
 799}
 800
 801static unsigned name_hash(const char *name)
 802{
 803        unsigned c, hash = 0;
 804
 805        if (!name)
 806                return 0;
 807
 808        /*
 809         * This effectively just creates a sortable number from the
 810         * last sixteen non-whitespace characters. Last characters
 811         * count "most", so things that end in ".c" sort together.
 812         */
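            /*
             * Whitespace is skipped, and every new character pushes older
             * ones toward the low bits, so paths that share a long common
             * suffix (e.g. the same basename under different directories)
             * get nearly identical hash values and sort next to each other.
             */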
 813        while ((c = *name++) != 0) {
 814                if (isspace(c))
 815                        continue;
 816                hash = (hash >> 2) + (c << 24);
 817        }
 818        return hash;
 819}
 820
 821static void setup_delta_attr_check(struct git_attr_check *check)
 822{
 823        static struct git_attr *attr_delta;
 824
 825        if (!attr_delta)
 826                attr_delta = git_attr("delta");
 827
 828        check[0].attr = attr_delta;
 829}
 830
 831static int no_try_delta(const char *path)
 832{
 833        struct git_attr_check check[1];
 834
 835        setup_delta_attr_check(check);
 836        if (git_check_attr(path, ARRAY_SIZE(check), check))
 837                return 0;
 838        if (ATTR_FALSE(check->value))
 839                return 1;
 840        return 0;
 841}
 842
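    /*
     * Add an object to the set we are going to pack or, when 'exclude' is
     * set, record it as a preferred base that is not packed itself but may
     * be used as a delta base.  Returns 1 when a new entry is added and 0
     * otherwise (already present, or filtered out by --incremental,
     * --local or .keep handling).
     */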
 843static int add_object_entry(const unsigned char *sha1, enum object_type type,
 844                            const char *name, int exclude)
 845{
 846        struct object_entry *entry;
 847        struct packed_git *p, *found_pack = NULL;
 848        off_t found_offset = 0;
 849        int ix;
 850        unsigned hash = name_hash(name);
 851
 852        ix = nr_objects ? locate_object_entry_hash(sha1) : -1;
 853        if (ix >= 0) {
 854                if (exclude) {
 855                        entry = objects + object_ix[ix] - 1;
 856                        if (!entry->preferred_base)
 857                                nr_result--;
 858                        entry->preferred_base = 1;
 859                }
 860                return 0;
 861        }
 862
 863        if (!exclude && local && has_loose_object_nonlocal(sha1))
 864                return 0;
 865
 866        for (p = packed_git; p; p = p->next) {
 867                off_t offset = find_pack_entry_one(sha1, p);
 868                if (offset) {
 869                        if (!found_pack) {
 870                                if (!is_pack_valid(p)) {
 871                                        warning("packfile %s cannot be accessed", p->pack_name);
 872                                        continue;
 873                                }
 874                                found_offset = offset;
 875                                found_pack = p;
 876                        }
 877                        if (exclude)
 878                                break;
 879                        if (incremental)
 880                                return 0;
 881                        if (local && !p->pack_local)
 882                                return 0;
 883                        if (ignore_packed_keep && p->pack_local && p->pack_keep)
 884                                return 0;
 885                }
 886        }
 887
 888        if (nr_objects >= nr_alloc) {
 889                nr_alloc = (nr_alloc  + 1024) * 3 / 2;
 890                objects = xrealloc(objects, nr_alloc * sizeof(*entry));
 891        }
 892
 893        entry = objects + nr_objects++;
 894        memset(entry, 0, sizeof(*entry));
 895        hashcpy(entry->idx.sha1, sha1);
 896        entry->hash = hash;
 897        if (type)
 898                entry->type = type;
 899        if (exclude)
 900                entry->preferred_base = 1;
 901        else
 902                nr_result++;
 903        if (found_pack) {
 904                entry->in_pack = found_pack;
 905                entry->in_pack_offset = found_offset;
 906        }
 907
 908        if (object_ix_hashsz * 3 <= nr_objects * 4)
 909                rehash_objects();
 910        else
 911                object_ix[-1 - ix] = nr_objects;
 912
 913        display_progress(progress_state, nr_objects);
 914
 915        if (name && no_try_delta(name))
 916                entry->no_try_delta = 1;
 917
 918        return 1;
 919}
 920
 921struct pbase_tree_cache {
 922        unsigned char sha1[20];
 923        int ref;
 924        int temporary;
 925        void *tree_data;
 926        unsigned long tree_size;
 927};
 928
 929static struct pbase_tree_cache *(pbase_tree_cache[256]);
 930static int pbase_tree_cache_ix(const unsigned char *sha1)
 931{
 932        return sha1[0] % ARRAY_SIZE(pbase_tree_cache);
 933}
 934static int pbase_tree_cache_ix_incr(int ix)
 935{
 936        return (ix+1) % ARRAY_SIZE(pbase_tree_cache);
 937}
 938
 939static struct pbase_tree {
 940        struct pbase_tree *next;
 941        /* This is a phony "cache" entry; we are not
 942         * going to evict it nor find it through the _get()
 943         * mechanism -- this is for the toplevel node that
 944         * would almost always change with any commit.
 945         */
 946        struct pbase_tree_cache pcache;
 947} *pbase_tree;
 948
 949static struct pbase_tree_cache *pbase_tree_get(const unsigned char *sha1)
 950{
 951        struct pbase_tree_cache *ent, *nent;
 952        void *data;
 953        unsigned long size;
 954        enum object_type type;
 955        int neigh;
 956        int my_ix = pbase_tree_cache_ix(sha1);
 957        int available_ix = -1;
 958
 959        /* pbase-tree-cache acts as a limited hashtable.
 960         * If the object is cached, it will be found either at its own
 961         * slot or within a few slots after it.
 962         */
 963        for (neigh = 0; neigh < 8; neigh++) {
 964                ent = pbase_tree_cache[my_ix];
 965                if (ent && !hashcmp(ent->sha1, sha1)) {
 966                        ent->ref++;
 967                        return ent;
 968                }
 969                else if (((available_ix < 0) && (!ent || !ent->ref)) ||
 970                         ((0 <= available_ix) &&
 971                          (!ent && pbase_tree_cache[available_ix])))
 972                        available_ix = my_ix;
 973                if (!ent)
 974                        break;
 975                my_ix = pbase_tree_cache_ix_incr(my_ix);
 976        }
 977
 978        /* Did not find one.  Either we got a bogus request or
 979         * we need to read and perhaps cache.
 980         */
 981        data = read_sha1_file(sha1, &type, &size);
 982        if (!data)
 983                return NULL;
 984        if (type != OBJ_TREE) {
 985                free(data);
 986                return NULL;
 987        }
 988
 989        /* We need to either cache or return a throwaway copy */
 990
 991        if (available_ix < 0)
 992                ent = NULL;
 993        else {
 994                ent = pbase_tree_cache[available_ix];
 995                my_ix = available_ix;
 996        }
 997
 998        if (!ent) {
 999                nent = xmalloc(sizeof(*nent));
1000                nent->temporary = (available_ix < 0);
1001        }
1002        else {
1003                /* evict and reuse */
1004                free(ent->tree_data);
1005                nent = ent;
1006        }
1007        hashcpy(nent->sha1, sha1);
1008        nent->tree_data = data;
1009        nent->tree_size = size;
1010        nent->ref = 1;
1011        if (!nent->temporary)
1012                pbase_tree_cache[my_ix] = nent;
1013        return nent;
1014}
1015
1016static void pbase_tree_put(struct pbase_tree_cache *cache)
1017{
1018        if (!cache->temporary) {
1019                cache->ref--;
1020                return;
1021        }
1022        free(cache->tree_data);
1023        free(cache);
1024}
1025
1026static int name_cmp_len(const char *name)
1027{
1028        int i;
1029        for (i = 0; name[i] && name[i] != '\n' && name[i] != '/'; i++)
1030                ;
1031        return i;
1032}
1033
1034static void add_pbase_object(struct tree_desc *tree,
1035                             const char *name,
1036                             int cmplen,
1037                             const char *fullname)
1038{
1039        struct name_entry entry;
1040        int cmp;
1041
1042        while (tree_entry(tree,&entry)) {
1043                if (S_ISGITLINK(entry.mode))
1044                        continue;
1045                cmp = tree_entry_len(&entry) != cmplen ? 1 :
1046                      memcmp(name, entry.path, cmplen);
1047                if (cmp > 0)
1048                        continue;
1049                if (cmp < 0)
1050                        return;
1051                if (name[cmplen] != '/') {
1052                        add_object_entry(entry.sha1,
1053                                         object_type(entry.mode),
1054                                         fullname, 1);
1055                        return;
1056                }
1057                if (S_ISDIR(entry.mode)) {
1058                        struct tree_desc sub;
1059                        struct pbase_tree_cache *tree;
1060                        const char *down = name+cmplen+1;
1061                        int downlen = name_cmp_len(down);
1062
1063                        tree = pbase_tree_get(entry.sha1);
1064                        if (!tree)
1065                                return;
1066                        init_tree_desc(&sub, tree->tree_data, tree->tree_size);
1067
1068                        add_pbase_object(&sub, down, downlen, fullname);
1069                        pbase_tree_put(tree);
1070                }
1071        }
1072}
1073
1074static unsigned *done_pbase_paths;
1075static int done_pbase_paths_num;
1076static int done_pbase_paths_alloc;
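    /*
     * done_pbase_paths[] remembers the name hashes already handled by
     * add_preferred_base_object(); judging from the comparison below it is
     * kept sorted in descending order, and the search returns either the
     * matching index or "-insertion_point - 1" when the hash is absent.
     */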
1077static int done_pbase_path_pos(unsigned hash)
1078{
1079        int lo = 0;
1080        int hi = done_pbase_paths_num;
1081        while (lo < hi) {
1082                int mi = (hi + lo) / 2;
1083                if (done_pbase_paths[mi] == hash)
1084                        return mi;
1085                if (done_pbase_paths[mi] < hash)
1086                        hi = mi;
1087                else
1088                        lo = mi + 1;
1089        }
1090        return -lo-1;
1091}
1092
1093static int check_pbase_path(unsigned hash)
1094{
1095        int pos = (!done_pbase_paths) ? -1 : done_pbase_path_pos(hash);
1096        if (0 <= pos)
1097                return 1;
1098        pos = -pos - 1;
1099        if (done_pbase_paths_alloc <= done_pbase_paths_num) {
1100                done_pbase_paths_alloc = alloc_nr(done_pbase_paths_alloc);
1101                done_pbase_paths = xrealloc(done_pbase_paths,
1102                                            done_pbase_paths_alloc *
1103                                            sizeof(unsigned));
1104        }
1105        done_pbase_paths_num++;
1106        if (pos < done_pbase_paths_num)
1107                memmove(done_pbase_paths + pos + 1,
1108                        done_pbase_paths + pos,
1109                        (done_pbase_paths_num - pos - 1) * sizeof(unsigned));
1110        done_pbase_paths[pos] = hash;
1111        return 0;
1112}
1113
1114static void add_preferred_base_object(const char *name)
1115{
1116        struct pbase_tree *it;
1117        int cmplen;
1118        unsigned hash = name_hash(name);
1119
1120        if (!num_preferred_base || check_pbase_path(hash))
1121                return;
1122
1123        cmplen = name_cmp_len(name);
1124        for (it = pbase_tree; it; it = it->next) {
1125                if (cmplen == 0) {
1126                        add_object_entry(it->pcache.sha1, OBJ_TREE, NULL, 1);
1127                }
1128                else {
1129                        struct tree_desc tree;
1130                        init_tree_desc(&tree, it->pcache.tree_data, it->pcache.tree_size);
1131                        add_pbase_object(&tree, name, cmplen, name);
1132                }
1133        }
1134}
1135
1136static void add_preferred_base(unsigned char *sha1)
1137{
1138        struct pbase_tree *it;
1139        void *data;
1140        unsigned long size;
1141        unsigned char tree_sha1[20];
1142
1143        if (window <= num_preferred_base++)
1144                return;
1145
1146        data = read_object_with_reference(sha1, tree_type, &size, tree_sha1);
1147        if (!data)
1148                return;
1149
1150        for (it = pbase_tree; it; it = it->next) {
1151                if (!hashcmp(it->pcache.sha1, tree_sha1)) {
1152                        free(data);
1153                        return;
1154                }
1155        }
1156
1157        it = xcalloc(1, sizeof(*it));
1158        it->next = pbase_tree;
1159        pbase_tree = it;
1160
1161        hashcpy(it->pcache.sha1, tree_sha1);
1162        it->pcache.tree_data = data;
1163        it->pcache.tree_size = size;
1164}
1165
1166static void cleanup_preferred_base(void)
1167{
1168        struct pbase_tree *it;
1169        unsigned i;
1170
1171        it = pbase_tree;
1172        pbase_tree = NULL;
1173        while (it) {
1174                struct pbase_tree *this = it;
1175                it = this->next;
1176                free(this->pcache.tree_data);
1177                free(this);
1178        }
1179
1180        for (i = 0; i < ARRAY_SIZE(pbase_tree_cache); i++) {
1181                if (!pbase_tree_cache[i])
1182                        continue;
1183                free(pbase_tree_cache[i]->tree_data);
1184                free(pbase_tree_cache[i]);
1185                pbase_tree_cache[i] = NULL;
1186        }
1187
1188        free(done_pbase_paths);
1189        done_pbase_paths = NULL;
1190        done_pbase_paths_num = done_pbase_paths_alloc = 0;
1191}
1192
1193static void check_object(struct object_entry *entry)
1194{
1195        if (entry->in_pack) {
1196                struct packed_git *p = entry->in_pack;
1197                struct pack_window *w_curs = NULL;
1198                const unsigned char *base_ref = NULL;
1199                struct object_entry *base_entry;
1200                unsigned long used, used_0;
1201                unsigned long avail;
1202                off_t ofs;
1203                unsigned char *buf, c;
1204
1205                buf = use_pack(p, &w_curs, entry->in_pack_offset, &avail);
1206
1207                /*
1208                 * We want in_pack_type even if we do not reuse delta
1209                 * since non-delta representations could still be reused.
1210                 */
1211                used = unpack_object_header_buffer(buf, avail,
1212                                                   &entry->in_pack_type,
1213                                                   &entry->size);
1214                if (used == 0)
1215                        goto give_up;
1216
1217                /*
1218                 * Determine if this is a delta and if so whether we can
1219                 * reuse it or not.  Otherwise let's find out as cheaply as
1220                 * possible what the actual type and size for this object is.
1221                 */
1222                switch (entry->in_pack_type) {
1223                default:
1224                        /* Not a delta hence we've already got all we need. */
1225                        entry->type = entry->in_pack_type;
1226                        entry->in_pack_header_size = used;
1227                        if (entry->type < OBJ_COMMIT || entry->type > OBJ_BLOB)
1228                                goto give_up;
1229                        unuse_pack(&w_curs);
1230                        return;
1231                case OBJ_REF_DELTA:
1232                        if (reuse_delta && !entry->preferred_base)
1233                                base_ref = use_pack(p, &w_curs,
1234                                                entry->in_pack_offset + used, NULL);
1235                        entry->in_pack_header_size = used + 20;
1236                        break;
1237                case OBJ_OFS_DELTA:
1238                        buf = use_pack(p, &w_curs,
1239                                       entry->in_pack_offset + used, NULL);
1240                        used_0 = 0;
1241                        c = buf[used_0++];
1242                        ofs = c & 127;
1243                        while (c & 128) {
1244                                ofs += 1;
1245                                if (!ofs || MSB(ofs, 7)) {
1246                                        error("delta base offset overflow in pack for %s",
1247                                              sha1_to_hex(entry->idx.sha1));
1248                                        goto give_up;
1249                                }
1250                                c = buf[used_0++];
1251                                ofs = (ofs << 7) + (c & 127);
1252                        }
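                            /*
                             * This is the inverse of the "subtract one per
                             * byte" offset encoding used on the writing
                             * side; e.g. the bytes 0x80 0x02 decode to an
                             * offset of 130.
                             */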
1253                        ofs = entry->in_pack_offset - ofs;
1254                        if (ofs <= 0 || ofs >= entry->in_pack_offset) {
1255                                error("delta base offset out of bound for %s",
1256                                      sha1_to_hex(entry->idx.sha1));
1257                                goto give_up;
1258                        }
1259                        if (reuse_delta && !entry->preferred_base) {
1260                                struct revindex_entry *revidx;
1261                                revidx = find_pack_revindex(p, ofs);
1262                                if (!revidx)
1263                                        goto give_up;
1264                                base_ref = nth_packed_object_sha1(p, revidx->nr);
1265                        }
1266                        entry->in_pack_header_size = used + used_0;
1267                        break;
1268                }
1269
1270                if (base_ref && (base_entry = locate_object_entry(base_ref))) {
1271                        /*
1272                         * If base_ref was set above that means we wish to
1273                         * reuse delta data, and we even found that base
1274                         * in the list of objects we want to pack. Goodie!
1275                         *
1276                         * Depth value does not matter - find_deltas() will
1277                         * never consider reused delta as the base object to
1278                         * deltify other objects against, in order to avoid
1279                         * circular deltas.
1280                         */
1281                        entry->type = entry->in_pack_type;
1282                        entry->delta = base_entry;
1283                        entry->delta_size = entry->size;
1284                        entry->delta_sibling = base_entry->delta_child;
1285                        base_entry->delta_child = entry;
1286                        unuse_pack(&w_curs);
1287                        return;
1288                }
1289
1290                if (entry->type) {
1291                        /*
1292                         * This must be a delta and we already know what the
1293                         * final object type is.  Let's extract the actual
1294                         * object size from the delta header.
1295                         */
1296                        entry->size = get_size_from_delta(p, &w_curs,
1297                                        entry->in_pack_offset + entry->in_pack_header_size);
1298                        if (entry->size == 0)
1299                                goto give_up;
1300                        unuse_pack(&w_curs);
1301                        return;
1302                }
1303
1304                /*
1305                 * No choice but to fall back to the recursive delta walk
1306                 * with sha1_object_info() to find out the object type
1307                 * at this point...
1308                 */
1309                give_up:
1310                unuse_pack(&w_curs);
1311        }
1312
1313        entry->type = sha1_object_info(entry->idx.sha1, &entry->size);
1314        /*
1315         * The error condition is checked in prepare_pack().  This is
1316         * to permit a missing preferred base object to be ignored
1317         * as a preferred base.  Doing so can result in a larger
1318         * pack file, but the transfer will still take place.
1319         */
1320}
1321
1322static int pack_offset_sort(const void *_a, const void *_b)
1323{
1324        const struct object_entry *a = *(struct object_entry **)_a;
1325        const struct object_entry *b = *(struct object_entry **)_b;
1326
1327        /* avoid filesystem thrashing with loose objects */
1328        if (!a->in_pack && !b->in_pack)
1329                return hashcmp(a->idx.sha1, b->idx.sha1);
1330
1331        if (a->in_pack < b->in_pack)
1332                return -1;
1333        if (a->in_pack > b->in_pack)
1334                return 1;
1335        return a->in_pack_offset < b->in_pack_offset ? -1 :
1336                        (a->in_pack_offset > b->in_pack_offset);
1337}
1338
1339static void get_object_details(void)
1340{
1341        uint32_t i;
1342        struct object_entry **sorted_by_offset;
1343
1344        sorted_by_offset = xcalloc(nr_objects, sizeof(struct object_entry *));
1345        for (i = 0; i < nr_objects; i++)
1346                sorted_by_offset[i] = objects + i;
1347        qsort(sorted_by_offset, nr_objects, sizeof(*sorted_by_offset), pack_offset_sort);
1348
1349        for (i = 0; i < nr_objects; i++) {
1350                struct object_entry *entry = sorted_by_offset[i];
1351                check_object(entry);
1352                if (big_file_threshold < entry->size)
1353                        entry->no_try_delta = 1;
1354        }
1355
1356        free(sorted_by_offset);
1357}
1358
1359/*
1360 * We search for deltas in a list sorted by type, by filename hash, and then
1361 * by size, so that we see progressively smaller and smaller files.
1362 * That's because we prefer deltas to be from the bigger file
1363 * to the smaller -- deletes are potentially cheaper, but perhaps
1364 * more importantly, the bigger file is likely the more recent
1365 * one.  The deepest deltas are therefore the oldest objects, which are
1366 * the least likely to be accessed often.
1367 */
1368static int type_size_sort(const void *_a, const void *_b)
1369{
1370        const struct object_entry *a = *(struct object_entry **)_a;
1371        const struct object_entry *b = *(struct object_entry **)_b;
1372
1373        if (a->type > b->type)
1374                return -1;
1375        if (a->type < b->type)
1376                return 1;
1377        if (a->hash > b->hash)
1378                return -1;
1379        if (a->hash < b->hash)
1380                return 1;
1381        if (a->preferred_base > b->preferred_base)
1382                return -1;
1383        if (a->preferred_base < b->preferred_base)
1384                return 1;
1385        if (a->size > b->size)
1386                return -1;
1387        if (a->size < b->size)
1388                return 1;
1389        return a < b ? -1 : (a > b);  /* newest first */
1390}
1391
1392struct unpacked {
1393        struct object_entry *entry;
1394        void *data;
1395        struct delta_index *index;
1396        unsigned depth;
1397};
1398
1399static int delta_cacheable(unsigned long src_size, unsigned long trg_size,
1400                           unsigned long delta_size)
1401{
1402        if (max_delta_cache_size && delta_cache_size + delta_size > max_delta_cache_size)
1403                return 0;
1404
1405        if (delta_size < cache_max_small_delta_size)
1406                return 1;
1407
1408        /* cache delta, if objects are large enough compared to delta size */
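            /*
             * In round numbers: with an 8 MiB source and a 4 MiB target the
             * left-hand side is 8 + 2 = 10, so the delta is cached as long
             * as it stays under roughly 10 KiB.
             */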
1409        if ((src_size >> 20) + (trg_size >> 21) > (delta_size >> 10))
1410                return 1;
1411
1412        return 0;
1413}
1414
1415#ifndef NO_PTHREADS
1416
1417static pthread_mutex_t read_mutex;
1418#define read_lock()             pthread_mutex_lock(&read_mutex)
1419#define read_unlock()           pthread_mutex_unlock(&read_mutex)
1420
1421static pthread_mutex_t cache_mutex;
1422#define cache_lock()            pthread_mutex_lock(&cache_mutex)
1423#define cache_unlock()          pthread_mutex_unlock(&cache_mutex)
1424
1425static pthread_mutex_t progress_mutex;
1426#define progress_lock()         pthread_mutex_lock(&progress_mutex)
1427#define progress_unlock()       pthread_mutex_unlock(&progress_mutex)
1428
1429#else
1430
1431#define read_lock()             (void)0
1432#define read_unlock()           (void)0
1433#define cache_lock()            (void)0
1434#define cache_unlock()          (void)0
1435#define progress_lock()         (void)0
1436#define progress_unlock()       (void)0
1437
1438#endif
1439
1440static int try_delta(struct unpacked *trg, struct unpacked *src,
1441                     unsigned max_depth, unsigned long *mem_usage)
1442{
1443        struct object_entry *trg_entry = trg->entry;
1444        struct object_entry *src_entry = src->entry;
1445        unsigned long trg_size, src_size, delta_size, sizediff, max_size, sz;
1446        unsigned ref_depth;
1447        enum object_type type;
1448        void *delta_buf;
1449
1450        /* Don't bother doing diffs between different types */
1451        if (trg_entry->type != src_entry->type)
1452                return -1;
1453
1454        /*
1455         * We do not bother to try a delta that we discarded on an
1456         * earlier try, but only when reusing delta data.  Note that
1457         * a src_entry marked as a preferred_base should always be
1458         * considered: even if we produce a suboptimal delta against
1459         * it, we will still save the transfer cost, as we already know
1460         * the other side has it and we won't send src_entry at all.
1461         */
1462        if (reuse_delta && trg_entry->in_pack &&
1463            trg_entry->in_pack == src_entry->in_pack &&
1464            !src_entry->preferred_base &&
1465            trg_entry->in_pack_type != OBJ_REF_DELTA &&
1466            trg_entry->in_pack_type != OBJ_OFS_DELTA)
1467                return 0;
1468
1469        /* Let's not bust the allowed depth. */
1470        if (src->depth >= max_depth)
1471                return 0;
1472
1473        /* Now some size filtering heuristics. */
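            /*
             * Budget for a new delta: it must be smaller than the delta it
             * would replace (or, if there is none yet, than half the target
             * minus 20 bytes), scaled down by how much of the remaining
             * --depth budget the new chain position would consume.
             */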
1474        trg_size = trg_entry->size;
1475        if (!trg_entry->delta) {
1476                max_size = trg_size/2 - 20;
1477                ref_depth = 1;
1478        } else {
1479                max_size = trg_entry->delta_size;
1480                ref_depth = trg->depth;
1481        }
1482        max_size = (uint64_t)max_size * (max_depth - src->depth) /
1483                                                (max_depth - ref_depth + 1);
1484        if (max_size == 0)
1485                return 0;
1486        src_size = src_entry->size;
1487        sizediff = src_size < trg_size ? trg_size - src_size : 0;
1488        if (sizediff >= max_size)
1489                return 0;
1490        if (trg_size < src_size / 32)
1491                return 0;
1492
1493        /* Load data if not already done */
1494        if (!trg->data) {
1495                read_lock();
1496                trg->data = read_sha1_file(trg_entry->idx.sha1, &type, &sz);
1497                read_unlock();
1498                if (!trg->data)
1499                        die("object %s cannot be read",
1500                            sha1_to_hex(trg_entry->idx.sha1));
1501                if (sz != trg_size)
1502                        die("object %s inconsistent object length (%lu vs %lu)",
1503                            sha1_to_hex(trg_entry->idx.sha1), sz, trg_size);
1504                *mem_usage += sz;
1505        }
1506        if (!src->data) {
1507                read_lock();
1508                src->data = read_sha1_file(src_entry->idx.sha1, &type, &sz);
1509                read_unlock();
1510                if (!src->data) {
1511                        if (src_entry->preferred_base) {
1512                                static int warned = 0;
1513                                if (!warned++)
1514                                        warning("object %s cannot be read",
1515                                                sha1_to_hex(src_entry->idx.sha1));
1516                                /*
1517                                 * Those objects are not included in the
1518                                 * resulting pack.  Be resilient and ignore
1519                                 * them if they can't be read, so that the
1520                                 * pack can still be created.
1521                                 */
1522                                return 0;
1523                        }
1524                        die("object %s cannot be read",
1525                            sha1_to_hex(src_entry->idx.sha1));
1526                }
1527                if (sz != src_size)
1528                        die("object %s inconsistent object length (%lu vs %lu)",
1529                            sha1_to_hex(src_entry->idx.sha1), sz, src_size);
1530                *mem_usage += sz;
1531        }
1532        if (!src->index) {
1533                src->index = create_delta_index(src->data, src_size);
1534                if (!src->index) {
1535                        static int warned = 0;
1536                        if (!warned++)
1537                                warning("suboptimal pack - out of memory");
1538                        return 0;
1539                }
1540                *mem_usage += sizeof_delta_index(src->index);
1541        }
1542
1543        delta_buf = create_delta(src->index, trg->data, trg_size, &delta_size, max_size);
1544        if (!delta_buf)
1545                return 0;
1546
1547        if (trg_entry->delta) {
1548                /* Prefer only shallower same-sized deltas. */
1549                if (delta_size == trg_entry->delta_size &&
1550                    src->depth + 1 >= trg->depth) {
1551                        free(delta_buf);
1552                        return 0;
1553                }
1554        }
1555
1556        /*
1557         * Handle memory allocation outside of the cache
1558         * accounting lock.  The compiler will optimize the strangeness
1559         * away when NO_PTHREADS is defined.
1560         */
1561        free(trg_entry->delta_data);
1562        cache_lock();
1563        if (trg_entry->delta_data) {
1564                delta_cache_size -= trg_entry->delta_size;
1565                trg_entry->delta_data = NULL;
1566        }
1567        if (delta_cacheable(src_size, trg_size, delta_size)) {
1568                delta_cache_size += delta_size;
1569                cache_unlock();
1570                trg_entry->delta_data = xrealloc(delta_buf, delta_size);
1571        } else {
1572                cache_unlock();
1573                free(delta_buf);
1574        }
1575
1576        trg_entry->delta = src_entry;
1577        trg_entry->delta_size = delta_size;
1578        trg->depth = src->depth + 1;
1579
1580        return 1;
1581}
1582
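    /*
     * Return the depth of the deepest delta chain hanging off "me",
     * counting "me" itself as sitting at depth n.  find_deltas() uses
     * this to shrink the depth budget of an object that already has
     * delta children, so the chains built on top of it stay within
     * --depth.
     */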
1583static unsigned int check_delta_limit(struct object_entry *me, unsigned int n)
1584{
1585        struct object_entry *child = me->delta_child;
1586        unsigned int m = n;
1587        while (child) {
1588                unsigned int c = check_delta_limit(child, n + 1);
1589                if (m < c)
1590                        m = c;
1591                child = child->delta_sibling;
1592        }
1593        return m;
1594}
1595
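    /*
     * Drop the delta index and cached object data held by a window slot
     * and report how many bytes were released, so the caller can keep
     * its --window-memory accounting up to date.
     */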
1596static unsigned long free_unpacked(struct unpacked *n)
1597{
1598        unsigned long freed_mem = sizeof_delta_index(n->index);
1599        free_delta_index(n->index);
1600        n->index = NULL;
1601        if (n->data) {
1602                freed_mem += n->entry->size;
1603                free(n->data);
1604                n->data = NULL;
1605        }
1606        n->entry = NULL;
1607        n->depth = 0;
1608        return freed_mem;
1609}
1610
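    /*
     * Single-threaded delta search over one segment of the object list.
     * Each entry taken from *list is matched (via try_delta()) against up
     * to "window" previously seen objects; a successful base is moved up
     * in the window so it is tried first for the objects that follow.
     * "list_size" and "processed" may be updated concurrently, hence the
     * progress_lock() around them.
     */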
1611static void find_deltas(struct object_entry **list, unsigned *list_size,
1612                        int window, int depth, unsigned *processed)
1613{
1614        uint32_t i, idx = 0, count = 0;
1615        struct unpacked *array;
1616        unsigned long mem_usage = 0;
1617
1618        array = xcalloc(window, sizeof(struct unpacked));
1619
1620        for (;;) {
1621                struct object_entry *entry;
1622                struct unpacked *n = array + idx;
1623                int j, max_depth, best_base = -1;
1624
1625                progress_lock();
1626                if (!*list_size) {
1627                        progress_unlock();
1628                        break;
1629                }
1630                entry = *list++;
1631                (*list_size)--;
1632                if (!entry->preferred_base) {
1633                        (*processed)++;
1634                        display_progress(progress_state, *processed);
1635                }
1636                progress_unlock();
1637
1638                mem_usage -= free_unpacked(n);
1639                n->entry = entry;
1640
1641                while (window_memory_limit &&
1642                       mem_usage > window_memory_limit &&
1643                       count > 1) {
1644                        uint32_t tail = (idx + window - count) % window;
1645                        mem_usage -= free_unpacked(array + tail);
1646                        count--;
1647                }
1648
1649                /* We do not compute deltas to *create* objects we are not
1650                 * going to pack.
1651                 */
1652                if (entry->preferred_base)
1653                        goto next;
1654
1655                /*
1656                 * If the current object is at the pack edge, take the depth of
1657                 * the objects that depend on the current object into account;
1658                 * otherwise they would become too deep.
1659                 */
1660                max_depth = depth;
1661                if (entry->delta_child) {
1662                        max_depth -= check_delta_limit(entry, 0);
1663                        if (max_depth <= 0)
1664                                goto next;
1665                }
1666
1667                j = window;
1668                while (--j > 0) {
1669                        int ret;
1670                        uint32_t other_idx = idx + j;
1671                        struct unpacked *m;
1672                        if (other_idx >= window)
1673                                other_idx -= window;
1674                        m = array + other_idx;
1675                        if (!m->entry)
1676                                break;
1677                        ret = try_delta(n, m, max_depth, &mem_usage);
1678                        if (ret < 0)
1679                                break;
1680                        else if (ret > 0)
1681                                best_base = other_idx;
1682                }
1683
1684                /*
1685                 * If we decided to cache the delta data, then it is best
1686                 * to compress it right away.  First, we have to do it
1687                 * anyway; second, doing it here while we're threaded will
1688                 * save a lot of time in the non-threaded write phase,
1689                 * as well as allow for caching more deltas within
1690                 * the same cache size limit.
1691                 * ...
1692                 * But only if not writing to stdout, since in that case
1693                 * the network is most likely throttling writes anyway,
1694                 * and therefore it is best to go to the write phase ASAP
1695                 * instead, as we can afford spending more time compressing
1696                 * between writes at that moment.
1697                 */
1698                if (entry->delta_data && !pack_to_stdout) {
1699                        entry->z_delta_size = do_compress(&entry->delta_data,
1700                                                          entry->delta_size);
1701                        cache_lock();
1702                        delta_cache_size -= entry->delta_size;
1703                        delta_cache_size += entry->z_delta_size;
1704                        cache_unlock();
1705                }
1706
1707                /* If we made n a delta, and if n is already at max
1708                 * depth, leaving it in the window is pointless.  We
1709                 * should evict it first.
1710                 */
1711                if (entry->delta && max_depth <= n->depth)
1712                        continue;
1713
1714                /*
1715                 * Move the best delta base up in the window, after the
1716                 * currently deltified object, to keep it longer.  It will
1717                 * be the first base object to be attempted next.
1718                 */
1719                if (entry->delta) {
1720                        struct unpacked swap = array[best_base];
1721                        int dist = (window + idx - best_base) % window;
1722                        int dst = best_base;
1723                        while (dist--) {
1724                                int src = (dst + 1) % window;
1725                                array[dst] = array[src];
1726                                dst = src;
1727                        }
1728                        array[dst] = swap;
1729                }
1730
1731                next:
1732                idx++;
1733                if (count + 1 < window)
1734                        count++;
1735                if (idx >= window)
1736                        idx = 0;
1737        }
1738
1739        for (i = 0; i < window; ++i) {
1740                free_delta_index(array[i].index);
1741                free(array[i].data);
1742        }
1743        free(array);
1744}
1745
1746#ifndef NO_PTHREADS
1747
1748static void try_to_free_from_threads(size_t size)
1749{
1750        read_lock();
1751        release_pack_memory(size, -1);
1752        read_unlock();
1753}
1754
1755static try_to_free_t old_try_to_free_routine;
1756
1757/*
1758 * The main thread waits on the condition that (at least) one of the workers
1759 * has stopped working (which is indicated in the .working member of
1760 * struct thread_params).
1761 * When a work thread has completed its work, it sets .working to 0,
1762 * signals the main thread, and then waits on the condition that .data_ready
1763 * becomes 1.
1764 */
1765
1766struct thread_params {
1767        pthread_t thread;
1768        struct object_entry **list;
1769        unsigned list_size;
1770        unsigned remaining;
1771        int window;
1772        int depth;
1773        int working;
1774        int data_ready;
1775        pthread_mutex_t mutex;
1776        pthread_cond_t cond;
1777        unsigned *processed;
1778};
1779
1780static pthread_cond_t progress_cond;
1781
1782/*
1783 * Mutexes and condition variables can't be statically initialized on Windows.
1784 */
1785static void init_threaded_search(void)
1786{
1787        init_recursive_mutex(&read_mutex);
1788        pthread_mutex_init(&cache_mutex, NULL);
1789        pthread_mutex_init(&progress_mutex, NULL);
1790        pthread_cond_init(&progress_cond, NULL);
1791        old_try_to_free_routine = set_try_to_free_routine(try_to_free_from_threads);
1792}
1793
1794static void cleanup_threaded_search(void)
1795{
1796        set_try_to_free_routine(old_try_to_free_routine);
1797        pthread_cond_destroy(&progress_cond);
1798        pthread_mutex_destroy(&read_mutex);
1799        pthread_mutex_destroy(&cache_mutex);
1800        pthread_mutex_destroy(&progress_mutex);
1801}
1802
1803static void *threaded_find_deltas(void *arg)
1804{
1805        struct thread_params *me = arg;
1806
1807        while (me->remaining) {
1808                find_deltas(me->list, &me->remaining,
1809                            me->window, me->depth, me->processed);
1810
1811                progress_lock();
1812                me->working = 0;
1813                pthread_cond_signal(&progress_cond);
1814                progress_unlock();
1815
1816                /*
1817                 * We must not set ->data_ready before we wait on the
1818                 * condition because the main thread may have set it to 1
1819                 * before we get here. In order to be sure that new
1820                 * work is available if we see 1 in ->data_ready, it
1821                 * was initialized to 0 before this thread was spawned
1822                 * and we reset it to 0 right away.
1823                 */
1824                pthread_mutex_lock(&me->mutex);
1825                while (!me->data_ready)
1826                        pthread_cond_wait(&me->cond, &me->mutex);
1827                me->data_ready = 0;
1828                pthread_mutex_unlock(&me->mutex);
1829        }
1830        /* leave ->working 1 so that this doesn't get more work assigned */
1831        return NULL;
1832}
1833
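    /*
     * Threaded front end for find_deltas(): partition the list across the
     * delta search threads (preferring to cut on name-hash boundaries),
     * then keep everyone busy by stealing half of the largest remaining
     * segment whenever a worker runs out of work.
     */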
1834static void ll_find_deltas(struct object_entry **list, unsigned list_size,
1835                           int window, int depth, unsigned *processed)
1836{
1837        struct thread_params *p;
1838        int i, ret, active_threads = 0;
1839
1840        init_threaded_search();
1841
1842        if (!delta_search_threads)      /* --threads=0 means autodetect */
1843                delta_search_threads = online_cpus();
1844        if (delta_search_threads <= 1) {
1845                find_deltas(list, &list_size, window, depth, processed);
1846                cleanup_threaded_search();
1847                return;
1848        }
1849        if (progress > pack_to_stdout)
1850                fprintf(stderr, "Delta compression using up to %d threads.\n",
1851                                delta_search_threads);
1852        p = xcalloc(delta_search_threads, sizeof(*p));
1853
1854        /* Partition the work amongst work threads. */
1855        for (i = 0; i < delta_search_threads; i++) {
1856                unsigned sub_size = list_size / (delta_search_threads - i);
1857
1858                /* don't use segments that are too small, or no deltas will be found */
1859                if (sub_size < 2*window && i+1 < delta_search_threads)
1860                        sub_size = 0;
1861
1862                p[i].window = window;
1863                p[i].depth = depth;
1864                p[i].processed = processed;
1865                p[i].working = 1;
1866                p[i].data_ready = 0;
1867
1868                /* try to split chunks on "path" boundaries */
1869                while (sub_size && sub_size < list_size &&
1870                       list[sub_size]->hash &&
1871                       list[sub_size]->hash == list[sub_size-1]->hash)
1872                        sub_size++;
1873
1874                p[i].list = list;
1875                p[i].list_size = sub_size;
1876                p[i].remaining = sub_size;
1877
1878                list += sub_size;
1879                list_size -= sub_size;
1880        }
1881
1882        /* Start work threads. */
1883        for (i = 0; i < delta_search_threads; i++) {
1884                if (!p[i].list_size)
1885                        continue;
1886                pthread_mutex_init(&p[i].mutex, NULL);
1887                pthread_cond_init(&p[i].cond, NULL);
1888                ret = pthread_create(&p[i].thread, NULL,
1889                                     threaded_find_deltas, &p[i]);
1890                if (ret)
1891                        die("unable to create thread: %s", strerror(ret));
1892                active_threads++;
1893        }
1894
1895        /*
1896         * Now let's wait for work completion.  Each time a thread is done
1897         * with its work, we steal half of the remaining work from the
1898         * thread with the largest number of unprocessed objects and give
1899 * it to that newly idle thread.  This ensures good load balancing
1900         * until the remaining object list segments are simply too short
1901         * to be worth splitting anymore.
1902         */
1903        while (active_threads) {
1904                struct thread_params *target = NULL;
1905                struct thread_params *victim = NULL;
1906                unsigned sub_size = 0;
1907
1908                progress_lock();
1909                for (;;) {
1910                        for (i = 0; !target && i < delta_search_threads; i++)
1911                                if (!p[i].working)
1912                                        target = &p[i];
1913                        if (target)
1914                                break;
1915                        pthread_cond_wait(&progress_cond, &progress_mutex);
1916                }
1917
1918                for (i = 0; i < delta_search_threads; i++)
1919                        if (p[i].remaining > 2*window &&
1920                            (!victim || victim->remaining < p[i].remaining))
1921                                victim = &p[i];
1922                if (victim) {
1923                        sub_size = victim->remaining / 2;
1924                        list = victim->list + victim->list_size - sub_size;
1925                        while (sub_size && list[0]->hash &&
1926                               list[0]->hash == list[-1]->hash) {
1927                                list++;
1928                                sub_size--;
1929                        }
1930                        if (!sub_size) {
1931                                /*
1932                                 * It is possible for some "paths" to have
1933                                 * so many objects that no hash boundary
1934                                 * might be found.  Let's just steal the
1935                                 * exact half in that case.
1936                                 */
1937                                sub_size = victim->remaining / 2;
1938                                list -= sub_size;
1939                        }
1940                        target->list = list;
1941                        victim->list_size -= sub_size;
1942                        victim->remaining -= sub_size;
1943                }
1944                target->list_size = sub_size;
1945                target->remaining = sub_size;
1946                target->working = 1;
1947                progress_unlock();
1948
1949                pthread_mutex_lock(&target->mutex);
1950                target->data_ready = 1;
1951                pthread_cond_signal(&target->cond);
1952                pthread_mutex_unlock(&target->mutex);
1953
1954                if (!sub_size) {
1955                        pthread_join(target->thread, NULL);
1956                        pthread_cond_destroy(&target->cond);
1957                        pthread_mutex_destroy(&target->mutex);
1958                        active_threads--;
1959                }
1960        }
1961        cleanup_threaded_search();
1962        free(p);
1963}
1964
1965#else
1966#define ll_find_deltas(l, s, w, d, p)   find_deltas(l, &s, w, d, p)
1967#endif
1968
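    /*
     * for_each_ref() callback used by --include-tag: if a ref under
     * refs/tags/ peels to an object that is already being packed, add
     * the annotated tag object itself to the pack as well.
     */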
1969static int add_ref_tag(const char *path, const unsigned char *sha1, int flag, void *cb_data)
1970{
1971        unsigned char peeled[20];
1972
1973        if (!prefixcmp(path, "refs/tags/") && /* is a tag? */
1974            !peel_ref(path, peeled)        && /* peelable? */
1975            !is_null_sha1(peeled)          && /* annotated tag? */
1976            locate_object_entry(peeled))      /* object packed? */
1977                add_object_entry(sha1, OBJ_TAG, NULL, 0);
1978        return 0;
1979}
1980
1981static void prepare_pack(int window, int depth)
1982{
1983        struct object_entry **delta_list;
1984        uint32_t i, nr_deltas;
1985        unsigned n;
1986
1987        get_object_details();
1988
1989        /*
1990         * If we're locally repacking, then we need to be doubly careful
1991         * from now on in order to make sure no stealth corruption gets
1992         * propagated to the new pack.  Clients receiving streamed packs
1993         * should validate everything they get anyway, so there is no need to incur
1994         * the additional cost here in that case.
1995         */
1996        if (!pack_to_stdout)
1997                do_check_packed_object_crc = 1;
1998
1999        if (!nr_objects || !window || !depth)
2000                return;
2001
2002        delta_list = xmalloc(nr_objects * sizeof(*delta_list));
2003        nr_deltas = n = 0;
2004
2005        for (i = 0; i < nr_objects; i++) {
2006                struct object_entry *entry = objects + i;
2007
2008                if (entry->delta)
2009                        /* This happens if we decided to reuse an existing
2010                         * delta from a pack.  "reuse_delta &&" is implied.
2011                         */
2012                        continue;
2013
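                    /* Objects this small are not worth deltifying. */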
2014                if (entry->size < 50)
2015                        continue;
2016
2017                if (entry->no_try_delta)
2018                        continue;
2019
2020                if (!entry->preferred_base) {
2021                        nr_deltas++;
2022                        if (entry->type < 0)
2023                                die("unable to get type of object %s",
2024                                    sha1_to_hex(entry->idx.sha1));
2025                } else {
2026                        if (entry->type < 0) {
2027                                /*
2028                                 * This object is not found, but we
2029                                 * don't have to include it anyway.
2030                                 */
2031                                continue;
2032                        }
2033                }
2034
2035                delta_list[n++] = entry;
2036        }
2037
2038        if (nr_deltas && n > 1) {
2039                unsigned nr_done = 0;
2040                if (progress)
2041                        progress_state = start_progress("Compressing objects",
2042                                                        nr_deltas);
2043                qsort(delta_list, n, sizeof(*delta_list), type_size_sort);
2044                ll_find_deltas(delta_list, n, window+1, depth, &nr_done);
2045                stop_progress(&progress_state);
2046                if (nr_done != nr_deltas)
2047                        die("inconsistency with delta count");
2048        }
2049        free(delta_list);
2050}
2051
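    /*
     * git_config() callback: map the pack.* configuration variables onto
     * the file-scope knobs above (window, depth, compression, delta
     * cache limits, threads, index version); everything else falls
     * through to git_default_config().
     */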
2052static int git_pack_config(const char *k, const char *v, void *cb)
2053{
2054        if (!strcmp(k, "pack.window")) {
2055                window = git_config_int(k, v);
2056                return 0;
2057        }
2058        if (!strcmp(k, "pack.windowmemory")) {
2059                window_memory_limit = git_config_ulong(k, v);
2060                return 0;
2061        }
2062        if (!strcmp(k, "pack.depth")) {
2063                depth = git_config_int(k, v);
2064                return 0;
2065        }
2066        if (!strcmp(k, "pack.compression")) {
2067                int level = git_config_int(k, v);
2068                if (level == -1)
2069                        level = Z_DEFAULT_COMPRESSION;
2070                else if (level < 0 || level > Z_BEST_COMPRESSION)
2071                        die("bad pack compression level %d", level);
2072                pack_compression_level = level;
2073                pack_compression_seen = 1;
2074                return 0;
2075        }
2076        if (!strcmp(k, "pack.deltacachesize")) {
2077                max_delta_cache_size = git_config_int(k, v);
2078                return 0;
2079        }
2080        if (!strcmp(k, "pack.deltacachelimit")) {
2081                cache_max_small_delta_size = git_config_int(k, v);
2082                return 0;
2083        }
2084        if (!strcmp(k, "pack.threads")) {
2085                delta_search_threads = git_config_int(k, v);
2086                if (delta_search_threads < 0)
2087                        die("invalid number of threads specified (%d)",
2088                            delta_search_threads);
2089#ifdef NO_PTHREADS
2090                if (delta_search_threads != 1)
2091                        warning("no threads support, ignoring %s", k);
2092#endif
2093                return 0;
2094        }
2095        if (!strcmp(k, "pack.indexversion")) {
2096                pack_idx_opts.version = git_config_int(k, v);
2097                if (pack_idx_opts.version > 2)
2098                        die("bad pack.indexversion=%"PRIu32,
2099                            pack_idx_opts.version);
2100                return 0;
2101        }
2102        return git_default_config(k, v, cb);
2103}
2104
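    /*
     * Read the objects to pack from stdin, one per line, in the format
     * "git rev-list --objects" produces:
     *
     *   <40-hex sha1>[ <path>]
     *
     * e.g. (illustrative) "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 README".
     * A line starting with '-' names an edge object: it is recorded as a
     * preferred delta base but not included in the pack.
     */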
2105static void read_object_list_from_stdin(void)
2106{
2107        char line[40 + 1 + PATH_MAX + 2];
2108        unsigned char sha1[20];
2109
2110        for (;;) {
2111                if (!fgets(line, sizeof(line), stdin)) {
2112                        if (feof(stdin))
2113                                break;
2114                        if (!ferror(stdin))
2115                                die("fgets returned NULL, not EOF, not error!");
2116                        if (errno != EINTR)
2117                                die_errno("fgets");
2118                        clearerr(stdin);
2119                        continue;
2120                }
2121                if (line[0] == '-') {
2122                        if (get_sha1_hex(line+1, sha1))
2123                                die("expected edge sha1, got garbage:\n %s",
2124                                    line);
2125                        add_preferred_base(sha1);
2126                        continue;
2127                }
2128                if (get_sha1_hex(line, sha1))
2129                        die("expected sha1, got garbage:\n %s", line);
2130
2131                add_preferred_base_object(line+41);
2132                add_object_entry(sha1, 0, line+41, 0);
2133        }
2134}
2135
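    /*
     * Flag set on every object picked up by the revision walk below, so
     * that the --keep-unreachable scan can tell which packed objects are
     * not yet part of the result.
     */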
2136#define OBJECT_ADDED (1u<<20)
2137
2138static void show_commit(struct commit *commit, void *data)
2139{
2140        add_object_entry(commit->object.sha1, OBJ_COMMIT, NULL, 0);
2141        commit->object.flags |= OBJECT_ADDED;
2142}
2143
2144static void show_object(struct object *obj,
2145                        const struct name_path *path, const char *last,
2146                        void *data)
2147{
2148        char *name = path_name(path, last);
2149
2150        add_preferred_base_object(name);
2151        add_object_entry(obj->sha1, obj->type, name, 0);
2152        obj->flags |= OBJECT_ADDED;
2153
2154        /*
2155         * We will have generated the hash from the name,
2156         * but not saved a pointer to it, so we can free it.
2157         */
2158        free((char *)name);
2159}
2160
2161static void show_edge(struct commit *commit)
2162{
2163        add_preferred_base(commit->object.sha1);
2164}
2165
2166struct in_pack_object {
2167        off_t offset;
2168        struct object *object;
2169};
2170
2171struct in_pack {
2172        int alloc;
2173        int nr;
2174        struct in_pack_object *array;
2175};
2176
2177static void mark_in_pack_object(struct object *object, struct packed_git *p, struct in_pack *in_pack)
2178{
2179        in_pack->array[in_pack->nr].offset = find_pack_entry_one(object->sha1, p);
2180        in_pack->array[in_pack->nr].object = object;
2181        in_pack->nr++;
2182}
2183
2184/*
2185 * Compare the objects in offset order, to emulate the
2186 * "git rev-list --objects" output that produced the pack originally.
2187 */
2188static int ofscmp(const void *a_, const void *b_)
2189{
2190        struct in_pack_object *a = (struct in_pack_object *)a_;
2191        struct in_pack_object *b = (struct in_pack_object *)b_;
2192
2193        if (a->offset < b->offset)
2194                return -1;
2195        else if (a->offset > b->offset)
2196                return 1;
2197        else
2198                return hashcmp(a->object->sha1, b->object->sha1);
2199}
2200
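    /*
     * For --keep-unreachable: also add every object from local, non-kept
     * packs that the revision walk did not pick up, sorted by pack offset
     * to approximate the original "git rev-list --objects" order.
     */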
2201static void add_objects_in_unpacked_packs(struct rev_info *revs)
2202{
2203        struct packed_git *p;
2204        struct in_pack in_pack;
2205        uint32_t i;
2206
2207        memset(&in_pack, 0, sizeof(in_pack));
2208
2209        for (p = packed_git; p; p = p->next) {
2210                const unsigned char *sha1;
2211                struct object *o;
2212
2213                if (!p->pack_local || p->pack_keep)
2214                        continue;
2215                if (open_pack_index(p))
2216                        die("cannot open pack index");
2217
2218                ALLOC_GROW(in_pack.array,
2219                           in_pack.nr + p->num_objects,
2220                           in_pack.alloc);
2221
2222                for (i = 0; i < p->num_objects; i++) {
2223                        sha1 = nth_packed_object_sha1(p, i);
2224                        o = lookup_unknown_object(sha1);
2225                        if (!(o->flags & OBJECT_ADDED))
2226                                mark_in_pack_object(o, p, &in_pack);
2227                        o->flags |= OBJECT_ADDED;
2228                }
2229        }
2230
2231        if (in_pack.nr) {
2232                qsort(in_pack.array, in_pack.nr, sizeof(in_pack.array[0]),
2233                      ofscmp);
2234                for (i = 0; i < in_pack.nr; i++) {
2235                        struct object *o = in_pack.array[i].object;
2236                        add_object_entry(o->sha1, o->type, "", 0);
2237                }
2238        }
2239        free(in_pack.array);
2240}
2241
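    /*
     * Does sha1 exist in a pack we are not repacking, i.e. a non-local
     * pack or one with a .keep file?  The pack that matched last time is
     * remembered in "last_found" and probed first, since consecutive
     * lookups tend to hit the same pack.
     */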
2242static int has_sha1_pack_kept_or_nonlocal(const unsigned char *sha1)
2243{
2244        static struct packed_git *last_found = (void *)1;
2245        struct packed_git *p;
2246
2247        p = (last_found != (void *)1) ? last_found : packed_git;
2248
2249        while (p) {
2250                if ((!p->pack_local || p->pack_keep) &&
2251                        find_pack_entry_one(sha1, p)) {
2252                        last_found = p;
2253                        return 1;
2254                }
2255                if (p == last_found)
2256                        p = packed_git;
2257                else
2258                        p = p->next;
2259                if (p == last_found)
2260                        p = p->next;
2261        }
2262        return 0;
2263}
2264
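    /*
     * For --unpack-unreachable: turn into loose objects everything in
     * local, non-kept packs that is neither going into the new pack nor
     * available from a kept or non-local pack.  Packs older than the
     * optional <time> cutoff are skipped entirely, matching the "newer
     * than <time>" wording of the option.
     */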
2265static void loosen_unused_packed_objects(struct rev_info *revs)
2266{
2267        struct packed_git *p;
2268        uint32_t i;
2269        const unsigned char *sha1;
2270
2271        for (p = packed_git; p; p = p->next) {
2272                if (!p->pack_local || p->pack_keep)
2273                        continue;
2274
2275                if (unpack_unreachable_expiration &&
2276                    p->mtime < unpack_unreachable_expiration)
2277                        continue;
2278
2279                if (open_pack_index(p))
2280                        die("cannot open pack index");
2281
2282                for (i = 0; i < p->num_objects; i++) {
2283                        sha1 = nth_packed_object_sha1(p, i);
2284                        if (!locate_object_entry(sha1) &&
2285                                !has_sha1_pack_kept_or_nonlocal(sha1))
2286                                if (force_object_loose(sha1, p->mtime))
2287                                        die("unable to force loose object");
2288                }
2289        }
2290}
2291
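    /*
     * Internal rev-list mode: the fixed arguments in av select the walk,
     * and further revision arguments are read from stdin up to a blank
     * line; a "--not" line toggles the UNINTERESTING flag for the revs
     * that follow.  After the walk, unreachable objects are kept or
     * loosened as requested.
     */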
2292static void get_object_list(int ac, const char **av)
2293{
2294        struct rev_info revs;
2295        char line[1000];
2296        int flags = 0;
2297
2298        init_revisions(&revs, NULL);
2299        save_commit_buffer = 0;
2300        setup_revisions(ac, av, &revs, NULL);
2301
2302        while (fgets(line, sizeof(line), stdin) != NULL) {
2303                int len = strlen(line);
2304                if (len && line[len - 1] == '\n')
2305                        line[--len] = 0;
2306                if (!len)
2307                        break;
2308                if (*line == '-') {
2309                        if (!strcmp(line, "--not")) {
2310                                flags ^= UNINTERESTING;
2311                                continue;
2312                        }
2313                        die("not a rev '%s'", line);
2314                }
2315                if (handle_revision_arg(line, &revs, flags, 1))
2316                        die("bad revision '%s'", line);
2317        }
2318
2319        if (prepare_revision_walk(&revs))
2320                die("revision walk setup failed");
2321        mark_edges_uninteresting(revs.commits, &revs, show_edge);
2322        traverse_commit_list(&revs, show_commit, show_object, NULL);
2323
2324        if (keep_unreachable)
2325                add_objects_in_unpacked_packs(&revs);
2326        if (unpack_unreachable)
2327                loosen_unused_packed_objects(&revs);
2328}
2329
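    /*
     * Parse --index-version=<version>[,<offset>], e.g. "2" or
     * (illustrative) "2,0x10000": the pack index format version, plus an
     * optional pack offset above which the v2 index switches to 64-bit
     * offset entries; the offset must fit in 31 bits.
     */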
2330static int option_parse_index_version(const struct option *opt,
2331                                      const char *arg, int unset)
2332{
2333        char *c;
2334        const char *val = arg;
2335        pack_idx_opts.version = strtoul(val, &c, 10);
2336        if (pack_idx_opts.version > 2)
2337                die(_("unsupported index version %s"), val);
2338        if (*c == ',' && c[1])
2339                pack_idx_opts.off32_limit = strtoul(c+1, &c, 0);
2340        if (*c || pack_idx_opts.off32_limit & 0x80000000)
2341                die(_("bad index version '%s'"), val);
2342        return 0;
2343}
2344
2345static int option_parse_unpack_unreachable(const struct option *opt,
2346                                           const char *arg, int unset)
2347{
2348        if (unset) {
2349                unpack_unreachable = 0;
2350                unpack_unreachable_expiration = 0;
2351        }
2352        else {
2353                unpack_unreachable = 1;
2354                if (arg)
2355                        unpack_unreachable_expiration = approxidate(arg);
2356        }
2357        return 0;
2358}
2359
2360static int option_parse_ulong(const struct option *opt,
2361                              const char *arg, int unset)
2362{
2363        if (unset)
2364                die(_("option %s does not accept negative form"),
2365                    opt->long_name);
2366
2367        if (!git_parse_ulong(arg, opt->value))
2368                die(_("unable to parse value '%s' for option %s"),
2369                    arg, opt->long_name);
2370        return 0;
2371}
2372
2373#define OPT_ULONG(s, l, v, h) \
2374        { OPTION_CALLBACK, (s), (l), (v), "n", (h),     \
2375          PARSE_OPT_NONEG, option_parse_ulong }
2376
2377int cmd_pack_objects(int argc, const char **argv, const char *prefix)
2378{
2379        int use_internal_rev_list = 0;
2380        int thin = 0;
2381        int all_progress_implied = 0;
2382        const char *rp_av[6];
2383        int rp_ac = 0;
2384        int rev_list_unpacked = 0, rev_list_all = 0, rev_list_reflog = 0;
2385        struct option pack_objects_options[] = {
2386                OPT_SET_INT('q', "quiet", &progress,
2387                            "do not show progress meter", 0),
2388                OPT_SET_INT(0, "progress", &progress,
2389                            "show progress meter", 1),
2390                OPT_SET_INT(0, "all-progress", &progress,
2391                            "show progress meter during object writing phase", 2),
2392                OPT_BOOL(0, "all-progress-implied",
2393                         &all_progress_implied,
2394                         "similar to --all-progress when progress meter is shown"),
2395                { OPTION_CALLBACK, 0, "index-version", NULL, "version[,offset]",
2396                  "write the pack index file in the specified idx format version",
2397                  0, option_parse_index_version },
2398                OPT_ULONG(0, "max-pack-size", &pack_size_limit,
2399                          "maximum size of each output pack file"),
2400                OPT_BOOL(0, "local", &local,
2401                         "ignore borrowed objects from alternate object store"),
2402                OPT_BOOL(0, "incremental", &incremental,
2403                         "ignore packed objects"),
2404                OPT_INTEGER(0, "window", &window,
2405                            "limit pack window by objects"),
2406                OPT_ULONG(0, "window-memory", &window_memory_limit,
2407                          "limit pack window by memory in addition to object limit"),
2408                OPT_INTEGER(0, "depth", &depth,
2409                            "maximum length of delta chain allowed in the resulting pack"),
2410                OPT_BOOL(0, "reuse-delta", &reuse_delta,
2411                         "reuse existing deltas"),
2412                OPT_BOOL(0, "reuse-object", &reuse_object,
2413                         "reuse existing objects"),
2414                OPT_BOOL(0, "delta-base-offset", &allow_ofs_delta,
2415                         "use OFS_DELTA objects"),
2416                OPT_INTEGER(0, "threads", &delta_search_threads,
2417                            "use threads when searching for best delta matches"),
2418                OPT_BOOL(0, "non-empty", &non_empty,
2419                         "do not create an empty pack output"),
2420                OPT_BOOL(0, "revs", &use_internal_rev_list,
2421                         "read revision arguments from standard input"),
2422                { OPTION_SET_INT, 0, "unpacked", &rev_list_unpacked, NULL,
2423                  "limit the objects to those that are not yet packed",
2424                  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
2425                { OPTION_SET_INT, 0, "all", &rev_list_all, NULL,
2426                  "include objects reachable from any reference",
2427                  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
2428                { OPTION_SET_INT, 0, "reflog", &rev_list_reflog, NULL,
2429                  "include objects referred by reflog entries",
2430                  PARSE_OPT_NOARG | PARSE_OPT_NONEG, NULL, 1 },
2431                OPT_BOOL(0, "stdout", &pack_to_stdout,
2432                         "output pack to stdout"),
2433                OPT_BOOL(0, "include-tag", &include_tag,
2434                         "include tag objects that refer to objects to be packed"),
2435                OPT_BOOL(0, "keep-unreachable", &keep_unreachable,
2436                         "keep unreachable objects"),
2437                { OPTION_CALLBACK, 0, "unpack-unreachable", NULL, "time",
2438                  "unpack unreachable objects newer than <time>",
2439                  PARSE_OPT_OPTARG, option_parse_unpack_unreachable },
2440                OPT_BOOL(0, "thin", &thin,
2441                         "create thin packs"),
2442                OPT_BOOL(0, "honor-pack-keep", &ignore_packed_keep,
2443                         "ignore packs that have companion .keep file"),
2444                OPT_INTEGER(0, "compression", &pack_compression_level,
2445                            "pack compression level"),
2446                OPT_SET_INT(0, "keep-true-parents", &grafts_replace_parents,
2447                            "do not hide commits by grafts", 0),
2448                OPT_END(),
2449        };
2450
2451        read_replace_refs = 0;
2452
2453        reset_pack_idx_option(&pack_idx_opts);
2454        git_config(git_pack_config, NULL);
2455        if (!pack_compression_seen && core_compression_seen)
2456                pack_compression_level = core_compression_level;
2457
2458        progress = isatty(2);
2459        argc = parse_options(argc, argv, prefix, pack_objects_options,
2460                             pack_usage, 0);
2461
2462        if (argc) {
2463                base_name = argv[0];
2464                argc--;
2465        }
2466        if (pack_to_stdout != !base_name || argc)
2467                usage_with_options(pack_usage, pack_objects_options);
2468
2469        rp_av[rp_ac++] = "pack-objects";
2470        if (thin) {
2471                use_internal_rev_list = 1;
2472                rp_av[rp_ac++] = "--objects-edge";
2473        } else
2474                rp_av[rp_ac++] = "--objects";
2475
2476        if (rev_list_all) {
2477                use_internal_rev_list = 1;
2478                rp_av[rp_ac++] = "--all";
2479        }
2480        if (rev_list_reflog) {
2481                use_internal_rev_list = 1;
2482                rp_av[rp_ac++] = "--reflog";
2483        }
2484        if (rev_list_unpacked) {
2485                use_internal_rev_list = 1;
2486                rp_av[rp_ac++] = "--unpacked";
2487        }
2488
2489        if (!reuse_object)
2490                reuse_delta = 0;
2491        if (pack_compression_level == -1)
2492                pack_compression_level = Z_DEFAULT_COMPRESSION;
2493        else if (pack_compression_level < 0 || pack_compression_level > Z_BEST_COMPRESSION)
2494                die("bad pack compression level %d", pack_compression_level);
2495#ifdef NO_PTHREADS
2496        if (delta_search_threads != 1)
2497                warning("no threads support, ignoring --threads");
2498#endif
2499        if (!pack_to_stdout && !pack_size_limit)
2500                pack_size_limit = pack_size_limit_cfg;
2501        if (pack_to_stdout && pack_size_limit)
2502                die("--max-pack-size cannot be used to build a pack for transfer.");
2503        if (pack_size_limit && pack_size_limit < 1024*1024) {
2504                warning("minimum pack size limit is 1 MiB");
2505                pack_size_limit = 1024*1024;
2506        }
2507
2508        if (!pack_to_stdout && thin)
2509                die("--thin cannot be used to build an indexable pack.");
2510
2511        if (keep_unreachable && unpack_unreachable)
2512                die("--keep-unreachable and --unpack-unreachable are incompatible.");
2513
2514        if (progress && all_progress_implied)
2515                progress = 2;
2516
2517        prepare_packed_git();
2518
2519        if (progress)
2520                progress_state = start_progress("Counting objects", 0);
2521        if (!use_internal_rev_list)
2522                read_object_list_from_stdin();
2523        else {
2524                rp_av[rp_ac] = NULL;
2525                get_object_list(rp_ac, rp_av);
2526        }
2527        cleanup_preferred_base();
2528        if (include_tag && nr_result)
2529                for_each_ref(add_ref_tag, NULL);
2530        stop_progress(&progress_state);
2531
2532        if (non_empty && !nr_result)
2533                return 0;
2534        if (nr_result)
2535                prepare_pack(window, depth);
2536        write_pack_file();
2537        if (progress)
2538                fprintf(stderr, "Total %"PRIu32" (delta %"PRIu32"),"
2539                        " reused %"PRIu32" (delta %"PRIu32")\n",
2540                        written, written_delta, reused, reused_delta);
2541        return 0;
2542}