split-index.c on commit "general improvements" (43abf13)
#include "cache.h"
#include "split-index.h"
#include "ewah/ewok.h"

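/*
 * Return the split_index structure of the given index, creating and
 * attaching one (with a single reference) if it does not exist yet.
 */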
struct split_index *init_split_index(struct index_state *istate)
{
        if (!istate->split_index) {
                istate->split_index = xcalloc(1, sizeof(*istate->split_index));
                istate->split_index->refcount = 1;
        }
        return istate->split_index;
}

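/*
 * Parse the "link" index extension: the hash of the shared (base)
 * index file, optionally followed by the EWAH-compressed delete and
 * replace bitmaps.
 */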
int read_link_extension(struct index_state *istate,
                         const void *data_, unsigned long sz)
{
        const unsigned char *data = data_;
        struct split_index *si;
        int ret;

        if (sz < the_hash_algo->rawsz)
                return error("corrupt link extension (too short)");
        si = init_split_index(istate);
        hashcpy(si->base_oid.hash, data);
        data += the_hash_algo->rawsz;
        sz -= the_hash_algo->rawsz;
        if (!sz)
                return 0;
        si->delete_bitmap = ewah_new();
        ret = ewah_read_mmap(si->delete_bitmap, data, sz);
        if (ret < 0)
                return error("corrupt delete bitmap in link extension");
        data += ret;
        sz -= ret;
        si->replace_bitmap = ewah_new();
        ret = ewah_read_mmap(si->replace_bitmap, data, sz);
        if (ret < 0)
                return error("corrupt replace bitmap in link extension");
        if (ret != sz)
                return error("garbage at the end of link extension");
        return 0;
}

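/*
 * Serialize the "link" extension: the hash of the shared index and,
 * if they exist, the delete and replace bitmaps.
 */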
int write_link_extension(struct strbuf *sb,
                         struct index_state *istate)
{
        struct split_index *si = istate->split_index;
        strbuf_add(sb, si->base_oid.hash, the_hash_algo->rawsz);
        if (!si->delete_bitmap && !si->replace_bitmap)
                return 0;
        ewah_serialize_strbuf(si->delete_bitmap, sb);
        ewah_serialize_strbuf(si->replace_bitmap, sb);
        return 0;
}

static void mark_base_index_entries(struct index_state *base)
{
        int i;
        /*
         * To keep track of the shared entries between
         * istate->base->cache[] and istate->cache[], base entry
         * position is stored in each base entry. All positions start
         * from 1 instead of 0, which is reserved to say "this is a new
         * entry".
         */
        for (i = 0; i < base->cache_nr; i++)
                base->cache[i]->index = i + 1;
}

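/*
 * Turn the current in-memory index into the shared (base) index of its
 * split_index: the cache entries, and the memory pool backing them,
 * are handed over to the newly allocated base.
 */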
void move_cache_to_base_index(struct index_state *istate)
{
        struct split_index *si = istate->split_index;
        int i;

        /*
         * If there was a previous base index, then transfer ownership of allocated
         * entries to the parent index.
         */
        if (si->base &&
                si->base->ce_mem_pool) {

                if (!istate->ce_mem_pool)
                        mem_pool_init(&istate->ce_mem_pool, 0);

                mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
        }

        si->base = xcalloc(1, sizeof(*si->base));
        si->base->version = istate->version;
        /* zero timestamp disables racy test in ce_write_index() */
        si->base->timestamp = istate->timestamp;
        ALLOC_GROW(si->base->cache, istate->cache_nr, si->base->cache_alloc);
        si->base->cache_nr = istate->cache_nr;

        /*
         * The mem_pool needs to move with the allocated entries.
         */
        si->base->ce_mem_pool = istate->ce_mem_pool;
        istate->ce_mem_pool = NULL;

        COPY_ARRAY(si->base->cache, istate->cache, istate->cache_nr);
        mark_base_index_entries(si->base);
        for (i = 0; i < si->base->cache_nr; i++)
                si->base->cache[i]->ce_flags &= ~CE_UPDATE_IN_BASE;
}

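/* ewah_each_bit() callback: mark a base index entry for removal. */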
static void mark_entry_for_delete(size_t pos, void *data)
{
        struct index_state *istate = data;
        if (pos >= istate->cache_nr)
                die("position for delete %d exceeds base index size %d",
                    (int)pos, istate->cache_nr);
        istate->cache[pos]->ce_flags |= CE_REMOVE;
        istate->split_index->nr_deletions++;
}

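/*
 * ewah_each_bit() callback: overwrite the base index entry at 'pos'
 * with the next nameless replacement entry stored in the split index.
 */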
static void replace_entry(size_t pos, void *data)
{
        struct index_state *istate = data;
        struct split_index *si = istate->split_index;
        struct cache_entry *dst, *src;

        if (pos >= istate->cache_nr)
                die("position for replacement %d exceeds base index size %d",
                    (int)pos, istate->cache_nr);
        if (si->nr_replacements >= si->saved_cache_nr)
                die("too many replacements (%d vs %d)",
                    si->nr_replacements, si->saved_cache_nr);
        dst = istate->cache[pos];
        if (dst->ce_flags & CE_REMOVE)
                die("entry %d is marked as both replaced and deleted",
                    (int)pos);
        src = si->saved_cache[si->nr_replacements];
        if (ce_namelen(src))
                die("corrupt link extension, entry %d should have "
                    "zero length name", (int)pos);
        src->index = pos + 1;
        src->ce_flags |= CE_UPDATE_IN_BASE;
        src->ce_namelen = dst->ce_namelen;
        copy_cache_entry(dst, src);
        discard_cache_entry(src);
        si->nr_replacements++;
}

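/*
 * Reconstruct the full in-memory index from the shared (base) index,
 * the delete/replace bitmaps and the entries read from the split index.
 */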
void merge_base_index(struct index_state *istate)
{
        struct split_index *si = istate->split_index;
        unsigned int i;

        mark_base_index_entries(si->base);

        si->saved_cache     = istate->cache;
        si->saved_cache_nr  = istate->cache_nr;
        istate->cache_nr    = si->base->cache_nr;
        istate->cache       = NULL;
        istate->cache_alloc = 0;
        ALLOC_GROW(istate->cache, istate->cache_nr, istate->cache_alloc);
        COPY_ARRAY(istate->cache, si->base->cache, istate->cache_nr);

        si->nr_deletions = 0;
        si->nr_replacements = 0;
        ewah_each_bit(si->replace_bitmap, replace_entry, istate);
        ewah_each_bit(si->delete_bitmap, mark_entry_for_delete, istate);
        if (si->nr_deletions)
                remove_marked_cache_entries(istate, 0);

        for (i = si->nr_replacements; i < si->saved_cache_nr; i++) {
                if (!ce_namelen(si->saved_cache[i]))
                        die("corrupt link extension, entry %d should "
                            "have non-zero length name", i);
                add_index_entry(istate, si->saved_cache[i],
                                ADD_CACHE_OK_TO_ADD |
                                ADD_CACHE_KEEP_CACHE_TREE |
                                /*
                                 * we may have to replay what
                                 * merge-recursive.c:update_stages()
                                 * does, which has this flag on
                                 */
                                ADD_CACHE_SKIP_DFCHECK);
                si->saved_cache[i] = NULL;
        }

        ewah_free(si->delete_bitmap);
        ewah_free(si->replace_bitmap);
        FREE_AND_NULL(si->saved_cache);
        si->delete_bitmap  = NULL;
        si->replace_bitmap = NULL;
        si->saved_cache_nr = 0;
}

/*
 * Compare most of the fields in two cache entries, i.e. all except the
 * hashmap_entry and the name.
 */
static int compare_ce_content(struct cache_entry *a, struct cache_entry *b)
{
        const unsigned int ondisk_flags = CE_STAGEMASK | CE_VALID |
                                          CE_EXTENDED_FLAGS;
        unsigned int ce_flags = a->ce_flags;
        unsigned int base_flags = b->ce_flags;
        int ret;

        /* only on-disk flags matter */
        a->ce_flags &= ondisk_flags;
        b->ce_flags &= ondisk_flags;
        ret = memcmp(&a->ce_stat_data, &b->ce_stat_data,
                     offsetof(struct cache_entry, name) -
                     offsetof(struct cache_entry, ce_stat_data));
        a->ce_flags = ce_flags;
        b->ce_flags = base_flags;

        return ret;
}

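/*
 * Decide which entries go into the split index: compute the delete and
 * replace bitmaps against the shared index, then temporarily swap
 * istate->cache[] for the list of entries to be written out.  The
 * original array is restored by finish_writing_split_index().
 */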
void prepare_to_write_split_index(struct index_state *istate)
{
        struct split_index *si = init_split_index(istate);
        struct cache_entry **entries = NULL, *ce;
        int i, nr_entries = 0, nr_alloc = 0;

        si->delete_bitmap = ewah_new();
        si->replace_bitmap = ewah_new();

        if (si->base) {
                /*
                 * Go through istate->cache[] and mark CE_MATCHED on
                 * entries with a positive index. We'll go through
                 * base->cache[] later to delete all entries in base
                 * that are not marked with either CE_MATCHED or
                 * CE_UPDATE_IN_BASE. If istate->cache[i] is a
                 * duplicate, deduplicate it.
                 */
                for (i = 0; i < istate->cache_nr; i++) {
                        struct cache_entry *base;
                        ce = istate->cache[i];
                        if (!ce->index) {
                                /*
                                 * During simple update index operations this
                                 * is a cache entry that is not present in
                                 * the shared index.  It will be added to the
                                 * split index.
                                 *
                                 * However, it might also represent a file
                                 * that already has a cache entry in the
                                 * shared index, but a new index has just
                                 * been constructed by unpack_trees(), and
                                 * this entry now refers to different content
                                 * than what was recorded in the original
                                 * index, e.g. during 'read-tree -m HEAD^' or
                                 * 'checkout HEAD^'.  In this case the
                                 * original entry in the shared index will be
                                 * marked as deleted, and this entry will be
                                 * added to the split index.
                                 */
                                continue;
                        }
                        if (ce->index > si->base->cache_nr) {
                                BUG("ce refers to a shared ce at %d, which is beyond the shared index size %d",
                                    ce->index, si->base->cache_nr);
                        }
                        ce->ce_flags |= CE_MATCHED; /* or "shared" */
                        base = si->base->cache[ce->index - 1];
                        if (ce == base) {
                                /* The entry is present in the shared index. */
                                if (ce->ce_flags & CE_UPDATE_IN_BASE) {
                                        /*
                                         * Already marked for inclusion in
                                         * the split index, either because
                                         * the corresponding file was
                                         * modified and the cached stat data
                                         * was refreshed, or because there
                                         * is already a replacement entry in
                                         * the split index.
                                         * Nothing more to do here.
                                         */
                                } else if (!ce_uptodate(ce) &&
                                           is_racy_timestamp(istate, ce)) {
                                        /*
                                         * A racily clean cache entry stored
                                         * only in the shared index: it must
                                         * be added to the split index, so
                                         * the subsequent do_write_index()
                                         * can smudge its stat data.
                                         */
                                        ce->ce_flags |= CE_UPDATE_IN_BASE;
                                } else {
                                        /*
                                         * The entry is only present in the
                                         * shared index and it was not
                                         * refreshed.
                                         * Just leave it there.
                                         */
                                }
                                continue;
                        }
                        if (ce->ce_namelen != base->ce_namelen ||
                            strcmp(ce->name, base->name)) {
                                ce->index = 0;
                                continue;
                        }
                        /*
                         * This is the copy of a cache entry that is present
                         * in the shared index, created by unpack_trees()
                         * while it constructed a new index.
                         */
                        if (ce->ce_flags & CE_UPDATE_IN_BASE) {
                                /*
                                 * Already marked for inclusion in the split
                                 * index, either because the corresponding
                                 * file was modified and the cached stat data
                                 * was refreshed, or because the original
                                 * entry already had a replacement entry in
                                 * the split index.
                                 * Nothing to do.
                                 */
                        } else if (!ce_uptodate(ce) &&
                                   is_racy_timestamp(istate, ce)) {
                                /*
                                 * A copy of a racily clean cache entry from
                                 * the shared index.  It must be added to
                                 * the split index, so the subsequent
                                 * do_write_index() can smudge its stat data.
                                 */
                                ce->ce_flags |= CE_UPDATE_IN_BASE;
                        } else {
                                /*
                                 * Thoroughly compare the cached data to see
                                 * whether it should be marked for inclusion
                                 * in the split index.
                                 *
                                 * This comparison might be unnecessary, as
                                 * code paths modifying the cached data do
                                 * set CE_UPDATE_IN_BASE as well.
                                 */
                                if (compare_ce_content(ce, base))
                                        ce->ce_flags |= CE_UPDATE_IN_BASE;
                        }
                        discard_cache_entry(base);
                        si->base->cache[ce->index - 1] = ce;
                }
                for (i = 0; i < si->base->cache_nr; i++) {
                        ce = si->base->cache[i];
                        if ((ce->ce_flags & CE_REMOVE) ||
                            !(ce->ce_flags & CE_MATCHED))
                                ewah_set(si->delete_bitmap, i);
                        else if (ce->ce_flags & CE_UPDATE_IN_BASE) {
                                ewah_set(si->replace_bitmap, i);
                                ce->ce_flags |= CE_STRIP_NAME;
                                ALLOC_GROW(entries, nr_entries+1, nr_alloc);
                                entries[nr_entries++] = ce;
                        }
                        if (is_null_oid(&ce->oid))
                                istate->drop_cache_tree = 1;
                }
        }

        for (i = 0; i < istate->cache_nr; i++) {
                ce = istate->cache[i];
                if ((!si->base || !ce->index) && !(ce->ce_flags & CE_REMOVE)) {
                        assert(!(ce->ce_flags & CE_STRIP_NAME));
                        ALLOC_GROW(entries, nr_entries+1, nr_alloc);
                        entries[nr_entries++] = ce;
                }
                ce->ce_flags &= ~CE_MATCHED;
        }

        /*
         * take cache[] out temporarily, put entries[] in its place
         * for writing
         */
        si->saved_cache = istate->cache;
        si->saved_cache_nr = istate->cache_nr;
        istate->cache = entries;
        istate->cache_nr = nr_entries;
}

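/*
 * Restore the cache[] array that prepare_to_write_split_index()
 * temporarily replaced, and free the bitmaps used for writing.
 */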
void finish_writing_split_index(struct index_state *istate)
{
        struct split_index *si = init_split_index(istate);

        ewah_free(si->delete_bitmap);
        ewah_free(si->replace_bitmap);
        si->delete_bitmap = NULL;
        si->replace_bitmap = NULL;
        free(istate->cache);
        istate->cache = si->saved_cache;
        istate->cache_nr = si->saved_cache_nr;
}

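/*
 * Drop one reference to the split index; once the last reference is
 * gone, discard the shared (base) index and free the structure.
 */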
void discard_split_index(struct index_state *istate)
{
        struct split_index *si = istate->split_index;
        if (!si)
                return;
        istate->split_index = NULL;
        si->refcount--;
        if (si->refcount)
                return;
        if (si->base) {
                discard_index(si->base);
                free(si->base);
        }
        free(si);
}

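/*
 * Dispose of a cache entry that is being dropped from istate: if it is
 * also referenced by the shared (base) index, only mark it CE_REMOVE
 * there; otherwise free it.
 */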
void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce)
{
        if (ce->index &&
            istate->split_index &&
            istate->split_index->base &&
            ce->index <= istate->split_index->base->cache_nr &&
            ce == istate->split_index->base->cache[ce->index - 1])
                ce->ce_flags |= CE_REMOVE;
        else
                discard_cache_entry(ce);
}

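/*
 * If old_entry also occupies a slot in the base index, install
 * new_entry in that slot, discarding whatever entry was stored there
 * unless it is old_entry itself.
 */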
void replace_index_entry_in_base(struct index_state *istate,
                                 struct cache_entry *old_entry,
                                 struct cache_entry *new_entry)
{
        if (old_entry->index &&
            istate->split_index &&
            istate->split_index->base &&
            old_entry->index <= istate->split_index->base->cache_nr) {
                new_entry->index = old_entry->index;
                if (old_entry != istate->split_index->base->cache[new_entry->index - 1])
                        discard_cache_entry(istate->split_index->base->cache[new_entry->index - 1]);
                istate->split_index->base->cache[new_entry->index - 1] = new_entry;
        }
}

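/* Turn on the split-index feature for this index, if it is not on already. */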
void add_split_index(struct index_state *istate)
{
        if (!istate->split_index) {
                init_split_index(istate);
                istate->cache_changed |= SPLIT_INDEX_ORDERED;
        }
}

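/*
 * Fold the shared (base) index back into the main index and turn off
 * the split-index feature.
 */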
void remove_split_index(struct index_state *istate)
{
        if (istate->split_index) {
                if (istate->split_index->base) {
                        /*
                         * When removing the split index, we need to move
                         * ownership of the mem_pool associated with the
                         * base index to the main index. There may be cache entries
                         * allocated from the base's memory pool that are shared with
                         * the_index.cache[].
                         */
                        mem_pool_combine(istate->ce_mem_pool,
                                         istate->split_index->base->ce_mem_pool);

                        /*
                         * The split index no longer owns the mem_pool backing
                         * its cache array. As we are discarding this index,
                         * mark the index as having no cache entries, so it
                         * will not attempt to clean up the cache entries or
                         * validate them.
                         */
                        istate->split_index->base->cache_nr = 0;
                }

                /*
                 * We can discard the split index because its
                 * memory pool has been incorporated into the
                 * memory pool associated with the_index.
                 */
                discard_split_index(istate);

                istate->cache_changed |= SOMETHING_CHANGED;
        }
}