/* unpack-trees.c, as of commit 980ee77 ("Merge branch 'sb/submodule-update-initial-runs-custom-script'") */
#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "dir.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "progress.h"
#include "refs.h"
#include "attr.h"
#include "split-index.h"

/*
 * Error messages expected by scripts out of plumbing commands such as
 * read-tree.  Non-scripted Porcelain is not required to use these messages
 * and is in fact encouraged to reword them to better suit its particular
 * situation.  See how "git checkout" and "git merge" replace them using
 * setup_unpack_trees_porcelain(), for example.
 */
static const char *unpack_plumbing_errors[NB_UNPACK_TREES_ERROR_TYPES] = {
	/* ERROR_WOULD_OVERWRITE */
	"Entry '%s' would be overwritten by merge. Cannot merge.",

	/* ERROR_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot merge.",

	/* ERROR_NOT_UPTODATE_DIR */
	"Updating '%s' would lose untracked files in it",

	/* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */
	"Untracked working tree file '%s' would be overwritten by merge.",

	/* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */
	"Untracked working tree file '%s' would be removed by merge.",

	/* ERROR_BIND_OVERLAP */
	"Entry '%s' overlaps with '%s'.  Cannot bind.",

	/* ERROR_SPARSE_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot update sparse checkout.",

	/* ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN */
	"Working tree file '%s' would be overwritten by sparse checkout update.",

	/* ERROR_WOULD_LOSE_ORPHANED_REMOVED */
	"Working tree file '%s' would be removed by sparse checkout update.",
};

#define ERRORMSG(o,type) \
	( ((o) && (o)->msgs[(type)]) \
	  ? ((o)->msgs[(type)])      \
	  : (unpack_plumbing_errors[(type)]) )

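/*
 * Note: ERRORMSG() prefers a message installed in o->msgs[] (e.g. by
 * setup_unpack_trees_porcelain() below) and falls back to the plumbing
 * wording above only when no porcelain message was set up.
 */
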
static const char *super_prefixed(const char *path)
{
	/*
	 * It is necessary and sufficient to have two static buffers
	 * here, as the return value of this function is fed to
	 * error() using the unpack_*_errors[] templates we see above.
	 */
	static struct strbuf buf[2] = {STRBUF_INIT, STRBUF_INIT};
	static int super_prefix_len = -1;
	static unsigned idx = ARRAY_SIZE(buf) - 1;

	if (super_prefix_len < 0) {
		const char *super_prefix = get_super_prefix();
		if (!super_prefix) {
			super_prefix_len = 0;
		} else {
			int i;
			for (i = 0; i < ARRAY_SIZE(buf); i++)
				strbuf_addstr(&buf[i], super_prefix);
			super_prefix_len = buf[0].len;
		}
	}

	if (!super_prefix_len)
		return path;

	if (++idx >= ARRAY_SIZE(buf))
		idx = 0;

	strbuf_setlen(&buf[idx], super_prefix_len);
	strbuf_addstr(&buf[idx], path);

	return buf[idx].buf;
}
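
/*
 * Of the templates above, only ERROR_BIND_OVERLAP formats two paths, so
 * rotating between two buffers is enough for both arguments of a single
 * error() call to remain valid at the same time.
 */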

void setup_unpack_trees_porcelain(struct unpack_trees_options *opts,
				  const char *cmd)
{
	int i;
	const char **msgs = opts->msgs;
	const char *msg;

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by checkout:\n%%s"
			  "Please commit your changes or stash them before you switch branches.")
		      : _("Your local changes to the following files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by merge:\n%%s"
			  "Please commit your changes or stash them before you merge.")
		      : _("Your local changes to the following files would be overwritten by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by %s:\n%%s"
			  "Please commit your changes or stash them before you %s.")
		      : _("Your local changes to the following files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] =
		xstrfmt(msg, cmd, cmd);

	msgs[ERROR_NOT_UPTODATE_DIR] =
		_("Updating the following directories would lose untracked files in them:\n%s");

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be removed by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be removed by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be removed by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = xstrfmt(msg, cmd, cmd);

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be overwritten by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = xstrfmt(msg, cmd, cmd);

	/*
	 * Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we
	 * cannot easily display it as a list.
	 */
	msgs[ERROR_BIND_OVERLAP] = _("Entry '%s' overlaps with '%s'.  Cannot bind.");

	msgs[ERROR_SPARSE_NOT_UPTODATE_FILE] =
		_("Cannot update sparse checkout: the following entries are not up-to-date:\n%s");
	msgs[ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN] =
		_("The following working tree files would be overwritten by sparse checkout update:\n%s");
	msgs[ERROR_WOULD_LOSE_ORPHANED_REMOVED] =
		_("The following working tree files would be removed by sparse checkout update:\n%s");

	opts->show_all_errors = 1;
	/* rejected paths may not have a static buffer */
	for (i = 0; i < ARRAY_SIZE(opts->unpack_rejects); i++)
		opts->unpack_rejects[i].strdup_strings = 1;
}
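
/*
 * A minimal usage sketch (the caller shown here is illustrative, not a
 * quote of any builtin): a porcelain command installs friendlier
 * messages before unpacking, e.g.
 *
 *	struct unpack_trees_options opts;
 *	memset(&opts, 0, sizeof(opts));
 *	opts.head_idx = -1;
 *	opts.src_index = &the_index;
 *	opts.dst_index = &the_index;
 *	setup_unpack_trees_porcelain(&opts, "checkout");
 *	if (unpack_trees(nr_trees, trees, &opts))
 *		return -1;
 */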

static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
			 unsigned int set, unsigned int clear)
{
	clear |= CE_HASHED;

	if (set & CE_REMOVE)
		set |= CE_WT_REMOVE;

	ce->ce_flags = (ce->ce_flags & ~clear) | set;
	return add_index_entry(&o->result, ce,
			       ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
}

static struct cache_entry *dup_entry(const struct cache_entry *ce)
{
	unsigned int size = ce_size(ce);
	struct cache_entry *new = xmalloc(size);

	memcpy(new, ce, size);
	return new;
}

static void add_entry(struct unpack_trees_options *o,
		      const struct cache_entry *ce,
		      unsigned int set, unsigned int clear)
{
	do_add_entry(o, dup_entry(ce), set, clear);
}

/*
 * Add an error message for path <path> corresponding to the error
 * type <e>.  Depending on o->show_all_errors the message is either
 * shown right away or queued for display_error_msgs() later.
 */
static int add_rejected_path(struct unpack_trees_options *o,
			     enum unpack_trees_error_types e,
			     const char *path)
{
	if (!o->show_all_errors)
		return error(ERRORMSG(o, e), super_prefixed(path));

	/*
	 * Otherwise, insert in a list for future display by
	 * display_error_msgs()
	 */
	string_list_append(&o->unpack_rejects[e], path);
	return -1;
}

/*
 * display all the error messages stored in a nice way
 */
static void display_error_msgs(struct unpack_trees_options *o)
{
	int e, i;
	int something_displayed = 0;
	for (e = 0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) {
		struct string_list *rejects = &o->unpack_rejects[e];
		if (rejects->nr > 0) {
			struct strbuf path = STRBUF_INIT;
			something_displayed = 1;
			for (i = 0; i < rejects->nr; i++)
				strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
			error(ERRORMSG(o, e), super_prefixed(path.buf));
			strbuf_release(&path);
		}
		string_list_clear(rejects, 0);
	}
	if (something_displayed)
		fprintf(stderr, _("Aborting\n"));
}

/*
 * Unlink the last component and schedule the leading directories for
 * removal, such that empty directories get removed.
 */
static void unlink_entry(const struct cache_entry *ce)
{
	if (!check_leading_path(ce->name, ce_namelen(ce)))
		return;
	if (remove_or_warn(ce->ce_mode, ce->name))
		return;
	schedule_dir_for_removal(ce->name, ce_namelen(ce));
}

static struct progress *get_progress(struct unpack_trees_options *o)
{
	unsigned cnt = 0, total = 0;
	struct index_state *index = &o->result;

	if (!o->update || !o->verbose_update)
		return NULL;

	for (; cnt < index->cache_nr; cnt++) {
		const struct cache_entry *ce = index->cache[cnt];
		if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE))
			total++;
	}

	return start_progress_delay(_("Checking out files"),
				    total, 50, 1);
}

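/*
 * Two passes over the resulting index: first perform every scheduled
 * worktree removal (and drop those entries from the index), then check
 * out every entry flagged CE_UPDATE.  Removals go first so that, for
 * example, a path that turns from a directory into a file has been
 * cleared away before the file is written.
 */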
static int check_updates(struct unpack_trees_options *o)
{
	unsigned cnt = 0;
	int errs = 0;
	struct progress *progress = NULL;
	struct index_state *index = &o->result;
	struct checkout state = CHECKOUT_INIT;
	int i;

	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;
	state.istate = index;

	progress = get_progress(o);

	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKOUT, index);
	for (i = 0; i < index->cache_nr; i++) {
		const struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_WT_REMOVE) {
			display_progress(progress, ++cnt);
			if (o->update && !o->dry_run)
				unlink_entry(ce);
		}
	}
	remove_marked_cache_entries(index);
	remove_scheduled_dirs();

	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_UPDATE) {
			if (ce->ce_flags & CE_WT_REMOVE)
				die("BUG: both update and delete flags are set on %s",
				    ce->name);
			display_progress(progress, ++cnt);
			ce->ce_flags &= ~CE_UPDATE;
			if (o->update && !o->dry_run) {
				errs |= checkout_entry(ce, &state, NULL);
			}
		}
	}
	stop_progress(&progress);
	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKIN, NULL);
	return errs != 0;
}

static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o);
static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types,
				struct unpack_trees_options *o);

static int apply_sparse_checkout(struct index_state *istate,
				 struct cache_entry *ce,
				 struct unpack_trees_options *o)
{
	int was_skip_worktree = ce_skip_worktree(ce);

	if (ce->ce_flags & CE_NEW_SKIP_WORKTREE)
		ce->ce_flags |= CE_SKIP_WORKTREE;
	else
		ce->ce_flags &= ~CE_SKIP_WORKTREE;
	if (was_skip_worktree != ce_skip_worktree(ce)) {
		ce->ce_flags |= CE_UPDATE_IN_BASE;
		istate->cache_changed |= CE_ENTRY_CHANGED;
	}

	/*
	 * if (!was_skip_worktree && !ce_skip_worktree()) {
	 *	This is perfectly normal. Move on;
	 * }
	 */

	/*
	 * Merge strategies may set CE_UPDATE|CE_REMOVE outside the checkout
	 * area as a result of ce_skip_worktree() shortcuts in
	 * verify_absent() and verify_uptodate().
	 * Make sure they don't modify the worktree if they are already
	 * outside the checkout area.
	 */
	if (was_skip_worktree && ce_skip_worktree(ce)) {
		ce->ce_flags &= ~CE_UPDATE;

		/*
		 * By default, when CE_REMOVE is on, CE_WT_REMOVE is also
		 * on to get that file removed from both the index and the
		 * worktree.  If that file is already outside the worktree
		 * area, don't bother removing it.
		 */
		if (ce->ce_flags & CE_REMOVE)
			ce->ce_flags &= ~CE_WT_REMOVE;
	}

	if (!was_skip_worktree && ce_skip_worktree(ce)) {
		/*
		 * If CE_UPDATE is set, verify_uptodate() must have been
		 * called already; also, stat info may have been lost after
		 * merged_entry(), so calling verify_uptodate() again may
		 * fail.
		 */
		if (!(ce->ce_flags & CE_UPDATE) && verify_uptodate_sparse(ce, o))
			return -1;
		ce->ce_flags |= CE_WT_REMOVE;
		ce->ce_flags &= ~CE_UPDATE;
	}
	if (was_skip_worktree && !ce_skip_worktree(ce)) {
		if (verify_absent_sparse(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o))
			return -1;
		ce->ce_flags |= CE_UPDATE;
	}
	return 0;
}
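
/*
 * In short: an entry that stays outside the checkout area has its
 * CE_UPDATE/CE_WT_REMOVE flags dropped, an entry leaving the checkout
 * area gets CE_WT_REMOVE (after an uptodate check), and an entry
 * entering it gets CE_UPDATE (after an absent check).
 */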

static inline int call_unpack_fn(const struct cache_entry * const *src,
				 struct unpack_trees_options *o)
{
	int ret = o->fn(src, o);
	if (ret > 0)
		ret = 0;
	return ret;
}

static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o)
{
	ce->ce_flags |= CE_UNPACKED;

	if (o->cache_bottom < o->src_index->cache_nr &&
	    o->src_index->cache[o->cache_bottom] == ce) {
		int bottom = o->cache_bottom;
		while (bottom < o->src_index->cache_nr &&
		       o->src_index->cache[bottom]->ce_flags & CE_UNPACKED)
			bottom++;
		o->cache_bottom = bottom;
	}
}
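
/*
 * o->cache_bottom tracks the lowest position in the source index that
 * may still hold an entry that has not been unpacked; advancing it past
 * a contiguous run of CE_UNPACKED entries keeps the scans in
 * next_cache_entry() and find_cache_pos() from revisiting them.
 */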

static void mark_all_ce_unused(struct index_state *index)
{
	int i;
	for (i = 0; i < index->cache_nr; i++)
		index->cache[i]->ce_flags &= ~(CE_UNPACKED | CE_ADDED | CE_NEW_SKIP_WORKTREE);
}

static int locate_in_src_index(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);
	if (pos < 0)
		pos = -1 - pos;
	return pos;
}

/*
 * We call unpack_index_entry() with an unmerged cache entry
 * only in diff-index, and it wants a single callback.  Skip
 * the other unmerged entry with the same name.
 */
static void mark_ce_used_same_name(struct cache_entry *ce,
				   struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos;

	for (pos = locate_in_src_index(ce, o); pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		mark_ce_used(next, o);
	}
}

static struct cache_entry *next_cache_entry(struct unpack_trees_options *o)
{
	const struct index_state *index = o->src_index;
	int pos = o->cache_bottom;

	while (pos < index->cache_nr) {
		struct cache_entry *ce = index->cache[pos];
		if (!(ce->ce_flags & CE_UNPACKED))
			return ce;
		pos++;
	}
	return NULL;
}

static void add_same_unmerged(const struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);

	if (0 <= pos)
		die("programming error in a caller of mark_ce_used_same_name");
	for (pos = -pos - 1; pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		add_entry(o, next, 0, 0);
		mark_ce_used(next, o);
	}
}

static int unpack_index_entry(struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	int ret;

	src[0] = ce;

	mark_ce_used(ce, o);
	if (ce_stage(ce)) {
		if (o->skip_unmerged) {
			add_entry(o, ce, 0, 0);
			return 0;
		}
	}
	ret = call_unpack_fn(src, o);
	if (ce_stage(ce))
		mark_ce_used_same_name(ce, o);
	return ret;
}

static int find_cache_pos(struct traverse_info *, const struct name_entry *);

static void restore_cache_bottom(struct traverse_info *info, int bottom)
{
	struct unpack_trees_options *o = info->data;

	if (o->diff_index_cached)
		return;
	o->cache_bottom = bottom;
}

static int switch_cache_bottom(struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int ret, pos;

	if (o->diff_index_cached)
		return 0;
	ret = o->cache_bottom;
	pos = find_cache_pos(info->prev, &info->name);

	if (pos < -1)
		o->cache_bottom = -2 - pos;
	else if (pos < 0)
		o->cache_bottom = o->src_index->cache_nr;
	return ret;
}
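
/*
 * switch_cache_bottom() above decodes the "-2 - pos" encoding that
 * find_cache_pos() (further below) uses for "exact match, but it is a
 * directory", so the recursive walk starts its index scan at the first
 * entry found under that directory.
 */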

static int traverse_trees_recursive(int n, unsigned long dirmask,
				    unsigned long df_conflicts,
				    struct name_entry *names,
				    struct traverse_info *info)
{
	int i, ret, bottom;
	struct tree_desc t[MAX_UNPACK_TREES];
	void *buf[MAX_UNPACK_TREES];
	struct traverse_info newinfo;
	struct name_entry *p;

	p = names;
	while (!p->mode)
		p++;

	newinfo = *info;
	newinfo.prev = info;
	newinfo.pathspec = info->pathspec;
	newinfo.name = *p;
	newinfo.pathlen += tree_entry_len(p) + 1;
	newinfo.df_conflicts |= df_conflicts;

	for (i = 0; i < n; i++, dirmask >>= 1) {
		const unsigned char *sha1 = NULL;
		if (dirmask & 1)
			sha1 = names[i].oid->hash;
		buf[i] = fill_tree_descriptor(t+i, sha1);
	}

	bottom = switch_cache_bottom(&newinfo);
	ret = traverse_trees(n, t, &newinfo);
	restore_cache_bottom(&newinfo, bottom);

	for (i = 0; i < n; i++)
		free(buf[i]);

	return ret;
}

/*
 * Compare the traverse-path to the cache entry without actually
 * having to generate the textual representation of the traverse
 * path.
 *
 * NOTE! This *only* compares up to the size of the traverse path
 * itself - the caller needs to do the final check for the cache
 * entry having more data at the end!
 */
static int do_compare_entry_piecewise(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;

	if (info->prev) {
		int cmp = do_compare_entry_piecewise(ce, info->prev,
						     &info->name);
		if (cmp)
			return cmp;
	}
	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	/* If ce_len < pathlen then we must have previously hit "name == directory" entry */
	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}

static int do_compare_entry(const struct cache_entry *ce,
			    const struct traverse_info *info,
			    const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;
	int cmp;

	/*
	 * If we have not precomputed the traverse path, it is quicker
	 * to avoid doing so.  But if we have precomputed it,
	 * it is quicker to use the precomputed version.
	 */
	if (!info->traverse_path)
		return do_compare_entry_piecewise(ce, info, n);

	cmp = strncmp(ce->name, info->traverse_path, info->pathlen);
	if (cmp)
		return cmp;

	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}

static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int cmp = do_compare_entry(ce, info, n);
	if (cmp)
		return cmp;

	/*
	 * Even if the beginning compared identically, the ce should
	 * compare as bigger than a directory leading up to it!
	 */
	return ce_namelen(ce) > traverse_path_len(info, n);
}

static int ce_in_traverse_path(const struct cache_entry *ce,
			       const struct traverse_info *info)
{
	if (!info->prev)
		return 1;
	if (do_compare_entry(ce, info->prev, &info->name))
		return 0;
	/*
	 * If ce (blob) is the same name as the path (which is a tree
	 * we will be descending into), it won't be inside it.
	 */
	return (info->pathlen < ce_namelen(ce));
}

static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage)
{
	int len = traverse_path_len(info, n);
	struct cache_entry *ce = xcalloc(1, cache_entry_size(len));

	ce->ce_mode = create_ce_mode(n->mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	oidcpy(&ce->oid, n->oid);
	make_traverse_path(ce->name, info, n);

	return ce;
}

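/*
 * Create index entries for the tree entries in names[] and hand them,
 * together with any existing index entry already placed in src[0], to
 * the merge function.  When o->merge is set, src[1..n] hold the tree
 * sides and the usual stage convention applies: stage 1 for trees
 * before o->head_idx, stage 2 for the head tree itself, stage 3 for
 * trees after it.
 */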
static int unpack_nondirectories(int n, unsigned long mask,
				 unsigned long dirmask,
				 struct cache_entry **src,
				 const struct name_entry *names,
				 const struct traverse_info *info)
{
	int i;
	struct unpack_trees_options *o = info->data;
	unsigned long conflicts = info->df_conflicts | dirmask;

	/* Do we have *only* directories? Nothing to do */
	if (mask == dirmask && !src[0])
		return 0;

	/*
	 * Ok, we've filled in up to any potential index entry in src[0],
	 * now do the rest.
	 */
	for (i = 0; i < n; i++) {
		int stage;
		unsigned int bit = 1ul << i;
		if (conflicts & bit) {
			src[i + o->merge] = o->df_conflict_entry;
			continue;
		}
		if (!(mask & bit))
			continue;
		if (!o->merge)
			stage = 0;
		else if (i + 1 < o->head_idx)
			stage = 1;
		else if (i + 1 > o->head_idx)
			stage = 3;
		else
			stage = 2;
		src[i + o->merge] = create_ce_entry(info, names + i, stage);
	}

	if (o->merge) {
		int rc = call_unpack_fn((const struct cache_entry * const *)src,
					o);
		for (i = 0; i < n; i++) {
			struct cache_entry *ce = src[i + o->merge];
			if (ce != o->df_conflict_entry)
				free(ce);
		}
		return rc;
	}

	for (i = 0; i < n; i++)
		if (src[i] && src[i] != o->df_conflict_entry)
			if (do_add_entry(o, src[i], 0, 0))
				return -1;

	return 0;
}

static int unpack_failed(struct unpack_trees_options *o, const char *message)
{
	discard_index(&o->result);
	if (!o->gently && !o->exiting_early) {
		if (message)
			return error("%s", message);
		return -1;
	}
	return -1;
}

/*
 * The tree traversal is looking at name p.  If we have a matching entry,
 * return it.  If name p is a directory in the index, do not return
 * anything, as we will want to match it when the traversal descends into
 * the directory.
 */
static int find_cache_pos(struct traverse_info *info,
			  const struct name_entry *p)
{
	int pos;
	struct unpack_trees_options *o = info->data;
	struct index_state *index = o->src_index;
	int pfxlen = info->pathlen;
	int p_len = tree_entry_len(p);

	for (pos = o->cache_bottom; pos < index->cache_nr; pos++) {
		const struct cache_entry *ce = index->cache[pos];
		const char *ce_name, *ce_slash;
		int cmp, ce_len;

		if (ce->ce_flags & CE_UNPACKED) {
			/*
			 * cache_bottom entry is already unpacked, so
			 * we can never match it; don't check it
			 * again.
			 */
			if (pos == o->cache_bottom)
				++o->cache_bottom;
			continue;
		}
		if (!ce_in_traverse_path(ce, info)) {
			/*
			 * Check if we can skip future cache checks
			 * (because we're already past all possible
			 * entries in the traverse path).
			 */
			if (info->traverse_path) {
				if (strncmp(ce->name, info->traverse_path,
					    info->pathlen) > 0)
					break;
			}
			continue;
		}
		ce_name = ce->name + pfxlen;
		ce_slash = strchr(ce_name, '/');
		if (ce_slash)
			ce_len = ce_slash - ce_name;
		else
			ce_len = ce_namelen(ce) - pfxlen;
		cmp = name_compare(p->path, p_len, ce_name, ce_len);
		/*
		 * Exact match; if we have a directory we need to
		 * delay returning it.
		 */
		if (!cmp)
			return ce_slash ? -2 - pos : pos;
		if (0 < cmp)
			continue; /* keep looking */
		/*
		 * ce_name sorts after p->path; could it be that we
		 * have files under p->path directory in the index?
		 * E.g.  ce_name == "t-i", and p->path == "t"; we may
		 * have "t/a" in the index.
		 */
		if (p_len < ce_len && !memcmp(ce_name, p->path, p_len) &&
		    ce_name[p_len] < '/')
			continue; /* keep looking */
		break;
	}
	return -1;
}

static struct cache_entry *find_cache_entry(struct traverse_info *info,
					    const struct name_entry *p)
{
	int pos = find_cache_pos(info, p);
	struct unpack_trees_options *o = info->data;

	if (0 <= pos)
		return o->src_index->cache[pos];
	else
		return NULL;
}

static void debug_path(struct traverse_info *info)
{
	if (info->prev) {
		debug_path(info->prev);
		if (*info->prev->name.path)
			putchar('/');
	}
	printf("%s", info->name.path);
}

static void debug_name_entry(int i, struct name_entry *n)
{
	printf("ent#%d %06o %s\n", i,
	       n->path ? n->mode : 0,
	       n->path ? n->path : "(missing)");
}

static void debug_unpack_callback(int n,
				  unsigned long mask,
				  unsigned long dirmask,
				  struct name_entry *names,
				  struct traverse_info *info)
{
	int i;
	printf("* unpack mask %lu, dirmask %lu, cnt %d ",
	       mask, dirmask, n);
	debug_path(info);
	putchar('\n');
	for (i = 0; i < n; i++)
		debug_name_entry(i, names + i);
}

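/*
 * Callback for traverse_trees().  A non-negative return value is the
 * mask of tree entries that were handled here and that the walker may
 * now advance past; a negative value aborts the traversal.
 */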
static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	const struct name_entry *p = names;

	/* Find first entry with a real name (we could use "mask" too) */
	while (!p->mode)
		p++;

	if (o->debug_unpack)
		debug_unpack_callback(n, mask, dirmask, names, info);

	/* Are we supposed to look at the index too? */
	if (o->merge) {
		while (1) {
			int cmp;
			struct cache_entry *ce;

			if (o->diff_index_cached)
				ce = next_cache_entry(o);
			else
				ce = find_cache_entry(info, p);

			if (!ce)
				break;
			cmp = compare_entry(ce, info, p);
			if (cmp < 0) {
				if (unpack_index_entry(ce, o) < 0)
					return unpack_failed(o, NULL);
				continue;
			}
			if (!cmp) {
				if (ce_stage(ce)) {
					/*
					 * If we skip unmerged index
					 * entries, we'll skip this
					 * entry *and* the tree
					 * entries associated with it!
					 */
					if (o->skip_unmerged) {
						add_same_unmerged(ce, o);
						return mask;
					}
				}
				src[0] = ce;
			}
			break;
		}
	}

	if (unpack_nondirectories(n, mask, dirmask, src, names, info) < 0)
		return -1;

	if (o->merge && src[0]) {
		if (ce_stage(src[0]))
			mark_ce_used_same_name(src[0], o);
		else
			mark_ce_used(src[0], o);
	}

	/* Now handle any directories.. */
	if (dirmask) {
		/* special case: "diff-index --cached" looking at a tree */
		if (o->diff_index_cached &&
		    n == 1 && dirmask == 1 && S_ISDIR(names->mode)) {
			int matches;
			matches = cache_tree_matches_traversal(o->src_index->cache_tree,
							       names, info);
			/*
			 * Everything under the name matches; skip the
			 * entire hierarchy.  diff_index_cached codepath
			 * special cases D/F conflicts in such a way that
			 * it does not do any look-ahead, so this is safe.
			 */
			if (matches) {
				o->cache_bottom += matches;
				return mask;
			}
		}

		if (traverse_trees_recursive(n, dirmask, mask & ~dirmask,
					     names, info) < 0)
			return -1;
		return mask;
	}

	return mask;
}

static int clear_ce_flags_1(struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct exclude_list *el, int defval);

/* Whole directory matching */
static int clear_ce_flags_dir(struct cache_entry **cache, int nr,
			      struct strbuf *prefix,
			      char *basename,
			      int select_mask, int clear_mask,
			      struct exclude_list *el, int defval)
{
	struct cache_entry **cache_end;
	int dtype = DT_DIR;
	int ret = is_excluded_from_list(prefix->buf, prefix->len,
					basename, &dtype, el);
	int rc;

	strbuf_addch(prefix, '/');

	/* If undecided, use matching result of parent dir in defval */
	if (ret < 0)
		ret = defval;

	for (cache_end = cache; cache_end != cache + nr; cache_end++) {
		struct cache_entry *ce = *cache_end;
		if (strncmp(ce->name, prefix->buf, prefix->len))
			break;
	}

	/*
	 * TODO: check el, if there are no patterns that may conflict
	 * with ret (iow, we know in advance the incl/excl
	 * decision for the entire directory), clear flag here without
	 * calling clear_ce_flags_1(). That function will call
	 * the expensive is_excluded_from_list() on every entry.
	 */
	rc = clear_ce_flags_1(cache, cache_end - cache,
			      prefix,
			      select_mask, clear_mask,
			      el, ret);
	strbuf_setlen(prefix, prefix->len - 1);
	return rc;
}

/*
 * Traverse the index, find every entry that matches according to
 * o->el. Do "ce_flags &= ~clear_mask" on those entries. Return the
 * number of traversed entries.
 *
 * If select_mask is non-zero, only entries whose ce_flags has one of
 * those bits enabled are traversed.
 *
 * cache	: pointer to an index entry
 * prefix	: the current path prefix, which (when non-empty) always
 *		  ends with a trailing '/'; cache[0]->name starts with
 *		  prefix->buf.  The top level has an empty prefix.
 */
static int clear_ce_flags_1(struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct exclude_list *el, int defval)
{
	struct cache_entry **cache_end = cache + nr;

	/*
	 * Process all entries that have the given prefix and meet
	 * select_mask condition
	 */
	while(cache != cache_end) {
		struct cache_entry *ce = *cache;
		const char *name, *slash;
		int len, dtype, ret;

		if (select_mask && !(ce->ce_flags & select_mask)) {
			cache++;
			continue;
		}

		if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len))
			break;

		name = ce->name + prefix->len;
		slash = strchr(name, '/');

		/* If it's a directory, try whole directory match first */
		if (slash) {
			int processed;

			len = slash - name;
			strbuf_add(prefix, name, len);

			processed = clear_ce_flags_dir(cache, cache_end - cache,
						       prefix,
						       prefix->buf + prefix->len - len,
						       select_mask, clear_mask,
						       el, defval);

			/* clear_c_f_dir eats a whole dir already? */
			if (processed) {
				cache += processed;
				strbuf_setlen(prefix, prefix->len - len);
				continue;
			}

			strbuf_addch(prefix, '/');
			cache += clear_ce_flags_1(cache, cache_end - cache,
						  prefix,
						  select_mask, clear_mask, el, defval);
			strbuf_setlen(prefix, prefix->len - len - 1);
			continue;
		}

		/* Non-directory */
		dtype = ce_to_dtype(ce);
		ret = is_excluded_from_list(ce->name, ce_namelen(ce),
					    name, &dtype, el);
		if (ret < 0)
			ret = defval;
		if (ret > 0)
			ce->ce_flags &= ~clear_mask;
		cache++;
	}
	return nr - (cache_end - cache);
}

static int clear_ce_flags(struct cache_entry **cache, int nr,
			    int select_mask, int clear_mask,
			    struct exclude_list *el)
{
	static struct strbuf prefix = STRBUF_INIT;

	strbuf_reset(&prefix);

	return clear_ce_flags_1(cache, nr,
				&prefix,
				select_mask, clear_mask,
				el, 0);
}

/*
 * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout
 */
static void mark_new_skip_worktree(struct exclude_list *el,
				   struct index_state *the_index,
				   int select_flag, int skip_wt_flag)
{
	int i;

	/*
	 * 1. Pretend the narrowest worktree: only unmerged entries
	 * are checked out
	 */
	for (i = 0; i < the_index->cache_nr; i++) {
		struct cache_entry *ce = the_index->cache[i];

		if (select_flag && !(ce->ce_flags & select_flag))
			continue;

		if (!ce_stage(ce))
			ce->ce_flags |= skip_wt_flag;
		else
			ce->ce_flags &= ~skip_wt_flag;
	}

	/*
	 * 2. Widen worktree according to sparse-checkout file.
	 * Matched entries will have skip_wt_flag cleared (i.e. "in")
	 */
	clear_ce_flags(the_index->cache, the_index->cache_nr,
		       select_flag, skip_wt_flag, el);
}

static int verify_absent(const struct cache_entry *,
			 enum unpack_trees_error_types,
			 struct unpack_trees_options *);
/*
 * N-way merge "len" trees.  Returns 0 on success, -1 on failure to manipulate the
 * resulting index, -2 on failure to reflect the changes to the work tree.
 *
 * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally
 */
int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
{
	int i, ret;
	static struct cache_entry *dfc;
	struct exclude_list el;

	if (len > MAX_UNPACK_TREES)
		die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);

	memset(&el, 0, sizeof(el));
	if (!core_apply_sparse_checkout || !o->update)
		o->skip_sparse_checkout = 1;
	if (!o->skip_sparse_checkout) {
		char *sparse = git_pathdup("info/sparse-checkout");
		if (add_excludes_from_file_to_list(sparse, "", 0, &el, 0) < 0)
			o->skip_sparse_checkout = 1;
		else
			o->el = &el;
		free(sparse);
	}

	memset(&o->result, 0, sizeof(o->result));
	o->result.initialized = 1;
	o->result.timestamp.sec = o->src_index->timestamp.sec;
	o->result.timestamp.nsec = o->src_index->timestamp.nsec;
	o->result.version = o->src_index->version;
	o->result.split_index = o->src_index->split_index;
	if (o->result.split_index)
		o->result.split_index->refcount++;
	hashcpy(o->result.sha1, o->src_index->sha1);
	o->merge_size = len;
	mark_all_ce_unused(o->src_index);

	/*
	 * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries
	 */
	if (!o->skip_sparse_checkout)
		mark_new_skip_worktree(o->el, o->src_index, 0, CE_NEW_SKIP_WORKTREE);

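	/*
	 * o->df_conflict_entry is a shared, all-zero placeholder entry;
	 * unpack_nondirectories() plugs it into src[] wherever a
	 * directory/file conflict keeps a real entry from being created.
	 */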
	if (!dfc)
		dfc = xcalloc(1, cache_entry_size(0));
	o->df_conflict_entry = dfc;

	if (len) {
		const char *prefix = o->prefix ? o->prefix : "";
		struct traverse_info info;

		setup_traverse_info(&info, prefix);
		info.fn = unpack_callback;
		info.data = o;
		info.show_all_errors = o->show_all_errors;
		info.pathspec = o->pathspec;

		if (o->prefix) {
			/*
			 * Unpack existing index entries that sort before the
			 * prefix the tree is spliced into.  Note that o->merge
			 * is always true in this case.
			 */
			while (1) {
				struct cache_entry *ce = next_cache_entry(o);
				if (!ce)
					break;
				if (ce_in_traverse_path(ce, &info))
					break;
				if (unpack_index_entry(ce, o) < 0)
					goto return_failed;
			}
		}

		if (traverse_trees(len, t, &info) < 0)
			goto return_failed;
	}

	/* Any left-over entries in the index? */
	if (o->merge) {
		while (1) {
			struct cache_entry *ce = next_cache_entry(o);
			if (!ce)
				break;
			if (unpack_index_entry(ce, o) < 0)
				goto return_failed;
		}
	}
	mark_all_ce_unused(o->src_index);

	if (o->trivial_merges_only && o->nontrivial_merge) {
		ret = unpack_failed(o, "Merge requires file-level merging");
		goto done;
	}

	if (!o->skip_sparse_checkout) {
		int empty_worktree = 1;

		/*
		 * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #1.
		 * If they will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE
		 * so apply_sparse_checkout() won't attempt to remove them from
		 * the worktree.
		 */
		mark_new_skip_worktree(o->el, &o->result, CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);

		ret = 0;
		for (i = 0; i < o->result.cache_nr; i++) {
			struct cache_entry *ce = o->result.cache[i];

			/*
			 * Entries marked with CE_ADDED in merged_entry() did not
			 * go through the verify_absent() check (the check is
			 * effectively disabled because CE_NEW_SKIP_WORKTREE is
			 * set unconditionally).
			 *
			 * Do the real check now that CE_NEW_SKIP_WORKTREE has
			 * its correct value.
			 */
			if (ce->ce_flags & CE_ADDED &&
			    verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}

			if (apply_sparse_checkout(&o->result, ce, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}
			if (!ce_skip_worktree(ce))
				empty_worktree = 0;

		}
		if (ret < 0)
			goto return_failed;
		/*
		 * Sparse checkout is meant to narrow down checkout area
		 * but it does not make sense to narrow down to empty working
		 * tree. This is usually a mistake in sparse checkout rules.
		 * Do not allow users to do that.
		 */
		if (o->result.cache_nr && empty_worktree) {
			ret = unpack_failed(o, "Sparse checkout leaves no entry on working directory");
			goto done;
		}
	}

	o->src_index = NULL;
	ret = check_updates(o) ? (-2) : 0;
	if (o->dst_index) {
		if (!ret) {
			if (!o->result.cache_tree)
				o->result.cache_tree = cache_tree();
			if (!cache_tree_fully_valid(o->result.cache_tree))
				cache_tree_update(&o->result,
						  WRITE_TREE_SILENT |
						  WRITE_TREE_REPAIR);
		}
		discard_index(o->dst_index);
		*o->dst_index = o->result;
	} else {
		discard_index(&o->result);
	}

done:
	clear_exclude_list(&el);
	return ret;

return_failed:
	if (o->show_all_errors)
		display_error_msgs(o);
	mark_all_ce_unused(o->src_index);
	ret = unpack_failed(o, NULL);
	if (o->exiting_early)
		ret = 0;
	goto done;
}

/* Here come the merge functions */

static int reject_merge(const struct cache_entry *ce,
			struct unpack_trees_options *o)
{
	return o->gently ? -1 :
		add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
}

static int same(const struct cache_entry *a, const struct cache_entry *b)
{
	if (!!a != !!b)
		return 0;
	if (!a && !b)
		return 1;
	if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED)
		return 0;
	return a->ce_mode == b->ce_mode &&
	       !oidcmp(&a->oid, &b->oid);
}


/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static int verify_uptodate_1(const struct cache_entry *ce,
			     struct unpack_trees_options *o,
			     enum unpack_trees_error_types error_type)
{
	struct stat st;

	if (o->index_only)
		return 0;

	/*
	 * CE_VALID and CE_SKIP_WORKTREE cheat, so we had better check again
	 * whether this entry is truly up to date, because this file may be
	 * overwritten.
	 */
1353        if ((ce->ce_flags & CE_VALID) || ce_skip_worktree(ce))
1354                ; /* keep checking */
1355        else if (o->reset || ce_uptodate(ce))
1356                return 0;
1357
1358        if (!lstat(ce->name, &st)) {
1359                int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE;
1360                unsigned changed = ie_match_stat(o->src_index, ce, &st, flags);
1361                if (!changed)
1362                        return 0;
1363                /*
1364                 * NEEDSWORK: the current default policy is to allow
1365                 * submodule to be out of sync wrt the superproject
1366                 * index.  This needs to be tightened later for
1367                 * submodules that are marked to be automatically
1368                 * checked out.
1369                 */
1370                if (S_ISGITLINK(ce->ce_mode))
1371                        return 0;
1372                errno = 0;
1373        }
1374        if (errno == ENOENT)
1375                return 0;
1376        return o->gently ? -1 :
1377                add_rejected_path(o, error_type, ce->name);
1378}
1379
1380static int verify_uptodate(const struct cache_entry *ce,
1381                           struct unpack_trees_options *o)
1382{
1383        if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
1384                return 0;
1385        return verify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);
1386}
1387
1388static int verify_uptodate_sparse(const struct cache_entry *ce,
1389                                  struct unpack_trees_options *o)
1390{
1391        return verify_uptodate_1(ce, o, ERROR_SPARSE_NOT_UPTODATE_FILE);
1392}
1393
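    /*
     * Invalidate the cache-tree and untracked-cache data covering
     * ce->name in the source index, since that path is about to change.
     */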
1394static void invalidate_ce_path(const struct cache_entry *ce,
1395                               struct unpack_trees_options *o)
1396{
1397        if (!ce)
1398                return;
1399        cache_tree_invalidate_path(o->src_index, ce->name);
1400        untracked_cache_invalidate_path(o->src_index, ce->name);
1401}
1402
1403/*
1404 * Check that checking out ce->oid in subdir ce->name is not
1405 * going to overwrite any working tree files.
1406 *
1407 * Currently, git does not check out subprojects during a superproject
1408 * checkout, so it is not going to overwrite anything.
1409 */
1410static int verify_clean_submodule(const struct cache_entry *ce,
1411                                  enum unpack_trees_error_types error_type,
1412                                  struct unpack_trees_options *o)
1413{
1414        return 0;
1415}
1416
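    /*
     * "ce" is about to be checked out over an existing directory: make sure
     * that doing so loses neither local modifications to tracked files under
     * it nor untracked-but-not-ignored files.  Returns the number of index
     * entries scheduled for removal, or a negative value when the update
     * would lose local changes or untracked files.
     */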
1417static int verify_clean_subdirectory(const struct cache_entry *ce,
1418                                     enum unpack_trees_error_types error_type,
1419                                     struct unpack_trees_options *o)
1420{
1421        /*
1422         * we are about to extract "ce->name"; we would not want to lose
1423         * anything in the existing directory there.
1424         */
1425        int namelen;
1426        int i;
1427        struct dir_struct d;
1428        char *pathbuf;
1429        int cnt = 0;
1430        unsigned char sha1[20];
1431
1432        if (S_ISGITLINK(ce->ce_mode) &&
1433            resolve_gitlink_ref(ce->name, "HEAD", sha1) == 0) {
1434                /* If we are not going to update the submodule, then
1435                 * we don't care.
1436                 */
1437                if (!hashcmp(sha1, ce->oid.hash))
1438                        return 0;
1439                return verify_clean_submodule(ce, error_type, o);
1440        }
1441
1442        /*
1443         * First let's make sure we do not have a local modification
1444         * in that directory.
1445         */
1446        namelen = ce_namelen(ce);
1447        for (i = locate_in_src_index(ce, o);
1448             i < o->src_index->cache_nr;
1449             i++) {
1450                struct cache_entry *ce2 = o->src_index->cache[i];
1451                int len = ce_namelen(ce2);
1452                if (len < namelen ||
1453                    strncmp(ce->name, ce2->name, namelen) ||
1454                    ce2->name[namelen] != '/')
1455                        break;
1456                /*
1457                 * ce2->name is an entry in the subdirectory to be
1458                 * removed.
1459                 */
1460                if (!ce_stage(ce2)) {
1461                        if (verify_uptodate(ce2, o))
1462                                return -1;
1463                        add_entry(o, ce2, CE_REMOVE, 0);
1464                        mark_ce_used(ce2, o);
1465                }
1466                cnt++;
1467        }
1468
1469        /*
1470         * Then we need to make sure that we do not lose a locally
1471         * present file that is not ignored.
1472         */
1473        pathbuf = xstrfmt("%.*s/", namelen, ce->name);
1474
1475        memset(&d, 0, sizeof(d));
1476        if (o->dir)
1477                d.exclude_per_dir = o->dir->exclude_per_dir;
1478        i = read_directory(&d, pathbuf, namelen+1, NULL);
1479        free(pathbuf); /* no longer needed; avoid leaking it on the early return below */
1480        if (i)
1481                return o->gently ? -1 :
1482                        add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);
1483        return cnt;
1484}
1485
1486/*
1487 * This gets called when there was no index entry for the tree entry 'dst',
1488 * but we found a file in the working tree that 'lstat()' said was fine,
1489 * and we're on a case-insensitive filesystem.
1490 *
1491 * See if we can find a case-insensitive match in the index that also
1492 * matches the stat information, and assume it's that other file!
1493 */
1494static int icase_exists(struct unpack_trees_options *o, const char *name, int len, struct stat *st)
1495{
1496        const struct cache_entry *src;
1497
1498        src = index_file_exists(o->src_index, name, len, 1);
1499        return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);
1500}
1501
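    /*
     * The merge wants to put something at 'name' but lstat() found an
     * existing file or directory there.  Overwriting is fine when it is
     * merely a case-different alias of a tracked file, when the path is
     * excluded, when it is a directory that verify_clean_subdirectory()
     * clears, or when an earlier round already marked the path CE_REMOVE;
     * otherwise the path is rejected with 'error_type'.
     */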
1502static int check_ok_to_remove(const char *name, int len, int dtype,
1503                              const struct cache_entry *ce, struct stat *st,
1504                              enum unpack_trees_error_types error_type,
1505                              struct unpack_trees_options *o)
1506{
1507        const struct cache_entry *result;
1508
1509        /*
1510         * It may be that the 'lstat()' succeeded even though
1511         * target 'ce' was absent, because there is an old
1512         * entry that is different only in case..
1513         * entry that is different only in case.
1514         * Ignore that lstat() if it matches.
1515         */
1516        if (ignore_case && icase_exists(o, name, len, st))
1517                return 0;
1518
1519        if (o->dir &&
1520            is_excluded(o->dir, name, &dtype))
1521                /*
1522                 * ce->name is explicitly excluded, so it is OK to
1523                 * overwrite it.
1524                 */
1525                return 0;
1526        if (S_ISDIR(st->st_mode)) {
1527                /*
1528                 * We are checking out path "foo" and
1529                 * found "foo/." in the working tree.
1530                 * This is tricky -- if we have modified
1531                 * files that are in "foo/" we would lose
1532                 * them.
1533                 */
1534                if (verify_clean_subdirectory(ce, error_type, o) < 0)
1535                        return -1;
1536                return 0;
1537        }
1538
1539        /*
1540         * The previous round may already have decided to
1541         * delete this path, which is in a subdirectory that
1542         * is being replaced with a blob.
1543         */
1544        result = index_file_exists(&o->result, name, len, 0);
1545        if (result) {
1546                if (result->ce_flags & CE_REMOVE)
1547                        return 0;
1548        }
1549
1550        return o->gently ? -1 :
1551                add_rejected_path(o, error_type, name);
1552}
1553
1554/*
1555 * We do not want to remove or overwrite a working tree file that
1556 * is not tracked, unless it is ignored.
1557 */
1558static int verify_absent_1(const struct cache_entry *ce,
1559                           enum unpack_trees_error_types error_type,
1560                           struct unpack_trees_options *o)
1561{
1562        int len;
1563        struct stat st;
1564
1565        if (o->index_only || o->reset || !o->update)
1566                return 0;
1567
1568        len = check_leading_path(ce->name, ce_namelen(ce));
1569        if (!len)
1570                return 0;
1571        else if (len > 0) {
1572                char *path;
1573                int ret;
1574
1575                path = xmemdupz(ce->name, len);
1576                if (lstat(path, &st))
1577                        ret = error_errno("cannot stat '%s'", path);
1578                else
1579                        ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL,
1580                                                 &st, error_type, o);
1581                free(path);
1582                return ret;
1583        } else if (lstat(ce->name, &st)) {
1584                if (errno != ENOENT)
1585                        return error_errno("cannot stat '%s'", ce->name);
1586                return 0;
1587        } else {
1588                return check_ok_to_remove(ce->name, ce_namelen(ce),
1589                                          ce_to_dtype(ce), ce, &st,
1590                                          error_type, o);
1591        }
1592}
1593
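    /*
     * As with verify_uptodate(), entries that will be skip-worktree in the
     * result (CE_NEW_SKIP_WORKTREE) are exempt; everything else must not
     * clobber an untracked working tree file.
     */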
1594static int verify_absent(const struct cache_entry *ce,
1595                         enum unpack_trees_error_types error_type,
1596                         struct unpack_trees_options *o)
1597{
1598        if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
1599                return 0;
1600        return verify_absent_1(ce, error_type, o);
1601}
1602
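    /*
     * Sparse-checkout variant: an untracked file in the way is reported
     * with the "orphaned" wording instead.  Note that only the
     * "overwritten" error type is remapped here.
     */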
1603static int verify_absent_sparse(const struct cache_entry *ce,
1604                                enum unpack_trees_error_types error_type,
1605                                struct unpack_trees_options *o)
1606{
1607        enum unpack_trees_error_types orphaned_error = error_type;
1608        if (orphaned_error == ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN)
1609                orphaned_error = ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN;
1610
1611        return verify_absent_1(ce, orphaned_error, o);
1612}
1613
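    /*
     * Add the merge result 'ce' to o->result, replacing 'old' (the current
     * index entry, if any).  When the content is unchanged the old entry is
     * reused so its stat information is kept and no checkout is scheduled.
     * Returns 1 on success, -1 when the working tree would lose data.
     */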
1614static int merged_entry(const struct cache_entry *ce,
1615                        const struct cache_entry *old,
1616                        struct unpack_trees_options *o)
1617{
1618        int update = CE_UPDATE;
1619        struct cache_entry *merge = dup_entry(ce);
1620
1621        if (!old) {
1622                /*
1623                 * New index entries. In sparse checkout, the following
1624                 * verify_absent() will be delayed until after
1625                 * traverse_trees() finishes in unpack_trees(), then:
1626                 *
1627                 *  - CE_NEW_SKIP_WORKTREE will be computed correctly
1628                 *  - verify_absent() will be called again, this time with
1629                 *    the correct CE_NEW_SKIP_WORKTREE
1630                 *
1631                 * so the verify_absent() call here does nothing when sparse
1632                 * checkout is in use (i.e. o->skip_sparse_checkout == 0).
1633                 */
1634                update |= CE_ADDED;
1635                merge->ce_flags |= CE_NEW_SKIP_WORKTREE;
1636
1637                if (verify_absent(merge,
1638                                  ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
1639                        free(merge);
1640                        return -1;
1641                }
1642                invalidate_ce_path(merge, o);
1643        } else if (!(old->ce_flags & CE_CONFLICTED)) {
1644                /*
1645                 * See if we can re-use the old CE directly; that way
1646                 * we get the up-to-date stat info.
1647                 *
1648                 * This also removes the UPDATE flag on a match; otherwise
1649                 * we will end up overwriting local changes in the work tree.
1650                 */
1651                if (same(old, merge)) {
1652                        copy_cache_entry(merge, old);
1653                        update = 0;
1654                } else {
1655                        if (verify_uptodate(old, o)) {
1656                                free(merge);
1657                                return -1;
1658                        }
1659                        /* Migrate old flags over */
1660                        update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);
1661                        invalidate_ce_path(old, o);
1662                }
1663        } else {
1664                /*
1665                 * Previously unmerged entry left as an existence
1666                 * marker by read_index_unmerged();
1667                 */
1668                invalidate_ce_path(old, o);
1669        }
1670
1671        do_add_entry(o, merge, update, CE_STAGEMASK);
1672        return 1;
1673}
1674
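    /*
     * The path goes away in the merge result.  Returns -1 if that would
     * clobber an untracked file or lose local modifications, 0 if the path
     * was not in the index to begin with, and 1 after marking the old
     * entry CE_REMOVE.
     */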
1675static int deleted_entry(const struct cache_entry *ce,
1676                         const struct cache_entry *old,
1677                         struct unpack_trees_options *o)
1678{
1679        /* Did it exist in the index? */
1680        if (!old) {
1681                if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
1682                        return -1;
1683                return 0;
1684        }
1685        if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o))
1686                return -1;
1687        add_entry(o, ce, CE_REMOVE, 0);
1688        invalidate_ce_path(ce, o);
1689        return 1;
1690}
1691
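    /* Carry the existing entry over into the result unchanged. */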
1692static int keep_entry(const struct cache_entry *ce,
1693                      struct unpack_trees_options *o)
1694{
1695        add_entry(o, ce, 0, 0);
1696        return 1;
1697}
1698
1699#if DBRT_DEBUG
1700static void show_stage_entry(FILE *o,
1701                             const char *label, const struct cache_entry *ce)
1702{
1703        if (!ce)
1704                fprintf(o, "%s (missing)\n", label);
1705        else
1706                fprintf(o, "%s%06o %s %d\t%s\n",
1707                        label,
1708                        ce->ce_mode,
1709                        oid_to_hex(&ce->oid),
1710                        ce_stage(ce),
1711                        ce->name);
1712}
1713#endif
1714
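    /*
     * Three-way merge for one path: stages[0] is the current index entry,
     * stages[o->head_idx] is "our" head, the entry right after it is the
     * remote side, and the stages in between are the common ancestor(s).
     * The #nn labels below presumably follow the read-tree trivial-merge
     * case numbering (see Documentation/technical/trivial-merge.txt).
     * Returns a non-negative count of entries added to the result, or a
     * negative value when the merge has to be rejected.
     */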
1715int threeway_merge(const struct cache_entry * const *stages,
1716                   struct unpack_trees_options *o)
1717{
1718        const struct cache_entry *index;
1719        const struct cache_entry *head;
1720        const struct cache_entry *remote = stages[o->head_idx + 1];
1721        int count;
1722        int head_match = 0;
1723        int remote_match = 0;
1724
1725        int df_conflict_head = 0;
1726        int df_conflict_remote = 0;
1727
1728        int any_anc_missing = 0;
1729        int no_anc_exists = 1;
1730        int i;
1731
1732        for (i = 1; i < o->head_idx; i++) {
1733                if (!stages[i] || stages[i] == o->df_conflict_entry)
1734                        any_anc_missing = 1;
1735                else
1736                        no_anc_exists = 0;
1737        }
1738
1739        index = stages[0];
1740        head = stages[o->head_idx];
1741
1742        if (head == o->df_conflict_entry) {
1743                df_conflict_head = 1;
1744                head = NULL;
1745        }
1746
1747        if (remote == o->df_conflict_entry) {
1748                df_conflict_remote = 1;
1749                remote = NULL;
1750        }
1751
1752        /*
1753         * First, if there is a #16 situation, note it to prevent #13
1754         * and #14.
1755         */
1756        if (!same(remote, head)) {
1757                for (i = 1; i < o->head_idx; i++) {
1758                        if (same(stages[i], head)) {
1759                                head_match = i;
1760                        }
1761                        if (same(stages[i], remote)) {
1762                                remote_match = i;
1763                        }
1764                }
1765        }
1766
1767        /*
1768         * We start with cases where the index is allowed to match
1769         * something other than the head: #14ALT and #2ALT, where it
1770         * is permitted to match the result instead.
1771         */
1772        /* #14, #14ALT, #2ALT */
1773        if (remote && !df_conflict_head && head_match && !remote_match) {
1774                if (index && !same(index, remote) && !same(index, head))
1775                        return reject_merge(index, o);
1776                return merged_entry(remote, index, o);
1777        }
1778        /*
1779         * If we have an entry in the index cache, then we want to
1780         * make sure that it matches head.
1781         */
1782        if (index && !same(index, head))
1783                return reject_merge(index, o);
1784
1785        if (head) {
1786                /* #5ALT, #15 */
1787                if (same(head, remote))
1788                        return merged_entry(head, index, o);
1789                /* #13, #3ALT */
1790                if (!df_conflict_remote && remote_match && !head_match)
1791                        return merged_entry(head, index, o);
1792        }
1793
1794        /* #1 */
1795        if (!head && !remote && any_anc_missing)
1796                return 0;
1797
1798        /*
1799         * Under the "aggressive" rule, we resolve mostly trivial
1800         * cases that we historically had git-merge-one-file resolve.
1801         */
1802        if (o->aggressive) {
1803                int head_deleted = !head;
1804                int remote_deleted = !remote;
1805                const struct cache_entry *ce = NULL;
1806
1807                if (index)
1808                        ce = index;
1809                else if (head)
1810                        ce = head;
1811                else if (remote)
1812                        ce = remote;
1813                else {
1814                        for (i = 1; i < o->head_idx; i++) {
1815                                if (stages[i] && stages[i] != o->df_conflict_entry) {
1816                                        ce = stages[i];
1817                                        break;
1818                                }
1819                        }
1820                }
1821
1822                /*
1823                 * Deleted in both.
1824                 * Deleted in one and unchanged in the other.
1825                 */
1826                if ((head_deleted && remote_deleted) ||
1827                    (head_deleted && remote && remote_match) ||
1828                    (remote_deleted && head && head_match)) {
1829                        if (index)
1830                                return deleted_entry(index, index, o);
1831                        if (ce && !head_deleted) {
1832                                if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
1833                                        return -1;
1834                        }
1835                        return 0;
1836                }
1837                /*
1838                 * Added in both, identically.
1839                 */
1840                if (no_anc_exists && head && remote && same(head, remote))
1841                        return merged_entry(head, index, o);
1842
1843        }
1844
1845        /* Below are "no merge" cases, which require that the index be
1846         * up-to-date to avoid the files getting overwritten with
1847         * conflict resolution files.
1848         */
1849        if (index) {
1850                if (verify_uptodate(index, o))
1851                        return -1;
1852        }
1853
1854        o->nontrivial_merge = 1;
1855
1856        /* #2, #3, #4, #6, #7, #9, #10, #11. */
1857        count = 0;
1858        if (!head_match || !remote_match) {
1859                for (i = 1; i < o->head_idx; i++) {
1860                        if (stages[i] && stages[i] != o->df_conflict_entry) {
1861                                keep_entry(stages[i], o);
1862                                count++;
1863                                break;
1864                        }
1865                }
1866        }
1867#if DBRT_DEBUG
1868        else {
1869                fprintf(stderr, "read-tree: warning #16 detected\n");
1870                show_stage_entry(stderr, "head   ", stages[head_match]);
1871                show_stage_entry(stderr, "remote ", stages[remote_match]);
1872        }
1873#endif
1874        if (head) { count += keep_entry(head, o); }
1875        if (remote) { count += keep_entry(remote, o); }
1876        return count;
1877}
1878
1879/*
1880 * Two-way merge.
1881 *
1882 * The rule is to "carry forward" what is in the index without losing
1883 * information across a "fast-forward", favoring a successful merge
1884 * over a merge failure when it makes sense.  For details of the
1885 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
1886 *
1887 */
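    /*
     * The numbered cases in the branches below (4/5, 6/7, 10/11, 14/15,
     * 18/19, 20/21) presumably index the same two-tree case table that the
     * carry-forward rule above is drawn from.
     */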
1888int twoway_merge(const struct cache_entry * const *src,
1889                 struct unpack_trees_options *o)
1890{
1891        const struct cache_entry *current = src[0];
1892        const struct cache_entry *oldtree = src[1];
1893        const struct cache_entry *newtree = src[2];
1894
1895        if (o->merge_size != 2)
1896                return error("Cannot do a twoway merge of %d trees",
1897                             o->merge_size);
1898
1899        if (oldtree == o->df_conflict_entry)
1900                oldtree = NULL;
1901        if (newtree == o->df_conflict_entry)
1902                newtree = NULL;
1903
1904        if (current) {
1905                if (current->ce_flags & CE_CONFLICTED) {
1906                        if (same(oldtree, newtree) || o->reset) {
1907                                if (!newtree)
1908                                        return deleted_entry(current, current, o);
1909                                else
1910                                        return merged_entry(newtree, current, o);
1911                        }
1912                        return reject_merge(current, o);
1913                } else if ((!oldtree && !newtree) || /* 4 and 5 */
1914                         (!oldtree && newtree &&
1915                          same(current, newtree)) || /* 6 and 7 */
1916                         (oldtree && newtree &&
1917                          same(oldtree, newtree)) || /* 14 and 15 */
1918                         (oldtree && newtree &&
1919                          !same(oldtree, newtree) && /* 18 and 19 */
1920                          same(current, newtree))) {
1921                        return keep_entry(current, o);
1922                } else if (oldtree && !newtree && same(current, oldtree)) {
1923                        /* 10 or 11 */
1924                        return deleted_entry(oldtree, current, o);
1925                } else if (oldtree && newtree &&
1926                         same(current, oldtree) && !same(current, newtree)) {
1927                        /* 20 or 21 */
1928                        return merged_entry(newtree, current, o);
1929                } else
1930                        return reject_merge(current, o);
1931        }
1932        else if (newtree) {
1933                if (oldtree && !o->initial_checkout) {
1934                        /*
1935                         * deletion of the path was staged.
1936                         */
1937                        if (same(oldtree, newtree))
1938                                return 1;
1939                        return reject_merge(oldtree, o);
1940                }
1941                return merged_entry(newtree, current, o);
1942        }
1943        return deleted_entry(oldtree, current, o);
1944}
1945
1946/*
1947 * Bind merge.
1948 *
1949 * Keep the index entries at stage0, collapse stage1 but make sure
1950 * stage0 does not have anything there.
1951 */
1952int bind_merge(const struct cache_entry * const *src,
1953               struct unpack_trees_options *o)
1954{
1955        const struct cache_entry *old = src[0];
1956        const struct cache_entry *a = src[1];
1957
1958        if (o->merge_size != 1)
1959                return error("Cannot do a bind merge of %d trees",
1960                             o->merge_size);
1961        if (a && old)
1962                return o->gently ? -1 :
1963                        error(ERRORMSG(o, ERROR_BIND_OVERLAP),
1964                              super_prefixed(a->name),
1965                              super_prefixed(old->name));
1966        if (!a)
1967                return keep_entry(old, o);
1968        else
1969                return merged_entry(a, NULL, o);
1970}
1971
1972/*
1973 * One-way merge.
1974 *
1975 * The rule is:
1976 * - take the stat information from stage0, take the data from stage1
1977 */
1978int oneway_merge(const struct cache_entry * const *src,
1979                 struct unpack_trees_options *o)
1980{
1981        const struct cache_entry *old = src[0];
1982        const struct cache_entry *a = src[1];
1983
1984        if (o->merge_size != 1)
1985                return error("Cannot do a oneway merge of %d trees",
1986                             o->merge_size);
1987
1988        if (!a || a == o->df_conflict_entry)
1989                return deleted_entry(old, old, o);
1990
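            /*
             * The tree agrees with what we already have: keep the existing
             * entry.  Under a working-tree-updating reset, additionally mark
             * it CE_UPDATE when the file on disk is missing or stat-dirty so
             * that it gets checked out again.
             */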
1991        if (old && same(old, a)) {
1992                int update = 0;
1993                if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old)) {
1994                        struct stat st;
1995                        if (lstat(old->name, &st) ||
1996                            ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE))
1997                                update |= CE_UPDATE;
1998                }
1999                add_entry(o, old, update, 0);
2000                return 0;
2001        }
2002        return merged_entry(a, old, o);
2003}