/* unpack-trees.c */
#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "dir.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "progress.h"
#include "refs.h"

/*
 * Error messages expected by scripts out of plumbing commands such as
 * read-tree.  Non-scripted Porcelain is not required to use these messages
 * and is in fact encouraged to reword them to better suit its particular
 * situation.  See how "git checkout" replaces not_uptodate_file to
 * explain why it does not allow switching between branches when you have
 * local changes, for example.
 */
static struct unpack_trees_error_msgs unpack_plumbing_errors = {
	/* would_overwrite */
	"Entry '%s' would be overwritten by merge. Cannot merge.",

	/* not_uptodate_file */
	"Entry '%s' not uptodate. Cannot merge.",

	/* not_uptodate_dir */
	"Updating '%s' would lose untracked files in it",

	/* would_lose_untracked */
	"Untracked working tree file '%s' would be %s by merge.",

	/* bind_overlap */
	"Entry '%s' overlaps with '%s'.  Cannot bind.",
};

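/*
 * Pick the caller-supplied message for the given field when one was set
 * in o->msgs, and fall back to the plumbing default above otherwise.
 */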
#define ERRORMSG(o,fld) \
	( ((o) && (o)->msgs.fld) \
	? ((o)->msgs.fld) \
	: (unpack_plumbing_errors.fld) )

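/*
 * Copy a cache entry into the result index (o->result), clearing the
 * hash-table flags and adjusting the remaining flags as requested by
 * the caller.
 */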
static void add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
	unsigned int set, unsigned int clear)
{
	unsigned int size = ce_size(ce);
	struct cache_entry *new = xmalloc(size);

	clear |= CE_HASHED | CE_UNHASHED;

	memcpy(new, ce, size);
	new->next = NULL;
	new->ce_flags = (new->ce_flags & ~clear) | set;
	add_index_entry(&o->result, new, ADD_CACHE_OK_TO_ADD|ADD_CACHE_OK_TO_REPLACE|ADD_CACHE_SKIP_DFCHECK);
}

/*
 * Unlink the last component and schedule the leading directories for
 * removal, such that empty directories get removed.
 */
static void unlink_entry(struct cache_entry *ce)
{
	if (has_symlink_or_noent_leading_path(ce->name, ce_namelen(ce)))
		return;
	if (unlink(ce->name))
		return;
	schedule_dir_for_removal(ce->name, ce_namelen(ce));
}

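/*
 * Checkout parameters shared by the working tree updates done from
 * check_updates(): entries marked CE_REMOVE are dropped from the result
 * index and unlinked (removing their now-empty parent directories), and
 * entries marked CE_UPDATE are written out to the working tree.
 */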
static struct checkout state;
static int check_updates(struct unpack_trees_options *o)
{
	unsigned cnt = 0, total = 0;
	struct progress *progress = NULL;
	struct index_state *index = &o->result;
	int i;
	int errs = 0;

	if (o->update && o->verbose_update) {
		for (total = cnt = 0; cnt < index->cache_nr; cnt++) {
			struct cache_entry *ce = index->cache[cnt];
			if (ce->ce_flags & (CE_UPDATE | CE_REMOVE))
				total++;
		}

		progress = start_progress_delay("Checking out files",
						total, 50, 1);
		cnt = 0;
	}

	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_REMOVE) {
			display_progress(progress, ++cnt);
			if (o->update)
				unlink_entry(ce);
			remove_index_entry_at(&o->result, i);
			i--;
			continue;
		}
	}
	remove_scheduled_dirs();

	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_UPDATE) {
			display_progress(progress, ++cnt);
			ce->ce_flags &= ~CE_UPDATE;
			if (o->update) {
				errs |= checkout_entry(ce, &state, NULL);
			}
		}
	}
	stop_progress(&progress);
	return errs != 0;
}

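/*
 * The merge callback reports the number of index entries it produced as
 * a positive return value; only failures matter to the traversal, so any
 * success count is folded down to zero.
 */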
static inline int call_unpack_fn(struct cache_entry **src, struct unpack_trees_options *o)
{
	int ret = o->fn(src, o);
	if (ret > 0)
		ret = 0;
	return ret;
}

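/*
 * Feed a single index entry (with no corresponding tree entries) to the
 * merge function, or keep it as-is when unmerged entries are to be
 * skipped.
 */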
static int unpack_index_entry(struct cache_entry *ce, struct unpack_trees_options *o)
{
	struct cache_entry *src[5] = { ce, };

	o->pos++;
	if (ce_stage(ce)) {
		if (o->skip_unmerged) {
			add_entry(o, ce, 0, 0);
			return 0;
		}
	}
	return call_unpack_fn(src, o);
}

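/*
 * Descend into the subtrees named by "names": every tree whose bit is
 * set in dirmask gets a real tree descriptor, the others get an empty
 * one, and the whole set is traversed in parallel one level deeper.
 */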
int traverse_trees_recursive(int n, unsigned long dirmask, unsigned long df_conflicts, struct name_entry *names, struct traverse_info *info)
{
	int i;
	struct tree_desc t[MAX_UNPACK_TREES];
	struct traverse_info newinfo;
	struct name_entry *p;

	p = names;
	while (!p->mode)
		p++;

	newinfo = *info;
	newinfo.prev = info;
	newinfo.name = *p;
	newinfo.pathlen += tree_entry_len(p->path, p->sha1) + 1;
	newinfo.conflicts |= df_conflicts;

	for (i = 0; i < n; i++, dirmask >>= 1) {
		const unsigned char *sha1 = NULL;
		if (dirmask & 1)
			sha1 = names[i].sha1;
		fill_tree_descriptor(t+i, sha1);
	}
	return traverse_trees(n, t, &newinfo);
}

/*
 * Compare the traverse-path to the cache entry without actually
 * having to generate the textual representation of the traverse
 * path.
 *
 * NOTE! This *only* compares up to the size of the traverse path
 * itself - the caller needs to do the final check for the cache
 * entry having more data at the end!
 */
static int do_compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;

	if (info->prev) {
		int cmp = do_compare_entry(ce, info->prev, &info->name);
		if (cmp)
			return cmp;
	}
	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	/* If ce_len < pathlen then we must have previously hit "name == directory" entry */
	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n->path, n->sha1);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}

static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int cmp = do_compare_entry(ce, info, n);
	if (cmp)
		return cmp;

	/*
	 * Even if the beginning compared identically, the ce should
	 * compare as bigger than a directory leading up to it!
	 */
	return ce_namelen(ce) > traverse_path_len(info, n);
}

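/* Allocate a new cache entry for a tree entry found during traversal. */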
static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage)
{
	int len = traverse_path_len(info, n);
	struct cache_entry *ce = xcalloc(1, cache_entry_size(len));

	ce->ce_mode = create_ce_mode(n->mode);
	ce->ce_flags = create_ce_flags(len, stage);
	hashcpy(ce->sha1, n->sha1);
	make_traverse_path(ce->name, info, n);

	return ce;
}

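/*
 * Turn the non-directory tree entries for the current path into cache
 * entries (src[1..n] when merging, with src[0] already holding any
 * matching index entry; paths with D/F conflicts get the special
 * df_conflict_entry), then either hand them to the merge function or
 * add them to the result verbatim.
 */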
static int unpack_nondirectories(int n, unsigned long mask,
				 unsigned long dirmask,
				 struct cache_entry **src,
				 const struct name_entry *names,
				 const struct traverse_info *info)
{
	int i;
	struct unpack_trees_options *o = info->data;
	unsigned long conflicts;

	/* Do we have *only* directories? Nothing to do */
	if (mask == dirmask && !src[0])
		return 0;

	conflicts = info->conflicts;
	if (o->merge)
		conflicts >>= 1;
	conflicts |= dirmask;

	/*
	 * Ok, we've filled in up to any potential index entry in src[0],
	 * now do the rest.
	 */
	for (i = 0; i < n; i++) {
		int stage;
		unsigned int bit = 1ul << i;
		if (conflicts & bit) {
			src[i + o->merge] = o->df_conflict_entry;
			continue;
		}
		if (!(mask & bit))
			continue;
		if (!o->merge)
			stage = 0;
		else if (i + 1 < o->head_idx)
			stage = 1;
		else if (i + 1 > o->head_idx)
			stage = 3;
		else
			stage = 2;
		src[i + o->merge] = create_ce_entry(info, names + i, stage);
	}

	if (o->merge)
		return call_unpack_fn(src, o);

	n += o->merge;
	for (i = 0; i < n; i++)
		add_entry(o, src[i], 0, 0);
	return 0;
}

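/*
 * Callback for traverse_trees(): interleave the index entries that sort
 * before the current traversal path, merge the non-directory entries at
 * this path, and recurse into any subdirectories.
 */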
static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	const struct name_entry *p = names;

	/* Find first entry with a real name (we could use "mask" too) */
	while (!p->mode)
		p++;

	/* Are we supposed to look at the index too? */
	if (o->merge) {
		while (o->pos < o->src_index->cache_nr) {
			struct cache_entry *ce = o->src_index->cache[o->pos];
			int cmp = compare_entry(ce, info, p);
			if (cmp < 0) {
				if (unpack_index_entry(ce, o) < 0)
					return -1;
				continue;
			}
			if (!cmp) {
				o->pos++;
				if (ce_stage(ce)) {
					/*
					 * If we skip unmerged index entries, we'll skip this
					 * entry *and* the tree entries associated with it!
					 */
					if (o->skip_unmerged) {
						add_entry(o, ce, 0, 0);
						return mask;
					}
				}
				src[0] = ce;
			}
			break;
		}
	}

	if (unpack_nondirectories(n, mask, dirmask, src, names, info) < 0)
		return -1;

	/* Now handle any directories.. */
	if (dirmask) {
		unsigned long conflicts = mask & ~dirmask;
		if (o->merge) {
			conflicts <<= 1;
			if (src[0])
				conflicts |= 1;
		}
		if (traverse_trees_recursive(n, dirmask, conflicts,
					     names, info) < 0)
			return -1;
		return mask;
	}

	return mask;
}

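/*
 * Throw away the partially built result index; the error message is
 * reported only when the caller did not ask for a quiet failure.
 */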
static int unpack_failed(struct unpack_trees_options *o, const char *message)
{
	discard_index(&o->result);
	if (!o->gently) {
		if (message)
			return error("%s", message);
		return -1;
	}
	return -1;
}

/*
 * N-way merge "len" trees.  Returns 0 on success, -1 on failure to manipulate
 * the resulting index, -2 on failure to reflect the changes to the work tree.
 */
int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
{
	int ret;
	static struct cache_entry *dfc;

	if (len > MAX_UNPACK_TREES)
		die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);
	memset(&state, 0, sizeof(state));
	state.base_dir = "";
	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;

	memset(&o->result, 0, sizeof(o->result));
	o->result.initialized = 1;
	if (o->src_index)
		o->result.timestamp = o->src_index->timestamp;
	o->merge_size = len;

	if (!dfc)
		dfc = xcalloc(1, cache_entry_size(0));
	o->df_conflict_entry = dfc;

	if (len) {
		const char *prefix = o->prefix ? o->prefix : "";
		struct traverse_info info;

		setup_traverse_info(&info, prefix);
		info.fn = unpack_callback;
		info.data = o;

		if (traverse_trees(len, t, &info) < 0)
			return unpack_failed(o, NULL);
	}

	/* Any left-over entries in the index? */
	if (o->merge) {
		while (o->pos < o->src_index->cache_nr) {
			struct cache_entry *ce = o->src_index->cache[o->pos];
			if (unpack_index_entry(ce, o) < 0)
				return unpack_failed(o, NULL);
		}
	}

	if (o->trivial_merges_only && o->nontrivial_merge)
		return unpack_failed(o, "Merge requires file-level merging");

	o->src_index = NULL;
	ret = check_updates(o) ? (-2) : 0;
	if (o->dst_index)
		*o->dst_index = o->result;
	return ret;
}

/* Here come the merge functions */

static int reject_merge(struct cache_entry *ce, struct unpack_trees_options *o)
{
	return error(ERRORMSG(o, would_overwrite), ce->name);
}

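/*
 * Two missing entries compare as the same; otherwise both must exist
 * with identical mode and blob sha1.
 */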
static int same(struct cache_entry *a, struct cache_entry *b)
{
	if (!!a != !!b)
		return 0;
	if (!a && !b)
		return 1;
	return a->ce_mode == b->ce_mode &&
	       !hashcmp(a->sha1, b->sha1);
}


/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static int verify_uptodate(struct cache_entry *ce,
		struct unpack_trees_options *o)
{
	struct stat st;

	if (o->index_only || o->reset)
		return 0;

	if (!lstat(ce->name, &st)) {
		unsigned changed = ie_match_stat(o->src_index, ce, &st, CE_MATCH_IGNORE_VALID);
		if (!changed)
			return 0;
		/*
		 * NEEDSWORK: the current default policy is to allow a
		 * submodule to be out of sync wrt the supermodule
		 * index.  This needs to be tightened later for
		 * submodules that are marked to be automatically
		 * checked out.
		 */
		if (S_ISGITLINK(ce->ce_mode))
			return 0;
		errno = 0;
	}
	if (errno == ENOENT)
		return 0;
	return o->gently ? -1 :
		error(ERRORMSG(o, not_uptodate_file), ce->name);
}

static void invalidate_ce_path(struct cache_entry *ce, struct unpack_trees_options *o)
{
	if (ce)
		cache_tree_invalidate_path(o->src_index->cache_tree, ce->name);
}

/*
 * Check that checking out ce->sha1 in subdir ce->name is not
 * going to overwrite any working files.
 *
 * Currently, git does not checkout subprojects during a superproject
 * checkout, so it is not going to overwrite anything.
 */
static int verify_clean_submodule(struct cache_entry *ce, const char *action,
				      struct unpack_trees_options *o)
{
	return 0;
}

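/*
 * "ce" names a directory in the working tree that is about to be
 * replaced; make sure nothing of value is lost in it.  Returns the
 * number of index entries inside that directory that were scheduled for
 * removal, or a negative value when local changes or untracked files
 * would be lost.
 */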
static int verify_clean_subdirectory(struct cache_entry *ce, const char *action,
				      struct unpack_trees_options *o)
{
	/*
	 * we are about to extract "ce->name"; we would not want to lose
	 * anything in the existing directory there.
	 */
	int namelen;
	int i;
	struct dir_struct d;
	char *pathbuf;
	int cnt = 0;
	unsigned char sha1[20];

	if (S_ISGITLINK(ce->ce_mode) &&
	    resolve_gitlink_ref(ce->name, "HEAD", sha1) == 0) {
		/* If we are not going to update the submodule, then
		 * we don't care.
		 */
		if (!hashcmp(sha1, ce->sha1))
			return 0;
		return verify_clean_submodule(ce, action, o);
	}

	/*
	 * First let's make sure we do not have a local modification
	 * in that directory.
	 */
	namelen = strlen(ce->name);
	for (i = o->pos; i < o->src_index->cache_nr; i++) {
		struct cache_entry *ce2 = o->src_index->cache[i];
		int len = ce_namelen(ce2);
		if (len < namelen ||
		    strncmp(ce->name, ce2->name, namelen) ||
		    ce2->name[namelen] != '/')
			break;
		/*
		 * ce2->name is an entry in the subdirectory.
		 */
		if (!ce_stage(ce2)) {
			if (verify_uptodate(ce2, o))
				return -1;
			add_entry(o, ce2, CE_REMOVE, 0);
		}
		cnt++;
	}

	/*
	 * Then we need to make sure that we do not lose a locally
	 * present file that is not ignored.
	 */
	pathbuf = xmalloc(namelen + 2);
	memcpy(pathbuf, ce->name, namelen);
	strcpy(pathbuf+namelen, "/");

	memset(&d, 0, sizeof(d));
	if (o->dir)
		d.exclude_per_dir = o->dir->exclude_per_dir;
	i = read_directory(&d, ce->name, pathbuf, namelen+1, NULL);
	free(pathbuf);
	if (i)
		return o->gently ? -1 :
			error(ERRORMSG(o, not_uptodate_dir), ce->name);
	return cnt;
}

/*
 * This gets called when there was no index entry for the tree entry 'dst',
 * but we found a file in the working tree that 'lstat()' said was fine,
 * and we're on a case-insensitive filesystem.
 *
 * See if we can find a case-insensitive match in the index that also
 * matches the stat information, and assume it's that other file!
 */
static int icase_exists(struct unpack_trees_options *o, struct cache_entry *dst, struct stat *st)
{
	struct cache_entry *src;

	src = index_name_exists(o->src_index, dst->name, ce_namelen(dst), 1);
	return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID);
}

/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked, unless it is ignored.
 */
static int verify_absent(struct cache_entry *ce, const char *action,
			 struct unpack_trees_options *o)
{
	struct stat st;

	if (o->index_only || o->reset || !o->update)
		return 0;

	if (has_symlink_or_noent_leading_path(ce->name, ce_namelen(ce)))
		return 0;

	if (!lstat(ce->name, &st)) {
		int ret;
		int dtype = ce_to_dtype(ce);
		struct cache_entry *result;

		/*
		 * It may be that the 'lstat()' succeeded even though
		 * target 'ce' was absent, because there is an old
		 * entry that is different only in case..
		 *
		 * Ignore that lstat() if it matches.
		 */
		if (ignore_case && icase_exists(o, ce, &st))
			return 0;

		if (o->dir && excluded(o->dir, ce->name, &dtype))
			/*
			 * ce->name is explicitly excluded, so it is Ok to
			 * overwrite it.
			 */
			return 0;
		if (S_ISDIR(st.st_mode)) {
			/*
			 * We are checking out path "foo" and
			 * found "foo/." in the working tree.
			 * This is tricky -- if we have modified
			 * files that are in "foo/" we would lose
			 * them.
			 */
			ret = verify_clean_subdirectory(ce, action, o);
			if (ret < 0)
				return ret;

			/*
			 * If this removed entries from the index,
			 * what that means is:
			 *
			 * (1) the caller unpack_callback() saw path/foo
			 * in the index, and it has not removed it because
			 * it thinks it is handling 'path' as a blob with
			 * a D/F conflict;
			 * (2) we will return "ok, we placed a merged entry
			 * in the index" which would cause o->pos to be
			 * incremented by one;
			 * (3) however, the original o->pos now has 'path/foo'
			 * marked as "to be removed".
			 *
			 * We need to increment it by the number of
			 * deleted entries here.
			 */
			o->pos += ret;
			return 0;
		}

		/*
		 * The previous round may already have decided to
		 * delete this path, which is in a subdirectory that
		 * is being replaced with a blob.
		 */
		result = index_name_exists(&o->result, ce->name, ce_namelen(ce), 0);
		if (result) {
			if (result->ce_flags & CE_REMOVE)
				return 0;
		}

		return o->gently ? -1 :
			error(ERRORMSG(o, would_lose_untracked), ce->name, action);
	}
	return 0;
}

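/*
 * Record "merge" as the merge result in the index.  When the existing
 * entry already matches the result we reuse its stat information and
 * drop the CE_UPDATE flag; otherwise we verify that no local
 * modification or untracked file would be clobbered before scheduling
 * the checkout.
 */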
static int merged_entry(struct cache_entry *merge, struct cache_entry *old,
		struct unpack_trees_options *o)
{
	int update = CE_UPDATE;

	if (old) {
		/*
		 * See if we can re-use the old CE directly?
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on a match; otherwise
		 * we will end up overwriting local changes in the work tree.
		 */
		if (same(old, merge)) {
			copy_cache_entry(merge, old);
			update = 0;
		} else {
			if (verify_uptodate(old, o))
				return -1;
			invalidate_ce_path(old, o);
		}
	}
	else {
		if (verify_absent(merge, "overwritten", o))
			return -1;
		invalidate_ce_path(merge, o);
	}

	add_entry(o, merge, update, CE_STAGEMASK);
	return 1;
}

static int deleted_entry(struct cache_entry *ce, struct cache_entry *old,
		struct unpack_trees_options *o)
{
	/* Did it exist in the index? */
	if (!old) {
		if (verify_absent(ce, "removed", o))
			return -1;
		return 0;
	}
	if (verify_uptodate(old, o))
		return -1;
	add_entry(o, ce, CE_REMOVE, 0);
	invalidate_ce_path(ce, o);
	return 1;
}

static int keep_entry(struct cache_entry *ce, struct unpack_trees_options *o)
{
	add_entry(o, ce, 0, 0);
	return 1;
}

#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ce->ce_mode,
			sha1_to_hex(ce->sha1),
			ce_stage(ce),
			ce->name);
}
#endif

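/*
 * Three-way (or more, with several common ancestors) merge: stages[0] is
 * the current index entry, stages[1..o->head_idx - 1] are the common
 * ancestors, stages[o->head_idx] is HEAD and the entry after it is the
 * remote tree being merged.
 */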
int threeway_merge(struct cache_entry **stages, struct unpack_trees_options *o)
{
	struct cache_entry *index;
	struct cache_entry *head;
	struct cache_entry *remote = stages[o->head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	for (i = 1; i < o->head_idx; i++) {
		if (!stages[i] || stages[i] == o->df_conflict_entry)
			any_anc_missing = 1;
		else
			no_anc_exists = 0;
	}

	index = stages[0];
	head = stages[o->head_idx];

	if (head == o->df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == o->df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	/* First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < o->head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/* We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			return o->gently ? -1 : reject_merge(index, o);
		return merged_entry(remote, index, o);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head))
		return o->gently ? -1 : reject_merge(index, o);

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index, o);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index, o);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/* Under the new "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (o->aggressive) {
		int head_deleted = !head && !df_conflict_head;
		int remote_deleted = !remote && !df_conflict_remote;
		struct cache_entry *ce = NULL;

		if (index)
			ce = index;
		else if (head)
			ce = head;
		else if (remote)
			ce = remote;
		else {
			for (i = 1; i < o->head_idx; i++) {
				if (stages[i] && stages[i] != o->df_conflict_entry) {
					ce = stages[i];
					break;
				}
			}
		}

		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index, o);
			if (ce && !head_deleted) {
				if (verify_absent(ce, "removed", o))
					return -1;
			}
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index, o);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		if (verify_uptodate(index, o))
			return -1;
	}

	o->nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #10, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		for (i = 1; i < o->head_idx; i++) {
			if (stages[i] && stages[i] != o->df_conflict_entry) {
				keep_entry(stages[i], o);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head   ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head, o); }
	if (remote) { count += keep_entry(remote, o); }
	return count;
}

/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast forward", favoring a successful merge
 * over a merge failure when it makes sense.  For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 */
int twoway_merge(struct cache_entry **src, struct unpack_trees_options *o)
{
	struct cache_entry *current = src[0];
	struct cache_entry *oldtree = src[1];
	struct cache_entry *newtree = src[2];

	if (o->merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     o->merge_size);

	if (oldtree == o->df_conflict_entry)
		oldtree = NULL;
	if (newtree == o->df_conflict_entry)
		newtree = NULL;

	if (current) {
		if ((!oldtree && !newtree) || /* 4 and 5 */
		    (!oldtree && newtree &&
		     same(current, newtree)) || /* 6 and 7 */
		    (oldtree && newtree &&
		     same(oldtree, newtree)) || /* 14 and 15 */
		    (oldtree && newtree &&
		     !same(oldtree, newtree) && /* 18 and 19 */
		     same(current, newtree))) {
			return keep_entry(current, o);
		}
		else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current, o);
		}
		else if (oldtree && newtree &&
			 same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current, o);
		}
		else {
			/* all other failures */
			if (oldtree)
				return o->gently ? -1 : reject_merge(oldtree, o);
			if (current)
				return o->gently ? -1 : reject_merge(current, o);
			if (newtree)
				return o->gently ? -1 : reject_merge(newtree, o);
			return -1;
		}
	}
	else if (newtree) {
		if (oldtree && !o->initial_checkout) {
			/*
			 * deletion of the path was staged;
			 */
			if (same(oldtree, newtree))
				return 1;
			return reject_merge(oldtree, o);
		}
		return merged_entry(newtree, current, o);
	}
	return deleted_entry(oldtree, current, o);
}

/*
 * Bind merge.
 *
 * Keep the index entries at stage0, collapse stage1 but make sure
 * stage0 does not have anything there.
 */
int bind_merge(struct cache_entry **src,
		struct unpack_trees_options *o)
{
	struct cache_entry *old = src[0];
	struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a bind merge of %d trees",
			     o->merge_size);
	if (a && old)
		return o->gently ? -1 :
			error(ERRORMSG(o, bind_overlap), a->name, old->name);
	if (!a)
		return keep_entry(old, o);
	else
		return merged_entry(a, NULL, o);
}

/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
int oneway_merge(struct cache_entry **src, struct unpack_trees_options *o)
{
	struct cache_entry *old = src[0];
	struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     o->merge_size);

	if (!a)
		return deleted_entry(old, old, o);

	if (old && same(old, a)) {
		int update = 0;
		if (o->reset) {
			struct stat st;
			if (lstat(old->name, &st) ||
			    ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID))
				update |= CE_UPDATE;
		}
		add_entry(o, old, update, 0);
		return 0;
	}
	return merged_entry(a, old, o);
}