1#define NO_THE_INDEX_COMPATIBILITY_MACROS 2#include "cache.h" 3#include "dir.h" 4#include "tree.h" 5#include "tree-walk.h" 6#include "cache-tree.h" 7#include "unpack-trees.h" 8#include "progress.h" 9#include "refs.h" 10#include "attr.h" 11#include "split-index.h" 12#include "dir.h" 13#include "submodule.h" 14#include "submodule-config.h" 15 16/* 17 * Error messages expected by scripts out of plumbing commands such as 18 * read-tree. Non-scripted Porcelain is not required to use these messages 19 * and in fact are encouraged to reword them to better suit their particular 20 * situation better. See how "git checkout" and "git merge" replaces 21 * them using setup_unpack_trees_porcelain(), for example. 22 */ 23static const char *unpack_plumbing_errors[NB_UNPACK_TREES_ERROR_TYPES] = { 24 /* ERROR_WOULD_OVERWRITE */ 25 "Entry '%s' would be overwritten by merge. Cannot merge.", 26 27 /* ERROR_NOT_UPTODATE_FILE */ 28 "Entry '%s' not uptodate. Cannot merge.", 29 30 /* ERROR_NOT_UPTODATE_DIR */ 31 "Updating '%s' would lose untracked files in it", 32 33 /* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */ 34 "Untracked working tree file '%s' would be overwritten by merge.", 35 36 /* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */ 37 "Untracked working tree file '%s' would be removed by merge.", 38 39 /* ERROR_BIND_OVERLAP */ 40 "Entry '%s' overlaps with '%s'. Cannot bind.", 41 42 /* ERROR_SPARSE_NOT_UPTODATE_FILE */ 43 "Entry '%s' not uptodate. Cannot update sparse checkout.", 44 45 /* ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN */ 46 "Working tree file '%s' would be overwritten by sparse checkout update.", 47 48 /* ERROR_WOULD_LOSE_ORPHANED_REMOVED */ 49 "Working tree file '%s' would be removed by sparse checkout update.", 50 51 /* ERROR_WOULD_LOSE_SUBMODULE */ 52 "Submodule '%s' cannot checkout new HEAD.", 53}; 54 55#define ERRORMSG(o,type) \ 56 ( ((o) && (o)->msgs[(type)]) \ 57 ? 
((o)->msgs[(type)]) \ 58 : (unpack_plumbing_errors[(type)]) ) 59 60static const char *super_prefixed(const char *path) 61{ 62 /* 63 * It is necessary and sufficient to have two static buffers 64 * here, as the return value of this function is fed to 65 * error() using the unpack_*_errors[] templates we see above. 66 */ 67 static struct strbuf buf[2] = {STRBUF_INIT, STRBUF_INIT}; 68 static int super_prefix_len = -1; 69 static unsigned idx = ARRAY_SIZE(buf) - 1; 70 71 if (super_prefix_len < 0) { 72 const char *super_prefix = get_super_prefix(); 73 if (!super_prefix) { 74 super_prefix_len = 0; 75 } else { 76 int i; 77 for (i = 0; i < ARRAY_SIZE(buf); i++) 78 strbuf_addstr(&buf[i], super_prefix); 79 super_prefix_len = buf[0].len; 80 } 81 } 82 83 if (!super_prefix_len) 84 return path; 85 86 if (++idx >= ARRAY_SIZE(buf)) 87 idx = 0; 88 89 strbuf_setlen(&buf[idx], super_prefix_len); 90 strbuf_addstr(&buf[idx], path); 91 92 return buf[idx].buf; 93} 94 95void setup_unpack_trees_porcelain(struct unpack_trees_options *opts, 96 const char *cmd) 97{ 98 int i; 99 const char **msgs = opts->msgs; 100 const char *msg; 101 102 if (!strcmp(cmd, "checkout")) 103 msg = advice_commit_before_merge 104 ? _("Your local changes to the following files would be overwritten by checkout:\n%%s" 105 "Please commit your changes or stash them before you switch branches.") 106 : _("Your local changes to the following files would be overwritten by checkout:\n%%s"); 107 else if (!strcmp(cmd, "merge")) 108 msg = advice_commit_before_merge 109 ? _("Your local changes to the following files would be overwritten by merge:\n%%s" 110 "Please commit your changes or stash them before you merge.") 111 : _("Your local changes to the following files would be overwritten by merge:\n%%s"); 112 else 113 msg = advice_commit_before_merge 114 ? 
_("Your local changes to the following files would be overwritten by %s:\n%%s" 115 "Please commit your changes or stash them before you %s.") 116 : _("Your local changes to the following files would be overwritten by %s:\n%%s"); 117 msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] = 118 xstrfmt(msg, cmd, cmd); 119 120 msgs[ERROR_NOT_UPTODATE_DIR] = 121 _("Updating the following directories would lose untracked files in them:\n%s"); 122 123 if (!strcmp(cmd, "checkout")) 124 msg = advice_commit_before_merge 125 ? _("The following untracked working tree files would be removed by checkout:\n%%s" 126 "Please move or remove them before you switch branches.") 127 : _("The following untracked working tree files would be removed by checkout:\n%%s"); 128 else if (!strcmp(cmd, "merge")) 129 msg = advice_commit_before_merge 130 ? _("The following untracked working tree files would be removed by merge:\n%%s" 131 "Please move or remove them before you merge.") 132 : _("The following untracked working tree files would be removed by merge:\n%%s"); 133 else 134 msg = advice_commit_before_merge 135 ? _("The following untracked working tree files would be removed by %s:\n%%s" 136 "Please move or remove them before you %s.") 137 : _("The following untracked working tree files would be removed by %s:\n%%s"); 138 msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = xstrfmt(msg, cmd, cmd); 139 140 if (!strcmp(cmd, "checkout")) 141 msg = advice_commit_before_merge 142 ? _("The following untracked working tree files would be overwritten by checkout:\n%%s" 143 "Please move or remove them before you switch branches.") 144 : _("The following untracked working tree files would be overwritten by checkout:\n%%s"); 145 else if (!strcmp(cmd, "merge")) 146 msg = advice_commit_before_merge 147 ? 
_("The following untracked working tree files would be overwritten by merge:\n%%s" 148 "Please move or remove them before you merge.") 149 : _("The following untracked working tree files would be overwritten by merge:\n%%s"); 150 else 151 msg = advice_commit_before_merge 152 ? _("The following untracked working tree files would be overwritten by %s:\n%%s" 153 "Please move or remove them before you %s.") 154 : _("The following untracked working tree files would be overwritten by %s:\n%%s"); 155 msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = xstrfmt(msg, cmd, cmd); 156 157 /* 158 * Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we 159 * cannot easily display it as a list. 160 */ 161 msgs[ERROR_BIND_OVERLAP] = _("Entry '%s' overlaps with '%s'. Cannot bind."); 162 163 msgs[ERROR_SPARSE_NOT_UPTODATE_FILE] = 164 _("Cannot update sparse checkout: the following entries are not up-to-date:\n%s"); 165 msgs[ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN] = 166 _("The following working tree files would be overwritten by sparse checkout update:\n%s"); 167 msgs[ERROR_WOULD_LOSE_ORPHANED_REMOVED] = 168 _("The following working tree files would be removed by sparse checkout update:\n%s"); 169 msgs[ERROR_WOULD_LOSE_SUBMODULE] = 170 _("Cannot update submodule:\n%s"); 171 172 opts->show_all_errors = 1; 173 /* rejected paths may not have a static buffer */ 174 for (i = 0; i < ARRAY_SIZE(opts->unpack_rejects); i++) 175 opts->unpack_rejects[i].strdup_strings = 1; 176} 177 178static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce, 179 unsigned int set, unsigned int clear) 180{ 181 clear |= CE_HASHED; 182 183 if (set & CE_REMOVE) 184 set |= CE_WT_REMOVE; 185 186 ce->ce_flags = (ce->ce_flags & ~clear) | set; 187 return add_index_entry(&o->result, ce, 188 ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE); 189} 190 191static struct cache_entry *dup_entry(const struct cache_entry *ce) 192{ 193 unsigned int size = ce_size(ce); 194 struct cache_entry *new = 
xmalloc(size); 195 196 memcpy(new, ce, size); 197 return new; 198} 199 200static void add_entry(struct unpack_trees_options *o, 201 const struct cache_entry *ce, 202 unsigned int set, unsigned int clear) 203{ 204 do_add_entry(o, dup_entry(ce), set, clear); 205} 206 207/* 208 * add error messages on path <path> 209 * corresponding to the type <e> with the message <msg> 210 * indicating if it should be display in porcelain or not 211 */ 212static int add_rejected_path(struct unpack_trees_options *o, 213 enum unpack_trees_error_types e, 214 const char *path) 215{ 216 if (!o->show_all_errors) 217 return error(ERRORMSG(o, e), super_prefixed(path)); 218 219 /* 220 * Otherwise, insert in a list for future display by 221 * display_error_msgs() 222 */ 223 string_list_append(&o->unpack_rejects[e], path); 224 return -1; 225} 226 227/* 228 * display all the error messages stored in a nice way 229 */ 230static void display_error_msgs(struct unpack_trees_options *o) 231{ 232 int e, i; 233 int something_displayed = 0; 234 for (e = 0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) { 235 struct string_list *rejects = &o->unpack_rejects[e]; 236 if (rejects->nr > 0) { 237 struct strbuf path = STRBUF_INIT; 238 something_displayed = 1; 239 for (i = 0; i < rejects->nr; i++) 240 strbuf_addf(&path, "\t%s\n", rejects->items[i].string); 241 error(ERRORMSG(o, e), super_prefixed(path.buf)); 242 strbuf_release(&path); 243 } 244 string_list_clear(rejects, 0); 245 } 246 if (something_displayed) 247 fprintf(stderr, _("Aborting\n")); 248} 249 250static int check_submodule_move_head(const struct cache_entry *ce, 251 const char *old_id, 252 const char *new_id, 253 struct unpack_trees_options *o) 254{ 255 const struct submodule *sub = submodule_from_ce(ce); 256 if (!sub) 257 return 0; 258 259 switch (sub->update_strategy.type) { 260 case SM_UPDATE_UNSPECIFIED: 261 case SM_UPDATE_CHECKOUT: 262 if (submodule_move_head(ce->name, old_id, new_id, SUBMODULE_MOVE_HEAD_DRY_RUN)) 263 return o->gently ? 
-1 : 264 add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name); 265 return 0; 266 case SM_UPDATE_NONE: 267 return 0; 268 case SM_UPDATE_REBASE: 269 case SM_UPDATE_MERGE: 270 case SM_UPDATE_COMMAND: 271 default: 272 warning(_("submodule update strategy not supported for submodule '%s'"), ce->name); 273 return -1; 274 } 275} 276 277static void reload_gitmodules_file(struct index_state *index, 278 struct checkout *state) 279{ 280 int i; 281 for (i = 0; i < index->cache_nr; i++) { 282 struct cache_entry *ce = index->cache[i]; 283 if (ce->ce_flags & CE_UPDATE) { 284 int r = strcmp(ce->name, ".gitmodules"); 285 if (r < 0) 286 continue; 287 else if (r == 0) { 288 submodule_free(); 289 checkout_entry(ce, state, NULL); 290 gitmodules_config(); 291 git_config(submodule_config, NULL); 292 } else 293 break; 294 } 295 } 296} 297 298/* 299 * Unlink the last component and schedule the leading directories for 300 * removal, such that empty directories get removed. 301 */ 302static void unlink_entry(const struct cache_entry *ce) 303{ 304 const struct submodule *sub = submodule_from_ce(ce); 305 if (sub) { 306 switch (sub->update_strategy.type) { 307 case SM_UPDATE_UNSPECIFIED: 308 case SM_UPDATE_CHECKOUT: 309 case SM_UPDATE_REBASE: 310 case SM_UPDATE_MERGE: 311 submodule_move_head(ce->name, "HEAD", NULL, 312 SUBMODULE_MOVE_HEAD_FORCE); 313 break; 314 case SM_UPDATE_NONE: 315 case SM_UPDATE_COMMAND: 316 return; /* Do not touch the submodule. 
*/ 317 } 318 } 319 if (!check_leading_path(ce->name, ce_namelen(ce))) 320 return; 321 if (remove_or_warn(ce->ce_mode, ce->name)) 322 return; 323 schedule_dir_for_removal(ce->name, ce_namelen(ce)); 324} 325 326static struct progress *get_progress(struct unpack_trees_options *o) 327{ 328 unsigned cnt = 0, total = 0; 329 struct index_state *index = &o->result; 330 331 if (!o->update || !o->verbose_update) 332 return NULL; 333 334 for (; cnt < index->cache_nr; cnt++) { 335 const struct cache_entry *ce = index->cache[cnt]; 336 if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE)) 337 total++; 338 } 339 340 return start_progress_delay(_("Checking out files"), 341 total, 50, 1); 342} 343 344static int check_updates(struct unpack_trees_options *o) 345{ 346 unsigned cnt = 0; 347 int errs = 0; 348 struct progress *progress = NULL; 349 struct index_state *index = &o->result; 350 struct checkout state = CHECKOUT_INIT; 351 int i; 352 353 state.force = 1; 354 state.quiet = 1; 355 state.refresh_cache = 1; 356 state.istate = index; 357 358 progress = get_progress(o); 359 360 if (o->update) 361 git_attr_set_direction(GIT_ATTR_CHECKOUT, index); 362 for (i = 0; i < index->cache_nr; i++) { 363 const struct cache_entry *ce = index->cache[i]; 364 365 if (ce->ce_flags & CE_WT_REMOVE) { 366 display_progress(progress, ++cnt); 367 if (o->update && !o->dry_run) 368 unlink_entry(ce); 369 } 370 } 371 remove_marked_cache_entries(index); 372 remove_scheduled_dirs(); 373 374 if (should_update_submodules() && o->update && !o->dry_run) 375 reload_gitmodules_file(index, &state); 376 377 for (i = 0; i < index->cache_nr; i++) { 378 struct cache_entry *ce = index->cache[i]; 379 380 if (ce->ce_flags & CE_UPDATE) { 381 if (ce->ce_flags & CE_WT_REMOVE) 382 die("BUG: both update and delete flags are set on %s", 383 ce->name); 384 display_progress(progress, ++cnt); 385 ce->ce_flags &= ~CE_UPDATE; 386 if (o->update && !o->dry_run) { 387 errs |= checkout_entry(ce, &state, NULL); 388 } 389 } 390 } 391 
stop_progress(&progress); 392 if (o->update) 393 git_attr_set_direction(GIT_ATTR_CHECKIN, NULL); 394 return errs != 0; 395} 396 397static int verify_uptodate_sparse(const struct cache_entry *ce, 398 struct unpack_trees_options *o); 399static int verify_absent_sparse(const struct cache_entry *ce, 400 enum unpack_trees_error_types, 401 struct unpack_trees_options *o); 402 403static int apply_sparse_checkout(struct index_state *istate, 404 struct cache_entry *ce, 405 struct unpack_trees_options *o) 406{ 407 int was_skip_worktree = ce_skip_worktree(ce); 408 409 if (ce->ce_flags & CE_NEW_SKIP_WORKTREE) 410 ce->ce_flags |= CE_SKIP_WORKTREE; 411 else 412 ce->ce_flags &= ~CE_SKIP_WORKTREE; 413 if (was_skip_worktree != ce_skip_worktree(ce)) { 414 ce->ce_flags |= CE_UPDATE_IN_BASE; 415 istate->cache_changed |= CE_ENTRY_CHANGED; 416 } 417 418 /* 419 * if (!was_skip_worktree && !ce_skip_worktree()) { 420 * This is perfectly normal. Move on; 421 * } 422 */ 423 424 /* 425 * Merge strategies may set CE_UPDATE|CE_REMOVE outside checkout 426 * area as a result of ce_skip_worktree() shortcuts in 427 * verify_absent() and verify_uptodate(). 428 * Make sure they don't modify worktree if they are already 429 * outside checkout area 430 */ 431 if (was_skip_worktree && ce_skip_worktree(ce)) { 432 ce->ce_flags &= ~CE_UPDATE; 433 434 /* 435 * By default, when CE_REMOVE is on, CE_WT_REMOVE is also 436 * on to get that file removed from both index and worktree. 437 * If that file is already outside worktree area, don't 438 * bother remove it. 
439 */ 440 if (ce->ce_flags & CE_REMOVE) 441 ce->ce_flags &= ~CE_WT_REMOVE; 442 } 443 444 if (!was_skip_worktree && ce_skip_worktree(ce)) { 445 /* 446 * If CE_UPDATE is set, verify_uptodate() must be called already 447 * also stat info may have lost after merged_entry() so calling 448 * verify_uptodate() again may fail 449 */ 450 if (!(ce->ce_flags & CE_UPDATE) && verify_uptodate_sparse(ce, o)) 451 return -1; 452 ce->ce_flags |= CE_WT_REMOVE; 453 ce->ce_flags &= ~CE_UPDATE; 454 } 455 if (was_skip_worktree && !ce_skip_worktree(ce)) { 456 if (verify_absent_sparse(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) 457 return -1; 458 ce->ce_flags |= CE_UPDATE; 459 } 460 return 0; 461} 462 463static inline int call_unpack_fn(const struct cache_entry * const *src, 464 struct unpack_trees_options *o) 465{ 466 int ret = o->fn(src, o); 467 if (ret > 0) 468 ret = 0; 469 return ret; 470} 471 472static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o) 473{ 474 ce->ce_flags |= CE_UNPACKED; 475 476 if (o->cache_bottom < o->src_index->cache_nr && 477 o->src_index->cache[o->cache_bottom] == ce) { 478 int bottom = o->cache_bottom; 479 while (bottom < o->src_index->cache_nr && 480 o->src_index->cache[bottom]->ce_flags & CE_UNPACKED) 481 bottom++; 482 o->cache_bottom = bottom; 483 } 484} 485 486static void mark_all_ce_unused(struct index_state *index) 487{ 488 int i; 489 for (i = 0; i < index->cache_nr; i++) 490 index->cache[i]->ce_flags &= ~(CE_UNPACKED | CE_ADDED | CE_NEW_SKIP_WORKTREE); 491} 492 493static int locate_in_src_index(const struct cache_entry *ce, 494 struct unpack_trees_options *o) 495{ 496 struct index_state *index = o->src_index; 497 int len = ce_namelen(ce); 498 int pos = index_name_pos(index, ce->name, len); 499 if (pos < 0) 500 pos = -1 - pos; 501 return pos; 502} 503 504/* 505 * We call unpack_index_entry() with an unmerged cache entry 506 * only in diff-index, and it wants a single callback. 
Skip 507 * the other unmerged entry with the same name. 508 */ 509static void mark_ce_used_same_name(struct cache_entry *ce, 510 struct unpack_trees_options *o) 511{ 512 struct index_state *index = o->src_index; 513 int len = ce_namelen(ce); 514 int pos; 515 516 for (pos = locate_in_src_index(ce, o); pos < index->cache_nr; pos++) { 517 struct cache_entry *next = index->cache[pos]; 518 if (len != ce_namelen(next) || 519 memcmp(ce->name, next->name, len)) 520 break; 521 mark_ce_used(next, o); 522 } 523} 524 525static struct cache_entry *next_cache_entry(struct unpack_trees_options *o) 526{ 527 const struct index_state *index = o->src_index; 528 int pos = o->cache_bottom; 529 530 while (pos < index->cache_nr) { 531 struct cache_entry *ce = index->cache[pos]; 532 if (!(ce->ce_flags & CE_UNPACKED)) 533 return ce; 534 pos++; 535 } 536 return NULL; 537} 538 539static void add_same_unmerged(const struct cache_entry *ce, 540 struct unpack_trees_options *o) 541{ 542 struct index_state *index = o->src_index; 543 int len = ce_namelen(ce); 544 int pos = index_name_pos(index, ce->name, len); 545 546 if (0 <= pos) 547 die("programming error in a caller of mark_ce_used_same_name"); 548 for (pos = -pos - 1; pos < index->cache_nr; pos++) { 549 struct cache_entry *next = index->cache[pos]; 550 if (len != ce_namelen(next) || 551 memcmp(ce->name, next->name, len)) 552 break; 553 add_entry(o, next, 0, 0); 554 mark_ce_used(next, o); 555 } 556} 557 558static int unpack_index_entry(struct cache_entry *ce, 559 struct unpack_trees_options *o) 560{ 561 const struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, }; 562 int ret; 563 564 src[0] = ce; 565 566 mark_ce_used(ce, o); 567 if (ce_stage(ce)) { 568 if (o->skip_unmerged) { 569 add_entry(o, ce, 0, 0); 570 return 0; 571 } 572 } 573 ret = call_unpack_fn(src, o); 574 if (ce_stage(ce)) 575 mark_ce_used_same_name(ce, o); 576 return ret; 577} 578 579static int find_cache_pos(struct traverse_info *, const struct name_entry *); 580 581static 
void restore_cache_bottom(struct traverse_info *info, int bottom) 582{ 583 struct unpack_trees_options *o = info->data; 584 585 if (o->diff_index_cached) 586 return; 587 o->cache_bottom = bottom; 588} 589 590static int switch_cache_bottom(struct traverse_info *info) 591{ 592 struct unpack_trees_options *o = info->data; 593 int ret, pos; 594 595 if (o->diff_index_cached) 596 return 0; 597 ret = o->cache_bottom; 598 pos = find_cache_pos(info->prev, &info->name); 599 600 if (pos < -1) 601 o->cache_bottom = -2 - pos; 602 else if (pos < 0) 603 o->cache_bottom = o->src_index->cache_nr; 604 return ret; 605} 606 607static inline int are_same_oid(struct name_entry *name_j, struct name_entry *name_k) 608{ 609 return name_j->oid && name_k->oid && !oidcmp(name_j->oid, name_k->oid); 610} 611 612static int traverse_trees_recursive(int n, unsigned long dirmask, 613 unsigned long df_conflicts, 614 struct name_entry *names, 615 struct traverse_info *info) 616{ 617 int i, ret, bottom; 618 int nr_buf = 0; 619 struct tree_desc t[MAX_UNPACK_TREES]; 620 void *buf[MAX_UNPACK_TREES]; 621 struct traverse_info newinfo; 622 struct name_entry *p; 623 624 p = names; 625 while (!p->mode) 626 p++; 627 628 newinfo = *info; 629 newinfo.prev = info; 630 newinfo.pathspec = info->pathspec; 631 newinfo.name = *p; 632 newinfo.pathlen += tree_entry_len(p) + 1; 633 newinfo.df_conflicts |= df_conflicts; 634 635 /* 636 * Fetch the tree from the ODB for each peer directory in the 637 * n commits. 638 * 639 * For 2- and 3-way traversals, we try to avoid hitting the 640 * ODB twice for the same OID. This should yield a nice speed 641 * up in checkouts and merges when the commits are similar. 642 * 643 * We don't bother doing the full O(n^2) search for larger n, 644 * because wider traversals don't happen that often and we 645 * avoid the search setup. 646 * 647 * When 2 peer OIDs are the same, we just copy the tree 648 * descriptor data. This implicitly borrows the buffer 649 * data from the earlier cell. 
650 */ 651 for (i = 0; i < n; i++, dirmask >>= 1) { 652 if (i > 0 && are_same_oid(&names[i], &names[i - 1])) 653 t[i] = t[i - 1]; 654 else if (i > 1 && are_same_oid(&names[i], &names[i - 2])) 655 t[i] = t[i - 2]; 656 else { 657 const unsigned char *sha1 = NULL; 658 if (dirmask & 1) 659 sha1 = names[i].oid->hash; 660 buf[nr_buf++] = fill_tree_descriptor(t+i, sha1); 661 } 662 } 663 664 bottom = switch_cache_bottom(&newinfo); 665 ret = traverse_trees(n, t, &newinfo); 666 restore_cache_bottom(&newinfo, bottom); 667 668 for (i = 0; i < nr_buf; i++) 669 free(buf[i]); 670 671 return ret; 672} 673 674/* 675 * Compare the traverse-path to the cache entry without actually 676 * having to generate the textual representation of the traverse 677 * path. 678 * 679 * NOTE! This *only* compares up to the size of the traverse path 680 * itself - the caller needs to do the final check for the cache 681 * entry having more data at the end! 682 */ 683static int do_compare_entry_piecewise(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n) 684{ 685 int len, pathlen, ce_len; 686 const char *ce_name; 687 688 if (info->prev) { 689 int cmp = do_compare_entry_piecewise(ce, info->prev, 690 &info->name); 691 if (cmp) 692 return cmp; 693 } 694 pathlen = info->pathlen; 695 ce_len = ce_namelen(ce); 696 697 /* If ce_len < pathlen then we must have previously hit "name == directory" entry */ 698 if (ce_len < pathlen) 699 return -1; 700 701 ce_len -= pathlen; 702 ce_name = ce->name + pathlen; 703 704 len = tree_entry_len(n); 705 return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode); 706} 707 708static int do_compare_entry(const struct cache_entry *ce, 709 const struct traverse_info *info, 710 const struct name_entry *n) 711{ 712 int len, pathlen, ce_len; 713 const char *ce_name; 714 int cmp; 715 716 /* 717 * If we have not precomputed the traverse path, it is quicker 718 * to avoid doing so. 
But if we have precomputed it, 719 * it is quicker to use the precomputed version. 720 */ 721 if (!info->traverse_path) 722 return do_compare_entry_piecewise(ce, info, n); 723 724 cmp = strncmp(ce->name, info->traverse_path, info->pathlen); 725 if (cmp) 726 return cmp; 727 728 pathlen = info->pathlen; 729 ce_len = ce_namelen(ce); 730 731 if (ce_len < pathlen) 732 return -1; 733 734 ce_len -= pathlen; 735 ce_name = ce->name + pathlen; 736 737 len = tree_entry_len(n); 738 return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode); 739} 740 741static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n) 742{ 743 int cmp = do_compare_entry(ce, info, n); 744 if (cmp) 745 return cmp; 746 747 /* 748 * Even if the beginning compared identically, the ce should 749 * compare as bigger than a directory leading up to it! 750 */ 751 return ce_namelen(ce) > traverse_path_len(info, n); 752} 753 754static int ce_in_traverse_path(const struct cache_entry *ce, 755 const struct traverse_info *info) 756{ 757 if (!info->prev) 758 return 1; 759 if (do_compare_entry(ce, info->prev, &info->name)) 760 return 0; 761 /* 762 * If ce (blob) is the same name as the path (which is a tree 763 * we will be descending into), it won't be inside it. 
764 */ 765 return (info->pathlen < ce_namelen(ce)); 766} 767 768static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage) 769{ 770 int len = traverse_path_len(info, n); 771 struct cache_entry *ce = xcalloc(1, cache_entry_size(len)); 772 773 ce->ce_mode = create_ce_mode(n->mode); 774 ce->ce_flags = create_ce_flags(stage); 775 ce->ce_namelen = len; 776 oidcpy(&ce->oid, n->oid); 777 make_traverse_path(ce->name, info, n); 778 779 return ce; 780} 781 782static int unpack_nondirectories(int n, unsigned long mask, 783 unsigned long dirmask, 784 struct cache_entry **src, 785 const struct name_entry *names, 786 const struct traverse_info *info) 787{ 788 int i; 789 struct unpack_trees_options *o = info->data; 790 unsigned long conflicts = info->df_conflicts | dirmask; 791 792 /* Do we have *only* directories? Nothing to do */ 793 if (mask == dirmask && !src[0]) 794 return 0; 795 796 /* 797 * Ok, we've filled in up to any potential index entry in src[0], 798 * now do the rest. 
799 */ 800 for (i = 0; i < n; i++) { 801 int stage; 802 unsigned int bit = 1ul << i; 803 if (conflicts & bit) { 804 src[i + o->merge] = o->df_conflict_entry; 805 continue; 806 } 807 if (!(mask & bit)) 808 continue; 809 if (!o->merge) 810 stage = 0; 811 else if (i + 1 < o->head_idx) 812 stage = 1; 813 else if (i + 1 > o->head_idx) 814 stage = 3; 815 else 816 stage = 2; 817 src[i + o->merge] = create_ce_entry(info, names + i, stage); 818 } 819 820 if (o->merge) { 821 int rc = call_unpack_fn((const struct cache_entry * const *)src, 822 o); 823 for (i = 0; i < n; i++) { 824 struct cache_entry *ce = src[i + o->merge]; 825 if (ce != o->df_conflict_entry) 826 free(ce); 827 } 828 return rc; 829 } 830 831 for (i = 0; i < n; i++) 832 if (src[i] && src[i] != o->df_conflict_entry) 833 if (do_add_entry(o, src[i], 0, 0)) 834 return -1; 835 836 return 0; 837} 838 839static int unpack_failed(struct unpack_trees_options *o, const char *message) 840{ 841 discard_index(&o->result); 842 if (!o->gently && !o->exiting_early) { 843 if (message) 844 return error("%s", message); 845 return -1; 846 } 847 return -1; 848} 849 850/* 851 * The tree traversal is looking at name p. If we have a matching entry, 852 * return it. If name p is a directory in the index, do not return 853 * anything, as we will want to match it when the traversal descends into 854 * the directory. 855 */ 856static int find_cache_pos(struct traverse_info *info, 857 const struct name_entry *p) 858{ 859 int pos; 860 struct unpack_trees_options *o = info->data; 861 struct index_state *index = o->src_index; 862 int pfxlen = info->pathlen; 863 int p_len = tree_entry_len(p); 864 865 for (pos = o->cache_bottom; pos < index->cache_nr; pos++) { 866 const struct cache_entry *ce = index->cache[pos]; 867 const char *ce_name, *ce_slash; 868 int cmp, ce_len; 869 870 if (ce->ce_flags & CE_UNPACKED) { 871 /* 872 * cache_bottom entry is already unpacked, so 873 * we can never match it; don't check it 874 * again. 
875 */ 876 if (pos == o->cache_bottom) 877 ++o->cache_bottom; 878 continue; 879 } 880 if (!ce_in_traverse_path(ce, info)) { 881 /* 882 * Check if we can skip future cache checks 883 * (because we're already past all possible 884 * entries in the traverse path). 885 */ 886 if (info->traverse_path) { 887 if (strncmp(ce->name, info->traverse_path, 888 info->pathlen) > 0) 889 break; 890 } 891 continue; 892 } 893 ce_name = ce->name + pfxlen; 894 ce_slash = strchr(ce_name, '/'); 895 if (ce_slash) 896 ce_len = ce_slash - ce_name; 897 else 898 ce_len = ce_namelen(ce) - pfxlen; 899 cmp = name_compare(p->path, p_len, ce_name, ce_len); 900 /* 901 * Exact match; if we have a directory we need to 902 * delay returning it. 903 */ 904 if (!cmp) 905 return ce_slash ? -2 - pos : pos; 906 if (0 < cmp) 907 continue; /* keep looking */ 908 /* 909 * ce_name sorts after p->path; could it be that we 910 * have files under p->path directory in the index? 911 * E.g. ce_name == "t-i", and p->path == "t"; we may 912 * have "t/a" in the index. 913 */ 914 if (p_len < ce_len && !memcmp(ce_name, p->path, p_len) && 915 ce_name[p_len] < '/') 916 continue; /* keep looking */ 917 break; 918 } 919 return -1; 920} 921 922static struct cache_entry *find_cache_entry(struct traverse_info *info, 923 const struct name_entry *p) 924{ 925 int pos = find_cache_pos(info, p); 926 struct unpack_trees_options *o = info->data; 927 928 if (0 <= pos) 929 return o->src_index->cache[pos]; 930 else 931 return NULL; 932} 933 934static void debug_path(struct traverse_info *info) 935{ 936 if (info->prev) { 937 debug_path(info->prev); 938 if (*info->prev->name.path) 939 putchar('/'); 940 } 941 printf("%s", info->name.path); 942} 943 944static void debug_name_entry(int i, struct name_entry *n) 945{ 946 printf("ent#%d %06o %s\n", i, 947 n->path ? n->mode : 0, 948 n->path ? 
n->path : "(missing)"); 949} 950 951static void debug_unpack_callback(int n, 952 unsigned long mask, 953 unsigned long dirmask, 954 struct name_entry *names, 955 struct traverse_info *info) 956{ 957 int i; 958 printf("* unpack mask %lu, dirmask %lu, cnt %d ", 959 mask, dirmask, n); 960 debug_path(info); 961 putchar('\n'); 962 for (i = 0; i < n; i++) 963 debug_name_entry(i, names + i); 964} 965 966static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info) 967{ 968 struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, }; 969 struct unpack_trees_options *o = info->data; 970 const struct name_entry *p = names; 971 972 /* Find first entry with a real name (we could use "mask" too) */ 973 while (!p->mode) 974 p++; 975 976 if (o->debug_unpack) 977 debug_unpack_callback(n, mask, dirmask, names, info); 978 979 /* Are we supposed to look at the index too? */ 980 if (o->merge) { 981 while (1) { 982 int cmp; 983 struct cache_entry *ce; 984 985 if (o->diff_index_cached) 986 ce = next_cache_entry(o); 987 else 988 ce = find_cache_entry(info, p); 989 990 if (!ce) 991 break; 992 cmp = compare_entry(ce, info, p); 993 if (cmp < 0) { 994 if (unpack_index_entry(ce, o) < 0) 995 return unpack_failed(o, NULL); 996 continue; 997 } 998 if (!cmp) { 999 if (ce_stage(ce)) {1000 /*1001 * If we skip unmerged index1002 * entries, we'll skip this1003 * entry *and* the tree1004 * entries associated with it!1005 */1006 if (o->skip_unmerged) {1007 add_same_unmerged(ce, o);1008 return mask;1009 }1010 }1011 src[0] = ce;1012 }1013 break;1014 }1015 }10161017 if (unpack_nondirectories(n, mask, dirmask, src, names, info) < 0)1018 return -1;10191020 if (o->merge && src[0]) {1021 if (ce_stage(src[0]))1022 mark_ce_used_same_name(src[0], o);1023 else1024 mark_ce_used(src[0], o);1025 }10261027 /* Now handle any directories.. 
*/1028 if (dirmask) {1029 /* special case: "diff-index --cached" looking at a tree */1030 if (o->diff_index_cached &&1031 n == 1 && dirmask == 1 && S_ISDIR(names->mode)) {1032 int matches;1033 matches = cache_tree_matches_traversal(o->src_index->cache_tree,1034 names, info);1035 /*1036 * Everything under the name matches; skip the1037 * entire hierarchy. diff_index_cached codepath1038 * special cases D/F conflicts in such a way that1039 * it does not do any look-ahead, so this is safe.1040 */1041 if (matches) {1042 o->cache_bottom += matches;1043 return mask;1044 }1045 }10461047 if (traverse_trees_recursive(n, dirmask, mask & ~dirmask,1048 names, info) < 0)1049 return -1;1050 return mask;1051 }10521053 return mask;1054}10551056static int clear_ce_flags_1(struct cache_entry **cache, int nr,1057 struct strbuf *prefix,1058 int select_mask, int clear_mask,1059 struct exclude_list *el, int defval);10601061/* Whole directory matching */1062static int clear_ce_flags_dir(struct cache_entry **cache, int nr,1063 struct strbuf *prefix,1064 char *basename,1065 int select_mask, int clear_mask,1066 struct exclude_list *el, int defval)1067{1068 struct cache_entry **cache_end;1069 int dtype = DT_DIR;1070 int ret = is_excluded_from_list(prefix->buf, prefix->len,1071 basename, &dtype, el, &the_index);1072 int rc;10731074 strbuf_addch(prefix, '/');10751076 /* If undecided, use matching result of parent dir in defval */1077 if (ret < 0)1078 ret = defval;10791080 for (cache_end = cache; cache_end != cache + nr; cache_end++) {1081 struct cache_entry *ce = *cache_end;1082 if (strncmp(ce->name, prefix->buf, prefix->len))1083 break;1084 }10851086 /*1087 * TODO: check el, if there are no patterns that may conflict1088 * with ret (iow, we know in advance the incl/excl1089 * decision for the entire directory), clear flag here without1090 * calling clear_ce_flags_1(). 
/*
 * Traverse the index, find every entry that matches according to
 * o->el. Do "ce_flags &= ~clear_mask" on those entries. Return the
 * number of traversed entries.
 *
 * If select_mask is non-zero, only entries whose ce_flags has on of
 * those bits enabled are traversed.
 *
 * cache	: pointer to an index entry
 * prefix_len	: an offset to its path
 *
 * The current path ("prefix") including the trailing '/' is
 *   cache[0]->name[0..(prefix_len-1)]
 * Top level path has prefix_len zero.
 */
static int clear_ce_flags_1(struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct exclude_list *el, int defval)
{
	struct cache_entry **cache_end = cache + nr;

	/*
	 * Process all entries that have the given prefix and meet
	 * select_mask condition
	 */
	while(cache != cache_end) {
		struct cache_entry *ce = *cache;
		const char *name, *slash;
		int len, dtype, ret;

		if (select_mask && !(ce->ce_flags & select_mask)) {
			cache++;
			continue;
		}

		if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len))
			break;

		name = ce->name + prefix->len;
		slash = strchr(name, '/');

		/* If it's a directory, try whole directory match first */
		if (slash) {
			int processed;

			len = slash - name;
			strbuf_add(prefix, name, len);

			processed = clear_ce_flags_dir(cache, cache_end - cache,
						       prefix,
						       prefix->buf + prefix->len - len,
						       select_mask, clear_mask,
						       el, defval);

			/* clear_c_f_dir eats a whole dir already? */
			if (processed) {
				cache += processed;
				strbuf_setlen(prefix, prefix->len - len);
				continue;
			}

			/* Otherwise recurse entry by entry under "name/". */
			strbuf_addch(prefix, '/');
			cache += clear_ce_flags_1(cache, cache_end - cache,
						  prefix,
						  select_mask, clear_mask, el, defval);
			strbuf_setlen(prefix, prefix->len - len - 1);
			continue;
		}

		/* Non-directory */
		dtype = ce_to_dtype(ce);
		ret = is_excluded_from_list(ce->name, ce_namelen(ce),
					    name, &dtype, el, &the_index);
		if (ret < 0)
			ret = defval;
		if (ret > 0)
			ce->ce_flags &= ~clear_mask;
		cache++;
	}
	return nr - (cache_end - cache);
}

/*
 * Entry point for the recursive walk above, starting from an empty
 * prefix.  NOTE: uses a static strbuf for the prefix, so this is not
 * reentrant.
 */
static int clear_ce_flags(struct cache_entry **cache, int nr,
			  int select_mask, int clear_mask,
			  struct exclude_list *el)
{
	static struct strbuf prefix = STRBUF_INIT;

	strbuf_reset(&prefix);

	return clear_ce_flags_1(cache, nr,
				&prefix,
				select_mask, clear_mask,
				el, 0);
}

/*
 * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout
 */
static void mark_new_skip_worktree(struct exclude_list *el,
				   struct index_state *the_index,
				   int select_flag, int skip_wt_flag)
{
	int i;

	/*
	 * 1. Pretend the narrowest worktree: only unmerged entries
	 * are checked out
	 */
	for (i = 0; i < the_index->cache_nr; i++) {
		struct cache_entry *ce = the_index->cache[i];

		if (select_flag && !(ce->ce_flags & select_flag))
			continue;

		if (!ce_stage(ce))
			ce->ce_flags |= skip_wt_flag;
		else
			ce->ce_flags &= ~skip_wt_flag;
	}

	/*
	 * 2. Widen worktree according to sparse-checkout file.
	 * Matched entries will have skip_wt_flag cleared (i.e. "in")
	 */
	clear_ce_flags(the_index->cache, the_index->cache_nr,
		       select_flag, skip_wt_flag, el);
}

static int verify_absent(const struct cache_entry *,
			 enum unpack_trees_error_types,
			 struct unpack_trees_options *);
/*
 * N-way merge "len" trees.  Returns 0 on success, -1 on failure to manipulate the
 * resulting index, -2 on failure to reflect the changes to the work tree.
 *
 * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally
 */
int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
{
	int i, ret;
	/* Shared zero-filled D/F-conflict placeholder; allocated once. */
	static struct cache_entry *dfc;
	struct exclude_list el;

	if (len > MAX_UNPACK_TREES)
		die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);

	memset(&el, 0, sizeof(el));
	if (!core_apply_sparse_checkout || !o->update)
		o->skip_sparse_checkout = 1;
	if (!o->skip_sparse_checkout) {
		char *sparse = git_pathdup("info/sparse-checkout");
		if (add_excludes_from_file_to_list(sparse, "", 0, &el, NULL) < 0)
			o->skip_sparse_checkout = 1;
		else
			o->el = &el;
		free(sparse);
	}

	memset(&o->result, 0, sizeof(o->result));
	o->result.initialized = 1;
	o->result.timestamp.sec = o->src_index->timestamp.sec;
	o->result.timestamp.nsec = o->src_index->timestamp.nsec;
	o->result.version = o->src_index->version;
	o->result.split_index = o->src_index->split_index;
	if (o->result.split_index)
		o->result.split_index->refcount++;
	hashcpy(o->result.sha1, o->src_index->sha1);
	o->merge_size = len;
	mark_all_ce_unused(o->src_index);

	/*
	 * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries
	 */
	if (!o->skip_sparse_checkout)
		mark_new_skip_worktree(o->el, o->src_index, 0, CE_NEW_SKIP_WORKTREE);

	if (!dfc)
		dfc = xcalloc(1, cache_entry_size(0));
	o->df_conflict_entry = dfc;

	if (len) {
		const char *prefix = o->prefix ? o->prefix : "";
		struct traverse_info info;

		setup_traverse_info(&info, prefix);
		info.fn = unpack_callback;
		info.data = o;
		info.show_all_errors = o->show_all_errors;
		info.pathspec = o->pathspec;

		if (o->prefix) {
			/*
			 * Unpack existing index entries that sort before the
			 * prefix the tree is spliced into.  Note that o->merge
			 * is always true in this case.
			 */
			while (1) {
				struct cache_entry *ce = next_cache_entry(o);
				if (!ce)
					break;
				if (ce_in_traverse_path(ce, &info))
					break;
				if (unpack_index_entry(ce, o) < 0)
					goto return_failed;
			}
		}

		if (traverse_trees(len, t, &info) < 0)
			goto return_failed;
	}

	/* Any left-over entries in the index? */
	if (o->merge) {
		while (1) {
			struct cache_entry *ce = next_cache_entry(o);
			if (!ce)
				break;
			if (unpack_index_entry(ce, o) < 0)
				goto return_failed;
		}
	}
	mark_all_ce_unused(o->src_index);

	if (o->trivial_merges_only && o->nontrivial_merge) {
		ret = unpack_failed(o, "Merge requires file-level merging");
		goto done;
	}

	if (!o->skip_sparse_checkout) {
		int empty_worktree = 1;

		/*
		 * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #1
		 * If they will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE
		 * so apply_sparse_checkout() won't attempt to remove it from worktree
		 */
		mark_new_skip_worktree(o->el, &o->result, CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);

		ret = 0;
		for (i = 0; i < o->result.cache_nr; i++) {
			struct cache_entry *ce = o->result.cache[i];

			/*
			 * Entries marked with CE_ADDED in merged_entry() do not have
			 * verify_absent() check (the check is effectively disabled
			 * because CE_NEW_SKIP_WORKTREE is set unconditionally).
			 *
			 * Do the real check now because we have had
			 * correct CE_NEW_SKIP_WORKTREE
			 */
			if (ce->ce_flags & CE_ADDED &&
			    verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}

			if (apply_sparse_checkout(&o->result, ce, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}
			if (!ce_skip_worktree(ce))
				empty_worktree = 0;

		}
		if (ret < 0)
			goto return_failed;
		/*
		 * Sparse checkout is meant to narrow down checkout area
		 * but it does not make sense to narrow down to empty working
		 * tree. This is usually a mistake in sparse checkout rules.
		 * Do not allow users to do that.
		 */
		if (o->result.cache_nr && empty_worktree) {
			ret = unpack_failed(o, "Sparse checkout leaves no entry on working directory");
			goto done;
		}
	}

	o->src_index = NULL;
	ret = check_updates(o) ? (-2) : 0;
	if (o->dst_index) {
		if (!ret) {
			if (!o->result.cache_tree)
				o->result.cache_tree = cache_tree();
			if (!cache_tree_fully_valid(o->result.cache_tree))
				cache_tree_update(&o->result,
						  WRITE_TREE_SILENT |
						  WRITE_TREE_REPAIR);
		}
		/* Hand the merged result over to the destination index. */
		discard_index(o->dst_index);
		*o->dst_index = o->result;
	} else {
		discard_index(&o->result);
	}

done:
	clear_exclude_list(&el);
	return ret;

return_failed:
	if (o->show_all_errors)
		display_error_msgs(o);
	mark_all_ce_unused(o->src_index);
	ret = unpack_failed(o, NULL);
	if (o->exiting_early)
		ret = 0;
	goto done;
}

/* Here come the merge functions */

/*
 * Report a would-be-overwritten entry; returns -1 when running gently,
 * otherwise records/emits the ERROR_WOULD_OVERWRITE message.
 */
static int reject_merge(const struct cache_entry *ce,
			struct unpack_trees_options *o)
{
	return o->gently ? -1 :
		add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
}
-1 :1420 add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);1421}14221423static int same(const struct cache_entry *a, const struct cache_entry *b)1424{1425 if (!!a != !!b)1426 return 0;1427 if (!a && !b)1428 return 1;1429 if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED)1430 return 0;1431 return a->ce_mode == b->ce_mode &&1432 !oidcmp(&a->oid, &b->oid);1433}143414351436/*1437 * When a CE gets turned into an unmerged entry, we1438 * want it to be up-to-date1439 */1440static int verify_uptodate_1(const struct cache_entry *ce,1441 struct unpack_trees_options *o,1442 enum unpack_trees_error_types error_type)1443{1444 struct stat st;14451446 if (o->index_only)1447 return 0;14481449 /*1450 * CE_VALID and CE_SKIP_WORKTREE cheat, we better check again1451 * if this entry is truly up-to-date because this file may be1452 * overwritten.1453 */1454 if ((ce->ce_flags & CE_VALID) || ce_skip_worktree(ce))1455 ; /* keep checking */1456 else if (o->reset || ce_uptodate(ce))1457 return 0;14581459 if (!lstat(ce->name, &st)) {1460 int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE;1461 unsigned changed = ie_match_stat(o->src_index, ce, &st, flags);14621463 if (submodule_from_ce(ce)) {1464 int r = check_submodule_move_head(ce,1465 "HEAD", oid_to_hex(&ce->oid), o);1466 if (r)1467 return o->gently ? -1 :1468 add_rejected_path(o, error_type, ce->name);1469 return 0;1470 }14711472 if (!changed)1473 return 0;1474 /*1475 * Historic default policy was to allow submodule to be out1476 * of sync wrt the superproject index. If the submodule was1477 * not considered interesting above, we don't care here.1478 */1479 if (S_ISGITLINK(ce->ce_mode))1480 return 0;14811482 errno = 0;1483 }1484 if (errno == ENOENT)1485 return 0;1486 return o->gently ? 
-1 :1487 add_rejected_path(o, error_type, ce->name);1488}14891490static int verify_uptodate(const struct cache_entry *ce,1491 struct unpack_trees_options *o)1492{1493 if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))1494 return 0;1495 return verify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);1496}14971498static int verify_uptodate_sparse(const struct cache_entry *ce,1499 struct unpack_trees_options *o)1500{1501 return verify_uptodate_1(ce, o, ERROR_SPARSE_NOT_UPTODATE_FILE);1502}15031504static void invalidate_ce_path(const struct cache_entry *ce,1505 struct unpack_trees_options *o)1506{1507 if (!ce)1508 return;1509 cache_tree_invalidate_path(o->src_index, ce->name);1510 untracked_cache_invalidate_path(o->src_index, ce->name);1511}15121513/*1514 * Check that checking out ce->sha1 in subdir ce->name is not1515 * going to overwrite any working files.1516 *1517 * Currently, git does not checkout subprojects during a superproject1518 * checkout, so it is not going to overwrite anything.1519 */1520static int verify_clean_submodule(const char *old_sha1,1521 const struct cache_entry *ce,1522 enum unpack_trees_error_types error_type,1523 struct unpack_trees_options *o)1524{1525 if (!submodule_from_ce(ce))1526 return 0;15271528 return check_submodule_move_head(ce, old_sha1,1529 oid_to_hex(&ce->oid), o);1530}15311532static int verify_clean_subdirectory(const struct cache_entry *ce,1533 enum unpack_trees_error_types error_type,1534 struct unpack_trees_options *o)1535{1536 /*1537 * we are about to extract "ce->name"; we would not want to lose1538 * anything in the existing directory there.1539 */1540 int namelen;1541 int i;1542 struct dir_struct d;1543 char *pathbuf;1544 int cnt = 0;15451546 if (S_ISGITLINK(ce->ce_mode)) {1547 unsigned char sha1[20];1548 int sub_head = resolve_gitlink_ref(ce->name, "HEAD", sha1);1549 /*1550 * If we are not going to update the submodule, then1551 * we don't care.1552 */1553 if (!sub_head && !hashcmp(sha1, ce->oid.hash))1554 
/*
 * This gets called when there was no index entry for the tree entry 'dst',
 * but we found a file in the working tree that 'lstat()' said was fine,
 * and we're on a case-insensitive filesystem.
 *
 * See if we can find a case-insensitive match in the index that also
 * matches the stat information, and assume it's that other file!
 */
static int icase_exists(struct unpack_trees_options *o, const char *name, int len, struct stat *st)
{
	const struct cache_entry *src;

	src = index_file_exists(o->src_index, name, len, 1);
	return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);
}

/*
 * Decide whether the working tree object at "name" (with stat data *st)
 * may be removed/overwritten.  Returns 0 when it is OK, -1 when running
 * gently, otherwise records the rejection message for error_type.
 */
static int check_ok_to_remove(const char *name, int len, int dtype,
			      const struct cache_entry *ce, struct stat *st,
			      enum unpack_trees_error_types error_type,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *result;

	/*
	 * It may be that the 'lstat()' succeeded even though
	 * target 'ce' was absent, because there is an old
	 * entry that is different only in case..
	 *
	 * Ignore that lstat() if it matches.
	 */
	if (ignore_case && icase_exists(o, name, len, st))
		return 0;

	if (o->dir &&
	    is_excluded(o->dir, name, &dtype))
		/*
		 * ce->name is explicitly excluded, so it is Ok to
		 * overwrite it.
		 */
		return 0;
	if (S_ISDIR(st->st_mode)) {
		/*
		 * We are checking out path "foo" and
		 * found "foo/." in the working tree.
		 * This is tricky -- if we have modified
		 * files that are in "foo/" we would lose
		 * them.
		 */
		if (verify_clean_subdirectory(ce, error_type, o) < 0)
			return -1;
		return 0;
	}

	/*
	 * The previous round may already have decided to
	 * delete this path, which is in a subdirectory that
	 * is being replaced with a blob.
	 */
	result = index_file_exists(&o->result, name, len, 0);
	if (result) {
		if (result->ce_flags & CE_REMOVE)
			return 0;
	}

	return o->gently ? -1 :
		add_rejected_path(o, error_type, name);
}

/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked, unless it is ignored.
 */
static int verify_absent_1(const struct cache_entry *ce,
			   enum unpack_trees_error_types error_type,
			   struct unpack_trees_options *o)
{
	int len;
	struct stat st;

	if (o->index_only || o->reset || !o->update)
		return 0;

	/*
	 * len > 0: a leading path component of ce->name exists as a
	 * non-directory on disk; len == 0: the leading path is clear.
	 */
	len = check_leading_path(ce->name, ce_namelen(ce));
	if (!len)
		return 0;
	else if (len > 0) {
		char *path;
		int ret;

		path = xmemdupz(ce->name, len);
		if (lstat(path, &st))
			ret = error_errno("cannot stat '%s'", path);
		else {
			if (submodule_from_ce(ce))
				ret = check_submodule_move_head(ce,
								oid_to_hex(&ce->oid),
								NULL, o);
			else
				ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL,
							 &st, error_type, o);
		}
		free(path);
		return ret;
	} else if (lstat(ce->name, &st)) {
		if (errno != ENOENT)
			return error_errno("cannot stat '%s'", ce->name);
		return 0;
	} else {
		if (submodule_from_ce(ce))
			return check_submodule_move_head(ce, oid_to_hex(&ce->oid),
							 NULL, o);

		return check_ok_to_remove(ce->name, ce_namelen(ce),
					  ce_to_dtype(ce), ce, &st,
					  error_type, o);
	}
}

/*
 * verify_absent_1() wrapper; entries that will be outside the sparse
 * checkout (CE_NEW_SKIP_WORKTREE) are exempt from the check.
 */
static int verify_absent(const struct cache_entry *ce,
			 enum unpack_trees_error_types error_type,
			 struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_absent_1(ce, error_type, o);
}
*o)1724{1725 if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))1726 return 0;1727 return verify_absent_1(ce, error_type, o);1728}17291730static int verify_absent_sparse(const struct cache_entry *ce,1731 enum unpack_trees_error_types error_type,1732 struct unpack_trees_options *o)1733{1734 enum unpack_trees_error_types orphaned_error = error_type;1735 if (orphaned_error == ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN)1736 orphaned_error = ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN;17371738 return verify_absent_1(ce, orphaned_error, o);1739}17401741static int merged_entry(const struct cache_entry *ce,1742 const struct cache_entry *old,1743 struct unpack_trees_options *o)1744{1745 int update = CE_UPDATE;1746 struct cache_entry *merge = dup_entry(ce);17471748 if (!old) {1749 /*1750 * New index entries. In sparse checkout, the following1751 * verify_absent() will be delayed until after1752 * traverse_trees() finishes in unpack_trees(), then:1753 *1754 * - CE_NEW_SKIP_WORKTREE will be computed correctly1755 * - verify_absent() be called again, this time with1756 * correct CE_NEW_SKIP_WORKTREE1757 *1758 * verify_absent() call here does nothing in sparse1759 * checkout (i.e. 
o->skip_sparse_checkout == 0)1760 */1761 update |= CE_ADDED;1762 merge->ce_flags |= CE_NEW_SKIP_WORKTREE;17631764 if (verify_absent(merge,1765 ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {1766 free(merge);1767 return -1;1768 }1769 invalidate_ce_path(merge, o);17701771 if (submodule_from_ce(ce)) {1772 int ret = check_submodule_move_head(ce, NULL,1773 oid_to_hex(&ce->oid),1774 o);1775 if (ret)1776 return ret;1777 }17781779 } else if (!(old->ce_flags & CE_CONFLICTED)) {1780 /*1781 * See if we can re-use the old CE directly?1782 * That way we get the uptodate stat info.1783 *1784 * This also removes the UPDATE flag on a match; otherwise1785 * we will end up overwriting local changes in the work tree.1786 */1787 if (same(old, merge)) {1788 copy_cache_entry(merge, old);1789 update = 0;1790 } else {1791 if (verify_uptodate(old, o)) {1792 free(merge);1793 return -1;1794 }1795 /* Migrate old flags over */1796 update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);1797 invalidate_ce_path(old, o);1798 }17991800 if (submodule_from_ce(ce)) {1801 int ret = check_submodule_move_head(ce, oid_to_hex(&old->oid),1802 oid_to_hex(&ce->oid),1803 o);1804 if (ret)1805 return ret;1806 }1807 } else {1808 /*1809 * Previously unmerged entry left as an existence1810 * marker by read_index_unmerged();1811 */1812 invalidate_ce_path(old, o);1813 }18141815 do_add_entry(o, merge, update, CE_STAGEMASK);1816 return 1;1817}18181819static int deleted_entry(const struct cache_entry *ce,1820 const struct cache_entry *old,1821 struct unpack_trees_options *o)1822{1823 /* Did it exist in the index? 
/* Keep "ce" as-is in the result index. Always returns 1. */
static int keep_entry(const struct cache_entry *ce,
		      struct unpack_trees_options *o)
{
	add_entry(o, ce, 0, 0);
	return 1;
}

#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ce->ce_mode,
			oid_to_hex(&ce->oid),
			ce_stage(ce),
			ce->name);
}
#endif

/*
 * Three-way merge of one path.  stages[0] is the index entry,
 * stages[o->head_idx] the HEAD entry, stages[o->head_idx + 1] the
 * remote entry; the entries in between are ancestors.  The "#N" labels
 * below presumably refer to the case table in the git-read-tree
 * documentation -- TODO confirm.  Returns the number of entries kept,
 * the result of merged_entry()/deleted_entry(), or a negative value on
 * failure.
 */
int threeway_merge(const struct cache_entry * const *stages,
		   struct unpack_trees_options *o)
{
	const struct cache_entry *index;
	const struct cache_entry *head;
	const struct cache_entry *remote = stages[o->head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	for (i = 1; i < o->head_idx; i++) {
		if (!stages[i] || stages[i] == o->df_conflict_entry)
			any_anc_missing = 1;
		else
			no_anc_exists = 0;
	}

	index = stages[0];
	head = stages[o->head_idx];

	if (head == o->df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == o->df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	/*
	 * First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < o->head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/*
	 * We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			return reject_merge(index, o);
		return merged_entry(remote, index, o);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head))
		return reject_merge(index, o);

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index, o);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index, o);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/*
	 * Under the "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (o->aggressive) {
		int head_deleted = !head;
		int remote_deleted = !remote;
		const struct cache_entry *ce = NULL;

		if (index)
			ce = index;
		else if (head)
			ce = head;
		else if (remote)
			ce = remote;
		else {
			for (i = 1; i < o->head_idx; i++) {
				if (stages[i] && stages[i] != o->df_conflict_entry) {
					ce = stages[i];
					break;
				}
			}
		}

		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index, o);
			if (ce && !head_deleted) {
				if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
					return -1;
			}
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index, o);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		if (verify_uptodate(index, o))
			return -1;
	}

	o->nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #10, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		for (i = 1; i < o->head_idx; i++) {
			if (stages[i] && stages[i] != o->df_conflict_entry) {
				keep_entry(stages[i], o);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head, o); }
	if (remote) { count += keep_entry(remote, o); }
	return count;
}
/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast-forward", favoring a successful merge
 * over a merge failure when it makes sense.  For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 *
 */
int twoway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *current = src[0];
	const struct cache_entry *oldtree = src[1];
	const struct cache_entry *newtree = src[2];

	if (o->merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     o->merge_size);

	if (oldtree == o->df_conflict_entry)
		oldtree = NULL;
	if (newtree == o->df_conflict_entry)
		newtree = NULL;

	if (current) {
		if (current->ce_flags & CE_CONFLICTED) {
			if (same(oldtree, newtree) || o->reset) {
				if (!newtree)
					return deleted_entry(current, current, o);
				else
					return merged_entry(newtree, current, o);
			}
			return reject_merge(current, o);
		} else if ((!oldtree && !newtree) || /* 4 and 5 */
			 (!oldtree && newtree &&
			  same(current, newtree)) || /* 6 and 7 */
			 (oldtree && newtree &&
			  same(oldtree, newtree)) || /* 14 and 15 */
			 (oldtree && newtree &&
			  !same(oldtree, newtree) && /* 18 and 19 */
			  same(current, newtree))) {
			return keep_entry(current, o);
		} else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current, o);
		} else if (oldtree && newtree &&
			 same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current, o);
		} else
			return reject_merge(current, o);
	}
	else if (newtree) {
		if (oldtree && !o->initial_checkout) {
			/*
			 * deletion of the path was staged;
			 */
			if (same(oldtree, newtree))
				return 1;
			return reject_merge(oldtree, o);
		}
		return merged_entry(newtree, current, o);
	}
	return deleted_entry(oldtree, current, o);
}

/*
 * Bind merge.
 *
 * Keep the index entries at stage0, collapse stage1 but make sure
 * stage0 does not have anything there.
 */
int bind_merge(const struct cache_entry * const *src,
	       struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a bind merge of %d trees",
			     o->merge_size);
	if (a && old)
		return o->gently ? -1 :
			error(ERRORMSG(o, ERROR_BIND_OVERLAP),
			      super_prefixed(a->name),
			      super_prefixed(old->name));
	if (!a)
		return keep_entry(old, o);
	else
		return merged_entry(a, NULL, o);
}

/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
int oneway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     o->merge_size);

	if (!a || a == o->df_conflict_entry)
		return deleted_entry(old, old, o);

	if (old && same(old, a)) {
		int update = 0;
		/*
		 * On reset, re-check the file on disk so stale stat data
		 * does not suppress a needed checkout.
		 */
		if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old)) {
			struct stat st;
			if (lstat(old->name, &st) ||
			    ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE))
				update |= CE_UPDATE;
		}
		add_entry(o, old, update, 0);
		return 0;
	}
	return merged_entry(a, old, o);
}