#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "config.h"
#include "dir.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "progress.h"
#include "refs.h"
#include "attr.h"
#include "split-index.h"
#include "submodule.h"
#include "submodule-config.h"

/*
 * Error messages expected by scripts out of plumbing commands such as
 * read-tree.  Non-scripted Porcelain is not required to use these messages
 * and in fact is encouraged to reword them to better suit its particular
 * situation.  See how "git checkout" and "git merge" replace
 * them using setup_unpack_trees_porcelain(), for example.
 */
static const char *unpack_plumbing_errors[NB_UNPACK_TREES_ERROR_TYPES] = {
	/* ERROR_WOULD_OVERWRITE */
	"Entry '%s' would be overwritten by merge. Cannot merge.",

	/* ERROR_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot merge.",

	/* ERROR_NOT_UPTODATE_DIR */
	"Updating '%s' would lose untracked files in it",

	/* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */
	"Untracked working tree file '%s' would be overwritten by merge.",

	/* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */
	"Untracked working tree file '%s' would be removed by merge.",

	/* ERROR_BIND_OVERLAP */
	"Entry '%s' overlaps with '%s'. Cannot bind.",

	/* ERROR_SPARSE_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot update sparse checkout.",

	/* ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN */
	"Working tree file '%s' would be overwritten by sparse checkout update.",

	/* ERROR_WOULD_LOSE_ORPHANED_REMOVED */
	"Working tree file '%s' would be removed by sparse checkout update.",

	/* ERROR_WOULD_LOSE_SUBMODULE */
	"Submodule '%s' cannot checkout new HEAD.",
};

#define ERRORMSG(o,type) \
	( ((o) && (o)->msgs[(type)]) \
	  ? ((o)->msgs[(type)]) \
	  : (unpack_plumbing_errors[(type)]) )

static const char *super_prefixed(const char *path)
{
	/*
	 * It is necessary and sufficient to have two static buffers
	 * here, as the return value of this function is fed to
	 * error() using the unpack_*_errors[] templates we see above.
	 */
	static struct strbuf buf[2] = {STRBUF_INIT, STRBUF_INIT};
	static int super_prefix_len = -1;
	static unsigned idx = ARRAY_SIZE(buf) - 1;

	if (super_prefix_len < 0) {
		const char *super_prefix = get_super_prefix();
		if (!super_prefix) {
			super_prefix_len = 0;
		} else {
			int i;
			for (i = 0; i < ARRAY_SIZE(buf); i++)
				strbuf_addstr(&buf[i], super_prefix);
			super_prefix_len = buf[0].len;
		}
	}

	if (!super_prefix_len)
		return path;

	if (++idx >= ARRAY_SIZE(buf))
		idx = 0;

	strbuf_setlen(&buf[idx], super_prefix_len);
	strbuf_addstr(&buf[idx], path);

	return buf[idx].buf;
}
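
/*
 * Illustration (added for this write-up, not part of the original code):
 * ERRORMSG() and super_prefixed() are meant to be combined when reporting
 * a rejected path.  Assuming the process was started as
 * "git --super-prefix=sub/ ...", a hypothetical call such as
 *
 *	error(ERRORMSG(o, ERROR_WOULD_OVERWRITE), super_prefixed("foo.c"));
 *
 * picks either the caller-supplied porcelain message or the plumbing
 * default above, and prints the path as "sub/foo.c" so the message makes
 * sense from the superproject's point of view.
 */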

void setup_unpack_trees_porcelain(struct unpack_trees_options *opts,
				  const char *cmd)
{
	int i;
	const char **msgs = opts->msgs;
	const char *msg;

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by checkout:\n%%s"
			  "Please commit your changes or stash them before you switch branches.")
		      : _("Your local changes to the following files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by merge:\n%%s"
			  "Please commit your changes or stash them before you merge.")
		      : _("Your local changes to the following files would be overwritten by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by %s:\n%%s"
			  "Please commit your changes or stash them before you %s.")
		      : _("Your local changes to the following files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] =
		xstrfmt(msg, cmd, cmd);

	msgs[ERROR_NOT_UPTODATE_DIR] =
		_("Updating the following directories would lose untracked files in them:\n%s");

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be removed by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be removed by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be removed by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = xstrfmt(msg, cmd, cmd);

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be overwritten by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = xstrfmt(msg, cmd, cmd);

	/*
	 * Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we
	 * cannot easily display it as a list.
	 */
	msgs[ERROR_BIND_OVERLAP] = _("Entry '%s' overlaps with '%s'. Cannot bind.");

	msgs[ERROR_SPARSE_NOT_UPTODATE_FILE] =
		_("Cannot update sparse checkout: the following entries are not up-to-date:\n%s");
	msgs[ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN] =
		_("The following working tree files would be overwritten by sparse checkout update:\n%s");
	msgs[ERROR_WOULD_LOSE_ORPHANED_REMOVED] =
		_("The following working tree files would be removed by sparse checkout update:\n%s");
	msgs[ERROR_WOULD_LOSE_SUBMODULE] =
		_("Cannot update submodule:\n%s");

	opts->show_all_errors = 1;
	/* rejected paths may not have a static buffer */
	for (i = 0; i < ARRAY_SIZE(opts->unpack_rejects); i++)
		opts->unpack_rejects[i].strdup_strings = 1;
}

static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
			unsigned int set, unsigned int clear)
{
	clear |= CE_HASHED;

	if (set & CE_REMOVE)
		set |= CE_WT_REMOVE;

	ce->ce_flags = (ce->ce_flags & ~clear) | set;
	return add_index_entry(&o->result, ce,
			       ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
}

static struct cache_entry *dup_entry(const struct cache_entry *ce)
{
	unsigned int size = ce_size(ce);
	struct cache_entry *new = xmalloc(size);

	memcpy(new, ce, size);
	return new;
}

static void add_entry(struct unpack_trees_options *o,
		      const struct cache_entry *ce,
		      unsigned int set, unsigned int clear)
{
	do_add_entry(o, dup_entry(ce), set, clear);
}

/*
 * Add an error message on path <path>
 * corresponding to the type <e>, indicating whether it
 * should be displayed immediately or queued for porcelain output.
 */
static int add_rejected_path(struct unpack_trees_options *o,
			     enum unpack_trees_error_types e,
			     const char *path)
{
	if (!o->show_all_errors)
		return error(ERRORMSG(o, e), super_prefixed(path));

	/*
	 * Otherwise, insert in a list for future display by
	 * display_error_msgs()
	 */
	string_list_append(&o->unpack_rejects[e], path);
	return -1;
}

/*
 * Display all the error messages stored in a nice way.
 */
static void display_error_msgs(struct unpack_trees_options *o)
{
	int e, i;
	int something_displayed = 0;
	for (e = 0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) {
		struct string_list *rejects = &o->unpack_rejects[e];
		if (rejects->nr > 0) {
			struct strbuf path = STRBUF_INIT;
			something_displayed = 1;
			for (i = 0; i < rejects->nr; i++)
				strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
			error(ERRORMSG(o, e), super_prefixed(path.buf));
			strbuf_release(&path);
		}
		string_list_clear(rejects, 0);
	}
	if (something_displayed)
		fprintf(stderr, _("Aborting\n"));
}

static int check_submodule_move_head(const struct cache_entry *ce,
				     const char *old_id,
				     const char *new_id,
				     struct unpack_trees_options *o)
{
	unsigned flags = SUBMODULE_MOVE_HEAD_DRY_RUN;
	const struct submodule *sub = submodule_from_ce(ce);
	if (!sub)
		return 0;

	if (o->reset)
		flags |= SUBMODULE_MOVE_HEAD_FORCE;

	switch (sub->update_strategy.type) {
	case SM_UPDATE_UNSPECIFIED:
	case SM_UPDATE_CHECKOUT:
		if (submodule_move_head(ce->name, old_id, new_id, flags))
			return o->gently ? -1 :
				add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name);
		return 0;
	case SM_UPDATE_NONE:
		return 0;
	case SM_UPDATE_REBASE:
	case SM_UPDATE_MERGE:
	case SM_UPDATE_COMMAND:
	default:
		warning(_("submodule update strategy not supported for submodule '%s'"), ce->name);
		return -1;
	}
}

static void reload_gitmodules_file(struct index_state *index,
				   struct checkout *state)
{
	int i;
	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];
		if (ce->ce_flags & CE_UPDATE) {
			int r = strcmp(ce->name, GITMODULES_FILE);
			if (r < 0)
				continue;
			else if (r == 0) {
				submodule_free();
				checkout_entry(ce, state, NULL);
				gitmodules_config();
				git_config(submodule_config, NULL);
			} else
				break;
		}
	}
}

/*
 * Unlink the last component and schedule the leading directories for
 * removal, such that empty directories get removed.
 */
static void unlink_entry(const struct cache_entry *ce)
{
	const struct submodule *sub = submodule_from_ce(ce);
	if (sub) {
		switch (sub->update_strategy.type) {
		case SM_UPDATE_UNSPECIFIED:
		case SM_UPDATE_CHECKOUT:
		case SM_UPDATE_REBASE:
		case SM_UPDATE_MERGE:
			/* state.force is set at the caller. */
			submodule_move_head(ce->name, "HEAD", NULL,
					    SUBMODULE_MOVE_HEAD_FORCE);
			break;
		case SM_UPDATE_NONE:
		case SM_UPDATE_COMMAND:
			return; /* Do not touch the submodule. */
		}
	}
	if (!check_leading_path(ce->name, ce_namelen(ce)))
		return;
	if (remove_or_warn(ce->ce_mode, ce->name))
		return;
	schedule_dir_for_removal(ce->name, ce_namelen(ce));
}

static struct progress *get_progress(struct unpack_trees_options *o)
{
	unsigned cnt = 0, total = 0;
	struct index_state *index = &o->result;

	if (!o->update || !o->verbose_update)
		return NULL;

	for (; cnt < index->cache_nr; cnt++) {
		const struct cache_entry *ce = index->cache[cnt];
		if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE))
			total++;
	}

	return start_delayed_progress(_("Checking out files"), total);
}

static int check_updates(struct unpack_trees_options *o)
{
	unsigned cnt = 0;
	int errs = 0;
	struct progress *progress = NULL;
	struct index_state *index = &o->result;
	struct checkout state = CHECKOUT_INIT;
	int i;

	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;
	state.istate = index;

	progress = get_progress(o);

	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKOUT, index);
	for (i = 0; i < index->cache_nr; i++) {
		const struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_WT_REMOVE) {
			display_progress(progress, ++cnt);
			if (o->update && !o->dry_run)
				unlink_entry(ce);
		}
	}
	remove_marked_cache_entries(index);
	remove_scheduled_dirs();

	if (should_update_submodules() && o->update && !o->dry_run)
		reload_gitmodules_file(index, &state);

	enable_delayed_checkout(&state);
	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_UPDATE) {
			if (ce->ce_flags & CE_WT_REMOVE)
				die("BUG: both update and delete flags are set on %s",
				    ce->name);
			display_progress(progress, ++cnt);
			ce->ce_flags &= ~CE_UPDATE;
			if (o->update && !o->dry_run) {
				errs |= checkout_entry(ce, &state, NULL);
			}
		}
	}
	errs |= finish_delayed_checkout(&state);
	stop_progress(&progress);
	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKIN, NULL);
	return errs != 0;
}
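
/*
 * Note added for clarity (not in the original file): in check_updates()
 * above, all CE_WT_REMOVE entries and the directories scheduled for
 * removal are handled before any CE_UPDATE entry is checked out.  One
 * practical consequence is that a path which turns from a file into a
 * directory (or vice versa) is removed before its replacement is
 * written.
 */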

static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o);
static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types,
				struct unpack_trees_options *o);

static int apply_sparse_checkout(struct index_state *istate,
				 struct cache_entry *ce,
				 struct unpack_trees_options *o)
{
	int was_skip_worktree = ce_skip_worktree(ce);

	if (ce->ce_flags & CE_NEW_SKIP_WORKTREE)
		ce->ce_flags |= CE_SKIP_WORKTREE;
	else
		ce->ce_flags &= ~CE_SKIP_WORKTREE;
	if (was_skip_worktree != ce_skip_worktree(ce)) {
		ce->ce_flags |= CE_UPDATE_IN_BASE;
		istate->cache_changed |= CE_ENTRY_CHANGED;
	}

	/*
	 * if (!was_skip_worktree && !ce_skip_worktree()) {
	 *	This is perfectly normal. Move on;
	 * }
	 */

	/*
	 * Merge strategies may set CE_UPDATE|CE_REMOVE outside the checkout
	 * area as a result of ce_skip_worktree() shortcuts in
	 * verify_absent() and verify_uptodate().
	 * Make sure they don't modify the worktree if they are already
	 * outside the checkout area.
	 */
	if (was_skip_worktree && ce_skip_worktree(ce)) {
		ce->ce_flags &= ~CE_UPDATE;

		/*
		 * By default, when CE_REMOVE is on, CE_WT_REMOVE is also
		 * on to get that file removed from both index and worktree.
		 * If that file is already outside the worktree area, don't
		 * bother removing it.
		 */
		if (ce->ce_flags & CE_REMOVE)
			ce->ce_flags &= ~CE_WT_REMOVE;
	}

	if (!was_skip_worktree && ce_skip_worktree(ce)) {
		/*
		 * If CE_UPDATE is set, verify_uptodate() must have been called
		 * already; also stat info may have been lost after merged_entry(),
		 * so calling verify_uptodate() again may fail.
		 */
		if (!(ce->ce_flags & CE_UPDATE) && verify_uptodate_sparse(ce, o))
			return -1;
		ce->ce_flags |= CE_WT_REMOVE;
		ce->ce_flags &= ~CE_UPDATE;
	}
	if (was_skip_worktree && !ce_skip_worktree(ce)) {
		if (verify_absent_sparse(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o))
			return -1;
		ce->ce_flags |= CE_UPDATE;
	}
	return 0;
}

static inline int call_unpack_fn(const struct cache_entry * const *src,
				 struct unpack_trees_options *o)
{
	int ret = o->fn(src, o);
	if (ret > 0)
		ret = 0;
	return ret;
}

static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o)
{
	ce->ce_flags |= CE_UNPACKED;

	if (o->cache_bottom < o->src_index->cache_nr &&
	    o->src_index->cache[o->cache_bottom] == ce) {
		int bottom = o->cache_bottom;
		while (bottom < o->src_index->cache_nr &&
		       o->src_index->cache[bottom]->ce_flags & CE_UNPACKED)
			bottom++;
		o->cache_bottom = bottom;
	}
}

static void mark_all_ce_unused(struct index_state *index)
{
	int i;
	for (i = 0; i < index->cache_nr; i++)
		index->cache[i]->ce_flags &= ~(CE_UNPACKED | CE_ADDED | CE_NEW_SKIP_WORKTREE);
}

static int locate_in_src_index(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);
	if (pos < 0)
		pos = -1 - pos;
	return pos;
}

/*
 * We call unpack_index_entry() with an unmerged cache entry
 * only in diff-index, and it wants a single callback.  Skip
 * the other unmerged entries with the same name.
 */
static void mark_ce_used_same_name(struct cache_entry *ce,
				   struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos;

	for (pos = locate_in_src_index(ce, o); pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		mark_ce_used(next, o);
	}
}

static struct cache_entry *next_cache_entry(struct unpack_trees_options *o)
{
	const struct index_state *index = o->src_index;
	int pos = o->cache_bottom;

	while (pos < index->cache_nr) {
		struct cache_entry *ce = index->cache[pos];
		if (!(ce->ce_flags & CE_UNPACKED))
			return ce;
		pos++;
	}
	return NULL;
}

static void add_same_unmerged(const struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);

	if (0 <= pos)
		die("programming error in a caller of mark_ce_used_same_name");
	for (pos = -pos - 1; pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		add_entry(o, next, 0, 0);
		mark_ce_used(next, o);
	}
}

static int unpack_index_entry(struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	int ret;

	src[0] = ce;

	mark_ce_used(ce, o);
	if (ce_stage(ce)) {
		if (o->skip_unmerged) {
			add_entry(o, ce, 0, 0);
			return 0;
		}
	}
	ret = call_unpack_fn(src, o);
	if (ce_stage(ce))
		mark_ce_used_same_name(ce, o);
	return ret;
}

static int find_cache_pos(struct traverse_info *, const struct name_entry *);

static void restore_cache_bottom(struct traverse_info *info, int bottom)
{
	struct unpack_trees_options *o = info->data;

	if (o->diff_index_cached)
		return;
	o->cache_bottom = bottom;
}

static int switch_cache_bottom(struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int ret, pos;

	if (o->diff_index_cached)
		return 0;
	ret = o->cache_bottom;
	pos = find_cache_pos(info->prev, &info->name);

	if (pos < -1)
		o->cache_bottom = -2 - pos;
	else if (pos < 0)
		o->cache_bottom = o->src_index->cache_nr;
	return ret;
}

static inline int are_same_oid(struct name_entry *name_j, struct name_entry *name_k)
{
	return name_j->oid && name_k->oid && !oidcmp(name_j->oid, name_k->oid);
}

static int traverse_trees_recursive(int n, unsigned long dirmask,
				    unsigned long df_conflicts,
				    struct name_entry *names,
				    struct traverse_info *info)
{
	int i, ret, bottom;
	int nr_buf = 0;
	struct tree_desc t[MAX_UNPACK_TREES];
	void *buf[MAX_UNPACK_TREES];
	struct traverse_info newinfo;
	struct name_entry *p;

	p = names;
	while (!p->mode)
		p++;

	newinfo = *info;
	newinfo.prev = info;
	newinfo.pathspec = info->pathspec;
	newinfo.name = *p;
	newinfo.pathlen += tree_entry_len(p) + 1;
	newinfo.df_conflicts |= df_conflicts;

	/*
	 * Fetch the tree from the ODB for each peer directory in the
	 * n commits.
	 *
	 * For 2- and 3-way traversals, we try to avoid hitting the
	 * ODB twice for the same OID.  This should yield a nice speed
	 * up in checkouts and merges when the commits are similar.
	 *
	 * We don't bother doing the full O(n^2) search for larger n,
	 * because wider traversals don't happen that often and we
	 * avoid the search setup.
	 *
	 * When 2 peer OIDs are the same, we just copy the tree
	 * descriptor data.  This implicitly borrows the buffer
	 * data from the earlier cell.
	 */
	for (i = 0; i < n; i++, dirmask >>= 1) {
		if (i > 0 && are_same_oid(&names[i], &names[i - 1]))
			t[i] = t[i - 1];
		else if (i > 1 && are_same_oid(&names[i], &names[i - 2]))
			t[i] = t[i - 2];
		else {
			const struct object_id *oid = NULL;
			if (dirmask & 1)
				oid = names[i].oid;
			buf[nr_buf++] = fill_tree_descriptor(t + i, oid);
		}
	}

	bottom = switch_cache_bottom(&newinfo);
	ret = traverse_trees(n, t, &newinfo);
	restore_cache_bottom(&newinfo, bottom);

	for (i = 0; i < nr_buf; i++)
		free(buf[i]);

	return ret;
}

/*
 * Compare the traverse-path to the cache entry without actually
 * having to generate the textual representation of the traverse
 * path.
 *
 * NOTE! This *only* compares up to the size of the traverse path
 * itself - the caller needs to do the final check for the cache
 * entry having more data at the end!
 */
static int do_compare_entry_piecewise(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;

	if (info->prev) {
		int cmp = do_compare_entry_piecewise(ce, info->prev,
						     &info->name);
		if (cmp)
			return cmp;
	}
	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	/* If ce_len < pathlen then we must have previously hit "name == directory" entry */
	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}

static int do_compare_entry(const struct cache_entry *ce,
			    const struct traverse_info *info,
			    const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;
	int cmp;

	/*
	 * If we have not precomputed the traverse path, it is quicker
	 * to avoid doing so.  But if we have precomputed it,
	 * it is quicker to use the precomputed version.
	 */
	if (!info->traverse_path)
		return do_compare_entry_piecewise(ce, info, n);

	cmp = strncmp(ce->name, info->traverse_path, info->pathlen);
	if (cmp)
		return cmp;

	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}

static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int cmp = do_compare_entry(ce, info, n);
	if (cmp)
		return cmp;

	/*
	 * Even if the beginning compared identically, the ce should
	 * compare as bigger than a directory leading up to it!
	 */
	return ce_namelen(ce) > traverse_path_len(info, n);
}

static int ce_in_traverse_path(const struct cache_entry *ce,
			       const struct traverse_info *info)
{
	if (!info->prev)
		return 1;
	if (do_compare_entry(ce, info->prev, &info->name))
		return 0;
	/*
	 * If ce (blob) is the same name as the path (which is a tree
	 * we will be descending into), it won't be inside it.
	 */
	return (info->pathlen < ce_namelen(ce));
}

static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage)
{
	int len = traverse_path_len(info, n);
	struct cache_entry *ce = xcalloc(1, cache_entry_size(len));

	ce->ce_mode = create_ce_mode(n->mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	oidcpy(&ce->oid, n->oid);
	make_traverse_path(ce->name, info, n);

	return ce;
}

static int unpack_nondirectories(int n, unsigned long mask,
				 unsigned long dirmask,
				 struct cache_entry **src,
				 const struct name_entry *names,
				 const struct traverse_info *info)
{
	int i;
	struct unpack_trees_options *o = info->data;
	unsigned long conflicts = info->df_conflicts | dirmask;

	/* Do we have *only* directories? Nothing to do */
	if (mask == dirmask && !src[0])
		return 0;

	/*
	 * Ok, we've filled in up to any potential index entry in src[0],
	 * now do the rest.
	 */
	for (i = 0; i < n; i++) {
		int stage;
		unsigned int bit = 1ul << i;
		if (conflicts & bit) {
			src[i + o->merge] = o->df_conflict_entry;
			continue;
		}
		if (!(mask & bit))
			continue;
		if (!o->merge)
			stage = 0;
		else if (i + 1 < o->head_idx)
			stage = 1;
		else if (i + 1 > o->head_idx)
			stage = 3;
		else
			stage = 2;
		src[i + o->merge] = create_ce_entry(info, names + i, stage);
	}

	if (o->merge) {
		int rc = call_unpack_fn((const struct cache_entry * const *)src,
					o);
		for (i = 0; i < n; i++) {
			struct cache_entry *ce = src[i + o->merge];
			if (ce != o->df_conflict_entry)
				free(ce);
		}
		return rc;
	}

	for (i = 0; i < n; i++)
		if (src[i] && src[i] != o->df_conflict_entry)
			if (do_add_entry(o, src[i], 0, 0))
				return -1;

	return 0;
}

static int unpack_failed(struct unpack_trees_options *o, const char *message)
{
	discard_index(&o->result);
	if (!o->gently && !o->exiting_early) {
		if (message)
			return error("%s", message);
		return -1;
	}
	return -1;
}

/*
 * The tree traversal is looking at name p.  If we have a matching entry,
 * return it.  If name p is a directory in the index, do not return
 * anything, as we will want to match it when the traversal descends into
 * the directory.
 */
static int find_cache_pos(struct traverse_info *info,
			  const struct name_entry *p)
{
	int pos;
	struct unpack_trees_options *o = info->data;
	struct index_state *index = o->src_index;
	int pfxlen = info->pathlen;
	int p_len = tree_entry_len(p);

	for (pos = o->cache_bottom; pos < index->cache_nr; pos++) {
		const struct cache_entry *ce = index->cache[pos];
		const char *ce_name, *ce_slash;
		int cmp, ce_len;

		if (ce->ce_flags & CE_UNPACKED) {
			/*
			 * cache_bottom entry is already unpacked, so
			 * we can never match it; don't check it
			 * again.
			 */
			if (pos == o->cache_bottom)
				++o->cache_bottom;
			continue;
		}
		if (!ce_in_traverse_path(ce, info)) {
			/*
			 * Check if we can skip future cache checks
			 * (because we're already past all possible
			 * entries in the traverse path).
			 */
			if (info->traverse_path) {
				if (strncmp(ce->name, info->traverse_path,
					    info->pathlen) > 0)
					break;
			}
			continue;
		}
		ce_name = ce->name + pfxlen;
		ce_slash = strchr(ce_name, '/');
		if (ce_slash)
			ce_len = ce_slash - ce_name;
		else
			ce_len = ce_namelen(ce) - pfxlen;
		cmp = name_compare(p->path, p_len, ce_name, ce_len);
		/*
		 * Exact match; if we have a directory we need to
		 * delay returning it.
		 */
		if (!cmp)
			return ce_slash ? -2 - pos : pos;
		if (0 < cmp)
			continue; /* keep looking */
		/*
		 * ce_name sorts after p->path; could it be that we
		 * have files under p->path directory in the index?
		 * E.g.  ce_name == "t-i", and p->path == "t"; we may
		 * have "t/a" in the index.
		 */
		if (p_len < ce_len && !memcmp(ce_name, p->path, p_len) &&
		    ce_name[p_len] < '/')
			continue; /* keep looking */
		break;
	}
	return -1;
}

static struct cache_entry *find_cache_entry(struct traverse_info *info,
					    const struct name_entry *p)
{
	int pos = find_cache_pos(info, p);
	struct unpack_trees_options *o = info->data;

	if (0 <= pos)
		return o->src_index->cache[pos];
	else
		return NULL;
}

static void debug_path(struct traverse_info *info)
{
	if (info->prev) {
		debug_path(info->prev);
		if (*info->prev->name.path)
			putchar('/');
	}
	printf("%s", info->name.path);
}

static void debug_name_entry(int i, struct name_entry *n)
{
	printf("ent#%d %06o %s\n", i,
	       n->path ? n->mode : 0,
	       n->path ? n->path : "(missing)");
}

static void debug_unpack_callback(int n,
				  unsigned long mask,
				  unsigned long dirmask,
				  struct name_entry *names,
				  struct traverse_info *info)
{
	int i;
	printf("* unpack mask %lu, dirmask %lu, cnt %d ",
	       mask, dirmask, n);
	debug_path(info);
	putchar('\n');
	for (i = 0; i < n; i++)
		debug_name_entry(i, names + i);
}

static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	const struct name_entry *p = names;

	/* Find first entry with a real name (we could use "mask" too) */
	while (!p->mode)
		p++;

	if (o->debug_unpack)
		debug_unpack_callback(n, mask, dirmask, names, info);

	/* Are we supposed to look at the index too? */
	if (o->merge) {
		while (1) {
			int cmp;
			struct cache_entry *ce;

			if (o->diff_index_cached)
				ce = next_cache_entry(o);
			else
				ce = find_cache_entry(info, p);

			if (!ce)
				break;
			cmp = compare_entry(ce, info, p);
			if (cmp < 0) {
				if (unpack_index_entry(ce, o) < 0)
					return unpack_failed(o, NULL);
				continue;
			}
			if (!cmp) {
				if (ce_stage(ce)) {
					/*
					 * If we skip unmerged index
					 * entries, we'll skip this
					 * entry *and* the tree
					 * entries associated with it!
					 */
					if (o->skip_unmerged) {
						add_same_unmerged(ce, o);
						return mask;
					}
				}
				src[0] = ce;
			}
			break;
		}
	}

	if (unpack_nondirectories(n, mask, dirmask, src, names, info) < 0)
		return -1;

	if (o->merge && src[0]) {
		if (ce_stage(src[0]))
			mark_ce_used_same_name(src[0], o);
		else
			mark_ce_used(src[0], o);
	}

	/* Now handle any directories.. */
	if (dirmask) {
		/* special case: "diff-index --cached" looking at a tree */
		if (o->diff_index_cached &&
		    n == 1 && dirmask == 1 && S_ISDIR(names->mode)) {
			int matches;
			matches = cache_tree_matches_traversal(o->src_index->cache_tree,
							       names, info);
			/*
			 * Everything under the name matches; skip the
			 * entire hierarchy.  diff_index_cached codepath
			 * special cases D/F conflicts in such a way that
			 * it does not do any look-ahead, so this is safe.
			 */
			if (matches) {
				o->cache_bottom += matches;
				return mask;
			}
		}

		if (traverse_trees_recursive(n, dirmask, mask & ~dirmask,
					     names, info) < 0)
			return -1;
		return mask;
	}

	return mask;
}

static int clear_ce_flags_1(struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct exclude_list *el, int defval);

/* Whole directory matching */
static int clear_ce_flags_dir(struct cache_entry **cache, int nr,
			      struct strbuf *prefix,
			      char *basename,
			      int select_mask, int clear_mask,
			      struct exclude_list *el, int defval)
{
	struct cache_entry **cache_end;
	int dtype = DT_DIR;
	int ret = is_excluded_from_list(prefix->buf, prefix->len,
					basename, &dtype, el, &the_index);
	int rc;

	strbuf_addch(prefix, '/');

	/* If undecided, use matching result of parent dir in defval */
	if (ret < 0)
		ret = defval;

	for (cache_end = cache; cache_end != cache + nr; cache_end++) {
		struct cache_entry *ce = *cache_end;
		if (strncmp(ce->name, prefix->buf, prefix->len))
			break;
	}

	/*
	 * TODO: check el, if there are no patterns that may conflict
	 * with ret (iow, we know in advance the incl/excl
	 * decision for the entire directory), clear flag here without
	 * calling clear_ce_flags_1(). That function will call
	 * the expensive is_excluded_from_list() on every entry.
	 */
	rc = clear_ce_flags_1(cache, cache_end - cache,
			      prefix,
			      select_mask, clear_mask,
			      el, ret);
	strbuf_setlen(prefix, prefix->len - 1);
	return rc;
}

/*
 * Traverse the index, find every entry that matches according to
 * o->el.  Do "ce_flags &= ~clear_mask" on those entries.  Return the
 * number of traversed entries.
 *
 * If select_mask is non-zero, only entries whose ce_flags has one of
 * those bits enabled are traversed.
 *
 * cache	: pointer to an index entry
 * prefix	: the current path prefix, including the trailing '/';
 *		  it is cache[0]->name[0..(prefix->len-1)].
 *		  The top level has a zero-length prefix.
 */
static int clear_ce_flags_1(struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct exclude_list *el, int defval)
{
	struct cache_entry **cache_end = cache + nr;

	/*
	 * Process all entries that have the given prefix and meet
	 * select_mask condition
	 */
	while (cache != cache_end) {
		struct cache_entry *ce = *cache;
		const char *name, *slash;
		int len, dtype, ret;

		if (select_mask && !(ce->ce_flags & select_mask)) {
			cache++;
			continue;
		}

		if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len))
			break;

		name = ce->name + prefix->len;
		slash = strchr(name, '/');

		/* If it's a directory, try whole directory match first */
		if (slash) {
			int processed;

			len = slash - name;
			strbuf_add(prefix, name, len);

			processed = clear_ce_flags_dir(cache, cache_end - cache,
						       prefix,
						       prefix->buf + prefix->len - len,
						       select_mask, clear_mask,
						       el, defval);

			/* clear_c_f_dir eats a whole dir already? */
			if (processed) {
				cache += processed;
				strbuf_setlen(prefix, prefix->len - len);
				continue;
			}

			strbuf_addch(prefix, '/');
			cache += clear_ce_flags_1(cache, cache_end - cache,
						  prefix,
						  select_mask, clear_mask, el, defval);
			strbuf_setlen(prefix, prefix->len - len - 1);
			continue;
		}

		/* Non-directory */
		dtype = ce_to_dtype(ce);
		ret = is_excluded_from_list(ce->name, ce_namelen(ce),
					    name, &dtype, el, &the_index);
		if (ret < 0)
			ret = defval;
		if (ret > 0)
			ce->ce_flags &= ~clear_mask;
		cache++;
	}
	return nr - (cache_end - cache);
}

static int clear_ce_flags(struct cache_entry **cache, int nr,
			  int select_mask, int clear_mask,
			  struct exclude_list *el)
{
	static struct strbuf prefix = STRBUF_INIT;

	strbuf_reset(&prefix);

	return clear_ce_flags_1(cache, nr,
				&prefix,
				select_mask, clear_mask,
				el, 0);
}

/*
 * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout
 */
static void mark_new_skip_worktree(struct exclude_list *el,
				   struct index_state *the_index,
				   int select_flag, int skip_wt_flag)
{
	int i;

	/*
	 * 1. Pretend the narrowest worktree: only unmerged entries
	 * are checked out
	 */
	for (i = 0; i < the_index->cache_nr; i++) {
		struct cache_entry *ce = the_index->cache[i];

		if (select_flag && !(ce->ce_flags & select_flag))
			continue;

		if (!ce_stage(ce))
			ce->ce_flags |= skip_wt_flag;
		else
			ce->ce_flags &= ~skip_wt_flag;
	}

	/*
	 * 2. Widen worktree according to sparse-checkout file.
	 * Matched entries will have skip_wt_flag cleared (i.e. "in")
	 */
	clear_ce_flags(the_index->cache, the_index->cache_nr,
		       select_flag, skip_wt_flag, el);
}
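
/*
 * Worked example (illustrative, not part of the original code): with a
 * $GIT_DIR/info/sparse-checkout file containing the single pattern
 *
 *	/src/
 *
 * loop #1 in mark_new_skip_worktree() above first sets skip_wt_flag on
 * every selected entry, and clear_ce_flags() then clears it again for
 * the entries matching "src/", so only those paths remain scheduled to
 * be present in the working tree.
 */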

static int verify_absent(const struct cache_entry *,
			 enum unpack_trees_error_types,
			 struct unpack_trees_options *);
/*
 * N-way merge "len" trees.  Returns 0 on success, -1 on failure to manipulate the
 * resulting index, -2 on failure to reflect the changes to the work tree.
 *
 * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally
 */
int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
{
	int i, ret;
	static struct cache_entry *dfc;
	struct exclude_list el;

	if (len > MAX_UNPACK_TREES)
		die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);

	memset(&el, 0, sizeof(el));
	if (!core_apply_sparse_checkout || !o->update)
		o->skip_sparse_checkout = 1;
	if (!o->skip_sparse_checkout) {
		char *sparse = git_pathdup("info/sparse-checkout");
		if (add_excludes_from_file_to_list(sparse, "", 0, &el, NULL) < 0)
			o->skip_sparse_checkout = 1;
		else
			o->el = &el;
		free(sparse);
	}

	memset(&o->result, 0, sizeof(o->result));
	o->result.initialized = 1;
	o->result.timestamp.sec = o->src_index->timestamp.sec;
	o->result.timestamp.nsec = o->src_index->timestamp.nsec;
	o->result.version = o->src_index->version;
	o->result.split_index = o->src_index->split_index;
	if (o->result.split_index)
		o->result.split_index->refcount++;
	hashcpy(o->result.sha1, o->src_index->sha1);
	o->merge_size = len;
	mark_all_ce_unused(o->src_index);

	/*
	 * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries
	 */
	if (!o->skip_sparse_checkout)
		mark_new_skip_worktree(o->el, o->src_index, 0, CE_NEW_SKIP_WORKTREE);

	if (!dfc)
		dfc = xcalloc(1, cache_entry_size(0));
	o->df_conflict_entry = dfc;

	if (len) {
		const char *prefix = o->prefix ? o->prefix : "";
		struct traverse_info info;

		setup_traverse_info(&info, prefix);
		info.fn = unpack_callback;
		info.data = o;
		info.show_all_errors = o->show_all_errors;
		info.pathspec = o->pathspec;

		if (o->prefix) {
			/*
			 * Unpack existing index entries that sort before the
			 * prefix the tree is spliced into.  Note that o->merge
			 * is always true in this case.
			 */
			while (1) {
				struct cache_entry *ce = next_cache_entry(o);
				if (!ce)
					break;
				if (ce_in_traverse_path(ce, &info))
					break;
				if (unpack_index_entry(ce, o) < 0)
					goto return_failed;
			}
		}

		if (traverse_trees(len, t, &info) < 0)
			goto return_failed;
	}

	/* Any left-over entries in the index? */
	if (o->merge) {
		while (1) {
			struct cache_entry *ce = next_cache_entry(o);
			if (!ce)
				break;
			if (unpack_index_entry(ce, o) < 0)
				goto return_failed;
		}
	}
	mark_all_ce_unused(o->src_index);

	if (o->trivial_merges_only && o->nontrivial_merge) {
		ret = unpack_failed(o, "Merge requires file-level merging");
		goto done;
	}

	if (!o->skip_sparse_checkout) {
		int empty_worktree = 1;

		/*
		 * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #1.
		 * If they will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE
		 * so apply_sparse_checkout() won't attempt to remove them from the worktree.
		 */
		mark_new_skip_worktree(o->el, &o->result, CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);

		ret = 0;
		for (i = 0; i < o->result.cache_nr; i++) {
			struct cache_entry *ce = o->result.cache[i];

			/*
			 * Entries marked with CE_ADDED in merged_entry() do not have
			 * the verify_absent() check (the check is effectively disabled
			 * because CE_NEW_SKIP_WORKTREE is set unconditionally).
			 *
			 * Do the real check now because we have the
			 * correct CE_NEW_SKIP_WORKTREE.
			 */
			if (ce->ce_flags & CE_ADDED &&
			    verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}

			if (apply_sparse_checkout(&o->result, ce, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}
			if (!ce_skip_worktree(ce))
				empty_worktree = 0;

		}
		if (ret < 0)
			goto return_failed;
		/*
		 * Sparse checkout is meant to narrow down checkout area
		 * but it does not make sense to narrow down to empty working
		 * tree. This is usually a mistake in sparse checkout rules.
		 * Do not allow users to do that.
		 */
		if (o->result.cache_nr && empty_worktree) {
			ret = unpack_failed(o, "Sparse checkout leaves no entry on working directory");
			goto done;
		}
	}

	o->src_index = NULL;
	ret = check_updates(o) ? (-2) : 0;
	if (o->dst_index) {
		if (!ret) {
			if (!o->result.cache_tree)
				o->result.cache_tree = cache_tree();
			if (!cache_tree_fully_valid(o->result.cache_tree))
				cache_tree_update(&o->result,
						  WRITE_TREE_SILENT |
						  WRITE_TREE_REPAIR);
		}
		move_index_extensions(&o->result, o->dst_index);
		discard_index(o->dst_index);
		*o->dst_index = o->result;
	} else {
		discard_index(&o->result);
	}

done:
	clear_exclude_list(&el);
	return ret;

return_failed:
	if (o->show_all_errors)
		display_error_msgs(o);
	mark_all_ce_unused(o->src_index);
	ret = unpack_failed(o, NULL);
	if (o->exiting_early)
		ret = 0;
	goto done;
}

/* Here come the merge functions */
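
/*
 * A note on the merge-function convention (added for clarity): each
 * function below receives the entries for one path as an array -- when
 * o->merge is set, src[0] is the index entry (or NULL) and src[1..n]
 * are the entries from the trees being unpacked, with
 * o->df_conflict_entry standing in for a directory/file conflict.  The
 * functions return the number of entries they kept or merged into
 * o->result, or a negative value to signal an error; see
 * call_unpack_fn() above for how positive returns are treated as
 * success.
 */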

static int reject_merge(const struct cache_entry *ce,
			struct unpack_trees_options *o)
{
	return o->gently ? -1 :
		add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
}

static int same(const struct cache_entry *a, const struct cache_entry *b)
{
	if (!!a != !!b)
		return 0;
	if (!a && !b)
		return 1;
	if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED)
		return 0;
	return a->ce_mode == b->ce_mode &&
	       !oidcmp(&a->oid, &b->oid);
}

/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static int verify_uptodate_1(const struct cache_entry *ce,
			     struct unpack_trees_options *o,
			     enum unpack_trees_error_types error_type)
{
	struct stat st;

	if (o->index_only)
		return 0;

	/*
	 * CE_VALID and CE_SKIP_WORKTREE cheat, we better check again
	 * if this entry is truly up-to-date because this file may be
	 * overwritten.
	 */
	if ((ce->ce_flags & CE_VALID) || ce_skip_worktree(ce))
		; /* keep checking */
	else if (o->reset || ce_uptodate(ce))
		return 0;

	if (!lstat(ce->name, &st)) {
		int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE;
		unsigned changed = ie_match_stat(o->src_index, ce, &st, flags);

		if (submodule_from_ce(ce)) {
			int r = check_submodule_move_head(ce,
				"HEAD", oid_to_hex(&ce->oid), o);
			if (r)
				return o->gently ? -1 :
					add_rejected_path(o, error_type, ce->name);
			return 0;
		}

		if (!changed)
			return 0;
		/*
		 * Historic default policy was to allow submodule to be out
		 * of sync wrt the superproject index. If the submodule was
		 * not considered interesting above, we don't care here.
		 */
		if (S_ISGITLINK(ce->ce_mode))
			return 0;

		errno = 0;
	}
	if (errno == ENOENT)
		return 0;
	return o->gently ? -1 :
		add_rejected_path(o, error_type, ce->name);
}

static int verify_uptodate(const struct cache_entry *ce,
			   struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);
}

static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o)
{
	return verify_uptodate_1(ce, o, ERROR_SPARSE_NOT_UPTODATE_FILE);
}

static void invalidate_ce_path(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	if (!ce)
		return;
	cache_tree_invalidate_path(o->src_index, ce->name);
	untracked_cache_invalidate_path(o->src_index, ce->name);
}

/*
 * Check that checking out ce->sha1 in subdir ce->name is not
 * going to overwrite any working files.
 *
 * Currently, git does not checkout subprojects during a superproject
 * checkout, so it is not going to overwrite anything.
 */
static int verify_clean_submodule(const char *old_sha1,
				  const struct cache_entry *ce,
				  enum unpack_trees_error_types error_type,
				  struct unpack_trees_options *o)
{
	if (!submodule_from_ce(ce))
		return 0;

	return check_submodule_move_head(ce, old_sha1,
					 oid_to_hex(&ce->oid), o);
}

static int verify_clean_subdirectory(const struct cache_entry *ce,
				     enum unpack_trees_error_types error_type,
				     struct unpack_trees_options *o)
{
	/*
	 * we are about to extract "ce->name"; we would not want to lose
	 * anything in the existing directory there.
	 */
	int namelen;
	int i;
	struct dir_struct d;
	char *pathbuf;
	int cnt = 0;

	if (S_ISGITLINK(ce->ce_mode)) {
		unsigned char sha1[20];
		int sub_head = resolve_gitlink_ref(ce->name, "HEAD", sha1);
		/*
		 * If we are not going to update the submodule, then
		 * we don't care.
		 */
		if (!sub_head && !hashcmp(sha1, ce->oid.hash))
			return 0;
		return verify_clean_submodule(sub_head ? NULL : sha1_to_hex(sha1),
					      ce, error_type, o);
	}

	/*
	 * First let's make sure we do not have a local modification
	 * in that directory.
	 */
	namelen = ce_namelen(ce);
	for (i = locate_in_src_index(ce, o);
	     i < o->src_index->cache_nr;
	     i++) {
		struct cache_entry *ce2 = o->src_index->cache[i];
		int len = ce_namelen(ce2);
		if (len < namelen ||
		    strncmp(ce->name, ce2->name, namelen) ||
		    ce2->name[namelen] != '/')
			break;
		/*
		 * ce2->name is an entry in the subdirectory to be
		 * removed.
		 */
		if (!ce_stage(ce2)) {
			if (verify_uptodate(ce2, o))
				return -1;
			add_entry(o, ce2, CE_REMOVE, 0);
			mark_ce_used(ce2, o);
		}
		cnt++;
	}

	/*
	 * Then we need to make sure that we do not lose a locally
	 * present file that is not ignored.
	 */
	pathbuf = xstrfmt("%.*s/", namelen, ce->name);

	memset(&d, 0, sizeof(d));
	if (o->dir)
		d.exclude_per_dir = o->dir->exclude_per_dir;
	i = read_directory(&d, &the_index, pathbuf, namelen+1, NULL);
	free(pathbuf);
	if (i)
		return o->gently ? -1 :
			add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);
	return cnt;
}

/*
 * This gets called when there was no index entry for the tree entry 'dst',
 * but we found a file in the working tree that 'lstat()' said was fine,
 * and we're on a case-insensitive filesystem.
 *
 * See if we can find a case-insensitive match in the index that also
 * matches the stat information, and assume it's that other file!
 */
static int icase_exists(struct unpack_trees_options *o, const char *name, int len, struct stat *st)
{
	const struct cache_entry *src;

	src = index_file_exists(o->src_index, name, len, 1);
	return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);
}

static int check_ok_to_remove(const char *name, int len, int dtype,
			      const struct cache_entry *ce, struct stat *st,
			      enum unpack_trees_error_types error_type,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *result;

	/*
	 * It may be that the 'lstat()' succeeded even though
	 * target 'ce' was absent, because there is an old
	 * entry that is different only in case..
	 *
	 * Ignore that lstat() if it matches.
	 */
	if (ignore_case && icase_exists(o, name, len, st))
		return 0;

	if (o->dir &&
	    is_excluded(o->dir, &the_index, name, &dtype))
		/*
		 * ce->name is explicitly excluded, so it is Ok to
		 * overwrite it.
		 */
		return 0;
	if (S_ISDIR(st->st_mode)) {
		/*
		 * We are checking out path "foo" and
		 * found "foo/." in the working tree.
		 * This is tricky -- if we have modified
		 * files that are in "foo/" we would lose
		 * them.
		 */
		if (verify_clean_subdirectory(ce, error_type, o) < 0)
			return -1;
		return 0;
	}

	/*
	 * The previous round may already have decided to
	 * delete this path, which is in a subdirectory that
	 * is being replaced with a blob.
	 */
	result = index_file_exists(&o->result, name, len, 0);
	if (result) {
		if (result->ce_flags & CE_REMOVE)
			return 0;
	}

	return o->gently ? -1 :
		add_rejected_path(o, error_type, name);
}

/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked, unless it is ignored.
 */
static int verify_absent_1(const struct cache_entry *ce,
			   enum unpack_trees_error_types error_type,
			   struct unpack_trees_options *o)
{
	int len;
	struct stat st;

	if (o->index_only || o->reset || !o->update)
		return 0;

	len = check_leading_path(ce->name, ce_namelen(ce));
	if (!len)
		return 0;
	else if (len > 0) {
		char *path;
		int ret;

		path = xmemdupz(ce->name, len);
		if (lstat(path, &st))
			ret = error_errno("cannot stat '%s'", path);
		else {
			if (submodule_from_ce(ce))
				ret = check_submodule_move_head(ce,
								oid_to_hex(&ce->oid),
								NULL, o);
			else
				ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL,
							 &st, error_type, o);
		}
		free(path);
		return ret;
	} else if (lstat(ce->name, &st)) {
		if (errno != ENOENT)
			return error_errno("cannot stat '%s'", ce->name);
		return 0;
	} else {
		if (submodule_from_ce(ce))
			return check_submodule_move_head(ce, oid_to_hex(&ce->oid),
							 NULL, o);

		return check_ok_to_remove(ce->name, ce_namelen(ce),
					  ce_to_dtype(ce), ce, &st,
					  error_type, o);
	}
}

static int verify_absent(const struct cache_entry *ce,
			 enum unpack_trees_error_types error_type,
			 struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_absent_1(ce, error_type, o);
}

static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types error_type,
				struct unpack_trees_options *o)
{
	enum unpack_trees_error_types orphaned_error = error_type;
	if (orphaned_error == ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN)
		orphaned_error = ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN;

	return verify_absent_1(ce, orphaned_error, o);
}

static int merged_entry(const struct cache_entry *ce,
			const struct cache_entry *old,
			struct unpack_trees_options *o)
{
	int update = CE_UPDATE;
	struct cache_entry *merge = dup_entry(ce);

	if (!old) {
		/*
		 * New index entries. In sparse checkout, the following
		 * verify_absent() will be delayed until after
		 * traverse_trees() finishes in unpack_trees(), then:
		 *
		 *  - CE_NEW_SKIP_WORKTREE will be computed correctly
		 *  - verify_absent() will be called again, this time with
		 *    the correct CE_NEW_SKIP_WORKTREE
		 *
		 * The verify_absent() call here does nothing in sparse
		 * checkout (i.e. o->skip_sparse_checkout == 0)
		 */
		update |= CE_ADDED;
		merge->ce_flags |= CE_NEW_SKIP_WORKTREE;

		if (verify_absent(merge,
				  ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
			free(merge);
			return -1;
		}
		invalidate_ce_path(merge, o);

		if (submodule_from_ce(ce)) {
			int ret = check_submodule_move_head(ce, NULL,
							    oid_to_hex(&ce->oid),
							    o);
			if (ret)
				return ret;
		}

	} else if (!(old->ce_flags & CE_CONFLICTED)) {
		/*
		 * See if we can re-use the old CE directly?
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on a match; otherwise
		 * we will end up overwriting local changes in the work tree.
		 */
		if (same(old, merge)) {
			copy_cache_entry(merge, old);
			update = 0;
		} else {
			if (verify_uptodate(old, o)) {
				free(merge);
				return -1;
			}
			/* Migrate old flags over */
			update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);
			invalidate_ce_path(old, o);
		}

		if (submodule_from_ce(ce)) {
			int ret = check_submodule_move_head(ce, oid_to_hex(&old->oid),
							    oid_to_hex(&ce->oid),
							    o);
			if (ret)
				return ret;
		}
	} else {
		/*
		 * Previously unmerged entry left as an existence
		 * marker by read_index_unmerged();
		 */
		invalidate_ce_path(old, o);
	}

	do_add_entry(o, merge, update, CE_STAGEMASK);
	return 1;
}

static int deleted_entry(const struct cache_entry *ce,
			 const struct cache_entry *old,
			 struct unpack_trees_options *o)
{
	/* Did it exist in the index? */
	if (!old) {
		if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
			return -1;
		return 0;
	}
	if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o))
		return -1;
	add_entry(o, ce, CE_REMOVE, 0);
	invalidate_ce_path(ce, o);
	return 1;
}

static int keep_entry(const struct cache_entry *ce,
		      struct unpack_trees_options *o)
{
	add_entry(o, ce, 0, 0);
	return 1;
}

#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ce->ce_mode,
			oid_to_hex(&ce->oid),
			ce_stage(ce),
			ce->name);
}
#endif
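
/*
 * Cross-reference added for the reader's convenience (not in the
 * original file): the case numbers (#1, #2ALT, #13, #14, #16, ...) used
 * in the comments of threeway_merge() below refer to the three-way
 * merge cases tabulated in Documentation/technical/trivial-merge.txt.
 */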

int threeway_merge(const struct cache_entry * const *stages,
		   struct unpack_trees_options *o)
{
	const struct cache_entry *index;
	const struct cache_entry *head;
	const struct cache_entry *remote = stages[o->head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	for (i = 1; i < o->head_idx; i++) {
		if (!stages[i] || stages[i] == o->df_conflict_entry)
			any_anc_missing = 1;
		else
			no_anc_exists = 0;
	}

	index = stages[0];
	head = stages[o->head_idx];

	if (head == o->df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == o->df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	/*
	 * First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < o->head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/*
	 * We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			return reject_merge(index, o);
		return merged_entry(remote, index, o);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head))
		return reject_merge(index, o);

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index, o);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index, o);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/*
	 * Under the "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (o->aggressive) {
		int head_deleted = !head;
		int remote_deleted = !remote;
		const struct cache_entry *ce = NULL;

		if (index)
			ce = index;
		else if (head)
			ce = head;
		else if (remote)
			ce = remote;
		else {
			for (i = 1; i < o->head_idx; i++) {
				if (stages[i] && stages[i] != o->df_conflict_entry) {
					ce = stages[i];
					break;
				}
			}
		}

		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index, o);
			if (ce && !head_deleted) {
				if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
					return -1;
			}
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index, o);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		if (verify_uptodate(index, o))
			return -1;
	}

	o->nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #10, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		for (i = 1; i < o->head_idx; i++) {
			if (stages[i] && stages[i] != o->df_conflict_entry) {
				keep_entry(stages[i], o);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head, o); }
	if (remote) { count += keep_entry(remote, o); }
	return count;
}

/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast-forward", favoring a successful merge
 * over a merge failure when it makes sense.  For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 *
 */
int twoway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *current = src[0];
	const struct cache_entry *oldtree = src[1];
	const struct cache_entry *newtree = src[2];

	if (o->merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     o->merge_size);

	if (oldtree == o->df_conflict_entry)
		oldtree = NULL;
	if (newtree == o->df_conflict_entry)
		newtree = NULL;

	if (current) {
		if (current->ce_flags & CE_CONFLICTED) {
			if (same(oldtree, newtree) || o->reset) {
				if (!newtree)
					return deleted_entry(current, current, o);
				else
					return merged_entry(newtree, current, o);
			}
			return reject_merge(current, o);
		} else if ((!oldtree && !newtree) || /* 4 and 5 */
			   (!oldtree && newtree &&
			    same(current, newtree)) || /* 6 and 7 */
			   (oldtree && newtree &&
			    same(oldtree, newtree)) || /* 14 and 15 */
			   (oldtree && newtree &&
			    !same(oldtree, newtree) && /* 18 and 19 */
			    same(current, newtree))) {
			return keep_entry(current, o);
		} else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current, o);
		} else if (oldtree && newtree &&
			   same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current, o);
		} else
			return reject_merge(current, o);
	}
	else if (newtree) {
		if (oldtree && !o->initial_checkout) {
			/*
			 * deletion of the path was staged;
			 */
			if (same(oldtree, newtree))
				return 1;
			return reject_merge(oldtree, o);
		}
		return merged_entry(newtree, current, o);
	}
	return deleted_entry(oldtree, current, o);
}

/*
 * Bind merge.
 *
 * Keep the index entries at stage0, collapse stage1 but make sure
 * stage0 does not have anything there.
 */
int bind_merge(const struct cache_entry * const *src,
	       struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a bind merge of %d trees",
			     o->merge_size);
	if (a && old)
		return o->gently ? -1 :
			error(ERRORMSG(o, ERROR_BIND_OVERLAP),
			      super_prefixed(a->name),
			      super_prefixed(old->name));
	if (!a)
		return keep_entry(old, o);
	else
		return merged_entry(a, NULL, o);
}

/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
int oneway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     o->merge_size);

	if (!a || a == o->df_conflict_entry)
		return deleted_entry(old, old, o);

	if (old && same(old, a)) {
		int update = 0;
		if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old)) {
			struct stat st;
			if (lstat(old->name, &st) ||
			    ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE))
				update |= CE_UPDATE;
		}
		add_entry(o, old, update, 0);
		return 0;
	}
	return merged_entry(a, old, o);
}
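
/*
 * Usage sketch (illustrative only, loosely modeled on callers such as
 * builtin/read-tree.c; error handling is elided and the exact caller
 * code may differ):
 *
 *	struct tree_desc desc;
 *	struct unpack_trees_options opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.head_idx = 1;
 *	opts.src_index = &the_index;
 *	opts.dst_index = &the_index;
 *	opts.fn = oneway_merge;
 *	opts.merge = 1;
 *	opts.update = 1;
 *
 *	parse_tree(tree);	(* "tree" is a struct tree * looked up by the caller *)
 *	init_tree_desc(&desc, tree->buffer, tree->size);
 *	if (unpack_trees(1, &desc, &opts))
 *		die("unpack_trees failed");
 */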