#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "config.h"
#include "dir.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "progress.h"
#include "refs.h"
#include "attr.h"
#include "split-index.h"
#include "dir.h"
#include "submodule.h"
#include "submodule-config.h"

/*
 * Error messages expected by scripts out of plumbing commands such as
 * read-tree.  Non-scripted Porcelain is not required to use these messages
 * and is in fact encouraged to reword them to better suit its particular
 * situation.  See how "git checkout" and "git merge" replace
 * them using setup_unpack_trees_porcelain(), for example.
 */
static const char *unpack_plumbing_errors[NB_UNPACK_TREES_ERROR_TYPES] = {
	/* ERROR_WOULD_OVERWRITE */
	"Entry '%s' would be overwritten by merge. Cannot merge.",

	/* ERROR_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot merge.",

	/* ERROR_NOT_UPTODATE_DIR */
	"Updating '%s' would lose untracked files in it",

	/* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */
	"Untracked working tree file '%s' would be overwritten by merge.",

	/* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */
	"Untracked working tree file '%s' would be removed by merge.",

	/* ERROR_BIND_OVERLAP */
	"Entry '%s' overlaps with '%s'.  Cannot bind.",

	/* ERROR_SPARSE_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot update sparse checkout.",

	/* ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN */
	"Working tree file '%s' would be overwritten by sparse checkout update.",

	/* ERROR_WOULD_LOSE_ORPHANED_REMOVED */
	"Working tree file '%s' would be removed by sparse checkout update.",

	/* ERROR_WOULD_LOSE_SUBMODULE */
	"Submodule '%s' cannot checkout new HEAD.",
};

#define ERRORMSG(o,type) \
	( ((o) && (o)->msgs[(type)]) \
	  ? ((o)->msgs[(type)]) \
	  : (unpack_plumbing_errors[(type)]) )

static const char *super_prefixed(const char *path)
{
	/*
	 * It is necessary and sufficient to have two static buffers
	 * here, as the return value of this function is fed to
	 * error() using the unpack_*_errors[] templates we see above.
	 */
	static struct strbuf buf[2] = {STRBUF_INIT, STRBUF_INIT};
	static int super_prefix_len = -1;
	static unsigned idx = ARRAY_SIZE(buf) - 1;

	if (super_prefix_len < 0) {
		const char *super_prefix = get_super_prefix();
		if (!super_prefix) {
			super_prefix_len = 0;
		} else {
			int i;
			for (i = 0; i < ARRAY_SIZE(buf); i++)
				strbuf_addstr(&buf[i], super_prefix);
			super_prefix_len = buf[0].len;
		}
	}

	if (!super_prefix_len)
		return path;

	if (++idx >= ARRAY_SIZE(buf))
		idx = 0;

	strbuf_setlen(&buf[idx], super_prefix_len);
	strbuf_addstr(&buf[idx], path);

	return buf[idx].buf;
}
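
/*
 * Install porcelain-friendly versions of the plumbing messages above:
 * they name the command being run ("checkout", "merge", ...) and, when
 * advice.commitBeforeMerge is enabled, also tell the user how to get
 * out of the situation.  The formatted strings are stored in opts->msgs.
 */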
void setup_unpack_trees_porcelain(struct unpack_trees_options *opts,
				  const char *cmd)
{
	int i;
	const char **msgs = opts->msgs;
	const char *msg;

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by checkout:\n%%s"
			  "Please commit your changes or stash them before you switch branches.")
		      : _("Your local changes to the following files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by merge:\n%%s"
			  "Please commit your changes or stash them before you merge.")
		      : _("Your local changes to the following files would be overwritten by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by %s:\n%%s"
			  "Please commit your changes or stash them before you %s.")
		      : _("Your local changes to the following files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] =
		xstrfmt(msg, cmd, cmd);

	msgs[ERROR_NOT_UPTODATE_DIR] =
		_("Updating the following directories would lose untracked files in them:\n%s");

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be removed by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be removed by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be removed by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = xstrfmt(msg, cmd, cmd);

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be overwritten by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = xstrfmt(msg, cmd, cmd);

	/*
	 * Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we
	 * cannot easily display it as a list.
	 */
	msgs[ERROR_BIND_OVERLAP] = _("Entry '%s' overlaps with '%s'.  Cannot bind.");

	msgs[ERROR_SPARSE_NOT_UPTODATE_FILE] =
		_("Cannot update sparse checkout: the following entries are not up-to-date:\n%s");
	msgs[ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN] =
		_("The following working tree files would be overwritten by sparse checkout update:\n%s");
	msgs[ERROR_WOULD_LOSE_ORPHANED_REMOVED] =
		_("The following working tree files would be removed by sparse checkout update:\n%s");
	msgs[ERROR_WOULD_LOSE_SUBMODULE] =
		_("Cannot update submodule:\n%s");

	opts->show_all_errors = 1;
	/* rejected paths may not have a static buffer */
	for (i = 0; i < ARRAY_SIZE(opts->unpack_rejects); i++)
		opts->unpack_rejects[i].strdup_strings = 1;
}
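
/*
 * dup_entry() makes a copy of a cache entry; add_entry()/do_add_entry()
 * insert such a copy into the result index (o->result), adjusting its
 * flags on the way in.  do_add_entry() takes ownership of the entry it
 * is given.
 */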
static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
			unsigned int set, unsigned int clear)
{
	clear |= CE_HASHED;

	if (set & CE_REMOVE)
		set |= CE_WT_REMOVE;

	ce->ce_flags = (ce->ce_flags & ~clear) | set;
	return add_index_entry(&o->result, ce,
			       ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
}

static struct cache_entry *dup_entry(const struct cache_entry *ce)
{
	unsigned int size = ce_size(ce);
	struct cache_entry *new = xmalloc(size);

	memcpy(new, ce, size);
	return new;
}

static void add_entry(struct unpack_trees_options *o,
		      const struct cache_entry *ce,
		      unsigned int set, unsigned int clear)
{
	do_add_entry(o, dup_entry(ce), set, clear);
}

/*
 * Record an error message for path <path> corresponding to the error
 * type <e>.  Depending on o->show_all_errors it is either reported
 * immediately or queued up for later display by display_error_msgs().
 */
static int add_rejected_path(struct unpack_trees_options *o,
			     enum unpack_trees_error_types e,
			     const char *path)
{
	if (!o->show_all_errors)
		return error(ERRORMSG(o, e), super_prefixed(path));

	/*
	 * Otherwise, insert in a list for future display by
	 * display_error_msgs()
	 */
	string_list_append(&o->unpack_rejects[e], path);
	return -1;
}

/*
 * display all the error messages stored in a nice way
 */
static void display_error_msgs(struct unpack_trees_options *o)
{
	int e, i;
	int something_displayed = 0;
	for (e = 0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) {
		struct string_list *rejects = &o->unpack_rejects[e];
		if (rejects->nr > 0) {
			struct strbuf path = STRBUF_INIT;
			something_displayed = 1;
			for (i = 0; i < rejects->nr; i++)
				strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
			error(ERRORMSG(o, e), super_prefixed(path.buf));
			strbuf_release(&path);
		}
		string_list_clear(rejects, 0);
	}
	if (something_displayed)
		fprintf(stderr, _("Aborting\n"));
}

static int check_submodule_move_head(const struct cache_entry *ce,
				     const char *old_id,
				     const char *new_id,
				     struct unpack_trees_options *o)
{
	unsigned flags = SUBMODULE_MOVE_HEAD_DRY_RUN;
	const struct submodule *sub = submodule_from_ce(ce);
	if (!sub)
		return 0;

	if (o->reset)
		flags |= SUBMODULE_MOVE_HEAD_FORCE;

	switch (sub->update_strategy.type) {
	case SM_UPDATE_UNSPECIFIED:
	case SM_UPDATE_CHECKOUT:
		if (submodule_move_head(ce->name, old_id, new_id, flags))
			return o->gently ? -1 :
				add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name);
		return 0;
	case SM_UPDATE_NONE:
		return 0;
	case SM_UPDATE_REBASE:
	case SM_UPDATE_MERGE:
	case SM_UPDATE_COMMAND:
	default:
		warning(_("submodule update strategy not supported for submodule '%s'"), ce->name);
		return -1;
	}
}

static void reload_gitmodules_file(struct index_state *index,
				   struct checkout *state)
{
	int i;
	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];
		if (ce->ce_flags & CE_UPDATE) {
			int r = strcmp(ce->name, ".gitmodules");
			if (r < 0)
				continue;
			else if (r == 0) {
				submodule_free();
				checkout_entry(ce, state, NULL);
				gitmodules_config();
				git_config(submodule_config, NULL);
			} else
				break;
		}
	}
}

/*
 * Unlink the last component and schedule the leading directories for
 * removal, such that empty directories get removed.
 */
static void unlink_entry(const struct cache_entry *ce)
{
	const struct submodule *sub = submodule_from_ce(ce);
	if (sub) {
		switch (sub->update_strategy.type) {
		case SM_UPDATE_UNSPECIFIED:
		case SM_UPDATE_CHECKOUT:
		case SM_UPDATE_REBASE:
		case SM_UPDATE_MERGE:
			/* state.force is set at the caller. */
			submodule_move_head(ce->name, "HEAD", NULL,
					    SUBMODULE_MOVE_HEAD_FORCE);
			break;
		case SM_UPDATE_NONE:
		case SM_UPDATE_COMMAND:
			return; /* Do not touch the submodule. */
		}
	}
	if (!check_leading_path(ce->name, ce_namelen(ce)))
		return;
	if (remove_or_warn(ce->ce_mode, ce->name))
		return;
	schedule_dir_for_removal(ce->name, ce_namelen(ce));
}

static struct progress *get_progress(struct unpack_trees_options *o)
{
	unsigned cnt = 0, total = 0;
	struct index_state *index = &o->result;

	if (!o->update || !o->verbose_update)
		return NULL;

	for (; cnt < index->cache_nr; cnt++) {
		const struct cache_entry *ce = index->cache[cnt];
		if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE))
			total++;
	}

	return start_progress_delay(_("Checking out files"),
				    total, 50, 1);
}

static int check_updates(struct unpack_trees_options *o)
{
	unsigned cnt = 0;
	int errs = 0;
	struct progress *progress = NULL;
	struct index_state *index = &o->result;
	struct checkout state = CHECKOUT_INIT;
	int i;

	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;
	state.istate = index;

	progress = get_progress(o);

	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKOUT, index);
	for (i = 0; i < index->cache_nr; i++) {
		const struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_WT_REMOVE) {
			display_progress(progress, ++cnt);
			if (o->update && !o->dry_run)
				unlink_entry(ce);
		}
	}
	remove_marked_cache_entries(index);
	remove_scheduled_dirs();

	if (should_update_submodules() && o->update && !o->dry_run)
		reload_gitmodules_file(index, &state);

	enable_delayed_checkout(&state);
	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_UPDATE) {
			if (ce->ce_flags & CE_WT_REMOVE)
				die("BUG: both update and delete flags are set on %s",
				    ce->name);
			display_progress(progress, ++cnt);
			ce->ce_flags &= ~CE_UPDATE;
			if (o->update && !o->dry_run) {
				errs |= checkout_entry(ce, &state, NULL);
			}
		}
	}
	errs |= finish_delayed_checkout(&state);
	stop_progress(&progress);
	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKIN, NULL);
	return errs != 0;
}

static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o);
static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types,
				struct unpack_trees_options *o);
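
/*
 * Reconcile CE_SKIP_WORKTREE with the freshly computed
 * CE_NEW_SKIP_WORKTREE: entries that move out of the checkout area get
 * CE_WT_REMOVE scheduled, entries that move back in get CE_UPDATE, and
 * entries whose state does not change are left alone.
 */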
static int apply_sparse_checkout(struct index_state *istate,
				 struct cache_entry *ce,
				 struct unpack_trees_options *o)
{
	int was_skip_worktree = ce_skip_worktree(ce);

	if (ce->ce_flags & CE_NEW_SKIP_WORKTREE)
		ce->ce_flags |= CE_SKIP_WORKTREE;
	else
		ce->ce_flags &= ~CE_SKIP_WORKTREE;
	if (was_skip_worktree != ce_skip_worktree(ce)) {
		ce->ce_flags |= CE_UPDATE_IN_BASE;
		istate->cache_changed |= CE_ENTRY_CHANGED;
	}

	/*
	 * if (!was_skip_worktree && !ce_skip_worktree()) {
	 *	This is perfectly normal. Move on;
	 * }
	 */

	/*
	 * Merge strategies may set CE_UPDATE|CE_REMOVE outside checkout
	 * area as a result of ce_skip_worktree() shortcuts in
	 * verify_absent() and verify_uptodate().
	 * Make sure they don't modify the worktree if they are already
	 * outside the checkout area.
	 */
	if (was_skip_worktree && ce_skip_worktree(ce)) {
		ce->ce_flags &= ~CE_UPDATE;

		/*
		 * By default, when CE_REMOVE is on, CE_WT_REMOVE is also
		 * on to get that file removed from both index and worktree.
		 * If that file is already outside the worktree area, don't
		 * bother removing it.
		 */
		if (ce->ce_flags & CE_REMOVE)
			ce->ce_flags &= ~CE_WT_REMOVE;
	}

	if (!was_skip_worktree && ce_skip_worktree(ce)) {
		/*
		 * If CE_UPDATE is set, verify_uptodate() has already been
		 * called; the stat info may also have been lost after
		 * merged_entry(), so calling verify_uptodate() again may fail.
		 */
		if (!(ce->ce_flags & CE_UPDATE) && verify_uptodate_sparse(ce, o))
			return -1;
		ce->ce_flags |= CE_WT_REMOVE;
		ce->ce_flags &= ~CE_UPDATE;
	}
	if (was_skip_worktree && !ce_skip_worktree(ce)) {
		if (verify_absent_sparse(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o))
			return -1;
		ce->ce_flags |= CE_UPDATE;
	}
	return 0;
}

static inline int call_unpack_fn(const struct cache_entry * const *src,
				 struct unpack_trees_options *o)
{
	int ret = o->fn(src, o);
	if (ret > 0)
		ret = 0;
	return ret;
}

static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o)
{
	ce->ce_flags |= CE_UNPACKED;

	if (o->cache_bottom < o->src_index->cache_nr &&
	    o->src_index->cache[o->cache_bottom] == ce) {
		int bottom = o->cache_bottom;
		while (bottom < o->src_index->cache_nr &&
		       o->src_index->cache[bottom]->ce_flags & CE_UNPACKED)
			bottom++;
		o->cache_bottom = bottom;
	}
}

static void mark_all_ce_unused(struct index_state *index)
{
	int i;
	for (i = 0; i < index->cache_nr; i++)
		index->cache[i]->ce_flags &= ~(CE_UNPACKED | CE_ADDED | CE_NEW_SKIP_WORKTREE);
}

static int locate_in_src_index(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);
	if (pos < 0)
		pos = -1 - pos;
	return pos;
}

/*
 * We call unpack_index_entry() with an unmerged cache entry
 * only in diff-index, and it wants a single callback.  Skip
 * the other unmerged entry with the same name.
 */
static void mark_ce_used_same_name(struct cache_entry *ce,
				   struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos;

	for (pos = locate_in_src_index(ce, o); pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		mark_ce_used(next, o);
	}
}

static struct cache_entry *next_cache_entry(struct unpack_trees_options *o)
{
	const struct index_state *index = o->src_index;
	int pos = o->cache_bottom;

	while (pos < index->cache_nr) {
		struct cache_entry *ce = index->cache[pos];
		if (!(ce->ce_flags & CE_UNPACKED))
			return ce;
		pos++;
	}
	return NULL;
}

static void add_same_unmerged(const struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);

	if (0 <= pos)
		die("programming error in a caller of mark_ce_used_same_name");
	for (pos = -pos - 1; pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		add_entry(o, next, 0, 0);
		mark_ce_used(next, o);
	}
}

static int unpack_index_entry(struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	int ret;

	src[0] = ce;

	mark_ce_used(ce, o);
	if (ce_stage(ce)) {
		if (o->skip_unmerged) {
			add_entry(o, ce, 0, 0);
			return 0;
		}
	}
	ret = call_unpack_fn(src, o);
	if (ce_stage(ce))
		mark_ce_used_same_name(ce, o);
	return ret;
}

static int find_cache_pos(struct traverse_info *, const struct name_entry *);

static void restore_cache_bottom(struct traverse_info *info, int bottom)
{
	struct unpack_trees_options *o = info->data;

	if (o->diff_index_cached)
		return;
	o->cache_bottom = bottom;
}

static int switch_cache_bottom(struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int ret, pos;

	if (o->diff_index_cached)
		return 0;
	ret = o->cache_bottom;
	pos = find_cache_pos(info->prev, &info->name);

	if (pos < -1)
		o->cache_bottom = -2 - pos;
	else if (pos < 0)
		o->cache_bottom = o->src_index->cache_nr;
	return ret;
}

static inline int are_same_oid(struct name_entry *name_j, struct name_entry *name_k)
{
	return name_j->oid && name_k->oid && !oidcmp(name_j->oid, name_k->oid);
}

static int traverse_trees_recursive(int n, unsigned long dirmask,
				    unsigned long df_conflicts,
				    struct name_entry *names,
				    struct traverse_info *info)
{
	int i, ret, bottom;
	int nr_buf = 0;
	struct tree_desc t[MAX_UNPACK_TREES];
	void *buf[MAX_UNPACK_TREES];
	struct traverse_info newinfo;
	struct name_entry *p;

	p = names;
	while (!p->mode)
		p++;

	newinfo = *info;
	newinfo.prev = info;
	newinfo.pathspec = info->pathspec;
	newinfo.name = *p;
	newinfo.pathlen += tree_entry_len(p) + 1;
	newinfo.df_conflicts |= df_conflicts;

	/*
	 * Fetch the tree from the ODB for each peer directory in the
	 * n commits.
	 *
	 * For 2- and 3-way traversals, we try to avoid hitting the
	 * ODB twice for the same OID.  This should yield a nice speed
	 * up in checkouts and merges when the commits are similar.
	 *
	 * We don't bother doing the full O(n^2) search for larger n,
	 * because wider traversals don't happen that often and we
	 * avoid the search setup.
	 *
	 * When 2 peer OIDs are the same, we just copy the tree
	 * descriptor data.  This implicitly borrows the buffer
	 * data from the earlier cell.
	 */
	for (i = 0; i < n; i++, dirmask >>= 1) {
		if (i > 0 && are_same_oid(&names[i], &names[i - 1]))
			t[i] = t[i - 1];
		else if (i > 1 && are_same_oid(&names[i], &names[i - 2]))
			t[i] = t[i - 2];
		else {
			const unsigned char *sha1 = NULL;
			if (dirmask & 1)
				sha1 = names[i].oid->hash;
			buf[nr_buf++] = fill_tree_descriptor(t+i, sha1);
		}
	}

	bottom = switch_cache_bottom(&newinfo);
	ret = traverse_trees(n, t, &newinfo);
	restore_cache_bottom(&newinfo, bottom);

	for (i = 0; i < nr_buf; i++)
		free(buf[i]);

	return ret;
}

/*
 * Compare the traverse-path to the cache entry without actually
 * having to generate the textual representation of the traverse
 * path.
 *
 * NOTE! This *only* compares up to the size of the traverse path
 * itself - the caller needs to do the final check for the cache
 * entry having more data at the end!
 */
static int do_compare_entry_piecewise(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;

	if (info->prev) {
		int cmp = do_compare_entry_piecewise(ce, info->prev,
						     &info->name);
		if (cmp)
			return cmp;
	}
	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	/* If ce_len < pathlen then we must have previously hit "name == directory" entry */
	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}

static int do_compare_entry(const struct cache_entry *ce,
			    const struct traverse_info *info,
			    const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;
	int cmp;

	/*
	 * If we have not precomputed the traverse path, it is quicker
	 * to avoid doing so.  But if we have precomputed it,
	 * it is quicker to use the precomputed version.
	 */
	if (!info->traverse_path)
		return do_compare_entry_piecewise(ce, info, n);

	cmp = strncmp(ce->name, info->traverse_path, info->pathlen);
	if (cmp)
		return cmp;

	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}

static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int cmp = do_compare_entry(ce, info, n);
	if (cmp)
		return cmp;

	/*
	 * Even if the beginning compared identically, the ce should
	 * compare as bigger than a directory leading up to it!
	 */
	return ce_namelen(ce) > traverse_path_len(info, n);
}

static int ce_in_traverse_path(const struct cache_entry *ce,
			       const struct traverse_info *info)
{
	if (!info->prev)
		return 1;
	if (do_compare_entry(ce, info->prev, &info->name))
		return 0;
	/*
	 * If ce (blob) is the same name as the path (which is a tree
	 * we will be descending into), it won't be inside it.
	 */
	return (info->pathlen < ce_namelen(ce));
}

static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage)
{
	int len = traverse_path_len(info, n);
	struct cache_entry *ce = xcalloc(1, cache_entry_size(len));

	ce->ce_mode = create_ce_mode(n->mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	oidcpy(&ce->oid, n->oid);
	make_traverse_path(ce->name, info, n);

	return ce;
}
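
/*
 * Note on the stage numbers used below: when merging, entries coming
 * from trees before o->head_idx become stage #1 (ancestor), the tree
 * at o->head_idx becomes stage #2 (ours) and later trees become stage
 * #3 (theirs); a plain read with no merge uses stage #0.
 */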
static int unpack_nondirectories(int n, unsigned long mask,
				 unsigned long dirmask,
				 struct cache_entry **src,
				 const struct name_entry *names,
				 const struct traverse_info *info)
{
	int i;
	struct unpack_trees_options *o = info->data;
	unsigned long conflicts = info->df_conflicts | dirmask;

	/* Do we have *only* directories? Nothing to do */
	if (mask == dirmask && !src[0])
		return 0;

	/*
	 * Ok, we've filled in up to any potential index entry in src[0],
	 * now do the rest.
	 */
	for (i = 0; i < n; i++) {
		int stage;
		unsigned int bit = 1ul << i;
		if (conflicts & bit) {
			src[i + o->merge] = o->df_conflict_entry;
			continue;
		}
		if (!(mask & bit))
			continue;
		if (!o->merge)
			stage = 0;
		else if (i + 1 < o->head_idx)
			stage = 1;
		else if (i + 1 > o->head_idx)
			stage = 3;
		else
			stage = 2;
		src[i + o->merge] = create_ce_entry(info, names + i, stage);
	}

	if (o->merge) {
		int rc = call_unpack_fn((const struct cache_entry * const *)src,
					o);
		for (i = 0; i < n; i++) {
			struct cache_entry *ce = src[i + o->merge];
			if (ce != o->df_conflict_entry)
				free(ce);
		}
		return rc;
	}

	for (i = 0; i < n; i++)
		if (src[i] && src[i] != o->df_conflict_entry)
			if (do_add_entry(o, src[i], 0, 0))
				return -1;

	return 0;
}

static int unpack_failed(struct unpack_trees_options *o, const char *message)
{
	discard_index(&o->result);
	if (!o->gently && !o->exiting_early) {
		if (message)
			return error("%s", message);
		return -1;
	}
	return -1;
}

/*
 * The tree traversal is looking at name p.  If we have a matching entry,
 * return it.  If name p is a directory in the index, do not return
 * anything, as we will want to match it when the traversal descends into
 * the directory.
 */
static int find_cache_pos(struct traverse_info *info,
			  const struct name_entry *p)
{
	int pos;
	struct unpack_trees_options *o = info->data;
	struct index_state *index = o->src_index;
	int pfxlen = info->pathlen;
	int p_len = tree_entry_len(p);

	for (pos = o->cache_bottom; pos < index->cache_nr; pos++) {
		const struct cache_entry *ce = index->cache[pos];
		const char *ce_name, *ce_slash;
		int cmp, ce_len;

		if (ce->ce_flags & CE_UNPACKED) {
			/*
			 * cache_bottom entry is already unpacked, so
			 * we can never match it; don't check it
			 * again.
			 */
			if (pos == o->cache_bottom)
				++o->cache_bottom;
			continue;
		}
		if (!ce_in_traverse_path(ce, info)) {
			/*
			 * Check if we can skip future cache checks
			 * (because we're already past all possible
			 * entries in the traverse path).
			 */
			if (info->traverse_path) {
				if (strncmp(ce->name, info->traverse_path,
					    info->pathlen) > 0)
					break;
			}
			continue;
		}
		ce_name = ce->name + pfxlen;
		ce_slash = strchr(ce_name, '/');
		if (ce_slash)
			ce_len = ce_slash - ce_name;
		else
			ce_len = ce_namelen(ce) - pfxlen;
		cmp = name_compare(p->path, p_len, ce_name, ce_len);
		/*
		 * Exact match; if we have a directory we need to
		 * delay returning it.
		 */
		if (!cmp)
			return ce_slash ? -2 - pos : pos;
		if (0 < cmp)
			continue; /* keep looking */
		/*
		 * ce_name sorts after p->path; could it be that we
		 * have files under p->path directory in the index?
		 * E.g.  ce_name == "t-i", and p->path == "t"; we may
		 * have "t/a" in the index.
		 */
		if (p_len < ce_len && !memcmp(ce_name, p->path, p_len) &&
		    ce_name[p_len] < '/')
			continue; /* keep looking */
		break;
	}
	return -1;
}

static struct cache_entry *find_cache_entry(struct traverse_info *info,
					    const struct name_entry *p)
{
	int pos = find_cache_pos(info, p);
	struct unpack_trees_options *o = info->data;

	if (0 <= pos)
		return o->src_index->cache[pos];
	else
		return NULL;
}

static void debug_path(struct traverse_info *info)
{
	if (info->prev) {
		debug_path(info->prev);
		if (*info->prev->name.path)
			putchar('/');
	}
	printf("%s", info->name.path);
}

static void debug_name_entry(int i, struct name_entry *n)
{
	printf("ent#%d %06o %s\n", i,
	       n->path ? n->mode : 0,
	       n->path ? n->path : "(missing)");
}

static void debug_unpack_callback(int n,
				  unsigned long mask,
				  unsigned long dirmask,
				  struct name_entry *names,
				  struct traverse_info *info)
{
	int i;
	printf("* unpack mask %lu, dirmask %lu, cnt %d ",
	       mask, dirmask, n);
	debug_path(info);
	putchar('\n');
	for (i = 0; i < n; i++)
		debug_name_entry(i, names + i);
}
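
/*
 * The callback invoked by traverse_trees(): it pairs the tree entries
 * seen at this path with the matching index entry (when o->merge is
 * set), hands them to the merge function via unpack_nondirectories(),
 * and then decides whether the traversal has to descend into
 * subdirectories.
 */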
static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	const struct name_entry *p = names;

	/* Find first entry with a real name (we could use "mask" too) */
	while (!p->mode)
		p++;

	if (o->debug_unpack)
		debug_unpack_callback(n, mask, dirmask, names, info);

	/* Are we supposed to look at the index too? */
	if (o->merge) {
		while (1) {
			int cmp;
			struct cache_entry *ce;

			if (o->diff_index_cached)
				ce = next_cache_entry(o);
			else
				ce = find_cache_entry(info, p);

			if (!ce)
				break;
			cmp = compare_entry(ce, info, p);
			if (cmp < 0) {
				if (unpack_index_entry(ce, o) < 0)
					return unpack_failed(o, NULL);
				continue;
			}
			if (!cmp) {
				if (ce_stage(ce)) {
					/*
					 * If we skip unmerged index
					 * entries, we'll skip this
					 * entry *and* the tree
					 * entries associated with it!
					 */
					if (o->skip_unmerged) {
						add_same_unmerged(ce, o);
						return mask;
					}
				}
				src[0] = ce;
			}
			break;
		}
	}

	if (unpack_nondirectories(n, mask, dirmask, src, names, info) < 0)
		return -1;

	if (o->merge && src[0]) {
		if (ce_stage(src[0]))
			mark_ce_used_same_name(src[0], o);
		else
			mark_ce_used(src[0], o);
	}

	/* Now handle any directories.. */
	if (dirmask) {
		/* special case: "diff-index --cached" looking at a tree */
		if (o->diff_index_cached &&
		    n == 1 && dirmask == 1 && S_ISDIR(names->mode)) {
			int matches;
			matches = cache_tree_matches_traversal(o->src_index->cache_tree,
							       names, info);
			/*
			 * Everything under the name matches; skip the
			 * entire hierarchy.  diff_index_cached codepath
			 * special cases D/F conflicts in such a way that
			 * it does not do any look-ahead, so this is safe.
			 */
			if (matches) {
				o->cache_bottom += matches;
				return mask;
			}
		}

		if (traverse_trees_recursive(n, dirmask, mask & ~dirmask,
					     names, info) < 0)
			return -1;
		return mask;
	}

	return mask;
}

static int clear_ce_flags_1(struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct exclude_list *el, int defval);

/* Whole directory matching */
static int clear_ce_flags_dir(struct cache_entry **cache, int nr,
			      struct strbuf *prefix,
			      char *basename,
			      int select_mask, int clear_mask,
			      struct exclude_list *el, int defval)
{
	struct cache_entry **cache_end;
	int dtype = DT_DIR;
	int ret = is_excluded_from_list(prefix->buf, prefix->len,
					basename, &dtype, el, &the_index);
	int rc;

	strbuf_addch(prefix, '/');

	/* If undecided, use matching result of parent dir in defval */
	if (ret < 0)
		ret = defval;

	for (cache_end = cache; cache_end != cache + nr; cache_end++) {
		struct cache_entry *ce = *cache_end;
		if (strncmp(ce->name, prefix->buf, prefix->len))
			break;
	}

	/*
	 * TODO: check el, if there are no patterns that may conflict
	 * with ret (iow, we know in advance the incl/excl
	 * decision for the entire directory), clear flag here without
	 * calling clear_ce_flags_1(). That function will call
	 * the expensive is_excluded_from_list() on every entry.
	 */
	rc = clear_ce_flags_1(cache, cache_end - cache,
			      prefix,
			      select_mask, clear_mask,
			      el, ret);
	strbuf_setlen(prefix, prefix->len - 1);
	return rc;
}
Return the1112 * number of traversed entries.1113 *1114 * If select_mask is non-zero, only entries whose ce_flags has on of1115 * those bits enabled are traversed.1116 *1117 * cache : pointer to an index entry1118 * prefix_len : an offset to its path1119 *1120 * The current path ("prefix") including the trailing '/' is1121 * cache[0]->name[0..(prefix_len-1)]1122 * Top level path has prefix_len zero.1123 */1124static int clear_ce_flags_1(struct cache_entry **cache, int nr,1125 struct strbuf *prefix,1126 int select_mask, int clear_mask,1127 struct exclude_list *el, int defval)1128{1129 struct cache_entry **cache_end = cache + nr;11301131 /*1132 * Process all entries that have the given prefix and meet1133 * select_mask condition1134 */1135 while(cache != cache_end) {1136 struct cache_entry *ce = *cache;1137 const char *name, *slash;1138 int len, dtype, ret;11391140 if (select_mask && !(ce->ce_flags & select_mask)) {1141 cache++;1142 continue;1143 }11441145 if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len))1146 break;11471148 name = ce->name + prefix->len;1149 slash = strchr(name, '/');11501151 /* If it's a directory, try whole directory match first */1152 if (slash) {1153 int processed;11541155 len = slash - name;1156 strbuf_add(prefix, name, len);11571158 processed = clear_ce_flags_dir(cache, cache_end - cache,1159 prefix,1160 prefix->buf + prefix->len - len,1161 select_mask, clear_mask,1162 el, defval);11631164 /* clear_c_f_dir eats a whole dir already? */1165 if (processed) {1166 cache += processed;1167 strbuf_setlen(prefix, prefix->len - len);1168 continue;1169 }11701171 strbuf_addch(prefix, '/');1172 cache += clear_ce_flags_1(cache, cache_end - cache,1173 prefix,1174 select_mask, clear_mask, el, defval);1175 strbuf_setlen(prefix, prefix->len - len - 1);1176 continue;1177 }11781179 /* Non-directory */1180 dtype = ce_to_dtype(ce);1181 ret = is_excluded_from_list(ce->name, ce_namelen(ce),1182 name, &dtype, el, &the_index);1183 if (ret < 0)1184 ret = defval;1185 if (ret > 0)1186 ce->ce_flags &= ~clear_mask;1187 cache++;1188 }1189 return nr - (cache_end - cache);1190}11911192static int clear_ce_flags(struct cache_entry **cache, int nr,1193 int select_mask, int clear_mask,1194 struct exclude_list *el)1195{1196 static struct strbuf prefix = STRBUF_INIT;11971198 strbuf_reset(&prefix);11991200 return clear_ce_flags_1(cache, nr,1201 &prefix,1202 select_mask, clear_mask,1203 el, 0);1204}12051206/*1207 * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout1208 */1209static void mark_new_skip_worktree(struct exclude_list *el,1210 struct index_state *the_index,1211 int select_flag, int skip_wt_flag)1212{1213 int i;12141215 /*1216 * 1. Pretend the narrowest worktree: only unmerged entries1217 * are checked out1218 */1219 for (i = 0; i < the_index->cache_nr; i++) {1220 struct cache_entry *ce = the_index->cache[i];12211222 if (select_flag && !(ce->ce_flags & select_flag))1223 continue;12241225 if (!ce_stage(ce))1226 ce->ce_flags |= skip_wt_flag;1227 else1228 ce->ce_flags &= ~skip_wt_flag;1229 }12301231 /*1232 * 2. Widen worktree according to sparse-checkout file.1233 * Matched entries will have skip_wt_flag cleared (i.e. "in")1234 */1235 clear_ce_flags(the_index->cache, the_index->cache_nr,1236 select_flag, skip_wt_flag, el);1237}12381239static int verify_absent(const struct cache_entry *,1240 enum unpack_trees_error_types,1241 struct unpack_trees_options *);1242/*1243 * N-way merge "len" trees. 
/*
 * N-way merge "len" trees.  Returns 0 on success, -1 on failure to
 * manipulate the resulting index, -2 on failure to reflect the changes
 * to the work tree.
 *
 * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally
 */
int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
{
	int i, ret;
	static struct cache_entry *dfc;
	struct exclude_list el;

	if (len > MAX_UNPACK_TREES)
		die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);

	memset(&el, 0, sizeof(el));
	if (!core_apply_sparse_checkout || !o->update)
		o->skip_sparse_checkout = 1;
	if (!o->skip_sparse_checkout) {
		char *sparse = git_pathdup("info/sparse-checkout");
		if (add_excludes_from_file_to_list(sparse, "", 0, &el, NULL) < 0)
			o->skip_sparse_checkout = 1;
		else
			o->el = &el;
		free(sparse);
	}

	memset(&o->result, 0, sizeof(o->result));
	o->result.initialized = 1;
	o->result.timestamp.sec = o->src_index->timestamp.sec;
	o->result.timestamp.nsec = o->src_index->timestamp.nsec;
	o->result.version = o->src_index->version;
	o->result.split_index = o->src_index->split_index;
	if (o->result.split_index)
		o->result.split_index->refcount++;
	hashcpy(o->result.sha1, o->src_index->sha1);
	o->merge_size = len;
	mark_all_ce_unused(o->src_index);

	/*
	 * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries
	 */
	if (!o->skip_sparse_checkout)
		mark_new_skip_worktree(o->el, o->src_index, 0, CE_NEW_SKIP_WORKTREE);

	if (!dfc)
		dfc = xcalloc(1, cache_entry_size(0));
	o->df_conflict_entry = dfc;

	if (len) {
		const char *prefix = o->prefix ? o->prefix : "";
		struct traverse_info info;

		setup_traverse_info(&info, prefix);
		info.fn = unpack_callback;
		info.data = o;
		info.show_all_errors = o->show_all_errors;
		info.pathspec = o->pathspec;

		if (o->prefix) {
			/*
			 * Unpack existing index entries that sort before the
			 * prefix the tree is spliced into.  Note that o->merge
			 * is always true in this case.
			 */
			while (1) {
				struct cache_entry *ce = next_cache_entry(o);
				if (!ce)
					break;
				if (ce_in_traverse_path(ce, &info))
					break;
				if (unpack_index_entry(ce, o) < 0)
					goto return_failed;
			}
		}

		if (traverse_trees(len, t, &info) < 0)
			goto return_failed;
	}

	/* Any left-over entries in the index? */
	if (o->merge) {
		while (1) {
			struct cache_entry *ce = next_cache_entry(o);
			if (!ce)
				break;
			if (unpack_index_entry(ce, o) < 0)
				goto return_failed;
		}
	}
	mark_all_ce_unused(o->src_index);

	if (o->trivial_merges_only && o->nontrivial_merge) {
		ret = unpack_failed(o, "Merge requires file-level merging");
		goto done;
	}

	if (!o->skip_sparse_checkout) {
		int empty_worktree = 1;

		/*
		 * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #1.
		 * If they will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE
		 * so apply_sparse_checkout() won't attempt to remove it from worktree
		 */
		mark_new_skip_worktree(o->el, &o->result, CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);

		ret = 0;
		for (i = 0; i < o->result.cache_nr; i++) {
			struct cache_entry *ce = o->result.cache[i];

			/*
			 * Entries marked with CE_ADDED in merged_entry() do not have
			 * verify_absent() check (the check is effectively disabled
			 * because CE_NEW_SKIP_WORKTREE is set unconditionally).
			 *
			 * Do the real check now because we now have the
			 * correct CE_NEW_SKIP_WORKTREE.
			 */
			if (ce->ce_flags & CE_ADDED &&
			    verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}

			if (apply_sparse_checkout(&o->result, ce, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}
			if (!ce_skip_worktree(ce))
				empty_worktree = 0;

		}
		if (ret < 0)
			goto return_failed;
		/*
		 * Sparse checkout is meant to narrow down checkout area
		 * but it does not make sense to narrow down to empty working
		 * tree. This is usually a mistake in sparse checkout rules.
		 * Do not allow users to do that.
		 */
		if (o->result.cache_nr && empty_worktree) {
			ret = unpack_failed(o, "Sparse checkout leaves no entry on working directory");
			goto done;
		}
	}

	o->src_index = NULL;
	ret = check_updates(o) ? (-2) : 0;
	if (o->dst_index) {
		if (!ret) {
			if (!o->result.cache_tree)
				o->result.cache_tree = cache_tree();
			if (!cache_tree_fully_valid(o->result.cache_tree))
				cache_tree_update(&o->result,
						  WRITE_TREE_SILENT |
						  WRITE_TREE_REPAIR);
		}
		move_index_extensions(&o->result, o->dst_index);
		discard_index(o->dst_index);
		*o->dst_index = o->result;
	} else {
		discard_index(&o->result);
	}

done:
	clear_exclude_list(&el);
	return ret;

return_failed:
	if (o->show_all_errors)
		display_error_msgs(o);
	mark_all_ce_unused(o->src_index);
	ret = unpack_failed(o, NULL);
	if (o->exiting_early)
		ret = 0;
	goto done;
}

/* Here come the merge functions */
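
/*
 * Each merge function below returns the number of entries it added to
 * the result index (zero or more), or a negative value on error;
 * call_unpack_fn() flattens positive return values to zero.
 */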

static int reject_merge(const struct cache_entry *ce,
			struct unpack_trees_options *o)
{
	return o->gently ? -1 :
		add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
}

static int same(const struct cache_entry *a, const struct cache_entry *b)
{
	if (!!a != !!b)
		return 0;
	if (!a && !b)
		return 1;
	if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED)
		return 0;
	return a->ce_mode == b->ce_mode &&
	       !oidcmp(&a->oid, &b->oid);
}

/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static int verify_uptodate_1(const struct cache_entry *ce,
			     struct unpack_trees_options *o,
			     enum unpack_trees_error_types error_type)
{
	struct stat st;

	if (o->index_only)
		return 0;

	/*
	 * CE_VALID and CE_SKIP_WORKTREE cheat; we had better check again
	 * whether this entry is truly up-to-date, because this file may
	 * be overwritten.
	 */
	if ((ce->ce_flags & CE_VALID) || ce_skip_worktree(ce))
		; /* keep checking */
	else if (o->reset || ce_uptodate(ce))
		return 0;

	if (!lstat(ce->name, &st)) {
		int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE;
		unsigned changed = ie_match_stat(o->src_index, ce, &st, flags);

		if (submodule_from_ce(ce)) {
			int r = check_submodule_move_head(ce,
				"HEAD", oid_to_hex(&ce->oid), o);
			if (r)
				return o->gently ? -1 :
					add_rejected_path(o, error_type, ce->name);
			return 0;
		}

		if (!changed)
			return 0;
		/*
		 * Historic default policy was to allow submodule to be out
		 * of sync wrt the superproject index. If the submodule was
		 * not considered interesting above, we don't care here.
		 */
		if (S_ISGITLINK(ce->ce_mode))
			return 0;

		errno = 0;
	}
	if (errno == ENOENT)
		return 0;
	return o->gently ? -1 :
		add_rejected_path(o, error_type, ce->name);
}

static int verify_uptodate(const struct cache_entry *ce,
			   struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);
}

static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o)
{
	return verify_uptodate_1(ce, o, ERROR_SPARSE_NOT_UPTODATE_FILE);
}

static void invalidate_ce_path(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	if (!ce)
		return;
	cache_tree_invalidate_path(o->src_index, ce->name);
	untracked_cache_invalidate_path(o->src_index, ce->name);
}

/*
 * Check that checking out ce->sha1 in subdir ce->name is not
 * going to overwrite any working files.
 *
 * Currently, git does not checkout subprojects during a superproject
 * checkout, so it is not going to overwrite anything.
 */
static int verify_clean_submodule(const char *old_sha1,
				  const struct cache_entry *ce,
				  enum unpack_trees_error_types error_type,
				  struct unpack_trees_options *o)
{
	if (!submodule_from_ce(ce))
		return 0;

	return check_submodule_move_head(ce, old_sha1,
					 oid_to_hex(&ce->oid), o);
}

static int verify_clean_subdirectory(const struct cache_entry *ce,
				     enum unpack_trees_error_types error_type,
				     struct unpack_trees_options *o)
{
	/*
	 * we are about to extract "ce->name"; we would not want to lose
	 * anything in the existing directory there.
	 */
	int namelen;
	int i;
	struct dir_struct d;
	char *pathbuf;
	int cnt = 0;

	if (S_ISGITLINK(ce->ce_mode)) {
		unsigned char sha1[20];
		int sub_head = resolve_gitlink_ref(ce->name, "HEAD", sha1);
		/*
		 * If we are not going to update the submodule, then
		 * we don't care.
		 */
		if (!sub_head && !hashcmp(sha1, ce->oid.hash))
			return 0;
		return verify_clean_submodule(sub_head ? NULL : sha1_to_hex(sha1),
					      ce, error_type, o);
	}

	/*
	 * First let's make sure we do not have a local modification
	 * in that directory.
	 */
	namelen = ce_namelen(ce);
	for (i = locate_in_src_index(ce, o);
	     i < o->src_index->cache_nr;
	     i++) {
		struct cache_entry *ce2 = o->src_index->cache[i];
		int len = ce_namelen(ce2);
		if (len < namelen ||
		    strncmp(ce->name, ce2->name, namelen) ||
		    ce2->name[namelen] != '/')
			break;
		/*
		 * ce2->name is an entry in the subdirectory to be
		 * removed.
		 */
		if (!ce_stage(ce2)) {
			if (verify_uptodate(ce2, o))
				return -1;
			add_entry(o, ce2, CE_REMOVE, 0);
			mark_ce_used(ce2, o);
		}
		cnt++;
	}

	/*
	 * Then we need to make sure that we do not lose a locally
	 * present file that is not ignored.
	 */
	pathbuf = xstrfmt("%.*s/", namelen, ce->name);

	memset(&d, 0, sizeof(d));
	if (o->dir)
		d.exclude_per_dir = o->dir->exclude_per_dir;
	i = read_directory(&d, &the_index, pathbuf, namelen+1, NULL);
	if (i)
		return o->gently ? -1 :
			add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);
	free(pathbuf);
	return cnt;
}

/*
 * This gets called when there was no index entry for the tree entry 'dst',
 * but we found a file in the working tree that 'lstat()' said was fine,
 * and we're on a case-insensitive filesystem.
 *
 * See if we can find a case-insensitive match in the index that also
 * matches the stat information, and assume it's that other file!
 */
static int icase_exists(struct unpack_trees_options *o, const char *name, int len, struct stat *st)
{
	const struct cache_entry *src;

	src = index_file_exists(o->src_index, name, len, 1);
	return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);
}

static int check_ok_to_remove(const char *name, int len, int dtype,
			      const struct cache_entry *ce, struct stat *st,
			      enum unpack_trees_error_types error_type,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *result;

	/*
	 * It may be that the 'lstat()' succeeded even though
	 * target 'ce' was absent, because there is an old
	 * entry that is different only in case..
	 *
	 * Ignore that lstat() if it matches.
	 */
	if (ignore_case && icase_exists(o, name, len, st))
		return 0;

	if (o->dir &&
	    is_excluded(o->dir, &the_index, name, &dtype))
		/*
		 * ce->name is explicitly excluded, so it is Ok to
		 * overwrite it.
		 */
		return 0;
	if (S_ISDIR(st->st_mode)) {
		/*
		 * We are checking out path "foo" and
		 * found "foo/." in the working tree.
		 * This is tricky -- if we have modified
		 * files that are in "foo/" we would lose
		 * them.
		 */
		if (verify_clean_subdirectory(ce, error_type, o) < 0)
			return -1;
		return 0;
	}

	/*
	 * The previous round may already have decided to
	 * delete this path, which is in a subdirectory that
	 * is being replaced with a blob.
	 */
	result = index_file_exists(&o->result, name, len, 0);
	if (result) {
		if (result->ce_flags & CE_REMOVE)
			return 0;
	}

	return o->gently ? -1 :
		add_rejected_path(o, error_type, name);
}

/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked, unless it is ignored.
 */
static int verify_absent_1(const struct cache_entry *ce,
			   enum unpack_trees_error_types error_type,
			   struct unpack_trees_options *o)
{
	int len;
	struct stat st;

	if (o->index_only || o->reset || !o->update)
		return 0;

	len = check_leading_path(ce->name, ce_namelen(ce));
	if (!len)
		return 0;
	else if (len > 0) {
		char *path;
		int ret;

		path = xmemdupz(ce->name, len);
		if (lstat(path, &st))
			ret = error_errno("cannot stat '%s'", path);
		else {
			if (submodule_from_ce(ce))
				ret = check_submodule_move_head(ce,
								oid_to_hex(&ce->oid),
								NULL, o);
			else
				ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL,
							 &st, error_type, o);
		}
		free(path);
		return ret;
	} else if (lstat(ce->name, &st)) {
		if (errno != ENOENT)
			return error_errno("cannot stat '%s'", ce->name);
		return 0;
	} else {
		if (submodule_from_ce(ce))
			return check_submodule_move_head(ce, oid_to_hex(&ce->oid),
							 NULL, o);

		return check_ok_to_remove(ce->name, ce_namelen(ce),
					  ce_to_dtype(ce), ce, &st,
					  error_type, o);
	}
}

static int verify_absent(const struct cache_entry *ce,
			 enum unpack_trees_error_types error_type,
			 struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_absent_1(ce, error_type, o);
}

static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types error_type,
				struct unpack_trees_options *o)
{
	enum unpack_trees_error_types orphaned_error = error_type;
	if (orphaned_error == ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN)
		orphaned_error = ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN;

	return verify_absent_1(ce, orphaned_error, o);
}

static int merged_entry(const struct cache_entry *ce,
			const struct cache_entry *old,
			struct unpack_trees_options *o)
{
	int update = CE_UPDATE;
	struct cache_entry *merge = dup_entry(ce);

	if (!old) {
		/*
		 * New index entries. In sparse checkout, the following
		 * verify_absent() will be delayed until after
		 * traverse_trees() finishes in unpack_trees(), then:
		 *
		 *  - CE_NEW_SKIP_WORKTREE will be computed correctly
		 *  - verify_absent() will be called again, this time with
		 *    the correct CE_NEW_SKIP_WORKTREE
		 *
		 * The verify_absent() call here does nothing in sparse
		 * checkout (i.e. o->skip_sparse_checkout == 0)
		 */
		update |= CE_ADDED;
		merge->ce_flags |= CE_NEW_SKIP_WORKTREE;

		if (verify_absent(merge,
				  ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
			free(merge);
			return -1;
		}
		invalidate_ce_path(merge, o);

		if (submodule_from_ce(ce)) {
			int ret = check_submodule_move_head(ce, NULL,
							    oid_to_hex(&ce->oid),
							    o);
			if (ret)
				return ret;
		}

	} else if (!(old->ce_flags & CE_CONFLICTED)) {
		/*
		 * See if we can re-use the old CE directly?
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on a match; otherwise
		 * we will end up overwriting local changes in the work tree.
		 */
		if (same(old, merge)) {
			copy_cache_entry(merge, old);
			update = 0;
		} else {
			if (verify_uptodate(old, o)) {
				free(merge);
				return -1;
			}
			/* Migrate old flags over */
			update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);
			invalidate_ce_path(old, o);
		}

		if (submodule_from_ce(ce)) {
			int ret = check_submodule_move_head(ce, oid_to_hex(&old->oid),
							    oid_to_hex(&ce->oid),
							    o);
			if (ret)
				return ret;
		}
	} else {
		/*
		 * Previously unmerged entry left as an existence
		 * marker by read_index_unmerged();
		 */
		invalidate_ce_path(old, o);
	}

	do_add_entry(o, merge, update, CE_STAGEMASK);
	return 1;
}

static int deleted_entry(const struct cache_entry *ce,
			 const struct cache_entry *old,
			 struct unpack_trees_options *o)
{
	/* Did it exist in the index? */
	if (!old) {
		if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
			return -1;
		return 0;
	}
	if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o))
		return -1;
	add_entry(o, ce, CE_REMOVE, 0);
	invalidate_ce_path(ce, o);
	return 1;
}

static int keep_entry(const struct cache_entry *ce,
		      struct unpack_trees_options *o)
{
	add_entry(o, ce, 0, 0);
	return 1;
}

#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ce->ce_mode,
			oid_to_hex(&ce->oid),
			ce_stage(ce),
			ce->name);
}
#endif
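
/*
 * stages[] layout for the n-way merge: stages[0] is the index entry,
 * stages[1..o->head_idx-1] are the common ancestors, stages[o->head_idx]
 * is "ours" (HEAD) and stages[o->head_idx + 1] is "theirs" (the remote).
 */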
int threeway_merge(const struct cache_entry * const *stages,
		   struct unpack_trees_options *o)
{
	const struct cache_entry *index;
	const struct cache_entry *head;
	const struct cache_entry *remote = stages[o->head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	for (i = 1; i < o->head_idx; i++) {
		if (!stages[i] || stages[i] == o->df_conflict_entry)
			any_anc_missing = 1;
		else
			no_anc_exists = 0;
	}

	index = stages[0];
	head = stages[o->head_idx];

	if (head == o->df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == o->df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	/*
	 * First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < o->head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/*
	 * We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			return reject_merge(index, o);
		return merged_entry(remote, index, o);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head))
		return reject_merge(index, o);

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index, o);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index, o);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/*
	 * Under the "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (o->aggressive) {
		int head_deleted = !head;
		int remote_deleted = !remote;
		const struct cache_entry *ce = NULL;

		if (index)
			ce = index;
		else if (head)
			ce = head;
		else if (remote)
			ce = remote;
		else {
			for (i = 1; i < o->head_idx; i++) {
				if (stages[i] && stages[i] != o->df_conflict_entry) {
					ce = stages[i];
					break;
				}
			}
		}

		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index, o);
			if (ce && !head_deleted) {
				if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
					return -1;
			}
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index, o);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		if (verify_uptodate(index, o))
			return -1;
	}

	o->nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #10, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		for (i = 1; i < o->head_idx; i++) {
			if (stages[i] && stages[i] != o->df_conflict_entry) {
				keep_entry(stages[i], o);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head, o); }
	if (remote) { count += keep_entry(remote, o); }
	return count;
}

/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast-forward", favoring a successful merge
 * over a merge failure when it makes sense.  For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 *
 */
int twoway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *current = src[0];
	const struct cache_entry *oldtree = src[1];
	const struct cache_entry *newtree = src[2];

	if (o->merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     o->merge_size);

	if (oldtree == o->df_conflict_entry)
		oldtree = NULL;
	if (newtree == o->df_conflict_entry)
		newtree = NULL;

	if (current) {
		if (current->ce_flags & CE_CONFLICTED) {
			if (same(oldtree, newtree) || o->reset) {
				if (!newtree)
					return deleted_entry(current, current, o);
				else
					return merged_entry(newtree, current, o);
			}
			return reject_merge(current, o);
		} else if ((!oldtree && !newtree) || /* 4 and 5 */
			   (!oldtree && newtree &&
			    same(current, newtree)) || /* 6 and 7 */
			   (oldtree && newtree &&
			    same(oldtree, newtree)) || /* 14 and 15 */
			   (oldtree && newtree &&
			    !same(oldtree, newtree) && /* 18 and 19 */
			    same(current, newtree))) {
			return keep_entry(current, o);
		} else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current, o);
		} else if (oldtree && newtree &&
			   same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current, o);
		} else
			return reject_merge(current, o);
	}
	else if (newtree) {
		if (oldtree && !o->initial_checkout) {
			/*
			 * deletion of the path was staged;
			 */
			if (same(oldtree, newtree))
				return 1;
			return reject_merge(oldtree, o);
		}
		return merged_entry(newtree, current, o);
	}
	return deleted_entry(oldtree, current, o);
}

/*
 * Bind merge.
 *
 * Keep the index entries at stage0, collapse stage1 but make sure
 * stage0 does not have anything there.
 */
int bind_merge(const struct cache_entry * const *src,
	       struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a bind merge of %d trees",
			     o->merge_size);
	if (a && old)
		return o->gently ? -1 :
			error(ERRORMSG(o, ERROR_BIND_OVERLAP),
			      super_prefixed(a->name),
			      super_prefixed(old->name));
	if (!a)
		return keep_entry(old, o);
	else
		return merged_entry(a, NULL, o);
}

/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
int oneway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     o->merge_size);

	if (!a || a == o->df_conflict_entry)
		return deleted_entry(old, old, o);

	if (old && same(old, a)) {
		int update = 0;
		if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old)) {
			struct stat st;
			if (lstat(old->name, &st) ||
			    ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE))
				update |= CE_UPDATE;
		}
		add_entry(o, old, update, 0);
		return 0;
	}
	return merged_entry(a, old, o);
}