#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "repository.h"
#include "config.h"
#include "dir.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "progress.h"
#include "refs.h"
#include "attr.h"
#include "split-index.h"
#include "dir.h"
#include "submodule.h"
#include "submodule-config.h"
#include "fsmonitor.h"
#include "fetch-object.h"

/*
 * Error messages expected by scripts out of plumbing commands such as
 * read-tree. Non-scripted Porcelain is not required to use these messages
 * and is in fact encouraged to reword them to better suit its particular
 * situation. See how "git checkout" and "git merge" replace them using
 * setup_unpack_trees_porcelain(), for example.
 */
static const char *unpack_plumbing_errors[NB_UNPACK_TREES_ERROR_TYPES] = {
	/* ERROR_WOULD_OVERWRITE */
	"Entry '%s' would be overwritten by merge. Cannot merge.",

	/* ERROR_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot merge.",

	/* ERROR_NOT_UPTODATE_DIR */
	"Updating '%s' would lose untracked files in it",

	/* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */
	"Untracked working tree file '%s' would be overwritten by merge.",

	/* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */
	"Untracked working tree file '%s' would be removed by merge.",

	/* ERROR_BIND_OVERLAP */
	"Entry '%s' overlaps with '%s'. Cannot bind.",

	/* ERROR_SPARSE_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot update sparse checkout.",

	/* ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN */
	"Working tree file '%s' would be overwritten by sparse checkout update.",

	/* ERROR_WOULD_LOSE_ORPHANED_REMOVED */
	"Working tree file '%s' would be removed by sparse checkout update.",

	/* ERROR_WOULD_LOSE_SUBMODULE */
	"Submodule '%s' cannot checkout new HEAD.",
};

#define ERRORMSG(o,type) \
	( ((o) && (o)->msgs[(type)]) \
	  ? ((o)->msgs[(type)]) \
	  : (unpack_plumbing_errors[(type)]) )

static const char *super_prefixed(const char *path)
{
	/*
	 * It is necessary and sufficient to have two static buffers
	 * here, as the return value of this function is fed to
	 * error() using the unpack_*_errors[] templates we see above.
	 */
	static struct strbuf buf[2] = {STRBUF_INIT, STRBUF_INIT};
	static int super_prefix_len = -1;
	static unsigned idx = ARRAY_SIZE(buf) - 1;

	if (super_prefix_len < 0) {
		const char *super_prefix = get_super_prefix();
		if (!super_prefix) {
			super_prefix_len = 0;
		} else {
			int i;
			for (i = 0; i < ARRAY_SIZE(buf); i++)
				strbuf_addstr(&buf[i], super_prefix);
			super_prefix_len = buf[0].len;
		}
	}

	if (!super_prefix_len)
		return path;

	if (++idx >= ARRAY_SIZE(buf))
		idx = 0;

	strbuf_setlen(&buf[idx], super_prefix_len);
	strbuf_addstr(&buf[idx], path);

	return buf[idx].buf;
}

void setup_unpack_trees_porcelain(struct unpack_trees_options *opts,
				  const char *cmd)
{
	int i;
	const char **msgs = opts->msgs;
	const char *msg;

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by checkout:\n%%s"
			  "Please commit your changes or stash them before you switch branches.")
		      : _("Your local changes to the following files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by merge:\n%%s"
			  "Please commit your changes or stash them before you merge.")
		      : _("Your local changes to the following files would be overwritten by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by %s:\n%%s"
			  "Please commit your changes or stash them before you %s.")
		      : _("Your local changes to the following files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] =
		xstrfmt(msg, cmd, cmd);

	msgs[ERROR_NOT_UPTODATE_DIR] =
		_("Updating the following directories would lose untracked files in them:\n%s");

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be removed by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be removed by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be removed by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = xstrfmt(msg, cmd, cmd);

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be overwritten by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = xstrfmt(msg, cmd, cmd);

	/*
	 * Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we
	 * cannot easily display it as a list.
	 */
	msgs[ERROR_BIND_OVERLAP] = _("Entry '%s' overlaps with '%s'. Cannot bind.");

	msgs[ERROR_SPARSE_NOT_UPTODATE_FILE] =
		_("Cannot update sparse checkout: the following entries are not up to date:\n%s");
	msgs[ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN] =
		_("The following working tree files would be overwritten by sparse checkout update:\n%s");
	msgs[ERROR_WOULD_LOSE_ORPHANED_REMOVED] =
		_("The following working tree files would be removed by sparse checkout update:\n%s");
	msgs[ERROR_WOULD_LOSE_SUBMODULE] =
		_("Cannot update submodule:\n%s");

	opts->show_all_errors = 1;
	/* rejected paths may not have a static buffer */
	for (i = 0; i < ARRAY_SIZE(opts->unpack_rejects); i++)
		opts->unpack_rejects[i].strdup_strings = 1;
}

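/*
 * The helpers below populate o->result, the index that unpack_trees() is
 * building.  Entries are duplicated with dup_entry() before being added;
 * do_add_entry() clears CE_HASHED because the copy goes into a new hash
 * table, and turns CE_REMOVE into CE_WT_REMOVE as well, so that a removed
 * entry also has its file removed from the working tree.
 */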
static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
			unsigned int set, unsigned int clear)
{
	clear |= CE_HASHED;

	if (set & CE_REMOVE)
		set |= CE_WT_REMOVE;

	ce->ce_flags = (ce->ce_flags & ~clear) | set;
	return add_index_entry(&o->result, ce,
			       ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
}

static struct cache_entry *dup_entry(const struct cache_entry *ce)
{
	unsigned int size = ce_size(ce);
	struct cache_entry *new_entry = xmalloc(size);

	memcpy(new_entry, ce, size);
	return new_entry;
}

static void add_entry(struct unpack_trees_options *o,
		      const struct cache_entry *ce,
		      unsigned int set, unsigned int clear)
{
	do_add_entry(o, dup_entry(ce), set, clear);
}

/*
 * Add an error message for path <path>, corresponding to the error
 * type <e>.  Depending on o->show_all_errors it is either reported
 * right away or stored for a later, combined report.
 */
static int add_rejected_path(struct unpack_trees_options *o,
			     enum unpack_trees_error_types e,
			     const char *path)
{
	if (!o->show_all_errors)
		return error(ERRORMSG(o, e), super_prefixed(path));

	/*
	 * Otherwise, insert in a list for future display by
	 * display_error_msgs()
	 */
	string_list_append(&o->unpack_rejects[e], path);
	return -1;
}

/*
 * display all the error messages stored in a nice way
 */
static void display_error_msgs(struct unpack_trees_options *o)
{
	int e, i;
	int something_displayed = 0;
	for (e = 0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) {
		struct string_list *rejects = &o->unpack_rejects[e];
		if (rejects->nr > 0) {
			struct strbuf path = STRBUF_INIT;
			something_displayed = 1;
			for (i = 0; i < rejects->nr; i++)
				strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
			error(ERRORMSG(o, e), super_prefixed(path.buf));
			strbuf_release(&path);
		}
		string_list_clear(rejects, 0);
	}
	if (something_displayed)
		fprintf(stderr, _("Aborting\n"));
}

static int check_submodule_move_head(const struct cache_entry *ce,
				     const char *old_id,
				     const char *new_id,
				     struct unpack_trees_options *o)
{
	unsigned flags = SUBMODULE_MOVE_HEAD_DRY_RUN;
	const struct submodule *sub = submodule_from_ce(ce);

	if (!sub)
		return 0;

	if (o->reset)
		flags |= SUBMODULE_MOVE_HEAD_FORCE;

	if (submodule_move_head(ce->name, old_id, new_id, flags))
		return o->gently ? -1 :
			add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name);
	return 0;
}

/*
 * Perform the loading of the repository's gitmodules file.  This function is
 * used by 'check_updates()' to perform loading of the gitmodules file in two
 * different situations:
 * (1) before removing entries from the working tree if the gitmodules file has
 *     been marked for removal.  This situation is specified by 'state' == NULL.
 * (2) before checking out entries to the working tree if the gitmodules file
 *     has been marked for update.  This situation is specified by 'state' != NULL.
 */
static void load_gitmodules_file(struct index_state *index,
				 struct checkout *state)
{
	int pos = index_name_pos(index, GITMODULES_FILE, strlen(GITMODULES_FILE));

	if (pos >= 0) {
		struct cache_entry *ce = index->cache[pos];
		if (!state && ce->ce_flags & CE_WT_REMOVE) {
			repo_read_gitmodules(the_repository);
		} else if (state && (ce->ce_flags & CE_UPDATE)) {
			submodule_free();
			checkout_entry(ce, state, NULL);
			repo_read_gitmodules(the_repository);
		}
	}
}

/*
 * Unlink the last component and schedule the leading directories for
 * removal, such that empty directories get removed.
 */
static void unlink_entry(const struct cache_entry *ce)
{
	const struct submodule *sub = submodule_from_ce(ce);
	if (sub) {
		/* state.force is set at the caller. */
		submodule_move_head(ce->name, "HEAD", NULL,
				    SUBMODULE_MOVE_HEAD_FORCE);
	}
	if (!check_leading_path(ce->name, ce_namelen(ce)))
		return;
	if (remove_or_warn(ce->ce_mode, ce->name))
		return;
	schedule_dir_for_removal(ce->name, ce_namelen(ce));
}

static struct progress *get_progress(struct unpack_trees_options *o)
{
	unsigned cnt = 0, total = 0;
	struct index_state *index = &o->result;

	if (!o->update || !o->verbose_update)
		return NULL;

	for (; cnt < index->cache_nr; cnt++) {
		const struct cache_entry *ce = index->cache[cnt];
		if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE))
			total++;
	}

	return start_delayed_progress(_("Checking out files"), total);
}

static int check_updates(struct unpack_trees_options *o)
{
	unsigned cnt = 0;
	int errs = 0;
	struct progress *progress = NULL;
	struct index_state *index = &o->result;
	struct checkout state = CHECKOUT_INIT;
	int i;

	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;
	state.istate = index;

	progress = get_progress(o);

	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKOUT, index);

	if (should_update_submodules() && o->update && !o->dry_run)
		load_gitmodules_file(index, NULL);

	for (i = 0; i < index->cache_nr; i++) {
		const struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_WT_REMOVE) {
			display_progress(progress, ++cnt);
			if (o->update && !o->dry_run)
				unlink_entry(ce);
		}
	}
	remove_marked_cache_entries(index);
	remove_scheduled_dirs();

	if (should_update_submodules() && o->update && !o->dry_run)
		load_gitmodules_file(index, &state);

	enable_delayed_checkout(&state);
	if (repository_format_partial_clone && o->update && !o->dry_run) {
		/*
		 * Prefetch the objects that are to be checked out in the loop
		 * below.
		 */
		struct oid_array to_fetch = OID_ARRAY_INIT;
		int fetch_if_missing_store = fetch_if_missing;
		fetch_if_missing = 0;
		for (i = 0; i < index->cache_nr; i++) {
			struct cache_entry *ce = index->cache[i];
			if ((ce->ce_flags & CE_UPDATE) &&
			    !S_ISGITLINK(ce->ce_mode)) {
				if (!has_object_file(&ce->oid))
					oid_array_append(&to_fetch, &ce->oid);
			}
		}
		if (to_fetch.nr)
			fetch_objects(repository_format_partial_clone,
				      &to_fetch);
		fetch_if_missing = fetch_if_missing_store;
		oid_array_clear(&to_fetch);
	}
	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_UPDATE) {
			if (ce->ce_flags & CE_WT_REMOVE)
				die("BUG: both update and delete flags are set on %s",
				    ce->name);
			display_progress(progress, ++cnt);
			ce->ce_flags &= ~CE_UPDATE;
			if (o->update && !o->dry_run) {
				errs |= checkout_entry(ce, &state, NULL);
			}
		}
	}
	stop_progress(&progress);
	errs |= finish_delayed_checkout(&state);
	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKIN, NULL);
	return errs != 0;
}

static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o);
static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types,
				struct unpack_trees_options *o);

static int apply_sparse_checkout(struct index_state *istate,
				 struct cache_entry *ce,
				 struct unpack_trees_options *o)
{
	int was_skip_worktree = ce_skip_worktree(ce);

	if (ce->ce_flags & CE_NEW_SKIP_WORKTREE)
		ce->ce_flags |= CE_SKIP_WORKTREE;
	else
		ce->ce_flags &= ~CE_SKIP_WORKTREE;
	if (was_skip_worktree != ce_skip_worktree(ce)) {
		ce->ce_flags |= CE_UPDATE_IN_BASE;
		mark_fsmonitor_invalid(istate, ce);
		istate->cache_changed |= CE_ENTRY_CHANGED;
	}

	/*
	 * if (!was_skip_worktree && !ce_skip_worktree()) {
	 *	This is perfectly normal. Move on;
	 * }
	 */

	/*
	 * Merge strategies may set CE_UPDATE|CE_REMOVE outside checkout
	 * area as a result of ce_skip_worktree() shortcuts in
	 * verify_absent() and verify_uptodate().
	 * Make sure they don't modify worktree if they are already
	 * outside checkout area
	 */
	if (was_skip_worktree && ce_skip_worktree(ce)) {
		ce->ce_flags &= ~CE_UPDATE;

		/*
		 * By default, when CE_REMOVE is on, CE_WT_REMOVE is also
		 * on to get that file removed from both the index and the
		 * worktree.  If that file is already outside the worktree
		 * area, don't bother removing it.
		 */
		if (ce->ce_flags & CE_REMOVE)
			ce->ce_flags &= ~CE_WT_REMOVE;
	}

	if (!was_skip_worktree && ce_skip_worktree(ce)) {
		/*
		 * If CE_UPDATE is set, verify_uptodate() must have been
		 * called already; also, stat info may have been lost
		 * after merged_entry(), so calling verify_uptodate()
		 * again may fail.
		 */
		if (!(ce->ce_flags & CE_UPDATE) && verify_uptodate_sparse(ce, o))
			return -1;
		ce->ce_flags |= CE_WT_REMOVE;
		ce->ce_flags &= ~CE_UPDATE;
	}
	if (was_skip_worktree && !ce_skip_worktree(ce)) {
		if (verify_absent_sparse(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o))
			return -1;
		ce->ce_flags |= CE_UPDATE;
	}
	return 0;
}

static inline int call_unpack_fn(const struct cache_entry * const *src,
				 struct unpack_trees_options *o)
{
	int ret = o->fn(src, o);
	if (ret > 0)
		ret = 0;
	return ret;
}

static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o)
{
	ce->ce_flags |= CE_UNPACKED;

	if (o->cache_bottom < o->src_index->cache_nr &&
	    o->src_index->cache[o->cache_bottom] == ce) {
		int bottom = o->cache_bottom;
		while (bottom < o->src_index->cache_nr &&
		       o->src_index->cache[bottom]->ce_flags & CE_UNPACKED)
			bottom++;
		o->cache_bottom = bottom;
	}
}

static void mark_all_ce_unused(struct index_state *index)
{
	int i;
	for (i = 0; i < index->cache_nr; i++)
		index->cache[i]->ce_flags &= ~(CE_UNPACKED | CE_ADDED | CE_NEW_SKIP_WORKTREE);
}

static int locate_in_src_index(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);
	if (pos < 0)
		pos = -1 - pos;
	return pos;
}

/*
 * We call unpack_index_entry() with an unmerged cache entry
 * only in diff-index, and it wants a single callback.  Skip
 * the other unmerged entry with the same name.
 */
static void mark_ce_used_same_name(struct cache_entry *ce,
				   struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos;

	for (pos = locate_in_src_index(ce, o); pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		mark_ce_used(next, o);
	}
}

static struct cache_entry *next_cache_entry(struct unpack_trees_options *o)
{
	const struct index_state *index = o->src_index;
	int pos = o->cache_bottom;

	while (pos < index->cache_nr) {
		struct cache_entry *ce = index->cache[pos];
		if (!(ce->ce_flags & CE_UNPACKED))
			return ce;
		pos++;
	}
	return NULL;
}

static void add_same_unmerged(const struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);

	if (0 <= pos)
		die("programming error in a caller of mark_ce_used_same_name");
	for (pos = -pos - 1; pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		add_entry(o, next, 0, 0);
		mark_ce_used(next, o);
	}
}

static int unpack_index_entry(struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	int ret;

	src[0] = ce;

	mark_ce_used(ce, o);
	if (ce_stage(ce)) {
		if (o->skip_unmerged) {
			add_entry(o, ce, 0, 0);
			return 0;
		}
	}
	ret = call_unpack_fn(src, o);
	if (ce_stage(ce))
		mark_ce_used_same_name(ce, o);
	return ret;
}

static int find_cache_pos(struct traverse_info *, const struct name_entry *);

static void restore_cache_bottom(struct traverse_info *info, int bottom)
{
	struct unpack_trees_options *o = info->data;

	if (o->diff_index_cached)
		return;
	o->cache_bottom = bottom;
}

static int switch_cache_bottom(struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int ret, pos;

	if (o->diff_index_cached)
		return 0;
	ret = o->cache_bottom;
	pos = find_cache_pos(info->prev, &info->name);

	if (pos < -1)
		o->cache_bottom = -2 - pos;
	else if (pos < 0)
		o->cache_bottom = o->src_index->cache_nr;
	return ret;
}

static inline int are_same_oid(struct name_entry *name_j, struct name_entry *name_k)
{
	return name_j->oid && name_k->oid && !oidcmp(name_j->oid, name_k->oid);
}

static int traverse_trees_recursive(int n, unsigned long dirmask,
				    unsigned long df_conflicts,
				    struct name_entry *names,
				    struct traverse_info *info)
{
	int i, ret, bottom;
	int nr_buf = 0;
	struct tree_desc t[MAX_UNPACK_TREES];
	void *buf[MAX_UNPACK_TREES];
	struct traverse_info newinfo;
	struct name_entry *p;

	p = names;
	while (!p->mode)
		p++;

	newinfo = *info;
	newinfo.prev = info;
	newinfo.pathspec = info->pathspec;
	newinfo.name = *p;
	newinfo.pathlen += tree_entry_len(p) + 1;
	newinfo.df_conflicts |= df_conflicts;

	/*
	 * Fetch the tree from the ODB for each peer directory in the
	 * n commits.
	 *
	 * For 2- and 3-way traversals, we try to avoid hitting the
	 * ODB twice for the same OID.  This should yield a nice speed
	 * up in checkouts and merges when the commits are similar.
	 *
	 * We don't bother doing the full O(n^2) search for larger n,
	 * because wider traversals don't happen that often and we
	 * avoid the search setup.
	 *
	 * When 2 peer OIDs are the same, we just copy the tree
	 * descriptor data.  This implicitly borrows the buffer
	 * data from the earlier cell.
	 */
	for (i = 0; i < n; i++, dirmask >>= 1) {
		if (i > 0 && are_same_oid(&names[i], &names[i - 1]))
			t[i] = t[i - 1];
		else if (i > 1 && are_same_oid(&names[i], &names[i - 2]))
			t[i] = t[i - 2];
		else {
			const struct object_id *oid = NULL;
			if (dirmask & 1)
				oid = names[i].oid;
			buf[nr_buf++] = fill_tree_descriptor(t + i, oid);
		}
	}

	bottom = switch_cache_bottom(&newinfo);
	ret = traverse_trees(n, t, &newinfo);
	restore_cache_bottom(&newinfo, bottom);

	for (i = 0; i < nr_buf; i++)
		free(buf[i]);

	return ret;
}

/*
 * Compare the traverse-path to the cache entry without actually
 * having to generate the textual representation of the traverse
 * path.
 *
 * NOTE! This *only* compares up to the size of the traverse path
 * itself - the caller needs to do the final check for the cache
 * entry having more data at the end!
 */
static int do_compare_entry_piecewise(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;

	if (info->prev) {
		int cmp = do_compare_entry_piecewise(ce, info->prev,
						     &info->name);
		if (cmp)
			return cmp;
	}
	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	/* If ce_len < pathlen then we must have previously hit "name == directory" entry */
	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}

static int do_compare_entry(const struct cache_entry *ce,
			    const struct traverse_info *info,
			    const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;
	int cmp;

	/*
	 * If we have not precomputed the traverse path, it is quicker
	 * to avoid doing so.  But if we have precomputed it,
	 * it is quicker to use the precomputed version.
	 */
	if (!info->traverse_path)
		return do_compare_entry_piecewise(ce, info, n);

	cmp = strncmp(ce->name, info->traverse_path, info->pathlen);
	if (cmp)
		return cmp;

	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}

static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int cmp = do_compare_entry(ce, info, n);
	if (cmp)
		return cmp;

	/*
	 * Even if the beginning compared identically, the ce should
	 * compare as bigger than a directory leading up to it!
	 */
	return ce_namelen(ce) > traverse_path_len(info, n);
}

static int ce_in_traverse_path(const struct cache_entry *ce,
			       const struct traverse_info *info)
{
	if (!info->prev)
		return 1;
	if (do_compare_entry(ce, info->prev, &info->name))
		return 0;
	/*
	 * If ce (blob) is the same name as the path (which is a tree
	 * we will be descending into), it won't be inside it.
	 */
	return (info->pathlen < ce_namelen(ce));
}

static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage)
{
	int len = traverse_path_len(info, n);
	struct cache_entry *ce = xcalloc(1, cache_entry_size(len));

	ce->ce_mode = create_ce_mode(n->mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	oidcpy(&ce->oid, n->oid);
	make_traverse_path(ce->name, info, n);

	return ce;
}

static int unpack_nondirectories(int n, unsigned long mask,
				 unsigned long dirmask,
				 struct cache_entry **src,
				 const struct name_entry *names,
				 const struct traverse_info *info)
{
	int i;
	struct unpack_trees_options *o = info->data;
	unsigned long conflicts = info->df_conflicts | dirmask;

	/* Do we have *only* directories? Nothing to do */
	if (mask == dirmask && !src[0])
		return 0;

	/*
	 * Ok, we've filled in up to any potential index entry in src[0],
	 * now do the rest.
	 */
	for (i = 0; i < n; i++) {
		int stage;
		unsigned int bit = 1ul << i;
		if (conflicts & bit) {
			src[i + o->merge] = o->df_conflict_entry;
			continue;
		}
		if (!(mask & bit))
			continue;
		if (!o->merge)
			stage = 0;
		else if (i + 1 < o->head_idx)
			stage = 1;
		else if (i + 1 > o->head_idx)
			stage = 3;
		else
			stage = 2;
		src[i + o->merge] = create_ce_entry(info, names + i, stage);
	}

	if (o->merge) {
		int rc = call_unpack_fn((const struct cache_entry * const *)src,
					o);
		for (i = 0; i < n; i++) {
			struct cache_entry *ce = src[i + o->merge];
			if (ce != o->df_conflict_entry)
				free(ce);
		}
		return rc;
	}

	for (i = 0; i < n; i++)
		if (src[i] && src[i] != o->df_conflict_entry)
			if (do_add_entry(o, src[i], 0, 0))
				return -1;

	return 0;
}

static int unpack_failed(struct unpack_trees_options *o, const char *message)
{
	discard_index(&o->result);
	if (!o->gently && !o->exiting_early) {
		if (message)
			return error("%s", message);
		return -1;
	}
	return -1;
}

/*
 * The tree traversal is looking at name p.  If we have a matching entry,
 * return it.  If name p is a directory in the index, do not return
 * anything, as we will want to match it when the traversal descends into
 * the directory.
 */
static int find_cache_pos(struct traverse_info *info,
			  const struct name_entry *p)
{
	int pos;
	struct unpack_trees_options *o = info->data;
	struct index_state *index = o->src_index;
	int pfxlen = info->pathlen;
	int p_len = tree_entry_len(p);

	for (pos = o->cache_bottom; pos < index->cache_nr; pos++) {
		const struct cache_entry *ce = index->cache[pos];
		const char *ce_name, *ce_slash;
		int cmp, ce_len;

		if (ce->ce_flags & CE_UNPACKED) {
			/*
			 * cache_bottom entry is already unpacked, so
			 * we can never match it; don't check it
			 * again.
			 */
			if (pos == o->cache_bottom)
				++o->cache_bottom;
			continue;
		}
		if (!ce_in_traverse_path(ce, info)) {
			/*
			 * Check if we can skip future cache checks
			 * (because we're already past all possible
			 * entries in the traverse path).
			 */
			if (info->traverse_path) {
				if (strncmp(ce->name, info->traverse_path,
					    info->pathlen) > 0)
					break;
			}
			continue;
		}
		ce_name = ce->name + pfxlen;
		ce_slash = strchr(ce_name, '/');
		if (ce_slash)
			ce_len = ce_slash - ce_name;
		else
			ce_len = ce_namelen(ce) - pfxlen;
		cmp = name_compare(p->path, p_len, ce_name, ce_len);
		/*
		 * Exact match; if we have a directory we need to
		 * delay returning it.
		 */
		if (!cmp)
			return ce_slash ? -2 - pos : pos;
		if (0 < cmp)
			continue; /* keep looking */
		/*
		 * ce_name sorts after p->path; could it be that we
		 * have files under p->path directory in the index?
		 * E.g.  ce_name == "t-i", and p->path == "t"; we may
		 * have "t/a" in the index.
		 */
		if (p_len < ce_len && !memcmp(ce_name, p->path, p_len) &&
		    ce_name[p_len] < '/')
			continue; /* keep looking */
		break;
	}
	return -1;
}

static struct cache_entry *find_cache_entry(struct traverse_info *info,
					    const struct name_entry *p)
{
	int pos = find_cache_pos(info, p);
	struct unpack_trees_options *o = info->data;

	if (0 <= pos)
		return o->src_index->cache[pos];
	else
		return NULL;
}

static void debug_path(struct traverse_info *info)
{
	if (info->prev) {
		debug_path(info->prev);
		if (*info->prev->name.path)
			putchar('/');
	}
	printf("%s", info->name.path);
}

static void debug_name_entry(int i, struct name_entry *n)
{
	printf("ent#%d %06o %s\n", i,
	       n->path ? n->mode : 0,
	       n->path ? n->path : "(missing)");
}

static void debug_unpack_callback(int n,
				  unsigned long mask,
				  unsigned long dirmask,
				  struct name_entry *names,
				  struct traverse_info *info)
{
	int i;
	printf("* unpack mask %lu, dirmask %lu, cnt %d ",
	       mask, dirmask, n);
	debug_path(info);
	putchar('\n');
	for (i = 0; i < n; i++)
		debug_name_entry(i, names + i);
}

static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	const struct name_entry *p = names;

	/* Find first entry with a real name (we could use "mask" too) */
	while (!p->mode)
		p++;

	if (o->debug_unpack)
		debug_unpack_callback(n, mask, dirmask, names, info);

	/* Are we supposed to look at the index too? */
	if (o->merge) {
		while (1) {
			int cmp;
			struct cache_entry *ce;

			if (o->diff_index_cached)
				ce = next_cache_entry(o);
			else
				ce = find_cache_entry(info, p);

			if (!ce)
				break;
			cmp = compare_entry(ce, info, p);
			if (cmp < 0) {
				if (unpack_index_entry(ce, o) < 0)
					return unpack_failed(o, NULL);
				continue;
			}
			if (!cmp) {
				if (ce_stage(ce)) {
					/*
					 * If we skip unmerged index
					 * entries, we'll skip this
					 * entry *and* the tree
					 * entries associated with it!
					 */
					if (o->skip_unmerged) {
						add_same_unmerged(ce, o);
						return mask;
					}
				}
				src[0] = ce;
			}
			break;
		}
	}

	if (unpack_nondirectories(n, mask, dirmask, src, names, info) < 0)
		return -1;

	if (o->merge && src[0]) {
		if (ce_stage(src[0]))
			mark_ce_used_same_name(src[0], o);
		else
			mark_ce_used(src[0], o);
	}

	/* Now handle any directories.. */
	if (dirmask) {
		/* special case: "diff-index --cached" looking at a tree */
		if (o->diff_index_cached &&
		    n == 1 && dirmask == 1 && S_ISDIR(names->mode)) {
			int matches;
			matches = cache_tree_matches_traversal(o->src_index->cache_tree,
							       names, info);
			/*
			 * Everything under the name matches; skip the
			 * entire hierarchy.  diff_index_cached codepath
			 * special cases D/F conflicts in such a way that
			 * it does not do any look-ahead, so this is safe.
			 */
			if (matches) {
				o->cache_bottom += matches;
				return mask;
			}
		}

		if (traverse_trees_recursive(n, dirmask, mask & ~dirmask,
					     names, info) < 0)
			return -1;
		return mask;
	}

	return mask;
}

static int clear_ce_flags_1(struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct exclude_list *el, int defval);

/* Whole directory matching */
static int clear_ce_flags_dir(struct cache_entry **cache, int nr,
			      struct strbuf *prefix,
			      char *basename,
			      int select_mask, int clear_mask,
			      struct exclude_list *el, int defval)
{
	struct cache_entry **cache_end;
	int dtype = DT_DIR;
	int ret = is_excluded_from_list(prefix->buf, prefix->len,
					basename, &dtype, el, &the_index);
	int rc;

	strbuf_addch(prefix, '/');

	/* If undecided, use matching result of parent dir in defval */
	if (ret < 0)
		ret = defval;

	for (cache_end = cache; cache_end != cache + nr; cache_end++) {
		struct cache_entry *ce = *cache_end;
		if (strncmp(ce->name, prefix->buf, prefix->len))
			break;
	}

	/*
	 * TODO: check el, if there are no patterns that may conflict
	 * with ret (iow, we know in advance the incl/excl
	 * decision for the entire directory), clear flag here without
	 * calling clear_ce_flags_1(). That function will call
	 * the expensive is_excluded_from_list() on every entry.
	 */
	rc = clear_ce_flags_1(cache, cache_end - cache,
			      prefix,
			      select_mask, clear_mask,
			      el, ret);
	strbuf_setlen(prefix, prefix->len - 1);
	return rc;
}

/*
 * Traverse the index, find every entry that matches according to
 * o->el.  Do "ce_flags &= ~clear_mask" on those entries.  Return the
 * number of traversed entries.
 *
 * If select_mask is non-zero, only entries whose ce_flags has one of
 * those bits enabled are traversed.
 *
 * cache	: pointer to an index entry
 * prefix	: the current path, including the trailing '/';
 *		  it is empty (length zero) at the top level
 */
static int clear_ce_flags_1(struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct exclude_list *el, int defval)
{
	struct cache_entry **cache_end = cache + nr;

	/*
	 * Process all entries that have the given prefix and meet
	 * select_mask condition
	 */
	while (cache != cache_end) {
		struct cache_entry *ce = *cache;
		const char *name, *slash;
		int len, dtype, ret;

		if (select_mask && !(ce->ce_flags & select_mask)) {
			cache++;
			continue;
		}

		if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len))
			break;

		name = ce->name + prefix->len;
		slash = strchr(name, '/');

		/* If it's a directory, try whole directory match first */
		if (slash) {
			int processed;

			len = slash - name;
			strbuf_add(prefix, name, len);

			processed = clear_ce_flags_dir(cache, cache_end - cache,
						       prefix,
						       prefix->buf + prefix->len - len,
						       select_mask, clear_mask,
						       el, defval);

			/* clear_ce_flags_dir() already handled the whole directory? */
			if (processed) {
				cache += processed;
				strbuf_setlen(prefix, prefix->len - len);
				continue;
			}

			strbuf_addch(prefix, '/');
			cache += clear_ce_flags_1(cache, cache_end - cache,
						  prefix,
						  select_mask, clear_mask, el, defval);
			strbuf_setlen(prefix, prefix->len - len - 1);
			continue;
		}

		/* Non-directory */
		dtype = ce_to_dtype(ce);
		ret = is_excluded_from_list(ce->name, ce_namelen(ce),
					    name, &dtype, el, &the_index);
		if (ret < 0)
			ret = defval;
		if (ret > 0)
			ce->ce_flags &= ~clear_mask;
		cache++;
	}
	return nr - (cache_end - cache);
}

static int clear_ce_flags(struct cache_entry **cache, int nr,
			  int select_mask, int clear_mask,
			  struct exclude_list *el)
{
	static struct strbuf prefix = STRBUF_INIT;

	strbuf_reset(&prefix);

	return clear_ce_flags_1(cache, nr,
				&prefix,
				select_mask, clear_mask,
				el, 0);
}

/*
 * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout
 */
static void mark_new_skip_worktree(struct exclude_list *el,
				   struct index_state *the_index,
				   int select_flag, int skip_wt_flag)
{
	int i;

	/*
	 * 1. Pretend the narrowest worktree: only unmerged entries
	 * are checked out
	 */
	for (i = 0; i < the_index->cache_nr; i++) {
		struct cache_entry *ce = the_index->cache[i];

		if (select_flag && !(ce->ce_flags & select_flag))
			continue;

		if (!ce_stage(ce))
			ce->ce_flags |= skip_wt_flag;
		else
			ce->ce_flags &= ~skip_wt_flag;
	}

	/*
	 * 2. Widen worktree according to sparse-checkout file.
	 * Matched entries will have skip_wt_flag cleared (i.e. "in")
	 */
	clear_ce_flags(the_index->cache, the_index->cache_nr,
		       select_flag, skip_wt_flag, el);
}

static int verify_absent(const struct cache_entry *,
			 enum unpack_trees_error_types,
			 struct unpack_trees_options *);
/*
 * N-way merge "len" trees.  Returns 0 on success, -1 on failure to manipulate the
 * resulting index, -2 on failure to reflect the changes to the work tree.
 *
 * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally
 */
int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
{
	int i, ret;
	static struct cache_entry *dfc;
	struct exclude_list el;

	if (len > MAX_UNPACK_TREES)
		die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);

	memset(&el, 0, sizeof(el));
	if (!core_apply_sparse_checkout || !o->update)
		o->skip_sparse_checkout = 1;
	if (!o->skip_sparse_checkout) {
		char *sparse = git_pathdup("info/sparse-checkout");
		if (add_excludes_from_file_to_list(sparse, "", 0, &el, NULL) < 0)
			o->skip_sparse_checkout = 1;
		else
			o->el = &el;
		free(sparse);
	}

	memset(&o->result, 0, sizeof(o->result));
	o->result.initialized = 1;
	o->result.timestamp.sec = o->src_index->timestamp.sec;
	o->result.timestamp.nsec = o->src_index->timestamp.nsec;
	o->result.version = o->src_index->version;
	o->result.split_index = o->src_index->split_index;
	if (o->result.split_index)
		o->result.split_index->refcount++;
	hashcpy(o->result.sha1, o->src_index->sha1);
	o->merge_size = len;
	mark_all_ce_unused(o->src_index);

	/*
	 * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries
	 */
	if (!o->skip_sparse_checkout)
		mark_new_skip_worktree(o->el, o->src_index, 0, CE_NEW_SKIP_WORKTREE);

	if (!dfc)
		dfc = xcalloc(1, cache_entry_size(0));
	o->df_conflict_entry = dfc;

	if (len) {
		const char *prefix = o->prefix ? o->prefix : "";
		struct traverse_info info;

		setup_traverse_info(&info, prefix);
		info.fn = unpack_callback;
		info.data = o;
		info.show_all_errors = o->show_all_errors;
		info.pathspec = o->pathspec;

		if (o->prefix) {
			/*
			 * Unpack existing index entries that sort before the
			 * prefix the tree is spliced into.  Note that o->merge
			 * is always true in this case.
			 */
			while (1) {
				struct cache_entry *ce = next_cache_entry(o);
				if (!ce)
					break;
				if (ce_in_traverse_path(ce, &info))
					break;
				if (unpack_index_entry(ce, o) < 0)
					goto return_failed;
			}
		}

		if (traverse_trees(len, t, &info) < 0)
			goto return_failed;
	}

	/* Any left-over entries in the index? */
	if (o->merge) {
		while (1) {
			struct cache_entry *ce = next_cache_entry(o);
			if (!ce)
				break;
			if (unpack_index_entry(ce, o) < 0)
				goto return_failed;
		}
	}
	mark_all_ce_unused(o->src_index);

	if (o->trivial_merges_only && o->nontrivial_merge) {
		ret = unpack_failed(o, "Merge requires file-level merging");
		goto done;
	}

	if (!o->skip_sparse_checkout) {
		int empty_worktree = 1;

		/*
		 * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #1.
		 * If they will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE
		 * so apply_sparse_checkout() won't attempt to remove them from the worktree.
		 */
		mark_new_skip_worktree(o->el, &o->result, CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);

		ret = 0;
		for (i = 0; i < o->result.cache_nr; i++) {
			struct cache_entry *ce = o->result.cache[i];

			/*
			 * Entries marked with CE_ADDED in merged_entry() do not have
			 * verify_absent() check (the check is effectively disabled
			 * because CE_NEW_SKIP_WORKTREE is set unconditionally).
			 *
			 * Do the real check now because we have the
			 * correct CE_NEW_SKIP_WORKTREE.
			 */
			if (ce->ce_flags & CE_ADDED &&
			    verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}

			if (apply_sparse_checkout(&o->result, ce, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}
			if (!ce_skip_worktree(ce))
				empty_worktree = 0;

		}
		if (ret < 0)
			goto return_failed;
		/*
		 * Sparse checkout is meant to narrow down checkout area
		 * but it does not make sense to narrow down to empty working
		 * tree.  This is usually a mistake in sparse checkout rules.
		 * Do not allow users to do that.
		 */
		if (o->result.cache_nr && empty_worktree) {
			ret = unpack_failed(o, "Sparse checkout leaves no entry on working directory");
			goto done;
		}
	}

	o->src_index = NULL;
	ret = check_updates(o) ? (-2) : 0;
	if (o->dst_index) {
		if (!ret) {
			if (!o->result.cache_tree)
				o->result.cache_tree = cache_tree();
			if (!cache_tree_fully_valid(o->result.cache_tree))
				cache_tree_update(&o->result,
						  WRITE_TREE_SILENT |
						  WRITE_TREE_REPAIR);
		}
		move_index_extensions(&o->result, o->dst_index);
		discard_index(o->dst_index);
		*o->dst_index = o->result;
	} else {
		discard_index(&o->result);
	}

done:
	clear_exclude_list(&el);
	return ret;

return_failed:
	if (o->show_all_errors)
		display_error_msgs(o);
	mark_all_ce_unused(o->src_index);
	ret = unpack_failed(o, NULL);
	if (o->exiting_early)
		ret = 0;
	goto done;
}

/* Here come the merge functions */

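/*
 * Each of the functions below is used as the o->fn callback of
 * unpack_trees(): for one path it receives the index entry (src[0], when
 * o->merge is set) followed by the entries from the trees being unpacked,
 * and decides what ends up in the resulting index.  A negative return
 * value signals an error; any non-negative value is treated as success by
 * call_unpack_fn().
 */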
static int reject_merge(const struct cache_entry *ce,
			struct unpack_trees_options *o)
{
	return o->gently ? -1 :
		add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
}

static int same(const struct cache_entry *a, const struct cache_entry *b)
{
	if (!!a != !!b)
		return 0;
	if (!a && !b)
		return 1;
	if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED)
		return 0;
	return a->ce_mode == b->ce_mode &&
	       !oidcmp(&a->oid, &b->oid);
}


/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static int verify_uptodate_1(const struct cache_entry *ce,
			     struct unpack_trees_options *o,
			     enum unpack_trees_error_types error_type)
{
	struct stat st;

	if (o->index_only)
		return 0;

	/*
	 * CE_VALID and CE_SKIP_WORKTREE cheat, so we had better check
	 * again whether this entry is truly up-to-date, because this
	 * file may be overwritten.
	 */
	if ((ce->ce_flags & CE_VALID) || ce_skip_worktree(ce))
		; /* keep checking */
	else if (o->reset || ce_uptodate(ce))
		return 0;

	if (!lstat(ce->name, &st)) {
		int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE;
		unsigned changed = ie_match_stat(o->src_index, ce, &st, flags);

		if (submodule_from_ce(ce)) {
			int r = check_submodule_move_head(ce,
				"HEAD", oid_to_hex(&ce->oid), o);
			if (r)
				return o->gently ? -1 :
					add_rejected_path(o, error_type, ce->name);
			return 0;
		}

		if (!changed)
			return 0;
		/*
		 * Historic default policy was to allow submodule to be out
		 * of sync wrt the superproject index.  If the submodule was
		 * not considered interesting above, we don't care here.
		 */
		if (S_ISGITLINK(ce->ce_mode))
			return 0;

		errno = 0;
	}
	if (errno == ENOENT)
		return 0;
	return o->gently ? -1 :
		add_rejected_path(o, error_type, ce->name);
}

int verify_uptodate(const struct cache_entry *ce,
		    struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);
}

static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o)
{
	return verify_uptodate_1(ce, o, ERROR_SPARSE_NOT_UPTODATE_FILE);
}

static void invalidate_ce_path(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	if (!ce)
		return;
	cache_tree_invalidate_path(o->src_index, ce->name);
	untracked_cache_invalidate_path(o->src_index, ce->name, 1);
}

/*
 * Check that checking out ce->sha1 in subdir ce->name is not
 * going to overwrite any working files.
 *
 * Currently, git does not checkout subprojects during a superproject
 * checkout, so it is not going to overwrite anything.
 */
static int verify_clean_submodule(const char *old_sha1,
				  const struct cache_entry *ce,
				  enum unpack_trees_error_types error_type,
				  struct unpack_trees_options *o)
{
	if (!submodule_from_ce(ce))
		return 0;

	return check_submodule_move_head(ce, old_sha1,
					 oid_to_hex(&ce->oid), o);
}

static int verify_clean_subdirectory(const struct cache_entry *ce,
				     enum unpack_trees_error_types error_type,
				     struct unpack_trees_options *o)
{
	/*
	 * we are about to extract "ce->name"; we would not want to lose
	 * anything in the existing directory there.
	 */
	int namelen;
	int i;
	struct dir_struct d;
	char *pathbuf;
	int cnt = 0;

	if (S_ISGITLINK(ce->ce_mode)) {
		struct object_id oid;
		int sub_head = resolve_gitlink_ref(ce->name, "HEAD", &oid);
		/*
		 * If we are not going to update the submodule, then
		 * we don't care.
		 */
		if (!sub_head && !oidcmp(&oid, &ce->oid))
			return 0;
		return verify_clean_submodule(sub_head ? NULL : oid_to_hex(&oid),
					      ce, error_type, o);
	}

	/*
	 * First let's make sure we do not have a local modification
	 * in that directory.
	 */
	namelen = ce_namelen(ce);
	for (i = locate_in_src_index(ce, o);
	     i < o->src_index->cache_nr;
	     i++) {
		struct cache_entry *ce2 = o->src_index->cache[i];
		int len = ce_namelen(ce2);
		if (len < namelen ||
		    strncmp(ce->name, ce2->name, namelen) ||
		    ce2->name[namelen] != '/')
			break;
		/*
		 * ce2->name is an entry in the subdirectory to be
		 * removed.
		 */
		if (!ce_stage(ce2)) {
			if (verify_uptodate(ce2, o))
				return -1;
			add_entry(o, ce2, CE_REMOVE, 0);
			mark_ce_used(ce2, o);
		}
		cnt++;
	}

	/*
	 * Then we need to make sure that we do not lose a locally
	 * present file that is not ignored.
	 */
	pathbuf = xstrfmt("%.*s/", namelen, ce->name);

	memset(&d, 0, sizeof(d));
	if (o->dir)
		d.exclude_per_dir = o->dir->exclude_per_dir;
	i = read_directory(&d, &the_index, pathbuf, namelen+1, NULL);
	if (i)
		return o->gently ? -1 :
			add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);
	free(pathbuf);
	return cnt;
}

/*
 * This gets called when there was no index entry for the tree entry 'dst',
 * but we found a file in the working tree that 'lstat()' said was fine,
 * and we're on a case-insensitive filesystem.
 *
 * See if we can find a case-insensitive match in the index that also
 * matches the stat information, and assume it's that other file!
 */
static int icase_exists(struct unpack_trees_options *o, const char *name, int len, struct stat *st)
{
	const struct cache_entry *src;

	src = index_file_exists(o->src_index, name, len, 1);
	return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);
}

static int check_ok_to_remove(const char *name, int len, int dtype,
			      const struct cache_entry *ce, struct stat *st,
			      enum unpack_trees_error_types error_type,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *result;

	/*
	 * It may be that the 'lstat()' succeeded even though
	 * target 'ce' was absent, because there is an old
	 * entry that is different only in case.
	 *
	 * Ignore that lstat() if it matches.
	 */
	if (ignore_case && icase_exists(o, name, len, st))
		return 0;

	if (o->dir &&
	    is_excluded(o->dir, &the_index, name, &dtype))
		/*
		 * ce->name is explicitly excluded, so it is Ok to
		 * overwrite it.
		 */
		return 0;
	if (S_ISDIR(st->st_mode)) {
		/*
		 * We are checking out path "foo" and
		 * found "foo/." in the working tree.
		 * This is tricky -- if we have modified
		 * files that are in "foo/" we would lose
		 * them.
		 */
		if (verify_clean_subdirectory(ce, error_type, o) < 0)
			return -1;
		return 0;
	}

	/*
	 * The previous round may already have decided to
	 * delete this path, which is in a subdirectory that
	 * is being replaced with a blob.
	 */
	result = index_file_exists(&o->result, name, len, 0);
	if (result) {
		if (result->ce_flags & CE_REMOVE)
			return 0;
	}

	return o->gently ? -1 :
		add_rejected_path(o, error_type, name);
}

/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked, unless it is ignored.
 */
static int verify_absent_1(const struct cache_entry *ce,
			   enum unpack_trees_error_types error_type,
			   struct unpack_trees_options *o)
{
	int len;
	struct stat st;

	if (o->index_only || o->reset || !o->update)
		return 0;

	len = check_leading_path(ce->name, ce_namelen(ce));
	if (!len)
		return 0;
	else if (len > 0) {
		char *path;
		int ret;

		path = xmemdupz(ce->name, len);
		if (lstat(path, &st))
			ret = error_errno("cannot stat '%s'", path);
		else {
			if (submodule_from_ce(ce))
				ret = check_submodule_move_head(ce,
								oid_to_hex(&ce->oid),
								NULL, o);
			else
				ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL,
							 &st, error_type, o);
		}
		free(path);
		return ret;
	} else if (lstat(ce->name, &st)) {
		if (errno != ENOENT)
			return error_errno("cannot stat '%s'", ce->name);
		return 0;
	} else {
		if (submodule_from_ce(ce))
			return check_submodule_move_head(ce, oid_to_hex(&ce->oid),
							 NULL, o);

		return check_ok_to_remove(ce->name, ce_namelen(ce),
					  ce_to_dtype(ce), ce, &st,
					  error_type, o);
	}
}

static int verify_absent(const struct cache_entry *ce,
			 enum unpack_trees_error_types error_type,
			 struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_absent_1(ce, error_type, o);
}

static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types error_type,
				struct unpack_trees_options *o)
{
	enum unpack_trees_error_types orphaned_error = error_type;
	if (orphaned_error == ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN)
		orphaned_error = ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN;

	return verify_absent_1(ce, orphaned_error, o);
}

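/*
 * Add the merge result "ce" to the resulting index, replacing the existing
 * index entry "old" (which may be NULL for a newly added path).  Returns 1
 * on success and a negative value when the entry cannot be checked out
 * safely (e.g. it would overwrite local changes or untracked files).
 */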
static int merged_entry(const struct cache_entry *ce,
			const struct cache_entry *old,
			struct unpack_trees_options *o)
{
	int update = CE_UPDATE;
	struct cache_entry *merge = dup_entry(ce);

	if (!old) {
		/*
		 * New index entries. In sparse checkout, the following
		 * verify_absent() will be delayed until after
		 * traverse_trees() finishes in unpack_trees(), then:
		 *
		 *  - CE_NEW_SKIP_WORKTREE will be computed correctly
		 *  - verify_absent() will be called again, this time with
		 *    the correct CE_NEW_SKIP_WORKTREE
		 *
		 * The verify_absent() call here does nothing in sparse
		 * checkout (i.e. o->skip_sparse_checkout == 0)
		 */
		update |= CE_ADDED;
		merge->ce_flags |= CE_NEW_SKIP_WORKTREE;

		if (verify_absent(merge,
				  ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
			free(merge);
			return -1;
		}
		invalidate_ce_path(merge, o);

		if (submodule_from_ce(ce)) {
			int ret = check_submodule_move_head(ce, NULL,
							    oid_to_hex(&ce->oid),
							    o);
			if (ret)
				return ret;
		}

	} else if (!(old->ce_flags & CE_CONFLICTED)) {
		/*
		 * See if we can re-use the old CE directly?
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on a match; otherwise
		 * we will end up overwriting local changes in the work tree.
		 */
		if (same(old, merge)) {
			copy_cache_entry(merge, old);
			update = 0;
		} else {
			if (verify_uptodate(old, o)) {
				free(merge);
				return -1;
			}
			/* Migrate old flags over */
			update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);
			invalidate_ce_path(old, o);
		}

		if (submodule_from_ce(ce)) {
			int ret = check_submodule_move_head(ce, oid_to_hex(&old->oid),
							    oid_to_hex(&ce->oid),
							    o);
			if (ret)
				return ret;
		}
	} else {
		/*
		 * Previously unmerged entry left as an existence
		 * marker by read_index_unmerged().
		 */
		invalidate_ce_path(old, o);
	}

	do_add_entry(o, merge, update, CE_STAGEMASK);
	return 1;
}

static int deleted_entry(const struct cache_entry *ce,
			 const struct cache_entry *old,
			 struct unpack_trees_options *o)
{
	/* Did it exist in the index? */
	if (!old) {
		if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
			return -1;
		return 0;
	}
	if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o))
		return -1;
	add_entry(o, ce, CE_REMOVE, 0);
	invalidate_ce_path(ce, o);
	return 1;
}

static int keep_entry(const struct cache_entry *ce,
		      struct unpack_trees_options *o)
{
	add_entry(o, ce, 0, 0);
	return 1;
}

#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ce->ce_mode,
			oid_to_hex(&ce->oid),
			ce_stage(ce),
			ce->name);
}
#endif

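/*
 * Three-way merge: stages[0] is the current index entry, stages[o->head_idx]
 * is HEAD, stages[o->head_idx + 1] is the remote side, and the stages in
 * between are merge-base ("ancestor") candidates.  The "#N" labels in the
 * comments below refer to git's historical trivial-merge case numbering.
 */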
int threeway_merge(const struct cache_entry * const *stages,
		   struct unpack_trees_options *o)
{
	const struct cache_entry *index;
	const struct cache_entry *head;
	const struct cache_entry *remote = stages[o->head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	for (i = 1; i < o->head_idx; i++) {
		if (!stages[i] || stages[i] == o->df_conflict_entry)
			any_anc_missing = 1;
		else
			no_anc_exists = 0;
	}

	index = stages[0];
	head = stages[o->head_idx];

	if (head == o->df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == o->df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	/*
	 * First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < o->head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/*
	 * We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			return reject_merge(index, o);
		return merged_entry(remote, index, o);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head))
		return reject_merge(index, o);

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index, o);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index, o);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/*
	 * Under the "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (o->aggressive) {
		int head_deleted = !head;
		int remote_deleted = !remote;
		const struct cache_entry *ce = NULL;

		if (index)
			ce = index;
		else if (head)
			ce = head;
		else if (remote)
			ce = remote;
		else {
			for (i = 1; i < o->head_idx; i++) {
				if (stages[i] && stages[i] != o->df_conflict_entry) {
					ce = stages[i];
					break;
				}
			}
		}

		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index, o);
			if (ce && !head_deleted) {
				if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
					return -1;
			}
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index, o);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		if (verify_uptodate(index, o))
			return -1;
	}

	o->nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #10, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		for (i = 1; i < o->head_idx; i++) {
			if (stages[i] && stages[i] != o->df_conflict_entry) {
				keep_entry(stages[i], o);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head, o); }
	if (remote) { count += keep_entry(remote, o); }
	return count;
}

/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast-forward", favoring a successful merge
 * over a merge failure when it makes sense.  For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 *
 */
int twoway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *current = src[0];
	const struct cache_entry *oldtree = src[1];
	const struct cache_entry *newtree = src[2];

	if (o->merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     o->merge_size);

	if (oldtree == o->df_conflict_entry)
		oldtree = NULL;
	if (newtree == o->df_conflict_entry)
		newtree = NULL;

	if (current) {
		if (current->ce_flags & CE_CONFLICTED) {
			if (same(oldtree, newtree) || o->reset) {
				if (!newtree)
					return deleted_entry(current, current, o);
				else
					return merged_entry(newtree, current, o);
			}
			return reject_merge(current, o);
		} else if ((!oldtree && !newtree) || /* 4 and 5 */
			   (!oldtree && newtree &&
			    same(current, newtree)) || /* 6 and 7 */
			   (oldtree && newtree &&
			    same(oldtree, newtree)) || /* 14 and 15 */
			   (oldtree && newtree &&
			    !same(oldtree, newtree) && /* 18 and 19 */
			    same(current, newtree))) {
			return keep_entry(current, o);
		} else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current, o);
		} else if (oldtree && newtree &&
			   same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current, o);
		} else
			return reject_merge(current, o);
	}
	else if (newtree) {
		if (oldtree && !o->initial_checkout) {
			/*
			 * deletion of the path was staged;
			 */
			if (same(oldtree, newtree))
				return 1;
			return reject_merge(oldtree, o);
		}
		return merged_entry(newtree, current, o);
	}
	return deleted_entry(oldtree, current, o);
}

/*
 * Bind merge.
 *
 * Keep the index entries at stage0, collapse stage1 but make sure
 * stage0 does not have anything there.
 */
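/*
 * This is the strategy used by "git read-tree --prefix=<dir>/" to graft a
 * tree into a subdirectory of the index, which is why overlapping entries
 * are reported with ERROR_BIND_OVERLAP.
 */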
int bind_merge(const struct cache_entry * const *src,
	       struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a bind merge of %d trees",
			     o->merge_size);
	if (a && old)
		return o->gently ? -1 :
			error(ERRORMSG(o, ERROR_BIND_OVERLAP),
			      super_prefixed(a->name),
			      super_prefixed(old->name));
	if (!a)
		return keep_entry(old, o);
	else
		return merged_entry(a, NULL, o);
}

/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
int oneway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     o->merge_size);

	if (!a || a == o->df_conflict_entry)
		return deleted_entry(old, old, o);

	if (old && same(old, a)) {
		int update = 0;
		if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old)) {
			struct stat st;
			if (lstat(old->name, &st) ||
			    ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE))
				update |= CE_UPDATE;
		}
		if (o->update && S_ISGITLINK(old->ce_mode) &&
		    should_update_submodules() && !verify_uptodate(old, o))
			update |= CE_UPDATE;
		add_entry(o, old, update, 0);
		return 0;
	}
	return merged_entry(a, old, o);
}