#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "argv-array.h"
#include "repository.h"
#include "config.h"
#include "dir.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "progress.h"
#include "refs.h"
#include "attr.h"
#include "split-index.h"
#include "dir.h" /* NOTE(review): duplicate of the earlier include of "dir.h"; harmless (header is guarded) but could be dropped */
#include "submodule.h"
#include "submodule-config.h"
#include "fsmonitor.h"
#include "object-store.h"
#include "fetch-object.h"

/*
 * Error messages expected by scripts out of plumbing commands such as
 * read-tree.  Non-scripted Porcelain is not required to use these messages
 * and in fact are encouraged to reword them to better suit their particular
 * situation.  See how "git checkout" and "git merge" replaces
 * them using setup_unpack_trees_porcelain(), for example.
 */
static const char *unpack_plumbing_errors[NB_UNPACK_TREES_ERROR_TYPES] = {
	/* ERROR_WOULD_OVERWRITE */
	"Entry '%s' would be overwritten by merge. Cannot merge.",

	/* ERROR_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot merge.",

	/* ERROR_NOT_UPTODATE_DIR */
	"Updating '%s' would lose untracked files in it",

	/* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */
	"Untracked working tree file '%s' would be overwritten by merge.",

	/* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */
	"Untracked working tree file '%s' would be removed by merge.",

	/* ERROR_BIND_OVERLAP */
	"Entry '%s' overlaps with '%s'. Cannot bind.",

	/* ERROR_SPARSE_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot update sparse checkout.",

	/* ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN */
	"Working tree file '%s' would be overwritten by sparse checkout update.",

	/* ERROR_WOULD_LOSE_ORPHANED_REMOVED */
	"Working tree file '%s' would be removed by sparse checkout update.",

	/* ERROR_WOULD_LOSE_SUBMODULE */
	"Submodule '%s' cannot checkout new HEAD.",
};

/*
 * Pick the caller-supplied (porcelain) message for this error type if one
 * was installed via setup_unpack_trees_porcelain(); otherwise fall back to
 * the plumbing default above.
 */
#define ERRORMSG(o,type) \
	( ((o) && (o)->msgs[(type)]) \
	  ? ((o)->msgs[(type)]) \
	  : (unpack_plumbing_errors[(type)]) )

static const char *super_prefixed(const char *path)
{
	/*
	 * It is necessary and sufficient to have two static buffers
	 * here, as the return value of this function is fed to
	 * error() using the unpack_*_errors[] templates we see above.
	 */
	static struct strbuf buf[2] = {STRBUF_INIT, STRBUF_INIT};
	static int super_prefix_len = -1; /* -1 == super prefix not looked up yet */
	static unsigned idx = ARRAY_SIZE(buf) - 1;

	if (super_prefix_len < 0) {
		const char *super_prefix = get_super_prefix();
		if (!super_prefix) {
			super_prefix_len = 0;
		} else {
			int i;
			for (i = 0; i < ARRAY_SIZE(buf); i++)
				strbuf_addstr(&buf[i], super_prefix);
			super_prefix_len = buf[0].len;
		}
	}

	if (!super_prefix_len)
		return path;

	/* round-robin between the two static buffers */
	if (++idx >= ARRAY_SIZE(buf))
		idx = 0;

	strbuf_setlen(&buf[idx], super_prefix_len);
	strbuf_addstr(&buf[idx], path);

	return buf[idx].buf;
}

/*
 * Install porcelain-friendly messages (reworded per command, e.g.
 * "checkout" or "merge") into opts->msgs, replacing the plumbing
 * defaults.  Messages that embed the command name are built with
 * argv_array_pushf() and owned by opts->msgs_to_free; release them
 * with clear_unpack_trees_porcelain().
 */
void setup_unpack_trees_porcelain(struct unpack_trees_options *opts,
				  const char *cmd)
{
	int i;
	const char **msgs = opts->msgs;
	const char *msg;

	argv_array_init(&opts->msgs_to_free);

	/*
	 * The "%%s" below survives the pushf pass as a literal "%s",
	 * which display_error_msgs() later fills with the path list.
	 */
	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by checkout:\n%%s"
			  "Please commit your changes or stash them before you switch branches.")
		      : _("Your local changes to the following files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by merge:\n%%s"
			  "Please commit your changes or stash them before you merge.")
		      : _("Your local changes to the following files would be overwritten by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by %s:\n%%s"
			  "Please commit your changes or stash them before you %s.")
		      : _("Your local changes to the following files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] =
		argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd);

	msgs[ERROR_NOT_UPTODATE_DIR] =
		_("Updating the following directories would lose untracked files in them:\n%s");

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be removed by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be removed by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be removed by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] =
		argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd);

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be overwritten by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] =
		argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd);

	/*
	 * Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we
	 * cannot easily display it as a list.
	 */
	msgs[ERROR_BIND_OVERLAP] = _("Entry '%s' overlaps with '%s'. Cannot bind.");

	msgs[ERROR_SPARSE_NOT_UPTODATE_FILE] =
		_("Cannot update sparse checkout: the following entries are not up to date:\n%s");
	msgs[ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN] =
		_("The following working tree files would be overwritten by sparse checkout update:\n%s");
	msgs[ERROR_WOULD_LOSE_ORPHANED_REMOVED] =
		_("The following working tree files would be removed by sparse checkout update:\n%s");
	msgs[ERROR_WOULD_LOSE_SUBMODULE] =
		_("Cannot update submodule:\n%s");

	opts->show_all_errors = 1;
	/* rejected paths may not have a static buffer */
	for (i = 0; i < ARRAY_SIZE(opts->unpack_rejects); i++)
		opts->unpack_rejects[i].strdup_strings = 1;
}

/* Release messages allocated by setup_unpack_trees_porcelain(). */
void clear_unpack_trees_porcelain(struct unpack_trees_options *opts)
{
	argv_array_clear(&opts->msgs_to_free);
	memset(opts->msgs, 0, sizeof(opts->msgs));
}

/*
 * Adjust ce's flags (clearing 'clear', setting 'set') and add it to the
 * result index.  CE_REMOVE implies CE_WT_REMOVE so the working-tree file
 * gets deleted too.  Returns the result of add_index_entry().
 */
static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
			unsigned int set, unsigned int clear)
{
	clear |= CE_HASHED;

	if (set & CE_REMOVE)
		set |= CE_WT_REMOVE;

	ce->ce_flags = (ce->ce_flags & ~clear) | set;
	return add_index_entry(&o->result, ce,
			       ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
}

/* Like do_add_entry(), but on a duplicate of ce (original left untouched). */
static void add_entry(struct unpack_trees_options *o,
		      const struct cache_entry *ce,
		      unsigned int set, unsigned int clear)
{
	do_add_entry(o, dup_cache_entry(ce, &o->result), set, clear);
}

/*
 * add error messages on path <path>
 * corresponding to the type <e> with the message <msg>
 * indicating if it should be display in porcelain or not
 */
static int add_rejected_path(struct unpack_trees_options *o,
			     enum unpack_trees_error_types e,
			     const char *path)
{
	if (!o->show_all_errors)
		return error(ERRORMSG(o, e), super_prefixed(path));

	/*
	 * Otherwise, insert in a list for future display by
	 * display_error_msgs()
	 */
	string_list_append(&o->unpack_rejects[e], path);
	return -1;
}

/*
 * display all the error messages stored in a nice way
 */
static void display_error_msgs(struct unpack_trees_options *o)
{
	int e, i;
	int something_displayed = 0;
	for (e = 0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) {
		struct string_list *rejects = &o->unpack_rejects[e];
		if (rejects->nr > 0) {
			struct strbuf path = STRBUF_INIT;
			something_displayed = 1;
			/* one "\t<path>\n" line per rejected path */
			for (i = 0; i < rejects->nr; i++)
				strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
			error(ERRORMSG(o, e), super_prefixed(path.buf));
			strbuf_release(&path);
		}
		string_list_clear(rejects, 0);
	}
	if (something_displayed)
		fprintf(stderr, _("Aborting\n"));
}

/*
 * Dry-run a submodule HEAD move for ce; on failure record it as an
 * ERROR_WOULD_LOSE_SUBMODULE rejection (or just return -1 when gentle).
 * Entries that are not submodules trivially succeed.
 */
static int check_submodule_move_head(const struct cache_entry *ce,
				     const char *old_id,
				     const char *new_id,
				     struct unpack_trees_options *o)
{
	unsigned flags = SUBMODULE_MOVE_HEAD_DRY_RUN;
	const struct submodule *sub = submodule_from_ce(ce);

	if (!sub)
		return 0;

	if (o->reset)
		flags |= SUBMODULE_MOVE_HEAD_FORCE;

	if (submodule_move_head(ce->name, old_id, new_id, flags))
		return o->gently ? -1 :
				   add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name);
	return 0;
}

/*
 * Perform the loading of the repository's gitmodules file. This function is
 * used by 'check_update()' to perform loading of the gitmodules file in two
 * different situations:
 * (1) before removing entries from the working tree if the gitmodules file has
 *     been marked for removal. This situation is specified by 'state' == NULL.
 * (2) before checking out entries to the working tree if the gitmodules file
 *     has been marked for update. This situation is specified by 'state' != NULL.
 */
static void load_gitmodules_file(struct index_state *index,
				 struct checkout *state)
{
	int pos = index_name_pos(index, GITMODULES_FILE, strlen(GITMODULES_FILE));

	if (pos >= 0) {
		struct cache_entry *ce = index->cache[pos];
		if (!state && ce->ce_flags & CE_WT_REMOVE) {
			repo_read_gitmodules(the_repository);
		} else if (state && (ce->ce_flags & CE_UPDATE)) {
			submodule_free(the_repository);
			checkout_entry(ce, state, NULL);
			repo_read_gitmodules(the_repository);
		}
	}
}

/*
 * Unlink the last component and schedule the leading directories for
 * removal, such that empty directories get removed.
 */
static void unlink_entry(const struct cache_entry *ce)
{
	const struct submodule *sub = submodule_from_ce(ce);
	if (sub) {
		/* state.force is set at the caller. */
		submodule_move_head(ce->name, "HEAD", NULL,
				    SUBMODULE_MOVE_HEAD_FORCE);
	}
	if (!check_leading_path(ce->name, ce_namelen(ce)))
		return;
	if (remove_or_warn(ce->ce_mode, ce->name))
		return;
	schedule_dir_for_removal(ce->name, ce_namelen(ce));
}

/*
 * Start a (delayed) progress meter sized by the number of entries that
 * will be updated or removed; NULL when progress reporting is off.
 */
static struct progress *get_progress(struct unpack_trees_options *o)
{
	unsigned cnt = 0, total = 0;
	struct index_state *index = &o->result;

	if (!o->update || !o->verbose_update)
		return NULL;

	for (; cnt < index->cache_nr; cnt++) {
		const struct cache_entry *ce = index->cache[cnt];
		if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE))
			total++;
	}

	return start_delayed_progress(_("Checking out files"), total);
}

/*
 * Apply the CE_WT_REMOVE / CE_UPDATE decisions recorded in o->result to
 * the working tree: remove files first, then check out updated ones
 * (prefetching missing objects in one batch for partial clones).
 * Returns non-zero if any checkout_entry() failed.
 */
static int check_updates(struct unpack_trees_options *o)
{
	unsigned cnt = 0;
	int errs = 0;
	struct progress *progress = NULL;
	struct index_state *index = &o->result;
	struct checkout state = CHECKOUT_INIT;
	int i;

	trace_performance_enter();
	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;
	state.istate = index;

	progress = get_progress(o);

	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKOUT, index);

	/* see load_gitmodules_file(): state == NULL means "before removal" */
	if (should_update_submodules() && o->update && !o->dry_run)
		load_gitmodules_file(index, NULL);

	for (i = 0; i < index->cache_nr; i++) {
		const struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_WT_REMOVE) {
			display_progress(progress, ++cnt);
			if (o->update && !o->dry_run)
				unlink_entry(ce);
		}
	}
	remove_marked_cache_entries(index);
	remove_scheduled_dirs();

	if (should_update_submodules() && o->update && !o->dry_run)
		load_gitmodules_file(index, &state);

	enable_delayed_checkout(&state);
	if (repository_format_partial_clone && o->update && !o->dry_run) {
		/*
		 * Prefetch the objects that are to be checked out in the loop
		 * below.
		 */
		struct oid_array to_fetch = OID_ARRAY_INIT;
		int fetch_if_missing_store = fetch_if_missing;
		fetch_if_missing = 0;
		for (i = 0; i < index->cache_nr; i++) {
			struct cache_entry *ce = index->cache[i];
			if ((ce->ce_flags & CE_UPDATE) &&
			    !S_ISGITLINK(ce->ce_mode)) {
				if (!has_object_file(&ce->oid))
					oid_array_append(&to_fetch, &ce->oid);
			}
		}
		if (to_fetch.nr)
			fetch_objects(repository_format_partial_clone,
				      &to_fetch);
		fetch_if_missing = fetch_if_missing_store;
		oid_array_clear(&to_fetch);
	}
	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_UPDATE) {
			if (ce->ce_flags & CE_WT_REMOVE)
				BUG("both update and delete flags are set on %s",
				    ce->name);
			display_progress(progress, ++cnt);
			ce->ce_flags &= ~CE_UPDATE;
			if (o->update && !o->dry_run) {
				errs |= checkout_entry(ce, &state, NULL);
			}
		}
	}
	stop_progress(&progress);
	errs |= finish_delayed_checkout(&state);
	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKIN, NULL);
	trace_performance_leave("check_updates");
	return errs != 0;
}

static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o);
static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types,
				struct unpack_trees_options *o);

/*
 * Reconcile ce's CE_SKIP_WORKTREE state with the newly computed
 * CE_NEW_SKIP_WORKTREE, scheduling worktree adds/removals when an entry
 * crosses the sparse-checkout boundary.  Returns -1 on a verification
 * failure (entry not up to date / would be lost), 0 otherwise.
 */
static int apply_sparse_checkout(struct index_state *istate,
				 struct cache_entry *ce,
				 struct unpack_trees_options *o)
{
	int was_skip_worktree = ce_skip_worktree(ce);

	if (ce->ce_flags & CE_NEW_SKIP_WORKTREE)
		ce->ce_flags |= CE_SKIP_WORKTREE;
	else
		ce->ce_flags &= ~CE_SKIP_WORKTREE;
	if (was_skip_worktree != ce_skip_worktree(ce)) {
		ce->ce_flags |= CE_UPDATE_IN_BASE;
		mark_fsmonitor_invalid(istate, ce);
		istate->cache_changed |= CE_ENTRY_CHANGED;
	}

	/*
	 * if (!was_skip_worktree && !ce_skip_worktree()) {
	 *	This is perfectly normal. Move on;
	 * }
	 */

	/*
	 * Merge strategies may set CE_UPDATE|CE_REMOVE outside checkout
	 * area as a result of ce_skip_worktree() shortcuts in
	 * verify_absent() and verify_uptodate().
	 * Make sure they don't modify worktree if they are already
	 * outside checkout area
	 */
	if (was_skip_worktree && ce_skip_worktree(ce)) {
		ce->ce_flags &= ~CE_UPDATE;

		/*
		 * By default, when CE_REMOVE is on, CE_WT_REMOVE is also
		 * on to get that file removed from both index and worktree.
		 * If that file is already outside worktree area, don't
		 * bother remove it.
		 */
		if (ce->ce_flags & CE_REMOVE)
			ce->ce_flags &= ~CE_WT_REMOVE;
	}

	if (!was_skip_worktree && ce_skip_worktree(ce)) {
		/*
		 * If CE_UPDATE is set, verify_uptodate() must be called already
		 * also stat info may have lost after merged_entry() so calling
		 * verify_uptodate() again may fail
		 */
		if (!(ce->ce_flags & CE_UPDATE) && verify_uptodate_sparse(ce, o))
			return -1;
		ce->ce_flags |= CE_WT_REMOVE;
		ce->ce_flags &= ~CE_UPDATE;
	}
	if (was_skip_worktree && !ce_skip_worktree(ce)) {
		if (verify_absent_sparse(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o))
			return -1;
		ce->ce_flags |= CE_UPDATE;
	}
	return 0;
}

/*
 * Invoke the merge callback; positive return values ("entries consumed")
 * are normalized to 0 so callers only see 0 or a negative error.
 */
static inline int call_unpack_fn(const struct cache_entry * const *src,
				 struct unpack_trees_options *o)
{
	int ret = o->fn(src, o);
	if (ret > 0)
		ret = 0;
	return ret;
}

/*
 * Mark ce as consumed by the traversal and, when it sits at the current
 * cache_bottom, advance cache_bottom past any run of already-unpacked
 * entries.
 */
static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o)
{
	ce->ce_flags |= CE_UNPACKED;

	if (o->cache_bottom < o->src_index->cache_nr &&
	    o->src_index->cache[o->cache_bottom] == ce) {
		int bottom = o->cache_bottom;
		while (bottom < o->src_index->cache_nr &&
		       o->src_index->cache[bottom]->ce_flags & CE_UNPACKED)
			bottom++;
		o->cache_bottom = bottom;
	}
}

/* Reset the per-traversal flags on every entry of the index. */
static void mark_all_ce_unused(struct index_state *index)
{
	int i;
	for (i = 0; i < index->cache_nr; i++)
		index->cache[i]->ce_flags &= ~(CE_UNPACKED | CE_ADDED | CE_NEW_SKIP_WORKTREE);
}

/*
 * Position of ce's name in the source index (or, if absent, the position
 * where it would be inserted).
 */
static int locate_in_src_index(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);
	if (pos < 0)
		pos = -1 - pos;
	return pos;
}

/*
 * We call unpack_index_entry() with an unmerged cache entry
 * only in diff-index, and it wants a single callback. Skip
 * the other unmerged entry with the same name.
 */
static void mark_ce_used_same_name(struct cache_entry *ce,
				   struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos;

	for (pos = locate_in_src_index(ce, o); pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		mark_ce_used(next, o);
	}
}

/* First source-index entry at or after cache_bottom not yet unpacked. */
static struct cache_entry *next_cache_entry(struct unpack_trees_options *o)
{
	const struct index_state *index = o->src_index;
	int pos = o->cache_bottom;

	while (pos < index->cache_nr) {
		struct cache_entry *ce = index->cache[pos];
		if (!(ce->ce_flags & CE_UNPACKED))
			return ce;
		pos++;
	}
	return NULL;
}

/*
 * Copy all unmerged (higher-stage) entries sharing ce's name into the
 * result and mark them used.  ce must be unmerged, i.e. not found at
 * stage 0 in the source index.
 */
static void add_same_unmerged(const struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);

	if (0 <= pos)
		die("programming error in a caller of mark_ce_used_same_name");
	for (pos = -pos - 1; pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		add_entry(o, next, 0, 0);
		mark_ce_used(next, o);
	}
}

/*
 * Feed a single index entry (with no tree peers) to the merge callback,
 * optionally keeping unmerged entries as-is when skip_unmerged is set.
 */
static int unpack_index_entry(struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	int ret;

	src[0] = ce;

	mark_ce_used(ce, o);
	if (ce_stage(ce)) {
		if (o->skip_unmerged) {
			add_entry(o, ce, 0, 0);
			return 0;
		}
	}
	ret = call_unpack_fn(src, o);
	if (ce_stage(ce))
		mark_ce_used_same_name(ce, o);
	return ret;
}

static int find_cache_pos(struct traverse_info *, const struct name_entry *);

/* Undo switch_cache_bottom() after a subdirectory traversal. */
static void restore_cache_bottom(struct traverse_info *info, int bottom)
{
	struct unpack_trees_options *o = info->data;

	if (o->diff_index_cached)
		return;
	o->cache_bottom = bottom;
}

/*
 * Move cache_bottom to the index region matching the directory we are
 * about to descend into; returns the previous bottom for later restore.
 */
static int switch_cache_bottom(struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int ret, pos;

	if (o->diff_index_cached)
		return 0;
	ret = o->cache_bottom;
	pos = find_cache_pos(info->prev, &info->name);

	if (pos < -1)
		o->cache_bottom = -2 - pos; /* directory found; see find_cache_pos() */
	else if (pos < 0)
		o->cache_bottom = o->src_index->cache_nr;
	return ret;
}

/* Do two tree-walk entries carry the same (non-missing) object id? */
static inline int are_same_oid(struct name_entry *name_j, struct name_entry *name_k)
{
	return name_j->oid && name_k->oid && !oidcmp(name_j->oid, name_k->oid);
}

/*
 * When merging and every one of the n peer entries is the same directory
 * tree, ask cache-tree whether the index already matches it; a positive
 * return is the number of index entries covered (enables the fast path).
 */
static int all_trees_same_as_cache_tree(int n, unsigned long dirmask,
					struct name_entry *names,
					struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int i;

	if (!o->merge || dirmask != ((1 << n) - 1))
		return 0;

	for (i = 1; i < n; i++)
		if (!are_same_oid(names, names + i))
			return 0;

	return cache_tree_matches_traversal(o->src_index->cache_tree, names, info);
}

/*
 * Index position of the first entry under the directory named by the
 * traverse path + names; BUGs out if the path exists as a file or the
 * position does not start the directory's span.
 */
static int index_pos_by_traverse_info(struct name_entry *names,
				      struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int len = traverse_path_len(info, names);
	char *name = xmalloc(len + 1 /* slash */ + 1 /* NUL */);
	int pos;

	make_traverse_path(name, info, names);
	name[len++] = '/';
	name[len] = '\0';
	pos = index_name_pos(o->src_index, name, len);
	if (pos >= 0)
		BUG("This is a directory and should not exist in index");
	pos = -pos - 1;
	if (!starts_with(o->src_index->cache[pos]->name, name) ||
	    (pos > 0 && starts_with(o->src_index->cache[pos-1]->name, name)))
		BUG("pos must point at the first entry in this directory");
	free(name);
	return pos;
}

/*
 * Fast path if we detect that all trees are the same as cache-tree at this
 * path. We'll walk these trees recursively using cache-tree/index instead of
 * ODB since already know what these trees contain.
 */
static int traverse_by_cache_tree(int pos, int nr_entries, int nr_names,
				  struct name_entry *names,
				  struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	struct cache_entry *tree_ce = NULL;
	int ce_len = 0;
	int i, d;

	if (!o->merge)
		BUG("We need cache-tree to do this optimization");

	/*
	 * Do what unpack_callback() and unpack_nondirectories() normally
	 * do. But we walk all paths in an iterative loop instead.
	 *
	 * D/F conflicts and higher stage entries are not a concern
	 * because cache-tree would be invalidated and we would never
	 * get here in the first place.
	 */
	for (i = 0; i < nr_entries; i++) {
		int new_ce_len, len, rc;

		src[0] = o->src_index->cache[pos + i];

		len = ce_namelen(src[0]);
		new_ce_len = cache_entry_size(len);

		/* grow the shared "tree side" entry only when needed */
		if (new_ce_len > ce_len) {
			new_ce_len <<= 1;
			tree_ce = xrealloc(tree_ce, new_ce_len);
			memset(tree_ce, 0, new_ce_len);
			ce_len = new_ce_len;

			tree_ce->ce_flags = create_ce_flags(0);

			for (d = 1; d <= nr_names; d++)
				src[d] = tree_ce;
		}

		tree_ce->ce_mode = src[0]->ce_mode;
		tree_ce->ce_namelen = len;
		oidcpy(&tree_ce->oid, &src[0]->oid);
		memcpy(tree_ce->name, src[0]->name, len + 1);

		rc = call_unpack_fn((const struct cache_entry * const *)src, o);
		if (rc < 0) {
			free(tree_ce);
			return rc;
		}

		mark_ce_used(src[0], o);
	}
	free(tree_ce);
	if (o->debug_unpack)
		printf("Unpacked %d entries from %s to %s using cache-tree\n",
		       nr_entries,
		       o->src_index->cache[pos]->name,
		       o->src_index->cache[pos + nr_entries - 1]->name);
	return 0;
}

/*
 * Descend into the directory given by names across all n trees,
 * either via the cache-tree fast path or by reading each peer tree
 * from the ODB and recursing through traverse_trees().
 */
static int traverse_trees_recursive(int n, unsigned long dirmask,
				    unsigned long df_conflicts,
				    struct name_entry *names,
				    struct traverse_info *info)
{
	int i, ret, bottom;
	int nr_buf = 0;
	struct tree_desc t[MAX_UNPACK_TREES];
	void *buf[MAX_UNPACK_TREES];
	struct traverse_info newinfo;
	struct name_entry *p;
	int nr_entries;

	nr_entries = all_trees_same_as_cache_tree(n, dirmask, names, info);
	if (nr_entries > 0) {
		struct unpack_trees_options *o = info->data;
		int pos = index_pos_by_traverse_info(names, info);

		if (!o->merge || df_conflicts)
			BUG("Wrong condition to get here buddy");

		/*
		 * All entries up to 'pos' must have been processed
		 * (i.e. marked CE_UNPACKED) at this point. But to be safe,
		 * save and restore cache_bottom anyway to not miss
		 * unprocessed entries before 'pos'.
		 */
		bottom = o->cache_bottom;
		ret = traverse_by_cache_tree(pos, nr_entries, n, names, info);
		o->cache_bottom = bottom;
		return ret;
	}

	p = names;
	while (!p->mode)
		p++;

	newinfo = *info;
	newinfo.prev = info;
	newinfo.pathspec = info->pathspec;
	newinfo.name = *p;
	newinfo.pathlen += tree_entry_len(p) + 1;
	newinfo.df_conflicts |= df_conflicts;

	/*
	 * Fetch the tree from the ODB for each peer directory in the
	 * n commits.
	 *
	 * For 2- and 3-way traversals, we try to avoid hitting the
	 * ODB twice for the same OID.  This should yield a nice speed
	 * up in checkouts and merges when the commits are similar.
	 *
	 * We don't bother doing the full O(n^2) search for larger n,
	 * because wider traversals don't happen that often and we
	 * avoid the search setup.
	 *
	 * When 2 peer OIDs are the same, we just copy the tree
	 * descriptor data.  This implicitly borrows the buffer
	 * data from the earlier cell.
	 */
	for (i = 0; i < n; i++, dirmask >>= 1) {
		if (i > 0 && are_same_oid(&names[i], &names[i - 1]))
			t[i] = t[i - 1];
		else if (i > 1 && are_same_oid(&names[i], &names[i - 2]))
			t[i] = t[i - 2];
		else {
			const struct object_id *oid = NULL;
			if (dirmask & 1)
				oid = names[i].oid;
			buf[nr_buf++] = fill_tree_descriptor(t + i, oid);
		}
	}

	bottom = switch_cache_bottom(&newinfo);
	ret = traverse_trees(n, t, &newinfo);
	restore_cache_bottom(&newinfo, bottom);

	for (i = 0; i < nr_buf; i++)
		free(buf[i]);

	return ret;
}

/*
 * Compare the traverse-path to the cache entry without actually
 * having to generate the textual representation of the traverse
 * path.
 *
 * NOTE! This *only* compares up to the size of the traverse path
 * itself - the caller needs to do the final check for the cache
 * entry having more data at the end!
 */
static int do_compare_entry_piecewise(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;

	if (info->prev) {
		int cmp = do_compare_entry_piecewise(ce, info->prev,
						     &info->name);
		if (cmp)
			return cmp;
	}
	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	/* If ce_len < pathlen then we must have previously hit "name == directory" entry */
	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}

/* Same comparison as above, using the precomputed traverse_path when available. */
static int do_compare_entry(const struct cache_entry *ce,
			    const struct traverse_info *info,
			    const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;
	int cmp;

	/*
	 * If we have not precomputed the traverse path, it is quicker
	 * to avoid doing so.  But if we have precomputed it,
	 * it is quicker to use the precomputed version.
	 */
	if (!info->traverse_path)
		return do_compare_entry_piecewise(ce, info, n);

	cmp = strncmp(ce->name, info->traverse_path, info->pathlen);
	if (cmp)
		return cmp;

	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}

/* Full ordering of a cache entry against the traverse path + name. */
static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int cmp = do_compare_entry(ce, info, n);
	if (cmp)
		return cmp;

	/*
	 * Even if the beginning compared identically, the ce should
	 * compare as bigger than a directory leading up to it!
	 */
	return ce_namelen(ce) > traverse_path_len(info, n);
}

/* Is ce inside the directory currently being traversed? */
static int ce_in_traverse_path(const struct cache_entry *ce,
			       const struct traverse_info *info)
{
	if (!info->prev)
		return 1;
	if (do_compare_entry(ce, info->prev, &info->name))
		return 0;
	/*
	 * If ce (blob) is the same name as the path (which is a tree
	 * we will be descending into), it won't be inside it.
	 */
	return (info->pathlen < ce_namelen(ce));
}

/*
 * Build a cache entry for tree-walk entry n at the given stage.  A
 * "transient" entry is allocated outside the index's memory pool (the
 * caller will discard it); otherwise allocation is tied to istate.
 */
static struct cache_entry *create_ce_entry(const struct traverse_info *info,
	const struct name_entry *n,
	int stage,
	struct index_state *istate,
	int is_transient)
{
	int len = traverse_path_len(info, n);
	struct cache_entry *ce =
		is_transient ?
		make_empty_transient_cache_entry(len) :
		make_empty_cache_entry(istate, len);

	ce->ce_mode = create_ce_mode(n->mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	oidcpy(&ce->oid, n->oid);
	make_traverse_path(ce->name, info, n);

	return ce;
}

/*
 * Note that traverse_by_cache_tree() duplicates some logic in this function
 * without actually calling it. If you change the logic here you may need to
 * check and change there as well.
 */
static int unpack_nondirectories(int n, unsigned long mask,
				 unsigned long dirmask,
				 struct cache_entry **src,
				 const struct name_entry *names,
				 const struct traverse_info *info)
{
	int i;
	struct unpack_trees_options *o = info->data;
	unsigned long conflicts = info->df_conflicts | dirmask;

	/* Do we have *only* directories? Nothing to do */
	if (mask == dirmask && !src[0])
		return 0;

	/*
	 * Ok, we've filled in up to any potential index entry in src[0],
	 * now do the rest.
	 */
	for (i = 0; i < n; i++) {
		int stage;
		unsigned int bit = 1ul << i;
		if (conflicts & bit) {
			src[i + o->merge] = o->df_conflict_entry;
			continue;
		}
		if (!(mask & bit))
			continue;
		if (!o->merge)
			stage = 0;
		else if (i + 1 < o->head_idx)
			stage = 1;
		else if (i + 1 > o->head_idx)
			stage = 3;
		else
			stage = 2;

		/*
		 * If the merge bit is set, then the cache entries are
		 * discarded in the following block.  In this case,
		 * construct "transient" cache_entries, as they are
		 * not stored in the index.  otherwise construct the
		 * cache entry from the index aware logic.
		 */
		src[i + o->merge] = create_ce_entry(info, names + i, stage, &o->result, o->merge);
	}

	if (o->merge) {
		int rc = call_unpack_fn((const struct cache_entry * const *)src,
					o);
		for (i = 0; i < n; i++) {
			struct cache_entry *ce = src[i + o->merge];
			if (ce != o->df_conflict_entry)
				discard_cache_entry(ce);
		}
		return rc;
	}

	for (i = 0; i < n; i++)
		if (src[i] && src[i] != o->df_conflict_entry)
			if (do_add_entry(o, src[i], 0, 0))
				return -1;

	return 0;
}

/*
 * Abort the unpack: discard the partial result and, unless we are being
 * gentle or already exiting, report the message.  Always returns -1.
 */
static int unpack_failed(struct unpack_trees_options *o, const char *message)
{
	discard_index(&o->result);
	if (!o->gently && !o->exiting_early) {
		if (message)
			return error("%s", message);
		return -1;
	}
	return -1;
}

/*
 * The tree traversal is looking at name p. If we have a matching entry,
 * return it. If name p is a directory in the index, do not return
 * anything, as we will want to match it when the traversal descends into
 * the directory.
 */
static int find_cache_pos(struct traverse_info *info,
			  const struct name_entry *p)
{
	int pos;
	struct unpack_trees_options *o = info->data;
	struct index_state *index = o->src_index;
	int pfxlen = info->pathlen;
	int p_len = tree_entry_len(p);

	for (pos = o->cache_bottom; pos < index->cache_nr; pos++) {
		const struct cache_entry *ce = index->cache[pos];
		const char *ce_name, *ce_slash;
		int cmp, ce_len;

		if (ce->ce_flags & CE_UNPACKED) {
			/*
			 * cache_bottom entry is already unpacked, so
			 * we can never match it; don't check it
			 * again.
			 */
			if (pos == o->cache_bottom)
				++o->cache_bottom;
			continue;
		}
		if (!ce_in_traverse_path(ce, info)) {
			/*
			 * Check if we can skip future cache checks
			 * (because we're already past all possible
			 * entries in the traverse path).
			 */
			if (info->traverse_path) {
				if (strncmp(ce->name, info->traverse_path,
					    info->pathlen) > 0)
					break;
			}
			continue;
		}
		ce_name = ce->name + pfxlen;
		ce_slash = strchr(ce_name, '/');
		if (ce_slash)
			ce_len = ce_slash - ce_name;
		else
			ce_len = ce_namelen(ce) - pfxlen;
		cmp = name_compare(p->path, p_len, ce_name, ce_len);
		/*
		 * Exact match; if we have a directory we need to
		 * delay returning it.
		 */
		if (!cmp)
			return ce_slash ? -2 - pos : pos;
		if (0 < cmp)
			continue; /* keep looking */
		/*
		 * ce_name sorts after p->path; could it be that we
		 * have files under p->path directory in the index?
		 * E.g.  ce_name == "t-i", and p->path == "t"; we may
		 * have "t/a" in the index.
		 */
		if (p_len < ce_len && !memcmp(ce_name, p->path, p_len) &&
		    ce_name[p_len] < '/')
			continue; /* keep looking */
		break;
	}
	return -1;
}

/* Thin wrapper over find_cache_pos(): NULL unless an exact file match. */
static struct cache_entry *find_cache_entry(struct traverse_info *info,
					    const struct name_entry *p)
{
	int pos = find_cache_pos(info, p);
	struct unpack_trees_options *o = info->data;

	if (0 <= pos)
		return o->src_index->cache[pos];
	else
		return NULL;
}

/* Debug helper: print the full traverse path with '/' separators. */
static void debug_path(struct traverse_info *info)
{
	if (info->prev) {
		debug_path(info->prev);
		if (*info->prev->name.path)
			putchar('/');
	}
	printf("%s", info->name.path);
}

/* Debug helper: print one tree-walk entry (or "(missing)"). */
static void debug_name_entry(int i, struct name_entry *n)
{
	printf("ent#%d %06o %s\n", i,
	       n->path ? n->mode : 0,
	       n->path ? n->path : "(missing)");
}

/* Debug helper: dump the callback arguments for one traversal step. */
static void debug_unpack_callback(int n,
				  unsigned long mask,
				  unsigned long dirmask,
				  struct name_entry *names,
				  struct traverse_info *info)
{
	int i;
	printf("* unpack mask %lu, dirmask %lu, cnt %d ",
	       mask, dirmask, n);
	debug_path(info);
	putchar('\n');
	for (i = 0; i < n; i++)
		debug_name_entry(i, names + i);
}

/*
 * Note that traverse_by_cache_tree() duplicates some logic in this function
 * without actually calling it. If you change the logic here you may need to
 * check and change there as well.
 */
static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	const struct name_entry *p = names;

	/* Find first entry with a real name (we could use "mask" too) */
	while (!p->mode)
		p++;

	if (o->debug_unpack)
		debug_unpack_callback(n, mask, dirmask, names, info);

	/* Are we supposed to look at the index too?
*/1159 if (o->merge) {1160 while (1) {1161 int cmp;1162 struct cache_entry *ce;11631164 if (o->diff_index_cached)1165 ce = next_cache_entry(o);1166 else1167 ce = find_cache_entry(info, p);11681169 if (!ce)1170 break;1171 cmp = compare_entry(ce, info, p);1172 if (cmp < 0) {1173 if (unpack_index_entry(ce, o) < 0)1174 return unpack_failed(o, NULL);1175 continue;1176 }1177 if (!cmp) {1178 if (ce_stage(ce)) {1179 /*1180 * If we skip unmerged index1181 * entries, we'll skip this1182 * entry *and* the tree1183 * entries associated with it!1184 */1185 if (o->skip_unmerged) {1186 add_same_unmerged(ce, o);1187 return mask;1188 }1189 }1190 src[0] = ce;1191 }1192 break;1193 }1194 }11951196 if (unpack_nondirectories(n, mask, dirmask, src, names, info) < 0)1197 return -1;11981199 if (o->merge && src[0]) {1200 if (ce_stage(src[0]))1201 mark_ce_used_same_name(src[0], o);1202 else1203 mark_ce_used(src[0], o);1204 }12051206 /* Now handle any directories.. */1207 if (dirmask) {1208 /* special case: "diff-index --cached" looking at a tree */1209 if (o->diff_index_cached &&1210 n == 1 && dirmask == 1 && S_ISDIR(names->mode)) {1211 int matches;1212 matches = cache_tree_matches_traversal(o->src_index->cache_tree,1213 names, info);1214 /*1215 * Everything under the name matches; skip the1216 * entire hierarchy. 
diff_index_cached codepath1217 * special cases D/F conflicts in such a way that1218 * it does not do any look-ahead, so this is safe.1219 */1220 if (matches) {1221 o->cache_bottom += matches;1222 return mask;1223 }1224 }12251226 if (traverse_trees_recursive(n, dirmask, mask & ~dirmask,1227 names, info) < 0)1228 return -1;1229 return mask;1230 }12311232 return mask;1233}12341235static int clear_ce_flags_1(struct cache_entry **cache, int nr,1236 struct strbuf *prefix,1237 int select_mask, int clear_mask,1238 struct exclude_list *el, int defval);12391240/* Whole directory matching */1241static int clear_ce_flags_dir(struct cache_entry **cache, int nr,1242 struct strbuf *prefix,1243 char *basename,1244 int select_mask, int clear_mask,1245 struct exclude_list *el, int defval)1246{1247 struct cache_entry **cache_end;1248 int dtype = DT_DIR;1249 int ret = is_excluded_from_list(prefix->buf, prefix->len,1250 basename, &dtype, el, &the_index);1251 int rc;12521253 strbuf_addch(prefix, '/');12541255 /* If undecided, use matching result of parent dir in defval */1256 if (ret < 0)1257 ret = defval;12581259 for (cache_end = cache; cache_end != cache + nr; cache_end++) {1260 struct cache_entry *ce = *cache_end;1261 if (strncmp(ce->name, prefix->buf, prefix->len))1262 break;1263 }12641265 /*1266 * TODO: check el, if there are no patterns that may conflict1267 * with ret (iow, we know in advance the incl/excl1268 * decision for the entire directory), clear flag here without1269 * calling clear_ce_flags_1(). That function will call1270 * the expensive is_excluded_from_list() on every entry.1271 */1272 rc = clear_ce_flags_1(cache, cache_end - cache,1273 prefix,1274 select_mask, clear_mask,1275 el, ret);1276 strbuf_setlen(prefix, prefix->len - 1);1277 return rc;1278}12791280/*1281 * Traverse the index, find every entry that matches according to1282 * o->el. Do "ce_flags &= ~clear_mask" on those entries. 
Return the1283 * number of traversed entries.1284 *1285 * If select_mask is non-zero, only entries whose ce_flags has on of1286 * those bits enabled are traversed.1287 *1288 * cache : pointer to an index entry1289 * prefix_len : an offset to its path1290 *1291 * The current path ("prefix") including the trailing '/' is1292 * cache[0]->name[0..(prefix_len-1)]1293 * Top level path has prefix_len zero.1294 */1295static int clear_ce_flags_1(struct cache_entry **cache, int nr,1296 struct strbuf *prefix,1297 int select_mask, int clear_mask,1298 struct exclude_list *el, int defval)1299{1300 struct cache_entry **cache_end = cache + nr;13011302 /*1303 * Process all entries that have the given prefix and meet1304 * select_mask condition1305 */1306 while(cache != cache_end) {1307 struct cache_entry *ce = *cache;1308 const char *name, *slash;1309 int len, dtype, ret;13101311 if (select_mask && !(ce->ce_flags & select_mask)) {1312 cache++;1313 continue;1314 }13151316 if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len))1317 break;13181319 name = ce->name + prefix->len;1320 slash = strchr(name, '/');13211322 /* If it's a directory, try whole directory match first */1323 if (slash) {1324 int processed;13251326 len = slash - name;1327 strbuf_add(prefix, name, len);13281329 processed = clear_ce_flags_dir(cache, cache_end - cache,1330 prefix,1331 prefix->buf + prefix->len - len,1332 select_mask, clear_mask,1333 el, defval);13341335 /* clear_c_f_dir eats a whole dir already? 
*/1336 if (processed) {1337 cache += processed;1338 strbuf_setlen(prefix, prefix->len - len);1339 continue;1340 }13411342 strbuf_addch(prefix, '/');1343 cache += clear_ce_flags_1(cache, cache_end - cache,1344 prefix,1345 select_mask, clear_mask, el, defval);1346 strbuf_setlen(prefix, prefix->len - len - 1);1347 continue;1348 }13491350 /* Non-directory */1351 dtype = ce_to_dtype(ce);1352 ret = is_excluded_from_list(ce->name, ce_namelen(ce),1353 name, &dtype, el, &the_index);1354 if (ret < 0)1355 ret = defval;1356 if (ret > 0)1357 ce->ce_flags &= ~clear_mask;1358 cache++;1359 }1360 return nr - (cache_end - cache);1361}13621363static int clear_ce_flags(struct cache_entry **cache, int nr,1364 int select_mask, int clear_mask,1365 struct exclude_list *el)1366{1367 static struct strbuf prefix = STRBUF_INIT;13681369 strbuf_reset(&prefix);13701371 return clear_ce_flags_1(cache, nr,1372 &prefix,1373 select_mask, clear_mask,1374 el, 0);1375}13761377/*1378 * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout1379 */1380static void mark_new_skip_worktree(struct exclude_list *el,1381 struct index_state *the_index,1382 int select_flag, int skip_wt_flag)1383{1384 int i;13851386 /*1387 * 1. Pretend the narrowest worktree: only unmerged entries1388 * are checked out1389 */1390 for (i = 0; i < the_index->cache_nr; i++) {1391 struct cache_entry *ce = the_index->cache[i];13921393 if (select_flag && !(ce->ce_flags & select_flag))1394 continue;13951396 if (!ce_stage(ce) && !(ce->ce_flags & CE_CONFLICTED))1397 ce->ce_flags |= skip_wt_flag;1398 else1399 ce->ce_flags &= ~skip_wt_flag;1400 }14011402 /*1403 * 2. Widen worktree according to sparse-checkout file.1404 * Matched entries will have skip_wt_flag cleared (i.e. 
"in")1405 */1406 clear_ce_flags(the_index->cache, the_index->cache_nr,1407 select_flag, skip_wt_flag, el);1408}14091410static int verify_absent(const struct cache_entry *,1411 enum unpack_trees_error_types,1412 struct unpack_trees_options *);1413/*1414 * N-way merge "len" trees. Returns 0 on success, -1 on failure to manipulate the1415 * resulting index, -2 on failure to reflect the changes to the work tree.1416 *1417 * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally1418 */1419int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)1420{1421 int i, ret;1422 static struct cache_entry *dfc;1423 struct exclude_list el;14241425 if (len > MAX_UNPACK_TREES)1426 die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);14271428 trace_performance_enter();1429 memset(&el, 0, sizeof(el));1430 if (!core_apply_sparse_checkout || !o->update)1431 o->skip_sparse_checkout = 1;1432 if (!o->skip_sparse_checkout) {1433 char *sparse = git_pathdup("info/sparse-checkout");1434 if (add_excludes_from_file_to_list(sparse, "", 0, &el, NULL) < 0)1435 o->skip_sparse_checkout = 1;1436 else1437 o->el = ⪙1438 free(sparse);1439 }14401441 memset(&o->result, 0, sizeof(o->result));1442 o->result.initialized = 1;1443 o->result.timestamp.sec = o->src_index->timestamp.sec;1444 o->result.timestamp.nsec = o->src_index->timestamp.nsec;1445 o->result.version = o->src_index->version;1446 if (!o->src_index->split_index) {1447 o->result.split_index = NULL;1448 } else if (o->src_index == o->dst_index) {1449 /*1450 * o->dst_index (and thus o->src_index) will be discarded1451 * and overwritten with o->result at the end of this function,1452 * so just use src_index's split_index to avoid having to1453 * create a new one.1454 */1455 o->result.split_index = o->src_index->split_index;1456 o->result.split_index->refcount++;1457 } else {1458 o->result.split_index = init_split_index(&o->result);1459 }1460 oidcpy(&o->result.oid, &o->src_index->oid);1461 o->merge_size = 
len;1462 mark_all_ce_unused(o->src_index);14631464 /*1465 * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries1466 */1467 if (!o->skip_sparse_checkout)1468 mark_new_skip_worktree(o->el, o->src_index, 0, CE_NEW_SKIP_WORKTREE);14691470 if (!dfc)1471 dfc = xcalloc(1, cache_entry_size(0));1472 o->df_conflict_entry = dfc;14731474 if (len) {1475 const char *prefix = o->prefix ? o->prefix : "";1476 struct traverse_info info;14771478 setup_traverse_info(&info, prefix);1479 info.fn = unpack_callback;1480 info.data = o;1481 info.show_all_errors = o->show_all_errors;1482 info.pathspec = o->pathspec;14831484 if (o->prefix) {1485 /*1486 * Unpack existing index entries that sort before the1487 * prefix the tree is spliced into. Note that o->merge1488 * is always true in this case.1489 */1490 while (1) {1491 struct cache_entry *ce = next_cache_entry(o);1492 if (!ce)1493 break;1494 if (ce_in_traverse_path(ce, &info))1495 break;1496 if (unpack_index_entry(ce, o) < 0)1497 goto return_failed;1498 }1499 }15001501 trace_performance_enter();1502 ret = traverse_trees(len, t, &info);1503 trace_performance_leave("traverse_trees");1504 if (ret < 0)1505 goto return_failed;1506 }15071508 /* Any left-over entries in the index? 
*/1509 if (o->merge) {1510 while (1) {1511 struct cache_entry *ce = next_cache_entry(o);1512 if (!ce)1513 break;1514 if (unpack_index_entry(ce, o) < 0)1515 goto return_failed;1516 }1517 }1518 mark_all_ce_unused(o->src_index);15191520 if (o->trivial_merges_only && o->nontrivial_merge) {1521 ret = unpack_failed(o, "Merge requires file-level merging");1522 goto done;1523 }15241525 if (!o->skip_sparse_checkout) {1526 int empty_worktree = 1;15271528 /*1529 * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #11530 * If the will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE1531 * so apply_sparse_checkout() won't attempt to remove it from worktree1532 */1533 mark_new_skip_worktree(o->el, &o->result, CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);15341535 ret = 0;1536 for (i = 0; i < o->result.cache_nr; i++) {1537 struct cache_entry *ce = o->result.cache[i];15381539 /*1540 * Entries marked with CE_ADDED in merged_entry() do not have1541 * verify_absent() check (the check is effectively disabled1542 * because CE_NEW_SKIP_WORKTREE is set unconditionally).1543 *1544 * Do the real check now because we have had1545 * correct CE_NEW_SKIP_WORKTREE1546 */1547 if (ce->ce_flags & CE_ADDED &&1548 verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {1549 if (!o->show_all_errors)1550 goto return_failed;1551 ret = -1;1552 }15531554 if (apply_sparse_checkout(&o->result, ce, o)) {1555 if (!o->show_all_errors)1556 goto return_failed;1557 ret = -1;1558 }1559 if (!ce_skip_worktree(ce))1560 empty_worktree = 0;15611562 }1563 if (ret < 0)1564 goto return_failed;1565 /*1566 * Sparse checkout is meant to narrow down checkout area1567 * but it does not make sense to narrow down to empty working1568 * tree. 
This is usually a mistake in sparse checkout rules.1569 * Do not allow users to do that.1570 */1571 if (o->result.cache_nr && empty_worktree) {1572 ret = unpack_failed(o, "Sparse checkout leaves no entry on working directory");1573 goto done;1574 }1575 }15761577 ret = check_updates(o) ? (-2) : 0;1578 if (o->dst_index) {1579 move_index_extensions(&o->result, o->src_index);1580 if (!ret) {1581 if (!o->result.cache_tree)1582 o->result.cache_tree = cache_tree();1583 if (!cache_tree_fully_valid(o->result.cache_tree))1584 cache_tree_update(&o->result,1585 WRITE_TREE_SILENT |1586 WRITE_TREE_REPAIR);1587 }1588 discard_index(o->dst_index);1589 *o->dst_index = o->result;1590 } else {1591 discard_index(&o->result);1592 }1593 o->src_index = NULL;15941595done:1596 trace_performance_leave("unpack_trees");1597 clear_exclude_list(&el);1598 return ret;15991600return_failed:1601 if (o->show_all_errors)1602 display_error_msgs(o);1603 mark_all_ce_unused(o->src_index);1604 ret = unpack_failed(o, NULL);1605 if (o->exiting_early)1606 ret = 0;1607 goto done;1608}16091610/* Here come the merge functions */16111612static int reject_merge(const struct cache_entry *ce,1613 struct unpack_trees_options *o)1614{1615 return o->gently ? 
-1 :1616 add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);1617}16181619static int same(const struct cache_entry *a, const struct cache_entry *b)1620{1621 if (!!a != !!b)1622 return 0;1623 if (!a && !b)1624 return 1;1625 if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED)1626 return 0;1627 return a->ce_mode == b->ce_mode &&1628 !oidcmp(&a->oid, &b->oid);1629}163016311632/*1633 * When a CE gets turned into an unmerged entry, we1634 * want it to be up-to-date1635 */1636static int verify_uptodate_1(const struct cache_entry *ce,1637 struct unpack_trees_options *o,1638 enum unpack_trees_error_types error_type)1639{1640 struct stat st;16411642 if (o->index_only)1643 return 0;16441645 /*1646 * CE_VALID and CE_SKIP_WORKTREE cheat, we better check again1647 * if this entry is truly up-to-date because this file may be1648 * overwritten.1649 */1650 if ((ce->ce_flags & CE_VALID) || ce_skip_worktree(ce))1651 ; /* keep checking */1652 else if (o->reset || ce_uptodate(ce))1653 return 0;16541655 if (!lstat(ce->name, &st)) {1656 int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE;1657 unsigned changed = ie_match_stat(o->src_index, ce, &st, flags);16581659 if (submodule_from_ce(ce)) {1660 int r = check_submodule_move_head(ce,1661 "HEAD", oid_to_hex(&ce->oid), o);1662 if (r)1663 return o->gently ? -1 :1664 add_rejected_path(o, error_type, ce->name);1665 return 0;1666 }16671668 if (!changed)1669 return 0;1670 /*1671 * Historic default policy was to allow submodule to be out1672 * of sync wrt the superproject index. If the submodule was1673 * not considered interesting above, we don't care here.1674 */1675 if (S_ISGITLINK(ce->ce_mode))1676 return 0;16771678 errno = 0;1679 }1680 if (errno == ENOENT)1681 return 0;1682 return o->gently ? 
-1 :1683 add_rejected_path(o, error_type, ce->name);1684}16851686int verify_uptodate(const struct cache_entry *ce,1687 struct unpack_trees_options *o)1688{1689 if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))1690 return 0;1691 return verify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);1692}16931694static int verify_uptodate_sparse(const struct cache_entry *ce,1695 struct unpack_trees_options *o)1696{1697 return verify_uptodate_1(ce, o, ERROR_SPARSE_NOT_UPTODATE_FILE);1698}16991700static void invalidate_ce_path(const struct cache_entry *ce,1701 struct unpack_trees_options *o)1702{1703 if (!ce)1704 return;1705 cache_tree_invalidate_path(o->src_index, ce->name);1706 untracked_cache_invalidate_path(o->src_index, ce->name, 1);1707}17081709/*1710 * Check that checking out ce->sha1 in subdir ce->name is not1711 * going to overwrite any working files.1712 *1713 * Currently, git does not checkout subprojects during a superproject1714 * checkout, so it is not going to overwrite anything.1715 */1716static int verify_clean_submodule(const char *old_sha1,1717 const struct cache_entry *ce,1718 enum unpack_trees_error_types error_type,1719 struct unpack_trees_options *o)1720{1721 if (!submodule_from_ce(ce))1722 return 0;17231724 return check_submodule_move_head(ce, old_sha1,1725 oid_to_hex(&ce->oid), o);1726}17271728static int verify_clean_subdirectory(const struct cache_entry *ce,1729 enum unpack_trees_error_types error_type,1730 struct unpack_trees_options *o)1731{1732 /*1733 * we are about to extract "ce->name"; we would not want to lose1734 * anything in the existing directory there.1735 */1736 int namelen;1737 int i;1738 struct dir_struct d;1739 char *pathbuf;1740 int cnt = 0;17411742 if (S_ISGITLINK(ce->ce_mode)) {1743 struct object_id oid;1744 int sub_head = resolve_gitlink_ref(ce->name, "HEAD", &oid);1745 /*1746 * If we are not going to update the submodule, then1747 * we don't care.1748 */1749 if (!sub_head && !oidcmp(&oid, &ce->oid))1750 return 
0;1751 return verify_clean_submodule(sub_head ? NULL : oid_to_hex(&oid),1752 ce, error_type, o);1753 }17541755 /*1756 * First let's make sure we do not have a local modification1757 * in that directory.1758 */1759 namelen = ce_namelen(ce);1760 for (i = locate_in_src_index(ce, o);1761 i < o->src_index->cache_nr;1762 i++) {1763 struct cache_entry *ce2 = o->src_index->cache[i];1764 int len = ce_namelen(ce2);1765 if (len < namelen ||1766 strncmp(ce->name, ce2->name, namelen) ||1767 ce2->name[namelen] != '/')1768 break;1769 /*1770 * ce2->name is an entry in the subdirectory to be1771 * removed.1772 */1773 if (!ce_stage(ce2)) {1774 if (verify_uptodate(ce2, o))1775 return -1;1776 add_entry(o, ce2, CE_REMOVE, 0);1777 invalidate_ce_path(ce, o);1778 mark_ce_used(ce2, o);1779 }1780 cnt++;1781 }17821783 /*1784 * Then we need to make sure that we do not lose a locally1785 * present file that is not ignored.1786 */1787 pathbuf = xstrfmt("%.*s/", namelen, ce->name);17881789 memset(&d, 0, sizeof(d));1790 if (o->dir)1791 d.exclude_per_dir = o->dir->exclude_per_dir;1792 i = read_directory(&d, &the_index, pathbuf, namelen+1, NULL);1793 if (i)1794 return o->gently ? 
-1 :1795 add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);1796 free(pathbuf);1797 return cnt;1798}17991800/*1801 * This gets called when there was no index entry for the tree entry 'dst',1802 * but we found a file in the working tree that 'lstat()' said was fine,1803 * and we're on a case-insensitive filesystem.1804 *1805 * See if we can find a case-insensitive match in the index that also1806 * matches the stat information, and assume it's that other file!1807 */1808static int icase_exists(struct unpack_trees_options *o, const char *name, int len, struct stat *st)1809{1810 const struct cache_entry *src;18111812 src = index_file_exists(o->src_index, name, len, 1);1813 return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);1814}18151816static int check_ok_to_remove(const char *name, int len, int dtype,1817 const struct cache_entry *ce, struct stat *st,1818 enum unpack_trees_error_types error_type,1819 struct unpack_trees_options *o)1820{1821 const struct cache_entry *result;18221823 /*1824 * It may be that the 'lstat()' succeeded even though1825 * target 'ce' was absent, because there is an old1826 * entry that is different only in case..1827 *1828 * Ignore that lstat() if it matches.1829 */1830 if (ignore_case && icase_exists(o, name, len, st))1831 return 0;18321833 if (o->dir &&1834 is_excluded(o->dir, &the_index, name, &dtype))1835 /*1836 * ce->name is explicitly excluded, so it is Ok to1837 * overwrite it.1838 */1839 return 0;1840 if (S_ISDIR(st->st_mode)) {1841 /*1842 * We are checking out path "foo" and1843 * found "foo/." 
in the working tree.1844 * This is tricky -- if we have modified1845 * files that are in "foo/" we would lose1846 * them.1847 */1848 if (verify_clean_subdirectory(ce, error_type, o) < 0)1849 return -1;1850 return 0;1851 }18521853 /*1854 * The previous round may already have decided to1855 * delete this path, which is in a subdirectory that1856 * is being replaced with a blob.1857 */1858 result = index_file_exists(&o->result, name, len, 0);1859 if (result) {1860 if (result->ce_flags & CE_REMOVE)1861 return 0;1862 }18631864 return o->gently ? -1 :1865 add_rejected_path(o, error_type, name);1866}18671868/*1869 * We do not want to remove or overwrite a working tree file that1870 * is not tracked, unless it is ignored.1871 */1872static int verify_absent_1(const struct cache_entry *ce,1873 enum unpack_trees_error_types error_type,1874 struct unpack_trees_options *o)1875{1876 int len;1877 struct stat st;18781879 if (o->index_only || o->reset || !o->update)1880 return 0;18811882 len = check_leading_path(ce->name, ce_namelen(ce));1883 if (!len)1884 return 0;1885 else if (len > 0) {1886 char *path;1887 int ret;18881889 path = xmemdupz(ce->name, len);1890 if (lstat(path, &st))1891 ret = error_errno("cannot stat '%s'", path);1892 else {1893 if (submodule_from_ce(ce))1894 ret = check_submodule_move_head(ce,1895 oid_to_hex(&ce->oid),1896 NULL, o);1897 else1898 ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL,1899 &st, error_type, o);1900 }1901 free(path);1902 return ret;1903 } else if (lstat(ce->name, &st)) {1904 if (errno != ENOENT)1905 return error_errno("cannot stat '%s'", ce->name);1906 return 0;1907 } else {1908 if (submodule_from_ce(ce))1909 return check_submodule_move_head(ce, oid_to_hex(&ce->oid),1910 NULL, o);19111912 return check_ok_to_remove(ce->name, ce_namelen(ce),1913 ce_to_dtype(ce), ce, &st,1914 error_type, o);1915 }1916}19171918static int verify_absent(const struct cache_entry *ce,1919 enum unpack_trees_error_types error_type,1920 struct unpack_trees_options 
*o)1921{1922 if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))1923 return 0;1924 return verify_absent_1(ce, error_type, o);1925}19261927static int verify_absent_sparse(const struct cache_entry *ce,1928 enum unpack_trees_error_types error_type,1929 struct unpack_trees_options *o)1930{1931 enum unpack_trees_error_types orphaned_error = error_type;1932 if (orphaned_error == ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN)1933 orphaned_error = ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN;19341935 return verify_absent_1(ce, orphaned_error, o);1936}19371938static int merged_entry(const struct cache_entry *ce,1939 const struct cache_entry *old,1940 struct unpack_trees_options *o)1941{1942 int update = CE_UPDATE;1943 struct cache_entry *merge = dup_cache_entry(ce, &o->result);19441945 if (!old) {1946 /*1947 * New index entries. In sparse checkout, the following1948 * verify_absent() will be delayed until after1949 * traverse_trees() finishes in unpack_trees(), then:1950 *1951 * - CE_NEW_SKIP_WORKTREE will be computed correctly1952 * - verify_absent() be called again, this time with1953 * correct CE_NEW_SKIP_WORKTREE1954 *1955 * verify_absent() call here does nothing in sparse1956 * checkout (i.e. 
o->skip_sparse_checkout == 0)1957 */1958 update |= CE_ADDED;1959 merge->ce_flags |= CE_NEW_SKIP_WORKTREE;19601961 if (verify_absent(merge,1962 ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {1963 discard_cache_entry(merge);1964 return -1;1965 }1966 invalidate_ce_path(merge, o);19671968 if (submodule_from_ce(ce)) {1969 int ret = check_submodule_move_head(ce, NULL,1970 oid_to_hex(&ce->oid),1971 o);1972 if (ret)1973 return ret;1974 }19751976 } else if (!(old->ce_flags & CE_CONFLICTED)) {1977 /*1978 * See if we can re-use the old CE directly?1979 * That way we get the uptodate stat info.1980 *1981 * This also removes the UPDATE flag on a match; otherwise1982 * we will end up overwriting local changes in the work tree.1983 */1984 if (same(old, merge)) {1985 copy_cache_entry(merge, old);1986 update = 0;1987 } else {1988 if (verify_uptodate(old, o)) {1989 discard_cache_entry(merge);1990 return -1;1991 }1992 /* Migrate old flags over */1993 update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);1994 invalidate_ce_path(old, o);1995 }19961997 if (submodule_from_ce(ce)) {1998 int ret = check_submodule_move_head(ce, oid_to_hex(&old->oid),1999 oid_to_hex(&ce->oid),2000 o);2001 if (ret)2002 return ret;2003 }2004 } else {2005 /*2006 * Previously unmerged entry left as an existence2007 * marker by read_index_unmerged();2008 */2009 invalidate_ce_path(old, o);2010 }20112012 do_add_entry(o, merge, update, CE_STAGEMASK);2013 return 1;2014}20152016static int deleted_entry(const struct cache_entry *ce,2017 const struct cache_entry *old,2018 struct unpack_trees_options *o)2019{2020 /* Did it exist in the index? 
*/2021 if (!old) {2022 if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))2023 return -1;2024 return 0;2025 }2026 if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o))2027 return -1;2028 add_entry(o, ce, CE_REMOVE, 0);2029 invalidate_ce_path(ce, o);2030 return 1;2031}20322033static int keep_entry(const struct cache_entry *ce,2034 struct unpack_trees_options *o)2035{2036 add_entry(o, ce, 0, 0);2037 if (ce_stage(ce))2038 invalidate_ce_path(ce, o);2039 return 1;2040}20412042#if DBRT_DEBUG2043static void show_stage_entry(FILE *o,2044 const char *label, const struct cache_entry *ce)2045{2046 if (!ce)2047 fprintf(o, "%s (missing)\n", label);2048 else2049 fprintf(o, "%s%06o %s %d\t%s\n",2050 label,2051 ce->ce_mode,2052 oid_to_hex(&ce->oid),2053 ce_stage(ce),2054 ce->name);2055}2056#endif20572058int threeway_merge(const struct cache_entry * const *stages,2059 struct unpack_trees_options *o)2060{2061 const struct cache_entry *index;2062 const struct cache_entry *head;2063 const struct cache_entry *remote = stages[o->head_idx + 1];2064 int count;2065 int head_match = 0;2066 int remote_match = 0;20672068 int df_conflict_head = 0;2069 int df_conflict_remote = 0;20702071 int any_anc_missing = 0;2072 int no_anc_exists = 1;2073 int i;20742075 for (i = 1; i < o->head_idx; i++) {2076 if (!stages[i] || stages[i] == o->df_conflict_entry)2077 any_anc_missing = 1;2078 else2079 no_anc_exists = 0;2080 }20812082 index = stages[0];2083 head = stages[o->head_idx];20842085 if (head == o->df_conflict_entry) {2086 df_conflict_head = 1;2087 head = NULL;2088 }20892090 if (remote == o->df_conflict_entry) {2091 df_conflict_remote = 1;2092 remote = NULL;2093 }20942095 /*2096 * First, if there's a #16 situation, note that to prevent #132097 * and #14.2098 */2099 if (!same(remote, head)) {2100 for (i = 1; i < o->head_idx; i++) {2101 if (same(stages[i], head)) {2102 head_match = i;2103 }2104 if (same(stages[i], remote)) {2105 remote_match = i;2106 }2107 }2108 }21092110 /*2111 * We 
start with cases where the index is allowed to match2112 * something other than the head: #14(ALT) and #2ALT, where it2113 * is permitted to match the result instead.2114 */2115 /* #14, #14ALT, #2ALT */2116 if (remote && !df_conflict_head && head_match && !remote_match) {2117 if (index && !same(index, remote) && !same(index, head))2118 return reject_merge(index, o);2119 return merged_entry(remote, index, o);2120 }2121 /*2122 * If we have an entry in the index cache, then we want to2123 * make sure that it matches head.2124 */2125 if (index && !same(index, head))2126 return reject_merge(index, o);21272128 if (head) {2129 /* #5ALT, #15 */2130 if (same(head, remote))2131 return merged_entry(head, index, o);2132 /* #13, #3ALT */2133 if (!df_conflict_remote && remote_match && !head_match)2134 return merged_entry(head, index, o);2135 }21362137 /* #1 */2138 if (!head && !remote && any_anc_missing)2139 return 0;21402141 /*2142 * Under the "aggressive" rule, we resolve mostly trivial2143 * cases that we historically had git-merge-one-file resolve.2144 */2145 if (o->aggressive) {2146 int head_deleted = !head;2147 int remote_deleted = !remote;2148 const struct cache_entry *ce = NULL;21492150 if (index)2151 ce = index;2152 else if (head)2153 ce = head;2154 else if (remote)2155 ce = remote;2156 else {2157 for (i = 1; i < o->head_idx; i++) {2158 if (stages[i] && stages[i] != o->df_conflict_entry) {2159 ce = stages[i];2160 break;2161 }2162 }2163 }21642165 /*2166 * Deleted in both.2167 * Deleted in one and unchanged in the other.2168 */2169 if ((head_deleted && remote_deleted) ||2170 (head_deleted && remote && remote_match) ||2171 (remote_deleted && head && head_match)) {2172 if (index)2173 return deleted_entry(index, index, o);2174 if (ce && !head_deleted) {2175 if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))2176 return -1;2177 }2178 return 0;2179 }2180 /*2181 * Added in both, identically.2182 */2183 if (no_anc_exists && head && remote && same(head, remote))2184 
return merged_entry(head, index, o);21852186 }21872188 /* Below are "no merge" cases, which require that the index be2189 * up-to-date to avoid the files getting overwritten with2190 * conflict resolution files.2191 */2192 if (index) {2193 if (verify_uptodate(index, o))2194 return -1;2195 }21962197 o->nontrivial_merge = 1;21982199 /* #2, #3, #4, #6, #7, #9, #10, #11. */2200 count = 0;2201 if (!head_match || !remote_match) {2202 for (i = 1; i < o->head_idx; i++) {2203 if (stages[i] && stages[i] != o->df_conflict_entry) {2204 keep_entry(stages[i], o);2205 count++;2206 break;2207 }2208 }2209 }2210#if DBRT_DEBUG2211 else {2212 fprintf(stderr, "read-tree: warning #16 detected\n");2213 show_stage_entry(stderr, "head ", stages[head_match]);2214 show_stage_entry(stderr, "remote ", stages[remote_match]);2215 }2216#endif2217 if (head) { count += keep_entry(head, o); }2218 if (remote) { count += keep_entry(remote, o); }2219 return count;2220}22212222/*2223 * Two-way merge.2224 *2225 * The rule is to "carry forward" what is in the index without losing2226 * information across a "fast-forward", favoring a successful merge2227 * over a merge failure when it makes sense. 
For details of the2228 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.2229 *2230 */2231int twoway_merge(const struct cache_entry * const *src,2232 struct unpack_trees_options *o)2233{2234 const struct cache_entry *current = src[0];2235 const struct cache_entry *oldtree = src[1];2236 const struct cache_entry *newtree = src[2];22372238 if (o->merge_size != 2)2239 return error("Cannot do a twoway merge of %d trees",2240 o->merge_size);22412242 if (oldtree == o->df_conflict_entry)2243 oldtree = NULL;2244 if (newtree == o->df_conflict_entry)2245 newtree = NULL;22462247 if (current) {2248 if (current->ce_flags & CE_CONFLICTED) {2249 if (same(oldtree, newtree) || o->reset) {2250 if (!newtree)2251 return deleted_entry(current, current, o);2252 else2253 return merged_entry(newtree, current, o);2254 }2255 return reject_merge(current, o);2256 } else if ((!oldtree && !newtree) || /* 4 and 5 */2257 (!oldtree && newtree &&2258 same(current, newtree)) || /* 6 and 7 */2259 (oldtree && newtree &&2260 same(oldtree, newtree)) || /* 14 and 15 */2261 (oldtree && newtree &&2262 !same(oldtree, newtree) && /* 18 and 19 */2263 same(current, newtree))) {2264 return keep_entry(current, o);2265 } else if (oldtree && !newtree && same(current, oldtree)) {2266 /* 10 or 11 */2267 return deleted_entry(oldtree, current, o);2268 } else if (oldtree && newtree &&2269 same(current, oldtree) && !same(current, newtree)) {2270 /* 20 or 21 */2271 return merged_entry(newtree, current, o);2272 } else2273 return reject_merge(current, o);2274 }2275 else if (newtree) {2276 if (oldtree && !o->initial_checkout) {2277 /*2278 * deletion of the path was staged;2279 */2280 if (same(oldtree, newtree))2281 return 1;2282 return reject_merge(oldtree, o);2283 }2284 return merged_entry(newtree, current, o);2285 }2286 return deleted_entry(oldtree, current, o);2287}22882289/*2290 * Bind merge.2291 *2292 * Keep the index entries at stage0, collapse stage1 but make sure2293 * stage0 does not have anything 
there.2294 */2295int bind_merge(const struct cache_entry * const *src,2296 struct unpack_trees_options *o)2297{2298 const struct cache_entry *old = src[0];2299 const struct cache_entry *a = src[1];23002301 if (o->merge_size != 1)2302 return error("Cannot do a bind merge of %d trees",2303 o->merge_size);2304 if (a && old)2305 return o->gently ? -1 :2306 error(ERRORMSG(o, ERROR_BIND_OVERLAP),2307 super_prefixed(a->name),2308 super_prefixed(old->name));2309 if (!a)2310 return keep_entry(old, o);2311 else2312 return merged_entry(a, NULL, o);2313}23142315/*2316 * One-way merge.2317 *2318 * The rule is:2319 * - take the stat information from stage0, take the data from stage12320 */2321int oneway_merge(const struct cache_entry * const *src,2322 struct unpack_trees_options *o)2323{2324 const struct cache_entry *old = src[0];2325 const struct cache_entry *a = src[1];23262327 if (o->merge_size != 1)2328 return error("Cannot do a oneway merge of %d trees",2329 o->merge_size);23302331 if (!a || a == o->df_conflict_entry)2332 return deleted_entry(old, old, o);23332334 if (old && same(old, a)) {2335 int update = 0;2336 if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old)) {2337 struct stat st;2338 if (lstat(old->name, &st) ||2339 ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE))2340 update |= CE_UPDATE;2341 }2342 if (o->update && S_ISGITLINK(old->ce_mode) &&2343 should_update_submodules() && !verify_uptodate(old, o))2344 update |= CE_UPDATE;2345 add_entry(o, old, update, 0);2346 return 0;2347 }2348 return merged_entry(a, old, o);2349}