#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "argv-array.h"
#include "repository.h"
#include "config.h"
#include "dir.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "progress.h"
#include "refs.h"
#include "attr.h"
#include "split-index.h"
#include "dir.h"
#include "submodule.h"
#include "submodule-config.h"
#include "fsmonitor.h"
#include "object-store.h"
#include "fetch-object.h"

/*
 * Error messages expected by scripts out of plumbing commands such as
 * read-tree. Non-scripted Porcelain commands are not required to use these
 * messages and in fact are encouraged to reword them to better suit their
 * particular situation. See how "git checkout" and "git merge" replace
 * them using setup_unpack_trees_porcelain(), for example.
 */
static const char *unpack_plumbing_errors[NB_UNPACK_TREES_ERROR_TYPES] = {
	/* ERROR_WOULD_OVERWRITE */
	"Entry '%s' would be overwritten by merge. Cannot merge.",

	/* ERROR_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot merge.",

	/* ERROR_NOT_UPTODATE_DIR */
	"Updating '%s' would lose untracked files in it",

	/* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */
	"Untracked working tree file '%s' would be overwritten by merge.",

	/* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */
	"Untracked working tree file '%s' would be removed by merge.",

	/* ERROR_BIND_OVERLAP */
	"Entry '%s' overlaps with '%s'. Cannot bind.",

	/* ERROR_SPARSE_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot update sparse checkout.",

	/* ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN */
	"Working tree file '%s' would be overwritten by sparse checkout update.",

	/* ERROR_WOULD_LOSE_ORPHANED_REMOVED */
	"Working tree file '%s' would be removed by sparse checkout update.",

	/* ERROR_WOULD_LOSE_SUBMODULE */
	"Submodule '%s' cannot checkout new HEAD.",
};

#define ERRORMSG(o,type) \
	( ((o) && (o)->msgs[(type)]) \
	  ? ((o)->msgs[(type)]) \
	  : (unpack_plumbing_errors[(type)]) )

static const char *super_prefixed(const char *path)
{
	/*
	 * It is necessary and sufficient to have two static buffers
	 * here, as the return value of this function is fed to
	 * error() using the unpack_*_errors[] templates we see above.
72 */ 73static struct strbuf buf[2] = {STRBUF_INIT, STRBUF_INIT}; 74static int super_prefix_len = -1; 75static unsigned idx =ARRAY_SIZE(buf) -1; 76 77if(super_prefix_len <0) { 78const char*super_prefix =get_super_prefix(); 79if(!super_prefix) { 80 super_prefix_len =0; 81}else{ 82int i; 83for(i =0; i <ARRAY_SIZE(buf); i++) 84strbuf_addstr(&buf[i], super_prefix); 85 super_prefix_len = buf[0].len; 86} 87} 88 89if(!super_prefix_len) 90return path; 91 92if(++idx >=ARRAY_SIZE(buf)) 93 idx =0; 94 95strbuf_setlen(&buf[idx], super_prefix_len); 96strbuf_addstr(&buf[idx], path); 97 98return buf[idx].buf; 99} 100 101voidsetup_unpack_trees_porcelain(struct unpack_trees_options *opts, 102const char*cmd) 103{ 104int i; 105const char**msgs = opts->msgs; 106const char*msg; 107 108argv_array_init(&opts->msgs_to_free); 109 110if(!strcmp(cmd,"checkout")) 111 msg = advice_commit_before_merge 112?_("Your local changes to the following files would be overwritten by checkout:\n%%s" 113"Please commit your changes or stash them before you switch branches.") 114:_("Your local changes to the following files would be overwritten by checkout:\n%%s"); 115else if(!strcmp(cmd,"merge")) 116 msg = advice_commit_before_merge 117?_("Your local changes to the following files would be overwritten by merge:\n%%s" 118"Please commit your changes or stash them before you merge.") 119:_("Your local changes to the following files would be overwritten by merge:\n%%s"); 120else 121 msg = advice_commit_before_merge 122?_("Your local changes to the following files would be overwritten by%s:\n%%s" 123"Please commit your changes or stash them before you%s.") 124:_("Your local changes to the following files would be overwritten by%s:\n%%s"); 125 msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] = 126argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd); 127 128 msgs[ERROR_NOT_UPTODATE_DIR] = 129_("Updating the following directories would lose untracked files in them:\n%s"); 130 131if(!strcmp(cmd,"checkout")) 132 msg = advice_commit_before_merge 133?_("The following untracked working tree files would be removed by checkout:\n%%s" 134"Please move or remove them before you switch branches.") 135:_("The following untracked working tree files would be removed by checkout:\n%%s"); 136else if(!strcmp(cmd,"merge")) 137 msg = advice_commit_before_merge 138?_("The following untracked working tree files would be removed by merge:\n%%s" 139"Please move or remove them before you merge.") 140:_("The following untracked working tree files would be removed by merge:\n%%s"); 141else 142 msg = advice_commit_before_merge 143?_("The following untracked working tree files would be removed by%s:\n%%s" 144"Please move or remove them before you%s.") 145:_("The following untracked working tree files would be removed by%s:\n%%s"); 146 msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = 147argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd); 148 149if(!strcmp(cmd,"checkout")) 150 msg = advice_commit_before_merge 151?_("The following untracked working tree files would be overwritten by checkout:\n%%s" 152"Please move or remove them before you switch branches.") 153:_("The following untracked working tree files would be overwritten by checkout:\n%%s"); 154else if(!strcmp(cmd,"merge")) 155 msg = advice_commit_before_merge 156?_("The following untracked working tree files would be overwritten by merge:\n%%s" 157"Please move or remove them before you merge.") 158:_("The following untracked working tree files would be overwritten by merge:\n%%s"); 159else 160 msg = 
advice_commit_before_merge
			? _("The following untracked working tree files would be overwritten by %s:\n%%s"
			    "Please move or remove them before you %s.")
			: _("The following untracked working tree files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] =
		argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd);

	/*
	 * Special case: ERROR_BIND_OVERLAP refers to a pair of paths; we
	 * cannot easily display it as a list.
	 */
	msgs[ERROR_BIND_OVERLAP] = _("Entry '%s' overlaps with '%s'. Cannot bind.");

	msgs[ERROR_SPARSE_NOT_UPTODATE_FILE] =
		_("Cannot update sparse checkout: the following entries are not up to date:\n%s");
	msgs[ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN] =
		_("The following working tree files would be overwritten by sparse checkout update:\n%s");
	msgs[ERROR_WOULD_LOSE_ORPHANED_REMOVED] =
		_("The following working tree files would be removed by sparse checkout update:\n%s");
	msgs[ERROR_WOULD_LOSE_SUBMODULE] =
		_("Cannot update submodule:\n%s");

	opts->show_all_errors = 1;
	/* rejected paths may not have a static buffer */
	for (i = 0; i < ARRAY_SIZE(opts->unpack_rejects); i++)
		opts->unpack_rejects[i].strdup_strings = 1;
}

void clear_unpack_trees_porcelain(struct unpack_trees_options *opts)
{
	argv_array_clear(&opts->msgs_to_free);
	memset(opts->msgs, 0, sizeof(opts->msgs));
}

static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
			unsigned int set, unsigned int clear)
{
	clear |= CE_HASHED;

	if (set & CE_REMOVE)
		set |= CE_WT_REMOVE;

	ce->ce_flags = (ce->ce_flags & ~clear) | set;
	return add_index_entry(&o->result, ce,
			       ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
}

static void add_entry(struct unpack_trees_options *o,
		      const struct cache_entry *ce,
		      unsigned int set, unsigned int clear)
{
	do_add_entry(o, dup_cache_entry(ce, &o->result), set, clear);
}

/*
 * Add an error message on path <path>
 * corresponding to the type <e> with the message <msg>,
 * indicating whether it should be displayed in porcelain or not.
 */
static int add_rejected_path(struct unpack_trees_options *o,
			     enum unpack_trees_error_types e,
			     const char *path)
{
	if (!o->show_all_errors)
		return error(ERRORMSG(o, e), super_prefixed(path));

	/*
	 * Otherwise, insert in a list for future display by
	 * display_error_msgs()
	 */
	string_list_append(&o->unpack_rejects[e], path);
	return -1;
}

/*
 * Display all the error messages stored in a nice way.
 */
static void display_error_msgs(struct unpack_trees_options *o)
{
	int e, i;
	int something_displayed = 0;
	for (e = 0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) {
		struct string_list *rejects = &o->unpack_rejects[e];
		if (rejects->nr > 0) {
			struct strbuf path = STRBUF_INIT;
			something_displayed = 1;
			for (i = 0; i < rejects->nr; i++)
				strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
			error(ERRORMSG(o, e), super_prefixed(path.buf));
			strbuf_release(&path);
		}
		string_list_clear(rejects, 0);
	}
	if (something_displayed)
		fprintf(stderr, _("Aborting\n"));
}

static int check_submodule_move_head(const struct cache_entry *ce,
				     const char *old_id,
				     const char *new_id,
				     struct unpack_trees_options *o)
{
	unsigned flags = SUBMODULE_MOVE_HEAD_DRY_RUN;
	const struct submodule *sub = submodule_from_ce(ce);

	if (!sub)
		return 0;

	if (o->reset)
		flags |= SUBMODULE_MOVE_HEAD_FORCE;

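	/*
	 * Dry-run check: with SUBMODULE_MOVE_HEAD_DRY_RUN set, this call
	 * only reports whether the submodule's HEAD could be moved from
	 * old_id to new_id; it does not touch the working tree here.
	 */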
	if (submodule_move_head(ce->name, old_id, new_id, flags))
		return o->gently ? -1 :
			add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name);
	return 0;
}

/*
 * Perform the loading of the repository's gitmodules file. This function is
 * used by 'check_updates()' to perform loading of the gitmodules file in two
 * different situations:
 * (1) before removing entries from the working tree if the gitmodules file has
 *     been marked for removal. This situation is specified by 'state' == NULL.
 * (2) before checking out entries to the working tree if the gitmodules file
 *     has been marked for update. This situation is specified by 'state' != NULL.
 */
static void load_gitmodules_file(struct index_state *index,
				 struct checkout *state)
{
	int pos = index_name_pos(index, GITMODULES_FILE, strlen(GITMODULES_FILE));

	if (pos >= 0) {
		struct cache_entry *ce = index->cache[pos];
		if (!state && ce->ce_flags & CE_WT_REMOVE) {
			repo_read_gitmodules(the_repository);
		} else if (state && (ce->ce_flags & CE_UPDATE)) {
			submodule_free(the_repository);
			checkout_entry(ce, state, NULL);
			repo_read_gitmodules(the_repository);
		}
	}
}

/*
 * Unlink the last component and schedule the leading directories for
 * removal, such that empty directories get removed.
 */
static void unlink_entry(const struct cache_entry *ce)
{
	const struct submodule *sub = submodule_from_ce(ce);
	if (sub) {
		/* state.force is set at the caller. */
		submodule_move_head(ce->name, "HEAD", NULL,
				    SUBMODULE_MOVE_HEAD_FORCE);
	}
	if (!check_leading_path(ce->name, ce_namelen(ce)))
		return;
	if (remove_or_warn(ce->ce_mode, ce->name))
		return;
	schedule_dir_for_removal(ce->name, ce_namelen(ce));
}

static struct progress *get_progress(struct unpack_trees_options *o)
{
	unsigned cnt = 0, total = 0;
	struct index_state *index = &o->result;

	if (!o->update || !o->verbose_update)
		return NULL;

	for (; cnt < index->cache_nr; cnt++) {
		const struct cache_entry *ce = index->cache[cnt];
		if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE))
			total++;
	}

	return start_delayed_progress(_("Checking out files"), total);
}

static int check_updates(struct unpack_trees_options *o)
{
	unsigned cnt = 0;
	int errs = 0;
	struct progress *progress = NULL;
	struct index_state *index = &o->result;
	struct checkout state = CHECKOUT_INIT;
	int i;

	trace_performance_enter();
	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;
	state.istate = index;

	progress = get_progress(o);

	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKOUT, index);

	if (should_update_submodules() && o->update && !o->dry_run)
		load_gitmodules_file(index, NULL);

	for (i = 0; i < index->cache_nr; i++) {
		const struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_WT_REMOVE) {
			display_progress(progress, ++cnt);
			if (o->update && !o->dry_run)
				unlink_entry(ce);
		}
	}
	remove_marked_cache_entries(index);
	remove_scheduled_dirs();

	if (should_update_submodules() && o->update && !o->dry_run)
		load_gitmodules_file(index, &state);

	enable_delayed_checkout(&state);
	if (repository_format_partial_clone && o->update && !o->dry_run) {
		/*
		 * Prefetch the objects that are to be checked out in the loop
		 * below.
		 */
		struct oid_array to_fetch = OID_ARRAY_INIT;
		int fetch_if_missing_store = fetch_if_missing;
		fetch_if_missing = 0;
		for (i = 0; i < index->cache_nr; i++) {
			struct cache_entry *ce = index->cache[i];
			if ((ce->ce_flags & CE_UPDATE) &&
			    !S_ISGITLINK(ce->ce_mode)) {
				if (!has_object_file(&ce->oid))
					oid_array_append(&to_fetch, &ce->oid);
			}
		}
		if (to_fetch.nr)
			fetch_objects(repository_format_partial_clone,
				      &to_fetch);
		fetch_if_missing = fetch_if_missing_store;
		oid_array_clear(&to_fetch);
	}
	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_UPDATE) {
			if (ce->ce_flags & CE_WT_REMOVE)
				BUG("both update and delete flags are set on %s",
				    ce->name);
			display_progress(progress, ++cnt);
			ce->ce_flags &= ~CE_UPDATE;
			if (o->update && !o->dry_run) {
				errs |= checkout_entry(ce, &state, NULL);
			}
		}
	}
	stop_progress(&progress);
	errs |= finish_delayed_checkout(&state);
	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKIN, NULL);
	trace_performance_leave("check_updates");
	return errs != 0;
}

static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o);
static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types,
				struct unpack_trees_options *o);

static int apply_sparse_checkout(struct index_state *istate,
				 struct cache_entry *ce,
				 struct unpack_trees_options *o)
{
	int was_skip_worktree = ce_skip_worktree(ce);

	if (ce->ce_flags & CE_NEW_SKIP_WORKTREE)
		ce->ce_flags |= CE_SKIP_WORKTREE;
	else
		ce->ce_flags &= ~CE_SKIP_WORKTREE;
	if (was_skip_worktree != ce_skip_worktree(ce)) {
		ce->ce_flags |= CE_UPDATE_IN_BASE;
		mark_fsmonitor_invalid(istate, ce);
		istate->cache_changed |= CE_ENTRY_CHANGED;
	}

	/*
	 * if (!was_skip_worktree && !ce_skip_worktree()) {
	 *	This is perfectly normal. Move on;
	 * }
	 */

	/*
	 * Merge strategies may set CE_UPDATE|CE_REMOVE outside checkout
	 * area as a result of ce_skip_worktree() shortcuts in
	 * verify_absent() and verify_uptodate().
	 * Make sure they don't modify the worktree if they are already
	 * outside the checkout area.
	 */
	if (was_skip_worktree && ce_skip_worktree(ce)) {
		ce->ce_flags &= ~CE_UPDATE;

		/*
		 * By default, when CE_REMOVE is on, CE_WT_REMOVE is also
		 * on to get that file removed from both index and worktree.
		 * If that file is already outside the worktree area, don't
		 * bother removing it.
		 */
		if (ce->ce_flags & CE_REMOVE)
			ce->ce_flags &= ~CE_WT_REMOVE;
	}

	if (!was_skip_worktree && ce_skip_worktree(ce)) {
		/*
		 * If CE_UPDATE is set, verify_uptodate() must have been
		 * called already; also stat info may have been lost after
		 * merged_entry(), so calling verify_uptodate() again may fail.
		 */
		if (!(ce->ce_flags & CE_UPDATE) && verify_uptodate_sparse(ce, o))
			return -1;
		ce->ce_flags |= CE_WT_REMOVE;
		ce->ce_flags &= ~CE_UPDATE;
	}
	if (was_skip_worktree && !ce_skip_worktree(ce)) {
		if (verify_absent_sparse(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o))
			return -1;
		ce->ce_flags |= CE_UPDATE;
	}
	return 0;
}

static inline int call_unpack_fn(const struct cache_entry * const *src,
				 struct unpack_trees_options *o)
{
	int ret = o->fn(src, o);
	if (ret > 0)
		ret = 0;
	return ret;
}

static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o)
{
	ce->ce_flags |= CE_UNPACKED;

	if (o->cache_bottom < o->src_index->cache_nr &&
	    o->src_index->cache[o->cache_bottom] == ce) {
		int bottom = o->cache_bottom;
		while (bottom < o->src_index->cache_nr &&
		       o->src_index->cache[bottom]->ce_flags & CE_UNPACKED)
			bottom++;
		o->cache_bottom = bottom;
	}
}

static void mark_all_ce_unused(struct index_state *index)
{
	int i;
	for (i = 0; i < index->cache_nr; i++)
		index->cache[i]->ce_flags &= ~(CE_UNPACKED | CE_ADDED | CE_NEW_SKIP_WORKTREE);
}

static int locate_in_src_index(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);
	if (pos < 0)
		pos = -1 - pos;
	return pos;
}

/*
 * We call unpack_index_entry() with an unmerged cache entry
 * only in diff-index, and it wants a single callback. Skip
 * the other unmerged entry with the same name.
534 */ 535static voidmark_ce_used_same_name(struct cache_entry *ce, 536struct unpack_trees_options *o) 537{ 538struct index_state *index = o->src_index; 539int len =ce_namelen(ce); 540int pos; 541 542for(pos =locate_in_src_index(ce, o); pos < index->cache_nr; pos++) { 543struct cache_entry *next = index->cache[pos]; 544if(len !=ce_namelen(next) || 545memcmp(ce->name, next->name, len)) 546break; 547mark_ce_used(next, o); 548} 549} 550 551static struct cache_entry *next_cache_entry(struct unpack_trees_options *o) 552{ 553const struct index_state *index = o->src_index; 554int pos = o->cache_bottom; 555 556while(pos < index->cache_nr) { 557struct cache_entry *ce = index->cache[pos]; 558if(!(ce->ce_flags & CE_UNPACKED)) 559return ce; 560 pos++; 561} 562return NULL; 563} 564 565static voidadd_same_unmerged(const struct cache_entry *ce, 566struct unpack_trees_options *o) 567{ 568struct index_state *index = o->src_index; 569int len =ce_namelen(ce); 570int pos =index_name_pos(index, ce->name, len); 571 572if(0<= pos) 573die("programming error in a caller of mark_ce_used_same_name"); 574for(pos = -pos -1; pos < index->cache_nr; pos++) { 575struct cache_entry *next = index->cache[pos]; 576if(len !=ce_namelen(next) || 577memcmp(ce->name, next->name, len)) 578break; 579add_entry(o, next,0,0); 580mark_ce_used(next, o); 581} 582} 583 584static intunpack_index_entry(struct cache_entry *ce, 585struct unpack_trees_options *o) 586{ 587const struct cache_entry *src[MAX_UNPACK_TREES +1] = { NULL, }; 588int ret; 589 590 src[0] = ce; 591 592mark_ce_used(ce, o); 593if(ce_stage(ce)) { 594if(o->skip_unmerged) { 595add_entry(o, ce,0,0); 596return0; 597} 598} 599 ret =call_unpack_fn(src, o); 600if(ce_stage(ce)) 601mark_ce_used_same_name(ce, o); 602return ret; 603} 604 605static intfind_cache_pos(struct traverse_info *,const struct name_entry *); 606 607static voidrestore_cache_bottom(struct traverse_info *info,int bottom) 608{ 609struct unpack_trees_options *o = info->data; 610 611if(o->diff_index_cached) 612return; 613 o->cache_bottom = bottom; 614} 615 616static intswitch_cache_bottom(struct traverse_info *info) 617{ 618struct unpack_trees_options *o = info->data; 619int ret, pos; 620 621if(o->diff_index_cached) 622return0; 623 ret = o->cache_bottom; 624 pos =find_cache_pos(info->prev, &info->name); 625 626if(pos < -1) 627 o->cache_bottom = -2- pos; 628else if(pos <0) 629 o->cache_bottom = o->src_index->cache_nr; 630return ret; 631} 632 633staticinlineintare_same_oid(struct name_entry *name_j,struct name_entry *name_k) 634{ 635return name_j->oid && name_k->oid && !oidcmp(name_j->oid, name_k->oid); 636} 637 638static intall_trees_same_as_cache_tree(int n,unsigned long dirmask, 639struct name_entry *names, 640struct traverse_info *info) 641{ 642struct unpack_trees_options *o = info->data; 643int i; 644 645if(!o->merge || dirmask != ((1<< n) -1)) 646return0; 647 648for(i =1; i < n; i++) 649if(!are_same_oid(names, names + i)) 650return0; 651 652returncache_tree_matches_traversal(o->src_index->cache_tree, names, info); 653} 654 655static intindex_pos_by_traverse_info(struct name_entry *names, 656struct traverse_info *info) 657{ 658struct unpack_trees_options *o = info->data; 659int len =traverse_path_len(info, names); 660char*name =xmalloc(len +1/* slash */+1/* NUL */); 661int pos; 662 663make_traverse_path(name, info, names); 664 name[len++] ='/'; 665 name[len] ='\0'; 666 pos =index_name_pos(o->src_index, name, len); 667if(pos >=0) 668BUG("This is a directory and should not exist in index"); 669 pos = -pos -1; 
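	/*
	 * 'pos' is now the insertion point returned by index_name_pos() for
	 * "name/": the first index entry that sorts after that prefix.  The
	 * check below asserts that it really is the first entry inside this
	 * directory.
	 */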
	if (!starts_with(o->src_index->cache[pos]->name, name) ||
	    (pos > 0 && starts_with(o->src_index->cache[pos-1]->name, name)))
		BUG("pos must point at the first entry in this directory");
	free(name);
	return pos;
}

/*
 * Fast path if we detect that all trees are the same as cache-tree at this
 * path. We'll walk these trees recursively using cache-tree/index instead of
 * ODB since we already know what these trees contain.
 */
static int traverse_by_cache_tree(int pos, int nr_entries, int nr_names,
				  struct name_entry *names,
				  struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	struct cache_entry *tree_ce = NULL;
	int ce_len = 0;
	int i, d;

	if (!o->merge)
		BUG("We need cache-tree to do this optimization");

	/*
	 * Do what unpack_callback() and unpack_nondirectories() normally
	 * do. But we walk all paths in an iterative loop instead.
	 *
	 * D/F conflicts and higher stage entries are not a concern
	 * because cache-tree would be invalidated and we would never
	 * get here in the first place.
	 */
	for (i = 0; i < nr_entries; i++) {
		int new_ce_len, len, rc;

		src[0] = o->src_index->cache[pos + i];

		len = ce_namelen(src[0]);
		new_ce_len = cache_entry_size(len);

		if (new_ce_len > ce_len) {
			new_ce_len <<= 1;
			tree_ce = xrealloc(tree_ce, new_ce_len);
			memset(tree_ce, 0, new_ce_len);
			ce_len = new_ce_len;

			tree_ce->ce_flags = create_ce_flags(0);

			for (d = 1; d <= nr_names; d++)
				src[d] = tree_ce;
		}

		tree_ce->ce_mode = src[0]->ce_mode;
		tree_ce->ce_namelen = len;
		oidcpy(&tree_ce->oid, &src[0]->oid);
		memcpy(tree_ce->name, src[0]->name, len + 1);

		rc = call_unpack_fn((const struct cache_entry * const *)src, o);
		if (rc < 0) {
			free(tree_ce);
			return rc;
		}

		mark_ce_used(src[0], o);
	}
	free(tree_ce);
	if (o->debug_unpack)
		printf("Unpacked %d entries from %s to %s using cache-tree\n",
		       nr_entries,
		       o->src_index->cache[pos]->name,
		       o->src_index->cache[pos + nr_entries - 1]->name);
	return 0;
}

static int traverse_trees_recursive(int n, unsigned long dirmask,
				    unsigned long df_conflicts,
				    struct name_entry *names,
				    struct traverse_info *info)
{
	int i, ret, bottom;
	int nr_buf = 0;
	struct tree_desc t[MAX_UNPACK_TREES];
	void *buf[MAX_UNPACK_TREES];
	struct traverse_info newinfo;
	struct name_entry *p;
	int nr_entries;

	nr_entries = all_trees_same_as_cache_tree(n, dirmask, names, info);
	if (nr_entries > 0) {
		struct unpack_trees_options *o = info->data;
		int pos = index_pos_by_traverse_info(names, info);

		if (!o->merge || df_conflicts)
			BUG("Wrong condition to get here buddy");

		/*
		 * All entries up to 'pos' must have been processed
		 * (i.e. marked CE_UNPACKED) at this point. But to be safe,
		 * save and restore cache_bottom anyway to not miss
		 * unprocessed entries before 'pos'.
		 */
		bottom = o->cache_bottom;
		ret = traverse_by_cache_tree(pos, nr_entries, n, names, info);
		o->cache_bottom = bottom;
		return ret;
	}

	p = names;
	while (!p->mode)
		p++;

	newinfo = *info;
	newinfo.prev = info;
	newinfo.pathspec = info->pathspec;
	newinfo.name = *p;
	newinfo.pathlen += tree_entry_len(p) + 1;
	newinfo.df_conflicts |= df_conflicts;

	/*
	 * Fetch the tree from the ODB for each peer directory in the
	 * n commits.
792 * 793 * For 2- and 3-way traversals, we try to avoid hitting the 794 * ODB twice for the same OID. This should yield a nice speed 795 * up in checkouts and merges when the commits are similar. 796 * 797 * We don't bother doing the full O(n^2) search for larger n, 798 * because wider traversals don't happen that often and we 799 * avoid the search setup. 800 * 801 * When 2 peer OIDs are the same, we just copy the tree 802 * descriptor data. This implicitly borrows the buffer 803 * data from the earlier cell. 804 */ 805for(i =0; i < n; i++, dirmask >>=1) { 806if(i >0&&are_same_oid(&names[i], &names[i -1])) 807 t[i] = t[i -1]; 808else if(i >1&&are_same_oid(&names[i], &names[i -2])) 809 t[i] = t[i -2]; 810else{ 811const struct object_id *oid = NULL; 812if(dirmask &1) 813 oid = names[i].oid; 814 buf[nr_buf++] =fill_tree_descriptor(t + i, oid); 815} 816} 817 818 bottom =switch_cache_bottom(&newinfo); 819 ret =traverse_trees(n, t, &newinfo); 820restore_cache_bottom(&newinfo, bottom); 821 822for(i =0; i < nr_buf; i++) 823free(buf[i]); 824 825return ret; 826} 827 828/* 829 * Compare the traverse-path to the cache entry without actually 830 * having to generate the textual representation of the traverse 831 * path. 832 * 833 * NOTE! This *only* compares up to the size of the traverse path 834 * itself - the caller needs to do the final check for the cache 835 * entry having more data at the end! 836 */ 837static intdo_compare_entry_piecewise(const struct cache_entry *ce,const struct traverse_info *info,const struct name_entry *n) 838{ 839int len, pathlen, ce_len; 840const char*ce_name; 841 842if(info->prev) { 843int cmp =do_compare_entry_piecewise(ce, info->prev, 844&info->name); 845if(cmp) 846return cmp; 847} 848 pathlen = info->pathlen; 849 ce_len =ce_namelen(ce); 850 851/* If ce_len < pathlen then we must have previously hit "name == directory" entry */ 852if(ce_len < pathlen) 853return-1; 854 855 ce_len -= pathlen; 856 ce_name = ce->name + pathlen; 857 858 len =tree_entry_len(n); 859returndf_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode); 860} 861 862static intdo_compare_entry(const struct cache_entry *ce, 863const struct traverse_info *info, 864const struct name_entry *n) 865{ 866int len, pathlen, ce_len; 867const char*ce_name; 868int cmp; 869 870/* 871 * If we have not precomputed the traverse path, it is quicker 872 * to avoid doing so. But if we have precomputed it, 873 * it is quicker to use the precomputed version. 874 */ 875if(!info->traverse_path) 876returndo_compare_entry_piecewise(ce, info, n); 877 878 cmp =strncmp(ce->name, info->traverse_path, info->pathlen); 879if(cmp) 880return cmp; 881 882 pathlen = info->pathlen; 883 ce_len =ce_namelen(ce); 884 885if(ce_len < pathlen) 886return-1; 887 888 ce_len -= pathlen; 889 ce_name = ce->name + pathlen; 890 891 len =tree_entry_len(n); 892returndf_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode); 893} 894 895static intcompare_entry(const struct cache_entry *ce,const struct traverse_info *info,const struct name_entry *n) 896{ 897int cmp =do_compare_entry(ce, info, n); 898if(cmp) 899return cmp; 900 901/* 902 * Even if the beginning compared identically, the ce should 903 * compare as bigger than a directory leading up to it! 
904 */ 905returnce_namelen(ce) >traverse_path_len(info, n); 906} 907 908static intce_in_traverse_path(const struct cache_entry *ce, 909const struct traverse_info *info) 910{ 911if(!info->prev) 912return1; 913if(do_compare_entry(ce, info->prev, &info->name)) 914return0; 915/* 916 * If ce (blob) is the same name as the path (which is a tree 917 * we will be descending into), it won't be inside it. 918 */ 919return(info->pathlen <ce_namelen(ce)); 920} 921 922static struct cache_entry *create_ce_entry(const struct traverse_info *info, 923const struct name_entry *n, 924int stage, 925struct index_state *istate, 926int is_transient) 927{ 928int len =traverse_path_len(info, n); 929struct cache_entry *ce = 930 is_transient ? 931make_empty_transient_cache_entry(len) : 932make_empty_cache_entry(istate, len); 933 934 ce->ce_mode =create_ce_mode(n->mode); 935 ce->ce_flags =create_ce_flags(stage); 936 ce->ce_namelen = len; 937oidcpy(&ce->oid, n->oid); 938make_traverse_path(ce->name, info, n); 939 940return ce; 941} 942 943/* 944 * Note that traverse_by_cache_tree() duplicates some logic in this function 945 * without actually calling it. If you change the logic here you may need to 946 * check and change there as well. 947 */ 948static intunpack_nondirectories(int n,unsigned long mask, 949unsigned long dirmask, 950struct cache_entry **src, 951const struct name_entry *names, 952const struct traverse_info *info) 953{ 954int i; 955struct unpack_trees_options *o = info->data; 956unsigned long conflicts = info->df_conflicts | dirmask; 957 958/* Do we have *only* directories? Nothing to do */ 959if(mask == dirmask && !src[0]) 960return0; 961 962/* 963 * Ok, we've filled in up to any potential index entry in src[0], 964 * now do the rest. 965 */ 966for(i =0; i < n; i++) { 967int stage; 968unsigned int bit =1ul<< i; 969if(conflicts & bit) { 970 src[i + o->merge] = o->df_conflict_entry; 971continue; 972} 973if(!(mask & bit)) 974continue; 975if(!o->merge) 976 stage =0; 977else if(i +1< o->head_idx) 978 stage =1; 979else if(i +1> o->head_idx) 980 stage =3; 981else 982 stage =2; 983 984/* 985 * If the merge bit is set, then the cache entries are 986 * discarded in the following block. In this case, 987 * construct "transient" cache_entries, as they are 988 * not stored in the index. otherwise construct the 989 * cache entry from the index aware logic. 990 */ 991 src[i + o->merge] =create_ce_entry(info, names + i, stage, &o->result, o->merge); 992} 993 994if(o->merge) { 995int rc =call_unpack_fn((const struct cache_entry *const*)src, 996 o); 997for(i =0; i < n; i++) { 998struct cache_entry *ce = src[i + o->merge]; 999if(ce != o->df_conflict_entry)1000discard_cache_entry(ce);1001}1002return rc;1003}10041005for(i =0; i < n; i++)1006if(src[i] && src[i] != o->df_conflict_entry)1007if(do_add_entry(o, src[i],0,0))1008return-1;10091010return0;1011}10121013static intunpack_failed(struct unpack_trees_options *o,const char*message)1014{1015discard_index(&o->result);1016if(!o->gently && !o->exiting_early) {1017if(message)1018returnerror("%s", message);1019return-1;1020}1021return-1;1022}10231024/*1025 * The tree traversal is looking at name p. If we have a matching entry,1026 * return it. 
If name p is a directory in the index, do not return1027 * anything, as we will want to match it when the traversal descends into1028 * the directory.1029 */1030static intfind_cache_pos(struct traverse_info *info,1031const struct name_entry *p)1032{1033int pos;1034struct unpack_trees_options *o = info->data;1035struct index_state *index = o->src_index;1036int pfxlen = info->pathlen;1037int p_len =tree_entry_len(p);10381039for(pos = o->cache_bottom; pos < index->cache_nr; pos++) {1040const struct cache_entry *ce = index->cache[pos];1041const char*ce_name, *ce_slash;1042int cmp, ce_len;10431044if(ce->ce_flags & CE_UNPACKED) {1045/*1046 * cache_bottom entry is already unpacked, so1047 * we can never match it; don't check it1048 * again.1049 */1050if(pos == o->cache_bottom)1051++o->cache_bottom;1052continue;1053}1054if(!ce_in_traverse_path(ce, info)) {1055/*1056 * Check if we can skip future cache checks1057 * (because we're already past all possible1058 * entries in the traverse path).1059 */1060if(info->traverse_path) {1061if(strncmp(ce->name, info->traverse_path,1062 info->pathlen) >0)1063break;1064}1065continue;1066}1067 ce_name = ce->name + pfxlen;1068 ce_slash =strchr(ce_name,'/');1069if(ce_slash)1070 ce_len = ce_slash - ce_name;1071else1072 ce_len =ce_namelen(ce) - pfxlen;1073 cmp =name_compare(p->path, p_len, ce_name, ce_len);1074/*1075 * Exact match; if we have a directory we need to1076 * delay returning it.1077 */1078if(!cmp)1079return ce_slash ? -2- pos : pos;1080if(0< cmp)1081continue;/* keep looking */1082/*1083 * ce_name sorts after p->path; could it be that we1084 * have files under p->path directory in the index?1085 * E.g. ce_name == "t-i", and p->path == "t"; we may1086 * have "t/a" in the index.1087 */1088if(p_len < ce_len && !memcmp(ce_name, p->path, p_len) &&1089 ce_name[p_len] <'/')1090continue;/* keep looking */1091break;1092}1093return-1;1094}10951096static struct cache_entry *find_cache_entry(struct traverse_info *info,1097const struct name_entry *p)1098{1099int pos =find_cache_pos(info, p);1100struct unpack_trees_options *o = info->data;11011102if(0<= pos)1103return o->src_index->cache[pos];1104else1105return NULL;1106}11071108static voiddebug_path(struct traverse_info *info)1109{1110if(info->prev) {1111debug_path(info->prev);1112if(*info->prev->name.path)1113putchar('/');1114}1115printf("%s", info->name.path);1116}11171118static voiddebug_name_entry(int i,struct name_entry *n)1119{1120printf("ent#%d %06o%s\n", i,1121 n->path ? n->mode :0,1122 n->path ? n->path :"(missing)");1123}11241125static voiddebug_unpack_callback(int n,1126unsigned long mask,1127unsigned long dirmask,1128struct name_entry *names,1129struct traverse_info *info)1130{1131int i;1132printf("* unpack mask%lu, dirmask%lu, cnt%d",1133 mask, dirmask, n);1134debug_path(info);1135putchar('\n');1136for(i =0; i < n; i++)1137debug_name_entry(i, names + i);1138}11391140/*1141 * Note that traverse_by_cache_tree() duplicates some logic in this function1142 * without actually calling it. 
If you change the logic here you may need to1143 * check and change there as well.1144 */1145static intunpack_callback(int n,unsigned long mask,unsigned long dirmask,struct name_entry *names,struct traverse_info *info)1146{1147struct cache_entry *src[MAX_UNPACK_TREES +1] = { NULL, };1148struct unpack_trees_options *o = info->data;1149const struct name_entry *p = names;11501151/* Find first entry with a real name (we could use "mask" too) */1152while(!p->mode)1153 p++;11541155if(o->debug_unpack)1156debug_unpack_callback(n, mask, dirmask, names, info);11571158/* Are we supposed to look at the index too? */1159if(o->merge) {1160while(1) {1161int cmp;1162struct cache_entry *ce;11631164if(o->diff_index_cached)1165 ce =next_cache_entry(o);1166else1167 ce =find_cache_entry(info, p);11681169if(!ce)1170break;1171 cmp =compare_entry(ce, info, p);1172if(cmp <0) {1173if(unpack_index_entry(ce, o) <0)1174returnunpack_failed(o, NULL);1175continue;1176}1177if(!cmp) {1178if(ce_stage(ce)) {1179/*1180 * If we skip unmerged index1181 * entries, we'll skip this1182 * entry *and* the tree1183 * entries associated with it!1184 */1185if(o->skip_unmerged) {1186add_same_unmerged(ce, o);1187return mask;1188}1189}1190 src[0] = ce;1191}1192break;1193}1194}11951196if(unpack_nondirectories(n, mask, dirmask, src, names, info) <0)1197return-1;11981199if(o->merge && src[0]) {1200if(ce_stage(src[0]))1201mark_ce_used_same_name(src[0], o);1202else1203mark_ce_used(src[0], o);1204}12051206/* Now handle any directories.. */1207if(dirmask) {1208/* special case: "diff-index --cached" looking at a tree */1209if(o->diff_index_cached &&1210 n ==1&& dirmask ==1&&S_ISDIR(names->mode)) {1211int matches;1212 matches =cache_tree_matches_traversal(o->src_index->cache_tree,1213 names, info);1214/*1215 * Everything under the name matches; skip the1216 * entire hierarchy. diff_index_cached codepath1217 * special cases D/F conflicts in such a way that1218 * it does not do any look-ahead, so this is safe.1219 */1220if(matches) {1221 o->cache_bottom += matches;1222return mask;1223}1224}12251226if(traverse_trees_recursive(n, dirmask, mask & ~dirmask,1227 names, info) <0)1228return-1;1229return mask;1230}12311232return mask;1233}12341235static intclear_ce_flags_1(struct cache_entry **cache,int nr,1236struct strbuf *prefix,1237int select_mask,int clear_mask,1238struct exclude_list *el,int defval);12391240/* Whole directory matching */1241static intclear_ce_flags_dir(struct cache_entry **cache,int nr,1242struct strbuf *prefix,1243char*basename,1244int select_mask,int clear_mask,1245struct exclude_list *el,int defval)1246{1247struct cache_entry **cache_end;1248int dtype = DT_DIR;1249int ret =is_excluded_from_list(prefix->buf, prefix->len,1250 basename, &dtype, el, &the_index);1251int rc;12521253strbuf_addch(prefix,'/');12541255/* If undecided, use matching result of parent dir in defval */1256if(ret <0)1257 ret = defval;12581259for(cache_end = cache; cache_end != cache + nr; cache_end++) {1260struct cache_entry *ce = *cache_end;1261if(strncmp(ce->name, prefix->buf, prefix->len))1262break;1263}12641265/*1266 * TODO: check el, if there are no patterns that may conflict1267 * with ret (iow, we know in advance the incl/excl1268 * decision for the entire directory), clear flag here without1269 * calling clear_ce_flags_1(). 
That function will call
	 * the expensive is_excluded_from_list() on every entry.
	 */
	rc = clear_ce_flags_1(cache, cache_end - cache,
			      prefix,
			      select_mask, clear_mask,
			      el, ret);
	strbuf_setlen(prefix, prefix->len - 1);
	return rc;
}

/*
 * Traverse the index, find every entry that matches according to
 * o->el. Do "ce_flags &= ~clear_mask" on those entries. Return the
 * number of traversed entries.
 *
 * If select_mask is non-zero, only entries whose ce_flags has one of
 * those bits enabled are traversed.
 *
 * cache	: pointer to an index entry
 * prefix_len	: an offset to its path
 *
 * The current path ("prefix") including the trailing '/' is
 *   cache[0]->name[0..(prefix_len-1)]
 * Top level path has prefix_len zero.
 */
static int clear_ce_flags_1(struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct exclude_list *el, int defval)
{
	struct cache_entry **cache_end = cache + nr;

	/*
	 * Process all entries that have the given prefix and meet
	 * select_mask condition
	 */
	while (cache != cache_end) {
		struct cache_entry *ce = *cache;
		const char *name, *slash;
		int len, dtype, ret;

		if (select_mask && !(ce->ce_flags & select_mask)) {
			cache++;
			continue;
		}

		if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len))
			break;

		name = ce->name + prefix->len;
		slash = strchr(name, '/');

		/* If it's a directory, try whole directory match first */
		if (slash) {
			int processed;

			len = slash - name;
			strbuf_add(prefix, name, len);

			processed = clear_ce_flags_dir(cache, cache_end - cache,
						       prefix,
						       prefix->buf + prefix->len - len,
						       select_mask, clear_mask,
						       el, defval);

			/* clear_c_f_dir eats a whole dir already? */
			if (processed) {
				cache += processed;
				strbuf_setlen(prefix, prefix->len - len);
				continue;
			}

			strbuf_addch(prefix, '/');
			cache += clear_ce_flags_1(cache, cache_end - cache,
						  prefix,
						  select_mask, clear_mask, el, defval);
			strbuf_setlen(prefix, prefix->len - len - 1);
			continue;
		}

		/* Non-directory */
		dtype = ce_to_dtype(ce);
		ret = is_excluded_from_list(ce->name, ce_namelen(ce),
					    name, &dtype, el, &the_index);
		if (ret < 0)
			ret = defval;
		if (ret > 0)
			ce->ce_flags &= ~clear_mask;
		cache++;
	}
	return nr - (cache_end - cache);
}

static int clear_ce_flags(struct cache_entry **cache, int nr,
			  int select_mask, int clear_mask,
			  struct exclude_list *el)
{
	static struct strbuf prefix = STRBUF_INIT;

	strbuf_reset(&prefix);

	return clear_ce_flags_1(cache, nr,
				&prefix,
				select_mask, clear_mask,
				el, 0);
}

/*
 * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout
 */
static void mark_new_skip_worktree(struct exclude_list *el,
				   struct index_state *the_index,
				   int select_flag, int skip_wt_flag)
{
	int i;

	/*
	 * 1. Pretend the narrowest worktree: only unmerged entries
	 * are checked out
	 */
	for (i = 0; i < the_index->cache_nr; i++) {
		struct cache_entry *ce = the_index->cache[i];

		if (select_flag && !(ce->ce_flags & select_flag))
			continue;

		if (!ce_stage(ce) && !(ce->ce_flags & CE_CONFLICTED))
			ce->ce_flags |= skip_wt_flag;
		else
			ce->ce_flags &= ~skip_wt_flag;
	}

	/*
	 * 2. Widen worktree according to sparse-checkout file.
	 * Matched entries will have skip_wt_flag cleared (i.e.
"in")1405 */1406clear_ce_flags(the_index->cache, the_index->cache_nr,1407 select_flag, skip_wt_flag, el);1408}14091410static intverify_absent(const struct cache_entry *,1411enum unpack_trees_error_types,1412struct unpack_trees_options *);1413/*1414 * N-way merge "len" trees. Returns 0 on success, -1 on failure to manipulate the1415 * resulting index, -2 on failure to reflect the changes to the work tree.1416 *1417 * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally1418 */1419intunpack_trees(unsigned len,struct tree_desc *t,struct unpack_trees_options *o)1420{1421int i, ret;1422static struct cache_entry *dfc;1423struct exclude_list el;14241425if(len > MAX_UNPACK_TREES)1426die("unpack_trees takes at most%dtrees", MAX_UNPACK_TREES);14271428trace_performance_enter();1429memset(&el,0,sizeof(el));1430if(!core_apply_sparse_checkout || !o->update)1431 o->skip_sparse_checkout =1;1432if(!o->skip_sparse_checkout) {1433char*sparse =git_pathdup("info/sparse-checkout");1434if(add_excludes_from_file_to_list(sparse,"",0, &el, NULL) <0)1435 o->skip_sparse_checkout =1;1436else1437 o->el = ⪙1438free(sparse);1439}14401441memset(&o->result,0,sizeof(o->result));1442 o->result.initialized =1;1443 o->result.timestamp.sec = o->src_index->timestamp.sec;1444 o->result.timestamp.nsec = o->src_index->timestamp.nsec;1445 o->result.version = o->src_index->version;1446if(!o->src_index->split_index) {1447 o->result.split_index = NULL;1448}else if(o->src_index == o->dst_index) {1449/*1450 * o->dst_index (and thus o->src_index) will be discarded1451 * and overwritten with o->result at the end of this function,1452 * so just use src_index's split_index to avoid having to1453 * create a new one.1454 */1455 o->result.split_index = o->src_index->split_index;1456 o->result.split_index->refcount++;1457}else{1458 o->result.split_index =init_split_index(&o->result);1459}1460oidcpy(&o->result.oid, &o->src_index->oid);1461 o->merge_size = len;1462mark_all_ce_unused(o->src_index);14631464/*1465 * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries1466 */1467if(!o->skip_sparse_checkout)1468mark_new_skip_worktree(o->el, o->src_index,0, CE_NEW_SKIP_WORKTREE);14691470if(!dfc)1471 dfc =xcalloc(1,cache_entry_size(0));1472 o->df_conflict_entry = dfc;14731474if(len) {1475const char*prefix = o->prefix ? o->prefix :"";1476struct traverse_info info;14771478setup_traverse_info(&info, prefix);1479 info.fn = unpack_callback;1480 info.data = o;1481 info.show_all_errors = o->show_all_errors;1482 info.pathspec = o->pathspec;14831484if(o->prefix) {1485/*1486 * Unpack existing index entries that sort before the1487 * prefix the tree is spliced into. Note that o->merge1488 * is always true in this case.1489 */1490while(1) {1491struct cache_entry *ce =next_cache_entry(o);1492if(!ce)1493break;1494if(ce_in_traverse_path(ce, &info))1495break;1496if(unpack_index_entry(ce, o) <0)1497goto return_failed;1498}1499}15001501trace_performance_enter();1502 ret =traverse_trees(len, t, &info);1503trace_performance_leave("traverse_trees");1504if(ret <0)1505goto return_failed;1506}15071508/* Any left-over entries in the index? 
 */
	if (o->merge) {
		while (1) {
			struct cache_entry *ce = next_cache_entry(o);
			if (!ce)
				break;
			if (unpack_index_entry(ce, o) < 0)
				goto return_failed;
		}
	}
	mark_all_ce_unused(o->src_index);

	if (o->trivial_merges_only && o->nontrivial_merge) {
		ret = unpack_failed(o, "Merge requires file-level merging");
		goto done;
	}

	if (!o->skip_sparse_checkout) {
		int empty_worktree = 1;

		/*
		 * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #1.
		 * If they will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE
		 * so apply_sparse_checkout() won't attempt to remove them from the worktree.
		 */
		mark_new_skip_worktree(o->el, &o->result, CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);

		ret = 0;
		for (i = 0; i < o->result.cache_nr; i++) {
			struct cache_entry *ce = o->result.cache[i];

			/*
			 * Entries marked with CE_ADDED in merged_entry() do not have
			 * the verify_absent() check (the check is effectively disabled
			 * because CE_NEW_SKIP_WORKTREE is set unconditionally).
			 *
			 * Do the real check now because we now have the
			 * correct CE_NEW_SKIP_WORKTREE.
			 */
			if (ce->ce_flags & CE_ADDED &&
			    verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}

			if (apply_sparse_checkout(&o->result, ce, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}
			if (!ce_skip_worktree(ce))
				empty_worktree = 0;

		}
		if (ret < 0)
			goto return_failed;
		/*
		 * Sparse checkout is meant to narrow down the checkout area
		 * but it does not make sense to narrow down to an empty working
		 * tree. This is usually a mistake in sparse checkout rules.
		 * Do not allow users to do that.
		 */
		if (o->result.cache_nr && empty_worktree) {
			ret = unpack_failed(o, "Sparse checkout leaves no entry on working directory");
			goto done;
		}
	}

	ret = check_updates(o) ? (-2) : 0;
	if (o->dst_index) {
		move_index_extensions(&o->result, o->src_index);
		if (!ret) {
			if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
				cache_tree_verify(&o->result);
			if (!o->result.cache_tree)
				o->result.cache_tree = cache_tree();
			if (!cache_tree_fully_valid(o->result.cache_tree))
				cache_tree_update(&o->result,
						  WRITE_TREE_SILENT |
						  WRITE_TREE_REPAIR);
		}
		discard_index(o->dst_index);
		*o->dst_index = o->result;
	} else {
		discard_index(&o->result);
	}
	o->src_index = NULL;

done:
	trace_performance_leave("unpack_trees");
	clear_exclude_list(&el);
	return ret;

return_failed:
	if (o->show_all_errors)
		display_error_msgs(o);
	mark_all_ce_unused(o->src_index);
	ret = unpack_failed(o, NULL);
	if (o->exiting_early)
		ret = 0;
	goto done;
}

/* Here come the merge functions */

static int reject_merge(const struct cache_entry *ce,
			struct unpack_trees_options *o)
{
	return o->gently ?
-1:1618add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);1619}16201621static intsame(const struct cache_entry *a,const struct cache_entry *b)1622{1623if(!!a != !!b)1624return0;1625if(!a && !b)1626return1;1627if((a->ce_flags | b->ce_flags) & CE_CONFLICTED)1628return0;1629return a->ce_mode == b->ce_mode &&1630!oidcmp(&a->oid, &b->oid);1631}163216331634/*1635 * When a CE gets turned into an unmerged entry, we1636 * want it to be up-to-date1637 */1638static intverify_uptodate_1(const struct cache_entry *ce,1639struct unpack_trees_options *o,1640enum unpack_trees_error_types error_type)1641{1642struct stat st;16431644if(o->index_only)1645return0;16461647/*1648 * CE_VALID and CE_SKIP_WORKTREE cheat, we better check again1649 * if this entry is truly up-to-date because this file may be1650 * overwritten.1651 */1652if((ce->ce_flags & CE_VALID) ||ce_skip_worktree(ce))1653;/* keep checking */1654else if(o->reset ||ce_uptodate(ce))1655return0;16561657if(!lstat(ce->name, &st)) {1658int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE;1659unsigned changed =ie_match_stat(o->src_index, ce, &st, flags);16601661if(submodule_from_ce(ce)) {1662int r =check_submodule_move_head(ce,1663"HEAD",oid_to_hex(&ce->oid), o);1664if(r)1665return o->gently ? -1:1666add_rejected_path(o, error_type, ce->name);1667return0;1668}16691670if(!changed)1671return0;1672/*1673 * Historic default policy was to allow submodule to be out1674 * of sync wrt the superproject index. If the submodule was1675 * not considered interesting above, we don't care here.1676 */1677if(S_ISGITLINK(ce->ce_mode))1678return0;16791680 errno =0;1681}1682if(errno == ENOENT)1683return0;1684return o->gently ? -1:1685add_rejected_path(o, error_type, ce->name);1686}16871688intverify_uptodate(const struct cache_entry *ce,1689struct unpack_trees_options *o)1690{1691if(!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))1692return0;1693returnverify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);1694}16951696static intverify_uptodate_sparse(const struct cache_entry *ce,1697struct unpack_trees_options *o)1698{1699returnverify_uptodate_1(ce, o, ERROR_SPARSE_NOT_UPTODATE_FILE);1700}17011702static voidinvalidate_ce_path(const struct cache_entry *ce,1703struct unpack_trees_options *o)1704{1705if(!ce)1706return;1707cache_tree_invalidate_path(o->src_index, ce->name);1708untracked_cache_invalidate_path(o->src_index, ce->name,1);1709}17101711/*1712 * Check that checking out ce->sha1 in subdir ce->name is not1713 * going to overwrite any working files.1714 *1715 * Currently, git does not checkout subprojects during a superproject1716 * checkout, so it is not going to overwrite anything.1717 */1718static intverify_clean_submodule(const char*old_sha1,1719const struct cache_entry *ce,1720enum unpack_trees_error_types error_type,1721struct unpack_trees_options *o)1722{1723if(!submodule_from_ce(ce))1724return0;17251726returncheck_submodule_move_head(ce, old_sha1,1727oid_to_hex(&ce->oid), o);1728}17291730static intverify_clean_subdirectory(const struct cache_entry *ce,1731enum unpack_trees_error_types error_type,1732struct unpack_trees_options *o)1733{1734/*1735 * we are about to extract "ce->name"; we would not want to lose1736 * anything in the existing directory there.1737 */1738int namelen;1739int i;1740struct dir_struct d;1741char*pathbuf;1742int cnt =0;17431744if(S_ISGITLINK(ce->ce_mode)) {1745struct object_id oid;1746int sub_head =resolve_gitlink_ref(ce->name,"HEAD", &oid);1747/*1748 * If we are not going to update the submodule, then1749 * we 
don't care.1750 */1751if(!sub_head && !oidcmp(&oid, &ce->oid))1752return0;1753returnverify_clean_submodule(sub_head ? NULL :oid_to_hex(&oid),1754 ce, error_type, o);1755}17561757/*1758 * First let's make sure we do not have a local modification1759 * in that directory.1760 */1761 namelen =ce_namelen(ce);1762for(i =locate_in_src_index(ce, o);1763 i < o->src_index->cache_nr;1764 i++) {1765struct cache_entry *ce2 = o->src_index->cache[i];1766int len =ce_namelen(ce2);1767if(len < namelen ||1768strncmp(ce->name, ce2->name, namelen) ||1769 ce2->name[namelen] !='/')1770break;1771/*1772 * ce2->name is an entry in the subdirectory to be1773 * removed.1774 */1775if(!ce_stage(ce2)) {1776if(verify_uptodate(ce2, o))1777return-1;1778add_entry(o, ce2, CE_REMOVE,0);1779invalidate_ce_path(ce, o);1780mark_ce_used(ce2, o);1781}1782 cnt++;1783}17841785/*1786 * Then we need to make sure that we do not lose a locally1787 * present file that is not ignored.1788 */1789 pathbuf =xstrfmt("%.*s/", namelen, ce->name);17901791memset(&d,0,sizeof(d));1792if(o->dir)1793 d.exclude_per_dir = o->dir->exclude_per_dir;1794 i =read_directory(&d, &the_index, pathbuf, namelen+1, NULL);1795if(i)1796return o->gently ? -1:1797add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);1798free(pathbuf);1799return cnt;1800}18011802/*1803 * This gets called when there was no index entry for the tree entry 'dst',1804 * but we found a file in the working tree that 'lstat()' said was fine,1805 * and we're on a case-insensitive filesystem.1806 *1807 * See if we can find a case-insensitive match in the index that also1808 * matches the stat information, and assume it's that other file!1809 */1810static inticase_exists(struct unpack_trees_options *o,const char*name,int len,struct stat *st)1811{1812const struct cache_entry *src;18131814 src =index_file_exists(o->src_index, name, len,1);1815return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);1816}18171818static intcheck_ok_to_remove(const char*name,int len,int dtype,1819const struct cache_entry *ce,struct stat *st,1820enum unpack_trees_error_types error_type,1821struct unpack_trees_options *o)1822{1823const struct cache_entry *result;18241825/*1826 * It may be that the 'lstat()' succeeded even though1827 * target 'ce' was absent, because there is an old1828 * entry that is different only in case..1829 *1830 * Ignore that lstat() if it matches.1831 */1832if(ignore_case &&icase_exists(o, name, len, st))1833return0;18341835if(o->dir &&1836is_excluded(o->dir, &the_index, name, &dtype))1837/*1838 * ce->name is explicitly excluded, so it is Ok to1839 * overwrite it.1840 */1841return0;1842if(S_ISDIR(st->st_mode)) {1843/*1844 * We are checking out path "foo" and1845 * found "foo/." in the working tree.1846 * This is tricky -- if we have modified1847 * files that are in "foo/" we would lose1848 * them.1849 */1850if(verify_clean_subdirectory(ce, error_type, o) <0)1851return-1;1852return0;1853}18541855/*1856 * The previous round may already have decided to1857 * delete this path, which is in a subdirectory that1858 * is being replaced with a blob.1859 */1860 result =index_file_exists(&o->result, name, len,0);1861if(result) {1862if(result->ce_flags & CE_REMOVE)1863return0;1864}18651866return o->gently ? 
		-1 :
		add_rejected_path(o, error_type, name);
}

/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked, unless it is ignored.
 */
static int verify_absent_1(const struct cache_entry *ce,
			   enum unpack_trees_error_types error_type,
			   struct unpack_trees_options *o)
{
	int len;
	struct stat st;

	if (o->index_only || o->reset || !o->update)
		return 0;

	len = check_leading_path(ce->name, ce_namelen(ce));
	if (!len)
		return 0;
	else if (len > 0) {
		char *path;
		int ret;

		path = xmemdupz(ce->name, len);
		if (lstat(path, &st))
			ret = error_errno("cannot stat '%s'", path);
		else {
			if (submodule_from_ce(ce))
				ret = check_submodule_move_head(ce,
								oid_to_hex(&ce->oid),
								NULL, o);
			else
				ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL,
							 &st, error_type, o);
		}
		free(path);
		return ret;
	} else if (lstat(ce->name, &st)) {
		if (errno != ENOENT)
			return error_errno("cannot stat '%s'", ce->name);
		return 0;
	} else {
		if (submodule_from_ce(ce))
			return check_submodule_move_head(ce, oid_to_hex(&ce->oid),
							 NULL, o);

		return check_ok_to_remove(ce->name, ce_namelen(ce),
					  ce_to_dtype(ce), ce, &st,
					  error_type, o);
	}
}

static int verify_absent(const struct cache_entry *ce,
			 enum unpack_trees_error_types error_type,
			 struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_absent_1(ce, error_type, o);
}

static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types error_type,
				struct unpack_trees_options *o)
{
	enum unpack_trees_error_types orphaned_error = error_type;
	if (orphaned_error == ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN)
		orphaned_error = ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN;

	return verify_absent_1(ce, orphaned_error, o);
}

static int merged_entry(const struct cache_entry *ce,
			const struct cache_entry *old,
			struct unpack_trees_options *o)
{
	int update = CE_UPDATE;
	struct cache_entry *merge = dup_cache_entry(ce, &o->result);

	if (!old) {
		/*
		 * New index entries. In sparse checkout, the following
		 * verify_absent() will be delayed until after
		 * traverse_trees() finishes in unpack_trees(), then:
		 *
		 *  - CE_NEW_SKIP_WORKTREE will be computed correctly
		 *  - verify_absent() will be called again, this time with
		 *    the correct CE_NEW_SKIP_WORKTREE
		 *
		 * The verify_absent() call here does nothing in sparse
		 * checkout (i.e. o->skip_sparse_checkout == 0)
		 */
		update |= CE_ADDED;
		merge->ce_flags |= CE_NEW_SKIP_WORKTREE;

		if (verify_absent(merge,
				  ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
			discard_cache_entry(merge);
			return -1;
		}
		invalidate_ce_path(merge, o);

		if (submodule_from_ce(ce)) {
			int ret = check_submodule_move_head(ce, NULL,
							    oid_to_hex(&ce->oid),
							    o);
			if (ret)
				return ret;
		}

	} else if (!(old->ce_flags & CE_CONFLICTED)) {
		/*
		 * See if we can re-use the old CE directly.
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on a match; otherwise
		 * we will end up overwriting local changes in the work tree.
		 */
		if (same(old, merge)) {
			copy_cache_entry(merge, old);
			update = 0;
		} else {
			if (verify_uptodate(old, o)) {
				discard_cache_entry(merge);
				return -1;
			}
			/* Migrate old flags over */
			update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);
			invalidate_ce_path(old, o);
		}

		if (submodule_from_ce(ce)) {
			int ret = check_submodule_move_head(ce, oid_to_hex(&old->oid),
							    oid_to_hex(&ce->oid),
							    o);
			if (ret)
				return ret;
		}
	} else {
		/*
		 * Previously unmerged entry left as an existence
		 * marker by read_index_unmerged();
		 */
		invalidate_ce_path(old, o);
	}

	do_add_entry(o, merge, update, CE_STAGEMASK);
	return 1;
}

static int deleted_entry(const struct cache_entry *ce,
			 const struct cache_entry *old,
			 struct unpack_trees_options *o)
{
	/* Did it exist in the index? */
	if (!old) {
		if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
			return -1;
		return 0;
	}
	if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o))
		return -1;
	add_entry(o, ce, CE_REMOVE, 0);
	invalidate_ce_path(ce, o);
	return 1;
}

static int keep_entry(const struct cache_entry *ce,
		      struct unpack_trees_options *o)
{
	add_entry(o, ce, 0, 0);
	if (ce_stage(ce))
		invalidate_ce_path(ce, o);
	return 1;
}

#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ce->ce_mode,
			oid_to_hex(&ce->oid),
			ce_stage(ce),
			ce->name);
}
#endif

int threeway_merge(const struct cache_entry * const *stages,
		   struct unpack_trees_options *o)
{
	const struct cache_entry *index;
	const struct cache_entry *head;
	const struct cache_entry *remote = stages[o->head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	for (i = 1; i < o->head_idx; i++) {
		if (!stages[i] || stages[i] == o->df_conflict_entry)
			any_anc_missing = 1;
		else
			no_anc_exists = 0;
	}

	index = stages[0];
	head = stages[o->head_idx];

	if (head == o->df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == o->df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	/*
	 * First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < o->head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/*
	 * We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			return reject_merge(index, o);
		return merged_entry(remote, index, o);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head))
		return reject_merge(index, o);

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index, o);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index, o);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/*
	 * Under the "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (o->aggressive) {
		int head_deleted = !head;
		int remote_deleted = !remote;
		const struct cache_entry *ce = NULL;

		if (index)
			ce = index;
		else if (head)
			ce = head;
		else if (remote)
			ce = remote;
		else {
			for (i = 1; i < o->head_idx; i++) {
				if (stages[i] && stages[i] != o->df_conflict_entry) {
					ce = stages[i];
					break;
				}
			}
		}

		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index, o);
			if (ce && !head_deleted) {
				if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
					return -1;
			}
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index, o);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		if (verify_uptodate(index, o))
			return -1;
	}

	o->nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #10, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		for (i = 1; i < o->head_idx; i++) {
			if (stages[i] && stages[i] != o->df_conflict_entry) {
				keep_entry(stages[i], o);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head, o); }
	if (remote) { count += keep_entry(remote, o); }
	return count;
}

/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast-forward", favoring a successful merge
 * over a merge failure when it makes sense.  For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 *
 */
int twoway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *current = src[0];
	const struct cache_entry *oldtree = src[1];
	const struct cache_entry *newtree = src[2];

	if (o->merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     o->merge_size);

	if (oldtree == o->df_conflict_entry)
		oldtree = NULL;
	if (newtree == o->df_conflict_entry)
		newtree = NULL;

	if (current) {
		if (current->ce_flags & CE_CONFLICTED) {
			if (same(oldtree, newtree) || o->reset) {
				if (!newtree)
					return deleted_entry(current, current, o);
				else
					return merged_entry(newtree, current, o);
			}
			return reject_merge(current, o);
		} else if ((!oldtree && !newtree) || /* 4 and 5 */
			   (!oldtree && newtree &&
			    same(current, newtree)) || /* 6 and 7 */
			   (oldtree && newtree &&
			    same(oldtree, newtree)) || /* 14 and 15 */
			   (oldtree && newtree &&
			    !same(oldtree, newtree) && /* 18 and 19 */
			    same(current, newtree))) {
			return keep_entry(current, o);
		} else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current, o);
		} else if (oldtree && newtree &&
			   same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current, o);
		} else
			return reject_merge(current, o);
	}
	else if (newtree) {
		if (oldtree && !o->initial_checkout) {
			/*
			 * deletion of the path was staged;
			 */
			if (same(oldtree, newtree))
				return 1;
			return reject_merge(oldtree, o);
		}
		return merged_entry(newtree, current, o);
	}
	return deleted_entry(oldtree, current, o);
}

/*
 * Bind merge.
 *
 * Keep the index entries at stage0, collapse stage1 but make sure
 * stage0 does not have anything there.
 */
int bind_merge(const struct cache_entry * const *src,
	       struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a bind merge of %d trees",
			     o->merge_size);
	if (a && old)
		return o->gently ? -1 :
			error(ERRORMSG(o, ERROR_BIND_OVERLAP),
			      super_prefixed(a->name),
			      super_prefixed(old->name));
	if (!a)
		return keep_entry(old, o);
	else
		return merged_entry(a, NULL, o);
}

/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
int oneway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     o->merge_size);

	if (!a || a == o->df_conflict_entry)
		return deleted_entry(old, old, o);

	if (old && same(old, a)) {
		int update = 0;
		if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old)) {
			struct stat st;
			if (lstat(old->name, &st) ||
			    ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE))
				update |= CE_UPDATE;
		}
		if (o->update && S_ISGITLINK(old->ce_mode) &&
		    should_update_submodules() && !verify_uptodate(old, o))
			update |= CE_UPDATE;
		add_entry(o, old, update, 0);
		return 0;
	}
	return merged_entry(a, old, o);
}
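
/*
 * A minimal sketch of how a caller might drive the merge functions
 * above; it is illustrative only and kept as a comment so it does not
 * become part of this file's API. The helper name "switch_to_tree" and
 * its error handling are made up, but the pattern itself (fill in
 * unpack_trees_options, pick one of oneway_merge/twoway_merge/
 * threeway_merge/bind_merge as the per-entry function, then call
 * unpack_trees()) is roughly what checkout-style callers do:
 *
 *	static int switch_to_tree(struct tree *tree)
 *	{
 *		struct tree_desc t;
 *		struct unpack_trees_options opts;
 *
 *		memset(&opts, 0, sizeof(opts));
 *		opts.head_idx = -1;
 *		opts.update = 1;		// write results to the worktree
 *		opts.merge = 1;
 *		opts.reset = 1;			// discard local changes, like "checkout -f"
 *		opts.fn = oneway_merge;		// one tree: take data from stage1
 *		opts.src_index = &the_index;
 *		opts.dst_index = &the_index;
 *
 *		if (parse_tree(tree))
 *			return -1;
 *		init_tree_desc(&t, tree->buffer, tree->size);
 *		return unpack_trees(1, &t, &opts);
 *	}
 */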