1#include"cache.h" 2#include"argv-array.h" 3#include"repository.h" 4#include"config.h" 5#include"dir.h" 6#include"tree.h" 7#include"tree-walk.h" 8#include"cache-tree.h" 9#include"unpack-trees.h" 10#include"progress.h" 11#include"refs.h" 12#include"attr.h" 13#include"split-index.h" 14#include"dir.h" 15#include"submodule.h" 16#include"submodule-config.h" 17#include"fsmonitor.h" 18#include"object-store.h" 19#include"fetch-object.h" 20 21/* 22 * Error messages expected by scripts out of plumbing commands such as 23 * read-tree. Non-scripted Porcelain is not required to use these messages 24 * and in fact are encouraged to reword them to better suit their particular 25 * situation better. See how "git checkout" and "git merge" replaces 26 * them using setup_unpack_trees_porcelain(), for example. 27 */ 28static const char*unpack_plumbing_errors[NB_UNPACK_TREES_ERROR_TYPES] = { 29/* ERROR_WOULD_OVERWRITE */ 30"Entry '%s' would be overwritten by merge. Cannot merge.", 31 32/* ERROR_NOT_UPTODATE_FILE */ 33"Entry '%s' not uptodate. Cannot merge.", 34 35/* ERROR_NOT_UPTODATE_DIR */ 36"Updating '%s' would lose untracked files in it", 37 38/* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */ 39"Untracked working tree file '%s' would be overwritten by merge.", 40 41/* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */ 42"Untracked working tree file '%s' would be removed by merge.", 43 44/* ERROR_BIND_OVERLAP */ 45"Entry '%s' overlaps with '%s'. Cannot bind.", 46 47/* ERROR_SPARSE_NOT_UPTODATE_FILE */ 48"Entry '%s' not uptodate. Cannot update sparse checkout.", 49 50/* ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN */ 51"Working tree file '%s' would be overwritten by sparse checkout update.", 52 53/* ERROR_WOULD_LOSE_ORPHANED_REMOVED */ 54"Working tree file '%s' would be removed by sparse checkout update.", 55 56/* ERROR_WOULD_LOSE_SUBMODULE */ 57"Submodule '%s' cannot checkout new HEAD.", 58}; 59 60#define ERRORMSG(o,type) \ 61 ( ((o) && (o)->msgs[(type)]) \ 62 ? ((o)->msgs[(type)]) \ 63 : (unpack_plumbing_errors[(type)]) ) 64 65static const char*super_prefixed(const char*path) 66{ 67/* 68 * It is necessary and sufficient to have two static buffers 69 * here, as the return value of this function is fed to 70 * error() using the unpack_*_errors[] templates we see above. 
71 */ 72static struct strbuf buf[2] = {STRBUF_INIT, STRBUF_INIT}; 73static int super_prefix_len = -1; 74static unsigned idx =ARRAY_SIZE(buf) -1; 75 76if(super_prefix_len <0) { 77const char*super_prefix =get_super_prefix(); 78if(!super_prefix) { 79 super_prefix_len =0; 80}else{ 81int i; 82for(i =0; i <ARRAY_SIZE(buf); i++) 83strbuf_addstr(&buf[i], super_prefix); 84 super_prefix_len = buf[0].len; 85} 86} 87 88if(!super_prefix_len) 89return path; 90 91if(++idx >=ARRAY_SIZE(buf)) 92 idx =0; 93 94strbuf_setlen(&buf[idx], super_prefix_len); 95strbuf_addstr(&buf[idx], path); 96 97return buf[idx].buf; 98} 99 100voidsetup_unpack_trees_porcelain(struct unpack_trees_options *opts, 101const char*cmd) 102{ 103int i; 104const char**msgs = opts->msgs; 105const char*msg; 106 107argv_array_init(&opts->msgs_to_free); 108 109if(!strcmp(cmd,"checkout")) 110 msg = advice_commit_before_merge 111?_("Your local changes to the following files would be overwritten by checkout:\n%%s" 112"Please commit your changes or stash them before you switch branches.") 113:_("Your local changes to the following files would be overwritten by checkout:\n%%s"); 114else if(!strcmp(cmd,"merge")) 115 msg = advice_commit_before_merge 116?_("Your local changes to the following files would be overwritten by merge:\n%%s" 117"Please commit your changes or stash them before you merge.") 118:_("Your local changes to the following files would be overwritten by merge:\n%%s"); 119else 120 msg = advice_commit_before_merge 121?_("Your local changes to the following files would be overwritten by%s:\n%%s" 122"Please commit your changes or stash them before you%s.") 123:_("Your local changes to the following files would be overwritten by%s:\n%%s"); 124 msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] = 125argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd); 126 127 msgs[ERROR_NOT_UPTODATE_DIR] = 128_("Updating the following directories would lose untracked files in them:\n%s"); 129 130if(!strcmp(cmd,"checkout")) 131 msg = advice_commit_before_merge 132?_("The following untracked working tree files would be removed by checkout:\n%%s" 133"Please move or remove them before you switch branches.") 134:_("The following untracked working tree files would be removed by checkout:\n%%s"); 135else if(!strcmp(cmd,"merge")) 136 msg = advice_commit_before_merge 137?_("The following untracked working tree files would be removed by merge:\n%%s" 138"Please move or remove them before you merge.") 139:_("The following untracked working tree files would be removed by merge:\n%%s"); 140else 141 msg = advice_commit_before_merge 142?_("The following untracked working tree files would be removed by%s:\n%%s" 143"Please move or remove them before you%s.") 144:_("The following untracked working tree files would be removed by%s:\n%%s"); 145 msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = 146argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd); 147 148if(!strcmp(cmd,"checkout")) 149 msg = advice_commit_before_merge 150?_("The following untracked working tree files would be overwritten by checkout:\n%%s" 151"Please move or remove them before you switch branches.") 152:_("The following untracked working tree files would be overwritten by checkout:\n%%s"); 153else if(!strcmp(cmd,"merge")) 154 msg = advice_commit_before_merge 155?_("The following untracked working tree files would be overwritten by merge:\n%%s" 156"Please move or remove them before you merge.") 157:_("The following untracked working tree files would be overwritten by merge:\n%%s"); 158else 159 msg = 
advice_commit_before_merge 160?_("The following untracked working tree files would be overwritten by%s:\n%%s" 161"Please move or remove them before you%s.") 162:_("The following untracked working tree files would be overwritten by%s:\n%%s"); 163 msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = 164argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd); 165 166/* 167 * Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we 168 * cannot easily display it as a list. 169 */ 170 msgs[ERROR_BIND_OVERLAP] =_("Entry '%s' overlaps with '%s'. Cannot bind."); 171 172 msgs[ERROR_SPARSE_NOT_UPTODATE_FILE] = 173_("Cannot update sparse checkout: the following entries are not up to date:\n%s"); 174 msgs[ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN] = 175_("The following working tree files would be overwritten by sparse checkout update:\n%s"); 176 msgs[ERROR_WOULD_LOSE_ORPHANED_REMOVED] = 177_("The following working tree files would be removed by sparse checkout update:\n%s"); 178 msgs[ERROR_WOULD_LOSE_SUBMODULE] = 179_("Cannot update submodule:\n%s"); 180 181 opts->show_all_errors =1; 182/* rejected paths may not have a static buffer */ 183for(i =0; i <ARRAY_SIZE(opts->unpack_rejects); i++) 184 opts->unpack_rejects[i].strdup_strings =1; 185} 186 187voidclear_unpack_trees_porcelain(struct unpack_trees_options *opts) 188{ 189argv_array_clear(&opts->msgs_to_free); 190memset(opts->msgs,0,sizeof(opts->msgs)); 191} 192 193static intdo_add_entry(struct unpack_trees_options *o,struct cache_entry *ce, 194unsigned int set,unsigned int clear) 195{ 196 clear |= CE_HASHED; 197 198if(set & CE_REMOVE) 199 set |= CE_WT_REMOVE; 200 201 ce->ce_flags = (ce->ce_flags & ~clear) | set; 202returnadd_index_entry(&o->result, ce, 203 ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE); 204} 205 206static voidadd_entry(struct unpack_trees_options *o, 207const struct cache_entry *ce, 208unsigned int set,unsigned int clear) 209{ 210do_add_entry(o,dup_cache_entry(ce, &o->result), set, clear); 211} 212 213/* 214 * add error messages on path <path> 215 * corresponding to the type <e> with the message <msg> 216 * indicating if it should be display in porcelain or not 217 */ 218static intadd_rejected_path(struct unpack_trees_options *o, 219enum unpack_trees_error_types e, 220const char*path) 221{ 222if(!o->show_all_errors) 223returnerror(ERRORMSG(o, e),super_prefixed(path)); 224 225/* 226 * Otherwise, insert in a list for future display by 227 * display_error_msgs() 228 */ 229string_list_append(&o->unpack_rejects[e], path); 230return-1; 231} 232 233/* 234 * display all the error messages stored in a nice way 235 */ 236static voiddisplay_error_msgs(struct unpack_trees_options *o) 237{ 238int e, i; 239int something_displayed =0; 240for(e =0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) { 241struct string_list *rejects = &o->unpack_rejects[e]; 242if(rejects->nr >0) { 243struct strbuf path = STRBUF_INIT; 244 something_displayed =1; 245for(i =0; i < rejects->nr; i++) 246strbuf_addf(&path,"\t%s\n", rejects->items[i].string); 247error(ERRORMSG(o, e),super_prefixed(path.buf)); 248strbuf_release(&path); 249} 250string_list_clear(rejects,0); 251} 252if(something_displayed) 253fprintf(stderr,_("Aborting\n")); 254} 255 256static intcheck_submodule_move_head(const struct cache_entry *ce, 257const char*old_id, 258const char*new_id, 259struct unpack_trees_options *o) 260{ 261unsigned flags = SUBMODULE_MOVE_HEAD_DRY_RUN; 262const struct submodule *sub =submodule_from_ce(ce); 263 264if(!sub) 265return0; 266 267if(o->reset) 268 flags |= SUBMODULE_MOVE_HEAD_FORCE; 269 
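	/*
	 * Dry-run the HEAD switch (SUBMODULE_MOVE_HEAD_DRY_RUN is set
	 * above); if the submodule cannot move to the new HEAD cleanly,
	 * reject the path instead of touching its worktree.
	 */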
	if (submodule_move_head(ce->name, old_id, new_id, flags))
		return o->gently ? -1 :
			add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name);
	return 0;
}

/*
 * Perform the loading of the repository's gitmodules file. This function is
 * used by 'check_updates()' to perform loading of the gitmodules file in two
 * different situations:
 * (1) before removing entries from the working tree if the gitmodules file has
 *     been marked for removal. This situation is specified by 'state' == NULL.
 * (2) before checking out entries to the working tree if the gitmodules file
 *     has been marked for update. This situation is specified by 'state' != NULL.
 */
static void load_gitmodules_file(struct index_state *index,
				 struct checkout *state)
{
	int pos = index_name_pos(index, GITMODULES_FILE, strlen(GITMODULES_FILE));

	if (pos >= 0) {
		struct cache_entry *ce = index->cache[pos];
		if (!state && ce->ce_flags & CE_WT_REMOVE) {
			repo_read_gitmodules(the_repository);
		} else if (state && (ce->ce_flags & CE_UPDATE)) {
			submodule_free(the_repository);
			checkout_entry(ce, state, NULL, NULL);
			repo_read_gitmodules(the_repository);
		}
	}
}

static struct progress *get_progress(struct unpack_trees_options *o)
{
	unsigned cnt = 0, total = 0;
	struct index_state *index = &o->result;

	if (!o->update || !o->verbose_update)
		return NULL;

	for (; cnt < index->cache_nr; cnt++) {
		const struct cache_entry *ce = index->cache[cnt];
		if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE))
			total++;
	}

	return start_delayed_progress(_("Checking out files"), total);
}

static void setup_collided_checkout_detection(struct checkout *state,
					      struct index_state *index)
{
	int i;

	state->clone = 1;
	for (i = 0; i < index->cache_nr; i++)
		index->cache[i]->ce_flags &= ~CE_MATCHED;
}

static void report_collided_checkout(struct index_state *index)
{
	struct string_list list = STRING_LIST_INIT_NODUP;
	int i;

	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (!(ce->ce_flags & CE_MATCHED))
			continue;

		string_list_append(&list, ce->name);
		ce->ce_flags &= ~CE_MATCHED;
	}

	list.cmp = fspathcmp;
	string_list_sort(&list);

	if (list.nr) {
		warning(_("the following paths have collided (e.g. 
case-sensitive paths\n" 349"on a case-insensitive filesystem) and only one from the same\n" 350"colliding group is in the working tree:\n")); 351 352for(i =0; i < list.nr; i++) 353fprintf(stderr," '%s'\n", list.items[i].string); 354} 355 356string_list_clear(&list,0); 357} 358 359static intcheck_updates(struct unpack_trees_options *o) 360{ 361unsigned cnt =0; 362int errs =0; 363struct progress *progress; 364struct index_state *index = &o->result; 365struct checkout state = CHECKOUT_INIT; 366int i; 367 368trace_performance_enter(); 369 state.force =1; 370 state.quiet =1; 371 state.refresh_cache =1; 372 state.istate = index; 373 374if(o->clone) 375setup_collided_checkout_detection(&state, index); 376 377 progress =get_progress(o); 378 379if(o->update) 380git_attr_set_direction(GIT_ATTR_CHECKOUT); 381 382if(should_update_submodules() && o->update && !o->dry_run) 383load_gitmodules_file(index, NULL); 384 385for(i =0; i < index->cache_nr; i++) { 386const struct cache_entry *ce = index->cache[i]; 387 388if(ce->ce_flags & CE_WT_REMOVE) { 389display_progress(progress, ++cnt); 390if(o->update && !o->dry_run) 391unlink_entry(ce); 392} 393} 394remove_marked_cache_entries(index,0); 395remove_scheduled_dirs(); 396 397if(should_update_submodules() && o->update && !o->dry_run) 398load_gitmodules_file(index, &state); 399 400enable_delayed_checkout(&state); 401if(repository_format_partial_clone && o->update && !o->dry_run) { 402/* 403 * Prefetch the objects that are to be checked out in the loop 404 * below. 405 */ 406struct oid_array to_fetch = OID_ARRAY_INIT; 407int fetch_if_missing_store = fetch_if_missing; 408 fetch_if_missing =0; 409for(i =0; i < index->cache_nr; i++) { 410struct cache_entry *ce = index->cache[i]; 411if((ce->ce_flags & CE_UPDATE) && 412!S_ISGITLINK(ce->ce_mode)) { 413if(!has_object_file(&ce->oid)) 414oid_array_append(&to_fetch, &ce->oid); 415} 416} 417if(to_fetch.nr) 418fetch_objects(repository_format_partial_clone, 419 to_fetch.oid, to_fetch.nr); 420 fetch_if_missing = fetch_if_missing_store; 421oid_array_clear(&to_fetch); 422} 423for(i =0; i < index->cache_nr; i++) { 424struct cache_entry *ce = index->cache[i]; 425 426if(ce->ce_flags & CE_UPDATE) { 427if(ce->ce_flags & CE_WT_REMOVE) 428BUG("both update and delete flags are set on%s", 429 ce->name); 430display_progress(progress, ++cnt); 431 ce->ce_flags &= ~CE_UPDATE; 432if(o->update && !o->dry_run) { 433 errs |=checkout_entry(ce, &state, NULL, NULL); 434} 435} 436} 437stop_progress(&progress); 438 errs |=finish_delayed_checkout(&state, NULL); 439if(o->update) 440git_attr_set_direction(GIT_ATTR_CHECKIN); 441 442if(o->clone) 443report_collided_checkout(index); 444 445trace_performance_leave("check_updates"); 446return errs !=0; 447} 448 449static intverify_uptodate_sparse(const struct cache_entry *ce, 450struct unpack_trees_options *o); 451static intverify_absent_sparse(const struct cache_entry *ce, 452enum unpack_trees_error_types, 453struct unpack_trees_options *o); 454 455static intapply_sparse_checkout(struct index_state *istate, 456struct cache_entry *ce, 457struct unpack_trees_options *o) 458{ 459int was_skip_worktree =ce_skip_worktree(ce); 460 461if(ce->ce_flags & CE_NEW_SKIP_WORKTREE) 462 ce->ce_flags |= CE_SKIP_WORKTREE; 463else 464 ce->ce_flags &= ~CE_SKIP_WORKTREE; 465if(was_skip_worktree !=ce_skip_worktree(ce)) { 466 ce->ce_flags |= CE_UPDATE_IN_BASE; 467mark_fsmonitor_invalid(istate, ce); 468 istate->cache_changed |= CE_ENTRY_CHANGED; 469} 470 471/* 472 * if (!was_skip_worktree && !ce_skip_worktree()) { 473 * This is 
perfectly normal. Move on;
	 * }
	 */

	/*
	 * Merge strategies may set CE_UPDATE|CE_REMOVE outside the checkout
	 * area as a result of ce_skip_worktree() shortcuts in
	 * verify_absent() and verify_uptodate().
	 * Make sure they don't modify the worktree if they are already
	 * outside the checkout area.
	 */
	if (was_skip_worktree && ce_skip_worktree(ce)) {
		ce->ce_flags &= ~CE_UPDATE;

		/*
		 * By default, when CE_REMOVE is on, CE_WT_REMOVE is also
		 * on to get that file removed from both index and worktree.
		 * If that file is already outside the worktree area, don't
		 * bother removing it.
		 */
		if (ce->ce_flags & CE_REMOVE)
			ce->ce_flags &= ~CE_WT_REMOVE;
	}

	if (!was_skip_worktree && ce_skip_worktree(ce)) {
		/*
		 * If CE_UPDATE is set, verify_uptodate() must have been
		 * called already; also, stat info may have been lost after
		 * merged_entry(), so calling verify_uptodate() again may fail.
		 */
		if (!(ce->ce_flags & CE_UPDATE) && verify_uptodate_sparse(ce, o))
			return -1;
		ce->ce_flags |= CE_WT_REMOVE;
		ce->ce_flags &= ~CE_UPDATE;
	}
	if (was_skip_worktree && !ce_skip_worktree(ce)) {
		if (verify_absent_sparse(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o))
			return -1;
		ce->ce_flags |= CE_UPDATE;
	}
	return 0;
}

static inline int call_unpack_fn(const struct cache_entry * const *src,
				 struct unpack_trees_options *o)
{
	int ret = o->fn(src, o);
	if (ret > 0)
		ret = 0;
	return ret;
}

static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o)
{
	ce->ce_flags |= CE_UNPACKED;

	if (o->cache_bottom < o->src_index->cache_nr &&
	    o->src_index->cache[o->cache_bottom] == ce) {
		int bottom = o->cache_bottom;
		while (bottom < o->src_index->cache_nr &&
		       o->src_index->cache[bottom]->ce_flags & CE_UNPACKED)
			bottom++;
		o->cache_bottom = bottom;
	}
}

static void mark_all_ce_unused(struct index_state *index)
{
	int i;
	for (i = 0; i < index->cache_nr; i++)
		index->cache[i]->ce_flags &= ~(CE_UNPACKED | CE_ADDED | CE_NEW_SKIP_WORKTREE);
}

static int locate_in_src_index(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);
	if (pos < 0)
		pos = -1 - pos;
	return pos;
}

/*
 * We call unpack_index_entry() with an unmerged cache entry
 * only in diff-index, and it wants a single callback. Skip
 * the other unmerged entry with the same name.
561 */ 562static voidmark_ce_used_same_name(struct cache_entry *ce, 563struct unpack_trees_options *o) 564{ 565struct index_state *index = o->src_index; 566int len =ce_namelen(ce); 567int pos; 568 569for(pos =locate_in_src_index(ce, o); pos < index->cache_nr; pos++) { 570struct cache_entry *next = index->cache[pos]; 571if(len !=ce_namelen(next) || 572memcmp(ce->name, next->name, len)) 573break; 574mark_ce_used(next, o); 575} 576} 577 578static struct cache_entry *next_cache_entry(struct unpack_trees_options *o) 579{ 580const struct index_state *index = o->src_index; 581int pos = o->cache_bottom; 582 583while(pos < index->cache_nr) { 584struct cache_entry *ce = index->cache[pos]; 585if(!(ce->ce_flags & CE_UNPACKED)) 586return ce; 587 pos++; 588} 589return NULL; 590} 591 592static voidadd_same_unmerged(const struct cache_entry *ce, 593struct unpack_trees_options *o) 594{ 595struct index_state *index = o->src_index; 596int len =ce_namelen(ce); 597int pos =index_name_pos(index, ce->name, len); 598 599if(0<= pos) 600die("programming error in a caller of mark_ce_used_same_name"); 601for(pos = -pos -1; pos < index->cache_nr; pos++) { 602struct cache_entry *next = index->cache[pos]; 603if(len !=ce_namelen(next) || 604memcmp(ce->name, next->name, len)) 605break; 606add_entry(o, next,0,0); 607mark_ce_used(next, o); 608} 609} 610 611static intunpack_index_entry(struct cache_entry *ce, 612struct unpack_trees_options *o) 613{ 614const struct cache_entry *src[MAX_UNPACK_TREES +1] = { NULL, }; 615int ret; 616 617 src[0] = ce; 618 619mark_ce_used(ce, o); 620if(ce_stage(ce)) { 621if(o->skip_unmerged) { 622add_entry(o, ce,0,0); 623return0; 624} 625} 626 ret =call_unpack_fn(src, o); 627if(ce_stage(ce)) 628mark_ce_used_same_name(ce, o); 629return ret; 630} 631 632static intfind_cache_pos(struct traverse_info *,const struct name_entry *); 633 634static voidrestore_cache_bottom(struct traverse_info *info,int bottom) 635{ 636struct unpack_trees_options *o = info->data; 637 638if(o->diff_index_cached) 639return; 640 o->cache_bottom = bottom; 641} 642 643static intswitch_cache_bottom(struct traverse_info *info) 644{ 645struct unpack_trees_options *o = info->data; 646int ret, pos; 647 648if(o->diff_index_cached) 649return0; 650 ret = o->cache_bottom; 651 pos =find_cache_pos(info->prev, &info->name); 652 653if(pos < -1) 654 o->cache_bottom = -2- pos; 655else if(pos <0) 656 o->cache_bottom = o->src_index->cache_nr; 657return ret; 658} 659 660staticinlineintare_same_oid(struct name_entry *name_j,struct name_entry *name_k) 661{ 662return!is_null_oid(&name_j->oid) && !is_null_oid(&name_k->oid) &&oideq(&name_j->oid, &name_k->oid); 663} 664 665static intall_trees_same_as_cache_tree(int n,unsigned long dirmask, 666struct name_entry *names, 667struct traverse_info *info) 668{ 669struct unpack_trees_options *o = info->data; 670int i; 671 672if(!o->merge || dirmask != ((1<< n) -1)) 673return0; 674 675for(i =1; i < n; i++) 676if(!are_same_oid(names, names + i)) 677return0; 678 679returncache_tree_matches_traversal(o->src_index->cache_tree, names, info); 680} 681 682static intindex_pos_by_traverse_info(struct name_entry *names, 683struct traverse_info *info) 684{ 685struct unpack_trees_options *o = info->data; 686int len =traverse_path_len(info, names); 687char*name =xmalloc(len +1/* slash */+1/* NUL */); 688int pos; 689 690make_traverse_path(name, info, names); 691 name[len++] ='/'; 692 name[len] ='\0'; 693 pos =index_name_pos(o->src_index, name, len); 694if(pos >=0) 695BUG("This is a directory and should not exist in index"); 
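	/*
	 * index_name_pos() reports "not found" as -insert_pos - 1; undo
	 * that encoding to get the first index entry under this directory.
	 */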
696 pos = -pos -1; 697if(!starts_with(o->src_index->cache[pos]->name, name) || 698(pos >0&&starts_with(o->src_index->cache[pos-1]->name, name))) 699BUG("pos must point at the first entry in this directory"); 700free(name); 701return pos; 702} 703 704/* 705 * Fast path if we detect that all trees are the same as cache-tree at this 706 * path. We'll walk these trees in an iterative loop using cache-tree/index 707 * instead of ODB since we already know what these trees contain. 708 */ 709static inttraverse_by_cache_tree(int pos,int nr_entries,int nr_names, 710struct name_entry *names, 711struct traverse_info *info) 712{ 713struct cache_entry *src[MAX_UNPACK_TREES +1] = { NULL, }; 714struct unpack_trees_options *o = info->data; 715struct cache_entry *tree_ce = NULL; 716int ce_len =0; 717int i, d; 718 719if(!o->merge) 720BUG("We need cache-tree to do this optimization"); 721 722/* 723 * Do what unpack_callback() and unpack_nondirectories() normally 724 * do. But we walk all paths in an iterative loop instead. 725 * 726 * D/F conflicts and higher stage entries are not a concern 727 * because cache-tree would be invalidated and we would never 728 * get here in the first place. 729 */ 730for(i =0; i < nr_entries; i++) { 731int new_ce_len, len, rc; 732 733 src[0] = o->src_index->cache[pos + i]; 734 735 len =ce_namelen(src[0]); 736 new_ce_len =cache_entry_size(len); 737 738if(new_ce_len > ce_len) { 739 new_ce_len <<=1; 740 tree_ce =xrealloc(tree_ce, new_ce_len); 741memset(tree_ce,0, new_ce_len); 742 ce_len = new_ce_len; 743 744 tree_ce->ce_flags =create_ce_flags(0); 745 746for(d =1; d <= nr_names; d++) 747 src[d] = tree_ce; 748} 749 750 tree_ce->ce_mode = src[0]->ce_mode; 751 tree_ce->ce_namelen = len; 752oidcpy(&tree_ce->oid, &src[0]->oid); 753memcpy(tree_ce->name, src[0]->name, len +1); 754 755 rc =call_unpack_fn((const struct cache_entry *const*)src, o); 756if(rc <0) { 757free(tree_ce); 758return rc; 759} 760 761mark_ce_used(src[0], o); 762} 763free(tree_ce); 764if(o->debug_unpack) 765printf("Unpacked%dentries from%sto%susing cache-tree\n", 766 nr_entries, 767 o->src_index->cache[pos]->name, 768 o->src_index->cache[pos + nr_entries -1]->name); 769return0; 770} 771 772static inttraverse_trees_recursive(int n,unsigned long dirmask, 773unsigned long df_conflicts, 774struct name_entry *names, 775struct traverse_info *info) 776{ 777struct unpack_trees_options *o = info->data; 778int i, ret, bottom; 779int nr_buf =0; 780struct tree_desc t[MAX_UNPACK_TREES]; 781void*buf[MAX_UNPACK_TREES]; 782struct traverse_info newinfo; 783struct name_entry *p; 784int nr_entries; 785 786 nr_entries =all_trees_same_as_cache_tree(n, dirmask, names, info); 787if(nr_entries >0) { 788int pos =index_pos_by_traverse_info(names, info); 789 790if(!o->merge || df_conflicts) 791BUG("Wrong condition to get here buddy"); 792 793/* 794 * All entries up to 'pos' must have been processed 795 * (i.e. marked CE_UNPACKED) at this point. But to be safe, 796 * save and restore cache_bottom anyway to not miss 797 * unprocessed entries before 'pos'. 798 */ 799 bottom = o->cache_bottom; 800 ret =traverse_by_cache_tree(pos, nr_entries, n, names, info); 801 o->cache_bottom = bottom; 802return ret; 803} 804 805 p = names; 806while(!p->mode) 807 p++; 808 809 newinfo = *info; 810 newinfo.prev = info; 811 newinfo.pathspec = info->pathspec; 812 newinfo.name = *p; 813 newinfo.pathlen +=tree_entry_len(p) +1; 814 newinfo.df_conflicts |= df_conflicts; 815 816/* 817 * Fetch the tree from the ODB for each peer directory in the 818 * n commits. 
819 * 820 * For 2- and 3-way traversals, we try to avoid hitting the 821 * ODB twice for the same OID. This should yield a nice speed 822 * up in checkouts and merges when the commits are similar. 823 * 824 * We don't bother doing the full O(n^2) search for larger n, 825 * because wider traversals don't happen that often and we 826 * avoid the search setup. 827 * 828 * When 2 peer OIDs are the same, we just copy the tree 829 * descriptor data. This implicitly borrows the buffer 830 * data from the earlier cell. 831 */ 832for(i =0; i < n; i++, dirmask >>=1) { 833if(i >0&&are_same_oid(&names[i], &names[i -1])) 834 t[i] = t[i -1]; 835else if(i >1&&are_same_oid(&names[i], &names[i -2])) 836 t[i] = t[i -2]; 837else{ 838const struct object_id *oid = NULL; 839if(dirmask &1) 840 oid = &names[i].oid; 841 buf[nr_buf++] =fill_tree_descriptor(t + i, oid); 842} 843} 844 845 bottom =switch_cache_bottom(&newinfo); 846 ret =traverse_trees(o->src_index, n, t, &newinfo); 847restore_cache_bottom(&newinfo, bottom); 848 849for(i =0; i < nr_buf; i++) 850free(buf[i]); 851 852return ret; 853} 854 855/* 856 * Compare the traverse-path to the cache entry without actually 857 * having to generate the textual representation of the traverse 858 * path. 859 * 860 * NOTE! This *only* compares up to the size of the traverse path 861 * itself - the caller needs to do the final check for the cache 862 * entry having more data at the end! 863 */ 864static intdo_compare_entry_piecewise(const struct cache_entry *ce,const struct traverse_info *info,const struct name_entry *n) 865{ 866int len, pathlen, ce_len; 867const char*ce_name; 868 869if(info->prev) { 870int cmp =do_compare_entry_piecewise(ce, info->prev, 871&info->name); 872if(cmp) 873return cmp; 874} 875 pathlen = info->pathlen; 876 ce_len =ce_namelen(ce); 877 878/* If ce_len < pathlen then we must have previously hit "name == directory" entry */ 879if(ce_len < pathlen) 880return-1; 881 882 ce_len -= pathlen; 883 ce_name = ce->name + pathlen; 884 885 len =tree_entry_len(n); 886returndf_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode); 887} 888 889static intdo_compare_entry(const struct cache_entry *ce, 890const struct traverse_info *info, 891const struct name_entry *n) 892{ 893int len, pathlen, ce_len; 894const char*ce_name; 895int cmp; 896 897/* 898 * If we have not precomputed the traverse path, it is quicker 899 * to avoid doing so. But if we have precomputed it, 900 * it is quicker to use the precomputed version. 901 */ 902if(!info->traverse_path) 903returndo_compare_entry_piecewise(ce, info, n); 904 905 cmp =strncmp(ce->name, info->traverse_path, info->pathlen); 906if(cmp) 907return cmp; 908 909 pathlen = info->pathlen; 910 ce_len =ce_namelen(ce); 911 912if(ce_len < pathlen) 913return-1; 914 915 ce_len -= pathlen; 916 ce_name = ce->name + pathlen; 917 918 len =tree_entry_len(n); 919returndf_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode); 920} 921 922static intcompare_entry(const struct cache_entry *ce,const struct traverse_info *info,const struct name_entry *n) 923{ 924int cmp =do_compare_entry(ce, info, n); 925if(cmp) 926return cmp; 927 928/* 929 * Even if the beginning compared identically, the ce should 930 * compare as bigger than a directory leading up to it! 
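	 * (do_compare_entry() above only checked up to the length of the
	 * traverse path; a longer cache entry name means it has more data
	 * at the end and therefore sorts after the directory.)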
931 */ 932returnce_namelen(ce) >traverse_path_len(info, n); 933} 934 935static intce_in_traverse_path(const struct cache_entry *ce, 936const struct traverse_info *info) 937{ 938if(!info->prev) 939return1; 940if(do_compare_entry(ce, info->prev, &info->name)) 941return0; 942/* 943 * If ce (blob) is the same name as the path (which is a tree 944 * we will be descending into), it won't be inside it. 945 */ 946return(info->pathlen <ce_namelen(ce)); 947} 948 949static struct cache_entry *create_ce_entry(const struct traverse_info *info, 950const struct name_entry *n, 951int stage, 952struct index_state *istate, 953int is_transient) 954{ 955int len =traverse_path_len(info, n); 956struct cache_entry *ce = 957 is_transient ? 958make_empty_transient_cache_entry(len) : 959make_empty_cache_entry(istate, len); 960 961 ce->ce_mode =create_ce_mode(n->mode); 962 ce->ce_flags =create_ce_flags(stage); 963 ce->ce_namelen = len; 964oidcpy(&ce->oid, &n->oid); 965make_traverse_path(ce->name, info, n); 966 967return ce; 968} 969 970/* 971 * Note that traverse_by_cache_tree() duplicates some logic in this function 972 * without actually calling it. If you change the logic here you may need to 973 * check and change there as well. 974 */ 975static intunpack_nondirectories(int n,unsigned long mask, 976unsigned long dirmask, 977struct cache_entry **src, 978const struct name_entry *names, 979const struct traverse_info *info) 980{ 981int i; 982struct unpack_trees_options *o = info->data; 983unsigned long conflicts = info->df_conflicts | dirmask; 984 985/* Do we have *only* directories? Nothing to do */ 986if(mask == dirmask && !src[0]) 987return0; 988 989/* 990 * Ok, we've filled in up to any potential index entry in src[0], 991 * now do the rest. 992 */ 993for(i =0; i < n; i++) { 994int stage; 995unsigned int bit =1ul<< i; 996if(conflicts & bit) { 997 src[i + o->merge] = o->df_conflict_entry; 998continue; 999}1000if(!(mask & bit))1001continue;1002if(!o->merge)1003 stage =0;1004else if(i +1< o->head_idx)1005 stage =1;1006else if(i +1> o->head_idx)1007 stage =3;1008else1009 stage =2;10101011/*1012 * If the merge bit is set, then the cache entries are1013 * discarded in the following block. In this case,1014 * construct "transient" cache_entries, as they are1015 * not stored in the index. otherwise construct the1016 * cache entry from the index aware logic.1017 */1018 src[i + o->merge] =create_ce_entry(info, names + i, stage, &o->result, o->merge);1019}10201021if(o->merge) {1022int rc =call_unpack_fn((const struct cache_entry *const*)src,1023 o);1024for(i =0; i < n; i++) {1025struct cache_entry *ce = src[i + o->merge];1026if(ce != o->df_conflict_entry)1027discard_cache_entry(ce);1028}1029return rc;1030}10311032for(i =0; i < n; i++)1033if(src[i] && src[i] != o->df_conflict_entry)1034if(do_add_entry(o, src[i],0,0))1035return-1;10361037return0;1038}10391040static intunpack_failed(struct unpack_trees_options *o,const char*message)1041{1042discard_index(&o->result);1043if(!o->gently && !o->exiting_early) {1044if(message)1045returnerror("%s", message);1046return-1;1047}1048return-1;1049}10501051/*1052 * The tree traversal is looking at name p. If we have a matching entry,1053 * return it. 
If name p is a directory in the index, do not return1054 * anything, as we will want to match it when the traversal descends into1055 * the directory.1056 */1057static intfind_cache_pos(struct traverse_info *info,1058const struct name_entry *p)1059{1060int pos;1061struct unpack_trees_options *o = info->data;1062struct index_state *index = o->src_index;1063int pfxlen = info->pathlen;1064int p_len =tree_entry_len(p);10651066for(pos = o->cache_bottom; pos < index->cache_nr; pos++) {1067const struct cache_entry *ce = index->cache[pos];1068const char*ce_name, *ce_slash;1069int cmp, ce_len;10701071if(ce->ce_flags & CE_UNPACKED) {1072/*1073 * cache_bottom entry is already unpacked, so1074 * we can never match it; don't check it1075 * again.1076 */1077if(pos == o->cache_bottom)1078++o->cache_bottom;1079continue;1080}1081if(!ce_in_traverse_path(ce, info)) {1082/*1083 * Check if we can skip future cache checks1084 * (because we're already past all possible1085 * entries in the traverse path).1086 */1087if(info->traverse_path) {1088if(strncmp(ce->name, info->traverse_path,1089 info->pathlen) >0)1090break;1091}1092continue;1093}1094 ce_name = ce->name + pfxlen;1095 ce_slash =strchr(ce_name,'/');1096if(ce_slash)1097 ce_len = ce_slash - ce_name;1098else1099 ce_len =ce_namelen(ce) - pfxlen;1100 cmp =name_compare(p->path, p_len, ce_name, ce_len);1101/*1102 * Exact match; if we have a directory we need to1103 * delay returning it.1104 */1105if(!cmp)1106return ce_slash ? -2- pos : pos;1107if(0< cmp)1108continue;/* keep looking */1109/*1110 * ce_name sorts after p->path; could it be that we1111 * have files under p->path directory in the index?1112 * E.g. ce_name == "t-i", and p->path == "t"; we may1113 * have "t/a" in the index.1114 */1115if(p_len < ce_len && !memcmp(ce_name, p->path, p_len) &&1116 ce_name[p_len] <'/')1117continue;/* keep looking */1118break;1119}1120return-1;1121}11221123static struct cache_entry *find_cache_entry(struct traverse_info *info,1124const struct name_entry *p)1125{1126int pos =find_cache_pos(info, p);1127struct unpack_trees_options *o = info->data;11281129if(0<= pos)1130return o->src_index->cache[pos];1131else1132return NULL;1133}11341135static voiddebug_path(struct traverse_info *info)1136{1137if(info->prev) {1138debug_path(info->prev);1139if(*info->prev->name.path)1140putchar('/');1141}1142printf("%s", info->name.path);1143}11441145static voiddebug_name_entry(int i,struct name_entry *n)1146{1147printf("ent#%d %06o%s\n", i,1148 n->path ? n->mode :0,1149 n->path ? n->path :"(missing)");1150}11511152static voiddebug_unpack_callback(int n,1153unsigned long mask,1154unsigned long dirmask,1155struct name_entry *names,1156struct traverse_info *info)1157{1158int i;1159printf("* unpack mask%lu, dirmask%lu, cnt%d",1160 mask, dirmask, n);1161debug_path(info);1162putchar('\n');1163for(i =0; i < n; i++)1164debug_name_entry(i, names + i);1165}11661167/*1168 * Note that traverse_by_cache_tree() duplicates some logic in this function1169 * without actually calling it. 
If you change the logic here you may need to1170 * check and change there as well.1171 */1172static intunpack_callback(int n,unsigned long mask,unsigned long dirmask,struct name_entry *names,struct traverse_info *info)1173{1174struct cache_entry *src[MAX_UNPACK_TREES +1] = { NULL, };1175struct unpack_trees_options *o = info->data;1176const struct name_entry *p = names;11771178/* Find first entry with a real name (we could use "mask" too) */1179while(!p->mode)1180 p++;11811182if(o->debug_unpack)1183debug_unpack_callback(n, mask, dirmask, names, info);11841185/* Are we supposed to look at the index too? */1186if(o->merge) {1187while(1) {1188int cmp;1189struct cache_entry *ce;11901191if(o->diff_index_cached)1192 ce =next_cache_entry(o);1193else1194 ce =find_cache_entry(info, p);11951196if(!ce)1197break;1198 cmp =compare_entry(ce, info, p);1199if(cmp <0) {1200if(unpack_index_entry(ce, o) <0)1201returnunpack_failed(o, NULL);1202continue;1203}1204if(!cmp) {1205if(ce_stage(ce)) {1206/*1207 * If we skip unmerged index1208 * entries, we'll skip this1209 * entry *and* the tree1210 * entries associated with it!1211 */1212if(o->skip_unmerged) {1213add_same_unmerged(ce, o);1214return mask;1215}1216}1217 src[0] = ce;1218}1219break;1220}1221}12221223if(unpack_nondirectories(n, mask, dirmask, src, names, info) <0)1224return-1;12251226if(o->merge && src[0]) {1227if(ce_stage(src[0]))1228mark_ce_used_same_name(src[0], o);1229else1230mark_ce_used(src[0], o);1231}12321233/* Now handle any directories.. */1234if(dirmask) {1235/* special case: "diff-index --cached" looking at a tree */1236if(o->diff_index_cached &&1237 n ==1&& dirmask ==1&&S_ISDIR(names->mode)) {1238int matches;1239 matches =cache_tree_matches_traversal(o->src_index->cache_tree,1240 names, info);1241/*1242 * Everything under the name matches; skip the1243 * entire hierarchy. diff_index_cached codepath1244 * special cases D/F conflicts in such a way that1245 * it does not do any look-ahead, so this is safe.1246 */1247if(matches) {1248 o->cache_bottom += matches;1249return mask;1250}1251}12521253if(traverse_trees_recursive(n, dirmask, mask & ~dirmask,1254 names, info) <0)1255return-1;1256return mask;1257}12581259return mask;1260}12611262static intclear_ce_flags_1(struct index_state *istate,1263struct cache_entry **cache,int nr,1264struct strbuf *prefix,1265int select_mask,int clear_mask,1266struct exclude_list *el,int defval);12671268/* Whole directory matching */1269static intclear_ce_flags_dir(struct index_state *istate,1270struct cache_entry **cache,int nr,1271struct strbuf *prefix,1272char*basename,1273int select_mask,int clear_mask,1274struct exclude_list *el,int defval)1275{1276struct cache_entry **cache_end;1277int dtype = DT_DIR;1278int ret =is_excluded_from_list(prefix->buf, prefix->len,1279 basename, &dtype, el, istate);1280int rc;12811282strbuf_addch(prefix,'/');12831284/* If undecided, use matching result of parent dir in defval */1285if(ret <0)1286 ret = defval;12871288for(cache_end = cache; cache_end != cache + nr; cache_end++) {1289struct cache_entry *ce = *cache_end;1290if(strncmp(ce->name, prefix->buf, prefix->len))1291break;1292}12931294/*1295 * TODO: check el, if there are no patterns that may conflict1296 * with ret (iow, we know in advance the incl/excl1297 * decision for the entire directory), clear flag here without1298 * calling clear_ce_flags_1(). 
That function will call1299 * the expensive is_excluded_from_list() on every entry.1300 */1301 rc =clear_ce_flags_1(istate, cache, cache_end - cache,1302 prefix,1303 select_mask, clear_mask,1304 el, ret);1305strbuf_setlen(prefix, prefix->len -1);1306return rc;1307}13081309/*1310 * Traverse the index, find every entry that matches according to1311 * o->el. Do "ce_flags &= ~clear_mask" on those entries. Return the1312 * number of traversed entries.1313 *1314 * If select_mask is non-zero, only entries whose ce_flags has on of1315 * those bits enabled are traversed.1316 *1317 * cache : pointer to an index entry1318 * prefix_len : an offset to its path1319 *1320 * The current path ("prefix") including the trailing '/' is1321 * cache[0]->name[0..(prefix_len-1)]1322 * Top level path has prefix_len zero.1323 */1324static intclear_ce_flags_1(struct index_state *istate,1325struct cache_entry **cache,int nr,1326struct strbuf *prefix,1327int select_mask,int clear_mask,1328struct exclude_list *el,int defval)1329{1330struct cache_entry **cache_end = cache + nr;13311332/*1333 * Process all entries that have the given prefix and meet1334 * select_mask condition1335 */1336while(cache != cache_end) {1337struct cache_entry *ce = *cache;1338const char*name, *slash;1339int len, dtype, ret;13401341if(select_mask && !(ce->ce_flags & select_mask)) {1342 cache++;1343continue;1344}13451346if(prefix->len &&strncmp(ce->name, prefix->buf, prefix->len))1347break;13481349 name = ce->name + prefix->len;1350 slash =strchr(name,'/');13511352/* If it's a directory, try whole directory match first */1353if(slash) {1354int processed;13551356 len = slash - name;1357strbuf_add(prefix, name, len);13581359 processed =clear_ce_flags_dir(istate, cache, cache_end - cache,1360 prefix,1361 prefix->buf + prefix->len - len,1362 select_mask, clear_mask,1363 el, defval);13641365/* clear_c_f_dir eats a whole dir already? */1366if(processed) {1367 cache += processed;1368strbuf_setlen(prefix, prefix->len - len);1369continue;1370}13711372strbuf_addch(prefix,'/');1373 cache +=clear_ce_flags_1(istate, cache, cache_end - cache,1374 prefix,1375 select_mask, clear_mask, el, defval);1376strbuf_setlen(prefix, prefix->len - len -1);1377continue;1378}13791380/* Non-directory */1381 dtype =ce_to_dtype(ce);1382 ret =is_excluded_from_list(ce->name,ce_namelen(ce),1383 name, &dtype, el, istate);1384if(ret <0)1385 ret = defval;1386if(ret >0)1387 ce->ce_flags &= ~clear_mask;1388 cache++;1389}1390return nr - (cache_end - cache);1391}13921393static intclear_ce_flags(struct index_state *istate,1394int select_mask,int clear_mask,1395struct exclude_list *el)1396{1397static struct strbuf prefix = STRBUF_INIT;13981399strbuf_reset(&prefix);14001401returnclear_ce_flags_1(istate,1402 istate->cache,1403 istate->cache_nr,1404&prefix,1405 select_mask, clear_mask,1406 el,0);1407}14081409/*1410 * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout1411 */1412static voidmark_new_skip_worktree(struct exclude_list *el,1413struct index_state *istate,1414int select_flag,int skip_wt_flag)1415{1416int i;14171418/*1419 * 1. Pretend the narrowest worktree: only unmerged entries1420 * are checked out1421 */1422for(i =0; i < istate->cache_nr; i++) {1423struct cache_entry *ce = istate->cache[i];14241425if(select_flag && !(ce->ce_flags & select_flag))1426continue;14271428if(!ce_stage(ce) && !(ce->ce_flags & CE_CONFLICTED))1429 ce->ce_flags |= skip_wt_flag;1430else1431 ce->ce_flags &= ~skip_wt_flag;1432}14331434/*1435 * 2. 
Widen worktree according to sparse-checkout file.1436 * Matched entries will have skip_wt_flag cleared (i.e. "in")1437 */1438clear_ce_flags(istate, select_flag, skip_wt_flag, el);1439}14401441static intverify_absent(const struct cache_entry *,1442enum unpack_trees_error_types,1443struct unpack_trees_options *);1444/*1445 * N-way merge "len" trees. Returns 0 on success, -1 on failure to manipulate the1446 * resulting index, -2 on failure to reflect the changes to the work tree.1447 *1448 * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally1449 */1450intunpack_trees(unsigned len,struct tree_desc *t,struct unpack_trees_options *o)1451{1452int i, ret;1453static struct cache_entry *dfc;1454struct exclude_list el;14551456if(len > MAX_UNPACK_TREES)1457die("unpack_trees takes at most%dtrees", MAX_UNPACK_TREES);14581459trace_performance_enter();1460memset(&el,0,sizeof(el));1461if(!core_apply_sparse_checkout || !o->update)1462 o->skip_sparse_checkout =1;1463if(!o->skip_sparse_checkout) {1464char*sparse =git_pathdup("info/sparse-checkout");1465if(add_excludes_from_file_to_list(sparse,"",0, &el, NULL) <0)1466 o->skip_sparse_checkout =1;1467else1468 o->el = ⪙1469free(sparse);1470}14711472memset(&o->result,0,sizeof(o->result));1473 o->result.initialized =1;1474 o->result.timestamp.sec = o->src_index->timestamp.sec;1475 o->result.timestamp.nsec = o->src_index->timestamp.nsec;1476 o->result.version = o->src_index->version;1477if(!o->src_index->split_index) {1478 o->result.split_index = NULL;1479}else if(o->src_index == o->dst_index) {1480/*1481 * o->dst_index (and thus o->src_index) will be discarded1482 * and overwritten with o->result at the end of this function,1483 * so just use src_index's split_index to avoid having to1484 * create a new one.1485 */1486 o->result.split_index = o->src_index->split_index;1487 o->result.split_index->refcount++;1488}else{1489 o->result.split_index =init_split_index(&o->result);1490}1491oidcpy(&o->result.oid, &o->src_index->oid);1492 o->merge_size = len;1493mark_all_ce_unused(o->src_index);14941495/*1496 * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries1497 */1498if(!o->skip_sparse_checkout)1499mark_new_skip_worktree(o->el, o->src_index,0, CE_NEW_SKIP_WORKTREE);15001501if(!dfc)1502 dfc =xcalloc(1,cache_entry_size(0));1503 o->df_conflict_entry = dfc;15041505if(len) {1506const char*prefix = o->prefix ? o->prefix :"";1507struct traverse_info info;15081509setup_traverse_info(&info, prefix);1510 info.fn = unpack_callback;1511 info.data = o;1512 info.show_all_errors = o->show_all_errors;1513 info.pathspec = o->pathspec;15141515if(o->prefix) {1516/*1517 * Unpack existing index entries that sort before the1518 * prefix the tree is spliced into. Note that o->merge1519 * is always true in this case.1520 */1521while(1) {1522struct cache_entry *ce =next_cache_entry(o);1523if(!ce)1524break;1525if(ce_in_traverse_path(ce, &info))1526break;1527if(unpack_index_entry(ce, o) <0)1528goto return_failed;1529}1530}15311532trace_performance_enter();1533 ret =traverse_trees(o->src_index, len, t, &info);1534trace_performance_leave("traverse_trees");1535if(ret <0)1536goto return_failed;1537}15381539/* Any left-over entries in the index? 
*/1540if(o->merge) {1541while(1) {1542struct cache_entry *ce =next_cache_entry(o);1543if(!ce)1544break;1545if(unpack_index_entry(ce, o) <0)1546goto return_failed;1547}1548}1549mark_all_ce_unused(o->src_index);15501551if(o->trivial_merges_only && o->nontrivial_merge) {1552 ret =unpack_failed(o,"Merge requires file-level merging");1553goto done;1554}15551556if(!o->skip_sparse_checkout) {1557int empty_worktree =1;15581559/*1560 * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #11561 * If the will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE1562 * so apply_sparse_checkout() won't attempt to remove it from worktree1563 */1564mark_new_skip_worktree(o->el, &o->result, CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);15651566 ret =0;1567for(i =0; i < o->result.cache_nr; i++) {1568struct cache_entry *ce = o->result.cache[i];15691570/*1571 * Entries marked with CE_ADDED in merged_entry() do not have1572 * verify_absent() check (the check is effectively disabled1573 * because CE_NEW_SKIP_WORKTREE is set unconditionally).1574 *1575 * Do the real check now because we have had1576 * correct CE_NEW_SKIP_WORKTREE1577 */1578if(ce->ce_flags & CE_ADDED &&1579verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {1580if(!o->show_all_errors)1581goto return_failed;1582 ret = -1;1583}15841585if(apply_sparse_checkout(&o->result, ce, o)) {1586if(!o->show_all_errors)1587goto return_failed;1588 ret = -1;1589}1590if(!ce_skip_worktree(ce))1591 empty_worktree =0;15921593}1594if(ret <0)1595goto return_failed;1596/*1597 * Sparse checkout is meant to narrow down checkout area1598 * but it does not make sense to narrow down to empty working1599 * tree. This is usually a mistake in sparse checkout rules.1600 * Do not allow users to do that.1601 */1602if(o->result.cache_nr && empty_worktree) {1603 ret =unpack_failed(o,"Sparse checkout leaves no entry on working directory");1604goto done;1605}1606}16071608 ret =check_updates(o) ? (-2) :0;1609if(o->dst_index) {1610move_index_extensions(&o->result, o->src_index);1611if(!ret) {1612if(git_env_bool("GIT_TEST_CHECK_CACHE_TREE",0))1613cache_tree_verify(the_repository, &o->result);1614if(!o->result.cache_tree)1615 o->result.cache_tree =cache_tree();1616if(!cache_tree_fully_valid(o->result.cache_tree))1617cache_tree_update(&o->result,1618 WRITE_TREE_SILENT |1619 WRITE_TREE_REPAIR);1620}1621discard_index(o->dst_index);1622*o->dst_index = o->result;1623}else{1624discard_index(&o->result);1625}1626 o->src_index = NULL;16271628done:1629trace_performance_leave("unpack_trees");1630clear_exclude_list(&el);1631return ret;16321633return_failed:1634if(o->show_all_errors)1635display_error_msgs(o);1636mark_all_ce_unused(o->src_index);1637 ret =unpack_failed(o, NULL);1638if(o->exiting_early)1639 ret =0;1640goto done;1641}16421643/* Here come the merge functions */16441645static intreject_merge(const struct cache_entry *ce,1646struct unpack_trees_options *o)1647{1648return o->gently ? 
-1:1649add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);1650}16511652static intsame(const struct cache_entry *a,const struct cache_entry *b)1653{1654if(!!a != !!b)1655return0;1656if(!a && !b)1657return1;1658if((a->ce_flags | b->ce_flags) & CE_CONFLICTED)1659return0;1660return a->ce_mode == b->ce_mode &&1661oideq(&a->oid, &b->oid);1662}166316641665/*1666 * When a CE gets turned into an unmerged entry, we1667 * want it to be up-to-date1668 */1669static intverify_uptodate_1(const struct cache_entry *ce,1670struct unpack_trees_options *o,1671enum unpack_trees_error_types error_type)1672{1673struct stat st;16741675if(o->index_only)1676return0;16771678/*1679 * CE_VALID and CE_SKIP_WORKTREE cheat, we better check again1680 * if this entry is truly up-to-date because this file may be1681 * overwritten.1682 */1683if((ce->ce_flags & CE_VALID) ||ce_skip_worktree(ce))1684;/* keep checking */1685else if(o->reset ||ce_uptodate(ce))1686return0;16871688if(!lstat(ce->name, &st)) {1689int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE;1690unsigned changed =ie_match_stat(o->src_index, ce, &st, flags);16911692if(submodule_from_ce(ce)) {1693int r =check_submodule_move_head(ce,1694"HEAD",oid_to_hex(&ce->oid), o);1695if(r)1696return o->gently ? -1:1697add_rejected_path(o, error_type, ce->name);1698return0;1699}17001701if(!changed)1702return0;1703/*1704 * Historic default policy was to allow submodule to be out1705 * of sync wrt the superproject index. If the submodule was1706 * not considered interesting above, we don't care here.1707 */1708if(S_ISGITLINK(ce->ce_mode))1709return0;17101711 errno =0;1712}1713if(errno == ENOENT)1714return0;1715return o->gently ? -1:1716add_rejected_path(o, error_type, ce->name);1717}17181719intverify_uptodate(const struct cache_entry *ce,1720struct unpack_trees_options *o)1721{1722if(!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))1723return0;1724returnverify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);1725}17261727static intverify_uptodate_sparse(const struct cache_entry *ce,1728struct unpack_trees_options *o)1729{1730returnverify_uptodate_1(ce, o, ERROR_SPARSE_NOT_UPTODATE_FILE);1731}17321733/*1734 * TODO: We should actually invalidate o->result, not src_index [1].1735 * But since cache tree and untracked cache both are not copied to1736 * o->result until unpacking is complete, we invalidate them on1737 * src_index instead with the assumption that they will be copied to1738 * dst_index at the end.1739 *1740 * [1] src_index->cache_tree is also used in unpack_callback() so if1741 * we invalidate o->result, we need to update it to use1742 * o->result.cache_tree as well.1743 */1744static voidinvalidate_ce_path(const struct cache_entry *ce,1745struct unpack_trees_options *o)1746{1747if(!ce)1748return;1749cache_tree_invalidate_path(o->src_index, ce->name);1750untracked_cache_invalidate_path(o->src_index, ce->name,1);1751}17521753/*1754 * Check that checking out ce->sha1 in subdir ce->name is not1755 * going to overwrite any working files.1756 *1757 * Currently, git does not checkout subprojects during a superproject1758 * checkout, so it is not going to overwrite anything.1759 */1760static intverify_clean_submodule(const char*old_sha1,1761const struct cache_entry *ce,1762enum unpack_trees_error_types error_type,1763struct unpack_trees_options *o)1764{1765if(!submodule_from_ce(ce))1766return0;17671768returncheck_submodule_move_head(ce, old_sha1,1769oid_to_hex(&ce->oid), o);1770}17711772static intverify_clean_subdirectory(const struct cache_entry 
*ce,1773enum unpack_trees_error_types error_type,1774struct unpack_trees_options *o)1775{1776/*1777 * we are about to extract "ce->name"; we would not want to lose1778 * anything in the existing directory there.1779 */1780int namelen;1781int i;1782struct dir_struct d;1783char*pathbuf;1784int cnt =0;17851786if(S_ISGITLINK(ce->ce_mode)) {1787struct object_id oid;1788int sub_head =resolve_gitlink_ref(ce->name,"HEAD", &oid);1789/*1790 * If we are not going to update the submodule, then1791 * we don't care.1792 */1793if(!sub_head &&oideq(&oid, &ce->oid))1794return0;1795returnverify_clean_submodule(sub_head ? NULL :oid_to_hex(&oid),1796 ce, error_type, o);1797}17981799/*1800 * First let's make sure we do not have a local modification1801 * in that directory.1802 */1803 namelen =ce_namelen(ce);1804for(i =locate_in_src_index(ce, o);1805 i < o->src_index->cache_nr;1806 i++) {1807struct cache_entry *ce2 = o->src_index->cache[i];1808int len =ce_namelen(ce2);1809if(len < namelen ||1810strncmp(ce->name, ce2->name, namelen) ||1811 ce2->name[namelen] !='/')1812break;1813/*1814 * ce2->name is an entry in the subdirectory to be1815 * removed.1816 */1817if(!ce_stage(ce2)) {1818if(verify_uptodate(ce2, o))1819return-1;1820add_entry(o, ce2, CE_REMOVE,0);1821invalidate_ce_path(ce, o);1822mark_ce_used(ce2, o);1823}1824 cnt++;1825}18261827/*1828 * Then we need to make sure that we do not lose a locally1829 * present file that is not ignored.1830 */1831 pathbuf =xstrfmt("%.*s/", namelen, ce->name);18321833memset(&d,0,sizeof(d));1834if(o->dir)1835 d.exclude_per_dir = o->dir->exclude_per_dir;1836 i =read_directory(&d, o->src_index, pathbuf, namelen+1, NULL);1837if(i)1838return o->gently ? -1:1839add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);1840free(pathbuf);1841return cnt;1842}18431844/*1845 * This gets called when there was no index entry for the tree entry 'dst',1846 * but we found a file in the working tree that 'lstat()' said was fine,1847 * and we're on a case-insensitive filesystem.1848 *1849 * See if we can find a case-insensitive match in the index that also1850 * matches the stat information, and assume it's that other file!1851 */1852static inticase_exists(struct unpack_trees_options *o,const char*name,int len,struct stat *st)1853{1854const struct cache_entry *src;18551856 src =index_file_exists(o->src_index, name, len,1);1857return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);1858}18591860static intcheck_ok_to_remove(const char*name,int len,int dtype,1861const struct cache_entry *ce,struct stat *st,1862enum unpack_trees_error_types error_type,1863struct unpack_trees_options *o)1864{1865const struct cache_entry *result;18661867/*1868 * It may be that the 'lstat()' succeeded even though1869 * target 'ce' was absent, because there is an old1870 * entry that is different only in case..1871 *1872 * Ignore that lstat() if it matches.1873 */1874if(ignore_case &&icase_exists(o, name, len, st))1875return0;18761877if(o->dir &&1878is_excluded(o->dir, o->src_index, name, &dtype))1879/*1880 * ce->name is explicitly excluded, so it is Ok to1881 * overwrite it.1882 */1883return0;1884if(S_ISDIR(st->st_mode)) {1885/*1886 * We are checking out path "foo" and1887 * found "foo/." 
in the working tree.1888 * This is tricky -- if we have modified1889 * files that are in "foo/" we would lose1890 * them.1891 */1892if(verify_clean_subdirectory(ce, error_type, o) <0)1893return-1;1894return0;1895}18961897/*1898 * The previous round may already have decided to1899 * delete this path, which is in a subdirectory that1900 * is being replaced with a blob.1901 */1902 result =index_file_exists(&o->result, name, len,0);1903if(result) {1904if(result->ce_flags & CE_REMOVE)1905return0;1906}19071908return o->gently ? -1:1909add_rejected_path(o, error_type, name);1910}19111912/*1913 * We do not want to remove or overwrite a working tree file that1914 * is not tracked, unless it is ignored.1915 */1916static intverify_absent_1(const struct cache_entry *ce,1917enum unpack_trees_error_types error_type,1918struct unpack_trees_options *o)1919{1920int len;1921struct stat st;19221923if(o->index_only || o->reset || !o->update)1924return0;19251926 len =check_leading_path(ce->name,ce_namelen(ce));1927if(!len)1928return0;1929else if(len >0) {1930char*path;1931int ret;19321933 path =xmemdupz(ce->name, len);1934if(lstat(path, &st))1935 ret =error_errno("cannot stat '%s'", path);1936else{1937if(submodule_from_ce(ce))1938 ret =check_submodule_move_head(ce,1939oid_to_hex(&ce->oid),1940 NULL, o);1941else1942 ret =check_ok_to_remove(path, len, DT_UNKNOWN, NULL,1943&st, error_type, o);1944}1945free(path);1946return ret;1947}else if(lstat(ce->name, &st)) {1948if(errno != ENOENT)1949returnerror_errno("cannot stat '%s'", ce->name);1950return0;1951}else{1952if(submodule_from_ce(ce))1953returncheck_submodule_move_head(ce,oid_to_hex(&ce->oid),1954 NULL, o);19551956returncheck_ok_to_remove(ce->name,ce_namelen(ce),1957ce_to_dtype(ce), ce, &st,1958 error_type, o);1959}1960}19611962static intverify_absent(const struct cache_entry *ce,1963enum unpack_trees_error_types error_type,1964struct unpack_trees_options *o)1965{1966if(!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))1967return0;1968returnverify_absent_1(ce, error_type, o);1969}19701971static intverify_absent_sparse(const struct cache_entry *ce,1972enum unpack_trees_error_types error_type,1973struct unpack_trees_options *o)1974{1975enum unpack_trees_error_types orphaned_error = error_type;1976if(orphaned_error == ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN)1977 orphaned_error = ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN;19781979returnverify_absent_1(ce, orphaned_error, o);1980}19811982static intmerged_entry(const struct cache_entry *ce,1983const struct cache_entry *old,1984struct unpack_trees_options *o)1985{1986int update = CE_UPDATE;1987struct cache_entry *merge =dup_cache_entry(ce, &o->result);19881989if(!old) {1990/*1991 * New index entries. In sparse checkout, the following1992 * verify_absent() will be delayed until after1993 * traverse_trees() finishes in unpack_trees(), then:1994 *1995 * - CE_NEW_SKIP_WORKTREE will be computed correctly1996 * - verify_absent() be called again, this time with1997 * correct CE_NEW_SKIP_WORKTREE1998 *1999 * verify_absent() call here does nothing in sparse2000 * checkout (i.e. 
static int merged_entry(const struct cache_entry *ce,
			const struct cache_entry *old,
			struct unpack_trees_options *o)
{
	int update = CE_UPDATE;
	struct cache_entry *merge = dup_cache_entry(ce, &o->result);

	if (!old) {
		/*
		 * New index entries. In sparse checkout, the following
		 * verify_absent() will be delayed until after
		 * traverse_trees() finishes in unpack_trees(), then:
		 *
		 *  - CE_NEW_SKIP_WORKTREE will be computed correctly
		 *  - verify_absent() will be called again, this time with
		 *    correct CE_NEW_SKIP_WORKTREE
		 *
		 * verify_absent() call here does nothing in sparse
		 * checkout (i.e. o->skip_sparse_checkout == 0)
		 */
		update |= CE_ADDED;
		merge->ce_flags |= CE_NEW_SKIP_WORKTREE;

		if (verify_absent(merge,
				  ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
			discard_cache_entry(merge);
			return -1;
		}
		invalidate_ce_path(merge, o);

		if (submodule_from_ce(ce)) {
			int ret = check_submodule_move_head(ce, NULL,
							    oid_to_hex(&ce->oid),
							    o);
			if (ret)
				return ret;
		}

	} else if (!(old->ce_flags & CE_CONFLICTED)) {
		/*
		 * See if we can re-use the old CE directly?
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on a match; otherwise
		 * we will end up overwriting local changes in the work tree.
		 */
		if (same(old, merge)) {
			copy_cache_entry(merge, old);
			update = 0;
		} else {
			if (verify_uptodate(old, o)) {
				discard_cache_entry(merge);
				return -1;
			}
			/* Migrate old flags over */
			update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);
			invalidate_ce_path(old, o);
		}

		if (submodule_from_ce(ce)) {
			int ret = check_submodule_move_head(ce, oid_to_hex(&old->oid),
							    oid_to_hex(&ce->oid),
							    o);
			if (ret)
				return ret;
		}
	} else {
		/*
		 * Previously unmerged entry left as an existence
		 * marker by read_index_unmerged();
		 */
		invalidate_ce_path(old, o);
	}

	do_add_entry(o, merge, update, CE_STAGEMASK);
	return 1;
}

static int deleted_entry(const struct cache_entry *ce,
			 const struct cache_entry *old,
			 struct unpack_trees_options *o)
{
	/* Did it exist in the index? */
	if (!old) {
		if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
			return -1;
		return 0;
	}
	if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o))
		return -1;
	add_entry(o, ce, CE_REMOVE, 0);
	invalidate_ce_path(ce, o);
	return 1;
}

static int keep_entry(const struct cache_entry *ce,
		      struct unpack_trees_options *o)
{
	add_entry(o, ce, 0, 0);
	if (ce_stage(ce))
		invalidate_ce_path(ce, o);
	return 1;
}

#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ce->ce_mode,
			oid_to_hex(&ce->oid),
			ce_stage(ce),
			ce->name);
}
#endif
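
/*
 * The #N case numbers cited in the comments below (#1, #2ALT, #14,
 * #16, ...) refer to the read-tree three-way merge cases; the table
 * they come from is described in Documentation/technical/trivial-merge.txt.
 */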
int threeway_merge(const struct cache_entry * const *stages,
		   struct unpack_trees_options *o)
{
	const struct cache_entry *index;
	const struct cache_entry *head;
	const struct cache_entry *remote = stages[o->head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	for (i = 1; i < o->head_idx; i++) {
		if (!stages[i] || stages[i] == o->df_conflict_entry)
			any_anc_missing = 1;
		else
			no_anc_exists = 0;
	}

	index = stages[0];
	head = stages[o->head_idx];

	if (head == o->df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == o->df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	/*
	 * First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < o->head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/*
	 * We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			return reject_merge(index, o);
		return merged_entry(remote, index, o);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head))
		return reject_merge(index, o);

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index, o);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index, o);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/*
	 * Under the "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (o->aggressive) {
		int head_deleted = !head;
		int remote_deleted = !remote;
		const struct cache_entry *ce = NULL;

		if (index)
			ce = index;
		else if (head)
			ce = head;
		else if (remote)
			ce = remote;
		else {
			for (i = 1; i < o->head_idx; i++) {
				if (stages[i] && stages[i] != o->df_conflict_entry) {
					ce = stages[i];
					break;
				}
			}
		}

		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index, o);
			if (ce && !head_deleted) {
				if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
					return -1;
			}
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index, o);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		if (verify_uptodate(index, o))
			return -1;
	}

	o->nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #10, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		for (i = 1; i < o->head_idx; i++) {
			if (stages[i] && stages[i] != o->df_conflict_entry) {
				keep_entry(stages[i], o);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head, o); }
	if (remote) { count += keep_entry(remote, o); }
	return count;
}

/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast-forward", favoring a successful merge
 * over a merge failure when it makes sense.  For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 *
 */
int twoway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *current = src[0];
	const struct cache_entry *oldtree = src[1];
	const struct cache_entry *newtree = src[2];

	if (o->merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     o->merge_size);

	if (oldtree == o->df_conflict_entry)
		oldtree = NULL;
	if (newtree == o->df_conflict_entry)
		newtree = NULL;

	if (current) {
		if (current->ce_flags & CE_CONFLICTED) {
			if (same(oldtree, newtree) || o->reset) {
				if (!newtree)
					return deleted_entry(current, current, o);
				else
					return merged_entry(newtree, current, o);
			}
			return reject_merge(current, o);
		} else if ((!oldtree && !newtree) || /* 4 and 5 */
			   (!oldtree && newtree &&
			    same(current, newtree)) || /* 6 and 7 */
			   (oldtree && newtree &&
			    same(oldtree, newtree)) || /* 14 and 15 */
			   (oldtree && newtree &&
			    !same(oldtree, newtree) && /* 18 and 19 */
			    same(current, newtree))) {
			return keep_entry(current, o);
		} else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current, o);
		} else if (oldtree && newtree &&
			   same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current, o);
		} else
			return reject_merge(current, o);
	}
	else if (newtree) {
		if (oldtree && !o->initial_checkout) {
			/*
			 * deletion of the path was staged;
			 */
			if (same(oldtree, newtree))
				return 1;
			return reject_merge(oldtree, o);
		}
		return merged_entry(newtree, current, o);
	}
	return deleted_entry(oldtree, current, o);
}

/*
 * Bind merge.
 *
 * Keep the index entries at stage0, collapse stage1 but make sure
 * stage0 does not have anything there.
 */
int bind_merge(const struct cache_entry * const *src,
	       struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a bind merge of %d trees",
			     o->merge_size);
	if (a && old)
		return o->gently ? -1 :
			error(ERRORMSG(o, ERROR_BIND_OVERLAP),
			      super_prefixed(a->name),
			      super_prefixed(old->name));
	if (!a)
		return keep_entry(old, o);
	else
		return merged_entry(a, NULL, o);
}

/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
int oneway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     o->merge_size);

	if (!a || a == o->df_conflict_entry)
		return deleted_entry(old, old, o);

	if (old && same(old, a)) {
		int update = 0;
		if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old)) {
			struct stat st;
			if (lstat(old->name, &st) ||
			    ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE))
				update |= CE_UPDATE;
		}
		if (o->update && S_ISGITLINK(old->ce_mode) &&
		    should_update_submodules() && !verify_uptodate(old, o))
			update |= CE_UPDATE;
		add_entry(o, old, update, CE_STAGEMASK);
		return 0;
	}
	return merged_entry(a, old, o);
}
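
/*
 * The merge functions above (oneway_merge, twoway_merge, threeway_merge,
 * bind_merge) are not called directly; a caller plugs one of them into
 * struct unpack_trees_options and lets unpack_trees() drive it.  As an
 * illustrative sketch only (the real callers live in builtin/read-tree.c,
 * builtin/reset.c, builtin/checkout.c and friends), a reset-style
 * "make index and worktree match <tree>" operation looks roughly like:
 *
 *	struct tree *tree = parse_tree_indirect(&oid);	(the target tree)
 *	struct tree_desc desc;
 *	struct unpack_trees_options opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.head_idx = 1;
 *	opts.src_index = &the_index;
 *	opts.dst_index = &the_index;
 *	opts.fn = oneway_merge;		(twoway/threeway for 2 or 3 trees)
 *	opts.merge = 1;
 *	opts.update = 1;
 *	opts.reset = 1;
 *
 *	parse_tree(tree);
 *	init_tree_desc(&desc, tree->buffer, tree->size);
 *	if (unpack_trees(1, &desc, &opts))
 *		die(...);
 */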