1#include"cache.h" 2#include"argv-array.h" 3#include"repository.h" 4#include"config.h" 5#include"dir.h" 6#include"tree.h" 7#include"tree-walk.h" 8#include"cache-tree.h" 9#include"unpack-trees.h" 10#include"progress.h" 11#include"refs.h" 12#include"attr.h" 13#include"split-index.h" 14#include"dir.h" 15#include"submodule.h" 16#include"submodule-config.h" 17#include"fsmonitor.h" 18#include"object-store.h" 19#include"promisor-remote.h" 20 21/* 22 * Error messages expected by scripts out of plumbing commands such as 23 * read-tree. Non-scripted Porcelain is not required to use these messages 24 * and in fact are encouraged to reword them to better suit their particular 25 * situation better. See how "git checkout" and "git merge" replaces 26 * them using setup_unpack_trees_porcelain(), for example. 27 */ 28static const char*unpack_plumbing_errors[NB_UNPACK_TREES_ERROR_TYPES] = { 29/* ERROR_WOULD_OVERWRITE */ 30"Entry '%s' would be overwritten by merge. Cannot merge.", 31 32/* ERROR_NOT_UPTODATE_FILE */ 33"Entry '%s' not uptodate. Cannot merge.", 34 35/* ERROR_NOT_UPTODATE_DIR */ 36"Updating '%s' would lose untracked files in it", 37 38/* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */ 39"Untracked working tree file '%s' would be overwritten by merge.", 40 41/* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */ 42"Untracked working tree file '%s' would be removed by merge.", 43 44/* ERROR_BIND_OVERLAP */ 45"Entry '%s' overlaps with '%s'. Cannot bind.", 46 47/* ERROR_SPARSE_NOT_UPTODATE_FILE */ 48"Entry '%s' not uptodate. Cannot update sparse checkout.", 49 50/* ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN */ 51"Working tree file '%s' would be overwritten by sparse checkout update.", 52 53/* ERROR_WOULD_LOSE_ORPHANED_REMOVED */ 54"Working tree file '%s' would be removed by sparse checkout update.", 55 56/* ERROR_WOULD_LOSE_SUBMODULE */ 57"Submodule '%s' cannot checkout new HEAD.", 58}; 59 60#define ERRORMSG(o,type) \ 61 ( ((o) && (o)->msgs[(type)]) \ 62 ? ((o)->msgs[(type)]) \ 63 : (unpack_plumbing_errors[(type)]) ) 64 65static const char*super_prefixed(const char*path) 66{ 67/* 68 * It is necessary and sufficient to have two static buffers 69 * here, as the return value of this function is fed to 70 * error() using the unpack_*_errors[] templates we see above. 
static const char *super_prefixed(const char *path)
{
	/*
	 * It is necessary and sufficient to have two static buffers
	 * here, as the return value of this function is fed to
	 * error() using the unpack_*_errors[] templates we see above.
	 */
	static struct strbuf buf[2] = {STRBUF_INIT, STRBUF_INIT};
	static int super_prefix_len = -1;
	static unsigned idx = ARRAY_SIZE(buf) - 1;

	if (super_prefix_len < 0) {
		const char *super_prefix = get_super_prefix();
		if (!super_prefix) {
			super_prefix_len = 0;
		} else {
			int i;
			for (i = 0; i < ARRAY_SIZE(buf); i++)
				strbuf_addstr(&buf[i], super_prefix);
			super_prefix_len = buf[0].len;
		}
	}

	if (!super_prefix_len)
		return path;

	if (++idx >= ARRAY_SIZE(buf))
		idx = 0;

	strbuf_setlen(&buf[idx], super_prefix_len);
	strbuf_addstr(&buf[idx], path);

	return buf[idx].buf;
}

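/*
 * Replace the plumbing error templates above with porcelain-friendly
 * messages for the given command ("checkout", "merge", or anything else).
 * A hypothetical caller would pair it with clear_unpack_trees_porcelain(),
 * roughly like this (sketch only; the options setup is abbreviated and
 * varies per command):
 *
 *	struct unpack_trees_options opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.src_index = &the_index;
 *	opts.dst_index = &the_index;
 *	setup_unpack_trees_porcelain(&opts, "checkout");
 *	...
 *	clear_unpack_trees_porcelain(&opts);
 */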
void setup_unpack_trees_porcelain(struct unpack_trees_options *opts,
				  const char *cmd)
{
	int i;
	const char **msgs = opts->msgs;
	const char *msg;

	argv_array_init(&opts->msgs_to_free);

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by checkout:\n%%s"
			  "Please commit your changes or stash them before you switch branches.")
		      : _("Your local changes to the following files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by merge:\n%%s"
			  "Please commit your changes or stash them before you merge.")
		      : _("Your local changes to the following files would be overwritten by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by %s:\n%%s"
			  "Please commit your changes or stash them before you %s.")
		      : _("Your local changes to the following files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] =
		argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd);

	msgs[ERROR_NOT_UPTODATE_DIR] =
		_("Updating the following directories would lose untracked files in them:\n%s");

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be removed by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be removed by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be removed by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] =
		argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd);

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be overwritten by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] =
		argv_array_pushf(&opts->msgs_to_free, msg, cmd, cmd);

	/*
	 * Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we
	 * cannot easily display it as a list.
	 */
	msgs[ERROR_BIND_OVERLAP] = _("Entry '%s' overlaps with '%s'. Cannot bind.");

	msgs[ERROR_SPARSE_NOT_UPTODATE_FILE] =
		_("Cannot update sparse checkout: the following entries are not up to date:\n%s");
	msgs[ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN] =
		_("The following working tree files would be overwritten by sparse checkout update:\n%s");
	msgs[ERROR_WOULD_LOSE_ORPHANED_REMOVED] =
		_("The following working tree files would be removed by sparse checkout update:\n%s");
	msgs[ERROR_WOULD_LOSE_SUBMODULE] =
		_("Cannot update submodule:\n%s");

	opts->show_all_errors = 1;
	/* rejected paths may not have a static buffer */
	for (i = 0; i < ARRAY_SIZE(opts->unpack_rejects); i++)
		opts->unpack_rejects[i].strdup_strings = 1;
}

void clear_unpack_trees_porcelain(struct unpack_trees_options *opts)
{
	argv_array_clear(&opts->msgs_to_free);
	memset(opts->msgs, 0, sizeof(opts->msgs));
}

static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
			unsigned int set, unsigned int clear)
{
	clear |= CE_HASHED;

	if (set & CE_REMOVE)
		set |= CE_WT_REMOVE;

	ce->ce_flags = (ce->ce_flags & ~clear) | set;
	return add_index_entry(&o->result, ce,
			       ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
}

static void add_entry(struct unpack_trees_options *o,
		      const struct cache_entry *ce,
		      unsigned int set, unsigned int clear)
{
	do_add_entry(o, dup_cache_entry(ce, &o->result), set, clear);
}

/*
 * Add an error message on path <path>
 * corresponding to the type <e> with the message <msg>,
 * indicating whether it should be displayed in porcelain or not.
 */
static int add_rejected_path(struct unpack_trees_options *o,
			     enum unpack_trees_error_types e,
			     const char *path)
{
	if (o->quiet)
		return -1;

	if (!o->show_all_errors)
		return error(ERRORMSG(o, e), super_prefixed(path));

	/*
	 * Otherwise, insert in a list for future display by
	 * display_error_msgs()
	 */
	string_list_append(&o->unpack_rejects[e], path);
	return -1;
}

/*
 * display all the error messages stored in a nice way
 */
static void display_error_msgs(struct unpack_trees_options *o)
{
	int e, i;
	int something_displayed = 0;
	for (e = 0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) {
		struct string_list *rejects = &o->unpack_rejects[e];
		if (rejects->nr > 0) {
			struct strbuf path = STRBUF_INIT;
			something_displayed = 1;
			for (i = 0; i < rejects->nr; i++)
				strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
			error(ERRORMSG(o, e), super_prefixed(path.buf));
			strbuf_release(&path);
		}
		string_list_clear(rejects, 0);
	}
	if (something_displayed)
		fprintf(stderr, _("Aborting\n"));
}

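/*
 * Ask the submodule machinery, as a dry run, whether the submodule at
 * "ce" can move its HEAD from old_id to new_id; record a rejection if
 * it cannot.  With o->reset the move is forced.
 */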
static int check_submodule_move_head(const struct cache_entry *ce,
				     const char *old_id,
				     const char *new_id,
				     struct unpack_trees_options *o)
{
	unsigned flags = SUBMODULE_MOVE_HEAD_DRY_RUN;
	const struct submodule *sub = submodule_from_ce(ce);

	if (!sub)
		return 0;

	if (o->reset)
		flags |= SUBMODULE_MOVE_HEAD_FORCE;

	if (submodule_move_head(ce->name, old_id, new_id, flags))
		return add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name);
	return 0;
}

/*
 * Perform the loading of the repository's gitmodules file.  This function is
 * used by 'check_updates()' to perform loading of the gitmodules file in two
 * different situations:
 * (1) before removing entries from the working tree if the gitmodules file has
 *     been marked for removal.  This situation is specified by 'state' == NULL.
 * (2) before checking out entries to the working tree if the gitmodules file
 *     has been marked for update.  This situation is specified by 'state' != NULL.
 */
static void load_gitmodules_file(struct index_state *index,
				 struct checkout *state)
{
	int pos = index_name_pos(index, GITMODULES_FILE, strlen(GITMODULES_FILE));

	if (pos >= 0) {
		struct cache_entry *ce = index->cache[pos];
		if (!state && ce->ce_flags & CE_WT_REMOVE) {
			repo_read_gitmodules(the_repository);
		} else if (state && (ce->ce_flags & CE_UPDATE)) {
			submodule_free(the_repository);
			checkout_entry(ce, state, NULL, NULL);
			repo_read_gitmodules(the_repository);
		}
	}
}

static struct progress *get_progress(struct unpack_trees_options *o)
{
	unsigned cnt = 0, total = 0;
	struct index_state *index = &o->result;

	if (!o->update || !o->verbose_update)
		return NULL;

	for (; cnt < index->cache_nr; cnt++) {
		const struct cache_entry *ce = index->cache[cnt];
		if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE))
			total++;
	}

	return start_delayed_progress(_("Updating files"), total);
}

static void setup_collided_checkout_detection(struct checkout *state,
					      struct index_state *index)
{
	int i;

	state->clone = 1;
	for (i = 0; i < index->cache_nr; i++)
		index->cache[i]->ce_flags &= ~CE_MATCHED;
}

static void report_collided_checkout(struct index_state *index)
{
	struct string_list list = STRING_LIST_INIT_NODUP;
	int i;

	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (!(ce->ce_flags & CE_MATCHED))
			continue;

		string_list_append(&list, ce->name);
		ce->ce_flags &= ~CE_MATCHED;
	}

	list.cmp = fspathcmp;
	string_list_sort(&list);

	if (list.nr) {
		warning(_("the following paths have collided (e.g. case-sensitive paths\n"
			  "on a case-insensitive filesystem) and only one from the same\n"
			  "colliding group is in the working tree:\n"));

		for (i = 0; i < list.nr; i++)
			fprintf(stderr, "  '%s'\n", list.items[i].string);
	}

	string_list_clear(&list, 0);
}

static int check_updates(struct unpack_trees_options *o)
{
	unsigned cnt = 0;
	int errs = 0;
	struct progress *progress;
	struct index_state *index = &o->result;
	struct checkout state = CHECKOUT_INIT;
	int i;

	trace_performance_enter();
	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;
	state.istate = index;

	if (o->clone)
		setup_collided_checkout_detection(&state, index);

	progress = get_progress(o);

	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKOUT);

	if (should_update_submodules() && o->update && !o->dry_run)
		load_gitmodules_file(index, NULL);

	for (i = 0; i < index->cache_nr; i++) {
		const struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_WT_REMOVE) {
			display_progress(progress, ++cnt);
			if (o->update && !o->dry_run)
				unlink_entry(ce);
		}
	}
	remove_marked_cache_entries(index, 0);
	remove_scheduled_dirs();

	if (should_update_submodules() && o->update && !o->dry_run)
		load_gitmodules_file(index, &state);

	enable_delayed_checkout(&state);
	if (has_promisor_remote() && o->update && !o->dry_run) {
		/*
		 * Prefetch the objects that are to be checked out in the loop
		 * below.
		 */
		struct oid_array to_fetch = OID_ARRAY_INIT;
		for (i = 0; i < index->cache_nr; i++) {
			struct cache_entry *ce = index->cache[i];

			if (!(ce->ce_flags & CE_UPDATE) ||
			    S_ISGITLINK(ce->ce_mode))
				continue;
			if (!oid_object_info_extended(the_repository, &ce->oid,
						      NULL,
						      OBJECT_INFO_FOR_PREFETCH))
				continue;
			oid_array_append(&to_fetch, &ce->oid);
		}
		if (to_fetch.nr)
			promisor_remote_get_direct(the_repository,
						   to_fetch.oid, to_fetch.nr);
		oid_array_clear(&to_fetch);
	}
	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_UPDATE) {
			if (ce->ce_flags & CE_WT_REMOVE)
				BUG("both update and delete flags are set on %s",
				    ce->name);
			display_progress(progress, ++cnt);
			ce->ce_flags &= ~CE_UPDATE;
			if (o->update && !o->dry_run) {
				errs |= checkout_entry(ce, &state, NULL, NULL);
			}
		}
	}
	stop_progress(&progress);
	errs |= finish_delayed_checkout(&state, NULL);
	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKIN);

	if (o->clone)
		report_collided_checkout(index);

	trace_performance_leave("check_updates");
	return errs != 0;
}

static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o);
static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types,
				struct unpack_trees_options *o);

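/*
 * Apply CE_NEW_SKIP_WORKTREE to an entry and schedule the corresponding
 * worktree update: an entry leaving the checkout area gets CE_WT_REMOVE,
 * one entering it gets CE_UPDATE, and entries staying outside have their
 * CE_UPDATE/CE_WT_REMOVE bits dropped so the worktree is left alone.
 */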
static int apply_sparse_checkout(struct index_state *istate,
				 struct cache_entry *ce,
				 struct unpack_trees_options *o)
{
	int was_skip_worktree = ce_skip_worktree(ce);

	if (ce->ce_flags & CE_NEW_SKIP_WORKTREE)
		ce->ce_flags |= CE_SKIP_WORKTREE;
	else
		ce->ce_flags &= ~CE_SKIP_WORKTREE;
	if (was_skip_worktree != ce_skip_worktree(ce)) {
		ce->ce_flags |= CE_UPDATE_IN_BASE;
		mark_fsmonitor_invalid(istate, ce);
		istate->cache_changed |= CE_ENTRY_CHANGED;
	}

	/*
	 * if (!was_skip_worktree && !ce_skip_worktree()) {
	 *	This is perfectly normal. Move on;
	 * }
	 */

	/*
	 * Merge strategies may set CE_UPDATE|CE_REMOVE outside the checkout
	 * area as a result of ce_skip_worktree() shortcuts in
	 * verify_absent() and verify_uptodate().
	 * Make sure they don't modify the worktree if they are already
	 * outside the checkout area.
	 */
	if (was_skip_worktree && ce_skip_worktree(ce)) {
		ce->ce_flags &= ~CE_UPDATE;

		/*
		 * By default, when CE_REMOVE is on, CE_WT_REMOVE is also
		 * on to get that file removed from both the index and worktree.
		 * If that file is already outside the worktree area, don't
		 * bother removing it.
		 */
		if (ce->ce_flags & CE_REMOVE)
			ce->ce_flags &= ~CE_WT_REMOVE;
	}

	if (!was_skip_worktree && ce_skip_worktree(ce)) {
		/*
		 * If CE_UPDATE is set, verify_uptodate() must have been
		 * called already; also stat info may have been lost after
		 * merged_entry(), so calling verify_uptodate() again may fail.
		 */
		if (!(ce->ce_flags & CE_UPDATE) && verify_uptodate_sparse(ce, o))
			return -1;
		ce->ce_flags |= CE_WT_REMOVE;
		ce->ce_flags &= ~CE_UPDATE;
	}
	if (was_skip_worktree && !ce_skip_worktree(ce)) {
		if (verify_absent_sparse(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o))
			return -1;
		ce->ce_flags |= CE_UPDATE;
	}
	return 0;
}

static inline int call_unpack_fn(const struct cache_entry * const *src,
				 struct unpack_trees_options *o)
{
	int ret = o->fn(src, o);
	if (ret > 0)
		ret = 0;
	return ret;
}

static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o)
{
	ce->ce_flags |= CE_UNPACKED;

	if (o->cache_bottom < o->src_index->cache_nr &&
	    o->src_index->cache[o->cache_bottom] == ce) {
		int bottom = o->cache_bottom;
		while (bottom < o->src_index->cache_nr &&
		       o->src_index->cache[bottom]->ce_flags & CE_UNPACKED)
			bottom++;
		o->cache_bottom = bottom;
	}
}

static void mark_all_ce_unused(struct index_state *index)
{
	int i;
	for (i = 0; i < index->cache_nr; i++)
		index->cache[i]->ce_flags &= ~(CE_UNPACKED | CE_ADDED | CE_NEW_SKIP_WORKTREE);
}

static int locate_in_src_index(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);
	if (pos < 0)
		pos = -1 - pos;
	return pos;
}

/*
 * We call unpack_index_entry() with an unmerged cache entry
 * only in diff-index, and it wants a single callback.  Skip
 * the other unmerged entry with the same name.
 */
static void mark_ce_used_same_name(struct cache_entry *ce,
				   struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos;

	for (pos = locate_in_src_index(ce, o); pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		mark_ce_used(next, o);
	}
}

static struct cache_entry *next_cache_entry(struct unpack_trees_options *o)
{
	const struct index_state *index = o->src_index;
	int pos = o->cache_bottom;

	while (pos < index->cache_nr) {
		struct cache_entry *ce = index->cache[pos];
		if (!(ce->ce_flags & CE_UNPACKED))
			return ce;
		pos++;
	}
	return NULL;
}

static void add_same_unmerged(const struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);

	if (0 <= pos)
		die("programming error in a caller of mark_ce_used_same_name");
	for (pos = -pos - 1; pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		add_entry(o, next, 0, 0);
		mark_ce_used(next, o);
	}
}

static int unpack_index_entry(struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	int ret;

	src[0] = ce;

	mark_ce_used(ce, o);
	if (ce_stage(ce)) {
		if (o->skip_unmerged) {
			add_entry(o, ce, 0, 0);
			return 0;
		}
	}
	ret = call_unpack_fn(src, o);
	if (ce_stage(ce))
		mark_ce_used_same_name(ce, o);
	return ret;
}

static int find_cache_pos(struct traverse_info *, const char *p, size_t len);

static void restore_cache_bottom(struct traverse_info *info, int bottom)
{
	struct unpack_trees_options *o = info->data;

	if (o->diff_index_cached)
		return;
	o->cache_bottom = bottom;
}

static int switch_cache_bottom(struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int ret, pos;

	if (o->diff_index_cached)
		return 0;
	ret = o->cache_bottom;
	pos = find_cache_pos(info->prev, info->name, info->namelen);

	if (pos < -1)
		o->cache_bottom = -2 - pos;
	else if (pos < 0)
		o->cache_bottom = o->src_index->cache_nr;
	return ret;
}

static inline int are_same_oid(struct name_entry *name_j, struct name_entry *name_k)
{
	return !is_null_oid(&name_j->oid) && !is_null_oid(&name_k->oid) && oideq(&name_j->oid, &name_k->oid);
}

static int all_trees_same_as_cache_tree(int n, unsigned long dirmask,
					struct name_entry *names,
					struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int i;

	if (!o->merge || dirmask != ((1 << n) - 1))
		return 0;

	for (i = 1; i < n; i++)
		if (!are_same_oid(names, names + i))
			return 0;

	return cache_tree_matches_traversal(o->src_index->cache_tree, names, info);
}

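/*
 * Locate the position in the source index of the first entry inside the
 * directory described by the traverse info plus "names"; BUGs if the path
 * exists as a non-directory entry or the computed position is not the
 * first one under that directory.
 */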
static int index_pos_by_traverse_info(struct name_entry *names,
				      struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	struct strbuf name = STRBUF_INIT;
	int pos;

	strbuf_make_traverse_path(&name, info, names->path, names->pathlen);
	strbuf_addch(&name, '/');
	pos = index_name_pos(o->src_index, name.buf, name.len);
	if (pos >= 0)
		BUG("This is a directory and should not exist in index");
	pos = -pos - 1;
	if (!starts_with(o->src_index->cache[pos]->name, name.buf) ||
	    (pos > 0 && starts_with(o->src_index->cache[pos-1]->name, name.buf)))
		BUG("pos must point at the first entry in this directory");
	strbuf_release(&name);
	return pos;
}

/*
 * Fast path if we detect that all trees are the same as cache-tree at this
 * path. We'll walk these trees in an iterative loop using cache-tree/index
 * instead of ODB since we already know what these trees contain.
 */
static int traverse_by_cache_tree(int pos, int nr_entries, int nr_names,
				  struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	struct cache_entry *tree_ce = NULL;
	int ce_len = 0;
	int i, d;

	if (!o->merge)
		BUG("We need cache-tree to do this optimization");

	/*
	 * Do what unpack_callback() and unpack_nondirectories() normally
	 * do. But we walk all paths in an iterative loop instead.
	 *
	 * D/F conflicts and higher stage entries are not a concern
	 * because cache-tree would be invalidated and we would never
	 * get here in the first place.
	 */
	for (i = 0; i < nr_entries; i++) {
		int new_ce_len, len, rc;

		src[0] = o->src_index->cache[pos + i];

		len = ce_namelen(src[0]);
		new_ce_len = cache_entry_size(len);

		if (new_ce_len > ce_len) {
			new_ce_len <<= 1;
			tree_ce = xrealloc(tree_ce, new_ce_len);
			memset(tree_ce, 0, new_ce_len);
			ce_len = new_ce_len;

			tree_ce->ce_flags = create_ce_flags(0);

			for (d = 1; d <= nr_names; d++)
				src[d] = tree_ce;
		}

		tree_ce->ce_mode = src[0]->ce_mode;
		tree_ce->ce_namelen = len;
		oidcpy(&tree_ce->oid, &src[0]->oid);
		memcpy(tree_ce->name, src[0]->name, len + 1);

		rc = call_unpack_fn((const struct cache_entry * const *)src, o);
		if (rc < 0) {
			free(tree_ce);
			return rc;
		}

		mark_ce_used(src[0], o);
	}
	free(tree_ce);
	if (o->debug_unpack)
		printf("Unpacked %d entries from %s to %s using cache-tree\n",
		       nr_entries,
		       o->src_index->cache[pos]->name,
		       o->src_index->cache[pos + nr_entries - 1]->name);
	return 0;
}

static int traverse_trees_recursive(int n, unsigned long dirmask,
				    unsigned long df_conflicts,
				    struct name_entry *names,
				    struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int i, ret, bottom;
	int nr_buf = 0;
	struct tree_desc t[MAX_UNPACK_TREES];
	void *buf[MAX_UNPACK_TREES];
	struct traverse_info newinfo;
	struct name_entry *p;
	int nr_entries;

	nr_entries = all_trees_same_as_cache_tree(n, dirmask, names, info);
	if (nr_entries > 0) {
		int pos = index_pos_by_traverse_info(names, info);

		if (!o->merge || df_conflicts)
			BUG("Wrong condition to get here buddy");

		/*
		 * All entries up to 'pos' must have been processed
		 * (i.e. marked CE_UNPACKED) at this point. But to be safe,
		 * save and restore cache_bottom anyway to not miss
		 * unprocessed entries before 'pos'.
		 */
		bottom = o->cache_bottom;
		ret = traverse_by_cache_tree(pos, nr_entries, n, info);
		o->cache_bottom = bottom;
		return ret;
	}

	p = names;
	while (!p->mode)
		p++;

	newinfo = *info;
	newinfo.prev = info;
	newinfo.pathspec = info->pathspec;
	newinfo.name = p->path;
	newinfo.namelen = p->pathlen;
	newinfo.mode = p->mode;
	newinfo.pathlen = st_add3(newinfo.pathlen, tree_entry_len(p), 1);
	newinfo.df_conflicts |= df_conflicts;

	/*
	 * Fetch the tree from the ODB for each peer directory in the
	 * n commits.
	 *
	 * For 2- and 3-way traversals, we try to avoid hitting the
	 * ODB twice for the same OID.  This should yield a nice speed
	 * up in checkouts and merges when the commits are similar.
	 *
	 * We don't bother doing the full O(n^2) search for larger n,
	 * because wider traversals don't happen that often and we
	 * avoid the search setup.
	 *
	 * When 2 peer OIDs are the same, we just copy the tree
	 * descriptor data.  This implicitly borrows the buffer
	 * data from the earlier cell.
	 */
	for (i = 0; i < n; i++, dirmask >>= 1) {
		if (i > 0 && are_same_oid(&names[i], &names[i - 1]))
			t[i] = t[i - 1];
		else if (i > 1 && are_same_oid(&names[i], &names[i - 2]))
			t[i] = t[i - 2];
		else {
			const struct object_id *oid = NULL;
			if (dirmask & 1)
				oid = &names[i].oid;
			buf[nr_buf++] = fill_tree_descriptor(the_repository, t + i, oid);
		}
	}

	bottom = switch_cache_bottom(&newinfo);
	ret = traverse_trees(o->src_index, n, t, &newinfo);
	restore_cache_bottom(&newinfo, bottom);

	for (i = 0; i < nr_buf; i++)
		free(buf[i]);

	return ret;
}

/*
 * Compare the traverse-path to the cache entry without actually
 * having to generate the textual representation of the traverse
 * path.
 *
 * NOTE! This *only* compares up to the size of the traverse path
 * itself - the caller needs to do the final check for the cache
 * entry having more data at the end!
 */
static int do_compare_entry_piecewise(const struct cache_entry *ce,
				      const struct traverse_info *info,
				      const char *name, size_t namelen,
				      unsigned mode)
{
	int pathlen, ce_len;
	const char *ce_name;

	if (info->prev) {
		int cmp = do_compare_entry_piecewise(ce, info->prev,
						     info->name, info->namelen,
						     info->mode);
		if (cmp)
			return cmp;
	}
	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	/* If ce_len < pathlen then we must have previously hit "name == directory" entry */
	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	return df_name_compare(ce_name, ce_len, S_IFREG, name, namelen, mode);
}

static int do_compare_entry(const struct cache_entry *ce,
			    const struct traverse_info *info,
			    const char *name, size_t namelen,
			    unsigned mode)
{
	int pathlen, ce_len;
	const char *ce_name;
	int cmp;

	/*
	 * If we have not precomputed the traverse path, it is quicker
	 * to avoid doing so.  But if we have precomputed it,
	 * it is quicker to use the precomputed version.
	 */
	if (!info->traverse_path)
		return do_compare_entry_piecewise(ce, info, name, namelen, mode);

	cmp = strncmp(ce->name, info->traverse_path, info->pathlen);
	if (cmp)
		return cmp;

	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	return df_name_compare(ce_name, ce_len, S_IFREG, name, namelen, mode);
}

static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int cmp = do_compare_entry(ce, info, n->path, n->pathlen, n->mode);
	if (cmp)
		return cmp;

	/*
	 * Even if the beginning compared identically, the ce should
	 * compare as bigger than a directory leading up to it!
	 */
	return ce_namelen(ce) > traverse_path_len(info, tree_entry_len(n));
}

static int ce_in_traverse_path(const struct cache_entry *ce,
			       const struct traverse_info *info)
{
	if (!info->prev)
		return 1;
	if (do_compare_entry(ce, info->prev,
			     info->name, info->namelen, info->mode))
		return 0;
	/*
	 * If ce (blob) is the same name as the path (which is a tree
	 * we will be descending into), it won't be inside it.
	 */
	return (info->pathlen < ce_namelen(ce));
}

static struct cache_entry *create_ce_entry(const struct traverse_info *info,
					   const struct name_entry *n,
					   int stage,
					   struct index_state *istate,
					   int is_transient)
{
	size_t len = traverse_path_len(info, tree_entry_len(n));
	struct cache_entry *ce =
		is_transient ?
		make_empty_transient_cache_entry(len) :
		make_empty_cache_entry(istate, len);

	ce->ce_mode = create_ce_mode(n->mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	oidcpy(&ce->oid, &n->oid);
	/* len+1 because the cache_entry allocates space for NUL */
	make_traverse_path(ce->name, len + 1, info, n->path, n->pathlen);

	return ce;
}

/*
 * Note that traverse_by_cache_tree() duplicates some logic in this function
 * without actually calling it. If you change the logic here you may need to
 * check and change there as well.
 */
static int unpack_nondirectories(int n, unsigned long mask,
				 unsigned long dirmask,
				 struct cache_entry **src,
				 const struct name_entry *names,
				 const struct traverse_info *info)
{
	int i;
	struct unpack_trees_options *o = info->data;
	unsigned long conflicts = info->df_conflicts | dirmask;

	/* Do we have *only* directories? Nothing to do */
	if (mask == dirmask && !src[0])
		return 0;

	/*
	 * Ok, we've filled in up to any potential index entry in src[0],
	 * now do the rest.
	 */
	for (i = 0; i < n; i++) {
		int stage;
		unsigned int bit = 1ul << i;
		if (conflicts & bit) {
			src[i + o->merge] = o->df_conflict_entry;
			continue;
		}
		if (!(mask & bit))
			continue;
		if (!o->merge)
			stage = 0;
		else if (i + 1 < o->head_idx)
			stage = 1;
		else if (i + 1 > o->head_idx)
			stage = 3;
		else
			stage = 2;

		/*
		 * If the merge bit is set, then the cache entries are
		 * discarded in the following block.  In this case,
		 * construct "transient" cache_entries, as they are
		 * not stored in the index.  Otherwise construct the
		 * cache entry from the index aware logic.
		 */
		src[i + o->merge] = create_ce_entry(info, names + i, stage, &o->result, o->merge);
	}

	if (o->merge) {
		int rc = call_unpack_fn((const struct cache_entry * const *)src,
					o);
		for (i = 0; i < n; i++) {
			struct cache_entry *ce = src[i + o->merge];
			if (ce != o->df_conflict_entry)
				discard_cache_entry(ce);
		}
		return rc;
	}

	for (i = 0; i < n; i++)
		if (src[i] && src[i] != o->df_conflict_entry)
			if (do_add_entry(o, src[i], 0, 0))
				return -1;

	return 0;
}

static int unpack_failed(struct unpack_trees_options *o, const char *message)
{
	discard_index(&o->result);
	if (!o->quiet && !o->exiting_early) {
		if (message)
			return error("%s", message);
		return -1;
	}
	return -1;
}

n->path :"(missing)");1156}11571158static voiddebug_unpack_callback(int n,1159unsigned long mask,1160unsigned long dirmask,1161struct name_entry *names,1162struct traverse_info *info)1163{1164int i;1165printf("* unpack mask%lu, dirmask%lu, cnt%d",1166 mask, dirmask, n);1167debug_path(info);1168putchar('\n');1169for(i =0; i < n; i++)1170debug_name_entry(i, names + i);1171}11721173/*1174 * Note that traverse_by_cache_tree() duplicates some logic in this function1175 * without actually calling it. If you change the logic here you may need to1176 * check and change there as well.1177 */1178static intunpack_callback(int n,unsigned long mask,unsigned long dirmask,struct name_entry *names,struct traverse_info *info)1179{1180struct cache_entry *src[MAX_UNPACK_TREES +1] = { NULL, };1181struct unpack_trees_options *o = info->data;1182const struct name_entry *p = names;11831184/* Find first entry with a real name (we could use "mask" too) */1185while(!p->mode)1186 p++;11871188if(o->debug_unpack)1189debug_unpack_callback(n, mask, dirmask, names, info);11901191/* Are we supposed to look at the index too? */1192if(o->merge) {1193while(1) {1194int cmp;1195struct cache_entry *ce;11961197if(o->diff_index_cached)1198 ce =next_cache_entry(o);1199else1200 ce =find_cache_entry(info, p);12011202if(!ce)1203break;1204 cmp =compare_entry(ce, info, p);1205if(cmp <0) {1206if(unpack_index_entry(ce, o) <0)1207returnunpack_failed(o, NULL);1208continue;1209}1210if(!cmp) {1211if(ce_stage(ce)) {1212/*1213 * If we skip unmerged index1214 * entries, we'll skip this1215 * entry *and* the tree1216 * entries associated with it!1217 */1218if(o->skip_unmerged) {1219add_same_unmerged(ce, o);1220return mask;1221}1222}1223 src[0] = ce;1224}1225break;1226}1227}12281229if(unpack_nondirectories(n, mask, dirmask, src, names, info) <0)1230return-1;12311232if(o->merge && src[0]) {1233if(ce_stage(src[0]))1234mark_ce_used_same_name(src[0], o);1235else1236mark_ce_used(src[0], o);1237}12381239/* Now handle any directories.. */1240if(dirmask) {1241/* special case: "diff-index --cached" looking at a tree */1242if(o->diff_index_cached &&1243 n ==1&& dirmask ==1&&S_ISDIR(names->mode)) {1244int matches;1245 matches =cache_tree_matches_traversal(o->src_index->cache_tree,1246 names, info);1247/*1248 * Everything under the name matches; skip the1249 * entire hierarchy. 
static void debug_unpack_callback(int n,
				  unsigned long mask,
				  unsigned long dirmask,
				  struct name_entry *names,
				  struct traverse_info *info)
{
	int i;
	printf("* unpack mask %lu, dirmask %lu, cnt %d ",
	       mask, dirmask, n);
	debug_path(info);
	putchar('\n');
	for (i = 0; i < n; i++)
		debug_name_entry(i, names + i);
}

/*
 * Note that traverse_by_cache_tree() duplicates some logic in this function
 * without actually calling it. If you change the logic here you may need to
 * check and change there as well.
 */
static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	const struct name_entry *p = names;

	/* Find first entry with a real name (we could use "mask" too) */
	while (!p->mode)
		p++;

	if (o->debug_unpack)
		debug_unpack_callback(n, mask, dirmask, names, info);

	/* Are we supposed to look at the index too? */
	if (o->merge) {
		while (1) {
			int cmp;
			struct cache_entry *ce;

			if (o->diff_index_cached)
				ce = next_cache_entry(o);
			else
				ce = find_cache_entry(info, p);

			if (!ce)
				break;
			cmp = compare_entry(ce, info, p);
			if (cmp < 0) {
				if (unpack_index_entry(ce, o) < 0)
					return unpack_failed(o, NULL);
				continue;
			}
			if (!cmp) {
				if (ce_stage(ce)) {
					/*
					 * If we skip unmerged index
					 * entries, we'll skip this
					 * entry *and* the tree
					 * entries associated with it!
					 */
					if (o->skip_unmerged) {
						add_same_unmerged(ce, o);
						return mask;
					}
				}
				src[0] = ce;
			}
			break;
		}
	}

	if (unpack_nondirectories(n, mask, dirmask, src, names, info) < 0)
		return -1;

	if (o->merge && src[0]) {
		if (ce_stage(src[0]))
			mark_ce_used_same_name(src[0], o);
		else
			mark_ce_used(src[0], o);
	}

	/* Now handle any directories.. */
	if (dirmask) {
		/* special case: "diff-index --cached" looking at a tree */
		if (o->diff_index_cached &&
		    n == 1 && dirmask == 1 && S_ISDIR(names->mode)) {
			int matches;
			matches = cache_tree_matches_traversal(o->src_index->cache_tree,
							       names, info);
			/*
			 * Everything under the name matches; skip the
			 * entire hierarchy.  diff_index_cached codepath
			 * special cases D/F conflicts in such a way that
			 * it does not do any look-ahead, so this is safe.
			 */
			if (matches) {
				o->cache_bottom += matches;
				return mask;
			}
		}

		if (traverse_trees_recursive(n, dirmask, mask & ~dirmask,
					     names, info) < 0)
			return -1;
		return mask;
	}

	return mask;
}

static int clear_ce_flags_1(struct index_state *istate,
			    struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct pattern_list *pl,
			    enum pattern_match_result default_match);

/* Whole directory matching */
static int clear_ce_flags_dir(struct index_state *istate,
			      struct cache_entry **cache, int nr,
			      struct strbuf *prefix,
			      char *basename,
			      int select_mask, int clear_mask,
			      struct pattern_list *pl,
			      enum pattern_match_result default_match)
{
	struct cache_entry **cache_end;
	int dtype = DT_DIR;
	int rc;
	enum pattern_match_result ret;
	ret = path_matches_pattern_list(prefix->buf, prefix->len,
					basename, &dtype, pl, istate);

	strbuf_addch(prefix, '/');

	/* If undecided, use matching result of parent dir in defval */
	if (ret == UNDECIDED)
		ret = default_match;

	for (cache_end = cache; cache_end != cache + nr; cache_end++) {
		struct cache_entry *ce = *cache_end;
		if (strncmp(ce->name, prefix->buf, prefix->len))
			break;
	}

	/*
	 * TODO: check pl, if there are no patterns that may conflict
	 * with ret (iow, we know in advance the incl/excl
	 * decision for the entire directory), clear flag here without
	 * calling clear_ce_flags_1(). That function will call
	 * the expensive path_matches_pattern_list() on every entry.
	 */
	rc = clear_ce_flags_1(istate, cache, cache_end - cache,
			      prefix,
			      select_mask, clear_mask,
			      pl, ret);
	strbuf_setlen(prefix, prefix->len - 1);
	return rc;
}

/*
 * Traverse the index, find every entry that matches according to
 * o->pl.  Do "ce_flags &= ~clear_mask" on those entries.  Return the
 * number of traversed entries.
 *
 * If select_mask is non-zero, only entries whose ce_flags has one of
 * those bits enabled are traversed.
 *
 * cache	: pointer to an index entry
 * prefix_len	: an offset to its path
 *
 * The current path ("prefix") including the trailing '/' is
 *   cache[0]->name[0..(prefix_len-1)]
 * Top level path has prefix_len zero.
 */
static int clear_ce_flags_1(struct index_state *istate,
			    struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct pattern_list *pl,
			    enum pattern_match_result default_match)
{
	struct cache_entry **cache_end = cache + nr;

	/*
	 * Process all entries that have the given prefix and meet
	 * select_mask condition
	 */
	while (cache != cache_end) {
		struct cache_entry *ce = *cache;
		const char *name, *slash;
		int len, dtype;
		enum pattern_match_result ret;

		if (select_mask && !(ce->ce_flags & select_mask)) {
			cache++;
			continue;
		}

		if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len))
			break;

		name = ce->name + prefix->len;
		slash = strchr(name, '/');

		/* If it's a directory, try whole directory match first */
		if (slash) {
			int processed;

			len = slash - name;
			strbuf_add(prefix, name, len);

			processed = clear_ce_flags_dir(istate, cache, cache_end - cache,
						       prefix,
						       prefix->buf + prefix->len - len,
						       select_mask, clear_mask,
						       pl, default_match);

			/* clear_c_f_dir eats a whole dir already? */
			if (processed) {
				cache += processed;
				strbuf_setlen(prefix, prefix->len - len);
				continue;
			}

			strbuf_addch(prefix, '/');
			cache += clear_ce_flags_1(istate, cache, cache_end - cache,
						  prefix,
						  select_mask, clear_mask, pl,
						  default_match);
			strbuf_setlen(prefix, prefix->len - len - 1);
			continue;
		}

		/* Non-directory */
		dtype = ce_to_dtype(ce);
		ret = path_matches_pattern_list(ce->name,
						ce_namelen(ce),
						name, &dtype, pl, istate);
		if (ret == UNDECIDED)
			ret = default_match;
		if (ret == MATCHED)
			ce->ce_flags &= ~clear_mask;
		cache++;
	}
	return nr - (cache_end - cache);
}

static int clear_ce_flags(struct index_state *istate,
			  int select_mask, int clear_mask,
			  struct pattern_list *pl)
{
	static struct strbuf prefix = STRBUF_INIT;

	strbuf_reset(&prefix);

	return clear_ce_flags_1(istate,
				istate->cache,
				istate->cache_nr,
				&prefix,
				select_mask, clear_mask,
				pl, 0);
}

/*
 * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout
 */
static void mark_new_skip_worktree(struct pattern_list *pl,
				   struct index_state *istate,
				   int select_flag, int skip_wt_flag)
{
	int i;

	/*
	 * 1. Pretend the narrowest worktree: only unmerged entries
	 * are checked out
	 */
	for (i = 0; i < istate->cache_nr; i++) {
		struct cache_entry *ce = istate->cache[i];

		if (select_flag && !(ce->ce_flags & select_flag))
			continue;

		if (!ce_stage(ce) && !(ce->ce_flags & CE_CONFLICTED))
			ce->ce_flags |= skip_wt_flag;
		else
			ce->ce_flags &= ~skip_wt_flag;
	}

	/*
	 * 2. Widen worktree according to sparse-checkout file.
	 * Matched entries will have skip_wt_flag cleared (i.e. "in")
	 */
	clear_ce_flags(istate, select_flag, skip_wt_flag, pl);
}

"in")1450 */1451clear_ce_flags(istate, select_flag, skip_wt_flag, pl);1452}14531454static intverify_absent(const struct cache_entry *,1455enum unpack_trees_error_types,1456struct unpack_trees_options *);1457/*1458 * N-way merge "len" trees. Returns 0 on success, -1 on failure to manipulate the1459 * resulting index, -2 on failure to reflect the changes to the work tree.1460 *1461 * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally1462 */1463intunpack_trees(unsigned len,struct tree_desc *t,struct unpack_trees_options *o)1464{1465int i, ret;1466static struct cache_entry *dfc;1467struct pattern_list pl;14681469if(len > MAX_UNPACK_TREES)1470die("unpack_trees takes at most%dtrees", MAX_UNPACK_TREES);14711472trace_performance_enter();1473memset(&pl,0,sizeof(pl));1474if(!core_apply_sparse_checkout || !o->update)1475 o->skip_sparse_checkout =1;1476if(!o->skip_sparse_checkout) {1477char*sparse =git_pathdup("info/sparse-checkout");1478if(add_patterns_from_file_to_list(sparse,"",0, &pl, NULL) <0)1479 o->skip_sparse_checkout =1;1480else1481 o->pl = &pl;1482free(sparse);1483}14841485memset(&o->result,0,sizeof(o->result));1486 o->result.initialized =1;1487 o->result.timestamp.sec = o->src_index->timestamp.sec;1488 o->result.timestamp.nsec = o->src_index->timestamp.nsec;1489 o->result.version = o->src_index->version;1490if(!o->src_index->split_index) {1491 o->result.split_index = NULL;1492}else if(o->src_index == o->dst_index) {1493/*1494 * o->dst_index (and thus o->src_index) will be discarded1495 * and overwritten with o->result at the end of this function,1496 * so just use src_index's split_index to avoid having to1497 * create a new one.1498 */1499 o->result.split_index = o->src_index->split_index;1500 o->result.split_index->refcount++;1501}else{1502 o->result.split_index =init_split_index(&o->result);1503}1504oidcpy(&o->result.oid, &o->src_index->oid);1505 o->merge_size = len;1506mark_all_ce_unused(o->src_index);15071508/*1509 * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries1510 */1511if(!o->skip_sparse_checkout)1512mark_new_skip_worktree(o->pl, o->src_index,0, CE_NEW_SKIP_WORKTREE);15131514if(!dfc)1515 dfc =xcalloc(1,cache_entry_size(0));1516 o->df_conflict_entry = dfc;15171518if(len) {1519const char*prefix = o->prefix ? o->prefix :"";1520struct traverse_info info;15211522setup_traverse_info(&info, prefix);1523 info.fn = unpack_callback;1524 info.data = o;1525 info.show_all_errors = o->show_all_errors;1526 info.pathspec = o->pathspec;15271528if(o->prefix) {1529/*1530 * Unpack existing index entries that sort before the1531 * prefix the tree is spliced into. Note that o->merge1532 * is always true in this case.1533 */1534while(1) {1535struct cache_entry *ce =next_cache_entry(o);1536if(!ce)1537break;1538if(ce_in_traverse_path(ce, &info))1539break;1540if(unpack_index_entry(ce, o) <0)1541goto return_failed;1542}1543}15441545trace_performance_enter();1546 ret =traverse_trees(o->src_index, len, t, &info);1547trace_performance_leave("traverse_trees");1548if(ret <0)1549goto return_failed;1550}15511552/* Any left-over entries in the index? 
	/* Any left-over entries in the index? */
	if (o->merge) {
		while (1) {
			struct cache_entry *ce = next_cache_entry(o);
			if (!ce)
				break;
			if (unpack_index_entry(ce, o) < 0)
				goto return_failed;
		}
	}
	mark_all_ce_unused(o->src_index);

	if (o->trivial_merges_only && o->nontrivial_merge) {
		ret = unpack_failed(o, "Merge requires file-level merging");
		goto done;
	}

	if (!o->skip_sparse_checkout) {
		int empty_worktree = 1;

		/*
		 * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #1
		 * If they will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE
		 * so apply_sparse_checkout() won't attempt to remove it from worktree
		 */
		mark_new_skip_worktree(o->pl, &o->result, CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);

		ret = 0;
		for (i = 0; i < o->result.cache_nr; i++) {
			struct cache_entry *ce = o->result.cache[i];

			/*
			 * Entries marked with CE_ADDED in merged_entry() do not have
			 * verify_absent() check (the check is effectively disabled
			 * because CE_NEW_SKIP_WORKTREE is set unconditionally).
			 *
			 * Do the real check now because we have had
			 * correct CE_NEW_SKIP_WORKTREE
			 */
			if (ce->ce_flags & CE_ADDED &&
			    verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}

			if (apply_sparse_checkout(&o->result, ce, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}
			if (!ce_skip_worktree(ce))
				empty_worktree = 0;

		}
		if (ret < 0)
			goto return_failed;
		/*
		 * Sparse checkout is meant to narrow down checkout area
		 * but it does not make sense to narrow down to empty working
		 * tree. This is usually a mistake in sparse checkout rules.
		 * Do not allow users to do that.
		 */
		if (o->result.cache_nr && empty_worktree) {
			ret = unpack_failed(o, "Sparse checkout leaves no entry on working directory");
			goto done;
		}
	}

	ret = check_updates(o) ? (-2) : 0;
	if (o->dst_index) {
		move_index_extensions(&o->result, o->src_index);
		if (!ret) {
			if (git_env_bool("GIT_TEST_CHECK_CACHE_TREE", 0))
				cache_tree_verify(the_repository, &o->result);
			if (!o->result.cache_tree)
				o->result.cache_tree = cache_tree();
			if (!cache_tree_fully_valid(o->result.cache_tree))
				cache_tree_update(&o->result,
						  WRITE_TREE_SILENT |
						  WRITE_TREE_REPAIR);
		}

		o->result.updated_workdir = 1;
		discard_index(o->dst_index);
		*o->dst_index = o->result;
	} else {
		discard_index(&o->result);
	}
	o->src_index = NULL;

done:
	trace_performance_leave("unpack_trees");
	clear_pattern_list(&pl);
	return ret;

return_failed:
	if (o->show_all_errors)
		display_error_msgs(o);
	mark_all_ce_unused(o->src_index);
	ret = unpack_failed(o, NULL);
	if (o->exiting_early)
		ret = 0;
	goto done;
}

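/*
 * A minimal sketch of how a caller typically drives unpack_trees(); the
 * exact option setup varies per command and this is illustrative only
 * (it assumes "tree" has already been parsed and the index is loaded):
 *
 *	struct unpack_trees_options opts;
 *	struct tree_desc desc;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.head_idx = 1;
 *	opts.merge = 1;
 *	opts.update = 1;
 *	opts.fn = oneway_merge;
 *	opts.src_index = &the_index;
 *	opts.dst_index = &the_index;
 *	init_tree_desc(&desc, tree->buffer, tree->size);
 *	if (unpack_trees(1, &desc, &opts))
 *		die("unpack failed");
 */
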
/* Here come the merge functions */

static int reject_merge(const struct cache_entry *ce,
			struct unpack_trees_options *o)
{
	return add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
}

static int same(const struct cache_entry *a, const struct cache_entry *b)
{
	if (!!a != !!b)
		return 0;
	if (!a && !b)
		return 1;
	if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED)
		return 0;
	return a->ce_mode == b->ce_mode &&
	       oideq(&a->oid, &b->oid);
}


/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static int verify_uptodate_1(const struct cache_entry *ce,
			     struct unpack_trees_options *o,
			     enum unpack_trees_error_types error_type)
{
	struct stat st;

	if (o->index_only)
		return 0;

	/*
	 * CE_VALID and CE_SKIP_WORKTREE cheat, we better check again
	 * if this entry is truly up-to-date because this file may be
	 * overwritten.
	 */
	if ((ce->ce_flags & CE_VALID) || ce_skip_worktree(ce))
		; /* keep checking */
	else if (o->reset || ce_uptodate(ce))
		return 0;

	if (!lstat(ce->name, &st)) {
		int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE;
		unsigned changed = ie_match_stat(o->src_index, ce, &st, flags);

		if (submodule_from_ce(ce)) {
			int r = check_submodule_move_head(ce,
				"HEAD", oid_to_hex(&ce->oid), o);
			if (r)
				return add_rejected_path(o, error_type, ce->name);
			return 0;
		}

		if (!changed)
			return 0;
		/*
		 * Historic default policy was to allow submodule to be out
		 * of sync wrt the superproject index.  If the submodule was
		 * not considered interesting above, we don't care here.
		 */
		if (S_ISGITLINK(ce->ce_mode))
			return 0;

		errno = 0;
	}
	if (errno == ENOENT)
		return 0;
	return add_rejected_path(o, error_type, ce->name);
}

int verify_uptodate(const struct cache_entry *ce,
		    struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);
}

static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o)
{
	return verify_uptodate_1(ce, o, ERROR_SPARSE_NOT_UPTODATE_FILE);
}

/*
 * TODO: We should actually invalidate o->result, not src_index [1].
 * But since cache tree and untracked cache both are not copied to
 * o->result until unpacking is complete, we invalidate them on
 * src_index instead with the assumption that they will be copied to
 * dst_index at the end.
 *
 * [1] src_index->cache_tree is also used in unpack_callback() so if
 * we invalidate o->result, we need to update it to use
 * o->result.cache_tree as well.
 */
static void invalidate_ce_path(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	if (!ce)
		return;
	cache_tree_invalidate_path(o->src_index, ce->name);
	untracked_cache_invalidate_path(o->src_index, ce->name, 1);
}

/*
 * Check that checking out ce->sha1 in subdir ce->name is not
 * going to overwrite any working files.
 *
 * Currently, git does not checkout subprojects during a superproject
 * checkout, so it is not going to overwrite anything.
 */
static int verify_clean_submodule(const char *old_sha1,
				  const struct cache_entry *ce,
				  struct unpack_trees_options *o)
{
	if (!submodule_from_ce(ce))
		return 0;

	return check_submodule_move_head(ce, old_sha1,
					 oid_to_hex(&ce->oid), o);
}

static int verify_clean_subdirectory(const struct cache_entry *ce,
				     struct unpack_trees_options *o)
{
	/*
	 * we are about to extract "ce->name"; we would not want to lose
	 * anything in the existing directory there.
	 */
	int namelen;
	int i;
	struct dir_struct d;
	char *pathbuf;
	int cnt = 0;

	if (S_ISGITLINK(ce->ce_mode)) {
		struct object_id oid;
		int sub_head = resolve_gitlink_ref(ce->name, "HEAD", &oid);
		/*
		 * If we are not going to update the submodule, then
		 * we don't care.
		 */
		if (!sub_head && oideq(&oid, &ce->oid))
			return 0;
		return verify_clean_submodule(sub_head ? NULL : oid_to_hex(&oid),
					      ce, o);
	}

	/*
	 * First let's make sure we do not have a local modification
	 * in that directory.
	 */
	namelen = ce_namelen(ce);
	for (i = locate_in_src_index(ce, o);
	     i < o->src_index->cache_nr;
	     i++) {
		struct cache_entry *ce2 = o->src_index->cache[i];
		int len = ce_namelen(ce2);
		if (len < namelen ||
		    strncmp(ce->name, ce2->name, namelen) ||
		    ce2->name[namelen] != '/')
			break;
		/*
		 * ce2->name is an entry in the subdirectory to be
		 * removed.
		 */
		if (!ce_stage(ce2)) {
			if (verify_uptodate(ce2, o))
				return -1;
			add_entry(o, ce2, CE_REMOVE, 0);
			invalidate_ce_path(ce, o);
			mark_ce_used(ce2, o);
		}
		cnt++;
	}

	/*
	 * Then we need to make sure that we do not lose a locally
	 * present file that is not ignored.
	 */
	pathbuf = xstrfmt("%.*s/", namelen, ce->name);

	memset(&d, 0, sizeof(d));
	if (o->dir)
		d.exclude_per_dir = o->dir->exclude_per_dir;
	i = read_directory(&d, o->src_index, pathbuf, namelen+1, NULL);
	if (i)
		return add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);
	free(pathbuf);
	return cnt;
}

/*
 * This gets called when there was no index entry for the tree entry 'dst',
 * but we found a file in the working tree that 'lstat()' said was fine,
 * and we're on a case-insensitive filesystem.
 *
 * See if we can find a case-insensitive match in the index that also
 * matches the stat information, and assume it's that other file!
 */
static int icase_exists(struct unpack_trees_options *o, const char *name, int len, struct stat *st)
{
	const struct cache_entry *src;

	src = index_file_exists(o->src_index, name, len, 1);
	return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);
}

static int check_ok_to_remove(const char *name, int len, int dtype,
			      const struct cache_entry *ce, struct stat *st,
			      enum unpack_trees_error_types error_type,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *result;

	/*
	 * It may be that the 'lstat()' succeeded even though
	 * target 'ce' was absent, because there is an old
	 * entry that is different only in case..
	 *
	 * Ignore that lstat() if it matches.
	 */
	if (ignore_case && icase_exists(o, name, len, st))
		return 0;

	if (o->dir &&
	    is_excluded(o->dir, o->src_index, name, &dtype))
		/*
		 * ce->name is explicitly excluded, so it is Ok to
		 * overwrite it.
		 */
		return 0;
	if (S_ISDIR(st->st_mode)) {
		/*
		 * We are checking out path "foo" and
		 * found "foo/." in the working tree.
		 * This is tricky -- if we have modified
		 * files that are in "foo/" we would lose
		 * them.
		 */
		if (verify_clean_subdirectory(ce, o) < 0)
			return -1;
		return 0;
	}

	/*
	 * The previous round may already have decided to
	 * delete this path, which is in a subdirectory that
	 * is being replaced with a blob.
	 */
	result = index_file_exists(&o->result, name, len, 0);
	if (result) {
		if (result->ce_flags & CE_REMOVE)
			return 0;
	}

	return add_rejected_path(o, error_type, name);
}

/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked, unless it is ignored.
 */
static int verify_absent_1(const struct cache_entry *ce,
			   enum unpack_trees_error_types error_type,
			   struct unpack_trees_options *o)
{
	int len;
	struct stat st;

	if (o->index_only || o->reset || !o->update)
		return 0;

	len = check_leading_path(ce->name, ce_namelen(ce));
	if (!len)
		return 0;
	else if (len > 0) {
		char *path;
		int ret;

		path = xmemdupz(ce->name, len);
		if (lstat(path, &st))
			ret = error_errno("cannot stat '%s'", path);
		else {
			if (submodule_from_ce(ce))
				ret = check_submodule_move_head(ce,
								oid_to_hex(&ce->oid),
								NULL, o);
			else
				ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL,
							 &st, error_type, o);
		}
		free(path);
		return ret;
	} else if (lstat(ce->name, &st)) {
		if (errno != ENOENT)
			return error_errno("cannot stat '%s'", ce->name);
		return 0;
	} else {
		if (submodule_from_ce(ce))
			return check_submodule_move_head(ce, oid_to_hex(&ce->oid),
							 NULL, o);

		return check_ok_to_remove(ce->name, ce_namelen(ce),
					  ce_to_dtype(ce), ce, &st,
					  error_type, o);
	}
}

static int verify_absent(const struct cache_entry *ce,
			 enum unpack_trees_error_types error_type,
			 struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_absent_1(ce, error_type, o);
}

static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types error_type,
				struct unpack_trees_options *o)
{
	enum unpack_trees_error_types orphaned_error = error_type;
	if (orphaned_error == ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN)
		orphaned_error = ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN;

	return verify_absent_1(ce, orphaned_error, o);
}

static int merged_entry(const struct cache_entry *ce,
			const struct cache_entry *old,
			struct unpack_trees_options *o)
{
	int update = CE_UPDATE;
	struct cache_entry *merge = dup_cache_entry(ce, &o->result);

	if (!old) {
		/*
		 * New index entries. In sparse checkout, the following
		 * verify_absent() will be delayed until after
		 * traverse_trees() finishes in unpack_trees(), then:
		 *
		 *  - CE_NEW_SKIP_WORKTREE will be computed correctly
		 *  - verify_absent() will be called again, this time with
		 *    the correct CE_NEW_SKIP_WORKTREE
		 *
		 * verify_absent() call here does nothing in sparse
		 * checkout (i.e. o->skip_sparse_checkout == 0)
		 */
		update |= CE_ADDED;
		merge->ce_flags |= CE_NEW_SKIP_WORKTREE;

		if (verify_absent(merge,
				  ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
			discard_cache_entry(merge);
			return -1;
		}
		invalidate_ce_path(merge, o);

		if (submodule_from_ce(ce)) {
			int ret = check_submodule_move_head(ce, NULL,
							    oid_to_hex(&ce->oid),
							    o);
			if (ret)
				return ret;
		}

	} else if (!(old->ce_flags & CE_CONFLICTED)) {
		/*
		 * See if we can re-use the old CE directly?
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on a match; otherwise
		 * we will end up overwriting local changes in the work tree.
		 */
		if (same(old, merge)) {
			copy_cache_entry(merge, old);
			update = 0;
		} else {
			if (verify_uptodate(old, o)) {
				discard_cache_entry(merge);
				return -1;
			}
			/* Migrate old flags over */
			update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);
			invalidate_ce_path(old, o);
		}

		if (submodule_from_ce(ce)) {
			int ret = check_submodule_move_head(ce, oid_to_hex(&old->oid),
							    oid_to_hex(&ce->oid),
							    o);
			if (ret)
				return ret;
		}
	} else {
		/*
		 * Previously unmerged entry left as an existence
		 * marker by read_index_unmerged();
		 */
		invalidate_ce_path(old, o);
	}

	do_add_entry(o, merge, update, CE_STAGEMASK);
	return 1;
}

static int deleted_entry(const struct cache_entry *ce,
			 const struct cache_entry *old,
			 struct unpack_trees_options *o)
{
	/* Did it exist in the index? */
	if (!old) {
		if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
			return -1;
		return 0;
	}
	if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o))
		return -1;
	add_entry(o, ce, CE_REMOVE, 0);
	invalidate_ce_path(ce, o);
	return 1;
}

static int keep_entry(const struct cache_entry *ce,
		      struct unpack_trees_options *o)
{
	add_entry(o, ce, 0, 0);
	if (ce_stage(ce))
		invalidate_ce_path(ce, o);
	return 1;
}

#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ce->ce_mode,
			oid_to_hex(&ce->oid),
			ce_stage(ce),
			ce->name);
}
#endif
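
/*
 * Note on the #<n> labels in threeway_merge() below: they appear to
 * follow the trivial-merge case numbering used by "git read-tree -m"
 * (see Documentation/git-read-tree.txt and
 * Documentation/technical/trivial-merge.txt).  stages[0] is the current
 * index entry, stages[o->head_idx] the head, stages[o->head_idx + 1]
 * the remote, and any earlier slots hold ancestor stages.
 */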

int threeway_merge(const struct cache_entry * const *stages,
		   struct unpack_trees_options *o)
{
	const struct cache_entry *index;
	const struct cache_entry *head;
	const struct cache_entry *remote = stages[o->head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	for (i = 1; i < o->head_idx; i++) {
		if (!stages[i] || stages[i] == o->df_conflict_entry)
			any_anc_missing = 1;
		else
			no_anc_exists = 0;
	}

	index = stages[0];
	head = stages[o->head_idx];

	if (head == o->df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == o->df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	/*
	 * First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < o->head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/*
	 * We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			return reject_merge(index, o);
		return merged_entry(remote, index, o);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head))
		return reject_merge(index, o);

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index, o);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index, o);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/*
	 * Under the "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (o->aggressive) {
		int head_deleted = !head;
		int remote_deleted = !remote;
		const struct cache_entry *ce = NULL;

		if (index)
			ce = index;
		else if (head)
			ce = head;
		else if (remote)
			ce = remote;
		else {
			for (i = 1; i < o->head_idx; i++) {
				if (stages[i] && stages[i] != o->df_conflict_entry) {
					ce = stages[i];
					break;
				}
			}
		}

		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index, o);
			if (ce && !head_deleted) {
				if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
					return -1;
			}
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index, o);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		if (verify_uptodate(index, o))
			return -1;
	}

	o->nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #10, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		for (i = 1; i < o->head_idx; i++) {
			if (stages[i] && stages[i] != o->df_conflict_entry) {
				keep_entry(stages[i], o);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head, o); }
	if (remote) { count += keep_entry(remote, o); }
	return count;
}
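
/*
 * Illustration of the "aggressive" rule above (made-up path): the
 * ancestor stage has "foo.c", both the head and the remote stages lack
 * it, and the index no longer carries an entry for it.  With
 * o->aggressive set, the path simply ends up absent from the result;
 * without it, the ancestor stage is kept as an unmerged entry for the
 * caller (historically git-merge-one-file) to resolve.
 */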

/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast-forward", favoring a successful merge
 * over a merge failure when it makes sense.  For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 *
 */
int twoway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *current = src[0];
	const struct cache_entry *oldtree = src[1];
	const struct cache_entry *newtree = src[2];

	if (o->merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     o->merge_size);

	if (oldtree == o->df_conflict_entry)
		oldtree = NULL;
	if (newtree == o->df_conflict_entry)
		newtree = NULL;

	if (current) {
		if (current->ce_flags & CE_CONFLICTED) {
			if (same(oldtree, newtree) || o->reset) {
				if (!newtree)
					return deleted_entry(current, current, o);
				else
					return merged_entry(newtree, current, o);
			}
			return reject_merge(current, o);
		} else if ((!oldtree && !newtree) || /* 4 and 5 */
			   (!oldtree && newtree &&
			    same(current, newtree)) || /* 6 and 7 */
			   (oldtree && newtree &&
			    same(oldtree, newtree)) || /* 14 and 15 */
			   (oldtree && newtree &&
			    !same(oldtree, newtree) && /* 18 and 19 */
			    same(current, newtree))) {
			return keep_entry(current, o);
		} else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current, o);
		} else if (oldtree && newtree &&
			   same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current, o);
		} else
			return reject_merge(current, o);
	}
	else if (newtree) {
		if (oldtree && !o->initial_checkout) {
			/*
			 * deletion of the path was staged;
			 */
			if (same(oldtree, newtree))
				return 1;
			return reject_merge(oldtree, o);
		}
		return merged_entry(newtree, current, o);
	}
	return deleted_entry(oldtree, current, o);
}

/*
 * Bind merge.
 *
 * Keep the index entries at stage0, collapse stage1 but make sure
 * stage0 does not have anything there.
 */
int bind_merge(const struct cache_entry * const *src,
	       struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a bind merge of %d trees",
			     o->merge_size);
	if (a && old)
		return o->quiet ? -1 :
			error(ERRORMSG(o, ERROR_BIND_OVERLAP),
			      super_prefixed(a->name),
			      super_prefixed(old->name));
	if (!a)
		return keep_entry(old, o);
	else
		return merged_entry(a, NULL, o);
}

/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
int oneway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     o->merge_size);

	if (!a || a == o->df_conflict_entry)
		return deleted_entry(old, old, o);

	if (old && same(old, a)) {
		int update = 0;
		if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old)) {
			struct stat st;
			if (lstat(old->name, &st) ||
			    ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE))
				update |= CE_UPDATE;
		}
		if (o->update && S_ISGITLINK(old->ce_mode) &&
		    should_update_submodules() && !verify_uptodate(old, o))
			update |= CE_UPDATE;
		add_entry(o, old, update, CE_STAGEMASK);
		return 0;
	}
	return merged_entry(a, old, o);
}
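
/*
 * Illustrative sketch (not compiled here; details abbreviated and only
 * roughly modeled on callers such as builtin/read-tree.c and
 * builtin/checkout.c): one of the merge functions above gets plugged
 * into unpack_trees() through the "fn" member of
 * struct unpack_trees_options, e.g.
 *
 *	struct tree_desc t[MAX_UNPACK_TREES];
 *	struct unpack_trees_options opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.src_index = &the_index;
 *	opts.dst_index = &the_index;
 *	opts.merge = 1;
 *	opts.update = 1;                // also touch the working tree
 *	opts.fn = twoway_merge;         // or oneway_/threeway_/bind_merge
 *	setup_unpack_trees_porcelain(&opts, "checkout");
 *
 *	... fill t[] via init_tree_desc() for each tree being read ...
 *
 *	if (unpack_trees(nr_trees, t, &opts))
 *		return -1;              // rejection already reported via opts.msgs
 */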