#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "repository.h"
#include "config.h"
#include "dir.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "progress.h"
#include "refs.h"
#include "attr.h"
#include "split-index.h"
#include "dir.h"
#include "submodule.h"
#include "submodule-config.h"
#include "fetch-object.h"

/*
 * Error messages expected by scripts out of plumbing commands such as
 * read-tree. Non-scripted Porcelain is not required to use these messages
 * and in fact is encouraged to reword them to better suit its particular
 * situation. See how "git checkout" and "git merge" replace
 * them using setup_unpack_trees_porcelain(), for example.
 */
static const char *unpack_plumbing_errors[NB_UNPACK_TREES_ERROR_TYPES] = {
	/* ERROR_WOULD_OVERWRITE */
	"Entry '%s' would be overwritten by merge. Cannot merge.",

	/* ERROR_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot merge.",

	/* ERROR_NOT_UPTODATE_DIR */
	"Updating '%s' would lose untracked files in it",

	/* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */
	"Untracked working tree file '%s' would be overwritten by merge.",

	/* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */
	"Untracked working tree file '%s' would be removed by merge.",

	/* ERROR_BIND_OVERLAP */
	"Entry '%s' overlaps with '%s'. Cannot bind.",

	/* ERROR_SPARSE_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot update sparse checkout.",

	/* ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN */
	"Working tree file '%s' would be overwritten by sparse checkout update.",

	/* ERROR_WOULD_LOSE_ORPHANED_REMOVED */
	"Working tree file '%s' would be removed by sparse checkout update.",

	/* ERROR_WOULD_LOSE_SUBMODULE */
	"Submodule '%s' cannot checkout new HEAD.",
};

#define ERRORMSG(o,type) \
	( ((o) && (o)->msgs[(type)]) \
	  ? ((o)->msgs[(type)]) \
	  : (unpack_plumbing_errors[(type)]) )
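
/*
 * Prepend the "git --super-prefix" value (if any) to a path, so that
 * error messages produced while operating inside a submodule show the
 * path relative to the superproject's top-level directory.
 */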
static const char *super_prefixed(const char *path)
{
	/*
	 * It is necessary and sufficient to have two static buffers
	 * here, as the return value of this function is fed to
	 * error() using the unpack_*_errors[] templates we see above.
	 */
	static struct strbuf buf[2] = {STRBUF_INIT, STRBUF_INIT};
	static int super_prefix_len = -1;
	static unsigned idx = ARRAY_SIZE(buf) - 1;

	if (super_prefix_len < 0) {
		const char *super_prefix = get_super_prefix();
		if (!super_prefix) {
			super_prefix_len = 0;
		} else {
			int i;
			for (i = 0; i < ARRAY_SIZE(buf); i++)
				strbuf_addstr(&buf[i], super_prefix);
			super_prefix_len = buf[0].len;
		}
	}

	if (!super_prefix_len)
		return path;

	if (++idx >= ARRAY_SIZE(buf))
		idx = 0;

	strbuf_setlen(&buf[idx], super_prefix_len);
	strbuf_addstr(&buf[idx], path);

	return buf[idx].buf;
}
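
/*
 * Fill opts->msgs with porcelain-friendly versions of the unpack-trees
 * error messages, tailored to the given command ("checkout", "merge",
 * or anything else).
 */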
void setup_unpack_trees_porcelain(struct unpack_trees_options *opts,
				  const char *cmd)
{
	int i;
	const char **msgs = opts->msgs;
	const char *msg;

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by checkout:\n%%s"
			  "Please commit your changes or stash them before you switch branches.")
		      : _("Your local changes to the following files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by merge:\n%%s"
			  "Please commit your changes or stash them before you merge.")
		      : _("Your local changes to the following files would be overwritten by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("Your local changes to the following files would be overwritten by %s:\n%%s"
			  "Please commit your changes or stash them before you %s.")
		      : _("Your local changes to the following files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] =
		xstrfmt(msg, cmd, cmd);

	msgs[ERROR_NOT_UPTODATE_DIR] =
		_("Updating the following directories would lose untracked files in them:\n%s");

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be removed by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be removed by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be removed by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be removed by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] = xstrfmt(msg, cmd, cmd);

	if (!strcmp(cmd, "checkout"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by checkout:\n%%s"
			  "Please move or remove them before you switch branches.")
		      : _("The following untracked working tree files would be overwritten by checkout:\n%%s");
	else if (!strcmp(cmd, "merge"))
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by merge:\n%%s"
			  "Please move or remove them before you merge.")
		      : _("The following untracked working tree files would be overwritten by merge:\n%%s");
	else
		msg = advice_commit_before_merge
		      ? _("The following untracked working tree files would be overwritten by %s:\n%%s"
			  "Please move or remove them before you %s.")
		      : _("The following untracked working tree files would be overwritten by %s:\n%%s");
	msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] = xstrfmt(msg, cmd, cmd);

	/*
	 * Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we
	 * cannot easily display it as a list.
	 */
	msgs[ERROR_BIND_OVERLAP] = _("Entry '%s' overlaps with '%s'. Cannot bind.");

	msgs[ERROR_SPARSE_NOT_UPTODATE_FILE] =
		_("Cannot update sparse checkout: the following entries are not up to date:\n%s");
	msgs[ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN] =
		_("The following working tree files would be overwritten by sparse checkout update:\n%s");
	msgs[ERROR_WOULD_LOSE_ORPHANED_REMOVED] =
		_("The following working tree files would be removed by sparse checkout update:\n%s");
	msgs[ERROR_WOULD_LOSE_SUBMODULE] =
		_("Cannot update submodule:\n%s");

	opts->show_all_errors = 1;
	/* rejected paths may not have a static buffer */
	for (i = 0; i < ARRAY_SIZE(opts->unpack_rejects); i++)
		opts->unpack_rejects[i].strdup_strings = 1;
}

static int do_add_entry(struct unpack_trees_options *o, struct cache_entry *ce,
			unsigned int set, unsigned int clear)
{
	clear |= CE_HASHED;

	if (set & CE_REMOVE)
		set |= CE_WT_REMOVE;

	ce->ce_flags = (ce->ce_flags & ~clear) | set;
	return add_index_entry(&o->result, ce,
			       ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);
}

static struct cache_entry *dup_entry(const struct cache_entry *ce)
{
	unsigned int size = ce_size(ce);
	struct cache_entry *new = xmalloc(size);

	memcpy(new, ce, size);
	return new;
}

static void add_entry(struct unpack_trees_options *o,
		      const struct cache_entry *ce,
		      unsigned int set, unsigned int clear)
{
	do_add_entry(o, dup_entry(ce), set, clear);
}

/*
 * Add an error message on path <path>
 * corresponding to the type <e> with the message <msg>,
 * indicating whether it should be displayed in porcelain or not.
 */
static int add_rejected_path(struct unpack_trees_options *o,
			     enum unpack_trees_error_types e,
			     const char *path)
{
	if (!o->show_all_errors)
		return error(ERRORMSG(o, e), super_prefixed(path));

	/*
	 * Otherwise, insert in a list for future display by
	 * display_error_msgs()
	 */
	string_list_append(&o->unpack_rejects[e], path);
	return -1;
}

/*
 * display all the error messages stored in a nice way
 */
static void display_error_msgs(struct unpack_trees_options *o)
{
	int e, i;
	int something_displayed = 0;
	for (e = 0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) {
		struct string_list *rejects = &o->unpack_rejects[e];
		if (rejects->nr > 0) {
			struct strbuf path = STRBUF_INIT;
			something_displayed = 1;
			for (i = 0; i < rejects->nr; i++)
				strbuf_addf(&path, "\t%s\n", rejects->items[i].string);
			error(ERRORMSG(o, e), super_prefixed(path.buf));
			strbuf_release(&path);
		}
		string_list_clear(rejects, 0);
	}
	if (something_displayed)
		fprintf(stderr, _("Aborting\n"));
}
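
/*
 * Ask whether the submodule's HEAD could be moved from old_id to
 * new_id (dry run only); record a rejected path if it cannot.
 */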
static int check_submodule_move_head(const struct cache_entry *ce,
				     const char *old_id,
				     const char *new_id,
				     struct unpack_trees_options *o)
{
	unsigned flags = SUBMODULE_MOVE_HEAD_DRY_RUN;
	const struct submodule *sub = submodule_from_ce(ce);

	if (!sub)
		return 0;

	if (o->reset)
		flags |= SUBMODULE_MOVE_HEAD_FORCE;

	if (submodule_move_head(ce->name, old_id, new_id, flags))
		return o->gently ? -1 :
			add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name);
	return 0;
}

/*
 * Perform the loading of the repository's gitmodules file. This function is
 * used by 'check_updates()' to perform loading of the gitmodules file in two
 * different situations:
 * (1) before removing entries from the working tree if the gitmodules file has
 *     been marked for removal. This situation is specified by 'state' == NULL.
 * (2) before checking out entries to the working tree if the gitmodules file
 *     has been marked for update. This situation is specified by 'state' != NULL.
 */
static void load_gitmodules_file(struct index_state *index,
				 struct checkout *state)
{
	int pos = index_name_pos(index, GITMODULES_FILE, strlen(GITMODULES_FILE));

	if (pos >= 0) {
		struct cache_entry *ce = index->cache[pos];
		if (!state && ce->ce_flags & CE_WT_REMOVE) {
			repo_read_gitmodules(the_repository);
		} else if (state && (ce->ce_flags & CE_UPDATE)) {
			submodule_free();
			checkout_entry(ce, state, NULL);
			repo_read_gitmodules(the_repository);
		}
	}
}

/*
 * Unlink the last component and schedule the leading directories for
 * removal, such that empty directories get removed.
 */
static void unlink_entry(const struct cache_entry *ce)
{
	const struct submodule *sub = submodule_from_ce(ce);
	if (sub) {
		/* state.force is set at the caller. */
		submodule_move_head(ce->name, "HEAD", NULL,
				    SUBMODULE_MOVE_HEAD_FORCE);
	}
	if (!check_leading_path(ce->name, ce_namelen(ce)))
		return;
	if (remove_or_warn(ce->ce_mode, ce->name))
		return;
	schedule_dir_for_removal(ce->name, ce_namelen(ce));
}

static struct progress *get_progress(struct unpack_trees_options *o)
{
	unsigned cnt = 0, total = 0;
	struct index_state *index = &o->result;

	if (!o->update || !o->verbose_update)
		return NULL;

	for (; cnt < index->cache_nr; cnt++) {
		const struct cache_entry *ce = index->cache[cnt];
		if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE))
			total++;
	}

	return start_delayed_progress(_("Checking out files"), total);
}

static int check_updates(struct unpack_trees_options *o)
{
	unsigned cnt = 0;
	int errs = 0;
	struct progress *progress = NULL;
	struct index_state *index = &o->result;
	struct checkout state = CHECKOUT_INIT;
	int i;

	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;
	state.istate = index;

	progress = get_progress(o);

	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKOUT, index);

	if (should_update_submodules() && o->update && !o->dry_run)
		load_gitmodules_file(index, NULL);

	for (i = 0; i < index->cache_nr; i++) {
		const struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_WT_REMOVE) {
			display_progress(progress, ++cnt);
			if (o->update && !o->dry_run)
				unlink_entry(ce);
		}
	}
	remove_marked_cache_entries(index);
	remove_scheduled_dirs();

	if (should_update_submodules() && o->update && !o->dry_run)
		load_gitmodules_file(index, &state);

	enable_delayed_checkout(&state);
	if (repository_format_partial_clone && o->update && !o->dry_run) {
		/*
		 * Prefetch the objects that are to be checked out in the loop
		 * below.
		 */
		struct oid_array to_fetch = OID_ARRAY_INIT;
		int fetch_if_missing_store = fetch_if_missing;
		fetch_if_missing = 0;
		for (i = 0; i < index->cache_nr; i++) {
			struct cache_entry *ce = index->cache[i];
			if ((ce->ce_flags & CE_UPDATE) &&
			    !S_ISGITLINK(ce->ce_mode)) {
				if (!has_object_file(&ce->oid))
					oid_array_append(&to_fetch, &ce->oid);
			}
		}
		if (to_fetch.nr)
			fetch_objects(repository_format_partial_clone,
				      &to_fetch);
		fetch_if_missing = fetch_if_missing_store;
	}
	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_UPDATE) {
			if (ce->ce_flags & CE_WT_REMOVE)
				die("BUG: both update and delete flags are set on %s",
				    ce->name);
			display_progress(progress, ++cnt);
			ce->ce_flags &= ~CE_UPDATE;
			if (o->update && !o->dry_run) {
				errs |= checkout_entry(ce, &state, NULL);
			}
		}
	}
	stop_progress(&progress);
	errs |= finish_delayed_checkout(&state);
	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKIN, NULL);
	return errs != 0;
}

static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o);
static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types,
				struct unpack_trees_options *o);
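
/*
 * Apply the computed CE_NEW_SKIP_WORKTREE value to an entry: toggle
 * CE_SKIP_WORKTREE accordingly and schedule the matching worktree
 * update or removal when the entry moves into or out of the
 * sparse-checkout area.
 */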
static int apply_sparse_checkout(struct index_state *istate,
				 struct cache_entry *ce,
				 struct unpack_trees_options *o)
{
	int was_skip_worktree = ce_skip_worktree(ce);

	if (ce->ce_flags & CE_NEW_SKIP_WORKTREE)
		ce->ce_flags |= CE_SKIP_WORKTREE;
	else
		ce->ce_flags &= ~CE_SKIP_WORKTREE;
	if (was_skip_worktree != ce_skip_worktree(ce)) {
		ce->ce_flags |= CE_UPDATE_IN_BASE;
		istate->cache_changed |= CE_ENTRY_CHANGED;
	}

	/*
	 * if (!was_skip_worktree && !ce_skip_worktree()) {
	 *	This is perfectly normal. Move on;
	 * }
	 */

	/*
	 * Merge strategies may set CE_UPDATE|CE_REMOVE outside checkout
	 * area as a result of ce_skip_worktree() shortcuts in
	 * verify_absent() and verify_uptodate().
	 * Make sure they don't modify the worktree if they are already
	 * outside the checkout area.
	 */
	if (was_skip_worktree && ce_skip_worktree(ce)) {
		ce->ce_flags &= ~CE_UPDATE;

		/*
		 * By default, when CE_REMOVE is on, CE_WT_REMOVE is also
		 * on to get that file removed from both index and worktree.
		 * If that file is already outside the worktree area, don't
		 * bother removing it.
		 */
		if (ce->ce_flags & CE_REMOVE)
			ce->ce_flags &= ~CE_WT_REMOVE;
	}

	if (!was_skip_worktree && ce_skip_worktree(ce)) {
		/*
		 * If CE_UPDATE is set, verify_uptodate() must have been
		 * called already; also stat info may have been lost after
		 * merged_entry(), so calling verify_uptodate() again may fail.
		 */
		if (!(ce->ce_flags & CE_UPDATE) && verify_uptodate_sparse(ce, o))
			return -1;
		ce->ce_flags |= CE_WT_REMOVE;
		ce->ce_flags &= ~CE_UPDATE;
	}
	if (was_skip_worktree && !ce_skip_worktree(ce)) {
		if (verify_absent_sparse(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o))
			return -1;
		ce->ce_flags |= CE_UPDATE;
	}
	return 0;
}

static inline int call_unpack_fn(const struct cache_entry * const *src,
				 struct unpack_trees_options *o)
{
	int ret = o->fn(src, o);
	if (ret > 0)
		ret = 0;
	return ret;
}

static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o)
{
	ce->ce_flags |= CE_UNPACKED;

	if (o->cache_bottom < o->src_index->cache_nr &&
	    o->src_index->cache[o->cache_bottom] == ce) {
		int bottom = o->cache_bottom;
		while (bottom < o->src_index->cache_nr &&
		       o->src_index->cache[bottom]->ce_flags & CE_UNPACKED)
			bottom++;
		o->cache_bottom = bottom;
	}
}

static void mark_all_ce_unused(struct index_state *index)
{
	int i;
	for (i = 0; i < index->cache_nr; i++)
		index->cache[i]->ce_flags &= ~(CE_UNPACKED | CE_ADDED | CE_NEW_SKIP_WORKTREE);
}
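
/*
 * Return the position in the source index at which 'ce' is found, or
 * where it would be inserted if it is not there.
 */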
static int locate_in_src_index(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);
	if (pos < 0)
		pos = -1 - pos;
	return pos;
}

/*
 * We call unpack_index_entry() with an unmerged cache entry
 * only in diff-index, and it wants a single callback. Skip
 * the other unmerged entry with the same name.
 */
static void mark_ce_used_same_name(struct cache_entry *ce,
				   struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos;

	for (pos = locate_in_src_index(ce, o); pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		mark_ce_used(next, o);
	}
}

static struct cache_entry *next_cache_entry(struct unpack_trees_options *o)
{
	const struct index_state *index = o->src_index;
	int pos = o->cache_bottom;

	while (pos < index->cache_nr) {
		struct cache_entry *ce = index->cache[pos];
		if (!(ce->ce_flags & CE_UNPACKED))
			return ce;
		pos++;
	}
	return NULL;
}

static void add_same_unmerged(const struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);

	if (0 <= pos)
		die("programming error in a caller of mark_ce_used_same_name");
	for (pos = -pos - 1; pos < index->cache_nr; pos++) {
		struct cache_entry *next = index->cache[pos];
		if (len != ce_namelen(next) ||
		    memcmp(ce->name, next->name, len))
			break;
		add_entry(o, next, 0, 0);
		mark_ce_used(next, o);
	}
}

static int unpack_index_entry(struct cache_entry *ce,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	int ret;

	src[0] = ce;

	mark_ce_used(ce, o);
	if (ce_stage(ce)) {
		if (o->skip_unmerged) {
			add_entry(o, ce, 0, 0);
			return 0;
		}
	}
	ret = call_unpack_fn(src, o);
	if (ce_stage(ce))
		mark_ce_used_same_name(ce, o);
	return ret;
}

static int find_cache_pos(struct traverse_info *, const struct name_entry *);

static void restore_cache_bottom(struct traverse_info *info, int bottom)
{
	struct unpack_trees_options *o = info->data;

	if (o->diff_index_cached)
		return;
	o->cache_bottom = bottom;
}

static int switch_cache_bottom(struct traverse_info *info)
{
	struct unpack_trees_options *o = info->data;
	int ret, pos;

	if (o->diff_index_cached)
		return 0;
	ret = o->cache_bottom;
	pos = find_cache_pos(info->prev, &info->name);

	if (pos < -1)
		o->cache_bottom = -2 - pos;
	else if (pos < 0)
		o->cache_bottom = o->src_index->cache_nr;
	return ret;
}

static inline int are_same_oid(struct name_entry *name_j, struct name_entry *name_k)
{
	return name_j->oid && name_k->oid && !oidcmp(name_j->oid, name_k->oid);
}

static int traverse_trees_recursive(int n, unsigned long dirmask,
				    unsigned long df_conflicts,
				    struct name_entry *names,
				    struct traverse_info *info)
{
	int i, ret, bottom;
	int nr_buf = 0;
	struct tree_desc t[MAX_UNPACK_TREES];
	void *buf[MAX_UNPACK_TREES];
	struct traverse_info newinfo;
	struct name_entry *p;

	p = names;
	while (!p->mode)
		p++;

	newinfo = *info;
	newinfo.prev = info;
	newinfo.pathspec = info->pathspec;
	newinfo.name = *p;
	newinfo.pathlen += tree_entry_len(p) + 1;
	newinfo.df_conflicts |= df_conflicts;

	/*
	 * Fetch the tree from the ODB for each peer directory in the
	 * n commits.
	 *
	 * For 2- and 3-way traversals, we try to avoid hitting the
	 * ODB twice for the same OID. This should yield a nice speed
	 * up in checkouts and merges when the commits are similar.
	 *
	 * We don't bother doing the full O(n^2) search for larger n,
	 * because wider traversals don't happen that often and we
	 * avoid the search setup.
	 *
	 * When 2 peer OIDs are the same, we just copy the tree
	 * descriptor data. This implicitly borrows the buffer
	 * data from the earlier cell.
	 */
	for (i = 0; i < n; i++, dirmask >>= 1) {
		if (i > 0 && are_same_oid(&names[i], &names[i - 1]))
			t[i] = t[i - 1];
		else if (i > 1 && are_same_oid(&names[i], &names[i - 2]))
			t[i] = t[i - 2];
		else {
			const struct object_id *oid = NULL;
			if (dirmask & 1)
				oid = names[i].oid;
			buf[nr_buf++] = fill_tree_descriptor(t + i, oid);
		}
	}

	bottom = switch_cache_bottom(&newinfo);
	ret = traverse_trees(n, t, &newinfo);
	restore_cache_bottom(&newinfo, bottom);

	for (i = 0; i < nr_buf; i++)
		free(buf[i]);

	return ret;
}

/*
 * Compare the traverse-path to the cache entry without actually
 * having to generate the textual representation of the traverse
 * path.
 *
 * NOTE! This *only* compares up to the size of the traverse path
 * itself - the caller needs to do the final check for the cache
 * entry having more data at the end!
 */
static int do_compare_entry_piecewise(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;

	if (info->prev) {
		int cmp = do_compare_entry_piecewise(ce, info->prev,
						     &info->name);
		if (cmp)
			return cmp;
	}
	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	/* If ce_len < pathlen then we must have previously hit "name == directory" entry */
	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}

static int do_compare_entry(const struct cache_entry *ce,
			    const struct traverse_info *info,
			    const struct name_entry *n)
{
	int len, pathlen, ce_len;
	const char *ce_name;
	int cmp;

	/*
	 * If we have not precomputed the traverse path, it is quicker
	 * to avoid doing so. But if we have precomputed it,
	 * it is quicker to use the precomputed version.
	 */
	if (!info->traverse_path)
		return do_compare_entry_piecewise(ce, info, n);

	cmp = strncmp(ce->name, info->traverse_path, info->pathlen);
	if (cmp)
		return cmp;

	pathlen = info->pathlen;
	ce_len = ce_namelen(ce);

	if (ce_len < pathlen)
		return -1;

	ce_len -= pathlen;
	ce_name = ce->name + pathlen;

	len = tree_entry_len(n);
	return df_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode);
}

static int compare_entry(const struct cache_entry *ce, const struct traverse_info *info, const struct name_entry *n)
{
	int cmp = do_compare_entry(ce, info, n);
	if (cmp)
		return cmp;

	/*
	 * Even if the beginning compared identically, the ce should
	 * compare as bigger than a directory leading up to it!
	 */
	return ce_namelen(ce) > traverse_path_len(info, n);
}
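
/*
 * Is the cache entry covered by the directory path that the traversal
 * is currently descending into?
 */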
static int ce_in_traverse_path(const struct cache_entry *ce,
			       const struct traverse_info *info)
{
	if (!info->prev)
		return 1;
	if (do_compare_entry(ce, info->prev, &info->name))
		return 0;
	/*
	 * If ce (blob) is the same name as the path (which is a tree
	 * we will be descending into), it won't be inside it.
	 */
	return (info->pathlen < ce_namelen(ce));
}

static struct cache_entry *create_ce_entry(const struct traverse_info *info, const struct name_entry *n, int stage)
{
	int len = traverse_path_len(info, n);
	struct cache_entry *ce = xcalloc(1, cache_entry_size(len));

	ce->ce_mode = create_ce_mode(n->mode);
	ce->ce_flags = create_ce_flags(stage);
	ce->ce_namelen = len;
	oidcpy(&ce->oid, n->oid);
	make_traverse_path(ce->name, info, n);

	return ce;
}

static int unpack_nondirectories(int n, unsigned long mask,
				 unsigned long dirmask,
				 struct cache_entry **src,
				 const struct name_entry *names,
				 const struct traverse_info *info)
{
	int i;
	struct unpack_trees_options *o = info->data;
	unsigned long conflicts = info->df_conflicts | dirmask;

	/* Do we have *only* directories? Nothing to do */
	if (mask == dirmask && !src[0])
		return 0;

	/*
	 * Ok, we've filled in up to any potential index entry in src[0],
	 * now do the rest.
	 */
	for (i = 0; i < n; i++) {
		int stage;
		unsigned int bit = 1ul << i;
		if (conflicts & bit) {
			src[i + o->merge] = o->df_conflict_entry;
			continue;
		}
		if (!(mask & bit))
			continue;
		if (!o->merge)
			stage = 0;
		else if (i + 1 < o->head_idx)
			stage = 1;
		else if (i + 1 > o->head_idx)
			stage = 3;
		else
			stage = 2;
		src[i + o->merge] = create_ce_entry(info, names + i, stage);
	}

	if (o->merge) {
		int rc = call_unpack_fn((const struct cache_entry * const *)src,
					o);
		for (i = 0; i < n; i++) {
			struct cache_entry *ce = src[i + o->merge];
			if (ce != o->df_conflict_entry)
				free(ce);
		}
		return rc;
	}

	for (i = 0; i < n; i++)
		if (src[i] && src[i] != o->df_conflict_entry)
			if (do_add_entry(o, src[i], 0, 0))
				return -1;

	return 0;
}

static int unpack_failed(struct unpack_trees_options *o, const char *message)
{
	discard_index(&o->result);
	if (!o->gently && !o->exiting_early) {
		if (message)
			return error("%s", message);
		return -1;
	}
	return -1;
}

/*
 * The tree traversal is looking at name p. If we have a matching entry,
 * return it. If name p is a directory in the index, do not return
 * anything, as we will want to match it when the traversal descends into
 * the directory.
 */
static int find_cache_pos(struct traverse_info *info,
			  const struct name_entry *p)
{
	int pos;
	struct unpack_trees_options *o = info->data;
	struct index_state *index = o->src_index;
	int pfxlen = info->pathlen;
	int p_len = tree_entry_len(p);

	for (pos = o->cache_bottom; pos < index->cache_nr; pos++) {
		const struct cache_entry *ce = index->cache[pos];
		const char *ce_name, *ce_slash;
		int cmp, ce_len;

		if (ce->ce_flags & CE_UNPACKED) {
			/*
			 * cache_bottom entry is already unpacked, so
			 * we can never match it; don't check it
			 * again.
			 */
			if (pos == o->cache_bottom)
				++o->cache_bottom;
			continue;
		}
		if (!ce_in_traverse_path(ce, info)) {
			/*
			 * Check if we can skip future cache checks
			 * (because we're already past all possible
			 * entries in the traverse path).
			 */
			if (info->traverse_path) {
				if (strncmp(ce->name, info->traverse_path,
					    info->pathlen) > 0)
					break;
			}
			continue;
		}
		ce_name = ce->name + pfxlen;
		ce_slash = strchr(ce_name, '/');
		if (ce_slash)
			ce_len = ce_slash - ce_name;
		else
			ce_len = ce_namelen(ce) - pfxlen;
		cmp = name_compare(p->path, p_len, ce_name, ce_len);
		/*
		 * Exact match; if we have a directory we need to
		 * delay returning it.
		 */
		if (!cmp)
			return ce_slash ? -2 - pos : pos;
		if (0 < cmp)
			continue; /* keep looking */
		/*
		 * ce_name sorts after p->path; could it be that we
		 * have files under p->path directory in the index?
		 * E.g. ce_name == "t-i", and p->path == "t"; we may
		 * have "t/a" in the index.
		 */
		if (p_len < ce_len && !memcmp(ce_name, p->path, p_len) &&
		    ce_name[p_len] < '/')
			continue; /* keep looking */
		break;
	}
	return -1;
}

static struct cache_entry *find_cache_entry(struct traverse_info *info,
					    const struct name_entry *p)
{
	int pos = find_cache_pos(info, p);
	struct unpack_trees_options *o = info->data;

	if (0 <= pos)
		return o->src_index->cache[pos];
	else
		return NULL;
}

static void debug_path(struct traverse_info *info)
{
	if (info->prev) {
		debug_path(info->prev);
		if (*info->prev->name.path)
			putchar('/');
	}
	printf("%s", info->name.path);
}

static void debug_name_entry(int i, struct name_entry *n)
{
	printf("ent#%d %06o %s\n", i,
	       n->path ? n->mode : 0,
	       n->path ? n->path : "(missing)");
}

static void debug_unpack_callback(int n,
				  unsigned long mask,
				  unsigned long dirmask,
				  struct name_entry *names,
				  struct traverse_info *info)
{
	int i;
	printf("* unpack mask %lu, dirmask %lu, cnt %d ",
	       mask, dirmask, n);
	debug_path(info);
	putchar('\n');
	for (i = 0; i < n; i++)
		debug_name_entry(i, names + i);
}

static int unpack_callback(int n, unsigned long mask, unsigned long dirmask, struct name_entry *names, struct traverse_info *info)
{
	struct cache_entry *src[MAX_UNPACK_TREES + 1] = { NULL, };
	struct unpack_trees_options *o = info->data;
	const struct name_entry *p = names;

	/* Find first entry with a real name (we could use "mask" too) */
	while (!p->mode)
		p++;

	if (o->debug_unpack)
		debug_unpack_callback(n, mask, dirmask, names, info);

	/* Are we supposed to look at the index too? */
	if (o->merge) {
		while (1) {
			int cmp;
			struct cache_entry *ce;

			if (o->diff_index_cached)
				ce = next_cache_entry(o);
			else
				ce = find_cache_entry(info, p);

			if (!ce)
				break;
			cmp = compare_entry(ce, info, p);
			if (cmp < 0) {
				if (unpack_index_entry(ce, o) < 0)
					return unpack_failed(o, NULL);
				continue;
			}
			if (!cmp) {
				if (ce_stage(ce)) {
					/*
					 * If we skip unmerged index
					 * entries, we'll skip this
					 * entry *and* the tree
					 * entries associated with it!
					 */
					if (o->skip_unmerged) {
						add_same_unmerged(ce, o);
						return mask;
					}
				}
				src[0] = ce;
			}
			break;
		}
	}

	if (unpack_nondirectories(n, mask, dirmask, src, names, info) < 0)
		return -1;

	if (o->merge && src[0]) {
		if (ce_stage(src[0]))
			mark_ce_used_same_name(src[0], o);
		else
			mark_ce_used(src[0], o);
	}

	/* Now handle any directories.. */
	if (dirmask) {
		/* special case: "diff-index --cached" looking at a tree */
		if (o->diff_index_cached &&
		    n == 1 && dirmask == 1 && S_ISDIR(names->mode)) {
			int matches;
			matches = cache_tree_matches_traversal(o->src_index->cache_tree,
							       names, info);
			/*
			 * Everything under the name matches; skip the
			 * entire hierarchy. diff_index_cached codepath
			 * special cases D/F conflicts in such a way that
			 * it does not do any look-ahead, so this is safe.
			 */
			if (matches) {
				o->cache_bottom += matches;
				return mask;
			}
		}

		if (traverse_trees_recursive(n, dirmask, mask & ~dirmask,
					     names, info) < 0)
			return -1;
		return mask;
	}

	return mask;
}

static int clear_ce_flags_1(struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct exclude_list *el, int defval);

/* Whole directory matching */
static int clear_ce_flags_dir(struct cache_entry **cache, int nr,
			      struct strbuf *prefix,
			      char *basename,
			      int select_mask, int clear_mask,
			      struct exclude_list *el, int defval)
{
	struct cache_entry **cache_end;
	int dtype = DT_DIR;
	int ret = is_excluded_from_list(prefix->buf, prefix->len,
					basename, &dtype, el, &the_index);
	int rc;

	strbuf_addch(prefix, '/');

	/* If undecided, use matching result of parent dir in defval */
	if (ret < 0)
		ret = defval;

	for (cache_end = cache; cache_end != cache + nr; cache_end++) {
		struct cache_entry *ce = *cache_end;
		if (strncmp(ce->name, prefix->buf, prefix->len))
			break;
	}

	/*
	 * TODO: check el, if there are no patterns that may conflict
	 * with ret (iow, we know in advance the incl/excl
	 * decision for the entire directory), clear flag here without
	 * calling clear_ce_flags_1(). That function will call
	 * the expensive is_excluded_from_list() on every entry.
	 */
	rc = clear_ce_flags_1(cache, cache_end - cache,
			      prefix,
			      select_mask, clear_mask,
			      el, ret);
	strbuf_setlen(prefix, prefix->len - 1);
	return rc;
}

/*
 * Traverse the index, find every entry that matches according to
 * o->el. Do "ce_flags &= ~clear_mask" on those entries. Return the
 * number of traversed entries.
 *
 * If select_mask is non-zero, only entries whose ce_flags has one of
 * those bits enabled are traversed.
 *
 * cache	: pointer to an index entry
 * prefix_len	: an offset to its path
 *
 * The current path ("prefix") including the trailing '/' is
 *   cache[0]->name[0..(prefix_len-1)]
 * Top level path has prefix_len zero.
 */
static int clear_ce_flags_1(struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct exclude_list *el, int defval)
{
	struct cache_entry **cache_end = cache + nr;

	/*
	 * Process all entries that have the given prefix and meet
	 * select_mask condition
	 */
	while (cache != cache_end) {
		struct cache_entry *ce = *cache;
		const char *name, *slash;
		int len, dtype, ret;

		if (select_mask && !(ce->ce_flags & select_mask)) {
			cache++;
			continue;
		}

		if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len))
			break;

		name = ce->name + prefix->len;
		slash = strchr(name, '/');

		/* If it's a directory, try whole directory match first */
		if (slash) {
			int processed;

			len = slash - name;
			strbuf_add(prefix, name, len);

			processed = clear_ce_flags_dir(cache, cache_end - cache,
						       prefix,
						       prefix->buf + prefix->len - len,
						       select_mask, clear_mask,
						       el, defval);

			/* clear_c_f_dir eats a whole dir already? */
			if (processed) {
				cache += processed;
				strbuf_setlen(prefix, prefix->len - len);
				continue;
			}

			strbuf_addch(prefix, '/');
			cache += clear_ce_flags_1(cache, cache_end - cache,
						  prefix,
						  select_mask, clear_mask, el, defval);
			strbuf_setlen(prefix, prefix->len - len - 1);
			continue;
		}

		/* Non-directory */
		dtype = ce_to_dtype(ce);
		ret = is_excluded_from_list(ce->name, ce_namelen(ce),
					    name, &dtype, el, &the_index);
		if (ret < 0)
			ret = defval;
		if (ret > 0)
			ce->ce_flags &= ~clear_mask;
		cache++;
	}
	return nr - (cache_end - cache);
}

static int clear_ce_flags(struct cache_entry **cache, int nr,
			  int select_mask, int clear_mask,
			  struct exclude_list *el)
{
	static struct strbuf prefix = STRBUF_INIT;

	strbuf_reset(&prefix);

	return clear_ce_flags_1(cache, nr,
				&prefix,
				select_mask, clear_mask,
				el, 0);
}

/*
 * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout
 */
static void mark_new_skip_worktree(struct exclude_list *el,
				   struct index_state *the_index,
				   int select_flag, int skip_wt_flag)
{
	int i;

	/*
	 * 1. Pretend the narrowest worktree: only unmerged entries
	 * are checked out
	 */
	for (i = 0; i < the_index->cache_nr; i++) {
		struct cache_entry *ce = the_index->cache[i];

		if (select_flag && !(ce->ce_flags & select_flag))
			continue;

		if (!ce_stage(ce))
			ce->ce_flags |= skip_wt_flag;
		else
			ce->ce_flags &= ~skip_wt_flag;
	}

	/*
	 * 2. Widen worktree according to sparse-checkout file.
	 * Matched entries will have skip_wt_flag cleared (i.e. "in")
	 */
	clear_ce_flags(the_index->cache, the_index->cache_nr,
		       select_flag, skip_wt_flag, el);
}

static int verify_absent(const struct cache_entry *,
			 enum unpack_trees_error_types,
			 struct unpack_trees_options *);
/*
 * N-way merge "len" trees. Returns 0 on success, -1 on failure to manipulate the
 * resulting index, -2 on failure to reflect the changes to the work tree.
 *
 * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally
 */
int unpack_trees(unsigned len, struct tree_desc *t, struct unpack_trees_options *o)
{
	int i, ret;
	static struct cache_entry *dfc;
	struct exclude_list el;

	if (len > MAX_UNPACK_TREES)
		die("unpack_trees takes at most %d trees", MAX_UNPACK_TREES);

	memset(&el, 0, sizeof(el));
	if (!core_apply_sparse_checkout || !o->update)
		o->skip_sparse_checkout = 1;
	if (!o->skip_sparse_checkout) {
		char *sparse = git_pathdup("info/sparse-checkout");
		if (add_excludes_from_file_to_list(sparse, "", 0, &el, NULL) < 0)
			o->skip_sparse_checkout = 1;
		else
			o->el = &el;
		free(sparse);
	}

	memset(&o->result, 0, sizeof(o->result));
	o->result.initialized = 1;
	o->result.timestamp.sec = o->src_index->timestamp.sec;
	o->result.timestamp.nsec = o->src_index->timestamp.nsec;
	o->result.version = o->src_index->version;
	o->result.split_index = o->src_index->split_index;
	if (o->result.split_index)
		o->result.split_index->refcount++;
	hashcpy(o->result.sha1, o->src_index->sha1);
	o->merge_size = len;
	mark_all_ce_unused(o->src_index);

	/*
	 * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries
	 */
	if (!o->skip_sparse_checkout)
		mark_new_skip_worktree(o->el, o->src_index, 0, CE_NEW_SKIP_WORKTREE);

	if (!dfc)
		dfc = xcalloc(1, cache_entry_size(0));
	o->df_conflict_entry = dfc;

	if (len) {
		const char *prefix = o->prefix ? o->prefix : "";
		struct traverse_info info;

		setup_traverse_info(&info, prefix);
		info.fn = unpack_callback;
		info.data = o;
		info.show_all_errors = o->show_all_errors;
		info.pathspec = o->pathspec;

		if (o->prefix) {
			/*
			 * Unpack existing index entries that sort before the
			 * prefix the tree is spliced into. Note that o->merge
			 * is always true in this case.
			 */
			while (1) {
				struct cache_entry *ce = next_cache_entry(o);
				if (!ce)
					break;
				if (ce_in_traverse_path(ce, &info))
					break;
				if (unpack_index_entry(ce, o) < 0)
					goto return_failed;
			}
		}

		if (traverse_trees(len, t, &info) < 0)
			goto return_failed;
	}

	/* Any left-over entries in the index? */
	if (o->merge) {
		while (1) {
			struct cache_entry *ce = next_cache_entry(o);
			if (!ce)
				break;
			if (unpack_index_entry(ce, o) < 0)
				goto return_failed;
		}
	}
	mark_all_ce_unused(o->src_index);

	if (o->trivial_merges_only && o->nontrivial_merge) {
		ret = unpack_failed(o, "Merge requires file-level merging");
		goto done;
	}

	if (!o->skip_sparse_checkout) {
		int empty_worktree = 1;

		/*
		 * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #1
		 * If they will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE
		 * so apply_sparse_checkout() won't attempt to remove it from worktree
		 */
		mark_new_skip_worktree(o->el, &o->result, CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);

		ret = 0;
		for (i = 0; i < o->result.cache_nr; i++) {
			struct cache_entry *ce = o->result.cache[i];

			/*
			 * Entries marked with CE_ADDED in merged_entry() do not have
			 * verify_absent() check (the check is effectively disabled
			 * because CE_NEW_SKIP_WORKTREE is set unconditionally).
			 *
			 * Do the real check now because we have had
			 * correct CE_NEW_SKIP_WORKTREE
			 */
			if (ce->ce_flags & CE_ADDED &&
			    verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}

			if (apply_sparse_checkout(&o->result, ce, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}
			if (!ce_skip_worktree(ce))
				empty_worktree = 0;

		}
		if (ret < 0)
			goto return_failed;
		/*
		 * Sparse checkout is meant to narrow down checkout area
		 * but it does not make sense to narrow down to empty working
		 * tree. This is usually a mistake in sparse checkout rules.
		 * Do not allow users to do that.
		 */
		if (o->result.cache_nr && empty_worktree) {
			ret = unpack_failed(o, "Sparse checkout leaves no entry on working directory");
			goto done;
		}
	}

	o->src_index = NULL;
	ret = check_updates(o) ? (-2) : 0;
	if (o->dst_index) {
		if (!ret) {
			if (!o->result.cache_tree)
				o->result.cache_tree = cache_tree();
			if (!cache_tree_fully_valid(o->result.cache_tree))
				cache_tree_update(&o->result,
						  WRITE_TREE_SILENT |
						  WRITE_TREE_REPAIR);
		}
		move_index_extensions(&o->result, o->dst_index);
		discard_index(o->dst_index);
		*o->dst_index = o->result;
	} else {
		discard_index(&o->result);
	}

done:
	clear_exclude_list(&el);
	return ret;

return_failed:
	if (o->show_all_errors)
		display_error_msgs(o);
	mark_all_ce_unused(o->src_index);
	ret = unpack_failed(o, NULL);
	if (o->exiting_early)
		ret = 0;
	goto done;
}

/* Here come the merge functions */

static int reject_merge(const struct cache_entry *ce,
			struct unpack_trees_options *o)
{
	return o->gently ? -1 :
		add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);
}

static int same(const struct cache_entry *a, const struct cache_entry *b)
{
	if (!!a != !!b)
		return 0;
	if (!a && !b)
		return 1;
	if ((a->ce_flags | b->ce_flags) & CE_CONFLICTED)
		return 0;
	return a->ce_mode == b->ce_mode &&
	       !oidcmp(&a->oid, &b->oid);
}


/*
 * When a CE gets turned into an unmerged entry, we
 * want it to be up-to-date
 */
static int verify_uptodate_1(const struct cache_entry *ce,
			     struct unpack_trees_options *o,
			     enum unpack_trees_error_types error_type)
{
	struct stat st;

	if (o->index_only)
		return 0;

	/*
	 * CE_VALID and CE_SKIP_WORKTREE cheat, we better check again
	 * if this entry is truly up-to-date because this file may be
	 * overwritten.
	 */
	if ((ce->ce_flags & CE_VALID) || ce_skip_worktree(ce))
		; /* keep checking */
	else if (o->reset || ce_uptodate(ce))
		return 0;

	if (!lstat(ce->name, &st)) {
		int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE;
		unsigned changed = ie_match_stat(o->src_index, ce, &st, flags);

		if (submodule_from_ce(ce)) {
			int r = check_submodule_move_head(ce,
				"HEAD", oid_to_hex(&ce->oid), o);
			if (r)
				return o->gently ? -1 :
					add_rejected_path(o, error_type, ce->name);
			return 0;
		}

		if (!changed)
			return 0;
		/*
		 * Historic default policy was to allow submodule to be out
		 * of sync wrt the superproject index. If the submodule was
		 * not considered interesting above, we don't care here.
		 */
		if (S_ISGITLINK(ce->ce_mode))
			return 0;

		errno = 0;
	}
	if (errno == ENOENT)
		return 0;
	return o->gently ? -1 :
		add_rejected_path(o, error_type, ce->name);
}

static int verify_uptodate(const struct cache_entry *ce,
			   struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);
}

static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o)
{
	return verify_uptodate_1(ce, o, ERROR_SPARSE_NOT_UPTODATE_FILE);
}

static void invalidate_ce_path(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	if (!ce)
		return;
	cache_tree_invalidate_path(o->src_index, ce->name);
	untracked_cache_invalidate_path(o->src_index, ce->name);
}

/*
 * Check that checking out ce->sha1 in subdir ce->name is not
 * going to overwrite any working files.
 *
 * Currently, git does not checkout subprojects during a superproject
 * checkout, so it is not going to overwrite anything.
 */
static int verify_clean_submodule(const char *old_sha1,
				  const struct cache_entry *ce,
				  enum unpack_trees_error_types error_type,
				  struct unpack_trees_options *o)
{
	if (!submodule_from_ce(ce))
		return 0;

	return check_submodule_move_head(ce, old_sha1,
					 oid_to_hex(&ce->oid), o);
}

static int verify_clean_subdirectory(const struct cache_entry *ce,
				     enum unpack_trees_error_types error_type,
				     struct unpack_trees_options *o)
{
	/*
	 * we are about to extract "ce->name"; we would not want to lose
	 * anything in the existing directory there.
	 */
	int namelen;
	int i;
	struct dir_struct d;
	char *pathbuf;
	int cnt = 0;

	if (S_ISGITLINK(ce->ce_mode)) {
		unsigned char sha1[20];
		int sub_head = resolve_gitlink_ref(ce->name, "HEAD", sha1);
		/*
		 * If we are not going to update the submodule, then
		 * we don't care.
		 */
		if (!sub_head && !hashcmp(sha1, ce->oid.hash))
			return 0;
		return verify_clean_submodule(sub_head ? NULL : sha1_to_hex(sha1),
					      ce, error_type, o);
	}

	/*
	 * First let's make sure we do not have a local modification
	 * in that directory.
	 */
	namelen = ce_namelen(ce);
	for (i = locate_in_src_index(ce, o);
	     i < o->src_index->cache_nr;
	     i++) {
		struct cache_entry *ce2 = o->src_index->cache[i];
		int len = ce_namelen(ce2);
		if (len < namelen ||
		    strncmp(ce->name, ce2->name, namelen) ||
		    ce2->name[namelen] != '/')
			break;
		/*
		 * ce2->name is an entry in the subdirectory to be
		 * removed.
		 */
		if (!ce_stage(ce2)) {
			if (verify_uptodate(ce2, o))
				return -1;
			add_entry(o, ce2, CE_REMOVE, 0);
			mark_ce_used(ce2, o);
		}
		cnt++;
	}

	/*
	 * Then we need to make sure that we do not lose a locally
	 * present file that is not ignored.
	 */
	pathbuf = xstrfmt("%.*s/", namelen, ce->name);

	memset(&d, 0, sizeof(d));
	if (o->dir)
		d.exclude_per_dir = o->dir->exclude_per_dir;
	i = read_directory(&d, &the_index, pathbuf, namelen+1, NULL);
	if (i)
		return o->gently ? -1 :
			add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);
	free(pathbuf);
	return cnt;
}

/*
 * This gets called when there was no index entry for the tree entry 'dst',
 * but we found a file in the working tree that 'lstat()' said was fine,
 * and we're on a case-insensitive filesystem.
 *
 * See if we can find a case-insensitive match in the index that also
 * matches the stat information, and assume it's that other file!
 */
static int icase_exists(struct unpack_trees_options *o, const char *name, int len, struct stat *st)
{
	const struct cache_entry *src;

	src = index_file_exists(o->src_index, name, len, 1);
	return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);
}
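
/*
 * Decide whether it is OK to overwrite or remove the untracked path
 * "name" that was found in the working tree while checking out "ce".
 */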
static int check_ok_to_remove(const char *name, int len, int dtype,
			      const struct cache_entry *ce, struct stat *st,
			      enum unpack_trees_error_types error_type,
			      struct unpack_trees_options *o)
{
	const struct cache_entry *result;

	/*
	 * It may be that the 'lstat()' succeeded even though
	 * target 'ce' was absent, because there is an old
	 * entry that is different only in case..
	 *
	 * Ignore that lstat() if it matches.
	 */
	if (ignore_case && icase_exists(o, name, len, st))
		return 0;

	if (o->dir &&
	    is_excluded(o->dir, &the_index, name, &dtype))
		/*
		 * ce->name is explicitly excluded, so it is Ok to
		 * overwrite it.
		 */
		return 0;
	if (S_ISDIR(st->st_mode)) {
		/*
		 * We are checking out path "foo" and
		 * found "foo/." in the working tree.
		 * This is tricky -- if we have modified
		 * files that are in "foo/" we would lose
		 * them.
		 */
		if (verify_clean_subdirectory(ce, error_type, o) < 0)
			return -1;
		return 0;
	}

	/*
	 * The previous round may already have decided to
	 * delete this path, which is in a subdirectory that
	 * is being replaced with a blob.
	 */
	result = index_file_exists(&o->result, name, len, 0);
	if (result) {
		if (result->ce_flags & CE_REMOVE)
			return 0;
	}

	return o->gently ? -1 :
		add_rejected_path(o, error_type, name);
}

/*
 * We do not want to remove or overwrite a working tree file that
 * is not tracked, unless it is ignored.
 */
static int verify_absent_1(const struct cache_entry *ce,
			   enum unpack_trees_error_types error_type,
			   struct unpack_trees_options *o)
{
	int len;
	struct stat st;

	if (o->index_only || o->reset || !o->update)
		return 0;

	len = check_leading_path(ce->name, ce_namelen(ce));
	if (!len)
		return 0;
	else if (len > 0) {
		char *path;
		int ret;

		path = xmemdupz(ce->name, len);
		if (lstat(path, &st))
			ret = error_errno("cannot stat '%s'", path);
		else {
			if (submodule_from_ce(ce))
				ret = check_submodule_move_head(ce,
								oid_to_hex(&ce->oid),
								NULL, o);
			else
				ret = check_ok_to_remove(path, len, DT_UNKNOWN, NULL,
							 &st, error_type, o);
		}
		free(path);
		return ret;
	} else if (lstat(ce->name, &st)) {
		if (errno != ENOENT)
			return error_errno("cannot stat '%s'", ce->name);
		return 0;
	} else {
		if (submodule_from_ce(ce))
			return check_submodule_move_head(ce, oid_to_hex(&ce->oid),
							 NULL, o);

		return check_ok_to_remove(ce->name, ce_namelen(ce),
					  ce_to_dtype(ce), ce, &st,
					  error_type, o);
	}
}

static int verify_absent(const struct cache_entry *ce,
			 enum unpack_trees_error_types error_type,
			 struct unpack_trees_options *o)
{
	if (!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))
		return 0;
	return verify_absent_1(ce, error_type, o);
}

static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types error_type,
				struct unpack_trees_options *o)
{
	enum unpack_trees_error_types orphaned_error = error_type;
	if (orphaned_error == ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN)
		orphaned_error = ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN;

	return verify_absent_1(ce, orphaned_error, o);
}
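
/*
 * Record "ce" as the merge result for its path, reusing the stat
 * information from "old" when the content is unchanged. Returns 1 on
 * success and a negative value on failure.
 */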
static int merged_entry(const struct cache_entry *ce,
			const struct cache_entry *old,
			struct unpack_trees_options *o)
{
	int update = CE_UPDATE;
	struct cache_entry *merge = dup_entry(ce);

	if (!old) {
		/*
		 * New index entries. In sparse checkout, the following
		 * verify_absent() will be delayed until after
		 * traverse_trees() finishes in unpack_trees(), then:
		 *
		 *  - CE_NEW_SKIP_WORKTREE will be computed correctly
		 *  - verify_absent() will be called again, this time with
		 *    correct CE_NEW_SKIP_WORKTREE
		 *
		 * verify_absent() call here does nothing in sparse
		 * checkout (i.e. o->skip_sparse_checkout == 0)
		 */
		update |= CE_ADDED;
		merge->ce_flags |= CE_NEW_SKIP_WORKTREE;

		if (verify_absent(merge,
				  ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
			free(merge);
			return -1;
		}
		invalidate_ce_path(merge, o);

		if (submodule_from_ce(ce)) {
			int ret = check_submodule_move_head(ce, NULL,
							    oid_to_hex(&ce->oid),
							    o);
			if (ret)
				return ret;
		}

	} else if (!(old->ce_flags & CE_CONFLICTED)) {
		/*
		 * See if we can re-use the old CE directly?
		 * That way we get the uptodate stat info.
		 *
		 * This also removes the UPDATE flag on a match; otherwise
		 * we will end up overwriting local changes in the work tree.
		 */
		if (same(old, merge)) {
			copy_cache_entry(merge, old);
			update = 0;
		} else {
			if (verify_uptodate(old, o)) {
				free(merge);
				return -1;
			}
			/* Migrate old flags over */
			update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);
			invalidate_ce_path(old, o);
		}

		if (submodule_from_ce(ce)) {
			int ret = check_submodule_move_head(ce, oid_to_hex(&old->oid),
							    oid_to_hex(&ce->oid),
							    o);
			if (ret)
				return ret;
		}
	} else {
		/*
		 * Previously unmerged entry left as an existence
		 * marker by read_index_unmerged();
		 */
		invalidate_ce_path(old, o);
	}

	do_add_entry(o, merge, update, CE_STAGEMASK);
	return 1;
}

static int deleted_entry(const struct cache_entry *ce,
			 const struct cache_entry *old,
			 struct unpack_trees_options *o)
{
	/* Did it exist in the index? */
	if (!old) {
		if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
			return -1;
		return 0;
	}
	if (!(old->ce_flags & CE_CONFLICTED) && verify_uptodate(old, o))
		return -1;
	add_entry(o, ce, CE_REMOVE, 0);
	invalidate_ce_path(ce, o);
	return 1;
}

static int keep_entry(const struct cache_entry *ce,
		      struct unpack_trees_options *o)
{
	add_entry(o, ce, 0, 0);
	return 1;
}

#if DBRT_DEBUG
static void show_stage_entry(FILE *o,
			     const char *label, const struct cache_entry *ce)
{
	if (!ce)
		fprintf(o, "%s (missing)\n", label);
	else
		fprintf(o, "%s%06o %s %d\t%s\n",
			label,
			ce->ce_mode,
			oid_to_hex(&ce->oid),
			ce_stage(ce),
			ce->name);
}
#endif

int threeway_merge(const struct cache_entry * const *stages,
		   struct unpack_trees_options *o)
{
	const struct cache_entry *index;
	const struct cache_entry *head;
	const struct cache_entry *remote = stages[o->head_idx + 1];
	int count;
	int head_match = 0;
	int remote_match = 0;

	int df_conflict_head = 0;
	int df_conflict_remote = 0;

	int any_anc_missing = 0;
	int no_anc_exists = 1;
	int i;

	for (i = 1; i < o->head_idx; i++) {
		if (!stages[i] || stages[i] == o->df_conflict_entry)
			any_anc_missing = 1;
		else
			no_anc_exists = 0;
	}

	index = stages[0];
	head = stages[o->head_idx];

	if (head == o->df_conflict_entry) {
		df_conflict_head = 1;
		head = NULL;
	}

	if (remote == o->df_conflict_entry) {
		df_conflict_remote = 1;
		remote = NULL;
	}

	/*
	 * First, if there's a #16 situation, note that to prevent #13
	 * and #14.
	 */
	if (!same(remote, head)) {
		for (i = 1; i < o->head_idx; i++) {
			if (same(stages[i], head)) {
				head_match = i;
			}
			if (same(stages[i], remote)) {
				remote_match = i;
			}
		}
	}

	/*
	 * We start with cases where the index is allowed to match
	 * something other than the head: #14(ALT) and #2ALT, where it
	 * is permitted to match the result instead.
	 */
	/* #14, #14ALT, #2ALT */
	if (remote && !df_conflict_head && head_match && !remote_match) {
		if (index && !same(index, remote) && !same(index, head))
			return reject_merge(index, o);
		return merged_entry(remote, index, o);
	}
	/*
	 * If we have an entry in the index cache, then we want to
	 * make sure that it matches head.
	 */
	if (index && !same(index, head))
		return reject_merge(index, o);

	if (head) {
		/* #5ALT, #15 */
		if (same(head, remote))
			return merged_entry(head, index, o);
		/* #13, #3ALT */
		if (!df_conflict_remote && remote_match && !head_match)
			return merged_entry(head, index, o);
	}

	/* #1 */
	if (!head && !remote && any_anc_missing)
		return 0;

	/*
	 * Under the "aggressive" rule, we resolve mostly trivial
	 * cases that we historically had git-merge-one-file resolve.
	 */
	if (o->aggressive) {
		int head_deleted = !head;
		int remote_deleted = !remote;
		const struct cache_entry *ce = NULL;

		if (index)
			ce = index;
		else if (head)
			ce = head;
		else if (remote)
			ce = remote;
		else {
			for (i = 1; i < o->head_idx; i++) {
				if (stages[i] && stages[i] != o->df_conflict_entry) {
					ce = stages[i];
					break;
				}
			}
		}

		/*
		 * Deleted in both.
		 * Deleted in one and unchanged in the other.
		 */
		if ((head_deleted && remote_deleted) ||
		    (head_deleted && remote && remote_match) ||
		    (remote_deleted && head && head_match)) {
			if (index)
				return deleted_entry(index, index, o);
			if (ce && !head_deleted) {
				if (verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))
					return -1;
			}
			return 0;
		}
		/*
		 * Added in both, identically.
		 */
		if (no_anc_exists && head && remote && same(head, remote))
			return merged_entry(head, index, o);

	}

	/* Below are "no merge" cases, which require that the index be
	 * up-to-date to avoid the files getting overwritten with
	 * conflict resolution files.
	 */
	if (index) {
		if (verify_uptodate(index, o))
			return -1;
	}

	o->nontrivial_merge = 1;

	/* #2, #3, #4, #6, #7, #9, #10, #11. */
	count = 0;
	if (!head_match || !remote_match) {
		for (i = 1; i < o->head_idx; i++) {
			if (stages[i] && stages[i] != o->df_conflict_entry) {
				keep_entry(stages[i], o);
				count++;
				break;
			}
		}
	}
#if DBRT_DEBUG
	else {
		fprintf(stderr, "read-tree: warning #16 detected\n");
		show_stage_entry(stderr, "head ", stages[head_match]);
		show_stage_entry(stderr, "remote ", stages[remote_match]);
	}
#endif
	if (head) { count += keep_entry(head, o); }
	if (remote) { count += keep_entry(remote, o); }
	return count;
}

/*
 * Two-way merge.
 *
 * The rule is to "carry forward" what is in the index without losing
 * information across a "fast-forward", favoring a successful merge
 * over a merge failure when it makes sense. For details of the
 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.
 *
 */
int twoway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *current = src[0];
	const struct cache_entry *oldtree = src[1];
	const struct cache_entry *newtree = src[2];

	if (o->merge_size != 2)
		return error("Cannot do a twoway merge of %d trees",
			     o->merge_size);

	if (oldtree == o->df_conflict_entry)
		oldtree = NULL;
	if (newtree == o->df_conflict_entry)
		newtree = NULL;

	if (current) {
		if (current->ce_flags & CE_CONFLICTED) {
			if (same(oldtree, newtree) || o->reset) {
				if (!newtree)
					return deleted_entry(current, current, o);
				else
					return merged_entry(newtree, current, o);
			}
			return reject_merge(current, o);
		} else if ((!oldtree && !newtree) || /* 4 and 5 */
			   (!oldtree && newtree &&
			    same(current, newtree)) || /* 6 and 7 */
			   (oldtree && newtree &&
			    same(oldtree, newtree)) || /* 14 and 15 */
			   (oldtree && newtree &&
			    !same(oldtree, newtree) && /* 18 and 19 */
			    same(current, newtree))) {
			return keep_entry(current, o);
		} else if (oldtree && !newtree && same(current, oldtree)) {
			/* 10 or 11 */
			return deleted_entry(oldtree, current, o);
		} else if (oldtree && newtree &&
			   same(current, oldtree) && !same(current, newtree)) {
			/* 20 or 21 */
			return merged_entry(newtree, current, o);
		} else
			return reject_merge(current, o);
	}
	else if (newtree) {
		if (oldtree && !o->initial_checkout) {
			/*
			 * deletion of the path was staged;
			 */
			if (same(oldtree, newtree))
				return 1;
			return reject_merge(oldtree, o);
		}
		return merged_entry(newtree, current, o);
	}
	return deleted_entry(oldtree, current, o);
}

/*
 * Bind merge.
 *
 * Keep the index entries at stage0, collapse stage1 but make sure
 * stage0 does not have anything there.
 */
int bind_merge(const struct cache_entry * const *src,
	       struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a bind merge of %d trees",
			     o->merge_size);
	if (a && old)
		return o->gently ? -1 :
			error(ERRORMSG(o, ERROR_BIND_OVERLAP),
			      super_prefixed(a->name),
			      super_prefixed(old->name));
	if (!a)
		return keep_entry(old, o);
	else
		return merged_entry(a, NULL, o);
}

/*
 * One-way merge.
 *
 * The rule is:
 * - take the stat information from stage0, take the data from stage1
 */
int oneway_merge(const struct cache_entry * const *src,
		 struct unpack_trees_options *o)
{
	const struct cache_entry *old = src[0];
	const struct cache_entry *a = src[1];

	if (o->merge_size != 1)
		return error("Cannot do a oneway merge of %d trees",
			     o->merge_size);

	if (!a || a == o->df_conflict_entry)
		return deleted_entry(old, old, o);

	if (old && same(old, a)) {
		int update = 0;
		if (o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old)) {
			struct stat st;
			if (lstat(old->name, &st) ||
			    ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE))
				update |= CE_UPDATE;
		}
		add_entry(o, old, update, 0);
		return 0;
	}
	return merged_entry(a, old, o);
}