#define NO_THE_INDEX_COMPATIBILITY_MACROS
#include "cache.h"
#include "repository.h"
#include "config.h"
#include "dir.h"
#include "tree.h"
#include "tree-walk.h"
#include "cache-tree.h"
#include "unpack-trees.h"
#include "progress.h"
#include "refs.h"
#include "attr.h"
#include "split-index.h"
#include "submodule.h"
#include "submodule-config.h"
#include "fsmonitor.h"
#include "fetch-object.h"

/*
 * Error messages expected by scripts out of plumbing commands such as
 * read-tree.  Non-scripted Porcelain is not required to use these messages
 * and is in fact encouraged to reword them to better suit its particular
 * situation.  See how "git checkout" and "git merge" replace them using
 * setup_unpack_trees_porcelain(), for example.
 */
static const char *unpack_plumbing_errors[NB_UNPACK_TREES_ERROR_TYPES] = {
	/* ERROR_WOULD_OVERWRITE */
	"Entry '%s' would be overwritten by merge. Cannot merge.",

	/* ERROR_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot merge.",

	/* ERROR_NOT_UPTODATE_DIR */
	"Updating '%s' would lose untracked files in it",

	/* ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN */
	"Untracked working tree file '%s' would be overwritten by merge.",

	/* ERROR_WOULD_LOSE_UNTRACKED_REMOVED */
	"Untracked working tree file '%s' would be removed by merge.",

	/* ERROR_BIND_OVERLAP */
	"Entry '%s' overlaps with '%s'. Cannot bind.",

	/* ERROR_SPARSE_NOT_UPTODATE_FILE */
	"Entry '%s' not uptodate. Cannot update sparse checkout.",

	/* ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN */
	"Working tree file '%s' would be overwritten by sparse checkout update.",

	/* ERROR_WOULD_LOSE_ORPHANED_REMOVED */
	"Working tree file '%s' would be removed by sparse checkout update.",

	/* ERROR_WOULD_LOSE_SUBMODULE */
	"Submodule '%s' cannot checkout new HEAD.",
};

#define ERRORMSG(o,type) \
	( ((o) && (o)->msgs[(type)]) \
	  ? ((o)->msgs[(type)]) \
	  : (unpack_plumbing_errors[(type)]) )

static const char *super_prefixed(const char *path)
{
	/*
	 * It is necessary and sufficient to have two static buffers
	 * here, as the return value of this function is fed to
	 * error() using the unpack_*_errors[] templates we see above.
70 */ 71static struct strbuf buf[2] = {STRBUF_INIT, STRBUF_INIT}; 72static int super_prefix_len = -1; 73static unsigned idx =ARRAY_SIZE(buf) -1; 74 75if(super_prefix_len <0) { 76const char*super_prefix =get_super_prefix(); 77if(!super_prefix) { 78 super_prefix_len =0; 79}else{ 80int i; 81for(i =0; i <ARRAY_SIZE(buf); i++) 82strbuf_addstr(&buf[i], super_prefix); 83 super_prefix_len = buf[0].len; 84} 85} 86 87if(!super_prefix_len) 88return path; 89 90if(++idx >=ARRAY_SIZE(buf)) 91 idx =0; 92 93strbuf_setlen(&buf[idx], super_prefix_len); 94strbuf_addstr(&buf[idx], path); 95 96return buf[idx].buf; 97} 98 99voidsetup_unpack_trees_porcelain(struct unpack_trees_options *opts, 100const char*cmd) 101{ 102int i; 103const char**msgs = opts->msgs; 104const char*msg; 105 106if(!strcmp(cmd,"checkout")) 107 msg = advice_commit_before_merge 108?_("Your local changes to the following files would be overwritten by checkout:\n%%s" 109"Please commit your changes or stash them before you switch branches.") 110:_("Your local changes to the following files would be overwritten by checkout:\n%%s"); 111else if(!strcmp(cmd,"merge")) 112 msg = advice_commit_before_merge 113?_("Your local changes to the following files would be overwritten by merge:\n%%s" 114"Please commit your changes or stash them before you merge.") 115:_("Your local changes to the following files would be overwritten by merge:\n%%s"); 116else 117 msg = advice_commit_before_merge 118?_("Your local changes to the following files would be overwritten by%s:\n%%s" 119"Please commit your changes or stash them before you%s.") 120:_("Your local changes to the following files would be overwritten by%s:\n%%s"); 121 msgs[ERROR_WOULD_OVERWRITE] = msgs[ERROR_NOT_UPTODATE_FILE] = 122xstrfmt(msg, cmd, cmd); 123 124 msgs[ERROR_NOT_UPTODATE_DIR] = 125_("Updating the following directories would lose untracked files in them:\n%s"); 126 127if(!strcmp(cmd,"checkout")) 128 msg = advice_commit_before_merge 129?_("The following untracked working tree files would be removed by checkout:\n%%s" 130"Please move or remove them before you switch branches.") 131:_("The following untracked working tree files would be removed by checkout:\n%%s"); 132else if(!strcmp(cmd,"merge")) 133 msg = advice_commit_before_merge 134?_("The following untracked working tree files would be removed by merge:\n%%s" 135"Please move or remove them before you merge.") 136:_("The following untracked working tree files would be removed by merge:\n%%s"); 137else 138 msg = advice_commit_before_merge 139?_("The following untracked working tree files would be removed by%s:\n%%s" 140"Please move or remove them before you%s.") 141:_("The following untracked working tree files would be removed by%s:\n%%s"); 142 msgs[ERROR_WOULD_LOSE_UNTRACKED_REMOVED] =xstrfmt(msg, cmd, cmd); 143 144if(!strcmp(cmd,"checkout")) 145 msg = advice_commit_before_merge 146?_("The following untracked working tree files would be overwritten by checkout:\n%%s" 147"Please move or remove them before you switch branches.") 148:_("The following untracked working tree files would be overwritten by checkout:\n%%s"); 149else if(!strcmp(cmd,"merge")) 150 msg = advice_commit_before_merge 151?_("The following untracked working tree files would be overwritten by merge:\n%%s" 152"Please move or remove them before you merge.") 153:_("The following untracked working tree files would be overwritten by merge:\n%%s"); 154else 155 msg = advice_commit_before_merge 156?_("The following untracked working tree files would be overwritten by%s:\n%%s" 
157"Please move or remove them before you%s.") 158:_("The following untracked working tree files would be overwritten by%s:\n%%s"); 159 msgs[ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN] =xstrfmt(msg, cmd, cmd); 160 161/* 162 * Special case: ERROR_BIND_OVERLAP refers to a pair of paths, we 163 * cannot easily display it as a list. 164 */ 165 msgs[ERROR_BIND_OVERLAP] =_("Entry '%s' overlaps with '%s'. Cannot bind."); 166 167 msgs[ERROR_SPARSE_NOT_UPTODATE_FILE] = 168_("Cannot update sparse checkout: the following entries are not up to date:\n%s"); 169 msgs[ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN] = 170_("The following working tree files would be overwritten by sparse checkout update:\n%s"); 171 msgs[ERROR_WOULD_LOSE_ORPHANED_REMOVED] = 172_("The following working tree files would be removed by sparse checkout update:\n%s"); 173 msgs[ERROR_WOULD_LOSE_SUBMODULE] = 174_("Cannot update submodule:\n%s"); 175 176 opts->show_all_errors =1; 177/* rejected paths may not have a static buffer */ 178for(i =0; i <ARRAY_SIZE(opts->unpack_rejects); i++) 179 opts->unpack_rejects[i].strdup_strings =1; 180} 181 182static intdo_add_entry(struct unpack_trees_options *o,struct cache_entry *ce, 183unsigned int set,unsigned int clear) 184{ 185 clear |= CE_HASHED; 186 187if(set & CE_REMOVE) 188 set |= CE_WT_REMOVE; 189 190 ce->ce_flags = (ce->ce_flags & ~clear) | set; 191returnadd_index_entry(&o->result, ce, 192 ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE); 193} 194 195static struct cache_entry *dup_entry(const struct cache_entry *ce) 196{ 197unsigned int size =ce_size(ce); 198struct cache_entry *new_entry =xmalloc(size); 199 200memcpy(new_entry, ce, size); 201return new_entry; 202} 203 204static voidadd_entry(struct unpack_trees_options *o, 205const struct cache_entry *ce, 206unsigned int set,unsigned int clear) 207{ 208do_add_entry(o,dup_entry(ce), set, clear); 209} 210 211/* 212 * add error messages on path <path> 213 * corresponding to the type <e> with the message <msg> 214 * indicating if it should be display in porcelain or not 215 */ 216static intadd_rejected_path(struct unpack_trees_options *o, 217enum unpack_trees_error_types e, 218const char*path) 219{ 220if(!o->show_all_errors) 221returnerror(ERRORMSG(o, e),super_prefixed(path)); 222 223/* 224 * Otherwise, insert in a list for future display by 225 * display_error_msgs() 226 */ 227string_list_append(&o->unpack_rejects[e], path); 228return-1; 229} 230 231/* 232 * display all the error messages stored in a nice way 233 */ 234static voiddisplay_error_msgs(struct unpack_trees_options *o) 235{ 236int e, i; 237int something_displayed =0; 238for(e =0; e < NB_UNPACK_TREES_ERROR_TYPES; e++) { 239struct string_list *rejects = &o->unpack_rejects[e]; 240if(rejects->nr >0) { 241struct strbuf path = STRBUF_INIT; 242 something_displayed =1; 243for(i =0; i < rejects->nr; i++) 244strbuf_addf(&path,"\t%s\n", rejects->items[i].string); 245error(ERRORMSG(o, e),super_prefixed(path.buf)); 246strbuf_release(&path); 247} 248string_list_clear(rejects,0); 249} 250if(something_displayed) 251fprintf(stderr,_("Aborting\n")); 252} 253 254static intcheck_submodule_move_head(const struct cache_entry *ce, 255const char*old_id, 256const char*new_id, 257struct unpack_trees_options *o) 258{ 259unsigned flags = SUBMODULE_MOVE_HEAD_DRY_RUN; 260const struct submodule *sub =submodule_from_ce(ce); 261 262if(!sub) 263return0; 264 265if(o->reset) 266 flags |= SUBMODULE_MOVE_HEAD_FORCE; 267 268if(submodule_move_head(ce->name, old_id, new_id, flags)) 269return o->gently ? 
-1 :
			add_rejected_path(o, ERROR_WOULD_LOSE_SUBMODULE, ce->name);
	return 0;
}

/*
 * Perform the loading of the repository's gitmodules file.  This function is
 * used by 'check_update()' to perform loading of the gitmodules file in two
 * different situations:
 * (1) before removing entries from the working tree if the gitmodules file has
 *     been marked for removal.  This situation is specified by 'state' == NULL.
 * (2) before checking out entries to the working tree if the gitmodules file
 *     has been marked for update.  This situation is specified by 'state' != NULL.
 */
static void load_gitmodules_file(struct index_state *index,
				 struct checkout *state)
{
	int pos = index_name_pos(index, GITMODULES_FILE, strlen(GITMODULES_FILE));

	if (pos >= 0) {
		struct cache_entry *ce = index->cache[pos];
		if (!state && ce->ce_flags & CE_WT_REMOVE) {
			repo_read_gitmodules(the_repository);
		} else if (state && (ce->ce_flags & CE_UPDATE)) {
			submodule_free();
			checkout_entry(ce, state, NULL);
			repo_read_gitmodules(the_repository);
		}
	}
}

/*
 * Unlink the last component and schedule the leading directories for
 * removal, such that empty directories get removed.
 */
static void unlink_entry(const struct cache_entry *ce)
{
	const struct submodule *sub = submodule_from_ce(ce);
	if (sub) {
		/* state.force is set at the caller. */
		submodule_move_head(ce->name, "HEAD", NULL,
				    SUBMODULE_MOVE_HEAD_FORCE);
	}
	if (!check_leading_path(ce->name, ce_namelen(ce)))
		return;
	if (remove_or_warn(ce->ce_mode, ce->name))
		return;
	schedule_dir_for_removal(ce->name, ce_namelen(ce));
}

static struct progress *get_progress(struct unpack_trees_options *o)
{
	unsigned cnt = 0, total = 0;
	struct index_state *index = &o->result;

	if (!o->update || !o->verbose_update)
		return NULL;

	for (; cnt < index->cache_nr; cnt++) {
		const struct cache_entry *ce = index->cache[cnt];
		if (ce->ce_flags & (CE_UPDATE | CE_WT_REMOVE))
			total++;
	}

	return start_delayed_progress(_("Checking out files"), total);
}

static int check_updates(struct unpack_trees_options *o)
{
	unsigned cnt = 0;
	int errs = 0;
	struct progress *progress = NULL;
	struct index_state *index = &o->result;
	struct checkout state = CHECKOUT_INIT;
	int i;

	state.force = 1;
	state.quiet = 1;
	state.refresh_cache = 1;
	state.istate = index;

	progress = get_progress(o);

	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKOUT, index);

	if (should_update_submodules() && o->update && !o->dry_run)
		load_gitmodules_file(index, NULL);

	for (i = 0; i < index->cache_nr; i++) {
		const struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_WT_REMOVE) {
			display_progress(progress, ++cnt);
			if (o->update && !o->dry_run)
				unlink_entry(ce);
		}
	}
	remove_marked_cache_entries(index);
	remove_scheduled_dirs();

	if (should_update_submodules() && o->update && !o->dry_run)
		load_gitmodules_file(index, &state);

	enable_delayed_checkout(&state);
	if (repository_format_partial_clone && o->update && !o->dry_run) {
		/*
		 * Prefetch the objects that are to be checked out in the loop
		 * below.
		 */
		struct oid_array to_fetch = OID_ARRAY_INIT;
		int fetch_if_missing_store = fetch_if_missing;
		fetch_if_missing = 0;
		for (i = 0; i < index->cache_nr; i++) {
			struct cache_entry *ce = index->cache[i];
			if ((ce->ce_flags & CE_UPDATE) &&
			    !S_ISGITLINK(ce->ce_mode)) {
				if (!has_object_file(&ce->oid))
					oid_array_append(&to_fetch, &ce->oid);
			}
		}
		if (to_fetch.nr)
			fetch_objects(repository_format_partial_clone,
				      &to_fetch);
		fetch_if_missing = fetch_if_missing_store;
	}
	for (i = 0; i < index->cache_nr; i++) {
		struct cache_entry *ce = index->cache[i];

		if (ce->ce_flags & CE_UPDATE) {
			if (ce->ce_flags & CE_WT_REMOVE)
				die("BUG: both update and delete flags are set on %s",
				    ce->name);
			display_progress(progress, ++cnt);
			ce->ce_flags &= ~CE_UPDATE;
			if (o->update && !o->dry_run) {
				errs |= checkout_entry(ce, &state, NULL);
			}
		}
	}
	stop_progress(&progress);
	errs |= finish_delayed_checkout(&state);
	if (o->update)
		git_attr_set_direction(GIT_ATTR_CHECKIN, NULL);
	return errs != 0;
}

static int verify_uptodate_sparse(const struct cache_entry *ce,
				  struct unpack_trees_options *o);
static int verify_absent_sparse(const struct cache_entry *ce,
				enum unpack_trees_error_types,
				struct unpack_trees_options *o);

static int apply_sparse_checkout(struct index_state *istate,
				 struct cache_entry *ce,
				 struct unpack_trees_options *o)
{
	int was_skip_worktree = ce_skip_worktree(ce);

	if (ce->ce_flags & CE_NEW_SKIP_WORKTREE)
		ce->ce_flags |= CE_SKIP_WORKTREE;
	else
		ce->ce_flags &= ~CE_SKIP_WORKTREE;
	if (was_skip_worktree != ce_skip_worktree(ce)) {
		ce->ce_flags |= CE_UPDATE_IN_BASE;
		mark_fsmonitor_invalid(istate, ce);
		istate->cache_changed |= CE_ENTRY_CHANGED;
	}

	/*
	 * if (!was_skip_worktree && !ce_skip_worktree()) {
	 *	This is perfectly normal. Move on;
	 * }
	 */

	/*
	 * Merge strategies may set CE_UPDATE|CE_REMOVE outside the checkout
	 * area as a result of ce_skip_worktree() shortcuts in
	 * verify_absent() and verify_uptodate().
	 * Make sure they don't modify the worktree if they are already
	 * outside the checkout area.
	 */
	if (was_skip_worktree && ce_skip_worktree(ce)) {
		ce->ce_flags &= ~CE_UPDATE;

		/*
		 * By default, when CE_REMOVE is on, CE_WT_REMOVE is also
		 * on to get that file removed from both the index and the
		 * worktree.  If that file is already outside the worktree
		 * area, don't bother removing it.
		 */
		if (ce->ce_flags & CE_REMOVE)
			ce->ce_flags &= ~CE_WT_REMOVE;
	}

	if (!was_skip_worktree && ce_skip_worktree(ce)) {
		/*
		 * If CE_UPDATE is set, verify_uptodate() must have been
		 * called already; also, stat info may have been lost after
		 * merged_entry(), so calling verify_uptodate() again may
		 * fail.
		 */
		if (!(ce->ce_flags & CE_UPDATE) && verify_uptodate_sparse(ce, o))
			return -1;
		ce->ce_flags |= CE_WT_REMOVE;
		ce->ce_flags &= ~CE_UPDATE;
	}
	if (was_skip_worktree && !ce_skip_worktree(ce)) {
		if (verify_absent_sparse(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o))
			return -1;
		ce->ce_flags |= CE_UPDATE;
	}
	return 0;
}

static inline int call_unpack_fn(const struct cache_entry * const *src,
				 struct unpack_trees_options *o)
{
	int ret = o->fn(src, o);
	if (ret > 0)
		ret = 0;
	return ret;
}

static void mark_ce_used(struct cache_entry *ce, struct unpack_trees_options *o)
{
	ce->ce_flags |= CE_UNPACKED;

	if (o->cache_bottom < o->src_index->cache_nr &&
	    o->src_index->cache[o->cache_bottom] == ce) {
		int bottom = o->cache_bottom;
		while (bottom < o->src_index->cache_nr &&
		       o->src_index->cache[bottom]->ce_flags & CE_UNPACKED)
			bottom++;
		o->cache_bottom = bottom;
	}
}

static void mark_all_ce_unused(struct index_state *index)
{
	int i;
	for (i = 0; i < index->cache_nr; i++)
		index->cache[i]->ce_flags &= ~(CE_UNPACKED | CE_ADDED | CE_NEW_SKIP_WORKTREE);
}

static int locate_in_src_index(const struct cache_entry *ce,
			       struct unpack_trees_options *o)
{
	struct index_state *index = o->src_index;
	int len = ce_namelen(ce);
	int pos = index_name_pos(index, ce->name, len);
	if (pos < 0)
		pos = -1 - pos;
	return pos;
}

/*
 * We call unpack_index_entry() with an unmerged cache entry
 * only in diff-index, and it wants a single callback.  Skip
 * the other unmerged entry with the same name.
528 */ 529static voidmark_ce_used_same_name(struct cache_entry *ce, 530struct unpack_trees_options *o) 531{ 532struct index_state *index = o->src_index; 533int len =ce_namelen(ce); 534int pos; 535 536for(pos =locate_in_src_index(ce, o); pos < index->cache_nr; pos++) { 537struct cache_entry *next = index->cache[pos]; 538if(len !=ce_namelen(next) || 539memcmp(ce->name, next->name, len)) 540break; 541mark_ce_used(next, o); 542} 543} 544 545static struct cache_entry *next_cache_entry(struct unpack_trees_options *o) 546{ 547const struct index_state *index = o->src_index; 548int pos = o->cache_bottom; 549 550while(pos < index->cache_nr) { 551struct cache_entry *ce = index->cache[pos]; 552if(!(ce->ce_flags & CE_UNPACKED)) 553return ce; 554 pos++; 555} 556return NULL; 557} 558 559static voidadd_same_unmerged(const struct cache_entry *ce, 560struct unpack_trees_options *o) 561{ 562struct index_state *index = o->src_index; 563int len =ce_namelen(ce); 564int pos =index_name_pos(index, ce->name, len); 565 566if(0<= pos) 567die("programming error in a caller of mark_ce_used_same_name"); 568for(pos = -pos -1; pos < index->cache_nr; pos++) { 569struct cache_entry *next = index->cache[pos]; 570if(len !=ce_namelen(next) || 571memcmp(ce->name, next->name, len)) 572break; 573add_entry(o, next,0,0); 574mark_ce_used(next, o); 575} 576} 577 578static intunpack_index_entry(struct cache_entry *ce, 579struct unpack_trees_options *o) 580{ 581const struct cache_entry *src[MAX_UNPACK_TREES +1] = { NULL, }; 582int ret; 583 584 src[0] = ce; 585 586mark_ce_used(ce, o); 587if(ce_stage(ce)) { 588if(o->skip_unmerged) { 589add_entry(o, ce,0,0); 590return0; 591} 592} 593 ret =call_unpack_fn(src, o); 594if(ce_stage(ce)) 595mark_ce_used_same_name(ce, o); 596return ret; 597} 598 599static intfind_cache_pos(struct traverse_info *,const struct name_entry *); 600 601static voidrestore_cache_bottom(struct traverse_info *info,int bottom) 602{ 603struct unpack_trees_options *o = info->data; 604 605if(o->diff_index_cached) 606return; 607 o->cache_bottom = bottom; 608} 609 610static intswitch_cache_bottom(struct traverse_info *info) 611{ 612struct unpack_trees_options *o = info->data; 613int ret, pos; 614 615if(o->diff_index_cached) 616return0; 617 ret = o->cache_bottom; 618 pos =find_cache_pos(info->prev, &info->name); 619 620if(pos < -1) 621 o->cache_bottom = -2- pos; 622else if(pos <0) 623 o->cache_bottom = o->src_index->cache_nr; 624return ret; 625} 626 627staticinlineintare_same_oid(struct name_entry *name_j,struct name_entry *name_k) 628{ 629return name_j->oid && name_k->oid && !oidcmp(name_j->oid, name_k->oid); 630} 631 632static inttraverse_trees_recursive(int n,unsigned long dirmask, 633unsigned long df_conflicts, 634struct name_entry *names, 635struct traverse_info *info) 636{ 637int i, ret, bottom; 638int nr_buf =0; 639struct tree_desc t[MAX_UNPACK_TREES]; 640void*buf[MAX_UNPACK_TREES]; 641struct traverse_info newinfo; 642struct name_entry *p; 643 644 p = names; 645while(!p->mode) 646 p++; 647 648 newinfo = *info; 649 newinfo.prev = info; 650 newinfo.pathspec = info->pathspec; 651 newinfo.name = *p; 652 newinfo.pathlen +=tree_entry_len(p) +1; 653 newinfo.df_conflicts |= df_conflicts; 654 655/* 656 * Fetch the tree from the ODB for each peer directory in the 657 * n commits. 658 * 659 * For 2- and 3-way traversals, we try to avoid hitting the 660 * ODB twice for the same OID. This should yield a nice speed 661 * up in checkouts and merges when the commits are similar. 
662 * 663 * We don't bother doing the full O(n^2) search for larger n, 664 * because wider traversals don't happen that often and we 665 * avoid the search setup. 666 * 667 * When 2 peer OIDs are the same, we just copy the tree 668 * descriptor data. This implicitly borrows the buffer 669 * data from the earlier cell. 670 */ 671for(i =0; i < n; i++, dirmask >>=1) { 672if(i >0&&are_same_oid(&names[i], &names[i -1])) 673 t[i] = t[i -1]; 674else if(i >1&&are_same_oid(&names[i], &names[i -2])) 675 t[i] = t[i -2]; 676else{ 677const struct object_id *oid = NULL; 678if(dirmask &1) 679 oid = names[i].oid; 680 buf[nr_buf++] =fill_tree_descriptor(t + i, oid); 681} 682} 683 684 bottom =switch_cache_bottom(&newinfo); 685 ret =traverse_trees(n, t, &newinfo); 686restore_cache_bottom(&newinfo, bottom); 687 688for(i =0; i < nr_buf; i++) 689free(buf[i]); 690 691return ret; 692} 693 694/* 695 * Compare the traverse-path to the cache entry without actually 696 * having to generate the textual representation of the traverse 697 * path. 698 * 699 * NOTE! This *only* compares up to the size of the traverse path 700 * itself - the caller needs to do the final check for the cache 701 * entry having more data at the end! 702 */ 703static intdo_compare_entry_piecewise(const struct cache_entry *ce,const struct traverse_info *info,const struct name_entry *n) 704{ 705int len, pathlen, ce_len; 706const char*ce_name; 707 708if(info->prev) { 709int cmp =do_compare_entry_piecewise(ce, info->prev, 710&info->name); 711if(cmp) 712return cmp; 713} 714 pathlen = info->pathlen; 715 ce_len =ce_namelen(ce); 716 717/* If ce_len < pathlen then we must have previously hit "name == directory" entry */ 718if(ce_len < pathlen) 719return-1; 720 721 ce_len -= pathlen; 722 ce_name = ce->name + pathlen; 723 724 len =tree_entry_len(n); 725returndf_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode); 726} 727 728static intdo_compare_entry(const struct cache_entry *ce, 729const struct traverse_info *info, 730const struct name_entry *n) 731{ 732int len, pathlen, ce_len; 733const char*ce_name; 734int cmp; 735 736/* 737 * If we have not precomputed the traverse path, it is quicker 738 * to avoid doing so. But if we have precomputed it, 739 * it is quicker to use the precomputed version. 740 */ 741if(!info->traverse_path) 742returndo_compare_entry_piecewise(ce, info, n); 743 744 cmp =strncmp(ce->name, info->traverse_path, info->pathlen); 745if(cmp) 746return cmp; 747 748 pathlen = info->pathlen; 749 ce_len =ce_namelen(ce); 750 751if(ce_len < pathlen) 752return-1; 753 754 ce_len -= pathlen; 755 ce_name = ce->name + pathlen; 756 757 len =tree_entry_len(n); 758returndf_name_compare(ce_name, ce_len, S_IFREG, n->path, len, n->mode); 759} 760 761static intcompare_entry(const struct cache_entry *ce,const struct traverse_info *info,const struct name_entry *n) 762{ 763int cmp =do_compare_entry(ce, info, n); 764if(cmp) 765return cmp; 766 767/* 768 * Even if the beginning compared identically, the ce should 769 * compare as bigger than a directory leading up to it! 770 */ 771returnce_namelen(ce) >traverse_path_len(info, n); 772} 773 774static intce_in_traverse_path(const struct cache_entry *ce, 775const struct traverse_info *info) 776{ 777if(!info->prev) 778return1; 779if(do_compare_entry(ce, info->prev, &info->name)) 780return0; 781/* 782 * If ce (blob) is the same name as the path (which is a tree 783 * we will be descending into), it won't be inside it. 
784 */ 785return(info->pathlen <ce_namelen(ce)); 786} 787 788static struct cache_entry *create_ce_entry(const struct traverse_info *info,const struct name_entry *n,int stage) 789{ 790int len =traverse_path_len(info, n); 791struct cache_entry *ce =xcalloc(1,cache_entry_size(len)); 792 793 ce->ce_mode =create_ce_mode(n->mode); 794 ce->ce_flags =create_ce_flags(stage); 795 ce->ce_namelen = len; 796oidcpy(&ce->oid, n->oid); 797make_traverse_path(ce->name, info, n); 798 799return ce; 800} 801 802static intunpack_nondirectories(int n,unsigned long mask, 803unsigned long dirmask, 804struct cache_entry **src, 805const struct name_entry *names, 806const struct traverse_info *info) 807{ 808int i; 809struct unpack_trees_options *o = info->data; 810unsigned long conflicts = info->df_conflicts | dirmask; 811 812/* Do we have *only* directories? Nothing to do */ 813if(mask == dirmask && !src[0]) 814return0; 815 816/* 817 * Ok, we've filled in up to any potential index entry in src[0], 818 * now do the rest. 819 */ 820for(i =0; i < n; i++) { 821int stage; 822unsigned int bit =1ul<< i; 823if(conflicts & bit) { 824 src[i + o->merge] = o->df_conflict_entry; 825continue; 826} 827if(!(mask & bit)) 828continue; 829if(!o->merge) 830 stage =0; 831else if(i +1< o->head_idx) 832 stage =1; 833else if(i +1> o->head_idx) 834 stage =3; 835else 836 stage =2; 837 src[i + o->merge] =create_ce_entry(info, names + i, stage); 838} 839 840if(o->merge) { 841int rc =call_unpack_fn((const struct cache_entry *const*)src, 842 o); 843for(i =0; i < n; i++) { 844struct cache_entry *ce = src[i + o->merge]; 845if(ce != o->df_conflict_entry) 846free(ce); 847} 848return rc; 849} 850 851for(i =0; i < n; i++) 852if(src[i] && src[i] != o->df_conflict_entry) 853if(do_add_entry(o, src[i],0,0)) 854return-1; 855 856return0; 857} 858 859static intunpack_failed(struct unpack_trees_options *o,const char*message) 860{ 861discard_index(&o->result); 862if(!o->gently && !o->exiting_early) { 863if(message) 864returnerror("%s", message); 865return-1; 866} 867return-1; 868} 869 870/* 871 * The tree traversal is looking at name p. If we have a matching entry, 872 * return it. If name p is a directory in the index, do not return 873 * anything, as we will want to match it when the traversal descends into 874 * the directory. 875 */ 876static intfind_cache_pos(struct traverse_info *info, 877const struct name_entry *p) 878{ 879int pos; 880struct unpack_trees_options *o = info->data; 881struct index_state *index = o->src_index; 882int pfxlen = info->pathlen; 883int p_len =tree_entry_len(p); 884 885for(pos = o->cache_bottom; pos < index->cache_nr; pos++) { 886const struct cache_entry *ce = index->cache[pos]; 887const char*ce_name, *ce_slash; 888int cmp, ce_len; 889 890if(ce->ce_flags & CE_UNPACKED) { 891/* 892 * cache_bottom entry is already unpacked, so 893 * we can never match it; don't check it 894 * again. 895 */ 896if(pos == o->cache_bottom) 897++o->cache_bottom; 898continue; 899} 900if(!ce_in_traverse_path(ce, info)) { 901/* 902 * Check if we can skip future cache checks 903 * (because we're already past all possible 904 * entries in the traverse path). 
905 */ 906if(info->traverse_path) { 907if(strncmp(ce->name, info->traverse_path, 908 info->pathlen) >0) 909break; 910} 911continue; 912} 913 ce_name = ce->name + pfxlen; 914 ce_slash =strchr(ce_name,'/'); 915if(ce_slash) 916 ce_len = ce_slash - ce_name; 917else 918 ce_len =ce_namelen(ce) - pfxlen; 919 cmp =name_compare(p->path, p_len, ce_name, ce_len); 920/* 921 * Exact match; if we have a directory we need to 922 * delay returning it. 923 */ 924if(!cmp) 925return ce_slash ? -2- pos : pos; 926if(0< cmp) 927continue;/* keep looking */ 928/* 929 * ce_name sorts after p->path; could it be that we 930 * have files under p->path directory in the index? 931 * E.g. ce_name == "t-i", and p->path == "t"; we may 932 * have "t/a" in the index. 933 */ 934if(p_len < ce_len && !memcmp(ce_name, p->path, p_len) && 935 ce_name[p_len] <'/') 936continue;/* keep looking */ 937break; 938} 939return-1; 940} 941 942static struct cache_entry *find_cache_entry(struct traverse_info *info, 943const struct name_entry *p) 944{ 945int pos =find_cache_pos(info, p); 946struct unpack_trees_options *o = info->data; 947 948if(0<= pos) 949return o->src_index->cache[pos]; 950else 951return NULL; 952} 953 954static voiddebug_path(struct traverse_info *info) 955{ 956if(info->prev) { 957debug_path(info->prev); 958if(*info->prev->name.path) 959putchar('/'); 960} 961printf("%s", info->name.path); 962} 963 964static voiddebug_name_entry(int i,struct name_entry *n) 965{ 966printf("ent#%d %06o%s\n", i, 967 n->path ? n->mode :0, 968 n->path ? n->path :"(missing)"); 969} 970 971static voiddebug_unpack_callback(int n, 972unsigned long mask, 973unsigned long dirmask, 974struct name_entry *names, 975struct traverse_info *info) 976{ 977int i; 978printf("* unpack mask%lu, dirmask%lu, cnt%d", 979 mask, dirmask, n); 980debug_path(info); 981putchar('\n'); 982for(i =0; i < n; i++) 983debug_name_entry(i, names + i); 984} 985 986static intunpack_callback(int n,unsigned long mask,unsigned long dirmask,struct name_entry *names,struct traverse_info *info) 987{ 988struct cache_entry *src[MAX_UNPACK_TREES +1] = { NULL, }; 989struct unpack_trees_options *o = info->data; 990const struct name_entry *p = names; 991 992/* Find first entry with a real name (we could use "mask" too) */ 993while(!p->mode) 994 p++; 995 996if(o->debug_unpack) 997debug_unpack_callback(n, mask, dirmask, names, info); 998 999/* Are we supposed to look at the index too? */1000if(o->merge) {1001while(1) {1002int cmp;1003struct cache_entry *ce;10041005if(o->diff_index_cached)1006 ce =next_cache_entry(o);1007else1008 ce =find_cache_entry(info, p);10091010if(!ce)1011break;1012 cmp =compare_entry(ce, info, p);1013if(cmp <0) {1014if(unpack_index_entry(ce, o) <0)1015returnunpack_failed(o, NULL);1016continue;1017}1018if(!cmp) {1019if(ce_stage(ce)) {1020/*1021 * If we skip unmerged index1022 * entries, we'll skip this1023 * entry *and* the tree1024 * entries associated with it!1025 */1026if(o->skip_unmerged) {1027add_same_unmerged(ce, o);1028return mask;1029}1030}1031 src[0] = ce;1032}1033break;1034}1035}10361037if(unpack_nondirectories(n, mask, dirmask, src, names, info) <0)1038return-1;10391040if(o->merge && src[0]) {1041if(ce_stage(src[0]))1042mark_ce_used_same_name(src[0], o);1043else1044mark_ce_used(src[0], o);1045}10461047/* Now handle any directories.. 
*/1048if(dirmask) {1049/* special case: "diff-index --cached" looking at a tree */1050if(o->diff_index_cached &&1051 n ==1&& dirmask ==1&&S_ISDIR(names->mode)) {1052int matches;1053 matches =cache_tree_matches_traversal(o->src_index->cache_tree,1054 names, info);1055/*1056 * Everything under the name matches; skip the1057 * entire hierarchy. diff_index_cached codepath1058 * special cases D/F conflicts in such a way that1059 * it does not do any look-ahead, so this is safe.1060 */1061if(matches) {1062 o->cache_bottom += matches;1063return mask;1064}1065}10661067if(traverse_trees_recursive(n, dirmask, mask & ~dirmask,1068 names, info) <0)1069return-1;1070return mask;1071}10721073return mask;1074}10751076static intclear_ce_flags_1(struct cache_entry **cache,int nr,1077struct strbuf *prefix,1078int select_mask,int clear_mask,1079struct exclude_list *el,int defval);10801081/* Whole directory matching */1082static intclear_ce_flags_dir(struct cache_entry **cache,int nr,1083struct strbuf *prefix,1084char*basename,1085int select_mask,int clear_mask,1086struct exclude_list *el,int defval)1087{1088struct cache_entry **cache_end;1089int dtype = DT_DIR;1090int ret =is_excluded_from_list(prefix->buf, prefix->len,1091 basename, &dtype, el, &the_index);1092int rc;10931094strbuf_addch(prefix,'/');10951096/* If undecided, use matching result of parent dir in defval */1097if(ret <0)1098 ret = defval;10991100for(cache_end = cache; cache_end != cache + nr; cache_end++) {1101struct cache_entry *ce = *cache_end;1102if(strncmp(ce->name, prefix->buf, prefix->len))1103break;1104}11051106/*1107 * TODO: check el, if there are no patterns that may conflict1108 * with ret (iow, we know in advance the incl/excl1109 * decision for the entire directory), clear flag here without1110 * calling clear_ce_flags_1(). That function will call1111 * the expensive is_excluded_from_list() on every entry.1112 */1113 rc =clear_ce_flags_1(cache, cache_end - cache,1114 prefix,1115 select_mask, clear_mask,1116 el, ret);1117strbuf_setlen(prefix, prefix->len -1);1118return rc;1119}11201121/*1122 * Traverse the index, find every entry that matches according to1123 * o->el. Do "ce_flags &= ~clear_mask" on those entries. 
Return the
 * number of traversed entries.
 *
 * If select_mask is non-zero, only entries whose ce_flags has one of
 * those bits enabled are traversed.
 *
 * cache	: pointer to an index entry
 * prefix_len	: an offset to its path
 *
 * The current path ("prefix") including the trailing '/' is
 *   cache[0]->name[0..(prefix_len-1)]
 * Top level path has prefix_len zero.
 */
static int clear_ce_flags_1(struct cache_entry **cache, int nr,
			    struct strbuf *prefix,
			    int select_mask, int clear_mask,
			    struct exclude_list *el, int defval)
{
	struct cache_entry **cache_end = cache + nr;

	/*
	 * Process all entries that have the given prefix and meet
	 * the select_mask condition.
	 */
	while (cache != cache_end) {
		struct cache_entry *ce = *cache;
		const char *name, *slash;
		int len, dtype, ret;

		if (select_mask && !(ce->ce_flags & select_mask)) {
			cache++;
			continue;
		}

		if (prefix->len && strncmp(ce->name, prefix->buf, prefix->len))
			break;

		name = ce->name + prefix->len;
		slash = strchr(name, '/');

		/* If it's a directory, try whole directory match first */
		if (slash) {
			int processed;

			len = slash - name;
			strbuf_add(prefix, name, len);

			processed = clear_ce_flags_dir(cache, cache_end - cache,
						       prefix,
						       prefix->buf + prefix->len - len,
						       select_mask, clear_mask,
						       el, defval);

			/* clear_c_f_dir eats a whole dir already? */
			if (processed) {
				cache += processed;
				strbuf_setlen(prefix, prefix->len - len);
				continue;
			}

			strbuf_addch(prefix, '/');
			cache += clear_ce_flags_1(cache, cache_end - cache,
						  prefix,
						  select_mask, clear_mask, el, defval);
			strbuf_setlen(prefix, prefix->len - len - 1);
			continue;
		}

		/* Non-directory */
		dtype = ce_to_dtype(ce);
		ret = is_excluded_from_list(ce->name, ce_namelen(ce),
					    name, &dtype, el, &the_index);
		if (ret < 0)
			ret = defval;
		if (ret > 0)
			ce->ce_flags &= ~clear_mask;
		cache++;
	}
	return nr - (cache_end - cache);
}

static int clear_ce_flags(struct cache_entry **cache, int nr,
			  int select_mask, int clear_mask,
			  struct exclude_list *el)
{
	static struct strbuf prefix = STRBUF_INIT;

	strbuf_reset(&prefix);

	return clear_ce_flags_1(cache, nr,
				&prefix,
				select_mask, clear_mask,
				el, 0);
}

/*
 * Set/Clear CE_NEW_SKIP_WORKTREE according to $GIT_DIR/info/sparse-checkout
 */
static void mark_new_skip_worktree(struct exclude_list *el,
				   struct index_state *the_index,
				   int select_flag, int skip_wt_flag)
{
	int i;

	/*
	 * 1. Pretend the narrowest worktree: only unmerged entries
	 * are checked out
	 */
	for (i = 0; i < the_index->cache_nr; i++) {
		struct cache_entry *ce = the_index->cache[i];

		if (select_flag && !(ce->ce_flags & select_flag))
			continue;

		if (!ce_stage(ce))
			ce->ce_flags |= skip_wt_flag;
		else
			ce->ce_flags &= ~skip_wt_flag;
	}

	/*
	 * 2. Widen the worktree according to the sparse-checkout file.
	 * Matched entries will have skip_wt_flag cleared (i.e. "in")
	 */
	clear_ce_flags(the_index->cache, the_index->cache_nr,
		       select_flag, skip_wt_flag, el);
}

static int verify_absent(const struct cache_entry *,
			 enum unpack_trees_error_types,
			 struct unpack_trees_options *);
/*
 * N-way merge "len" trees.
Returns 0 on success, -1 on failure to manipulate the1256 * resulting index, -2 on failure to reflect the changes to the work tree.1257 *1258 * CE_ADDED, CE_UNPACKED and CE_NEW_SKIP_WORKTREE are used internally1259 */1260intunpack_trees(unsigned len,struct tree_desc *t,struct unpack_trees_options *o)1261{1262int i, ret;1263static struct cache_entry *dfc;1264struct exclude_list el;12651266if(len > MAX_UNPACK_TREES)1267die("unpack_trees takes at most%dtrees", MAX_UNPACK_TREES);12681269memset(&el,0,sizeof(el));1270if(!core_apply_sparse_checkout || !o->update)1271 o->skip_sparse_checkout =1;1272if(!o->skip_sparse_checkout) {1273char*sparse =git_pathdup("info/sparse-checkout");1274if(add_excludes_from_file_to_list(sparse,"",0, &el, NULL) <0)1275 o->skip_sparse_checkout =1;1276else1277 o->el = ⪙1278free(sparse);1279}12801281memset(&o->result,0,sizeof(o->result));1282 o->result.initialized =1;1283 o->result.timestamp.sec = o->src_index->timestamp.sec;1284 o->result.timestamp.nsec = o->src_index->timestamp.nsec;1285 o->result.version = o->src_index->version;1286 o->result.split_index = o->src_index->split_index;1287if(o->result.split_index)1288 o->result.split_index->refcount++;1289hashcpy(o->result.sha1, o->src_index->sha1);1290 o->merge_size = len;1291mark_all_ce_unused(o->src_index);12921293/*1294 * Sparse checkout loop #1: set NEW_SKIP_WORKTREE on existing entries1295 */1296if(!o->skip_sparse_checkout)1297mark_new_skip_worktree(o->el, o->src_index,0, CE_NEW_SKIP_WORKTREE);12981299if(!dfc)1300 dfc =xcalloc(1,cache_entry_size(0));1301 o->df_conflict_entry = dfc;13021303if(len) {1304const char*prefix = o->prefix ? o->prefix :"";1305struct traverse_info info;13061307setup_traverse_info(&info, prefix);1308 info.fn = unpack_callback;1309 info.data = o;1310 info.show_all_errors = o->show_all_errors;1311 info.pathspec = o->pathspec;13121313if(o->prefix) {1314/*1315 * Unpack existing index entries that sort before the1316 * prefix the tree is spliced into. Note that o->merge1317 * is always true in this case.1318 */1319while(1) {1320struct cache_entry *ce =next_cache_entry(o);1321if(!ce)1322break;1323if(ce_in_traverse_path(ce, &info))1324break;1325if(unpack_index_entry(ce, o) <0)1326goto return_failed;1327}1328}13291330if(traverse_trees(len, t, &info) <0)1331goto return_failed;1332}13331334/* Any left-over entries in the index? 
 */
	if (o->merge) {
		while (1) {
			struct cache_entry *ce = next_cache_entry(o);
			if (!ce)
				break;
			if (unpack_index_entry(ce, o) < 0)
				goto return_failed;
		}
	}
	mark_all_ce_unused(o->src_index);

	if (o->trivial_merges_only && o->nontrivial_merge) {
		ret = unpack_failed(o, "Merge requires file-level merging");
		goto done;
	}

	if (!o->skip_sparse_checkout) {
		int empty_worktree = 1;

		/*
		 * Sparse checkout loop #2: set NEW_SKIP_WORKTREE on entries not in loop #1
		 * If they will have NEW_SKIP_WORKTREE, also set CE_SKIP_WORKTREE
		 * so apply_sparse_checkout() won't attempt to remove them from the worktree.
		 */
		mark_new_skip_worktree(o->el, &o->result, CE_ADDED, CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);

		ret = 0;
		for (i = 0; i < o->result.cache_nr; i++) {
			struct cache_entry *ce = o->result.cache[i];

			/*
			 * Entries marked with CE_ADDED in merged_entry() do not
			 * get a real verify_absent() check there (the check is
			 * effectively disabled because CE_NEW_SKIP_WORKTREE is
			 * set unconditionally).
			 *
			 * Do the real check now that CE_NEW_SKIP_WORKTREE is
			 * correct.
			 */
			if (ce->ce_flags & CE_ADDED &&
			    verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}

			if (apply_sparse_checkout(&o->result, ce, o)) {
				if (!o->show_all_errors)
					goto return_failed;
				ret = -1;
			}
			if (!ce_skip_worktree(ce))
				empty_worktree = 0;
		}
		if (ret < 0)
			goto return_failed;
		/*
		 * Sparse checkout is meant to narrow down the checkout area
		 * but it does not make sense to narrow down to an empty
		 * working tree.  This is usually a mistake in sparse checkout
		 * rules.  Do not allow users to do that.
		 */
		if (o->result.cache_nr && empty_worktree) {
			ret = unpack_failed(o, "Sparse checkout leaves no entry on working directory");
			goto done;
		}
	}

	o->src_index = NULL;
	ret = check_updates(o) ? (-2) : 0;
	if (o->dst_index) {
		if (!ret) {
			if (!o->result.cache_tree)
				o->result.cache_tree = cache_tree();
			if (!cache_tree_fully_valid(o->result.cache_tree))
				cache_tree_update(&o->result,
						  WRITE_TREE_SILENT |
						  WRITE_TREE_REPAIR);
		}
		move_index_extensions(&o->result, o->dst_index);
		discard_index(o->dst_index);
		*o->dst_index = o->result;
	} else {
		discard_index(&o->result);
	}

done:
	clear_exclude_list(&el);
	return ret;

return_failed:
	if (o->show_all_errors)
		display_error_msgs(o);
	mark_all_ce_unused(o->src_index);
	ret = unpack_failed(o, NULL);
	if (o->exiting_early)
		ret = 0;
	goto done;
}

/* Here come the merge functions */

static int reject_merge(const struct cache_entry *ce,
			struct unpack_trees_options *o)
{
	return o->gently ?
-1:1441add_rejected_path(o, ERROR_WOULD_OVERWRITE, ce->name);1442}14431444static intsame(const struct cache_entry *a,const struct cache_entry *b)1445{1446if(!!a != !!b)1447return0;1448if(!a && !b)1449return1;1450if((a->ce_flags | b->ce_flags) & CE_CONFLICTED)1451return0;1452return a->ce_mode == b->ce_mode &&1453!oidcmp(&a->oid, &b->oid);1454}145514561457/*1458 * When a CE gets turned into an unmerged entry, we1459 * want it to be up-to-date1460 */1461static intverify_uptodate_1(const struct cache_entry *ce,1462struct unpack_trees_options *o,1463enum unpack_trees_error_types error_type)1464{1465struct stat st;14661467if(o->index_only)1468return0;14691470/*1471 * CE_VALID and CE_SKIP_WORKTREE cheat, we better check again1472 * if this entry is truly up-to-date because this file may be1473 * overwritten.1474 */1475if((ce->ce_flags & CE_VALID) ||ce_skip_worktree(ce))1476;/* keep checking */1477else if(o->reset ||ce_uptodate(ce))1478return0;14791480if(!lstat(ce->name, &st)) {1481int flags = CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE;1482unsigned changed =ie_match_stat(o->src_index, ce, &st, flags);14831484if(submodule_from_ce(ce)) {1485int r =check_submodule_move_head(ce,1486"HEAD",oid_to_hex(&ce->oid), o);1487if(r)1488return o->gently ? -1:1489add_rejected_path(o, error_type, ce->name);1490return0;1491}14921493if(!changed)1494return0;1495/*1496 * Historic default policy was to allow submodule to be out1497 * of sync wrt the superproject index. If the submodule was1498 * not considered interesting above, we don't care here.1499 */1500if(S_ISGITLINK(ce->ce_mode))1501return0;15021503 errno =0;1504}1505if(errno == ENOENT)1506return0;1507return o->gently ? -1:1508add_rejected_path(o, error_type, ce->name);1509}15101511static intverify_uptodate(const struct cache_entry *ce,1512struct unpack_trees_options *o)1513{1514if(!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))1515return0;1516returnverify_uptodate_1(ce, o, ERROR_NOT_UPTODATE_FILE);1517}15181519static intverify_uptodate_sparse(const struct cache_entry *ce,1520struct unpack_trees_options *o)1521{1522returnverify_uptodate_1(ce, o, ERROR_SPARSE_NOT_UPTODATE_FILE);1523}15241525static voidinvalidate_ce_path(const struct cache_entry *ce,1526struct unpack_trees_options *o)1527{1528if(!ce)1529return;1530cache_tree_invalidate_path(o->src_index, ce->name);1531untracked_cache_invalidate_path(o->src_index, ce->name,1);1532}15331534/*1535 * Check that checking out ce->sha1 in subdir ce->name is not1536 * going to overwrite any working files.1537 *1538 * Currently, git does not checkout subprojects during a superproject1539 * checkout, so it is not going to overwrite anything.1540 */1541static intverify_clean_submodule(const char*old_sha1,1542const struct cache_entry *ce,1543enum unpack_trees_error_types error_type,1544struct unpack_trees_options *o)1545{1546if(!submodule_from_ce(ce))1547return0;15481549returncheck_submodule_move_head(ce, old_sha1,1550oid_to_hex(&ce->oid), o);1551}15521553static intverify_clean_subdirectory(const struct cache_entry *ce,1554enum unpack_trees_error_types error_type,1555struct unpack_trees_options *o)1556{1557/*1558 * we are about to extract "ce->name"; we would not want to lose1559 * anything in the existing directory there.1560 */1561int namelen;1562int i;1563struct dir_struct d;1564char*pathbuf;1565int cnt =0;15661567if(S_ISGITLINK(ce->ce_mode)) {1568struct object_id oid;1569int sub_head =resolve_gitlink_ref(ce->name,"HEAD", &oid);1570/*1571 * If we are not going to update the submodule, then1572 * 
we don't care.1573 */1574if(!sub_head && !oidcmp(&oid, &ce->oid))1575return0;1576returnverify_clean_submodule(sub_head ? NULL :oid_to_hex(&oid),1577 ce, error_type, o);1578}15791580/*1581 * First let's make sure we do not have a local modification1582 * in that directory.1583 */1584 namelen =ce_namelen(ce);1585for(i =locate_in_src_index(ce, o);1586 i < o->src_index->cache_nr;1587 i++) {1588struct cache_entry *ce2 = o->src_index->cache[i];1589int len =ce_namelen(ce2);1590if(len < namelen ||1591strncmp(ce->name, ce2->name, namelen) ||1592 ce2->name[namelen] !='/')1593break;1594/*1595 * ce2->name is an entry in the subdirectory to be1596 * removed.1597 */1598if(!ce_stage(ce2)) {1599if(verify_uptodate(ce2, o))1600return-1;1601add_entry(o, ce2, CE_REMOVE,0);1602mark_ce_used(ce2, o);1603}1604 cnt++;1605}16061607/*1608 * Then we need to make sure that we do not lose a locally1609 * present file that is not ignored.1610 */1611 pathbuf =xstrfmt("%.*s/", namelen, ce->name);16121613memset(&d,0,sizeof(d));1614if(o->dir)1615 d.exclude_per_dir = o->dir->exclude_per_dir;1616 i =read_directory(&d, &the_index, pathbuf, namelen+1, NULL);1617if(i)1618return o->gently ? -1:1619add_rejected_path(o, ERROR_NOT_UPTODATE_DIR, ce->name);1620free(pathbuf);1621return cnt;1622}16231624/*1625 * This gets called when there was no index entry for the tree entry 'dst',1626 * but we found a file in the working tree that 'lstat()' said was fine,1627 * and we're on a case-insensitive filesystem.1628 *1629 * See if we can find a case-insensitive match in the index that also1630 * matches the stat information, and assume it's that other file!1631 */1632static inticase_exists(struct unpack_trees_options *o,const char*name,int len,struct stat *st)1633{1634const struct cache_entry *src;16351636 src =index_file_exists(o->src_index, name, len,1);1637return src && !ie_match_stat(o->src_index, src, st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE);1638}16391640static intcheck_ok_to_remove(const char*name,int len,int dtype,1641const struct cache_entry *ce,struct stat *st,1642enum unpack_trees_error_types error_type,1643struct unpack_trees_options *o)1644{1645const struct cache_entry *result;16461647/*1648 * It may be that the 'lstat()' succeeded even though1649 * target 'ce' was absent, because there is an old1650 * entry that is different only in case..1651 *1652 * Ignore that lstat() if it matches.1653 */1654if(ignore_case &&icase_exists(o, name, len, st))1655return0;16561657if(o->dir &&1658is_excluded(o->dir, &the_index, name, &dtype))1659/*1660 * ce->name is explicitly excluded, so it is Ok to1661 * overwrite it.1662 */1663return0;1664if(S_ISDIR(st->st_mode)) {1665/*1666 * We are checking out path "foo" and1667 * found "foo/." in the working tree.1668 * This is tricky -- if we have modified1669 * files that are in "foo/" we would lose1670 * them.1671 */1672if(verify_clean_subdirectory(ce, error_type, o) <0)1673return-1;1674return0;1675}16761677/*1678 * The previous round may already have decided to1679 * delete this path, which is in a subdirectory that1680 * is being replaced with a blob.1681 */1682 result =index_file_exists(&o->result, name, len,0);1683if(result) {1684if(result->ce_flags & CE_REMOVE)1685return0;1686}16871688return o->gently ? 
-1:1689add_rejected_path(o, error_type, name);1690}16911692/*1693 * We do not want to remove or overwrite a working tree file that1694 * is not tracked, unless it is ignored.1695 */1696static intverify_absent_1(const struct cache_entry *ce,1697enum unpack_trees_error_types error_type,1698struct unpack_trees_options *o)1699{1700int len;1701struct stat st;17021703if(o->index_only || o->reset || !o->update)1704return0;17051706 len =check_leading_path(ce->name,ce_namelen(ce));1707if(!len)1708return0;1709else if(len >0) {1710char*path;1711int ret;17121713 path =xmemdupz(ce->name, len);1714if(lstat(path, &st))1715 ret =error_errno("cannot stat '%s'", path);1716else{1717if(submodule_from_ce(ce))1718 ret =check_submodule_move_head(ce,1719oid_to_hex(&ce->oid),1720 NULL, o);1721else1722 ret =check_ok_to_remove(path, len, DT_UNKNOWN, NULL,1723&st, error_type, o);1724}1725free(path);1726return ret;1727}else if(lstat(ce->name, &st)) {1728if(errno != ENOENT)1729returnerror_errno("cannot stat '%s'", ce->name);1730return0;1731}else{1732if(submodule_from_ce(ce))1733returncheck_submodule_move_head(ce,oid_to_hex(&ce->oid),1734 NULL, o);17351736returncheck_ok_to_remove(ce->name,ce_namelen(ce),1737ce_to_dtype(ce), ce, &st,1738 error_type, o);1739}1740}17411742static intverify_absent(const struct cache_entry *ce,1743enum unpack_trees_error_types error_type,1744struct unpack_trees_options *o)1745{1746if(!o->skip_sparse_checkout && (ce->ce_flags & CE_NEW_SKIP_WORKTREE))1747return0;1748returnverify_absent_1(ce, error_type, o);1749}17501751static intverify_absent_sparse(const struct cache_entry *ce,1752enum unpack_trees_error_types error_type,1753struct unpack_trees_options *o)1754{1755enum unpack_trees_error_types orphaned_error = error_type;1756if(orphaned_error == ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN)1757 orphaned_error = ERROR_WOULD_LOSE_ORPHANED_OVERWRITTEN;17581759returnverify_absent_1(ce, orphaned_error, o);1760}17611762static intmerged_entry(const struct cache_entry *ce,1763const struct cache_entry *old,1764struct unpack_trees_options *o)1765{1766int update = CE_UPDATE;1767struct cache_entry *merge =dup_entry(ce);17681769if(!old) {1770/*1771 * New index entries. In sparse checkout, the following1772 * verify_absent() will be delayed until after1773 * traverse_trees() finishes in unpack_trees(), then:1774 *1775 * - CE_NEW_SKIP_WORKTREE will be computed correctly1776 * - verify_absent() be called again, this time with1777 * correct CE_NEW_SKIP_WORKTREE1778 *1779 * verify_absent() call here does nothing in sparse1780 * checkout (i.e. 
o->skip_sparse_checkout == 0)1781 */1782 update |= CE_ADDED;1783 merge->ce_flags |= CE_NEW_SKIP_WORKTREE;17841785if(verify_absent(merge,1786 ERROR_WOULD_LOSE_UNTRACKED_OVERWRITTEN, o)) {1787free(merge);1788return-1;1789}1790invalidate_ce_path(merge, o);17911792if(submodule_from_ce(ce)) {1793int ret =check_submodule_move_head(ce, NULL,1794oid_to_hex(&ce->oid),1795 o);1796if(ret)1797return ret;1798}17991800}else if(!(old->ce_flags & CE_CONFLICTED)) {1801/*1802 * See if we can re-use the old CE directly?1803 * That way we get the uptodate stat info.1804 *1805 * This also removes the UPDATE flag on a match; otherwise1806 * we will end up overwriting local changes in the work tree.1807 */1808if(same(old, merge)) {1809copy_cache_entry(merge, old);1810 update =0;1811}else{1812if(verify_uptodate(old, o)) {1813free(merge);1814return-1;1815}1816/* Migrate old flags over */1817 update |= old->ce_flags & (CE_SKIP_WORKTREE | CE_NEW_SKIP_WORKTREE);1818invalidate_ce_path(old, o);1819}18201821if(submodule_from_ce(ce)) {1822int ret =check_submodule_move_head(ce,oid_to_hex(&old->oid),1823oid_to_hex(&ce->oid),1824 o);1825if(ret)1826return ret;1827}1828}else{1829/*1830 * Previously unmerged entry left as an existence1831 * marker by read_index_unmerged();1832 */1833invalidate_ce_path(old, o);1834}18351836do_add_entry(o, merge, update, CE_STAGEMASK);1837return1;1838}18391840static intdeleted_entry(const struct cache_entry *ce,1841const struct cache_entry *old,1842struct unpack_trees_options *o)1843{1844/* Did it exist in the index? */1845if(!old) {1846if(verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))1847return-1;1848return0;1849}1850if(!(old->ce_flags & CE_CONFLICTED) &&verify_uptodate(old, o))1851return-1;1852add_entry(o, ce, CE_REMOVE,0);1853invalidate_ce_path(ce, o);1854return1;1855}18561857static intkeep_entry(const struct cache_entry *ce,1858struct unpack_trees_options *o)1859{1860add_entry(o, ce,0,0);1861return1;1862}18631864#if DBRT_DEBUG1865static voidshow_stage_entry(FILE*o,1866const char*label,const struct cache_entry *ce)1867{1868if(!ce)1869fprintf(o,"%s(missing)\n", label);1870else1871fprintf(o,"%s%06o%s %d\t%s\n",1872 label,1873 ce->ce_mode,1874oid_to_hex(&ce->oid),1875ce_stage(ce),1876 ce->name);1877}1878#endif18791880intthreeway_merge(const struct cache_entry *const*stages,1881struct unpack_trees_options *o)1882{1883const struct cache_entry *index;1884const struct cache_entry *head;1885const struct cache_entry *remote = stages[o->head_idx +1];1886int count;1887int head_match =0;1888int remote_match =0;18891890int df_conflict_head =0;1891int df_conflict_remote =0;18921893int any_anc_missing =0;1894int no_anc_exists =1;1895int i;18961897for(i =1; i < o->head_idx; i++) {1898if(!stages[i] || stages[i] == o->df_conflict_entry)1899 any_anc_missing =1;1900else1901 no_anc_exists =0;1902}19031904 index = stages[0];1905 head = stages[o->head_idx];19061907if(head == o->df_conflict_entry) {1908 df_conflict_head =1;1909 head = NULL;1910}19111912if(remote == o->df_conflict_entry) {1913 df_conflict_remote =1;1914 remote = NULL;1915}19161917/*1918 * First, if there's a #16 situation, note that to prevent #131919 * and #14.1920 */1921if(!same(remote, head)) {1922for(i =1; i < o->head_idx; i++) {1923if(same(stages[i], head)) {1924 head_match = i;1925}1926if(same(stages[i], remote)) {1927 remote_match = i;1928}1929}1930}19311932/*1933 * We start with cases where the index is allowed to match1934 * something other than the head: #14(ALT) and #2ALT, where it1935 * is permitted to match the result 
instead.1936 */1937/* #14, #14ALT, #2ALT */1938if(remote && !df_conflict_head && head_match && !remote_match) {1939if(index && !same(index, remote) && !same(index, head))1940returnreject_merge(index, o);1941returnmerged_entry(remote, index, o);1942}1943/*1944 * If we have an entry in the index cache, then we want to1945 * make sure that it matches head.1946 */1947if(index && !same(index, head))1948returnreject_merge(index, o);19491950if(head) {1951/* #5ALT, #15 */1952if(same(head, remote))1953returnmerged_entry(head, index, o);1954/* #13, #3ALT */1955if(!df_conflict_remote && remote_match && !head_match)1956returnmerged_entry(head, index, o);1957}19581959/* #1 */1960if(!head && !remote && any_anc_missing)1961return0;19621963/*1964 * Under the "aggressive" rule, we resolve mostly trivial1965 * cases that we historically had git-merge-one-file resolve.1966 */1967if(o->aggressive) {1968int head_deleted = !head;1969int remote_deleted = !remote;1970const struct cache_entry *ce = NULL;19711972if(index)1973 ce = index;1974else if(head)1975 ce = head;1976else if(remote)1977 ce = remote;1978else{1979for(i =1; i < o->head_idx; i++) {1980if(stages[i] && stages[i] != o->df_conflict_entry) {1981 ce = stages[i];1982break;1983}1984}1985}19861987/*1988 * Deleted in both.1989 * Deleted in one and unchanged in the other.1990 */1991if((head_deleted && remote_deleted) ||1992(head_deleted && remote && remote_match) ||1993(remote_deleted && head && head_match)) {1994if(index)1995returndeleted_entry(index, index, o);1996if(ce && !head_deleted) {1997if(verify_absent(ce, ERROR_WOULD_LOSE_UNTRACKED_REMOVED, o))1998return-1;1999}2000return0;2001}2002/*2003 * Added in both, identically.2004 */2005if(no_anc_exists && head && remote &&same(head, remote))2006returnmerged_entry(head, index, o);20072008}20092010/* Below are "no merge" cases, which require that the index be2011 * up-to-date to avoid the files getting overwritten with2012 * conflict resolution files.2013 */2014if(index) {2015if(verify_uptodate(index, o))2016return-1;2017}20182019 o->nontrivial_merge =1;20202021/* #2, #3, #4, #6, #7, #9, #10, #11. */2022 count =0;2023if(!head_match || !remote_match) {2024for(i =1; i < o->head_idx; i++) {2025if(stages[i] && stages[i] != o->df_conflict_entry) {2026keep_entry(stages[i], o);2027 count++;2028break;2029}2030}2031}2032#if DBRT_DEBUG2033else{2034fprintf(stderr,"read-tree: warning #16 detected\n");2035show_stage_entry(stderr,"head ", stages[head_match]);2036show_stage_entry(stderr,"remote ", stages[remote_match]);2037}2038#endif2039if(head) { count +=keep_entry(head, o); }2040if(remote) { count +=keep_entry(remote, o); }2041return count;2042}20432044/*2045 * Two-way merge.2046 *2047 * The rule is to "carry forward" what is in the index without losing2048 * information across a "fast-forward", favoring a successful merge2049 * over a merge failure when it makes sense. 
For details of the2050 * "carry forward" rule, please see <Documentation/git-read-tree.txt>.2051 *2052 */2053inttwoway_merge(const struct cache_entry *const*src,2054struct unpack_trees_options *o)2055{2056const struct cache_entry *current = src[0];2057const struct cache_entry *oldtree = src[1];2058const struct cache_entry *newtree = src[2];20592060if(o->merge_size !=2)2061returnerror("Cannot do a twoway merge of%dtrees",2062 o->merge_size);20632064if(oldtree == o->df_conflict_entry)2065 oldtree = NULL;2066if(newtree == o->df_conflict_entry)2067 newtree = NULL;20682069if(current) {2070if(current->ce_flags & CE_CONFLICTED) {2071if(same(oldtree, newtree) || o->reset) {2072if(!newtree)2073returndeleted_entry(current, current, o);2074else2075returnmerged_entry(newtree, current, o);2076}2077returnreject_merge(current, o);2078}else if((!oldtree && !newtree) ||/* 4 and 5 */2079(!oldtree && newtree &&2080same(current, newtree)) ||/* 6 and 7 */2081(oldtree && newtree &&2082same(oldtree, newtree)) ||/* 14 and 15 */2083(oldtree && newtree &&2084!same(oldtree, newtree) &&/* 18 and 19 */2085same(current, newtree))) {2086returnkeep_entry(current, o);2087}else if(oldtree && !newtree &&same(current, oldtree)) {2088/* 10 or 11 */2089returndeleted_entry(oldtree, current, o);2090}else if(oldtree && newtree &&2091same(current, oldtree) && !same(current, newtree)) {2092/* 20 or 21 */2093returnmerged_entry(newtree, current, o);2094}else2095returnreject_merge(current, o);2096}2097else if(newtree) {2098if(oldtree && !o->initial_checkout) {2099/*2100 * deletion of the path was staged;2101 */2102if(same(oldtree, newtree))2103return1;2104returnreject_merge(oldtree, o);2105}2106returnmerged_entry(newtree, current, o);2107}2108returndeleted_entry(oldtree, current, o);2109}21102111/*2112 * Bind merge.2113 *2114 * Keep the index entries at stage0, collapse stage1 but make sure2115 * stage0 does not have anything there.2116 */2117intbind_merge(const struct cache_entry *const*src,2118struct unpack_trees_options *o)2119{2120const struct cache_entry *old = src[0];2121const struct cache_entry *a = src[1];21222123if(o->merge_size !=1)2124returnerror("Cannot do a bind merge of%dtrees",2125 o->merge_size);2126if(a && old)2127return o->gently ? -1:2128error(ERRORMSG(o, ERROR_BIND_OVERLAP),2129super_prefixed(a->name),2130super_prefixed(old->name));2131if(!a)2132returnkeep_entry(old, o);2133else2134returnmerged_entry(a, NULL, o);2135}21362137/*2138 * One-way merge.2139 *2140 * The rule is:2141 * - take the stat information from stage0, take the data from stage12142 */2143intoneway_merge(const struct cache_entry *const*src,2144struct unpack_trees_options *o)2145{2146const struct cache_entry *old = src[0];2147const struct cache_entry *a = src[1];21482149if(o->merge_size !=1)2150returnerror("Cannot do a oneway merge of%dtrees",2151 o->merge_size);21522153if(!a || a == o->df_conflict_entry)2154returndeleted_entry(old, old, o);21552156if(old &&same(old, a)) {2157int update =0;2158if(o->reset && o->update && !ce_uptodate(old) && !ce_skip_worktree(old)) {2159struct stat st;2160if(lstat(old->name, &st) ||2161ie_match_stat(o->src_index, old, &st, CE_MATCH_IGNORE_VALID|CE_MATCH_IGNORE_SKIP_WORKTREE))2162 update |= CE_UPDATE;2163}2164if(o->update &&S_ISGITLINK(old->ce_mode) &&2165should_update_submodules() && !verify_uptodate(old, o))2166 update |= CE_UPDATE;2167add_entry(o, old, update,0);2168return0;2169}2170returnmerged_entry(a, old, o);2171}
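
/*
 * Illustrative sketch (not part of the original file): roughly how a caller
 * such as builtin/checkout.c drives the merge functions above.  It fills in
 * struct unpack_trees_options, picks one of the *_merge() callbacks and hands
 * the trees to unpack_trees().  The surrounding setup (a parsed "tree" object
 * and "the_index") is assumed here.
 *
 *	struct unpack_trees_options opts;
 *	struct tree_desc t;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.head_idx = -1;
 *	opts.update = 1;
 *	opts.reset = 1;
 *	opts.merge = 1;
 *	opts.fn = oneway_merge;
 *	opts.src_index = &the_index;
 *	opts.dst_index = &the_index;
 *	setup_unpack_trees_porcelain(&opts, "checkout");
 *
 *	parse_tree(tree);
 *	init_tree_desc(&t, tree->buffer, tree->size);
 *	if (unpack_trees(1, &t, &opts))
 *		return -1;
 *
 * A branch-switching operation would instead pass two tree_desc entries
 * (the current HEAD tree and the new tree) with opts.fn = twoway_merge.
 */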