1/* 2 * GIT - The information manager from hell 3 * 4 * Copyright (C) Linus Torvalds, 2005 5 */ 6#include"cache.h" 7 8static int stage =0; 9static int update =0; 10 11static intunpack_tree(unsigned char*sha1) 12{ 13void*buffer; 14unsigned long size; 15int ret; 16 17 buffer =read_object_with_reference(sha1,"tree", &size, NULL); 18if(!buffer) 19return-1; 20 ret =read_tree(buffer, size, stage, NULL); 21free(buffer); 22return ret; 23} 24 25static intpath_matches(struct cache_entry *a,struct cache_entry *b) 26{ 27int len =ce_namelen(a); 28returnce_namelen(b) == len && 29!memcmp(a->name, b->name, len); 30} 31 32static intsame(struct cache_entry *a,struct cache_entry *b) 33{ 34return a->ce_mode == b->ce_mode && 35!memcmp(a->sha1, b->sha1,20); 36} 37 38 39/* 40 * This removes all trivial merges that don't change the tree 41 * and collapses them to state 0. 42 */ 43static struct cache_entry *merge_entries(struct cache_entry *a, 44struct cache_entry *b, 45struct cache_entry *c) 46{ 47/* 48 * Ok, all three entries describe the same 49 * filename, but maybe the contents or file 50 * mode have changed? 51 * 52 * The trivial cases end up being the ones where two 53 * out of three files are the same: 54 * - both destinations the same, trivially take either 55 * - one of the destination versions hasn't changed, 56 * take the other. 57 * 58 * The "all entries exactly the same" case falls out as 59 * a special case of any of the "two same" cases. 60 * 61 * Here "a" is "original", and "b" and "c" are the two 62 * trees we are merging. 63 */ 64if(a && b && c) { 65if(same(b,c)) 66return c; 67if(same(a,b)) 68return c; 69if(same(a,c)) 70return b; 71} 72return NULL; 73} 74 75/* 76 * When a CE gets turned into an unmerged entry, we 77 * want it to be up-to-date 78 */ 79static voidverify_uptodate(struct cache_entry *ce) 80{ 81struct stat st; 82 83if(!lstat(ce->name, &st)) { 84unsigned changed =ce_match_stat(ce, &st); 85if(!changed) 86return; 87 errno =0; 88} 89if(errno == ENOENT) 90return; 91die("Entry '%s' not uptodate. Cannot merge.", ce->name); 92} 93 94/* 95 * If the old tree contained a CE that isn't even in the 96 * result, that's always a problem, regardless of whether 97 * it's up-to-date or not (ie it can be a file that we 98 * have updated but not committed yet). 99 */ 100static voidreject_merge(struct cache_entry *ce) 101{ 102die("Entry '%s' would be overwritten by merge. Cannot merge.", ce->name); 103} 104 105static intmerged_entry_internal(struct cache_entry *merge,struct cache_entry *old,struct cache_entry **dst,int allow_dirty) 106{ 107 merge->ce_flags |=htons(CE_UPDATE); 108if(old) { 109/* 110 * See if we can re-use the old CE directly? 111 * That way we get the uptodate stat info. 112 * 113 * This also removes the UPDATE flag on 114 * a match. 115 */ 116if(same(old, merge)) { 117*merge = *old; 118}else if(!allow_dirty) { 119verify_uptodate(old); 120} 121} 122 merge->ce_flags &= ~htons(CE_STAGEMASK); 123*dst++ = merge; 124return1; 125} 126 127static intmerged_entry_allow_dirty(struct cache_entry *merge,struct cache_entry *old,struct cache_entry **dst) 128{ 129returnmerged_entry_internal(merge, old, dst,1); 130} 131 132static intmerged_entry(struct cache_entry *merge,struct cache_entry *old,struct cache_entry **dst) 133{ 134returnmerged_entry_internal(merge, old, dst,0); 135} 136 137static intdeleted_entry(struct cache_entry *ce,struct cache_entry *old,struct cache_entry **dst) 138{ 139if(old) 140verify_uptodate(old); 141 ce->ce_mode =0; 142*dst++ = ce; 143return1; 144} 145 146static intcauses_df_conflict(struct cache_entry *ce,int stage, 147struct cache_entry **dst_, 148struct cache_entry **next_, 149int tail) 150{ 151/* This is called during the merge operation and walking 152 * the active_cache[] array is messy, because it is in the 153 * middle of overlapping copy operation. The invariants 154 * are: 155 * (1) active_cache points at the first (zeroth) entry. 156 * (2) up to dst pointer are resolved entries. 157 * (3) from the next pointer (head-inclusive) to the tail 158 * of the active_cache array have the remaining paths 159 * to be processed. There can be a gap between dst 160 * and next. Note that next is called "src" in the 161 * merge_cache() function, and tail is the original 162 * end of active_cache array when merge_cache() started. 163 * (4) the path corresponding to *ce is not found in (2) 164 * or (3). It is in the gap. 165 * 166 * active_cache -----......+++++++++++++. 167 * ^dst ^next ^tail 168 */ 169int i, next, dst; 170const char*path = ce->name; 171int namelen =ce_namelen(ce); 172 173 next = next_ - active_cache; 174 dst = dst_ - active_cache; 175 176for(i =0; i < tail; i++) { 177int entlen, len; 178const char*one, *two; 179if(dst <= i && i < next) 180continue; 181 ce = active_cache[i]; 182if(ce_stage(ce) != stage) 183continue; 184/* If ce->name is a prefix of path, then path is a file 185 * that hangs underneath ce->name, which is bad. 186 * If path is a prefix of ce->name, then it is the 187 * other way around which also is bad. 188 */ 189 entlen =ce_namelen(ce); 190if(namelen == entlen) 191continue; 192if(namelen < entlen) { 193 len = namelen; 194 one = path; 195 two = ce->name; 196}else{ 197 len = entlen; 198 one = ce->name; 199 two = path; 200} 201if(memcmp(one, two, len)) 202continue; 203if(two[len] =='/') 204return1; 205} 206return0; 207} 208 209static intthreeway_merge(struct cache_entry *stages[4], 210struct cache_entry **dst, 211struct cache_entry **next,int tail) 212{ 213struct cache_entry *old = stages[0]; 214struct cache_entry *a = stages[1], *b = stages[2], *c = stages[3]; 215struct cache_entry *merge; 216int count; 217 218/* #5ALT */ 219if(!a && b && c &&same(b, c)) { 220if(old && !same(b, old)) 221return-1; 222returnmerged_entry_allow_dirty(b, old, dst); 223} 224/* #2ALT and #3ALT */ 225if(!a && (!!b != !!c)) { 226/* 227 * The reason we need to worry about directory/file 228 * conflicts only in #2ALT and #3ALT case is this: 229 * 230 * (1) For all other cases that read-tree internally 231 * resolves a path, we always have such a path in 232 * *both* stage2 and stage3 when we begin. 233 * Traditionally, the behaviour has been even 234 * stricter and we did not resolve a path without 235 * initially being in all of stage1, 2, and 3. 236 * 237 * (2) When read-tree finishes, all resolved paths (i.e. 238 * the paths that are in stage0) must have come from 239 * either stage2 or stage3. It is not possible to 240 * have a stage0 path as a result of a merge if 241 * neither stage2 nor stage3 had that path. 242 * 243 * (3) It is guaranteed that just after reading the 244 * stages, each stage cannot have directory/file 245 * conflicts on its own, because they are populated 246 * by reading hierarchy of a tree. Combined with 247 * (1) and (2) above, this means that no matter what 248 * combination of paths we take from stage2 and 249 * stage3 as a result of a merge, they cannot cause 250 * a directory/file conflict situation (otherwise 251 * the "guilty" path would have already had such a 252 * conflict in the original stage, either stage2 253 * or stage3). Although its stage2 is synthesized 254 * by overlaying the current index on top of "our 255 * head" tree, --emu23 case also has this guarantee, 256 * by calling add_cache_entry() to create such stage2 257 * entries. 258 * 259 * (4) Only #2ALT and #3ALT lack the guarantee (1). 260 * They resolve paths that exist only in stage2 261 * or stage3. The stage2 tree may have a file DF 262 * while stage3 tree may have a file DF/DF. If 263 * #2ALT and #3ALT rules happen to apply to both 264 * of them, we would end up having DF (coming from 265 * stage2) and DF/DF (from stage3) in the result. 266 * When we attempt to resolve a path that exists 267 * only in stage2, we need to make sure there is 268 * no path that would conflict with it in stage3 269 * and vice versa. 270 */ 271if(c) {/* #2ALT */ 272if(!causes_df_conflict(c,2, dst, next, tail) && 273(!old ||same(c, old))) 274returnmerged_entry_allow_dirty(c, old, dst); 275} 276else{/* #3ALT */ 277if(!causes_df_conflict(b,3, dst, next, tail) && 278(!old ||same(b, old))) 279returnmerged_entry_allow_dirty(b, old, dst); 280} 281/* otherwise we will apply the original rule */ 282} 283/* #14ALT */ 284if(a && b && c &&same(a, b) && !same(a, c)) { 285if(old &&same(old, c)) 286returnmerged_entry_allow_dirty(c, old, dst); 287/* otherwise the regular rule applies */ 288} 289/* 290 * If we have an entry in the index cache ("old"), then we want 291 * to make sure that it matches any entries in stage 2 ("first 292 * branch", aka "b"). 293 */ 294if(old) { 295if(!b || !same(old, b)) 296return-1; 297} 298 merge =merge_entries(a, b, c); 299if(merge) 300returnmerged_entry(merge, old, dst); 301if(old) 302verify_uptodate(old); 303 count =0; 304if(a) { *dst++ = a; count++; } 305if(b) { *dst++ = b; count++; } 306if(c) { *dst++ = c; count++; } 307return count; 308} 309 310/* 311 * Two-way merge. 312 * 313 * The rule is to "carry forward" what is in the index without losing 314 * information across a "fast forward", favoring a successful merge 315 * over a merge failure when it makes sense. For details of the 316 * "carry forward" rule, please see <Documentation/git-read-tree.txt>. 317 * 318 */ 319static inttwoway_merge(struct cache_entry **src,struct cache_entry **dst, 320struct cache_entry **next,int tail) 321{ 322struct cache_entry *current = src[0]; 323struct cache_entry *oldtree = src[1], *newtree = src[2]; 324 325if(src[3]) 326return-1; 327 328if(current) { 329if((!oldtree && !newtree) ||/* 4 and 5 */ 330(!oldtree && newtree && 331same(current, newtree)) ||/* 6 and 7 */ 332(oldtree && newtree && 333same(oldtree, newtree)) ||/* 14 and 15 */ 334(oldtree && newtree && 335!same(oldtree, newtree) &&/* 18 and 19*/ 336same(current, newtree))) { 337*dst++ = current; 338return1; 339} 340else if(oldtree && !newtree &&same(current, oldtree)) { 341/* 10 or 11 */ 342returndeleted_entry(oldtree, current, dst); 343} 344else if(oldtree && newtree && 345same(current, oldtree) && !same(current, newtree)) { 346/* 20 or 21 */ 347returnmerged_entry(newtree, current, dst); 348} 349else 350/* all other failures */ 351return-1; 352} 353else if(newtree) 354returnmerged_entry(newtree, current, dst); 355else 356returndeleted_entry(oldtree, current, dst); 357} 358 359/* 360 * Two-way merge emulated with three-way merge. 361 * 362 * This treats "read-tree -m H M" by transforming it internally 363 * into "read-tree -m H I+H M", where I+H is a tree that would 364 * contain the contents of the current index file, overlayed on 365 * top of H. Unlike the traditional two-way merge, this leaves 366 * the stages in the resulting index file and lets the user resolve 367 * the merge conflicts using standard tools for three-way merge. 368 * 369 * This function is just to set-up such an arrangement, and the 370 * actual merge uses threeway_merge() function. 371 */ 372static voidsetup_emu23(void) 373{ 374/* stage0 contains I, stage1 H, stage2 M. 375 * move stage2 to stage3, and create stage2 entries 376 * by scanning stage0 and stage1 entries. 377 */ 378int i, namelen, size; 379struct cache_entry *ce, *stage2; 380 381for(i =0; i < active_nr; i++) { 382 ce = active_cache[i]; 383if(ce_stage(ce) !=2) 384continue; 385/* hoist them up to stage 3 */ 386 namelen =ce_namelen(ce); 387 ce->ce_flags =create_ce_flags(namelen,3); 388} 389 390for(i =0; i < active_nr; i++) { 391 ce = active_cache[i]; 392if(ce_stage(ce) >1) 393continue; 394 namelen =ce_namelen(ce); 395 size =cache_entry_size(namelen); 396 stage2 =xmalloc(size); 397memcpy(stage2, ce, size); 398 stage2->ce_flags =create_ce_flags(namelen,2); 399if(add_cache_entry(stage2, ADD_CACHE_OK_TO_ADD) <0) 400die("cannot merge index and our head tree"); 401 402/* We are done with this name, so skip to next name */ 403while(i < active_nr && 404ce_namelen(active_cache[i]) == namelen && 405!memcmp(active_cache[i]->name, ce->name, namelen)) 406 i++; 407 i--;/* compensate for the loop control */ 408} 409} 410 411/* 412 * One-way merge. 413 * 414 * The rule is: 415 * - take the stat information from stage0, take the data from stage1 416 */ 417static intoneway_merge(struct cache_entry **src,struct cache_entry **dst, 418struct cache_entry **next,int tail) 419{ 420struct cache_entry *old = src[0]; 421struct cache_entry *a = src[1]; 422 423if(src[2] || src[3]) 424return-1; 425 426if(!a) 427return0; 428if(old &&same(old, a)) { 429*dst++ = old; 430return1; 431} 432returnmerged_entry(a, NULL, dst); 433} 434 435static voidcheck_updates(struct cache_entry **src,int nr) 436{ 437static struct checkout state = { 438.base_dir ="", 439.force =1, 440.quiet =1, 441.refresh_cache =1, 442}; 443unsigned short mask =htons(CE_UPDATE); 444while(nr--) { 445struct cache_entry *ce = *src++; 446if(!ce->ce_mode) { 447if(update) 448unlink(ce->name); 449continue; 450} 451if(ce->ce_flags & mask) { 452 ce->ce_flags &= ~mask; 453if(update) 454checkout_entry(ce, &state); 455} 456} 457} 458 459typedefint(*merge_fn_t)(struct cache_entry **,struct cache_entry **,struct cache_entry **,int); 460 461static voidmerge_cache(struct cache_entry **src,int nr, merge_fn_t fn) 462{ 463struct cache_entry **dst = src; 464int tail = nr; 465 466while(nr) { 467int entries; 468struct cache_entry *name, *ce, *stages[4] = { NULL, }; 469 470 name = ce = *src; 471for(;;) { 472int stage =ce_stage(ce); 473 stages[stage] = ce; 474 ce = *++src; 475 active_nr--; 476if(!--nr) 477break; 478if(!path_matches(ce, name)) 479break; 480} 481 482 entries =fn(stages, dst, src, tail); 483if(entries <0) 484reject_merge(name); 485 dst += entries; 486 active_nr += entries; 487} 488check_updates(active_cache, active_nr); 489} 490 491static intread_cache_unmerged(void) 492{ 493int i, deleted; 494struct cache_entry **dst; 495 496read_cache(); 497 dst = active_cache; 498 deleted =0; 499for(i =0; i < active_nr; i++) { 500struct cache_entry *ce = active_cache[i]; 501if(ce_stage(ce)) { 502 deleted++; 503continue; 504} 505if(deleted) 506*dst = ce; 507 dst++; 508} 509 active_nr -= deleted; 510return deleted; 511} 512 513static char*read_tree_usage ="git-read-tree (<sha> | -m [-u] <sha1> [<sha2> [<sha3>]])"; 514 515static struct cache_file cache_file; 516 517intmain(int argc,char**argv) 518{ 519int i, newfd, merge, reset, emu23; 520unsigned char sha1[20]; 521 522 newfd =hold_index_file_for_update(&cache_file,get_index_file()); 523if(newfd <0) 524die("unable to create new cachefile"); 525 526 merge =0; 527 reset =0; 528 emu23 =0; 529for(i =1; i < argc; i++) { 530const char*arg = argv[i]; 531 532/* "-u" means "update", meaning that a merge will update the working directory */ 533if(!strcmp(arg,"-u")) { 534 update =1; 535continue; 536} 537 538/* This differs from "-m" in that we'll silently ignore unmerged entries */ 539if(!strcmp(arg,"--reset")) { 540if(stage || merge || emu23) 541usage(read_tree_usage); 542 reset =1; 543 merge =1; 544 stage =1; 545read_cache_unmerged(); 546continue; 547} 548 549/* "-m" stands for "merge", meaning we start in stage 1 */ 550if(!strcmp(arg,"-m")) { 551if(stage || merge || emu23) 552usage(read_tree_usage); 553if(read_cache_unmerged()) 554die("you need to resolve your current index first"); 555 stage =1; 556 merge =1; 557continue; 558} 559 560/* "-emu23" uses 3-way merge logic to perform fast-forward */ 561if(!strcmp(arg,"--emu23")) { 562if(stage || merge || emu23) 563usage(read_tree_usage); 564if(read_cache_unmerged()) 565die("you need to resolve your current index first"); 566 merge = emu23 = stage =1; 567continue; 568} 569 570if(get_sha1(arg, sha1) <0) 571usage(read_tree_usage); 572if(stage >3) 573usage(read_tree_usage); 574if(unpack_tree(sha1) <0) 575die("failed to unpack tree object%s", arg); 576 stage++; 577} 578if(update && !merge) 579usage(read_tree_usage); 580if(merge) { 581static const merge_fn_t merge_function[] = { 582[1] = oneway_merge, 583[2] = twoway_merge, 584[3] = threeway_merge, 585}; 586 merge_fn_t fn; 587 588if(stage <2|| stage >4) 589die("just how do you expect me to merge%dtrees?", stage-1); 590if(emu23 && stage !=3) 591die("--emu23 takes only two trees"); 592 fn = merge_function[stage-1]; 593if(stage ==3&& emu23) { 594setup_emu23(); 595 fn = merge_function[3]; 596} 597merge_cache(active_cache, active_nr, fn); 598} 599if(write_cache(newfd, active_cache, active_nr) || 600commit_index_file(&cache_file)) 601die("unable to write new index file"); 602return0; 603}