/*
 *  LibXDiff by Davide Libenzi ( File Differential Library )
 *  Copyright (C) 2003  Davide Libenzi
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include "xinclude.h"


#define XDL_MAX_COST_MIN 256
#define XDL_HEUR_MIN_COST 256
#define XDL_LINE_MAX (long)((1UL << (CHAR_BIT * sizeof(long) - 1)) - 1)
#define XDL_SNAKE_CNT 20
#define XDL_K_HEUR 4


typedef struct s_xdpsplit {
	long i1, i2;
	int min_lo, min_hi;
} xdpsplit_t;


static long xdl_split(unsigned long const *ha1, long off1, long lim1,
		      unsigned long const *ha2, long off2, long lim2,
		      long *kvdf, long *kvdb, int need_min, xdpsplit_t *spl,
		      xdalgoenv_t *xenv);
static xdchange_t *xdl_add_change(xdchange_t *xscr, long i1, long i2, long chg1, long chg2);


/*
 * See "An O(ND) Difference Algorithm and its Variations", by Eugene Myers.
 * Basically it considers a "box" (off1, off2, lim1, lim2) and scans from both
 * the forward diagonal starting at (off1, off2) and the backward diagonal
 * starting at (lim1, lim2). If the K values on the same diagonal cross, the
 * furthest reaching point is returned. Running this algorithm in full can be
 * too expensive in some cases, so a little bit of heuristic is needed to cut
 * the search short and return a suboptimal split point.
 */
static long xdl_split(unsigned long const *ha1, long off1, long lim1,
		      unsigned long const *ha2, long off2, long lim2,
		      long *kvdf, long *kvdb, int need_min, xdpsplit_t *spl,
		      xdalgoenv_t *xenv) {
	long dmin = off1 - lim2, dmax = lim1 - off2;
	long fmid = off1 - off2, bmid = lim1 - lim2;
	long odd = (fmid - bmid) & 1;
	long fmin = fmid, fmax = fmid;
	long bmin = bmid, bmax = bmid;
	long ec, d, i1, i2, prev1, best, dd, v, k;

	/*
	 * Set initial diagonal values for both forward and backward path.
	 */
	kvdf[fmid] = off1;
	kvdb[bmid] = lim1;

	for (ec = 1;; ec++) {
		int got_snake = 0;

		/*
		 * We need to extend the diagonal "domain" by one. If the next
		 * value exits the box boundaries we need to move the boundary
		 * in the opposite direction instead, because (max - min) must
		 * remain even. Also, we initialize the external K value to -1
		 * so that we can avoid extra condition checks inside the core
		 * loop.
		 */
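		/*
		 * Illustrative note (an exposition aid, not part of the
		 * original sources): diagonals are indexed by k = i1 - i2,
		 * and kvdf[k] records the furthest i1 reached on diagonal k
		 * by the forward search (kvdb[k] the lowest i1 reached by
		 * the backward one). Seeding the slot just outside the
		 * active window with -1 (or XDL_LINE_MAX for the backward
		 * search) lets the "pick the better neighbor" test below
		 * treat the window edges like any other diagonal.
		 */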
		if (fmin > dmin)
			kvdf[--fmin - 1] = -1;
		else
			++fmin;
		if (fmax < dmax)
			kvdf[++fmax + 1] = -1;
		else
			--fmax;

		for (d = fmax; d >= fmin; d -= 2) {
			if (kvdf[d - 1] >= kvdf[d + 1])
				i1 = kvdf[d - 1] + 1;
			else
				i1 = kvdf[d + 1];
			prev1 = i1;
			i2 = i1 - d;
			for (; i1 < lim1 && i2 < lim2 && ha1[i1] == ha2[i2]; i1++, i2++);
			if (i1 - prev1 > xenv->snake_cnt)
				got_snake = 1;
			kvdf[d] = i1;
			if (odd && bmin <= d && d <= bmax && kvdb[d] <= i1) {
				spl->i1 = i1;
				spl->i2 = i2;
				spl->min_lo = spl->min_hi = 1;
				return ec;
			}
		}

		/*
		 * We need to extend the diagonal "domain" by one. If the next
		 * value exits the box boundaries we need to move the boundary
		 * in the opposite direction instead, because (max - min) must
		 * remain even. Also, we initialize the external K value to
		 * XDL_LINE_MAX so that we can avoid extra condition checks
		 * inside the core loop.
		 */
		if (bmin > dmin)
			kvdb[--bmin - 1] = XDL_LINE_MAX;
		else
			++bmin;
		if (bmax < dmax)
			kvdb[++bmax + 1] = XDL_LINE_MAX;
		else
			--bmax;

		for (d = bmax; d >= bmin; d -= 2) {
			if (kvdb[d - 1] < kvdb[d + 1])
				i1 = kvdb[d - 1];
			else
				i1 = kvdb[d + 1] - 1;
			prev1 = i1;
			i2 = i1 - d;
			for (; i1 > off1 && i2 > off2 && ha1[i1 - 1] == ha2[i2 - 1]; i1--, i2--);
			if (prev1 - i1 > xenv->snake_cnt)
				got_snake = 1;
			kvdb[d] = i1;
			if (!odd && fmin <= d && d <= fmax && i1 <= kvdf[d]) {
				spl->i1 = i1;
				spl->i2 = i2;
				spl->min_lo = spl->min_hi = 1;
				return ec;
			}
		}

		if (need_min)
			continue;

		/*
		 * If the edit cost is above the heuristic trigger and if
		 * we got a good snake, we sample the current diagonals to see
		 * if some of them have reached an "interesting" path. Our
		 * measure is a function of the distance from the diagonal
		 * corner (i1 + i2) penalized with the distance from the
		 * mid diagonal itself. If this value is above the current
		 * edit cost times a magic factor (XDL_K_HEUR) we consider
		 * it interesting.
		 */
		if (got_snake && ec > xenv->heur_min) {
			for (best = 0, d = fmax; d >= fmin; d -= 2) {
				dd = d > fmid ? d - fmid : fmid - d;
				i1 = kvdf[d];
				i2 = i1 - d;
				v = (i1 - off1) + (i2 - off2) - dd;

				if (v > XDL_K_HEUR * ec && v > best &&
				    off1 + xenv->snake_cnt <= i1 && i1 < lim1 &&
				    off2 + xenv->snake_cnt <= i2 && i2 < lim2) {
					for (k = 1; ha1[i1 - k] == ha2[i2 - k]; k++)
						if (k == xenv->snake_cnt) {
							best = v;
							spl->i1 = i1;
							spl->i2 = i2;
							break;
						}
				}
			}
			if (best > 0) {
				spl->min_lo = 1;
				spl->min_hi = 0;
				return ec;
			}

			for (best = 0, d = bmax; d >= bmin; d -= 2) {
				dd = d > bmid ? d - bmid : bmid - d;
				i1 = kvdb[d];
				i2 = i1 - d;
				v = (lim1 - i1) + (lim2 - i2) - dd;

				if (v > XDL_K_HEUR * ec && v > best &&
				    off1 < i1 && i1 <= lim1 - xenv->snake_cnt &&
				    off2 < i2 && i2 <= lim2 - xenv->snake_cnt) {
					for (k = 0; ha1[i1 + k] == ha2[i2 + k]; k++)
						if (k == xenv->snake_cnt - 1) {
							best = v;
							spl->i1 = i1;
							spl->i2 = i2;
							break;
						}
				}
			}
			if (best > 0) {
				spl->min_lo = 0;
				spl->min_hi = 1;
				return ec;
			}
		}

		/*
		 * Enough is enough. We spent too much time here and now we
		 * collect the furthest reaching path using the (i1 + i2)
		 * measure.
		 */
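		/*
		 * Illustrative note (an exposition aid, not part of the
		 * original sources): xenv->mxcost is roughly sqrt(ndiags),
		 * clamped to XDL_MAX_COST_MIN, so once the edit cost "ec"
		 * grows past it we give up on an optimal split. The forward
		 * candidate maximizing i1 + i2 and the backward candidate
		 * minimizing it are the two furthest reaching points;
		 * whichever has advanced further from its own corner of the
		 * box is taken as the (possibly suboptimal) split point.
		 */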
		if (ec >= xenv->mxcost) {
			long fbest, fbest1, bbest, bbest1;

			fbest = fbest1 = -1;
			for (d = fmax; d >= fmin; d -= 2) {
				i1 = XDL_MIN(kvdf[d], lim1);
				i2 = i1 - d;
				if (lim2 < i2)
					i1 = lim2 + d, i2 = lim2;
				if (fbest < i1 + i2) {
					fbest = i1 + i2;
					fbest1 = i1;
				}
			}

			bbest = bbest1 = XDL_LINE_MAX;
			for (d = bmax; d >= bmin; d -= 2) {
				i1 = XDL_MAX(off1, kvdb[d]);
				i2 = i1 - d;
				if (i2 < off2)
					i1 = off2 + d, i2 = off2;
				if (i1 + i2 < bbest) {
					bbest = i1 + i2;
					bbest1 = i1;
				}
			}

			if ((lim1 + lim2) - bbest < fbest - (off1 + off2)) {
				spl->i1 = fbest1;
				spl->i2 = fbest - fbest1;
				spl->min_lo = 1;
				spl->min_hi = 0;
			} else {
				spl->i1 = bbest1;
				spl->i2 = bbest - bbest1;
				spl->min_lo = 0;
				spl->min_hi = 1;
			}
			return ec;
		}
	}
}


/*
 * Rule: "Divide et Impera". Recursively split the box into sub-boxes by
 * calling the box splitting function. Note that the real job (marking
 * changed lines) is done in the two boundary-reaching checks.
 */
int xdl_recs_cmp(diffdata_t *dd1, long off1, long lim1,
		 diffdata_t *dd2, long off2, long lim2,
		 long *kvdf, long *kvdb, int need_min, xdalgoenv_t *xenv) {
	unsigned long const *ha1 = dd1->ha, *ha2 = dd2->ha;

	/*
	 * Shrink the box by walking through each diagonal snake (SW and NE).
	 */
	for (; off1 < lim1 && off2 < lim2 && ha1[off1] == ha2[off2]; off1++, off2++);
	for (; off1 < lim1 && off2 < lim2 && ha1[lim1 - 1] == ha2[lim2 - 1]; lim1--, lim2--);

	/*
	 * If one dimension is empty, then all records on the other one must
	 * obviously be changed.
	 */
	if (off1 == lim1) {
		char *rchg2 = dd2->rchg;
		long *rindex2 = dd2->rindex;

		for (; off2 < lim2; off2++)
			rchg2[rindex2[off2]] = 1;
	} else if (off2 == lim2) {
		char *rchg1 = dd1->rchg;
		long *rindex1 = dd1->rindex;

		for (; off1 < lim1; off1++)
			rchg1[rindex1[off1]] = 1;
	} else {
		xdpsplit_t spl;
		spl.i1 = spl.i2 = 0;

		/*
		 * Divide ...
		 */
		if (xdl_split(ha1, off1, lim1, ha2, off2, lim2, kvdf, kvdb,
			      need_min, &spl, xenv) < 0) {

			return -1;
		}

		/*
		 * ... et Impera.
		 */
		if (xdl_recs_cmp(dd1, off1, spl.i1, dd2, off2, spl.i2,
				 kvdf, kvdb, spl.min_lo, xenv) < 0 ||
		    xdl_recs_cmp(dd1, spl.i1, lim1, dd2, spl.i2, lim2,
				 kvdf, kvdb, spl.min_hi, xenv) < 0) {

			return -1;
		}
	}

	return 0;
}


int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
		xdfenv_t *xe) {
	long ndiags;
	long *kvd, *kvdf, *kvdb;
	xdalgoenv_t xenv;
	diffdata_t dd1, dd2;

	if (XDF_DIFF_ALG(xpp->flags) == XDF_PATIENCE_DIFF)
		return xdl_do_patience_diff(mf1, mf2, xpp, xe);

	if (XDF_DIFF_ALG(xpp->flags) == XDF_HISTOGRAM_DIFF)
		return xdl_do_histogram_diff(mf1, mf2, xpp, xe);

	if (xdl_prepare_env(mf1, mf2, xpp, xe) < 0) {

		return -1;
	}

	/*
	 * Allocate and set up the K vectors to be used by the differential
	 * algorithm. One is to store the forward path and one to store the
	 * backward path.
	 */
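	/*
	 * Layout sketch (illustrative, not part of the original sources):
	 * kvd holds both vectors back to back; kvdf and kvdb are then offset
	 * by xdf2.nreff + 1 so that they can be indexed directly with the
	 * diagonal number k = i1 - i2, which ranges roughly from -nreff2 to
	 * +nreff1 and is therefore frequently negative.
	 */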
	ndiags = xe->xdf1.nreff + xe->xdf2.nreff + 3;
	if (!(kvd = (long *) xdl_malloc((2 * ndiags + 2) * sizeof(long)))) {

		xdl_free_env(xe);
		return -1;
	}
	kvdf = kvd;
	kvdb = kvdf + ndiags;
	kvdf += xe->xdf2.nreff + 1;
	kvdb += xe->xdf2.nreff + 1;

	xenv.mxcost = xdl_bogosqrt(ndiags);
	if (xenv.mxcost < XDL_MAX_COST_MIN)
		xenv.mxcost = XDL_MAX_COST_MIN;
	xenv.snake_cnt = XDL_SNAKE_CNT;
	xenv.heur_min = XDL_HEUR_MIN_COST;

	dd1.nrec = xe->xdf1.nreff;
	dd1.ha = xe->xdf1.ha;
	dd1.rchg = xe->xdf1.rchg;
	dd1.rindex = xe->xdf1.rindex;
	dd2.nrec = xe->xdf2.nreff;
	dd2.ha = xe->xdf2.ha;
	dd2.rchg = xe->xdf2.rchg;
	dd2.rindex = xe->xdf2.rindex;

	if (xdl_recs_cmp(&dd1, 0, dd1.nrec, &dd2, 0, dd2.nrec,
			 kvdf, kvdb, (xpp->flags & XDF_NEED_MINIMAL) != 0, &xenv) < 0) {

		xdl_free(kvd);
		xdl_free_env(xe);
		return -1;
	}

	xdl_free(kvd);

	return 0;
}


static xdchange_t *xdl_add_change(xdchange_t *xscr, long i1, long i2, long chg1, long chg2) {
	xdchange_t *xch;

	if (!(xch = (xdchange_t *) xdl_malloc(sizeof(xdchange_t))))
		return NULL;

	xch->next = xscr;
	xch->i1 = i1;
	xch->i2 = i2;
	xch->chg1 = chg1;
	xch->chg2 = chg2;
	xch->ignore = 0;

	return xch;
}


static int is_blank_line(xrecord_t *rec, long flags)
{
	return xdl_blankline(rec->ptr, rec->size, flags);
}

static int recs_match(xrecord_t *rec1, xrecord_t *rec2, long flags)
{
	return (rec1->ha == rec2->ha &&
		xdl_recmatch(rec1->ptr, rec1->size,
			     rec2->ptr, rec2->size,
			     flags));
}

/*
 * If a line is indented more than this, get_indent() just returns this value.
 * This avoids having to do absurd amounts of work for data that are not
 * human-readable text, and also ensures that the output of get_indent fits
 * within an int.
 */
#define MAX_INDENT 200

/*
 * Return the amount of indentation of the specified line, treating TAB as 8
 * columns. Return -1 if line is empty or contains only whitespace. Clamp the
 * output value at MAX_INDENT.
 */
static int get_indent(xrecord_t *rec)
{
	long i;
	int ret = 0;

	for (i = 0; i < rec->size; i++) {
		char c = rec->ptr[i];

		if (!XDL_ISSPACE(c))
			return ret;
		else if (c == ' ')
			ret += 1;
		else if (c == '\t')
			ret += 8 - ret % 8;
		/* ignore other whitespace characters */

		if (ret >= MAX_INDENT)
			return MAX_INDENT;
	}

	/* The line contains only whitespace. */
	return -1;
}

/*
 * If more than this number of consecutive blank rows are found, just return
 * this value. This avoids requiring O(N^2) work for pathological cases, and
 * also ensures that the output of score_split fits in an int.
 */
#define MAX_BLANKS 20

/* Characteristics measured about a hypothetical split position. */
struct split_measurement {
	/*
	 * Is the split at the end of the file (aside from any blank lines)?
	 */
	int end_of_file;

	/*
	 * How much is the line immediately following the split indented (or
	 * -1 if the line is blank):
	 */
	int indent;

	/*
	 * How many consecutive lines above the split are blank?
	 */
	int pre_blank;

	/*
	 * How much is the nearest non-blank line above the split indented (or
	 * -1 if there is no such line)?
	 */
	int pre_indent;

	/*
	 * How many lines after the line following the split are blank?
	 */
	int post_blank;

	/*
	 * How much is the nearest non-blank line after the line following the
	 * split indented (or -1 if there is no such line)?
	 */
	int post_indent;
};
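/*
 * Worked example (illustrative, not part of the original sources). For the
 * five-line file
 *
 *     0: "int f(void)"
 *     1: "{"
 *     2: "        return 0;"
 *     3: ""
 *     4: "}"
 *
 * a hypothetical split above line 3 would be measured as end_of_file == 0,
 * indent == -1 (line 3 is blank), pre_blank == 0, pre_indent == 8,
 * post_blank == 0 and post_indent == 0.
 */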
struct split_score {
	/* The effective indent of this split (smaller is preferred). */
	int effective_indent;

	/* Penalty for this split (smaller is preferred). */
	int penalty;
};

/*
 * Fill m with information about a hypothetical split of xdf above line split.
 */
static void measure_split(const xdfile_t *xdf, long split,
			  struct split_measurement *m)
{
	long i;

	if (split >= xdf->nrec) {
		m->end_of_file = 1;
		m->indent = -1;
	} else {
		m->end_of_file = 0;
		m->indent = get_indent(xdf->recs[split]);
	}

	m->pre_blank = 0;
	m->pre_indent = -1;
	for (i = split - 1; i >= 0; i--) {
		m->pre_indent = get_indent(xdf->recs[i]);
		if (m->pre_indent != -1)
			break;
		m->pre_blank += 1;
		if (m->pre_blank == MAX_BLANKS) {
			m->pre_indent = 0;
			break;
		}
	}

	m->post_blank = 0;
	m->post_indent = -1;
	for (i = split + 1; i < xdf->nrec; i++) {
		m->post_indent = get_indent(xdf->recs[i]);
		if (m->post_indent != -1)
			break;
		m->post_blank += 1;
		if (m->post_blank == MAX_BLANKS) {
			m->post_indent = 0;
			break;
		}
	}
}

/*
 * The empirically-determined weight factors used by score_split() below.
 * Larger values mean that the position is a less favorable place to split.
 *
 * Note that scores are only ever compared against each other, so multiplying
 * all of these weight/penalty values by the same factor wouldn't change the
 * heuristic's behavior. Still, we need to set that arbitrary scale *somehow*.
 * In practice, these numbers are chosen to be large enough that they can be
 * adjusted relative to each other with sufficient precision despite using
 * integer math.
 */

/* Penalty if there are no non-blank lines before the split */
#define START_OF_FILE_PENALTY 1

/* Penalty if there are no non-blank lines after the split */
#define END_OF_FILE_PENALTY 21

/* Multiplier for the number of blank lines around the split */
#define TOTAL_BLANK_WEIGHT (-30)

/* Multiplier for the number of blank lines after the split */
#define POST_BLANK_WEIGHT 6

/*
 * Penalties applied if the line is indented more than its predecessor
 */
#define RELATIVE_INDENT_PENALTY (-4)
#define RELATIVE_INDENT_WITH_BLANK_PENALTY 10

/*
 * Penalties applied if the line is indented less than both its predecessor
 * and its successor
 */
#define RELATIVE_OUTDENT_PENALTY 24
#define RELATIVE_OUTDENT_WITH_BLANK_PENALTY 17

/*
 * Penalties applied if the line is indented less than its predecessor but not
 * less than its successor
 */
#define RELATIVE_DEDENT_PENALTY 23
#define RELATIVE_DEDENT_WITH_BLANK_PENALTY 17

/*
 * When comparing two candidate positions, we only consider whether the sum of
 * the effective indents of one position's splits is less than (-1), equal to
 * (0), or greater than (+1) that of the other. The resulting value is
 * multiplied by the following weight and combined with the penalty to
 * determine the better of two scores.
 */
#define INDENT_WEIGHT 60
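/*
 * Illustrative reading of these constants (an exposition aid, not part of
 * the original sources): because TOTAL_BLANK_WEIGHT is negative, each blank
 * line directly above a split lowers its penalty by 30, strongly favoring
 * splits placed just after a blank separator. A blank line below the split
 * counts toward both weights and so only lowers the penalty by 30 - 6 = 24,
 * making "blank line above" slightly preferable to "blank line below".
 */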
/*
 * Compute a badness score for the hypothetical split whose measurements are
 * stored in m. The weight factors were determined empirically using the tools
 * and corpus described in
 *
 *     https://github.com/mhagger/diff-slider-tools
 *
 * Also see that project if you want to improve the weights based on, for
 * example, a larger or more diverse corpus.
 */
static void score_add_split(const struct split_measurement *m, struct split_score *s)
{
	/*
	 * Locals used to accumulate the penalty factors (a larger penalty
	 * makes this split less favored):
	 */
	int post_blank, total_blank, indent, any_blanks;

	if (m->pre_indent == -1 && m->pre_blank == 0)
		s->penalty += START_OF_FILE_PENALTY;

	if (m->end_of_file)
		s->penalty += END_OF_FILE_PENALTY;

	/*
	 * Set post_blank to the number of blank lines following the split,
	 * including the line immediately after the split:
	 */
	post_blank = (m->indent == -1) ? 1 + m->post_blank : 0;
	total_blank = m->pre_blank + post_blank;

	/* Penalties based on nearby blank lines: */
	s->penalty += TOTAL_BLANK_WEIGHT * total_blank;
	s->penalty += POST_BLANK_WEIGHT * post_blank;

	if (m->indent != -1)
		indent = m->indent;
	else
		indent = m->post_indent;

	any_blanks = (total_blank != 0);

	/* Note that the effective indent is -1 at the end of the file: */
	s->effective_indent += indent;

	if (indent == -1) {
		/* No additional adjustments needed. */
	} else if (m->pre_indent == -1) {
		/* No additional adjustments needed. */
	} else if (indent > m->pre_indent) {
		/*
		 * The line is indented more than its predecessor.
		 */
		s->penalty += any_blanks ?
			RELATIVE_INDENT_WITH_BLANK_PENALTY :
			RELATIVE_INDENT_PENALTY;
	} else if (indent == m->pre_indent) {
		/*
		 * The line has the same indentation level as its predecessor.
		 * No additional adjustments needed.
		 */
	} else {
		/*
		 * The line is indented less than its predecessor. It could be
		 * the block terminator of the previous block, but it could
		 * also be the start of a new block (e.g., an "else" block, or
		 * maybe the previous block didn't have a block terminator).
		 * Try to distinguish those cases based on what comes next:
		 */
		if (m->post_indent != -1 && m->post_indent > indent) {
			/*
			 * The following line is indented more. So it is likely
			 * that this line is the start of a block.
			 */
			s->penalty += any_blanks ?
				RELATIVE_OUTDENT_WITH_BLANK_PENALTY :
				RELATIVE_OUTDENT_PENALTY;
		} else {
			/*
			 * That was probably the end of a block.
			 */
			s->penalty += any_blanks ?
				RELATIVE_DEDENT_WITH_BLANK_PENALTY :
				RELATIVE_DEDENT_PENALTY;
		}
	}
}

static int score_cmp(struct split_score *s1, struct split_score *s2)
{
	/* -1 if s1->effective_indent < s2->effective_indent, etc. */
	int cmp_indents = ((s1->effective_indent > s2->effective_indent) -
			   (s1->effective_indent < s2->effective_indent));

	return INDENT_WEIGHT * cmp_indents + (s1->penalty - s2->penalty);
}
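/*
 * Worked example (illustrative, not part of the original sources): with
 * s1 = { .effective_indent = 4, .penalty = 10 } and
 * s2 = { .effective_indent = 5, .penalty = 60 }, cmp_indents is -1 and
 * score_cmp() returns 60 * -1 + (10 - 60) = -110, so s1 is the better
 * (lower-scoring) split.
 */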
/*
 * Represent a group of changed lines in an xdfile_t (i.e., a contiguous group
 * of lines that was inserted or deleted from the corresponding version of the
 * file). We consider there to be such a group at the beginning of the file, at
 * the end of the file, and between any two unchanged lines, though most such
 * groups will usually be empty.
 *
 * If the first line in a group is equal to the line following the group, then
 * the group can be slid down. Similarly, if the last line in a group is equal
 * to the line preceding the group, then the group can be slid up. See
 * group_slide_down() and group_slide_up().
 *
 * Note that loops that are testing for changed lines in xdf->rchg do not need
 * index bounding since the array is prepared with a zero at positions -1 and N.
 */
struct xdlgroup {
	/*
	 * The index of the first changed line in the group, or the index of
	 * the unchanged line above which the (empty) group is located.
	 */
	long start;

	/*
	 * The index of the first unchanged line after the group. For an empty
	 * group, end is equal to start.
	 */
	long end;
};

/*
 * Initialize g to point at the first group in xdf.
 */
static void group_init(xdfile_t *xdf, struct xdlgroup *g)
{
	g->start = g->end = 0;
	while (xdf->rchg[g->end])
		g->end++;
}

/*
 * Move g to describe the next (possibly empty) group in xdf and return 0. If g
 * is already at the end of the file, do nothing and return -1.
 */
static inline int group_next(xdfile_t *xdf, struct xdlgroup *g)
{
	if (g->end == xdf->nrec)
		return -1;

	g->start = g->end + 1;
	for (g->end = g->start; xdf->rchg[g->end]; g->end++)
		;

	return 0;
}

/*
 * Move g to describe the previous (possibly empty) group in xdf and return 0.
 * If g is already at the beginning of the file, do nothing and return -1.
 */
static inline int group_previous(xdfile_t *xdf, struct xdlgroup *g)
{
	if (g->start == 0)
		return -1;

	g->end = g->start - 1;
	for (g->start = g->end; xdf->rchg[g->start - 1]; g->start--)
		;

	return 0;
}

/*
 * If g can be slid toward the end of the file, do so, and if it bumps into a
 * following group, expand this group to include it. Return 0 on success or -1
 * if g cannot be slid down.
 */
static int group_slide_down(xdfile_t *xdf, struct xdlgroup *g, long flags)
{
	if (g->end < xdf->nrec &&
	    recs_match(xdf->recs[g->start], xdf->recs[g->end], flags)) {
		xdf->rchg[g->start++] = 0;
		xdf->rchg[g->end++] = 1;

		while (xdf->rchg[g->end])
			g->end++;

		return 0;
	} else {
		return -1;
	}
}

/*
 * If g can be slid toward the beginning of the file, do so, and if it bumps
 * into a previous group, expand this group to include it. Return 0 on success
 * or -1 if g cannot be slid up.
 */
static int group_slide_up(xdfile_t *xdf, struct xdlgroup *g, long flags)
{
	if (g->start > 0 &&
	    recs_match(xdf->recs[g->start - 1], xdf->recs[g->end - 1], flags)) {
		xdf->rchg[--g->start] = 1;
		xdf->rchg[--g->end] = 0;

		while (xdf->rchg[g->start - 1])
			g->start--;

		return 0;
	} else {
		return -1;
	}
}
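/*
 * Worked example (illustrative, not part of the original sources): for a
 * file with
 *
 *     index: 0 1 2 3 4
 *     line:  A B B B C
 *     rchg:  0 1 1 0 0
 *
 * the group { start = 1, end = 3 } can be slid down because recs[1] and
 * recs[3] are both "B"; group_slide_down() leaves rchg as 0 0 1 1 0 and the
 * group becomes { start = 2, end = 4 }, describing the same two-line change
 * one line lower.
 */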
static void xdl_bug(const char *msg)
{
	fprintf(stderr, "BUG: %s\n", msg);
	exit(1);
}

/*
 * Move change groups back and forward for consistent and pretty diff output.
 * This also helps in finding joinable change groups and reducing the diff
 * size.
 */
int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, long flags) {
	struct xdlgroup g, go;
	long earliest_end, end_matching_other;
	long groupsize;
	unsigned int blank_lines;

	group_init(xdf, &g);
	group_init(xdfo, &go);

	while (1) {
		/* If the group is empty in the to-be-compacted file, skip it: */
		if (g.end == g.start)
			goto next;

		/*
		 * Now shift the change up and then down as far as possible in
		 * each direction. If it bumps into any other changes, merge
		 * them.
		 */
		do {
			groupsize = g.end - g.start;

			/*
			 * Keep track of the last "end" index that causes this
			 * group to align with a group of changed lines in the
			 * other file. -1 indicates that we haven't found such
			 * a match yet:
			 */
			end_matching_other = -1;

			/*
			 * Boolean value that records whether there are any
			 * blank lines that could be made to be the last line
			 * of this group.
			 */
			blank_lines = 0;

			/* Shift the group backward as much as possible: */
			while (!group_slide_up(xdf, &g, flags))
				if (group_previous(xdfo, &go))
					xdl_bug("group sync broken sliding up");

			/*
			 * This is the highest that this group can be shifted.
			 * Record its end index:
			 */
			earliest_end = g.end;

			if (go.end > go.start)
				end_matching_other = g.end;

			/* Now shift the group forward as far as possible: */
			while (1) {
				if (!blank_lines)
					blank_lines = is_blank_line(
							xdf->recs[g.end - 1],
							flags);

				if (group_slide_down(xdf, &g, flags))
					break;
				if (group_next(xdfo, &go))
					xdl_bug("group sync broken sliding down");

				if (go.end > go.start)
					end_matching_other = g.end;
			}
		} while (groupsize != g.end - g.start);

		/*
		 * If the group can be shifted, then we can possibly use this
		 * freedom to produce a more intuitive diff.
		 *
		 * The group is currently shifted as far down as possible, so
		 * the heuristics below only have to handle upwards shifts.
		 */

		if (g.end == earliest_end) {
			/* no shifting was possible */
		} else if (end_matching_other != -1) {
			/*
			 * Move the possibly merged group of changes back to
			 * line up with the last group of changes from the
			 * other file that it can align with.
			 */
			while (go.end == go.start) {
				if (group_slide_up(xdf, &g, flags))
					xdl_bug("match disappeared");
				if (group_previous(xdfo, &go))
					xdl_bug("group sync broken sliding to match");
			}
		} else if ((flags & XDF_COMPACTION_HEURISTIC) && blank_lines) {
			/*
			 * Compaction heuristic: if it is possible to shift the
			 * group to make its bottom line a blank line, do so.
			 *
			 * As we already shifted the group forward as far as
			 * possible in the earlier loop, we only need to handle
			 * backward shifts, not forward ones.
			 */
			while (!is_blank_line(xdf->recs[g.end - 1], flags)) {
				if (group_slide_up(xdf, &g, flags))
					xdl_bug("blank line disappeared");
				if (group_previous(xdfo, &go))
					xdl_bug("group sync broken sliding to blank line");
			}
		} else if (flags & XDF_INDENT_HEURISTIC) {
			/*
			 * Indent heuristic: a group of pure add/delete lines
			 * implies two splits, one between the end of the
			 * "before" context and the start of the group, and
			 * another between the end of the group and the
			 * beginning of the "after" context. Some splits are
			 * aesthetically better and some are worse. We compute
			 * a badness "score" for each split, and add the scores
			 * for the two splits to define a "score" for each
			 * position that the group can be shifted to. Then we
			 * pick the shift with the lowest score.
			 */
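			/*
			 * Illustrative note (an exposition aid, not part of
			 * the original sources): each candidate "shift" is the
			 * end index the group would have after shifting; its
			 * two splits sit above line "shift" (the lower
			 * boundary) and above line "shift - groupsize" (the
			 * upper boundary). With earliest_end == 5, g.end == 7
			 * and groupsize == 2, the loop below scores shifts 5,
			 * 6 and 7, summing the split scores above lines
			 * (5 and 3), (6 and 4) and (7 and 5) respectively.
			 */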
			long shift, best_shift = -1;
			struct split_score best_score;

			for (shift = earliest_end; shift <= g.end; shift++) {
				struct split_measurement m;
				struct split_score score = {0, 0};

				measure_split(xdf, shift, &m);
				score_add_split(&m, &score);
				measure_split(xdf, shift - groupsize, &m);
				score_add_split(&m, &score);
				if (best_shift == -1 ||
				    score_cmp(&score, &best_score) <= 0) {
					best_score.effective_indent = score.effective_indent;
					best_score.penalty = score.penalty;
					best_shift = shift;
				}
			}

			while (g.end > best_shift) {
				if (group_slide_up(xdf, &g, flags))
					xdl_bug("best shift unreached");
				if (group_previous(xdfo, &go))
					xdl_bug("group sync broken sliding to best shift");
			}
		}

	next:
		/* Move past the just-processed group: */
		if (group_next(xdf, &g))
			break;
		if (group_next(xdfo, &go))
			xdl_bug("group sync broken moving to next group");
	}

	if (!group_next(xdfo, &go))
		xdl_bug("group sync broken at end of file");

	return 0;
}


int xdl_build_script(xdfenv_t *xe, xdchange_t **xscr) {
	xdchange_t *cscr = NULL, *xch;
	char *rchg1 = xe->xdf1.rchg, *rchg2 = xe->xdf2.rchg;
	long i1, i2, l1, l2;

	/*
	 * Trivial. Collects "groups" of changes and creates an edit script.
	 */
	for (i1 = xe->xdf1.nrec, i2 = xe->xdf2.nrec; i1 >= 0 || i2 >= 0; i1--, i2--)
		if (rchg1[i1 - 1] || rchg2[i2 - 1]) {
			for (l1 = i1; rchg1[i1 - 1]; i1--);
			for (l2 = i2; rchg2[i2 - 1]; i2--);

			if (!(xch = xdl_add_change(cscr, i1, i2, l1 - i1, l2 - i2))) {
				xdl_free_script(cscr);
				return -1;
			}
			cscr = xch;
		}

	*xscr = cscr;

	return 0;
}


void xdl_free_script(xdchange_t *xscr) {
	xdchange_t *xch;

	while ((xch = xscr) != NULL) {
		xscr = xscr->next;
		xdl_free(xch);
	}
}

static int xdl_call_hunk_func(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
			      xdemitconf_t const *xecfg)
{
	xdchange_t *xch, *xche;

	for (xch = xscr; xch; xch = xche->next) {
		xche = xdl_get_hunk(&xch, xecfg);
		if (!xch)
			break;
		if (xecfg->hunk_func(xch->i1, xche->i1 + xche->chg1 - xch->i1,
				     xch->i2, xche->i2 + xche->chg2 - xch->i2,
				     ecb->priv) < 0)
			return -1;
	}
	return 0;
}

static void xdl_mark_ignorable(xdchange_t *xscr, xdfenv_t *xe, long flags)
{
	xdchange_t *xch;

	for (xch = xscr; xch; xch = xch->next) {
		int ignore = 1;
		xrecord_t **rec;
		long i;

		rec = &xe->xdf1.recs[xch->i1];
		for (i = 0; i < xch->chg1 && ignore; i++)
			ignore = xdl_blankline(rec[i]->ptr, rec[i]->size, flags);

		rec = &xe->xdf2.recs[xch->i2];
		for (i = 0; i < xch->chg2 && ignore; i++)
			ignore = xdl_blankline(rec[i]->ptr, rec[i]->size, flags);

		xch->ignore = ignore;
	}
}
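/*
 * Illustrative reading of the resulting edit script (an exposition aid, not
 * part of the original sources): each xdchange_t describes one hunk of
 * changed lines, e.g. i1 = 4, chg1 = 2, i2 = 4, chg2 = 0 means two lines
 * removed from the first file starting at record 4 with nothing added on the
 * second-file side, while chg1 == 0 together with chg2 > 0 would be a pure
 * insertion.
 */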
int xdl_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
	     xdemitconf_t const *xecfg, xdemitcb_t *ecb) {
	xdchange_t *xscr;
	xdfenv_t xe;
	emit_func_t ef = xecfg->hunk_func ? xdl_call_hunk_func : xdl_emit_diff;

	if (xdl_do_diff(mf1, mf2, xpp, &xe) < 0) {

		return -1;
	}
	if (xdl_change_compact(&xe.xdf1, &xe.xdf2, xpp->flags) < 0 ||
	    xdl_change_compact(&xe.xdf2, &xe.xdf1, xpp->flags) < 0 ||
	    xdl_build_script(&xe, &xscr) < 0) {

		xdl_free_env(&xe);
		return -1;
	}
	if (xscr) {
		if (xpp->flags & XDF_IGNORE_BLANK_LINES)
			xdl_mark_ignorable(xscr, &xe, xpp->flags);

		if (ef(&xe, xscr, ecb, xecfg) < 0) {

			xdl_free_script(xscr);
			xdl_free_env(&xe);
			return -1;
		}
		xdl_free_script(xscr);
	}
	xdl_free_env(&xe);

	return 0;
}
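/*
 * Usage sketch (illustrative only; the emit-callback wiring below is an
 * assumption about the surrounding xdiff headers, not something defined in
 * this file):
 *
 *	mmfile_t mf1 = { buf1, len1 }, mf2 = { buf2, len2 };
 *	xpparam_t xpp = { 0 };
 *	xdemitconf_t xecfg = { 0 };
 *	xdemitcb_t ecb = { 0 };
 *
 *	xpp.flags = XDF_INDENT_HEURISTIC;
 *	xecfg.ctxlen = 3;
 *	ecb.priv = my_state;		// hypothetical consumer state
 *	ecb.outf = my_outf;		// hypothetical line-output callback
 *
 *	if (xdl_diff(&mf1, &mf2, &xpp, &xecfg, &ecb) < 0)
 *		handle_error();		// hypothetical error path
 */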