#include "cache.h"
#include "refs.h"
#include "object-store.h"
#include "cache-tree.h"
#include "mergesort.h"
#include "diff.h"
#include "diffcore.h"
#include "tag.h"
#include "blame.h"
#include "alloc.h"
#include "commit-slab.h"

define_commit_slab(blame_suspects, struct blame_origin *);
static struct blame_suspects blame_suspects;

struct blame_origin *get_blame_suspects(struct commit *commit)
{
	struct blame_origin **result;

	result = blame_suspects_peek(&blame_suspects, commit);

	return result ? *result : NULL;
}

static void set_blame_suspects(struct commit *commit, struct blame_origin *origin)
{
	*blame_suspects_at(&blame_suspects, commit) = origin;
}

void blame_origin_decref(struct blame_origin *o)
{
	if (o && --o->refcnt <= 0) {
		struct blame_origin *p, *l = NULL;
		if (o->previous)
			blame_origin_decref(o->previous);
		free(o->file.ptr);
		/* Should be present exactly once in commit chain */
		for (p = get_blame_suspects(o->commit); p; l = p, p = p->next) {
			if (p == o) {
				if (l)
					l->next = p->next;
				else
					set_blame_suspects(o->commit, p->next);
				free(o);
				return;
			}
		}
		die("internal error in blame_origin_decref");
	}
}

/*
 * Given a commit and a path in it, create a new origin structure.
 * The callers that add blame to the scoreboard should use
 * get_origin() to obtain a shared, refcounted copy instead of calling
 * this function directly.
 */
static struct blame_origin *make_origin(struct commit *commit, const char *path)
{
	struct blame_origin *o;
	FLEX_ALLOC_STR(o, path, path);
	o->commit = commit;
	o->refcnt = 1;
	o->next = get_blame_suspects(commit);
	set_blame_suspects(commit, o);
	return o;
}

/*
 * Locate an existing origin or create a new one.
 * This moves the origin to the front of the commit's suspect list.
 */
static struct blame_origin *get_origin(struct commit *commit, const char *path)
{
	struct blame_origin *o, *l;

	for (o = get_blame_suspects(commit), l = NULL; o; l = o, o = o->next) {
		if (!strcmp(o->path, path)) {
			/* bump to front */
			if (l) {
				l->next = o->next;
				o->next = get_blame_suspects(commit);
				set_blame_suspects(commit, o);
			}
			return blame_origin_incref(o);
		}
	}
	return make_origin(commit, path);
}

static void verify_working_tree_path(struct repository *r,
				     struct commit *work_tree, const char *path)
{
	struct commit_list *parents;
	int pos;

	for (parents = work_tree->parents; parents; parents = parents->next) {
		const struct object_id *commit_oid = &parents->item->object.oid;
		struct object_id blob_oid;
		unsigned mode;

		if (!get_tree_entry(commit_oid, path, &blob_oid, &mode) &&
		    oid_object_info(r, &blob_oid, NULL) == OBJ_BLOB)
			return;
	}

	pos = index_name_pos(r->index, path, strlen(path));
	if (pos >= 0)
		; /* path is in the index */
	else if (-1 - pos < r->index->cache_nr &&
		 !strcmp(r->index->cache[-1 - pos]->name, path))
		; /* path is in the index, unmerged */
	else
		die("no such path '%s' in HEAD", path);
}

static struct commit_list **append_parent(struct repository *r,
					  struct commit_list **tail,
					  const struct object_id *oid)
{
	struct commit *parent;

	parent = lookup_commit_reference(r, oid);
	if (!parent)
		die("no such commit %s", oid_to_hex(oid));
	return &commit_list_insert(parent, tail)->next;
}

static void append_merge_parents(struct repository *r,
				 struct commit_list **tail)
{
	int merge_head;
	struct strbuf line = STRBUF_INIT;

	merge_head = open(git_path_merge_head(r), O_RDONLY);
	if (merge_head < 0) {
		if (errno == ENOENT)
			return;
		die("cannot open '%s' for reading",
		    git_path_merge_head(r));
	}

	while (!strbuf_getwholeline_fd(&line, merge_head, '\n')) {
		struct object_id oid;
		if (line.len < GIT_SHA1_HEXSZ || get_oid_hex(line.buf, &oid))
			die("unknown line in '%s': %s",
			    git_path_merge_head(r), line.buf);
		tail = append_parent(r, tail, &oid);
	}
	close(merge_head);
	strbuf_release(&line);
}

/*
 * This isn't as simple as passing sb->buf and sb->len, because we
 * want to transfer ownership of the buffer to the commit (so we
 * must use detach).
 */
static void set_commit_buffer_from_strbuf(struct repository *r,
					  struct commit *c,
					  struct strbuf *sb)
{
	size_t len;
	void *buf = strbuf_detach(sb, &len);
	set_commit_buffer(r, c, buf, len);
}

/*
 * Prepare a dummy commit that represents the work tree (or staged) item.
 * Note that annotating a work tree item never works in reverse.
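 *
 * The fake commit gets HEAD (plus any MERGE_HEAD parents) as its
 * parents, and its file contents come from the working tree, from the
 * --contents file, or from standard input.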
 */
static struct commit *fake_working_tree_commit(struct repository *r,
					       struct diff_options *opt,
					       const char *path,
					       const char *contents_from)
{
	struct commit *commit;
	struct blame_origin *origin;
	struct commit_list **parent_tail, *parent;
	struct object_id head_oid;
	struct strbuf buf = STRBUF_INIT;
	const char *ident;
	time_t now;
	int len;
	struct cache_entry *ce;
	unsigned mode;
	struct strbuf msg = STRBUF_INIT;

	repo_read_index(r);
	time(&now);
	commit = alloc_commit_node(r);
	commit->object.parsed = 1;
	commit->date = now;
	parent_tail = &commit->parents;

	if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL))
		die("no such ref: HEAD");

	parent_tail = append_parent(r, parent_tail, &head_oid);
	append_merge_parents(r, parent_tail);
	verify_working_tree_path(r, commit, path);

	origin = make_origin(commit, path);

	ident = fmt_ident("Not Committed Yet", "not.committed.yet", NULL, 0);
	strbuf_addstr(&msg, "tree 0000000000000000000000000000000000000000\n");
	for (parent = commit->parents; parent; parent = parent->next)
		strbuf_addf(&msg, "parent %s\n",
			    oid_to_hex(&parent->item->object.oid));
	strbuf_addf(&msg,
		    "author %s\n"
		    "committer %s\n\n"
		    "Version of %s from %s\n",
		    ident, ident, path,
		    (!contents_from ? path :
		     (!strcmp(contents_from, "-") ? "standard input" : contents_from)));
	set_commit_buffer_from_strbuf(r, commit, &msg);

	if (!contents_from || strcmp("-", contents_from)) {
		struct stat st;
		const char *read_from;
		char *buf_ptr;
		unsigned long buf_len;

		if (contents_from) {
			if (stat(contents_from, &st) < 0)
				die_errno("Cannot stat '%s'", contents_from);
			read_from = contents_from;
		}
		else {
			if (lstat(path, &st) < 0)
				die_errno("Cannot lstat '%s'", path);
			read_from = path;
		}
		mode = canon_mode(st.st_mode);

		switch (st.st_mode & S_IFMT) {
		case S_IFREG:
			if (opt->flags.allow_textconv &&
			    textconv_object(r, read_from, mode, &null_oid, 0, &buf_ptr, &buf_len))
				strbuf_attach(&buf, buf_ptr, buf_len, buf_len + 1);
			else if (strbuf_read_file(&buf, read_from, st.st_size) != st.st_size)
				die_errno("cannot open or read '%s'", read_from);
			break;
		case S_IFLNK:
			if (strbuf_readlink(&buf, read_from, st.st_size) < 0)
				die_errno("cannot readlink '%s'", read_from);
			break;
		default:
			die("unsupported file type %s", read_from);
		}
	}
	else {
		/* Reading from stdin */
		mode = 0;
		if (strbuf_read(&buf, 0, 0) < 0)
			die_errno("failed to read from stdin");
	}
	convert_to_git(r->index, path, buf.buf, buf.len, &buf, 0);
	origin->file.ptr = buf.buf;
	origin->file.size = buf.len;
	pretend_object_file(buf.buf, buf.len, OBJ_BLOB, &origin->blob_oid);

	/*
	 * Read the current index, replace the path entry with
	 * origin->blob_sha1 without mucking with its mode or type
	 * bits; we are not going to write this index out -- we just
	 * want to run "diff-index --cached".
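	 * (pretend_object_file() above registered the buffer as an
	 * in-core object, so the diff machinery can read it back via
	 * origin->blob_oid without it ever hitting the object store.)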
	 */
	discard_index(r->index);
	repo_read_index(r);

	len = strlen(path);
	if (!mode) {
		int pos = index_name_pos(r->index, path, len);
		if (0 <= pos)
			mode = r->index->cache[pos]->ce_mode;
		else
			/* Let's not bother reading from HEAD tree */
			mode = S_IFREG | 0644;
	}
	ce = make_empty_cache_entry(r->index, len);
	oidcpy(&ce->oid, &origin->blob_oid);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(0);
	ce->ce_namelen = len;
	ce->ce_mode = create_ce_mode(mode);
	add_index_entry(r->index, ce,
			ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);

	cache_tree_invalidate_path(r->index, path);

	return commit;
}

static int diff_hunks(mmfile_t *file_a, mmfile_t *file_b,
		      xdl_emit_hunk_consume_func_t hunk_func, void *cb_data, int xdl_opts)
{
	xpparam_t xpp = {0};
	xdemitconf_t xecfg = {0};
	xdemitcb_t ecb = {NULL};

	xpp.flags = xdl_opts;
	xecfg.hunk_func = hunk_func;
	ecb.priv = cb_data;
	return xdi_diff(file_a, file_b, &xpp, &xecfg, &ecb);
}

/*
 * Given an origin, prepare mmfile_t structure to be used by the
 * diff machinery
 */
static void fill_origin_blob(struct diff_options *opt,
			     struct blame_origin *o, mmfile_t *file, int *num_read_blob)
{
	if (!o->file.ptr) {
		enum object_type type;
		unsigned long file_size;

		(*num_read_blob)++;
		if (opt->flags.allow_textconv &&
		    textconv_object(opt->repo, o->path, o->mode,
				    &o->blob_oid, 1, &file->ptr, &file_size))
			;
		else
			file->ptr = read_object_file(&o->blob_oid, &type,
						     &file_size);
		file->size = file_size;

		if (!file->ptr)
			die("Cannot read blob %s for path %s",
			    oid_to_hex(&o->blob_oid),
			    o->path);
		o->file = *file;
	}
	else
		*file = o->file;
}

static void drop_origin_blob(struct blame_origin *o)
{
	FREE_AND_NULL(o->file.ptr);
}

/*
 * Any merge of blames happens on lists of blames that arrived via
 * different parents in a single suspect.  In this case, we want to
 * sort according to the suspect line numbers as opposed to the final
 * image line numbers.  The function body is somewhat longish because
 * it avoids unnecessary writes.
 */

static struct blame_entry *blame_merge(struct blame_entry *list1,
				       struct blame_entry *list2)
{
	struct blame_entry *p1 = list1, *p2 = list2,
		**tail = &list1;

	if (!p1)
		return p2;
	if (!p2)
		return p1;

	if (p1->s_lno <= p2->s_lno) {
		do {
			tail = &p1->next;
			if ((p1 = *tail) == NULL) {
				*tail = p2;
				return list1;
			}
		} while (p1->s_lno <= p2->s_lno);
	}
	for (;;) {
		*tail = p2;
		do {
			tail = &p2->next;
			if ((p2 = *tail) == NULL) {
				*tail = p1;
				return list1;
			}
		} while (p1->s_lno > p2->s_lno);
		*tail = p1;
		do {
			tail = &p1->next;
			if ((p1 = *tail) == NULL) {
				*tail = p2;
				return list1;
			}
		} while (p1->s_lno <= p2->s_lno);
	}
}

static void *get_next_blame(const void *p)
{
	return ((struct blame_entry *)p)->next;
}

static void set_next_blame(void *p1, void *p2)
{
	((struct blame_entry *)p1)->next = p2;
}

/*
 * Final image line numbers are all different, so we don't need a
 * three-way comparison here.
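 * (Each line of the final image ends up in exactly one blame entry,
 * so no two entries on the final list share the same lno.)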
 */

static int compare_blame_final(const void *p1, const void *p2)
{
	return ((struct blame_entry *)p1)->lno > ((struct blame_entry *)p2)->lno
		? 1 : -1;
}

static int compare_blame_suspect(const void *p1, const void *p2)
{
	const struct blame_entry *s1 = p1, *s2 = p2;
	/*
	 * to allow for collating suspects, we sort according to the
	 * respective pointer value as the primary sorting criterion.
	 * The actual relation is pretty unimportant as long as it
	 * establishes a total order.  Comparing as integers gives us
	 * that.
	 */
	if (s1->suspect != s2->suspect)
		return (intptr_t)s1->suspect > (intptr_t)s2->suspect ? 1 : -1;
	if (s1->s_lno == s2->s_lno)
		return 0;
	return s1->s_lno > s2->s_lno ? 1 : -1;
}

void blame_sort_final(struct blame_scoreboard *sb)
{
	sb->ent = llist_mergesort(sb->ent, get_next_blame, set_next_blame,
				  compare_blame_final);
}

static int compare_commits_by_reverse_commit_date(const void *a,
						  const void *b,
						  void *c)
{
	return -compare_commits_by_commit_date(a, b, c);
}

/*
 * For debugging -- origin is refcounted, and this asserts that
 * we do not underflow.
 */
static void sanity_check_refcnt(struct blame_scoreboard *sb)
{
	int baa = 0;
	struct blame_entry *ent;

	for (ent = sb->ent; ent; ent = ent->next) {
		/* Nobody should have zero or negative refcnt */
		if (ent->suspect->refcnt <= 0) {
			fprintf(stderr, "%s in %s has negative refcnt %d\n",
				ent->suspect->path,
				oid_to_hex(&ent->suspect->commit->object.oid),
				ent->suspect->refcnt);
			baa = 1;
		}
	}
	if (baa)
		sb->on_sanity_fail(sb, baa);
}

/*
 * If two blame entries that are next to each other came from
 * contiguous lines in the same origin (i.e. <commit, path> pair),
 * merge them together.
 */
void blame_coalesce(struct blame_scoreboard *sb)
{
	struct blame_entry *ent, *next;

	for (ent = sb->ent; ent && (next = ent->next); ent = next) {
		if (ent->suspect == next->suspect &&
		    ent->s_lno + ent->num_lines == next->s_lno) {
			ent->num_lines += next->num_lines;
			ent->next = next->next;
			blame_origin_decref(next->suspect);
			free(next);
			ent->score = 0;
			next = ent; /* again */
		}
	}

	if (sb->debug) /* sanity */
		sanity_check_refcnt(sb);
}

/*
 * Merge the given sorted list of blames into a preexisting origin.
 * If there were no previous blames to that commit, it is entered into
 * the commit priority queue of the scoreboard.
 */

static void queue_blames(struct blame_scoreboard *sb, struct blame_origin *porigin,
			 struct blame_entry *sorted)
{
	if (porigin->suspects)
		porigin->suspects = blame_merge(porigin->suspects, sorted);
	else {
		struct blame_origin *o;
		for (o = get_blame_suspects(porigin->commit); o; o = o->next) {
			if (o->suspects) {
				porigin->suspects = sorted;
				return;
			}
		}
		porigin->suspects = sorted;
		prio_queue_put(&sb->commits, porigin->commit);
	}
}

/*
 * Fill the blob_sha1 field of an origin if it hasn't been filled yet,
 * so that a later call to fill_origin_blob() can use it to locate the
 * data.  blob_sha1 for an origin is also used to pass the blame for
 * the entire file to the parent to detect the case where a child's
 * blob is identical to that of its parent.
 *
 * This also fills origin->mode for the corresponding tree path.
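 *
 * Returns 0 on success; on failure blob_oid is cleared, mode is set
 * to S_IFINVALID, and -1 is returned.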
 */
static int fill_blob_sha1_and_mode(struct repository *r,
				   struct blame_origin *origin)
{
	if (!is_null_oid(&origin->blob_oid))
		return 0;
	if (get_tree_entry(&origin->commit->object.oid, origin->path, &origin->blob_oid, &origin->mode))
		goto error_out;
	if (oid_object_info(r, &origin->blob_oid, NULL) != OBJ_BLOB)
		goto error_out;
	return 0;
 error_out:
	oidclr(&origin->blob_oid);
	origin->mode = S_IFINVALID;
	return -1;
}

/*
 * We have an origin -- check if the same path exists in the
 * parent and return an origin structure to represent it.
 */
static struct blame_origin *find_origin(struct repository *r,
					struct commit *parent,
					struct blame_origin *origin)
{
	struct blame_origin *porigin;
	struct diff_options diff_opts;
	const char *paths[2];

	/* First check any existing origins */
	for (porigin = get_blame_suspects(parent); porigin; porigin = porigin->next)
		if (!strcmp(porigin->path, origin->path)) {
			/*
			 * The same path between origin and its parent
			 * without renaming -- the most common case.
			 */
			return blame_origin_incref(porigin);
		}

	/* See if the origin->path is different between parent
	 * and origin first.  Most of the time they are the
	 * same and diff-tree is fairly efficient about this.
	 */
	repo_diff_setup(r, &diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.detect_rename = 0;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
	paths[0] = origin->path;
	paths[1] = NULL;

	parse_pathspec(&diff_opts.pathspec,
		       PATHSPEC_ALL_MAGIC & ~PATHSPEC_LITERAL,
		       PATHSPEC_LITERAL_PATH, "", paths);
	diff_setup_done(&diff_opts);

	if (is_null_oid(&origin->commit->object.oid))
		do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
	else
		diff_tree_oid(get_commit_tree_oid(parent),
			      get_commit_tree_oid(origin->commit),
			      "", &diff_opts);
	diffcore_std(&diff_opts);

	if (!diff_queued_diff.nr) {
		/* The path is the same as parent */
		porigin = get_origin(parent, origin->path);
		oidcpy(&porigin->blob_oid, &origin->blob_oid);
		porigin->mode = origin->mode;
	} else {
		/*
		 * Since origin->path is a pathspec, if the parent
		 * commit had it as a directory, we will see a whole
		 * bunch of deletion of files in the directory that we
		 * do not care about.
		 */
		int i;
		struct diff_filepair *p = NULL;
		for (i = 0; i < diff_queued_diff.nr; i++) {
			const char *name;
			p = diff_queued_diff.queue[i];
			name = p->one->path ? p->one->path : p->two->path;
			if (!strcmp(name, origin->path))
				break;
		}
		if (!p)
			die("internal error in blame::find_origin");
		switch (p->status) {
		default:
			die("internal error in blame::find_origin (%c)",
			    p->status);
		case 'M':
			porigin = get_origin(parent, origin->path);
			oidcpy(&porigin->blob_oid, &p->one->oid);
			porigin->mode = p->one->mode;
			break;
		case 'A':
		case 'T':
			/* Did not exist in parent, or type changed */
			break;
		}
	}
	diff_flush(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);
	return porigin;
}

/*
 * We have an origin -- find the path that corresponds to it in its
 * parent and return an origin structure to represent it.
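 *
 * Unlike find_origin(), the tree diff here is run with rename
 * detection enabled and diff_opts.single_follow set to origin->path,
 * so the path may be found under a different name in the parent.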
 */
static struct blame_origin *find_rename(struct repository *r,
					struct commit *parent,
					struct blame_origin *origin)
{
	struct blame_origin *porigin = NULL;
	struct diff_options diff_opts;
	int i;

	repo_diff_setup(r, &diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.detect_rename = DIFF_DETECT_RENAME;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
	diff_opts.single_follow = origin->path;
	diff_setup_done(&diff_opts);

	if (is_null_oid(&origin->commit->object.oid))
		do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
	else
		diff_tree_oid(get_commit_tree_oid(parent),
			      get_commit_tree_oid(origin->commit),
			      "", &diff_opts);
	diffcore_std(&diff_opts);

	for (i = 0; i < diff_queued_diff.nr; i++) {
		struct diff_filepair *p = diff_queued_diff.queue[i];
		if ((p->status == 'R' || p->status == 'C') &&
		    !strcmp(p->two->path, origin->path)) {
			porigin = get_origin(parent, p->one->path);
			oidcpy(&porigin->blob_oid, &p->one->oid);
			porigin->mode = p->one->mode;
			break;
		}
	}
	diff_flush(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);
	return porigin;
}

/*
 * Append a new blame entry to a given output queue.
 */
static void add_blame_entry(struct blame_entry ***queue,
			    const struct blame_entry *src)
{
	struct blame_entry *e = xmalloc(sizeof(*e));
	memcpy(e, src, sizeof(*e));
	blame_origin_incref(e->suspect);

	e->next = **queue;
	**queue = e;
	*queue = &e->next;
}

/*
 * src typically is on-stack; we want to copy the information in it to
 * a malloced blame_entry that gets added to the given queue.  The
 * origin of dst loses a refcnt.
 */
static void dup_entry(struct blame_entry ***queue,
		      struct blame_entry *dst, struct blame_entry *src)
{
	blame_origin_incref(src->suspect);
	blame_origin_decref(dst->suspect);
	memcpy(dst, src, sizeof(*src));
	dst->next = **queue;
	**queue = dst;
	*queue = &dst->next;
}

const char *blame_nth_line(struct blame_scoreboard *sb, long lno)
{
	return sb->final_buf + sb->lineno[lno];
}

/*
 * It is known that lines between tlno and same came from parent, and e
 * has an overlap with that range.  It is also known that parent's
 * line plno corresponds to e's line tlno.
 *
 *                <---- e ----->
 *                   <------>
 *                   <------------>
 *             <------------>
 *             <------------------>
 *
 * Split e into potentially three parts; before this chunk, the chunk
 * to be blamed on the parent, and after that portion.
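 *
 * For example, if e covers suspect lines 10-19 (s_lno = 10,
 * num_lines = 10) starting at final line 100, and tlno = 12,
 * plno = 2, same = 17, then split[0] keeps suspect lines 10-11,
 * split[1] blames parent lines 2-6 for final lines 102-106, and
 * split[2] keeps suspect lines 17-19.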
 */
static void split_overlap(struct blame_entry *split,
			  struct blame_entry *e,
			  int tlno, int plno, int same,
			  struct blame_origin *parent)
{
	int chunk_end_lno;
	memset(split, 0, sizeof(struct blame_entry [3]));

	if (e->s_lno < tlno) {
		/* there is a pre-chunk part not blamed on parent */
		split[0].suspect = blame_origin_incref(e->suspect);
		split[0].lno = e->lno;
		split[0].s_lno = e->s_lno;
		split[0].num_lines = tlno - e->s_lno;
		split[1].lno = e->lno + tlno - e->s_lno;
		split[1].s_lno = plno;
	}
	else {
		split[1].lno = e->lno;
		split[1].s_lno = plno + (e->s_lno - tlno);
	}

	if (same < e->s_lno + e->num_lines) {
		/* there is a post-chunk part not blamed on parent */
		split[2].suspect = blame_origin_incref(e->suspect);
		split[2].lno = e->lno + (same - e->s_lno);
		split[2].s_lno = e->s_lno + (same - e->s_lno);
		split[2].num_lines = e->s_lno + e->num_lines - same;
		chunk_end_lno = split[2].lno;
	}
	else
		chunk_end_lno = e->lno + e->num_lines;
	split[1].num_lines = chunk_end_lno - split[1].lno;

	/*
	 * if it turns out there is nothing to blame the parent for,
	 * forget about the splitting.  !split[1].suspect signals this.
	 */
	if (split[1].num_lines < 1)
		return;
	split[1].suspect = blame_origin_incref(parent);
}

/*
 * split_overlap() divided an existing blame e into up to three parts
 * in split.  Any assigned blame is moved to queue to
 * reflect the split.
 */
static void split_blame(struct blame_entry ***blamed,
			struct blame_entry ***unblamed,
			struct blame_entry *split,
			struct blame_entry *e)
{
	if (split[0].suspect && split[2].suspect) {
		/* The first part (reuse storage for the existing entry e) */
		dup_entry(unblamed, e, &split[0]);

		/* The last part -- me */
		add_blame_entry(unblamed, &split[2]);

		/* ... and the middle part -- parent */
		add_blame_entry(blamed, &split[1]);
	}
	else if (!split[0].suspect && !split[2].suspect)
		/*
		 * The parent covers the entire area; reuse storage for
		 * e and replace it with the parent.
		 */
		dup_entry(blamed, e, &split[1]);
	else if (split[0].suspect) {
		/* me and then parent */
		dup_entry(unblamed, e, &split[0]);
		add_blame_entry(blamed, &split[1]);
	}
	else {
		/* parent and then me */
		dup_entry(blamed, e, &split[1]);
		add_blame_entry(unblamed, &split[2]);
	}
}

/*
 * After splitting the blame, the origins used by the
 * on-stack blame_entry should lose one refcnt each.
 */
static void decref_split(struct blame_entry *split)
{
	int i;

	for (i = 0; i < 3; i++)
		blame_origin_decref(split[i].suspect);
}

/*
 * reverse_blame reverses the list given in head, appending tail.
 * That allows us to build lists in reverse order, then reverse them
 * afterwards.  This can be faster than building the list in proper
 * order right away.  The reason is that building in proper order
 * requires writing a link in the _previous_ element, while building
 * in reverse order just requires placing the list head into the
 * _current_ element.
 */

static struct blame_entry *reverse_blame(struct blame_entry *head,
					 struct blame_entry *tail)
{
	while (head) {
		struct blame_entry *next = head->next;
		head->next = tail;
		tail = head;
		head = next;
	}
	return tail;
}

/*
 * Process one hunk from the patch between the current suspect for
 * blame_entry e and its parent.
 * This first blames any unfinished
 * entries before the chunk (which is where target and parent start
 * differing) on the parent, and then splits blame entries at the
 * start and at the end of the difference region.  Since use of -M and
 * -C options may lead to overlapping/duplicate source line number
 * ranges, all we can rely on from sorting/merging is the order of the
 * first suspect line number.
 */
static void blame_chunk(struct blame_entry ***dstq, struct blame_entry ***srcq,
			int tlno, int offset, int same,
			struct blame_origin *parent)
{
	struct blame_entry *e = **srcq;
	struct blame_entry *samep = NULL, *diffp = NULL;

	while (e && e->s_lno < tlno) {
		struct blame_entry *next = e->next;
		/*
		 * current record starts before differing portion.  If
		 * it reaches into it, we need to split it up and
		 * examine the second part separately.
		 */
		if (e->s_lno + e->num_lines > tlno) {
			/* Move second half to a new record */
			int len = tlno - e->s_lno;
			struct blame_entry *n = xcalloc(1, sizeof(struct blame_entry));
			n->suspect = e->suspect;
			n->lno = e->lno + len;
			n->s_lno = e->s_lno + len;
			n->num_lines = e->num_lines - len;
			e->num_lines = len;
			e->score = 0;
			/* Push new record to diffp */
			n->next = diffp;
			diffp = n;
		} else
			blame_origin_decref(e->suspect);
		/* Pass blame for everything before the differing
		 * chunk to the parent */
		e->suspect = blame_origin_incref(parent);
		e->s_lno += offset;
		e->next = samep;
		samep = e;
		e = next;
	}
	/*
	 * As we don't know how much of a common stretch after this
	 * diff will occur, the currently blamed parts are all that we
	 * can assign to the parent for now.
	 */

	if (samep) {
		**dstq = reverse_blame(samep, **dstq);
		*dstq = &samep->next;
	}
	/*
	 * Prepend the split off portions: everything after e starts
	 * after the blameable portion.
	 */
	e = reverse_blame(diffp, e);

	/*
	 * Now retain records on the target while parts are different
	 * from the parent.
	 */
	samep = NULL;
	diffp = NULL;
	while (e && e->s_lno < same) {
		struct blame_entry *next = e->next;

		/*
		 * If current record extends into sameness, need to split.
		 */
		if (e->s_lno + e->num_lines > same) {
			/*
			 * Move second half to a new record to be
			 * processed by later chunks
			 */
			int len = same - e->s_lno;
			struct blame_entry *n = xcalloc(1, sizeof(struct blame_entry));
			n->suspect = blame_origin_incref(e->suspect);
			n->lno = e->lno + len;
			n->s_lno = e->s_lno + len;
			n->num_lines = e->num_lines - len;
			e->num_lines = len;
			e->score = 0;
			/* Push new record to samep */
			n->next = samep;
			samep = n;
		}
		e->next = diffp;
		diffp = e;
		e = next;
	}
	**srcq = reverse_blame(diffp, reverse_blame(samep, e));
	/* Move across elements that are in the unblamable portion */
	if (diffp)
		*srcq = &diffp->next;
}

struct blame_chunk_cb_data {
	struct blame_origin *parent;
	long offset;
	struct blame_entry **dstq;
	struct blame_entry **srcq;
};

/* diff chunks are from parent to target */
static int blame_chunk_cb(long start_a, long count_a,
			  long start_b, long count_b, void *data)
{
	struct blame_chunk_cb_data *d = data;
	if (start_a - start_b != d->offset)
		die("internal error in blame::blame_chunk_cb");
	blame_chunk(&d->dstq, &d->srcq, start_b, start_a - start_b,
		    start_b + count_b, d->parent);
	d->offset = start_a + count_a - (start_b + count_b);
	return 0;
}

/*
 * We are looking at the origin 'target' and aiming to pass blame
 * for the lines it is suspected of to its parent.  Run diff to find
 * which lines came from parent and pass blame for them.
 */
static void pass_blame_to_parent(struct blame_scoreboard *sb,
				 struct blame_origin *target,
				 struct blame_origin *parent)
{
	mmfile_t file_p, file_o;
	struct blame_chunk_cb_data d;
	struct blame_entry *newdest = NULL;

	if (!target->suspects)
		return; /* nothing remains for this target */

	d.parent = parent;
	d.offset = 0;
	d.dstq = &newdest; d.srcq = &target->suspects;

	fill_origin_blob(&sb->revs->diffopt, parent, &file_p, &sb->num_read_blob);
	fill_origin_blob(&sb->revs->diffopt, target, &file_o, &sb->num_read_blob);
	sb->num_get_patch++;

	if (diff_hunks(&file_p, &file_o, blame_chunk_cb, &d, sb->xdl_opts))
		die("unable to generate diff (%s -> %s)",
		    oid_to_hex(&parent->commit->object.oid),
		    oid_to_hex(&target->commit->object.oid));
	/* The rest are the same as the parent */
	blame_chunk(&d.dstq, &d.srcq, INT_MAX, d.offset, INT_MAX, parent);
	*d.dstq = NULL;
	queue_blames(sb, parent, newdest);

	return;
}

/*
 * The lines in blame_entry after splitting blames many times can become
 * very small and trivial, and at some point it becomes pointless to
 * blame the parents.  E.g. "\t\t}\n\t}\n\n" appears everywhere in any
 * ordinary C program, and it is not worth saying it was copied from a
 * totally unrelated file in the parent.
 *
 * Compute how trivial the lines in the blame_entry are.
 */
unsigned blame_entry_score(struct blame_scoreboard *sb, struct blame_entry *e)
{
	unsigned score;
	const char *cp, *ep;

	if (e->score)
		return e->score;

	score = 1;
	cp = blame_nth_line(sb, e->lno);
	ep = blame_nth_line(sb, e->lno + e->num_lines);
	while (cp < ep) {
		unsigned ch = *((unsigned char *)cp);
		if (isalnum(ch))
			score++;
		cp++;
	}
	e->score = score;
	return score;
}

/*
 * best_so_far[] and potential[] are both a split of an existing blame_entry
 * that passes blame to the parent.
 * Maintain best_so_far as the best split so far, by comparing potential
 * and best_so_far and copying potential into best_so_far as needed.
 */
static void copy_split_if_better(struct blame_scoreboard *sb,
				 struct blame_entry *best_so_far,
				 struct blame_entry *potential)
{
	int i;

	if (!potential[1].suspect)
		return;
	if (best_so_far[1].suspect) {
		if (blame_entry_score(sb, &potential[1]) <
		    blame_entry_score(sb, &best_so_far[1]))
			return;
	}

	for (i = 0; i < 3; i++)
		blame_origin_incref(potential[i].suspect);
	decref_split(best_so_far);
	memcpy(best_so_far, potential, sizeof(struct blame_entry[3]));
}

/*
 * We are looking at a part of the final image represented by
 * ent (tlno and same are offset by ent->s_lno).
 * tlno is where we are looking at in the final image.
 * Lines up to (but not including) same match the preimage.
 * plno is where we are looking at in the preimage.
 *
 * <-------------- final image ---------------------->
 *       <------ent------>
 *          ^tlno ^same
 *    <---------preimage----->
 *      ^plno
 *
 * All line numbers are 0-based.
 */
static void handle_split(struct blame_scoreboard *sb,
			 struct blame_entry *ent,
			 int tlno, int plno, int same,
			 struct blame_origin *parent,
			 struct blame_entry *split)
{
	if (ent->num_lines <= tlno)
		return;
	if (tlno < same) {
		struct blame_entry potential[3];
		tlno += ent->s_lno;
		same += ent->s_lno;
		split_overlap(potential, ent, tlno, plno, same, parent);
		copy_split_if_better(sb, split, potential);
		decref_split(potential);
	}
}

struct handle_split_cb_data {
	struct blame_scoreboard *sb;
	struct blame_entry *ent;
	struct blame_origin *parent;
	struct blame_entry *split;
	long plno;
	long tlno;
};

static int handle_split_cb(long start_a, long count_a,
			   long start_b, long count_b, void *data)
{
	struct handle_split_cb_data *d = data;
	handle_split(d->sb, d->ent, d->tlno, d->plno, start_b, d->parent,
		     d->split);
	d->plno = start_a + count_a;
	d->tlno = start_b + count_b;
	return 0;
}

/*
 * Find the lines from parent that are the same as ent so that
 * we can pass blames to it.
 * file_p has the blob contents for the parent.
 */
static void find_copy_in_blob(struct blame_scoreboard *sb,
			      struct blame_entry *ent,
			      struct blame_origin *parent,
			      struct blame_entry *split,
			      mmfile_t *file_p)
{
	const char *cp;
	mmfile_t file_o;
	struct handle_split_cb_data d;

	memset(&d, 0, sizeof(d));
	d.sb = sb; d.ent = ent; d.parent = parent; d.split = split;
	/*
	 * Prepare mmfile that contains only the lines in ent.
	 */
	cp = blame_nth_line(sb, ent->lno);
	file_o.ptr = (char *) cp;
	file_o.size = blame_nth_line(sb, ent->lno + ent->num_lines) - cp;

	/*
	 * file_o is a part of final image we are annotating.
	 * file_p partially may match that image.
	 */
	memset(split, 0, sizeof(struct blame_entry [3]));
	if (diff_hunks(file_p, &file_o, handle_split_cb, &d, sb->xdl_opts))
		die("unable to generate diff (%s)",
		    oid_to_hex(&parent->commit->object.oid));
	/* remainder, if any, all match the preimage */
	handle_split(sb, ent, d.tlno, d.plno, ent->num_lines, parent, split);
}

/* Move all blame entries from list *source that have a score smaller
 * than score_min to the front of list *small.
 * Returns a pointer to the link pointing to the old head of the small list.
 */

static struct blame_entry **filter_small(struct blame_scoreboard *sb,
					 struct blame_entry **small,
					 struct blame_entry **source,
					 unsigned score_min)
{
	struct blame_entry *p = *source;
	struct blame_entry *oldsmall = *small;
	while (p) {
		if (blame_entry_score(sb, p) <= score_min) {
			*small = p;
			small = &p->next;
			p = *small;
		} else {
			*source = p;
			source = &p->next;
			p = *source;
		}
	}
	*small = oldsmall;
	*source = NULL;
	return small;
}

/*
 * See if the lines that target is currently suspected for can be
 * attributed to the parent.
 */
static void find_move_in_parent(struct blame_scoreboard *sb,
				struct blame_entry ***blamed,
				struct blame_entry **toosmall,
				struct blame_origin *target,
				struct blame_origin *parent)
{
	struct blame_entry *e, split[3];
	struct blame_entry *unblamed = target->suspects;
	struct blame_entry *leftover = NULL;
	mmfile_t file_p;

	if (!unblamed)
		return; /* nothing remains for this target */

	fill_origin_blob(&sb->revs->diffopt, parent, &file_p, &sb->num_read_blob);
	if (!file_p.ptr)
		return;

	/* At each iteration, unblamed has a NULL-terminated list of
	 * entries that have not yet been tested for blame.
	 * leftover contains the reversed list of entries that have been
	 * tested without being assignable to the parent.
	 */
	do {
		struct blame_entry **unblamedtail = &unblamed;
		struct blame_entry *next;
		for (e = unblamed; e; e = next) {
			next = e->next;
			find_copy_in_blob(sb, e, parent, split, &file_p);
			if (split[1].suspect &&
			    sb->move_score < blame_entry_score(sb, &split[1])) {
				split_blame(blamed, &unblamedtail, split, e);
			} else {
				e->next = leftover;
				leftover = e;
			}
			decref_split(split);
		}
		*unblamedtail = NULL;
		toosmall = filter_small(sb, toosmall, &unblamed, sb->move_score);
	} while (unblamed);
	target->suspects = reverse_blame(leftover, NULL);
}

struct blame_list {
	struct blame_entry *ent;
	struct blame_entry split[3];
};

/*
 * Count the number of entries the target is suspected for,
 * and prepare a list of entries and the best split.
 */
static struct blame_list *setup_blame_list(struct blame_entry *unblamed,
					   int *num_ents_p)
{
	struct blame_entry *e;
	int num_ents, i;
	struct blame_list *blame_list = NULL;

	for (e = unblamed, num_ents = 0; e; e = e->next)
		num_ents++;
	if (num_ents) {
		blame_list = xcalloc(num_ents, sizeof(struct blame_list));
		for (e = unblamed, i = 0; e; e = e->next)
			blame_list[i++].ent = e;
	}
	*num_ents_p = num_ents;
	return blame_list;
}

/*
 * For lines target is suspected for, see if we can find code movement
 * across a file boundary from the parent commit.  porigin is the path
 * in the parent we already tried.
 */
static void find_copy_in_parent(struct blame_scoreboard *sb,
				struct blame_entry ***blamed,
				struct blame_entry **toosmall,
				struct blame_origin *target,
				struct commit *parent,
				struct blame_origin *porigin,
				int opt)
{
	struct diff_options diff_opts;
	int i, j;
	struct blame_list *blame_list;
	int num_ents;
	struct blame_entry *unblamed = target->suspects;
	struct blame_entry *leftover = NULL;

	if (!unblamed)
		return; /* nothing remains for this target */

	repo_diff_setup(sb->repo, &diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;

	diff_setup_done(&diff_opts);

	/* Try "find copies harder" on new path if requested;
	 * we do not want to use diffcore_rename() actually to
	 * match things up; find_copies_harder is set only to
	 * force diff_tree_oid() to feed all filepairs to diff_queue,
	 * and this code needs to be after diff_setup_done(), which
	 * usually makes find-copies-harder imply copy detection.
	 */
	if ((opt & PICKAXE_BLAME_COPY_HARDEST)
	    || ((opt & PICKAXE_BLAME_COPY_HARDER)
		&& (!porigin || strcmp(target->path, porigin->path))))
		diff_opts.flags.find_copies_harder = 1;

	if (is_null_oid(&target->commit->object.oid))
		do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
	else
		diff_tree_oid(get_commit_tree_oid(parent),
			      get_commit_tree_oid(target->commit),
			      "", &diff_opts);

	if (!diff_opts.flags.find_copies_harder)
		diffcore_std(&diff_opts);

	do {
		struct blame_entry **unblamedtail = &unblamed;
		blame_list = setup_blame_list(unblamed, &num_ents);

		for (i = 0; i < diff_queued_diff.nr; i++) {
			struct diff_filepair *p = diff_queued_diff.queue[i];
			struct blame_origin *norigin;
			mmfile_t file_p;
			struct blame_entry potential[3];

			if (!DIFF_FILE_VALID(p->one))
				continue; /* does not exist in parent */
			if (S_ISGITLINK(p->one->mode))
				continue; /* ignore git links */
			if (porigin && !strcmp(p->one->path, porigin->path))
				/* find_move already dealt with this path */
				continue;

			norigin = get_origin(parent, p->one->path);
			oidcpy(&norigin->blob_oid, &p->one->oid);
			norigin->mode = p->one->mode;
			fill_origin_blob(&sb->revs->diffopt, norigin, &file_p, &sb->num_read_blob);
			if (!file_p.ptr)
				continue;

			for (j = 0; j < num_ents; j++) {
				find_copy_in_blob(sb, blame_list[j].ent,
						  norigin, potential, &file_p);
				copy_split_if_better(sb, blame_list[j].split,
						     potential);
				decref_split(potential);
			}
			blame_origin_decref(norigin);
		}

		for (j = 0; j < num_ents; j++) {
			struct blame_entry *split = blame_list[j].split;
			if (split[1].suspect &&
			    sb->copy_score < blame_entry_score(sb, &split[1])) {
				split_blame(blamed, &unblamedtail, split,
					    blame_list[j].ent);
			} else {
				blame_list[j].ent->next = leftover;
				leftover = blame_list[j].ent;
			}
			decref_split(split);
		}
		free(blame_list);
		*unblamedtail = NULL;
		toosmall = filter_small(sb, toosmall, &unblamed, sb->copy_score);
	} while (unblamed);
	target->suspects = reverse_blame(leftover, NULL);
	diff_flush(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);
}

/*
 * The blobs of origin and porigin exactly match, so everything
 * origin is suspected for can be blamed on the parent.
 */
static void pass_whole_blame(struct blame_scoreboard *sb,
			     struct blame_origin *origin, struct blame_origin *porigin)
{
	struct blame_entry *e, *suspects;

	if (!porigin->file.ptr && origin->file.ptr) {
		/* Steal its file */
		porigin->file = origin->file;
		origin->file.ptr = NULL;
	}
	suspects = origin->suspects;
	origin->suspects = NULL;
	for (e = suspects; e; e = e->next) {
		blame_origin_incref(porigin);
		blame_origin_decref(e->suspect);
		e->suspect = porigin;
	}
	queue_blames(sb, porigin, suspects);
}

/*
 * We pass blame from the current commit to its parents.
 * We keep saying "parent" (and "porigin"), but what we mean is to find
 * a scapegoat to exonerate ourselves.
 */
static struct commit_list *first_scapegoat(struct rev_info *revs, struct commit *commit,
					   int reverse)
{
	if (!reverse) {
		if (revs->first_parent_only &&
		    commit->parents &&
		    commit->parents->next) {
			free_commit_list(commit->parents->next);
			commit->parents->next = NULL;
		}
		return commit->parents;
	}
	return lookup_decoration(&revs->children, &commit->object);
}

static int num_scapegoats(struct rev_info *revs, struct commit *commit, int reverse)
{
	struct commit_list *l = first_scapegoat(revs, commit, reverse);
	return commit_list_count(l);
}

/* Distribute collected unsorted blames to the respective sorted lists
 * in the various origins.
 */
static void distribute_blame(struct blame_scoreboard *sb, struct blame_entry *blamed)
{
	blamed = llist_mergesort(blamed, get_next_blame, set_next_blame,
				 compare_blame_suspect);
	while (blamed)
	{
		struct blame_origin *porigin = blamed->suspect;
		struct blame_entry *suspects = NULL;
		do {
			struct blame_entry *next = blamed->next;
			blamed->next = suspects;
			suspects = blamed;
			blamed = next;
		} while (blamed && blamed->suspect == porigin);
		suspects = reverse_blame(suspects, NULL);
		queue_blames(sb, porigin, suspects);
	}
}

#define MAXSG 16

static void pass_blame(struct blame_scoreboard *sb, struct blame_origin *origin, int opt)
{
	struct rev_info *revs = sb->revs;
	int i, pass, num_sg;
	struct commit *commit = origin->commit;
	struct commit_list *sg;
	struct blame_origin *sg_buf[MAXSG];
	struct blame_origin *porigin, **sg_origin = sg_buf;
	struct blame_entry *toosmall = NULL;
	struct blame_entry *blames, **blametail = &blames;

	num_sg = num_scapegoats(revs, commit, sb->reverse);
	if (!num_sg)
		goto finish;
	else if (num_sg < ARRAY_SIZE(sg_buf))
		memset(sg_buf, 0, sizeof(sg_buf));
	else
		sg_origin = xcalloc(num_sg, sizeof(*sg_origin));

	/*
	 * The first pass looks for an unrenamed path to optimize for
	 * common cases, then we look for renames in the second pass.
	 */
	for (pass = 0; pass < 2 - sb->no_whole_file_rename; pass++) {
		struct blame_origin *(*find)(struct repository *, struct commit *, struct blame_origin *);
		find = pass ? find_rename : find_origin;

		for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
		     i < num_sg && sg;
		     sg = sg->next, i++) {
			struct commit *p = sg->item;
			int j, same;

			if (sg_origin[i])
				continue;
			if (parse_commit(p))
				continue;
			porigin = find(sb->repo, p, origin);
			if (!porigin)
				continue;
			if (oideq(&porigin->blob_oid, &origin->blob_oid)) {
				pass_whole_blame(sb, origin, porigin);
				blame_origin_decref(porigin);
				goto finish;
			}
			for (j = same = 0; j < i; j++)
				if (sg_origin[j] &&
				    oideq(&sg_origin[j]->blob_oid, &porigin->blob_oid)) {
					same = 1;
					break;
				}
			if (!same)
				sg_origin[i] = porigin;
			else
				blame_origin_decref(porigin);
		}
	}

	sb->num_commits++;
	for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
	     i < num_sg && sg;
	     sg = sg->next, i++) {
		struct blame_origin *porigin = sg_origin[i];
		if (!porigin)
			continue;
		if (!origin->previous) {
			blame_origin_incref(porigin);
			origin->previous = porigin;
		}
		pass_blame_to_parent(sb, origin, porigin);
		if (!origin->suspects)
			goto finish;
	}

	/*
	 * Optionally find moves in parents' files.
	 */
	if (opt & PICKAXE_BLAME_MOVE) {
		filter_small(sb, &toosmall, &origin->suspects, sb->move_score);
		if (origin->suspects) {
			for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
			     i < num_sg && sg;
			     sg = sg->next, i++) {
				struct blame_origin *porigin = sg_origin[i];
				if (!porigin)
					continue;
				find_move_in_parent(sb, &blametail, &toosmall, origin, porigin);
				if (!origin->suspects)
					break;
			}
		}
	}

	/*
	 * Optionally find copies from parents' files.
	 */
	if (opt & PICKAXE_BLAME_COPY) {
		if (sb->copy_score > sb->move_score)
			filter_small(sb, &toosmall, &origin->suspects, sb->copy_score);
		else if (sb->copy_score < sb->move_score) {
			origin->suspects = blame_merge(origin->suspects, toosmall);
			toosmall = NULL;
			filter_small(sb, &toosmall, &origin->suspects, sb->copy_score);
		}
		if (!origin->suspects)
			goto finish;

		for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
		     i < num_sg && sg;
		     sg = sg->next, i++) {
			struct blame_origin *porigin = sg_origin[i];
			find_copy_in_parent(sb, &blametail, &toosmall,
					    origin, sg->item, porigin, opt);
			if (!origin->suspects)
				goto finish;
		}
	}

finish:
	*blametail = NULL;
	distribute_blame(sb, blames);
	/*
	 * prepend toosmall to origin->suspects
	 *
	 * There is no point in sorting: this ends up on a big
	 * unsorted list in the caller anyway.
	 */
	if (toosmall) {
		struct blame_entry **tail = &toosmall;
		while (*tail)
			tail = &(*tail)->next;
		*tail = origin->suspects;
		origin->suspects = toosmall;
	}
	for (i = 0; i < num_sg; i++) {
		if (sg_origin[i]) {
			drop_origin_blob(sg_origin[i]);
			blame_origin_decref(sg_origin[i]);
		}
	}
	drop_origin_blob(origin);
	if (sg_buf != sg_origin)
		free(sg_origin);
}

/*
 * The main loop -- while we have blobs with lines whose true origin
 * is still unknown, pick one blob, and allow its lines to pass blames
 * to its parents.
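 *
 * Commits are taken from sb->commits, a priority queue ordered by
 * commit date (or by reverse commit date when blaming in reverse).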
 */
void assign_blame(struct blame_scoreboard *sb, int opt)
{
	struct rev_info *revs = sb->revs;
	struct commit *commit = prio_queue_get(&sb->commits);

	while (commit) {
		struct blame_entry *ent;
		struct blame_origin *suspect = get_blame_suspects(commit);

		/* find one suspect to break down */
		while (suspect && !suspect->suspects)
			suspect = suspect->next;

		if (!suspect) {
			commit = prio_queue_get(&sb->commits);
			continue;
		}

		assert(commit == suspect->commit);

		/*
		 * We will use this suspect later in the loop,
		 * so hold onto it in the meantime.
		 */
		blame_origin_incref(suspect);
		parse_commit(commit);
		if (sb->reverse ||
		    (!(commit->object.flags & UNINTERESTING) &&
		     !(revs->max_age != -1 && commit->date < revs->max_age)))
			pass_blame(sb, suspect, opt);
		else {
			commit->object.flags |= UNINTERESTING;
			if (commit->object.parsed)
				mark_parents_uninteresting(commit);
		}
		/* treat root commit as boundary */
		if (!commit->parents && !sb->show_root)
			commit->object.flags |= UNINTERESTING;

		/* Take responsibility for the remaining entries */
		ent = suspect->suspects;
		if (ent) {
			suspect->guilty = 1;
			for (;;) {
				struct blame_entry *next = ent->next;
				if (sb->found_guilty_entry)
					sb->found_guilty_entry(ent, sb->found_guilty_entry_data);
				if (next) {
					ent = next;
					continue;
				}
				ent->next = sb->ent;
				sb->ent = suspect->suspects;
				suspect->suspects = NULL;
				break;
			}
		}
		blame_origin_decref(suspect);

		if (sb->debug) /* sanity */
			sanity_check_refcnt(sb);
	}
}

static const char *get_next_line(const char *start, const char *end)
{
	const char *nl = memchr(start, '\n', end - start);
	return nl ? nl + 1 : end;
}

/*
 * To allow quick access to the contents of nth line in the
 * final image, prepare an index in the scoreboard.
 */
static int prepare_lines(struct blame_scoreboard *sb)
{
	const char *buf = sb->final_buf;
	unsigned long len = sb->final_buf_size;
	const char *end = buf + len;
	const char *p;
	int *lineno;
	int num = 0;

	for (p = buf; p < end; p = get_next_line(p, end))
		num++;

	ALLOC_ARRAY(sb->lineno, num + 1);
	lineno = sb->lineno;

	for (p = buf; p < end; p = get_next_line(p, end))
		*lineno++ = p - buf;

	*lineno = len;

	sb->num_lines = num;
	return sb->num_lines;
}

static struct commit *find_single_final(struct rev_info *revs,
					const char **name_p)
{
	int i;
	struct commit *found = NULL;
	const char *name = NULL;

	for (i = 0; i < revs->pending.nr; i++) {
		struct object *obj = revs->pending.objects[i].item;
		if (obj->flags & UNINTERESTING)
			continue;
		obj = deref_tag(revs->repo, obj, NULL, 0);
		if (obj->type != OBJ_COMMIT)
			die("Non commit %s?", revs->pending.objects[i].name);
		if (found)
			die("More than one commit to dig from %s and %s?",
			    revs->pending.objects[i].name, name);
		found = (struct commit *)obj;
		name = revs->pending.objects[i].name;
	}
	if (name_p)
		*name_p = xstrdup_or_null(name);
	return found;
}

static struct commit *dwim_reverse_initial(struct rev_info *revs,
					   const char **name_p)
{
	/*
	 * DWIM "git blame --reverse ONE -- PATH" as
	 * "git blame --reverse ONE..HEAD -- PATH" but only do so
	 * when it makes sense.
	 */
	struct object *obj;
	struct commit *head_commit;
	struct object_id head_oid;

	if (revs->pending.nr != 1)
		return NULL;

	/* Is that sole rev a committish? */
	obj = revs->pending.objects[0].item;
	obj = deref_tag(revs->repo, obj, NULL, 0);
	if (obj->type != OBJ_COMMIT)
		return NULL;

	/* Do we have HEAD? */
	if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL))
		return NULL;
	head_commit = lookup_commit_reference_gently(revs->repo,
						     &head_oid, 1);
	if (!head_commit)
		return NULL;

	/* Turn "ONE" into "ONE..HEAD" then */
	obj->flags |= UNINTERESTING;
	add_pending_object(revs, &head_commit->object, "HEAD");

	if (name_p)
		*name_p = revs->pending.objects[0].name;
	return (struct commit *)obj;
}

static struct commit *find_single_initial(struct rev_info *revs,
					  const char **name_p)
{
	int i;
	struct commit *found = NULL;
	const char *name = NULL;

	/*
	 * There must be one and only one negative commit, and it must be
	 * the boundary.
	 */
	for (i = 0; i < revs->pending.nr; i++) {
		struct object *obj = revs->pending.objects[i].item;
		if (!(obj->flags & UNINTERESTING))
			continue;
		obj = deref_tag(revs->repo, obj, NULL, 0);
		if (obj->type != OBJ_COMMIT)
			die("Non commit %s?", revs->pending.objects[i].name);
		if (found)
			die("More than one commit to dig up from, %s and %s?",
			    revs->pending.objects[i].name, name);
		found = (struct commit *) obj;
		name = revs->pending.objects[i].name;
	}

	if (!name)
		found = dwim_reverse_initial(revs, &name);
	if (!name)
		die("No commit to dig up from?");

	if (name_p)
		*name_p = xstrdup(name);
	return found;
}

void init_scoreboard(struct blame_scoreboard *sb)
{
	memset(sb, 0, sizeof(struct blame_scoreboard));
	sb->move_score = BLAME_DEFAULT_MOVE_SCORE;
	sb->copy_score = BLAME_DEFAULT_COPY_SCORE;
}

void setup_scoreboard(struct blame_scoreboard *sb,
		      const char *path,
		      struct blame_origin **orig)
{
	const char *final_commit_name = NULL;
	struct blame_origin *o;
	struct commit *final_commit = NULL;
	enum object_type type;

	init_blame_suspects(&blame_suspects);

	if (sb->reverse && sb->contents_from)
		die(_("--contents and --reverse do not blend well."));

	if (!sb->repo)
		BUG("repo is NULL");

	if (!sb->reverse) {
		sb->final = find_single_final(sb->revs, &final_commit_name);
		sb->commits.compare = compare_commits_by_commit_date;
	} else {
		sb->final = find_single_initial(sb->revs, &final_commit_name);
		sb->commits.compare = compare_commits_by_reverse_commit_date;
	}

	if (sb->final && sb->contents_from)
		die(_("cannot use --contents with final commit object name"));

	if (sb->reverse && sb->revs->first_parent_only)
		sb->revs->children.name = NULL;

	if (!sb->final) {
		/*
		 * "--not A B -- path" without anything positive;
		 * do not default to HEAD, but use the working tree
		 * or "--contents".
		 */
		setup_work_tree();
		sb->final = fake_working_tree_commit(sb->repo,
						     &sb->revs->diffopt,
						     path, sb->contents_from);
		add_pending_object(sb->revs, &(sb->final->object), ":");
	}

	if (sb->reverse && sb->revs->first_parent_only) {
		final_commit = find_single_final(sb->revs, NULL);
		if (!final_commit)
			die(_("--reverse and --first-parent together require specified latest commit"));
	}

	/*
	 * If we have bottom, this will mark the ancestors of the
	 * bottom commits we would reach while traversing as
	 * uninteresting.
	 */
	if (prepare_revision_walk(sb->revs))
		die(_("revision walk setup failed"));

	if (sb->reverse && sb->revs->first_parent_only) {
		struct commit *c = final_commit;

		sb->revs->children.name = "children";
		while (c->parents &&
		       !oideq(&c->object.oid, &sb->final->object.oid)) {
			struct commit_list *l = xcalloc(1, sizeof(*l));

			l->item = c;
			if (add_decoration(&sb->revs->children,
					   &c->parents->item->object, l))
				BUG("not unique item in first-parent chain");
			c = c->parents->item;
		}

		if (!oideq(&c->object.oid, &sb->final->object.oid))
			die(_("--reverse --first-parent together require range along first-parent chain"));
	}

	if (is_null_oid(&sb->final->object.oid)) {
		o = get_blame_suspects(sb->final);
		sb->final_buf = xmemdupz(o->file.ptr, o->file.size);
		sb->final_buf_size = o->file.size;
	}
	else {
		o = get_origin(sb->final, path);
		if (fill_blob_sha1_and_mode(sb->repo, o))
			die(_("no such path %s in %s"), path, final_commit_name);

		if (sb->revs->diffopt.flags.allow_textconv &&
		    textconv_object(sb->repo, path, o->mode, &o->blob_oid, 1, (char **) &sb->final_buf,
				    &sb->final_buf_size))
			;
		else
			sb->final_buf = read_object_file(&o->blob_oid, &type,
							 &sb->final_buf_size);

		if (!sb->final_buf)
			die(_("cannot read blob %s for path %s"),
			    oid_to_hex(&o->blob_oid),
			    path);
	}
	sb->num_read_blob++;
	prepare_lines(sb);

	if (orig)
		*orig = o;

	free((char *)final_commit_name);
}

struct blame_entry *blame_entry_prepend(struct blame_entry *head,
					long start, long end,
					struct blame_origin *o)
{
	struct blame_entry *new_head = xcalloc(1, sizeof(struct blame_entry));
	new_head->lno = start;
	new_head->num_lines = end - start;
	new_head->suspect = o;
	new_head->s_lno = start;
	new_head->next = head;
	blame_origin_incref(o);
	return new_head;
}