1#include"cache.h" 2#include"refs.h" 3#include"object-store.h" 4#include"cache-tree.h" 5#include"mergesort.h" 6#include"diff.h" 7#include"diffcore.h" 8#include"tag.h" 9#include"blame.h" 10#include"alloc.h" 11#include"commit-slab.h" 12 13define_commit_slab(blame_suspects,struct blame_origin *); 14static struct blame_suspects blame_suspects; 15 16struct blame_origin *get_blame_suspects(struct commit *commit) 17{ 18struct blame_origin **result; 19 20 result =blame_suspects_peek(&blame_suspects, commit); 21 22return result ? *result : NULL; 23} 24 25static voidset_blame_suspects(struct commit *commit,struct blame_origin *origin) 26{ 27*blame_suspects_at(&blame_suspects, commit) = origin; 28} 29 30voidblame_origin_decref(struct blame_origin *o) 31{ 32if(o && --o->refcnt <=0) { 33struct blame_origin *p, *l = NULL; 34if(o->previous) 35blame_origin_decref(o->previous); 36free(o->file.ptr); 37/* Should be present exactly once in commit chain */ 38for(p =get_blame_suspects(o->commit); p; l = p, p = p->next) { 39if(p == o) { 40if(l) 41 l->next = p->next; 42else 43set_blame_suspects(o->commit, p->next); 44free(o); 45return; 46} 47} 48die("internal error in blame_origin_decref"); 49} 50} 51 52/* 53 * Given a commit and a path in it, create a new origin structure. 54 * The callers that add blame to the scoreboard should use 55 * get_origin() to obtain shared, refcounted copy instead of calling 56 * this function directly. 57 */ 58static struct blame_origin *make_origin(struct commit *commit,const char*path) 59{ 60struct blame_origin *o; 61FLEX_ALLOC_STR(o, path, path); 62 o->commit = commit; 63 o->refcnt =1; 64 o->next =get_blame_suspects(commit); 65set_blame_suspects(commit, o); 66return o; 67} 68 69/* 70 * Locate an existing origin or create a new one. 71 * This moves the origin to front position in the commit util list. 

/*
 * Locate an existing origin or create a new one.
 * This moves the origin to front position in the commit util list.
 */
static struct blame_origin *get_origin(struct commit *commit, const char *path)
{
	struct blame_origin *o, *l;

	for (o = get_blame_suspects(commit), l = NULL; o; l = o, o = o->next) {
		if (!strcmp(o->path, path)) {
			/* bump to front */
			if (l) {
				l->next = o->next;
				o->next = get_blame_suspects(commit);
				set_blame_suspects(commit, o);
			}
			return blame_origin_incref(o);
		}
	}
	return make_origin(commit, path);
}


static void verify_working_tree_path(struct repository *r,
				     struct commit *work_tree, const char *path)
{
	struct commit_list *parents;
	int pos;

	for (parents = work_tree->parents; parents; parents = parents->next) {
		const struct object_id *commit_oid = &parents->item->object.oid;
		struct object_id blob_oid;
		unsigned mode;

		if (!get_tree_entry(commit_oid, path, &blob_oid, &mode) &&
		    oid_object_info(r, &blob_oid, NULL) == OBJ_BLOB)
			return;
	}

	pos = index_name_pos(r->index, path, strlen(path));
	if (pos >= 0)
		; /* path is in the index */
	else if (-1 - pos < r->index->cache_nr &&
		 !strcmp(r->index->cache[-1 - pos]->name, path))
		; /* path is in the index, unmerged */
	else
		die("no such path '%s' in HEAD", path);
}

static struct commit_list **append_parent(struct commit_list **tail, const struct object_id *oid)
{
	struct commit *parent;

	parent = lookup_commit_reference(the_repository, oid);
	if (!parent)
		die("no such commit %s", oid_to_hex(oid));
	return &commit_list_insert(parent, tail)->next;
}

static void append_merge_parents(struct commit_list **tail)
{
	int merge_head;
	struct strbuf line = STRBUF_INIT;

	merge_head = open(git_path_merge_head(the_repository), O_RDONLY);
	if (merge_head < 0) {
		if (errno == ENOENT)
			return;
		die("cannot open '%s' for reading",
		    git_path_merge_head(the_repository));
	}

	while (!strbuf_getwholeline_fd(&line, merge_head, '\n')) {
		struct object_id oid;
		if (line.len < GIT_SHA1_HEXSZ || get_oid_hex(line.buf, &oid))
			die("unknown line in '%s': %s",
			    git_path_merge_head(the_repository), line.buf);
		tail = append_parent(tail, &oid);
	}
	close(merge_head);
	strbuf_release(&line);
}

/*
 * This isn't as simple as passing sb->buf and sb->len, because we
 * want to transfer ownership of the buffer to the commit (so we
 * must use detach).
 */
static void set_commit_buffer_from_strbuf(struct commit *c, struct strbuf *sb)
{
	size_t len;
	void *buf = strbuf_detach(sb, &len);
	set_commit_buffer(the_repository, c, buf, len);
}

/*
 * Prepare a dummy commit that represents the work tree (or staged) item.
 * Note that annotating the work tree item never works in reverse.
 */
static struct commit *fake_working_tree_commit(struct repository *r,
					       struct diff_options *opt,
					       const char *path,
					       const char *contents_from)
{
	struct commit *commit;
	struct blame_origin *origin;
	struct commit_list **parent_tail, *parent;
	struct object_id head_oid;
	struct strbuf buf = STRBUF_INIT;
	const char *ident;
	time_t now;
	int len;
	struct cache_entry *ce;
	unsigned mode;
	struct strbuf msg = STRBUF_INIT;

	read_index(r->index);
	time(&now);
	commit = alloc_commit_node(the_repository);
	commit->object.parsed = 1;
	commit->date = now;
	parent_tail = &commit->parents;

	if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL))
		die("no such ref: HEAD");

	parent_tail = append_parent(parent_tail, &head_oid);
	append_merge_parents(parent_tail);
	verify_working_tree_path(r, commit, path);

	origin = make_origin(commit, path);

	ident = fmt_ident("Not Committed Yet", "not.committed.yet", NULL, 0);
	strbuf_addstr(&msg, "tree 0000000000000000000000000000000000000000\n");
	for (parent = commit->parents; parent; parent = parent->next)
		strbuf_addf(&msg, "parent %s\n",
			    oid_to_hex(&parent->item->object.oid));
	strbuf_addf(&msg,
		    "author %s\n"
		    "committer %s\n\n"
		    "Version of %s from %s\n",
		    ident, ident, path,
		    (!contents_from ? path :
		     (!strcmp(contents_from, "-") ? "standard input" : contents_from)));
	set_commit_buffer_from_strbuf(commit, &msg);

	if (!contents_from || strcmp("-", contents_from)) {
		struct stat st;
		const char *read_from;
		char *buf_ptr;
		unsigned long buf_len;

		if (contents_from) {
			if (stat(contents_from, &st) < 0)
				die_errno("Cannot stat '%s'", contents_from);
			read_from = contents_from;
		}
		else {
			if (lstat(path, &st) < 0)
				die_errno("Cannot lstat '%s'", path);
			read_from = path;
		}
		mode = canon_mode(st.st_mode);

		switch (st.st_mode & S_IFMT) {
		case S_IFREG:
			if (opt->flags.allow_textconv &&
			    textconv_object(r, read_from, mode, &null_oid, 0, &buf_ptr, &buf_len))
				strbuf_attach(&buf, buf_ptr, buf_len, buf_len + 1);
			else if (strbuf_read_file(&buf, read_from, st.st_size) != st.st_size)
				die_errno("cannot open or read '%s'", read_from);
			break;
		case S_IFLNK:
			if (strbuf_readlink(&buf, read_from, st.st_size) < 0)
				die_errno("cannot readlink '%s'", read_from);
			break;
		default:
			die("unsupported file type %s", read_from);
		}
	}
	else {
		/* Reading from stdin */
		mode = 0;
		if (strbuf_read(&buf, 0, 0) < 0)
			die_errno("failed to read from stdin");
	}
	convert_to_git(r->index, path, buf.buf, buf.len, &buf, 0);
	origin->file.ptr = buf.buf;
	origin->file.size = buf.len;
	pretend_object_file(buf.buf, buf.len, OBJ_BLOB, &origin->blob_oid);

	/*
	 * Read the current index, replace the path entry with
	 * origin->blob_sha1 without mucking with its mode or type
	 * bits; we are not going to write this index out -- we just
	 * want to run "diff-index --cached".
	 */
	discard_index(r->index);
	read_index(r->index);

	len = strlen(path);
	if (!mode) {
		int pos = index_name_pos(r->index, path, len);
		if (0 <= pos)
			mode = r->index->cache[pos]->ce_mode;
		else
			/* Let's not bother reading from HEAD tree */
			mode = S_IFREG | 0644;
	}
	ce = make_empty_cache_entry(r->index, len);
	oidcpy(&ce->oid, &origin->blob_oid);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(0);
	ce->ce_namelen = len;
	ce->ce_mode = create_ce_mode(mode);
	add_index_entry(r->index, ce,
			ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);

	cache_tree_invalidate_path(r->index, path);

	return commit;
}


static int diff_hunks(mmfile_t *file_a, mmfile_t *file_b,
		      xdl_emit_hunk_consume_func_t hunk_func, void *cb_data, int xdl_opts)
{
	xpparam_t xpp = {0};
	xdemitconf_t xecfg = {0};
	xdemitcb_t ecb = {NULL};

	xpp.flags = xdl_opts;
	xecfg.hunk_func = hunk_func;
	ecb.priv = cb_data;
	return xdi_diff(file_a, file_b, &xpp, &xecfg, &ecb);
}

/*
 * Given an origin, prepare mmfile_t structure to be used by the
 * diff machinery
 */
static void fill_origin_blob(struct diff_options *opt,
			     struct blame_origin *o, mmfile_t *file, int *num_read_blob)
{
	if (!o->file.ptr) {
		enum object_type type;
		unsigned long file_size;

		(*num_read_blob)++;
		if (opt->flags.allow_textconv &&
		    textconv_object(opt->repo, o->path, o->mode,
				    &o->blob_oid, 1, &file->ptr, &file_size))
			;
		else
			file->ptr = read_object_file(&o->blob_oid, &type,
						     &file_size);
		file->size = file_size;

		if (!file->ptr)
			die("Cannot read blob %s for path %s",
			    oid_to_hex(&o->blob_oid),
			    o->path);
		o->file = *file;
	}
	else
		*file = o->file;
}

static void drop_origin_blob(struct blame_origin *o)
{
	FREE_AND_NULL(o->file.ptr);
}

/*
 * Any merge of blames happens on lists of blames that arrived via
 * different parents in a single suspect. In this case, we want to
 * sort according to the suspect line numbers as opposed to the final
 * image line numbers. The function body is somewhat longish because
 * it avoids unnecessary writes.
 */

static struct blame_entry *blame_merge(struct blame_entry *list1,
				       struct blame_entry *list2)
{
	struct blame_entry *p1 = list1, *p2 = list2,
		**tail = &list1;

	if (!p1)
		return p2;
	if (!p2)
		return p1;

	if (p1->s_lno <= p2->s_lno) {
		do {
			tail = &p1->next;
			if ((p1 = *tail) == NULL) {
				*tail = p2;
				return list1;
			}
		} while (p1->s_lno <= p2->s_lno);
	}
	for (;;) {
		*tail = p2;
		do {
			tail = &p2->next;
			if ((p2 = *tail) == NULL) {
				*tail = p1;
				return list1;
			}
		} while (p1->s_lno > p2->s_lno);
		*tail = p1;
		do {
			tail = &p1->next;
			if ((p1 = *tail) == NULL) {
				*tail = p2;
				return list1;
			}
		} while (p1->s_lno <= p2->s_lno);
	}
}

static void *get_next_blame(const void *p)
{
	return ((struct blame_entry *)p)->next;
}

static void set_next_blame(void *p1, void *p2)
{
	((struct blame_entry *)p1)->next = p2;
}
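
/*
 * Worked example for blame_merge() above (added note, not part of the
 * original source): merging two lists ordered by suspect line number,
 *
 *	list1: [s_lno 4] -> [s_lno 10]
 *	list2: [s_lno 7] -> [s_lno 12]
 *
 * yields [4] -> [7] -> [10] -> [12].  Only the links that actually
 * change are rewritten, which is why the body alternates between runs
 * taken from each list instead of using a plain textbook merge loop.
 */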

/*
 * Final image line numbers are all different, so we don't need a
 * three-way comparison here.
 */

static int compare_blame_final(const void *p1, const void *p2)
{
	return ((struct blame_entry *)p1)->lno > ((struct blame_entry *)p2)->lno
		? 1 : -1;
}

static int compare_blame_suspect(const void *p1, const void *p2)
{
	const struct blame_entry *s1 = p1, *s2 = p2;
	/*
	 * to allow for collating suspects, we sort according to the
	 * respective pointer value as the primary sorting criterion.
	 * The actual relation is pretty unimportant as long as it
	 * establishes a total order.  Comparing as integers gives us
	 * that.
	 */
	if (s1->suspect != s2->suspect)
		return (intptr_t)s1->suspect > (intptr_t)s2->suspect ? 1 : -1;
	if (s1->s_lno == s2->s_lno)
		return 0;
	return s1->s_lno > s2->s_lno ? 1 : -1;
}

void blame_sort_final(struct blame_scoreboard *sb)
{
	sb->ent = llist_mergesort(sb->ent, get_next_blame, set_next_blame,
				  compare_blame_final);
}

static int compare_commits_by_reverse_commit_date(const void *a,
						  const void *b,
						  void *c)
{
	return -compare_commits_by_commit_date(a, b, c);
}

/*
 * For debugging -- origin is refcounted, and this asserts that
 * we do not underflow.
 */
static void sanity_check_refcnt(struct blame_scoreboard *sb)
{
	int baa = 0;
	struct blame_entry *ent;

	for (ent = sb->ent; ent; ent = ent->next) {
		/* Nobody should have zero or negative refcnt */
		if (ent->suspect->refcnt <= 0) {
			fprintf(stderr, "%s in %s has negative refcnt %d\n",
				ent->suspect->path,
				oid_to_hex(&ent->suspect->commit->object.oid),
				ent->suspect->refcnt);
			baa = 1;
		}
	}
	if (baa)
		sb->on_sanity_fail(sb, baa);
}

/*
 * If two blame entries that are next to each other came from
 * contiguous lines in the same origin (i.e. <commit, path> pair),
 * merge them together.
 */
void blame_coalesce(struct blame_scoreboard *sb)
{
	struct blame_entry *ent, *next;

	for (ent = sb->ent; ent && (next = ent->next); ent = next) {
		if (ent->suspect == next->suspect &&
		    ent->s_lno + ent->num_lines == next->s_lno) {
			ent->num_lines += next->num_lines;
			ent->next = next->next;
			blame_origin_decref(next->suspect);
			free(next);
			ent->score = 0;
			next = ent; /* again */
		}
	}

	if (sb->debug) /* sanity */
		sanity_check_refcnt(sb);
}

/*
 * Merge the given sorted list of blames into a preexisting origin.
 * If there were no previous blames to that commit, it is entered into
 * the commit priority queue of the score board.
 */

static void queue_blames(struct blame_scoreboard *sb, struct blame_origin *porigin,
			 struct blame_entry *sorted)
{
	if (porigin->suspects)
		porigin->suspects = blame_merge(porigin->suspects, sorted);
	else {
		struct blame_origin *o;
		for (o = get_blame_suspects(porigin->commit); o; o = o->next) {
			if (o->suspects) {
				porigin->suspects = sorted;
				return;
			}
		}
		porigin->suspects = sorted;
		prio_queue_put(&sb->commits, porigin->commit);
	}
}
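
/*
 * Worked example for blame_coalesce() above (added note, not part of
 * the original source): two neighboring entries
 *
 *	{ suspect = O, s_lno = 10, num_lines = 3 }
 *	{ suspect = O, s_lno = 13, num_lines = 2 }
 *
 * cover contiguous lines of the same origin O, so they collapse into a
 * single { suspect = O, s_lno = 10, num_lines = 5 } entry and the
 * second entry's reference on O is dropped.
 */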

/*
 * Fill the blob_sha1 field of an origin if it hasn't been filled yet,
 * so that a later call to fill_origin_blob() can use it to locate the
 * data.  blob_sha1 for an origin is also used to pass the blame for
 * the entire file to the parent, to detect the case where a child's
 * blob is identical to that of its parent.
 *
 * This also fills origin->mode for the corresponding tree path.
 */
static int fill_blob_sha1_and_mode(struct repository *r,
				   struct blame_origin *origin)
{
	if (!is_null_oid(&origin->blob_oid))
		return 0;
	if (get_tree_entry(&origin->commit->object.oid, origin->path, &origin->blob_oid, &origin->mode))
		goto error_out;
	if (oid_object_info(r, &origin->blob_oid, NULL) != OBJ_BLOB)
		goto error_out;
	return 0;
 error_out:
	oidclr(&origin->blob_oid);
	origin->mode = S_IFINVALID;
	return -1;
}

/*
 * We have an origin -- check if the same path exists in the
 * parent and return an origin structure to represent it.
 */
static struct blame_origin *find_origin(struct repository *r,
					struct commit *parent,
					struct blame_origin *origin)
{
	struct blame_origin *porigin;
	struct diff_options diff_opts;
	const char *paths[2];

	/* First check any existing origins */
	for (porigin = get_blame_suspects(parent); porigin; porigin = porigin->next)
		if (!strcmp(porigin->path, origin->path)) {
			/*
			 * The same path between origin and its parent
			 * without renaming -- the most common case.
			 */
			return blame_origin_incref(porigin);
		}

	/* See if the origin->path is different between parent
	 * and origin first.  Most of the time they are the
	 * same and diff-tree is fairly efficient about this.
	 */
	repo_diff_setup(r, &diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.detect_rename = 0;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
	paths[0] = origin->path;
	paths[1] = NULL;

	parse_pathspec(&diff_opts.pathspec,
		       PATHSPEC_ALL_MAGIC & ~PATHSPEC_LITERAL,
		       PATHSPEC_LITERAL_PATH, "", paths);
	diff_setup_done(&diff_opts);

	if (is_null_oid(&origin->commit->object.oid))
		do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
	else
		diff_tree_oid(get_commit_tree_oid(parent),
			      get_commit_tree_oid(origin->commit),
			      "", &diff_opts);
	diffcore_std(&diff_opts);

	if (!diff_queued_diff.nr) {
		/* The path is the same as parent */
		porigin = get_origin(parent, origin->path);
		oidcpy(&porigin->blob_oid, &origin->blob_oid);
		porigin->mode = origin->mode;
	} else {
		/*
		 * Since origin->path is a pathspec, if the parent
		 * commit had it as a directory, we will see a whole
		 * bunch of deletion of files in the directory that we
		 * do not care about.
		 */
		int i;
		struct diff_filepair *p = NULL;
		for (i = 0; i < diff_queued_diff.nr; i++) {
			const char *name;
			p = diff_queued_diff.queue[i];
			name = p->one->path ? p->one->path : p->two->path;
			if (!strcmp(name, origin->path))
				break;
		}
		if (!p)
			die("internal error in blame::find_origin");
		switch (p->status) {
		default:
			die("internal error in blame::find_origin (%c)",
			    p->status);
		case 'M':
			porigin = get_origin(parent, origin->path);
			oidcpy(&porigin->blob_oid, &p->one->oid);
			porigin->mode = p->one->mode;
			break;
		case 'A':
		case 'T':
			/* Did not exist in parent, or type changed */
			break;
		}
	}
	diff_flush(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);
	return porigin;
}

/*
 * We have an origin -- find the path that corresponds to it in its
 * parent and return an origin structure to represent it.
 */
static struct blame_origin *find_rename(struct repository *r,
					struct commit *parent,
					struct blame_origin *origin)
{
	struct blame_origin *porigin = NULL;
	struct diff_options diff_opts;
	int i;

	repo_diff_setup(r, &diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.detect_rename = DIFF_DETECT_RENAME;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
	diff_opts.single_follow = origin->path;
	diff_setup_done(&diff_opts);

	if (is_null_oid(&origin->commit->object.oid))
		do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
	else
		diff_tree_oid(get_commit_tree_oid(parent),
			      get_commit_tree_oid(origin->commit),
			      "", &diff_opts);
	diffcore_std(&diff_opts);

	for (i = 0; i < diff_queued_diff.nr; i++) {
		struct diff_filepair *p = diff_queued_diff.queue[i];
		if ((p->status == 'R' || p->status == 'C') &&
		    !strcmp(p->two->path, origin->path)) {
			porigin = get_origin(parent, p->one->path);
			oidcpy(&porigin->blob_oid, &p->one->oid);
			porigin->mode = p->one->mode;
			break;
		}
	}
	diff_flush(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);
	return porigin;
}

/*
 * Append a new blame entry to a given output queue.
 */
static void add_blame_entry(struct blame_entry ***queue,
			    const struct blame_entry *src)
{
	struct blame_entry *e = xmalloc(sizeof(*e));
	memcpy(e, src, sizeof(*e));
	blame_origin_incref(e->suspect);

	e->next = **queue;
	**queue = e;
	*queue = &e->next;
}

/*
 * src typically is on-stack; we want to copy the information in it to
 * a malloced blame_entry that gets added to the given queue.  The
 * origin of dst loses a refcnt.
 */
static void dup_entry(struct blame_entry ***queue,
		      struct blame_entry *dst, struct blame_entry *src)
{
	blame_origin_incref(src->suspect);
	blame_origin_decref(dst->suspect);
	memcpy(dst, src, sizeof(*src));
	dst->next = **queue;
	**queue = dst;
	*queue = &dst->next;
}

const char *blame_nth_line(struct blame_scoreboard *sb, long lno)
{
	return sb->final_buf + sb->lineno[lno];
}
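
/*
 * Added note on the queue convention used by add_blame_entry() and
 * dup_entry() above (not part of the original source): "queue" is a
 * pointer to the tail link of a singly linked list that is being built
 * front-to-back.  Appending entry e boils down to
 *
 *	e->next = **queue;	// splice in front of whatever follows
 *	**queue = e;		// the old tail link now points at e
 *	*queue = &e->next;	// e->next becomes the new tail link
 *
 * so repeated calls keep the appended entries in insertion order.
 */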

/*
 * It is known that lines between tlno and same came from the parent,
 * and e has an overlap with that range.  It is also known that
 * parent's line plno corresponds to e's line tlno.
 *
 *                <---- e ----->
 *                   <------>
 *                   <------------>
 *                   <------------>
 *                   <------------------>
 *
 * Split e into potentially three parts; before this chunk, the chunk
 * to be blamed for the parent, and after that portion.
 */
static void split_overlap(struct blame_entry *split,
			  struct blame_entry *e,
			  int tlno, int plno, int same,
			  struct blame_origin *parent)
{
	int chunk_end_lno;
	memset(split, 0, sizeof(struct blame_entry [3]));

	if (e->s_lno < tlno) {
		/* there is a pre-chunk part not blamed on parent */
		split[0].suspect = blame_origin_incref(e->suspect);
		split[0].lno = e->lno;
		split[0].s_lno = e->s_lno;
		split[0].num_lines = tlno - e->s_lno;
		split[1].lno = e->lno + tlno - e->s_lno;
		split[1].s_lno = plno;
	}
	else {
		split[1].lno = e->lno;
		split[1].s_lno = plno + (e->s_lno - tlno);
	}

	if (same < e->s_lno + e->num_lines) {
		/* there is a post-chunk part not blamed on parent */
		split[2].suspect = blame_origin_incref(e->suspect);
		split[2].lno = e->lno + (same - e->s_lno);
		split[2].s_lno = e->s_lno + (same - e->s_lno);
		split[2].num_lines = e->s_lno + e->num_lines - same;
		chunk_end_lno = split[2].lno;
	}
	else
		chunk_end_lno = e->lno + e->num_lines;
	split[1].num_lines = chunk_end_lno - split[1].lno;

	/*
	 * if it turns out there is nothing to blame the parent for,
	 * forget about the splitting.  !split[1].suspect signals this.
	 */
	if (split[1].num_lines < 1)
		return;
	split[1].suspect = blame_origin_incref(parent);
}

/*
 * split_overlap() divided an existing blame e into up to three parts
 * in split.  Any assigned blame is moved to queue to
 * reflect the split.
 */
static void split_blame(struct blame_entry ***blamed,
			struct blame_entry ***unblamed,
			struct blame_entry *split,
			struct blame_entry *e)
{
	if (split[0].suspect && split[2].suspect) {
		/* The first part (reuse storage for the existing entry e) */
		dup_entry(unblamed, e, &split[0]);

		/* The last part -- me */
		add_blame_entry(unblamed, &split[2]);

		/* ... and the middle part -- parent */
		add_blame_entry(blamed, &split[1]);
	}
	else if (!split[0].suspect && !split[2].suspect)
		/*
		 * The parent covers the entire area; reuse storage for
		 * e and replace it with the parent.
		 */
		dup_entry(blamed, e, &split[1]);
	else if (split[0].suspect) {
		/* me and then parent */
		dup_entry(unblamed, e, &split[0]);
		add_blame_entry(blamed, &split[1]);
	}
	else {
		/* parent and then me */
		dup_entry(blamed, e, &split[1]);
		add_blame_entry(unblamed, &split[2]);
	}
}

/*
 * After splitting the blame, the origins used by the
 * on-stack blame_entry should lose one refcnt each.
 */
static void decref_split(struct blame_entry *split)
{
	int i;

	for (i = 0; i < 3; i++)
		blame_origin_decref(split[i].suspect);
}

/*
 * reverse_blame reverses the list given in head, appending tail.
 * That allows us to build lists in reverse order, then reverse them
 * afterwards.  This can be faster than building the list in proper
 * order right away.  The reason is that building in proper order
 * requires writing a link in the _previous_ element, while building
 * in reverse order just requires placing the list head into the
 * _current_ element.
 */

static struct blame_entry *reverse_blame(struct blame_entry *head,
					 struct blame_entry *tail)
{
	while (head) {
		struct blame_entry *next = head->next;
		head->next = tail;
		tail = head;
		head = next;
	}
	return tail;
}
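
/*
 * Worked example for split_overlap() above (added note, not part of
 * the original source).  With e = { lno = 100, s_lno = 10,
 * num_lines = 10 } and tlno = 12, same = 17, plno = 5:
 *
 *	split[0]: suspect lines 10-11 stay with e's suspect (2 lines)
 *	split[1]: parent lines 5-9 get blamed on the parent (5 lines)
 *	split[2]: suspect lines 17-19 stay with e's suspect (3 lines)
 *
 * split[1].lno = 102 and split[2].lno = 107, so the final-image
 * numbering stays consistent with the original entry.
 */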

/*
 * Process one hunk from the patch between the current suspect for
 * blame_entry e and its parent.  This first blames any unfinished
 * entries before the chunk (which is where target and parent start
 * differing) on the parent, and then splits blame entries at the
 * start and at the end of the difference region.  Since use of -M and
 * -C options may lead to overlapping/duplicate source line number
 * ranges, all we can rely on from sorting/merging is the order of the
 * first suspect line number.
 */
static void blame_chunk(struct blame_entry ***dstq, struct blame_entry ***srcq,
			int tlno, int offset, int same,
			struct blame_origin *parent)
{
	struct blame_entry *e = **srcq;
	struct blame_entry *samep = NULL, *diffp = NULL;

	while (e && e->s_lno < tlno) {
		struct blame_entry *next = e->next;
		/*
		 * current record starts before differing portion.  If
		 * it reaches into it, we need to split it up and
		 * examine the second part separately.
		 */
		if (e->s_lno + e->num_lines > tlno) {
			/* Move second half to a new record */
			int len = tlno - e->s_lno;
			struct blame_entry *n = xcalloc(1, sizeof(struct blame_entry));
			n->suspect = e->suspect;
			n->lno = e->lno + len;
			n->s_lno = e->s_lno + len;
			n->num_lines = e->num_lines - len;
			e->num_lines = len;
			e->score = 0;
			/* Push new record to diffp */
			n->next = diffp;
			diffp = n;
		} else
			blame_origin_decref(e->suspect);
		/* Pass blame for everything before the differing
		 * chunk to the parent */
		e->suspect = blame_origin_incref(parent);
		e->s_lno += offset;
		e->next = samep;
		samep = e;
		e = next;
	}
	/*
	 * As we don't know how much of a common stretch after this
	 * diff will occur, the currently blamed parts are all that we
	 * can assign to the parent for now.
	 */

	if (samep) {
		**dstq = reverse_blame(samep, **dstq);
		*dstq = &samep->next;
	}
	/*
	 * Prepend the split off portions: everything after e starts
	 * after the blameable portion.
	 */
	e = reverse_blame(diffp, e);

	/*
	 * Now retain records on the target while parts are different
	 * from the parent.
	 */
	samep = NULL;
	diffp = NULL;
	while (e && e->s_lno < same) {
		struct blame_entry *next = e->next;

		/*
		 * If current record extends into sameness, need to split.
		 */
		if (e->s_lno + e->num_lines > same) {
			/*
			 * Move second half to a new record to be
			 * processed by later chunks
			 */
			int len = same - e->s_lno;
			struct blame_entry *n = xcalloc(1, sizeof(struct blame_entry));
			n->suspect = blame_origin_incref(e->suspect);
			n->lno = e->lno + len;
			n->s_lno = e->s_lno + len;
			n->num_lines = e->num_lines - len;
			e->num_lines = len;
			e->score = 0;
			/* Push new record to samep */
			n->next = samep;
			samep = n;
		}
		e->next = diffp;
		diffp = e;
		e = next;
	}
	**srcq = reverse_blame(diffp, reverse_blame(samep, e));
	/* Move across elements that are in the unblamable portion */
	if (diffp)
		*srcq = &diffp->next;
}

struct blame_chunk_cb_data {
	struct blame_origin *parent;
	long offset;
	struct blame_entry **dstq;
	struct blame_entry **srcq;
};

/* diff chunks are from parent to target */
static int blame_chunk_cb(long start_a, long count_a,
			  long start_b, long count_b, void *data)
{
	struct blame_chunk_cb_data *d = data;
	if (start_a - start_b != d->offset)
		die("internal error in blame::blame_chunk_cb");
	blame_chunk(&d->dstq, &d->srcq, start_b, start_a - start_b,
		    start_b + count_b, d->parent);
	d->offset = start_a + count_a - (start_b + count_b);
	return 0;
}

/*
 * We are looking at the origin 'target' and aiming to pass blame
 * for the lines it is suspected for to its parent.  Run diff to find
 * which lines came from parent and pass blame for them.
 */
static void pass_blame_to_parent(struct blame_scoreboard *sb,
				 struct blame_origin *target,
				 struct blame_origin *parent)
{
	mmfile_t file_p, file_o;
	struct blame_chunk_cb_data d;
	struct blame_entry *newdest = NULL;

	if (!target->suspects)
		return; /* nothing remains for this target */

	d.parent = parent;
	d.offset = 0;
	d.dstq = &newdest; d.srcq = &target->suspects;

	fill_origin_blob(&sb->revs->diffopt, parent, &file_p, &sb->num_read_blob);
	fill_origin_blob(&sb->revs->diffopt, target, &file_o, &sb->num_read_blob);
	sb->num_get_patch++;

	if (diff_hunks(&file_p, &file_o, blame_chunk_cb, &d, sb->xdl_opts))
		die("unable to generate diff (%s -> %s)",
		    oid_to_hex(&parent->commit->object.oid),
		    oid_to_hex(&target->commit->object.oid));
	/* The rest are the same as the parent */
	blame_chunk(&d.dstq, &d.srcq, INT_MAX, d.offset, INT_MAX, parent);
	*d.dstq = NULL;
	queue_blames(sb, parent, newdest);

	return;
}

/*
 * The lines in blame_entry after splitting blames many times can become
 * very small and trivial, and at some point it becomes pointless to
 * blame the parents.  E.g. "\t\t}\n\t}\n\n" appears everywhere in any
 * ordinary C program, and it is not worth saying it was copied from a
 * totally unrelated file in the parent.
 *
 * Compute how trivial the lines in the blame_entry are.
 */
unsigned blame_entry_score(struct blame_scoreboard *sb, struct blame_entry *e)
{
	unsigned score;
	const char *cp, *ep;

	if (e->score)
		return e->score;

	score = 1;
	cp = blame_nth_line(sb, e->lno);
	ep = blame_nth_line(sb, e->lno + e->num_lines);
	while (cp < ep) {
		unsigned ch = *((unsigned char *)cp);
		if (isalnum(ch))
			score++;
		cp++;
	}
	e->score = score;
	return score;
}
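
/*
 * Added example for blame_entry_score() above (not part of the
 * original source): the score is 1 plus the number of alphanumeric
 * characters in the entry's lines.  An entry holding only "\t\t}\n"
 * scores 1, while one holding "\treturn x;\n" scores 8, so the former
 * is far more likely to fall under move_score/copy_score and be left
 * alone by -M/-C processing.
 */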

/*
 * best_so_far[] and potential[] are both a split of an existing blame_entry
 * that passes blame to the parent.  Maintain best_so_far as the best split
 * so far, by comparing potential and best_so_far and copying potential into
 * best_so_far as needed.
 */
static void copy_split_if_better(struct blame_scoreboard *sb,
				 struct blame_entry *best_so_far,
				 struct blame_entry *potential)
{
	int i;

	if (!potential[1].suspect)
		return;
	if (best_so_far[1].suspect) {
		if (blame_entry_score(sb, &potential[1]) <
		    blame_entry_score(sb, &best_so_far[1]))
			return;
	}

	for (i = 0; i < 3; i++)
		blame_origin_incref(potential[i].suspect);
	decref_split(best_so_far);
	memcpy(best_so_far, potential, sizeof(struct blame_entry[3]));
}

/*
 * We are looking at a part of the final image represented by
 * ent (tlno and same are offset by ent->s_lno).
 * tlno is where we are looking at in the final image.
 * up to (but not including) same match preimage.
 * plno is where we are looking at in the preimage.
 *
 * <-------------- final image ---------------------->
 *       <------ent------>
 *         ^tlno ^same
 *    <---------preimage----->
 *         ^plno
 *
 * All line numbers are 0-based.
 */
static void handle_split(struct blame_scoreboard *sb,
			 struct blame_entry *ent,
			 int tlno, int plno, int same,
			 struct blame_origin *parent,
			 struct blame_entry *split)
{
	if (ent->num_lines <= tlno)
		return;
	if (tlno < same) {
		struct blame_entry potential[3];
		tlno += ent->s_lno;
		same += ent->s_lno;
		split_overlap(potential, ent, tlno, plno, same, parent);
		copy_split_if_better(sb, split, potential);
		decref_split(potential);
	}
}

struct handle_split_cb_data {
	struct blame_scoreboard *sb;
	struct blame_entry *ent;
	struct blame_origin *parent;
	struct blame_entry *split;
	long plno;
	long tlno;
};

static int handle_split_cb(long start_a, long count_a,
			   long start_b, long count_b, void *data)
{
	struct handle_split_cb_data *d = data;
	handle_split(d->sb, d->ent, d->tlno, d->plno, start_b, d->parent,
		     d->split);
	d->plno = start_a + count_a;
	d->tlno = start_b + count_b;
	return 0;
}
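
/*
 * Worked example for handle_split() above (added note, not part of
 * the original source).  Coordinates come in relative to ent: with
 * ent->s_lno = 50, tlno = 2, same = 5 and plno = 3, the call converts
 * to absolute suspect lines 52-54 and offers to blame those three
 * lines on parent lines 3-5 via split_overlap(), keeping the result
 * only if copy_split_if_better() finds it scores at least as well as
 * the best split seen so far.
 */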

/*
 * Find the lines from parent that are the same as ent so that
 * we can pass blames to it.  file_p has the blob contents for
 * the parent.
 */
static void find_copy_in_blob(struct blame_scoreboard *sb,
			      struct blame_entry *ent,
			      struct blame_origin *parent,
			      struct blame_entry *split,
			      mmfile_t *file_p)
{
	const char *cp;
	mmfile_t file_o;
	struct handle_split_cb_data d;

	memset(&d, 0, sizeof(d));
	d.sb = sb; d.ent = ent; d.parent = parent; d.split = split;
	/*
	 * Prepare mmfile that contains only the lines in ent.
	 */
	cp = blame_nth_line(sb, ent->lno);
	file_o.ptr = (char *) cp;
	file_o.size = blame_nth_line(sb, ent->lno + ent->num_lines) - cp;

	/*
	 * file_o is a part of final image we are annotating.
	 * file_p partially may match that image.
	 */
	memset(split, 0, sizeof(struct blame_entry [3]));
	if (diff_hunks(file_p, &file_o, handle_split_cb, &d, sb->xdl_opts))
		die("unable to generate diff (%s)",
		    oid_to_hex(&parent->commit->object.oid));
	/* remainder, if any, all match the preimage */
	handle_split(sb, ent, d.tlno, d.plno, ent->num_lines, parent, split);
}

/* Move all blame entries from list *source that have a score smaller
 * than score_min to the front of list *small.
 * Returns a pointer to the link pointing to the old head of the small list.
 */

static struct blame_entry **filter_small(struct blame_scoreboard *sb,
					 struct blame_entry **small,
					 struct blame_entry **source,
					 unsigned score_min)
{
	struct blame_entry *p = *source;
	struct blame_entry *oldsmall = *small;
	while (p) {
		if (blame_entry_score(sb, p) <= score_min) {
			*small = p;
			small = &p->next;
			p = *small;
		} else {
			*source = p;
			source = &p->next;
			p = *source;
		}
	}
	*small = oldsmall;
	*source = NULL;
	return small;
}

/*
 * See if the lines the target is currently suspected for can be
 * attributed to the parent.
 */
static void find_move_in_parent(struct blame_scoreboard *sb,
				struct blame_entry ***blamed,
				struct blame_entry **toosmall,
				struct blame_origin *target,
				struct blame_origin *parent)
{
	struct blame_entry *e, split[3];
	struct blame_entry *unblamed = target->suspects;
	struct blame_entry *leftover = NULL;
	mmfile_t file_p;

	if (!unblamed)
		return; /* nothing remains for this target */

	fill_origin_blob(&sb->revs->diffopt, parent, &file_p, &sb->num_read_blob);
	if (!file_p.ptr)
		return;

	/* At each iteration, unblamed has a NULL-terminated list of
	 * entries that have not yet been tested for blame.  leftover
	 * contains the reversed list of entries that have been tested
	 * without being assignable to the parent.
	 */
	do {
		struct blame_entry **unblamedtail = &unblamed;
		struct blame_entry *next;
		for (e = unblamed; e; e = next) {
			next = e->next;
			find_copy_in_blob(sb, e, parent, split, &file_p);
			if (split[1].suspect &&
			    sb->move_score < blame_entry_score(sb, &split[1])) {
				split_blame(blamed, &unblamedtail, split, e);
			} else {
				e->next = leftover;
				leftover = e;
			}
			decref_split(split);
		}
		*unblamedtail = NULL;
		toosmall = filter_small(sb, toosmall, &unblamed, sb->move_score);
	} while (unblamed);
	target->suspects = reverse_blame(leftover, NULL);
}

struct blame_list {
	struct blame_entry *ent;
	struct blame_entry split[3];
};

/*
 * Count the number of entries the target is suspected for,
 * and prepare a list of entries and their best splits.
 */
static struct blame_list *setup_blame_list(struct blame_entry *unblamed,
					   int *num_ents_p)
{
	struct blame_entry *e;
	int num_ents, i;
	struct blame_list *blame_list = NULL;

	for (e = unblamed, num_ents = 0; e; e = e->next)
		num_ents++;
	if (num_ents) {
		blame_list = xcalloc(num_ents, sizeof(struct blame_list));
		for (e = unblamed, i = 0; e; e = e->next)
			blame_list[i++].ent = e;
	}
	*num_ents_p = num_ents;
	return blame_list;
}

/*
 * For lines the target is suspected for, see if we can find code
 * movement across file boundaries in the parent commit.  porigin is
 * the path in the parent we already tried.
 */
static void find_copy_in_parent(struct blame_scoreboard *sb,
				struct blame_entry ***blamed,
				struct blame_entry **toosmall,
				struct blame_origin *target,
				struct commit *parent,
				struct blame_origin *porigin,
				int opt)
{
	struct diff_options diff_opts;
	int i, j;
	struct blame_list *blame_list;
	int num_ents;
	struct blame_entry *unblamed = target->suspects;
	struct blame_entry *leftover = NULL;

	if (!unblamed)
		return; /* nothing remains for this target */

	repo_diff_setup(sb->repo, &diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;

	diff_setup_done(&diff_opts);

	/* Try "find copies harder" on new path if requested;
	 * we do not want to use diffcore_rename() actually to
	 * match things up; find_copies_harder is set only to
	 * force diff_tree_oid() to feed all filepairs to diff_queue,
	 * and this code needs to be after diff_setup_done(), which
	 * usually makes find-copies-harder imply copy detection.
	 */
	if ((opt & PICKAXE_BLAME_COPY_HARDEST)
	    || ((opt & PICKAXE_BLAME_COPY_HARDER)
		&& (!porigin || strcmp(target->path, porigin->path))))
		diff_opts.flags.find_copies_harder = 1;

	if (is_null_oid(&target->commit->object.oid))
		do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
	else
		diff_tree_oid(get_commit_tree_oid(parent),
			      get_commit_tree_oid(target->commit),
			      "", &diff_opts);

	if (!diff_opts.flags.find_copies_harder)
		diffcore_std(&diff_opts);

	do {
		struct blame_entry **unblamedtail = &unblamed;
		blame_list = setup_blame_list(unblamed, &num_ents);

		for (i = 0; i < diff_queued_diff.nr; i++) {
			struct diff_filepair *p = diff_queued_diff.queue[i];
			struct blame_origin *norigin;
			mmfile_t file_p;
			struct blame_entry potential[3];

			if (!DIFF_FILE_VALID(p->one))
				continue; /* does not exist in parent */
			if (S_ISGITLINK(p->one->mode))
				continue; /* ignore git links */
			if (porigin && !strcmp(p->one->path, porigin->path))
				/* find_move already dealt with this path */
				continue;

			norigin = get_origin(parent, p->one->path);
			oidcpy(&norigin->blob_oid, &p->one->oid);
			norigin->mode = p->one->mode;
			fill_origin_blob(&sb->revs->diffopt, norigin, &file_p, &sb->num_read_blob);
			if (!file_p.ptr)
				continue;

			for (j = 0; j < num_ents; j++) {
				find_copy_in_blob(sb, blame_list[j].ent,
						  norigin, potential, &file_p);
				copy_split_if_better(sb, blame_list[j].split,
						     potential);
				decref_split(potential);
			}
			blame_origin_decref(norigin);
		}

		for (j = 0; j < num_ents; j++) {
			struct blame_entry *split = blame_list[j].split;
			if (split[1].suspect &&
			    sb->copy_score < blame_entry_score(sb, &split[1])) {
				split_blame(blamed, &unblamedtail, split,
					    blame_list[j].ent);
			} else {
				blame_list[j].ent->next = leftover;
				leftover = blame_list[j].ent;
			}
			decref_split(split);
		}
		free(blame_list);
		*unblamedtail = NULL;
		toosmall = filter_small(sb, toosmall, &unblamed, sb->copy_score);
	} while (unblamed);
	target->suspects = reverse_blame(leftover, NULL);
	diff_flush(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);
}

/*
 * The blobs of origin and porigin exactly match, so everything
 * origin is suspected for can be blamed on the parent.
 */
static void pass_whole_blame(struct blame_scoreboard *sb,
			     struct blame_origin *origin, struct blame_origin *porigin)
{
	struct blame_entry *e, *suspects;

	if (!porigin->file.ptr && origin->file.ptr) {
		/* Steal its file */
		porigin->file = origin->file;
		origin->file.ptr = NULL;
	}
	suspects = origin->suspects;
	origin->suspects = NULL;
	for (e = suspects; e; e = e->next) {
		blame_origin_incref(porigin);
		blame_origin_decref(e->suspect);
		e->suspect = porigin;
	}
	queue_blames(sb, porigin, suspects);
}

/*
 * We pass blame from the current commit to its parents.  We keep
 * saying "parent" (and "porigin"), but what we mean is to find a
 * scapegoat to exonerate ourselves.
 */
static struct commit_list *first_scapegoat(struct rev_info *revs, struct commit *commit,
					   int reverse)
{
	if (!reverse) {
		if (revs->first_parent_only &&
		    commit->parents &&
		    commit->parents->next) {
			free_commit_list(commit->parents->next);
			commit->parents->next = NULL;
		}
		return commit->parents;
	}
	return lookup_decoration(&revs->children, &commit->object);
}

static int num_scapegoats(struct rev_info *revs, struct commit *commit, int reverse)
{
	struct commit_list *l = first_scapegoat(revs, commit, reverse);
	return commit_list_count(l);
}

/* Distribute collected unsorted blames to the respective sorted lists
 * in the various origins.
 */
static void distribute_blame(struct blame_scoreboard *sb, struct blame_entry *blamed)
{
	blamed = llist_mergesort(blamed, get_next_blame, set_next_blame,
				 compare_blame_suspect);
	while (blamed)
	{
		struct blame_origin *porigin = blamed->suspect;
		struct blame_entry *suspects = NULL;
		do {
			struct blame_entry *next = blamed->next;
			blamed->next = suspects;
			suspects = blamed;
			blamed = next;
		} while (blamed && blamed->suspect == porigin);
		suspects = reverse_blame(suspects, NULL);
		queue_blames(sb, porigin, suspects);
	}
}

#define MAXSG 16

static void pass_blame(struct blame_scoreboard *sb, struct blame_origin *origin, int opt)
{
	struct rev_info *revs = sb->revs;
	int i, pass, num_sg;
	struct commit *commit = origin->commit;
	struct commit_list *sg;
	struct blame_origin *sg_buf[MAXSG];
	struct blame_origin *porigin, **sg_origin = sg_buf;
	struct blame_entry *toosmall = NULL;
	struct blame_entry *blames, **blametail = &blames;

	num_sg = num_scapegoats(revs, commit, sb->reverse);
	if (!num_sg)
		goto finish;
	else if (num_sg < ARRAY_SIZE(sg_buf))
		memset(sg_buf, 0, sizeof(sg_buf));
	else
		sg_origin = xcalloc(num_sg, sizeof(*sg_origin));

	/*
	 * The first pass looks for unrenamed path to optimize for
	 * common cases, then we look for renames in the second pass.
	 */
	for (pass = 0; pass < 2 - sb->no_whole_file_rename; pass++) {
		struct blame_origin *(*find)(struct repository *, struct commit *, struct blame_origin *);
		find = pass ? find_rename : find_origin;

		for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
		     i < num_sg && sg;
		     sg = sg->next, i++) {
			struct commit *p = sg->item;
			int j, same;

			if (sg_origin[i])
				continue;
			if (parse_commit(p))
				continue;
			porigin = find(sb->repo, p, origin);
			if (!porigin)
				continue;
			if (oideq(&porigin->blob_oid, &origin->blob_oid)) {
				pass_whole_blame(sb, origin, porigin);
				blame_origin_decref(porigin);
				goto finish;
			}
			for (j = same = 0; j < i; j++)
				if (sg_origin[j] &&
				    oideq(&sg_origin[j]->blob_oid, &porigin->blob_oid)) {
					same = 1;
					break;
				}
			if (!same)
				sg_origin[i] = porigin;
			else
				blame_origin_decref(porigin);
		}
	}

	sb->num_commits++;
	for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
	     i < num_sg && sg;
	     sg = sg->next, i++) {
		struct blame_origin *porigin = sg_origin[i];
		if (!porigin)
			continue;
		if (!origin->previous) {
			blame_origin_incref(porigin);
			origin->previous = porigin;
		}
		pass_blame_to_parent(sb, origin, porigin);
		if (!origin->suspects)
			goto finish;
	}

	/*
	 * Optionally find moves in parents' files.
	 */
	if (opt & PICKAXE_BLAME_MOVE) {
		filter_small(sb, &toosmall, &origin->suspects, sb->move_score);
		if (origin->suspects) {
			for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
			     i < num_sg && sg;
			     sg = sg->next, i++) {
				struct blame_origin *porigin = sg_origin[i];
				if (!porigin)
					continue;
				find_move_in_parent(sb, &blametail, &toosmall, origin, porigin);
				if (!origin->suspects)
					break;
			}
		}
	}

	/*
	 * Optionally find copies from parents' files.
	 */
	if (opt & PICKAXE_BLAME_COPY) {
		if (sb->copy_score > sb->move_score)
			filter_small(sb, &toosmall, &origin->suspects, sb->copy_score);
		else if (sb->copy_score < sb->move_score) {
			origin->suspects = blame_merge(origin->suspects, toosmall);
			toosmall = NULL;
			filter_small(sb, &toosmall, &origin->suspects, sb->copy_score);
		}
		if (!origin->suspects)
			goto finish;

		for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
		     i < num_sg && sg;
		     sg = sg->next, i++) {
			struct blame_origin *porigin = sg_origin[i];
			find_copy_in_parent(sb, &blametail, &toosmall,
					    origin, sg->item, porigin, opt);
			if (!origin->suspects)
				goto finish;
		}
	}

finish:
	*blametail = NULL;
	distribute_blame(sb, blames);
	/*
	 * prepend toosmall to origin->suspects
	 *
	 * There is no point in sorting: this ends up on a big
	 * unsorted list in the caller anyway.
	 */
	if (toosmall) {
		struct blame_entry **tail = &toosmall;
		while (*tail)
			tail = &(*tail)->next;
		*tail = origin->suspects;
		origin->suspects = toosmall;
	}
	for (i = 0; i < num_sg; i++) {
		if (sg_origin[i]) {
			drop_origin_blob(sg_origin[i]);
			blame_origin_decref(sg_origin[i]);
		}
	}
	drop_origin_blob(origin);
	if (sg_buf != sg_origin)
		free(sg_origin);
}
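
/*
 * Added summary of pass_blame() above (not part of the original
 * source): for each scapegoat we first try the same path
 * (find_origin) and then renames (find_rename); a parent whose blob
 * is identical takes the whole blame via pass_whole_blame().
 * Otherwise line-level blame is passed with pass_blame_to_parent(),
 * and, when -M/-C are in effect, leftover lines are retried as moves
 * (find_move_in_parent) and copies (find_copy_in_parent), with
 * entries scoring below the thresholds parked on the "toosmall" list.
 */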

/*
 * The main loop -- while we have blobs with lines whose true origin
 * is still unknown, pick one blob, and allow its lines to pass blames
 * to its parents.
 */
void assign_blame(struct blame_scoreboard *sb, int opt)
{
	struct rev_info *revs = sb->revs;
	struct commit *commit = prio_queue_get(&sb->commits);

	while (commit) {
		struct blame_entry *ent;
		struct blame_origin *suspect = get_blame_suspects(commit);

		/* find one suspect to break down */
		while (suspect && !suspect->suspects)
			suspect = suspect->next;

		if (!suspect) {
			commit = prio_queue_get(&sb->commits);
			continue;
		}

		assert(commit == suspect->commit);

		/*
		 * We will use this suspect later in the loop,
		 * so hold onto it in the meantime.
		 */
		blame_origin_incref(suspect);
		parse_commit(commit);
		if (sb->reverse ||
		    (!(commit->object.flags & UNINTERESTING) &&
		     !(revs->max_age != -1 && commit->date < revs->max_age)))
			pass_blame(sb, suspect, opt);
		else {
			commit->object.flags |= UNINTERESTING;
			if (commit->object.parsed)
				mark_parents_uninteresting(commit);
		}
		/* treat root commit as boundary */
		if (!commit->parents && !sb->show_root)
			commit->object.flags |= UNINTERESTING;

		/* Take responsibility for the remaining entries */
		ent = suspect->suspects;
		if (ent) {
			suspect->guilty = 1;
			for (;;) {
				struct blame_entry *next = ent->next;
				if (sb->found_guilty_entry)
					sb->found_guilty_entry(ent, sb->found_guilty_entry_data);
				if (next) {
					ent = next;
					continue;
				}
				ent->next = sb->ent;
				sb->ent = suspect->suspects;
				suspect->suspects = NULL;
				break;
			}
		}
		blame_origin_decref(suspect);

		if (sb->debug) /* sanity */
			sanity_check_refcnt(sb);
	}
}

static const char *get_next_line(const char *start, const char *end)
{
	const char *nl = memchr(start, '\n', end - start);
	return nl ? nl + 1 : end;
}

/*
 * To allow quick access to the contents of the nth line in the
 * final image, prepare an index in the scoreboard.
 */
static int prepare_lines(struct blame_scoreboard *sb)
{
	const char *buf = sb->final_buf;
	unsigned long len = sb->final_buf_size;
	const char *end = buf + len;
	const char *p;
	int *lineno;
	int num = 0;

	for (p = buf; p < end; p = get_next_line(p, end))
		num++;

	ALLOC_ARRAY(sb->lineno, num + 1);
	lineno = sb->lineno;

	for (p = buf; p < end; p = get_next_line(p, end))
		*lineno++ = p - buf;

	*lineno = len;

	sb->num_lines = num;
	return sb->num_lines;
}

static struct commit *find_single_final(struct rev_info *revs,
					const char **name_p)
{
	int i;
	struct commit *found = NULL;
	const char *name = NULL;

	for (i = 0; i < revs->pending.nr; i++) {
		struct object *obj = revs->pending.objects[i].item;
		if (obj->flags & UNINTERESTING)
			continue;
		obj = deref_tag(the_repository, obj, NULL, 0);
		if (obj->type != OBJ_COMMIT)
			die("Non commit %s?", revs->pending.objects[i].name);
		if (found)
			die("More than one commit to dig from %s and %s?",
			    revs->pending.objects[i].name, name);
		found = (struct commit *)obj;
		name = revs->pending.objects[i].name;
	}
	if (name_p)
		*name_p = xstrdup_or_null(name);
	return found;
}

static struct commit *dwim_reverse_initial(struct rev_info *revs,
					   const char **name_p)
{
	/*
	 * DWIM "git blame --reverse ONE -- PATH" as
	 * "git blame --reverse ONE..HEAD -- PATH" but only do so
	 * when it makes sense.
	 */
	struct object *obj;
	struct commit *head_commit;
	struct object_id head_oid;

	if (revs->pending.nr != 1)
		return NULL;

	/* Is that sole rev a committish? */
	obj = revs->pending.objects[0].item;
	obj = deref_tag(the_repository, obj, NULL, 0);
	if (obj->type != OBJ_COMMIT)
		return NULL;

	/* Do we have HEAD? */
	if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL))
		return NULL;
	head_commit = lookup_commit_reference_gently(the_repository,
						     &head_oid, 1);
	if (!head_commit)
		return NULL;

	/* Turn "ONE" into "ONE..HEAD" then */
	obj->flags |= UNINTERESTING;
	add_pending_object(revs, &head_commit->object, "HEAD");

	if (name_p)
		*name_p = revs->pending.objects[0].name;
	return (struct commit *)obj;
}

static struct commit *find_single_initial(struct rev_info *revs,
					  const char **name_p)
{
	int i;
	struct commit *found = NULL;
	const char *name = NULL;

	/*
	 * There must be one and only one negative commit, and it must be
	 * the boundary.
	 */
	for (i = 0; i < revs->pending.nr; i++) {
		struct object *obj = revs->pending.objects[i].item;
		if (!(obj->flags & UNINTERESTING))
			continue;
		obj = deref_tag(the_repository, obj, NULL, 0);
		if (obj->type != OBJ_COMMIT)
			die("Non commit %s?", revs->pending.objects[i].name);
		if (found)
			die("More than one commit to dig up from, %s and %s?",
			    revs->pending.objects[i].name, name);
		found = (struct commit *) obj;
		name = revs->pending.objects[i].name;
	}

	if (!name)
		found = dwim_reverse_initial(revs, &name);
	if (!name)
		die("No commit to dig up from?");

	if (name_p)
		*name_p = xstrdup(name);
	return found;
}

void init_scoreboard(struct blame_scoreboard *sb)
{
	memset(sb, 0, sizeof(struct blame_scoreboard));
	sb->move_score = BLAME_DEFAULT_MOVE_SCORE;
	sb->copy_score = BLAME_DEFAULT_COPY_SCORE;
}

void setup_scoreboard(struct blame_scoreboard *sb,
		      const char *path,
		      struct blame_origin **orig)
{
	const char *final_commit_name = NULL;
	struct blame_origin *o;
	struct commit *final_commit = NULL;
	enum object_type type;

	init_blame_suspects(&blame_suspects);

	if (sb->reverse && sb->contents_from)
		die(_("--contents and --reverse do not blend well."));

	if (!sb->repo)
		BUG("repo is NULL");

	if (!sb->reverse) {
		sb->final = find_single_final(sb->revs, &final_commit_name);
		sb->commits.compare = compare_commits_by_commit_date;
	} else {
		sb->final = find_single_initial(sb->revs, &final_commit_name);
		sb->commits.compare = compare_commits_by_reverse_commit_date;
	}

	if (sb->final && sb->contents_from)
		die(_("cannot use --contents with final commit object name"));

	if (sb->reverse && sb->revs->first_parent_only)
		sb->revs->children.name = NULL;

	if (!sb->final) {
		/*
		 * "--not A B -- path" without anything positive;
		 * do not default to HEAD, but use the working tree
		 * or "--contents".
		 */
		setup_work_tree();
		sb->final = fake_working_tree_commit(sb->repo,
						     &sb->revs->diffopt,
						     path, sb->contents_from);
		add_pending_object(sb->revs, &(sb->final->object), ":");
	}

	if (sb->reverse && sb->revs->first_parent_only) {
		final_commit = find_single_final(sb->revs, NULL);
		if (!final_commit)
			die(_("--reverse and --first-parent together require specified latest commit"));
	}

	/*
	 * If we have bottom, this will mark the ancestors of the
	 * bottom commits we would reach while traversing as
	 * uninteresting.
	 */
	if (prepare_revision_walk(sb->revs))
		die(_("revision walk setup failed"));

	if (sb->reverse && sb->revs->first_parent_only) {
		struct commit *c = final_commit;

		sb->revs->children.name = "children";
		while (c->parents &&
		       !oideq(&c->object.oid, &sb->final->object.oid)) {
			struct commit_list *l = xcalloc(1, sizeof(*l));

			l->item = c;
			if (add_decoration(&sb->revs->children,
					   &c->parents->item->object, l))
				BUG("not unique item in first-parent chain");
			c = c->parents->item;
		}

		if (!oideq(&c->object.oid, &sb->final->object.oid))
			die(_("--reverse --first-parent together require range along first-parent chain"));
	}

	if (is_null_oid(&sb->final->object.oid)) {
		o = get_blame_suspects(sb->final);
		sb->final_buf = xmemdupz(o->file.ptr, o->file.size);
		sb->final_buf_size = o->file.size;
	}
	else {
		o = get_origin(sb->final, path);
		if (fill_blob_sha1_and_mode(sb->repo, o))
			die(_("no such path %s in %s"), path, final_commit_name);

		if (sb->revs->diffopt.flags.allow_textconv &&
		    textconv_object(sb->repo, path, o->mode, &o->blob_oid, 1, (char **) &sb->final_buf,
				    &sb->final_buf_size))
			;
		else
			sb->final_buf = read_object_file(&o->blob_oid, &type,
							 &sb->final_buf_size);

		if (!sb->final_buf)
			die(_("cannot read blob %s for path %s"),
			    oid_to_hex(&o->blob_oid),
			    path);
	}
	sb->num_read_blob++;
	prepare_lines(sb);

	if (orig)
		*orig = o;

	free((char *)final_commit_name);
}


struct blame_entry *blame_entry_prepend(struct blame_entry *head,
					long start, long end,
					struct blame_origin *o)
{
	struct blame_entry *new_head = xcalloc(1, sizeof(struct blame_entry));
	new_head->lno = start;
	new_head->num_lines = end - start;
	new_head->suspect = o;
	new_head->s_lno = start;
	new_head->next = head;
	blame_origin_incref(o);
	return new_head;
}
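
/*
 * Rough usage sketch (added note, not part of the original source and
 * not a verbatim copy of builtin/blame.c): a caller is expected to
 * drive this file roughly as follows, with the exact range and option
 * wiring left out.
 *
 *	struct blame_scoreboard sb;
 *	struct blame_origin *o;
 *
 *	init_scoreboard(&sb);
 *	sb.revs = revs;			// caller-prepared rev_info
 *	sb.repo = repo;
 *	setup_scoreboard(&sb, path, &o);
 *	o->suspects = blame_entry_prepend(NULL, 0, sb.num_lines, o);
 *	prio_queue_put(&sb.commits, o->commit);
 *	assign_blame(&sb, opt);		// opt: PICKAXE_BLAME_* flags
 *	blame_sort_final(&sb);
 *	blame_coalesce(&sb);
 *	... walk sb.ent and report each blame_entry ...
 */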