#include "cache.h"
#include "refs.h"
#include "object-store.h"
#include "cache-tree.h"
#include "mergesort.h"
#include "diff.h"
#include "diffcore.h"
#include "tag.h"
#include "blame.h"
#include "alloc.h"
#include "commit-slab.h"

define_commit_slab(blame_suspects, struct blame_origin *);
static struct blame_suspects blame_suspects;

struct blame_origin *get_blame_suspects(struct commit *commit)
{
	struct blame_origin **result;

	result = blame_suspects_peek(&blame_suspects, commit);

	return result ? *result : NULL;
}

static void set_blame_suspects(struct commit *commit, struct blame_origin *origin)
{
	*blame_suspects_at(&blame_suspects, commit) = origin;
}

void blame_origin_decref(struct blame_origin *o)
{
	if (o && --o->refcnt <= 0) {
		struct blame_origin *p, *l = NULL;
		if (o->previous)
			blame_origin_decref(o->previous);
		free(o->file.ptr);
		/* Should be present exactly once in commit chain */
		for (p = get_blame_suspects(o->commit); p; l = p, p = p->next) {
			if (p == o) {
				if (l)
					l->next = p->next;
				else
					set_blame_suspects(o->commit, p->next);
				free(o);
				return;
			}
		}
		die("internal error in blame_origin_decref");
	}
}

/*
 * Given a commit and a path in it, create a new origin structure.
 * The callers that add blame to the scoreboard should use
 * get_origin() to obtain a shared, refcounted copy instead of calling
 * this function directly.
 */
static struct blame_origin *make_origin(struct commit *commit, const char *path)
{
	struct blame_origin *o;
	FLEX_ALLOC_STR(o, path, path);
	o->commit = commit;
	o->refcnt = 1;
	o->next = get_blame_suspects(commit);
	set_blame_suspects(commit, o);
	return o;
}
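/*
 * Every origin created by make_origin() is linked into its commit's
 * suspect list exactly once; blame_origin_decref() above relies on
 * that invariant when it unlinks and frees the origin once the
 * refcount drops to zero.
 */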
/*
 * Locate an existing origin or create a new one.
 * This moves the origin to the front of the per-commit suspect list.
 */
static struct blame_origin *get_origin(struct commit *commit, const char *path)
{
	struct blame_origin *o, *l;

	for (o = get_blame_suspects(commit), l = NULL; o; l = o, o = o->next) {
		if (!strcmp(o->path, path)) {
			/* bump to front */
			if (l) {
				l->next = o->next;
				o->next = get_blame_suspects(commit);
				set_blame_suspects(commit, o);
			}
			return blame_origin_incref(o);
		}
	}
	return make_origin(commit, path);
}

static void verify_working_tree_path(struct repository *repo,
				     struct commit *work_tree, const char *path)
{
	struct commit_list *parents;
	int pos;

	for (parents = work_tree->parents; parents; parents = parents->next) {
		const struct object_id *commit_oid = &parents->item->object.oid;
		struct object_id blob_oid;
		unsigned mode;

		if (!get_tree_entry(commit_oid, path, &blob_oid, &mode) &&
		    oid_object_info(repo, &blob_oid, NULL) == OBJ_BLOB)
			return;
	}

	pos = index_name_pos(repo->index, path, strlen(path));
	if (pos >= 0)
		; /* path is in the index */
	else if (-1 - pos < repo->index->cache_nr &&
		 !strcmp(repo->index->cache[-1 - pos]->name, path))
		; /* path is in the index, unmerged */
	else
		die("no such path '%s' in HEAD", path);
}

static struct commit_list **append_parent(struct commit_list **tail, const struct object_id *oid)
{
	struct commit *parent;

	parent = lookup_commit_reference(the_repository, oid);
	if (!parent)
		die("no such commit %s", oid_to_hex(oid));
	return &commit_list_insert(parent, tail)->next;
}

static void append_merge_parents(struct commit_list **tail)
{
	int merge_head;
	struct strbuf line = STRBUF_INIT;

	merge_head = open(git_path_merge_head(the_repository), O_RDONLY);
	if (merge_head < 0) {
		if (errno == ENOENT)
			return;
		die("cannot open '%s' for reading",
		    git_path_merge_head(the_repository));
	}

	while (!strbuf_getwholeline_fd(&line, merge_head, '\n')) {
		struct object_id oid;
		if (line.len < GIT_SHA1_HEXSZ || get_oid_hex(line.buf, &oid))
			die("unknown line in '%s': %s",
			    git_path_merge_head(the_repository), line.buf);
		tail = append_parent(tail, &oid);
	}
	close(merge_head);
	strbuf_release(&line);
}

/*
 * This isn't as simple as passing sb->buf and sb->len, because we
 * want to transfer ownership of the buffer to the commit (so we
 * must use detach).
 */
static void set_commit_buffer_from_strbuf(struct commit *c, struct strbuf *sb)
{
	size_t len;
	void *buf = strbuf_detach(sb, &len);
	set_commit_buffer(the_repository, c, buf, len);
}
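/*
 * The commit buffer synthesized below for the fake working tree
 * commit looks roughly like this:
 *
 *	tree 0000000000000000000000000000000000000000
 *	parent <oid of HEAD>
 *	parent <oid of each MERGE_HEAD entry, if any>
 *	author Not Committed Yet <not.committed.yet> <timestamp>
 *	committer Not Committed Yet <not.committed.yet> <timestamp>
 *
 *	Version of <path> from <path, contents_from or "standard input">
 */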
/*
 * Prepare a dummy commit that represents the work tree (or staged) item.
 * Note that annotating the work tree item never works in reverse.
 */
static struct commit *fake_working_tree_commit(struct repository *repo,
					       struct diff_options *opt,
					       const char *path,
					       const char *contents_from)
{
	struct commit *commit;
	struct blame_origin *origin;
	struct commit_list **parent_tail, *parent;
	struct object_id head_oid;
	struct strbuf buf = STRBUF_INIT;
	const char *ident;
	time_t now;
	int len;
	struct cache_entry *ce;
	unsigned mode;
	struct strbuf msg = STRBUF_INIT;

	read_index(repo->index);
	time(&now);
	commit = alloc_commit_node(the_repository);
	commit->object.parsed = 1;
	commit->date = now;
	parent_tail = &commit->parents;

	if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL))
		die("no such ref: HEAD");

	parent_tail = append_parent(parent_tail, &head_oid);
	append_merge_parents(parent_tail);
	verify_working_tree_path(repo, commit, path);

	origin = make_origin(commit, path);

	ident = fmt_ident("Not Committed Yet", "not.committed.yet", NULL, 0);
	strbuf_addstr(&msg, "tree 0000000000000000000000000000000000000000\n");
	for (parent = commit->parents; parent; parent = parent->next)
		strbuf_addf(&msg, "parent %s\n",
			    oid_to_hex(&parent->item->object.oid));
	strbuf_addf(&msg,
		    "author %s\n"
		    "committer %s\n\n"
		    "Version of %s from %s\n",
		    ident, ident, path,
		    (!contents_from ? path :
		     (!strcmp(contents_from, "-") ? "standard input" : contents_from)));
	set_commit_buffer_from_strbuf(commit, &msg);

	if (!contents_from || strcmp("-", contents_from)) {
		struct stat st;
		const char *read_from;
		char *buf_ptr;
		unsigned long buf_len;

		if (contents_from) {
			if (stat(contents_from, &st) < 0)
				die_errno("Cannot stat '%s'", contents_from);
			read_from = contents_from;
		}
		else {
			if (lstat(path, &st) < 0)
				die_errno("Cannot lstat '%s'", path);
			read_from = path;
		}
		mode = canon_mode(st.st_mode);

		switch (st.st_mode & S_IFMT) {
		case S_IFREG:
			if (opt->flags.allow_textconv &&
			    textconv_object(read_from, mode, &null_oid, 0, &buf_ptr, &buf_len))
				strbuf_attach(&buf, buf_ptr, buf_len, buf_len + 1);
			else if (strbuf_read_file(&buf, read_from, st.st_size) != st.st_size)
				die_errno("cannot open or read '%s'", read_from);
			break;
		case S_IFLNK:
			if (strbuf_readlink(&buf, read_from, st.st_size) < 0)
				die_errno("cannot readlink '%s'", read_from);
			break;
		default:
			die("unsupported file type %s", read_from);
		}
	}
	else {
		/* Reading from stdin */
		mode = 0;
		if (strbuf_read(&buf, 0, 0) < 0)
			die_errno("failed to read from stdin");
	}
	convert_to_git(repo->index, path, buf.buf, buf.len, &buf, 0);
	origin->file.ptr = buf.buf;
	origin->file.size = buf.len;
	pretend_object_file(buf.buf, buf.len, OBJ_BLOB, &origin->blob_oid);
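	/*
	 * pretend_object_file() above registered the in-core buffer as
	 * a cached object, so later lookups of origin->blob_oid find
	 * the working tree contents without anything being written to
	 * the object database.
	 */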
	/*
	 * Read the current index, replace the path entry with
	 * origin->blob_oid without mucking with its mode or type
	 * bits; we are not going to write this index out -- we just
	 * want to run "diff-index --cached".
	 */
	discard_index(repo->index);
	read_index(repo->index);

	len = strlen(path);
	if (!mode) {
		int pos = index_name_pos(repo->index, path, len);
		if (0 <= pos)
			mode = repo->index->cache[pos]->ce_mode;
		else
			/* Let's not bother reading from HEAD tree */
			mode = S_IFREG | 0644;
	}
	ce = make_empty_cache_entry(repo->index, len);
	oidcpy(&ce->oid, &origin->blob_oid);
	memcpy(ce->name, path, len);
	ce->ce_flags = create_ce_flags(0);
	ce->ce_namelen = len;
	ce->ce_mode = create_ce_mode(mode);
	add_index_entry(repo->index, ce,
			ADD_CACHE_OK_TO_ADD | ADD_CACHE_OK_TO_REPLACE);

	cache_tree_invalidate_path(repo->index, path);

	return commit;
}

static int diff_hunks(mmfile_t *file_a, mmfile_t *file_b,
		      xdl_emit_hunk_consume_func_t hunk_func, void *cb_data, int xdl_opts)
{
	xpparam_t xpp = {0};
	xdemitconf_t xecfg = {0};
	xdemitcb_t ecb = {NULL};

	xpp.flags = xdl_opts;
	xecfg.hunk_func = hunk_func;
	ecb.priv = cb_data;
	return xdi_diff(file_a, file_b, &xpp, &xecfg, &ecb);
}

/*
 * Given an origin, prepare an mmfile_t structure to be used by the
 * diff machinery.
 */
static void fill_origin_blob(struct diff_options *opt,
			     struct blame_origin *o, mmfile_t *file, int *num_read_blob)
{
	if (!o->file.ptr) {
		enum object_type type;
		unsigned long file_size;

		(*num_read_blob)++;
		if (opt->flags.allow_textconv &&
		    textconv_object(o->path, o->mode, &o->blob_oid, 1, &file->ptr, &file_size))
			;
		else
			file->ptr = read_object_file(&o->blob_oid, &type,
						     &file_size);
		file->size = file_size;

		if (!file->ptr)
			die("Cannot read blob %s for path %s",
			    oid_to_hex(&o->blob_oid),
			    o->path);
		o->file = *file;
	}
	else
		*file = o->file;
}

static void drop_origin_blob(struct blame_origin *o)
{
	FREE_AND_NULL(o->file.ptr);
}

/*
 * Any merge of blames happens on lists of blames that arrived via
 * different parents in a single suspect. In this case, we want to
 * sort according to the suspect line numbers as opposed to the final
 * image line numbers. The function body is somewhat longish because
 * it avoids unnecessary writes.
 */
static struct blame_entry *blame_merge(struct blame_entry *list1,
				       struct blame_entry *list2)
{
	struct blame_entry *p1 = list1, *p2 = list2,
		**tail = &list1;

	if (!p1)
		return p2;
	if (!p2)
		return p1;

	if (p1->s_lno <= p2->s_lno) {
		do {
			tail = &p1->next;
			if ((p1 = *tail) == NULL) {
				*tail = p2;
				return list1;
			}
		} while (p1->s_lno <= p2->s_lno);
	}
	for (;;) {
		*tail = p2;
		do {
			tail = &p2->next;
			if ((p2 = *tail) == NULL) {
				*tail = p1;
				return list1;
			}
		} while (p1->s_lno > p2->s_lno);
		*tail = p1;
		do {
			tail = &p1->next;
			if ((p1 = *tail) == NULL) {
				*tail = p2;
				return list1;
			}
		} while (p1->s_lno <= p2->s_lno);
	}
}

static void *get_next_blame(const void *p)
{
	return ((struct blame_entry *)p)->next;
}

static void set_next_blame(void *p1, void *p2)
{
	((struct blame_entry *)p1)->next = p2;
}
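/*
 * get_next_blame() and set_next_blame() adapt the singly linked
 * blame_entry list to the generic llist_mergesort() interface; they
 * are used by blame_sort_final() and distribute_blame() below.
 */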
/*
 * Final image line numbers are all different, so we don't need a
 * three-way comparison here.
 */
static int compare_blame_final(const void *p1, const void *p2)
{
	return ((struct blame_entry *)p1)->lno > ((struct blame_entry *)p2)->lno
		? 1 : -1;
}

static int compare_blame_suspect(const void *p1, const void *p2)
{
	const struct blame_entry *s1 = p1, *s2 = p2;
	/*
	 * to allow for collating suspects, we sort according to the
	 * respective pointer value as the primary sorting criterion.
	 * The actual relation is pretty unimportant as long as it
	 * establishes a total order. Comparing as integers gives us
	 * that.
	 */
	if (s1->suspect != s2->suspect)
		return (intptr_t)s1->suspect > (intptr_t)s2->suspect ? 1 : -1;
	if (s1->s_lno == s2->s_lno)
		return 0;
	return s1->s_lno > s2->s_lno ? 1 : -1;
}

void blame_sort_final(struct blame_scoreboard *sb)
{
	sb->ent = llist_mergesort(sb->ent, get_next_blame, set_next_blame,
				  compare_blame_final);
}

static int compare_commits_by_reverse_commit_date(const void *a,
						  const void *b,
						  void *c)
{
	return -compare_commits_by_commit_date(a, b, c);
}

/*
 * For debugging -- origin is refcounted, and this asserts that
 * we do not underflow.
 */
static void sanity_check_refcnt(struct blame_scoreboard *sb)
{
	int baa = 0;
	struct blame_entry *ent;

	for (ent = sb->ent; ent; ent = ent->next) {
		/* Nobody should have zero or negative refcnt */
		if (ent->suspect->refcnt <= 0) {
			fprintf(stderr, "%s in %s has negative refcnt %d\n",
				ent->suspect->path,
				oid_to_hex(&ent->suspect->commit->object.oid),
				ent->suspect->refcnt);
			baa = 1;
		}
	}
	if (baa)
		sb->on_sanity_fail(sb, baa);
}

/*
 * If two blame entries that are next to each other came from
 * contiguous lines in the same origin (i.e. <commit, path> pair),
 * merge them together.
 */
void blame_coalesce(struct blame_scoreboard *sb)
{
	struct blame_entry *ent, *next;

	for (ent = sb->ent; ent && (next = ent->next); ent = next) {
		if (ent->suspect == next->suspect &&
		    ent->s_lno + ent->num_lines == next->s_lno) {
			ent->num_lines += next->num_lines;
			ent->next = next->next;
			blame_origin_decref(next->suspect);
			free(next);
			ent->score = 0;
			next = ent; /* again */
		}
	}

	if (sb->debug) /* sanity */
		sanity_check_refcnt(sb);
}

/*
 * Merge the given sorted list of blames into a preexisting origin.
 * If there were no previous blames to that commit, it is entered into
 * the commit priority queue of the scoreboard.
 */
static void queue_blames(struct blame_scoreboard *sb, struct blame_origin *porigin,
			 struct blame_entry *sorted)
{
	if (porigin->suspects)
		porigin->suspects = blame_merge(porigin->suspects, sorted);
	else {
		struct blame_origin *o;
		for (o = get_blame_suspects(porigin->commit); o; o = o->next) {
			if (o->suspects) {
				porigin->suspects = sorted;
				return;
			}
		}
		porigin->suspects = sorted;
		prio_queue_put(&sb->commits, porigin->commit);
	}
}
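/*
 * Note that queue_blames() calls prio_queue_put() only when none of
 * the commit's origins had suspects queued before this call, so a
 * commit that already has pending work is not pushed onto sb->commits
 * a second time.
 */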
/*
 * Fill the blob_oid field of an origin if it hasn't been filled yet,
 * so that a later call to fill_origin_blob() can use it to locate the
 * data. blob_oid for an origin is also used to pass the blame for the
 * entire file to the parent to detect the case where a child's blob
 * is identical to that of its parent.
 *
 * This also fills origin->mode for the corresponding tree path.
 */
static int fill_blob_sha1_and_mode(struct repository *repo,
				   struct blame_origin *origin)
{
	if (!is_null_oid(&origin->blob_oid))
		return 0;
	if (get_tree_entry(&origin->commit->object.oid, origin->path, &origin->blob_oid, &origin->mode))
		goto error_out;
	if (oid_object_info(repo, &origin->blob_oid, NULL) != OBJ_BLOB)
		goto error_out;
	return 0;
 error_out:
	oidclr(&origin->blob_oid);
	origin->mode = S_IFINVALID;
	return -1;
}

/*
 * We have an origin -- check if the same path exists in the
 * parent and return an origin structure to represent it.
 */
static struct blame_origin *find_origin(struct commit *parent,
					struct blame_origin *origin)
{
	struct blame_origin *porigin;
	struct diff_options diff_opts;
	const char *paths[2];

	/* First check any existing origins */
	for (porigin = get_blame_suspects(parent); porigin; porigin = porigin->next)
		if (!strcmp(porigin->path, origin->path)) {
			/*
			 * The same path between origin and its parent
			 * without renaming -- the most common case.
			 */
			return blame_origin_incref(porigin);
		}

	/* See if the origin->path is different between parent
	 * and origin first. Most of the time they are the
	 * same and diff-tree is fairly efficient about this.
	 */
	diff_setup(&diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.detect_rename = 0;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
	paths[0] = origin->path;
	paths[1] = NULL;

	parse_pathspec(&diff_opts.pathspec,
		       PATHSPEC_ALL_MAGIC & ~PATHSPEC_LITERAL,
		       PATHSPEC_LITERAL_PATH, "", paths);
	diff_setup_done(&diff_opts);

	if (is_null_oid(&origin->commit->object.oid))
		do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
	else
		diff_tree_oid(get_commit_tree_oid(parent),
			      get_commit_tree_oid(origin->commit),
			      "", &diff_opts);
	diffcore_std(&diff_opts);

	if (!diff_queued_diff.nr) {
		/* The path is the same as parent */
		porigin = get_origin(parent, origin->path);
		oidcpy(&porigin->blob_oid, &origin->blob_oid);
		porigin->mode = origin->mode;
	} else {
		/*
		 * Since origin->path is a pathspec, if the parent
		 * commit had it as a directory, we will see a whole
		 * bunch of deletion of files in the directory that we
		 * do not care about.
		 */
		int i;
		struct diff_filepair *p = NULL;
		for (i = 0; i < diff_queued_diff.nr; i++) {
			const char *name;
			p = diff_queued_diff.queue[i];
			name = p->one->path ? p->one->path : p->two->path;
			if (!strcmp(name, origin->path))
				break;
		}
		if (!p)
			die("internal error in blame::find_origin");
		switch (p->status) {
		default:
			die("internal error in blame::find_origin (%c)",
			    p->status);
		case 'M':
			porigin = get_origin(parent, origin->path);
			oidcpy(&porigin->blob_oid, &p->one->oid);
			porigin->mode = p->one->mode;
			break;
		case 'A':
		case 'T':
			/* Did not exist in parent, or type changed */
			break;
		}
	}
	diff_flush(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);
	return porigin;
}
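/*
 * find_origin() above only handles the common, unrenamed case cheaply
 * (a pathspec-limited diff against the parent with rename detection
 * turned off).  When it finds nothing, pass_blame() falls back to
 * find_rename() below, which runs full rename detection to locate the
 * path under its old name in the parent.
 */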
/*
 * We have an origin -- find the path that corresponds to it in its
 * parent and return an origin structure to represent it.
 */
static struct blame_origin *find_rename(struct commit *parent,
					struct blame_origin *origin)
{
	struct blame_origin *porigin = NULL;
	struct diff_options diff_opts;
	int i;

	diff_setup(&diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.detect_rename = DIFF_DETECT_RENAME;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;
	diff_opts.single_follow = origin->path;
	diff_setup_done(&diff_opts);

	if (is_null_oid(&origin->commit->object.oid))
		do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
	else
		diff_tree_oid(get_commit_tree_oid(parent),
			      get_commit_tree_oid(origin->commit),
			      "", &diff_opts);
	diffcore_std(&diff_opts);

	for (i = 0; i < diff_queued_diff.nr; i++) {
		struct diff_filepair *p = diff_queued_diff.queue[i];
		if ((p->status == 'R' || p->status == 'C') &&
		    !strcmp(p->two->path, origin->path)) {
			porigin = get_origin(parent, p->one->path);
			oidcpy(&porigin->blob_oid, &p->one->oid);
			porigin->mode = p->one->mode;
			break;
		}
	}
	diff_flush(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);
	return porigin;
}

/*
 * Append a new blame entry to a given output queue.
 */
static void add_blame_entry(struct blame_entry ***queue,
			    const struct blame_entry *src)
{
	struct blame_entry *e = xmalloc(sizeof(*e));
	memcpy(e, src, sizeof(*e));
	blame_origin_incref(e->suspect);

	e->next = **queue;
	**queue = e;
	*queue = &e->next;
}

/*
 * src typically is on-stack; we want to copy the information in it to
 * a malloced blame_entry that gets added to the given queue. The
 * origin of dst loses a refcnt.
 */
static void dup_entry(struct blame_entry ***queue,
		      struct blame_entry *dst, struct blame_entry *src)
{
	blame_origin_incref(src->suspect);
	blame_origin_decref(dst->suspect);
	memcpy(dst, src, sizeof(*src));
	dst->next = **queue;
	**queue = dst;
	*queue = &dst->next;
}

const char *blame_nth_line(struct blame_scoreboard *sb, long lno)
{
	return sb->final_buf + sb->lineno[lno];
}
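/*
 * Worked example for split_overlap() below: with e covering suspect
 * lines 20-29 (s_lno = 20, num_lines = 10, lno = 10) and tlno = 23,
 * same = 27, plno = 5, the entry is cut into split[0] = suspect lines
 * 20-22 (kept on e's suspect), split[1] = 4 lines blamed on the
 * parent starting at parent line 5, and split[2] = suspect lines
 * 27-29 (kept on e's suspect).
 */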
/*
 * It is known that lines between tlno and same came from parent, and e
 * has an overlap with that range. It is also known that parent's
 * line plno corresponds to e's line tlno.
 *
 *                <---- e ----->
 *                   <------>
 *                   <------------>
 *             <------------>
 *             <------------------>
 *
 * Split e into potentially three parts; before this chunk, the chunk
 * to be blamed for the parent, and after that portion.
 */
static void split_overlap(struct blame_entry *split,
			  struct blame_entry *e,
			  int tlno, int plno, int same,
			  struct blame_origin *parent)
{
	int chunk_end_lno;
	memset(split, 0, sizeof(struct blame_entry [3]));

	if (e->s_lno < tlno) {
		/* there is a pre-chunk part not blamed on parent */
		split[0].suspect = blame_origin_incref(e->suspect);
		split[0].lno = e->lno;
		split[0].s_lno = e->s_lno;
		split[0].num_lines = tlno - e->s_lno;
		split[1].lno = e->lno + tlno - e->s_lno;
		split[1].s_lno = plno;
	}
	else {
		split[1].lno = e->lno;
		split[1].s_lno = plno + (e->s_lno - tlno);
	}

	if (same < e->s_lno + e->num_lines) {
		/* there is a post-chunk part not blamed on parent */
		split[2].suspect = blame_origin_incref(e->suspect);
		split[2].lno = e->lno + (same - e->s_lno);
		split[2].s_lno = e->s_lno + (same - e->s_lno);
		split[2].num_lines = e->s_lno + e->num_lines - same;
		chunk_end_lno = split[2].lno;
	}
	else
		chunk_end_lno = e->lno + e->num_lines;
	split[1].num_lines = chunk_end_lno - split[1].lno;

	/*
	 * if it turns out there is nothing to blame the parent for,
	 * forget about the splitting. !split[1].suspect signals this.
	 */
	if (split[1].num_lines < 1)
		return;
	split[1].suspect = blame_origin_incref(parent);
}

/*
 * split_overlap() divided an existing blame e into up to three parts
 * in split. Any assigned blame is moved to the queue to reflect the
 * split.
 */
static void split_blame(struct blame_entry ***blamed,
			struct blame_entry ***unblamed,
			struct blame_entry *split,
			struct blame_entry *e)
{
	if (split[0].suspect && split[2].suspect) {
		/* The first part (reuse storage for the existing entry e) */
		dup_entry(unblamed, e, &split[0]);

		/* The last part -- me */
		add_blame_entry(unblamed, &split[2]);

		/* ... and the middle part -- parent */
		add_blame_entry(blamed, &split[1]);
	}
	else if (!split[0].suspect && !split[2].suspect)
		/*
		 * The parent covers the entire area; reuse storage for
		 * e and replace it with the parent.
		 */
		dup_entry(blamed, e, &split[1]);
	else if (split[0].suspect) {
		/* me and then parent */
		dup_entry(unblamed, e, &split[0]);
		add_blame_entry(blamed, &split[1]);
	}
	else {
		/* parent and then me */
		dup_entry(blamed, e, &split[1]);
		add_blame_entry(unblamed, &split[2]);
	}
}

/*
 * After splitting the blame, the origins used by the
 * on-stack blame_entry should lose one refcnt each.
 */
static void decref_split(struct blame_entry *split)
{
	int i;

	for (i = 0; i < 3; i++)
		blame_origin_decref(split[i].suspect);
}

/*
 * reverse_blame reverses the list given in head, appending tail.
 * That allows us to build lists in reverse order, then reverse them
 * afterwards. This can be faster than building the list in proper
 * order right away. The reason is that building in proper order
 * requires writing a link in the _previous_ element, while building
 * in reverse order just requires placing the list head into the
 * _current_ element.
 */
static struct blame_entry *reverse_blame(struct blame_entry *head,
					 struct blame_entry *tail)
{
	while (head) {
		struct blame_entry *next = head->next;
		head->next = tail;
		tail = head;
		head = next;
	}
	return tail;
}

/*
 * Process one hunk from the patch between the current suspect for
 * blame_entry e and its parent.
 * This first blames any unfinished entries before the chunk (which is
 * where target and parent start differing) on the parent, and then
 * splits blame entries at the start and at the end of the difference
 * region. Since use of -M and -C options may lead to
 * overlapping/duplicate source line number ranges, all we can rely on
 * from sorting/merging is the order of the first suspect line number.
 */
static void blame_chunk(struct blame_entry ***dstq, struct blame_entry ***srcq,
			int tlno, int offset, int same,
			struct blame_origin *parent)
{
	struct blame_entry *e = **srcq;
	struct blame_entry *samep = NULL, *diffp = NULL;

	while (e && e->s_lno < tlno) {
		struct blame_entry *next = e->next;
		/*
		 * current record starts before differing portion. If
		 * it reaches into it, we need to split it up and
		 * examine the second part separately.
		 */
		if (e->s_lno + e->num_lines > tlno) {
			/* Move second half to a new record */
			int len = tlno - e->s_lno;
			struct blame_entry *n = xcalloc(1, sizeof(struct blame_entry));
			n->suspect = e->suspect;
			n->lno = e->lno + len;
			n->s_lno = e->s_lno + len;
			n->num_lines = e->num_lines - len;
			e->num_lines = len;
			e->score = 0;
			/* Push new record to diffp */
			n->next = diffp;
			diffp = n;
		} else
			blame_origin_decref(e->suspect);
		/* Pass blame for everything before the differing
		 * chunk to the parent */
		e->suspect = blame_origin_incref(parent);
		e->s_lno += offset;
		e->next = samep;
		samep = e;
		e = next;
	}
	/*
	 * As we don't know how much of a common stretch after this
	 * diff will occur, the currently blamed parts are all that we
	 * can assign to the parent for now.
	 */

	if (samep) {
		**dstq = reverse_blame(samep, **dstq);
		*dstq = &samep->next;
	}
	/*
	 * Prepend the split off portions: everything after e starts
	 * after the blameable portion.
	 */
	e = reverse_blame(diffp, e);

	/*
	 * Now retain records on the target while parts are different
	 * from the parent.
	 */
	samep = NULL;
	diffp = NULL;
	while (e && e->s_lno < same) {
		struct blame_entry *next = e->next;

		/*
		 * If current record extends into sameness, need to split.
		 */
		if (e->s_lno + e->num_lines > same) {
			/*
			 * Move second half to a new record to be
			 * processed by later chunks
			 */
			int len = same - e->s_lno;
			struct blame_entry *n = xcalloc(1, sizeof(struct blame_entry));
			n->suspect = blame_origin_incref(e->suspect);
			n->lno = e->lno + len;
			n->s_lno = e->s_lno + len;
			n->num_lines = e->num_lines - len;
			e->num_lines = len;
			e->score = 0;
			/* Push new record to samep */
			n->next = samep;
			samep = n;
		}
		e->next = diffp;
		diffp = e;
		e = next;
	}
	**srcq = reverse_blame(diffp, reverse_blame(samep, e));
	/* Move across elements that are in the unblamable portion */
	if (diffp)
		*srcq = &diffp->next;
}

struct blame_chunk_cb_data {
	struct blame_origin *parent;
	long offset;
	struct blame_entry **dstq;
	struct blame_entry **srcq;
};

/* diff chunks are from parent to target */
static int blame_chunk_cb(long start_a, long count_a,
			  long start_b, long count_b, void *data)
{
	struct blame_chunk_cb_data *d = data;
	if (start_a - start_b != d->offset)
		die("internal error in blame::blame_chunk_cb");
	blame_chunk(&d->dstq, &d->srcq, start_b, start_a - start_b,
		    start_b + count_b, d->parent);
	d->offset = start_a + count_a - (start_b + count_b);
	return 0;
}

/*
 * We are looking at the origin 'target' and aiming to pass blame
 * for the lines it is suspected of to its parent. Run diff to find
 * which lines came from the parent and pass blame for them.
 */
static void pass_blame_to_parent(struct blame_scoreboard *sb,
				 struct blame_origin *target,
				 struct blame_origin *parent)
{
	mmfile_t file_p, file_o;
	struct blame_chunk_cb_data d;
	struct blame_entry *newdest = NULL;

	if (!target->suspects)
		return; /* nothing remains for this target */

	d.parent = parent;
	d.offset = 0;
	d.dstq = &newdest; d.srcq = &target->suspects;

	fill_origin_blob(&sb->revs->diffopt, parent, &file_p, &sb->num_read_blob);
	fill_origin_blob(&sb->revs->diffopt, target, &file_o, &sb->num_read_blob);
	sb->num_get_patch++;

	if (diff_hunks(&file_p, &file_o, blame_chunk_cb, &d, sb->xdl_opts))
		die("unable to generate diff (%s -> %s)",
		    oid_to_hex(&parent->commit->object.oid),
		    oid_to_hex(&target->commit->object.oid));
	/* The rest are the same as the parent */
	blame_chunk(&d.dstq, &d.srcq, INT_MAX, d.offset, INT_MAX, parent);
	*d.dstq = NULL;
	queue_blames(sb, parent, newdest);

	return;
}

/*
 * The lines in blame_entry after splitting blames many times can become
 * very small and trivial, and at some point it becomes pointless to
 * blame the parents. E.g. "\t\t}\n\t}\n\n" appears everywhere in any
 * ordinary C program, and it is not worth saying it was copied from
 * a totally unrelated file in the parent.
 *
 * Compute how trivial the lines in the blame_entry are.
 */
unsigned blame_entry_score(struct blame_scoreboard *sb, struct blame_entry *e)
{
	unsigned score;
	const char *cp, *ep;

	if (e->score)
		return e->score;

	score = 1;
	cp = blame_nth_line(sb, e->lno);
	ep = blame_nth_line(sb, e->lno + e->num_lines);
	while (cp < ep) {
		unsigned ch = *((unsigned char *)cp);
		if (isalnum(ch))
			score++;
		cp++;
	}
	e->score = score;
	return score;
}
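/*
 * The score is 1 plus the number of alphanumeric characters the entry
 * spans in the final image.  The move/copy detection below only
 * accepts a candidate split whose middle part scores strictly higher
 * than move_score/copy_score, so trivial runs of braces and blank
 * lines are not chased across files.
 */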
/*
 * best_so_far[] and potential[] are both a split of an existing
 * blame_entry that passes blame to the parent. Maintain best_so_far
 * as the best split so far, by comparing potential and best_so_far
 * and copying potential into best_so_far as needed.
 */
static void copy_split_if_better(struct blame_scoreboard *sb,
				 struct blame_entry *best_so_far,
				 struct blame_entry *potential)
{
	int i;

	if (!potential[1].suspect)
		return;
	if (best_so_far[1].suspect) {
		if (blame_entry_score(sb, &potential[1]) <
		    blame_entry_score(sb, &best_so_far[1]))
			return;
	}

	for (i = 0; i < 3; i++)
		blame_origin_incref(potential[i].suspect);
	decref_split(best_so_far);
	memcpy(best_so_far, potential, sizeof(struct blame_entry[3]));
}

/*
 * We are looking at a part of the final image represented by
 * ent (tlno and same are offset by ent->s_lno).
 * tlno is where we are looking at in the final image.
 * up to (but not including) same match preimage.
 * plno is where we are looking at in the preimage.
 *
 * <-------------- final image ---------------------->
 *       <------ent------>
 *         ^tlno ^same
 *    <---------preimage----->
 *         ^plno
 *
 * All line numbers are 0-based.
 */
static void handle_split(struct blame_scoreboard *sb,
			 struct blame_entry *ent,
			 int tlno, int plno, int same,
			 struct blame_origin *parent,
			 struct blame_entry *split)
{
	if (ent->num_lines <= tlno)
		return;
	if (tlno < same) {
		struct blame_entry potential[3];
		tlno += ent->s_lno;
		same += ent->s_lno;
		split_overlap(potential, ent, tlno, plno, same, parent);
		copy_split_if_better(sb, split, potential);
		decref_split(potential);
	}
}

struct handle_split_cb_data {
	struct blame_scoreboard *sb;
	struct blame_entry *ent;
	struct blame_origin *parent;
	struct blame_entry *split;
	long plno;
	long tlno;
};

static int handle_split_cb(long start_a, long count_a,
			   long start_b, long count_b, void *data)
{
	struct handle_split_cb_data *d = data;
	handle_split(d->sb, d->ent, d->tlno, d->plno, start_b, d->parent,
		     d->split);
	d->plno = start_a + count_a;
	d->tlno = start_b + count_b;
	return 0;
}
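/*
 * handle_split_cb() is fed one differing hunk at a time; the region
 * between the end of the previous hunk (d->plno/d->tlno) and the
 * start of the current one (start_b) is common to preimage and final
 * image, and that common range is what handle_split() may blame on
 * the parent.
 */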
/*
 * Find the lines from parent that are the same as ent so that
 * we can pass blames to it. file_p has the blob contents for
 * the parent.
 */
static void find_copy_in_blob(struct blame_scoreboard *sb,
			      struct blame_entry *ent,
			      struct blame_origin *parent,
			      struct blame_entry *split,
			      mmfile_t *file_p)
{
	const char *cp;
	mmfile_t file_o;
	struct handle_split_cb_data d;

	memset(&d, 0, sizeof(d));
	d.sb = sb; d.ent = ent; d.parent = parent; d.split = split;
	/*
	 * Prepare mmfile that contains only the lines in ent.
	 */
	cp = blame_nth_line(sb, ent->lno);
	file_o.ptr = (char *) cp;
	file_o.size = blame_nth_line(sb, ent->lno + ent->num_lines) - cp;

	/*
	 * file_o is a part of final image we are annotating.
	 * file_p partially may match that image.
	 */
	memset(split, 0, sizeof(struct blame_entry [3]));
	if (diff_hunks(file_p, &file_o, handle_split_cb, &d, sb->xdl_opts))
		die("unable to generate diff (%s)",
		    oid_to_hex(&parent->commit->object.oid));
	/* remainder, if any, all match the preimage */
	handle_split(sb, ent, d.tlno, d.plno, ent->num_lines, parent, split);
}

/* Move all blame entries from list *source that have a score no
 * greater than score_min to the front of list *small.
 * Returns a pointer to the link pointing to the old head of the small list.
 */
static struct blame_entry **filter_small(struct blame_scoreboard *sb,
					 struct blame_entry **small,
					 struct blame_entry **source,
					 unsigned score_min)
{
	struct blame_entry *p = *source;
	struct blame_entry *oldsmall = *small;
	while (p) {
		if (blame_entry_score(sb, p) <= score_min) {
			*small = p;
			small = &p->next;
			p = *small;
		} else {
			*source = p;
			source = &p->next;
			p = *source;
		}
	}
	*small = oldsmall;
	*source = NULL;
	return small;
}

/*
 * See if the lines the target is currently suspected for can be
 * attributed to the parent.
 */
static void find_move_in_parent(struct blame_scoreboard *sb,
				struct blame_entry ***blamed,
				struct blame_entry **toosmall,
				struct blame_origin *target,
				struct blame_origin *parent)
{
	struct blame_entry *e, split[3];
	struct blame_entry *unblamed = target->suspects;
	struct blame_entry *leftover = NULL;
	mmfile_t file_p;

	if (!unblamed)
		return; /* nothing remains for this target */

	fill_origin_blob(&sb->revs->diffopt, parent, &file_p, &sb->num_read_blob);
	if (!file_p.ptr)
		return;

	/* At each iteration, unblamed has a NULL-terminated list of
	 * entries that have not yet been tested for blame.
	 * leftover contains the reversed list of entries that have
	 * been tested without being assignable to the parent.
	 */
	do {
		struct blame_entry **unblamedtail = &unblamed;
		struct blame_entry *next;
		for (e = unblamed; e; e = next) {
			next = e->next;
			find_copy_in_blob(sb, e, parent, split, &file_p);
			if (split[1].suspect &&
			    sb->move_score < blame_entry_score(sb, &split[1])) {
				split_blame(blamed, &unblamedtail, split, e);
			} else {
				e->next = leftover;
				leftover = e;
			}
			decref_split(split);
		}
		*unblamedtail = NULL;
		toosmall = filter_small(sb, toosmall, &unblamed, sb->move_score);
	} while (unblamed);
	target->suspects = reverse_blame(leftover, NULL);
}

struct blame_list {
	struct blame_entry *ent;
	struct blame_entry split[3];
};

/*
 * Count the number of entries the target is suspected for,
 * and prepare a list of entry and best-split pairs.
 */
static struct blame_list *setup_blame_list(struct blame_entry *unblamed,
					   int *num_ents_p)
{
	struct blame_entry *e;
	int num_ents, i;
	struct blame_list *blame_list = NULL;

	for (e = unblamed, num_ents = 0; e; e = e->next)
		num_ents++;
	if (num_ents) {
		blame_list = xcalloc(num_ents, sizeof(struct blame_list));
		for (e = unblamed, i = 0; e; e = e->next)
			blame_list[i++].ent = e;
	}
	*num_ents_p = num_ents;
	return blame_list;
}

/*
 * For the lines the target is suspected for, see if we can find code
 * movement across file boundaries in the parent commit. porigin is
 * the path in the parent we already tried.
 */
static void find_copy_in_parent(struct blame_scoreboard *sb,
				struct blame_entry ***blamed,
				struct blame_entry **toosmall,
				struct blame_origin *target,
				struct commit *parent,
				struct blame_origin *porigin,
				int opt)
{
	struct diff_options diff_opts;
	int i, j;
	struct blame_list *blame_list;
	int num_ents;
	struct blame_entry *unblamed = target->suspects;
	struct blame_entry *leftover = NULL;

	if (!unblamed)
		return; /* nothing remains for this target */

	diff_setup(&diff_opts);
	diff_opts.flags.recursive = 1;
	diff_opts.output_format = DIFF_FORMAT_NO_OUTPUT;

	diff_setup_done(&diff_opts);

	/* Try "find copies harder" on new path if requested;
	 * we do not want to use diffcore_rename() actually to
	 * match things up; find_copies_harder is set only to
	 * force diff_tree_oid() to feed all filepairs to diff_queue,
	 * and this code needs to be after diff_setup_done(), which
	 * usually makes find-copies-harder imply copy detection.
	 */
	if ((opt & PICKAXE_BLAME_COPY_HARDEST)
	    || ((opt & PICKAXE_BLAME_COPY_HARDER)
		&& (!porigin || strcmp(target->path, porigin->path))))
		diff_opts.flags.find_copies_harder = 1;

	if (is_null_oid(&target->commit->object.oid))
		do_diff_cache(get_commit_tree_oid(parent), &diff_opts);
	else
		diff_tree_oid(get_commit_tree_oid(parent),
			      get_commit_tree_oid(target->commit),
			      "", &diff_opts);

	if (!diff_opts.flags.find_copies_harder)
		diffcore_std(&diff_opts);

	do {
		struct blame_entry **unblamedtail = &unblamed;
		blame_list = setup_blame_list(unblamed, &num_ents);

		for (i = 0; i < diff_queued_diff.nr; i++) {
			struct diff_filepair *p = diff_queued_diff.queue[i];
			struct blame_origin *norigin;
			mmfile_t file_p;
			struct blame_entry potential[3];

			if (!DIFF_FILE_VALID(p->one))
				continue; /* does not exist in parent */
			if (S_ISGITLINK(p->one->mode))
				continue; /* ignore git links */
			if (porigin && !strcmp(p->one->path, porigin->path))
				/* find_move already dealt with this path */
				continue;

			norigin = get_origin(parent, p->one->path);
			oidcpy(&norigin->blob_oid, &p->one->oid);
			norigin->mode = p->one->mode;
			fill_origin_blob(&sb->revs->diffopt, norigin, &file_p, &sb->num_read_blob);
			if (!file_p.ptr)
				continue;

			for (j = 0; j < num_ents; j++) {
				find_copy_in_blob(sb, blame_list[j].ent,
						  norigin, potential, &file_p);
				copy_split_if_better(sb, blame_list[j].split,
						     potential);
				decref_split(potential);
			}
			blame_origin_decref(norigin);
		}

		for (j = 0; j < num_ents; j++) {
			struct blame_entry *split = blame_list[j].split;
			if (split[1].suspect &&
			    sb->copy_score < blame_entry_score(sb, &split[1])) {
				split_blame(blamed, &unblamedtail, split,
					    blame_list[j].ent);
			} else {
				blame_list[j].ent->next = leftover;
				leftover = blame_list[j].ent;
			}
			decref_split(split);
		}
		free(blame_list);
		*unblamedtail = NULL;
		toosmall = filter_small(sb, toosmall, &unblamed, sb->copy_score);
	} while (unblamed);
	target->suspects = reverse_blame(leftover, NULL);
	diff_flush(&diff_opts);
	clear_pathspec(&diff_opts.pathspec);
}

/*
 * The blobs of origin and porigin exactly match, so everything
 * origin is suspected for can be blamed on the parent.
 */
static void pass_whole_blame(struct blame_scoreboard *sb,
			     struct blame_origin *origin, struct blame_origin *porigin)
{
	struct blame_entry *e, *suspects;

	if (!porigin->file.ptr && origin->file.ptr) {
		/* Steal its file */
		porigin->file = origin->file;
		origin->file.ptr = NULL;
	}
	suspects = origin->suspects;
	origin->suspects = NULL;
	for (e = suspects; e; e = e->next) {
		blame_origin_incref(porigin);
		blame_origin_decref(e->suspect);
		e->suspect = porigin;
	}
	queue_blames(sb, porigin, suspects);
}
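/*
 * pass_whole_blame() above is the cheap path taken by pass_blame()
 * when the parent's blob is byte-for-byte identical to the child's:
 * no diff is run, every suspect entry is simply re-pointed at
 * porigin.
 */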
/*
 * We pass blame from the current commit to its parents. We keep
 * saying "parent" (and "porigin"), but what we mean is to find a
 * scapegoat to exonerate ourselves.
 */
static struct commit_list *first_scapegoat(struct rev_info *revs, struct commit *commit,
					   int reverse)
{
	if (!reverse) {
		if (revs->first_parent_only &&
		    commit->parents &&
		    commit->parents->next) {
			free_commit_list(commit->parents->next);
			commit->parents->next = NULL;
		}
		return commit->parents;
	}
	return lookup_decoration(&revs->children, &commit->object);
}

static int num_scapegoats(struct rev_info *revs, struct commit *commit, int reverse)
{
	struct commit_list *l = first_scapegoat(revs, commit, reverse);
	return commit_list_count(l);
}

/* Distribute collected unsorted blames to the respective sorted lists
 * in the various origins.
 */
static void distribute_blame(struct blame_scoreboard *sb, struct blame_entry *blamed)
{
	blamed = llist_mergesort(blamed, get_next_blame, set_next_blame,
				 compare_blame_suspect);
	while (blamed)
	{
		struct blame_origin *porigin = blamed->suspect;
		struct blame_entry *suspects = NULL;
		do {
			struct blame_entry *next = blamed->next;
			blamed->next = suspects;
			suspects = blamed;
			blamed = next;
		} while (blamed && blamed->suspect == porigin);
		suspects = reverse_blame(suspects, NULL);
		queue_blames(sb, porigin, suspects);
	}
}

#define MAXSG 16

static void pass_blame(struct blame_scoreboard *sb, struct blame_origin *origin, int opt)
{
	struct rev_info *revs = sb->revs;
	int i, pass, num_sg;
	struct commit *commit = origin->commit;
	struct commit_list *sg;
	struct blame_origin *sg_buf[MAXSG];
	struct blame_origin *porigin, **sg_origin = sg_buf;
	struct blame_entry *toosmall = NULL;
	struct blame_entry *blames, **blametail = &blames;

	num_sg = num_scapegoats(revs, commit, sb->reverse);
	if (!num_sg)
		goto finish;
	else if (num_sg < ARRAY_SIZE(sg_buf))
		memset(sg_buf, 0, sizeof(sg_buf));
	else
		sg_origin = xcalloc(num_sg, sizeof(*sg_origin));

	/*
	 * The first pass looks for unrenamed path to optimize for
	 * common cases, then we look for renames in the second pass.
	 */
	for (pass = 0; pass < 2 - sb->no_whole_file_rename; pass++) {
		struct blame_origin *(*find)(struct commit *, struct blame_origin *);
		find = pass ? find_rename : find_origin;

		for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
		     i < num_sg && sg;
		     sg = sg->next, i++) {
			struct commit *p = sg->item;
			int j, same;

			if (sg_origin[i])
				continue;
			if (parse_commit(p))
				continue;
			porigin = find(p, origin);
			if (!porigin)
				continue;
			if (!oidcmp(&porigin->blob_oid, &origin->blob_oid)) {
				pass_whole_blame(sb, origin, porigin);
				blame_origin_decref(porigin);
				goto finish;
			}
			for (j = same = 0; j < i; j++)
				if (sg_origin[j] &&
				    !oidcmp(&sg_origin[j]->blob_oid, &porigin->blob_oid)) {
					same = 1;
					break;
				}
			if (!same)
				sg_origin[i] = porigin;
			else
				blame_origin_decref(porigin);
		}
	}

	sb->num_commits++;
	for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
	     i < num_sg && sg;
	     sg = sg->next, i++) {
		struct blame_origin *porigin = sg_origin[i];
		if (!porigin)
			continue;
		if (!origin->previous) {
			blame_origin_incref(porigin);
			origin->previous = porigin;
		}
		pass_blame_to_parent(sb, origin, porigin);
		if (!origin->suspects)
			goto finish;
	}

	/*
	 * Optionally find moves in parents' files.
	 */
	if (opt & PICKAXE_BLAME_MOVE) {
		filter_small(sb, &toosmall, &origin->suspects, sb->move_score);
		if (origin->suspects) {
			for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
			     i < num_sg && sg;
			     sg = sg->next, i++) {
				struct blame_origin *porigin = sg_origin[i];
				if (!porigin)
					continue;
				find_move_in_parent(sb, &blametail, &toosmall, origin, porigin);
				if (!origin->suspects)
					break;
			}
		}
	}

	/*
	 * Optionally find copies from parents' files.
	 */
	if (opt & PICKAXE_BLAME_COPY) {
		if (sb->copy_score > sb->move_score)
			filter_small(sb, &toosmall, &origin->suspects, sb->copy_score);
		else if (sb->copy_score < sb->move_score) {
			origin->suspects = blame_merge(origin->suspects, toosmall);
			toosmall = NULL;
			filter_small(sb, &toosmall, &origin->suspects, sb->copy_score);
		}
		if (!origin->suspects)
			goto finish;

		for (i = 0, sg = first_scapegoat(revs, commit, sb->reverse);
		     i < num_sg && sg;
		     sg = sg->next, i++) {
			struct blame_origin *porigin = sg_origin[i];
			find_copy_in_parent(sb, &blametail, &toosmall,
					    origin, sg->item, porigin, opt);
			if (!origin->suspects)
				goto finish;
		}
	}

finish:
	*blametail = NULL;
	distribute_blame(sb, blames);
	/*
	 * prepend toosmall to origin->suspects
	 *
	 * There is no point in sorting: this ends up on a big
	 * unsorted list in the caller anyway.
	 */
	if (toosmall) {
		struct blame_entry **tail = &toosmall;
		while (*tail)
			tail = &(*tail)->next;
		*tail = origin->suspects;
		origin->suspects = toosmall;
	}
	for (i = 0; i < num_sg; i++) {
		if (sg_origin[i]) {
			drop_origin_blob(sg_origin[i]);
			blame_origin_decref(sg_origin[i]);
		}
	}
	drop_origin_blob(origin);
	if (sg_buf != sg_origin)
		free(sg_origin);
}
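/*
 * A note on the ordering in pass_blame() above: unchanged lines are
 * handed to the scapegoats first via pass_blame_to_parent(), then -M
 * looks for lines moved within the parents' version of the same file
 * (find_move_in_parent), and finally -C looks for lines copied from
 * other files in the parents (find_copy_in_parent).  Entries scoring
 * at or below the move/copy threshold are parked on "toosmall" and
 * put back on origin->suspects at the end.
 */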
/*
 * The main loop -- while we have blobs with lines whose true origin
 * is still unknown, pick one blob, and allow its lines to pass blames
 * to its parents.
 */
void assign_blame(struct blame_scoreboard *sb, int opt)
{
	struct rev_info *revs = sb->revs;
	struct commit *commit = prio_queue_get(&sb->commits);

	while (commit) {
		struct blame_entry *ent;
		struct blame_origin *suspect = get_blame_suspects(commit);

		/* find one suspect to break down */
		while (suspect && !suspect->suspects)
			suspect = suspect->next;

		if (!suspect) {
			commit = prio_queue_get(&sb->commits);
			continue;
		}

		assert(commit == suspect->commit);

		/*
		 * We will use this suspect later in the loop,
		 * so hold onto it in the meantime.
		 */
		blame_origin_incref(suspect);
		parse_commit(commit);
		if (sb->reverse ||
		    (!(commit->object.flags & UNINTERESTING) &&
		     !(revs->max_age != -1 && commit->date < revs->max_age)))
			pass_blame(sb, suspect, opt);
		else {
			commit->object.flags |= UNINTERESTING;
			if (commit->object.parsed)
				mark_parents_uninteresting(commit);
		}
		/* treat root commit as boundary */
		if (!commit->parents && !sb->show_root)
			commit->object.flags |= UNINTERESTING;

		/* Take responsibility for the remaining entries */
		ent = suspect->suspects;
		if (ent) {
			suspect->guilty = 1;
			for (;;) {
				struct blame_entry *next = ent->next;
				if (sb->found_guilty_entry)
					sb->found_guilty_entry(ent, sb->found_guilty_entry_data);
				if (next) {
					ent = next;
					continue;
				}
				ent->next = sb->ent;
				sb->ent = suspect->suspects;
				suspect->suspects = NULL;
				break;
			}
		}
		blame_origin_decref(suspect);

		if (sb->debug) /* sanity */
			sanity_check_refcnt(sb);
	}
}

static const char *get_next_line(const char *start, const char *end)
{
	const char *nl = memchr(start, '\n', end - start);
	return nl ? nl + 1 : end;
}

/*
 * To allow quick access to the contents of the nth line in the
 * final image, prepare an index in the scoreboard.
 */
static int prepare_lines(struct blame_scoreboard *sb)
{
	const char *buf = sb->final_buf;
	unsigned long len = sb->final_buf_size;
	const char *end = buf + len;
	const char *p;
	int *lineno;
	int num = 0;

	for (p = buf; p < end; p = get_next_line(p, end))
		num++;

	ALLOC_ARRAY(sb->lineno, num + 1);
	lineno = sb->lineno;

	for (p = buf; p < end; p = get_next_line(p, end))
		*lineno++ = p - buf;

	*lineno = len;

	sb->num_lines = num;
	return sb->num_lines;
}

static struct commit *find_single_final(struct rev_info *revs,
					const char **name_p)
{
	int i;
	struct commit *found = NULL;
	const char *name = NULL;

	for (i = 0; i < revs->pending.nr; i++) {
		struct object *obj = revs->pending.objects[i].item;
		if (obj->flags & UNINTERESTING)
			continue;
		obj = deref_tag(the_repository, obj, NULL, 0);
		if (obj->type != OBJ_COMMIT)
			die("Non commit %s?", revs->pending.objects[i].name);
		if (found)
			die("More than one commit to dig from %s and %s?",
			    revs->pending.objects[i].name, name);
		found = (struct commit *)obj;
		name = revs->pending.objects[i].name;
	}
	if (name_p)
		*name_p = xstrdup_or_null(name);
	return found;
}

static struct commit *dwim_reverse_initial(struct rev_info *revs,
					   const char **name_p)
{
	/*
	 * DWIM "git blame --reverse ONE -- PATH" as
	 * "git blame --reverse ONE..HEAD -- PATH" but only do so
	 * when it makes sense.
	 */
	struct object *obj;
	struct commit *head_commit;
	struct object_id head_oid;

	if (revs->pending.nr != 1)
		return NULL;

	/* Is that sole rev a committish? */
	obj = revs->pending.objects[0].item;
	obj = deref_tag(the_repository, obj, NULL, 0);
	if (obj->type != OBJ_COMMIT)
		return NULL;

	/* Do we have HEAD? */
	if (!resolve_ref_unsafe("HEAD", RESOLVE_REF_READING, &head_oid, NULL))
		return NULL;
	head_commit = lookup_commit_reference_gently(the_repository,
						     &head_oid, 1);
	if (!head_commit)
		return NULL;

	/* Turn "ONE" into "ONE..HEAD" then */
	obj->flags |= UNINTERESTING;
	add_pending_object(revs, &head_commit->object, "HEAD");

	if (name_p)
		*name_p = revs->pending.objects[0].name;
	return (struct commit *)obj;
}

static struct commit *find_single_initial(struct rev_info *revs,
					  const char **name_p)
{
	int i;
	struct commit *found = NULL;
	const char *name = NULL;

	/*
	 * There must be one and only one negative commit, and it must be
	 * the boundary.
	 */
	for (i = 0; i < revs->pending.nr; i++) {
		struct object *obj = revs->pending.objects[i].item;
		if (!(obj->flags & UNINTERESTING))
			continue;
		obj = deref_tag(the_repository, obj, NULL, 0);
		if (obj->type != OBJ_COMMIT)
			die("Non commit %s?", revs->pending.objects[i].name);
		if (found)
			die("More than one commit to dig up from, %s and %s?",
			    revs->pending.objects[i].name, name);
		found = (struct commit *) obj;
		name = revs->pending.objects[i].name;
	}

	if (!name)
		found = dwim_reverse_initial(revs, &name);
	if (!name)
		die("No commit to dig up from?");

	if (name_p)
		*name_p = xstrdup(name);
	return found;
}

void init_scoreboard(struct blame_scoreboard *sb)
{
	memset(sb, 0, sizeof(struct blame_scoreboard));
	sb->move_score = BLAME_DEFAULT_MOVE_SCORE;
	sb->copy_score = BLAME_DEFAULT_COPY_SCORE;
}

void setup_scoreboard(struct blame_scoreboard *sb,
		      const char *path,
		      struct blame_origin **orig)
{
	const char *final_commit_name = NULL;
	struct blame_origin *o;
	struct commit *final_commit = NULL;
	enum object_type type;

	init_blame_suspects(&blame_suspects);

	if (sb->reverse && sb->contents_from)
		die(_("--contents and --reverse do not blend well."));

	if (!sb->repo)
		BUG("repo is NULL");

	if (!sb->reverse) {
		sb->final = find_single_final(sb->revs, &final_commit_name);
		sb->commits.compare = compare_commits_by_commit_date;
	} else {
		sb->final = find_single_initial(sb->revs, &final_commit_name);
		sb->commits.compare = compare_commits_by_reverse_commit_date;
	}

	if (sb->final && sb->contents_from)
		die(_("cannot use --contents with final commit object name"));

	if (sb->reverse && sb->revs->first_parent_only)
		sb->revs->children.name = NULL;

	if (!sb->final) {
		/*
		 * "--not A B -- path" without anything positive;
		 * do not default to HEAD, but use the working tree
		 * or "--contents".
		 */
		setup_work_tree();
		sb->final = fake_working_tree_commit(sb->repo,
						     &sb->revs->diffopt,
						     path, sb->contents_from);
		add_pending_object(sb->revs, &(sb->final->object), ":");
	}

	if (sb->reverse && sb->revs->first_parent_only) {
		final_commit = find_single_final(sb->revs, NULL);
		if (!final_commit)
			die(_("--reverse and --first-parent together require specified latest commit"));
	}

	/*
	 * If we have bottom, this will mark the ancestors of the
	 * bottom commits we would reach while traversing as
	 * uninteresting.
	 */
	if (prepare_revision_walk(sb->revs))
		die(_("revision walk setup failed"));

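	/*
	 * For --reverse --first-parent we must be able to walk from the
	 * bottom commit towards the latest one along first-parent links,
	 * so record each commit as the single "child" of its first
	 * parent in the children decoration set up below.
	 */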
	if (sb->reverse && sb->revs->first_parent_only) {
		struct commit *c = final_commit;

		sb->revs->children.name = "children";
		while (c->parents &&
		       oidcmp(&c->object.oid, &sb->final->object.oid)) {
			struct commit_list *l = xcalloc(1, sizeof(*l));

			l->item = c;
			if (add_decoration(&sb->revs->children,
					   &c->parents->item->object, l))
				BUG("not unique item in first-parent chain");
			c = c->parents->item;
		}

		if (oidcmp(&c->object.oid, &sb->final->object.oid))
			die(_("--reverse --first-parent together require range along first-parent chain"));
	}

	if (is_null_oid(&sb->final->object.oid)) {
		o = get_blame_suspects(sb->final);
		sb->final_buf = xmemdupz(o->file.ptr, o->file.size);
		sb->final_buf_size = o->file.size;
	}
	else {
		o = get_origin(sb->final, path);
		if (fill_blob_sha1_and_mode(sb->repo, o))
			die(_("no such path %s in %s"), path, final_commit_name);

		if (sb->revs->diffopt.flags.allow_textconv &&
		    textconv_object(path, o->mode, &o->blob_oid, 1, (char **)&sb->final_buf,
				    &sb->final_buf_size))
			;
		else
			sb->final_buf = read_object_file(&o->blob_oid, &type,
							 &sb->final_buf_size);

		if (!sb->final_buf)
			die(_("cannot read blob %s for path %s"),
			    oid_to_hex(&o->blob_oid),
			    path);
	}
	sb->num_read_blob++;
	prepare_lines(sb);

	if (orig)
		*orig = o;

	free((char *)final_commit_name);
}

struct blame_entry *blame_entry_prepend(struct blame_entry *head,
					long start, long end,
					struct blame_origin *o)
{
	struct blame_entry *new_head = xcalloc(1, sizeof(struct blame_entry));
	new_head->lno = start;
	new_head->num_lines = end - start;
	new_head->suspect = o;
	new_head->s_lno = start;
	new_head->next = head;
	blame_origin_incref(o);
	return new_head;
}