epoch.c on commit "Merge part of np/delta" (d00e0f8)
   1/*
   2 * Copyright (c) 2005, Jon Seymour
   3 *
   4 * For more information about epoch theory on which this module is based,
   5 * refer to http://blackcubes.dyndns.org/epoch/. That web page defines
   6 * terms such as "epoch" and "minimal, non-linear epoch" and provides rationales
   7 * for some of the algorithms used here.
   8 *
   9 */
  10#include <stdlib.h>
  11
  12/* Provides arbitrary precision integers required to accurately represent
  13 * fractional mass: */
  14#include <openssl/bn.h>
  15
  16#include "cache.h"
  17#include "commit.h"
  18#include "revision.h"
  19#include "epoch.h"
  20
/*
 * An exact rational number (numerator/denominator) held in arbitrary
 * precision, so that repeatedly dividing mass by parent counts and
 * re-summing it loses nothing to rounding.
 */
struct fraction {
	BIGNUM numerator;
	BIGNUM denominator;
};
  25
/* True when a commit has one, and only one, parent. */
#define HAS_EXACTLY_ONE_PARENT(n) ((n)->parents && !(n)->parents->next)

/* Shared bignum scratch context, created lazily by get_BN_CTX(). */
static BN_CTX *context = NULL;
/* Lazily created singleton fractions 1/1 and 0/1; see get_one()/get_zero(). */
static struct fraction *one = NULL;
static struct fraction *zero = NULL;
  31
  32static BN_CTX *get_BN_CTX(void)
  33{
  34        if (!context) {
  35                context = BN_CTX_new();
  36        }
  37        return context;
  38}
  39
  40static struct fraction *new_zero(void)
  41{
  42        struct fraction *result = xmalloc(sizeof(*result));
  43        BN_init(&result->numerator);
  44        BN_init(&result->denominator);
  45        BN_zero(&result->numerator);
  46        BN_one(&result->denominator);
  47        return result;
  48}
  49
  50static void clear_fraction(struct fraction *fraction)
  51{
  52        BN_clear(&fraction->numerator);
  53        BN_clear(&fraction->denominator);
  54}
  55
  56static struct fraction *divide(struct fraction *result, struct fraction *fraction, int divisor)
  57{
  58        BIGNUM bn_divisor;
  59
  60        BN_init(&bn_divisor);
  61        BN_set_word(&bn_divisor, divisor);
  62
  63        BN_copy(&result->numerator, &fraction->numerator);
  64        BN_mul(&result->denominator, &fraction->denominator, &bn_divisor, get_BN_CTX());
  65
  66        BN_clear(&bn_divisor);
  67        return result;
  68}
  69
  70static struct fraction *init_fraction(struct fraction *fraction)
  71{
  72        BN_init(&fraction->numerator);
  73        BN_init(&fraction->denominator);
  74        BN_zero(&fraction->numerator);
  75        BN_one(&fraction->denominator);
  76        return fraction;
  77}
  78
  79static struct fraction *get_one(void)
  80{
  81        if (!one) {
  82                one = new_zero();
  83                BN_one(&one->numerator);
  84        }
  85        return one;
  86}
  87
  88static struct fraction *get_zero(void)
  89{
  90        if (!zero) {
  91                zero = new_zero();
  92        }
  93        return zero;
  94}
  95
  96static struct fraction *copy(struct fraction *to, struct fraction *from)
  97{
  98        BN_copy(&to->numerator, &from->numerator);
  99        BN_copy(&to->denominator, &from->denominator);
 100        return to;
 101}
 102
/*
 * result = left + right, reduced to lowest terms.
 *
 * NOTE(aliasing): callers pass result aliased with left, e.g.
 * add(&x, &x, y). The statement order below is deliberate: both
 * cross products a and b are computed from left/right *before*
 * result's components are overwritten, so the aliased case is safe.
 * Do not reorder.
 */
static struct fraction *add(struct fraction *result, struct fraction *left, struct fraction *right)
{
	BIGNUM a, b, gcd;

	BN_init(&a);
	BN_init(&b);
	BN_init(&gcd);

	/* cross-multiply: a = l.num * r.den, b = l.den * r.num */
	BN_mul(&a, &left->numerator, &right->denominator, get_BN_CTX());
	BN_mul(&b, &left->denominator, &right->numerator, get_BN_CTX());
	BN_mul(&result->denominator, &left->denominator, &right->denominator, get_BN_CTX());
	BN_add(&result->numerator, &a, &b);

	/* reduce to lowest terms to keep the bignums from growing unboundedly */
	BN_gcd(&gcd, &result->denominator, &result->numerator, get_BN_CTX());
	BN_div(&result->denominator, NULL, &result->denominator, &gcd, get_BN_CTX());
	BN_div(&result->numerator, NULL, &result->numerator, &gcd, get_BN_CTX());

	BN_clear(&a);
	BN_clear(&b);
	BN_clear(&gcd);

	return result;
}
 126
 127static int compare(struct fraction *left, struct fraction *right)
 128{
 129        BIGNUM a, b;
 130        int result;
 131
 132        BN_init(&a);
 133        BN_init(&b);
 134
 135        BN_mul(&a, &left->numerator, &right->denominator, get_BN_CTX());
 136        BN_mul(&b, &left->denominator, &right->numerator, get_BN_CTX());
 137
 138        result = BN_cmp(&a, &b);
 139
 140        BN_clear(&a);
 141        BN_clear(&b);
 142
 143        return result;
 144}
 145
/*
 * Per-commit state for the mass-flow search, attached to object.util:
 * "seen" is mass that has arrived and been accounted for on visits,
 * "pending" is mass injected but not yet propagated to the parents.
 */
struct mass_counter {
	struct fraction seen;
	struct fraction pending;
};
 150
 151static struct mass_counter *new_mass_counter(struct commit *commit, struct fraction *pending)
 152{
 153        struct mass_counter *mass_counter = xmalloc(sizeof(*mass_counter));
 154        memset(mass_counter, 0, sizeof(*mass_counter));
 155
 156        init_fraction(&mass_counter->seen);
 157        init_fraction(&mass_counter->pending);
 158
 159        copy(&mass_counter->pending, pending);
 160        copy(&mass_counter->seen, get_zero());
 161
 162        if (commit->object.util) {
 163                die("multiple attempts to initialize mass counter for %s",
 164                    sha1_to_hex(commit->object.sha1));
 165        }
 166
 167        commit->object.util = mass_counter;
 168
 169        return mass_counter;
 170}
 171
 172static void free_mass_counter(struct mass_counter *counter)
 173{
 174        clear_fraction(&counter->seen);
 175        clear_fraction(&counter->pending);
 176        free(counter);
 177}
 178
 179/*
 180 * Finds the base commit of a list of commits.
 181 *
 182 * One property of the commit being searched for is that every commit reachable
 183 * from the base commit is reachable from the commits in the starting list only
 184 * via paths that include the base commit.
 185 *
 186 * This algorithm uses a conservation of mass approach to find the base commit.
 187 *
 188 * We start by injecting one unit of mass into the graph at each
 189 * of the commits in the starting list. Injecting mass into a commit
 190 * is achieved by adding to its pending mass counter and, if it is not already
 191 * enqueued, enqueuing the commit in a list of pending commits, in latest
 192 * commit date first order.
 193 *
 194 * The algorithm then proceeds to visit each commit in the pending queue.
 195 * Upon each visit, the pending mass is added to the mass already seen for that
 196 * commit and then divided into N equal portions, where N is the number of
 197 * parents of the commit being visited. The divided portions are then injected
 198 * into each of the parents.
 199 *
 200 * The algorithm continues until we discover a commit which has seen all the
 201 * mass originally injected or until we run out of things to do.
 202 *
 203 * If we find a commit that has seen all the original mass, we have found
 204 * the common base of all the commits in the starting list.
 205 *
 206 * The algorithm does _not_ depend on accurate timestamps for correct operation.
 207 * However, reasonably sane (e.g. non-random) timestamps are required in order
 208 * to prevent an exponential performance characteristic. The occasional
 209 * timestamp inaccuracy will not dramatically affect performance but may
 210 * result in more nodes being processed than strictly necessary.
 211 *
 212 * This procedure sets *boundary to the address of the base commit. It returns
 213 * non-zero if, and only if, there was a problem parsing one of the
 214 * commits discovered during the traversal.
 215 */
static int find_base_for_list(struct commit_list *list, struct commit **boundary)
{
	int ret = 0;
	struct commit_list *cleaner = NULL;	/* every commit we gave a mass counter */
	struct commit_list *pending = NULL;	/* commits with undistributed mass, newest first */
	struct fraction injected;		/* total mass injected so far */
	init_fraction(&injected);
	*boundary = NULL;

	/* Inject one unit of mass at each distinct starting commit. */
	for (; list; list = list->next) {
		struct commit *item = list->item;

		if (!item->object.util) {
			new_mass_counter(list->item, get_one());
			add(&injected, &injected, get_one());

			commit_list_insert(list->item, &cleaner);
			commit_list_insert(list->item, &pending);
		}
	}

	/* Visit commits latest-first until one has seen all injected mass. */
	while (!*boundary && pending && !ret) {
		struct commit *latest = pop_commit(&pending);
		struct mass_counter *latest_node = (struct mass_counter *) latest->object.util;
		int num_parents;

		/* On parse failure, ret becomes non-zero and the loop exits. */
		if ((ret = parse_commit(latest)))
			continue;
		add(&latest_node->seen, &latest_node->seen, &latest_node->pending);

		num_parents = count_parents(latest);
		if (num_parents) {
			struct fraction distribution;
			struct commit_list *parents;

			/* Split the pending mass evenly among the parents. */
			divide(init_fraction(&distribution), &latest_node->pending, num_parents);

			for (parents = latest->parents; parents; parents = parents->next) {
				struct commit *parent = parents->item;
				struct mass_counter *parent_node = (struct mass_counter *) parent->object.util;

				if (!parent_node) {
					/* First mass to reach this parent: counter + enqueue. */
					parent_node = new_mass_counter(parent, &distribution);
					insert_by_date(parent, &pending);
					commit_list_insert(parent, &cleaner);
				} else {
					/*
					 * Re-enqueue only if its pending mass was zero;
					 * a non-zero pending mass means it is already queued.
					 */
					if (!compare(&parent_node->pending, get_zero()))
						insert_by_date(parent, &pending);
					add(&parent_node->pending, &parent_node->pending, &distribution);
				}
			}

			clear_fraction(&distribution);
		}

		/* All injected mass seen here => this commit is the common base. */
		if (!compare(&latest_node->seen, &injected))
			*boundary = latest;
		copy(&latest_node->pending, get_zero());
	}

	/* Detach and free every counter we attached, restoring object.util. */
	while (cleaner) {
		struct commit *next = pop_commit(&cleaner);
		free_mass_counter((struct mass_counter *) next->object.util);
		next->object.util = NULL;
	}

	if (pending)
		free_commit_list(pending);

	clear_fraction(&injected);
	return ret;
}
 288
 289
 290/*
 * Finds the base of a minimal, non-linear epoch, headed at head, by
 292 * applying the find_base_for_list to a list consisting of the parents
 293 */
 294static int find_base(struct commit *head, struct commit **boundary)
 295{
 296        int ret = 0;
 297        struct commit_list *pending = NULL;
 298        struct commit_list *next;
 299
 300        for (next = head->parents; next; next = next->next) {
 301                commit_list_insert(next->item, &pending);
 302        }
 303        ret = find_base_for_list(pending, boundary);
 304        free_commit_list(pending);
 305
 306        return ret;
 307}
 308
 309/*
 310 * This procedure traverses to the boundary of the first epoch in the epoch
 311 * sequence of the epoch headed at head_of_epoch. This is either the end of
 312 * the maximal linear epoch or the base of a minimal non-linear epoch.
 313 *
 314 * The queue of pending nodes is sorted in reverse date order and each node
 315 * is currently in the queue at most once.
 316 */
 317static int find_next_epoch_boundary(struct commit *head_of_epoch, struct commit **boundary)
 318{
 319        int ret;
 320        struct commit *item = head_of_epoch;
 321
 322        ret = parse_commit(item);
 323        if (ret)
 324                return ret;
 325
 326        if (HAS_EXACTLY_ONE_PARENT(item)) {
 327                /*
 328                 * We are at the start of a maximimal linear epoch.
 329                 * Traverse to the end.
 330                 */
 331                while (HAS_EXACTLY_ONE_PARENT(item) && !ret) {
 332                        item = item->parents->item;
 333                        ret = parse_commit(item);
 334                }
 335                *boundary = item;
 336
 337        } else {
 338                /*
 339                 * Otherwise, we are at the start of a minimal, non-linear
 340                 * epoch - find the common base of all parents.
 341                 */
 342                ret = find_base(item, boundary);
 343        }
 344
 345        return ret;
 346}
 347
 348/*
 349 * Returns non-zero if parent is known to be a parent of child.
 350 */
 351static int is_parent_of(struct commit *parent, struct commit *child)
 352{
 353        struct commit_list *parents;
 354        for (parents = child->parents; parents; parents = parents->next) {
 355                if (!memcmp(parent->object.sha1, parents->item->object.sha1,
 356                            sizeof(parents->item->object.sha1)))
 357                        return 1;
 358        }
 359        return 0;
 360}
 361
 362/*
 363 * Pushes an item onto the merge order stack. If the top of the stack is
 364 * marked as being a possible "break", we check to see whether it actually
 365 * is a break.
 366 */
 367static void push_onto_merge_order_stack(struct commit_list **stack, struct commit *item)
 368{
 369        struct commit_list *top = *stack;
 370        if (top && (top->item->object.flags & DISCONTINUITY)) {
 371                if (is_parent_of(top->item, item)) {
 372                        top->item->object.flags &= ~DISCONTINUITY;
 373                }
 374        }
 375        commit_list_insert(item, stack);
 376}
 377
 378/*
 379 * Marks all interesting, visited commits reachable from this commit
 380 * as uninteresting. We stop recursing when we reach the epoch boundary,
 * an unvisited node or a node that has already been marked uninteresting.
 382 *
 383 * This doesn't actually mark all ancestors between the start node and the
 384 * epoch boundary uninteresting, but does ensure that they will eventually
 385 * be marked uninteresting when the main sort_first_epoch() traversal
 386 * eventually reaches them.
 387 */
 388static void mark_ancestors_uninteresting(struct commit *commit)
 389{
 390        unsigned int flags = commit->object.flags;
 391        int visited = flags & VISITED;
 392        int boundary = flags & BOUNDARY;
 393        int uninteresting = flags & UNINTERESTING;
 394        struct commit_list *next;
 395
 396        commit->object.flags |= UNINTERESTING;
 397
 398        /*
 399         * We only need to recurse if
 400         *      we are not on the boundary and
 401         *      we have not already been marked uninteresting and
 402         *      we have already been visited.
 403         *
 404         * The main sort_first_epoch traverse will mark unreachable
 405         * all uninteresting, unvisited parents as they are visited
 406         * so there is no need to duplicate that traversal here.
 407         *
 408         * Similarly, if we are already marked uninteresting
 409         * then either all ancestors have already been marked
 410         * uninteresting or will be once the sort_first_epoch
 411         * traverse reaches them.
 412         */
 413
 414        if (uninteresting || boundary || !visited)
 415                return;
 416
 417        for (next = commit->parents; next; next = next->next)
 418                mark_ancestors_uninteresting(next->item);
 419}
 420
 421/*
 422 * Sorts the nodes of the first epoch of the epoch sequence of the epoch headed at head
 423 * into merge order.
 424 */
/*
 * Recursively pushes the first epoch's commits onto *stack in merge
 * order, depth-first through each parent, stopping at BOUNDARY
 * commits. head itself is pushed last (so it pops first).
 */
static void sort_first_epoch(struct commit *head, struct commit_list **stack)
{
	struct commit_list *parents;

	head->object.flags |= VISITED;

	/*
	 * TODO: By sorting the parents in a different order, we can alter the
	 * merge order to show contemporaneous changes in parallel branches
	 * occurring after "local" changes. This is useful for a developer
	 * when a developer wants to see all changes that were incorporated
	 * into the same merge as her own changes occur after her own
	 * changes.
	 */

	for (parents = head->parents; parents; parents = parents->next) {
		struct commit *parent = parents->item;

		if (head->object.flags & UNINTERESTING) {
			/*
			 * Propagates the uninteresting bit to all parents.
			 * if we have already visited this parent, then
			 * the uninteresting bit will be propagated to each
			 * reachable commit that is still not marked
			 * uninteresting and won't otherwise be reached.
			 */
			mark_ancestors_uninteresting(parent);
		}

		if (!(parent->object.flags & VISITED)) {
			if (parent->object.flags & BOUNDARY) {
				/* The boundary must be the only thing pushed so far. */
				if (*stack) {
					die("something else is on the stack - %s",
					    sha1_to_hex((*stack)->item->object.sha1));
				}
				push_onto_merge_order_stack(stack, parent);
				parent->object.flags |= VISITED;

			} else {
				sort_first_epoch(parent, stack);
				/*
				 * NOTE(review): `parents` is always non-NULL here
				 * (it is the loop condition), so this branch is
				 * taken on every iteration - presumably the intent
				 * was `parents->next`; confirm before changing.
				 */
				if (parents) {
					/*
					 * This indicates a possible discontinuity;
					 * it may not be an actual discontinuity if
					 * the head of parent N happens to be the
					 * tail of parent N+1.
					 *
					 * The next push onto the stack will
					 * resolve the question.
					 */
					(*stack)->item->object.flags |= DISCONTINUITY;
				}
			}
		}
	}

	push_onto_merge_order_stack(stack, head);
}
 484
 485/*
 486 * Emit the contents of the stack.
 487 *
 488 * The stack is freed and replaced by NULL.
 489 *
 490 * Sets the return value to STOP if no further output should be generated.
 491 */
 492static int emit_stack(struct commit_list **stack, emitter_func emitter, int include_last)
 493{
 494        unsigned int seen = 0;
 495        int action = CONTINUE;
 496
 497        while (*stack && (action != STOP)) {
 498                struct commit *next = pop_commit(stack);
 499                seen |= next->object.flags;
 500                if (*stack || include_last) {
 501                        if (!*stack) 
 502                                next->object.flags |= BOUNDARY;
 503                        action = emitter(next);
 504                }
 505        }
 506
 507        if (*stack) {
 508                free_commit_list(*stack);
 509                *stack = NULL;
 510        }
 511
 512        return (action == STOP || (seen & UNINTERESTING)) ? STOP : CONTINUE;
 513}
 514
 515/*
 516 * Sorts an arbitrary epoch into merge order by sorting each epoch
 517 * of its epoch sequence into order.
 518 *
 519 * Note: this algorithm currently leaves traces of its execution in the
 520 * object flags of nodes it discovers. This should probably be fixed.
 521 */
 522static int sort_in_merge_order(struct commit *head_of_epoch, emitter_func emitter)
 523{
 524        struct commit *next = head_of_epoch;
 525        int ret = 0;
 526        int action = CONTINUE;
 527
 528        ret = parse_commit(head_of_epoch);
 529
 530        next->object.flags |= BOUNDARY;
 531
 532        while (next && next->parents && !ret && (action != STOP)) {
 533                struct commit *base = NULL;
 534
 535                ret = find_next_epoch_boundary(next, &base);
 536                if (ret)
 537                        return ret;
 538                next->object.flags |= BOUNDARY;
 539                if (base)
 540                        base->object.flags |= BOUNDARY;
 541
 542                if (HAS_EXACTLY_ONE_PARENT(next)) {
 543                        while (HAS_EXACTLY_ONE_PARENT(next)
 544                               && (action != STOP)
 545                               && !ret) {
 546                                if (next->object.flags & UNINTERESTING) {
 547                                        action = STOP;
 548                                } else {
 549                                        action = emitter(next);
 550                                }
 551                                if (action != STOP) {
 552                                        next = next->parents->item;
 553                                        ret = parse_commit(next);
 554                                }
 555                        }
 556
 557                } else {
 558                        struct commit_list *stack = NULL;
 559                        sort_first_epoch(next, &stack);
 560                        action = emit_stack(&stack, emitter, (base == NULL));
 561                        next = base;
 562                }
 563        }
 564
 565        if (next && (action != STOP) && !ret) {
 566                emitter(next);
 567        }
 568
 569        return ret;
 570}
 571
 572/*
 573 * Sorts the nodes reachable from a starting list in merge order, we
 574 * first find the base for the starting list and then sort all nodes
 575 * in this subgraph using the sort_first_epoch algorithm. Once we have
 576 * reached the base we can continue sorting using sort_in_merge_order.
 577 */
 578int sort_list_in_merge_order(struct commit_list *list, emitter_func emitter)
 579{
 580        struct commit_list *stack = NULL;
 581        struct commit *base;
 582        int ret = 0;
 583        int action = CONTINUE;
 584        struct commit_list *reversed = NULL;
 585
 586        for (; list; list = list->next)
 587                commit_list_insert(list->item, &reversed);
 588
 589        if (!reversed)
 590                return ret;
 591        else if (!reversed->next) {
 592                /*
 593                 * If there is only one element in the list, we can sort it
 594                 * using sort_in_merge_order.
 595                 */
 596                base = reversed->item;
 597        } else {
 598                /*
 599                 * Otherwise, we search for the base of the list.
 600                 */
 601                ret = find_base_for_list(reversed, &base);
 602                if (ret)
 603                        return ret;
 604                if (base)
 605                        base->object.flags |= BOUNDARY;
 606
 607                while (reversed) {
 608                        struct commit * next = pop_commit(&reversed);
 609
 610                        if (!(next->object.flags & VISITED) && next!=base) {
 611                                sort_first_epoch(next, &stack);
 612                                if (reversed) {
 613                                        /*
 614                                         * If we have more commits 
 615                                         * to push, then the first
 616                                         * push for the next parent may 
 617                                         * (or may * not) represent a 
 618                                         * discontinuity with respect
 619                                         * to the parent currently on 
 620                                         * the top of the stack.
 621                                         *
 622                                         * Mark it for checking here, 
 623                                         * and check it with the next 
 624                                         * push. See sort_first_epoch()
 625                                         * for more details.
 626                                         */
 627                                        stack->item->object.flags |= DISCONTINUITY;
 628                                }
 629                        }
 630                }
 631
 632                action = emit_stack(&stack, emitter, (base==NULL));
 633        }
 634
 635        if (base && (action != STOP)) {
 636                ret = sort_in_merge_order(base, emitter);
 637        }
 638
 639        return ret;
 640}