From: Junio C Hamano Date: Thu, 3 Jan 2013 18:29:12 +0000 (-0800) Subject: Merge branch 'as/test-tweaks' X-Git-Tag: v1.8.2-rc0~194 X-Git-Url: https://git.lorimer.id.au/gitweb.git/diff_plain/6fedcd8188dc50276a8d77d14f200fe99c228ee0?ds=inline;hp=-c Merge branch 'as/test-tweaks' Output from the tests is coloured using "green is okay, yellow is questionable, red is bad and blue is informative" scheme. * as/test-tweaks: tests: paint unexpectedly fixed known breakages in bold red tests: test the test framework more thoroughly tests: refactor mechanics of testing in a sub test-lib tests: change info messages from yellow/brown to cyan tests: paint skipped tests in blue tests: paint known breakages in yellow tests: test number comes first in 'not ok $count - $message' --- 6fedcd8188dc50276a8d77d14f200fe99c228ee0 diff --combined t/t0000-basic.sh index 562cf41cad,8973d2ccc8..cefe33d6d1 --- a/t/t0000-basic.sh +++ b/t/t0000-basic.sh @@@ -18,6 -18,16 +18,6 @@@ swapping compression and hashing order modification *should* take notice and update the test vectors here. ' -################################################################ -# It appears that people try to run tests without building... - -../git >/dev/null -if test $? != 1 -then - echo >&2 'You do not seem to have built git yet.' - exit 1 -fi - . ./test-lib.sh ################################################################ @@@ -45,39 -55,176 +45,176 @@@ test_expect_failure 'pretend we have a false ' - test_expect_success 'pretend we have fixed a known breakage (run in sub test-lib)' " - mkdir passing-todo && - (cd passing-todo && - cat >passing-todo.sh <<-EOF && - #!$SHELL_PATH - - test_description='A passing TODO test + run_sub_test_lib_test () { + name="$1" descr="$2" # stdin is the body of the test code + mkdir "$name" && + ( + cd "$name" && + cat >"$name.sh" <<-EOF && + #!$SHELL_PATH + + test_description='$descr (run in sub test-lib) + + This is run in a sub test-lib so that we do not get incorrect + passing metrics + ' + + # Point to the t/test-lib.sh, which isn't in ../ as usual + . "\$TEST_DIRECTORY"/test-lib.sh + EOF + cat >>"$name.sh" && + chmod +x "$name.sh" && + export TEST_DIRECTORY && + ./"$name.sh" >out 2>err + ) + } - This is run in a sub test-lib so that we do not get incorrect - passing metrics - ' + check_sub_test_lib_test () { + name="$1" # stdin is the expected output from the test + ( + cd "$name" && + ! test -s err && + sed -e 's/^> //' -e 's/Z$//' >expect && + test_cmp expect out + ) + } + + test_expect_success 'pretend we have a fully passing test suite' " + run_sub_test_lib_test full-pass '3 passing tests' <<-\\EOF && + for i in 1 2 3 + do + test_expect_success \"passing test #\$i\" 'true' + done + test_done + EOF + check_sub_test_lib_test full-pass <<-\\EOF + > ok 1 - passing test #1 + > ok 2 - passing test #2 + > ok 3 - passing test #3 + > # passed all 3 test(s) + > 1..3 + EOF + " - # Point to the t/test-lib.sh, which isn't in ../ as usual - TEST_DIRECTORY=\"$TEST_DIRECTORY\" - . 
\"\$TEST_DIRECTORY\"/test-lib.sh + test_expect_success 'pretend we have a partially passing test suite' " + test_must_fail run_sub_test_lib_test \ + partial-pass '2/3 tests passing' <<-\\EOF && + test_expect_success 'passing test #1' 'true' + test_expect_success 'failing test #2' 'false' + test_expect_success 'passing test #3' 'true' + test_done + EOF + check_sub_test_lib_test partial-pass <<-\\EOF + > ok 1 - passing test #1 + > not ok 2 - failing test #2 + # false + > ok 3 - passing test #3 + > # failed 1 among 3 test(s) + > 1..3 + EOF + " - test_expect_failure 'pretend we have fixed a known breakage' ' - : - ' + test_expect_success 'pretend we have a known breakage' " + run_sub_test_lib_test failing-todo 'A failing TODO test' <<-\\EOF && + test_expect_success 'passing test' 'true' + test_expect_failure 'pretend we have a known breakage' 'false' + test_done + EOF + check_sub_test_lib_test failing-todo <<-\\EOF + > ok 1 - passing test + > not ok 2 - pretend we have a known breakage # TODO known breakage + > # still have 1 known breakage(s) + > # passed all remaining 1 test(s) + > 1..2 + EOF + " + test_expect_success 'pretend we have fixed a known breakage' " + run_sub_test_lib_test passing-todo 'A passing TODO test' <<-\\EOF && + test_expect_failure 'pretend we have fixed a known breakage' 'true' test_done EOF - chmod +x passing-todo.sh && - ./passing-todo.sh >out 2>err && - ! test -s err && - sed -e 's/^> //' >expect <<-\\EOF && - > ok 1 - pretend we have fixed a known breakage # TODO known breakage - > # fixed 1 known breakage(s) - > # passed all 1 test(s) + check_sub_test_lib_test passing-todo <<-\\EOF + > ok 1 - pretend we have fixed a known breakage # TODO known breakage vanished + > # 1 known breakage(s) vanished; please update test(s) > 1..1 EOF - test_cmp expect out) " + + test_expect_success 'pretend we have fixed one of two known breakages (run in sub test-lib)' " + run_sub_test_lib_test partially-passing-todos \ + '2 TODO tests, one passing' <<-\\EOF && + test_expect_failure 'pretend we have a known breakage' 'false' + test_expect_success 'pretend we have a passing test' 'true' + test_expect_failure 'pretend we have fixed another known breakage' 'true' + test_done + EOF + check_sub_test_lib_test partially-passing-todos <<-\\EOF + > not ok 1 - pretend we have a known breakage # TODO known breakage + > ok 2 - pretend we have a passing test + > ok 3 - pretend we have fixed another known breakage # TODO known breakage vanished + > # 1 known breakage(s) vanished; please update test(s) + > # still have 1 known breakage(s) + > # passed all remaining 1 test(s) + > 1..3 + EOF + " + + test_expect_success 'pretend we have a pass, fail, and known breakage' " + test_must_fail run_sub_test_lib_test \ + mixed-results1 'mixed results #1' <<-\\EOF && + test_expect_success 'passing test' 'true' + test_expect_success 'failing test' 'false' + test_expect_failure 'pretend we have a known breakage' 'false' + test_done + EOF + check_sub_test_lib_test mixed-results1 <<-\\EOF + > ok 1 - passing test + > not ok 2 - failing test + > # false + > not ok 3 - pretend we have a known breakage # TODO known breakage + > # still have 1 known breakage(s) + > # failed 1 among remaining 2 test(s) + > 1..3 + EOF + " + + test_expect_success 'pretend we have a mix of all possible results' " + test_must_fail run_sub_test_lib_test \ + mixed-results2 'mixed results #2' <<-\\EOF && + test_expect_success 'passing test' 'true' + test_expect_success 'passing test' 'true' + test_expect_success 'passing test' 'true' + 
test_expect_success 'passing test' 'true' + test_expect_success 'failing test' 'false' + test_expect_success 'failing test' 'false' + test_expect_success 'failing test' 'false' + test_expect_failure 'pretend we have a known breakage' 'false' + test_expect_failure 'pretend we have a known breakage' 'false' + test_expect_failure 'pretend we have fixed a known breakage' 'true' + test_done + EOF + check_sub_test_lib_test mixed-results2 <<-\\EOF + > ok 1 - passing test + > ok 2 - passing test + > ok 3 - passing test + > ok 4 - passing test + > not ok 5 - failing test + > # false + > not ok 6 - failing test + > # false + > not ok 7 - failing test + > # false + > not ok 8 - pretend we have a known breakage # TODO known breakage + > not ok 9 - pretend we have a known breakage # TODO known breakage + > ok 10 - pretend we have fixed a known breakage # TODO known breakage vanished + > # 1 known breakage(s) vanished; please update test(s) + > # still have 2 known breakage(s) + > # failed 3 among remaining 7 test(s) + > 1..10 + EOF + " + test_set_prereq HAVEIT haveit=no test_expect_success HAVEIT 'test runs if prerequisite is satisfied' ' @@@ -115,38 -262,6 +252,38 @@@ the exit 1 fi +test_lazy_prereq LAZY_TRUE true +havetrue=no +test_expect_success LAZY_TRUE 'test runs if lazy prereq is satisfied' ' + havetrue=yes +' +donthavetrue=yes +test_expect_success !LAZY_TRUE 'missing lazy prereqs skip tests' ' + donthavetrue=no +' + +if test "$havetrue$donthavetrue" != yesyes +then + say 'bug in test framework: lazy prerequisites do not work' + exit 1 +fi + +test_lazy_prereq LAZY_FALSE false +nothavefalse=no +test_expect_success !LAZY_FALSE 'negative lazy prereqs checked' ' + nothavefalse=yes +' +havefalse=yes +test_expect_success LAZY_FALSE 'missing negative lazy prereqs will skip' ' + havefalse=no +' + +if test "$nothavefalse$havefalse" != yesyes +then + say 'bug in test framework: negative lazy prerequisites do not work' + exit 1 +fi + clean=no test_expect_success 'tests clean up after themselves' ' test_when_finished clean=yes @@@ -159,19 -274,8 +296,8 @@@ the fi test_expect_success 'tests clean up even on failures' " - mkdir failing-cleanup && - ( - cd failing-cleanup && - - cat >failing-cleanup.sh <<-EOF && - #!$SHELL_PATH - - test_description='Failing tests with cleanup commands' - - # Point to the t/test-lib.sh, which isn't in ../ as usual - TEST_DIRECTORY=\"$TEST_DIRECTORY\" - . \"\$TEST_DIRECTORY\"/test-lib.sh - + test_must_fail run_sub_test_lib_test \ + failing-cleanup 'Failing tests with cleanup commands' <<-\\EOF && test_expect_success 'tests clean up even after a failure' ' touch clean-after-failure && test_when_finished rm clean-after-failure && @@@ -181,29 -285,21 +307,21 @@@ test_when_finished \"(exit 2)\" ' test_done - EOF - - chmod +x failing-cleanup.sh && - test_must_fail ./failing-cleanup.sh >out 2>err && - ! test -s err && - ! 
test -f \"trash directory.failing-cleanup/clean-after-failure\" && - sed -e 's/Z$//' -e 's/^> //' >expect <<-\\EOF && - > not ok - 1 tests clean up even after a failure + check_sub_test_lib_test failing-cleanup <<-\\EOF + > not ok 1 - tests clean up even after a failure > # Z > # touch clean-after-failure && > # test_when_finished rm clean-after-failure && > # (exit 1) > # Z - > not ok - 2 failure to clean up causes the test to fail + > not ok 2 - failure to clean up causes the test to fail > # Z > # test_when_finished \"(exit 2)\" > # Z > # failed 2 among 2 test(s) > 1..2 EOF - test_cmp expect out - ) " ################################################################ diff --combined t/test-lib.sh index f50f8341d4,30a0937dd7..8a12cbb86a --- a/t/test-lib.sh +++ b/t/test-lib.sh @@@ -15,6 -15,22 +15,6 @@@ # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses/ . -# if --tee was passed, write the output not only to the terminal, but -# additionally to the file test-results/$BASENAME.out, too. -case "$GIT_TEST_TEE_STARTED, $* " in -done,*) - # do not redirect again - ;; -*' --tee '*|*' --va'*) - mkdir -p test-results - BASE=test-results/$(basename "$0" .sh) - (GIT_TEST_TEE_STARTED=done ${SHELL-sh} "$0" "$@" 2>&1; - echo $? > $BASE.exit) | tee $BASE.out - test "$(cat $BASE.exit)" = 0 - exit - ;; -esac - # Keep the original TERM for say_color ORIGINAL_TERM=$TERM @@@ -35,34 -51,9 +35,34 @@@ the fi GIT_BUILD_DIR="$TEST_DIRECTORY"/.. +################################################################ +# It appears that people try to run tests without building... +"$GIT_BUILD_DIR/git" >/dev/null +if test $? != 1 +then + echo >&2 'error: you do not seem to have built git yet.' + exit 1 +fi + . "$GIT_BUILD_DIR"/GIT-BUILD-OPTIONS export PERL_PATH SHELL_PATH +# if --tee was passed, write the output not only to the terminal, but +# additionally to the file test-results/$BASENAME.out, too. +case "$GIT_TEST_TEE_STARTED, $* " in +done,*) + # do not redirect again + ;; +*' --tee '*|*' --va'*) + mkdir -p test-results + BASE=test-results/$(basename "$0" .sh) + (GIT_TEST_TEE_STARTED=done ${SHELL_PATH} "$0" "$@" 2>&1; + echo $? > $BASE.exit) | tee $BASE.out + test "$(cat $BASE.exit)" = 0 + exit + ;; +esac + # For repeatability, reset the environment to known value. 
LANG=C LC_ALL=C @@@ -102,27 -93,6 +102,27 @@@ export GIT_AUTHOR_EMAIL GIT_AUTHOR_NAM export GIT_COMMITTER_EMAIL GIT_COMMITTER_NAME export EDITOR +# Add libc MALLOC and MALLOC_PERTURB test +# only if we are not executing the test with valgrind +if expr " $GIT_TEST_OPTS " : ".* --valgrind " >/dev/null || + test -n "$TEST_NO_MALLOC_CHECK" +then + setup_malloc_check () { + : nothing + } + teardown_malloc_check () { + : nothing + } +else + setup_malloc_check () { + MALLOC_CHECK_=3 MALLOC_PERTURB_=165 + export MALLOC_CHECK_ MALLOC_PERTURB_ + } + teardown_malloc_check () { + unset MALLOC_CHECK_ MALLOC_PERTURB_ + } +fi + # Protect ourselves from common misconfiguration to export # CDPATH into the environment unset CDPATH @@@ -212,11 -182,13 +212,13 @@@ the error) tput bold; tput setaf 1;; # bold red skip) - tput bold; tput setaf 2;; # bold green + tput setaf 4;; # blue + warn) + tput setaf 3;; # brown/yellow pass) - tput setaf 2;; # green + tput setaf 2;; # green info) - tput setaf 3;; # brown + tput setaf 6;; # cyan *) test -n "$quiet" && return;; esac @@@ -230,7 -202,7 +232,7 @@@ els say_color() { test -z "$1" && test -n "$quiet" && return shift - echo "$*" + printf "%s\n" "$*" } fi @@@ -298,7 -270,7 +300,7 @@@ test_ok_ () test_failure_ () { test_failure=$(($test_failure + 1)) - say_color error "not ok - $test_count $1" + say_color error "not ok $test_count - $1" shift echo "$@" | sed -e 's/^/# /' test "$immediate" = "" || { GIT_EXIT_OK=t; exit 1; } @@@ -306,12 -278,12 +308,12 @@@ test_known_broken_ok_ () { test_fixed=$(($test_fixed+1)) - say_color "" "ok $test_count - $@ # TODO known breakage" + say_color error "ok $test_count - $@ # TODO known breakage vanished" } test_known_broken_failure_ () { test_broken=$(($test_broken+1)) - say_color skip "not ok $test_count - $@ # TODO known breakage" + say_color warn "not ok $test_count - $@ # TODO known breakage" } test_debug () { @@@ -332,9 -304,7 +334,9 @@@ test_run_ () if test -z "$immediate" || test $eval_ret = 0 || test -n "$expecting_failure" then + setup_malloc_check test_eval_ "$test_cleanup" + teardown_malloc_check fi if test "$verbose" = "t" && test -n "$HARNESS_ACTIVE" then @@@ -389,8 -359,7 +391,8 @@@ test_done () then test_results_dir="$TEST_OUTPUT_DIRECTORY/test-results" mkdir -p "$test_results_dir" - test_results_path="$test_results_dir/${0%.sh}-$$.counts" + base=${0##*/} + test_results_path="$test_results_dir/${base%.sh}-$$.counts" cat >>"$test_results_path" <<-EOF total $test_count @@@ -404,13 -373,18 +406,18 @@@ if test "$test_fixed" != 0 then - say_color pass "# fixed $test_fixed known breakage(s)" + say_color error "# $test_fixed known breakage(s) vanished; please update test(s)" fi if test "$test_broken" != 0 then - say_color error "# still have $test_broken known breakage(s)" - msg="remaining $(($test_count-$test_broken)) test(s)" + say_color warn "# still have $test_broken known breakage(s)" + fi + if test "$test_broken" != 0 || test "$test_fixed" != 0 + then + test_remaining=$(( $test_count - $test_broken - $test_fixed )) + msg="remaining $test_remaining test(s)" else + test_remaining=$test_count msg="$test_count test(s)" fi case "$test_failure" in @@@ -424,7 -398,7 +431,7 @@@ if test $test_external_has_tap -eq 0 then - if test $test_count -gt 0 + if test $test_remaining -gt 0 then say_color pass "# passed all $msg" fi @@@ -615,7 -589,7 +622,7 @@@ for skp in $GIT_SKIP_TEST do case "$this_test" in $skp) - say_color skip >&3 "skipping test $this_test altogether" + say_color info >&3 "skipping test $this_test altogether" 
skip_all="skip all tests in $this_test" test_done esac @@@ -739,12 -713,6 +746,12 @@@ test_lazy_prereq UTF8_NFD_TO_NFC esac ' +test_lazy_prereq AUTOIDENT ' + sane_unset GIT_AUTHOR_NAME && + sane_unset GIT_AUTHOR_EMAIL && + git var GIT_AUTHOR_IDENT +' + # When the tests are run as root, permission tests will report that # things are writable when they shouldn't be. test -w / || test_set_prereq SANITY
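
For readers following along, the lazy-prerequisite mechanism exercised by the LAZY_TRUE/LAZY_FALSE self-tests above (and used by the AUTOIDENT prerequisite at the end of this patch) is consumed from an ordinary test script roughly as sketched below. This is an illustrative sketch only: the prerequisite name EXAMPLE_PREREQ, its probe command, and the test bodies are made-up placeholders, not part of this patch.

#!/bin/sh

test_description='illustrative sketch: consuming a lazy prerequisite'

. ./test-lib.sh

# The body of a lazy prerequisite is evaluated at most once, the first
# time a test asks for it; its exit status decides whether the
# prerequisite is satisfied (placeholder probe shown here).
test_lazy_prereq EXAMPLE_PREREQ '
	ln -s target symlink-probe &&
	test -h symlink-probe
'

test_expect_success EXAMPLE_PREREQ 'runs only when the probe succeeds' '
	true
'

test_expect_success !EXAMPLE_PREREQ 'runs only when the probe fails' '
	true
'

test_done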