modification *should* take notice and update the test vectors here.
'
-################################################################
-# It appears that people try to run tests without building...
-
-../git >/dev/null
-if test $? != 1
-then
- echo >&2 'You do not seem to have built git yet.'
- exit 1
-fi
-
. ./test-lib.sh
################################################################
false
'
- test_expect_success 'pretend we have fixed a known breakage (run in sub test-lib)' "
- mkdir passing-todo &&
- (cd passing-todo &&
- cat >passing-todo.sh <<-EOF &&
- #!$SHELL_PATH
-
- test_description='A passing TODO test
+ run_sub_test_lib_test () {
+ name="$1" descr="$2" # stdin is the body of the test code
+ mkdir "$name" &&
+ (
+ cd "$name" &&
+ cat >"$name.sh" <<-EOF &&
+ #!$SHELL_PATH
+
+ test_description='$descr (run in sub test-lib)
+
+ This is run in a sub test-lib so that we do not get incorrect
+ passing metrics
+ '
+
+ # Point to the t/test-lib.sh, which isn't in ../ as usual
+ . "\$TEST_DIRECTORY"/test-lib.sh
+ EOF
+ cat >>"$name.sh" &&
+ chmod +x "$name.sh" &&
+ export TEST_DIRECTORY &&
+ ./"$name.sh" >out 2>err
+ )
+ }
- This is run in a sub test-lib so that we do not get incorrect
- passing metrics
- '
+ check_sub_test_lib_test () {
+ name="$1" # stdin is the expected output from the test
+ (
+ cd "$name" &&
+ ! test -s err &&
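+ # strip the "> " marker that sets off each expected line and the
+ # trailing "Z" used to protect significant end-of-line whitespace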
+ sed -e 's/^> //' -e 's/Z$//' >expect &&
+ test_cmp expect out
+ )
+ }
+
+ test_expect_success 'pretend we have a fully passing test suite' "
+ run_sub_test_lib_test full-pass '3 passing tests' <<-\\EOF &&
+ for i in 1 2 3
+ do
+ test_expect_success \"passing test #\$i\" 'true'
+ done
+ test_done
+ EOF
+ check_sub_test_lib_test full-pass <<-\\EOF
+ > ok 1 - passing test #1
+ > ok 2 - passing test #2
+ > ok 3 - passing test #3
+ > # passed all 3 test(s)
+ > 1..3
+ EOF
+ "
- # Point to the t/test-lib.sh, which isn't in ../ as usual
- TEST_DIRECTORY=\"$TEST_DIRECTORY\"
- . \"\$TEST_DIRECTORY\"/test-lib.sh
+ test_expect_success 'pretend we have a partially passing test suite' "
+ test_must_fail run_sub_test_lib_test \
+ partial-pass '2/3 tests passing' <<-\\EOF &&
+ test_expect_success 'passing test #1' 'true'
+ test_expect_success 'failing test #2' 'false'
+ test_expect_success 'passing test #3' 'true'
+ test_done
+ EOF
+ check_sub_test_lib_test partial-pass <<-\\EOF
+ > ok 1 - passing test #1
+ > not ok 2 - failing test #2
+ > # false
+ > ok 3 - passing test #3
+ > # failed 1 among 3 test(s)
+ > 1..3
+ EOF
+ "
- test_expect_failure 'pretend we have fixed a known breakage' '
- :
- '
+ test_expect_success 'pretend we have a known breakage' "
+ run_sub_test_lib_test failing-todo 'A failing TODO test' <<-\\EOF &&
+ test_expect_success 'passing test' 'true'
+ test_expect_failure 'pretend we have a known breakage' 'false'
+ test_done
+ EOF
+ check_sub_test_lib_test failing-todo <<-\\EOF
+ > ok 1 - passing test
+ > not ok 2 - pretend we have a known breakage # TODO known breakage
+ > # still have 1 known breakage(s)
+ > # passed all remaining 1 test(s)
+ > 1..2
+ EOF
+ "
+ test_expect_success 'pretend we have fixed a known breakage' "
+ run_sub_test_lib_test passing-todo 'A passing TODO test' <<-\\EOF &&
+ test_expect_failure 'pretend we have fixed a known breakage' 'true'
test_done
EOF
- chmod +x passing-todo.sh &&
- ./passing-todo.sh >out 2>err &&
- ! test -s err &&
- sed -e 's/^> //' >expect <<-\\EOF &&
- > ok 1 - pretend we have fixed a known breakage # TODO known breakage
- > # fixed 1 known breakage(s)
- > # passed all 1 test(s)
+ check_sub_test_lib_test passing-todo <<-\\EOF
+ > ok 1 - pretend we have fixed a known breakage # TODO known breakage vanished
+ > # 1 known breakage(s) vanished; please update test(s)
> 1..1
EOF
- test_cmp expect out)
"
+
+ test_expect_success 'pretend we have fixed one of two known breakages (run in sub test-lib)' "
+ run_sub_test_lib_test partially-passing-todos \
+ '2 TODO tests, one passing' <<-\\EOF &&
+ test_expect_failure 'pretend we have a known breakage' 'false'
+ test_expect_success 'pretend we have a passing test' 'true'
+ test_expect_failure 'pretend we have fixed another known breakage' 'true'
+ test_done
+ EOF
+ check_sub_test_lib_test partially-passing-todos <<-\\EOF
+ > not ok 1 - pretend we have a known breakage # TODO known breakage
+ > ok 2 - pretend we have a passing test
+ > ok 3 - pretend we have fixed another known breakage # TODO known breakage vanished
+ > # 1 known breakage(s) vanished; please update test(s)
+ > # still have 1 known breakage(s)
+ > # passed all remaining 1 test(s)
+ > 1..3
+ EOF
+ "
+
+ test_expect_success 'pretend we have a pass, fail, and known breakage' "
+ test_must_fail run_sub_test_lib_test \
+ mixed-results1 'mixed results #1' <<-\\EOF &&
+ test_expect_success 'passing test' 'true'
+ test_expect_success 'failing test' 'false'
+ test_expect_failure 'pretend we have a known breakage' 'false'
+ test_done
+ EOF
+ check_sub_test_lib_test mixed-results1 <<-\\EOF
+ > ok 1 - passing test
+ > not ok 2 - failing test
+ > # false
+ > not ok 3 - pretend we have a known breakage # TODO known breakage
+ > # still have 1 known breakage(s)
+ > # failed 1 among remaining 2 test(s)
+ > 1..3
+ EOF
+ "
+
+ test_expect_success 'pretend we have a mix of all possible results' "
+ test_must_fail run_sub_test_lib_test \
+ mixed-results2 'mixed results #2' <<-\\EOF &&
+ test_expect_success 'passing test' 'true'
+ test_expect_success 'passing test' 'true'
+ test_expect_success 'passing test' 'true'
+ test_expect_success 'passing test' 'true'
+ test_expect_success 'failing test' 'false'
+ test_expect_success 'failing test' 'false'
+ test_expect_success 'failing test' 'false'
+ test_expect_failure 'pretend we have a known breakage' 'false'
+ test_expect_failure 'pretend we have a known breakage' 'false'
+ test_expect_failure 'pretend we have fixed a known breakage' 'true'
+ test_done
+ EOF
+ check_sub_test_lib_test mixed-results2 <<-\\EOF
+ > ok 1 - passing test
+ > ok 2 - passing test
+ > ok 3 - passing test
+ > ok 4 - passing test
+ > not ok 5 - failing test
+ > # false
+ > not ok 6 - failing test
+ > # false
+ > not ok 7 - failing test
+ > # false
+ > not ok 8 - pretend we have a known breakage # TODO known breakage
+ > not ok 9 - pretend we have a known breakage # TODO known breakage
+ > ok 10 - pretend we have fixed a known breakage # TODO known breakage vanished
+ > # 1 known breakage(s) vanished; please update test(s)
+ > # still have 2 known breakage(s)
+ > # failed 3 among remaining 7 test(s)
+ > 1..10
+ EOF
+ "
+
test_set_prereq HAVEIT
haveit=no
test_expect_success HAVEIT 'test runs if prerequisite is satisfied' '
exit 1
fi
+test_lazy_prereq LAZY_TRUE true
+havetrue=no
+test_expect_success LAZY_TRUE 'test runs if lazy prereq is satisfied' '
+ havetrue=yes
+'
+donthavetrue=yes
+test_expect_success !LAZY_TRUE 'missing lazy prereqs skip tests' '
+ donthavetrue=no
+'
+
+if test "$havetrue$donthavetrue" != yesyes
+then
+ say 'bug in test framework: lazy prerequisites do not work'
+ exit 1
+fi
+
+test_lazy_prereq LAZY_FALSE false
+nothavefalse=no
+test_expect_success !LAZY_FALSE 'negative lazy prereqs checked' '
+ nothavefalse=yes
+'
+havefalse=yes
+test_expect_success LAZY_FALSE 'missing negative lazy prereqs will skip' '
+ havefalse=no
+'
+
+if test "$nothavefalse$havefalse" != yesyes
+then
+ say 'bug in test framework: negative lazy prerequisites do not work'
+ exit 1
+fi
+
clean=no
test_expect_success 'tests clean up after themselves' '
test_when_finished clean=yes
fi
test_expect_success 'tests clean up even on failures' "
- mkdir failing-cleanup &&
- (
- cd failing-cleanup &&
-
- cat >failing-cleanup.sh <<-EOF &&
- #!$SHELL_PATH
-
- test_description='Failing tests with cleanup commands'
-
- # Point to the t/test-lib.sh, which isn't in ../ as usual
- TEST_DIRECTORY=\"$TEST_DIRECTORY\"
- . \"\$TEST_DIRECTORY\"/test-lib.sh
-
+ test_must_fail run_sub_test_lib_test \
+ failing-cleanup 'Failing tests with cleanup commands' <<-\\EOF &&
test_expect_success 'tests clean up even after a failure' '
touch clean-after-failure &&
test_when_finished rm clean-after-failure &&
test_when_finished \"(exit 2)\"
'
test_done
-
EOF
-
- chmod +x failing-cleanup.sh &&
- test_must_fail ./failing-cleanup.sh >out 2>err &&
- ! test -s err &&
- ! test -f \"trash directory.failing-cleanup/clean-after-failure\" &&
- sed -e 's/Z$//' -e 's/^> //' >expect <<-\\EOF &&
- > not ok - 1 tests clean up even after a failure
+ check_sub_test_lib_test failing-cleanup <<-\\EOF
+ > not ok 1 - tests clean up even after a failure
> # Z
> # touch clean-after-failure &&
> # test_when_finished rm clean-after-failure &&
> # (exit 1)
> # Z
- > not ok - 2 failure to clean up causes the test to fail
+ > not ok 2 - failure to clean up causes the test to fail
> # Z
> # test_when_finished \"(exit 2)\"
> # Z
> # failed 2 among 2 test(s)
> 1..2
EOF
- test_cmp expect out
- )
"
################################################################
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/ .
-# if --tee was passed, write the output not only to the terminal, but
-# additionally to the file test-results/$BASENAME.out, too.
-case "$GIT_TEST_TEE_STARTED, $* " in
-done,*)
- # do not redirect again
- ;;
-*' --tee '*|*' --va'*)
- mkdir -p test-results
- BASE=test-results/$(basename "$0" .sh)
- (GIT_TEST_TEE_STARTED=done ${SHELL-sh} "$0" "$@" 2>&1;
- echo $? > $BASE.exit) | tee $BASE.out
- test "$(cat $BASE.exit)" = 0
- exit
- ;;
-esac
-
# Keep the original TERM for say_color
ORIGINAL_TERM=$TERM
fi
GIT_BUILD_DIR="$TEST_DIRECTORY"/..
+################################################################
+# It appears that people try to run tests without building...
+"$GIT_BUILD_DIR/git" >/dev/null
+if test $? != 1
+then
+ echo >&2 'error: you do not seem to have built git yet.'
+ exit 1
+fi
+
. "$GIT_BUILD_DIR"/GIT-BUILD-OPTIONS
export PERL_PATH SHELL_PATH
+# if --tee was passed, write the output not only to the terminal, but
+# additionally to the file test-results/$BASENAME.out, too.
+case "$GIT_TEST_TEE_STARTED, $* " in
+done,*)
+ # do not redirect again
+ ;;
+*' --tee '*|*' --va'*)
+ mkdir -p test-results
+ BASE=test-results/$(basename "$0" .sh)
+ (GIT_TEST_TEE_STARTED=done ${SHELL_PATH} "$0" "$@" 2>&1;
+ echo $? > $BASE.exit) | tee $BASE.out
+ test "$(cat $BASE.exit)" = 0
+ exit
+ ;;
+esac
+
# For repeatability, reset the environment to known value.
LANG=C
LC_ALL=C
export GIT_COMMITTER_EMAIL GIT_COMMITTER_NAME
export EDITOR
+# Enable glibc's MALLOC_CHECK_ and MALLOC_PERTURB_ heap checks,
+# but only if we are not executing the test with valgrind
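+# (MALLOC_CHECK_=3 makes glibc print a diagnostic and abort on detected
+# heap corruption; MALLOC_PERTURB_=165, i.e. 0xa5, poisons newly
+# allocated and freed memory so stale heap reads are more likely to fail)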
+if expr " $GIT_TEST_OPTS " : ".* --valgrind " >/dev/null ||
+ test -n "$TEST_NO_MALLOC_CHECK"
+then
+ setup_malloc_check () {
+ : nothing
+ }
+ teardown_malloc_check () {
+ : nothing
+ }
+else
+ setup_malloc_check () {
+ MALLOC_CHECK_=3 MALLOC_PERTURB_=165
+ export MALLOC_CHECK_ MALLOC_PERTURB_
+ }
+ teardown_malloc_check () {
+ unset MALLOC_CHECK_ MALLOC_PERTURB_
+ }
+fi
+
# Protect ourselves from common misconfiguration to export
# CDPATH into the environment
unset CDPATH
error)
tput bold; tput setaf 1;; # bold red
skip)
- tput bold; tput setaf 2;; # bold green
+ tput setaf 4;; # blue
+ warn)
+ tput setaf 3;; # brown/yellow
pass)
- tput setaf 2;; # green
+ tput setaf 2;; # green
info)
- tput setaf 3;; # brown
+ tput setaf 6;; # cyan
*)
test -n "$quiet" && return;;
esac
say_color() {
test -z "$1" && test -n "$quiet" && return
shift
- echo "$*"
+ printf "%s\n" "$*"
}
fi
test_failure_ () {
test_failure=$(($test_failure + 1))
- say_color error "not ok - $test_count $1"
+ say_color error "not ok $test_count - $1"
shift
echo "$@" | sed -e 's/^/# /'
test "$immediate" = "" || { GIT_EXIT_OK=t; exit 1; }
test_known_broken_ok_ () {
test_fixed=$(($test_fixed+1))
- say_color "" "ok $test_count - $@ # TODO known breakage"
+ say_color error "ok $test_count - $@ # TODO known breakage vanished"
}
test_known_broken_failure_ () {
test_broken=$(($test_broken+1))
- say_color skip "not ok $test_count - $@ # TODO known breakage"
+ say_color warn "not ok $test_count - $@ # TODO known breakage"
}
test_debug () {
if test -z "$immediate" || test $eval_ret = 0 || test -n "$expecting_failure"
then
+ setup_malloc_check
test_eval_ "$test_cleanup"
+ teardown_malloc_check
fi
if test "$verbose" = "t" && test -n "$HARNESS_ACTIVE"
then
then
test_results_dir="$TEST_OUTPUT_DIRECTORY/test-results"
mkdir -p "$test_results_dir"
- test_results_path="$test_results_dir/${0%.sh}-$$.counts"
+ base=${0##*/}
+ test_results_path="$test_results_dir/${base%.sh}-$$.counts"
cat >>"$test_results_path" <<-EOF
total $test_count
if test "$test_fixed" != 0
then
- say_color pass "# fixed $test_fixed known breakage(s)"
+ say_color error "# $test_fixed known breakage(s) vanished; please update test(s)"
fi
if test "$test_broken" != 0
then
- say_color error "# still have $test_broken known breakage(s)"
- msg="remaining $(($test_count-$test_broken)) test(s)"
+ say_color warn "# still have $test_broken known breakage(s)"
+ fi
+ if test "$test_broken" != 0 || test "$test_fixed" != 0
+ then
+ test_remaining=$(( $test_count - $test_broken - $test_fixed ))
+ msg="remaining $test_remaining test(s)"
else
+ test_remaining=$test_count
msg="$test_count test(s)"
fi
case "$test_failure" in
if test $test_external_has_tap -eq 0
then
- if test $test_count -gt 0
+ if test $test_remaining -gt 0
then
say_color pass "# passed all $msg"
fi
do
case "$this_test" in
$skp)
- say_color skip >&3 "skipping test $this_test altogether"
+ say_color info >&3 "skipping test $this_test altogether"
skip_all="skip all tests in $this_test"
test_done
esac
esac
'
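+# AUTOIDENT: can git come up with an author identity on its own
+# (e.g. from the gecos field and hostname) when GIT_AUTHOR_NAME
+# and GIT_AUTHOR_EMAIL are not set?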
+test_lazy_prereq AUTOIDENT '
+ sane_unset GIT_AUTHOR_NAME &&
+ sane_unset GIT_AUTHOR_EMAIL &&
+ git var GIT_AUTHOR_IDENT
+'
+
# When the tests are run as root, permission tests will report that
# things are writable when they shouldn't be.
test -w / || test_set_prereq SANITY