Rev 4644: Enable --lsprof-tests on bzr selftest. in http://bazaar.launchpad.net/~lifeless/bzr/test-speed
Robert Collins
robertc at robertcollins.net
Mon Aug 24 23:33:03 BST 2009
At http://bazaar.launchpad.net/~lifeless/bzr/test-speed
------------------------------------------------------------
revno: 4644
revision-id: robertc at robertcollins.net-20090824223253-rmp6ccjidwkgguml
parent: robertc at robertcollins.net-20090824210509-pproia2q9evq1nsl
committer: Robert Collins <robertc at robertcollins.net>
branch nick: test-speed
timestamp: Tue 2009-08-25 08:32:53 +1000
message:
Enable --lsprof-tests on bzr selftest.
=== modified file 'NEWS'
--- a/NEWS 2009-08-24 21:05:09 +0000
+++ b/NEWS 2009-08-24 22:32:53 +0000
@@ -74,6 +74,9 @@
Testing
*******
+* Passing ``--lsprof-tests -v`` to bzr selftest will cause lsprof output to
+  be produced for every test. Note that this is very verbose! (Robert Collins)
+
bzr 1.18
########
=== modified file 'bzrlib/builtins.py'
--- a/bzrlib/builtins.py 2009-08-11 18:00:37 +0000
+++ b/bzrlib/builtins.py 2009-08-24 22:32:53 +0000
@@ -3369,6 +3369,8 @@
Option('lsprof-timed',
help='Generate lsprof output for benchmarked'
' sections of code.'),
+ Option('lsprof-tests',
+ help='Generate lsprof output for each test.'),
Option('cache-dir', type=str,
help='Cache intermediate benchmark output in this '
'directory.'),
@@ -3415,7 +3417,7 @@
first=False, list_only=False,
randomize=None, exclude=None, strict=False,
load_list=None, debugflag=None, starting_with=None, subunit=False,
- parallel=None):
+ parallel=None, lsprof_tests=False):
from bzrlib.tests import selftest
import bzrlib.benchmarks as benchmarks
from bzrlib.benchmarks import tree_creator
@@ -3455,6 +3457,7 @@
"transport": transport,
"test_suite_factory": test_suite_factory,
"lsprof_timed": lsprof_timed,
+ "lsprof_tests": lsprof_tests,
"bench_history": benchfile,
"matching_tests_first": first,
"list_only": list_only,
=== modified file 'bzrlib/tests/__init__.py'
--- a/bzrlib/tests/__init__.py 2009-08-24 05:23:11 +0000
+++ b/bzrlib/tests/__init__.py 2009-08-24 22:32:53 +0000
@@ -579,13 +579,22 @@
bench_history=None,
list_only=False,
strict=False,
+ result_decorators=None,
):
+ """Create a TextTestRunner.
+
+ :param result_decorators: An optional list of decorators to apply
+ to the result object being used by the runner. Decorators are
+ applied left to right - the first element in the list is the
+ innermost decorator.
+ """
self.stream = unittest._WritelnDecorator(stream)
self.descriptions = descriptions
self.verbosity = verbosity
self._bench_history = bench_history
self.list_only = list_only
self._strict = strict
+ self._result_decorators = result_decorators or []
def run(self, test):
"Run the given test case or test suite."
@@ -600,6 +609,9 @@
bench_history=self._bench_history,
strict=self._strict,
)
+ run_result = result
+ for decorator in self._result_decorators:
+ run_result = decorator(run_result)
result.stop_early = self.stop_on_failure
result.report_starting()
if self.list_only:
@@ -614,13 +626,13 @@
try:
import testtools
except ImportError:
- test.run(result)
+ test.run(run_result)
else:
if isinstance(test, testtools.ConcurrentTestSuite):
# We need to catch bzr specific behaviors
- test.run(BZRTransformingResult(result))
+ test.run(BZRTransformingResult(run_result))
else:
- test.run(result)
+ test.run(run_result)
run = result.testsRun
actionTaken = "Ran"
stopTime = time.time()
@@ -2762,7 +2774,9 @@
strict=False,
runner_class=None,
suite_decorators=None,
- stream=None):
+ stream=None,
+ result_decorators=None,
+ ):
"""Run a test suite for bzr selftest.
:param runner_class: The class of runner to use. Must support the
@@ -2785,6 +2799,7 @@
bench_history=bench_history,
list_only=list_only,
strict=strict,
+ result_decorators=result_decorators,
)
runner.stop_on_failure=stop_on_failure
# built in decorator factories:
@@ -3131,7 +3146,7 @@
return result
-class BZRTransformingResult(unittest.TestResult):
+class ForwardingResult(unittest.TestResult):
def __init__(self, target):
unittest.TestResult.__init__(self)
@@ -3143,6 +3158,21 @@
def stopTest(self, test):
self.result.stopTest(test)
+ def addSkip(self, test, reason):
+ self.result.addSkip(test, reason)
+
+ def addSuccess(self, test):
+ self.result.addSuccess(test)
+
+ def addError(self, test, err):
+ self.result.addError(test, err)
+
+ def addFailure(self, test, err):
+ self.result.addFailure(test, err)
+
+
+class BZRTransformingResult(ForwardingResult):
+
def addError(self, test, err):
feature = self._error_looks_like('UnavailableFeature: ', err)
if feature is not None:
@@ -3158,12 +3188,6 @@
else:
self.result.addFailure(test, err)
- def addSkip(self, test, reason):
- self.result.addSkip(test, reason)
-
- def addSuccess(self, test):
- self.result.addSuccess(test)
-
def _error_looks_like(self, prefix, err):
"""Deserialize exception and returns the stringify value."""
import subunit
@@ -3181,6 +3205,38 @@
return value
+class ProfileResult(ForwardingResult):
+ """Generate profiling data for all activity between start and success.
+
+ The profile data is appended to the test's _benchcalls attribute and can
+ be accessed by the forwarded-to TestResult.
+
+    While it might be cleaner to accumulate this in stopTest, addSuccess is
+    where our existing output support for lsprof is, and this class aims to
+    fit in with that: while it could be moved it's not necessary to accomplish
+    test profiling, nor would it be dramatically cleaner.
+ """
+
+ def startTest(self, test):
+ self.profiler = bzrlib.lsprof.BzrProfiler()
+ self.profiler.start()
+ ForwardingResult.startTest(self, test)
+
+ def addSuccess(self, test):
+ stats = self.profiler.stop()
+ try:
+ calls = test._benchcalls
+ except AttributeError:
+ test._benchcalls = []
+ calls = test._benchcalls
+ calls.append(((test.id(), "", ""), stats))
+ ForwardingResult.addSuccess(self, test)
+
+ def stopTest(self, test):
+ ForwardingResult.stopTest(self, test)
+ self.profiler = None
+
+
# Controlled by "bzr selftest -E=..." option
# Currently supported:
# -Eallow_debug Will no longer clear debug.debug_flags() so it
@@ -3208,6 +3264,7 @@
runner_class=None,
suite_decorators=None,
stream=None,
+ lsprof_tests=False,
):
"""Run the whole test suite under the enhanced runner"""
# XXX: Very ugly way to do this...
@@ -3239,6 +3296,9 @@
if starting_with:
# But always filter as requested.
suite = filter_suite_by_id_startswith(suite, starting_with)
+ result_decorators = []
+ if lsprof_tests:
+ result_decorators.append(ProfileResult)
return run_suite(suite, 'testbzr', verbose=verbose, pattern=pattern,
stop_on_failure=stop_on_failure,
transport=transport,
@@ -3252,6 +3312,7 @@
runner_class=runner_class,
suite_decorators=suite_decorators,
stream=stream,
+ result_decorators=result_decorators,
)
finally:
default_transport = old_transport
=== modified file 'bzrlib/tests/blackbox/test_selftest.py'
--- a/bzrlib/tests/blackbox/test_selftest.py 2009-08-24 05:23:11 +0000
+++ b/bzrlib/tests/blackbox/test_selftest.py 2009-08-24 22:32:53 +0000
@@ -172,3 +172,7 @@
outputs_nothing(['selftest', '--list-only', '--exclude', 'selftest'])
finally:
tests.selftest = original_selftest
+
+ def test_lsprof_tests(self):
+ params = self.get_params_passed_to_core('selftest --lsprof-tests')
+ self.assertEqual(True, params[1]["lsprof_tests"])
=== modified file 'bzrlib/tests/test_selftest.py'
--- a/bzrlib/tests/test_selftest.py 2009-08-24 20:28:25 +0000
+++ b/bzrlib/tests/test_selftest.py 2009-08-24 22:32:53 +0000
@@ -687,6 +687,25 @@
self.assertEqual(url, t.clone('..').base)
+class TestProfileResult(tests.TestCase):
+
+ def test_profiles_tests(self):
+ terminal = unittest.TestResult()
+ result = tests.ProfileResult(terminal)
+ class Sample(tests.TestCase):
+ def a(self):
+ self.sample_function()
+ def sample_function(self):
+ pass
+ test = Sample("a")
+ test.attrs_to_keep = test.attrs_to_keep + ('_benchcalls',)
+ test.run(result)
+ self.assertLength(1, test._benchcalls)
+ # We must be able to unpack it as the test reporting code wants
+ (_, _, _), stats = test._benchcalls[0]
+ self.assertTrue(callable(stats.pprint))
+
+
class TestTestResult(tests.TestCase):
def check_timing(self, test_case, expected_re):
@@ -1031,6 +1050,20 @@
'\n'
'OK \\(known_failures=1\\)\n')
+ def test_result_decorator(self):
+ # decorate results
+ calls = []
+ class LoggingDecorator(tests.ForwardingResult):
+ def startTest(self, test):
+ tests.ForwardingResult.startTest(self, test)
+ calls.append('start')
+ test = unittest.FunctionTestCase(lambda:None)
+ stream = StringIO()
+ runner = tests.TextTestRunner(stream=stream,
+ result_decorators=[LoggingDecorator])
+ result = self.run_test_runner(runner, test)
+ self.assertLength(1, calls)
+
def test_skipped_test(self):
# run a test that is skipped, and check the suite as a whole still
# succeeds.
@@ -1103,10 +1136,6 @@
self.assertContainsRe(out.getvalue(),
r'(?m)^ this test never runs')
- def test_not_applicable_demo(self):
- # just so you can see it in the test output
- raise tests.TestNotApplicable('this test is just a demonstation')
-
def test_unsupported_features_listed(self):
"""When unsupported features are encountered they are detailed."""
class Feature1(tests.Feature):
@@ -1818,6 +1847,19 @@
self.assertNotContainsRe("Test.b", output.getvalue())
self.assertLength(2, output.readlines())
+ def test_lsprof_tests(self):
+ calls = []
+ class Test(object):
+ def __call__(test, result):
+ test.run(result)
+ def run(test, result):
+ self.assertIsInstance(result, tests.ForwardingResult)
+ calls.append("called")
+ def countTestCases(self):
+ return 1
+ self.run_selftest(test_suite_factory=Test, lsprof_tests=True)
+ self.assertLength(1, calls)
+
def test_random(self):
# test randomising by listing a number of tests.
output_123 = self.run_selftest(test_suite_factory=self.factory,
More information about the bazaar-commits
mailing list