Commit de2c27a7 authored by dpranke@chromium.org

2010-08-24 Dirk Pranke <dpranke@chromium.org>

        Reviewed by Eric Seidel.

        speed up new-run-webkit-tests unit tests

        Add a --no-record-results flag that turns off generating the JSON
        results file on every test run. Generating the file requires us to
        fetch the old results down from the bots, which can be slow. This
        flag is off by default.

        Reduce the sleep time in wait_for_threads_to_finish from 0.1s to 0.01s.

        These changes together shorten the test cycle from ~4.5s to ~1.5s
        - a 3x speedup.

        https://bugs.webkit.org/show_bug.cgi?id=44553

        * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
        * Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:

git-svn-id: http://svn.webkit.org/repository/webkit/trunk@65949 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent 302794ec
Scripts/webkitpy/layout_tests/run_webkit_tests.py

@@ -687,7 +687,7 @@ class TestRunner:
                 self.update_summary(result_summary)
                 if some_thread_is_alive:
-                    time.sleep(0.1)
+                    time.sleep(0.01)
         except KeyboardInterrupt:
             keyboard_interrupted = True
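The loop above polls the worker threads rather than joining them, so the sleep sets the minimum latency for noticing that the last thread has exited. For unit tests that finish in milliseconds against a mock port, a 0.1s poll adds measurable dead time; 0.01s cuts it tenfold while the cost of the extra wake-ups stays negligible. A standalone sketch of the polling pattern (simplified: the real TestRunner method also updates the result summary and handles KeyboardInterrupt):

    import threading
    import time

    def wait_for_threads_to_finish(threads, poll_interval=0.01):
        # Poll until every worker exits. Up to poll_interval of dead
        # time remains after the last thread finishes, which is why
        # shrinking the interval speeds up very short runs.
        while any(t.is_alive() for t in threads):
            time.sleep(poll_interval)

    workers = [threading.Thread(target=time.sleep, args=(0.05,))
               for _ in range(4)]
    for w in workers:
        w.start()
    wait_for_threads_to_finish(workers)
    print("all workers done")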
@@ -779,12 +779,13 @@ class TestRunner:
             self._expectations, result_summary, retry_summary)
         self._printer.print_unexpected_results(unexpected_results)

-        # Write the same data to log files.
-        self._write_json_files(unexpected_results, result_summary,
-                               individual_test_timings)
+        if self._options.record_results:
+            # Write the same data to log files.
+            self._write_json_files(unexpected_results, result_summary,
+                                   individual_test_timings)

-        # Upload generated JSON files to appengine server.
-        self._upload_json_files()
+            # Upload generated JSON files to appengine server.
+            self._upload_json_files()

         # Write the summary to disk (results.html) and display it if requested.
         wrote_results = self._write_results_html_file(result_summary)
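The restructuring above gates the slow path: generating the JSON requires fetching the previous results down from the bots over the network, so both the write and the upload now sit behind record_results, while the cheap, local results.html is still always written. A trimmed, hypothetical illustration of that control flow (not the actual TestRunner code):

    class Options(object):
        def __init__(self, record_results=True):
            # True by default, matching the parse_args default below.
            self.record_results = record_results

    def finish_run(options):
        if options.record_results:
            print("write JSON files (fetches old results from the bots -- slow)")
            print("upload JSON files to the appengine server")
        print("write results.html (local and cheap, always done)")

    finish_run(Options())                      # normal run: records results
    finish_run(Options(record_results=False))  # unit-test run: skips the slow path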
@@ -1545,6 +1546,9 @@ def parse_args(args=None):
             default=False, help="Clobbers test results from previous runs."),
         optparse.make_option("--platform",
             help="Override the platform for expected results"),
+        optparse.make_option("--no-record-results", action="store_false",
+            default=True, dest="record_results",
+            help="Don't record the results."),
         # old-run-webkit-tests also has HTTP toggle options:
         # --[no-]http                     Run (or do not run) http tests
         #                                 (default: run)
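The new option is a standard optparse negative flag: action="store_false" with default=True on dest="record_results" means recording stays on unless --no-record-results is passed. A standalone check of those semantics (using add_option directly, rather than the make_option list run_webkit_tests builds):

    import optparse

    parser = optparse.OptionParser()
    parser.add_option("--no-record-results", action="store_false",
                      default=True, dest="record_results",
                      help="Don't record the results.")

    options, _ = parser.parse_args([])
    print(options.record_results)   # True: recording is on by default

    options, _ = parser.parse_args(["--no-record-results"])
    print(options.record_results)   # False: the flag turns recording off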
Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py

@@ -48,9 +48,10 @@ from webkitpy.layout_tests.layout_package import dump_render_tree_thread
 from webkitpy.thirdparty.mock import Mock


-def passing_run(args, port_obj=None, logging_included=False):
-    if not logging_included:
-        args.extend(['--print', 'nothing'])
+def passing_run(args, port_obj=None, record_results=False):
+    args.extend(['--print', 'nothing'])
+    if not record_results:
+        args.append('--no-record-results')
     options, args = run_webkit_tests.parse_args(args)
     if port_obj is None:
         port_obj = port.get(options.platform, options)
@@ -58,6 +59,7 @@ def passing_run(args, port_obj=None, logging_included=False):
     return res == 0

 def logging_run(args):
+    args.extend(['--no-record-results'])
     options, args = run_webkit_tests.parse_args(args)
     port_obj = port.get(options.platform, options)
     buildbot_output = array_stream.ArrayStream()
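After these changes the two helpers build slightly different argument lists: passing_run silences output and, unless the caller asks for record_results=True, disables recording; logging_run keeps output (its callers inspect it) but always disables recording. A hypothetical trace of the lists they hand to parse_args:

    base = ['--platform', 'test']

    # passing_run(base) with the default record_results=False:
    print(base + ['--print', 'nothing', '--no-record-results'])

    # passing_run(base, record_results=True), used by test_last_results,
    # which needs prior results on disk to print:
    print(base + ['--print', 'nothing'])

    # logging_run(base):
    print(base + ['--no-record-results'])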
@@ -74,17 +76,15 @@ class MainTest(unittest.TestCase):
         self.assertTrue(passing_run(['--platform', 'test', '--run-singly']))
         self.assertTrue(passing_run(['--platform', 'test',
                                      'text/article-element.html']))
-        self.assertTrue(passing_run(['--platform', 'test',
-                                     '--child-processes', '1',
-                                     '--print', 'unexpected']))

-    def test_child_processes(self):
+    def test_one_child_process(self):
         (res, buildbot_output, regular_output) = logging_run(
             ['--platform', 'test', '--print', 'config', '--child-processes',
              '1'])
         self.assertTrue('Running one DumpRenderTree\n'
                         in regular_output.get())

+    def test_two_child_processes(self):
         (res, buildbot_output, regular_output) = logging_run(
             ['--platform', 'test', '--print', 'config', '--child-processes',
              '2'])
@@ -92,14 +92,13 @@ class MainTest(unittest.TestCase):
                         in regular_output.get())

     def test_last_results(self):
-        passing_run(['--platform', 'test'])
+        passing_run(['--platform', 'test'], record_results=True)
         (res, buildbot_output, regular_output) = logging_run(
             ['--platform', 'test', '--print-last-failures'])
         self.assertEqual(regular_output.get(), ['\n\n'])
         self.assertEqual(buildbot_output.get(), [])

 def _mocked_open(original_open, file_list):
     def _wrapper(name, mode, encoding):
         if name.find("-expected.") != -1 and mode == "w":