Commit 8ca1df17 authored by dpranke@chromium.org

2010-11-23 Dirk Pranke <dpranke@chromium.org>

        Reviewed by Tony Chang.

        This patch cleans up the test-sharding logic a bit and adds the
        --worker-model flag to NRWT. For now the flag only controls
        whether we run single-threaded or not, but eventually it will
        also allow toggling between threads and processes.

        Also adds _test_is_slow(), a minor cleanup that eliminates some
        repetition and gives slightly better encapsulation.

        https://bugs.webkit.org/show_bug.cgi?id=49773

        * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
        * Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py:

git-svn-id: http://svn.webkit.org/repository/webkit/trunk@72641 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent cf0afed8
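Before the diff, here is a rough sketch of the sharding behavior the patch description refers to. It is an illustration only, not code from the patch: the standalone shard_tests_sketch() helper below is hypothetical, while names such as use_real_shards and tests_to_http_lock come from the change itself. The real logic lives in TestRunner._shard_tests(), builds TestInput objects, and splits on os.sep rather than '/'.

# Hypothetical sketch of the sharding described above (not the patch itself).
# Tests under http/ or websocket/ need the HTTP lock, so they are all lumped
# into a single shard that is scheduled first.
import os


def shard_tests_sketch(test_files, use_real_shards):
    tests_to_http_lock = []
    shards = {}  # shard name -> list of test files

    for test_file in test_files:
        split_path = test_file.split('/')  # the real code splits on os.sep
        if 'http' in split_path or 'websocket' in split_path:
            tests_to_http_lock.append(test_file)
        elif use_real_shards:
            # Group by directory so tests that (incorrectly) depend on each
            # other keep running together.
            shards.setdefault(os.path.dirname(test_file), []).append(test_file)
        else:
            # One shard per test: maximum concurrency, no real sharding.
            shards[test_file] = [test_file]

    test_lists = list(shards.items())
    if tests_to_http_lock:
        # The locked shard goes first so the HTTP lock is acquired early.
        test_lists.insert(0, ("tests_to_http_lock", tests_to_http_lock))
    return test_lists


# Example: with real shards, the two animations tests share one group.
print(shard_tests_sketch(["LayoutTests/http/tests/misc/a.html",
                          "LayoutTests/animations/b.html",
                          "LayoutTests/animations/c.html"],
                         use_real_shards=True))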
Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -485,7 +485,7 @@ class TestRunner:
         """Returns the appropriate TestInput object for the file. Mostly this
         is used for looking up the timeout value (in ms) to use for the given
         test."""
-        if self._expectations.has_modifier(test_file, test_expectations.SLOW):
+        if self._test_is_slow(test_file):
             return TestInput(test_file, self._options.slow_time_out_ms)
         return TestInput(test_file, self._options.time_out_ms)
@@ -495,23 +495,30 @@ class TestRunner:
         split_path = test_file.split(os.sep)
         return 'http' in split_path or 'websocket' in split_path
 
-    def _get_test_file_queue(self, test_files):
-        """Create the thread safe queue of lists of (test filenames, test URIs)
-        tuples. Each TestShellThread pulls a list from this queue and runs
-        those tests in order before grabbing the next available list.
+    def _test_is_slow(self, test_file):
+        return self._expectations.has_modifier(test_file,
+                                               test_expectations.SLOW)
 
-        Shard the lists by directory. This helps ensure that tests that depend
-        on each other (aka bad tests!) continue to run together as most
-        cross-tests dependencies tend to occur within the same directory.
+    def _shard_tests(self, test_files, use_real_shards):
+        """Groups tests into batches.
+        This helps ensure that tests that depend on each other (aka bad tests!)
+        continue to run together as most cross-tests dependencies tend to
+        occur within the same directory. If use_real_shards is false, we
+        put each (non-HTTP/websocket) test into its own shard for maximum
+        concurrency instead of trying to do any sort of real sharding.
 
         Return:
-          The Queue of lists of TestInput objects.
+            A list of lists of TestInput objects.
         """
+        # FIXME: when we added http locking, we changed how this works such
+        # that we always lump all of the HTTP threads into a single shard.
+        # That will slow down experimental-fully-parallel, but it's unclear
+        # what the best alternative is completely revamping how we track
+        # when to grab the lock.
         test_lists = []
         tests_to_http_lock = []
-        if (self._options.experimental_fully_parallel or
-            self._is_single_threaded()):
+        if not use_real_shards:
             for test_file in test_files:
                 test_input = self._get_test_input_for_file(test_file)
                 if self._test_requires_lock(test_file):
@@ -550,10 +557,7 @@ class TestRunner:
             tests_to_http_lock.reverse()
             test_lists.insert(0, ("tests_to_http_lock", tests_to_http_lock))
 
-        filename_queue = Queue.Queue()
-        for item in test_lists:
-            filename_queue.put(item)
-        return filename_queue
+        return test_lists
 
     def _contains_tests(self, subdir):
         for test_file in self._test_files:
@@ -568,25 +572,29 @@ class TestRunner:
         Return:
           The list of threads.
         """
-        filename_queue = self._get_test_file_queue(test_files)
+        num_workers = self._num_workers()
+        test_lists = self._shard_tests(test_files,
+            num_workers > 1 and not self._options.experimental_fully_parallel)
+        filename_queue = Queue.Queue()
+        for item in test_lists:
+            filename_queue.put(item)
 
         # Instantiate TestShellThreads and start them.
         threads = []
-        for worker_number in xrange(int(self._options.child_processes)):
+        for worker_number in xrange(num_workers):
             thread = dump_render_tree_thread.TestShellThread(self._port,
                 self._options, worker_number,
                 filename_queue, self._result_queue)
-            if self._is_single_threaded():
-                thread.run_in_main_thread(self, result_summary)
-            else:
+            if num_workers > 1:
                 thread.start()
+            else:
+                thread.run_in_main_thread(self, result_summary)
             threads.append(thread)
 
         return threads
 
-    def _is_single_threaded(self):
-        """Returns whether we should run all the tests in the main thread."""
-        return int(self._options.child_processes) == 1
+    def _num_workers(self):
+        return int(self._options.child_processes)
 
     def _run_tests(self, file_list, result_summary):
         """Runs the tests in the file_list.
@@ -602,9 +610,8 @@ class TestRunner:
               in the form {filename:filename, test_run_time:test_run_time}
             result_summary: summary object to populate with the results
         """
         # FIXME: We should use webkitpy.tool.grammar.pluralize here.
         plural = ""
-        if not self._is_single_threaded():
+        if self._num_workers() > 1:
             plural = "s"
         self._printer.print_update('Starting %s%s ...' %
                                    (self._port.driver_name(), plural))
@@ -924,12 +931,13 @@ class TestRunner:
                        (self._options.time_out_ms,
                         self._options.slow_time_out_ms))
-        if self._is_single_threaded():
+        if self._num_workers() == 1:
             p.print_config("Running one %s" % self._port.driver_name())
         else:
             p.print_config("Running %s %ss in parallel" %
                            (self._options.child_processes,
                             self._port.driver_name()))
+        p.print_config("Worker model: %s" % self._options.worker_model)
         p.print_config("")
 
     def _print_expected_results_of_type(self, result_summary,
@@ -1044,8 +1052,7 @@ class TestRunner:
         for test_tuple in individual_test_timings:
             filename = test_tuple.filename
             is_timeout_crash_or_slow = False
-            if self._expectations.has_modifier(filename,
-                                               test_expectations.SLOW):
+            if self._test_is_slow(filename):
                 is_timeout_crash_or_slow = True
                 slow_tests.append(test_tuple)
@@ -1360,8 +1367,11 @@ def run(port, options, args, regular_output=sys.stderr,
 def _set_up_derived_options(port_obj, options):
     """Sets the options values that depend on other options values."""
+    if options.worker_model == 'inline':
+        if options.child_processes and int(options.child_processes) > 1:
+            _log.warning("--worker-model=inline overrides --child-processes")
+        options.child_processes = "1"
     if not options.child_processes:
         # FIXME: Investigate perf/flakiness impact of using cpu_count + 1.
         options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES",
                                                  str(port_obj.default_child_processes()))
@@ -1584,6 +1594,10 @@ def parse_args(args=None):
         optparse.make_option("--child-processes",
             help="Number of DumpRenderTrees to run in parallel."),
         # FIXME: Display default number of child processes that will run.
+        optparse.make_option("--worker-model", action="store",
+            default="threads",
+            help="controls worker model. Valid values are "
+                 "'inline' and 'threads' (default)."),
         optparse.make_option("--experimental-fully-parallel",
             action="store_true", default=False,
             help="run all tests in parallel"),
@@ -1641,8 +1655,11 @@ def parse_args(args=None):
     options, args = option_parser.parse_args(args)
-    return options, args
+
+    if options.worker_model not in ('inline', 'threads'):
+        option_parser.error("unsupported value for --worker-model: %s" %
+                            options.worker_model)
+
+    return options, args
 
 
 def main():
@@ -1650,6 +1667,7 @@ def main():
     port_obj = port.get(options.platform, options)
     return run(port_obj, options, args)
 
 
 if '__main__' == __name__:
     try:
         sys.exit(main())
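To make the new option handling concrete, here is a small, self-contained sketch of what the --worker-model validation and the 'inline' override amount to. It is illustrative only: FakeOptions and resolve_workers are hypothetical stand-ins, not code from the patch; the real behavior lives in parse_args() and _set_up_derived_options() above.

# Hypothetical sketch (not from the patch) of the --worker-model behavior:
# validate the value, and let 'inline' force a single worker.
class FakeOptions(object):
    def __init__(self, worker_model, child_processes):
        self.worker_model = worker_model
        self.child_processes = child_processes


def resolve_workers(options):
    if options.worker_model not in ('inline', 'threads'):
        # parse_args() reports this through option_parser.error(), which exits.
        raise SystemExit("unsupported value for --worker-model: %s" %
                         options.worker_model)
    if options.worker_model == 'inline':
        # 'inline' runs everything in the main thread, so it overrides any
        # larger --child-processes value, as _set_up_derived_options() does.
        options.child_processes = "1"
    return int(options.child_processes or "1")


assert resolve_workers(FakeOptions('inline', '4')) == 1
assert resolve_workers(FakeOptions('threads', '4')) == 4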
Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
@@ -72,6 +72,8 @@ def passing_run(extra_args=None, port_obj=None, record_results=False,
         args.extend(['--platform', 'test'])
     if not record_results:
         args.append('--no-record-results')
+    if not '--child-processes' in extra_args:
+        args.extend(['--worker-model', 'inline'])
     args.extend(extra_args)
     if not tests_included:
         # We use the glob to test that globbing works.
@@ -92,21 +94,30 @@ def logging_run(extra_args=None, port_obj=None, tests_included=False):
     args = ['--no-record-results']
     if not '--platform' in extra_args:
         args.extend(['--platform', 'test'])
+    if not '--child-processes' in extra_args:
+        args.extend(['--worker-model', 'inline'])
     args.extend(extra_args)
     if not tests_included:
         args.extend(['passes',
                      'http/tests',
                      'websocket/tests',
                      'failures/expected/*'])
-    options, parsed_args = run_webkit_tests.parse_args(args)
-    user = MockUser()
-    if not port_obj:
-        port_obj = port.get(port_name=options.platform, options=options, user=user)
-    buildbot_output = array_stream.ArrayStream()
-    regular_output = array_stream.ArrayStream()
-    res = run_webkit_tests.run(port_obj, options, parsed_args,
-                               buildbot_output=buildbot_output,
-                               regular_output=regular_output)
+
+    try:
+        oc = outputcapture.OutputCapture()
+        oc.capture_output()
+        options, parsed_args = run_webkit_tests.parse_args(args)
+        user = MockUser()
+        if not port_obj:
+            port_obj = port.get(port_name=options.platform, options=options,
+                                user=user)
+        buildbot_output = array_stream.ArrayStream()
+        regular_output = array_stream.ArrayStream()
+        res = run_webkit_tests.run(port_obj, options, parsed_args,
+                                   buildbot_output=buildbot_output,
+                                   regular_output=regular_output)
+    finally:
+        oc.restore_output()
     return (res, buildbot_output, regular_output, user)
@@ -116,7 +127,7 @@ def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False):
             '--print', 'nothing',
             '--platform', 'test',
             '--no-record-results',
-            '--child-processes', '1']
+            '--worker-model', 'inline']
     args.extend(extra_args)
     if not tests_included:
         # Not including http tests since they get run out of order (that
@@ -360,9 +371,24 @@ class MainTest(unittest.TestCase):
         test_port = get_port_for_run(base_args)
         self.assertEqual(None, test_port.tolerance_used_for_diff_image)
 
+    def test_worker_model__inline(self):
+        self.assertTrue(passing_run(['--worker-model', 'inline']))
+
+    def test_worker_model__threads(self):
+        self.assertTrue(passing_run(['--worker-model', 'threads']))
+
+    def test_worker_model__processes(self):
+        self.assertRaises(SystemExit, logging_run,
+                          ['--worker-model', 'processes'])
+
+    def test_worker_model__unknown(self):
+        self.assertRaises(SystemExit, logging_run,
+                          ['--worker-model', 'unknown'])
+
 
 MainTest = skip_if(MainTest, sys.platform == 'cygwin' and compare_version(sys, '2.6')[0] < 0, 'new-run-webkit-tests tests hang on Cygwin Python 2.5.2')
 
 
 def _mocked_open(original_open, file_list):
     def _wrapper(name, mode, encoding):
         if name.find("-expected.") != -1 and mode.find("w") != -1:
@@ -454,20 +480,10 @@ class TestRunnerTest(unittest.TestCase):
         html = runner._results_html(["test_path"], {}, "Title", override_time="time")
         self.assertEqual(html, expected_html)
 
-    def queue_to_list(self, queue):
-        queue_list = []
-        while(True):
-            try:
-                queue_list.append(queue.get_nowait())
-            except Queue.Empty:
-                break
-        return queue_list
-
-    def test_get_test_file_queue(self):
-        # Test that _get_test_file_queue in run_webkit_tests.TestRunner really
+    def test_shard_tests(self):
+        # Test that _shard_tests in run_webkit_tests.TestRunner really
         # put the http tests first in the queue.
         runner = TestRunnerWrapper(port=Mock(), options=Mock(), printer=Mock())
-        runner._options.experimental_fully_parallel = False
         test_list = [
             "LayoutTests/websocket/tests/unicode.htm",
@@ -488,19 +504,16 @@ class TestRunnerTest(unittest.TestCase):
             'LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html',
         ])
 
-        runner._options.child_processes = 1
-        test_queue_for_single_thread = runner._get_test_file_queue(test_list)
-        runner._options.child_processes = 2
-        test_queue_for_multi_thread = runner._get_test_file_queue(test_list)
-        single_thread_results = self.queue_to_list(test_queue_for_single_thread)
-        multi_thread_results = self.queue_to_list(test_queue_for_multi_thread)
+        # FIXME: Ideally the HTTP tests don't have to all be in one shard.
+        single_thread_results = runner._shard_tests(test_list, False)
+        multi_thread_results = runner._shard_tests(test_list, True)
         self.assertEqual("tests_to_http_lock", single_thread_results[0][0])
         self.assertEqual(expected_tests_to_http_lock, set(single_thread_results[0][1]))
         self.assertEqual("tests_to_http_lock", multi_thread_results[0][0])
         self.assertEqual(expected_tests_to_http_lock, set(multi_thread_results[0][1]))
 
 
 class DryrunTest(unittest.TestCase):
     # FIXME: it's hard to know which platforms are safe to test; the
     # chromium platforms require a chromium checkout, and the mac platform