Commit 095e573e authored by rniwa@webkit.org

run-perf-tests should support Skipped list

https://bugs.webkit.org/show_bug.cgi?id=76594

Reviewed by Adam Barth.

PerformanceTests: 

Add a Skipped list to be used by run-perf-tests, since it can only run tests in
Bindings, Parser, and inspector at the moment.

* Skipped: Added.

Tools: 

Add support for a Skipped list in run-perf-tests; also skip files in resources directories.

* Scripts/webkitpy/layout_tests/port/base.py:
(Port.perf_tests_dir):
(Port.skipped_perf_tests):
(Port.skips_perf_test):
(Port.relative_perf_test_filename):
* Scripts/webkitpy/layout_tests/port/test.py:
(TestPort.perf_tests_dir):
* Scripts/webkitpy/layout_tests/port/webkit.py:
(WebKitPort._expectations_from_skipped_files):
(WebKitPort):
(WebKitPort.skipped_layout_tests):
(WebKitPort.skipped_perf_tests):
* Scripts/webkitpy/performance_tests/perftestsrunner.py:
(PerfTestsRunner._collect_tests):
(PerfTestsRunner.run):
* Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
(test_run_test_set_with_json_output):
(test_run_test_set_with_json_source):
(test_collect_tests_with_skipped_list):
(test_collect_tests_with_skipped_list.add_file):


git-svn-id: http://svn.webkit.org/repository/webkit/trunk@105381 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent 301bdfea
PerformanceTests/ChangeLog
2012-01-18  Ryosuke Niwa  <rniwa@webkit.org>

        run-perf-tests should support Skipped list
        https://bugs.webkit.org/show_bug.cgi?id=76594

        Reviewed by Adam Barth.

        Add a Skipped list to be used by run-perf-tests, since it can only run
        tests in Bindings, Parser, and inspector at the moment.

        * Skipped: Added.

2012-01-12  Ilya Tikhonovsky  <loislo@chromium.org>

        Web Inspector: performance: restore 'log 300 messages into console' test.
......
PerformanceTests/Skipped
# Not enabled by default on some ports
Mutation

# Do not conform to WebKit or Chromium perf test styles
Layout
PageLoad
SunSpider
XSSAuditor
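For reference, entries in a Skipped list name either a single test file or a whole directory of tests, one per line, with '#' beginning a comment line. A minimal, self-contained sketch of that parsing convention (parse_skipped_list is a hypothetical name for illustration; webkitpy does this inside WebKitPort._expectations_from_skipped_files):

def parse_skipped_list(text):
    entries = []
    for line in text.splitlines():
        line = line.strip()
        if not line or line.startswith('#'):
            continue  # blank lines and comment lines are ignored
        entries.append(line)
    return entries

assert parse_skipped_list('# comment\nMutation\n\nLayout\n') == ['Mutation', 'Layout']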
Tools/ChangeLog
2012-01-18  Ryosuke Niwa  <rniwa@webkit.org>

        run-perf-tests should support Skipped list
        https://bugs.webkit.org/show_bug.cgi?id=76594

        Reviewed by Adam Barth.

        Add support for a Skipped list in run-perf-tests; also skip files in
        resources directories.

        * Scripts/webkitpy/layout_tests/port/base.py:
        (Port.perf_tests_dir):
        (Port.skipped_perf_tests):
        (Port.skips_perf_test):
        (Port.relative_perf_test_filename):
        * Scripts/webkitpy/layout_tests/port/test.py:
        (TestPort.perf_tests_dir):
        * Scripts/webkitpy/layout_tests/port/webkit.py:
        (WebKitPort._expectations_from_skipped_files):
        (WebKitPort):
        (WebKitPort.skipped_layout_tests):
        (WebKitPort.skipped_perf_tests):
        * Scripts/webkitpy/performance_tests/perftestsrunner.py:
        (PerfTestsRunner._collect_tests):
        (PerfTestsRunner.run):
        * Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py:
        (test_run_test_set_with_json_output):
        (test_run_test_set_with_json_source):
        (test_collect_tests_with_skipped_list):
        (test_collect_tests_with_skipped_list.add_file):

2012-01-18  Dirk Pranke  <dpranke@chromium.org>

        Fix path to chromium_src_dir introduced in previous change.
Tools/Scripts/webkitpy/layout_tests/port/base.py
@@ -564,12 +564,19 @@ class Port(object):
         """Return the absolute path to the top of the LayoutTests directory."""
         return self.path_from_webkit_base('LayoutTests')
 
+    def perf_tests_dir(self):
+        """Return the absolute path to the top of the PerformanceTests directory."""
+        return self.path_from_webkit_base('PerformanceTests')
+
     def webkit_base(self):
         return self._filesystem.abspath(self.path_from_webkit_base('.'))
 
     def skipped_layout_tests(self):
         return []
 
+    def skipped_perf_tests(self):
+        return []
+
     def skipped_tests(self):
         return []
@@ -585,6 +592,15 @@ class Port(object):
                 return True
         return False
 
+    def skips_perf_test(self, test_name):
+        for test_or_category in self.skipped_perf_tests():
+            if test_or_category == test_name:
+                return True
+            category = self._filesystem.join(self.perf_tests_dir(), test_or_category)
+            if self._filesystem.isdir(category) and test_name.startswith(test_or_category):
+                return True
+        return False
+
     def maybe_make_directory(self, *comps):
         """Creates the specified directory if it doesn't already exist."""
         self._filesystem.maybe_make_directory(*comps)
@@ -654,6 +670,10 @@ class Port(object):
         assert filename.startswith(self.layout_tests_dir()), "%s did not start with %s" % (filename, self.layout_tests_dir())
         return filename[len(self.layout_tests_dir()) + 1:]
 
+    def relative_perf_test_filename(self, filename):
+        assert filename.startswith(self.perf_tests_dir()), "%s did not start with %s" % (filename, self.perf_tests_dir())
+        return filename[len(self.perf_tests_dir()) + 1:]
+
     def abspath_for_test(self, test_name):
         """Returns the full path to the file for a given test name. This is the
         inverse of relative_test_filename()."""
......
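The matching rule in Port.skips_perf_test above is worth restating: an entry skips a test when it equals the test's path relative to PerformanceTests, or when it names a directory under PerformanceTests that prefixes the test path. A self-contained sketch of the same logic, with the directory lookup stubbed out instead of using the port's filesystem (names here are for illustration only):

def skips_perf_test(skipped_entries, existing_directories, test_name):
    for entry in skipped_entries:
        if entry == test_name:
            return True  # exact match on the relative test path
        if entry in existing_directories and test_name.startswith(entry):
            return True  # entry names a whole category (directory) of tests
    return False

skipped = ['inspector/unsupported_test1.html', 'unsupported']
assert skips_perf_test(skipped, {'unsupported'}, 'inspector/unsupported_test1.html')
assert skips_perf_test(skipped, {'unsupported'}, 'unsupported/unsupported_test2.html')
assert not skips_perf_test(skipped, {'unsupported'}, 'inspector/test1.html')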
Tools/Scripts/webkitpy/layout_tests/port/test.py
@@ -223,8 +223,10 @@ layer at (0,0) size 800x34
 if sys.platform == 'win32':
     LAYOUT_TEST_DIR = 'c:/test.checkout/LayoutTests'
+    PERF_TEST_DIR = 'c:/test.checkout/PerformanceTests'
 else:
     LAYOUT_TEST_DIR = '/test.checkout/LayoutTests'
+    PERF_TEST_DIR = '/test.checkout/PerformanceTests'
 
 # Here we synthesize an in-memory filesystem from the test list
@@ -384,6 +386,9 @@ class TestPort(Port):
     def layout_tests_dir(self):
         return LAYOUT_TEST_DIR
 
+    def perf_tests_dir(self):
+        return PERF_TEST_DIR
+
     def webkit_base(self):
         return '/test.checkout'
......
Tools/Scripts/webkitpy/layout_tests/port/webkit.py
@@ -351,9 +351,9 @@ class WebKitPort(Port):
             search_paths.update([self._wk2_port_name(), "wk2"])
         return search_paths
 
-    def _expectations_from_skipped_files(self):
+    def _expectations_from_skipped_files(self, skipped_file_paths):
         tests_to_skip = []
-        for search_path in self._skipped_file_search_paths():
+        for search_path in skipped_file_paths:
             filename = self._filesystem.join(self._webkit_baseline_path(search_path), "Skipped")
             if not self._filesystem.exists(filename):
                 _log.debug("Skipped does not exist: %s" % filename)
@@ -372,13 +372,18 @@ class WebKitPort(Port):
         expectations = self._filesystem.read_text_file(expectations_path)
         return expectations
 
+    @memoized
     def skipped_layout_tests(self):
         # Use a set to allow duplicates
-        tests_to_skip = set(self._expectations_from_skipped_files())
+        tests_to_skip = set(self._expectations_from_skipped_files(self._skipped_file_search_paths()))
         tests_to_skip.update(self._tests_for_other_platforms())
         tests_to_skip.update(self._skipped_tests_for_unsupported_features())
         return tests_to_skip
 
+    @memoized
+    def skipped_perf_tests(self):
+        return self._expectations_from_skipped_files([self.perf_tests_dir()])
+
     def skipped_tests(self):
         return self.skipped_layout_tests()
......
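webkitpy's @memoized decorator caches the method's return value, so the Skipped file is read and parsed at most once per port object even though skips_perf_test consults skipped_perf_tests() for every collected test. A minimal illustrative equivalent (a sketch, assuming hashable arguments, not the actual webkitpy implementation):

import functools

def memoized(function):
    cache = {}

    @functools.wraps(function)
    def wrapper(*args):
        if args not in cache:
            cache[args] = function(*args)  # compute once, then serve from cache
        return cache[args]
    return wrapper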
Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
@@ -96,7 +96,9 @@ class PerfTestsRunner(object):
         def _is_test_file(filesystem, dirname, filename):
             return filename.endswith('.html')
 
-        return find_files.find(self._host.filesystem, self._base_path, paths=self._args, file_filter=_is_test_file)
+        skipped_directories = set(['.svn', 'resources'])
+        tests = find_files.find(self._host.filesystem, self._base_path, self._args, skipped_directories, _is_test_file)
+        return [test for test in tests if not self._port.skips_perf_test(self._port.relative_perf_test_filename(test))]
 
     def run(self):
         if self._options.help_printing:
@@ -113,7 +115,7 @@ class PerfTestsRunner(object):
         unexpected = -1
         try:
             tests = self._collect_tests()
-            unexpected = self._run_tests_set(tests, self._port)
+            unexpected = self._run_tests_set(sorted(list(tests)), self._port)
         finally:
             self._printer.cleanup()
......
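Taken together, _collect_tests now walks PerformanceTests for .html files while pruning .svn and resources directories, drops anything matched by the port's Skipped list, and run() sorts the survivors so tests execute in a deterministic order. A self-contained approximation of that pipeline (find_files.find prunes directories during traversal; this sketch filters flat relative paths instead, and the names are for illustration only):

def collect_tests(all_paths, skips_perf_test):
    skipped_directories = {'.svn', 'resources'}
    candidates = [path for path in all_paths
                  if path.endswith('.html')
                  and not skipped_directories.intersection(path.split('/'))]
    return sorted(path for path in candidates if not skips_perf_test(path))

paths = ['inspector/test1.html', 'inspector/resources/helper.html', 'unsupported/t.html']
assert collect_tests(paths, lambda t: t.startswith('unsupported')) == ['inspector/test1.html']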
@@ -175,7 +175,7 @@ max 1120
     def test_run_test_set_with_json_output(self):
         buildbot_output = array_stream.ArrayStream()
-        runner = self.create_runner(buildbot_output, args=['--output-json-path=/test.checkout/output.json'])
+        runner = self.create_runner(buildbot_output, args=['--output-json-path=/mock-checkout/output.json'])
         runner._host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
         runner._host.filesystem.files[runner._base_path + '/Bindings/event-target-wrapper.html'] = True
         runner._timestamp = 123456789
@@ -185,7 +185,7 @@ max 1120
         self.assertEqual(buildbot_output.get()[1], 'median= 1487 ms, stdev= 14.46 ms, min= 1471 ms, max= 1510 ms\n')
         self.assertEqual(buildbot_output.get()[2], 'RESULT group_name: test_name= 42 ms\n')
-        self.assertEqual(json.loads(runner._host.filesystem.files['/test.checkout/output.json']), {
+        self.assertEqual(json.loads(runner._host.filesystem.files['/mock-checkout/output.json']), {
             "timestamp": 123456789, "results":
             {"event-target-wrapper": {"max": "1510", "avg": "1489.05", "median": "1487", "min": "1471", "stdev": "14.46"},
             "group_name:test_name": 42},
@@ -193,9 +193,9 @@ max 1120
     def test_run_test_set_with_json_source(self):
         buildbot_output = array_stream.ArrayStream()
-        runner = self.create_runner(buildbot_output, args=['--output-json-path=/test.checkout/output.json',
-                                                           '--source-json-path=/test.checkout/source.json'])
-        runner._host.filesystem.files['/test.checkout/source.json'] = '{"key": "value"}'
+        runner = self.create_runner(buildbot_output, args=['--output-json-path=/mock-checkout/output.json',
+                                                           '--source-json-path=/mock-checkout/source.json'])
+        runner._host.filesystem.files['/mock-checkout/source.json'] = '{"key": "value"}'
         runner._host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
         runner._host.filesystem.files[runner._base_path + '/Bindings/event-target-wrapper.html'] = True
         runner._timestamp = 123456789
@@ -205,7 +205,7 @@ max 1120
         self.assertEqual(buildbot_output.get()[1], 'median= 1487 ms, stdev= 14.46 ms, min= 1471 ms, max= 1510 ms\n')
         self.assertEqual(buildbot_output.get()[2], 'RESULT group_name: test_name= 42 ms\n')
-        self.assertEqual(json.loads(runner._host.filesystem.files['/test.checkout/output.json']), {
+        self.assertEqual(json.loads(runner._host.filesystem.files['/mock-checkout/output.json']), {
             "timestamp": 123456789, "results":
             {"event-target-wrapper": {"max": "1510", "avg": "1489.05", "median": "1487", "min": "1471", "stdev": "14.46"},
             "group_name:test_name": 42},
@@ -219,6 +219,23 @@ max 1120
         tests = runner._collect_tests()
         self.assertEqual(len(tests), 1)
 
+    def test_collect_tests_with_skipped_list(self):
+        runner = self.create_runner()
+
+        def add_file(dirname, filename, content=True):
+            dirname = runner._host.filesystem.join(runner._base_path, dirname) if dirname else runner._base_path
+            runner._host.filesystem.maybe_make_directory(dirname)
+            runner._host.filesystem.files[runner._host.filesystem.join(dirname, filename)] = content
+
+        add_file('inspector', 'test1.html')
+        add_file('inspector', 'unsupported_test1.html')
+        add_file('inspector', 'test2.html')
+        add_file('inspector/resources', 'resource_file.html')
+        add_file('unsupported', 'unsupported_test2.html')
+        runner._port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
+        tests = [runner._port.relative_perf_test_filename(test) for test in runner._collect_tests()]
+        self.assertEqual(sorted(tests), ['inspector/test1.html', 'inspector/test2.html'])
+
     def test_parse_args(self):
         runner = self.create_runner()
         options, args = PerfTestsRunner._parse_args([
......
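These tests run entirely against webkitpy's in-memory mock filesystem, which is why add_file can create a test just by poking an entry into runner._host.filesystem.files. A self-contained sketch of that idea (FakeFileSystem is a stand-in written for this note, not the real webkitpy class):

class FakeFileSystem(object):
    def __init__(self):
        self.files = {}  # absolute path -> content; directories are implied by paths

    def join(self, *comps):
        return '/'.join(comps)

    def isdir(self, path):
        return any(name.startswith(path + '/') for name in self.files)

fs = FakeFileSystem()
fs.files[fs.join('/test.checkout/PerformanceTests/inspector', 'test1.html')] = True
assert fs.isdir('/test.checkout/PerformanceTests/inspector')
assert not fs.isdir('/test.checkout/PerformanceTests/Bindings')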