Commit 7e510d3b authored by abarth@webkit.org

2011-07-06 Adam Barth <abarth@webkit.org>

        Remove unexpected_results.json
        https://bugs.webkit.org/show_bug.cgi?id=64065

        Reviewed by Eric Seidel.

        No one should be using this file any more.  This patch removes it from
        the face of the earth.  This patch also removes two NRWT-specific
        command line options that were implemented using
        unexpected_results.json.

        * Scripts/webkitpy/common/net/resultsjsonparser_unittest.py:
        * Scripts/webkitpy/layout_tests/controllers/manager.py:
        * Scripts/webkitpy/layout_tests/controllers/manager_unittest.py:
        * Scripts/webkitpy/layout_tests/run_webkit_tests.py:
        * Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py:
        * Scripts/webkitpy/tool/commands/rebaselineserver.py:
        * Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py:


git-svn-id: http://svn.webkit.org/repository/webkit/trunk@90546 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent c0c5d048
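For anyone who previously consumed unexpected_results.json, here is a minimal, hypothetical sketch (not part of this patch) of deriving the same unexpected-results list from full_results.json, which this change keeps. The ADD_RESULTS(...) wrapper and the per-test "expected"/"actual" fields are assumed from the examples visible in the hunks below; the helper name is invented for illustration.

    import json

    def unexpected_tests(path):
        # Hypothetical helper, not part of this patch: list tests whose
        # actual result differs from the expected one, roughly what the
        # removed unexpected_results.json used to contain.
        text = open(path).read().strip()
        # full_results.json is JSONP-wrapped as ADD_RESULTS(...); strip the
        # wrapper before parsing (wrapper form assumed from the tests below).
        if text.startswith("ADD_RESULTS(") and text.endswith(");"):
            text = text[len("ADD_RESULTS("):-2]
        results = json.loads(text)

        unexpected = []
        def walk(node, prefix=""):
            for name, value in node.items():
                if "actual" in value and "expected" in value:
                    # Leaf entry, e.g. {"expected": "PASS", "actual": "CRASH", ...}.
                    # A simple inequality check; real entries can list several
                    # expected or actual values, so treat this as a rough filter.
                    if value["actual"] != value["expected"]:
                        unexpected.append(prefix + name)
                else:
                    walk(value, prefix + name + "/")
        walk(results["tests"])
        return unexpected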
Scripts/webkitpy/common/net/resultsjsonparser_unittest.py
@@ -35,7 +35,7 @@ from webkitpy.layout_tests.models import test_failures
 class ResultsJSONParserTest(unittest.TestCase):
 # The real files have no whitespace, but newlines make this much more readable.
-_example_unexpected_results_json = """ADD_RESULTS({
+_example_full_results_json = """ADD_RESULTS({
 "tests": {
 "fast": {
 "dom": {
@@ -88,5 +88,5 @@ class ResultsJSONParserTest(unittest.TestCase):
 test_results.TestResult("svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html", [test_failures.FailureImageHashMismatch()], 0),
 test_results.TestResult("fast/dom/prototype-inheritance.html", [test_failures.FailureTextMismatch()], 0),
 ]
-results = ResultsJSONParser.parse_results_json(self._example_unexpected_results_json)
+results = ResultsJSONParser.parse_results_json(self._example_full_results_json)
 self.assertEqual(expected_results, results)
Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -301,16 +301,14 @@ class Manager(object):
 # This maps worker names to the state we are tracking for each of them.
 self._worker_states = {}
-def collect_tests(self, args, last_unexpected_results):
+def collect_tests(self, args):
 """Find all the files to test.
 Args:
 args: list of test arguments from the command line
-last_unexpected_results: list of unexpected results to retest, if any
 """
 paths = self._strip_test_dir_prefixes(args)
-paths += last_unexpected_results
 if self._options.test_list:
 paths += self._strip_test_dir_prefixes(read_test_files(self._fs, self._options.test_list, self._port.TEST_PATH_SEPARATOR))
 self._test_files = self._port.tests(paths)
@@ -911,7 +909,7 @@ class Manager(object):
 # Write the same data to log files and upload generated JSON files
 # to appengine server.
 summarized_results = summarize_results(self._port, self._expectations, result_summary, retry_summary, individual_test_timings, only_unexpected=False, interrupted=interrupted)
-self._upload_json_files(unexpected_results, summarized_results, result_summary, individual_test_timings)
+self._upload_json_files(summarized_results, result_summary, individual_test_timings)
 # Write the summary to disk (results.html) and display it if requested.
 if not self._options.dry_run:
@@ -1037,14 +1035,11 @@
 result_enum_value = TestExpectations.MODIFIERS[result]
 return json_layout_results_generator.JSONLayoutResultsGenerator.FAILURE_TO_CHAR[result_enum_value]
-def _upload_json_files(self, unexpected_results, summarized_results, result_summary,
-individual_test_timings):
+def _upload_json_files(self, summarized_results, result_summary, individual_test_timings):
 """Writes the results of the test run as JSON files into the results
 dir and upload the files to the appengine server.
-There are three different files written into the results dir:
-unexpected_results.json: A short list of any unexpected results.
-This is used by the buildbots to display results.
+There are two different files written into the results dir:
 expectations.json: This is used by the flakiness dashboard.
 results.json: A full list of the results - used by the flakiness
 dashboard and the aggregate results dashboard.
@@ -1062,9 +1057,6 @@
 times_json_path = self._fs.join(self._results_directory, "times_ms.json")
 json_results_generator.write_json(self._fs, times_trie, times_json_path)
-unexpected_json_path = self._fs.join(self._results_directory, "unexpected_results.json")
-json_results_generator.write_json(self._fs, unexpected_results, unexpected_json_path)
 full_results_path = self._fs.join(self._results_directory, "full_results.json")
 json_results_generator.write_json(self._fs, summarized_results, full_results_path)
Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
@@ -153,7 +153,7 @@ class ManagerTest(unittest.TestCase):
 printer = printing.Printer(port, options, StringIO.StringIO(), StringIO.StringIO(),
 configure_logging=True)
 manager = LockCheckingManager(port, options, printer)
-manager.collect_tests(args, [])
+manager.collect_tests(args)
 manager.parse_expectations()
 result_summary = manager.set_up_run()
 num_unexpected_results = manager.run(result_summary)
@@ -207,7 +207,7 @@ class ManagerTest(unittest.TestCase):
 def get_manager_with_tests(test_names):
 port = layout_tests.port.get()
 manager = Manager(port, options=MockOptions(test_list=None), printer=Mock())
-manager.collect_tests(test_names, last_unexpected_results=[])
+manager.collect_tests(test_names)
 return manager
 manager = get_manager_with_tests(['fast/html'])
Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -81,12 +81,6 @@ def run(port, options, args, regular_output=sys.stderr,
 printer.cleanup()
 return 0
-last_unexpected_results = _gather_unexpected_results(port)
-if options.print_last_failures:
-printer.write("\n".join(last_unexpected_results) + "\n")
-printer.cleanup()
-return 0
 # We wrap any parts of the run that are slow or likely to raise exceptions
 # in a try/finally to ensure that we clean up the logging configuration.
 num_unexpected_results = -1
@@ -96,7 +90,7 @@ def run(port, options, args, regular_output=sys.stderr,
 printer.print_update("Collecting tests ...")
 try:
-manager.collect_tests(args, last_unexpected_results)
+manager.collect_tests(args)
 except IOError, e:
 if e.errno == errno.ENOENT:
 return -1
@@ -167,20 +161,6 @@ def _set_up_derived_options(port_obj, options):
 return warnings
-def _gather_unexpected_results(port):
-"""Returns the unexpected results from the previous run, if any."""
-filesystem = port._filesystem
-results_directory = port.results_directory()
-options = port._options
-last_unexpected_results = []
-if options.print_last_failures or options.retest_last_failures:
-unexpected_results_filename = filesystem.join(results_directory, "unexpected_results.json")
-if filesystem.exists(unexpected_results_filename):
-results = json_results_generator.load_json(filesystem, unexpected_results_filename)
-resultsjsonparser.for_each_test(results['tests'], lambda test, result: last_unexpected_results.append(test))
-return last_unexpected_results
 def _compat_shim_callback(option, opt_str, value, parser):
 print "Ignoring unsupported option: %s" % opt_str
@@ -409,12 +389,6 @@ def parse_args(args=None):
 "running all tests"),
 # FIXME: consider: --iterations n
 # Number of times to run the set of tests (e.g. ABCABCABC)
-optparse.make_option("--print-last-failures", action="store_true",
-default=False, help="Print the tests in the last run that "
-"had unexpected failures (or passes) and then exit."),
-optparse.make_option("--retest-last-failures", action="store_true",
-default=False, help="re-test the tests in the last run that "
-"had unexpected failures (or passes)."),
 optparse.make_option("--retry-failures", action="store_true",
 default=True,
 help="Re-try any tests that produce unexpected results (default)"),
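For completeness, one hypothetical way to approximate the removed --retest-last-failures behaviour with what remains after this patch is to feed the list produced by the unexpected_tests() sketch near the top of this page back into NRWT through the existing test_list option. The results path, the retest.txt file name, and the exact command line below are assumptions for illustration, not something this patch adds.

    # Hypothetical glue, not part of this patch: re-run the previous run's
    # unexpected tests by writing them to a test list file.
    tests = unexpected_tests("/tmp/layout-test-results/full_results.json")
    with open("/tmp/retest.txt", "w") as f:
        f.write("\n".join(tests) + "\n")
    # Then, roughly: new-run-webkit-tests --test-list /tmp/retest.txt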
Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
@@ -269,15 +269,6 @@ class MainTest(unittest.TestCase):
 ['failures/expected/keyboard.html', '--worker-model', 'inline'],
 tests_included=True)
-def test_last_results(self):
-fs = unit_test_filesystem()
-# We do a logging run here instead of a passing run in order to
-# suppress the output from the json generator.
-res, buildbot_output, regular_output, user = logging_run(['--clobber-old-results'], record_results=True, filesystem=fs)
-res, buildbot_output, regular_output, user = logging_run(['--print-last-failures'], filesystem=fs)
-self.assertEqual(regular_output.get(), [u'failures/expected/checksum.html\n\n'])
-self.assertEqual(buildbot_output.get(), [])
 def test_lint_test_files(self):
 res, out, err, user = logging_run(['--lint-test-files'])
 self.assertEqual(res, 0)
@@ -428,7 +419,7 @@ class MainTest(unittest.TestCase):
 tests_included=True,
 record_results=True,
 filesystem=fs)
-self.assertTrue(fs.read_text_file('/tmp/layout-test-results/unexpected_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)
+self.assertTrue(fs.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)
 def test_no_image_failure_with_image_diff(self):
 fs = unit_test_filesystem()
@@ -438,7 +429,7 @@ class MainTest(unittest.TestCase):
 tests_included=True,
 record_results=True,
 filesystem=fs)
-self.assertTrue(fs.read_text_file('/tmp/layout-test-results/unexpected_results.json').find('"num_regressions":0') != -1)
+self.assertTrue(fs.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)
 def test_crash_log(self):
 mock_crash_report = 'mock-crash-report'
Scripts/webkitpy/tool/commands/rebaselineserver.py
@@ -80,7 +80,7 @@ class RebaselineServer(AbstractLocalServerCommand):
 filesystem = system.filesystem.FileSystem()
 scm = self._tool.scm()
-print 'Parsing unexpected_results.json...'
+print 'Parsing full_results.json...'
 results_json_path = filesystem.join(results_directory, 'full_results.json')
 results_json = json_results_generator.load_json(filesystem, results_json_path)
Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py
@@ -206,7 +206,7 @@ class RebaselineTestTest(unittest.TestCase):
 ])
 def test_gather_baselines(self):
-example_json = resultsjsonparser_unittest.ResultsJSONParserTest._example_unexpected_results_json
+example_json = resultsjsonparser_unittest.ResultsJSONParserTest._example_full_results_json
 results_json = simplejson.loads(strip_json_wrapper(example_json))
 server = RebaselineServer()
 server._test_config = get_test_config()