Commit 8f2f2889 authored by rniwa@webkit.org

NRWT: Add the ability to upload test results to new test results server

https://bugs.webkit.org/show_bug.cgi?id=120146

Reviewed by Andreas Kling.
        
For the upcoming replacement for webkit-test-results.appspot.com, new-run-webkit-tests needs to use a new JSON format
that contains the full summary of the test run including information about passed tests.

Add this ability to NRWT so that I can start testing this feature. This patch adds the following options to NRWT:
--build-slave
--got-revision
--results-server-host

The last option is somewhat ambiguous with --test-results-server, but we intend to deprecate the old server,
so we should be able to delete --test-results-server in time.
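
As a rough sketch (not part of the patch) of how these options fit together: the values below are made up for illustration, and the host is a placeholder since the new server is not named in this change. parse_args() is the function modified in Scripts/webkitpy/layout_tests/run_webkit_tests.py below, and the snippet assumes webkitpy is importable:

    # Hypothetical values; only the option names come from this patch.
    from webkitpy.layout_tests.run_webkit_tests import parse_args

    options, args = parse_args([
        '--results-server-host', 'new-results-server.example.com',  # placeholder host
        '--master-name', 'webkit.org',
        '--builder-name', 'Apple MountainLion Release WK2 (Tests)',
        '--build-number', '1234',
        '--build-slave', 'apple-macpro-6',
        '--got-revision', '154450',
    ])
    assert options.results_server_host == 'new-results-server.example.com'

Manager.upload_results() (added below) refuses to upload unless --master-name, --builder-name, --build-number, --build-slave, and --got-revision are all supplied alongside --results-server-host.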

* Scripts/webkitpy/layout_tests/controllers/manager.py:
(Manager.run):
(Manager._upload_json_files):
(Manager):
(Manager.upload_results):
* Scripts/webkitpy/layout_tests/models/test_run_results.py:
(summarize_results):
* Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py:
(summarized_results):
(SummarizedResultsTest.test_summarized_results_wontfix):
(SummarizedResultsTest):
(SummarizedResultsTest.test_summarized_results_include_passes):
* Scripts/webkitpy/layout_tests/run_webkit_tests.py:
(parse_args):


git-svn-id: http://svn.webkit.org/repository/webkit/trunk@154450 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent 7f50aae5
Scripts/webkitpy/layout_tests/controllers/manager.py:
@@ -40,6 +40,7 @@ import random
import sys
import time
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
@@ -225,11 +226,14 @@ class Manager(object):
_log.debug("summarizing results")
summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
results_including_passes = None
if self._options.results_server_host:
results_including_passes = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=True)
self._printer.print_results(end_time - start_time, initial_results, summarized_results)
if not self._options.dry_run:
self._port.print_leaks_summary()
self._upload_json_files(summarized_results, initial_results)
self._upload_json_files(summarized_results, initial_results, results_including_passes, start_time, end_time)
results_path = self._filesystem.join(self._results_directory, "results.html")
self._copy_results_html_file(results_path)
@@ -319,7 +323,7 @@ class Manager(object):
(result.type != test_expectations.MISSING) and
(result.type != test_expectations.CRASH or include_crashes))]
def _upload_json_files(self, summarized_results, initial_results):
def _upload_json_files(self, summarized_results, initial_results, results_including_passes=None, start_time=None, end_time=None):
"""Writes the results of the test run as JSON files into the results
dir and uploads the files to the appengine server.
@@ -342,6 +346,10 @@ class Manager(object):
# We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")
results_json_path = self._filesystem.join(self._results_directory, "results_including_passes.json")
if results_including_passes:
json_results_generator.write_json(self._filesystem, results_including_passes, results_json_path)
generator = json_layout_results_generator.JSONLayoutResultsGenerator(
self._port, self._options.builder_name, self._options.build_name,
self._options.build_number, self._results_directory,
@@ -357,6 +365,8 @@ class Manager(object):
json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]
generator.upload_json_files(json_files)
if results_including_passes:
self.upload_results(results_json_path, start_time, end_time)
incremental_results_path = self._filesystem.join(self._results_directory, "incremental_results.json")
@@ -364,6 +374,60 @@ class Manager(object):
# The tools use the version we uploaded to the results server anyway.
self._filesystem.remove(times_json_path)
self._filesystem.remove(incremental_results_path)
if results_including_passes:
self._filesystem.remove(results_json_path)
def upload_results(self, results_json_path, start_time, end_time):
host = self._options.results_server_host
if not host:
return
master_name = self._options.master_name
builder_name = self._options.builder_name
build_number = self._options.build_number
build_slave = self._options.build_slave
got_revision = self._options.got_revision
if not master_name or not builder_name or not build_number or not build_slave or not got_revision:
_log.error("--results-dashboard-host was set, but --master-name, --builder-name, --build-number, --build-slave, or --got-revision was not. Not uploading JSON files.")
return
_log.info("Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s", master_name, builder_name, build_number, build_slave, host)
attrs = [
('master', master_name),
('builder_name', builder_name),
('build_number', build_number),
('build_slave', build_slave),
('revision', got_revision),
('start_time', str(start_time)),
('end_time', str(end_time)),
]
uploader = FileUploader("http://%s/api/report" % host, 360)
try:
response = uploader.upload_as_multipart_form_data(self._filesystem, [('results.json', results_json_path)], attrs)
if not response:
_log.error("JSON upload failed; no response returned")
return
if response.code != 200:
_log.error("JSON upload failed, %d: '%s'" % (response.code, response.read()))
return
response_text = response.read()
try:
response_json = json.loads(response_text)
except ValueError, error:
_log.error("JSON upload failed; failed to parse the response: %s", response_text)
return
if response_json['status'] != 'OK':
_log.error("JSON upload failed, %s: %s", response_json['status'], response_text)
return
_log.info("JSON uploaded.")
except Exception, error:
_log.error("Upload failed: %s" % error)
return
def _copy_results_html_file(self, destination_path):
base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
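To make the upload path above concrete, here is a standalone sketch (not part of the patch) of the request that upload_results() delegates to FileUploader.upload_as_multipart_form_data(): a multipart/form-data POST to http://<host>/api/report carrying the build attributes plus the results_including_passes.json payload. The form field name for the file and the interpretation of 360 as a timeout in seconds are assumptions; the endpoint, the attribute names, and the {'status': 'OK'} response check come from the code above.

    # Python 2, to match the codebase; all values and the field name "file" are placeholders.
    import json
    import urllib2

    def post_report(host, attrs, results_json_path):
        boundary = '----example-boundary'
        parts = []
        for name, value in attrs:
            parts.append('--%s\r\nContent-Disposition: form-data; name="%s"\r\n\r\n%s\r\n'
                         % (boundary, name, value))
        with open(results_json_path, 'rb') as results_file:
            parts.append('--%s\r\nContent-Disposition: form-data; name="file"; filename="results.json"\r\n'
                         'Content-Type: application/json\r\n\r\n%s\r\n' % (boundary, results_file.read()))
        parts.append('--%s--\r\n' % boundary)
        request = urllib2.Request('http://%s/api/report' % host, ''.join(parts),
                                  {'Content-Type': 'multipart/form-data; boundary=%s' % boundary})
        # upload_results() treats any response whose JSON 'status' field is not 'OK' as a failure.
        return json.loads(urllib2.urlopen(request, timeout=360).read())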
Scripts/webkitpy/layout_tests/models/test_run_results.py:
@@ -118,7 +118,7 @@ def _interpret_test_failures(failures):
return test_dict
def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry):
def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, include_passes=False):
"""Returns a dictionary containing a summary of the test runs, with the following fields:
'version': a version indicator
'fixable': The number of fixable tests (NOW - PASS)
@@ -174,7 +174,7 @@ def summarize_results(port_obj, expectations, initial_results, retry_results, en
if result_type == test_expectations.PASS:
num_passes += 1
# FIXME: include passing tests that have stderr output.
if expected == 'PASS':
if expected == 'PASS' and not include_passes:
continue
elif result_type == test_expectations.CRASH:
if test_name in initial_results.unexpected_results_by_name:
Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py:
@@ -53,7 +53,7 @@ def run_results(port):
return test_run_results.TestRunResults(expectations, len(tests))
def summarized_results(port, expected, passing, flaky):
def summarized_results(port, expected, passing, flaky, include_passes=False):
test_is_slow = False
initial_results = run_results(port)
@@ -84,7 +84,8 @@ def summarized_results(port, expected, passing, flaky):
else:
retry_results = None
return test_run_results.summarize_results(port, initial_results.expectations, initial_results, retry_results, enabled_pixel_tests_in_retry=False)
return test_run_results.summarize_results(port, initial_results.expectations, initial_results, retry_results,
enabled_pixel_tests_in_retry=False, include_passes=include_passes)
class InterpretTestFailuresTest(unittest.TestCase):
@@ -133,3 +134,8 @@ class SummarizedResultsTest(unittest.TestCase):
self.port._options.builder_name = 'dummy builder'
summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
self.assertTrue(summary['tests']['failures']['expected']['hang.html']['wontfix'])
def test_summarized_results_include_passes(self):
self.port._options.builder_name = 'dummy builder'
summary = summarized_results(self.port, expected=False, passing=True, flaky=False, include_passes=True)
self.assertEqual(summary['tests']['passes']['text.html']['expected'], 'PASS')
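For orientation, a rough sketch of the shape the include_passes summary takes, inferred from the assertion above and the summarize_results() docstring; any key not asserted in the test is an assumption rather than something this patch specifies:

    # Illustrative structure only.
    summary_including_passes = {
        'version': '<version indicator>',  # per the summarize_results() docstring
        'tests': {
            'passes': {
                # With include_passes=True, tests that passed as expected are no longer skipped.
                'text.html': {'expected': 'PASS'},
            },
        },
    }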
Scripts/webkitpy/layout_tests/run_webkit_tests.py:
@@ -284,16 +284,19 @@ def parse_args(args):
option_group_definitions.append(("Result JSON Options", [
optparse.make_option("--master-name", help="The name of the buildbot master."),
optparse.make_option("--builder-name", default="",
help=("The name of the builder shown on the waterfall running "
"this script e.g. WebKit.")),
help=("The name of the builder shown on the waterfall running this script. e.g. Apple MountainLion Release WK2 (Tests).")),
optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
help=("The name of the builder used in its path, e.g. "
"webkit-rel.")),
help=("The name of the builder used in its path, e.g. webkit-rel.")),
optparse.make_option("--build-slave", default="DUMMY_BUILD_SLAVE",
help=("The name of the buildslave used. e.g. apple-macpro-6.")),
optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
help=("The build number of the builder running this script.")),
optparse.make_option("--got-revision", default="",
help=("The revision number. e.g. 12345")),
optparse.make_option("--test-results-server", default="",
help=("If specified, upload results json files to this appengine "
"server.")),
help=("If specified, upload results json files to this appengine server.")),
optparse.make_option("--results-server-host", default="",
help=("If specified, upload results JSON file to this results server.")),
]))
option_parser = optparse.OptionParser()