    def test_MakeListOfPoints_MinimalCase(self):
        """A very simple test of a call to MakeListOfPoints."""

        # The master name is retrieved when making the list of points,
        # so it must be stubbed out here.
        self.mox.StubOutWithMock(slave_utils, 'GetActiveMaster')
        slave_utils.GetActiveMaster().AndReturn('MyMaster')
        self.mox.ReplayAll()

        actual_points = results_dashboard.MakeListOfPoints(
            {'bar': {
                'traces': {
                    'baz': ["100.0", "5.0"]
                },
                'rev': '12345',
            }}, 'my-bot', 'foo_test', 'my.master', 'Builder', 10, {})
        expected_points = [{
            'master': 'MyMaster',
            'bot': 'my-bot',
            'test': 'foo_test/bar/baz',
            'revision': 12345,
            'value': '100.0',
            'error': '5.0',
            'masterid': 'my.master',
            'buildername': 'Builder',
            'buildnumber': 10,
            'supplemental_columns': {},
        }]
        self.assertEqual(expected_points, actual_points)

    def test_MakeListOfPoints_TimestampUsedWhenRevisionIsNaN(self):
        """Tests sending data with a git hash as "revision"."""
        self.mox.StubOutWithMock(datetime, 'datetime')
        datetime.datetime.utcnow().AndReturn(FakeDateTime())
        self.mox.StubOutWithMock(slave_utils, 'GetActiveMaster')
        slave_utils.GetActiveMaster().AndReturn('ChromiumPerf')
        self.mox.ReplayAll()

        actual_points = results_dashboard.MakeListOfPoints(
            {
                'bar': {
                    'traces': {
                        'baz': ["100.0", "5.0"]
                    },
                    'rev': '2eca27b067e3e57c70e40b8b95d0030c5d7c1a7f',
                }
            }, 'my-bot', 'foo_test', 'chromium.perf', 'Builder', 10, {})
        expected_points = [{
            'master': 'ChromiumPerf',
            'bot': 'my-bot',
            'test': 'foo_test/bar/baz',
            # The timestamp corresponding to the fake datetime is used.
            'revision': 1375315200,
            'value': '100.0',
            'error': '5.0',
            'masterid': 'chromium.perf',
            'buildername': 'Builder',
            'buildnumber': 10,
            'supplemental_columns': {
                'r_chromium': '2eca27b067e3e57c70e40b8b95d0030c5d7c1a7f',
            },
        }]
        self.assertEqual(expected_points, actual_points)

    def test_BlinkUsesTimestamp(self):
        """Tests that timestamp is used for "revision" for ChromiumWebkit master."""
        self.mox.StubOutWithMock(datetime, 'datetime')
        datetime.datetime.utcnow().AndReturn(FakeDateTime())
        self.mox.StubOutWithMock(slave_utils, 'GetActiveMaster')
        slave_utils.GetActiveMaster().AndReturn('ChromiumWebkit')
        self.mox.ReplayAll()

        actual_points = results_dashboard.MakeListOfPoints(
            {
                'bar': {
                    'traces': {
                        'baz': ["100.0", "5.0"]
                    },
                    'rev': '123456',
                    'webkit_rev': '23456',
                }
            }, 'my-bot', 'foo_test', 'chromium.webkit', 'Builder', 10, {})
        expected_points = [{
            'master': 'ChromiumWebkit',
            'bot': 'my-bot',
            'test': 'foo_test/bar/baz',
            'revision': 1375315200,
            'value': '100.0',
            'error': '5.0',
            'masterid': 'chromium.webkit',
            'buildername': 'Builder',
            'buildnumber': 10,
            'supplemental_columns': {
                'r_chromium_svn': 123456,
                'r_webkit_rev': '23456',
            },
        }]
        self.assertEqual(expected_points, actual_points)
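
# The tests above stub datetime.datetime.utcnow() to return FakeDateTime(),
# and then expect the point id 1375315200 (2013-08-01 00:00:00 UTC).
# FakeDateTime itself is not shown on this page; a minimal sketch, assuming
# the code under test only needs timetuple():
import calendar
import datetime

class FakeDateTime(object):
  def timetuple(self):
    return datetime.datetime(2013, 8, 1).timetuple()

# calendar.timegm(FakeDateTime().timetuple()) == 1375315200
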
Example 4
  def __init__(self, tests_type):
    self._build_number = os.environ.get('BUILDBOT_BUILDNUMBER')
    self._builder_name = os.environ.get('BUILDBOT_BUILDERNAME')
    self._tests_type = tests_type

    if not self._build_number or not self._builder_name:
      raise Exception('You should not be uploading test results to the server '
                      'from your local machine.')

    upstream = (tests_type != 'Chromium_Android_Instrumentation')
    if upstream:
      # TODO(frankf): Use factory properties (see buildbot/bb_device_steps.py)
      # This requires passing the actual master name (e.g. 'ChromiumFYI' not
      # 'chromium.fyi').
      from slave import slave_utils
      self._build_name = slave_utils.SlaveBuildName(constants.DIR_SOURCE_ROOT)
      self._master_name = slave_utils.GetActiveMaster()
    else:
      self._build_name = 'chromium-android'
      buildbot_branch = os.environ.get('BUILDBOT_BRANCH')
      if not buildbot_branch:
        buildbot_branch = 'master'
      self._master_name = '%s-%s' % (self._build_name, buildbot_branch)

    self._test_results_map = {}
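
# A hypothetical usage sketch of the constructor above for a downstream
# Android run; the owning class name (ResultsUploader) and all values are
# illustrative assumptions, not taken from this page.
import os

os.environ['BUILDBOT_BUILDNUMBER'] = '42'
os.environ['BUILDBOT_BUILDERNAME'] = 'android-tester'
os.environ['BUILDBOT_BRANCH'] = 'lkgr'

uploader = ResultsUploader('Chromium_Android_Instrumentation')
# For this downstream tests_type, _master_name becomes 'chromium-android-lkgr'.
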
def MakeListOfPoints(charts, bot, test_name, buildername,
                     buildnumber, supplemental_columns):
  """Constructs a list of point dictionaries to send.

  The format output by this function is the original format for sending data
  to the perf dashboard.

  Args:
    charts: A dictionary of chart names to chart data, as generated by the
        log processor classes (see process_log_utils.GraphingLogProcessor).
    bot: A string which comes from perf_id, e.g. linux-release.
    test_name: A test suite name, e.g. sunspider.
    buildername: Builder name (for stdio links).
    buildnumber: Build number (for stdio links).
    supplemental_columns: A dictionary of extra data to send with a point.

  Returns:
    A list of dictionaries in the format accepted by the perf dashboard.
    Each dictionary has the keys "master", "bot", "test", "value", "revision".
    The full details of this format are described at http://goo.gl/TcJliv.
  """
  results = []

  # The master name used for the dashboard is the CamelCase name returned by
  # GetActiveMaster(), and not the canonical master name with dots.
  master = slave_utils.GetActiveMaster()

  for chart_name, chart_data in sorted(charts.items()):
    point_id, revision_columns = _RevisionNumberColumns(chart_data, prefix='r_')

    for trace_name, trace_values in sorted(chart_data['traces'].items()):
      is_important = trace_name in chart_data.get('important', [])
      test_path = _TestPath(test_name, chart_name, trace_name)
      result = {
          'master': master,
          'bot': bot,
          'test': test_path,
          'revision': point_id,
          'supplemental_columns': {}
      }

      # Apply the calculated revision columns first and the passed-in
      # supplemental_columns after, so the latter can overwrite the former.
      result['supplemental_columns'].update(revision_columns)
      result['supplemental_columns'].update(
          _GetStdioUriColumn(test_name, buildername, buildnumber))
      result['supplemental_columns'].update(supplemental_columns)

      result['value'] = trace_values[0]
      result['error'] = trace_values[1]

      # Add other properties to this result dictionary if available.
      if chart_data.get('units'):
        result['units'] = chart_data['units']
      if is_important:
        result['important'] = True

      results.append(result)

  return results
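
# MakeListOfPoints delegates point-id selection to _RevisionNumberColumns,
# which is not shown on this page. A hedged sketch of one plausible shape,
# inferred only from the expected points in the surrounding tests (an integer
# 'rev' becomes the point id plus a commit-position column; anything else is
# treated as a git hash, and a timestamp is used instead):
import calendar
import datetime

def _RevisionNumberColumns(data, prefix):
  revision_columns = {}
  try:
    point_id = int(data['rev'])
    revision_columns[prefix + 'commit_pos'] = point_id
  except ValueError:
    # 'rev' looks like a git hash: fall back to the current UTC timestamp so
    # point ids stay ordered, and report the hash as the chromium revision.
    point_id = int(calendar.timegm(datetime.datetime.utcnow().timetuple()))
    revision_columns[prefix + 'chromium'] = data['rev']
  return point_id, revision_columns
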
  def test_MakeListOfPoints_MinimalCase(self):
    """A very simple test of a call to MakeListOfPoints."""

    # The master name is retrieved when making the list of points,
    # so it must be stubbed out here.
    self.mox.StubOutWithMock(slave_utils, 'GetActiveMaster')
    slave_utils.GetActiveMaster().AndReturn('MyMaster')
    self.mox.ReplayAll()

    actual_points = results_dashboard.MakeListOfPoints(
        {
            'bar': {
                'traces': {'baz': ["100.0", "5.0"]},
                'rev': '307226',
            }
        },
        'my-bot', 'foo_test', 'Builder', 10, {})
    expected_points = [
        {
            'master': 'MyMaster',
            'bot': 'my-bot',
            'test': 'foo_test/bar/baz',
            'revision': 307226,
            'value': '100.0',
            'error': '5.0',
            'supplemental_columns': {
                'r_commit_pos': 307226,
                'a_stdio_uri': ('[Buildbot stdio](http://build.chromium.org/p'
                                '/my.master/builders/Builder/builds/10/steps/'
                                'foo_test/logs/stdio)')
            },
        }
    ]
    self.assertEqual(expected_points, actual_points)

def MakeDashboardJsonV1(chart_json, revision_dict, test_name, bot, buildername,
                        buildnumber, supplemental_dict, is_ref):
    """Generates Dashboard JSON in the new Telemetry format.

  See http://goo.gl/mDZHPl for more info on the format.

  Args:
    chart_json: A dict containing the telmetry output.
    revision_dict: Dictionary of revisions to include, include "rev",
        which determines the point ID.
    test_name: A test suite name, e.g. sunspider.
    bot: A string which comes from perf_id, e.g. linux-release.
    buildername: Builder name (for stdio links).
    buildnumber: Build number (for stdio links).
    supplemental_dict: A dictionary of extra data to send with a point;
        this includes revisions and annotation data.
    is_ref: True if this is a reference build, False otherwise.

  Returns:
    A dictionary in the format accepted by the perf dashboard.
  """
    if not chart_json:
        print 'Error: No json output from telemetry.'
        print '@@@STEP_FAILURE@@@'

    # The master name used for the dashboard is the CamelCase name returned by
    # GetActiveMaster(), and not the canonical master name with dots.
    master = slave_utils.GetActiveMaster()
    point_id, versions = _RevisionNumberColumns(revision_dict, prefix='')

    supplemental = {}
    for key in supplemental_dict:
        if key.startswith('r_'):
            versions[key.replace('r_', '', 1)] = supplemental_dict[key]
        if key.startswith('a_'):
            supplemental[key.replace('a_', '', 1)] = supplemental_dict[key]

    supplemental.update(_GetStdioUriColumn(test_name, buildername,
                                           buildnumber))

    # TODO(sullivan): The android recipe sends "test_name.reference"
    # while the desktop one just sends "test_name" for ref builds. Need
    # to figure out why.
    # https://github.com/catapult-project/catapult/issues/2046
    test_name = test_name.replace('.reference', '')

    fields = {
        'master': master,
        'bot': bot,
        'test_suite_name': test_name,
        'point_id': point_id,
        'supplemental': supplemental,
        'versions': versions,
        'chart_data': chart_json,
        'is_ref': is_ref,
    }
    return fields
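
# The r_/a_ loop above splits prefixed supplemental keys into 'versions' and
# 'supplemental'. A standalone illustration with the same values used in the
# test further down:
supplemental_dict = {'r_my_rev': '789abc01', 'a_annotation': 'xyz'}
versions, supplemental = {}, {}
for key in supplemental_dict:
    if key.startswith('r_'):
        versions[key.replace('r_', '', 1)] = supplemental_dict[key]
    if key.startswith('a_'):
        supplemental[key.replace('a_', '', 1)] = supplemental_dict[key]
assert versions == {'my_rev': '789abc01'}
assert supplemental == {'annotation': 'xyz'}
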
    def _SendResults(self,
                     send_results_args,
                     expected_new_json,
                     errors,
                     mock_timestamp=False,
                     webkit_master=False):
        """Test one call of SendResults with the given set of arguments.

    Args:
      send_results_args: The list of arguments to pass to SendResults.
      expected_new_json: A list of JSON string expected to be sent.
      errors: A list of corresponding errors expected to be received
          (Each item in the list is either a string or None.)
      mock_timestamp: Whether to stub out datetime with FakeDateTime().
      webkit_master: Whether GetActiveMaster should give the webkit master.

    This method will fail a test case if the JSON that gets sent and the
    errors that are raised when results_dashboard.SendResults is called
    don't match the expected json and errors.
    """
        # Unsetting stubs required here for multiple calls from same test.
        self.mox.UnsetStubs()
        self.mox.StubOutWithMock(slave_utils, 'GetActiveMaster')
        if webkit_master:
            slave_utils.GetActiveMaster().AndReturn('ChromiumWebkit')
        else:
            slave_utils.GetActiveMaster().AndReturn('ChromiumPerf')
        if mock_timestamp:
            self.mox.StubOutWithMock(datetime, 'datetime')
            datetime.datetime.utcnow().AndReturn(FakeDateTime())
        # urllib2.urlopen is the function that's called to send data to
        # the server. Here it is replaced with a mock object which is used
        # to record the expected JSON.
        # Because the JSON expected might be equivalent without being exactly
        # equal (in the string sense), a Mox Comparator is used.
        self.mox.StubOutWithMock(urllib2, 'urlopen')
        for json_line, error in zip(expected_new_json, errors):
            if error:
                urllib2.urlopen(IsEncodedJson(json_line)).AndRaise(error)
            else:
                urllib2.urlopen(IsEncodedJson(json_line))
        self.mox.ReplayAll()
        results_dashboard.SendResults(*send_results_args)
        self.mox.VerifyAll()
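
# IsEncodedJson is referenced throughout these tests but never shown. A rough
# sketch of such a Mox comparator, assuming SendResults builds a
# urllib2.Request whose body has the form 'data=<url-quoted json>' (the exact
# encoding is an assumption):
import json
import urllib

import mox

class IsEncodedJson(mox.Comparator):
    def __init__(self, expected_json):
        self._expected = json.loads(expected_json)

    def equals(self, rhs):
        # rhs is the request object the code under test passed to urlopen.
        body = urllib.unquote_plus(rhs.get_data())
        if not body.startswith('data='):
            return False
        return json.loads(body[len('data='):]) == self._expected

    def __repr__(self):
        return '<Is encoded JSON %r>' % (self._expected,)
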
Example 9
  def _SendResults(self, send_results_args, expected_new_json, errors):
    self.mox.UnsetStubs()  # Needed for multiple calls from same test.
    self.mox.StubOutWithMock(slave_utils, 'GetActiveMaster')
    slave_utils.GetActiveMaster().AndReturn('ChromiumPerf')
    self.mox.StubOutWithMock(urllib2, 'urlopen')
    for json_line, error in zip(expected_new_json, errors):
      if error:
        urllib2.urlopen(IsEncodedJson(json_line)).AndRaise(error)
      else:
        urllib2.urlopen(IsEncodedJson(json_line))
    self.mox.ReplayAll()
    results_dashboard.SendResults(*send_results_args)
    self.mox.VerifyAll()

  def test_GetStdioUri(self):
    self.mox.StubOutWithMock(datetime, 'datetime')
    datetime.datetime.utcnow().AndReturn(FakeDateTime())
    self.mox.StubOutWithMock(slave_utils, 'GetActiveMaster')
    slave_utils.GetActiveMaster().AndReturn('ChromiumPerf')
    self.mox.ReplayAll()

    expected_supplemental_column = {
        'a_stdio_uri': ('[Buildbot stdio](http://build.chromium.org/p'
                        '/my.master/builders/Builder/builds/10/steps/'
                        'foo_test/logs/stdio)')
    }
    stdio_uri_column = results_dashboard._GetStdioUriColumn(
        'foo_test', 'Builder', 10)
    self.assertEqual(expected_supplemental_column, stdio_uri_column)
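
# _GetStdioUriColumn itself is not shown on this page. A hedged reconstruction
# that reproduces the expected column above; the waterfall path ('my.master')
# is hard-coded here because its real source is not visible in these snippets.
def _GetStdioUriColumn(test_name, buildername, buildnumber):
  master_path = 'my.master'  # assumption: normally derived from buildbot config
  url = ('http://build.chromium.org/p/%s/builders/%s/builds/%s/steps/%s/logs/stdio'
         % (master_path, buildername, buildnumber, test_name))
  return {'a_stdio_uri': '[Buildbot stdio](%s)' % url}
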
    def internal_Test_MakeDashboardJsonV1(self, enabled=True):
        self.mox.StubOutWithMock(slave_utils, 'GetActiveMaster')
        slave_utils.GetActiveMaster().AndReturn('ChromiumPerf')
        self.mox.StubOutWithMock(results_dashboard, '_GetTimestamp')
        # pylint: disable=W0212
        results_dashboard._GetTimestamp().AndReturn(307226)
        results_dashboard._GetTimestamp().AndReturn(307226)
        self.mox.ReplayAll()

        v1json = results_dashboard.MakeDashboardJsonV1(
            {
                'some_json': 'from_telemetry',
                'enabled': enabled
            }, {
                'rev': 'f46bf3c',
                'git_revision': 'f46bf3c',
                'v8_rev': '73a34f',
                'commit_pos': 307226
            }, 'foo_test', 'my-bot', 'Builder', '10', {
                'a_annotation': 'xyz',
                'r_my_rev': '789abc01'
            }, True)
        self.assertEqual(
            {
                'master': 'ChromiumPerf',
                'bot': 'my-bot',
                'chart_data': {
                    'some_json': 'from_telemetry',
                    'enabled': enabled
                },
                'is_ref': True,
                'test_suite_name': 'foo_test',
                'point_id': 307226,
                'supplemental': {
                    'annotation': 'xyz',
                    'a_stdio_uri': ('[Buildbot stdio](http://build.chromium.org/p'
                                    '/my.master/builders/Builder/builds/10/steps/'
                                    'foo_test/logs/stdio)')
                },
                'versions': {
                    'v8_rev': '73a34f',
                    'chromium': 'f46bf3c',
                    'my_rev': '789abc01'
                }
            }, v1json)

    def test_MakeListOfPoints_GeneralCase(self):
        """A test of making a list of points, including all optional data."""
        # The master name is retrieved when making the list of points,
        # so it must be stubbed out here.
        self.mox.StubOutWithMock(slave_utils, 'GetActiveMaster')
        slave_utils.GetActiveMaster().AndReturn('MyMaster')
        self.mox.ReplayAll()

        actual_points = results_dashboard.MakeListOfPoints(
            {
                'bar': {
                    'traces': {
                        'bar': ['100.0', '5.0'],
                        'bar_ref': ['98.5', '5.0'],
                    },
                    'rev': '12345',
                    'git_revision': '46790669f8a2ecd7249ab92418260316b1c60dbf',
                    'webkit_rev': '6789',
                    'v8_rev': 'undefined',
                    'units': 'KB',
                },
                'x': {
                    'traces': {
                        'y': [10.0, 0],
                    },
                    'important': ['y'],
                    'rev': '23456',
                    'git_revision': '46790669f8a2ecd7249ab92418260316b1c60dbf',
                    'v8_rev': '2345',
                    'units': 'count',
                },
            },
            'my-bot',
            'foo_test',
            'my.master',
            'Builder',
            10,
            {
                'r_bar': '89abcdef',
                'a_stdio_uri': 'http://mylogs.com/Builder/10',
                # The supplemental columns here are included in all points.
            })
        expected_points = [
            {
                'master': 'MyMaster',
                'bot': 'my-bot',
                'test': 'foo_test/bar',  # Note that trace name is omitted.
                'revision': 12345,
                'value': '100.0',
                'error': '5.0',
                'units': 'KB',
                'masterid': 'my.master',
                'buildername': 'Builder',
                'buildnumber': 10,
                'supplemental_columns': {
                    'r_webkit_rev': '6789',
                    'r_bar': '89abcdef',
                    'r_chromium': '46790669f8a2ecd7249ab92418260316b1c60dbf',
                    'a_stdio_uri': 'http://mylogs.com/Builder/10',
                    # Note that v8 rev is not included since it was 'undefined'.
                },
            },
            {
                'master': 'MyMaster',
                'bot': 'my-bot',
                'test': 'foo_test/bar/ref',  # Note the change in trace name.
                'revision': 12345,
                'value': '98.5',
                'error': '5.0',
                'units': 'KB',
                'masterid': 'my.master',
                'buildername': 'Builder',
                'buildnumber': 10,
                'supplemental_columns': {
                    'r_webkit_rev': '6789',
                    'r_bar': '89abcdef',
                    'r_chromium': '46790669f8a2ecd7249ab92418260316b1c60dbf',
                    'a_stdio_uri': 'http://mylogs.com/Builder/10',
                },
            },
            {
                'master': 'MyMaster',
                'bot': 'my-bot',
                'test': 'foo_test/x/y',
                'revision': 23456,
                'value': 10.0,
                'error': 0,
                'units': 'count',
                'important': True,
                'masterid': 'my.master',
                'buildername': 'Builder',
                'buildnumber': 10,
                'supplemental_columns': {
                    'r_v8_rev': '2345',
                    'r_bar': '89abcdef',
                    'r_chromium': '46790669f8a2ecd7249ab92418260316b1c60dbf',
                    'a_stdio_uri': 'http://mylogs.com/Builder/10',
                },
            },
        ]
        self.assertEqual(expected_points, actual_points)
Example 13
def _GetResultsJson(logname, lines, system, test_name, url, masterid,
                    buildername, buildnumber, supplemental_columns):
    """Prepare JSON to send from the data in the given arguments.

  Args:
    logname: Summary log file name.
    lines: List of log-file lines. Each line is valid JSON which, when
        deserialized, is a dict containing the keys 'traces' and 'rev'.
    system: A string such as 'linux-release', which comes from perf_id.
    test_name: Test name.
    url: Chrome Performance Dashboard URL.
    masterid: Buildbot master ID.
    buildername: Builder name.
    buildnumber: Build number.
    supplemental_columns: Dict of supplemental data to add.

  Returns:
    JSON that shall be sent to the Chrome Performance Dashboard.
  """
    results_to_add = []
    master = slave_utils.GetActiveMaster()
    bot = system
    chart_name = logname.replace('-summary.dat', '')
    for line in lines:
        data = json.loads(line)
        revision, revision_columns = _RevisionNumberColumns(data, master)

        for (trace_name, trace_values) in data['traces'].iteritems():
            is_important = trace_name in data.get('important', [])
            test_path = _TestPath(test_name, chart_name, trace_name)
            result = {
                'master': master,
                'bot': system,
                'test': test_path,
                'revision': revision,
                'masterid': masterid,
                'buildername': buildername,
                'buildnumber': buildnumber,
                'supplemental_columns': {}
            }
            # Apply the calculated revision columns first and the passed-in
            # supplemental_columns after, so the latter can overwrite the former.
            result['supplemental_columns'].update(revision_columns)
            result['supplemental_columns'].update(supplemental_columns)
            # Test whether we have x/y data.
            have_multi_value_data = False
            for value in trace_values:
                if isinstance(value, list):
                    have_multi_value_data = True
            if have_multi_value_data:
                result['data'] = trace_values
            else:
                result['value'] = trace_values[0]
                result['error'] = trace_values[1]

            if data.get('units'):
                result['units'] = data['units']
            if data.get('units_x'):
                result['units_x'] = data['units_x']
            if data.get('stack'):
                result['stack'] = data['stack']
            if is_important:
                result['important'] = True
            results_to_add.append(result)
    _PrintLinkStep(url, master, bot, test_name, revision)
    return json.dumps(results_to_add)
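
# For reference, a minimal sketch of one line of a '-summary.dat' log as the
# docstring above describes it (values are illustrative):
import json

sample_line = json.dumps({
    'traces': {'baz': ['100.0', '5.0']},
    'rev': '12345',
    'units': 'ms',
})
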
Example 14
def _GetResultsJson(logname, lines, system, test, url, masterid, buildername,
                    buildnumber, supplemental_columns):
    results_to_add = []
    master = slave_utils.GetActiveMaster()
    bot = system
    graph = logname.replace('-summary.dat', '')
    for line in lines:
        data = json.loads(line)
        # TODO(sullivan): the dashboard requires ordered integer revision numbers.
        # If the revision is not an integer, assume it's a git hash and send the
        # buildnumber for an ordered revision until we can come up with a more
        # correct solution.
        revision = data['rev']
        git_hash = None
        try:
            revision = int(revision)
        except ValueError:
            revision = int(buildnumber)
            git_hash = data['rev']

        for (trace, values) in data['traces'].iteritems():
            # Test to make sure we don't have x/y data.
            for value in values:
                if not isinstance(value, basestring):
                    # http://crbug.com/224719
                    raise NotImplementedError(
                        'x/y graphs not supported at this time.')

            important = trace in data.get('important', [])
            if trace == graph + '_ref':
                trace = 'ref'
            graph = graph.replace('_by_url', '')
            trace = trace.replace('/', '_')
            test_path = '%s/%s/%s' % (test, graph, trace)
            if graph == trace:
                test_path = '%s/%s' % (test, graph)
            result = {
                'master': master,
                'bot': system,
                'test': test_path,
                'revision': revision,
                'value': values[0],
                'error': values[1],
                'masterid': masterid,
                'buildername': buildername,
                'buildnumber': buildnumber,
            }
            if 'webkit_rev' in data and data['webkit_rev'] != 'undefined':
                result.setdefault('supplemental_columns',
                                  {})['r_webkit_rev'] = data['webkit_rev']
            if 'v8_rev' in data and data['v8_rev'] != 'undefined':
                result.setdefault('supplemental_columns',
                                  {})['r_v8_rev'] = data['v8_rev']
            if git_hash:
                result.setdefault('supplemental_columns',
                                  {})['r_chromium_rev'] = git_hash
            result.setdefault('supplemental_columns',
                              {}).update(supplemental_columns)
            if data.get('units'):
                result['units'] = data['units']
            if important:
                result['important'] = True
            results_to_add.append(result)
    _PrintLinkStep(url, master, bot, test, revision)
    return json.dumps(results_to_add)
Example 15
        # This error will be caught by the following 'not results_map' statement.
        print 'Error: ', e

    if not results_map:
        print 'No data was available to update the JSON results'
        return

    build_dir = os.path.abspath(options.build_dir)
    slave_name = slave_utils.SlaveBuildName(build_dir)

    generate_json_options = copy.copy(options)
    generate_json_options.build_name = slave_name
    generate_json_options.input_results_xml = options.test_output_xml
    generate_json_options.builder_base_url = '%s/%s/%s/%s' % (
        config.Master.archive_url, DEST_DIR, slave_name, options.test_type)
    generate_json_options.master_name = slave_utils.GetActiveMaster()
    generate_json_options.test_results_server = config.Master.test_results_server

    # Print out master name for log_parser
    print '[Running for master: "%s"]' % generate_json_options.master_name

    try:
        # Set webkit and chrome directory (they are used only to get the
        # repository revisions).
        generate_json_options.webkit_dir = chromium_utils.FindUpward(
            build_dir, 'third_party', 'WebKit', 'Source')
        generate_json_options.chrome_dir = build_dir

        # Generate results JSON file and upload it to the appspot server.
        gtest_slave_utils.GenerateAndUploadJSONResults(results_map,
                                                       generate_json_options)
Example 16
def layout_test(options, args):
    """Parse options and call run-webkit-tests, using Python from the tree."""
    build_dir = os.path.abspath(options.build_dir)

    dumprendertree_exe = 'DumpRenderTree.exe'
    if options.driver_name:
        dumprendertree_exe = '%s.exe' % options.driver_name

    # Disable the page heap in case it got left enabled by some previous process.
    try:
        slave_utils.SetPageHeap(build_dir, dumprendertree_exe, False)
    except chromium_utils.PathNotFound:
        # If we don't have gflags.exe, report it but don't worry about it.
        print 'Warning: Couldn\'t disable page heap; it may not have been enabled.'

    blink_scripts_dir = chromium_utils.FindUpward(build_dir, 'third_party',
                                                  'WebKit', 'Tools', 'Scripts')
    run_blink_tests = os.path.join(blink_scripts_dir, 'run-webkit-tests')

    slave_name = slave_utils.SlaveBuildName(build_dir)

    command = [
        run_blink_tests,
        '--no-show-results',
        '--no-new-test-results',
        '--full-results-html',  # For the dashboards.
        '--clobber-old-results',  # Clobber test results before each run.
        '--exit-after-n-failures',
        '5000',
        '--exit-after-n-crashes-or-timeouts',
        '100',
    ]

    # TODO(dpranke): we can switch to always using --debug-rwt-logging
    # after all the bots have WebKit r124789 or later.
    capture_obj = slave_utils.RunCommandCaptureFilter()
    slave_utils.RunPythonCommandInBuildDir(build_dir,
                                           options.target,
                                           [run_blink_tests, '--help'],
                                           filter_obj=capture_obj)
    if '--debug-rwt-logging' in ''.join(capture_obj.lines):
        command.append('--debug-rwt-logging')
    else:
        command.append('--verbose')

    if options.results_directory:
        # Prior to the fix in https://bugs.webkit.org/show_bug.cgi?id=58272,
        # run_blink_tests expects the results directory to be relative to
        # the configuration directory (e.g., src/webkit/Release). The
        # parameter is given to us relative to build_dir, which is where we
        # will run the command from.
        #
        # When 58272 is landed, run_blink_tests will support absolute file
        # paths as well as paths relative to CWD for non-Chromium ports and
        # paths relative to the configuration dir for Chromium ports. As
        # a transitional fix, we convert to an absolute dir, but once the
        # hack in 58272 is removed, we can use results_dir as-is.
        if not os.path.isabs(options.results_directory):
            if options.results_directory.startswith('../../'):
                options.results_directory = options.results_directory[6:]
            options.results_directory = os.path.abspath(
                os.path.join(os.getcwd(), options.results_directory))
        chromium_utils.RemoveDirectory(options.results_directory)
        command.extend(['--results-directory', options.results_directory])

    if options.target:
        command.extend(['--target', options.target])
    if options.platform:
        command.extend(['--platform', options.platform])
    if options.skipped:
        command.extend(['--skipped', options.skipped])
    if options.no_pixel_tests:
        command.append('--no-pixel-tests')
    if options.batch_size:
        command.extend(['--batch-size', options.batch_size])
    if options.run_part:
        command.extend(['--run-part', options.run_part])
    if options.builder_name:
        command.extend(['--builder-name', options.builder_name])
    if options.build_number:
        command.extend(['--build-number', options.build_number])
    command.extend(['--master-name', slave_utils.GetActiveMaster() or ''])
    command.extend(['--build-name', slave_name])
    if options.step_name:
        command.extend(['--step-name', options.step_name])
    # On Windows, look for the target in an exact location.
    if sys.platform == 'win32':
        command.extend(['--build-directory', build_dir])
    if options.test_results_server:
        command.extend(['--test-results-server', options.test_results_server])

    if options.enable_pageheap:
        command.append('--time-out-ms=120000')

    if options.time_out_ms:
        command.extend(['--time-out-ms', options.time_out_ms])

    for filename in options.additional_expectations:
        command.append('--additional-expectations=%s' % filename)

    if options.driver_name:
        command.append('--driver-name=%s' % options.driver_name)

    for additional_drt_flag in options.additional_drt_flag:
        command.append('--additional-drt-flag=%s' % additional_drt_flag)

    for test_list in options.test_list:
        command += ['--test-list', test_list]

    if options.enable_leak_detection:
        command.append('--enable-leak-detection')

    # The list of tests is given as arguments.
    if options.options:
        command.extend(options.options.split(' '))
    command.extend(args)

    # Nuke anything that appears to be a stale Chrome item in the temporary
    # directory from previous test runs (e.g. from crashes or unittest leaks).
    slave_utils.RemoveChromeTemporaryFiles()

    try:
        if options.enable_pageheap:
            slave_utils.SetPageHeap(build_dir, dumprendertree_exe, True)
        # Run the tests.
        return slave_utils.RunPythonCommandInBuildDir(build_dir,
                                                      options.target, command)
    finally:
        if options.enable_pageheap:
            slave_utils.SetPageHeap(build_dir, dumprendertree_exe, False)

        if options.json_test_results:
            results_dir = options.results_directory
            results_json = os.path.join(results_dir, "failing_results.json")

            # If the json results file was not produced, we also produce no
            # output file and rely on the recipe to treat this as an invalid
            # result.
            if os.path.isfile(results_json):
                with open(results_json, 'rb') as f:
                    data = f.read()

                # data is in the form of:
                #   ADD_RESULTS(<json object>);
                # but use a regex match to also support a raw json object.
                m = re.match(
                    r'[^({]*'  # From the beginning, take anything except '(' or '{'.
                    r'(?:'
                    r'\((.*)\);'  # Expect '(<json>);'
                    r'|'  # or
                    r'({.*})'  # '<json object>'
                    r')$',
                    data)
                assert m is not None
                data = m.group(1) or m.group(2)

                json_data = json.loads(data)
                assert isinstance(json_data, dict)

                with open(options.json_test_results, 'wb') as f:
                    f.write(data)
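
# A quick standalone check of the regex above against both accepted input
# forms (the sample strings are illustrative):
import re

_ADD_RESULTS_RE = re.compile(
    r'[^({]*'      # From the beginning, take anything except '(' or '{'.
    r'(?:'
    r'\((.*)\);'   # Expect '(<json>);'
    r'|'           # or
    r'({.*})'      # '<json object>'
    r')$')

wrapped = _ADD_RESULTS_RE.match('ADD_RESULTS({"tests": {}});')
raw = _ADD_RESULTS_RE.match('{"tests": {}}')
assert (wrapped.group(1) or wrapped.group(2)) == '{"tests": {}}'
assert (raw.group(1) or raw.group(2)) == '{"tests": {}}'
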
Example 17
def layout_test(options, args):
    """Parse options and call run_webkit_tests.py, using Python from the tree."""
    build_dir = os.path.abspath(options.build_dir)

    # Disable the page heap in case it got left enabled by some previous process.
    try:
        slave_utils.SetPageHeap(build_dir, 'DumpRenderTree.exe', False)
    except chromium_utils.PathNotFound:
        # If we don't have gflags.exe, report it but don't worry about it.
        print 'Warning: Couldn\'t disable page heap; it may not have been enabled.'

    webkit_tests_dir = chromium_utils.FindUpward(build_dir, 'webkit', 'tools',
                                                 'layout_tests')
    run_webkit_tests = os.path.join(webkit_tests_dir, 'run_webkit_tests.py')

    slave_name = slave_utils.SlaveBuildName(build_dir)

    command = [
        run_webkit_tests,
        '--no-show-results',
        '--no-new-test-results',
        '--verbose',  # Verbose output is enabled to support the dashboard.
        '--full-results-html',  # To make debugging failures easier.
        '--clobber-old-results',  # Clobber test results before each run.
        '--exit-after-n-failures',
        '5000',
        '--exit-after-n-crashes-or-timeouts',
        '100',
    ]

    if options.results_directory:
        # Prior to the fix in https://bugs.webkit.org/show_bug.cgi?id=58272,
        # run_webkit_tests expects the results directory to be relative to
        # the configuration directory (e.g., src/webkit/Release). The
        # parameter is given to us relative to build_dir, which is where we
        # will run the command from.
        #
        # When 58272 is landed, run_webkit_tests will support absolute file
        # paths as well as paths relative to CWD for non-Chromium ports and
        # paths relative to the configuration dir for Chromium ports. As
        # a transitional fix, we convert to an absolute dir, but once the
        # hack in 58272 is removed, we can use results_dir as-is.
        if not os.path.isabs(options.results_directory):
            if options.results_directory.startswith('../../'):
                options.results_directory = options.results_directory[6:]
            options.results_directory = os.path.abspath(
                os.path.join(os.getcwd(), options.results_directory))
        chromium_utils.RemoveDirectory(options.results_directory)
        command.extend(['--results-directory', options.results_directory])

    if options.target:
        command.extend(['--target', options.target])
    if options.platform:
        command.extend(['--platform', options.platform])

    if options.no_pixel_tests:
        command.append('--no-pixel-tests')
    if options.batch_size:
        command.extend(['--batch-size', options.batch_size])
    if options.run_part:
        command.extend(['--run-part', options.run_part])
    if options.builder_name:
        command.extend(['--builder-name', options.builder_name])
    if options.build_number:
        command.extend(['--build-number', options.build_number])
    command.extend(['--master-name', slave_utils.GetActiveMaster() or ''])
    command.extend(['--build-name', slave_name])
    # On Windows, look for the target in an exact location.
    if sys.platform == 'win32':
        command.extend(['--build-directory', build_dir])
    if options.test_results_server:
        command.extend(['--test-results-server', options.test_results_server])

    if options.enable_pageheap:
        command.append('--time-out-ms=120000')

    for filename in options.additional_expectations:
        command.append('--additional-expectations=%s' % filename)

    # The list of tests is given as arguments.
    command.extend(options.options.split(' '))
    command.extend(args)

    # Nuke anything that appears to be a stale Chrome item in the temporary
    # directory from previous test runs (e.g. from crashes or unittest leaks).
    slave_utils.RemoveChromeTemporaryFiles()

    try:
        if options.enable_pageheap:
            slave_utils.SetPageHeap(build_dir, 'DumpRenderTree.exe', True)
        # Run the tests.
        return slave_utils.RunPythonCommandInBuildDir(build_dir,
                                                      options.target, command)
    finally:
        if options.enable_pageheap:
            slave_utils.SetPageHeap(build_dir, 'DumpRenderTree.exe', False)