def _generate_results_dict(self, timestamp, description, platform,
                           builder_name, build_number):
        """Build the JSON-serializable results dictionary for the perf dashboard.

        Note: 'timestamp' is accepted for interface compatibility but is not
        used; the build time comes from self._utc_timestamp instead.
        """
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive
                              ).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {
                'revision': revision,
                'timestamp': scm.timestamp_of_revision(path, revision)
            }

        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None
        }

        # Only copy metadata entries with truthy values into the output.
        contents = {'tests': {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for test, metrics in self._results:
            # items() instead of the Python-2-only iteritems() so the code
            # also runs under Python 3.
            for metric_name, iteration_values in metrics.items():
                if not isinstance(iteration_values, list):
                    # We can't report results without individual measurements.
                    continue

                # Walk (and lazily build) the nested 'tests' tree, one path
                # component at a time; the leaf gets the full test-file URL.
                tests = contents['tests']
                path = test.test_name_without_file_extension().split('/')
                for i in range(0, len(path)):
                    is_last_token = i + 1 == len(path)
                    url = view_source_url('PerformanceTests/' +
                                          (test.test_name() if is_last_token
                                           else '/'.join(path[0:i + 1])))
                    tests.setdefault(path[i], {'url': url})
                    current_test = tests[path[i]]
                    if is_last_token:
                        current_test.setdefault('metrics', {})
                        assert metric_name not in current_test['metrics']
                        current_test['metrics'][metric_name] = {
                            'current': iteration_values
                        }
                    else:
                        current_test.setdefault('tests', {})
                        tests = current_test['tests']

        return contents
Ejemplo n.º 2
0
    def _generate_results_dict(self, timestamp, description, platform,
                               builder_name, build_number):
        """Assemble the nested results dictionary uploaded to the perf dashboard.

        'timestamp' is kept for interface compatibility; the build time is
        derived from self._utc_timestamp.
        """
        revisions = {}
        for (name, path) in self._port.repository_paths():
            detector = SCMDetector(self._host.filesystem, self._host.executive)
            scm = detector.detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {
                'revision': revision,
                'timestamp': scm.timestamp_of_revision(path, revision)}

        contents = {'tests': {}}
        # Copy only the truthy metadata entries into the output.
        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for metric in self._results:
            subtree = contents['tests']
            path = metric.path()
            last_index = len(path) - 1
            for depth, component in enumerate(path):
                url = view_source_url(
                    'PerformanceTests/' + '/'.join(path[:depth + 1]))
                subtree.setdefault(component, {'url': url})
                node = subtree[component]
                if depth == last_index:
                    # Leaf node: point the URL at the actual test file and
                    # attach the metric's measurements.
                    node['url'] = view_source_url(
                        'PerformanceTests/' + metric.test_file_name())
                    node.setdefault('metrics', {})
                    assert metric.name() not in node['metrics']
                    result_entry = {
                        'current': metric.grouped_iteration_values()}
                    if metric.aggregator():
                        result_entry['aggregators'] = [metric.aggregator()]
                    node['metrics'][metric.name()] = result_entry
                else:
                    node.setdefault('tests', {})
                    subtree = node['tests']

        return contents
Ejemplo n.º 3
0
    def _get_svn_revision(self, in_directory):
        """Return the svn revision for the given directory.

        Args:
          in_directory: The directory where svn is to be run.

        Returns "" when no SCM system is detected in the directory.
        """
        detected = SCMDetector(self._filesystem, self._executive).detect_scm_system(in_directory)
        return detected.svn_revision(in_directory) if detected else ""
Ejemplo n.º 4
0
    def _get_svn_revision(self, in_directory):
        """Returns the svn revision for the given directory.

        Args:
          in_directory: The directory where svn is to be run.
        """
        detector = SCMDetector(self._filesystem, self._executive)
        scm = detector.detect_scm_system(in_directory)
        if not scm:
            # No recognized SCM checkout here; callers expect an empty string.
            return ""
        return scm.svn_revision(in_directory)
Ejemplo n.º 5
0
    def upload_results(self, results_json_path, start_time, end_time):
        """Upload the results JSON file to the configured results server.

        Silently returns when --results-server-host is unset; logs an error
        and returns when any of the required builder options are missing or
        when the upload fails.
        """
        hostname = self._options.results_server_host
        if not hostname:
            return
        master_name = self._options.master_name
        builder_name = self._options.builder_name
        build_number = self._options.build_number
        build_slave = self._options.build_slave
        if not master_name or not builder_name or not build_number or not build_slave:
            _log.error("--results-server-host was set, but --master-name, --builder-name, --build-number, or --build-slave was not. Not uploading JSON files.")
            return

        revisions = {}
        # FIXME: This code is duplicated in PerfTestRunner._generate_results_dict
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._port.host.filesystem, self._port.host.executive).detect_scm_system(path) or self._port.host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        _log.info("Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s", master_name, builder_name, build_number, build_slave, hostname)

        attrs = [
            ('master', 'build.webkit.org' if master_name == 'webkit.org' else master_name),  # FIXME: Pass in build.webkit.org.
            ('builder_name', builder_name),
            ('build_number', build_number),
            ('build_slave', build_slave),
            ('revisions', json.dumps(revisions)),
            ('start_time', str(start_time)),
            ('end_time', str(end_time)),
        ]

        uploader = FileUploader("http://%s/api/report" % hostname, 360)
        try:
            response = uploader.upload_as_multipart_form_data(self._filesystem, [('results.json', results_json_path)], attrs)
            if not response:
                _log.error("JSON upload failed; no response returned")
                return

            if response.code != 200:
                _log.error("JSON upload failed, %d: '%s'" % (response.code, response.read()))
                return

            response_text = response.read()
            try:
                # Python-2-only "except ValueError, error" replaced with
                # portable syntax; the bound exception object was unused.
                response_json = json.loads(response_text)
            except ValueError:
                _log.error("JSON upload failed; failed to parse the response: %s", response_text)
                return

            if response_json['status'] != 'OK':
                _log.error("JSON upload failed, %s: %s", response_json['status'], response_text)
                return

            _log.info("JSON uploaded.")
        except Exception as error:
            # The original 'try' had no handler (a syntax error as written —
            # presumably lost in transit); log upload/network failures instead
            # of crashing the whole run.
            _log.error("Upload failed: %s" % error)
Ejemplo n.º 6
0
    def upload_results(self, results_json_path, start_time, end_time):
        """Upload the results JSON file to the configured results server.

        Silently returns when --results-server-host is unset; logs an error
        and returns when any of the required builder options are missing or
        when the upload fails.
        """
        hostname = self._options.results_server_host
        if not hostname:
            return
        master_name = self._options.master_name
        builder_name = self._options.builder_name
        build_number = self._options.build_number
        build_slave = self._options.build_slave
        if not master_name or not builder_name or not build_number or not build_slave:
            _log.error("--results-server-host was set, but --master-name, --builder-name, --build-number, or --build-slave was not. Not uploading JSON files.")
            return

        revisions = {}
        # FIXME: This code is duplicated in PerfTestRunner._generate_results_dict
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._port.host.filesystem, self._port.host.executive).detect_scm_system(path) or self._port.host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        _log.info("Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s", master_name, builder_name, build_number, build_slave, hostname)

        attrs = [
            ('master', 'build.webkit.org' if master_name == 'webkit.org' else master_name),  # FIXME: Pass in build.webkit.org.
            ('builder_name', builder_name),
            ('build_number', build_number),
            ('build_slave', build_slave),
            ('revisions', json.dumps(revisions)),
            ('start_time', str(start_time)),
            ('end_time', str(end_time)),
        ]

        uploader = FileUploader("http://%s/api/report" % hostname, 360)
        try:
            response = uploader.upload_as_multipart_form_data(self._filesystem, [('results.json', results_json_path)], attrs)
            if not response:
                _log.error("JSON upload failed; no response returned")
                return

            if response.code != 200:
                _log.error("JSON upload failed, %d: '%s'" % (response.code, response.read()))
                return

            response_text = response.read()
            try:
                # Python-2-only "except ValueError, error" replaced with
                # portable syntax; the bound exception object was unused.
                response_json = json.loads(response_text)
            except ValueError:
                _log.error("JSON upload failed; failed to parse the response: %s", response_text)
                return

            if response_json['status'] != 'OK':
                _log.error("JSON upload failed, %s: %s", response_json['status'], response_text)
                return

            _log.info("JSON uploaded.")
        except Exception as error:
            # The original 'try' had no handler (a syntax error as written —
            # presumably lost in transit); log upload/network failures instead
            # of crashing the whole run.
            _log.error("Upload failed: %s" % error)
Ejemplo n.º 7
0
    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        """Build the nested results dictionary for the perf dashboard.

        Note: 'timestamp' is accepted for interface compatibility but unused;
        the build time comes from self._utc_timestamp instead.
        """
        contents = {'tests': {}}
        if description:
            contents['description'] = description

        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': str(revision), 'timestamp': scm.timestamp_of_latest_commit(path, revision)}

        meta_info = {
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        # Only copy metadata entries with truthy values into the output.
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        # FIXME: Make this function shorter once we've transitioned to use perf.webkit.org.
        # items() instead of the Python-2-only iteritems() so the code also
        # runs under Python 3.
        for metric_full_name, result in self._results.items():
            if not isinstance(result, dict):  # We can't report results without individual measurements.
                continue

            assert metric_full_name.count(':') <= 1
            test_full_name, _, metric = metric_full_name.partition(':')
            if not metric:
                # Infer the metric name from the measurement unit when it is
                # not encoded in the key as "test:metric".
                metric = {'fps': 'FrameRate', 'runs/s': 'Runs', 'ms': 'Time'}[result['unit']]

            tests = contents['tests']
            path = test_full_name.split('/')
            for i in range(0, len(path)):
                # FIXME: We shouldn't assume HTML extension.
                is_last_token = i + 1 == len(path)
                url = 'http://trac.webkit.org/browser/trunk/PerformanceTests/' + '/'.join(path[0:i + 1])
                if is_last_token:
                    url += '.html'

                tests.setdefault(path[i], {'url': url})
                current_test = tests[path[i]]
                if is_last_token:
                    current_test.setdefault('metrics', {})
                    assert metric not in current_test['metrics']
                    current_test['metrics'][metric] = {'current': result['values']}
                else:
                    current_test.setdefault('tests', {})
                    tests = current_test['tests']

        return contents
Ejemplo n.º 8
0
    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        """Build the flat (legacy-format) results dictionary for uploading."""
        contents = {'results': self._results}
        if description:
            contents['description'] = description
        for (name, path) in self._port.repository_paths():
            detector = SCMDetector(self._host.filesystem, self._host.executive)
            scm = detector.detect_scm_system(path) or self._host.scm()
            contents[name + '-revision'] = scm.svn_revision(path)

        # FIXME: Add --branch or auto-detect the branch we're in
        # Only include entries with truthy values.
        meta_info = {
            'timestamp': int(timestamp),
            'branch': self._default_branch,
            'platform': platform,
            'builder-name': builder_name,
            'build-number': int(build_number) if build_number else None,
        }
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        return contents
Ejemplo n.º 9
0
    def _get_svn_revision(self, in_directory):
        """Return the svn revision for the given directory, or "" if no SCM
        system is detected there.

        Args:
          in_directory: The directory where svn is to be run.
        """
        # FIXME: We initialize this here in order to engage the stupid windows hacks :).
        # We can't reuse an existing scm object because the specific directories may
        # be part of other checkouts.
        self._port.host.initialize_scm()
        scm = SCMDetector(self._filesystem, self._executive).detect_scm_system(in_directory)
        return scm.svn_revision(in_directory) if scm else ""
Ejemplo n.º 10
0
    def _get_svn_revision(self, in_directory):
        """Returns the svn revision for the given directory.

        Args:
          in_directory: The directory where svn is to be run.
        """
        # FIXME: We initialize this here in order to engage the stupid windows hacks :).
        # We can't reuse an existing scm object because the specific directories may
        # be part of other checkouts.
        self._port.host.initialize_scm()
        detected_scm = SCMDetector(self._filesystem, self._executive).detect_scm_system(in_directory)
        if not detected_scm:
            return ""
        return detected_scm.svn_revision(in_directory)
Ejemplo n.º 11
0
    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        """Assemble the legacy flat results dictionary."""
        contents = {'results': self._results}
        if description:
            contents['description'] = description
        for (repo_name, repo_path) in self._port.repository_paths():
            scm = (SCMDetector(self._host.filesystem, self._host.executive)
                   .detect_scm_system(repo_path)) or self._host.scm()
            contents[repo_name + '-revision'] = scm.svn_revision(repo_path)

        # FIXME: Add --branch or auto-detect the branch we're in
        optional_fields = {
            'timestamp': int(timestamp),
            'branch': self._default_branch,
            'platform': platform,
            'builder-name': builder_name,
            'build-number': int(build_number) if build_number else None}
        # Drop entries whose value is falsy.
        for field_name, field_value in optional_fields.items():
            if field_value:
                contents[field_name] = field_value

        return contents
Ejemplo n.º 12
0
    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        """Build the nested results dictionary consumed by the perf dashboard.

        'timestamp' is kept for interface compatibility; the build time is
        derived from self._utc_timestamp.
        """
        revisions = {}
        for (name, path) in self._port.repository_paths():
            detector = SCMDetector(self._host.filesystem, self._host.executive)
            scm = detector.detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        contents = {'tests': {}}
        # Copy only the truthy metadata entries into the output.
        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for metric in self._results:
            subtree = contents['tests']
            path = metric.path()
            last_index = len(path) - 1
            for depth, component in enumerate(path):
                is_leaf = depth == last_index
                url = view_source_url('PerformanceTests/' + '/'.join(path[:depth + 1]))

                # FIXME: This is a temporary workaround for the fact perf dashboard doesn't support renaming tests.
                node_name = 'DoYouEvenBench' if component == 'Speedometer' else component

                subtree.setdefault(node_name, {'url': url})
                node = subtree[node_name]
                if is_leaf:
                    # Leaf node: point the URL at the actual test file and
                    # attach the metric's measurements.
                    node['url'] = view_source_url('PerformanceTests/' + metric.test_file_name())
                    node.setdefault('metrics', {})
                    assert metric.name() not in node['metrics']
                    entry = {'current': metric.grouped_iteration_values()}
                    if metric.aggregator():
                        entry['aggregators'] = [metric.aggregator()]
                    node['metrics'][metric.name()] = entry
                else:
                    node.setdefault('tests', {})
                    subtree = node['tests']

        return contents
    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        """Build the JSON-serializable results dictionary for the perf dashboard.

        Note: 'timestamp' is accepted for interface compatibility but unused;
        the build time comes from self._utc_timestamp instead.
        """
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {"revision": revision, "timestamp": scm.timestamp_of_revision(path, revision)}

        meta_info = {
            "description": description,
            "buildTime": self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            "platform": platform,
            "revisions": revisions,
            "builderName": builder_name,
            "buildNumber": int(build_number) if build_number else None,
        }

        # Only copy metadata entries with truthy values into the output.
        contents = {"tests": {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for test, metrics in self._results:
            # items() instead of the Python-2-only iteritems() so the code
            # also runs under Python 3.
            for metric_name, iteration_values in metrics.items():
                if not isinstance(iteration_values, list):  # We can't report results without individual measurements.
                    continue

                # Walk (and lazily build) the nested 'tests' tree, one path
                # component at a time; the leaf gets the full test-file URL.
                tests = contents["tests"]
                path = test.test_name_without_file_extension().split("/")
                for i in range(0, len(path)):
                    is_last_token = i + 1 == len(path)
                    url = view_source_url(
                        "PerformanceTests/" + (test.test_name() if is_last_token else "/".join(path[0 : i + 1]))
                    )
                    tests.setdefault(path[i], {"url": url})
                    current_test = tests[path[i]]
                    if is_last_token:
                        current_test.setdefault("metrics", {})
                        assert metric_name not in current_test["metrics"]
                        current_test["metrics"][metric_name] = {"current": iteration_values}
                    else:
                        current_test.setdefault("tests", {})
                        tests = current_test["tests"]

        return contents
Ejemplo n.º 14
0
    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        """Build the JSON-serializable results dictionary for the perf dashboard.

        Note: 'timestamp' is accepted for interface compatibility but unused;
        the build time comes from self._utc_timestamp instead.
        """
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        # Only copy metadata entries with truthy values into the output.
        contents = {'tests': {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for test, metrics in self._results:
            # items() instead of the Python-2-only iteritems() so the code
            # also runs under Python 3.
            for metric_name, iteration_values in metrics.items():
                if not isinstance(iteration_values, list):  # We can't report results without individual measurements.
                    continue

                # Walk (and lazily build) the nested 'tests' tree, one path
                # component at a time; the leaf gets the full test-file URL.
                tests = contents['tests']
                path = test.test_name_without_file_extension().split('/')
                for i in range(0, len(path)):
                    is_last_token = i + 1 == len(path)
                    url = view_source_url('PerformanceTests/' + (test.test_name() if is_last_token else '/'.join(path[0:i + 1])))
                    tests.setdefault(path[i], {'url': url})
                    current_test = tests[path[i]]
                    if is_last_token:
                        current_test.setdefault('metrics', {})
                        assert metric_name not in current_test['metrics']
                        current_test['metrics'][metric_name] = {'current': iteration_values}
                    else:
                        current_test.setdefault('tests', {})
                        tests = current_test['tests']

        return contents