Example No. 1
0
    def _generate_results_dict(self, timestamp, description, platform,
                               builder_name, build_number):
        """Assemble the results dictionary for the performance dashboard.

        Metadata entries with falsy values are dropped; test results are
        stored as a nested tree under the 'tests' key, one node per path
        segment of each metric.
        """
        revisions = {}
        for (repo_name, repo_path) in self._port.repository_paths():
            # Prefer an SCM detected for this path; fall back to the host's.
            detected = SCMDetector(self._host.filesystem, self._host.executive
                                   ).detect_scm_system(repo_path)
            scm = detected or self._host.scm()
            revision = scm.native_revision(repo_path)
            revisions[repo_name] = {
                'revision': revision,
                'timestamp': scm.timestamp_of_native_revision(repo_path,
                                                              revision),
            }

        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(
                self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None,
        }

        # Keep only the metadata entries that actually have a value.
        contents = {'tests': {}}
        contents.update(
            (key, value) for key, value in meta_info.items() if value)

        for metric in self._results:
            node = contents['tests']
            segments = metric.path()
            last_index = len(segments) - 1
            for depth, segment in enumerate(segments):
                url = view_source_url(
                    'PerformanceTests/' + '/'.join(segments[:depth + 1]))
                entry = node.setdefault(segment, {'url': url})
                if depth == last_index:
                    # Leaf node: link directly to the test file and record
                    # this metric's values.
                    entry['url'] = view_source_url(
                        'PerformanceTests/' + metric.test_file_name())
                    metrics_by_name = entry.setdefault('metrics', {})
                    assert metric.name() not in metrics_by_name
                    result = {'current': metric.grouped_iteration_values()}
                    if metric.aggregator():
                        result['aggregators'] = [metric.aggregator()]
                    metrics_by_name[metric.name()] = result
                else:
                    # Interior node: descend into its 'tests' subtree.
                    node = entry.setdefault('tests', {})

        return contents
Example No. 2
0
    def _generate_results_dict(self, timestamp, description, platform,
                               builder_name, build_number):
        """Build the nested results dictionary uploaded to the perf dashboard.

        Args:
            timestamp: Unused here; build time is taken from
                self._utc_timestamp.
            description: Optional human-readable description of this run.
            platform: Platform name for the run.
            builder_name: Name of the bot that produced the results.
            build_number: Build number as a string (or falsy if unavailable).

        Returns:
            A dict with metadata keys plus a 'tests' tree; each leaf holds a
            'metrics' dict mapping metric name -> {'current': values}.
        """
        revisions = {}
        path = self._port.repository_path()
        # Prefer an SCM detected for this path; fall back to the host's SCM.
        scm = SCMDetector(
            self._host.filesystem,
            self._host.executive).detect_scm_system(path) or self._host.scm()
        revision = str(scm.commit_position(path))
        revisions['chromium'] = {
            'revision': revision,
            'timestamp': scm.timestamp_of_revision(path, revision)
        }

        meta_info = {
            'description':
            description,
            'buildTime':
            self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform':
            platform,
            'revisions':
            revisions,
            'builderName':
            builder_name,
            'buildNumber':
            int(build_number) if build_number else None
        }

        # Keep only the metadata entries that actually have a value.
        contents = {'tests': {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for test, metrics in self._results:
            # items() rather than the Python-2-only iteritems(), so this also
            # runs under Python 3.
            for metric_name, iteration_values in metrics.items():
                if not isinstance(
                        iteration_values, list
                ):  # We can't report results without individual measurements.
                    continue

                tests = contents['tests']
                path = test.test_name_without_file_extension().split('/')
                for i in range(0, len(path)):
                    is_last_token = i + 1 == len(path)
                    url = view_source_url('PerformanceTests/' +
                                          (test.test_name() if is_last_token
                                           else '/'.join(path[0:i + 1])))
                    tests.setdefault(path[i], {'url': url})
                    current_test = tests[path[i]]
                    if is_last_token:
                        # Leaf node: record this metric's iteration values.
                        current_test.setdefault('metrics', {})
                        assert metric_name not in current_test['metrics']
                        current_test['metrics'][metric_name] = {
                            'current': iteration_values
                        }
                    else:
                        # Interior node: descend into its 'tests' subtree.
                        current_test.setdefault('tests', {})
                        tests = current_test['tests']

        return contents
Example No. 3
0
    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        """Build the nested results dictionary uploaded to the perf dashboard.

        Args:
            timestamp: Unused here; build time is taken from self._utc_timestamp.
            description: Optional human-readable description of this run.
            platform: Platform name for the run.
            builder_name: Name of the bot that produced the results.
            build_number: Build number as a string (or falsy if unavailable).

        Returns:
            A dict with metadata keys plus a 'tests' tree; each leaf holds a
            'metrics' dict mapping metric name -> {'current': values, ...}.
        """
        revisions = {}
        for (name, path) in self._port.repository_paths():
            # Prefer an SCM detected for this path; fall back to the host's SCM.
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        # Keep only the metadata entries that actually have a value.
        contents = {'tests': {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for metric in self._results:
            tests = contents['tests']
            path = metric.path()
            for i in range(0, len(path)):
                is_last_token = i + 1 == len(path)
                url = view_source_url('PerformanceTests/' + '/'.join(path[0:i + 1]))
                test_name = path[i]

                # FIXME: This is a temporary workaround for the fact perf dashboard doesn't support renaming tests.
                if test_name == 'Speedometer':
                    test_name = 'DoYouEvenBench'

                tests.setdefault(test_name, {'url': url})
                current_test = tests[test_name]
                if is_last_token:
                    # Leaf node: link directly to the test file and record this metric's values.
                    current_test['url'] = view_source_url('PerformanceTests/' + metric.test_file_name())
                    current_test.setdefault('metrics', {})
                    assert metric.name() not in current_test['metrics']
                    test_results = {'current': metric.grouped_iteration_values()}
                    if metric.aggregator():
                        test_results['aggregators'] = [metric.aggregator()]
                    current_test['metrics'][metric.name()] = test_results
                else:
                    # Interior node: descend into its 'tests' subtree.
                    current_test.setdefault('tests', {})
                    tests = current_test['tests']

        return contents
Example No. 4
0
 def _flag_permission_rejection_message(self, setter_email, flag_name):
     """Compose the comment posted when a flag setter lacks permission."""
     # This could be queried from the tool.
     queue_name = "commit-queue"
     committers_list = self._committers_py_path()
     parts = [
         "%s does not have %s permissions according to %s." % (
             setter_email, flag_name, urls.view_source_url(committers_list)),
         "\n\n- If you do not have %s rights please read %s for instructions on how to use bugzilla flags." % (
             flag_name, urls.contribution_guidelines),
         "\n\n- If you have %s rights please correct the error in %s by adding yourself to the file (no review needed).  " % (
             flag_name, committers_list),
         "The %s restarts itself every 2 hours.  After restart the %s will correctly respect your %s rights." % (
             queue_name, queue_name, flag_name),
     ]
     return "".join(parts)
Example No. 5
0
 def _flag_permission_rejection_message(self, setter_email, flag_name):
     """Compose the comment posted when a flag setter lacks permission."""
     # This could be queried from the tool.
     queue_name = "commit-queue"
     committers_list = self._committers_py_path()
     header = "%s does not have %s permissions according to %s." % (
         setter_email, flag_name, urls.view_source_url(committers_list))
     no_rights_note = "\n\n- If you do not have %s rights please read %s for instructions on how to use bugzilla flags." % (
         flag_name, urls.contribution_guidelines)
     has_rights_note = "\n\n- If you have %s rights please correct the error in %s by adding yourself to the file (no review needed).  " % (
         flag_name, committers_list)
     restart_note = "The %s restarts itself every 2 hours.  After restart the %s will correctly respect your %s rights." % (
         queue_name, queue_name, flag_name)
     return header + no_rights_note + has_rights_note + restart_note
    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        """Build the nested results dictionary uploaded to the perf dashboard.

        Args:
            timestamp: Unused here; build time is taken from self._utc_timestamp.
            description: Optional human-readable description of this run.
            platform: Platform name for the run.
            builder_name: Name of the bot that produced the results.
            build_number: Build number as a string (or falsy if unavailable).

        Returns:
            A dict with metadata keys plus a "tests" tree; each leaf holds a
            "metrics" dict mapping metric name -> {"current": values}.
        """
        revisions = {}
        for (name, path) in self._port.repository_paths():
            # Prefer an SCM detected for this path; fall back to the host's SCM.
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {"revision": revision, "timestamp": scm.timestamp_of_revision(path, revision)}

        meta_info = {
            "description": description,
            "buildTime": self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            "platform": platform,
            "revisions": revisions,
            "builderName": builder_name,
            "buildNumber": int(build_number) if build_number else None,
        }

        # Keep only the metadata entries that actually have a value.
        contents = {"tests": {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for test, metrics in self._results:
            # items() rather than the Python-2-only iteritems(), so this also
            # runs under Python 3.
            for metric_name, iteration_values in metrics.items():
                if not isinstance(iteration_values, list):  # We can't report results without individual measurements.
                    continue

                tests = contents["tests"]
                path = test.test_name_without_file_extension().split("/")
                for i in range(0, len(path)):
                    is_last_token = i + 1 == len(path)
                    url = view_source_url(
                        "PerformanceTests/" + (test.test_name() if is_last_token else "/".join(path[0 : i + 1]))
                    )
                    tests.setdefault(path[i], {"url": url})
                    current_test = tests[path[i]]
                    if is_last_token:
                        # Leaf node: record this metric's iteration values.
                        current_test.setdefault("metrics", {})
                        assert metric_name not in current_test["metrics"]
                        current_test["metrics"][metric_name] = {"current": iteration_values}
                    else:
                        # Interior node: descend into its "tests" subtree.
                        current_test.setdefault("tests", {})
                        tests = current_test["tests"]

        return contents
Example No. 7
0
    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        """Build the nested results dictionary uploaded to the perf dashboard.

        Args:
            timestamp: Unused here; build time is taken from self._utc_timestamp.
            description: Optional human-readable description of this run.
            platform: Platform name for the run.
            builder_name: Name of the bot that produced the results.
            build_number: Build number as a string (or falsy if unavailable).

        Returns:
            A dict with metadata keys plus a 'tests' tree; each leaf holds a
            'metrics' dict mapping metric name -> {'current': values}.
        """
        revisions = {}
        for (name, path) in self._port.repository_paths():
            # Prefer an SCM detected for this path; fall back to the host's SCM.
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        # Keep only the metadata entries that actually have a value.
        contents = {'tests': {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for test, metrics in self._results:
            # items() rather than the Python-2-only iteritems(), so this also
            # runs under Python 3.
            for metric_name, iteration_values in metrics.items():
                if not isinstance(iteration_values, list):  # We can't report results without individual measurements.
                    continue

                tests = contents['tests']
                path = test.test_name_without_file_extension().split('/')
                for i in range(0, len(path)):
                    is_last_token = i + 1 == len(path)
                    url = view_source_url('PerformanceTests/' + (test.test_name() if is_last_token else '/'.join(path[0:i + 1])))
                    tests.setdefault(path[i], {'url': url})
                    current_test = tests[path[i]]
                    if is_last_token:
                        # Leaf node: record this metric's iteration values.
                        current_test.setdefault('metrics', {})
                        assert metric_name not in current_test['metrics']
                        current_test['metrics'][metric_name] = {'current': iteration_values}
                    else:
                        # Interior node: descend into its 'tests' subtree.
                        current_test.setdefault('tests', {})
                        tests = current_test['tests']

        return contents
Example No. 8
0
 def _view_source_url_for_test(self, test_path):
     """Return the view-source URL for a layout test path."""
     relative_path = "LayoutTests/%s" % test_path
     return urls.view_source_url(relative_path)
 def _view_source_url_for_test(self, test_path):
     """Map a layout test path to its view-source URL."""
     return urls.view_source_url("LayoutTests/%s" % (test_path,))