def check(self, patch_string, fs=None):
    """Check style in the given patch.

    Args:
        patch_string: The patch text (a unified diff) as a single string.
        fs: Optional FileSystem; a real FileSystem() is created when omitted
            (the parameter exists so tests can inject a mock).
    """
    fs = fs or FileSystem()
    patch_files = DiffParser(patch_string.splitlines()).files

    # If the user uses git, checking subversion config file only once is enough.
    call_only_once = True

    # items(): Python 2's iteritems() does not exist in Python 3.
    for path, diff_file in patch_files.items():
        line_numbers = diff_file.added_or_modified_line_numbers()
        _log.debug('Found %s new or modified lines in: %s' % (len(line_numbers), path))

        if not line_numbers:
            # Raw string: '\s' is an invalid escape in a plain string literal
            # (DeprecationWarning, then SyntaxError in newer Python 3).
            match = re.search(r"\s*png$", path)
            if match and fs.exists(path):
                if call_only_once:
                    self._text_file_reader.process_file(file_path=path, line_numbers=None)
                    # Use the injected filesystem rather than a fresh
                    # FileSystem() so a mocked fs observes the same cwd.
                    cwd = fs.getcwd()
                    detection = SCMDetector(fs, Executive()).detect_scm_system(cwd)
                    if detection.display_name() == "git":
                        call_only_once = False
                continue
            # Don't check files which contain only deleted lines
            # as they can never add style errors. However, mark them as
            # processed so that we count up number of such files.
            self._text_file_reader.count_delete_only_file()
            continue

        self._text_file_reader.process_file(file_path=path, line_numbers=line_numbers)
Esempio n. 2
0
    def _generate_results_dict(self, timestamp, description, platform,
                               builder_name, build_number):
        """Build the JSON-serializable results dictionary for upload.

        Args:
            timestamp: Unused here; kept for interface compatibility.
            description: Optional human-readable description of the run.
            platform: Platform name reported to the dashboard.
            builder_name: Name of the builder that produced the results.
            build_number: Build number string; stored as an int when set.

        Returns:
            A dict of truthy metadata plus a nested 'tests' tree keyed by
            the path components of each test name.
        """
        revisions = {}
        path = self._port.repository_path()
        scm = SCMDetector(
            self._host.filesystem,
            self._host.executive).detect_scm_system(path) or self._host.scm()
        revision = str(scm.commit_position(path))
        revisions['chromium'] = {
            'revision': revision,
            'timestamp': scm.timestamp_of_revision(path, revision)
        }

        meta_info = {
            'description':
            description,
            'buildTime':
            self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform':
            platform,
            'revisions':
            revisions,
            'builderName':
            builder_name,
            'buildNumber':
            int(build_number) if build_number else None
        }

        contents = {'tests': {}}
        # Only truthy metadata values are copied into the upload payload.
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for test, metrics in self._results:
            # items(): Python 2's iteritems() does not exist in Python 3.
            for metric_name, iteration_values in metrics.items():
                # We can't report results without individual measurements.
                if not isinstance(iteration_values, list):
                    continue

                tests = contents['tests']
                path = test.test_name_without_file_extension().split('/')
                for i in range(0, len(path)):
                    is_last_token = i + 1 == len(path)
                    url = view_source_url('PerformanceTests/' +
                                          (test.test_name() if is_last_token
                                           else '/'.join(path[0:i + 1])))
                    tests.setdefault(path[i], {'url': url})
                    current_test = tests[path[i]]
                    if is_last_token:
                        # Leaf node: attach the metric's raw iteration values.
                        current_test.setdefault('metrics', {})
                        assert metric_name not in current_test['metrics']
                        current_test['metrics'][metric_name] = {
                            'current': iteration_values
                        }
                    else:
                        # Interior node: descend into its 'tests' subtree.
                        current_test.setdefault('tests', {})
                        tests = current_test['tests']

        return contents
Esempio n. 3
0
    def check(self, patch_string, fs=None):
        """Check style in the given patch.

        Args:
            patch_string: The patch text (a unified diff) as a single string.
            fs: Optional FileSystem; a real FileSystem() is created when
                omitted (the parameter exists so tests can inject a mock).
        """
        fs = fs or FileSystem()
        patch_files = DiffParser(patch_string.splitlines()).files

        # If the user uses git, checking subversion config file only once is enough.
        call_only_once = True

        # items(): Python 2's iteritems() does not exist in Python 3.
        for path, diff_file in patch_files.items():
            line_numbers = diff_file.added_or_modified_line_numbers()
            _log.debug('Found %s new or modified lines in: %s' %
                       (len(line_numbers), path))

            if not line_numbers:
                # Raw string: '\s' is an invalid escape in a plain string.
                match = re.search(r"\s*png$", path)
                if match and fs.exists(path):
                    if call_only_once:
                        self._text_file_reader.process_file(file_path=path,
                                                            line_numbers=None)
                        # Use the injected filesystem, consistent with the
                        # fs.getcwd() call at the end of this method.
                        cwd = fs.getcwd()
                        detection = SCMDetector(
                            fs, Executive()).detect_scm_system(cwd)
                        if detection.display_name() == "git":
                            call_only_once = False
                    continue
                # Don't check files which contain only deleted lines
                # as they can never add style errors. However, mark them as
                # processed so that we count up number of such files.
                self._text_file_reader.delete_file(path)
                continue

            self._text_file_reader.process_file(file_path=path,
                                                line_numbers=line_numbers)
        self._text_file_reader.do_association_check(fs.getcwd())
Esempio n. 4
0
    def _generate_results_dict(self, timestamp, description, platform,
                               builder_name, build_number):
        """Build the JSON-serializable results dictionary for upload.

        Collects per-repository native revisions, truthy run metadata, and a
        nested 'tests' tree keyed by the components of each metric's path.
        `timestamp` is accepted for interface compatibility but not read here.
        """
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive
                              ).detect_scm_system(path) or self._host.scm()
            revision = scm.native_revision(path)
            revisions[name] = {
                'revision': revision,
                'timestamp': scm.timestamp_of_native_revision(path, revision)
            }

        meta_info = {
            'description':
            description,
            'buildTime':
            self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform':
            platform,
            'revisions':
            revisions,
            'builderName':
            builder_name,
            'buildNumber':
            int(build_number) if build_number else None
        }

        contents = {'tests': {}}
        # Only truthy metadata values are copied into the upload payload.
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for metric in self._results:
            tests = contents['tests']
            path = metric.path()
            for i in range(0, len(path)):
                is_last_token = i + 1 == len(path)
                url = view_source_url('PerformanceTests/' +
                                      '/'.join(path[0:i + 1]))
                test_name = path[i]

                tests.setdefault(test_name, {'url': url})
                current_test = tests[test_name]
                if is_last_token:
                    # Leaf node: point the URL at the actual test file and
                    # attach the metric's grouped iteration values.
                    current_test['url'] = view_source_url(
                        'PerformanceTests/' + metric.test_file_name())
                    current_test.setdefault('metrics', {})
                    assert metric.name() not in current_test['metrics']
                    test_results = {
                        'current': metric.grouped_iteration_values()
                    }
                    if metric.aggregator():
                        test_results['aggregators'] = [metric.aggregator()]
                    current_test['metrics'][metric.name()] = test_results
                else:
                    # Interior node: descend into this entry's subtree.
                    current_test.setdefault('tests', {})
                    tests = current_test['tests']

        return contents
Esempio n. 5
0
    def upload_results(self, results_json_path, start_time, end_time):
        """Upload results JSON to every configured results server host.

        Returns early (after logging an error) when the server host option is
        unset or any required master/builder/build/slave option is missing.
        Each host is attempted independently; failures are logged and skipped.
        """
        if not self._options.results_server_host:
            return
        master_name = self._options.master_name
        builder_name = self._options.builder_name
        build_number = self._options.build_number
        build_slave = self._options.build_slave
        if not master_name or not builder_name or not build_number or not build_slave:
            _log.error("--results-server-host was set, but --master-name, --builder-name, --build-number, or --build-slave was not. Not uploading JSON files.")
            return

        revisions = {}
        # FIXME: This code is duplicated in PerfTestRunner._generate_results_dict
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._port.host.filesystem, self._port.host.executive).detect_scm_system(path) or self._port.host.scm()
            revision = scm.native_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_native_revision(path, revision)}

        for hostname in self._options.results_server_host:
            _log.info("Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s", master_name, builder_name, build_number, build_slave, hostname)

            attrs = [
                ('master', 'build.webkit.org' if master_name == 'webkit.org' else master_name),  # FIXME: Pass in build.webkit.org.
                ('builder_name', builder_name),
                ('build_number', build_number),
                ('build_slave', build_slave),
                ('revisions', json.dumps(revisions)),
                ('start_time', str(start_time)),
                ('end_time', str(end_time)),
            ]

            # 360-second timeout for the multipart upload.
            uploader = FileUploader("http://%s/api/report" % hostname, 360)
            try:
                response = uploader.upload_as_multipart_form_data(self._filesystem, [('results.json', results_json_path)], attrs)
                if not response:
                    _log.error("JSON upload failed; no response returned")
                    continue

                if response.code != 200:
                    _log.error("JSON upload failed, %d: '%s'" % (response.code, response.read()))
                    continue

                response_text = response.read()
                try:
                    response_json = json.loads(response_text)
                except ValueError as error:
                    _log.error("JSON upload failed; failed to parse the response: %s", response_text)
                    continue

                if response_json['status'] != 'OK':
                    _log.error("JSON upload failed, %s: %s", response_json['status'], response_text)
                    continue

                _log.info("JSON uploaded.")
            except Exception as error:
                # Best-effort upload: log and move on to the next host.
                _log.error("Upload failed: %s" % error)
                continue
Esempio n. 6
0
    def _get_svn_revision(self, in_directory):
        """Returns the svn revision for the given directory.

        Args:
          in_directory: The directory where svn is to be run.

        Returns an empty string when no SCM system is detected there.
        """
        detector = SCMDetector(self._filesystem, self._executive)
        detected_scm = detector.detect_scm_system(in_directory)
        if not detected_scm:
            return ""
        return detected_scm.svn_revision(in_directory)
    def _get_svn_revision(self, in_directory):
        """Returns the svn revision for the given directory.

        Args:
          in_directory: The directory where svn is to be run.

        Returns an empty string when no SCM system is detected there.
        """
        scm = SCMDetector(self._filesystem, self._executive).detect_scm_system(in_directory)
        if scm:
            return scm.svn_revision(in_directory)
        return ""
Esempio n. 8
0
    def upload_results(self, results_json_path, start_time, end_time):
        """Upload results JSON to the configured results server.

        Args:
            results_json_path: Path to the results.json file to upload.
            start_time: Run start time; sent as a string attribute.
            end_time: Run end time; sent as a string attribute.

        Returns early (after logging an error) when the server host option is
        unset or any required master/builder/build/slave option is missing.
        """
        hostname = self._options.results_server_host
        if not hostname:
            return
        master_name = self._options.master_name
        builder_name = self._options.builder_name
        build_number = self._options.build_number
        build_slave = self._options.build_slave
        if not master_name or not builder_name or not build_number or not build_slave:
            _log.error("--results-server-host was set, but --master-name, --builder-name, --build-number, or --build-slave was not. Not uploading JSON files.")
            return

        revisions = {}
        # FIXME: This code is duplicated in PerfTestRunner._generate_results_dict
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._port.host.filesystem, self._port.host.executive).detect_scm_system(path) or self._port.host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        _log.info("Uploading JSON files for master: %s builder: %s build: %s slave: %s to %s", master_name, builder_name, build_number, build_slave, hostname)

        attrs = [
            ('master', 'build.webkit.org' if master_name == 'webkit.org' else master_name),  # FIXME: Pass in build.webkit.org.
            ('builder_name', builder_name),
            ('build_number', build_number),
            ('build_slave', build_slave),
            ('revisions', json.dumps(revisions)),
            ('start_time', str(start_time)),
            ('end_time', str(end_time)),
        ]

        # 360-second timeout for the multipart upload.
        uploader = FileUploader("http://%s/api/report" % hostname, 360)
        try:
            response = uploader.upload_as_multipart_form_data(self._filesystem, [('results.json', results_json_path)], attrs)
            if not response:
                _log.error("JSON upload failed; no response returned")
                return

            if response.code != 200:
                _log.error("JSON upload failed, %d: '%s'" % (response.code, response.read()))
                return

            response_text = response.read()
            try:
                response_json = json.loads(response_text)
            # Python 3 syntax: the original "except ValueError, error" is
            # Python 2 only; the bound variable was unused, so it is dropped.
            except ValueError:
                _log.error("JSON upload failed; failed to parse the response: %s", response_text)
                return

            if response_json['status'] != 'OK':
                _log.error("JSON upload failed, %s: %s", response_json['status'], response_text)
                return

            _log.info("JSON uploaded.")
        # The outer try had no handler in this copy (a SyntaxError);
        # restored to mirror the multi-host variant of this uploader.
        except Exception as error:
            _log.error("Upload failed: %s" % error)
def default_out_dir():
    """Guess the directory generated tests should be written into.

    Prefers LayoutTests/fast/canvas/webgl under the checkout root of the
    SCM system containing this script; falls back to the current working
    directory whenever detection or the expected path fails.
    """
    fallback = os.getcwd()
    detector = SCMDetector(FileSystem(), Executive())
    scm = detector.detect_scm_system(os.path.dirname(sys.argv[0]))
    if not scm:
        return fallback
    checkout_root = scm.checkout_root
    if not checkout_root:
        return fallback
    candidate = os.path.join(checkout_root, "LayoutTests/fast/canvas/webgl")
    return candidate if os.path.isdir(candidate) else fallback
Esempio n. 10
0
def get_build_path(fatal=True):
    """Locate and cache the build directory.

    Args:
        fatal: When True, exit the process if no build directory is found;
               otherwise print an error and return None.

    Returns:
        The path to a valid build directory (also cached in the global
        `build_dir`), or None when not found and `fatal` is False.
    """
    global build_dir
    if build_dir:
        return build_dir

    def is_valid_build_directory(path):
        # A build directory contains either a CMake cache or the built
        # WebKitTestRunner binary.
        return os.path.exists(os.path.join(path, 'CMakeCache.txt')) or \
            os.path.exists(os.path.join(path, 'bin/WebKitTestRunner'))

    if len(sys.argv[1:]) > 1 and os.path.exists(sys.argv[-1]) and is_valid_build_directory(sys.argv[-1]):
        return sys.argv[-1]

    # Debian and Ubuntu build both flavours of the library (with gtk2
    # and with gtk3); they use directories build-2.0 and build-3.0 for
    # that, which is not handled by the above cases; we check that the
    # directory where we are called from is a valid build directory,
    # which should handle pretty much all other non-standard cases.
    build_dir = os.getcwd()
    if is_valid_build_directory(build_dir):
        return build_dir

    base_build_dir = top_level_path('WebKitBuild')

    scm = SCMDetector(FileSystem(), Executive()).default_scm()
    if isinstance(scm, Git):
        is_branch_build = scm.read_config('core.webKitBranchBuild', bool)
        if is_branch_build and is_branch_build.lower() == 'true':
            current_branch = scm._current_branch()
            if current_branch != 'master':
                # Reuse current_branch instead of querying the SCM twice.
                base_build_dir = os.path.join(base_build_dir, current_branch)

    global build_types
    for build_type in build_types:
        build_dir = os.path.join(base_build_dir, build_type)
        if is_valid_build_directory(build_dir):
            return build_dir

    # distcheck builds in a directory named _build in the top-level path.
    build_dir = top_level_path("_build")
    if is_valid_build_directory(build_dir):
        return build_dir

    build_dir = top_level_path()
    if is_valid_build_directory(build_dir):
        return build_dir

    build_dir = base_build_dir
    if is_valid_build_directory(build_dir):
        return build_dir

    # Bug fix: clear the cache on failure; otherwise a later call would
    # return the last (invalid) candidate as if it were a valid build dir.
    build_dir = None
    print('Could not determine build directory.')
    if fatal:
        sys.exit(1)
    return None
Esempio n. 11
0
    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        """Build the legacy JSON results dictionary for upload.

        Args:
            timestamp: Unused here; kept for interface compatibility.
            description: Optional run description, stored under 'description'.
            platform: Platform name for the dashboard.
            builder_name: Builder that produced the results.
            build_number: Build number string; stored as an int when set.
        """
        contents = {'tests': {}}
        if description:
            contents['description'] = description

        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': str(revision), 'timestamp': scm.timestamp_of_latest_commit(path, revision)}

        meta_info = {
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        # Only truthy metadata values are copied into the upload payload.
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        # FIXME: Make this function shorter once we've transitioned to use perf.webkit.org.
        # items(): Python 2's iteritems() does not exist in Python 3.
        for metric_full_name, result in self._results.items():
            if not isinstance(result, dict):  # We can't report results without individual measurements.
                continue

            assert metric_full_name.count(':') <= 1
            test_full_name, _, metric = metric_full_name.partition(':')
            if not metric:
                # Infer the metric name from the result's unit when the full
                # name carries no explicit ':metric' suffix.
                metric = {'fps': 'FrameRate', 'runs/s': 'Runs', 'ms': 'Time'}[result['unit']]

            tests = contents['tests']
            path = test_full_name.split('/')
            for i in range(0, len(path)):
                # FIXME: We shouldn't assume HTML extension.
                is_last_token = i + 1 == len(path)
                url = 'http://trac.webkit.org/browser/trunk/PerformanceTests/' + '/'.join(path[0:i + 1])
                if is_last_token:
                    url += '.html'

                tests.setdefault(path[i], {'url': url})
                current_test = tests[path[i]]
                if is_last_token:
                    current_test.setdefault('metrics', {})
                    assert metric not in current_test['metrics']
                    current_test['metrics'][metric] = {'current': result['values']}
                else:
                    current_test.setdefault('tests', {})
                    tests = current_test['tests']

        return contents
Esempio n. 12
0
    def _get_svn_revision(self, in_directory):
        """Returns the svn revision for the given directory.

        Args:
          in_directory: The directory where svn is to be run.

        Returns an empty string when no SCM system is detected there.
        """

        # FIXME: We initialize this here in order to engage the stupid windows hacks :).
        # We can't reuse an existing scm object because the specific directories may
        # be part of other checkouts.
        self._port.host.initialize_scm()
        detected = SCMDetector(self._filesystem, self._executive).detect_scm_system(in_directory)
        return detected.svn_revision(in_directory) if detected else ""
Esempio n. 13
0
    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        """Build the flat (legacy-format) results dictionary for upload.

        Includes self._results under 'results', an optional description,
        one '<name>-revision' entry per repository, and truthy metadata.
        """
        contents = {'results': self._results}
        if description:
            contents['description'] = description

        for (name, path) in self._port.repository_paths():
            detector = SCMDetector(self._host.filesystem, self._host.executive)
            scm = detector.detect_scm_system(path) or self._host.scm()
            contents[name + '-revision'] = scm.svn_revision(path)

        # FIXME: Add --branch or auto-detect the branch we're in
        meta = {
            'timestamp': int(timestamp),
            'branch': self._default_branch,
            'platform': platform,
            'builder-name': builder_name,
            'build-number': int(build_number) if build_number else None,
        }
        contents.update((key, value) for key, value in meta.items() if value)

        return contents
Esempio n. 14
0
    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        """Build the flat (legacy-format) results dictionary for upload.

        Includes self._results under 'results', an optional description,
        one '<name>-revision' entry per repository, and truthy metadata.
        """
        contents = {'results': self._results}
        if description:
            contents['description'] = description
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            contents[name + '-revision'] = scm.svn_revision(path)

        # FIXME: Add --branch or auto-detect the branch we're in
        # Only truthy metadata values are copied into the payload.
        for key, value in {'timestamp': int(timestamp), 'branch': self._default_branch, 'platform': platform,
            'builder-name': builder_name, 'build-number': int(build_number) if build_number else None}.items():
            if value:
                contents[key] = value

        return contents
    def _get_svn_revision(self, in_directory):
        """Returns the svn revision for the given directory.

        Args:
          in_directory: The directory where svn is to be run.

        Returns an empty string when no SCM system is detected there.
        """

        # FIXME: We initialize this here in order to engage the stupid windows hacks :).
        # We can't reuse an existing scm object because the specific directories may
        # be part of other checkouts.
        self._port.host.initialize_scm()
        scm = SCMDetector(self._filesystem, self._executive).detect_scm_system(in_directory)
        if scm:
            return scm.svn_revision(in_directory)
        return ""
Esempio n. 16
0
    def test_detect_scm_system(self):
        """Detection on '/' probes svn then git and yields None for neither."""
        filesystem = MockFileSystem()
        executive = MockExecutive(should_log=True)
        detector = SCMDetector(filesystem, executive)

        with OutputCapture(level=logging.INFO) as captured:
            scm = detector.detect_scm_system('/')
        # The mock executive logs each probe command instead of running it.
        self.assertEqual(
            captured.root.log.getvalue(),
            '''MOCK run_command: ['svn', 'info'], cwd=/
MOCK run_command: ['git', 'rev-parse', '--is-inside-work-tree'], cwd=/
''',
        )

        self.assertIsNone(scm)
Esempio n. 17
0
 def scm_for_path(self, path):
     """Return the SCM system detected at *path* (None when none is found)."""
     # FIXME: make scm() be a wrapper around this, and clean up the way
     # callers call initialize_scm() (to remove patch_directories) and scm().
     if sys.platform == "win32":
         self._engage_awesome_windows_hacks()
     return SCMDetector(self.filesystem,
                        self.executive).detect_scm_system(path)
Esempio n. 18
0
    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        """Build the JSON-serializable results dictionary for upload.

        Collects per-repository svn revisions, truthy run metadata, and a
        nested 'tests' tree keyed by the components of each metric's path.
        `timestamp` is accepted for interface compatibility but not read here.
        """
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        contents = {'tests': {}}
        # Only truthy metadata values are copied into the upload payload.
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for metric in self._results:
            tests = contents['tests']
            path = metric.path()
            for i in range(0, len(path)):
                is_last_token = i + 1 == len(path)
                url = view_source_url('PerformanceTests/' + '/'.join(path[0:i + 1]))
                test_name = path[i]

                # FIXME: This is a temporary workaround for the fact perf dashboard doesn't support renaming tests.
                if test_name == 'Speedometer':
                    test_name = 'DoYouEvenBench'

                tests.setdefault(test_name, {'url': url})
                current_test = tests[test_name]
                if is_last_token:
                    # Leaf node: point the URL at the actual test file.
                    current_test['url'] = view_source_url('PerformanceTests/' + metric.test_file_name())
                    current_test.setdefault('metrics', {})
                    assert metric.name() not in current_test['metrics']
                    test_results = {'current': metric.grouped_iteration_values()}
                    if metric.aggregator():
                        test_results['aggregators'] = [metric.aggregator()]
                    current_test['metrics'][metric.name()] = test_results
                else:
                    # Interior node: descend into this entry's subtree.
                    current_test.setdefault('tests', {})
                    tests = current_test['tests']

        return contents
Esempio n. 19
0
 def __init__(self, tool, options, host=None, scm=None):
     """Store tool/options and detect the SCM at the current directory.

     Args:
       tool: The tool instance driving this object.
       options: Parsed command-line options.
       host: Optional host; defaults to a real SystemHost().
       scm: Optional pre-detected SCM; detected from cwd when omitted.
     """
     self._tool = tool
     self._options = options
     self._host = host or SystemHost()
     self._fs = self._host.filesystem
     self._detector = scm or SCMDetector(
         self._fs, self._host.executive).detect_scm_system(
             self._fs.getcwd())
Esempio n. 20
0
 def __init__(self, file_path, handle_style_error, scm=None, host=None):
     """Store checker inputs and detect the SCM at the current directory.

     Args:
       file_path: Path of the file being checked.
       handle_style_error: Callback invoked for each style error.
       scm: Optional pre-detected SCM; detected from cwd when omitted.
       host: Optional host; defaults to a real SystemHost().
     """
     self._file_path = file_path
     self._handle_style_error = handle_style_error
     self._host = host or SystemHost()
     self._fs = self._host.filesystem
     self._detector = scm or SCMDetector(
         self._fs, self._host.executive).detect_scm_system(
             self._fs.getcwd())
    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        """Build the JSON-serializable results dictionary for upload.

        Args:
            timestamp: Unused here; kept for interface compatibility.
            description: Optional run description.
            platform: Platform name for the dashboard.
            builder_name: Builder that produced the results.
            build_number: Build number string; stored as an int when set.

        Returns:
            A dict of truthy metadata plus a nested "tests" tree keyed by
            the path components of each test name.
        """
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {"revision": revision, "timestamp": scm.timestamp_of_revision(path, revision)}

        meta_info = {
            "description": description,
            "buildTime": self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            "platform": platform,
            "revisions": revisions,
            "builderName": builder_name,
            "buildNumber": int(build_number) if build_number else None,
        }

        contents = {"tests": {}}
        # Only truthy metadata values are copied into the upload payload.
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for test, metrics in self._results:
            # items(): Python 2's iteritems() does not exist in Python 3.
            for metric_name, iteration_values in metrics.items():
                if not isinstance(iteration_values, list):  # We can't report results without individual measurements.
                    continue

                tests = contents["tests"]
                path = test.test_name_without_file_extension().split("/")
                for i in range(0, len(path)):
                    is_last_token = i + 1 == len(path)
                    url = view_source_url(
                        "PerformanceTests/" + (test.test_name() if is_last_token else "/".join(path[0 : i + 1]))
                    )
                    tests.setdefault(path[i], {"url": url})
                    current_test = tests[path[i]]
                    if is_last_token:
                        current_test.setdefault("metrics", {})
                        assert metric_name not in current_test["metrics"]
                        current_test["metrics"][metric_name] = {"current": iteration_values}
                    else:
                        current_test.setdefault("tests", {})
                        tests = current_test["tests"]

        return contents
Esempio n. 22
0
    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        """Build the JSON-serializable results dictionary for upload.

        Args:
            timestamp: Unused here; kept for interface compatibility.
            description: Optional run description.
            platform: Platform name for the dashboard.
            builder_name: Builder that produced the results.
            build_number: Build number string; stored as an int when set.
        """
        revisions = {}
        path = self._port.repository_path()
        scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
        revision = str(scm.commit_position(path))
        revisions['chromium'] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        contents = {'tests': {}}
        # Only truthy metadata values are copied into the upload payload.
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for test, metrics in self._results:
            # items(): Python 2's iteritems() does not exist in Python 3.
            for metric_name, iteration_values in metrics.items():
                if not isinstance(iteration_values, list):  # We can't report results without individual measurements.
                    continue

                tests = contents['tests']
                path = test.test_name_without_file_extension().split('/')
                for i in range(0, len(path)):
                    is_last_token = i + 1 == len(path)
                    url = self.view_source_url(
                        'PerformanceTests/' + (test.test_name() if is_last_token else '/'.join(path[0:i + 1])))
                    tests.setdefault(path[i], {'url': url})
                    current_test = tests[path[i]]
                    if is_last_token:
                        current_test.setdefault('metrics', {})
                        assert metric_name not in current_test['metrics']
                        current_test['metrics'][metric_name] = {'current': iteration_values}
                    else:
                        current_test.setdefault('tests', {})
                        tests = current_test['tests']

        return contents
Esempio n. 23
0
    def test_detect_scm_system(self):
        """Detection on '/' probes svn then git and yields None for neither."""
        filesystem = MockFileSystem()
        executive = MockExecutive(should_log=True)
        detector = SCMDetector(filesystem, executive)

        # The mock executive logs each probe command instead of running it.
        expected_logs = """\
MOCK run_command: ['svn', 'info'], cwd=/
MOCK run_command: ['git', 'rev-parse', '--is-inside-work-tree'], cwd=/
"""
        scm = OutputCapture().assert_outputs(self,
                                             detector.detect_scm_system, ["/"],
                                             expected_logs=expected_logs)
        self.assertIsNone(scm)
Esempio n. 24
0
    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        """Build the JSON-serializable results dictionary for upload.

        Collects per-repository svn revisions, truthy run metadata, and a
        nested 'tests' tree keyed by the components of each metric's path.
        `timestamp` is accepted for interface compatibility but not read here.
        """
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        contents = {'tests': {}}
        # Only truthy metadata values are copied into the upload payload.
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for metric in self._results:
            tests = contents['tests']
            path = metric.path()
            for i in range(0, len(path)):
                is_last_token = i + 1 == len(path)
                url = view_source_url('PerformanceTests/' + (metric.test_file_name() if is_last_token else '/'.join(path[0:i + 1])))
                tests.setdefault(path[i], {'url': url})
                current_test = tests[path[i]]
                if is_last_token:
                    # Leaf node: attach the metric's grouped iteration values.
                    current_test.setdefault('metrics', {})
                    assert metric.name() not in current_test['metrics']
                    current_test['metrics'][metric.name()] = {'current': metric.grouped_iteration_values()}
                else:
                    # Interior node: descend into this entry's subtree.
                    current_test.setdefault('tests', {})
                    tests = current_test['tests']

        return contents
Esempio n. 25
0
 def initialize_scm(self, patch_directories=None):
     """Detect the default SCM system and build a Checkout around it.

     Args:
       patch_directories: Optional directories forwarded to default_scm().
     """
     self._scm = SCMDetector(self.filesystem, self.executive).default_scm(patch_directories)
     self._checkout = Checkout(self.scm())
Esempio n. 26
0
 def initialize_scm(self, patch_directories=None):
     """Detect and cache the default SCM system for this host.

     Args:
       patch_directories: Optional directories forwarded to default_scm().
     """
     # Windows needs environment fixups before any SCM commands run.
     if sys.platform == "win32":
         self._engage_awesome_windows_hacks()
     detector = SCMDetector(self.filesystem, self.executive)
     self._scm = detector.default_scm(patch_directories)
Esempio n. 27
0
 def initialize_scm(self, patch_directories=None):
     """Detect the default SCM system and build a Checkout around it.

     Args:
       patch_directories: Optional directories forwarded to default_scm().
     """
     detector = SCMDetector(self.filesystem, self.executive)
     self._scm = detector.default_scm(patch_directories)
     self._checkout = Checkout(self.scm())
Esempio n. 28
0
 def initialize_scm(self, patch_directories=None):
     """Detect and cache the default SCM system for this host.

     Args:
       patch_directories: Optional directories forwarded to default_scm().
     """
     # Windows needs environment fixups before any SCM commands run.
     if sys.platform == "win32":
         self._engage_awesome_windows_hacks()
     detector = SCMDetector(self.filesystem, self.executive)
     self._scm = detector.default_scm(patch_directories)
Esempio n. 29
0
 def __init__(self, file_path, handle_style_error, filesystem=None, scm=None, platform=None):
     """Store checker inputs and detect the SCM at the current directory.

     Args:
       file_path: Path of the file being checked.
       handle_style_error: Callback invoked for each style error.
       filesystem: Optional filesystem; defaults to a real FileSystem().
       scm: Optional pre-detected SCM; detected from cwd when omitted.
       platform: Optional platform string; defaults to sys.platform.
     """
     self._file_path = file_path
     self._handle_style_error = handle_style_error
     self._fs = filesystem or FileSystem()
     self._detector = scm or SCMDetector(self._fs, Executive()).detect_scm_system(self._fs.getcwd())
     self._platform = platform or sys.platform